/*
 * OMAP2+ common Power & Reset Management (PRM) IP block functions
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * For historical purposes, the API used to configure the PRM
 * interrupt handler refers to it as the "PRCM interrupt."  The
 * underlying registers are located in the PRM on OMAP3/4.
 *
 * XXX This code should eventually be moved to a PRM driver.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk-provider.h>
#include <linux/clk/ti.h>

#include "soc.h"
#include "prm2xxx_3xxx.h"
#include "prm2xxx.h"
#include "prm3xxx.h"
#include "prm44xx.h"
#include "common.h"
#include "clock.h"
#include "cm.h"
#include "control.h"

/*
 * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU
 * status/enable register pairs supported by any SoC this code runs on.
 * XXX this is technically not needed, since
 * omap_prcm_register_chain_handler() could allocate this based on the
 * actual amount of memory needed for the SoC
 */
#define OMAP_PRCM_MAX_NR_PENDING_REG 2

/*
 * prcm_irq_chips: an array of all of the "generic IRQ chips" in use
 * by the PRCM interrupt handler code.  There will be one 'chip' per
 * PRM_{IRQSTATUS,IRQENABLE}_MPU register pair.  (So OMAP3 will have
 * one "chip" and OMAP4 will have two.)  Allocated by
 * omap_prcm_register_chain_handler(); freed by omap_prcm_irq_cleanup().
 */
static struct irq_chip_generic **prcm_irq_chips;

/*
 * prcm_irq_setup: the PRCM IRQ parameters for the hardware the code
 * is currently running on.  Defined and passed by initialization code
 * that calls omap_prcm_register_chain_handler().
*/
static struct omap_prcm_irq_setup *prcm_irq_setup;

/* prm_base: base virtual address of the PRM IP block */
void __iomem *prm_base;

/*
 * prm_features: PRM feature flags for the running SoC.
 * NOTE(review): not referenced in the visible part of this file;
 * presumably set by SoC-specific PRM init code -- verify against callers.
 */
u16 prm_features;

/*
 * prm_ll_data: function pointers to SoC-specific implementations of
 * common PRM functions.  Starts out pointing at null_prm_ll_data (all
 * members NULL), so each accessor below can detect an unregistered
 * implementation and WARN instead of crashing.
 */
static struct prm_ll_data null_prm_ll_data;
static struct prm_ll_data *prm_ll_data = &null_prm_ll_data;

/* Private functions */

/*
 * Move priority events from events to priority_events array.
 *
 * For each pending-event word, the bits flagged in
 * prcm_irq_setup->priority_mask are copied into @priority_events and
 * then cleared (via XOR) from @events, so that the two arrays form a
 * disjoint partition of the pending events into priority and normal
 * sets.
 */
static void omap_prcm_events_filter_priority(unsigned long *events,
	unsigned long *priority_events)
{
	int i;

	for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
		priority_events[i] =
			events[i] & prcm_irq_setup->priority_mask[i];
		/* XOR clears exactly the bits copied above */
		events[i] ^= priority_events[i];
	}
}

/*
 * PRCM Interrupt Handler
 *
 * This is a common handler for the OMAP PRCM interrupts.  Pending
 * interrupts are detected by a call to prcm_pending_events and
 * dispatched accordingly.  Clearing of the wakeup events should be
 * done by the SoC specific individual handlers.
 */
static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG];
	unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG];
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int virtirq;
	/* total number of event bits across all pending registers */
	int nr_irq = prcm_irq_setup->nr_regs * 32;

	/*
	 * If we are suspended, mask all interrupts from PRCM level,
	 * this does not ack them, and they will be pending until we
	 * re-enable the interrupts, at which point the
	 * omap_prcm_irq_handler will be executed again.  The
	 * _save_and_clear_irqen() function must ensure that the PRM
	 * write to disable all IRQs has reached the PRM before
	 * returning, or spurious PRCM interrupts may occur during
	 * suspend.
	 */
	if (prcm_irq_setup->suspended) {
		prcm_irq_setup->save_and_clear_irqen(prcm_irq_setup->saved_mask);
		prcm_irq_setup->suspend_save_flag = true;
	}

	/*
	 * Loop until all pending irqs are handled, since
	 * generic_handle_irq() can cause new irqs to come
	 */
	while (!prcm_irq_setup->suspended) {
		prcm_irq_setup->read_pending_irqs(pending);

		/* No bit set, then all IRQs are handled */
		if (find_first_bit(pending, nr_irq) >= nr_irq)
			break;

		/* splits pending into priority_pending + (remaining) pending */
		omap_prcm_events_filter_priority(pending, priority_pending);

		/*
		 * Loop on all currently pending irqs so that new irqs
		 * cannot starve previously pending irqs
		 */

		/* Serve priority events first */
		for_each_set_bit(virtirq, priority_pending, nr_irq)
			generic_handle_irq(prcm_irq_setup->base_irq + virtirq);

		/* Serve normal events next */
		for_each_set_bit(virtirq, pending, nr_irq)
			generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
	}
	/* ack/eoi/unmask the parent interrupt controller, in that order */
	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);
	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
	chip->irq_unmask(&desc->irq_data);

	prcm_irq_setup->ocp_barrier(); /* avoid spurious IRQs */
}

/* Public functions */

/**
 * omap_prcm_event_to_irq - given a PRCM event name, returns the
 * corresponding IRQ on which the handler should be registered
 * @name: name of the PRCM interrupt bit to look up - see struct omap_prcm_irq
 *
 * Returns the Linux internal IRQ ID corresponding to @name upon success,
 * or -ENOENT upon failure.
168 */ 169 int omap_prcm_event_to_irq(const char *name) 170 { 171 int i; 172 173 if (!prcm_irq_setup || !name) 174 return -ENOENT; 175 176 for (i = 0; i < prcm_irq_setup->nr_irqs; i++) 177 if (!strcmp(prcm_irq_setup->irqs[i].name, name)) 178 return prcm_irq_setup->base_irq + 179 prcm_irq_setup->irqs[i].offset; 180 181 return -ENOENT; 182 } 183 184 /** 185 * omap_prcm_irq_cleanup - reverses memory allocated and other steps 186 * done by omap_prcm_register_chain_handler() 187 * 188 * No return value. 189 */ 190 void omap_prcm_irq_cleanup(void) 191 { 192 unsigned int irq; 193 int i; 194 195 if (!prcm_irq_setup) { 196 pr_err("PRCM: IRQ handler not initialized; cannot cleanup\n"); 197 return; 198 } 199 200 if (prcm_irq_chips) { 201 for (i = 0; i < prcm_irq_setup->nr_regs; i++) { 202 if (prcm_irq_chips[i]) 203 irq_remove_generic_chip(prcm_irq_chips[i], 204 0xffffffff, 0, 0); 205 prcm_irq_chips[i] = NULL; 206 } 207 kfree(prcm_irq_chips); 208 prcm_irq_chips = NULL; 209 } 210 211 kfree(prcm_irq_setup->saved_mask); 212 prcm_irq_setup->saved_mask = NULL; 213 214 kfree(prcm_irq_setup->priority_mask); 215 prcm_irq_setup->priority_mask = NULL; 216 217 if (prcm_irq_setup->xlate_irq) 218 irq = prcm_irq_setup->xlate_irq(prcm_irq_setup->irq); 219 else 220 irq = prcm_irq_setup->irq; 221 irq_set_chained_handler(irq, NULL); 222 223 if (prcm_irq_setup->base_irq > 0) 224 irq_free_descs(prcm_irq_setup->base_irq, 225 prcm_irq_setup->nr_regs * 32); 226 prcm_irq_setup->base_irq = 0; 227 } 228 229 void omap_prcm_irq_prepare(void) 230 { 231 prcm_irq_setup->suspended = true; 232 } 233 234 void omap_prcm_irq_complete(void) 235 { 236 prcm_irq_setup->suspended = false; 237 238 /* If we have not saved the masks, do not attempt to restore */ 239 if (!prcm_irq_setup->suspend_save_flag) 240 return; 241 242 prcm_irq_setup->suspend_save_flag = false; 243 244 /* 245 * Re-enable all masked PRCM irq sources, this causes the PRCM 246 * interrupt to fire immediately if the events were masked 247 * previously 
in the chain handler 248 */ 249 prcm_irq_setup->restore_irqen(prcm_irq_setup->saved_mask); 250 } 251 252 /** 253 * omap_prcm_register_chain_handler - initializes the prcm chained interrupt 254 * handler based on provided parameters 255 * @irq_setup: hardware data about the underlying PRM/PRCM 256 * 257 * Set up the PRCM chained interrupt handler on the PRCM IRQ. Sets up 258 * one generic IRQ chip per PRM interrupt status/enable register pair. 259 * Returns 0 upon success, -EINVAL if called twice or if invalid 260 * arguments are passed, or -ENOMEM on any other error. 261 */ 262 int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup) 263 { 264 int nr_regs; 265 u32 mask[OMAP_PRCM_MAX_NR_PENDING_REG]; 266 int offset, i; 267 struct irq_chip_generic *gc; 268 struct irq_chip_type *ct; 269 unsigned int irq; 270 271 if (!irq_setup) 272 return -EINVAL; 273 274 nr_regs = irq_setup->nr_regs; 275 276 if (prcm_irq_setup) { 277 pr_err("PRCM: already initialized; won't reinitialize\n"); 278 return -EINVAL; 279 } 280 281 if (nr_regs > OMAP_PRCM_MAX_NR_PENDING_REG) { 282 pr_err("PRCM: nr_regs too large\n"); 283 return -EINVAL; 284 } 285 286 prcm_irq_setup = irq_setup; 287 288 prcm_irq_chips = kzalloc(sizeof(void *) * nr_regs, GFP_KERNEL); 289 prcm_irq_setup->saved_mask = kzalloc(sizeof(u32) * nr_regs, GFP_KERNEL); 290 prcm_irq_setup->priority_mask = kzalloc(sizeof(u32) * nr_regs, 291 GFP_KERNEL); 292 293 if (!prcm_irq_chips || !prcm_irq_setup->saved_mask || 294 !prcm_irq_setup->priority_mask) { 295 pr_err("PRCM: kzalloc failed\n"); 296 goto err; 297 } 298 299 memset(mask, 0, sizeof(mask)); 300 301 for (i = 0; i < irq_setup->nr_irqs; i++) { 302 offset = irq_setup->irqs[i].offset; 303 mask[offset >> 5] |= 1 << (offset & 0x1f); 304 if (irq_setup->irqs[i].priority) 305 irq_setup->priority_mask[offset >> 5] |= 306 1 << (offset & 0x1f); 307 } 308 309 if (irq_setup->xlate_irq) 310 irq = irq_setup->xlate_irq(irq_setup->irq); 311 else 312 irq = irq_setup->irq; 313 
irq_set_chained_handler(irq, omap_prcm_irq_handler); 314 315 irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32, 316 0); 317 318 if (irq_setup->base_irq < 0) { 319 pr_err("PRCM: failed to allocate irq descs: %d\n", 320 irq_setup->base_irq); 321 goto err; 322 } 323 324 for (i = 0; i < irq_setup->nr_regs; i++) { 325 gc = irq_alloc_generic_chip("PRCM", 1, 326 irq_setup->base_irq + i * 32, prm_base, 327 handle_level_irq); 328 329 if (!gc) { 330 pr_err("PRCM: failed to allocate generic chip\n"); 331 goto err; 332 } 333 ct = gc->chip_types; 334 ct->chip.irq_ack = irq_gc_ack_set_bit; 335 ct->chip.irq_mask = irq_gc_mask_clr_bit; 336 ct->chip.irq_unmask = irq_gc_mask_set_bit; 337 338 ct->regs.ack = irq_setup->ack + i * 4; 339 ct->regs.mask = irq_setup->mask + i * 4; 340 341 irq_setup_generic_chip(gc, mask[i], 0, IRQ_NOREQUEST, 0); 342 prcm_irq_chips[i] = gc; 343 } 344 345 if (of_have_populated_dt()) { 346 int irq = omap_prcm_event_to_irq("io"); 347 omap_pcs_legacy_init(irq, irq_setup->reconfigure_io_chain); 348 } 349 350 return 0; 351 352 err: 353 omap_prcm_irq_cleanup(); 354 return -ENOMEM; 355 } 356 357 /** 358 * omap2_set_globals_prm - set the PRM base address (for early use) 359 * @prm: PRM base virtual address 360 * 361 * XXX Will be replaced when the PRM/CM drivers are completed. 362 */ 363 void __init omap2_set_globals_prm(void __iomem *prm) 364 { 365 prm_base = prm; 366 } 367 368 /** 369 * prm_read_reset_sources - return the sources of the SoC's last reset 370 * 371 * Return a u32 bitmask representing the reset sources that caused the 372 * SoC to reset. The low-level per-SoC functions called by this 373 * function remap the SoC-specific reset source bits into an 374 * OMAP-common set of reset source bits, defined in 375 * arch/arm/mach-omap2/prm.h. 
* Returns the standardized reset source
 * u32 bitmask from the hardware upon success, or returns (1 <<
 * OMAP_UNKNOWN_RST_SRC_ID_SHIFT) if no low-level read_reset_sources()
 * function was registered.
 */
u32 prm_read_reset_sources(void)
{
	/* default: "unknown reset source" if no SoC hook is registered */
	u32 ret = 1 << OMAP_UNKNOWN_RST_SRC_ID_SHIFT;

	if (prm_ll_data->read_reset_sources)
		ret = prm_ll_data->read_reset_sources();
	else
		WARN_ONCE(1, "prm: %s: no mapping function defined for reset sources\n", __func__);

	return ret;
}

/**
 * prm_was_any_context_lost_old - was device context lost? (old API)
 * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
 * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
 * @idx: CONTEXT register offset
 *
 * Return 1 if any bits were set in the *_CONTEXT_* register
 * identified by (@part, @inst, @idx), which means that some context
 * was lost for that module; otherwise, return 0.  Defaults to true
 * (assume context was lost) if no SoC implementation is registered.
 * XXX Deprecated; callers need to use a less-SoC-dependent way to
 * identify hardware IP blocks.
 */
bool prm_was_any_context_lost_old(u8 part, s16 inst, u16 idx)
{
	bool ret = true;

	if (prm_ll_data->was_any_context_lost_old)
		ret = prm_ll_data->was_any_context_lost_old(part, inst, idx);
	else
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);

	return ret;
}

/**
 * prm_clear_context_loss_flags_old - clear context loss flags (old API)
 * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
 * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
 * @idx: CONTEXT register offset
 *
 * Clear hardware context loss bits for the module identified by
 * (@part, @inst, @idx).  No return value.  XXX Deprecated; callers
 * need to use a less-SoC-dependent way to identify hardware IP
 * blocks.
427 */ 428 void prm_clear_context_loss_flags_old(u8 part, s16 inst, u16 idx) 429 { 430 if (prm_ll_data->clear_context_loss_flags_old) 431 prm_ll_data->clear_context_loss_flags_old(part, inst, idx); 432 else 433 WARN_ONCE(1, "prm: %s: no mapping function defined\n", 434 __func__); 435 } 436 437 /** 438 * omap_prm_assert_hardreset - assert hardreset for an IP block 439 * @shift: register bit shift corresponding to the reset line 440 * @part: PRM partition 441 * @prm_mod: PRM submodule base or instance offset 442 * @offset: register offset 443 * 444 * Asserts a hardware reset line for an IP block. 445 */ 446 int omap_prm_assert_hardreset(u8 shift, u8 part, s16 prm_mod, u16 offset) 447 { 448 if (!prm_ll_data->assert_hardreset) { 449 WARN_ONCE(1, "prm: %s: no mapping function defined\n", 450 __func__); 451 return -EINVAL; 452 } 453 454 return prm_ll_data->assert_hardreset(shift, part, prm_mod, offset); 455 } 456 457 /** 458 * omap_prm_deassert_hardreset - deassert hardreset for an IP block 459 * @shift: register bit shift corresponding to the reset line 460 * @st_shift: reset status bit shift corresponding to the reset line 461 * @part: PRM partition 462 * @prm_mod: PRM submodule base or instance offset 463 * @offset: register offset 464 * @st_offset: status register offset 465 * 466 * Deasserts a hardware reset line for an IP block. 
467 */ 468 int omap_prm_deassert_hardreset(u8 shift, u8 st_shift, u8 part, s16 prm_mod, 469 u16 offset, u16 st_offset) 470 { 471 if (!prm_ll_data->deassert_hardreset) { 472 WARN_ONCE(1, "prm: %s: no mapping function defined\n", 473 __func__); 474 return -EINVAL; 475 } 476 477 return prm_ll_data->deassert_hardreset(shift, st_shift, part, prm_mod, 478 offset, st_offset); 479 } 480 481 /** 482 * omap_prm_is_hardreset_asserted - check the hardreset status for an IP block 483 * @shift: register bit shift corresponding to the reset line 484 * @part: PRM partition 485 * @prm_mod: PRM submodule base or instance offset 486 * @offset: register offset 487 * 488 * Checks if a hardware reset line for an IP block is enabled or not. 489 */ 490 int omap_prm_is_hardreset_asserted(u8 shift, u8 part, s16 prm_mod, u16 offset) 491 { 492 if (!prm_ll_data->is_hardreset_asserted) { 493 WARN_ONCE(1, "prm: %s: no mapping function defined\n", 494 __func__); 495 return -EINVAL; 496 } 497 498 return prm_ll_data->is_hardreset_asserted(shift, part, prm_mod, offset); 499 } 500 501 /** 502 * omap_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain 503 * 504 * Clear any previously-latched I/O wakeup events and ensure that the 505 * I/O wakeup gates are aligned with the current mux settings. 506 * Calls SoC specific I/O chain reconfigure function if available, 507 * otherwise does nothing. 508 */ 509 void omap_prm_reconfigure_io_chain(void) 510 { 511 if (!prcm_irq_setup || !prcm_irq_setup->reconfigure_io_chain) 512 return; 513 514 prcm_irq_setup->reconfigure_io_chain(); 515 } 516 517 /** 518 * omap_prm_reset_system - trigger global SW reset 519 * 520 * Triggers SoC specific global warm reset to reboot the device. 
521 */ 522 void omap_prm_reset_system(void) 523 { 524 if (!prm_ll_data->reset_system) { 525 WARN_ONCE(1, "prm: %s: no mapping function defined\n", 526 __func__); 527 return; 528 } 529 530 prm_ll_data->reset_system(); 531 532 while (1) 533 cpu_relax(); 534 } 535 536 /** 537 * prm_register - register per-SoC low-level data with the PRM 538 * @pld: low-level per-SoC OMAP PRM data & function pointers to register 539 * 540 * Register per-SoC low-level OMAP PRM data and function pointers with 541 * the OMAP PRM common interface. The caller must keep the data 542 * pointed to by @pld valid until it calls prm_unregister() and 543 * it returns successfully. Returns 0 upon success, -EINVAL if @pld 544 * is NULL, or -EEXIST if prm_register() has already been called 545 * without an intervening prm_unregister(). 546 */ 547 int prm_register(struct prm_ll_data *pld) 548 { 549 if (!pld) 550 return -EINVAL; 551 552 if (prm_ll_data != &null_prm_ll_data) 553 return -EEXIST; 554 555 prm_ll_data = pld; 556 557 return 0; 558 } 559 560 /** 561 * prm_unregister - unregister per-SoC low-level data & function pointers 562 * @pld: low-level per-SoC OMAP PRM data & function pointers to unregister 563 * 564 * Unregister per-SoC low-level OMAP PRM data and function pointers 565 * that were previously registered with prm_register(). The 566 * caller may not destroy any of the data pointed to by @pld until 567 * this function returns successfully. Returns 0 upon success, or 568 * -EINVAL if @pld is NULL or if @pld does not match the struct 569 * prm_ll_data * previously registered by prm_register(). 
*/
int prm_unregister(struct prm_ll_data *pld)
{
	if (!pld || prm_ll_data != pld)
		return -EINVAL;

	/* fall back to the all-NULL ops so accessors WARN instead of crash */
	prm_ll_data = &null_prm_ll_data;

	return 0;
}

/*
 * Device-tree compatibles for all PRCM-family IP blocks whose clock
 * providers are initialized by of_prcm_init() below.
 */
static const struct of_device_id omap_prcm_dt_match_table[] = {
	{ .compatible = "ti,am3-prcm" },
	{ .compatible = "ti,am3-scrm" },
	{ .compatible = "ti,am4-prcm" },
	{ .compatible = "ti,am4-scrm" },
	{ .compatible = "ti,dm814-prcm" },
	{ .compatible = "ti,dm814-scrm" },
	{ .compatible = "ti,dm816-prcm" },
	{ .compatible = "ti,dm816-scrm" },
	{ .compatible = "ti,omap2-prcm" },
	{ .compatible = "ti,omap2-scrm" },
	{ .compatible = "ti,omap3-prm" },
	{ .compatible = "ti,omap3-cm" },
	{ .compatible = "ti,omap3-scrm" },
	{ .compatible = "ti,omap4-cm1" },
	{ .compatible = "ti,omap4-prm" },
	{ .compatible = "ti,omap4-cm2" },
	{ .compatible = "ti,omap4-scrm" },
	{ .compatible = "ti,omap5-prm" },
	{ .compatible = "ti,omap5-cm-core-aon" },
	{ .compatible = "ti,omap5-scrm" },
	{ .compatible = "ti,omap5-cm-core" },
	{ .compatible = "ti,dra7-prm" },
	{ .compatible = "ti,dra7-cm-core-aon" },
	{ .compatible = "ti,dra7-cm-core" },
	{ }
};

/*
 * Dummy clock used only to carry the MEMMAP_ADDRESSING flag into the
 * omap2_clk_readl()/omap2_clk_writel() accessors below.
 */
static struct clk_hw_omap memmap_dummy_ck = {
	.flags = MEMMAP_ADDRESSING,
};

/* Read a clock register through the memmap-addressing accessor */
static u32 prm_clk_readl(void __iomem *reg)
{
	return omap2_clk_readl(&memmap_dummy_ck, reg);
}

/* Write a clock register through the memmap-addressing accessor */
static void prm_clk_writel(u32 val, void __iomem *reg)
{
	omap2_clk_writel(val, &memmap_dummy_ck, reg);
}

/* Low-level register accessors handed to the TI clock framework */
static struct ti_clk_ll_ops omap_clk_ll_ops = {
	.clk_readl = prm_clk_readl,
	.clk_writel = prm_clk_writel,
};

/*
 * Map each matching PRCM DT node and register it as a clock provider.
 * Always returns 0.
 */
int __init of_prcm_init(void)
{
	struct device_node *np;
	void __iomem *mem;
	int memmap_index = 0;

	ti_clk_ll_ops = &omap_clk_ll_ops;

	for_each_matching_node(np, omap_prcm_dt_match_table) {
		/*
		 * NOTE(review): of_iomap() result is stored unchecked; a
		 * NULL mapping would be recorded in clk_memmaps -- confirm
		 * downstream users tolerate this.
		 */
		mem = of_iomap(np, 0);
		clk_memmaps[memmap_index] = mem;
		ti_dt_clk_init_provider(np, memmap_index);
		memmap_index++;
	}

	return 0;
}

/* Legacy (non-DT) iomap setup for the OMAP3 clock framework */
void __init omap3_prcm_legacy_iomaps_init(void)
{
	ti_clk_ll_ops = &omap_clk_ll_ops;

	clk_memmaps[TI_CLKM_CM] = cm_base + OMAP3430_IVA2_MOD;
	clk_memmaps[TI_CLKM_PRM] = prm_base + OMAP3430_IVA2_MOD;
	clk_memmaps[TI_CLKM_SCRM] = omap_ctrl_base_get();
}

/* Run the SoC-specific late init hook, if one was registered */
static int __init prm_late_init(void)
{
	if (prm_ll_data->late_init)
		return prm_ll_data->late_init();
	return 0;
}
subsys_initcall(prm_late_init);