// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP2+ common Power & Reset Management (PRM) IP block functions
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Tero Kristo <t-kristo@ti.com>
 *
 * For historical purposes, the API used to configure the PRM
 * interrupt handler refers to it as the "PRCM interrupt." The
 * underlying registers are located in the PRM on OMAP3/4.
 *
 * XXX This code should eventually be moved to a PRM driver.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk-provider.h>
#include <linux/clk/ti.h>

#include "soc.h"
#include "prm2xxx_3xxx.h"
#include "prm2xxx.h"
#include "prm3xxx.h"
#include "prm33xx.h"
#include "prm44xx.h"
#include "prm54xx.h"
#include "prm7xx.h"
#include "prcm43xx.h"
#include "common.h"
#include "clock.h"
#include "cm.h"
#include "control.h"

/*
 * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs
 * XXX this is technically not needed, since
 * omap_prcm_register_chain_handler() could allocate this based on the
 * actual amount of memory needed for the SoC
 */
#define OMAP_PRCM_MAX_NR_PENDING_REG	2

/*
 * prcm_irq_chips: an array of all of the "generic IRQ chips" in use
 * by the PRCM interrupt handler code. There will be one 'chip' per
 * PRM_{IRQSTATUS,IRQENABLE}_MPU register pair. (So OMAP3 will have
 * one "chip" and OMAP4 will have two.)
 */
static struct irq_chip_generic **prcm_irq_chips;

/*
 * prcm_irq_setup: the PRCM IRQ parameters for the hardware the code
 * is currently running on. Defined and passed by initialization code
 * that calls omap_prcm_register_chain_handler().
 */
static struct omap_prcm_irq_setup *prcm_irq_setup;

/* prm_base: base virtual address of the PRM IP block */
struct omap_domain_base prm_base;

u16 prm_features;

/*
 * Platforms that implement different reboot modes can store the requested
 * mode here.
 */
enum reboot_mode prm_reboot_mode;

/*
 * prm_ll_data: function pointers to SoC-specific implementations of
 * common PRM functions
 */
static struct prm_ll_data null_prm_ll_data;
static struct prm_ll_data *prm_ll_data = &null_prm_ll_data;

/* Private functions */

/*
 * Move priority events from events to priority_events array
 */
static void omap_prcm_events_filter_priority(unsigned long *events,
					     unsigned long *priority_events)
{
	int i;

	for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
		priority_events[i] =
			events[i] & prcm_irq_setup->priority_mask[i];
		events[i] ^= priority_events[i];
	}
}
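
/*
 * A worked example of the filter above (illustrative values only):
 * with nr_regs = 1, priority_mask[0] = 0x00000200 and a pending word
 * of events[0] = 0x00000201, the loop leaves priority_events[0] =
 * 0x00000200 and events[0] = 0x00000001, so the priority bit is
 * served from priority_events and cleared from the normal set.
 */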

/*
 * PRCM Interrupt Handler
 *
 * This is a common handler for the OMAP PRCM interrupts. Pending
 * interrupts are detected by a call to the SoC-specific
 * read_pending_irqs hook and dispatched accordingly. Clearing of the
 * wakeup events should be done by the SoC-specific individual
 * handlers.
 */
static void omap_prcm_irq_handler(struct irq_desc *desc)
{
	unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG];
	unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG];
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int virtirq;
	int nr_irq = prcm_irq_setup->nr_regs * 32;

	/*
	 * If we are suspended, mask all interrupts at the PRCM level;
	 * this does not ack them, and they will remain pending until
	 * we re-enable the interrupts, at which point
	 * omap_prcm_irq_handler will be executed again. The
	 * _save_and_clear_irqen() function must ensure that the PRM
	 * write to disable all IRQs has reached the PRM before
	 * returning, or spurious PRCM interrupts may occur during
	 * suspend.
	 */
	if (prcm_irq_setup->suspended) {
		prcm_irq_setup->save_and_clear_irqen(prcm_irq_setup->saved_mask);
		prcm_irq_setup->suspend_save_flag = true;
	}

	/*
	 * Loop until all pending irqs are handled, since
	 * generic_handle_irq() can cause new irqs to be raised
	 */
	while (!prcm_irq_setup->suspended) {
		prcm_irq_setup->read_pending_irqs(pending);

		/* No bits set, so all IRQs have been handled */
		if (find_first_bit(pending, nr_irq) >= nr_irq)
			break;

		omap_prcm_events_filter_priority(pending, priority_pending);

		/*
		 * Loop on all currently pending irqs so that new irqs
		 * cannot starve previously pending irqs
		 */

		/* Serve priority events first */
		for_each_set_bit(virtirq, priority_pending, nr_irq)
			generic_handle_irq(prcm_irq_setup->base_irq + virtirq);

		/* Serve normal events next */
		for_each_set_bit(virtirq, pending, nr_irq)
			generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
	}
	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);
	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
	chip->irq_unmask(&desc->irq_data);

	prcm_irq_setup->ocp_barrier(); /* avoid spurious IRQs */
}

/* Public functions */

/**
 * omap_prcm_event_to_irq - given a PRCM event name, returns the
 * corresponding IRQ on which the handler should be registered
 * @name: name of the PRCM interrupt bit to look up - see struct omap_prcm_irq
 *
 * Returns the Linux internal IRQ ID corresponding to @name upon success,
 * or -ENOENT upon failure.
 */
int omap_prcm_event_to_irq(const char *name)
{
	int i;

	if (!prcm_irq_setup || !name)
		return -ENOENT;

	for (i = 0; i < prcm_irq_setup->nr_irqs; i++)
		if (!strcmp(prcm_irq_setup->irqs[i].name, name))
			return prcm_irq_setup->base_irq +
			       prcm_irq_setup->irqs[i].offset;

	return -ENOENT;
}
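
/*
 * Example (hypothetical caller, not part of this file): code that
 * wants to handle the "io" PRCM event could look up the Linux IRQ
 * number and register a handler for it, e.g.:
 *
 *	int irq = omap_prcm_event_to_irq("io");
 *
 *	if (irq >= 0)
 *		ret = request_irq(irq, my_io_event_handler, 0,
 *				  "prcm_io", NULL);
 *
 * "my_io_event_handler" is a placeholder; the "io" event name is the
 * one this file itself uses in omap_prcm_register_chain_handler().
 */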

/**
 * omap_prcm_irq_cleanup - undo the allocations and other setup done
 * by omap_prcm_register_chain_handler()
 *
 * No return value.
 */
static void omap_prcm_irq_cleanup(void)
{
	unsigned int irq;
	int i;

	if (!prcm_irq_setup) {
		pr_err("PRCM: IRQ handler not initialized; cannot cleanup\n");
		return;
	}

	if (prcm_irq_chips) {
		for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
			if (prcm_irq_chips[i])
				irq_remove_generic_chip(prcm_irq_chips[i],
					0xffffffff, 0, 0);
			prcm_irq_chips[i] = NULL;
		}
		kfree(prcm_irq_chips);
		prcm_irq_chips = NULL;
	}

	kfree(prcm_irq_setup->saved_mask);
	prcm_irq_setup->saved_mask = NULL;

	kfree(prcm_irq_setup->priority_mask);
	prcm_irq_setup->priority_mask = NULL;

	irq = prcm_irq_setup->irq;
	irq_set_chained_handler(irq, NULL);

	if (prcm_irq_setup->base_irq > 0)
		irq_free_descs(prcm_irq_setup->base_irq,
			prcm_irq_setup->nr_regs * 32);
	prcm_irq_setup->base_irq = 0;
}

void omap_prcm_irq_prepare(void)
{
	prcm_irq_setup->suspended = true;
}

void omap_prcm_irq_complete(void)
{
	prcm_irq_setup->suspended = false;

	/* If we have not saved the masks, do not attempt to restore */
	if (!prcm_irq_setup->suspend_save_flag)
		return;

	prcm_irq_setup->suspend_save_flag = false;

	/*
	 * Re-enable all masked PRCM irq sources; this causes the PRCM
	 * interrupt to fire immediately if the events were masked
	 * previously in the chain handler
	 */
	prcm_irq_setup->restore_irqen(prcm_irq_setup->saved_mask);
}
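
/*
 * Usage note (hedged sketch, not code from this file): the platform
 * suspend path is expected to bracket the low-power transition with
 * these two calls, roughly:
 *
 *	omap_prcm_irq_prepare();
 *	... enter the low-power state ...
 *	omap_prcm_irq_complete();
 *
 * so that PRCM interrupts arriving while suspended are only masked
 * (by omap_prcm_irq_handler()) and are re-enabled and delivered once
 * the system is running again.
 */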

/**
 * omap_prcm_register_chain_handler - initializes the prcm chained interrupt
 * handler based on provided parameters
 * @irq_setup: hardware data about the underlying PRM/PRCM
 *
 * Set up the PRCM chained interrupt handler on the PRCM IRQ. Sets up
 * one generic IRQ chip per PRM interrupt status/enable register pair.
 * Returns 0 upon success, -EINVAL if called twice or if invalid
 * arguments are passed, or -ENOMEM on any other error.
 */
int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
{
	int nr_regs;
	u32 mask[OMAP_PRCM_MAX_NR_PENDING_REG];
	int offset, i, irq;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	if (!irq_setup)
		return -EINVAL;

	nr_regs = irq_setup->nr_regs;

	if (prcm_irq_setup) {
		pr_err("PRCM: already initialized; won't reinitialize\n");
		return -EINVAL;
	}

	if (nr_regs > OMAP_PRCM_MAX_NR_PENDING_REG) {
		pr_err("PRCM: nr_regs too large\n");
		return -EINVAL;
	}

	prcm_irq_setup = irq_setup;

	prcm_irq_chips = kcalloc(nr_regs, sizeof(void *), GFP_KERNEL);
	prcm_irq_setup->saved_mask = kcalloc(nr_regs, sizeof(u32),
					     GFP_KERNEL);
	prcm_irq_setup->priority_mask = kcalloc(nr_regs, sizeof(u32),
						GFP_KERNEL);

	if (!prcm_irq_chips || !prcm_irq_setup->saved_mask ||
	    !prcm_irq_setup->priority_mask)
		goto err;

	memset(mask, 0, sizeof(mask));

	for (i = 0; i < irq_setup->nr_irqs; i++) {
		offset = irq_setup->irqs[i].offset;
		mask[offset >> 5] |= 1 << (offset & 0x1f);
		if (irq_setup->irqs[i].priority)
			irq_setup->priority_mask[offset >> 5] |=
				1 << (offset & 0x1f);
	}

	irq = irq_setup->irq;
	irq_set_chained_handler(irq, omap_prcm_irq_handler);

	irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32,
					      0);

	if (irq_setup->base_irq < 0) {
		pr_err("PRCM: failed to allocate irq descs: %d\n",
		       irq_setup->base_irq);
		goto err;
	}

	for (i = 0; i < irq_setup->nr_regs; i++) {
		gc = irq_alloc_generic_chip("PRCM", 1,
			irq_setup->base_irq + i * 32, prm_base.va,
			handle_level_irq);

		if (!gc) {
			pr_err("PRCM: failed to allocate generic chip\n");
			goto err;
		}
		ct = gc->chip_types;
		ct->chip.irq_ack = irq_gc_ack_set_bit;
		ct->chip.irq_mask = irq_gc_mask_clr_bit;
		ct->chip.irq_unmask = irq_gc_mask_set_bit;

		ct->regs.ack = irq_setup->ack + i * 4;
		ct->regs.mask = irq_setup->mask + i * 4;

		irq_setup_generic_chip(gc, mask[i], 0, IRQ_NOREQUEST, 0);
		prcm_irq_chips[i] = gc;
	}

	irq = omap_prcm_event_to_irq("io");
	omap_pcs_legacy_init(irq, irq_setup->reconfigure_io_chain);

	return 0;

err:
	omap_prcm_irq_cleanup();
	return -ENOMEM;
}
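
/*
 * Sketch of the SoC-side setup this function expects (all names and
 * values below are hypothetical placeholders; the real tables live in
 * the per-SoC prm*xxx.c files). A SoC describes its
 * PRM_IRQSTATUS/IRQENABLE_MPU layout and event bits in a struct
 * omap_prcm_irq_setup and hands it to this function, e.g.:
 *
 *	static struct omap_prcm_irq mysoc_prcm_irqs[] = {
 *		{ .name = "wkup", .offset = 0, .priority = false },
 *		{ .name = "io",   .offset = 9, .priority = true },
 *	};
 *
 *	static struct omap_prcm_irq_setup mysoc_prcm_irq_setup = {
 *		.ack			= MYSOC_PRM_IRQSTATUS_MPU_OFFSET,
 *		.mask			= MYSOC_PRM_IRQENABLE_MPU_OFFSET,
 *		.nr_regs		= 1,
 *		.irqs			= mysoc_prcm_irqs,
 *		.nr_irqs		= ARRAY_SIZE(mysoc_prcm_irqs),
 *		.irq			= MYSOC_PRCM_MPU_IRQ,
 *		.read_pending_irqs	= mysoc_prm_read_pending_irqs,
 *		.ocp_barrier		= mysoc_prm_ocp_barrier,
 *		.save_and_clear_irqen	= mysoc_prm_save_and_clear_irqen,
 *		.restore_irqen		= mysoc_prm_restore_irqen,
 *	};
 *
 *	omap_prcm_register_chain_handler(&mysoc_prcm_irq_setup);
 */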

/**
 * prm_was_any_context_lost_old - was device context lost? (old API)
 * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
 * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
 * @idx: CONTEXT register offset
 *
 * Return true if any bits were set in the *_CONTEXT_* register
 * identified by (@part, @inst, @idx), which means that some context
 * was lost for that module; otherwise, return false. XXX Deprecated;
 * callers need to use a less-SoC-dependent way to identify hardware
 * IP blocks.
 */
bool prm_was_any_context_lost_old(u8 part, s16 inst, u16 idx)
{
	bool ret = true;

	if (prm_ll_data->was_any_context_lost_old)
		ret = prm_ll_data->was_any_context_lost_old(part, inst, idx);
	else
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);

	return ret;
}

/**
 * prm_clear_context_loss_flags_old - clear context loss flags (old API)
 * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
 * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
 * @idx: CONTEXT register offset
 *
 * Clear hardware context loss bits for the module identified by
 * (@part, @inst, @idx). No return value. XXX Deprecated; callers
 * need to use a less-SoC-dependent way to identify hardware IP
 * blocks.
 */
void prm_clear_context_loss_flags_old(u8 part, s16 inst, u16 idx)
{
	if (prm_ll_data->clear_context_loss_flags_old)
		prm_ll_data->clear_context_loss_flags_old(part, inst, idx);
	else
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
}
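
/*
 * Typical (hypothetical) caller pattern for the two old-API helpers
 * above: check whether the module lost context across a power
 * transition, restore it if so, then clear the flags so the next
 * check starts from a clean state:
 *
 *	if (prm_was_any_context_lost_old(part, inst, idx)) {
 *		... restore the module's context ...
 *		prm_clear_context_loss_flags_old(part, inst, idx);
 *	}
 */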

/**
 * omap_prm_assert_hardreset - assert hardreset for an IP block
 * @shift: register bit shift corresponding to the reset line
 * @part: PRM partition
 * @prm_mod: PRM submodule base or instance offset
 * @offset: register offset
 *
 * Asserts a hardware reset line for an IP block.
 */
int omap_prm_assert_hardreset(u8 shift, u8 part, s16 prm_mod, u16 offset)
{
	if (!prm_ll_data->assert_hardreset) {
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
		return -EINVAL;
	}

	return prm_ll_data->assert_hardreset(shift, part, prm_mod, offset);
}

/**
 * omap_prm_deassert_hardreset - deassert hardreset for an IP block
 * @shift: register bit shift corresponding to the reset line
 * @st_shift: reset status bit shift corresponding to the reset line
 * @part: PRM partition
 * @prm_mod: PRM submodule base or instance offset
 * @offset: register offset
 * @st_offset: status register offset
 *
 * Deasserts a hardware reset line for an IP block.
 */
int omap_prm_deassert_hardreset(u8 shift, u8 st_shift, u8 part, s16 prm_mod,
				u16 offset, u16 st_offset)
{
	if (!prm_ll_data->deassert_hardreset) {
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
		return -EINVAL;
	}

	return prm_ll_data->deassert_hardreset(shift, st_shift, part, prm_mod,
					       offset, st_offset);
}

/**
 * omap_prm_is_hardreset_asserted - check the hardreset status for an IP block
 * @shift: register bit shift corresponding to the reset line
 * @part: PRM partition
 * @prm_mod: PRM submodule base or instance offset
 * @offset: register offset
 *
 * Checks whether a hardware reset line for an IP block is currently asserted.
 */
int omap_prm_is_hardreset_asserted(u8 shift, u8 part, s16 prm_mod, u16 offset)
{
	if (!prm_ll_data->is_hardreset_asserted) {
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
		return -EINVAL;
	}

	return prm_ll_data->is_hardreset_asserted(shift, part, prm_mod, offset);
}
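
/*
 * Hedged example of how the three hardreset helpers above fit
 * together (argument values are placeholders, not real register
 * data): assert the line only if it is not already asserted, do any
 * configuration while the block is held in reset, then release it:
 *
 *	if (omap_prm_is_hardreset_asserted(shift, part, prm_mod, offset) == 0)
 *		omap_prm_assert_hardreset(shift, part, prm_mod, offset);
 *
 *	... configure the IP block while it is held in reset ...
 *
 *	omap_prm_deassert_hardreset(shift, st_shift, part, prm_mod,
 *				    offset, st_offset);
 */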

/**
 * omap_prm_reset_system - trigger global SW reset
 *
 * Triggers SoC specific global warm reset to reboot the device.
 */
void omap_prm_reset_system(void)
{
	if (!prm_ll_data->reset_system) {
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
		return;
	}

	prm_ll_data->reset_system();

	while (1) {
		cpu_relax();
		wfe();
	}
}

/**
 * omap_prm_clear_mod_irqs - clear wake-up events from PRCM interrupt
 * @module: PRM module to clear wakeups from
 * @regs: register to clear
 * @wkst_mask: wkst bits to clear
 *
 * Clears any wakeup events for the module and register set defined.
 * Uses SoC specific implementation to do the actual wakeup status
 * clearing.
 */
int omap_prm_clear_mod_irqs(s16 module, u8 regs, u32 wkst_mask)
{
	if (!prm_ll_data->clear_mod_irqs) {
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
		return -EINVAL;
	}

	return prm_ll_data->clear_mod_irqs(module, regs, wkst_mask);
}

/**
 * omap_prm_vp_check_txdone - check voltage processor TX done status
 * @vp_id: unique VP instance ID
 *
 * Checks if voltage processor transmission has been completed.
 * Returns non-zero if a transmission has completed, 0 otherwise.
 */
u32 omap_prm_vp_check_txdone(u8 vp_id)
{
	if (!prm_ll_data->vp_check_txdone) {
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
		return 0;
	}

	return prm_ll_data->vp_check_txdone(vp_id);
}

/**
 * omap_prm_vp_clear_txdone - clear voltage processor TX done status
 * @vp_id: unique VP instance ID
 *
 * Clears the status bit for a completed voltage processor transmission,
 * as reported by omap_prm_vp_check_txdone().
 */
void omap_prm_vp_clear_txdone(u8 vp_id)
{
	if (!prm_ll_data->vp_clear_txdone) {
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
		return;
	}

	prm_ll_data->vp_clear_txdone(vp_id);
}
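
/*
 * Hedged sketch of how a voltage-scaling caller might use the two VP
 * helpers above (the loop bound and delay are illustrative
 * placeholders, not values from this file): wait for the
 * transmission-done status, then clear it for the next transfer:
 *
 *	int timeout = 0;
 *
 *	while (timeout++ < VP_TXDONE_TIMEOUT) {
 *		if (omap_prm_vp_check_txdone(vp_id))
 *			break;
 *		udelay(1);
 *	}
 *	omap_prm_vp_clear_txdone(vp_id);
 */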

/**
 * prm_register - register per-SoC low-level data with the PRM
 * @pld: low-level per-SoC OMAP PRM data & function pointers to register
 *
 * Register per-SoC low-level OMAP PRM data and function pointers with
 * the OMAP PRM common interface. The caller must keep the data
 * pointed to by @pld valid until it calls prm_unregister() and
 * it returns successfully. Returns 0 upon success, -EINVAL if @pld
 * is NULL, or -EEXIST if prm_register() has already been called
 * without an intervening prm_unregister().
 */
int prm_register(struct prm_ll_data *pld)
{
	if (!pld)
		return -EINVAL;

	if (prm_ll_data != &null_prm_ll_data)
		return -EEXIST;

	prm_ll_data = pld;

	return 0;
}

/**
 * prm_unregister - unregister per-SoC low-level data & function pointers
 * @pld: low-level per-SoC OMAP PRM data & function pointers to unregister
 *
 * Unregister per-SoC low-level OMAP PRM data and function pointers
 * that were previously registered with prm_register(). The
 * caller may not destroy any of the data pointed to by @pld until
 * this function returns successfully. Returns 0 upon success, or
 * -EINVAL if @pld is NULL or if @pld does not match the struct
 * prm_ll_data * previously registered by prm_register().
 */
int prm_unregister(struct prm_ll_data *pld)
{
	if (!pld || prm_ll_data != pld)
		return -EINVAL;

	prm_ll_data = &null_prm_ll_data;

	return 0;
}
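
/*
 * Sketch of the per-SoC registration this pair of functions expects
 * (names are hypothetical; the real data lives in the per-SoC
 * prm*xxx.c files). A SoC PRM file fills a struct prm_ll_data with
 * its implementations, e.g.:
 *
 *	static struct prm_ll_data mysoc_prm_ll_data = {
 *		.assert_hardreset	= mysoc_prm_assert_hardreset,
 *		.deassert_hardreset	= mysoc_prm_deassert_hardreset,
 *		.is_hardreset_asserted	= mysoc_prm_is_hardreset_asserted,
 *		.reset_system		= mysoc_prm_reset_system,
 *	};
 *
 * and then registers it from its init code with
 * prm_register(&mysoc_prm_ll_data), pairing it with prm_unregister()
 * on teardown.
 */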

#ifdef CONFIG_ARCH_OMAP2
static struct omap_prcm_init_data omap2_prm_data __initdata = {
	.index = TI_CLKM_PRM,
	.init = omap2xxx_prm_init,
};
#endif

#ifdef CONFIG_ARCH_OMAP3
static struct omap_prcm_init_data omap3_prm_data __initdata = {
	.index = TI_CLKM_PRM,
	.init = omap3xxx_prm_init,

	/*
	 * The IVA2 module offset is negative; bias the prm_base
	 * address by its absolute value so that accesses through the
	 * negative offset still land at a positive offset within the
	 * mapping.
	 */
	.offset = -OMAP3430_IVA2_MOD,
};
#endif

#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_TI81XX)
static struct omap_prcm_init_data am3_prm_data __initdata = {
	.index = TI_CLKM_PRM,
	.init = am33xx_prm_init,
};
#endif

#ifdef CONFIG_SOC_TI81XX
static struct omap_prcm_init_data dm814_pllss_data __initdata = {
	.index = TI_CLKM_PLLSS,
	.init = am33xx_prm_init,
};
#endif

#ifdef CONFIG_ARCH_OMAP4
static struct omap_prcm_init_data omap4_prm_data __initdata = {
	.index = TI_CLKM_PRM,
	.init = omap44xx_prm_init,
	.device_inst_offset = OMAP4430_PRM_DEVICE_INST,
	.flags = PRM_HAS_IO_WAKEUP | PRM_HAS_VOLTAGE,
};
#endif

#ifdef CONFIG_SOC_OMAP5
static struct omap_prcm_init_data omap5_prm_data __initdata = {
	.index = TI_CLKM_PRM,
	.init = omap44xx_prm_init,
	.device_inst_offset = OMAP54XX_PRM_DEVICE_INST,
	.flags = PRM_HAS_IO_WAKEUP | PRM_HAS_VOLTAGE,
};
#endif

#ifdef CONFIG_SOC_DRA7XX
static struct omap_prcm_init_data dra7_prm_data __initdata = {
	.index = TI_CLKM_PRM,
	.init = omap44xx_prm_init,
	.device_inst_offset = DRA7XX_PRM_DEVICE_INST,
	.flags = PRM_HAS_IO_WAKEUP,
};
#endif

#ifdef CONFIG_SOC_AM43XX
static struct omap_prcm_init_data am4_prm_data __initdata = {
	.index = TI_CLKM_PRM,
	.init = omap44xx_prm_init,
	.device_inst_offset = AM43XX_PRM_DEVICE_INST,
	.flags = PRM_HAS_IO_WAKEUP,
};
#endif

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
static struct omap_prcm_init_data scrm_data __initdata = {
	.index = TI_CLKM_SCRM,
};
#endif

static const struct of_device_id omap_prcm_dt_match_table[] __initconst = {
#ifdef CONFIG_SOC_AM33XX
	{ .compatible = "ti,am3-prcm", .data = &am3_prm_data },
#endif
#ifdef CONFIG_SOC_AM43XX
	{ .compatible = "ti,am4-prcm", .data = &am4_prm_data },
#endif
#ifdef CONFIG_SOC_TI81XX
	{ .compatible = "ti,dm814-prcm", .data = &am3_prm_data },
	{ .compatible = "ti,dm814-pllss", .data = &dm814_pllss_data },
	{ .compatible = "ti,dm816-prcm", .data = &am3_prm_data },
#endif
#ifdef CONFIG_ARCH_OMAP2
	{ .compatible = "ti,omap2-prcm", .data = &omap2_prm_data },
#endif
#ifdef CONFIG_ARCH_OMAP3
	{ .compatible = "ti,omap3-prm", .data = &omap3_prm_data },
#endif
#ifdef CONFIG_ARCH_OMAP4
	{ .compatible = "ti,omap4-prm", .data = &omap4_prm_data },
	{ .compatible = "ti,omap4-scrm", .data = &scrm_data },
#endif
#ifdef CONFIG_SOC_OMAP5
	{ .compatible = "ti,omap5-prm", .data = &omap5_prm_data },
	{ .compatible = "ti,omap5-scrm", .data = &scrm_data },
#endif
#ifdef CONFIG_SOC_DRA7XX
	{ .compatible = "ti,dra7-prm", .data = &dra7_prm_data },
#endif
	{ }
};

/**
 * omap2_prm_base_init - initialize iomappings for the PRM driver
 *
 * Detects and initializes the iomappings for the PRM driver, based
 * on the DT data. Returns 0 on success, negative error value
 * otherwise.
 */
static int __init omap2_prm_base_init(void)
{
	struct device_node *np;
	const struct of_device_id *match;
	struct omap_prcm_init_data *data;
	struct resource res;
	int ret;

	for_each_matching_node_and_match(np, omap_prcm_dt_match_table, &match) {
		data = (struct omap_prcm_init_data *)match->data;

		ret = of_address_to_resource(np, 0, &res);
		if (ret) {
			of_node_put(np);
			return ret;
		}

		data->mem = ioremap(res.start, resource_size(&res));

		if (data->index == TI_CLKM_PRM) {
			prm_base.va = data->mem + data->offset;
			prm_base.pa = res.start + data->offset;
		}

		data->np = np;

		if (data->init)
			data->init(data);
	}

	return 0;
}

int __init omap2_prcm_base_init(void)
{
	int ret;

	ret = omap2_prm_base_init();
	if (ret)
		return ret;

	return omap2_cm_base_init();
}

/**
 * omap_prcm_init - low level init for the PRCM drivers
 *
 * Initializes the low level clock infrastructure for PRCM drivers.
 * Returns 0 on success, negative error value on failure.
 */
int __init omap_prcm_init(void)
{
	struct device_node *np;
	const struct of_device_id *match;
	const struct omap_prcm_init_data *data;
	int ret;

	for_each_matching_node_and_match(np, omap_prcm_dt_match_table, &match) {
		data = match->data;

		ret = omap2_clk_provider_init(np, data->index, NULL, data->mem);
		if (ret) {
			of_node_put(np);
			return ret;
		}
	}

	omap_cm_init();

	return 0;
}

static int __init prm_late_init(void)
{
	if (prm_ll_data->late_init)
		return prm_ll_data->late_init();
	return 0;
}
subsys_initcall(prm_late_init);