/*
 * OMAP2+ common Power & Reset Management (PRM) IP block functions
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * For historical reasons, the API used to configure the PRM
 * interrupt handler refers to it as the "PRCM interrupt."  The
 * underlying registers are located in the PRM on OMAP3/4.
 *
 * XXX This code should eventually be moved to a PRM driver.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk-provider.h>
#include <linux/clk/ti.h>

#include "soc.h"
#include "prm2xxx_3xxx.h"
#include "prm2xxx.h"
#include "prm3xxx.h"
#include "prm44xx.h"
#include "common.h"
#include "clock.h"

/*
 * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs
 * XXX this is technically not needed, since
 * omap_prcm_register_chain_handler() could allocate this based on the
 * actual amount of memory needed for the SoC
 */
#define OMAP_PRCM_MAX_NR_PENDING_REG		2

/*
 * prcm_irq_chips: an array of all of the "generic IRQ chips" in use
 * by the PRCM interrupt handler code.  There will be one 'chip' per
 * PRM_{IRQSTATUS,IRQENABLE}_MPU register pair.  (So OMAP3 will have
 * one "chip" and OMAP4 will have two.)
 */
static struct irq_chip_generic **prcm_irq_chips;

/*
 * prcm_irq_setup: the PRCM IRQ parameters for the hardware the code
 * is currently running on.  Defined and passed by initialization code
 * that calls omap_prcm_register_chain_handler().
 */
static struct omap_prcm_irq_setup *prcm_irq_setup;

/* prm_base: base virtual address of the PRM IP block */
void __iomem *prm_base;

u16 prm_features;

/*
 * prm_ll_data: function pointers to SoC-specific implementations of
 * common PRM functions
 */
static struct prm_ll_data null_prm_ll_data;
static struct prm_ll_data *prm_ll_data = &null_prm_ll_data;

/* Private functions */

/*
 * Move priority events from the events array to the priority_events array
 */
static void omap_prcm_events_filter_priority(unsigned long *events,
	unsigned long *priority_events)
{
	int i;

	for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
		priority_events[i] =
			events[i] & prcm_irq_setup->priority_mask[i];
		events[i] ^= priority_events[i];
	}
}
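
/*
 * Worked example for the filter above (the numbers are illustrative
 * only, not taken from any SoC data): if events[0] is 0x30 (bits 4
 * and 5 pending) and priority_mask[0] is 0x20, the function leaves
 * priority_events[0] = 0x20 and events[0] = 0x10, so the chain
 * handler below dispatches bit 5 before bit 4.
 */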

/*
 * PRCM Interrupt Handler
 *
 * This is a common handler for the OMAP PRCM interrupts.  Pending
 * interrupts are detected by the SoC-specific read_pending_irqs()
 * hook and dispatched accordingly.  Clearing of the wakeup events
 * should be done by the SoC-specific individual handlers.
 */
static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG];
	unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG];
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int virtirq;
	int nr_irq = prcm_irq_setup->nr_regs * 32;

	/*
	 * If we are suspended, mask all interrupts from PRCM level,
	 * this does not ack them, and they will be pending until we
	 * re-enable the interrupts, at which point the
	 * omap_prcm_irq_handler will be executed again.  The
	 * _save_and_clear_irqen() function must ensure that the PRM
	 * write to disable all IRQs has reached the PRM before
	 * returning, or spurious PRCM interrupts may occur during
	 * suspend.
	 */
	if (prcm_irq_setup->suspended) {
		prcm_irq_setup->save_and_clear_irqen(prcm_irq_setup->saved_mask);
		prcm_irq_setup->suspend_save_flag = true;
	}

	/*
	 * Loop until all pending irqs are handled, since
	 * generic_handle_irq() can cause new irqs to come
	 */
	while (!prcm_irq_setup->suspended) {
		prcm_irq_setup->read_pending_irqs(pending);

		/* If no bits are set, all IRQs have been handled */
		if (find_first_bit(pending, nr_irq) >= nr_irq)
			break;

		omap_prcm_events_filter_priority(pending, priority_pending);

		/*
		 * Loop on all currently pending irqs so that new irqs
		 * cannot starve previously pending irqs
		 */

		/* Serve priority events first */
		for_each_set_bit(virtirq, priority_pending, nr_irq)
			generic_handle_irq(prcm_irq_setup->base_irq + virtirq);

		/* Serve normal events next */
		for_each_set_bit(virtirq, pending, nr_irq)
			generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
	}
	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);
	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
	chip->irq_unmask(&desc->irq_data);

	prcm_irq_setup->ocp_barrier(); /* avoid spurious IRQs */
}

/* Public functions */

/**
 * omap_prcm_event_to_irq - given a PRCM event name, returns the
 * corresponding IRQ on which the handler should be registered
 * @name: name of the PRCM interrupt bit to look up - see struct omap_prcm_irq
 *
 * Returns the Linux internal IRQ ID corresponding to @name upon success,
 * or -ENOENT upon failure.
 */
int omap_prcm_event_to_irq(const char *name)
{
	int i;

	if (!prcm_irq_setup || !name)
		return -ENOENT;

	for (i = 0; i < prcm_irq_setup->nr_irqs; i++)
		if (!strcmp(prcm_irq_setup->irqs[i].name, name))
			return prcm_irq_setup->base_irq +
				prcm_irq_setup->irqs[i].offset;

	return -ENOENT;
}
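
/*
 * Usage sketch (illustrative only; the "io" event name is taken from
 * the lookup done later in this file, but the handler, flags and
 * device cookie below are assumptions, not code from this file):
 *
 *	int irq = omap_prcm_event_to_irq("io");
 *
 *	if (irq >= 0)
 *		ret = request_irq(irq, my_io_wakeup_handler, IRQF_SHARED,
 *				  "prcm_io", my_dev);
 */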

/**
 * omap_prcm_irq_cleanup - reverses the memory allocation and other steps
 * done by omap_prcm_register_chain_handler()
 *
 * No return value.
 */
void omap_prcm_irq_cleanup(void)
{
	int i;

	if (!prcm_irq_setup) {
		pr_err("PRCM: IRQ handler not initialized; cannot cleanup\n");
		return;
	}

	if (prcm_irq_chips) {
		for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
			if (prcm_irq_chips[i])
				irq_remove_generic_chip(prcm_irq_chips[i],
					0xffffffff, 0, 0);
			prcm_irq_chips[i] = NULL;
		}
		kfree(prcm_irq_chips);
		prcm_irq_chips = NULL;
	}

	kfree(prcm_irq_setup->saved_mask);
	prcm_irq_setup->saved_mask = NULL;

	kfree(prcm_irq_setup->priority_mask);
	prcm_irq_setup->priority_mask = NULL;

	irq_set_chained_handler(prcm_irq_setup->irq, NULL);

	if (prcm_irq_setup->base_irq > 0)
		irq_free_descs(prcm_irq_setup->base_irq,
			prcm_irq_setup->nr_regs * 32);
	prcm_irq_setup->base_irq = 0;
}

void omap_prcm_irq_prepare(void)
{
	prcm_irq_setup->suspended = true;
}

void omap_prcm_irq_complete(void)
{
	prcm_irq_setup->suspended = false;

	/* If we have not saved the masks, do not attempt to restore */
	if (!prcm_irq_setup->suspend_save_flag)
		return;

	prcm_irq_setup->suspend_save_flag = false;

	/*
	 * Re-enable all masked PRCM irq sources; this causes the PRCM
	 * interrupt to fire immediately if the events were masked
	 * previously in the chain handler
	 */
	prcm_irq_setup->restore_irqen(prcm_irq_setup->saved_mask);
}
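
/*
 * Illustrative pairing of the two helpers above (a sketch only; the
 * calling context is an assumption based on how a suspend path would
 * typically use them, not code taken from this file):
 *
 *	omap_prcm_irq_prepare();     (before entering suspend)
 *	(system suspends and later resumes)
 *	omap_prcm_irq_complete();    (restores the saved PRM IRQ masks)
 */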

/**
 * omap_prcm_register_chain_handler - initializes the prcm chained interrupt
 * handler based on provided parameters
 * @irq_setup: hardware data about the underlying PRM/PRCM
 *
 * Set up the PRCM chained interrupt handler on the PRCM IRQ.  Sets up
 * one generic IRQ chip per PRM interrupt status/enable register pair.
 * Returns 0 upon success, -EINVAL if called twice or if invalid
 * arguments are passed, or -ENOMEM on any other error.
 */
int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
{
	int nr_regs;
	u32 mask[OMAP_PRCM_MAX_NR_PENDING_REG];
	int offset, i;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	if (!irq_setup)
		return -EINVAL;

	nr_regs = irq_setup->nr_regs;

	if (prcm_irq_setup) {
		pr_err("PRCM: already initialized; won't reinitialize\n");
		return -EINVAL;
	}

	if (nr_regs > OMAP_PRCM_MAX_NR_PENDING_REG) {
		pr_err("PRCM: nr_regs too large\n");
		return -EINVAL;
	}

	prcm_irq_setup = irq_setup;

	prcm_irq_chips = kzalloc(sizeof(void *) * nr_regs, GFP_KERNEL);
	prcm_irq_setup->saved_mask = kzalloc(sizeof(u32) * nr_regs, GFP_KERNEL);
	prcm_irq_setup->priority_mask = kzalloc(sizeof(u32) * nr_regs,
		GFP_KERNEL);

	if (!prcm_irq_chips || !prcm_irq_setup->saved_mask ||
	    !prcm_irq_setup->priority_mask) {
		pr_err("PRCM: kzalloc failed\n");
		goto err;
	}

	memset(mask, 0, sizeof(mask));

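	/*
	 * Each event's offset is split into a register index (offset / 32)
	 * and a bit position (offset % 32).  Worked example (numbers are
	 * illustrative, not from any SoC data): offset 35 -> mask[1] bit 3,
	 * i.e. the event lives in the second IRQ{STATUS,ENABLE} register.
	 */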
	for (i = 0; i < irq_setup->nr_irqs; i++) {
		offset = irq_setup->irqs[i].offset;
		mask[offset >> 5] |= 1 << (offset & 0x1f);
		if (irq_setup->irqs[i].priority)
			irq_setup->priority_mask[offset >> 5] |=
				1 << (offset & 0x1f);
	}

	irq_set_chained_handler(irq_setup->irq, omap_prcm_irq_handler);

	irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32,
		0);

	if (irq_setup->base_irq < 0) {
		pr_err("PRCM: failed to allocate irq descs: %d\n",
			irq_setup->base_irq);
		goto err;
	}

	for (i = 0; i < irq_setup->nr_regs; i++) {
		gc = irq_alloc_generic_chip("PRCM", 1,
			irq_setup->base_irq + i * 32, prm_base,
			handle_level_irq);

		if (!gc) {
			pr_err("PRCM: failed to allocate generic chip\n");
			goto err;
		}
		ct = gc->chip_types;
		ct->chip.irq_ack = irq_gc_ack_set_bit;
		ct->chip.irq_mask = irq_gc_mask_clr_bit;
		ct->chip.irq_unmask = irq_gc_mask_set_bit;

		ct->regs.ack = irq_setup->ack + i * 4;
		ct->regs.mask = irq_setup->mask + i * 4;

		irq_setup_generic_chip(gc, mask[i], 0, IRQ_NOREQUEST, 0);
		prcm_irq_chips[i] = gc;
	}

	if (of_have_populated_dt()) {
		int irq = omap_prcm_event_to_irq("io");
		omap_pcs_legacy_init(irq, irq_setup->reconfigure_io_chain);
	}

	return 0;

err:
	omap_prcm_irq_cleanup();
	return -ENOMEM;
}
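
/*
 * Registration sketch (illustrative only; the omap4-style names and
 * field values below are assumptions meant to show the shape of the
 * data an SoC-specific PRM file passes in, not definitions from this
 * file):
 *
 *	static struct omap_prcm_irq omap4_prcm_irqs[] = {
 *		OMAP_PRCM_IRQ("io", 9, 1),
 *	};
 *
 *	static struct omap_prcm_irq_setup omap4_prcm_irq_setup = {
 *		.ack		= OMAP4_PRM_IRQSTATUS_MPU_OFFSET,
 *		.mask		= OMAP4_PRM_IRQENABLE_MPU_OFFSET,
 *		.nr_regs	= 2,
 *		.irqs		= omap4_prcm_irqs,
 *		.nr_irqs	= ARRAY_SIZE(omap4_prcm_irqs),
 *	};
 *
 *	omap_prcm_register_chain_handler(&omap4_prcm_irq_setup);
 */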

/**
 * omap2_set_globals_prm - set the PRM base address (for early use)
 * @prm: PRM base virtual address
 *
 * XXX Will be replaced when the PRM/CM drivers are completed.
 */
void __init omap2_set_globals_prm(void __iomem *prm)
{
	prm_base = prm;
}

/**
 * prm_read_reset_sources - return the sources of the SoC's last reset
 *
 * Return a u32 bitmask representing the reset sources that caused the
 * SoC to reset.  The low-level per-SoC functions called by this
 * function remap the SoC-specific reset source bits into an
 * OMAP-common set of reset source bits, defined in
 * arch/arm/mach-omap2/prm.h.  Returns the standardized reset source
 * u32 bitmask from the hardware upon success, or returns (1 <<
 * OMAP_UNKNOWN_RST_SRC_ID_SHIFT) if no low-level read_reset_sources()
 * function was registered.
 */
u32 prm_read_reset_sources(void)
{
	u32 ret = 1 << OMAP_UNKNOWN_RST_SRC_ID_SHIFT;

	if (prm_ll_data->read_reset_sources)
		ret = prm_ll_data->read_reset_sources();
	else
		WARN_ONCE(1, "prm: %s: no mapping function defined for reset sources\n", __func__);

	return ret;
}
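
/*
 * Decoding sketch (illustrative; OMAP_GLOBAL_WARM_RST_SRC_ID_SHIFT is
 * assumed to be one of the standardized shift values from prm.h, and
 * the pr_info() reporting is an assumption, not code from this file):
 *
 *	u32 src = prm_read_reset_sources();
 *
 *	if (src & (1 << OMAP_GLOBAL_WARM_RST_SRC_ID_SHIFT))
 *		pr_info("last reboot was a global warm reset\n");
 */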

/**
 * prm_was_any_context_lost_old - was device context lost? (old API)
 * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
 * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
 * @idx: CONTEXT register offset
 *
 * Return 1 if any bits were set in the *_CONTEXT_* register
 * identified by (@part, @inst, @idx), which means that some context
 * was lost for that module; otherwise, return 0.  XXX Deprecated;
 * callers need to use a less-SoC-dependent way to identify hardware
 * IP blocks.
 */
bool prm_was_any_context_lost_old(u8 part, s16 inst, u16 idx)
{
	bool ret = true;

	if (prm_ll_data->was_any_context_lost_old)
		ret = prm_ll_data->was_any_context_lost_old(part, inst, idx);
	else
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);

	return ret;
}

/**
 * prm_clear_context_loss_flags_old - clear context loss flags (old API)
 * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
 * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
 * @idx: CONTEXT register offset
 *
 * Clear hardware context loss bits for the module identified by
 * (@part, @inst, @idx).  No return value.  XXX Deprecated; callers
 * need to use a less-SoC-dependent way to identify hardware IP
 * blocks.
 */
void prm_clear_context_loss_flags_old(u8 part, s16 inst, u16 idx)
{
	if (prm_ll_data->clear_context_loss_flags_old)
		prm_ll_data->clear_context_loss_flags_old(part, inst, idx);
	else
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
}

/**
 * omap_prm_assert_hardreset - assert hardreset for an IP block
 * @shift: register bit shift corresponding to the reset line
 * @part: PRM partition
 * @prm_mod: PRM submodule base or instance offset
 * @offset: register offset
 *
 * Asserts a hardware reset line for an IP block.
 */
int omap_prm_assert_hardreset(u8 shift, u8 part, s16 prm_mod, u16 offset)
{
	if (!prm_ll_data->assert_hardreset) {
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
		return -EINVAL;
	}

	return prm_ll_data->assert_hardreset(shift, part, prm_mod, offset);
}

/**
 * omap_prm_deassert_hardreset - deassert hardreset for an IP block
 * @shift: register bit shift corresponding to the reset line
 * @st_shift: reset status bit shift corresponding to the reset line
 * @part: PRM partition
 * @prm_mod: PRM submodule base or instance offset
 * @offset: register offset
 * @st_offset: status register offset
 *
 * Deasserts a hardware reset line for an IP block.
 */
int omap_prm_deassert_hardreset(u8 shift, u8 st_shift, u8 part, s16 prm_mod,
				u16 offset, u16 st_offset)
{
	if (!prm_ll_data->deassert_hardreset) {
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
		return -EINVAL;
	}

	return prm_ll_data->deassert_hardreset(shift, st_shift, part, prm_mod,
					       offset, st_offset);
}
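
/*
 * Typical assert/deassert sequence (a sketch under assumed values; the
 * part, prm_mod, rstctrl_offs and rstst_offs parameters below are
 * placeholders, not taken from any real SoC reset register layout):
 *
 *	ret = omap_prm_assert_hardreset(0, part, prm_mod, rstctrl_offs);
 *	(reprogram or idle the IP block while it is held in reset)
 *	ret = omap_prm_deassert_hardreset(0, 0, part, prm_mod,
 *					  rstctrl_offs, rstst_offs);
 */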

/**
 * omap_prm_is_hardreset_asserted - check the hardreset status for an IP block
 * @shift: register bit shift corresponding to the reset line
 * @part: PRM partition
 * @prm_mod: PRM submodule base or instance offset
 * @offset: register offset
 *
 * Checks whether the hardware reset line for an IP block is currently
 * asserted.
 */
int omap_prm_is_hardreset_asserted(u8 shift, u8 part, s16 prm_mod, u16 offset)
{
	if (!prm_ll_data->is_hardreset_asserted) {
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
		return -EINVAL;
	}

	return prm_ll_data->is_hardreset_asserted(shift, part, prm_mod, offset);
}

/**
 * omap_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain
 *
 * Clear any previously-latched I/O wakeup events and ensure that the
 * I/O wakeup gates are aligned with the current mux settings.
 * Calls the SoC-specific I/O chain reconfiguration function if one is
 * available; otherwise does nothing.
 */
void omap_prm_reconfigure_io_chain(void)
{
	if (!prcm_irq_setup || !prcm_irq_setup->reconfigure_io_chain)
		return;

	prcm_irq_setup->reconfigure_io_chain();
}

/**
 * omap_prm_reset_system - trigger global SW reset
 *
 * Triggers the SoC-specific global warm reset to reboot the device.
 */
void omap_prm_reset_system(void)
{
	if (!prm_ll_data->reset_system) {
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
		return;
	}

	prm_ll_data->reset_system();

	while (1)
		cpu_relax();
}

/**
 * prm_register - register per-SoC low-level data with the PRM
 * @pld: low-level per-SoC OMAP PRM data & function pointers to register
 *
 * Register per-SoC low-level OMAP PRM data and function pointers with
 * the OMAP PRM common interface.  The caller must keep the data
 * pointed to by @pld valid until it has called prm_unregister() and
 * that call has returned successfully.  Returns 0 upon success,
 * -EINVAL if @pld is NULL, or -EEXIST if prm_register() has already
 * been called without an intervening prm_unregister().
 */
int prm_register(struct prm_ll_data *pld)
{
	if (!pld)
		return -EINVAL;

	if (prm_ll_data != &null_prm_ll_data)
		return -EEXIST;

	prm_ll_data = pld;

	return 0;
}

/**
 * prm_unregister - unregister per-SoC low-level data & function pointers
 * @pld: low-level per-SoC OMAP PRM data & function pointers to unregister
 *
 * Unregister per-SoC low-level OMAP PRM data and function pointers
 * that were previously registered with prm_register().  The
 * caller may not destroy any of the data pointed to by @pld until
 * this function returns successfully.  Returns 0 upon success, or
 * -EINVAL if @pld is NULL or if @pld does not match the struct
 * prm_ll_data * previously registered by prm_register().
 */
int prm_unregister(struct prm_ll_data *pld)
{
	if (!pld || prm_ll_data != pld)
		return -EINVAL;

	prm_ll_data = &null_prm_ll_data;

	return 0;
}
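
/*
 * Registration sketch for the prm_ll_data hooks (illustrative only;
 * the omap3xxx-style names and the chosen subset of hooks are
 * assumptions about what an SoC-specific PRM file might provide, not
 * definitions from this file):
 *
 *	static struct prm_ll_data omap3xxx_prm_ll_data = {
 *		.read_reset_sources = &omap3xxx_prm_read_reset_sources,
 *		.late_init = &omap3xxx_prm_late_init,
 *	};
 *
 *	int __init omap3xxx_prm_init(void)
 *	{
 *		return prm_register(&omap3xxx_prm_ll_data);
 *	}
 */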

static const struct of_device_id omap_prcm_dt_match_table[] = {
	{ .compatible = "ti,am3-prcm" },
	{ .compatible = "ti,am3-scrm" },
	{ .compatible = "ti,am4-prcm" },
	{ .compatible = "ti,am4-scrm" },
	{ .compatible = "ti,omap2-prcm" },
	{ .compatible = "ti,omap2-scrm" },
	{ .compatible = "ti,omap3-prm" },
	{ .compatible = "ti,omap3-cm" },
	{ .compatible = "ti,omap3-scrm" },
	{ .compatible = "ti,omap4-cm1" },
	{ .compatible = "ti,omap4-prm" },
	{ .compatible = "ti,omap4-cm2" },
	{ .compatible = "ti,omap4-scrm" },
	{ .compatible = "ti,omap5-prm" },
	{ .compatible = "ti,omap5-cm-core-aon" },
	{ .compatible = "ti,omap5-scrm" },
	{ .compatible = "ti,omap5-cm-core" },
	{ .compatible = "ti,dra7-prm" },
	{ .compatible = "ti,dra7-cm-core-aon" },
	{ .compatible = "ti,dra7-cm-core" },
	{ }
};

static struct clk_hw_omap memmap_dummy_ck = {
	.flags = MEMMAP_ADDRESSING,
};

static u32 prm_clk_readl(void __iomem *reg)
{
	return omap2_clk_readl(&memmap_dummy_ck, reg);
}

static void prm_clk_writel(u32 val, void __iomem *reg)
{
	omap2_clk_writel(val, &memmap_dummy_ck, reg);
}

static struct ti_clk_ll_ops omap_clk_ll_ops = {
	.clk_readl = prm_clk_readl,
	.clk_writel = prm_clk_writel,
};

int __init of_prcm_init(void)
{
	struct device_node *np;
	void __iomem *mem;
	int memmap_index = 0;

	ti_clk_ll_ops = &omap_clk_ll_ops;

	for_each_matching_node(np, omap_prcm_dt_match_table) {
		mem = of_iomap(np, 0);
		clk_memmaps[memmap_index] = mem;
		ti_dt_clk_init_provider(np, memmap_index);
		memmap_index++;
	}

	return 0;
}

static int __init prm_late_init(void)
{
	if (prm_ll_data->late_init)
		return prm_ll_data->late_init();
	return 0;
}
subsys_initcall(prm_late_init);
637