// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP WakeupGen Source file
 *
 * OMAP WakeupGen is the interrupt controller extension used along
 * with the ARM GIC to wake the CPU out of low power states on
 * external interrupts. It is responsible for generating the wakeup
 * event from the incoming interrupts and enable bits. It is
 * implemented in the MPU always-ON power domain. During normal
 * operation, WakeupGen delivers external interrupts directly to the GIC.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/cpu_pm.h>

#include "omap-wakeupgen.h"
#include "omap-secure.h"

#include "soc.h"
#include "omap4-sar-layout.h"
#include "common.h"
#include "pm.h"

#define AM43XX_NR_REG_BANKS	7
#define AM43XX_IRQS		224
#define MAX_NR_REG_BANKS	AM43XX_NR_REG_BANKS
#define MAX_IRQS		AM43XX_IRQS
#define DEFAULT_NR_REG_BANKS	5
#define DEFAULT_IRQS		160
#define WKG_MASK_ALL		0x00000000
#define WKG_UNMASK_ALL		0xffffffff
#define CPU_ENA_OFFSET		0x400
#define CPU0_ID			0x0
#define CPU1_ID			0x1
#define OMAP4_NR_BANKS		4
#define OMAP4_NR_IRQS		128

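/*
 * GIC SPI numbers of the sys_nirq1/sys_nirq2 pads. These are the only
 * lines whose polarity is expected to be inverted by the WakeupGen,
 * see wakeupgen_irq_set_type() below.
 */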
#define SYS_NIRQ1_EXT_SYS_IRQ_1	7
#define SYS_NIRQ2_EXT_SYS_IRQ_2	119

static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
static unsigned int irq_target_cpu[MAX_IRQS];
static unsigned int irq_banks = DEFAULT_NR_REG_BANKS;
static unsigned int max_irqs = DEFAULT_IRQS;
static unsigned int omap_secure_apis;

#ifdef CONFIG_CPU_PM
static unsigned int wakeupgen_context[MAX_NR_REG_BANKS];
#endif

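/*
 * Per-SoC hooks used to save and restore the WakeupGen context across
 * CPU cluster low power transitions.
 */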
struct omap_wakeupgen_ops {
	void (*save_context)(void);
	void (*restore_context)(void);
};

static struct omap_wakeupgen_ops *wakeupgen_ops;

/*
 * Static helper functions.
 */
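/*
 * Each CPU has its own bank of enable registers, spaced CPU_ENA_OFFSET
 * apart, with one 32-bit register per group of 32 SPIs.
 */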
static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
{
	return readl_relaxed(wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
{
	writel_relaxed(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void sar_writel(u32 val, u32 offset, u8 idx)
{
	writel_relaxed(val, sar_base + offset + (idx * 4));
}

static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
{
	/*
	 * Each WakeupGen register controls 32 interrupts,
	 * i.e. 1 bit per SPI IRQ.
	 */
	*reg_index = irq >> 5;
	*bit_posn = irq % 32;

	return 0;
}

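/*
 * Clear (mask) or set (unmask) the WakeupGen enable bit for @irq in the
 * per-CPU bank of @cpu. A cleared bit means the interrupt cannot wake
 * that CPU from a low power state.
 */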
static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val &= ~BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val |= BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

/*
 * Architecture specific Mask extension
 */
static void wakeupgen_mask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_clear(d->hwirq, irq_target_cpu[d->hwirq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
	irq_chip_mask_parent(d);
}

/*
 * Architecture specific Unmask extension
 */
static void wakeupgen_unmask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_set(d->hwirq, irq_target_cpu[d->hwirq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
	irq_chip_unmask_parent(d);
}

/*
 * The sys_nirq pins bypass peripheral modules and are wired directly
 * to MPUSS wakeupgen. They get automatically inverted for GIC.
 */
static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
{
	bool inverted = false;

	switch (type) {
	case IRQ_TYPE_LEVEL_LOW:
		type &= ~IRQ_TYPE_LEVEL_MASK;
		type |= IRQ_TYPE_LEVEL_HIGH;
		inverted = true;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		type &= ~IRQ_TYPE_EDGE_BOTH;
		type |= IRQ_TYPE_EDGE_RISING;
		inverted = true;
		break;
	default:
		break;
	}

	if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
	    d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
		pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
			d->hwirq);

	return irq_chip_set_type_parent(d, type);
}

#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);

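/*
 * Snapshot and reinstate the per-CPU WakeupGen enable banks around CPU
 * hotplug transitions.
 */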
static void _wakeupgen_save_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
}

static void _wakeupgen_restore_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
}

static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(reg, i, cpu);
}

/*
 * Mask or unmask all interrupts on given CPU.
 *	1 = Mask all interrupts on the 'cpu' (current masks saved first)
 *	0 = Unmask, i.e. restore the saved interrupt masks on the 'cpu'
 * Ensure that the initial mask is maintained. This is faster than
 * iterating through GIC registers to arrive at the correct masks.
 */
static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	if (set) {
		_wakeupgen_save_masks(cpu);
		_wakeupgen_set_all(cpu, WKG_MASK_ALL);
	} else {
		_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
		_wakeupgen_restore_masks(cpu);
	}
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
}
#endif

#ifdef CONFIG_CPU_PM
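/*
 * Save the OMAP4 WakeupGen context (per-CPU enable banks, AuxBoot and
 * SyncReq registers) to SAR RAM so that ROM code can restore it when
 * the MPU domain returns from a deep low power state.
 */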
static inline void omap4_irq_save_context(void)
{
	u32 i, val;

	if (omap_rev() == OMAP4430_REV_ES1_0)
		return;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 127 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);

		/*
		 * Disable the secure interrupts for CPUx. The restore
		 * code blindly restores secure and non-secure interrupt
		 * masks from SAR RAM. Secure interrupts are not supposed
		 * to be enabled from HLOS, so overwrite the SAR location
		 * so that the secure interrupt remains disabled.
		 */
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	writel_relaxed(val, sar_base + AUXCOREBOOT0_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	writel_relaxed(val, sar_base + AUXCOREBOOT1_OFFSET);

	/* Save SyncReq generation logic */
	val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
	writel_relaxed(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
	writel_relaxed(val, sar_base + PTMSYNCREQ_EN_OFFSET);

	/* Set the Backup Bit Mask status */
	val = readl_relaxed(sar_base + SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
}

static inline void omap5_irq_save_context(void)
{
	u32 i, val;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 159 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU1, i);
		sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT0_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT1_OFFSET);

	/* Set the Backup Bit Mask status */
	val = readl_relaxed(sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
}

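/*
 * AM43xx has no SAR RAM: the enable banks are saved to a kernel buffer
 * (wakeupgen_context[]) and written back by am43xx_irq_restore_context().
 */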
static inline void am43xx_irq_save_context(void)
{
	u32 i;

	for (i = 0; i < irq_banks; i++) {
		wakeupgen_context[i] = wakeupgen_readl(i, 0);
		wakeupgen_writel(0, i, CPU0_ID);
	}
}

/*
 * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
 * ROM code. WakeupGen IP is integrated along with GIC to manage the
 * interrupt wakeups from CPU low power states. It manages
 * masking/unmasking of shared peripheral interrupts (SPI). So the
 * interrupt enable/disable control should be in sync and consistent
 * at WakeupGen and GIC so that interrupts are not lost.
 */
static void irq_save_context(void)
{
	/* DRA7 has no SAR to save */
	if (soc_is_dra7xx())
		return;

	if (wakeupgen_ops && wakeupgen_ops->save_context)
		wakeupgen_ops->save_context();
}

/*
 * Clear WakeupGen SAR backup status.
 */
static void irq_sar_clear(void)
{
	u32 val;
	u32 offset = SAR_BACKUP_STATUS_OFFSET;

	/* DRA7 has no SAR to restore */
	if (soc_is_dra7xx())
		return;

	if (soc_is_omap54xx())
		offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;

	val = readl_relaxed(sar_base + offset);
	val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + offset);
}

static void am43xx_irq_restore_context(void)
{
	u32 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(wakeupgen_context[i], i, CPU0_ID);
}

static void irq_restore_context(void)
{
	if (wakeupgen_ops && wakeupgen_ops->restore_context)
		wakeupgen_ops->restore_context();
}

/*
 * Save GIC and Wakeupgen interrupt context using secure API
 * for HS/EMU devices.
 */
static void irq_save_secure_context(void)
{
	u32 ret;

	ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
				     FLAG_START_CRITICAL,
				     0, 0, 0, 0, 0);
	if (ret != API_HAL_RET_VALUE_OK)
		pr_err("GIC and Wakeupgen context save failed\n");
}

/* Define ops for context save and restore for each SoC */
static struct omap_wakeupgen_ops omap4_wakeupgen_ops = {
	.save_context = omap4_irq_save_context,
	.restore_context = irq_sar_clear,
};

static struct omap_wakeupgen_ops omap5_wakeupgen_ops = {
	.save_context = omap5_irq_save_context,
	.restore_context = irq_sar_clear,
};

static struct omap_wakeupgen_ops am43xx_wakeupgen_ops = {
	.save_context = am43xx_irq_save_context,
	.restore_context = am43xx_irq_restore_context,
};
#else
static struct omap_wakeupgen_ops omap4_wakeupgen_ops = {};
static struct omap_wakeupgen_ops omap5_wakeupgen_ops = {};
static struct omap_wakeupgen_ops am43xx_wakeupgen_ops = {};
#endif

#ifdef CONFIG_HOTPLUG_CPU
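/*
 * CPU hotplug: when a CPU comes online its saved WakeupGen masks are
 * restored; when it is taken down, its masks are saved and all of its
 * interrupts are masked.
 */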
static int omap_wakeupgen_cpu_online(unsigned int cpu)
{
	wakeupgen_irqmask_all(cpu, 0);
	return 0;
}

static int omap_wakeupgen_cpu_dead(unsigned int cpu)
{
	wakeupgen_irqmask_all(cpu, 1);
	return 0;
}

static void __init irq_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/omap-wake:online",
				  omap_wakeupgen_cpu_online, NULL);
	cpuhp_setup_state_nocalls(CPUHP_ARM_OMAP_WAKE_DEAD,
				  "arm/omap-wake:dead", NULL,
				  omap_wakeupgen_cpu_dead);
}
#else
static void __init irq_hotplug_init(void)
{}
#endif

#ifdef CONFIG_CPU_PM
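/*
 * CPU PM notifier: save the WakeupGen context when the CPU cluster
 * enters a low power state and restore it on exit. HS/EMU devices
 * must use the secure API for the save.
 */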
static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP || soc_is_am43xx())
			irq_save_context();
		else
			irq_save_secure_context();
		break;
	case CPU_CLUSTER_PM_EXIT:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP || soc_is_am43xx())
			irq_restore_context();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block irq_notifier_block = {
	.notifier_call = irq_notifier,
};

static void __init irq_pm_init(void)
{
	/* FIXME: Remove this when MPU OSWR support is added */
	if (!IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
		cpu_pm_register_notifier(&irq_notifier_block);
}
#else
static void __init irq_pm_init(void)
{}
#endif

void __iomem *omap_get_wakeupgen_base(void)
{
	return wakeupgen_base;
}

int omap_secure_apis_support(void)
{
	return omap_secure_apis;
}

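/*
 * Hierarchical irq_chip: mask/unmask/set_type update the WakeupGen
 * state and then forward the operation to the parent GIC chip.
 */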
static struct irq_chip wakeupgen_chip = {
	.name			= "WUGEN",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= wakeupgen_mask,
	.irq_unmask		= wakeupgen_unmask,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= wakeupgen_irq_set_type,
	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};

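/*
 * The fwspec uses the same three-cell GIC format; only SPIs (first
 * cell 0) are accepted, as PPIs never point to this domain.
 */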
static int wakeupgen_domain_translate(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      unsigned long *hwirq,
				      unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		*hwirq = fwspec->param[1];
		*type = fwspec->param[2];
		return 0;
	}

	return -EINVAL;
}

static int wakeupgen_domain_alloc(struct irq_domain *domain,
				  unsigned int virq,
				  unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	irq_hw_number_t hwirq;
	int i;

	if (fwspec->param_count != 3)
		return -EINVAL;	/* Not GIC compliant */
	if (fwspec->param[0] != 0)
		return -EINVAL;	/* No PPI should point to this domain */

	hwirq = fwspec->param[1];
	if (hwirq >= MAX_IRQS)
		return -EINVAL;	/* Can't deal with this */

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &wakeupgen_chip, NULL);

	parent_fwspec = *fwspec;
	parent_fwspec.fwnode = domain->parent->fwnode;
	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
					    &parent_fwspec);
}

static const struct irq_domain_ops wakeupgen_domain_ops = {
	.translate	= wakeupgen_domain_translate,
	.alloc		= wakeupgen_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};
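
/*
 * Illustrative device tree node this driver binds to (a sketch, not
 * taken from this file; the unit address follows the OMAP4 mapping and
 * the label is arbitrary):
 *
 *	wakeupgen: interrupt-controller@48281000 {
 *		compatible = "ti,omap4-wugen-mpu";
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		reg = <0x48281000 0x1000>;
 *		interrupt-parent = <&gic>;
 *	};
 *
 * Devices then list this node as their interrupt-parent and use the
 * usual three-cell GIC interrupt specifiers.
 */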

/*
 * Initialise the wakeupgen module.
 */
static int __init wakeupgen_init(struct device_node *node,
				 struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int i;
	unsigned int boot_cpu = smp_processor_id();
	u32 val;

	if (!parent) {
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: unable to obtain parent domain\n", node);
		return -ENXIO;
	}

	/* Not supported on OMAP4 ES1.0 silicon */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
		return -EPERM;
	}

	/* Static mapping, never released */
	wakeupgen_base = of_iomap(node, 0);
	if (WARN_ON(!wakeupgen_base))
		return -ENOMEM;

	if (cpu_is_omap44xx()) {
		irq_banks = OMAP4_NR_BANKS;
		max_irqs = OMAP4_NR_IRQS;
		omap_secure_apis = 1;
		wakeupgen_ops = &omap4_wakeupgen_ops;
	} else if (soc_is_omap54xx()) {
		wakeupgen_ops = &omap5_wakeupgen_ops;
	} else if (soc_is_am43xx()) {
		irq_banks = AM43XX_NR_REG_BANKS;
		max_irqs = AM43XX_IRQS;
		wakeupgen_ops = &am43xx_wakeupgen_ops;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs,
					  node, &wakeupgen_domain_ops,
					  NULL);
	if (!domain) {
		iounmap(wakeupgen_base);
		return -ENOMEM;
	}

	/* Clear all IRQ bitmasks at WakeupGen level */
	for (i = 0; i < irq_banks; i++) {
		wakeupgen_writel(0, i, CPU0_ID);
		if (!soc_is_am43xx())
			wakeupgen_writel(0, i, CPU1_ID);
	}

	/*
	 * FIXME: Add support to set_smp_affinity() once the core
	 * GIC code has necessary hooks in place.
	 */

	/* Associate all the IRQs to boot CPU like GIC init does. */
	for (i = 0; i < max_irqs; i++)
		irq_target_cpu[i] = boot_cpu;

	/*
	 * Enable the OMAP5 ES2 PM mode using ES2_PM_MODE in AMBA_IF_MODE.
	 * 0x0: ES1 behavior, CPU cores enter and exit OFF mode together.
	 * 0x1: ES2 behavior, CPU cores are allowed to enter/exit OFF mode
	 *      independently.
	 * Since the register lives in the always-ON domain, this only
	 * needs to be set once.
	 *
	 * We do not support ES1 behavior anymore. OMAP5 is assumed to be
	 * ES2.0, and the same is applicable for DRA7.
	 */
	if (soc_is_omap54xx() || soc_is_dra7xx()) {
		val = __raw_readl(wakeupgen_base + OMAP_AMBA_IF_MODE);
		val |= BIT(5);
		omap_smc1(OMAP5_MON_AMBA_IF_INDEX, val);
	}

	irq_hotplug_init();
	irq_pm_init();

	sar_base = omap4_get_sar_ram_base();

	return 0;
}
IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init);