Lines matching the full-text search tokens "per", "-", "cpu", "-", "cluster" in the MIPS GIC irqchip driver (irq-mips-gic.c)
6 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
10 #define pr_fmt(fmt) "irq-mips-gic: " fmt
26 #include <asm/mips-cps.h>
30 #include <dt-bindings/interrupt-controller/mips-gic.h>
35 /* Add 2 to convert GIC CPU pin to core interrupt */
44 #define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE)
47 #define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE)
71 unsigned int cpu; in __gic_with_next_online_cpu() local
73 /* Discover the next online CPU */ in __gic_with_next_online_cpu()
74 cpu = cpumask_next(prev, cpu_online_mask); in __gic_with_next_online_cpu()
77 if (cpu >= nr_cpu_ids) in __gic_with_next_online_cpu()
78 return cpu; in __gic_with_next_online_cpu()
81 * Move the access lock to the next CPU's GIC local register block. in __gic_with_next_online_cpu()
86 write_gic_vl_other(mips_cm_vp_id(cpu)); in __gic_with_next_online_cpu()
88 return cpu; in __gic_with_next_online_cpu()
98 * for_each_online_cpu_gic() - Iterate over online CPUs, access local registers
99 * @cpu: An integer variable to hold the current CPU number
107 #define for_each_online_cpu_gic(cpu, gic_lock) \ argument
109 for ((cpu) = __gic_with_next_online_cpu(-1); \
110 (cpu) < nr_cpu_ids; \
112 (cpu) = __gic_with_next_online_cpu(cpu))
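
Only part of the for_each_online_cpu_gic() macro appears in the match set above. A minimal usage sketch, assuming the write_gic_vo_*() "other CPU" accessor naming that mirrors the write_gic_vl_*() accessors seen later in this listing (not a verbatim quote of the driver): the loop body runs with gic_lock held while the macro points the GIC other/redirect register region at each online CPU in turn.

	/* Sketch: mask one GIC local interrupt on every online CPU. */
	static void sketch_mask_local_irq_on_all_vpes(unsigned int intr)
	{
		int cpu;

		for_each_online_cpu_gic(cpu, &gic_lock)
			write_gic_vo_rmask(BIT(intr));
	}
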
115 * gic_irq_lock_cluster() - Lock redirect block access to IRQ's cluster
119 * within the remote cluster that the IRQ corresponding to @d is affine to,
122 * If @d is affine to the local cluster then no locking is performed and this
130 * caller should trivially access GIC registers in the local cluster.
136 unsigned int cpu, cl; in gic_irq_lock_cluster() local
138 cpu = cpumask_first(irq_data_get_effective_affinity_mask(d)); in gic_irq_lock_cluster()
139 BUG_ON(cpu >= NR_CPUS); in gic_irq_lock_cluster()
141 cl = cpu_cluster(&cpu_data[cpu]); in gic_irq_lock_cluster()
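
The kernel-doc and body of gic_irq_lock_cluster() are only partially matched here. A hedged sketch of the caller pattern it implies, reusing the write_gic_redir_map_vp()/write_gic_map_vp() accessor pair that appears further down in this listing; the mips_cm_unlock_other() call is an assumed pairing for the lock taken on the remote cluster and is not among the matched lines:

	if (gic_irq_lock_cluster(d)) {
		/* IRQ is affine to a remote cluster: go through the
		 * redirect register block, then drop the lock. */
		write_gic_redir_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
		mips_cm_unlock_other();
	} else {
		/* Local cluster: plain shared register access. */
		write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
	}
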
186 irq -= GIC_PIN_TO_VEC_OFFSET; in gic_bind_eic_interrupt()
192 static void gic_send_ipi(struct irq_data *d, unsigned int cpu) in gic_send_ipi() argument
217 return -1; in gic_get_c0_perfcount_int()
229 return -1; in gic_get_c0_fdc_int()
243 /* Get per-cpu bitmaps */ in gic_handle_shared_int()
267 unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq); in gic_mask_irq()
281 unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq); in gic_unmask_irq()
282 unsigned int cpu; in gic_unmask_irq() local
292 cpu = cpumask_first(irq_data_get_effective_affinity_mask(d)); in gic_unmask_irq()
293 set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); in gic_unmask_irq()
298 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); in gic_ack_irq()
313 irq = GIC_HWIRQ_TO_SHARED(d->hwirq); in gic_set_type()
371 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); in gic_set_affinity()
372 unsigned int cpu, cl, old_cpu, old_cl; in gic_set_affinity() local
377 * ie. CPU in Linux parlance, at a time. Therefore we always route to in gic_set_affinity()
378 * the first forced or online CPU in the mask. in gic_set_affinity()
381 cpu = cpumask_first(cpumask); in gic_set_affinity()
383 cpu = cpumask_first_and(cpumask, cpu_online_mask); in gic_set_affinity()
385 if (cpu >= NR_CPUS) in gic_set_affinity()
386 return -EINVAL; in gic_set_affinity()
390 cl = cpu_cluster(&cpu_data[cpu]); in gic_set_affinity()
396 * interrupt to any VP(E) in the old cluster. in gic_set_affinity()
408 * Update effective affinity - after this gic_irq_lock_cluster() will in gic_set_affinity()
409 * begin operating on the new cluster. in gic_set_affinity()
411 irq_data_update_effective_affinity(d, cpumask_of(cpu)); in gic_set_affinity()
415 * trigger type in the new cluster. in gic_set_affinity()
424 write_gic_redir_map_vp(irq, BIT(mips_cm_vp_id(cpu))); in gic_set_affinity()
429 set_bit(irq, per_cpu_ptr(pcpu_masks, cpu)); in gic_set_affinity()
434 write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu))); in gic_set_affinity()
439 set_bit(irq, per_cpu_ptr(pcpu_masks, cpu)); in gic_set_affinity()
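
Pieced together from only the matched fragments of gic_set_affinity() (unmatched lines are omitted; the bool force parameter of the standard irq_set_affinity callback is assumed), the update is ordered so that the effective affinity is written before gic_irq_lock_cluster() is used, which makes the locking helper target the new cluster:

	cpu = force ? cpumask_first(cpumask)
		    : cpumask_first_and(cpumask, cpu_online_mask);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	cl = cpu_cluster(&cpu_data[cpu]);	/* destination cluster */

	/* ... when cl differs from the old cluster, first stop routing the
	 * interrupt to any VP(E) in the old cluster ... */

	/* From here on gic_irq_lock_cluster() operates on the new cluster. */
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	/* Re-route in the new cluster (write_gic_map_vp() or its redirect
	 * counterpart, as in the earlier sketch) and re-apply the trigger
	 * type there, then mark the target CPU in pcpu_masks. */
	set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
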
492 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); in gic_mask_local_irq()
499 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); in gic_unmask_local_irq()
513 int intr, cpu; in gic_mask_local_irq_all_vpes() local
518 intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); in gic_mask_local_irq_all_vpes()
520 cd->mask = false; in gic_mask_local_irq_all_vpes()
522 for_each_online_cpu_gic(cpu, &gic_lock) in gic_mask_local_irq_all_vpes()
529 int intr, cpu; in gic_unmask_local_irq_all_vpes() local
534 intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); in gic_unmask_local_irq_all_vpes()
536 cd->mask = true; in gic_unmask_local_irq_all_vpes()
538 for_each_online_cpu_gic(cpu, &gic_lock) in gic_unmask_local_irq_all_vpes()
561 write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map); in gic_all_vpes_irq_cpu_online()
562 if (cd->mask) in gic_all_vpes_irq_cpu_online()
588 irq_hw_number_t hw, unsigned int cpu) in gic_shared_irq_domain_map() argument
595 irq_data_update_effective_affinity(data, cpumask_of(cpu)); in gic_shared_irq_domain_map()
603 write_gic_redir_map_vp(intr, BIT(mips_cm_vp_id(cpu))); in gic_shared_irq_domain_map()
607 write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); in gic_shared_irq_domain_map()
621 return -EINVAL; in gic_irq_domain_xlate()
628 return -EINVAL; in gic_irq_domain_xlate()
639 int err, cpu; in gic_irq_domain_map() local
646 return -EBUSY; in gic_irq_domain_map()
663 * If adding support for more per-cpu interrupts, keep the in gic_irq_domain_map()
676 cd->map = map; in gic_irq_domain_map()
699 return -EPERM; in gic_irq_domain_map()
702 for_each_online_cpu_gic(cpu, &gic_lock) in gic_irq_domain_map()
715 if (fwspec->param[0] == GIC_SHARED) in gic_irq_domain_alloc()
716 hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]); in gic_irq_domain_alloc()
718 hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]); in gic_irq_domain_alloc()
757 int cpu, ret, i; in gic_ipi_domain_alloc() local
761 return -ENOMEM; in gic_ipi_domain_alloc()
766 return -EBUSY; in gic_ipi_domain_alloc()
770 /* map the hwirq for each cpu consecutively */ in gic_ipi_domain_alloc()
772 for_each_cpu(cpu, ipimask) { in gic_ipi_domain_alloc()
781 ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq, in gic_ipi_domain_alloc()
787 /* Set affinity to cpu. */ in gic_ipi_domain_alloc()
789 cpumask_of(cpu)); in gic_ipi_domain_alloc()
794 ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu); in gic_ipi_domain_alloc()
828 is_ipi = d->bus_token == bus_token; in gic_ipi_domain_match()
829 return (!node || to_of_node(d->fwnode) == node) && is_ipi; in gic_ipi_domain_match()
854 return -ENXIO; in gic_register_ipi_domain()
860 !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) { in gic_register_ipi_domain()
864 * Reserve 2 interrupts per possible CPU/VP for use as IPIs, in gic_register_ipi_domain()
868 bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis); in gic_register_ipi_domain()
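
As a worked example of the reservation above (taking the "2 interrupts per possible CPU/VP" figure from the matched comment): with 4 possible VPs, num_ipis would be 2 * 4 = 8, so bitmap_set() marks the top eight shared interrupts, gic_shared_intrs - 8 through gic_shared_intrs - 1, as reserved for IPI allocation.
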
885 static int gic_cpu_startup(unsigned int cpu) in gic_cpu_startup() argument
910 /* Find the first available CPU vector. */ in gic_of_init()
913 while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors", in gic_of_init()
919 pr_err("No CPU vectors available\n"); in gic_of_init()
920 return -ENODEV; in gic_of_init()
926 * in the device-tree. in gic_of_init()
936 return -ENODEV; in gic_of_init()
952 return -ENOMEM; in gic_of_init()
965 gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET; in gic_of_init()
976 return -ENXIO; in gic_of_init()
986 * Initialise each cluster's GIC shared registers to sane default in gic_of_init()
989 * to gic_cpu_startup for each cpu. in gic_of_init()
1009 pr_warn("No CPU cores on the cluster %d skip it\n", cl); in gic_of_init()