Lines Matching +full:4 +full:- +full:cpu
1 // SPDX-License-Identifier: GPL-2.0-only
10 * o There is one CPU Interface per CPU, which sends interrupts sent
12 * associated CPU. The base address of the CPU interface is usually
14 * on the CPU it is accessed from.
16 * Note that IRQs 0-31 are special - they are local to each CPU.
18 * registers are banked per-cpu for these sources.
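The banked IRQs 0-31 mentioned above follow the usual GICv2 split of interrupt IDs. A minimal, self-contained sketch of those ranges, assuming the standard GICv2 numbering (the helper name and sample values are illustrative, not driver code):

#include <stdio.h>

/* Standard GICv2 interrupt ID ranges; the classifier is illustrative only. */
static const char *gic_irq_class(unsigned int hwirq)
{
	if (hwirq < 16)
		return "SGI (software-generated, banked per CPU, used as IPI)";
	if (hwirq < 32)
		return "PPI (private peripheral, banked per CPU)";
	if (hwirq < 1020)
		return "SPI (shared peripheral, routed by the Distributor)";
	return "special/reserved";
}

int main(void)
{
	unsigned int samples[] = { 0, 16, 27, 32, 1019 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("hwirq %4u: %s\n", samples[i], gic_irq_class(samples[i]));
	return 0;
}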
27 #include <linux/cpu.h>
42 #include <linux/irqchip/arm-gic.h>
50 #include "irq-gic-common.h"
80 u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
114 * The GIC mapping of CPU interfaces does not necessarily match
115 * the logical CPU numbering. Let's use a mapping as returned
140 return raw_cpu_read(*base->percpu_base); in __get_base()
142 return base->common_base; in __get_base()
145 #define gic_data_dist_base(d) __get_base(&(d)->dist_base)
146 #define gic_data_cpu_base(d) __get_base(&(d)->cpu_base)
148 #define gic_data_dist_base(d) ((d)->dist_base.common_base)
149 #define gic_data_cpu_base(d) ((d)->cpu_base.common_base)
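The two variants of gic_data_dist_base()/gic_data_cpu_base() above reflect whether the hardware banks the registers itself or the kernel keeps one alias pointer per CPU (the CONFIG_GIC_NON_BANKED build). A minimal userspace model of that choice, with made-up alias addresses and a simplified stand-in for the driver's per-GIC base bookkeeping:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the driver's per-GIC base bookkeeping. */
struct gic_base_model {
	int per_cpu_aliases;      /* CONFIG_GIC_NON_BANKED + "cpu-offset" case    */
	uintptr_t common_base;    /* single base, hardware banks per CPU          */
	uintptr_t percpu_base[4]; /* one software alias per CPU (made-up values)  */
};

static uintptr_t get_base(const struct gic_base_model *b, unsigned int cpu)
{
	return b->per_cpu_aliases ? b->percpu_base[cpu] : b->common_base;
}

int main(void)
{
	struct gic_base_model b = {
		.per_cpu_aliases = 1,
		.percpu_base = { 0x10041000, 0x10049000, 0x10051000, 0x10059000 },
	};

	printf("CPU1 distributor alias: %#lx\n", (unsigned long)get_base(&b, 1));
	return 0;
}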
183 writel_relaxed(mask, gic_dist_base(d) + offset + (irqd_to_hwirq(d) / 32) * 4); in gic_poke_irq()
190 return !!(readl_relaxed(gic_dist_base(d) + offset + (irqd_to_hwirq(d) / 32) * 4) & mask); in gic_peek_irq()
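gic_poke_irq() and gic_peek_irq() address the 1-bit-per-interrupt distributor registers (enable, pending, active). Each 32-bit register covers 32 interrupts, which is where the (hwirq / 32) * 4 byte offset comes from, with the matching bit at hwirq % 32. A small standalone sketch of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int hwirq = 42;                  /* an SPI, as an example        */
	unsigned int reg_off = (hwirq / 32) * 4;  /* byte offset within the group */
	unsigned int mask = 1u << (hwirq % 32);   /* bit for this interrupt       */

	printf("hwirq %u -> register +%#x, mask %#010x\n", hwirq, reg_off, mask);
	return 0;
}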
261 return -EINVAL; in gic_irq_set_irqchip_state()
285 return -EINVAL; in gic_irq_get_irqchip_state()
299 return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0; in gic_set_type()
304 return -EINVAL; in gic_set_type()
309 pr_warn("GIC: PPI%ld is secure or misconfigured\n", gicirq - 16); in gic_set_type()
320 return -EINVAL; in gic_irq_set_vcpu_affinity()
352 * Ensure any shared data written by the CPU sending the IPI in gic_handle_irq()
361 * The GIC encodes the source CPU in GICC_IAR, in gic_handle_irq()
370 generic_handle_domain_irq(gic->domain, irqnr); in gic_handle_irq()
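The acknowledge path reads GICC_IAR once and splits it: bits [9:0] are the interrupt ID and, for SGIs, bits [12:10] identify the sending CPU interface (the "source CPU" the comment above refers to); ID 1023 means nothing is pending. A standalone decode sketch:

#include <stdio.h>

int main(void)
{
	unsigned int iar = (2u << 10) | 5u;   /* example: SGI 5 sent by CPU interface 2 */
	unsigned int intid = iar & 0x3ff;
	unsigned int src_cpu = (iar >> 10) & 0x7;

	if (intid == 1023)
		printf("spurious - nothing pending\n");
	else
		printf("INTID %u (source CPU interface %u if this is an SGI)\n",
		       intid, src_cpu);
	return 0;
}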
391 ret = generic_handle_domain_irq(chip_data->domain, gic_irq); in gic_handle_cascade_irq()
402 if (gic->domain->pm_dev) in gic_irq_print_chip()
403 seq_printf(p, gic->domain->pm_dev->of_node->name); in gic_irq_print_chip()
405 seq_printf(p, "GIC-%d", (int)(gic - &gic_data[0])); in gic_irq_print_chip()
420 for (i = mask = 0; i < 32; i += 4) { in gic_get_cpumask()
429 pr_crit("GIC CPU mask not found - kernel will fail to boot.\n"); in gic_get_cpumask()
451 for (i = 0; i < 4; i++) in gic_cpu_if_up()
452 writel_relaxed(0, cpu_base + GIC_CPU_ACTIVEPRIO + i * 4); in gic_cpu_if_up()
468 unsigned int gic_irqs = gic->gic_irqs; in gic_dist_init()
474 * Set all global interrupts to this CPU only. in gic_dist_init()
479 for (i = 32; i < gic_irqs; i += 4) in gic_dist_init()
480 writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); in gic_dist_init()
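GIC_DIST_TARGET (ITARGETSRn) holds one byte per interrupt, four interrupts per 32-bit register, so stepping i by four interrupts makes the register offset i * 4 / 4 == i bytes; the boot CPU's mask is replicated into all four byte lanes before each write (that replication step is an assumption about the elided lines just above the loop):

#include <stdio.h>

int main(void)
{
	unsigned int cpumask = 0x01;   /* target byte for CPU interface 0 */

	cpumask |= cpumask << 8;       /* replicate into all four byte lanes */
	cpumask |= cpumask << 16;

	for (unsigned int i = 32; i < 48; i += 4)
		printf("ITARGETSR @ +%#x <- %#010x (IRQs %u..%u)\n",
		       i * 4 / 4, cpumask, i, i + 3);
	return 0;
}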
491 unsigned int cpu_mask, cpu = smp_processor_id(); in gic_cpu_init() local
495 * Setting up the CPU map is only relevant for the primary GIC in gic_cpu_init()
497 * with the CPU(s). in gic_cpu_init()
501 * Get what the GIC says our CPU mask is. in gic_cpu_init()
503 if (WARN_ON(cpu >= NR_GIC_CPU_IF)) in gic_cpu_init()
504 return -EINVAL; in gic_cpu_init()
508 gic_cpu_map[cpu] = cpu_mask; in gic_cpu_init()
515 if (i != cpu) in gic_cpu_init()
533 return -EINVAL; in gic_cpu_if_down()
548 * platform-specific wakeup source must be enabled.
559 gic_irqs = gic->gic_irqs; in gic_dist_save()
566 gic->saved_spi_conf[i] = in gic_dist_save()
567 readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); in gic_dist_save()
569 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) in gic_dist_save()
570 gic->saved_spi_target[i] = in gic_dist_save()
571 readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4); in gic_dist_save()
574 gic->saved_spi_enable[i] = in gic_dist_save()
575 readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); in gic_dist_save()
578 gic->saved_spi_active[i] = in gic_dist_save()
579 readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4); in gic_dist_save()
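The save loops above are sized by how many interrupts fit in one 32-bit distributor register: ICFGR packs 2 bits per interrupt (16 per word), ITARGETSR 8 bits (4 per word), and the enable/active set registers 1 bit (32 per word). A quick check of those counts for a hypothetical gic_irqs value:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int gic_irqs = 160;   /* hypothetical: 128 SPIs + 32 private IRQs */

	printf("GIC_DIST_CONFIG words to save: %u\n", DIV_ROUND_UP(gic_irqs, 16));
	printf("GIC_DIST_TARGET words to save: %u\n", DIV_ROUND_UP(gic_irqs, 4));
	printf("GIC_DIST_ENABLE words to save: %u\n", DIV_ROUND_UP(gic_irqs, 32));
	return 0;
}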
587 * the GIC and need to be handled by the platform-specific wakeup source.
598 gic_irqs = gic->gic_irqs; in gic_dist_restore()
607 writel_relaxed(gic->saved_spi_conf[i], in gic_dist_restore()
608 dist_base + GIC_DIST_CONFIG + i * 4); in gic_dist_restore()
610 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) in gic_dist_restore()
612 dist_base + GIC_DIST_PRI + i * 4); in gic_dist_restore()
614 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) in gic_dist_restore()
615 writel_relaxed(gic->saved_spi_target[i], in gic_dist_restore()
616 dist_base + GIC_DIST_TARGET + i * 4); in gic_dist_restore()
620 dist_base + GIC_DIST_ENABLE_CLEAR + i * 4); in gic_dist_restore()
621 writel_relaxed(gic->saved_spi_enable[i], in gic_dist_restore()
622 dist_base + GIC_DIST_ENABLE_SET + i * 4); in gic_dist_restore()
627 dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4); in gic_dist_restore()
628 writel_relaxed(gic->saved_spi_active[i], in gic_dist_restore()
629 dist_base + GIC_DIST_ACTIVE_SET + i * 4); in gic_dist_restore()
651 ptr = raw_cpu_ptr(gic->saved_ppi_enable); in gic_cpu_save()
653 ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); in gic_cpu_save()
655 ptr = raw_cpu_ptr(gic->saved_ppi_active); in gic_cpu_save()
657 ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4); in gic_cpu_save()
659 ptr = raw_cpu_ptr(gic->saved_ppi_conf); in gic_cpu_save()
661 ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); in gic_cpu_save()
681 ptr = raw_cpu_ptr(gic->saved_ppi_enable); in gic_cpu_restore()
684 dist_base + GIC_DIST_ENABLE_CLEAR + i * 4); in gic_cpu_restore()
685 writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); in gic_cpu_restore()
688 ptr = raw_cpu_ptr(gic->saved_ppi_active); in gic_cpu_restore()
691 dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4); in gic_cpu_restore()
692 writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4); in gic_cpu_restore()
695 ptr = raw_cpu_ptr(gic->saved_ppi_conf); in gic_cpu_restore()
697 writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4); in gic_cpu_restore()
699 for (i = 0; i < DIV_ROUND_UP(32, 4); i++) in gic_cpu_restore()
701 dist_base + GIC_DIST_PRI + i * 4); in gic_cpu_restore()
739 gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, in gic_pm_init()
741 if (WARN_ON(!gic->saved_ppi_enable)) in gic_pm_init()
742 return -ENOMEM; in gic_pm_init()
744 gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, in gic_pm_init()
746 if (WARN_ON(!gic->saved_ppi_active)) in gic_pm_init()
749 gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, in gic_pm_init()
751 if (WARN_ON(!gic->saved_ppi_conf)) in gic_pm_init()
760 free_percpu(gic->saved_ppi_active); in gic_pm_init()
762 free_percpu(gic->saved_ppi_enable); in gic_pm_init()
764 return -ENOMEM; in gic_pm_init()
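The per-CPU buffers allocated here only cover the banked IRQs 0-31, so the sizes are fixed: one 32-bit word (4 bytes) for the enable and active bitmaps, and two words (8 bytes) for the 2-bit-per-interrupt configuration. The same DIV_ROUND_UP arithmetic, standalone:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	printf("saved_ppi_enable: %d bytes per CPU\n", DIV_ROUND_UP(32, 32) * 4);
	printf("saved_ppi_active: %d bytes per CPU\n", DIV_ROUND_UP(32, 32) * 4);
	printf("saved_ppi_conf:   %d bytes per CPU\n", DIV_ROUND_UP(32, 16) * 4);
	return 0;
}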
784 addr -= offset; in rmw_writeb()
798 unsigned int cpu; in gic_set_affinity() local
801 return -EINVAL; in gic_set_affinity()
804 cpu = cpumask_any_and(mask_val, cpu_online_mask); in gic_set_affinity()
806 cpu = cpumask_first(mask_val); in gic_set_affinity()
808 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) in gic_set_affinity()
809 return -EINVAL; in gic_set_affinity()
812 rmw_writeb(gic_cpu_map[cpu], reg); in gic_set_affinity()
814 writeb_relaxed(gic_cpu_map[cpu], reg); in gic_set_affinity()
815 irq_data_update_effective_affinity(d, cpumask_of(cpu)); in gic_set_affinity()
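Changing an SPI's affinity is a single byte write of the one-hot gic_cpu_map[] entry into that interrupt's ITARGETSR byte; rmw_writeb() above emulates the byte write with a locked 32-bit read-modify-write on distributors that only tolerate word accesses. A userspace model of that emulation (register value and IRQ number are made up):

#include <stdint.h>
#include <stdio.h>

/* Model of the rmw_writeb() idea: merge one routing byte into its word. */
static void rmw_writeb_model(uint8_t bval, uint32_t *word, unsigned int byte_off)
{
	uint32_t val = *word;                      /* readl_relaxed()       */

	val &= ~(0xffu << (byte_off * 8));         /* clear this byte lane  */
	val |= (uint32_t)bval << (byte_off * 8);   /* insert the new target */
	*word = val;                               /* writel_relaxed()      */
}

int main(void)
{
	uint32_t itargetsr = 0x01010101;           /* IRQs 40..43 all on CPU0 */

	rmw_writeb_model(1u << 2, &itargetsr, 42 % 4);   /* move IRQ 42 to CPU2 */
	printf("ITARGETSR10 is now %#010x\n", (unsigned int)itargetsr);
	return 0;
}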
822 int cpu; in gic_ipi_send_mask() local
826 /* Only one CPU? let's do a self-IPI... */ in gic_ipi_send_mask()
827 writel_relaxed(2 << 24 | d->hwirq, in gic_ipi_send_mask()
834 /* Convert our logical CPU mask into a physical one. */ in gic_ipi_send_mask()
835 for_each_cpu(cpu, mask) in gic_ipi_send_mask()
836 map |= gic_cpu_map[cpu]; in gic_ipi_send_mask()
845 writel_relaxed(map << 16 | d->hwirq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); in gic_ipi_send_mask()
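Both SOFTINT writes above follow the GICD_SGIR layout: bits [25:24] are the target-list filter (0 = use the CPU list, 2 = this CPU only), bits [23:16] the physical CPU-interface mask, bits [3:0] the SGI number. A small encoder sketch:

#include <stdio.h>

static unsigned int sgir(unsigned int filter, unsigned int cpu_mask,
			 unsigned int sgi)
{
	return (filter << 24) | (cpu_mask << 16) | sgi;
}

int main(void)
{
	printf("SGI 1 to CPUs {0,2}: GICD_SGIR = %#010x\n", sgir(0, 0x05, 1));
	printf("SGI 1 to self:       GICD_SGIR = %#010x\n", sgir(2, 0x00, 1));
	return 0;
}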
850 static int gic_starting_cpu(unsigned int cpu) in gic_starting_cpu() argument
859 .fwnode = gic_data[0].domain->fwnode, in gic_smp_init()
915 * gic_send_sgi - send a SGI directly to given CPU interface number
917 * cpu_id: the ID for the destination CPU interface
929 * gic_get_cpu_id - get the CPU interface ID for the specified CPU
931 * @cpu: the logical CPU number to get the GIC ID for.
933 * Return the CPU interface ID for the given logical CPU number,
934 * or -1 if the CPU number is too large or the interface ID is
937 int gic_get_cpu_id(unsigned int cpu) in gic_get_cpu_id() argument
941 if (cpu >= NR_GIC_CPU_IF) in gic_get_cpu_id()
942 return -1; in gic_get_cpu_id()
943 cpu_bit = gic_cpu_map[cpu]; in gic_get_cpu_id()
944 if (cpu_bit & (cpu_bit - 1)) in gic_get_cpu_id()
945 return -1; in gic_get_cpu_id()
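The cpu_bit & (cpu_bit - 1) test clears the lowest set bit, so it is non-zero exactly when more than one bit is set; gic_cpu_map[] entries are expected to be one-hot CPU-interface masks, and anything else means the interface ID cannot be reported. A standalone illustration (using the compiler builtin as a stand-in for __ffs()):

#include <stdio.h>

static int one_hot_to_id(unsigned int cpu_bit)
{
	if (!cpu_bit || (cpu_bit & (cpu_bit - 1)))
		return -1;                      /* zero or several bits set */
	return __builtin_ctz(cpu_bit);          /* stand-in for __ffs()     */
}

int main(void)
{
	printf("map 0x04 -> interface %d\n", one_hot_to_id(0x04));
	printf("map 0x06 -> interface %d\n", one_hot_to_id(0x06));
	return 0;
}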
950 * gic_migrate_target - migrate IRQs to another CPU interface
952 * @new_cpu_id: the CPU target ID to migrate IRQs to
954 * Migrate all peripheral interrupts with a target matching the current CPU
955 * to the interface corresponding to @new_cpu_id. The CPU interface mapping
956 * is also updated. Targets to other CPU interfaces are unchanged.
963 int i, ror_val, cpu = smp_processor_id(); in gic_migrate_target() local
973 cur_cpu_id = __ffs(gic_cpu_map[cpu]); in gic_migrate_target()
975 ror_val = (cur_cpu_id - new_cpu_id) & 31; in gic_migrate_target()
979 /* Update the target interface for this logical CPU */ in gic_migrate_target()
980 gic_cpu_map[cpu] = 1 << new_cpu_id; in gic_migrate_target()
984 * CPU interface and migrate them to the new CPU interface. in gic_migrate_target()
985 * We skip DIST_TARGET 0 to 7 as they are read-only. in gic_migrate_target()
987 for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) { in gic_migrate_target()
988 val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4); in gic_migrate_target()
993 writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4); in gic_migrate_target()
1009 for (i = 0; i < 16; i += 4) { in gic_migrate_target()
1015 for (j = i; j < i + 4; j++) { in gic_migrate_target()
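The migration loop retargets each ITARGETSR word in one pass: the bits belonging to the current interface form a 0x01010101-style pattern shifted by cur_cpu_id; clearing them and OR-ing back the same pattern rotated right by (cur_cpu_id - new_cpu_id) & 31 moves the routing to the new interface in every byte lane while leaving other interfaces' bits alone. A userspace model of one such word (the sample value is made up, and the rotate step is an assumption about the elided lines):

#include <stdint.h>
#include <stdio.h>

static uint32_t ror32(uint32_t w, unsigned int s)
{
	return s ? (w >> s) | (w << (32 - s)) : w;
}

int main(void)
{
	unsigned int cur = 0, new_id = 2;
	unsigned int ror_val = (cur - new_id) & 31;
	uint32_t val = 0x02010901;                     /* four IRQs, mixed targets */
	uint32_t active = val & (0x01010101u << cur);  /* bits routed to cur       */

	val &= ~active;
	val |= ror32(active, ror_val);
	printf("retargeted word: %#010x\n", (unsigned int)val);   /* 0x02040c04 */
	return 0;
}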
1025 * gic_get_sgir_physaddr - get the physical address for the SGI register
1055 struct gic_chip_data *gic = d->host_data; in gic_irq_domain_map()
1065 irq_domain_set_info(d, irq, hw, chip, d->host_data, in gic_irq_domain_map()
1069 irq_domain_set_info(d, irq, hw, chip, d->host_data, in gic_irq_domain_map()
1086 if (fwspec->param_count == 1 && fwspec->param[0] < 16) { in gic_irq_domain_translate()
1087 *hwirq = fwspec->param[0]; in gic_irq_domain_translate()
1092 if (is_of_node(fwspec->fwnode)) { in gic_irq_domain_translate()
1093 if (fwspec->param_count < 3) in gic_irq_domain_translate()
1094 return -EINVAL; in gic_irq_domain_translate()
1096 switch (fwspec->param[0]) { in gic_irq_domain_translate()
1098 *hwirq = fwspec->param[1] + 32; in gic_irq_domain_translate()
1101 *hwirq = fwspec->param[1] + 16; in gic_irq_domain_translate()
1104 return -EINVAL; in gic_irq_domain_translate()
1107 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; in gic_irq_domain_translate()
1115 if (is_fwnode_irqchip(fwspec->fwnode)) { in gic_irq_domain_translate()
1116 if (fwspec->param_count != 2) in gic_irq_domain_translate()
1117 return -EINVAL; in gic_irq_domain_translate()
1119 if (fwspec->param[0] < 16) { in gic_irq_domain_translate()
1121 fwspec->param[0]); in gic_irq_domain_translate()
1122 return -EINVAL; in gic_irq_domain_translate()
1125 *hwirq = fwspec->param[0]; in gic_irq_domain_translate()
1126 *type = fwspec->param[1]; in gic_irq_domain_translate()
1133 return -EINVAL; in gic_irq_domain_translate()
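The devicetree branch above turns a three-cell specifier <kind number flags> into a linear hwirq, offsetting SPIs by 32 and PPIs by 16, while the raw-fwspec branch takes the hwirq directly and rejects anything below 16. A userspace model of the three-cell case:

#include <stdio.h>

static int dt_cells_to_hwirq(unsigned int kind, unsigned int number,
			     unsigned int *hwirq)
{
	if (kind == 0)            /* GIC_SPI */
		*hwirq = number + 32;
	else if (kind == 1)       /* GIC_PPI */
		*hwirq = number + 16;
	else
		return -1;
	return 0;
}

int main(void)
{
	unsigned int hwirq;

	if (!dt_cells_to_hwirq(0, 29, &hwirq))
		printf("SPI 29 -> hwirq %u\n", hwirq);   /* 61 */
	if (!dt_cells_to_hwirq(1, 13, &hwirq))
		printf("PPI 13 -> hwirq %u\n", hwirq);   /* 29 */
	return 0;
}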
1168 if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) { in gic_init_bases()
1169 /* Frankein-GIC without banked registers... */ in gic_init_bases()
1170 unsigned int cpu; in gic_init_bases() local
1172 gic->dist_base.percpu_base = alloc_percpu(void __iomem *); in gic_init_bases()
1173 gic->cpu_base.percpu_base = alloc_percpu(void __iomem *); in gic_init_bases()
1174 if (WARN_ON(!gic->dist_base.percpu_base || in gic_init_bases()
1175 !gic->cpu_base.percpu_base)) { in gic_init_bases()
1176 ret = -ENOMEM; in gic_init_bases()
1180 for_each_possible_cpu(cpu) { in gic_init_bases()
1181 u32 mpidr = cpu_logical_map(cpu); in gic_init_bases()
1183 unsigned long offset = gic->percpu_offset * core_id; in gic_init_bases()
1184 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = in gic_init_bases()
1185 gic->raw_dist_base + offset; in gic_init_bases()
1186 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = in gic_init_bases()
1187 gic->raw_cpu_base + offset; in gic_init_bases()
1193 WARN(gic->percpu_offset, in gic_init_bases()
1195 gic->percpu_offset); in gic_init_bases()
1196 gic->dist_base.common_base = gic->raw_dist_base; in gic_init_bases()
1197 gic->cpu_base.common_base = gic->raw_cpu_base; in gic_init_bases()
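For the non-banked case, each possible CPU gets its own software alias of the distributor and CPU-interface registers at raw_base + percpu_offset * core_id, where core_id is presumably taken from the low affinity field of that CPU's MPIDR (the line computing it is not among the matches). A sketch of the alias arithmetic with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t percpu_offset = 0x8000;        /* from the "cpu-offset" DT property */
	uintptr_t raw_dist_base = 0x10041000;   /* made-up physical example          */

	for (unsigned int core_id = 0; core_id < 4; core_id++)
		printf("core %u distributor alias: %#lx\n", core_id,
		       (unsigned long)(raw_dist_base + percpu_offset * core_id));
	return 0;
}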
1208 gic->gic_irqs = gic_irqs; in gic_init_bases()
1210 gic->domain = irq_domain_create_linear(handle, gic_irqs, in gic_init_bases()
1213 if (WARN_ON(!gic->domain)) { in gic_init_bases()
1214 ret = -ENODEV; in gic_init_bases()
1230 if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) { in gic_init_bases()
1231 free_percpu(gic->dist_base.percpu_base); in gic_init_bases()
1232 free_percpu(gic->cpu_base.percpu_base); in gic_init_bases()
1243 if (WARN_ON(!gic || gic->domain)) in __gic_init_bases()
1244 return -EINVAL; in __gic_init_bases()
1248 * Initialize the CPU interface map to all CPUs. in __gic_init_bases()
1249 * It will be refined as each CPU probes its ID. in __gic_init_bases()
1272 if (gic->raw_dist_base) in gic_teardown()
1273 iounmap(gic->raw_dist_base); in gic_teardown()
1274 if (gic->raw_cpu_base) in gic_teardown()
1275 iounmap(gic->raw_cpu_base); in gic_teardown()
1337 cpuif_res.end = cpuif_res.start + SZ_128K - 1; in gic_check_eoimode()
1343 * Verify that we have the first 4kB of a GICv2 in gic_check_eoimode()
1358 pr_warn("GIC: Adjusting CPU interface base to %pa\n", in gic_check_eoimode()
1392 return -EINVAL; in gic_of_setup()
1394 gic->raw_dist_base = of_iomap(node, 0); in gic_of_setup()
1395 if (WARN(!gic->raw_dist_base, "unable to map gic dist registers\n")) in gic_of_setup()
1398 gic->raw_cpu_base = of_iomap(node, 1); in gic_of_setup()
1399 if (WARN(!gic->raw_cpu_base, "unable to map gic cpu registers\n")) in gic_of_setup()
1402 if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset)) in gic_of_setup()
1403 gic->percpu_offset = 0; in gic_of_setup()
1412 return -ENOMEM; in gic_of_setup()
1419 if (!dev || !dev->of_node || !gic || !irq) in gic_of_init_child()
1420 return -EINVAL; in gic_of_init_child()
1424 return -ENOMEM; in gic_of_init_child()
1426 ret = gic_of_setup(*gic, dev->of_node); in gic_of_init_child()
1430 ret = gic_init_bases(*gic, &dev->of_node->fwnode); in gic_of_init_child()
1436 irq_domain_set_pm_device((*gic)->domain, dev); in gic_of_init_child()
1473 return -ENODEV; in gic_of_init()
1476 return -EINVAL; in gic_of_init()
1486 * or the CPU interface is too small. in gic_of_init()
1488 if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base)) in gic_of_init()
1491 ret = __gic_init_bases(gic, &node->fwnode); in gic_of_init()
1508 gicv2m_init(&node->fwnode, gic_data[gic_cnt].domain); in gic_of_init()
1513 IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
1514 IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
1515 IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
1516 IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
1517 IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
1518 IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
1519 IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
1520 IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
1544 return -EINVAL; in gic_acpi_parse_madt_cpu()
1547 * There is no support for non-banked GICv1/2 register in ACPI spec. in gic_acpi_parse_madt_cpu()
1548 * All CPU interface addresses have to be the same. in gic_acpi_parse_madt_cpu()
1550 gic_cpu_base = processor->base_address; in gic_acpi_parse_madt_cpu()
1552 return -EINVAL; in gic_acpi_parse_madt_cpu()
1555 acpi_data.maint_irq = processor->vgic_interrupt; in gic_acpi_parse_madt_cpu()
1556 acpi_data.maint_irq_mode = (processor->flags & ACPI_MADT_VGIC_IRQ_MODE) ? in gic_acpi_parse_madt_cpu()
1558 acpi_data.vctrl_base = processor->gich_base_address; in gic_acpi_parse_madt_cpu()
1559 acpi_data.vcpu_base = processor->gicv_base_address; in gic_acpi_parse_madt_cpu()
1584 return (dist->version == ape->driver_data && in gic_validate_dist()
1585 (dist->version != ACPI_MADT_GIC_VERSION_NONE || in gic_validate_dist()
1605 vctrl_res->flags = IORESOURCE_MEM; in gic_acpi_setup_kvm_info()
1606 vctrl_res->start = acpi_data.vctrl_base; in gic_acpi_setup_kvm_info()
1607 vctrl_res->end = vctrl_res->start + ACPI_GICV2_VCTRL_MEM_SIZE - 1; in gic_acpi_setup_kvm_info()
1612 vcpu_res->flags = IORESOURCE_MEM; in gic_acpi_setup_kvm_info()
1613 vcpu_res->start = acpi_data.vcpu_base; in gic_acpi_setup_kvm_info()
1614 vcpu_res->end = vcpu_res->start + ACPI_GICV2_VCPU_MEM_SIZE - 1; in gic_acpi_setup_kvm_info()
1641 /* Collect CPU base addresses */ in gic_v2_acpi_init()
1646 return -EINVAL; in gic_v2_acpi_init()
1649 gic->raw_cpu_base = ioremap(acpi_data.cpu_phys_base, ACPI_GIC_CPU_IF_MEM_SIZE); in gic_v2_acpi_init()
1650 if (!gic->raw_cpu_base) { in gic_v2_acpi_init()
1652 return -ENOMEM; in gic_v2_acpi_init()
1656 gic->raw_dist_base = ioremap(dist->base_address, in gic_v2_acpi_init()
1658 if (!gic->raw_dist_base) { in gic_v2_acpi_init()
1661 return -ENOMEM; in gic_v2_acpi_init()
1666 * guarantees that we'll always have a GICv2, so the CPU in gic_v2_acpi_init()
1673 * Initialize GIC instance zero (no multi-GIC support). in gic_v2_acpi_init()
1675 gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address); in gic_v2_acpi_init()
1679 return -ENOMEM; in gic_v2_acpi_init()