/linux/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.yaml

      4  $id: http://devicetree.org/schemas/mailbox/xlnx,zynqmp-ipi-mailbox.yaml#
      7  title: Xilinx IPI(Inter Processor Interrupt) mailbox controller
     10  The Xilinx IPI(Inter Processor Interrupt) mailbox controller is to manage
     11  messaging between two Xilinx Zynq UltraScale+ MPSoC IPI agents. Each IPI
     15  | Xilinx ZynqMP IPI Controller |
     27  Hardware | | IPI Agent | | IPI Buffers | |
     32  | Xilinx IPI Agent Block |
     41  - xlnx,zynqmp-ipi-mailbox
     42  - xlnx,versal-ipi-mailbox
     69  xlnx,ipi-id:
         [all …]

/linux/drivers/mailbox/zynqmp-ipi-mailbox.c

      3  * Xilinx Inter Processor Interrupt(IPI) Mailbox Driver
     17  #include <linux/mailbox/zynqmp-ipi-message.h>
     24  /* IPI agent ID any */
     27  /* indicate if ZynqMP IPI mailbox driver uses SMC calls or HVC calls */
     31  /* Default IPI SMC function IDs */
     40  /* IPI SMC Macros */
     50  /* IPI mailbox status */
     55  #define IPI_MB_CHNL_TX 0 /* IPI mailbox TX channel */
     56  #define IPI_MB_CHNL_RX 1 /* IPI mailbox RX channel */
     58  /* IPI Message Buffer Information */
         [all …]

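This driver exposes each TX/RX channel pair through the generic mailbox framework, so consumers drive it with the ordinary mbox client API. A minimal one-shot sender sketch, assuming a consumer node wired up with mbox-names = "tx"; the payload and the example_* names are illustrative, not taken from the driver:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/mailbox_client.h>
    #include <linux/mailbox/zynqmp-ipi-message.h>
    #include <linux/string.h>

    static int example_ipi_send(struct device *dev)
    {
    	u64 storage[4] = { 0 };	/* aligned backing for the message */
    	struct zynqmp_ipi_message *msg = (struct zynqmp_ipi_message *)storage;
    	struct mbox_client cl = {
    		.dev = dev,
    		.tx_block = true,	/* block until the remote acks... */
    		.tx_tout = 100,		/* ...or 100 ms elapse */
    	};
    	struct mbox_chan *chan;
    	int ret;

    	chan = mbox_request_channel_byname(&cl, "tx");
    	if (IS_ERR(chan))
    		return PTR_ERR(chan);

    	msg->len = 12;
    	memset(msg->data, 0xab, msg->len);	/* dummy payload */

    	ret = mbox_send_message(chan, msg);
    	mbox_free_channel(chan);
    	return ret < 0 ? ret : 0;
    }
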
/linux/arch/mips/kvm/loongson_ipi.c

      3  * Loongson-3 Virtual IPI interrupt support.
     53  static int loongson_vipi_read(struct loongson_kvm_ipi *ipi,  [in loongson_vipi_read(), argument]
     61  struct ipi_state *s = &(ipi->ipistate[id]);  [in loongson_vipi_read()]
     98  static int loongson_vipi_write(struct loongson_kvm_ipi *ipi,  [in loongson_vipi_write(), argument]
    106  struct kvm *kvm = ipi->kvm;  [in loongson_vipi_write()]
    108  struct ipi_state *s = &(ipi->ipistate[id]);  [in loongson_vipi_write()]
    157  struct loongson_kvm_ipi *ipi;  [in kvm_ipi_read(), local]
    161  ipi = ipi_device->ipi;  [in kvm_ipi_read()]
    163  spin_lock_irqsave(&ipi->lock, flags);  [in kvm_ipi_read()]
    164  loongson_vipi_read(ipi, addr, len, val);  [in kvm_ipi_read()]
         [all …]

/linux/kernel/irq/ipi.c

      6  * This file contains driver APIs to the IPI subsystem.
      9  #define pr_fmt(fmt) "genirq/ipi: " fmt
     15  * irq_reserve_ipi() - Setup an IPI to destination cpumask
     16  * @domain: IPI domain
     17  * @dest: cpumask of CPUs which can receive the IPI
     19  * Allocate a virq that can be used to send IPI to any CPU in dest mask.
     31  pr_warn("Reservation on a non IPI domain\n");  [in irq_reserve_ipi()]
     59  * The IPI requires a separate HW irq on each CPU. We require  [in irq_reserve_ipi()]
     62  * several IPI ranges.  [in irq_reserve_ipi()]
     80  pr_warn("Can't reserve IPI, failed to alloc descs\n");  [in irq_reserve_ipi()]
         [all …]

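The kerneldoc lines above describe irq_reserve_ipi(). A minimal sketch of the reserve-and-send flow, assuming the platform irqchip supplies an IPI-capable domain; the example_* names are hypothetical:

    #include <linux/cpumask.h>
    #include <linux/irq.h>
    #include <linux/irqdomain.h>

    static unsigned int example_ipi_virq;

    static int example_reserve(struct irq_domain *ipi_domain)
    {
    	/* One virq, usable against any possible CPU. */
    	int virq = irq_reserve_ipi(ipi_domain, cpu_possible_mask);

    	if (virq < 0)
    		return virq;
    	example_ipi_virq = virq;
    	/* A handler would normally be requested on the virq here. */
    	return 0;
    }

    static void example_kick_one(unsigned int cpu)
    {
    	ipi_send_single(example_ipi_virq, cpu);	/* raise on one CPU */
    }

    static void example_kick_all(void)
    {
    	ipi_send_mask(example_ipi_virq, cpu_online_mask);
    }
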
/linux/kernel/irq/ipi-mux.c

      3  * Multiplex several virtual IPIs over a single HW IPI.
      9  #define pr_fmt(fmt) "ipi-mux: " fmt
     49  /* If a pending IPI was unmasked, raise a parent IPI immediately. */  [in ipi_mux_unmask()]
     81  * The flag writes must complete before the physical IPI is  [in ipi_mux_send_mask()]
     92  .name = "IPI Mux",
    146  * parent IPI.
    149  * @mux_send: callback to trigger parent IPI for a particular CPU
    170  fwnode = irq_domain_alloc_named_fwnode("IPI-Mux");  [in ipi_mux_create()]
    172  pr_err("unable to create IPI Mux fwnode\n");  [in ipi_mux_create()]
    180  pr_err("unable to add IPI Mux domain\n");  [in ipi_mux_create()]
         [all …]

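A sketch of how an irqchip driver sits on top of this multiplexer, modeled loosely on the RISC-V SBI IPI driver further down; hw_send_ipi() stands in for whatever raises the platform's single hardware IPI:

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static void hw_send_ipi(unsigned int cpu)
    {
    	/* platform specific: kick the one HW IPI on @cpu */
    }

    /* Parent handler for the hardware IPI. */
    static irqreturn_t hw_ipi_handler(int irq, void *dev_id)
    {
    	ipi_mux_process();	/* demultiplex and run pending virtual IPIs */
    	return IRQ_HANDLED;
    }

    static int example_init(void)
    {
    	/* Stack 8 virtual IPIs on the single HW IPI. */
    	int first_virq = ipi_mux_create(8, hw_send_ipi);

    	if (first_virq < 0)
    		return first_virq;

    	/*
    	 * The virq range [first_virq, first_virq + 7] is then handed to
    	 * the arch SMP code, which requests handlers on it.
    	 */
    	return 0;
    }
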
/linux/drivers/remoteproc/xlnx_r5_remoteproc.c

     12  #include <linux/mailbox/zynqmp-ipi-message.h>
     22  /* IPI buffer MAX length */
    143  * @ipi: pointer to mailbox information
    156  struct mbox_info *ipi;  [member]
    203  struct mbox_info *ipi;  [in handle_event_notified(), local]
    206  ipi = container_of(work, struct mbox_info, mbox_work);  [in handle_event_notified()]
    207  rproc = ipi->r5_core->rproc;  [in handle_event_notified()]
    210  * We only use IPI for interrupt. The RPU firmware side may or may  [in handle_event_notified()]
    211  * not write the notifyid when it trigger IPI.  [in handle_event_notified()]
    224  * Receive data from ipi buffer, ack interrupt and then
         [all …]

/linux/include/linux/rpmsg/mtk_rpmsg.h

     15  * struct mtk_rpmsg_info - IPI functions tied to the rpmsg device.
     16  * @register_ipi: register IPI handler for an IPI id.
     17  * @unregister_ipi: unregister IPI handler for a registered IPI id.
     18  * @send_ipi: send IPI to an IPI id. wait is the timeout (in msecs) to wait
     20  * @ns_ipi_id: the IPI id used for name service, or -1 if name service isn't

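A usage sketch of these ops; the op names and the msec wait semantics come from the kerneldoc above, while the handler signature and the IPI id are assumptions made for illustration:

    #include <linux/platform_device.h>
    #include <linux/rpmsg/mtk_rpmsg.h>
    #include <linux/types.h>

    static void example_handler(void *data, unsigned int len, void *priv)
    {
    	/* consume the payload the remote processor sent */
    }

    static int example(struct platform_device *pdev, struct mtk_rpmsg_info *info)
    {
    	const u32 id = 1;	/* hypothetical IPI id */
    	char ping[] = "ping";
    	int ret;

    	ret = info->register_ipi(pdev, id, example_handler, NULL);
    	if (ret)
    		return ret;

    	/* wait up to 100 ms for the remote side to accept the message */
    	ret = info->send_ipi(pdev, id, ping, sizeof(ping), 100);

    	info->unregister_ipi(pdev, id);
    	return ret;
    }
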
/linux/drivers/media/platform/mediatek/vpu/mtk_vpu.h

     32  * For other IPI below, AP should send the request
     52  * @IPI_MAX: The maximum IPI number
     82  * vpu_ipi_register - register an ipi function
     85  * @id: IPI ID
     86  * @handler: IPI handler
     87  * @name: IPI name
     88  * @priv: private data for IPI handler
     90  * Register an ipi function to receive ipi interrupt from VPU.
     92  * Return: Return 0 if ipi registers successfully, otherwise it is failed.
    101  * @id: IPI ID
         [all …]

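A registration sketch built from the kerneldoc above; IPI_VDEC_H264 is one of the enum ipi_id values in this header, but the handler body and surrounding driver context are hypothetical:

    #include <linux/platform_device.h>
    #include "mtk_vpu.h"

    /* Runs when the VPU answers; signature follows ipi_handler_t. */
    static void example_vpu_handler(const void *data, unsigned int len,
    				void *priv)
    {
    	/* parse the message written by VPU firmware */
    }

    static int example_register(struct platform_device *vpu_pdev)
    {
    	/* 0 on success, negative errno otherwise */
    	return vpu_ipi_register(vpu_pdev, IPI_VDEC_H264,
    				example_vpu_handler, "example-vdec", NULL);
    }
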
/linux/include/trace/events/ipi.h

      3  #define TRACE_SYSTEM ipi
     13  * @mask: mask of recipient CPUs for the IPI
     14  * @reason: string identifying the IPI purpose
    100  * ipi_entry - called immediately before the IPI handler
    102  * @reason: string identifying the IPI purpose
    106  * for that IPI.
    116  * ipi_exit - called immediately after the IPI handler returns
    118  * @reason: string identifying the IPI purpose
    122  * that IPI.

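A sketch of how arch IPI code brackets delivery with these three events; the "function call" reason string is free-form and simply shows up in the trace:

    #include <linux/cpumask.h>
    #include <trace/events/ipi.h>

    static void example_raise(const struct cpumask *target_mask)
    {
    	trace_ipi_raise(target_mask, "function call");
    	/* ... write to the interrupt controller to raise the IPI ... */
    }

    static void example_irq_handler(void)
    {
    	trace_ipi_entry("function call");	/* just before the handler body */
    	/* ... service the IPI ... */
    	trace_ipi_exit("function call");	/* just after it returns */
    }
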
/linux/Documentation/virt/kvm/vcpu-requests.rst

     49  order to perform some KVM maintenance. To do so, an IPI is sent, forcing
     55  1) Send an IPI. This forces a guest mode exit.
     70  as well as to avoid sending unnecessary IPIs (see "IPI Reduction"), and
     71  even to ensure IPI acknowledgements are waited upon (see "Waiting for
    160  then the caller will wait for each VCPU to acknowledge its IPI before
    162  If, for example, the VCPU is sleeping, so no IPI is necessary, then
    192  kick will send an IPI to force an exit from guest mode when necessary.
    197  enter guest mode. This means that an optimized implementation (see "IPI
    198  Reduction") must be certain when it's safe to not send the IPI. One
    208  !kvm_request_pending() on its last check and then not receiving an IPI for
         [all …]

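On the requester side, the protocol this document describes reduces to a make-request followed by a kick; a sketch using the generic KVM_REQ_UNBLOCK request as a stand-in for an arch-specific one:

    #include <linux/kvm_host.h>

    static void example_request(struct kvm_vcpu *vcpu)
    {
    	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);	/* set the bit first */
    	kvm_vcpu_kick(vcpu);	/* IPI forces a guest-mode exit if needed */
    }

    /* vCPU thread side, before (re)entering guest mode: */
    static void example_service(struct kvm_vcpu *vcpu)
    {
    	if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu)) {
    		/* perform the maintenance the requester asked for */
    	}
    }
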
/linux/arch/hexagon/kernel/smp.c

     38  static inline void __handle_ipi(unsigned long *ops, struct ipi_data *ipi,  [in __handle_ipi(), argument]
     69  /* Used for IPI call from other CPU's to unmask int */
     77  * This is based on Alpha's IPI stuff.
     85  struct ipi_data *ipi = &per_cpu(ipi_data, cpu);  [in handle_ipi(), local]
     88  while ((ops = xchg(&ipi->bits, 0)) != 0)  [in handle_ipi()]
     89  __handle_ipi(&ops, ipi, cpu);  [in handle_ipi()]
    102  struct ipi_data *ipi = &per_cpu(ipi_data, cpu);  [in send_ipi(), local]
    104  set_bit(msg, &ipi->bits);  [in send_ipi()]
    208  /* Also need to register the interrupts for IPI */  [in smp_prepare_cpus()]

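The xchg() loop excerpted above implements an Alpha-style message word: senders set bits in a per-CPU word, the handler atomically swaps the word to zero and services everything it collected. Distilled into a standalone sketch (the example_* names are not hexagon's):

    #include <linux/atomic.h>
    #include <linux/bitops.h>

    struct example_ipi_data {
    	unsigned long bits;
    };

    static void example_send(struct example_ipi_data *ipi, int msg)
    {
    	set_bit(msg, &ipi->bits);	/* atomic: safe against the handler */
    	/* ... then trigger the target CPU's hardware IPI ... */
    }

    static void example_handle(struct example_ipi_data *ipi)
    {
    	unsigned long ops;
    	int msg;

    	/* Loop in case new messages raced in while we were handling. */
    	while ((ops = xchg(&ipi->bits, 0)) != 0)
    		for_each_set_bit(msg, &ops, BITS_PER_LONG) {
    			/* dispatch @msg (reschedule, call-function, stop, ...) */
    		}
    }
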
/linux/arch/arc/kernel/smp.c

    244  * figure out what msg was sent. For those which don't (ARC has dedicated IPI
    270  * Call the platform specific IPI kick function, but avoid if possible:  [in ipi_send_msg_one()]
    273  * IPI corresponding to that msg. This is true, even if it is already in  [in ipi_send_msg_one()]
    274  * IPI handler, because !@old means it has not yet dequeued the msg(s)  [in ipi_send_msg_one()]
    315  * ipi_cpu_stop - handle IPI from smp_send_stop()
    348  * Has hooks for platform specific IPI
    355  pr_debug("IPI [%ld] received on cpu %d\n",  [in do_IPI()]
    362  * "dequeue" the msg corresponding to this IPI (and possibly other  [in do_IPI()]
    373  pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);  [in do_IPI()]
    381  * API called by platform code to hookup arch-common ISR to their IPI IRQ
         [all …]

/linux/arch/riscv/kernel/sbi-ipi.c

      3  * Multiplex several IPIs over a single HW IPI.
     70  * Don't disable IPI when CPU goes offline because  [in sbi_ipi_init()]
     72  * via generic IPI-Mux  [in sbi_ipi_init()]
     75  "irqchip/sbi-ipi:starting",  [in sbi_ipi_init()]
     79  pr_info("providing IPIs using SBI IPI extension\n");  [in sbi_ipi_init()]

/linux/arch/riscv/kernel/smp.c

      3  * SMP initialisation and IPI support
    121  int ipi = irq - ipi_virq_base;  [in handle_IPI(), local]
    123  switch (ipi) {  [in handle_IPI()]
    151  pr_warn("CPU%d: unhandled IPI%d\n", cpu, ipi);  [in handle_IPI()]
    199  "IPI", &ipi_dummy_dev);  [in riscv_ipi_set_virq_range()]
    226  seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,  [in show_ipi_stats()]

/linux/kernel/sched/membarrier.c

     12  * A) Userspace thread execution after IPI vs membarrier's memory
     13  * barrier before sending the IPI
     22  * CPU1 after the IPI-induced memory barrier:
     29  * b: send IPI IPI-induced mb
     46  * before the IPI-induced memory barrier on CPU1.
     48  * B) Userspace thread execution before IPI vs membarrier's memory
     49  * barrier after completing the IPI
     68  * b: send IPI IPI-induced mb
     80  * after the IPI-induced memory barrier on CPU1.
    177  * ensure that memory on remote CPUs that occur before the IPI  [in ipi_sync_core()]
         [all …]

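From userspace, the IPI-induced barriers analysed in these ordering scenarios are requested through membarrier(2), which has no glibc wrapper; a minimal sketch:

    #include <linux/membarrier.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /*
     * MEMBARRIER_CMD_GLOBAL makes the kernel IPI the relevant CPUs, so
     * every running thread observes the IPI-induced memory barrier.
     */
    static int global_membarrier(void)
    {
    	return syscall(__NR_membarrier, MEMBARRIER_CMD_GLOBAL, 0, 0);
    }
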
/linux/drivers/irqchip/irq-riscv-imsic-early.c

     49  /* Create IMSIC IPI multiplexing */  [in imsic_ipi_domain_init()]
    159  /* Initialize IPI domain */  [in imsic_early_probe()]
    162  pr_err("%pfwP: Failed to initialize IPI domain\n", fwnode);  [in imsic_early_probe()]
    173  * because this affects IPI and the masking/unmasking of  [in imsic_early_probe()]
    174  * virtual IPIs is done via generic IPI-Mux  [in imsic_early_probe()]
    254  * Even if imsic_platform_acpi_probe() fails, the IPI part of IMSIC can  [in imsic_early_acpi_init()]
    256  * DT where IPI works but MSI probe fails for some reason.  [in imsic_early_acpi_init()]

/linux/drivers/irqchip/irq-armada-370-xp.c

    138  /* IPI and MSI interrupt definitions for IPI platforms */
    145  /* MSI interrupt definitions for non-IPI platforms */
    158  * @ipi_domain: IPI domain
    195  * We distinguish IPI availability in the IC by the IC not having a  [in mpic_is_ipi_available()]
    365  /* Unmask low 16 MSI irqs on non-IPI platforms */  [in mpic_msi_init()]
    431  * other CPUs before issuing the IPI.  [in mpic_ipi_send_mask()]
    447  .name = "IPI",
    541  /* Unmask IPI interrupt */  [in mpic_smp_cpu_init()]
    721  /* IPI Handling */  [in mpic_handle_irq()]
    853  * it is used to distinguish between IPI and non-IPI platforms.  [in mpic_of_init()]
         [all …]

/linux/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c

      9  * Test that when the APIC is in xAPIC mode, a vCPU can send an IPI to wake
     15  * has reentered HLT before sending the next IPI. While the vCPUs are running,
     41  * Vector for IPI from sender vCPU to halting vCPU.
     48  * Incremented in the IPI handler. Provides evidence to the sender that the IPI
    115  * Runs on halter vCPU when IPI arrives. Write an arbitrary non-zero value to
    158  * Send IPI to halter vCPU.  [in sender_guest_code()]
    159  * First IPI can be sent unconditionally because halter vCPU  [in sender_guest_code()]
    168  * 1. Received the IPI  [in sender_guest_code()]
    346  "IPI, HLT and wake count have not increased "  [in do_migrations()]
    454  "IPI sender vCPU thread started. Letting vCPUs run for %d seconds.\n",  [in main()]

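In xAPIC mode the sender programs the two ICR halves over MMIO: destination APIC ID into the high dword first, then the vector into the low dword, which fires the IPI. A guest-side sketch of the mechanism the test exercises (constants are from the Intel SDM, not the selftest):

    #include <stdint.h>

    /* xAPIC MMIO register offsets (Intel SDM vol. 3). */
    #define APIC_DEFAULT_BASE 0xfee00000UL
    #define APIC_ICR          0x300	/* low dword: vector, delivery mode */
    #define APIC_ICR2         0x310	/* high dword: dest APIC ID in bits 31:24 */

    static void xapic_send_ipi(uint8_t dest_apic_id, uint8_t vector)
    {
    	volatile uint32_t *icr =
    		(volatile uint32_t *)(APIC_DEFAULT_BASE + APIC_ICR);
    	volatile uint32_t *icr2 =
    		(volatile uint32_t *)(APIC_DEFAULT_BASE + APIC_ICR2);

    	*icr2 = (uint32_t)dest_apic_id << 24;	/* program destination first */
    	*icr = vector;	/* fixed delivery, physical dest; this write sends */
    }
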
/linux/Documentation/admin-guide/hw-vuln/core-scheduling.rst

    112  Once a task has been selected for all the siblings in the core, an IPI is sent to
    113  siblings for whom a new task was selected. Siblings on receiving the IPI will
    130  When the highest priority task is selected to run, a reschedule-IPI is sent to
    142  (victim) to enter idle mode. This is because the sending of the IPI would bring
    145  which may not be worth protecting. It is also possible that the IPI is received
    171  IPI processing delays
    173  Core scheduling selects only trusted tasks to run together. IPI is used to notify
    175  receiving of the IPI on some arch (on x86, this has not been observed). This may
    177  IPI. Even though cache is flushed on entry to user mode, victim tasks on siblings

/linux/arch/powerpc/sysdev/xics/icp-opal.c

     26  /* Clear any pending IPI */  [in icp_opal_teardown_cpu()]
     33  * We take the ipi irq but and never return so we need to EOI the IPI,  [in icp_opal_flush_ipi()]
     92  * Here be dragons. The caller has asked to allow only IPI's and not  [in icp_opal_set_cpu_priority()]
    159  /* Clear pending IPI */  [in icp_opal_flush_interrupt()]

/linux/Documentation/devicetree/bindings/powerpc/fsl/mpic.txt

     71  non-IPI interrupts to a single CPU at a time (EG: Freescale MPIC).
    127  2 = MPIC inter-processor interrupt (IPI)
    130  the MPIC IPI number. The type-specific
    193  * MPIC IPI interrupts. Note the interrupt
    196  ipi@410a0 {
    197  compatible = "fsl,mpic-ipi";

/linux/kernel/smp.c

      3  * Generic helpers for smp ipi calls
     30  #include <trace/events/ipi.h>
    300  pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_proces…  [in csd_lock_wait_toolong()]
    314  * For non-synchronous ipi calls the csd can still be in use by the
    384  * even if we haven't sent the smp_call IPI yet (e.g. the stopper  [in __smp_call_single_queue()]
    400  * the head of the list to pull the entry off it in the IPI handler  [in __smp_call_single_queue()]
    450  * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
    452  * Invoked by arch to handle an IPI for call function single.
    467  * invoked by the generic IPI handler, as well as by a CPU about to go offline,
    468  * to ensure that all pending IPI callbacks are run before it goes completely
         [all …]

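The cross-call machinery this file implements is consumed through helpers like smp_call_function_single(); a sketch that runs a function on one remote CPU via IPI and waits for it to finish there:

    #include <linux/printk.h>
    #include <linux/smp.h>

    static void read_remote_state(void *info)
    {
    	unsigned long *out = info;

    	*out = raw_smp_processor_id();	/* stand-in for real per-CPU work */
    }

    static int example(int cpu)
    {
    	unsigned long value = 0;
    	int ret;

    	/* wait=1: block until read_remote_state() has completed on @cpu */
    	ret = smp_call_function_single(cpu, read_remote_state, &value, 1);
    	if (!ret)
    		pr_info("cpu %d reported %lu\n", cpu, value);
    	return ret;
    }
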
/linux/tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json

     39  "PublicDescription": "Sent IPI to other HART event",
     42  "BriefDescription": "Sent IPI to other HART event"
     45  "PublicDescription": "Received IPI from other HART event",
     48  "BriefDescription": "Received IPI from other HART event"

/linux/arch/arm64/kernel/smp.c

      3  * SMP initialisation and IPI support
     56  #include <trace/events/ipi.h>
    844  seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,  [in arch_show_interrupts()]
    931  * returned false our backtrace attempt will just use a regular IPI.  [in arch_trigger_cpumask_backtrace()]
   1006  pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);  [in do_handle_IPI()]
   1026  static bool ipi_should_be_nmi(enum ipi_msg_type ipi)  [in ipi_should_be_nmi(), argument]
   1031  switch (ipi) {  [in ipi_should_be_nmi()]
   1089  "IPI", &irq_stat);  [in set_smp_ipi_range()]
   1090  WARN(err, "Could not request IPI %d as NMI, err=%d\n",  [in set_smp_ipi_range()]
   1094  "IPI", &irq_stat);  [in set_smp_ipi_range()]
         [all …]

/linux/arch/powerpc/kvm/book3s_hv_builtin.c

    200  * to memory vs. the IPI/message.
    227  /* Else poke the target with an IPI */  [in kvmhv_rm_send_ipi()]
    243  /* Order setting of exit map vs. msgsnd/IPI */  [in kvmhv_interrupt_vcore()]
    332  * If we have an interrupt that's not an IPI, check if we have a
    378  * -1 if there was a guest wakeup IPI (which has now been cleared)
    413  /* see if a host IPI is pending */  [in kvmppc_read_one_intr()]
    449  * If it is an IPI, clear the MFRR and EOI it.  [in kvmppc_read_one_intr()]
    470  * We need to re-check host IPI now in case it got set in the  [in kvmppc_read_one_intr()]
    477  * we need to resend that IPI, bummer  [in kvmppc_read_one_intr()]
    490  /* OK, it's an IPI for us */  [in kvmppc_read_one_intr()]