/linux/arch/x86/kernel/ |
nmi.c
     18  #include <linux/nmi.h>
     31  #include <asm/nmi.h>
     41  #include <trace/events/nmi.h>
     91  * Prevent NMI reason port (0x61) being accessed simultaneously, can
     92  * only be used in NMI handler.
    128  "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",  in nmi_check_duration()
    161  /* return total number of NMI events handled */  in nmi_handle()
    178  * internal NMI handler call chains (SERR and IO_CHECK).  in __register_nmi_handler()
    207  * the name passed in to describe the nmi handler  in unregister_nmi_handler()
    212  "Trying to free NMI (%s) from NMI context!\n", n->name);  in unregister_nmi_handler()
    [all …]
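The registration API these hits point at (register_nmi_handler()/unregister_nmi_handler() from <asm/nmi.h>) is used roughly as in the sketch below. The handler name "my_nmi" and the my_device_raised_nmi() check are hypothetical placeholders, so treat this as an illustrative outline rather than a drop-in module::

    #include <linux/module.h>
    #include <linux/init.h>
    #include <asm/nmi.h>    /* register_nmi_handler(), NMI_LOCAL, NMI_HANDLED, NMI_DONE */

    static bool my_device_raised_nmi(void)
    {
            return false;   /* hypothetical: query your hardware's NMI status */
    }

    static int my_nmi_handler(unsigned int type, struct pt_regs *regs)
    {
            /* Only claim the NMI if this device raised it; otherwise let the
             * rest of the handler chain (perf, watchdog, ...) have a look. */
            if (!my_device_raised_nmi())
                    return NMI_DONE;

            /* Do minimal, NMI-safe work here: no sleeping, no regular locks. */
            return NMI_HANDLED;
    }

    static int __init my_nmi_init(void)
    {
            /* NMI_LOCAL: CPU-local NMIs (perf-style); the string names the
             * handler and is what unregister_nmi_handler() matches on. */
            return register_nmi_handler(NMI_LOCAL, my_nmi_handler, 0, "my_nmi");
    }

    static void __exit my_nmi_exit(void)
    {
            /* Must not be called from NMI context, per the warning quoted above. */
            unregister_nmi_handler(NMI_LOCAL, "my_nmi");
    }

    module_init(my_nmi_init);
    module_exit(my_nmi_exit);
    MODULE_LICENSE("GPL");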
|
nmi_selftest.c
      3  * arch/x86/kernel/nmi-selftest.c
      5  * Testsuite for NMI: IPIs
     20  #include <asm/nmi.h>
     28  /* check to see if NMI IPIs work on this machine */
     75  /* sync above data before sending NMI */  in test_nmi_ipi()
    151  printk("| NMI testsuite:\n");  in nmi_selftest()
|
smp.c
     33  #include <asm/nmi.h>
    123  /* We are registered on stopping cpu too, avoid spurious NMI */  in smp_stop_nmi_callback()
    177  * 3) If #2 timed out send an NMI to the CPUs which did not  in native_stop_other_cpus()
    186  * other CPU is still handling the NMI. Again, there is no  in native_stop_other_cpus()
    199  * prevent an NMI shutdown attempt in case that not all  in native_stop_other_cpus()
    207  /* if the REBOOT_VECTOR didn't work, try with the NMI */  in native_stop_other_cpus()
    210  * If NMI IPI is enabled, try to register the stop handler  in native_stop_other_cpus()
    217  pr_emerg("Shutting down cpus with NMI\n");  in native_stop_other_cpus()
|
/linux/arch/x86/platform/uv/ |
uv_nmi.c
      3  * SGI NMI support routines
     16  #include <linux/nmi.h>
     27  #include <asm/nmi.h>
     35  * UV handler for NMI
     37  * Handle system-wide NMI events generated by the global 'power nmi' command.
     39  * Basic operation is to field the NMI interrupt on each CPU and wait
     40  * until all CPU's have arrived into the nmi handler. If some CPU's do not
     41  * make it into the handler, try and force them in with the IPI(NMI) signal.
     47  * To do this we register our primary NMI notifier on the NMI_UNKNOWN
     48  * chain. This reduces the number of false NMI calls when the perf
    [all …]
|
/linux/Documentation/RCU/ |
NMI-RCU.rst
      3  Using RCU to Protect Dynamic NMI Handlers
     10  how to do this, drawing loosely from Zwane Mwaikambo's NMI-timer
     21  The dummy_nmi_callback() function is a "dummy" NMI handler that does
     23  the NMI handler to take the default machine-specific action::
     28  NMI handler::
     45  The do_nmi() function processes each NMI. It first disables preemption
     47  count of NMIs. It then invokes the NMI handler stored in the nmi_callback
     49  default_do_nmi() function to handle a machine-specific NMI. Finally,
     63  Back to the discussion of NMI and RCU::
     70  The set_nmi_callback() function registers an NMI handler. Note that any
    [all …]
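The pattern this document describes — an NMI handler pointer that can be swapped at runtime, protected by RCU rather than by a lock — looks roughly like the sketch below, condensed from the document's own example. The helpers default_do_nmi(), nmi_enter()/nmi_exit() and the nmi_callback_t type are the x86-flavoured names used there::

    /* Default handler: returning 0 tells do_nmi() to fall back to the
     * machine-specific default action. */
    static int dummy_nmi_callback(struct pt_regs *regs, int cpu)
    {
            return 0;
    }

    static nmi_callback_t nmi_callback = dummy_nmi_callback;

    void do_nmi(struct pt_regs *regs, long error_code)
    {
            nmi_enter();    /* NMI context: preemption is off for the duration */

            /* rcu_dereference_sched() pairs with rcu_assign_pointer() below, so
             * a concurrently installed callback is seen fully or not at all. */
            if (!rcu_dereference_sched(nmi_callback)(regs, smp_processor_id()))
                    default_do_nmi(regs);

            nmi_exit();
    }

    void set_nmi_callback(nmi_callback_t callback)
    {
            rcu_assign_pointer(nmi_callback, callback);
    }

    void unset_nmi_callback(void)
    {
            rcu_assign_pointer(nmi_callback, dummy_nmi_callback);
    }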
|
/linux/Documentation/devicetree/bindings/interrupt-controller/ |
allwinner,sun7i-a20-sc-nmi.yaml
      4  $id: http://devicetree.org/schemas/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml#
     25  - const: allwinner,sun6i-a31-sc-nmi
     27  - const: allwinner,sun7i-a20-sc-nmi
     28  - const: allwinner,sun9i-a80-nmi
     31  - allwinner,sun8i-v3s-nmi
     32  - allwinner,sun50i-a100-nmi
     33  - allwinner,sun50i-h616-nmi
     34  - const: allwinner,sun9i-a80-nmi
     56  compatible = "allwinner,sun7i-a20-sc-nmi";
|
renesas,rzg2l-irqc.yaml
     15  interrupts of NMI, IRQ, and GPIOINT and the interrupts of the built-in peripheral
     19  - NMI edge select (NMI is not treated as NMI exception and supports fall edge and
     36  description: The first cell should contain a macro RZG2L_{NMI,IRQX} included in the
     52  - description: NMI interrupt
    104  - const: nmi
    263  interrupt-names = "nmi",
|
/linux/arch/sparc/kernel/ |
nmi.c
      2  /* Pseudo NMI support on sparc64 systems.
      6  * The NMI watchdog support and infrastructure is based almost
      7  * entirely upon the x86 NMI support code.
     13  #include <linux/nmi.h>
     29  /* We don't have a real NMI on sparc64, but we can fake one
     40  * >0: the NMI watchdog is active, but can be disabled
     41  * <0: the NMI watchdog has not been set up, and cannot be enabled
     42  * 0: the NMI watchdog is disabled, but can be enabled
    100  if (notify_die(DIE_NMI, "nmi", regs, 0,  in perfctr_irq()
    114  die_nmi("BUG: NMI Watchdog detected LOCKUP",  in perfctr_irq()
    [all …]
|
/linux/arch/x86/entry/ |
entry_64.S
    944  * only on return from non-NMI IST interrupts that came
   1110  * the iretq it performs will take us out of NMI context.
   1112  * NMI is using the top of the stack of the previous NMI. We
   1113  * can't let it execute because the nested NMI will corrupt the
   1114  * stack of the previous NMI. NMI handlers are not re-entrant
   1121  * is an NMI stack.
   1122  * If the variable is not set and the stack is not the NMI
   1128  * o Continue processing the NMI
   1129  * If the variable is set or the previous stack is the NMI stack:
   1131  * o return back to the first NMI
    [all …]
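The decision those comments describe is, in C-like pseudocode, roughly the following; the real check is hand-written assembly in entry_64.S and every helper name here is purely illustrative::

    /* Illustrative pseudocode only -- not the actual entry_64.S code. */
    if (nmi_executing_variable_is_set() || previous_stack_is_nmi_stack()) {
            /*
             * Nested NMI: we interrupted an NMI that is still in progress.
             * Do not run the handler on top of its stack; just note that
             * another NMI arrived and return to the first NMI, which will
             * repeat itself once it finishes.
             */
            mark_nmi_repeat();
            return_to_first_nmi();
    } else {
            /* First-level NMI: mark it as executing and process it normally. */
            set_nmi_executing_variable();
            process_nmi();
    }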
|
/linux/arch/x86/lib/ |
usercopy.c
     14  * copy_from_user_nmi - NMI safe copy from user
     22  * from NMI context. Despite the name it is not restricted to be called
     23  * from NMI context. It is safe to be called from any other context as
     27  * For NMI context invocations this relies on the nested NMI work to allow
     28  * atomic faults from the NMI path; the nested NMI paths are careful to
     43  * Even though this function is typically called from NMI/IRQ context  in copy_from_user_nmi()
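As a usage sketch: copy_from_user_nmi() has copy_from_user()-style semantics, returning the number of bytes that could not be copied, which suits best-effort reads such as sampling a user stack word from a perf NMI. The helper name read_user_word() is made up for illustration::

    #include <linux/uaccess.h>  /* copy_from_user_nmi() declaration (x86) */

    /* Best-effort read of one user word from the sampled user stack pointer.
     * Returns true only if the whole value was copied. */
    static bool read_user_word(struct pt_regs *regs, unsigned long *val)
    {
            unsigned long left;

            left = copy_from_user_nmi(val, (const void __user *)regs->sp,
                                      sizeof(*val));
            return left == 0;   /* non-zero: faulted / page not present */
    }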
|
/linux/lib/ |
nmi_backtrace.c
      3  * NMI backtrace support
      8  * HW NMI watchdog support
     12  * Arch specific calls to support NMI watchdog
     14  * Bits copied from original nmi.c file
     19  #include <linux/nmi.h>
     56  * Don't try to send an NMI to this cpu; it may work on some  in nmi_trigger_cpumask_backtrace()
     65  pr_info("Sending NMI from CPU %d to CPUs %*pbl:\n",  in nmi_trigger_cpumask_backtrace()
    101  * Allow nested NMI backtraces while serializing  in nmi_cpu_backtrace()
    106  pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",  in nmi_cpu_backtrace()
    109  pr_warn("NMI backtrace for cpu %d\n", cpu);  in nmi_cpu_backtrace()
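The way an architecture typically hooks into this helper (mirroring x86's hw_nmi.c, listed further below) is sketched here. Exact signatures have shifted between kernel versions (exclude_self vs. exclude_cpu), and raise_backtrace_nmi() is a hypothetical name for the arch's "send an NMI to these CPUs" callback, so take this as an assumption-laden outline::

    #include <linux/nmi.h>      /* nmi_trigger_cpumask_backtrace(), nmi_cpu_backtrace() */
    #include <asm/nmi.h>        /* NMI_HANDLED, NMI_DONE (x86) */

    static void raise_backtrace_nmi(cpumask_t *mask);  /* hypothetical arch helper */

    /* Arch hook: ask the listed CPUs to dump their stacks via NMI. */
    void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
    {
            nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace_nmi);
    }

    /* NMI handler side: if this CPU was asked for a backtrace, print it. */
    static int nmi_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
    {
            if (nmi_cpu_backtrace(regs))
                    return NMI_HANDLED;
            return NMI_DONE;
    }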
|
genalloc.c
      8  * It is safe to use the allocator in NMI handlers and other special
     21  * On architectures that don't have NMI-safe cmpxchg implementation,
     22  * the allocator can NOT be used in NMI handler. So code uses the
     23  * allocator in NMI handler should depend on
    274  * Can not be used in NMI handler on architectures without
    275  * NMI-safe cmpxchg implementation.
    336  * Can not be used in NMI handler on architectures without
    337  * NMI-safe cmpxchg implementation.
    357  * given pool allocation function. Can not be used in NMI handler on
    358  * architectures without NMI-safe cmpxchg implementation.
    [all …]
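A minimal sketch of the gen_pool API those comments qualify (buffer size and helper names are made up, error handling trimmed): the pool is set up in normal context, and gen_pool_alloc()/gen_pool_free() are the calls whose use from NMI handlers depends on an NMI-safe cmpxchg::

    #include <linux/genalloc.h>
    #include <linux/init.h>

    static struct gen_pool *pool;
    static char backing[4096];          /* hypothetical memory owned by this driver */

    static int __init pool_setup(void)  /* normal (non-NMI) context */
    {
            pool = gen_pool_create(5, -1);  /* 2^5 = 32-byte minimum allocation, any node */
            if (!pool)
                    return -ENOMEM;
            return gen_pool_add(pool, (unsigned long)backing, sizeof(backing), -1);
    }

    /* These two may be called from an NMI handler, provided the architecture
     * has an NMI-safe cmpxchg (see the comments quoted above). */
    static void *grab(size_t len)
    {
            return (void *)gen_pool_alloc(pool, len);
    }

    static void drop(void *p, size_t len)
    {
            gen_pool_free(pool, (unsigned long)p, len);
    }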
|
/linux/Documentation/watchdog/ |
hpwdt.rst
      2  HPE iLO NMI Watchdog Driver
     11  The HPE iLO NMI Watchdog driver is a kernel module that provides basic
     12  watchdog functionality and handler for the iLO "Generate NMI to System"
     34  NMI is delivered to the system. Setting the value to
     35  zero disables the pretimeout NMI.
     42  kdumptimeout Minimum timeout in seconds to apply upon receipt of an NMI
     53  Due to limitations in the iLO hardware, the NMI pretimeout if enabled,
     58  Upon receipt of an NMI from the iLO, the hpwdt driver will initiate a
     70  If the hpwdt does not receive the NMI associated with an expiring timer,
     76  The HPE iLO NMI Watchdog Driver and documentation were originally developed
|
/linux/kernel/ |
watchdog_perf.c
      9  * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
     13  #define pr_fmt(fmt) "NMI watchdog: " fmt
     15  #include <linux/nmi.h>
     39  * So it runs effectively with 2.5 times the rate of the NMI  in watchdog_update_hrtimer_threshold()
     41  * the NMI watchdog expires. The NMI watchdog on x86 is based on  in watchdog_update_hrtimer_threshold()
     43  * might run way faster than expected and the NMI fires in a  in watchdog_update_hrtimer_threshold()
     46  * enough to get the NMI period smaller than the hrtimer watchdog  in watchdog_update_hrtimer_threshold()
     49  * The sample threshold is used to check in the NMI handler whether  in watchdog_update_hrtimer_threshold()
     50  * the minimum time between two NMI samples has elapsed. That  in watchdog_update_hrtimer_threshold()
    261  * watchdog_hardlockup_probe - Probe whether NMI event is available at all
    [all …]
|
/linux/arch/powerpc/kernel/ |
watchdog.c
      7  * This uses code from arch/sparc/kernel/nmi.c and kernel/watchdog.c
     17  #include <linux/nmi.h>
     32  #include <asm/nmi.h>
     41  * The local soft-NMI, and the SMP checker.
     43  * The soft-NMI checker can detect lockups on the local CPU. When interrupts
     47  * watchdog's soft_nmi_interrupt(), which appears to Linux as an NMI
     50  * The soft-NMI checker will compare the heartbeat timestamp for this CPU
     54  * The limitation of the soft-NMI watchdog is that it does not work when
     70  * Some platforms implement true NMI IPIs, which can be used by the SMP
     72  * state with the NMI IPI, to get crash/debug data from it. This way the
    [all …]
|
/linux/include/xen/interface/ |
nmi.h
      3  * nmi.h
      5  * NMI callback registration and reason codes.
     16  * NMI reason codes:
     25  /* Unknown hardware-generated NMI. */
     35  * Register NMI callback for this (calling) VCPU. Currently this only makes
     47  * Deregister NMI callback for this (calling) VCPU.
|
/linux/Documentation/core-api/ |
entry.rst
     16  exceptions`_, `NMI and NMI-like exceptions`_.
    197  NMI and NMI-like exceptions
    200  NMIs and NMI-like exceptions (machine checks, double faults, debug
    209  NMIs and other NMI-like exceptions handle state transitions without
    226  preemption count modification in the NMI entry/exit case must not be
    233  noinstr void nmi(struct pt_regs *regs)
    277  NMIs can happen in any context. For example, an NMI-like exception triggered
    278  while handling an NMI. So NMI entry code has to be reentrant and state updates
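Hit 233 above is the document's generic NMI entry skeleton; expanded slightly, it looks like the sketch below. irqentry_nmi_enter()/irqentry_nmi_exit() are the real generic-entry helpers, while arch_nmi_enter()/arch_nmi_exit() and nmi_handler() stand in for architecture-specific work and the actual NMI processing::

    noinstr void nmi(struct pt_regs *regs)
    {
            irqentry_state_t irq_state;

            arch_nmi_enter(regs);                   /* placeholder: arch-specific entry work */

            irq_state = irqentry_nmi_enter(regs);   /* RCU/lockdep/context tracking for NMI entry */

            instrumentation_begin();
            nmi_handler(regs);                      /* the actual NMI work; name is illustrative */
            instrumentation_end();

            irqentry_nmi_exit(regs, irq_state);

            arch_nmi_exit(regs);                    /* placeholder: arch-specific exit work */
    }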
|
/linux/arch/x86/kernel/apic/ |
hw_nmi.c
      3  * HW NMI watchdog support
      7  * Arch specific calls to support NMI watchdog
      9  * Bits copied from original nmi.c file
     14  #include <asm/nmi.h>
     20  #include <linux/nmi.h>
|
/linux/Documentation/trace/ |
events-nmi.rst
      2  NMI Trace Events
      7  /sys/kernel/tracing/events/nmi
     14  NMI handlers are hogging large amounts of CPU time. The kernel
     17  INFO: NMI handler took too long to run: 9.207 msecs
     34  cd /sys/kernel/tracing/events/nmi/nmi_handler
|
osnoise-tracer.rst
     21  NMI or in the hardware itself. At the end of the period, hwlatd enables
     23  prints a NMI occurrence counter. If the output does not report NMI
     25  the latency. The hwlat detects the NMI execution by observing
     26  the entry and exit of a NMI.
     34  interference. The interference counter for NMI, IRQs, SoftIRQs, and
     65  … CPU# |||| TIMESTAMP IN US IN US AVAILABLE IN US HW NMI IRQ SIRQ THREAD
    142  - osnoise:nmi_noise: noise from NMI, including the duration.
|
/linux/arch/sh/boards/mach-sdk7786/ |
nmi.c
      3  * SDK7786 FPGA NMI Support.
     21  * Default to the manual NMI switch.
     40  pr_warn("Unknown NMI mode %s\n", str);  in nmi_mode_setup()
     43  printk("Set NMI mode to %d\n", nmi_mode);  in nmi_mode_setup()
     72  /* Set the NMI source */  in sdk7786_nmi_init()
|
/linux/drivers/irqchip/ |
irq-sunxi-nmi.c
      2  * Allwinner A20/A31 SoCs NMI IRQ chip driver.
     11  #define DRV_NAME "sunxi-nmi"
     30  * For deprecated sun6i-a31-sc-nmi compatible.
    207  /* Clear any pending NMI interrupts */  in sunxi_sc_nmi_irq_init()
    225  IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);
    232  IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);
    239  IRQCHIP_DECLARE(sun9i_nmi, "allwinner,sun9i-a80-nmi", sun9i_nmi_irq_init);
|
/linux/arch/x86/platform/intel-mid/ |
intel-mid.c
     79  * Moorestown does not have external NMI source nor port 0x61 to report
     80  * NMI status. The possible NMI sources are from pmu as a result of NMI
     82  * misled NMI handler.
|
/linux/arch/mips/sgi-ip27/ |
ip27-nmi.c
     10  #include <asm/sn/nmi.h>
     59  pr_emerg("NMI nasid %d: slice %d\n", nasid, slice);  in nmi_cpu_eframe_save()
    164  * Save the nmi cpu registers for all cpus in the system.
    188  * Wait up to 15 seconds for the other cpus to respond to the NMI.  in nmi_dump()
    189  * If a cpu has not responded after 10 sec, send it 1 additional NMI.  in nmi_dump()
    191  * - sometimes a MMSC fail to NMI all cpus.  in nmi_dump()
    227  * Save the nmi cpu registers for all cpu in the eframe format.  in nmi_dump()
|
/linux/arch/x86/kernel/cpu/microcode/ |
core.c
     31  #include <linux/nmi.h>
    290  /* If invoked directly, tickle the NMI watchdog */  in wait_for_cpus()
    313  /* If invoked directly, tickle the NMI watchdog */  in wait_for_ctrl()
    423  /* Enable the rendezvous handler and send NMI */  in kick_offline_cpus()
    466  * in the NMI rendezvous to protect against a concurrent NMI on affected
    500  * safe against an NMI which is delivered to the secondary SMT sibling
    502  * up in #INT3, #DB and #PF. The IRET from those exceptions reenables NMI
    503  * which is the opposite of what the NMI rendezvous is trying to achieve.
    507  * path which must be NMI safe until the primary thread completed the
    522  /* Enable the NMI handler and raise NMI */  in load_cpus_stopped()
    [all …]
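The "tickle the NMI watchdog" hits above refer to touch_nmi_watchdog() from <linux/nmi.h>; a wait loop in that style looks roughly like the sketch below, where the arrival counter and function name are hypothetical stand-ins for the driver's own bookkeeping::

    #include <linux/nmi.h>      /* touch_nmi_watchdog() */
    #include <linux/delay.h>    /* udelay() */
    #include <linux/atomic.h>

    static atomic_t cpus_arrived;   /* hypothetical: CPUs that reached the rendezvous */

    static void wait_for_everyone(unsigned int expected)
    {
            while (atomic_read(&cpus_arrived) < (int)expected) {
                    /* Busy-waiting with interrupts off can look like a hard
                     * lockup, so keep the NMI watchdog from firing while we spin. */
                    touch_nmi_watchdog();
                    udelay(1);
            }
    }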
|