Lines Matching +full:cpu +full:- +full:offset
1 // SPDX-License-Identifier: GPL-2.0-or-later
6 * Copyright (C) 2007-2017 Silicon Graphics, Inc. All rights reserved.
10 #include <linux/cpu.h>
37 * Handle system-wide NMI events generated by the global 'power nmi' command.
39 * Basic operation is to field the NMI interrupt on each CPU and wait
40 * until all CPUs have arrived in the NMI handler. If some CPUs do not
50 * second (~4M/s for 1024 CPU threads). Our secondary NMI handler is
66 /* Non-zero indicates newer SMM NMI handler present */
83 #define PCH_PCR_GPIO_ADDRESS(offset) (int *)((u64)(pch_base) | (u64)(offset)) argument
91 static atomic_t uv_nmi_cpu = ATOMIC_INIT(-1);
92 static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1);
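The header comment above describes the basic rendezvous, and the two atomics just shown carry it out: the first CPU in claims ownership (the owner slot starts at -1) and every CPU counts itself in. A minimal userspace sketch of that idea, assuming threads stand in for CPUs and C11 atomics for the kernel's atomic_t; none of the names or counts below come from the driver:

	/* Sketch only: threads stand in for CPUs; not the driver's code. */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <unistd.h>

	#define NCPUS 8

	static atomic_int cpus_in_nmi;		/* how many "CPUs" have counted in */
	static atomic_int nmi_owner;		/* first CPU in claims ownership */

	static void *fake_nmi_entry(void *arg)
	{
		int cpu = (int)(long)arg;
		int expected = -1;

		/* First CPU in becomes the owner; every CPU counts itself in */
		atomic_compare_exchange_strong(&nmi_owner, &expected, cpu);
		atomic_fetch_add(&cpus_in_nmi, 1);

		if (atomic_load(&nmi_owner) == cpu) {
			/* Owner: wait (bounded) for the other CPUs to arrive */
			int tries = 1000;

			while (atomic_load(&cpus_in_nmi) < NCPUS && --tries)
				usleep(1000);
			printf("owner cpu %d saw %d of %d CPUs arrive\n",
			       cpu, atomic_load(&cpus_in_nmi), NCPUS);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t t[NCPUS];

		atomic_store(&nmi_owner, -1);	/* -1: no owner yet, as above */
		for (long i = 0; i < NCPUS; i++)
			pthread_create(&t[i], NULL, fake_nmi_entry, (void *)i);
		for (int i = 0; i < NCPUS; i++)
			pthread_join(t[i], NULL);
		return 0;
	}
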
116 return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg)); in param_get_local64()
122 local64_set((local64_t *)kp->arg, 0); in param_set_local64()
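The two handlers above are the get/set callbacks for a local64_t module parameter: a read prints the counter, and any write clears it. A hedged sketch of how such callbacks are typically wired up through struct kernel_param_ops and module_param_cb(); the parameter name and backing variable here are illustrative, not taken from the file:

	/* Fragment meant to sit next to the handlers above; not standalone.
	 * "ping_count" and the backing variable are illustrative names. */
	#include <linux/moduleparam.h>
	#include <asm/local64.h>

	static local64_t uv_nmi_ping_count;

	static const struct kernel_param_ops param_ops_local64 = {
		.get = param_get_local64,	/* reads print the current count */
		.set = param_set_local64,	/* any write resets the count to 0 */
	};

	module_param_cb(ping_count, &param_ops_local64, &uv_nmi_ping_count, 0644);
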
203 [nmi_act_dump] = "dump process stack for each cpu",
204 [nmi_act_ips] = "dump Inst Ptr info for each cpu",
230 pr_err("UV: %-8s - %s\n", actions[i], actions_desc[i]); in param_set_action()
232 return -EINVAL; in param_set_action()
253 uvh_nmi_mmrx_type = "OCRD0-EXTIO_INT0"; in uv_nmi_setup_mmrs()
263 uvh_nmi_mmrx_type = "OCRD1-EXTIO_INT0"; in uv_nmi_setup_mmrs()
293 hub_nmi->nmi_value = uv_read_local_mmr(nmi_mmr); in uv_nmi_test_mmr()
294 atomic_inc(&hub_nmi->read_mmr_count); in uv_nmi_test_mmr()
295 return !!(hub_nmi->nmi_value & nmi_mmr_pending); in uv_nmi_test_mmr()
315 static void uv_init_hubless_pch_io(int offset, int mask, int data) in uv_init_hubless_pch_io() argument
317 int *addr = PCH_PCR_GPIO_ADDRESS(offset); in uv_init_hubless_pch_io()
347 unsigned int offset; member
352 .offset = 0x84,
359 .offset = 0x104,
364 .offset = 0x124,
369 .offset = 0x144,
374 .offset = 0x164,
381 .offset = 0x114,
386 .offset = 0x134,
391 .offset = 0x154,
396 .offset = 0x174,
403 .offset = 0x4c0,
433 * = 1 # Disable the output buffer; i.e. Hi-Z
443 .offset = 0x4c4,
461 uv_init_hubless_pch_io(init_nmi[i].offset, in uv_init_hubless_pch_d0()
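The init_nmi[] entries above pair a register offset with a mask and data value, and the loop applies each entry in turn. A userspace sketch of that offset/mask/data pattern, assuming a plain array stands in for the ioremapped PCH GPIO window and ordinary loads/stores replace readl()/writel(); the offsets and values are made up:

	/* Sketch: a fake register window and illustrative table entries. */
	#include <stdint.h>
	#include <stdio.h>

	struct init_reg {
		unsigned int offset;	/* byte offset from the register base */
		uint32_t mask;		/* which bits this entry owns */
		uint32_t data;		/* value to place in those bits */
	};

	static const struct init_reg init_table[] = {
		{ 0x84,  0x00000030, 0x00000020 },
		{ 0x104, 0x000000ff, 0x00000084 },
	};

	static uint32_t regs[0x200];	/* fake 2 KB register window */

	static void apply_entry(const struct init_reg *e)
	{
		uint32_t *addr = &regs[e->offset / 4];

		/* Masked read-modify-write: clear the owned bits, OR in new data */
		*addr = (*addr & ~e->mask) | (e->data & e->mask);
	}

	int main(void)
	{
		for (unsigned int i = 0; i < sizeof(init_table) / sizeof(init_table[0]); i++)
			apply_entry(&init_table[i]);

		printf("reg 0x84 = 0x%08x\n", regs[0x84 / 4]);
		return 0;
	}
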
472 hub_nmi->nmi_value = status; in uv_nmi_test_hubless()
473 atomic_inc(&hub_nmi->read_mmr_count); in uv_nmi_test_hubless()
486 if (hub_nmi->hub_present) in uv_test_nmi()
489 if (hub_nmi->pch_owner) /* Only PCH owner can check status */ in uv_test_nmi()
492 return -1; in uv_test_nmi()
496 * If this is the first CPU to enter on this hub, set the hub_nmi "in_nmi" and "owner" values and
497 * return true. If it is also the first CPU to enter system-wide, set the global "in_nmi" flag.
499 static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi) in uv_set_in_nmi() argument
501 int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1); in uv_set_in_nmi()
504 atomic_set(&hub_nmi->cpu_owner, cpu); in uv_set_in_nmi()
506 atomic_set(&uv_nmi_cpu, cpu); in uv_set_in_nmi()
508 atomic_inc(&hub_nmi->nmi_count); in uv_set_in_nmi()
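uv_set_in_nmi() relies on atomic_add_unless(&hub_nmi->in_nmi, 1, 1) returning non-zero only for the first caller, so only the first CPU into the hub records ownership. A userspace sketch of that "add unless already at the limit" semantic, built from a compare-and-swap loop; the helper name is illustrative:

	/* Sketch of the first-caller-wins semantic; not the kernel helper. */
	#include <stdatomic.h>
	#include <stdio.h>

	static int add_unless(atomic_int *v, int a, int u)
	{
		int old = atomic_load(v);

		while (old != u) {
			if (atomic_compare_exchange_weak(v, &old, old + a))
				return 1;	/* we were first: the add happened */
		}
		return 0;			/* someone else already set it */
	}

	int main(void)
	{
		atomic_int in_nmi = 0;

		printf("first  caller: %d\n", add_unless(&in_nmi, 1, 1));	/* 1 */
		printf("second caller: %d\n", add_unless(&in_nmi, 1, 1));	/* 0 */
		return 0;
	}
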
516 int cpu = smp_processor_id(); in uv_check_nmi() local
524 nmi = atomic_read(&hub_nmi->in_nmi); in uv_check_nmi()
528 if (raw_spin_trylock(&hub_nmi->nmi_lock)) { in uv_check_nmi()
533 uv_set_in_nmi(cpu, hub_nmi); in uv_check_nmi()
538 /* A non-PCH node in a hubless system waits for NMI */ in uv_check_nmi()
543 raw_spin_unlock(&hub_nmi->nmi_lock); in uv_check_nmi()
551 /* Re-check hub in_nmi flag */ in uv_check_nmi()
552 nmi = atomic_read(&hub_nmi->in_nmi); in uv_check_nmi()
564 uv_set_in_nmi(cpu, hub_nmi); in uv_check_nmi()
569 raw_spin_unlock(&hub_nmi->nmi_lock); in uv_check_nmi()
580 static inline void uv_clear_nmi(int cpu) in uv_clear_nmi() argument
584 if (cpu == atomic_read(&hub_nmi->cpu_owner)) { in uv_clear_nmi()
585 atomic_set(&hub_nmi->cpu_owner, -1); in uv_clear_nmi()
586 atomic_set(&hub_nmi->in_nmi, 0); in uv_clear_nmi()
587 if (hub_nmi->hub_present) in uv_clear_nmi()
591 raw_spin_unlock(&hub_nmi->nmi_lock); in uv_clear_nmi()
595 /* Ping non-responding CPUs, attempting to force them into the NMI handler */
598 int cpu; in uv_nmi_nr_cpus_ping() local
600 for_each_cpu(cpu, uv_nmi_cpu_mask) in uv_nmi_nr_cpus_ping()
601 uv_cpu_nmi_per(cpu).pinging = 1; in uv_nmi_nr_cpus_ping()
606 /* Clean up flags for CPUs that ignored both NMI and ping */
609 int cpu; in uv_nmi_cleanup_mask() local
611 for_each_cpu(cpu, uv_nmi_cpu_mask) { in uv_nmi_cleanup_mask()
612 uv_cpu_nmi_per(cpu).pinging = 0; in uv_nmi_cleanup_mask()
613 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT; in uv_nmi_cleanup_mask()
614 cpumask_clear_cpu(cpu, uv_nmi_cpu_mask); in uv_nmi_cleanup_mask()
618 /* Loop, waiting as CPUs enter the NMI handler */
623 int cpu = smp_processor_id(); in uv_nmi_wait_cpus() local
629 k = n - cpumask_weight(uv_nmi_cpu_mask); in uv_nmi_wait_cpus()
632 /* PCH NMI causes only one CPU to respond */ in uv_nmi_wait_cpus()
634 cpumask_clear_cpu(cpu, uv_nmi_cpu_mask); in uv_nmi_wait_cpus()
635 return n - k - 1; in uv_nmi_wait_cpus()
653 if (last_k != k) { /* abort if no new CPUs are coming in */ in uv_nmi_wait_cpus()
659 /* Extend delay if waiting only for CPU 0: */ in uv_nmi_wait_cpus()
660 if (waiting && (n - k) == 1 && in uv_nmi_wait_cpus()
667 return n - k; in uv_nmi_wait_cpus()
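The wait loop above keeps counting arrivals and bails out once no new CPUs are coming in, returning how many never showed up. A userspace sketch of that progress-based timeout, assuming a helper thread simulates late CPUs and usleep() stands in for the slave delay; the counts and delays are made up:

	/* Sketch: 3 of 4 simulated CPUs arrive, so the waiter reports 1 missing. */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <unistd.h>

	static atomic_int arrived;

	/* Wait for 'want' arrivals; give up once the count stops making progress. */
	static int wait_for_cpus(int want)
	{
		int last = -1, idle_loops = 0;

		while (atomic_load(&arrived) < want) {
			int now = atomic_load(&arrived);

			if (now == last) {
				if (++idle_loops > 200)	/* no new CPUs coming in */
					break;
			} else {
				last = now;
				idle_loops = 0;
			}
			usleep(1000);
		}
		return want - atomic_load(&arrived);	/* how many never showed */
	}

	static void *cpu_sim(void *arg)
	{
		(void)arg;
		for (int i = 0; i < 3; i++) {
			usleep(10000);
			atomic_fetch_add(&arrived, 1);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, cpu_sim, NULL);
		printf("missing: %d\n", wait_for_cpus(4));
		pthread_join(t, NULL);
		return 0;
	}
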
670 /* Wait until all slave CPUs have entered the UV NMI handler */
673 /* Indicate this CPU is in: */ in uv_nmi_wait()
676 /* If not the first CPU in (the master), then we are a slave CPU */ in uv_nmi_wait()
681 /* Wait for all other CPUs to gather here */ in uv_nmi_wait()
692 /* If all CPUs are in, then done */ in uv_nmi_wait()
708 pr_info("\nUV: %4s %6s %-32s %s (Note: PID 0 not listed)\n", in uv_nmi_dump_cpu_ip_hdr()
709 "CPU", "PID", "COMMAND", "IP"); in uv_nmi_dump_cpu_ip_hdr()
713 static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs) in uv_nmi_dump_cpu_ip() argument
715 pr_info("UV: %4d %6d %-32.32s %pS", in uv_nmi_dump_cpu_ip()
716 cpu, current->pid, current->comm, (void *)regs->ip); in uv_nmi_dump_cpu_ip()
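The header and per-CPU row above rely on matching printf field widths: %-32.32s left-justifies and truncates the command name to 32 characters so the columns line up, while %pS is a kernel-only format extension that prints a symbol name for the instruction pointer. A userspace check of the widths, using plain %p and made-up values:

	/* Sketch only: the PID, command name, and address are invented. */
	#include <stdio.h>

	int main(void)
	{
		int dummy;

		printf("%4s %6s %-32s %s\n", "CPU", "PID", "COMMAND", "IP");
		printf("%4d %6d %-32.32s %p\n", 12, 4711,
		       "a-rather-long-command-name-that-gets-truncated-here",
		       (void *)&dummy);	/* %p instead of the kernel-only %pS */
		return 0;
	}
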
720 * Dump this CPU's state. If action was set to "kdump" and the crash_kexec
723 * action "ips" only displays instruction pointers for the non-idle CPUs.
726 static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs) in uv_nmi_dump_state_cpu() argument
730 if (cpu == 0) in uv_nmi_dump_state_cpu()
733 if (current->pid != 0 || uv_nmi_action != nmi_act_ips) in uv_nmi_dump_state_cpu()
734 uv_nmi_dump_cpu_ip(cpu, regs); in uv_nmi_dump_state_cpu()
737 pr_info("UV:%sNMI process trace for CPU %d\n", dots, cpu); in uv_nmi_dump_state_cpu()
744 /* Trigger a slave CPU to dump its state */
745 static void uv_nmi_trigger_dump(int cpu) in uv_nmi_trigger_dump() argument
749 if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN) in uv_nmi_trigger_dump()
752 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP; in uv_nmi_trigger_dump()
756 if (uv_cpu_nmi_per(cpu).state in uv_nmi_trigger_dump()
759 } while (--retry > 0); in uv_nmi_trigger_dump()
761 pr_crit("UV: CPU %d stuck in process dump function\n", cpu); in uv_nmi_trigger_dump()
762 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE; in uv_nmi_trigger_dump()
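The fragments above show the master/slave handshake: the master moves a slave's per-CPU state to DUMP, polls for DUMP_DONE, and declares the CPU stuck once the retries run out. A userspace sketch of that handshake, assuming a worker thread plays the slave CPU; the state names and retry count are illustrative, not the driver's values:

	/* Sketch: one simulated slave, master polls for completion. */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <unistd.h>

	enum { STATE_IN, STATE_DUMP, STATE_DUMP_DONE };

	static atomic_int slave_state = STATE_IN;

	static void *slave(void *arg)
	{
		(void)arg;
		/* Wait to be told to dump, do the "dump", then report completion */
		while (atomic_load(&slave_state) != STATE_DUMP)
			usleep(1000);
		printf("slave: dumping state\n");
		atomic_store(&slave_state, STATE_DUMP_DONE);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		int retry = 100;

		pthread_create(&t, NULL, slave, NULL);

		atomic_store(&slave_state, STATE_DUMP);	/* trigger the dump */
		while (atomic_load(&slave_state) != STATE_DUMP_DONE && --retry)
			usleep(1000);
		if (!retry)
			printf("master: slave stuck in dump\n");

		pthread_join(t, NULL);
		return 0;
	}
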
765 /* Wait until all CPUs are ready to exit */
779 /* The current "health" check simply reports which CPUs are responsive */
780 static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master) in uv_nmi_action_health() argument
784 int out = num_online_cpus() - in; in uv_nmi_action_health()
786 pr_alert("UV: NMI CPU health check (non-responding:%d)\n", out); in uv_nmi_action_health()
795 /* Walk through the CPU list and dump the state of each */
796 static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master) in uv_nmi_dump_state() argument
803 pr_alert("UV: tracing %s for %d CPUs from CPU %d\n", in uv_nmi_dump_state()
805 atomic_read(&uv_nmi_cpus_in_nmi), cpu); in uv_nmi_dump_state()
812 else if (tcpu == cpu) in uv_nmi_dump_state()
827 uv_nmi_dump_state_cpu(cpu, regs); in uv_nmi_dump_state()
840 static void uv_nmi_kdump(int cpu, int main, struct pt_regs *regs) in uv_nmi_kdump() argument
851 pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu); in uv_nmi_kdump()
884 return -1; in uv_nmi_kdb_reason()
895 static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master) in uv_call_kgdb_kdb() argument
905 ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason, in uv_call_kgdb_kdb()
922 kgdb_nmicallback(cpu, regs); in uv_call_kgdb_kdb()
928 static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master) in uv_call_kgdb_kdb() argument
940 int cpu = smp_processor_id(); in uv_handle_nmi() local
952 /* Indicate we are the first CPU into the NMI handler */ in uv_handle_nmi()
953 master = (atomic_read(&uv_nmi_cpu) == cpu); in uv_handle_nmi()
957 uv_nmi_kdump(cpu, master, regs); in uv_handle_nmi()
964 /* Pause as all CPUs enter the NMI handler */ in uv_handle_nmi()
970 uv_nmi_action_health(cpu, regs, master); in uv_handle_nmi()
974 uv_nmi_dump_state(cpu, regs, master); in uv_handle_nmi()
978 uv_call_kgdb_kdb(cpu, regs, master); in uv_handle_nmi()
991 uv_clear_nmi(cpu); in uv_handle_nmi()
997 atomic_set(&uv_nmi_cpus_in_nmi, -1); in uv_handle_nmi()
998 atomic_set(&uv_nmi_cpu, -1); in uv_handle_nmi()
1011 * NMI handler for pulling in CPUs when perf events are grabbing our NMI
1044 * Unmask NMI on all CPUs in uv_nmi_init()
1055 int cpu; in uv_nmi_setup_common() local
1061 for_each_present_cpu(cpu) { in uv_nmi_setup_common()
1062 int nid = cpu_to_node(cpu); in uv_nmi_setup_common()
1067 raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock)); in uv_nmi_setup_common()
1068 atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1); in uv_nmi_setup_common()
1069 uv_hub_nmi_list[nid]->hub_present = hubbed; in uv_nmi_setup_common()
1070 uv_hub_nmi_list[nid]->pch_owner = (nid == 0); in uv_nmi_setup_common()
1072 uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid]; in uv_nmi_setup_common()
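The setup loop above allocates one uv_hub_nmi_s structure per node the first time a CPU on that node is seen, then points every CPU's per-CPU pointer at that shared structure. A userspace sketch of the allocate-once-per-node pattern, with a made-up cpu_to_node() mapping and plain arrays in place of the per-CPU machinery; the field names mirror the listing but the values are illustrative:

	/* Sketch only: error handling and freeing omitted. */
	#include <stdio.h>
	#include <stdlib.h>

	#define NR_CPUS   8
	#define NR_NODES  2

	struct hub_nmi {
		int cpu_owner;		/* -1: nobody owns the hub's NMI right now */
		int hub_present;
		int pch_owner;		/* only one node talks to the PCH */
	};

	static struct hub_nmi *node_nmi[NR_NODES];
	static struct hub_nmi *cpu_nmi[NR_CPUS];	/* per-CPU pointer stand-in */

	static int cpu_to_node(int cpu) { return cpu / (NR_CPUS / NR_NODES); }

	int main(void)
	{
		for (int cpu = 0; cpu < NR_CPUS; cpu++) {
			int nid = cpu_to_node(cpu);

			if (!node_nmi[nid]) {		/* first CPU seen on this node */
				node_nmi[nid] = calloc(1, sizeof(*node_nmi[nid]));
				node_nmi[nid]->cpu_owner = -1;
				node_nmi[nid]->hub_present = 1;
				node_nmi[nid]->pch_owner = (nid == 0);
			}
			cpu_nmi[cpu] = node_nmi[nid];	/* share the node structure */
		}

		printf("cpu 3 and cpu 1 share: %s\n",
		       cpu_nmi[3] == cpu_nmi[1] ? "yes" : "no");
		return 0;
	}
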