// SPDX-License-Identifier: GPL-2.0
/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/kgdb.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>
#include <asm/setup.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <linux/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>
#include <asm/pcr.h>

#include "cpumap.h"
#include "kernel.h"

DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS - 1] = CPU_MASK_NONE };
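
/* Per-cpu topology masks: hardware thread siblings, cores, plus the
 * cpu_core_sib_{,cache_}map variants which group cpus by socket id
 * and by highest-level shared cache.  All of them are filled in by
 * smp_fill_in_sib_core_maps() below once the present cpus are known.
 */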

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);
EXPORT_SYMBOL(cpu_core_sib_map);
EXPORT_SYMBOL(cpu_core_sib_cache_map);

static cpumask_t smp_commenced_mask;

static DEFINE_PER_CPU(bool, poke);
static bool cpu_poke;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

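	/* Wait until the master has put us into smp_commenced_mask
	 * from __cpu_up(); only then may we mark ourselves online.
	 */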
	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();

	set_cpu_online(cpuid, true);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_RAW_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_safe("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

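/* get_delta() keeps the iteration with the smallest round trip and
 * estimates the slave/master tick offset as midpoint(t0, t1) - tm,
 * i.e. it assumes the master sampled its counter halfway through the
 * round trip; the residual error is thus bounded by (t1 - t0) / 2.
 */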
void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0)
				done = 1;	/* let's lock on to this... */

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	raw_spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
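			/* Publish the master's current tick; the
			 * slave is spinning on go[SLAVE] inside
			 * get_delta() above.
			 */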
			go[SLAVE] = tick_ops->get_tick();
			membar_safe("#StoreLoad");
		}
	}
	raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
}

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
				void **descrp)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}
	*descrp = hdesc;

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte = tte_data;
		tte_vaddr += 0x400000;
		tte_data += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines.  -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

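/* Boot one cpu: stash its idle thread in cpu_new_thread, kick the
 * cpu via the hypervisor or OBP as appropriate, then spin for up to
 * five seconds (50000 * 100us) waiting for it to raise callin_flag
 * from smp_callin().
 */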
static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	void *descr = NULL;
	int timeout, ret;

	callin_flag = 0;
	cpu_new_thread = task_thread_info(idle);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread,
					    &descr);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->phandle, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	kfree(descr);

	return ret;
}

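/* Pre-sun4v cross-call delivery.  Spitfire (UltraSPARC-I/II) can only
 * dispatch to one target at a time, so spitfire_xcall_deliver() below
 * walks the cpu list and sends the three-word mondo to each target in
 * turn through the UDB interrupt dispatch registers.
 */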
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read.  -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	u64 *mondo, data0, data1, data2;
	u16 *cpu_list;
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);
	data0 = mondo[0];
	data1 = mondo[1];
	data2 = mondo[2];
	for (i = 0; i < cnt; i++)
		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}

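/* Each cpu generation gets its own delivery routine; the active one
 * is installed in xcall_deliver_impl by smp_setup_processor_id()
 * further down (spitfire, cheetah/cheetah+, or sun4v hypervisor).
 */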
/* Cheetah now allows sending the whole 64 bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int nack_busy_id, is_jbus, need_more;
	u64 *mondo, pstate, ver, busy_mask;
	u16 *cpu_list;

	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     : /* no outputs */
			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for (i = 0; i < cnt; i++) {
			u64 target, nr;

			nr = cpu_list[i];
			if (nr == 0xffff)
				continue;

			target = (nr << 14) | 0x70;
			if (is_jbus) {
				busy_mask |= (0x1UL << (nr * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

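	/* Every dispatch queued above occupies a busy/nack bit pair in
	 * the dispatch status register: the busy bit at twice the slot
	 * (or JBUS agent) id, with the matching nack bit one position
	 * above it, hence nack_mask is simply busy_mask shifted left.
	 */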
	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, this_cnt = 0;
					for (i = 0; i < cnt; i++) {
						if (cpu_list[i] == 0xffff)
							continue;
						cpu_list[i] = 0xffff;
						this_cnt++;
						if (this_cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < cnt; i++) {
				u64 check_mask, nr;

				nr = cpu_list[i];
				if (nr == 0xffff)
					continue;

				if (is_jbus)
					check_mask = (0x2UL << (2*nr));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_list[i] = 0xffff;
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}

#define	CPU_MONDO_COUNTER(cpuid)	(cpu_mondo_counter[cpuid])
#define	MONDO_USEC_WAIT_MIN		2
#define	MONDO_USEC_WAIT_MAX		100
#define	MONDO_RETRY_LIMIT		500000

/* Multi-cpu list version.
 *
 * Deliver xcalls to the 'cnt' cpus in 'cpu_list'.
 * Sometimes not all cpus receive the mondo, requiring us to re-send
 * it until every cpu has received it, or some cpus are truly stuck,
 * unable to receive the mondo, and we time out.
 * Occasionally a target cpu strand is borrowed briefly by the
 * hypervisor to perform guest service, such as PCIe error handling.
 * Allowing for that service time, an overall wait of 1 second is
 * reasonable for 1 cpu.  Two in-between mondo check wait times are
 * defined: 2 usec for a quick turnaround on a single cpu, and up to
 * 100 usec for a large cpu count.  Delivering a mondo to many cpus
 * can take longer, so we keep adjusting the retry count as long as
 * the target cpus are making forward progress.
 */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int this_cpu, tot_cpus, prev_sent, i, rem;
	int usec_wait, retries, tot_retries;
	u16 first_cpu = 0xffff;
	unsigned long xc_rcvd = 0;
	unsigned long status;
	int ecpuerror_id = 0;
	int enocpu_id = 0;
	u16 *cpu_list;
	u16 cpu;

	this_cpu = smp_processor_id();
	cpu_list = __va(tb->cpu_list_pa);
	usec_wait = cnt * MONDO_USEC_WAIT_MIN;
	if (usec_wait > MONDO_USEC_WAIT_MAX)
		usec_wait = MONDO_USEC_WAIT_MAX;
	retries = tot_retries = 0;
	tot_cpus = cnt;
	prev_sent = 0;

	do {
		int n_sent, mondo_delivered, target_cpu_busy;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done. */
		if (likely(status == HV_EOK))
			goto xcall_done;

		/* If not these non-fatal errors, panic */
		if (unlikely((status != HV_EWOULDBLOCK) &&
			     (status != HV_ECPUERROR) &&
			     (status != HV_ENOCPU)))
			goto fatal_errors;

		/* First, see if we made any forward progress.
		 *
		 * Go through the cpu_list, count the target cpus that have
		 * received our mondo (n_sent), and those that did not (rem).
		 * Re-pack cpu_list with the cpus remaining to be retried in
		 * the front - this simplifies tracking the truly stalled
		 * cpus.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 *
		 * EWOULDBLOCK means some target cpus did not receive the
		 * mondo and retry usually helps.
		 *
		 * ECPUERROR means at least one target cpu is in error state;
		 * it's usually safe to skip the faulty cpu and retry.
		 *
		 * ENOCPU means one of the target cpus doesn't belong to the
		 * domain, perhaps offlined, which is unexpected but not
		 * fatal, and it's okay to skip the offlined cpu.
		 */
		rem = 0;
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			cpu = cpu_list[i];
			if (likely(cpu == 0xffff)) {
				n_sent++;
			} else if ((status == HV_ECPUERROR) &&
				   (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
				ecpuerror_id = cpu + 1;
			} else if (status == HV_ENOCPU && !cpu_online(cpu)) {
				enocpu_id = cpu + 1;
			} else {
				cpu_list[rem++] = cpu;
			}
		}

		/* No cpu remained, we're done. */
		if (rem == 0)
			break;

		/* Otherwise, update the cpu count for retry. */
		cnt = rem;

		/* Record the overall number of mondos received by the
		 * first of the remaining cpus.
		 */
		if (first_cpu != cpu_list[0]) {
			first_cpu = cpu_list[0];
			xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
		}

		/* Was any mondo delivered successfully? */
		mondo_delivered = (n_sent > prev_sent);
		prev_sent = n_sent;

		/* or, was any target cpu busy processing other mondos? */
		target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
		xc_rcvd = CPU_MONDO_COUNTER(first_cpu);

		/* Retry count is for no progress.  If we're making progress,
		 * reset the retry count.
		 */
		if (likely(mondo_delivered || target_cpu_busy)) {
			tot_retries += retries;
			retries = 0;
		} else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
			goto fatal_mondo_timeout;
		}

		/* Delay a little bit to let other cpus catch up on
		 * their cpu mondo queue work.
		 */
		if (!mondo_delivered)
			udelay(usec_wait);

		retries++;
	} while (1);

xcall_done:
	if (unlikely(ecpuerror_id > 0)) {
		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
			this_cpu, ecpuerror_id - 1);
	} else if (unlikely(enocpu_id > 0)) {
		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
			this_cpu, enocpu_id - 1);
	}
	return;

fatal_errors:
	/* fatal errors include bad alignment, etc */
	pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
		this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
	panic("Unexpected SUN4V mondo error %lu\n", status);

fatal_mondo_timeout:
	/* some cpus being non-responsive to the cpu mondo */
	pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
		this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
	panic("SUN4V mondo timeout panic\n");
}

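/* For example, in the worst case of the constants above: an xcall to
 * 64 target cpus waits usec_wait = min(64 * 2, 100) = 100us per
 * no-progress retry, so exhausting MONDO_RETRY_LIMIT (500000) retries
 * with zero forward progress takes on the order of 50 seconds before
 * the timeout panic fires.
 */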
static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	struct trap_per_cpu *tb;
	int this_cpu, i, cnt;
	unsigned long flags;
	u16 *cpu_list;
	u64 *mondo;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list. */
	cnt = 0;
	for_each_cpu(i, mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}

/* Send cross call to all processors mentioned in MASK_P
 * except self.  Really, there are only two cases currently,
 * "cpu_online_mask" and "mm_cpumask(mm)".
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	xcall_deliver(data0, data1, data2, mask);
}

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      cpumask_of(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      cpumask_of(cpu));
}

void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
extern unsigned long xcall_fetch_glob_pmu_n4;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping_file(page) != NULL));
#else
	if (page_mapping_file(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

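/* The D-cache flush xcalls below only matter on chips where cache
 * aliasing is possible (the spitfire and cheetah families); sun4v
 * chips do not need this, which is why the routines below simply
 * return early when tlb_type == hypervisor.
 */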
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping_file(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	preempt_disable();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping_file(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
	__local_flush_dcache_page(page);

	preempt_enable();
}

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(void)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

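/* Niagara-4 and later apparently need their own PMU read routine,
 * hence the separate _n4 variant of the xcall chosen below.
 */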
void smp_fetch_global_pmu(void)
{
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
	else
		smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and for clones, we use
 *    cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

struct tlb_pending_info {
	unsigned long ctx;
	unsigned long nr;
	unsigned long *vaddrs;
};

static void tlb_pending_func(void *info)
{
	struct tlb_pending_info *t = info;

	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	struct tlb_pending_info info;
	int cpu = get_cpu();

	info.ctx = ctx;
	info.nr = nr;
	info.vaddrs = vaddrs;

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
				       &info, 1);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

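/* Single-page variant of the above.  The mm_users == 1 shortcut is
 * case 2 of the coherency scheme described earlier: shrink
 * cpu_vm_mask to just this cpu and flush locally, letting a later
 * migration flush the stale context on the next cpu instead of
 * cross calling now.
 */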
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long context = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_cross_call_masked(&xcall_flush_tlb_page,
				      context, vaddr, 0,
				      mm_cpumask(mm));

	__flush_tlb_page(context, vaddr);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

void smp_capture(void)
{
	int result = atomic_add_return(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}

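/* smp_capture()/smp_release() corral every other cpu into
 * smp_penguin_jailcell() below and hold it spinning there until the
 * capturing cpu is finished; this is used around work that must not
 * race with the other cpus, such as calls into the firmware (note
 * the prom_world() bracketing in the jailcell).
 */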
/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void smp_prepare_boot_cpu(void)
{
}

void __init smp_setup_processor_id(void)
{
	if (tlb_type == spitfire)
		xcall_deliver_impl = spitfire_xcall_deliver;
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		xcall_deliver_impl = cheetah_xcall_deliver;
	else
		xcall_deliver_impl = hypervisor_xcall_deliver;
}

void __init smp_fill_in_cpu_possible_map(void)
{
	int possible_cpus = num_possible_cpus();
	int i;

	if (possible_cpus > nr_cpu_ids)
		possible_cpus = nr_cpu_ids;

	for (i = 0; i < possible_cpus; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);
}
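/* Worked example for the trimming above (illustrative numbers): with
 * NR_CPUS == 64, nr_cpu_ids == 16 and more than 16 cpus reported,
 * possible_cpus is capped at 16, cpus 0..15 are marked possible and
 * cpus 16..63 are explicitly cleared.
 */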
void smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpumask_set_cpu(i, &cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpumask_set_cpu(j, &cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		for_each_present_cpu(j) {
			if (cpu_data(i).max_cache_id ==
			    cpu_data(j).max_cache_id)
				cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);

			if (cpu_data(i).sock_id == cpu_data(j).sock_id)
				cpumask_set_cpu(j, &cpu_core_sib_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
		}
	}
}
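/* What the maps built above encode for a consumer:
 *
 *	&cpu_core_map[cpu]              cpus sharing core_id
 *	&cpu_core_sib_map[cpu]          cpus sharing sock_id (socket)
 *	&cpu_core_sib_cache_map[cpu]    cpus sharing max_cache_id (LLC)
 *	&per_cpu(cpu_sibling_map, cpu)  hw threads sharing proc_id
 */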
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = smp_boot_one_cpu(cpu, tidle);

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
		if (!cpu_online(cpu)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpumask_clear_cpu(cpu, &smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr	%%pstate, %0\n\t"
		"wrpr	%0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}
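/* Tear-down order in __cpu_disable() below: drop the dying cpu from the
 * topology maps first, retarget interrupts away from it via fixup_irqs(),
 * briefly re-enable irqs so in-flight mondos can drain, and only then
 * mark the cpu !online and rebuild the cpu distribution map.
 */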
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu(i, &cpu_core_map[cpu])
		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
	cpumask_clear(&cpu_core_map[cpu]);

	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	smp_wmb();

	/* Make sure no interrupts point to this cpu. */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	set_cpu_online(cpu, false);

	cpu_map_rebuild();

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				set_cpu_present(cpu, false);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static void send_cpu_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_receive_signal,
		      0, 0, cpumask_of(cpu));
}

void scheduler_poke(void)
{
	if (!cpu_poke)
		return;

	if (!__this_cpu_read(poke))
		return;

	__this_cpu_write(poke, false);
	set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
}

static unsigned long send_cpu_poke(int cpu)
{
	unsigned long hv_err;

	per_cpu(poke, cpu) = true;
	hv_err = sun4v_cpu_poke(cpu);
	if (hv_err != HV_EOK) {
		per_cpu(poke, cpu) = false;
		pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n",
				   __func__, hv_err);
	}

	return hv_err;
}
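/* Wakeup strategy in smp_send_reschedule() below: a self-reschedule just
 * raises the softint locally; for a remote cpu that is idle, the cheaper
 * sun4v CPU_POKE is tried first, with a full mondo IPI as the fallback
 * whenever poke is unsupported, the cpu is busy, or the poke fails.
 */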
void smp_send_reschedule(int cpu)
{
	if (cpu == smp_processor_id()) {
		WARN_ON_ONCE(preemptible());
		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
		return;
	}

	/* Use cpu poke to resume idle cpu if supported. */
	if (cpu_poke && idle_cpu(cpu)) {
		unsigned long ret;

		ret = send_cpu_poke(cpu);
		if (ret == HV_EOK)
			return;
	}

	/* Use IPI in following cases:
	 * - cpu poke not supported
	 * - cpu not idle
	 * - send_cpu_poke() returns with error
	 */
	send_cpu_ipi(cpu);
}

void smp_init_cpu_poke(void)
{
	unsigned long major;
	unsigned long minor;
	int ret;

	if (tlb_type != hypervisor)
		return;

	ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor);
	if (ret) {
		pr_debug("HV_GRP_CORE is not registered\n");
		return;
	}

	if (major == 1 && minor >= 6) {
		/* CPU POKE is registered. */
		cpu_poke = true;
		return;
	}

	pr_debug("CPU_POKE not supported\n");
}

void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	scheduler_ipi();
}

static void stop_this_cpu(void *dummy)
{
	set_cpu_online(smp_processor_id(), false);
	prom_stopself();
}

void smp_send_stop(void)
{
	int cpu;

	if (tlb_type == hypervisor) {
		int this_cpu = smp_processor_id();
#ifdef CONFIG_SERIAL_SUNHV
		sunhv_migrate_hvcons_irq(this_cpu);
#endif
		for_each_online_cpu(cpu) {
			if (cpu == this_cpu)
				continue;

			set_cpu_online(cpu, false);
#ifdef CONFIG_SUN_LDOMS
			if (ldom_domaining_enabled) {
				unsigned long hv_err;
				hv_err = sun4v_cpu_stop(cpu);
				if (hv_err)
					printk(KERN_ERR "sun4v_cpu_stop() "
					       "failed err=%lu\n", hv_err);
			} else
#endif
				prom_stopcpu_cpuid(cpu);
		}
	} else
		smp_call_function(stop_this_cpu, NULL, 0);
}
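/* Version rule used by smp_init_cpu_poke() above: CPU_POKE is advertised
 * by the sun4v HV_GRP_CORE API from version 1.6 on, so a negotiated
 * major of 1 with minor >= 6 enables poke; anything older, or a missing
 * group, leaves the kernel on plain IPIs.
 */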
/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
					size_t align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = memblock_alloc_from(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = memblock_alloc_try_nid(size, align, goal,
					     MEMBLOCK_ALLOC_ACCESSIBLE, node);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return memblock_alloc_from(size, align, goal);
#endif
}

static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
	memblock_free(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}
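/* pcpu_populate_pte() below pre-populates the kernel page tables for one
 * vmalloc address used by the page-first-chunk percpu allocator.  The
 * shape of the walk, for reference:
 *
 *	pgd -> p4d -> pud -> pmd
 *
 * with any missing intermediate table allocated from memblock, and a
 * panic if a boot-time allocation fails.
 */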
static void __init pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd)) {
		pud_t *new;

		new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		if (!new)
			goto err_alloc;
		pgd_populate(&init_mm, pgd, new);
	}

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		pud_t *new;

		new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		if (!new)
			goto err_alloc;
		p4d_populate(&init_mm, p4d, new);
	}

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		pmd_t *new;

		new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		if (!new)
			goto err_alloc;
		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new;

		new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		if (!new)
			goto err_alloc;
		pmd_populate_kernel(&init_mm, pmd, new);
	}

	return;

err_alloc:
	panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
	      __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
					    pcpu_cpu_distance,
					    pcpu_alloc_bootmem,
					    pcpu_free_bootmem);
		if (rc)
			pr_warning("PERCPU: %s allocator failed (%d), "
				   "falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					   pcpu_alloc_bootmem,
					   pcpu_free_bootmem,
					   pcpu_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

	/* Setup %g5 for the boot cpu. */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());

	of_fill_in_cpu_data();
	if (tlb_type == hypervisor)
		mdesc_fill_in_cpu_data(cpu_all_mask);
}
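/* Illustrative follow-on (outside this file): with %g5 holding
 * __local_per_cpu_offset, sparc64 defines __my_cpu_offset to that
 * register, so generic accessors such as this_cpu_ptr() reduce to
 * pointer arithmetic against the offsets computed above.  Details are
 * assumptions from arch/sparc/include/asm/percpu_64.h and may vary by
 * kernel version.
 */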