1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0 2a88b5ba8SSam Ravnborg /* smp.c: Sparc64 SMP support. 3a88b5ba8SSam Ravnborg * 4a88b5ba8SSam Ravnborg * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net) 5a88b5ba8SSam Ravnborg */ 6a88b5ba8SSam Ravnborg 7066bcacaSPaul Gortmaker #include <linux/export.h> 8a88b5ba8SSam Ravnborg #include <linux/kernel.h> 968e21be2SIngo Molnar #include <linux/sched/mm.h> 10ef8bd77fSIngo Molnar #include <linux/sched/hotplug.h> 11a88b5ba8SSam Ravnborg #include <linux/mm.h> 12a88b5ba8SSam Ravnborg #include <linux/pagemap.h> 13a88b5ba8SSam Ravnborg #include <linux/threads.h> 14a88b5ba8SSam Ravnborg #include <linux/smp.h> 15a88b5ba8SSam Ravnborg #include <linux/interrupt.h> 16a88b5ba8SSam Ravnborg #include <linux/kernel_stat.h> 17a88b5ba8SSam Ravnborg #include <linux/delay.h> 18a88b5ba8SSam Ravnborg #include <linux/init.h> 19a88b5ba8SSam Ravnborg #include <linux/spinlock.h> 20a88b5ba8SSam Ravnborg #include <linux/fs.h> 21a88b5ba8SSam Ravnborg #include <linux/seq_file.h> 22a88b5ba8SSam Ravnborg #include <linux/cache.h> 23a88b5ba8SSam Ravnborg #include <linux/jiffies.h> 24a88b5ba8SSam Ravnborg #include <linux/profile.h> 2557c8a661SMike Rapoport #include <linux/memblock.h> 264fd78a5fSDavid S. Miller #include <linux/vmalloc.h> 279960e9e8SDavid S. Miller #include <linux/ftrace.h> 28a88b5ba8SSam Ravnborg #include <linux/cpu.h> 295a0e3ad6STejun Heo #include <linux/slab.h> 30d3091298SSam Ravnborg #include <linux/kgdb.h> 31a88b5ba8SSam Ravnborg 32a88b5ba8SSam Ravnborg #include <asm/head.h> 33a88b5ba8SSam Ravnborg #include <asm/ptrace.h> 3460063497SArun Sharma #include <linux/atomic.h> 35a88b5ba8SSam Ravnborg #include <asm/tlbflush.h> 36a88b5ba8SSam Ravnborg #include <asm/mmu_context.h> 37a88b5ba8SSam Ravnborg #include <asm/cpudata.h> 38a88b5ba8SSam Ravnborg #include <asm/hvtramp.h> 39a88b5ba8SSam Ravnborg #include <asm/io.h> 40a88b5ba8SSam Ravnborg #include <asm/timer.h> 4159dec13bSSam Ravnborg #include <asm/setup.h> 42a88b5ba8SSam Ravnborg 43a88b5ba8SSam Ravnborg #include <asm/irq.h> 44a88b5ba8SSam Ravnborg #include <asm/irq_regs.h> 45a88b5ba8SSam Ravnborg #include <asm/page.h> 46a88b5ba8SSam Ravnborg #include <asm/oplib.h> 477c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 48a88b5ba8SSam Ravnborg #include <asm/starfire.h> 49a88b5ba8SSam Ravnborg #include <asm/tlb.h> 50ca15ca40SMike Rapoport #include <asm/pgalloc.h> 51a88b5ba8SSam Ravnborg #include <asm/sections.h> 52a88b5ba8SSam Ravnborg #include <asm/prom.h> 53a88b5ba8SSam Ravnborg #include <asm/mdesc.h> 54a88b5ba8SSam Ravnborg #include <asm/ldc.h> 55a88b5ba8SSam Ravnborg #include <asm/hypervisor.h> 56b62818e5SDavid S. Miller #include <asm/pcr.h> 57a88b5ba8SSam Ravnborg 58280ff974SHong H. Pham #include "cpumap.h" 59a0c54a21SSam Ravnborg #include "kernel.h" 60280ff974SHong H. Pham 61a88b5ba8SSam Ravnborg DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; 62a88b5ba8SSam Ravnborg cpumask_t cpu_core_map[NR_CPUS] __read_mostly = 63a88b5ba8SSam Ravnborg { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; 64a88b5ba8SSam Ravnborg 65acc455cfSchris hyser cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = { 66acc455cfSchris hyser [0 ... NR_CPUS-1] = CPU_MASK_NONE }; 67acc455cfSchris hyser 68d624716bSAtish Patra cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = { 69d624716bSAtish Patra [0 ... NR_CPUS - 1] = CPU_MASK_NONE }; 70d624716bSAtish Patra 71a88b5ba8SSam Ravnborg EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 72a88b5ba8SSam Ravnborg EXPORT_SYMBOL(cpu_core_map); 73acc455cfSchris hyser EXPORT_SYMBOL(cpu_core_sib_map); 74d624716bSAtish Patra EXPORT_SYMBOL(cpu_core_sib_cache_map); 75a88b5ba8SSam Ravnborg 76a88b5ba8SSam Ravnborg static cpumask_t smp_commenced_mask; 77a88b5ba8SSam Ravnborg 788536e02eSVijay Kumar static DEFINE_PER_CPU(bool, poke); 798536e02eSVijay Kumar static bool cpu_poke; 808536e02eSVijay Kumar 81a88b5ba8SSam Ravnborg void smp_info(struct seq_file *m) 82a88b5ba8SSam Ravnborg { 83a88b5ba8SSam Ravnborg int i; 84a88b5ba8SSam Ravnborg 85a88b5ba8SSam Ravnborg seq_printf(m, "State:\n"); 86a88b5ba8SSam Ravnborg for_each_online_cpu(i) 87a88b5ba8SSam Ravnborg seq_printf(m, "CPU%d:\t\tonline\n", i); 88a88b5ba8SSam Ravnborg } 89a88b5ba8SSam Ravnborg 90a88b5ba8SSam Ravnborg void smp_bogo(struct seq_file *m) 91a88b5ba8SSam Ravnborg { 92a88b5ba8SSam Ravnborg int i; 93a88b5ba8SSam Ravnborg 94a88b5ba8SSam Ravnborg for_each_online_cpu(i) 95a88b5ba8SSam Ravnborg seq_printf(m, 96a88b5ba8SSam Ravnborg "Cpu%dClkTck\t: %016lx\n", 97a88b5ba8SSam Ravnborg i, cpu_data(i).clock_tick); 98a88b5ba8SSam Ravnborg } 99a88b5ba8SSam Ravnborg 100a88b5ba8SSam Ravnborg extern void setup_sparc64_timer(void); 101a88b5ba8SSam Ravnborg 102a88b5ba8SSam Ravnborg static volatile unsigned long callin_flag = 0; 103a88b5ba8SSam Ravnborg 1042066aaddSPaul Gortmaker void smp_callin(void) 105a88b5ba8SSam Ravnborg { 106a88b5ba8SSam Ravnborg int cpuid = hard_smp_processor_id(); 107a88b5ba8SSam Ravnborg 108a88b5ba8SSam Ravnborg __local_per_cpu_offset = __per_cpu_offset(cpuid); 109a88b5ba8SSam Ravnborg 110a88b5ba8SSam Ravnborg if (tlb_type == hypervisor) 111a88b5ba8SSam Ravnborg sun4v_ktsb_register(); 112a88b5ba8SSam Ravnborg 113a88b5ba8SSam Ravnborg __flush_tlb_all(); 114a88b5ba8SSam Ravnborg 115a88b5ba8SSam Ravnborg setup_sparc64_timer(); 116a88b5ba8SSam Ravnborg 117a88b5ba8SSam Ravnborg if (cheetah_pcache_forced_on) 118a88b5ba8SSam Ravnborg cheetah_enable_pcache(); 119a88b5ba8SSam Ravnborg 120a88b5ba8SSam Ravnborg callin_flag = 1; 121a88b5ba8SSam Ravnborg __asm__ __volatile__("membar #Sync\n\t" 122a88b5ba8SSam Ravnborg "flush %%g6" : : : "memory"); 123a88b5ba8SSam Ravnborg 124a88b5ba8SSam Ravnborg /* Clear this or we will die instantly when we 125a88b5ba8SSam Ravnborg * schedule back to this idler... 126a88b5ba8SSam Ravnborg */ 127a88b5ba8SSam Ravnborg current_thread_info()->new_child = 0; 128a88b5ba8SSam Ravnborg 129a88b5ba8SSam Ravnborg /* Attach to the address space of init_task. */ 130f1f10076SVegard Nossum mmgrab(&init_mm); 131a88b5ba8SSam Ravnborg current->active_mm = &init_mm; 132a88b5ba8SSam Ravnborg 133a88b5ba8SSam Ravnborg /* inform the notifiers about the new cpu */ 134a88b5ba8SSam Ravnborg notify_cpu_starting(cpuid); 135a88b5ba8SSam Ravnborg 136fb1fece5SKOSAKI Motohiro while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) 137a88b5ba8SSam Ravnborg rmb(); 138a88b5ba8SSam Ravnborg 139fb1fece5SKOSAKI Motohiro set_cpu_online(cpuid, true); 140a88b5ba8SSam Ravnborg 141ce2521bfSKirill Tkhai local_irq_enable(); 142ce2521bfSKirill Tkhai 143fc6d73d6SThomas Gleixner cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); 144a88b5ba8SSam Ravnborg } 145a88b5ba8SSam Ravnborg 146a88b5ba8SSam Ravnborg void cpu_panic(void) 147a88b5ba8SSam Ravnborg { 148a88b5ba8SSam Ravnborg printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id()); 149a88b5ba8SSam Ravnborg panic("SMP bolixed\n"); 150a88b5ba8SSam Ravnborg } 151a88b5ba8SSam Ravnborg 152a88b5ba8SSam Ravnborg /* This tick register synchronization scheme is taken entirely from 153a88b5ba8SSam Ravnborg * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit. 154a88b5ba8SSam Ravnborg * 155a88b5ba8SSam Ravnborg * The only change I've made is to rework it so that the master 156a88b5ba8SSam Ravnborg * initiates the synchonization instead of the slave. -DaveM 157a88b5ba8SSam Ravnborg */ 158a88b5ba8SSam Ravnborg 159a88b5ba8SSam Ravnborg #define MASTER 0 160a88b5ba8SSam Ravnborg #define SLAVE (SMP_CACHE_BYTES/sizeof(unsigned long)) 161a88b5ba8SSam Ravnborg 162a88b5ba8SSam Ravnborg #define NUM_ROUNDS 64 /* magic value */ 163a88b5ba8SSam Ravnborg #define NUM_ITERS 5 /* likewise */ 164a88b5ba8SSam Ravnborg 16549b6c01fSKirill Tkhai static DEFINE_RAW_SPINLOCK(itc_sync_lock); 166a88b5ba8SSam Ravnborg static unsigned long go[SLAVE + 1]; 167a88b5ba8SSam Ravnborg 168a88b5ba8SSam Ravnborg #define DEBUG_TICK_SYNC 0 169a88b5ba8SSam Ravnborg 170a88b5ba8SSam Ravnborg static inline long get_delta (long *rt, long *master) 171a88b5ba8SSam Ravnborg { 172a88b5ba8SSam Ravnborg unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0; 173a88b5ba8SSam Ravnborg unsigned long tcenter, t0, t1, tm; 174a88b5ba8SSam Ravnborg unsigned long i; 175a88b5ba8SSam Ravnborg 176a88b5ba8SSam Ravnborg for (i = 0; i < NUM_ITERS; i++) { 177a88b5ba8SSam Ravnborg t0 = tick_ops->get_tick(); 178a88b5ba8SSam Ravnborg go[MASTER] = 1; 179a88b5ba8SSam Ravnborg membar_safe("#StoreLoad"); 180a88b5ba8SSam Ravnborg while (!(tm = go[SLAVE])) 181a88b5ba8SSam Ravnborg rmb(); 182a88b5ba8SSam Ravnborg go[SLAVE] = 0; 183a88b5ba8SSam Ravnborg wmb(); 184a88b5ba8SSam Ravnborg t1 = tick_ops->get_tick(); 185a88b5ba8SSam Ravnborg 186a88b5ba8SSam Ravnborg if (t1 - t0 < best_t1 - best_t0) 187a88b5ba8SSam Ravnborg best_t0 = t0, best_t1 = t1, best_tm = tm; 188a88b5ba8SSam Ravnborg } 189a88b5ba8SSam Ravnborg 190a88b5ba8SSam Ravnborg *rt = best_t1 - best_t0; 191a88b5ba8SSam Ravnborg *master = best_tm - best_t0; 192a88b5ba8SSam Ravnborg 193a88b5ba8SSam Ravnborg /* average best_t0 and best_t1 without overflow: */ 194a88b5ba8SSam Ravnborg tcenter = (best_t0/2 + best_t1/2); 195a88b5ba8SSam Ravnborg if (best_t0 % 2 + best_t1 % 2 == 2) 196a88b5ba8SSam Ravnborg tcenter++; 197a88b5ba8SSam Ravnborg return tcenter - best_tm; 198a88b5ba8SSam Ravnborg } 199a88b5ba8SSam Ravnborg 200a88b5ba8SSam Ravnborg void smp_synchronize_tick_client(void) 201a88b5ba8SSam Ravnborg { 202a88b5ba8SSam Ravnborg long i, delta, adj, adjust_latency = 0, done = 0; 203c6fee081SDavid S. Miller unsigned long flags, rt, master_time_stamp; 204a88b5ba8SSam Ravnborg #if DEBUG_TICK_SYNC 205a88b5ba8SSam Ravnborg struct { 206a88b5ba8SSam Ravnborg long rt; /* roundtrip time */ 207a88b5ba8SSam Ravnborg long master; /* master's timestamp */ 208a88b5ba8SSam Ravnborg long diff; /* difference between midpoint and master's timestamp */ 209a88b5ba8SSam Ravnborg long lat; /* estimate of itc adjustment latency */ 210a88b5ba8SSam Ravnborg } t[NUM_ROUNDS]; 211a88b5ba8SSam Ravnborg #endif 212a88b5ba8SSam Ravnborg 213a88b5ba8SSam Ravnborg go[MASTER] = 1; 214a88b5ba8SSam Ravnborg 215a88b5ba8SSam Ravnborg while (go[MASTER]) 216a88b5ba8SSam Ravnborg rmb(); 217a88b5ba8SSam Ravnborg 218a88b5ba8SSam Ravnborg local_irq_save(flags); 219a88b5ba8SSam Ravnborg { 220a88b5ba8SSam Ravnborg for (i = 0; i < NUM_ROUNDS; i++) { 221a88b5ba8SSam Ravnborg delta = get_delta(&rt, &master_time_stamp); 222c6fee081SDavid S. Miller if (delta == 0) 223a88b5ba8SSam Ravnborg done = 1; /* let's lock on to this... */ 224a88b5ba8SSam Ravnborg 225a88b5ba8SSam Ravnborg if (!done) { 226a88b5ba8SSam Ravnborg if (i > 0) { 227a88b5ba8SSam Ravnborg adjust_latency += -delta; 228a88b5ba8SSam Ravnborg adj = -delta + adjust_latency/4; 229a88b5ba8SSam Ravnborg } else 230a88b5ba8SSam Ravnborg adj = -delta; 231a88b5ba8SSam Ravnborg 232a88b5ba8SSam Ravnborg tick_ops->add_tick(adj); 233a88b5ba8SSam Ravnborg } 234a88b5ba8SSam Ravnborg #if DEBUG_TICK_SYNC 235a88b5ba8SSam Ravnborg t[i].rt = rt; 236a88b5ba8SSam Ravnborg t[i].master = master_time_stamp; 237a88b5ba8SSam Ravnborg t[i].diff = delta; 238a88b5ba8SSam Ravnborg t[i].lat = adjust_latency/4; 239a88b5ba8SSam Ravnborg #endif 240a88b5ba8SSam Ravnborg } 241a88b5ba8SSam Ravnborg } 242a88b5ba8SSam Ravnborg local_irq_restore(flags); 243a88b5ba8SSam Ravnborg 244a88b5ba8SSam Ravnborg #if DEBUG_TICK_SYNC 245a88b5ba8SSam Ravnborg for (i = 0; i < NUM_ROUNDS; i++) 246a88b5ba8SSam Ravnborg printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n", 247a88b5ba8SSam Ravnborg t[i].rt, t[i].master, t[i].diff, t[i].lat); 248a88b5ba8SSam Ravnborg #endif 249a88b5ba8SSam Ravnborg 250a88b5ba8SSam Ravnborg printk(KERN_INFO "CPU %d: synchronized TICK with master CPU " 251a88b5ba8SSam Ravnborg "(last diff %ld cycles, maxerr %lu cycles)\n", 252a88b5ba8SSam Ravnborg smp_processor_id(), delta, rt); 253a88b5ba8SSam Ravnborg } 254a88b5ba8SSam Ravnborg 255a88b5ba8SSam Ravnborg static void smp_start_sync_tick_client(int cpu); 256a88b5ba8SSam Ravnborg 257a88b5ba8SSam Ravnborg static void smp_synchronize_one_tick(int cpu) 258a88b5ba8SSam Ravnborg { 259a88b5ba8SSam Ravnborg unsigned long flags, i; 260a88b5ba8SSam Ravnborg 261a88b5ba8SSam Ravnborg go[MASTER] = 0; 262a88b5ba8SSam Ravnborg 263a88b5ba8SSam Ravnborg smp_start_sync_tick_client(cpu); 264a88b5ba8SSam Ravnborg 265a88b5ba8SSam Ravnborg /* wait for client to be ready */ 266a88b5ba8SSam Ravnborg while (!go[MASTER]) 267a88b5ba8SSam Ravnborg rmb(); 268a88b5ba8SSam Ravnborg 269a88b5ba8SSam Ravnborg /* now let the client proceed into his loop */ 270a88b5ba8SSam Ravnborg go[MASTER] = 0; 271a88b5ba8SSam Ravnborg membar_safe("#StoreLoad"); 272a88b5ba8SSam Ravnborg 27349b6c01fSKirill Tkhai raw_spin_lock_irqsave(&itc_sync_lock, flags); 274a88b5ba8SSam Ravnborg { 275a88b5ba8SSam Ravnborg for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) { 276a88b5ba8SSam Ravnborg while (!go[MASTER]) 277a88b5ba8SSam Ravnborg rmb(); 278a88b5ba8SSam Ravnborg go[MASTER] = 0; 279a88b5ba8SSam Ravnborg wmb(); 280a88b5ba8SSam Ravnborg go[SLAVE] = tick_ops->get_tick(); 281a88b5ba8SSam Ravnborg membar_safe("#StoreLoad"); 282a88b5ba8SSam Ravnborg } 283a88b5ba8SSam Ravnborg } 28449b6c01fSKirill Tkhai raw_spin_unlock_irqrestore(&itc_sync_lock, flags); 285a88b5ba8SSam Ravnborg } 286a88b5ba8SSam Ravnborg 287a88b5ba8SSam Ravnborg #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) 2882066aaddSPaul Gortmaker static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, 2892066aaddSPaul Gortmaker void **descrp) 290a88b5ba8SSam Ravnborg { 291a88b5ba8SSam Ravnborg extern unsigned long sparc64_ttable_tl0; 292a88b5ba8SSam Ravnborg extern unsigned long kern_locked_tte_data; 293a88b5ba8SSam Ravnborg struct hvtramp_descr *hdesc; 294a88b5ba8SSam Ravnborg unsigned long trampoline_ra; 295a88b5ba8SSam Ravnborg struct trap_per_cpu *tb; 296a88b5ba8SSam Ravnborg u64 tte_vaddr, tte_data; 297a88b5ba8SSam Ravnborg unsigned long hv_err; 298a88b5ba8SSam Ravnborg int i; 299a88b5ba8SSam Ravnborg 300a88b5ba8SSam Ravnborg hdesc = kzalloc(sizeof(*hdesc) + 301a88b5ba8SSam Ravnborg (sizeof(struct hvtramp_mapping) * 302a88b5ba8SSam Ravnborg num_kernel_image_mappings - 1), 303a88b5ba8SSam Ravnborg GFP_KERNEL); 304a88b5ba8SSam Ravnborg if (!hdesc) { 305a88b5ba8SSam Ravnborg printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate " 306a88b5ba8SSam Ravnborg "hvtramp_descr.\n"); 307a88b5ba8SSam Ravnborg return; 308a88b5ba8SSam Ravnborg } 309557fe0e8SDavid S. Miller *descrp = hdesc; 310a88b5ba8SSam Ravnborg 311a88b5ba8SSam Ravnborg hdesc->cpu = cpu; 312a88b5ba8SSam Ravnborg hdesc->num_mappings = num_kernel_image_mappings; 313a88b5ba8SSam Ravnborg 314a88b5ba8SSam Ravnborg tb = &trap_block[cpu]; 315a88b5ba8SSam Ravnborg 316a88b5ba8SSam Ravnborg hdesc->fault_info_va = (unsigned long) &tb->fault_info; 317a88b5ba8SSam Ravnborg hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info); 318a88b5ba8SSam Ravnborg 319a88b5ba8SSam Ravnborg hdesc->thread_reg = thread_reg; 320a88b5ba8SSam Ravnborg 321a88b5ba8SSam Ravnborg tte_vaddr = (unsigned long) KERNBASE; 322a88b5ba8SSam Ravnborg tte_data = kern_locked_tte_data; 323a88b5ba8SSam Ravnborg 324a88b5ba8SSam Ravnborg for (i = 0; i < hdesc->num_mappings; i++) { 325a88b5ba8SSam Ravnborg hdesc->maps[i].vaddr = tte_vaddr; 326a88b5ba8SSam Ravnborg hdesc->maps[i].tte = tte_data; 327a88b5ba8SSam Ravnborg tte_vaddr += 0x400000; 328a88b5ba8SSam Ravnborg tte_data += 0x400000; 329a88b5ba8SSam Ravnborg } 330a88b5ba8SSam Ravnborg 331a88b5ba8SSam Ravnborg trampoline_ra = kimage_addr_to_ra(hv_cpu_startup); 332a88b5ba8SSam Ravnborg 333a88b5ba8SSam Ravnborg hv_err = sun4v_cpu_start(cpu, trampoline_ra, 334a88b5ba8SSam Ravnborg kimage_addr_to_ra(&sparc64_ttable_tl0), 335a88b5ba8SSam Ravnborg __pa(hdesc)); 336a88b5ba8SSam Ravnborg if (hv_err) 337a88b5ba8SSam Ravnborg printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() " 338a88b5ba8SSam Ravnborg "gives error %lu\n", hv_err); 339a88b5ba8SSam Ravnborg } 340a88b5ba8SSam Ravnborg #endif 341a88b5ba8SSam Ravnborg 342a88b5ba8SSam Ravnborg extern unsigned long sparc64_cpu_startup; 343a88b5ba8SSam Ravnborg 344a88b5ba8SSam Ravnborg /* The OBP cpu startup callback truncates the 3rd arg cookie to 345a88b5ba8SSam Ravnborg * 32-bits (I think) so to be safe we have it read the pointer 346a88b5ba8SSam Ravnborg * contained here so we work on >4GB machines. -DaveM 347a88b5ba8SSam Ravnborg */ 348a88b5ba8SSam Ravnborg static struct thread_info *cpu_new_thread = NULL; 349a88b5ba8SSam Ravnborg 3502066aaddSPaul Gortmaker static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle) 351a88b5ba8SSam Ravnborg { 352a88b5ba8SSam Ravnborg unsigned long entry = 353a88b5ba8SSam Ravnborg (unsigned long)(&sparc64_cpu_startup); 354a88b5ba8SSam Ravnborg unsigned long cookie = 355a88b5ba8SSam Ravnborg (unsigned long)(&cpu_new_thread); 356557fe0e8SDavid S. Miller void *descr = NULL; 357a88b5ba8SSam Ravnborg int timeout, ret; 358a88b5ba8SSam Ravnborg 359a88b5ba8SSam Ravnborg callin_flag = 0; 360f0a2bc7eSThomas Gleixner cpu_new_thread = task_thread_info(idle); 361a88b5ba8SSam Ravnborg 362a88b5ba8SSam Ravnborg if (tlb_type == hypervisor) { 363a88b5ba8SSam Ravnborg #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) 364a88b5ba8SSam Ravnborg if (ldom_domaining_enabled) 365a88b5ba8SSam Ravnborg ldom_startcpu_cpuid(cpu, 366557fe0e8SDavid S. Miller (unsigned long) cpu_new_thread, 367557fe0e8SDavid S. Miller &descr); 368a88b5ba8SSam Ravnborg else 369a88b5ba8SSam Ravnborg #endif 370a88b5ba8SSam Ravnborg prom_startcpu_cpuid(cpu, entry, cookie); 371a88b5ba8SSam Ravnborg } else { 372a88b5ba8SSam Ravnborg struct device_node *dp = of_find_node_by_cpuid(cpu); 373a88b5ba8SSam Ravnborg 3746016a363SGrant Likely prom_startcpu(dp->phandle, entry, cookie); 375a88b5ba8SSam Ravnborg } 376a88b5ba8SSam Ravnborg 377a88b5ba8SSam Ravnborg for (timeout = 0; timeout < 50000; timeout++) { 378a88b5ba8SSam Ravnborg if (callin_flag) 379a88b5ba8SSam Ravnborg break; 380a88b5ba8SSam Ravnborg udelay(100); 381a88b5ba8SSam Ravnborg } 382a88b5ba8SSam Ravnborg 383a88b5ba8SSam Ravnborg if (callin_flag) { 384a88b5ba8SSam Ravnborg ret = 0; 385a88b5ba8SSam Ravnborg } else { 386a88b5ba8SSam Ravnborg printk("Processor %d is stuck.\n", cpu); 387a88b5ba8SSam Ravnborg ret = -ENODEV; 388a88b5ba8SSam Ravnborg } 389a88b5ba8SSam Ravnborg cpu_new_thread = NULL; 390a88b5ba8SSam Ravnborg 391557fe0e8SDavid S. Miller kfree(descr); 392a88b5ba8SSam Ravnborg 393a88b5ba8SSam Ravnborg return ret; 394a88b5ba8SSam Ravnborg } 395a88b5ba8SSam Ravnborg 396a88b5ba8SSam Ravnborg static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu) 397a88b5ba8SSam Ravnborg { 398a88b5ba8SSam Ravnborg u64 result, target; 399a88b5ba8SSam Ravnborg int stuck, tmp; 400a88b5ba8SSam Ravnborg 401a88b5ba8SSam Ravnborg if (this_is_starfire) { 402a88b5ba8SSam Ravnborg /* map to real upaid */ 403a88b5ba8SSam Ravnborg cpu = (((cpu & 0x3c) << 1) | 404a88b5ba8SSam Ravnborg ((cpu & 0x40) >> 4) | 405a88b5ba8SSam Ravnborg (cpu & 0x3)); 406a88b5ba8SSam Ravnborg } 407a88b5ba8SSam Ravnborg 408a88b5ba8SSam Ravnborg target = (cpu << 14) | 0x70; 409a88b5ba8SSam Ravnborg again: 410a88b5ba8SSam Ravnborg /* Ok, this is the real Spitfire Errata #54. 411a88b5ba8SSam Ravnborg * One must read back from a UDB internal register 412a88b5ba8SSam Ravnborg * after writes to the UDB interrupt dispatch, but 413a88b5ba8SSam Ravnborg * before the membar Sync for that write. 414a88b5ba8SSam Ravnborg * So we use the high UDB control register (ASI 0x7f, 415a88b5ba8SSam Ravnborg * ADDR 0x20) for the dummy read. -DaveM 416a88b5ba8SSam Ravnborg */ 417a88b5ba8SSam Ravnborg tmp = 0x40; 418a88b5ba8SSam Ravnborg __asm__ __volatile__( 419a88b5ba8SSam Ravnborg "wrpr %1, %2, %%pstate\n\t" 420a88b5ba8SSam Ravnborg "stxa %4, [%0] %3\n\t" 421a88b5ba8SSam Ravnborg "stxa %5, [%0+%8] %3\n\t" 422a88b5ba8SSam Ravnborg "add %0, %8, %0\n\t" 423a88b5ba8SSam Ravnborg "stxa %6, [%0+%8] %3\n\t" 424a88b5ba8SSam Ravnborg "membar #Sync\n\t" 425a88b5ba8SSam Ravnborg "stxa %%g0, [%7] %3\n\t" 426a88b5ba8SSam Ravnborg "membar #Sync\n\t" 427a88b5ba8SSam Ravnborg "mov 0x20, %%g1\n\t" 428a88b5ba8SSam Ravnborg "ldxa [%%g1] 0x7f, %%g0\n\t" 429a88b5ba8SSam Ravnborg "membar #Sync" 430a88b5ba8SSam Ravnborg : "=r" (tmp) 431a88b5ba8SSam Ravnborg : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W), 432a88b5ba8SSam Ravnborg "r" (data0), "r" (data1), "r" (data2), "r" (target), 433a88b5ba8SSam Ravnborg "r" (0x10), "0" (tmp) 434a88b5ba8SSam Ravnborg : "g1"); 435a88b5ba8SSam Ravnborg 436a88b5ba8SSam Ravnborg /* NOTE: PSTATE_IE is still clear. */ 437a88b5ba8SSam Ravnborg stuck = 100000; 438a88b5ba8SSam Ravnborg do { 439a88b5ba8SSam Ravnborg __asm__ __volatile__("ldxa [%%g0] %1, %0" 440a88b5ba8SSam Ravnborg : "=r" (result) 441a88b5ba8SSam Ravnborg : "i" (ASI_INTR_DISPATCH_STAT)); 442a88b5ba8SSam Ravnborg if (result == 0) { 443a88b5ba8SSam Ravnborg __asm__ __volatile__("wrpr %0, 0x0, %%pstate" 444a88b5ba8SSam Ravnborg : : "r" (pstate)); 445a88b5ba8SSam Ravnborg return; 446a88b5ba8SSam Ravnborg } 447a88b5ba8SSam Ravnborg stuck -= 1; 448a88b5ba8SSam Ravnborg if (stuck == 0) 449a88b5ba8SSam Ravnborg break; 450a88b5ba8SSam Ravnborg } while (result & 0x1); 451a88b5ba8SSam Ravnborg __asm__ __volatile__("wrpr %0, 0x0, %%pstate" 452a88b5ba8SSam Ravnborg : : "r" (pstate)); 453a88b5ba8SSam Ravnborg if (stuck == 0) { 45490181136SSam Ravnborg printk("CPU[%d]: mondo stuckage result[%016llx]\n", 455a88b5ba8SSam Ravnborg smp_processor_id(), result); 456a88b5ba8SSam Ravnborg } else { 457a88b5ba8SSam Ravnborg udelay(2); 458a88b5ba8SSam Ravnborg goto again; 459a88b5ba8SSam Ravnborg } 460a88b5ba8SSam Ravnborg } 461a88b5ba8SSam Ravnborg 462a88b5ba8SSam Ravnborg static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt) 463a88b5ba8SSam Ravnborg { 464a88b5ba8SSam Ravnborg u64 *mondo, data0, data1, data2; 465a88b5ba8SSam Ravnborg u16 *cpu_list; 466a88b5ba8SSam Ravnborg u64 pstate; 467a88b5ba8SSam Ravnborg int i; 468a88b5ba8SSam Ravnborg 469a88b5ba8SSam Ravnborg __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); 470a88b5ba8SSam Ravnborg cpu_list = __va(tb->cpu_list_pa); 471a88b5ba8SSam Ravnborg mondo = __va(tb->cpu_mondo_block_pa); 472a88b5ba8SSam Ravnborg data0 = mondo[0]; 473a88b5ba8SSam Ravnborg data1 = mondo[1]; 474a88b5ba8SSam Ravnborg data2 = mondo[2]; 475a88b5ba8SSam Ravnborg for (i = 0; i < cnt; i++) 476a88b5ba8SSam Ravnborg spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]); 477a88b5ba8SSam Ravnborg } 478a88b5ba8SSam Ravnborg 479a88b5ba8SSam Ravnborg /* Cheetah now allows to send the whole 64-bytes of data in the interrupt 480a88b5ba8SSam Ravnborg * packet, but we have no use for that. However we do take advantage of 481a88b5ba8SSam Ravnborg * the new pipelining feature (ie. dispatch to multiple cpus simultaneously). 482a88b5ba8SSam Ravnborg */ 483a88b5ba8SSam Ravnborg static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt) 484a88b5ba8SSam Ravnborg { 485a88b5ba8SSam Ravnborg int nack_busy_id, is_jbus, need_more; 486a88b5ba8SSam Ravnborg u64 *mondo, pstate, ver, busy_mask; 487a88b5ba8SSam Ravnborg u16 *cpu_list; 488a88b5ba8SSam Ravnborg 489a88b5ba8SSam Ravnborg cpu_list = __va(tb->cpu_list_pa); 490a88b5ba8SSam Ravnborg mondo = __va(tb->cpu_mondo_block_pa); 491a88b5ba8SSam Ravnborg 492a88b5ba8SSam Ravnborg /* Unfortunately, someone at Sun had the brilliant idea to make the 493a88b5ba8SSam Ravnborg * busy/nack fields hard-coded by ITID number for this Ultra-III 494a88b5ba8SSam Ravnborg * derivative processor. 495a88b5ba8SSam Ravnborg */ 496a88b5ba8SSam Ravnborg __asm__ ("rdpr %%ver, %0" : "=r" (ver)); 497a88b5ba8SSam Ravnborg is_jbus = ((ver >> 32) == __JALAPENO_ID || 498a88b5ba8SSam Ravnborg (ver >> 32) == __SERRANO_ID); 499a88b5ba8SSam Ravnborg 500a88b5ba8SSam Ravnborg __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); 501a88b5ba8SSam Ravnborg 502a88b5ba8SSam Ravnborg retry: 503a88b5ba8SSam Ravnborg need_more = 0; 504a88b5ba8SSam Ravnborg __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t" 505a88b5ba8SSam Ravnborg : : "r" (pstate), "i" (PSTATE_IE)); 506a88b5ba8SSam Ravnborg 507a88b5ba8SSam Ravnborg /* Setup the dispatch data registers. */ 508a88b5ba8SSam Ravnborg __asm__ __volatile__("stxa %0, [%3] %6\n\t" 509a88b5ba8SSam Ravnborg "stxa %1, [%4] %6\n\t" 510a88b5ba8SSam Ravnborg "stxa %2, [%5] %6\n\t" 511a88b5ba8SSam Ravnborg "membar #Sync\n\t" 512a88b5ba8SSam Ravnborg : /* no outputs */ 513a88b5ba8SSam Ravnborg : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]), 514a88b5ba8SSam Ravnborg "r" (0x40), "r" (0x50), "r" (0x60), 515a88b5ba8SSam Ravnborg "i" (ASI_INTR_W)); 516a88b5ba8SSam Ravnborg 517a88b5ba8SSam Ravnborg nack_busy_id = 0; 518a88b5ba8SSam Ravnborg busy_mask = 0; 519a88b5ba8SSam Ravnborg { 520a88b5ba8SSam Ravnborg int i; 521a88b5ba8SSam Ravnborg 522a88b5ba8SSam Ravnborg for (i = 0; i < cnt; i++) { 523a88b5ba8SSam Ravnborg u64 target, nr; 524a88b5ba8SSam Ravnborg 525a88b5ba8SSam Ravnborg nr = cpu_list[i]; 526a88b5ba8SSam Ravnborg if (nr == 0xffff) 527a88b5ba8SSam Ravnborg continue; 528a88b5ba8SSam Ravnborg 529a88b5ba8SSam Ravnborg target = (nr << 14) | 0x70; 530a88b5ba8SSam Ravnborg if (is_jbus) { 531a88b5ba8SSam Ravnborg busy_mask |= (0x1UL << (nr * 2)); 532a88b5ba8SSam Ravnborg } else { 533a88b5ba8SSam Ravnborg target |= (nack_busy_id << 24); 534a88b5ba8SSam Ravnborg busy_mask |= (0x1UL << 535a88b5ba8SSam Ravnborg (nack_busy_id * 2)); 536a88b5ba8SSam Ravnborg } 537a88b5ba8SSam Ravnborg __asm__ __volatile__( 538a88b5ba8SSam Ravnborg "stxa %%g0, [%0] %1\n\t" 539a88b5ba8SSam Ravnborg "membar #Sync\n\t" 540a88b5ba8SSam Ravnborg : /* no outputs */ 541a88b5ba8SSam Ravnborg : "r" (target), "i" (ASI_INTR_W)); 542a88b5ba8SSam Ravnborg nack_busy_id++; 543a88b5ba8SSam Ravnborg if (nack_busy_id == 32) { 544a88b5ba8SSam Ravnborg need_more = 1; 545a88b5ba8SSam Ravnborg break; 546a88b5ba8SSam Ravnborg } 547a88b5ba8SSam Ravnborg } 548a88b5ba8SSam Ravnborg } 549a88b5ba8SSam Ravnborg 550a88b5ba8SSam Ravnborg /* Now, poll for completion. */ 551a88b5ba8SSam Ravnborg { 552a88b5ba8SSam Ravnborg u64 dispatch_stat, nack_mask; 553a88b5ba8SSam Ravnborg long stuck; 554a88b5ba8SSam Ravnborg 555a88b5ba8SSam Ravnborg stuck = 100000 * nack_busy_id; 556a88b5ba8SSam Ravnborg nack_mask = busy_mask << 1; 557a88b5ba8SSam Ravnborg do { 558a88b5ba8SSam Ravnborg __asm__ __volatile__("ldxa [%%g0] %1, %0" 559a88b5ba8SSam Ravnborg : "=r" (dispatch_stat) 560a88b5ba8SSam Ravnborg : "i" (ASI_INTR_DISPATCH_STAT)); 561a88b5ba8SSam Ravnborg if (!(dispatch_stat & (busy_mask | nack_mask))) { 562a88b5ba8SSam Ravnborg __asm__ __volatile__("wrpr %0, 0x0, %%pstate" 563a88b5ba8SSam Ravnborg : : "r" (pstate)); 564a88b5ba8SSam Ravnborg if (unlikely(need_more)) { 565a88b5ba8SSam Ravnborg int i, this_cnt = 0; 566a88b5ba8SSam Ravnborg for (i = 0; i < cnt; i++) { 567a88b5ba8SSam Ravnborg if (cpu_list[i] == 0xffff) 568a88b5ba8SSam Ravnborg continue; 569a88b5ba8SSam Ravnborg cpu_list[i] = 0xffff; 570a88b5ba8SSam Ravnborg this_cnt++; 571a88b5ba8SSam Ravnborg if (this_cnt == 32) 572a88b5ba8SSam Ravnborg break; 573a88b5ba8SSam Ravnborg } 574a88b5ba8SSam Ravnborg goto retry; 575a88b5ba8SSam Ravnborg } 576a88b5ba8SSam Ravnborg return; 577a88b5ba8SSam Ravnborg } 578a88b5ba8SSam Ravnborg if (!--stuck) 579a88b5ba8SSam Ravnborg break; 580a88b5ba8SSam Ravnborg } while (dispatch_stat & busy_mask); 581a88b5ba8SSam Ravnborg 582a88b5ba8SSam Ravnborg __asm__ __volatile__("wrpr %0, 0x0, %%pstate" 583a88b5ba8SSam Ravnborg : : "r" (pstate)); 584a88b5ba8SSam Ravnborg 585a88b5ba8SSam Ravnborg if (dispatch_stat & busy_mask) { 586a88b5ba8SSam Ravnborg /* Busy bits will not clear, continue instead 587a88b5ba8SSam Ravnborg * of freezing up on this cpu. 588a88b5ba8SSam Ravnborg */ 58990181136SSam Ravnborg printk("CPU[%d]: mondo stuckage result[%016llx]\n", 590a88b5ba8SSam Ravnborg smp_processor_id(), dispatch_stat); 591a88b5ba8SSam Ravnborg } else { 592a88b5ba8SSam Ravnborg int i, this_busy_nack = 0; 593a88b5ba8SSam Ravnborg 594a88b5ba8SSam Ravnborg /* Delay some random time with interrupts enabled 595a88b5ba8SSam Ravnborg * to prevent deadlock. 596a88b5ba8SSam Ravnborg */ 597a88b5ba8SSam Ravnborg udelay(2 * nack_busy_id); 598a88b5ba8SSam Ravnborg 599a88b5ba8SSam Ravnborg /* Clear out the mask bits for cpus which did not 600a88b5ba8SSam Ravnborg * NACK us. 601a88b5ba8SSam Ravnborg */ 602a88b5ba8SSam Ravnborg for (i = 0; i < cnt; i++) { 603a88b5ba8SSam Ravnborg u64 check_mask, nr; 604a88b5ba8SSam Ravnborg 605a88b5ba8SSam Ravnborg nr = cpu_list[i]; 606a88b5ba8SSam Ravnborg if (nr == 0xffff) 607a88b5ba8SSam Ravnborg continue; 608a88b5ba8SSam Ravnborg 609a88b5ba8SSam Ravnborg if (is_jbus) 610a88b5ba8SSam Ravnborg check_mask = (0x2UL << (2*nr)); 611a88b5ba8SSam Ravnborg else 612a88b5ba8SSam Ravnborg check_mask = (0x2UL << 613a88b5ba8SSam Ravnborg this_busy_nack); 614a88b5ba8SSam Ravnborg if ((dispatch_stat & check_mask) == 0) 615a88b5ba8SSam Ravnborg cpu_list[i] = 0xffff; 616a88b5ba8SSam Ravnborg this_busy_nack += 2; 617a88b5ba8SSam Ravnborg if (this_busy_nack == 64) 618a88b5ba8SSam Ravnborg break; 619a88b5ba8SSam Ravnborg } 620a88b5ba8SSam Ravnborg 621a88b5ba8SSam Ravnborg goto retry; 622a88b5ba8SSam Ravnborg } 623a88b5ba8SSam Ravnborg } 624a88b5ba8SSam Ravnborg } 625a88b5ba8SSam Ravnborg 6269d53caecSJane Chu #define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid]) 6279d53caecSJane Chu #define MONDO_USEC_WAIT_MIN 2 6289d53caecSJane Chu #define MONDO_USEC_WAIT_MAX 100 6299d53caecSJane Chu #define MONDO_RETRY_LIMIT 500000 6309d53caecSJane Chu 6319d53caecSJane Chu /* Multi-cpu list version. 6329d53caecSJane Chu * 6339d53caecSJane Chu * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'. 6349d53caecSJane Chu * Sometimes not all cpus receive the mondo, requiring us to re-send 6359d53caecSJane Chu * the mondo until all cpus have received, or cpus are truly stuck 6369d53caecSJane Chu * unable to receive mondo, and we timeout. 6379d53caecSJane Chu * Occasionally a target cpu strand is borrowed briefly by hypervisor to 6389d53caecSJane Chu * perform guest service, such as PCIe error handling. Consider the 6399d53caecSJane Chu * service time, 1 second overall wait is reasonable for 1 cpu. 6409d53caecSJane Chu * Here two in-between mondo check wait time are defined: 2 usec for 6419d53caecSJane Chu * single cpu quick turn around and up to 100usec for large cpu count. 6429d53caecSJane Chu * Deliver mondo to large number of cpus could take longer, we adjusts 6439d53caecSJane Chu * the retry count as long as target cpus are making forward progress. 6449d53caecSJane Chu */ 645a88b5ba8SSam Ravnborg static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt) 646a88b5ba8SSam Ravnborg { 6479d53caecSJane Chu int this_cpu, tot_cpus, prev_sent, i, rem; 6489d53caecSJane Chu int usec_wait, retries, tot_retries; 6499d53caecSJane Chu u16 first_cpu = 0xffff; 6509d53caecSJane Chu unsigned long xc_rcvd = 0; 651a88b5ba8SSam Ravnborg unsigned long status; 6529d53caecSJane Chu int ecpuerror_id = 0; 6539d53caecSJane Chu int enocpu_id = 0; 654a88b5ba8SSam Ravnborg u16 *cpu_list; 6559d53caecSJane Chu u16 cpu; 656a88b5ba8SSam Ravnborg 657a88b5ba8SSam Ravnborg this_cpu = smp_processor_id(); 658a88b5ba8SSam Ravnborg cpu_list = __va(tb->cpu_list_pa); 6599d53caecSJane Chu usec_wait = cnt * MONDO_USEC_WAIT_MIN; 6609d53caecSJane Chu if (usec_wait > MONDO_USEC_WAIT_MAX) 6619d53caecSJane Chu usec_wait = MONDO_USEC_WAIT_MAX; 6629d53caecSJane Chu retries = tot_retries = 0; 6639d53caecSJane Chu tot_cpus = cnt; 664a88b5ba8SSam Ravnborg prev_sent = 0; 6659d53caecSJane Chu 666a88b5ba8SSam Ravnborg do { 6679d53caecSJane Chu int n_sent, mondo_delivered, target_cpu_busy; 668a88b5ba8SSam Ravnborg 669a88b5ba8SSam Ravnborg status = sun4v_cpu_mondo_send(cnt, 670a88b5ba8SSam Ravnborg tb->cpu_list_pa, 671a88b5ba8SSam Ravnborg tb->cpu_mondo_block_pa); 672a88b5ba8SSam Ravnborg 673a88b5ba8SSam Ravnborg /* HV_EOK means all cpus received the xcall, we're done. */ 674a88b5ba8SSam Ravnborg if (likely(status == HV_EOK)) 6759d53caecSJane Chu goto xcall_done; 6769d53caecSJane Chu 6779d53caecSJane Chu /* If not these non-fatal errors, panic */ 6789d53caecSJane Chu if (unlikely((status != HV_EWOULDBLOCK) && 6799d53caecSJane Chu (status != HV_ECPUERROR) && 6809d53caecSJane Chu (status != HV_ENOCPU))) 6819d53caecSJane Chu goto fatal_errors; 682a88b5ba8SSam Ravnborg 683a88b5ba8SSam Ravnborg /* First, see if we made any forward progress. 684a88b5ba8SSam Ravnborg * 6859d53caecSJane Chu * Go through the cpu_list, count the target cpus that have 6869d53caecSJane Chu * received our mondo (n_sent), and those that did not (rem). 6879d53caecSJane Chu * Re-pack cpu_list with the cpus remain to be retried in the 6889d53caecSJane Chu * front - this simplifies tracking the truly stalled cpus. 6899d53caecSJane Chu * 690a88b5ba8SSam Ravnborg * The hypervisor indicates successful sends by setting 691a88b5ba8SSam Ravnborg * cpu list entries to the value 0xffff. 6929d53caecSJane Chu * 6939d53caecSJane Chu * EWOULDBLOCK means some target cpus did not receive the 6949d53caecSJane Chu * mondo and retry usually helps. 6959d53caecSJane Chu * 6969d53caecSJane Chu * ECPUERROR means at least one target cpu is in error state, 6979d53caecSJane Chu * it's usually safe to skip the faulty cpu and retry. 6989d53caecSJane Chu * 6999d53caecSJane Chu * ENOCPU means one of the target cpu doesn't belong to the 7009d53caecSJane Chu * domain, perhaps offlined which is unexpected, but not 7019d53caecSJane Chu * fatal and it's okay to skip the offlined cpu. 702a88b5ba8SSam Ravnborg */ 7039d53caecSJane Chu rem = 0; 704a88b5ba8SSam Ravnborg n_sent = 0; 705a88b5ba8SSam Ravnborg for (i = 0; i < cnt; i++) { 7069d53caecSJane Chu cpu = cpu_list[i]; 7079d53caecSJane Chu if (likely(cpu == 0xffff)) { 708a88b5ba8SSam Ravnborg n_sent++; 7099d53caecSJane Chu } else if ((status == HV_ECPUERROR) && 7109d53caecSJane Chu (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) { 7119d53caecSJane Chu ecpuerror_id = cpu + 1; 7129d53caecSJane Chu } else if (status == HV_ENOCPU && !cpu_online(cpu)) { 7139d53caecSJane Chu enocpu_id = cpu + 1; 7149d53caecSJane Chu } else { 7159d53caecSJane Chu cpu_list[rem++] = cpu; 7169d53caecSJane Chu } 717a88b5ba8SSam Ravnborg } 718a88b5ba8SSam Ravnborg 7199d53caecSJane Chu /* No cpu remained, we're done. */ 7209d53caecSJane Chu if (rem == 0) 7219d53caecSJane Chu break; 722a88b5ba8SSam Ravnborg 7239d53caecSJane Chu /* Otherwise, update the cpu count for retry. */ 7249d53caecSJane Chu cnt = rem; 7259d53caecSJane Chu 7269d53caecSJane Chu /* Record the overall number of mondos received by the 7279d53caecSJane Chu * first of the remaining cpus. 7289d53caecSJane Chu */ 7299d53caecSJane Chu if (first_cpu != cpu_list[0]) { 7309d53caecSJane Chu first_cpu = cpu_list[0]; 7319d53caecSJane Chu xc_rcvd = CPU_MONDO_COUNTER(first_cpu); 7329d53caecSJane Chu } 7339d53caecSJane Chu 7349d53caecSJane Chu /* Was any mondo delivered successfully? */ 7359d53caecSJane Chu mondo_delivered = (n_sent > prev_sent); 736a88b5ba8SSam Ravnborg prev_sent = n_sent; 737a88b5ba8SSam Ravnborg 7389d53caecSJane Chu /* or, was any target cpu busy processing other mondos? */ 7399d53caecSJane Chu target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu)); 7409d53caecSJane Chu xc_rcvd = CPU_MONDO_COUNTER(first_cpu); 7419d53caecSJane Chu 7429d53caecSJane Chu /* Retry count is for no progress. If we're making progress, 7439d53caecSJane Chu * reset the retry count. 744a88b5ba8SSam Ravnborg */ 7459d53caecSJane Chu if (likely(mondo_delivered || target_cpu_busy)) { 7469d53caecSJane Chu tot_retries += retries; 7479d53caecSJane Chu retries = 0; 7489d53caecSJane Chu } else if (unlikely(retries > MONDO_RETRY_LIMIT)) { 749a88b5ba8SSam Ravnborg goto fatal_mondo_timeout; 750a88b5ba8SSam Ravnborg } 7519d53caecSJane Chu 7529d53caecSJane Chu /* Delay a little bit to let other cpus catch up on 7539d53caecSJane Chu * their cpu mondo queue work. 7549d53caecSJane Chu */ 7559d53caecSJane Chu if (!mondo_delivered) 7569d53caecSJane Chu udelay(usec_wait); 7579d53caecSJane Chu 7589d53caecSJane Chu retries++; 759a88b5ba8SSam Ravnborg } while (1); 760a88b5ba8SSam Ravnborg 7619d53caecSJane Chu xcall_done: 7629d53caecSJane Chu if (unlikely(ecpuerror_id > 0)) { 7639d53caecSJane Chu pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n", 7649d53caecSJane Chu this_cpu, ecpuerror_id - 1); 7659d53caecSJane Chu } else if (unlikely(enocpu_id > 0)) { 7669d53caecSJane Chu pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n", 7679d53caecSJane Chu this_cpu, enocpu_id - 1); 7689d53caecSJane Chu } 769a88b5ba8SSam Ravnborg return; 770a88b5ba8SSam Ravnborg 7719d53caecSJane Chu fatal_errors: 7729d53caecSJane Chu /* fatal errors include bad alignment, etc */ 7739d53caecSJane Chu pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n", 7749d53caecSJane Chu this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa); 7759d53caecSJane Chu panic("Unexpected SUN4V mondo error %lu\n", status); 776a88b5ba8SSam Ravnborg 777a88b5ba8SSam Ravnborg fatal_mondo_timeout: 7789d53caecSJane Chu /* some cpus being non-responsive to the cpu mondo */ 7799d53caecSJane Chu pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n", 7809d53caecSJane Chu this_cpu, first_cpu, (tot_retries + retries), tot_cpus); 7819d53caecSJane Chu panic("SUN4V mondo timeout panic\n"); 782a88b5ba8SSam Ravnborg } 783a88b5ba8SSam Ravnborg 784a88b5ba8SSam Ravnborg static void (*xcall_deliver_impl)(struct trap_per_cpu *, int); 785a88b5ba8SSam Ravnborg 786a88b5ba8SSam Ravnborg static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask) 787a88b5ba8SSam Ravnborg { 788a88b5ba8SSam Ravnborg struct trap_per_cpu *tb; 789a88b5ba8SSam Ravnborg int this_cpu, i, cnt; 790a88b5ba8SSam Ravnborg unsigned long flags; 791a88b5ba8SSam Ravnborg u16 *cpu_list; 792a88b5ba8SSam Ravnborg u64 *mondo; 793a88b5ba8SSam Ravnborg 794a88b5ba8SSam Ravnborg /* We have to do this whole thing with interrupts fully disabled. 795a88b5ba8SSam Ravnborg * Otherwise if we send an xcall from interrupt context it will 796a88b5ba8SSam Ravnborg * corrupt both our mondo block and cpu list state. 797a88b5ba8SSam Ravnborg * 798a88b5ba8SSam Ravnborg * One consequence of this is that we cannot use timeout mechanisms 799a88b5ba8SSam Ravnborg * that depend upon interrupts being delivered locally. So, for 800a88b5ba8SSam Ravnborg * example, we cannot sample jiffies and expect it to advance. 801a88b5ba8SSam Ravnborg * 802a88b5ba8SSam Ravnborg * Fortunately, udelay() uses %stick/%tick so we can use that. 803a88b5ba8SSam Ravnborg */ 804a88b5ba8SSam Ravnborg local_irq_save(flags); 805a88b5ba8SSam Ravnborg 806a88b5ba8SSam Ravnborg this_cpu = smp_processor_id(); 807a88b5ba8SSam Ravnborg tb = &trap_block[this_cpu]; 808a88b5ba8SSam Ravnborg 809a88b5ba8SSam Ravnborg mondo = __va(tb->cpu_mondo_block_pa); 810a88b5ba8SSam Ravnborg mondo[0] = data0; 811a88b5ba8SSam Ravnborg mondo[1] = data1; 812a88b5ba8SSam Ravnborg mondo[2] = data2; 813a88b5ba8SSam Ravnborg wmb(); 814a88b5ba8SSam Ravnborg 815a88b5ba8SSam Ravnborg cpu_list = __va(tb->cpu_list_pa); 816a88b5ba8SSam Ravnborg 817a88b5ba8SSam Ravnborg /* Setup the initial cpu list. */ 818a88b5ba8SSam Ravnborg cnt = 0; 8198e757281SRusty Russell for_each_cpu(i, mask) { 820a88b5ba8SSam Ravnborg if (i == this_cpu || !cpu_online(i)) 821a88b5ba8SSam Ravnborg continue; 822a88b5ba8SSam Ravnborg cpu_list[cnt++] = i; 823a88b5ba8SSam Ravnborg } 824a88b5ba8SSam Ravnborg 825a88b5ba8SSam Ravnborg if (cnt) 826a88b5ba8SSam Ravnborg xcall_deliver_impl(tb, cnt); 827a88b5ba8SSam Ravnborg 828a88b5ba8SSam Ravnborg local_irq_restore(flags); 829a88b5ba8SSam Ravnborg } 830a88b5ba8SSam Ravnborg 831a88b5ba8SSam Ravnborg /* Send cross call to all processors mentioned in MASK_P 832a88b5ba8SSam Ravnborg * except self. Really, there are only two cases currently, 833fb1fece5SKOSAKI Motohiro * "cpu_online_mask" and "mm_cpumask(mm)". 834a88b5ba8SSam Ravnborg */ 835a88b5ba8SSam Ravnborg static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask) 836a88b5ba8SSam Ravnborg { 837a88b5ba8SSam Ravnborg u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff)); 838a88b5ba8SSam Ravnborg 839a88b5ba8SSam Ravnborg xcall_deliver(data0, data1, data2, mask); 840a88b5ba8SSam Ravnborg } 841a88b5ba8SSam Ravnborg 842a88b5ba8SSam Ravnborg /* Send cross call to all processors except self. */ 843a88b5ba8SSam Ravnborg static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2) 844a88b5ba8SSam Ravnborg { 845fb1fece5SKOSAKI Motohiro smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask); 846a88b5ba8SSam Ravnborg } 847a88b5ba8SSam Ravnborg 848a88b5ba8SSam Ravnborg extern unsigned long xcall_sync_tick; 849a88b5ba8SSam Ravnborg 850a88b5ba8SSam Ravnborg static void smp_start_sync_tick_client(int cpu) 851a88b5ba8SSam Ravnborg { 852a88b5ba8SSam Ravnborg xcall_deliver((u64) &xcall_sync_tick, 0, 0, 853fb1fece5SKOSAKI Motohiro cpumask_of(cpu)); 854a88b5ba8SSam Ravnborg } 855a88b5ba8SSam Ravnborg 856a88b5ba8SSam Ravnborg extern unsigned long xcall_call_function; 857a88b5ba8SSam Ravnborg 858f46df02aSRusty Russell void arch_send_call_function_ipi_mask(const struct cpumask *mask) 859a88b5ba8SSam Ravnborg { 860f46df02aSRusty Russell xcall_deliver((u64) &xcall_call_function, 0, 0, mask); 861a88b5ba8SSam Ravnborg } 862a88b5ba8SSam Ravnborg 863a88b5ba8SSam Ravnborg extern unsigned long xcall_call_function_single; 864a88b5ba8SSam Ravnborg 865a88b5ba8SSam Ravnborg void arch_send_call_function_single_ipi(int cpu) 866a88b5ba8SSam Ravnborg { 867a88b5ba8SSam Ravnborg xcall_deliver((u64) &xcall_call_function_single, 0, 0, 868fb1fece5SKOSAKI Motohiro cpumask_of(cpu)); 869a88b5ba8SSam Ravnborg } 870a88b5ba8SSam Ravnborg 8719960e9e8SDavid S. Miller void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs) 872a88b5ba8SSam Ravnborg { 873a88b5ba8SSam Ravnborg clear_softint(1 << irq); 874ab5c7809SDavid S. Miller irq_enter(); 875a88b5ba8SSam Ravnborg generic_smp_call_function_interrupt(); 876ab5c7809SDavid S. Miller irq_exit(); 877a88b5ba8SSam Ravnborg } 878a88b5ba8SSam Ravnborg 8799960e9e8SDavid S. Miller void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs) 880a88b5ba8SSam Ravnborg { 881a88b5ba8SSam Ravnborg clear_softint(1 << irq); 882ab5c7809SDavid S. Miller irq_enter(); 883a88b5ba8SSam Ravnborg generic_smp_call_function_single_interrupt(); 884ab5c7809SDavid S. Miller irq_exit(); 885a88b5ba8SSam Ravnborg } 886a88b5ba8SSam Ravnborg 887a88b5ba8SSam Ravnborg static void tsb_sync(void *info) 888a88b5ba8SSam Ravnborg { 889a88b5ba8SSam Ravnborg struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()]; 890a88b5ba8SSam Ravnborg struct mm_struct *mm = info; 891a88b5ba8SSam Ravnborg 89242b2aa86SJustin P. Mattock /* It is not valid to test "current->active_mm == mm" here. 893a88b5ba8SSam Ravnborg * 894a88b5ba8SSam Ravnborg * The value of "current" is not changed atomically with 895a88b5ba8SSam Ravnborg * switch_mm(). But that's OK, we just need to check the 896a88b5ba8SSam Ravnborg * current cpu's trap block PGD physical address. 897a88b5ba8SSam Ravnborg */ 898a88b5ba8SSam Ravnborg if (tp->pgd_paddr == __pa(mm->pgd)) 899a88b5ba8SSam Ravnborg tsb_context_switch(mm); 900a88b5ba8SSam Ravnborg } 901a88b5ba8SSam Ravnborg 902a88b5ba8SSam Ravnborg void smp_tsb_sync(struct mm_struct *mm) 903a88b5ba8SSam Ravnborg { 90481f1adf0SRusty Russell smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1); 905a88b5ba8SSam Ravnborg } 906a88b5ba8SSam Ravnborg 907a88b5ba8SSam Ravnborg extern unsigned long xcall_flush_tlb_mm; 908f36391d2SDavid S. Miller extern unsigned long xcall_flush_tlb_page; 909a88b5ba8SSam Ravnborg extern unsigned long xcall_flush_tlb_kernel_range; 910a88b5ba8SSam Ravnborg extern unsigned long xcall_fetch_glob_regs; 911916ca14aSDavid S. Miller extern unsigned long xcall_fetch_glob_pmu; 912916ca14aSDavid S. Miller extern unsigned long xcall_fetch_glob_pmu_n4; 913a88b5ba8SSam Ravnborg extern unsigned long xcall_receive_signal; 914a88b5ba8SSam Ravnborg extern unsigned long xcall_new_mmu_context_version; 915a88b5ba8SSam Ravnborg #ifdef CONFIG_KGDB 916a88b5ba8SSam Ravnborg extern unsigned long xcall_kgdb_capture; 917a88b5ba8SSam Ravnborg #endif 918a88b5ba8SSam Ravnborg 919a88b5ba8SSam Ravnborg #ifdef DCACHE_ALIASING_POSSIBLE 920a88b5ba8SSam Ravnborg extern unsigned long xcall_flush_dcache_page_cheetah; 921a88b5ba8SSam Ravnborg #endif 922a88b5ba8SSam Ravnborg extern unsigned long xcall_flush_dcache_page_spitfire; 923a88b5ba8SSam Ravnborg 924a88b5ba8SSam Ravnborg static inline void __local_flush_dcache_page(struct page *page) 925a88b5ba8SSam Ravnborg { 926a88b5ba8SSam Ravnborg #ifdef DCACHE_ALIASING_POSSIBLE 927a88b5ba8SSam Ravnborg __flush_dcache_page(page_address(page), 928a88b5ba8SSam Ravnborg ((tlb_type == spitfire) && 929cb9f753aSHuang Ying page_mapping_file(page) != NULL)); 930a88b5ba8SSam Ravnborg #else 931cb9f753aSHuang Ying if (page_mapping_file(page) != NULL && 932a88b5ba8SSam Ravnborg tlb_type == spitfire) 933a88b5ba8SSam Ravnborg __flush_icache_page(__pa(page_address(page))); 934a88b5ba8SSam Ravnborg #endif 935a88b5ba8SSam Ravnborg } 936a88b5ba8SSam Ravnborg 937a88b5ba8SSam Ravnborg void smp_flush_dcache_page_impl(struct page *page, int cpu) 938a88b5ba8SSam Ravnborg { 939a88b5ba8SSam Ravnborg int this_cpu; 940a88b5ba8SSam Ravnborg 941a88b5ba8SSam Ravnborg if (tlb_type == hypervisor) 942a88b5ba8SSam Ravnborg return; 943a88b5ba8SSam Ravnborg 944a88b5ba8SSam Ravnborg #ifdef CONFIG_DEBUG_DCFLUSH 945a88b5ba8SSam Ravnborg atomic_inc(&dcpage_flushes); 946a88b5ba8SSam Ravnborg #endif 947a88b5ba8SSam Ravnborg 948a88b5ba8SSam Ravnborg this_cpu = get_cpu(); 949a88b5ba8SSam Ravnborg 950a88b5ba8SSam Ravnborg if (cpu == this_cpu) { 951a88b5ba8SSam Ravnborg __local_flush_dcache_page(page); 952a88b5ba8SSam Ravnborg } else if (cpu_online(cpu)) { 953a88b5ba8SSam Ravnborg void *pg_addr = page_address(page); 954a88b5ba8SSam Ravnborg u64 data0 = 0; 955a88b5ba8SSam Ravnborg 956a88b5ba8SSam Ravnborg if (tlb_type == spitfire) { 957a88b5ba8SSam Ravnborg data0 = ((u64)&xcall_flush_dcache_page_spitfire); 958cb9f753aSHuang Ying if (page_mapping_file(page) != NULL) 959a88b5ba8SSam Ravnborg data0 |= ((u64)1 << 32); 960a88b5ba8SSam Ravnborg } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { 961a88b5ba8SSam Ravnborg #ifdef DCACHE_ALIASING_POSSIBLE 962a88b5ba8SSam Ravnborg data0 = ((u64)&xcall_flush_dcache_page_cheetah); 963a88b5ba8SSam Ravnborg #endif 964a88b5ba8SSam Ravnborg } 965a88b5ba8SSam Ravnborg if (data0) { 966a88b5ba8SSam Ravnborg xcall_deliver(data0, __pa(pg_addr), 967fb1fece5SKOSAKI Motohiro (u64) pg_addr, cpumask_of(cpu)); 968a88b5ba8SSam Ravnborg #ifdef CONFIG_DEBUG_DCFLUSH 969a88b5ba8SSam Ravnborg atomic_inc(&dcpage_flushes_xcall); 970a88b5ba8SSam Ravnborg #endif 971a88b5ba8SSam Ravnborg } 972a88b5ba8SSam Ravnborg } 973a88b5ba8SSam Ravnborg 974a88b5ba8SSam Ravnborg put_cpu(); 975a88b5ba8SSam Ravnborg } 976a88b5ba8SSam Ravnborg 977a88b5ba8SSam Ravnborg void flush_dcache_page_all(struct mm_struct *mm, struct page *page) 978a88b5ba8SSam Ravnborg { 979a88b5ba8SSam Ravnborg void *pg_addr; 980a88b5ba8SSam Ravnborg u64 data0; 981a88b5ba8SSam Ravnborg 982a88b5ba8SSam Ravnborg if (tlb_type == hypervisor) 983a88b5ba8SSam Ravnborg return; 984a88b5ba8SSam Ravnborg 985c6fee081SDavid S. Miller preempt_disable(); 986a88b5ba8SSam Ravnborg 987a88b5ba8SSam Ravnborg #ifdef CONFIG_DEBUG_DCFLUSH 988a88b5ba8SSam Ravnborg atomic_inc(&dcpage_flushes); 989a88b5ba8SSam Ravnborg #endif 990a88b5ba8SSam Ravnborg data0 = 0; 991a88b5ba8SSam Ravnborg pg_addr = page_address(page); 992a88b5ba8SSam Ravnborg if (tlb_type == spitfire) { 993a88b5ba8SSam Ravnborg data0 = ((u64)&xcall_flush_dcache_page_spitfire); 994cb9f753aSHuang Ying if (page_mapping_file(page) != NULL) 995a88b5ba8SSam Ravnborg data0 |= ((u64)1 << 32); 996a88b5ba8SSam Ravnborg } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { 997a88b5ba8SSam Ravnborg #ifdef DCACHE_ALIASING_POSSIBLE 998a88b5ba8SSam Ravnborg data0 = ((u64)&xcall_flush_dcache_page_cheetah); 999a88b5ba8SSam Ravnborg #endif 1000a88b5ba8SSam Ravnborg } 1001a88b5ba8SSam Ravnborg if (data0) { 1002a88b5ba8SSam Ravnborg xcall_deliver(data0, __pa(pg_addr), 1003fb1fece5SKOSAKI Motohiro (u64) pg_addr, cpu_online_mask); 1004a88b5ba8SSam Ravnborg #ifdef CONFIG_DEBUG_DCFLUSH 1005a88b5ba8SSam Ravnborg atomic_inc(&dcpage_flushes_xcall); 1006a88b5ba8SSam Ravnborg #endif 1007a88b5ba8SSam Ravnborg } 1008a88b5ba8SSam Ravnborg __local_flush_dcache_page(page); 1009a88b5ba8SSam Ravnborg 1010c6fee081SDavid S. Miller preempt_enable(); 1011a88b5ba8SSam Ravnborg } 1012a88b5ba8SSam Ravnborg 1013a88b5ba8SSam Ravnborg #ifdef CONFIG_KGDB 10149ef7fa50SDouglas Anderson void kgdb_roundup_cpus(void) 1015a88b5ba8SSam Ravnborg { 1016a88b5ba8SSam Ravnborg smp_cross_call(&xcall_kgdb_capture, 0, 0, 0); 1017a88b5ba8SSam Ravnborg } 1018a88b5ba8SSam Ravnborg #endif 1019a88b5ba8SSam Ravnborg 1020a88b5ba8SSam Ravnborg void smp_fetch_global_regs(void) 1021a88b5ba8SSam Ravnborg { 1022a88b5ba8SSam Ravnborg smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0); 1023a88b5ba8SSam Ravnborg } 1024a88b5ba8SSam Ravnborg 1025916ca14aSDavid S. Miller void smp_fetch_global_pmu(void) 1026916ca14aSDavid S. Miller { 1027916ca14aSDavid S. Miller if (tlb_type == hypervisor && 1028916ca14aSDavid S. Miller sun4v_chip_type >= SUN4V_CHIP_NIAGARA4) 1029916ca14aSDavid S. Miller smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0); 1030916ca14aSDavid S. Miller else 1031916ca14aSDavid S. Miller smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0); 1032916ca14aSDavid S. Miller } 1033916ca14aSDavid S. Miller 1034a88b5ba8SSam Ravnborg /* We know that the window frames of the user have been flushed 1035a88b5ba8SSam Ravnborg * to the stack before we get here because all callers of us 1036a88b5ba8SSam Ravnborg * are flush_tlb_*() routines, and these run after flush_cache_*() 1037a88b5ba8SSam Ravnborg * which performs the flushw. 1038a88b5ba8SSam Ravnborg * 1039bafb056cSNicholas Piggin * mm->cpu_vm_mask is a bit mask of which cpus an address 1040a88b5ba8SSam Ravnborg * space has (potentially) executed on, this is the heuristic 1041bafb056cSNicholas Piggin * we use to limit cross calls. 1042a88b5ba8SSam Ravnborg */ 1043a88b5ba8SSam Ravnborg 1044a88b5ba8SSam Ravnborg /* This currently is only used by the hugetlb arch pre-fault 1045a88b5ba8SSam Ravnborg * hook on UltraSPARC-III+ and later when changing the pagesize 1046a88b5ba8SSam Ravnborg * bits of the context register for an address space. 1047a88b5ba8SSam Ravnborg */ 1048a88b5ba8SSam Ravnborg void smp_flush_tlb_mm(struct mm_struct *mm) 1049a88b5ba8SSam Ravnborg { 1050a88b5ba8SSam Ravnborg u32 ctx = CTX_HWBITS(mm->context); 1051a88b5ba8SSam Ravnborg 1052bafb056cSNicholas Piggin get_cpu(); 1053a88b5ba8SSam Ravnborg 1054a88b5ba8SSam Ravnborg smp_cross_call_masked(&xcall_flush_tlb_mm, 1055a88b5ba8SSam Ravnborg ctx, 0, 0, 105681f1adf0SRusty Russell mm_cpumask(mm)); 1057a88b5ba8SSam Ravnborg 1058a88b5ba8SSam Ravnborg __flush_tlb_mm(ctx, SECONDARY_CONTEXT); 1059a88b5ba8SSam Ravnborg 1060a88b5ba8SSam Ravnborg put_cpu(); 1061a88b5ba8SSam Ravnborg } 1062a88b5ba8SSam Ravnborg 1063f36391d2SDavid S. Miller struct tlb_pending_info { 1064f36391d2SDavid S. Miller unsigned long ctx; 1065f36391d2SDavid S. Miller unsigned long nr; 1066f36391d2SDavid S. Miller unsigned long *vaddrs; 1067f36391d2SDavid S. Miller }; 1068f36391d2SDavid S. Miller 1069f36391d2SDavid S. Miller static void tlb_pending_func(void *info) 1070f36391d2SDavid S. Miller { 1071f36391d2SDavid S. Miller struct tlb_pending_info *t = info; 1072f36391d2SDavid S. Miller 1073f36391d2SDavid S. Miller __flush_tlb_pending(t->ctx, t->nr, t->vaddrs); 1074f36391d2SDavid S. Miller } 1075f36391d2SDavid S. Miller 1076a88b5ba8SSam Ravnborg void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) 1077a88b5ba8SSam Ravnborg { 1078a88b5ba8SSam Ravnborg u32 ctx = CTX_HWBITS(mm->context); 1079f36391d2SDavid S. Miller struct tlb_pending_info info; 1080bafb056cSNicholas Piggin 1081bafb056cSNicholas Piggin get_cpu(); 1082f36391d2SDavid S. Miller 1083f36391d2SDavid S. Miller info.ctx = ctx; 1084f36391d2SDavid S. Miller info.nr = nr; 1085f36391d2SDavid S. Miller info.vaddrs = vaddrs; 1086f36391d2SDavid S. Miller 1087f36391d2SDavid S. Miller smp_call_function_many(mm_cpumask(mm), tlb_pending_func, 1088f36391d2SDavid S. Miller &info, 1); 1089f36391d2SDavid S. Miller 1090f36391d2SDavid S. Miller __flush_tlb_pending(ctx, nr, vaddrs); 1091f36391d2SDavid S. Miller 1092f36391d2SDavid S. Miller put_cpu(); 1093f36391d2SDavid S. Miller } 1094f36391d2SDavid S. Miller 1095f36391d2SDavid S. Miller void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) 1096f36391d2SDavid S. Miller { 1097f36391d2SDavid S. Miller unsigned long context = CTX_HWBITS(mm->context); 1098a88b5ba8SSam Ravnborg 1099bafb056cSNicholas Piggin get_cpu(); 1100bafb056cSNicholas Piggin 1101f36391d2SDavid S. Miller smp_cross_call_masked(&xcall_flush_tlb_page, 1102f36391d2SDavid S. Miller context, vaddr, 0, 110381f1adf0SRusty Russell mm_cpumask(mm)); 1104bafb056cSNicholas Piggin 1105f36391d2SDavid S. Miller __flush_tlb_page(context, vaddr); 1106a88b5ba8SSam Ravnborg 1107a88b5ba8SSam Ravnborg put_cpu(); 1108a88b5ba8SSam Ravnborg } 1109a88b5ba8SSam Ravnborg 1110a88b5ba8SSam Ravnborg void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) 1111a88b5ba8SSam Ravnborg { 1112a88b5ba8SSam Ravnborg start &= PAGE_MASK; 1113a88b5ba8SSam Ravnborg end = PAGE_ALIGN(end); 1114a88b5ba8SSam Ravnborg if (start != end) { 1115a88b5ba8SSam Ravnborg smp_cross_call(&xcall_flush_tlb_kernel_range, 1116a88b5ba8SSam Ravnborg 0, start, end); 1117a88b5ba8SSam Ravnborg 1118a88b5ba8SSam Ravnborg __flush_tlb_kernel_range(start, end); 1119a88b5ba8SSam Ravnborg } 1120a88b5ba8SSam Ravnborg } 1121a88b5ba8SSam Ravnborg 1122a88b5ba8SSam Ravnborg /* CPU capture. */ 1123a88b5ba8SSam Ravnborg /* #define CAPTURE_DEBUG */ 1124a88b5ba8SSam Ravnborg extern unsigned long xcall_capture; 1125a88b5ba8SSam Ravnborg 1126a88b5ba8SSam Ravnborg static atomic_t smp_capture_depth = ATOMIC_INIT(0); 1127a88b5ba8SSam Ravnborg static atomic_t smp_capture_registry = ATOMIC_INIT(0); 1128a88b5ba8SSam Ravnborg static unsigned long penguins_are_doing_time; 1129a88b5ba8SSam Ravnborg 1130a88b5ba8SSam Ravnborg void smp_capture(void) 1131a88b5ba8SSam Ravnborg { 11324f3316c2SPeter Zijlstra int result = atomic_add_return(1, &smp_capture_depth); 1133a88b5ba8SSam Ravnborg 1134a88b5ba8SSam Ravnborg if (result == 1) { 1135a88b5ba8SSam Ravnborg int ncpus = num_online_cpus(); 1136a88b5ba8SSam Ravnborg 1137a88b5ba8SSam Ravnborg #ifdef CAPTURE_DEBUG 1138a88b5ba8SSam Ravnborg printk("CPU[%d]: Sending penguins to jail...", 1139a88b5ba8SSam Ravnborg smp_processor_id()); 1140a88b5ba8SSam Ravnborg #endif 1141a88b5ba8SSam Ravnborg penguins_are_doing_time = 1; 1142a88b5ba8SSam Ravnborg atomic_inc(&smp_capture_registry); 1143a88b5ba8SSam Ravnborg smp_cross_call(&xcall_capture, 0, 0, 0); 1144a88b5ba8SSam Ravnborg while (atomic_read(&smp_capture_registry) != ncpus) 1145a88b5ba8SSam Ravnborg rmb(); 1146a88b5ba8SSam Ravnborg #ifdef CAPTURE_DEBUG 1147a88b5ba8SSam Ravnborg printk("done\n"); 1148a88b5ba8SSam Ravnborg #endif 1149a88b5ba8SSam Ravnborg } 1150a88b5ba8SSam Ravnborg } 1151a88b5ba8SSam Ravnborg 1152a88b5ba8SSam Ravnborg void smp_release(void) 1153a88b5ba8SSam Ravnborg { 1154a88b5ba8SSam Ravnborg if (atomic_dec_and_test(&smp_capture_depth)) { 1155a88b5ba8SSam Ravnborg #ifdef CAPTURE_DEBUG 1156a88b5ba8SSam Ravnborg printk("CPU[%d]: Giving pardon to " 1157a88b5ba8SSam Ravnborg "imprisoned penguins\n", 1158a88b5ba8SSam Ravnborg smp_processor_id()); 1159a88b5ba8SSam Ravnborg #endif 1160a88b5ba8SSam Ravnborg penguins_are_doing_time = 0; 1161a88b5ba8SSam Ravnborg membar_safe("#StoreLoad"); 1162a88b5ba8SSam Ravnborg atomic_dec(&smp_capture_registry); 1163a88b5ba8SSam Ravnborg } 1164a88b5ba8SSam Ravnborg } 1165a88b5ba8SSam Ravnborg 1166a88b5ba8SSam Ravnborg /* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE 1167a88b5ba8SSam Ravnborg * set, so they can service tlb flush xcalls... 1168a88b5ba8SSam Ravnborg */ 1169a88b5ba8SSam Ravnborg extern void prom_world(int); 1170a88b5ba8SSam Ravnborg 11719960e9e8SDavid S. Miller void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs) 1172a88b5ba8SSam Ravnborg { 1173a88b5ba8SSam Ravnborg clear_softint(1 << irq); 1174a88b5ba8SSam Ravnborg 1175a88b5ba8SSam Ravnborg preempt_disable(); 1176a88b5ba8SSam Ravnborg 1177a88b5ba8SSam Ravnborg __asm__ __volatile__("flushw"); 1178a88b5ba8SSam Ravnborg prom_world(1); 1179a88b5ba8SSam Ravnborg atomic_inc(&smp_capture_registry); 1180a88b5ba8SSam Ravnborg membar_safe("#StoreLoad"); 1181a88b5ba8SSam Ravnborg while (penguins_are_doing_time) 1182a88b5ba8SSam Ravnborg rmb(); 1183a88b5ba8SSam Ravnborg atomic_dec(&smp_capture_registry); 1184a88b5ba8SSam Ravnborg prom_world(0); 1185a88b5ba8SSam Ravnborg 1186a88b5ba8SSam Ravnborg preempt_enable(); 1187a88b5ba8SSam Ravnborg } 1188a88b5ba8SSam Ravnborg 1189a88b5ba8SSam Ravnborg /* /proc/profile writes can call this, don't __init it please. */ 1190a88b5ba8SSam Ravnborg int setup_profiling_timer(unsigned int multiplier) 1191a88b5ba8SSam Ravnborg { 1192a88b5ba8SSam Ravnborg return -EINVAL; 1193a88b5ba8SSam Ravnborg } 1194a88b5ba8SSam Ravnborg 1195a88b5ba8SSam Ravnborg void __init smp_prepare_cpus(unsigned int max_cpus) 1196a88b5ba8SSam Ravnborg { 1197a88b5ba8SSam Ravnborg } 1198a88b5ba8SSam Ravnborg 11997c9503b8SGreg Kroah-Hartman void smp_prepare_boot_cpu(void) 1200a88b5ba8SSam Ravnborg { 1201a88b5ba8SSam Ravnborg } 1202a88b5ba8SSam Ravnborg 1203a88b5ba8SSam Ravnborg void __init smp_setup_processor_id(void) 1204a88b5ba8SSam Ravnborg { 1205a88b5ba8SSam Ravnborg if (tlb_type == spitfire) 1206a88b5ba8SSam Ravnborg xcall_deliver_impl = spitfire_xcall_deliver; 1207a88b5ba8SSam Ravnborg else if (tlb_type == cheetah || tlb_type == cheetah_plus) 1208a88b5ba8SSam Ravnborg xcall_deliver_impl = cheetah_xcall_deliver; 1209a88b5ba8SSam Ravnborg else 1210a88b5ba8SSam Ravnborg xcall_deliver_impl = hypervisor_xcall_deliver; 1211a88b5ba8SSam Ravnborg } 1212a88b5ba8SSam Ravnborg 12139b2f753eSAtish Patra void __init smp_fill_in_cpu_possible_map(void) 12149b2f753eSAtish Patra { 12159b2f753eSAtish Patra int possible_cpus = num_possible_cpus(); 12169b2f753eSAtish Patra int i; 12179b2f753eSAtish Patra 12189b2f753eSAtish Patra if (possible_cpus > nr_cpu_ids) 12199b2f753eSAtish Patra possible_cpus = nr_cpu_ids; 12209b2f753eSAtish Patra 12219b2f753eSAtish Patra for (i = 0; i < possible_cpus; i++) 12229b2f753eSAtish Patra set_cpu_possible(i, true); 12239b2f753eSAtish Patra for (; i < NR_CPUS; i++) 12249b2f753eSAtish Patra set_cpu_possible(i, false); 12259b2f753eSAtish Patra } 12269b2f753eSAtish Patra 12277c9503b8SGreg Kroah-Hartman void smp_fill_in_sib_core_maps(void) 1228a88b5ba8SSam Ravnborg { 1229a88b5ba8SSam Ravnborg unsigned int i; 1230a88b5ba8SSam Ravnborg 1231a88b5ba8SSam Ravnborg for_each_present_cpu(i) { 1232a88b5ba8SSam Ravnborg unsigned int j; 1233a88b5ba8SSam Ravnborg 1234fb1fece5SKOSAKI Motohiro cpumask_clear(&cpu_core_map[i]); 1235a88b5ba8SSam Ravnborg if (cpu_data(i).core_id == 0) { 1236fb1fece5SKOSAKI Motohiro cpumask_set_cpu(i, &cpu_core_map[i]); 1237a88b5ba8SSam Ravnborg continue; 1238a88b5ba8SSam Ravnborg } 1239a88b5ba8SSam Ravnborg 1240a88b5ba8SSam Ravnborg for_each_present_cpu(j) { 1241a88b5ba8SSam Ravnborg if (cpu_data(i).core_id == 1242a88b5ba8SSam Ravnborg cpu_data(j).core_id) 1243fb1fece5SKOSAKI Motohiro cpumask_set_cpu(j, &cpu_core_map[i]); 1244a88b5ba8SSam Ravnborg } 1245a88b5ba8SSam Ravnborg } 1246a88b5ba8SSam Ravnborg 1247a88b5ba8SSam Ravnborg for_each_present_cpu(i) { 1248a88b5ba8SSam Ravnborg unsigned int j; 1249a88b5ba8SSam Ravnborg 1250acc455cfSchris hyser for_each_present_cpu(j) { 1251d624716bSAtish Patra if (cpu_data(i).max_cache_id == 1252d624716bSAtish Patra cpu_data(j).max_cache_id) 1253d624716bSAtish Patra cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]); 1254d624716bSAtish Patra 1255acc455cfSchris hyser if (cpu_data(i).sock_id == cpu_data(j).sock_id) 1256acc455cfSchris hyser cpumask_set_cpu(j, &cpu_core_sib_map[i]); 1257acc455cfSchris hyser } 1258acc455cfSchris hyser } 1259acc455cfSchris hyser 1260acc455cfSchris hyser for_each_present_cpu(i) { 1261acc455cfSchris hyser unsigned int j; 1262acc455cfSchris hyser 1263fb1fece5SKOSAKI Motohiro cpumask_clear(&per_cpu(cpu_sibling_map, i)); 1264a88b5ba8SSam Ravnborg if (cpu_data(i).proc_id == -1) { 1265fb1fece5SKOSAKI Motohiro cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i)); 1266a88b5ba8SSam Ravnborg continue; 1267a88b5ba8SSam Ravnborg } 1268a88b5ba8SSam Ravnborg 1269a88b5ba8SSam Ravnborg for_each_present_cpu(j) { 1270a88b5ba8SSam Ravnborg if (cpu_data(i).proc_id == 1271a88b5ba8SSam Ravnborg cpu_data(j).proc_id) 1272fb1fece5SKOSAKI Motohiro cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i)); 1273a88b5ba8SSam Ravnborg } 1274a88b5ba8SSam Ravnborg } 1275a88b5ba8SSam Ravnborg } 1276a88b5ba8SSam Ravnborg 12772066aaddSPaul Gortmaker int __cpu_up(unsigned int cpu, struct task_struct *tidle) 1278a88b5ba8SSam Ravnborg { 1279f0a2bc7eSThomas Gleixner int ret = smp_boot_one_cpu(cpu, tidle); 1280a88b5ba8SSam Ravnborg 1281a88b5ba8SSam Ravnborg if (!ret) { 1282fb1fece5SKOSAKI Motohiro cpumask_set_cpu(cpu, &smp_commenced_mask); 1283fb1fece5SKOSAKI Motohiro while (!cpu_online(cpu)) 1284a88b5ba8SSam Ravnborg mb(); 1285fb1fece5SKOSAKI Motohiro if (!cpu_online(cpu)) { 1286a88b5ba8SSam Ravnborg ret = -ENODEV; 1287a88b5ba8SSam Ravnborg } else { 1288a88b5ba8SSam Ravnborg /* On SUN4V, writes to %tick and %stick are 1289a88b5ba8SSam Ravnborg * not allowed. 1290a88b5ba8SSam Ravnborg */ 1291a88b5ba8SSam Ravnborg if (tlb_type != hypervisor) 1292a88b5ba8SSam Ravnborg smp_synchronize_one_tick(cpu); 1293a88b5ba8SSam Ravnborg } 1294a88b5ba8SSam Ravnborg } 1295a88b5ba8SSam Ravnborg return ret; 1296a88b5ba8SSam Ravnborg } 1297a88b5ba8SSam Ravnborg 1298a88b5ba8SSam Ravnborg #ifdef CONFIG_HOTPLUG_CPU 1299a88b5ba8SSam Ravnborg void cpu_play_dead(void) 1300a88b5ba8SSam Ravnborg { 1301a88b5ba8SSam Ravnborg int cpu = smp_processor_id(); 1302a88b5ba8SSam Ravnborg unsigned long pstate; 1303a88b5ba8SSam Ravnborg 1304a88b5ba8SSam Ravnborg idle_task_exit(); 1305a88b5ba8SSam Ravnborg 1306a88b5ba8SSam Ravnborg if (tlb_type == hypervisor) { 1307a88b5ba8SSam Ravnborg struct trap_per_cpu *tb = &trap_block[cpu]; 1308a88b5ba8SSam Ravnborg 1309a88b5ba8SSam Ravnborg sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO, 1310a88b5ba8SSam Ravnborg tb->cpu_mondo_pa, 0); 1311a88b5ba8SSam Ravnborg sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO, 1312a88b5ba8SSam Ravnborg tb->dev_mondo_pa, 0); 1313a88b5ba8SSam Ravnborg sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR, 1314a88b5ba8SSam Ravnborg tb->resum_mondo_pa, 0); 1315a88b5ba8SSam Ravnborg sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR, 1316a88b5ba8SSam Ravnborg tb->nonresum_mondo_pa, 0); 1317a88b5ba8SSam Ravnborg } 1318a88b5ba8SSam Ravnborg 1319fb1fece5SKOSAKI Motohiro cpumask_clear_cpu(cpu, &smp_commenced_mask); 1320a88b5ba8SSam Ravnborg membar_safe("#Sync"); 1321a88b5ba8SSam Ravnborg 1322a88b5ba8SSam Ravnborg local_irq_disable(); 1323a88b5ba8SSam Ravnborg 1324a88b5ba8SSam Ravnborg __asm__ __volatile__( 1325a88b5ba8SSam Ravnborg "rdpr %%pstate, %0\n\t" 1326a88b5ba8SSam Ravnborg "wrpr %0, %1, %%pstate" 1327a88b5ba8SSam Ravnborg : "=r" (pstate) 1328a88b5ba8SSam Ravnborg : "i" (PSTATE_IE)); 1329a88b5ba8SSam Ravnborg 1330a88b5ba8SSam Ravnborg while (1) 1331a88b5ba8SSam Ravnborg barrier(); 1332a88b5ba8SSam Ravnborg } 1333a88b5ba8SSam Ravnborg 1334a88b5ba8SSam Ravnborg int __cpu_disable(void) 1335a88b5ba8SSam Ravnborg { 1336a88b5ba8SSam Ravnborg int cpu = smp_processor_id(); 1337a88b5ba8SSam Ravnborg cpuinfo_sparc *c; 1338a88b5ba8SSam Ravnborg int i; 1339a88b5ba8SSam Ravnborg 1340fb1fece5SKOSAKI Motohiro for_each_cpu(i, &cpu_core_map[cpu]) 1341fb1fece5SKOSAKI Motohiro cpumask_clear_cpu(cpu, &cpu_core_map[i]); 1342fb1fece5SKOSAKI Motohiro cpumask_clear(&cpu_core_map[cpu]); 1343a88b5ba8SSam Ravnborg 1344fb1fece5SKOSAKI Motohiro for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu)) 1345fb1fece5SKOSAKI Motohiro cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i)); 1346fb1fece5SKOSAKI Motohiro cpumask_clear(&per_cpu(cpu_sibling_map, cpu)); 1347a88b5ba8SSam Ravnborg 1348a88b5ba8SSam Ravnborg c = &cpu_data(cpu); 1349a88b5ba8SSam Ravnborg 1350a88b5ba8SSam Ravnborg c->core_id = 0; 1351a88b5ba8SSam Ravnborg c->proc_id = -1; 1352a88b5ba8SSam Ravnborg 1353a88b5ba8SSam Ravnborg smp_wmb(); 1354a88b5ba8SSam Ravnborg 1355a88b5ba8SSam Ravnborg /* Make sure no interrupts point to this cpu. */ 1356a88b5ba8SSam Ravnborg fixup_irqs(); 1357a88b5ba8SSam Ravnborg 1358a88b5ba8SSam Ravnborg local_irq_enable(); 1359a88b5ba8SSam Ravnborg mdelay(1); 1360a88b5ba8SSam Ravnborg local_irq_disable(); 1361a88b5ba8SSam Ravnborg 1362fb1fece5SKOSAKI Motohiro set_cpu_online(cpu, false); 1363a88b5ba8SSam Ravnborg 1364280ff974SHong H. Pham cpu_map_rebuild(); 1365280ff974SHong H. Pham 1366a88b5ba8SSam Ravnborg return 0; 1367a88b5ba8SSam Ravnborg } 1368a88b5ba8SSam Ravnborg 1369a88b5ba8SSam Ravnborg void __cpu_die(unsigned int cpu) 1370a88b5ba8SSam Ravnborg { 1371a88b5ba8SSam Ravnborg int i; 1372a88b5ba8SSam Ravnborg 1373a88b5ba8SSam Ravnborg for (i = 0; i < 100; i++) { 1374a88b5ba8SSam Ravnborg smp_rmb(); 1375fb1fece5SKOSAKI Motohiro if (!cpumask_test_cpu(cpu, &smp_commenced_mask)) 1376a88b5ba8SSam Ravnborg break; 1377a88b5ba8SSam Ravnborg msleep(100); 1378a88b5ba8SSam Ravnborg } 1379fb1fece5SKOSAKI Motohiro if (cpumask_test_cpu(cpu, &smp_commenced_mask)) { 1380a88b5ba8SSam Ravnborg printk(KERN_ERR "CPU %u didn't die...\n", cpu); 1381a88b5ba8SSam Ravnborg } else { 1382a88b5ba8SSam Ravnborg #if defined(CONFIG_SUN_LDOMS) 1383a88b5ba8SSam Ravnborg unsigned long hv_err; 1384a88b5ba8SSam Ravnborg int limit = 100; 1385a88b5ba8SSam Ravnborg 1386a88b5ba8SSam Ravnborg do { 1387a88b5ba8SSam Ravnborg hv_err = sun4v_cpu_stop(cpu); 1388a88b5ba8SSam Ravnborg if (hv_err == HV_EOK) { 1389fb1fece5SKOSAKI Motohiro set_cpu_present(cpu, false); 1390a88b5ba8SSam Ravnborg break; 1391a88b5ba8SSam Ravnborg } 1392a88b5ba8SSam Ravnborg } while (--limit > 0); 1393a88b5ba8SSam Ravnborg if (limit <= 0) { 1394a88b5ba8SSam Ravnborg printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n", 1395a88b5ba8SSam Ravnborg hv_err); 1396a88b5ba8SSam Ravnborg } 1397a88b5ba8SSam Ravnborg #endif 1398a88b5ba8SSam Ravnborg } 1399a88b5ba8SSam Ravnborg } 1400a88b5ba8SSam Ravnborg #endif 1401a88b5ba8SSam Ravnborg 1402a88b5ba8SSam Ravnborg void __init smp_cpus_done(unsigned int max_cpus) 1403a88b5ba8SSam Ravnborg { 1404a88b5ba8SSam Ravnborg } 1405a88b5ba8SSam Ravnborg 14068536e02eSVijay Kumar static void send_cpu_ipi(int cpu) 14078536e02eSVijay Kumar { 14088536e02eSVijay Kumar xcall_deliver((u64) &xcall_receive_signal, 14098536e02eSVijay Kumar 0, 0, cpumask_of(cpu)); 14108536e02eSVijay Kumar } 14118536e02eSVijay Kumar 14128536e02eSVijay Kumar void scheduler_poke(void) 14138536e02eSVijay Kumar { 14148536e02eSVijay Kumar if (!cpu_poke) 14158536e02eSVijay Kumar return; 14168536e02eSVijay Kumar 14178536e02eSVijay Kumar if (!__this_cpu_read(poke)) 14188536e02eSVijay Kumar return; 14198536e02eSVijay Kumar 14208536e02eSVijay Kumar __this_cpu_write(poke, false); 14218536e02eSVijay Kumar set_softint(1 << PIL_SMP_RECEIVE_SIGNAL); 14228536e02eSVijay Kumar } 14238536e02eSVijay Kumar 14248536e02eSVijay Kumar static unsigned long send_cpu_poke(int cpu) 14258536e02eSVijay Kumar { 14268536e02eSVijay Kumar unsigned long hv_err; 14278536e02eSVijay Kumar 14288536e02eSVijay Kumar per_cpu(poke, cpu) = true; 14298536e02eSVijay Kumar hv_err = sun4v_cpu_poke(cpu); 14308536e02eSVijay Kumar if (hv_err != HV_EOK) { 14318536e02eSVijay Kumar per_cpu(poke, cpu) = false; 14328536e02eSVijay Kumar pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n", 14338536e02eSVijay Kumar __func__, hv_err); 14348536e02eSVijay Kumar } 14358536e02eSVijay Kumar 14368536e02eSVijay Kumar return hv_err; 14378536e02eSVijay Kumar } 14388536e02eSVijay Kumar 1439a88b5ba8SSam Ravnborg void smp_send_reschedule(int cpu) 1440a88b5ba8SSam Ravnborg { 14411a36265bSKirill Tkhai if (cpu == smp_processor_id()) { 14421a36265bSKirill Tkhai WARN_ON_ONCE(preemptible()); 14431a36265bSKirill Tkhai set_softint(1 << PIL_SMP_RECEIVE_SIGNAL); 14448536e02eSVijay Kumar return; 14451a36265bSKirill Tkhai } 14468536e02eSVijay Kumar 14478536e02eSVijay Kumar /* Use cpu poke to resume idle cpu if supported. */ 14488536e02eSVijay Kumar if (cpu_poke && idle_cpu(cpu)) { 14498536e02eSVijay Kumar unsigned long ret; 14508536e02eSVijay Kumar 14518536e02eSVijay Kumar ret = send_cpu_poke(cpu); 14528536e02eSVijay Kumar if (ret == HV_EOK) 14538536e02eSVijay Kumar return; 14548536e02eSVijay Kumar } 14558536e02eSVijay Kumar 14568536e02eSVijay Kumar /* Use IPI in following cases: 14578536e02eSVijay Kumar * - cpu poke not supported 14588536e02eSVijay Kumar * - cpu not idle 14598536e02eSVijay Kumar * - send_cpu_poke() returns with error 14608536e02eSVijay Kumar */ 14618536e02eSVijay Kumar send_cpu_ipi(cpu); 14628536e02eSVijay Kumar } 14638536e02eSVijay Kumar 14648536e02eSVijay Kumar void smp_init_cpu_poke(void) 14658536e02eSVijay Kumar { 14668536e02eSVijay Kumar unsigned long major; 14678536e02eSVijay Kumar unsigned long minor; 14688536e02eSVijay Kumar int ret; 14698536e02eSVijay Kumar 14708536e02eSVijay Kumar if (tlb_type != hypervisor) 14718536e02eSVijay Kumar return; 14728536e02eSVijay Kumar 14738536e02eSVijay Kumar ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor); 14748536e02eSVijay Kumar if (ret) { 14758536e02eSVijay Kumar pr_debug("HV_GRP_CORE is not registered\n"); 14768536e02eSVijay Kumar return; 14778536e02eSVijay Kumar } 14788536e02eSVijay Kumar 14798536e02eSVijay Kumar if (major == 1 && minor >= 6) { 14808536e02eSVijay Kumar /* CPU POKE is registered. */ 14818536e02eSVijay Kumar cpu_poke = true; 14828536e02eSVijay Kumar return; 14838536e02eSVijay Kumar } 14848536e02eSVijay Kumar 14858536e02eSVijay Kumar pr_debug("CPU_POKE not supported\n"); 1486a88b5ba8SSam Ravnborg } 1487a88b5ba8SSam Ravnborg 14889960e9e8SDavid S. Miller void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) 1489a88b5ba8SSam Ravnborg { 1490a88b5ba8SSam Ravnborg clear_softint(1 << irq); 1491184748ccSPeter Zijlstra scheduler_ipi(); 1492a88b5ba8SSam Ravnborg } 1493a88b5ba8SSam Ravnborg 149494ab5990SDave Kleikamp static void stop_this_cpu(void *dummy) 149594ab5990SDave Kleikamp { 1496cffb3e76SVijay Kumar set_cpu_online(smp_processor_id(), false); 149794ab5990SDave Kleikamp prom_stopself(); 149894ab5990SDave Kleikamp } 149994ab5990SDave Kleikamp 1500a88b5ba8SSam Ravnborg void smp_send_stop(void) 1501a88b5ba8SSam Ravnborg { 150294ab5990SDave Kleikamp int cpu; 150394ab5990SDave Kleikamp 150494ab5990SDave Kleikamp if (tlb_type == hypervisor) { 15057dd4fcf5SVijay Kumar int this_cpu = smp_processor_id(); 15067dd4fcf5SVijay Kumar #ifdef CONFIG_SERIAL_SUNHV 15077dd4fcf5SVijay Kumar sunhv_migrate_hvcons_irq(this_cpu); 15087dd4fcf5SVijay Kumar #endif 150994ab5990SDave Kleikamp for_each_online_cpu(cpu) { 15107dd4fcf5SVijay Kumar if (cpu == this_cpu) 151194ab5990SDave Kleikamp continue; 1512cffb3e76SVijay Kumar 1513cffb3e76SVijay Kumar set_cpu_online(cpu, false); 151494ab5990SDave Kleikamp #ifdef CONFIG_SUN_LDOMS 151594ab5990SDave Kleikamp if (ldom_domaining_enabled) { 151694ab5990SDave Kleikamp unsigned long hv_err; 151794ab5990SDave Kleikamp hv_err = sun4v_cpu_stop(cpu); 151894ab5990SDave Kleikamp if (hv_err) 151994ab5990SDave Kleikamp printk(KERN_ERR "sun4v_cpu_stop() " 152094ab5990SDave Kleikamp "failed err=%lu\n", hv_err); 152194ab5990SDave Kleikamp } else 152294ab5990SDave Kleikamp #endif 152394ab5990SDave Kleikamp prom_stopcpu_cpuid(cpu); 152494ab5990SDave Kleikamp } 152594ab5990SDave Kleikamp } else 152694ab5990SDave Kleikamp smp_call_function(stop_this_cpu, NULL, 0); 1527a88b5ba8SSam Ravnborg } 1528a88b5ba8SSam Ravnborg 1529a70c6913STejun Heo static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) 1530bcb2107fSTejun Heo { 1531bcb2107fSTejun Heo if (cpu_to_node(from) == cpu_to_node(to)) 1532bcb2107fSTejun Heo return LOCAL_DISTANCE; 1533bcb2107fSTejun Heo else 1534bcb2107fSTejun Heo return REMOTE_DISTANCE; 15354fd78a5fSDavid S. Miller } 15364fd78a5fSDavid S. Miller 15371ca3fb3aSKefeng Wang static int __init pcpu_cpu_to_node(int cpu) 15381ca3fb3aSKefeng Wang { 15391ca3fb3aSKefeng Wang return cpu_to_node(cpu); 15401ca3fb3aSKefeng Wang } 15411ca3fb3aSKefeng Wang 1542a70c6913STejun Heo static void __init pcpu_populate_pte(unsigned long addr) 1543a70c6913STejun Heo { 1544a70c6913STejun Heo pgd_t *pgd = pgd_offset_k(addr); 15455637bc50SMike Rapoport p4d_t *p4d; 1546a70c6913STejun Heo pud_t *pud; 1547a70c6913STejun Heo pmd_t *pmd; 1548a70c6913STejun Heo 1549ac55c768SDavid S. Miller if (pgd_none(*pgd)) { 1550ac55c768SDavid S. Miller pud_t *new; 1551ac55c768SDavid S. Miller 15524fc4a09eSMike Rapoport new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); 1553b1e1c869SMike Rapoport if (!new) 1554b1e1c869SMike Rapoport goto err_alloc; 1555ac55c768SDavid S. Miller pgd_populate(&init_mm, pgd, new); 1556ac55c768SDavid S. Miller } 1557ac55c768SDavid S. Miller 15585637bc50SMike Rapoport p4d = p4d_offset(pgd, addr); 15595637bc50SMike Rapoport if (p4d_none(*p4d)) { 15605637bc50SMike Rapoport pud_t *new; 15615637bc50SMike Rapoport 15625637bc50SMike Rapoport new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); 15635637bc50SMike Rapoport if (!new) 15645637bc50SMike Rapoport goto err_alloc; 15655637bc50SMike Rapoport p4d_populate(&init_mm, p4d, new); 15665637bc50SMike Rapoport } 15675637bc50SMike Rapoport 15685637bc50SMike Rapoport pud = pud_offset(p4d, addr); 1569a70c6913STejun Heo if (pud_none(*pud)) { 1570a70c6913STejun Heo pmd_t *new; 1571a70c6913STejun Heo 15724fc4a09eSMike Rapoport new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); 1573b1e1c869SMike Rapoport if (!new) 1574b1e1c869SMike Rapoport goto err_alloc; 1575a70c6913STejun Heo pud_populate(&init_mm, pud, new); 1576a70c6913STejun Heo } 1577a70c6913STejun Heo 1578a70c6913STejun Heo pmd = pmd_offset(pud, addr); 1579a70c6913STejun Heo if (!pmd_present(*pmd)) { 1580a70c6913STejun Heo pte_t *new; 1581a70c6913STejun Heo 15824fc4a09eSMike Rapoport new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); 1583b1e1c869SMike Rapoport if (!new) 1584b1e1c869SMike Rapoport goto err_alloc; 1585a70c6913STejun Heo pmd_populate_kernel(&init_mm, pmd, new); 1586a70c6913STejun Heo } 1587b1e1c869SMike Rapoport 1588b1e1c869SMike Rapoport return; 1589b1e1c869SMike Rapoport 1590b1e1c869SMike Rapoport err_alloc: 1591b1e1c869SMike Rapoport panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n", 1592b1e1c869SMike Rapoport __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); 1593a70c6913STejun Heo } 1594a70c6913STejun Heo 159573fffc03SDavid S. Miller void __init setup_per_cpu_areas(void) 1596a88b5ba8SSam Ravnborg { 1597bcb2107fSTejun Heo unsigned long delta; 1598bcb2107fSTejun Heo unsigned int cpu; 1599a70c6913STejun Heo int rc = -EINVAL; 1600a88b5ba8SSam Ravnborg 1601a70c6913STejun Heo if (pcpu_chosen_fc != PCPU_FC_PAGE) { 1602bcb2107fSTejun Heo rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, 1603bcb2107fSTejun Heo PERCPU_DYNAMIC_RESERVE, 4 << 20, 1604a70c6913STejun Heo pcpu_cpu_distance, 1605*23f91716SKefeng Wang pcpu_cpu_to_node); 1606fb435d52STejun Heo if (rc) 1607eb1414ecSKefeng Wang pr_warn("PERCPU: %s allocator failed (%d), " 1608a70c6913STejun Heo "falling back to page size\n", 1609a70c6913STejun Heo pcpu_fc_names[pcpu_chosen_fc], rc); 1610a70c6913STejun Heo } 1611a70c6913STejun Heo if (rc < 0) 1612a70c6913STejun Heo rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, 16131ca3fb3aSKefeng Wang pcpu_cpu_to_node, 1614a70c6913STejun Heo pcpu_populate_pte); 1615a70c6913STejun Heo if (rc < 0) 1616a70c6913STejun Heo panic("cannot initialize percpu area (err=%d)", rc); 16174fd78a5fSDavid S. Miller 16184fd78a5fSDavid S. Miller delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 1619fb435d52STejun Heo for_each_possible_cpu(cpu) 1620fb435d52STejun Heo __per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu]; 1621a88b5ba8SSam Ravnborg 1622a88b5ba8SSam Ravnborg /* Setup %g5 for the boot cpu. */ 1623a88b5ba8SSam Ravnborg __local_per_cpu_offset = __per_cpu_offset(smp_processor_id()); 1624b696fdc2SDavid S. Miller 1625b696fdc2SDavid S. Miller of_fill_in_cpu_data(); 1626b696fdc2SDavid S. Miller if (tlb_type == hypervisor) 16276ac5c610SStephen Rothwell mdesc_fill_in_cpu_data(cpu_all_mask); 1628a88b5ba8SSam Ravnborg } 1629