/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>
#include <asm/pcr.h>

#include "cpumap.h"

int sparc64_multi_core __read_mostly;

DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

static cpumask_t smp_commenced_mask;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();

	set_cpu_online(cpuid, true);
	local_irq_enable();

	/* idle thread is expected to have preempt disabled */
	preempt_disable();

	cpu_startup_entry(CPUHP_ONLINE);
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_safe("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}
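/* Illustrative sketch only -- tick_midpoint() is a hypothetical helper,
 * not used by this file.  The overflow-safe average at the end of
 * get_delta() is equivalent to:
 */
static inline unsigned long tick_midpoint(unsigned long t0, unsigned long t1)
{
	/* (t0 + t1) / 2 without ever forming t0 + t1, which could
	 * overflow; the halved values are summed and the two dropped
	 * low bits are compensated for when both inputs are odd.
	 */
	return t0/2 + t1/2 + (t0 % 2 + t1 % 2) / 2;
}

/* get_delta() thus returns tick_midpoint(best_t0, best_t1) - best_tm:
 * the estimated slave-minus-master tick offset at the midpoint of the
 * shortest observed round trip.
 */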
void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0)
				done = 1;	/* let's lock on to this... */

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);
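/* Per-sample handshake between the two sides, as implemented by
 * get_delta() above (slave) and the loop in smp_synchronize_one_tick()
 * below (master); descriptive summary only:
 *
 *	slave				master
 *	-----				------
 *	t0 = get_tick()
 *	go[MASTER] = 1		---->	sees go[MASTER] != 0, clears it
 *					go[SLAVE] = get_tick()
 *	tm = go[SLAVE]		<----
 *	go[SLAVE] = 0
 *	t1 = get_tick()
 *
 * The offset estimate is then computed from t0, t1 and tm as described
 * in get_delta().
 */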
static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_safe("#StoreLoad");
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
/* XXX Put this in some common place. XXX */
static unsigned long kimage_addr_to_ra(void *p)
{
	unsigned long val = (unsigned long) p;

	return kern_base + (val - KERNBASE);
}

static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
				void **descrp)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}
	*descrp = hdesc;

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte   = tte_data;
		tte_vaddr += 0x400000;
		tte_data  += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	void *descr = NULL;
	int timeout, ret;

	callin_flag = 0;
	cpu_new_thread = task_thread_info(idle);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread,
					    &descr);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->phandle, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	kfree(descr);

	return ret;
}
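/* Both the Spitfire and Cheetah delivery paths below compute the
 * interrupt dispatch address the same way.  A hypothetical helper,
 * shown for illustration only (the code open-codes the expression):
 */
static inline u64 intr_dispatch_addr(unsigned long mid)
{
	/* The target's module ID (upaid/ITID) goes at bit 14; 0x70 is
	 * the dispatch register offset used with ASI_INTR_W.
	 */
	return (mid << 14) | 0x70;
}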
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (result)
				     : "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	u64 *mondo, data0, data1, data2;
	u16 *cpu_list;
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);
	data0 = mondo[0];
	data1 = mondo[1];
	data2 = mondo[2];
	for (i = 0; i < cnt; i++)
		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}

/* Cheetah now allows sending the whole 64 bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int nack_busy_id, is_jbus, need_more;
	u64 *mondo, pstate, ver, busy_mask;
	u16 *cpu_list;

	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for (i = 0; i < cnt; i++) {
			u64 target, nr;

			nr = cpu_list[i];
			if (nr == 0xffff)
				continue;

			target = (nr << 14) | 0x70;
			if (is_jbus) {
				busy_mask |= (0x1UL << (nr * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, this_cnt = 0;
					for (i = 0; i < cnt; i++) {
						if (cpu_list[i] == 0xffff)
							continue;
						cpu_list[i] = 0xffff;
						this_cnt++;
						if (this_cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < cnt; i++) {
				u64 check_mask, nr;

				nr = cpu_list[i];
				if (nr == 0xffff)
					continue;

				if (is_jbus)
					check_mask = (0x2UL << (2*nr));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_list[i] = 0xffff;
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}
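/* Bit layout assumed by the busy/nack polling above (descriptive note
 * only): each dispatch slot i -- the target's ITID on JBUS parts,
 * nack_busy_id otherwise -- owns two adjacent bits of the dispatch
 * status register: busy at bit 2*i, nack at bit 2*i + 1.  Hence
 * nack_mask = busy_mask << 1, and a slot's nack bit is tested with
 * 0x2UL << (2*i).
 */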
/* Multi-cpu list version.  */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int retries, this_cpu, prev_sent, i, saw_cpu_error;
	unsigned long status;
	u16 *cpu_list;

	this_cpu = smp_processor_id();

	cpu_list = __va(tb->cpu_list_pa);

	saw_cpu_error = 0;
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done.  */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err == HV_CPU_STATE_ERROR) {
					saw_cpu_error = (cpu + 1);
					cpu_list[i] = 0xffff;
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	if (unlikely(saw_cpu_error))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "(including %d) were in error state\n",
	       this_cpu, saw_cpu_error - 1);
	return;

fatal_mondo_timeout:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       "progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}

static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	struct trap_per_cpu *tb;
	int this_cpu, i, cnt;
	unsigned long flags;
	u16 *cpu_list;
	u64 *mondo;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list.  */
	cnt = 0;
	for_each_cpu(i, mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}

/* Send cross call to all processors mentioned in MASK_P
 * except self.  Really, there are only two cases currently,
 * "cpu_online_mask" and "mm_cpumask(mm)".
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	xcall_deliver(data0, data1, data2, mask);
}

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}
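/* Minimal sketch of the data0 encoding built by smp_cross_call_masked()
 * above; xcall_encode() is a hypothetical helper, not used by this
 * file.  The MMU context rides in the upper 32 bits and the xcall
 * handler's address in the lower 32 (the masking assumes the handler
 * lives in kernel text that fits in 32 bits).
 */
static inline u64 xcall_encode(u32 ctx, unsigned long *func)
{
	return (((u64)ctx) << 32) | (((u64)func) & 0xffffffff);
}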
extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      cpumask_of(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      cpumask_of(cpu));
}

void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_interrupt();
}

void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_single_interrupt();
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
extern unsigned long xcall_fetch_glob_pmu_n4;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	preempt_disable();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
	__local_flush_dcache_page(page);

	preempt_enable();
}
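/* Note on the two d-cache flush paths above (descriptive only): data0
 * carries the address of the xcall handler the remote cpu should run,
 * and on Spitfire bit 32 is set when the page has a mapping --
 * mirroring __local_flush_dcache_page(), where the same condition
 * additionally selects an I-cache flush.
 */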
void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

void smp_fetch_global_pmu(void)
{
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
	else
		smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus on which to run the
 *    TLB flush.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

struct tlb_pending_info {
	unsigned long ctx;
	unsigned long nr;
	unsigned long *vaddrs;
};

static void tlb_pending_func(void *info)
{
	struct tlb_pending_info *t = info;

	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}
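/* tlb_pending_info exists only to funnel three arguments through the
 * single "void *info" parameter of smp_call_function_many().  The
 * struct lives on the caller's stack, which is safe because the call
 * below waits (wait == 1) for tlb_pending_func() to finish everywhere.
 */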
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	struct tlb_pending_info info;
	int cpu = get_cpu();

	info.ctx = ctx;
	info.nr = nr;
	info.vaddrs = vaddrs;

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
				       &info, 1);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long context = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_cross_call_masked(&xcall_flush_tlb_page,
				      context, vaddr, 0,
				      mm_cpumask(mm));
	__flush_tlb_page(context, vaddr);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}
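/* Usage sketch for the capture API above (illustrative only; the real
 * callers live elsewhere in the tree):
 *
 *	smp_capture();
 *	... touch state that no other cpu may race with ...
 *	smp_release();
 *
 * While captured, every other online cpu spins in
 * smp_penguin_jailcell() with PSTATE_IE set, so tlb flush xcalls
 * still get serviced.  Nesting is safe: smp_capture_depth ensures
 * only the outermost capture/release pair sends the xcall and lifts
 * the capture.
 */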
1209a88b5ba8SSam Ravnborg /* /proc/profile writes can call this, don't __init it please. */
1210a88b5ba8SSam Ravnborg int setup_profiling_timer(unsigned int multiplier)
1211a88b5ba8SSam Ravnborg {
1212a88b5ba8SSam Ravnborg 	return -EINVAL;
1213a88b5ba8SSam Ravnborg }
1214a88b5ba8SSam Ravnborg 
1215a88b5ba8SSam Ravnborg void __init smp_prepare_cpus(unsigned int max_cpus)
1216a88b5ba8SSam Ravnborg {
1217a88b5ba8SSam Ravnborg }
1218a88b5ba8SSam Ravnborg 
12197c9503b8SGreg Kroah-Hartman void smp_prepare_boot_cpu(void)
1220a88b5ba8SSam Ravnborg {
1221a88b5ba8SSam Ravnborg }
1222a88b5ba8SSam Ravnborg 
1223a88b5ba8SSam Ravnborg void __init smp_setup_processor_id(void)
1224a88b5ba8SSam Ravnborg {
1225a88b5ba8SSam Ravnborg 	if (tlb_type == spitfire)
1226a88b5ba8SSam Ravnborg 		xcall_deliver_impl = spitfire_xcall_deliver;
1227a88b5ba8SSam Ravnborg 	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
1228a88b5ba8SSam Ravnborg 		xcall_deliver_impl = cheetah_xcall_deliver;
1229a88b5ba8SSam Ravnborg 	else
1230a88b5ba8SSam Ravnborg 		xcall_deliver_impl = hypervisor_xcall_deliver;
1231a88b5ba8SSam Ravnborg }
1232a88b5ba8SSam Ravnborg 
12337c9503b8SGreg Kroah-Hartman void smp_fill_in_sib_core_maps(void)
1234a88b5ba8SSam Ravnborg {
1235a88b5ba8SSam Ravnborg 	unsigned int i;
1236a88b5ba8SSam Ravnborg 
1237a88b5ba8SSam Ravnborg 	for_each_present_cpu(i) {
1238a88b5ba8SSam Ravnborg 		unsigned int j;
1239a88b5ba8SSam Ravnborg 
1240fb1fece5SKOSAKI Motohiro 		cpumask_clear(&cpu_core_map[i]);
1241a88b5ba8SSam Ravnborg 		if (cpu_data(i).core_id == 0) {
1242fb1fece5SKOSAKI Motohiro 			cpumask_set_cpu(i, &cpu_core_map[i]);
1243a88b5ba8SSam Ravnborg 			continue;
1244a88b5ba8SSam Ravnborg 		}
1245a88b5ba8SSam Ravnborg 
1246a88b5ba8SSam Ravnborg 		for_each_present_cpu(j) {
1247a88b5ba8SSam Ravnborg 			if (cpu_data(i).core_id ==
1248a88b5ba8SSam Ravnborg 			    cpu_data(j).core_id)
1249fb1fece5SKOSAKI Motohiro 				cpumask_set_cpu(j, &cpu_core_map[i]);
1250a88b5ba8SSam Ravnborg 		}
1251a88b5ba8SSam Ravnborg 	}
1252a88b5ba8SSam Ravnborg 
1253a88b5ba8SSam Ravnborg 	for_each_present_cpu(i) {
1254a88b5ba8SSam Ravnborg 		unsigned int j;
1255a88b5ba8SSam Ravnborg 
1256fb1fece5SKOSAKI Motohiro 		cpumask_clear(&per_cpu(cpu_sibling_map, i));
1257a88b5ba8SSam Ravnborg 		if (cpu_data(i).proc_id == -1) {
1258fb1fece5SKOSAKI Motohiro 			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
1259a88b5ba8SSam Ravnborg 			continue;
1260a88b5ba8SSam Ravnborg 		}
1261a88b5ba8SSam Ravnborg 
1262a88b5ba8SSam Ravnborg 		for_each_present_cpu(j) {
1263a88b5ba8SSam Ravnborg 			if (cpu_data(i).proc_id ==
1264a88b5ba8SSam Ravnborg 			    cpu_data(j).proc_id)
1265fb1fece5SKOSAKI Motohiro 				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
1266a88b5ba8SSam Ravnborg 		}
1267a88b5ba8SSam Ravnborg 	}
1268a88b5ba8SSam Ravnborg }
1269a88b5ba8SSam Ravnborg 
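/*
 * After the two passes above, each present CPU i ends up with a
 * symmetric view of the topology: cpu_core_map[i] holds the CPUs
 * sharing its core_id, and per_cpu(cpu_sibling_map, i) the hardware
 * threads sharing its proc_id.  Hedged sketch of a consumer
 * (illustrative only):
 *
 *	int j;
 *
 *	for_each_cpu(j, &cpu_core_map[cpu])
 *		pr_info("cpu%d shares a core with cpu%d\n", cpu, j);
 */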
12702066aaddSPaul Gortmaker int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1271a88b5ba8SSam Ravnborg {
1272f0a2bc7eSThomas Gleixner 	int ret = smp_boot_one_cpu(cpu, tidle);
1273a88b5ba8SSam Ravnborg 
1274a88b5ba8SSam Ravnborg 	if (!ret) {
1275fb1fece5SKOSAKI Motohiro 		cpumask_set_cpu(cpu, &smp_commenced_mask);
1276fb1fece5SKOSAKI Motohiro 		while (!cpu_online(cpu))
1277a88b5ba8SSam Ravnborg 			mb();
1278fb1fece5SKOSAKI Motohiro 		if (!cpu_online(cpu)) {
1279a88b5ba8SSam Ravnborg 			ret = -ENODEV;
1280a88b5ba8SSam Ravnborg 		} else {
1281a88b5ba8SSam Ravnborg 			/* On SUN4V, writes to %tick and %stick are
1282a88b5ba8SSam Ravnborg 			 * not allowed.
1283a88b5ba8SSam Ravnborg 			 */
1284a88b5ba8SSam Ravnborg 			if (tlb_type != hypervisor)
1285a88b5ba8SSam Ravnborg 				smp_synchronize_one_tick(cpu);
1286a88b5ba8SSam Ravnborg 		}
1287a88b5ba8SSam Ravnborg 	}
1288a88b5ba8SSam Ravnborg 	return ret;
1289a88b5ba8SSam Ravnborg }
1290a88b5ba8SSam Ravnborg 
1291a88b5ba8SSam Ravnborg #ifdef CONFIG_HOTPLUG_CPU
1292a88b5ba8SSam Ravnborg void cpu_play_dead(void)
1293a88b5ba8SSam Ravnborg {
1294a88b5ba8SSam Ravnborg 	int cpu = smp_processor_id();
1295a88b5ba8SSam Ravnborg 	unsigned long pstate;
1296a88b5ba8SSam Ravnborg 
1297a88b5ba8SSam Ravnborg 	idle_task_exit();
1298a88b5ba8SSam Ravnborg 
1299a88b5ba8SSam Ravnborg 	if (tlb_type == hypervisor) {
1300a88b5ba8SSam Ravnborg 		struct trap_per_cpu *tb = &trap_block[cpu];
1301a88b5ba8SSam Ravnborg 
1302a88b5ba8SSam Ravnborg 		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
1303a88b5ba8SSam Ravnborg 				tb->cpu_mondo_pa, 0);
1304a88b5ba8SSam Ravnborg 		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
1305a88b5ba8SSam Ravnborg 				tb->dev_mondo_pa, 0);
1306a88b5ba8SSam Ravnborg 		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
1307a88b5ba8SSam Ravnborg 				tb->resum_mondo_pa, 0);
1308a88b5ba8SSam Ravnborg 		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
1309a88b5ba8SSam Ravnborg 				tb->nonresum_mondo_pa, 0);
1310a88b5ba8SSam Ravnborg 	}
1311a88b5ba8SSam Ravnborg 
1312fb1fece5SKOSAKI Motohiro 	cpumask_clear_cpu(cpu, &smp_commenced_mask);
1313a88b5ba8SSam Ravnborg 	membar_safe("#Sync");
1314a88b5ba8SSam Ravnborg 
1315a88b5ba8SSam Ravnborg 	local_irq_disable();
1316a88b5ba8SSam Ravnborg 
1317a88b5ba8SSam Ravnborg 	__asm__ __volatile__(
1318a88b5ba8SSam Ravnborg 		"rdpr %%pstate, %0\n\t"
1319a88b5ba8SSam Ravnborg 		"wrpr %0, %1, %%pstate"
1320a88b5ba8SSam Ravnborg 		: "=r" (pstate)
1321a88b5ba8SSam Ravnborg 		: "i" (PSTATE_IE));
1322a88b5ba8SSam Ravnborg 
1323a88b5ba8SSam Ravnborg 	while (1)
1324a88b5ba8SSam Ravnborg 		barrier();
1325a88b5ba8SSam Ravnborg }
1326a88b5ba8SSam Ravnborg 
1327a88b5ba8SSam Ravnborg int __cpu_disable(void)
1328a88b5ba8SSam Ravnborg {
1329a88b5ba8SSam Ravnborg 	int cpu = smp_processor_id();
1330a88b5ba8SSam Ravnborg 	cpuinfo_sparc *c;
1331a88b5ba8SSam Ravnborg 	int i;
1332a88b5ba8SSam Ravnborg 
1333fb1fece5SKOSAKI Motohiro 	for_each_cpu(i, &cpu_core_map[cpu])
1334fb1fece5SKOSAKI Motohiro 		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
1335fb1fece5SKOSAKI Motohiro 	cpumask_clear(&cpu_core_map[cpu]);
1336a88b5ba8SSam Ravnborg 
1337fb1fece5SKOSAKI Motohiro 	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
1338fb1fece5SKOSAKI Motohiro 		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
1339fb1fece5SKOSAKI Motohiro 	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
1340a88b5ba8SSam Ravnborg 
1341a88b5ba8SSam Ravnborg 	c = &cpu_data(cpu);
1342a88b5ba8SSam Ravnborg 
1343a88b5ba8SSam Ravnborg 	c->core_id = 0;
1344a88b5ba8SSam Ravnborg 	c->proc_id = -1;
1345a88b5ba8SSam Ravnborg 
1346a88b5ba8SSam Ravnborg 	smp_wmb();
1347a88b5ba8SSam Ravnborg 
1348a88b5ba8SSam Ravnborg 	/* Make sure no interrupts point to this cpu. */
1349a88b5ba8SSam Ravnborg 	fixup_irqs();
1350a88b5ba8SSam Ravnborg 
1351a88b5ba8SSam Ravnborg 	local_irq_enable();
1352a88b5ba8SSam Ravnborg 	mdelay(1);
1353a88b5ba8SSam Ravnborg 	local_irq_disable();
1354a88b5ba8SSam Ravnborg 
1355fb1fece5SKOSAKI Motohiro 	set_cpu_online(cpu, false);
1356a88b5ba8SSam Ravnborg 
1357280ff974SHong H. Pham 	cpu_map_rebuild();
1358280ff974SHong H. Pham 
1359a88b5ba8SSam Ravnborg 	return 0;
1360a88b5ba8SSam Ravnborg }
1361a88b5ba8SSam Ravnborg 
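/*
 * Rough offline sequence under CONFIG_HOTPLUG_CPU (a sketch of the
 * generic hotplug flow, not code from this file): the dying CPU runs
 * __cpu_disable() to drop out of the topology maps and redirect its
 * interrupts, then parks itself in cpu_play_dead(); a sponsoring CPU
 * polls in __cpu_die() below until the victim leaves
 * smp_commenced_mask and, on LDOMs, finally issues sun4v_cpu_stop():
 *
 *	ret = __cpu_disable();   <-- on the dying CPU
 *	...                      <-- victim spins in cpu_play_dead()
 *	__cpu_die(cpu);          <-- sponsor waits, then stops it
 */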
1362a88b5ba8SSam Ravnborg void __cpu_die(unsigned int cpu)
1363a88b5ba8SSam Ravnborg {
1364a88b5ba8SSam Ravnborg 	int i;
1365a88b5ba8SSam Ravnborg 
1366a88b5ba8SSam Ravnborg 	for (i = 0; i < 100; i++) {
1367a88b5ba8SSam Ravnborg 		smp_rmb();
1368fb1fece5SKOSAKI Motohiro 		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
1369a88b5ba8SSam Ravnborg 			break;
1370a88b5ba8SSam Ravnborg 		msleep(100);
1371a88b5ba8SSam Ravnborg 	}
1372fb1fece5SKOSAKI Motohiro 	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
1373a88b5ba8SSam Ravnborg 		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
1374a88b5ba8SSam Ravnborg 	} else {
1375a88b5ba8SSam Ravnborg #if defined(CONFIG_SUN_LDOMS)
1376a88b5ba8SSam Ravnborg 		unsigned long hv_err;
1377a88b5ba8SSam Ravnborg 		int limit = 100;
1378a88b5ba8SSam Ravnborg 
1379a88b5ba8SSam Ravnborg 		do {
1380a88b5ba8SSam Ravnborg 			hv_err = sun4v_cpu_stop(cpu);
1381a88b5ba8SSam Ravnborg 			if (hv_err == HV_EOK) {
1382fb1fece5SKOSAKI Motohiro 				set_cpu_present(cpu, false);
1383a88b5ba8SSam Ravnborg 				break;
1384a88b5ba8SSam Ravnborg 			}
1385a88b5ba8SSam Ravnborg 		} while (--limit > 0);
1386a88b5ba8SSam Ravnborg 		if (limit <= 0) {
1387a88b5ba8SSam Ravnborg 			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
1388a88b5ba8SSam Ravnborg 			       hv_err);
1389a88b5ba8SSam Ravnborg 		}
1390a88b5ba8SSam Ravnborg #endif
1391a88b5ba8SSam Ravnborg 	}
1392a88b5ba8SSam Ravnborg }
1393a88b5ba8SSam Ravnborg #endif
1394a88b5ba8SSam Ravnborg 
1395a88b5ba8SSam Ravnborg void __init smp_cpus_done(unsigned int max_cpus)
1396a88b5ba8SSam Ravnborg {
1397b62818e5SDavid S. Miller 	pcr_arch_init();
1398a88b5ba8SSam Ravnborg }
1399a88b5ba8SSam Ravnborg 
1400a88b5ba8SSam Ravnborg void smp_send_reschedule(int cpu)
1401a88b5ba8SSam Ravnborg {
1402*1a36265bSKirill Tkhai 	if (cpu == smp_processor_id()) {
1403*1a36265bSKirill Tkhai 		WARN_ON_ONCE(preemptible());
1404*1a36265bSKirill Tkhai 		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
1405*1a36265bSKirill Tkhai 	} else {
1406*1a36265bSKirill Tkhai 		xcall_deliver((u64) &xcall_receive_signal,
1407*1a36265bSKirill Tkhai 			      0, 0, cpumask_of(cpu));
1408*1a36265bSKirill Tkhai 	}
1409a88b5ba8SSam Ravnborg }
1410a88b5ba8SSam Ravnborg 
14119960e9e8SDavid S. Miller void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
1412a88b5ba8SSam Ravnborg {
1413a88b5ba8SSam Ravnborg 	clear_softint(1 << irq);
1414184748ccSPeter Zijlstra 	scheduler_ipi();
1415a88b5ba8SSam Ravnborg }
1416a88b5ba8SSam Ravnborg 
1417a88b5ba8SSam Ravnborg /* This is a nop because we capture all other cpus
1418a88b5ba8SSam Ravnborg  * anyways when making the PROM active.
1419a88b5ba8SSam Ravnborg  */
1420a88b5ba8SSam Ravnborg void smp_send_stop(void)
1421a88b5ba8SSam Ravnborg {
1422a88b5ba8SSam Ravnborg }
1423a88b5ba8SSam Ravnborg 
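/*
 * Reschedule IPI round trip, sketched: smp_send_reschedule() either
 * raises the softint directly on the local CPU or cross-calls
 * xcall_receive_signal at the target, whose
 * smp_receive_signal_client() acks the softint and hands control to
 * the scheduler (illustrative trace, not code from this file):
 *
 *	smp_send_reschedule(cpu);    <-- sender: xcall or local softint
 *	...
 *	clear_softint(1 << irq);     <-- target, then scheduler_ipi()
 */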
14244fd78a5fSDavid S. Miller /**
14254fd78a5fSDavid S. Miller  * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
14264fd78a5fSDavid S. Miller  * @cpu: cpu to allocate for
14274fd78a5fSDavid S. Miller  * @size: size allocation in bytes
14284fd78a5fSDavid S. Miller  * @align: alignment
14294fd78a5fSDavid S. Miller  *
14304fd78a5fSDavid S. Miller  * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
14314fd78a5fSDavid S. Miller  * does the right thing for NUMA regardless of the current
14324fd78a5fSDavid S. Miller  * configuration.
14334fd78a5fSDavid S. Miller  *
14344fd78a5fSDavid S. Miller  * RETURNS:
14354fd78a5fSDavid S. Miller  * Pointer to the allocated area on success, NULL on failure.
14364fd78a5fSDavid S. Miller  */
1437bcb2107fSTejun Heo static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
1438bcb2107fSTejun Heo 					size_t align)
14394fd78a5fSDavid S. Miller {
14404fd78a5fSDavid S. Miller 	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
14414fd78a5fSDavid S. Miller #ifdef CONFIG_NEED_MULTIPLE_NODES
14424fd78a5fSDavid S. Miller 	int node = cpu_to_node(cpu);
14434fd78a5fSDavid S. Miller 	void *ptr;
14444fd78a5fSDavid S. Miller 
14454fd78a5fSDavid S. Miller 	if (!node_online(node) || !NODE_DATA(node)) {
14464fd78a5fSDavid S. Miller 		ptr = __alloc_bootmem(size, align, goal);
14474fd78a5fSDavid S. Miller 		pr_info("cpu %d has no node %d or node-local memory\n",
14484fd78a5fSDavid S. Miller 			cpu, node);
14494fd78a5fSDavid S. Miller 		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
14504fd78a5fSDavid S. Miller 			 cpu, size, __pa(ptr));
14514fd78a5fSDavid S. Miller 	} else {
14524fd78a5fSDavid S. Miller 		ptr = __alloc_bootmem_node(NODE_DATA(node),
14534fd78a5fSDavid S. Miller 					   size, align, goal);
14544fd78a5fSDavid S. Miller 		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
14554fd78a5fSDavid S. Miller 			 "%016lx\n", cpu, size, node, __pa(ptr));
14564fd78a5fSDavid S. Miller 	}
14574fd78a5fSDavid S. Miller 	return ptr;
14584fd78a5fSDavid S. Miller #else
14594fd78a5fSDavid S. Miller 	return __alloc_bootmem(size, align, goal);
14604fd78a5fSDavid S. Miller #endif
14614fd78a5fSDavid S. Miller }
14624fd78a5fSDavid S. Miller 
1463bcb2107fSTejun Heo static void __init pcpu_free_bootmem(void *ptr, size_t size)
14644fd78a5fSDavid S. Miller {
1465bcb2107fSTejun Heo 	free_bootmem(__pa(ptr), size);
14664fd78a5fSDavid S. Miller }
14674fd78a5fSDavid S. Miller 
1468a70c6913STejun Heo static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
1469bcb2107fSTejun Heo {
1470bcb2107fSTejun Heo 	if (cpu_to_node(from) == cpu_to_node(to))
1471bcb2107fSTejun Heo 		return LOCAL_DISTANCE;
1472bcb2107fSTejun Heo 	else
1473bcb2107fSTejun Heo 		return REMOTE_DISTANCE;
14744fd78a5fSDavid S. Miller }
14754fd78a5fSDavid S. Miller 
1476a70c6913STejun Heo static void __init pcpu_populate_pte(unsigned long addr)
1477a70c6913STejun Heo {
1478a70c6913STejun Heo 	pgd_t *pgd = pgd_offset_k(addr);
1479a70c6913STejun Heo 	pud_t *pud;
1480a70c6913STejun Heo 	pmd_t *pmd;
1481a70c6913STejun Heo 
1482a70c6913STejun Heo 	pud = pud_offset(pgd, addr);
1483a70c6913STejun Heo 	if (pud_none(*pud)) {
1484a70c6913STejun Heo 		pmd_t *new;
1485a70c6913STejun Heo 
1486a70c6913STejun Heo 		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1487a70c6913STejun Heo 		pud_populate(&init_mm, pud, new);
1488a70c6913STejun Heo 	}
1489a70c6913STejun Heo 
1490a70c6913STejun Heo 	pmd = pmd_offset(pud, addr);
1491a70c6913STejun Heo 	if (!pmd_present(*pmd)) {
1492a70c6913STejun Heo 		pte_t *new;
1493a70c6913STejun Heo 
1494a70c6913STejun Heo 		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1495a70c6913STejun Heo 		pmd_populate_kernel(&init_mm, pmd, new);
1496a70c6913STejun Heo 	}
1497a70c6913STejun Heo }
1498a70c6913STejun Heo 
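/*
 * The four helpers above are the sparc64 glue handed to the generic
 * percpu first-chunk setup: NUMA-aware bootmem alloc/free, a CPU
 * distance callback for chunk grouping, and a page-table populator
 * for the page-based fallback.  Hedged sketch of the call order used
 * by setup_per_cpu_areas() below (arguments elided):
 *
 *	rc = pcpu_embed_first_chunk(..., pcpu_cpu_distance,
 *				    pcpu_alloc_bootmem, pcpu_free_bootmem);
 *	if (rc < 0)
 *		rc = pcpu_page_first_chunk(..., pcpu_alloc_bootmem,
 *					   pcpu_free_bootmem,
 *					   pcpu_populate_pte);
 */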
149973fffc03SDavid S. Miller void __init setup_per_cpu_areas(void)
1500a88b5ba8SSam Ravnborg {
1501bcb2107fSTejun Heo 	unsigned long delta;
1502bcb2107fSTejun Heo 	unsigned int cpu;
1503a70c6913STejun Heo 	int rc = -EINVAL;
1504a88b5ba8SSam Ravnborg 
1505a70c6913STejun Heo 	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
1506bcb2107fSTejun Heo 		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1507bcb2107fSTejun Heo 					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
1508a70c6913STejun Heo 					    pcpu_cpu_distance,
1509a70c6913STejun Heo 					    pcpu_alloc_bootmem,
1510bcb2107fSTejun Heo 					    pcpu_free_bootmem);
1511fb435d52STejun Heo 		if (rc)
1512a70c6913STejun Heo 			pr_warning("PERCPU: %s allocator failed (%d), "
1513a70c6913STejun Heo 				   "falling back to page size\n",
1514a70c6913STejun Heo 				   pcpu_fc_names[pcpu_chosen_fc], rc);
1515a70c6913STejun Heo 	}
1516a70c6913STejun Heo 	if (rc < 0)
1517a70c6913STejun Heo 		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
1518a70c6913STejun Heo 					   pcpu_alloc_bootmem,
1519a70c6913STejun Heo 					   pcpu_free_bootmem,
1520a70c6913STejun Heo 					   pcpu_populate_pte);
1521a70c6913STejun Heo 	if (rc < 0)
1522a70c6913STejun Heo 		panic("cannot initialize percpu area (err=%d)", rc);
15234fd78a5fSDavid S. Miller 
15244fd78a5fSDavid S. Miller 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1525fb435d52STejun Heo 	for_each_possible_cpu(cpu)
1526fb435d52STejun Heo 		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
1527a88b5ba8SSam Ravnborg 
1528a88b5ba8SSam Ravnborg 	/* Setup %g5 for the boot cpu. */
1529a88b5ba8SSam Ravnborg 	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
1530b696fdc2SDavid S. Miller 
1531b696fdc2SDavid S. Miller 	of_fill_in_cpu_data();
1532b696fdc2SDavid S. Miller 	if (tlb_type == hypervisor)
15336ac5c610SStephen Rothwell 		mdesc_fill_in_cpu_data(cpu_all_mask);
1534a88b5ba8SSam Ravnborg }
1535