/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/cpu.h>
#include <sys/elf_SPARC.h>
#include <vm/hat_sfmmu.h>
#include <vm/page.h>
#include <sys/cpuvar.h>
#include <sys/async.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dditypes.h>
#include <sys/sunddi.h>
#include <sys/cpu_module.h>
#include <sys/prom_debug.h>
#include <sys/vmsystm.h>
#include <sys/prom_plat.h>
#include <sys/sysmacros.h>
#include <sys/intreg.h>
#include <sys/machtrap.h>
#include <sys/ontrap.h>
#include <sys/ivintr.h>
#include <sys/atomic.h>
#include <sys/panic.h>
#include <sys/dtrace.h>
#include <sys/simulate.h>
#include <sys/fault.h>
#include <sys/niagararegs.h>
#include <sys/trapstat.h>
#include <sys/hsvc.h>
#include <sys/mutex_impl.h>

#define	NI_MMU_PAGESIZE_MASK	((1 << TTE8K) | (1 << TTE64K) | (1 << TTE4M) \
				    | (1 << TTE256M))

uint_t root_phys_addr_lo_mask = 0xffffffffU;
static niagara_mmustat_t *cpu_tstat_va;		/* VA of mmustat buffer */
static uint64_t cpu_tstat_pa;			/* PA of mmustat buffer */
char cpu_module_name[] = "SUNW,UltraSPARC-T1";

/*
 * Hypervisor services information for the NIAGARA CPU module
 */
static boolean_t niagara_hsvc_available = B_TRUE;
static uint64_t niagara_sup_minor;		/* Supported minor number */
static hsvc_info_t niagara_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIAGARA_CPU, 1, 0, cpu_module_name
};

void
cpu_setup(void)
{
	extern int mmu_exported_pagesize_mask;
	extern int cpc_has_overflow_intr;
	int status;
	char *ni_isa_set[] = {
	    "sparcv9+vis",
	    "sparcv9+vis2",
	    "sparcv8plus+vis",
	    "sparcv8plus+vis2",
	    NULL
	};

	/*
	 * Negotiate the API version for Niagara-specific hypervisor
	 * services.
	 */
	status = hsvc_register(&niagara_hsvc, &niagara_sup_minor);
	if (status != 0) {
		cmn_err(CE_WARN, "%s: cannot negotiate hypervisor services "
		    "group: 0x%lx major: 0x%lx minor: 0x%lx errno: %d\n",
		    niagara_hsvc.hsvc_modname, niagara_hsvc.hsvc_group,
		    niagara_hsvc.hsvc_major, niagara_hsvc.hsvc_minor, status);
		niagara_hsvc_available = B_FALSE;
	}

	/*
	 * The setup common to all CPU modules is done in the
	 * cpu_setup_common() routine.
	 */
	cpu_setup_common(ni_isa_set);

	cache |= (CACHE_PTAG | CACHE_IOCOHERENT);

	if (broken_md_flag) {
		/*
		 * Turn on the missing bits supported by the Niagara CPU in
		 * the MMU pagesize mask returned by the MD.
		 */
		mmu_exported_pagesize_mask |= NI_MMU_PAGESIZE_MASK;
	} else {
		if ((mmu_exported_pagesize_mask &
		    DEFAULT_SUN4V_MMU_PAGESIZE_MASK) !=
		    DEFAULT_SUN4V_MMU_PAGESIZE_MASK)
			cmn_err(CE_PANIC, "machine description"
			    " does not have required sun4v page sizes"
			    " 8K, 64K and 4M: MD mask is 0x%x",
			    mmu_exported_pagesize_mask);
	}

	cpu_hwcap_flags |= AV_SPARC_ASI_BLK_INIT;

	/*
	 * Niagara supports a 48-bit subset of the full 64-bit virtual
	 * address space. Virtual addresses between 0x0000800000000000
	 * and 0xffff7fffffffffff inclusive lie within a "VA Hole"
	 * and must never be mapped. In addition, software must not use
	 * pages within 4GB of the VA hole as instruction pages to
	 * avoid problems with prefetching into the VA hole.
	 */
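	/*
	 * With 48 VA bits, the computation below therefore reserves
	 * [0x00007fff00000000, 0xffff800100000000) as the hole, i.e. the
	 * architectural VA hole widened by 4GB on each side, which also
	 * keeps instruction pages out of the prefetch range noted above.
	 */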
	hole_start = (caddr_t)((1ull << (va_bits - 1)) - (1ull << 32));
	hole_end = (caddr_t)((0ull - (1ull << (va_bits - 1))) + (1ull << 32));

	/*
	 * Niagara has a performance counter overflow interrupt
	 */
	cpc_has_overflow_intr = 1;

	shctx_on = 0;
}

#define	MB(n)	((n) * 1024 * 1024)
/*
 * Set the magic constants of the implementation.
 */
void
cpu_fiximp(struct cpu_node *cpunode)
{
	/*
	 * The Cache node is optional in the MD. If the "Cache" node does
	 * not exist in the MD, set the default L2 cache associativity,
	 * size and linesize.
	 */
	if (cpunode->ecache_size == 0)
		cpunode->ecache_size = MB(3);
	if (cpunode->ecache_linesize == 0)
		cpunode->ecache_linesize = 64;
	if (cpunode->ecache_associativity == 0)
		cpunode->ecache_associativity = 12;
}

void
cpu_map_exec_units(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * The cpu_ipipe and cpu_fpu fields are initialized based on
	 * the execution unit sharing information from the MD. They
	 * default to the CPU id in the absence of such information.
	 */
	cp->cpu_m.cpu_ipipe = cpunodes[cp->cpu_id].exec_unit_mapping;
	if (cp->cpu_m.cpu_ipipe == NO_EU_MAPPING_FOUND)
		cp->cpu_m.cpu_ipipe = (id_t)(cp->cpu_id);

	cp->cpu_m.cpu_fpu = cpunodes[cp->cpu_id].fpu_mapping;
	if (cp->cpu_m.cpu_fpu == NO_EU_MAPPING_FOUND)
		cp->cpu_m.cpu_fpu = (id_t)(cp->cpu_id);

	/*
	 * Niagara defines the core to be at the ipipe level
	 */
	cp->cpu_m.cpu_core = cp->cpu_m.cpu_ipipe;

	/*
	 * Niagara systems just have one chip. Therefore, the chip id and
	 * mpipe id are always 0.
	 */
	cp->cpu_m.cpu_chip = 0;
	cp->cpu_m.cpu_mpipe = 0;
}

void
cpu_mutex_delay(void)
{
	/*
	 * Dummy is the thread-private target of the cas.  If multiple strands
	 * have the same kernel call stack, dummy could fall at the same VA and
	 * hence the same L2 cache bank.  To avoid this, create multiple dummy
	 * words spread across several cache lines.
	 */
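	/*
	 * Each array entry below is padded out to 64 bytes (the L2 line
	 * size used in cpu_fiximp() above), so the four candidate targets
	 * land in different cache lines.
	 */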
	struct {
		long val;
		long pad[7];
	} dummy[4];

	long *ptr = &(dummy[CPU->cpu_seqid & 0x03].val);
	cas_delay(ptr);
}

static int niagara_cpucnt;

void
cpu_init_private(struct cpu *cp)
{
	extern void niagara_kstat_init(void);

	ASSERT(MUTEX_HELD(&cpu_lock));

	cpu_map_exec_units(cp);

	if ((niagara_cpucnt++ == 0) && (niagara_hsvc_available == B_TRUE))
		niagara_kstat_init();

	mutex_delay = cpu_mutex_delay;
}

/*ARGSUSED*/
void
cpu_uninit_private(struct cpu *cp)
{
	extern void niagara_kstat_fini(void);

	ASSERT(MUTEX_HELD(&cpu_lock));

	if ((--niagara_cpucnt == 0) && (niagara_hsvc_available == B_TRUE))
		niagara_kstat_fini();
}

/*
 * On Niagara, any flush will cause all preceding stores to be
 * synchronized wrt the i$, regardless of address or ASI.  In fact,
 * the address is ignored, so we always flush address 0.
 */
/*ARGSUSED*/
void
dtrace_flush_sec(uintptr_t addr)
{
	doflush(0);
}

#define	IS_FLOAT(i)	(((i) & 0x1000000) != 0)
#define	IS_IBIT_SET(x)	((x) & 0x2000)
#define	IS_VIS1(op, op3)	((op) == 2 && (op3) == 0x36)
#define	IS_PARTIAL_OR_SHORT_FLOAT_LD_ST(op, op3, asi)		\
		((op) == 3 && ((op3) == IOP_V8_LDDFA ||		\
		(op3) == IOP_V8_STDFA) && (asi) > ASI_SNFL)
int
vis1_partial_support(struct regs *rp, k_siginfo_t *siginfo, uint_t *fault)
{
	char *badaddr;
	int instr;
	uint_t	optype, op3, asi;
	uint_t	ignor;

	if (!USERMODE(rp->r_tstate))
		return (-1);

	instr = fetch_user_instr((caddr_t)rp->r_pc);

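	/*
	 * Decode the instruction fields: op is bits 31:30, op3 is bits
	 * 24:19 and the immediate ASI is bits 12:5.  If the i bit
	 * (bit 13) is set, the effective ASI comes from the %asi value
	 * saved in TSTATE rather than from the instruction itself.
	 */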
	optype = (instr >> 30) & 0x3;
	op3 = (instr >> 19) & 0x3f;
	ignor = (instr >> 5) & 0xff;
	if (IS_IBIT_SET(instr)) {
		asi = (uint32_t)((rp->r_tstate >> TSTATE_ASI_SHIFT) &
		    TSTATE_ASI_MASK);
	} else {
		asi = ignor;
	}

	if (!IS_VIS1(optype, op3) &&
	    !IS_PARTIAL_OR_SHORT_FLOAT_LD_ST(optype, op3, asi)) {
		return (-1);
	}
	switch (simulate_unimp(rp, &badaddr)) {
	case SIMU_RETRY:
		break;	/* regs are already set up */
		/*NOTREACHED*/

	case SIMU_SUCCESS:
		/*
		 * skip the successfully
		 * simulated instruction
		 */
		rp->r_pc = rp->r_npc;
		rp->r_npc += 4;
		break;
		/*NOTREACHED*/

	case SIMU_FAULT:
		siginfo->si_signo = SIGSEGV;
		siginfo->si_code = SEGV_MAPERR;
		siginfo->si_addr = badaddr;
		*fault = FLTBOUNDS;
		break;

	case SIMU_DZERO:
		siginfo->si_signo = SIGFPE;
		siginfo->si_code = FPE_INTDIV;
		siginfo->si_addr = (caddr_t)rp->r_pc;
		*fault = FLTIZDIV;
		break;

	case SIMU_UNALIGN:
		siginfo->si_signo = SIGBUS;
		siginfo->si_code = BUS_ADRALN;
		siginfo->si_addr = badaddr;
		*fault = FLTACCESS;
		break;

	case SIMU_ILLEGAL:
	default:
		siginfo->si_signo = SIGILL;
		op3 = (instr >> 19) & 0x3F;
		if (IS_FLOAT(instr) && ((op3 == IOP_V8_STQFA) ||
		    (op3 == IOP_V8_STDFA)))
			siginfo->si_code = ILL_ILLADR;
		else
			siginfo->si_code = ILL_ILLOPC;
		siginfo->si_addr = (caddr_t)rp->r_pc;
		*fault = FLTILL;
		break;
	}
	return (0);
}

/*
 * Trapstat support for Niagara processor
 */
int
cpu_trapstat_conf(int cmd)
{
	size_t len;
	uint64_t mmustat_pa, hvret;
	int status = 0;

	if (niagara_hsvc_available == B_FALSE)
		return (ENOTSUP);

	switch (cmd) {
	case CPU_TSTATCONF_INIT:
		ASSERT(cpu_tstat_va == NULL);
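		/*
		 * Size the buffer for NCPU+1 entries: entry 0 is left
		 * unused, and each CPU uses the entry at index
		 * (cpu_id + 1), both in the ENABLE case below and in
		 * cpu_trapstat_data().
		 */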
		len = (NCPU+1) * sizeof (niagara_mmustat_t);
		cpu_tstat_va = contig_mem_alloc_align(len,
		    sizeof (niagara_mmustat_t));
		if (cpu_tstat_va == NULL)
			status = EAGAIN;
		else {
			bzero(cpu_tstat_va, len);
			cpu_tstat_pa = va_to_pa(cpu_tstat_va);
		}
		break;

	case CPU_TSTATCONF_FINI:
		if (cpu_tstat_va) {
			len = (NCPU+1) * sizeof (niagara_mmustat_t);
			contig_mem_free(cpu_tstat_va, len);
			cpu_tstat_va = NULL;
			cpu_tstat_pa = 0;
		}
		break;

	case CPU_TSTATCONF_ENABLE:
		hvret = hv_niagara_mmustat_conf((cpu_tstat_pa +
		    (CPU->cpu_id+1) * sizeof (niagara_mmustat_t)),
		    (uint64_t *)&mmustat_pa);
		if (hvret != H_EOK)
			status = EINVAL;
		break;

	case CPU_TSTATCONF_DISABLE:
		hvret = hv_niagara_mmustat_conf(0, (uint64_t *)&mmustat_pa);
		if (hvret != H_EOK)
			status = EINVAL;
		break;

	default:
		status = EINVAL;
		break;
	}
	return (status);
}

void
cpu_trapstat_data(void *buf, uint_t tstat_pgszs)
{
	niagara_mmustat_t	*mmustatp;
	tstat_pgszdata_t	*tstatp = (tstat_pgszdata_t *)buf;
	int	i;

	if (cpu_tstat_va == NULL)
		return;

	mmustatp = &((niagara_mmustat_t *)cpu_tstat_va)[CPU->cpu_id+1];
	if (tstat_pgszs > NIAGARA_MMUSTAT_PGSZS)
		tstat_pgszs = NIAGARA_MMUSTAT_PGSZS;

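	/*
	 * Copy the per-pagesize ITLB and DTLB statistics (counts and
	 * times), for both kernel and user mode, from this CPU's
	 * hypervisor-updated mmustat entry into the trapstat buffer.
	 */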
	for (i = 0; i < tstat_pgszs; i++, tstatp++) {
		tstatp->tpgsz_kernel.tmode_itlb.ttlb_tlb.tmiss_count =
		    mmustatp->kitsb[i].tsbhit_count;
		tstatp->tpgsz_kernel.tmode_itlb.ttlb_tlb.tmiss_time =
		    mmustatp->kitsb[i].tsbhit_time;
		tstatp->tpgsz_user.tmode_itlb.ttlb_tlb.tmiss_count =
		    mmustatp->uitsb[i].tsbhit_count;
		tstatp->tpgsz_user.tmode_itlb.ttlb_tlb.tmiss_time =
		    mmustatp->uitsb[i].tsbhit_time;
		tstatp->tpgsz_kernel.tmode_dtlb.ttlb_tlb.tmiss_count =
		    mmustatp->kdtsb[i].tsbhit_count;
		tstatp->tpgsz_kernel.tmode_dtlb.ttlb_tlb.tmiss_time =
		    mmustatp->kdtsb[i].tsbhit_time;
		tstatp->tpgsz_user.tmode_dtlb.ttlb_tlb.tmiss_count =
		    mmustatp->udtsb[i].tsbhit_count;
		tstatp->tpgsz_user.tmode_dtlb.ttlb_tlb.tmiss_time =
		    mmustatp->udtsb[i].tsbhit_time;
	}
}