/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#if !defined(lint)
#include "assym.h"
#endif

/*
 * Niagara2 processor specific assembly routines
 */

#include <sys/asm_linkage.h>
#include <sys/machasi.h>
#include <sys/machparam.h>
#include <sys/hypervisor_api.h>
#include <sys/niagara2regs.h>
#include <sys/niagaraasi.h>
#include <vm/hat_sfmmu.h>

#if defined(lint)
/*ARGSUSED*/
uint64_t
hv_niagara_getperf(uint64_t perfreg, uint64_t *datap)
{ return (0); }

/*ARGSUSED*/
uint64_t
hv_niagara_setperf(uint64_t perfreg, uint64_t data)
{ return (0); }

#else	/* lint */

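	/*
	 * Both routines below use the sun4v hypervisor fast trap calling
	 * convention: the API function number is loaded into %o5, the
	 * arguments are already in %o0/%o1, and "ta FAST_TRAP" enters the
	 * hypervisor.  The status is returned in %o0 and any return data
	 * in %o1.
	 */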
	/*
	 * hv_niagara_getperf(uint64_t perfreg, uint64_t *datap)
	 */
	ENTRY(hv_niagara_getperf)
	mov	%o1, %o4			! save datap
#if defined(NIAGARA2_IMPL)
	mov	HV_NIAGARA2_GETPERF, %o5
#elif defined(VFALLS_IMPL)
	mov	HV_VFALLS_GETPERF, %o5
#elif defined(KT_IMPL)
	mov	HV_KT_GETPERF, %o5
#endif
	ta	FAST_TRAP
	brz,a	%o0, 1f				! status 0 indicates success
	stx	%o1, [%o4]			! annulled: store data only on success
1:
	retl
	nop
	SET_SIZE(hv_niagara_getperf)

	/*
	 * hv_niagara_setperf(uint64_t perfreg, uint64_t data)
	 */
	ENTRY(hv_niagara_setperf)
#if defined(NIAGARA2_IMPL)
	mov	HV_NIAGARA2_SETPERF, %o5
#elif defined(VFALLS_IMPL)
	mov	HV_VFALLS_SETPERF, %o5
#elif defined(KT_IMPL)
	mov	HV_KT_SETPERF, %o5
#endif
	ta	FAST_TRAP
	retl
	nop
	SET_SIZE(hv_niagara_setperf)

#endif /* !lint */

#if defined (lint)
/*
 * Invalidate all of the entries within the TSB by setting the inv bit
 * in the tte_tag field of each tsbe.
 *
 * We take advantage of the fact that the TSBs are page aligned and a
 * multiple of PAGESIZE in size, which allows the use of the
 * ASI_BLK_INIT_xxx ASIs.
 *
 * See TSB_LOCK_ENTRY and the miss handlers for how this works in practice
 * (in short, we set all bits in the upper word of the tag, and we give the
 * invalid bit precedence over other tag bits in both places).
 */
/*ARGSUSED*/
void
cpu_inv_tsb(caddr_t tsb_base, uint_t tsb_bytes)
{}

#else /* lint */

	ENTRY(cpu_inv_tsb)

	/*
	 * The following code assumes that the tsb_base (%o0) is 256-byte
	 * aligned and the tsb_bytes count (%o1) is a multiple of 256 bytes.
	 */

	wr	%g0, ASI_BLK_INIT_ST_QUAD_LDD_P, %asi
	set	TSBTAG_INVALID, %o2
	sllx	%o2, 32, %o2		! INV bit in upper 32 bits of the tag
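	/*
	 * Each 16-byte tsbe begins with an 8-byte tag word, so storing the
	 * INV pattern at every 0x10 offset below invalidates one entry.
	 * The stores to the 64-byte aligned offsets (0x0, 0x40, 0x80, 0xc0)
	 * are issued first so that the block-init store ASI can allocate
	 * each cache line without first fetching it from memory.
	 */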
1:
	stxa	%o2, [%o0+0x0]%asi
	stxa	%o2, [%o0+0x40]%asi
	stxa	%o2, [%o0+0x80]%asi
	stxa	%o2, [%o0+0xc0]%asi

	stxa	%o2, [%o0+0x10]%asi
	stxa	%o2, [%o0+0x20]%asi
	stxa	%o2, [%o0+0x30]%asi

	stxa	%o2, [%o0+0x50]%asi
	stxa	%o2, [%o0+0x60]%asi
	stxa	%o2, [%o0+0x70]%asi

	stxa	%o2, [%o0+0x90]%asi
	stxa	%o2, [%o0+0xa0]%asi
	stxa	%o2, [%o0+0xb0]%asi

	stxa	%o2, [%o0+0xd0]%asi
	stxa	%o2, [%o0+0xe0]%asi
	stxa	%o2, [%o0+0xf0]%asi

	subcc	%o1, 0x100, %o1		! decrement byte count by 256
	bgu,pt	%ncc, 1b		! loop while bytes remain
	add	%o0, 0x100, %o0		! delay: advance to the next chunk

	membar	#Sync
	retl
	nop

	SET_SIZE(cpu_inv_tsb)
#endif /* lint */

#if defined (lint)
/*
 * This is the CPU-specific delay routine used for atomic backoff on
 * Niagara2 and VF CPUs.  The rd instruction uses fewer resources than
 * casx on these CPUs.
 */
void
cpu_atomic_delay(void)
{}
#else	/* lint */
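	/*
	 * Three reads of %ccr (the last one sitting in the retl delay
	 * slot) provide a short delay without generating memory traffic.
	 */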
	ENTRY(cpu_atomic_delay)
	rd	%ccr, %g0
	rd	%ccr, %g0
	retl
	rd	%ccr, %g0
	SET_SIZE(cpu_atomic_delay)
#endif	/* lint */