/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if !defined(lint)
#include "assym.h"
#endif

/*
 * Niagara2 processor specific assembly routines
 */

#include <sys/asm_linkage.h>
#include <sys/machasi.h>
#include <sys/machparam.h>
#include <sys/hypervisor_api.h>
#include <sys/niagara2regs.h>
#include <sys/niagaraasi.h>
#include <vm/hat_sfmmu.h>

#if defined(lint)
/*ARGSUSED*/
uint64_t
hv_niagara_getperf(uint64_t perfreg, uint64_t *datap)
{ return (0); }

/*ARGSUSED*/
uint64_t
hv_niagara_setperf(uint64_t perfreg, uint64_t data)
{ return (0); }

#else   /* lint */

	/*
	 * hv_niagara_getperf(uint64_t perfreg, uint64_t *datap)
	 *
	 * Read the performance register selected by perfreg and store its
	 * value in *datap.  Returns the hypervisor status code (0 on
	 * success); *datap is written only on success.
	 */
	ENTRY(hv_niagara_getperf)
	mov     %o1, %o4                        ! save datap
#if defined(NIAGARA2_IMPL)
	mov     HV_NIAGARA2_GETPERF, %o5
#elif defined(VFALLS_IMPL)
	mov	HV_VFALLS_GETPERF, %o5
#endif
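	/*
	 * sun4v fast trap: hypervisor function number in %o5, perfreg
	 * argument in %o0.  Status comes back in %o0 and the register
	 * value in %o1; the annulled store below writes *datap only when
	 * the status is zero.
	 */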
	ta      FAST_TRAP
	brz,a   %o0, 1f
	stx     %o1, [%o4]
1:
	retl
	nop
	SET_SIZE(hv_niagara_getperf)

	/*
	 * hv_niagara_setperf(uint64_t perfreg, uint64_t data)
	 *
	 * Write data to the performance register selected by perfreg.
	 * Returns the hypervisor status code (0 on success).
	 */
	ENTRY(hv_niagara_setperf)
#if defined(NIAGARA2_IMPL)
	mov     HV_NIAGARA2_SETPERF, %o5
#elif defined(VFALLS_IMPL)
	mov     HV_VFALLS_SETPERF, %o5
#endif
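	/*
	 * Fast trap: perfreg in %o0, data in %o1, function number in %o5;
	 * the hypervisor status is returned in %o0.
	 */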
	ta      FAST_TRAP
	retl
	nop
	SET_SIZE(hv_niagara_setperf)

#endif /* !lint */
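
/*
 * Illustrative sketch only (not taken from this file): a hypothetical C
 * caller of the wrappers above.  The register number 0 and the bit being
 * set are assumptions made purely for the example.
 *
 *	uint64_t val;
 *
 *	if (hv_niagara_getperf(0, &val) == H_EOK) {
 *		val |= 1;
 *		(void) hv_niagara_setperf(0, val);
 *	}
 */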

#if defined (lint)
/*
 * Invalidate all of the entries within the TSB, by setting the inv bit
 * in the tte_tag field of each tsbe.
 *
 * We take advantage of the fact that the TSBs are page aligned and a
 * multiple of PAGESIZE in size to use the ASI_BLK_INIT_xxx ASIs.
 *
 * See TSB_LOCK_ENTRY and the miss handlers for how this works in practice
 * (in short, we set all bits in the upper word of the tag, and we give the
 * invalid bit precedence over other tag bits in both places).
 */
/*ARGSUSED*/
void
cpu_inv_tsb(caddr_t tsb_base, uint_t tsb_bytes)
{}

#else /* lint */

	ENTRY(cpu_inv_tsb)

	/*
	 * The following code assumes that the tsb_base (%o0) is 256-byte
	 * aligned and that the tsb_bytes count is a multiple of 256 bytes.
	 */

	wr	%g0, ASI_BLK_INIT_ST_QUAD_LDD_P, %asi
	set	TSBTAG_INVALID, %o2
	sllx	%o2, 32, %o2		! INV bit in upper 32 bits of the tag
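
	/*
	 * Each pass through the loop invalidates 256 bytes of TSB (16
	 * tsbes).  The stores to the 64-byte aligned offsets (0x0, 0x40,
	 * 0x80, 0xc0) are issued first so that the block-init ASI can
	 * allocate each cache line without reading it from memory; the
	 * remaining stores then fill in the rest of each line.
	 */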
1:
	stxa	%o2, [%o0+0x0]%asi
	stxa	%o2, [%o0+0x40]%asi
	stxa	%o2, [%o0+0x80]%asi
	stxa	%o2, [%o0+0xc0]%asi

	stxa	%o2, [%o0+0x10]%asi
	stxa	%o2, [%o0+0x20]%asi
	stxa	%o2, [%o0+0x30]%asi

	stxa	%o2, [%o0+0x50]%asi
	stxa	%o2, [%o0+0x60]%asi
	stxa	%o2, [%o0+0x70]%asi

	stxa	%o2, [%o0+0x90]%asi
	stxa	%o2, [%o0+0xa0]%asi
	stxa	%o2, [%o0+0xb0]%asi

	stxa	%o2, [%o0+0xd0]%asi
	stxa	%o2, [%o0+0xe0]%asi
	stxa	%o2, [%o0+0xf0]%asi

	subcc	%o1, 0x100, %o1		! decrement remaining byte count
	bgu,pt	%ncc, 1b
	add	%o0, 0x100, %o0		! delay slot: advance base pointer

	membar	#Sync
	retl
	nop

	SET_SIZE(cpu_inv_tsb)
#endif /* lint */

#if defined (lint)
/*
 * This is the CPU-specific delay routine for atomic backoff, used on
 * Niagara2 and Victoria Falls (VF) CPUs.  The rd instruction uses fewer
 * resources than casx on these CPUs.
 */
void
cpu_atomic_delay(void)
{}
#else	/* lint */
	ENTRY(cpu_atomic_delay)
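	/*
	 * Three reads of %ccr (the last in the retl delay slot) burn a few
	 * cycles without generating any memory traffic.
	 */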
	rd	%ccr, %g0
	rd	%ccr, %g0
	retl
	rd	%ccr, %g0
	SET_SIZE(cpu_atomic_delay)
#endif	/* lint */
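
/*
 * Hypothetical sketch (not from this file) of the kind of backoff loop
 * that calls cpu_atomic_delay(): retry a compare-and-swap, pausing between
 * attempts.  The variable names and the doubling backoff policy are
 * illustrative assumptions only.
 *
 *	do {
 *		old = *target;
 *		new = old + 1;
 *		if (atomic_cas_64(target, old, new) == old)
 *			break;
 *		for (i = 0; i < backoff; i++)
 *			cpu_atomic_delay();
 *		if (backoff < limit)
 *			backoff <<= 1;
 *	} while (1);
 */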