/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#if !defined(lint)
#include "assym.h"
#endif

/*
 * Niagara2 processor specific assembly routines
 */

#include <sys/asm_linkage.h>
#include <sys/machasi.h>
#include <sys/machparam.h>
#include <sys/hypervisor_api.h>
#include <sys/niagara2regs.h>
#include <sys/machasi.h>
#include <sys/niagaraasi.h>
#include <vm/hat_sfmmu.h>

#if defined(lint)
/*ARGSUSED*/
uint64_t
hv_niagara_getperf(uint64_t perfreg, uint64_t *datap)
{ return (0); }

/*ARGSUSED*/
uint64_t
hv_niagara_setperf(uint64_t perfreg, uint64_t data)
{ return (0); }

#else	/* lint */

	/*
	 * hv_niagara_getperf(uint64_t perfreg, uint64_t *datap)
	 *
	 * Read a performance register through the hypervisor.
	 *
	 * On entry (SPARC ABI): %o0 = perfreg id, %o1 = datap.
	 * The hypercall function number is selected at compile time for the
	 * CPU implementation being built (Niagara2, Victoria Falls, or KT)
	 * and placed in %o5 per the sun4v FAST_TRAP calling convention.
	 * The hypervisor returns its status in %o0 and the register value
	 * in %o1.
	 *
	 * Returns the hypervisor status (0 == success) in %o0; on success
	 * the register value is stored to *datap.
	 */
	ENTRY(hv_niagara_getperf)
	mov	%o1, %o4			! save datap (%o1 is
						! clobbered by the trap)
#if defined(NIAGARA2_IMPL)
	mov	HV_NIAGARA2_GETPERF, %o5
#elif defined(VFALLS_IMPL)
	mov	HV_VFALLS_GETPERF, %o5
#elif defined(KT_IMPL)
	mov	HV_KT_GETPERF, %o5
#endif
	ta	FAST_TRAP
	! Annulled branch: the delay-slot store to *datap executes only
	! when the hypervisor status in %o0 is zero (success).
	brz,a	%o0, 1f
	stx	%o1, [%o4]
1:
	retl
	nop
	SET_SIZE(hv_niagara_getperf)

	/*
	 * hv_niagara_setperf(uint64_t perfreg, uint64_t data)
	 *
	 * Write a performance register through the hypervisor.
	 *
	 * On entry: %o0 = perfreg id, %o1 = data to write.  The
	 * implementation-specific hypercall number goes in %o5 and the
	 * FAST_TRAP is issued; the hypervisor status comes back in %o0
	 * and is returned to the caller unchanged.
	 */
	ENTRY(hv_niagara_setperf)
#if defined(NIAGARA2_IMPL)
	mov	HV_NIAGARA2_SETPERF, %o5
#elif defined(VFALLS_IMPL)
	mov	HV_VFALLS_SETPERF, %o5
#elif defined(KT_IMPL)
	mov	HV_KT_SETPERF, %o5
#endif
	ta	FAST_TRAP
	retl
	nop
	SET_SIZE(hv_niagara_setperf)

#endif /* !lint */

#if defined (lint)
/*
 * Invalidate all of the entries within the TSB, by setting the inv bit
 * in the tte_tag field of each tsbe.
 *
 * We take advantage of the fact that the TSBs are page aligned and a
 * multiple of PAGESIZE to use ASI_BLK_INIT_xxx ASI.
 *
 * See TSB_LOCK_ENTRY and the miss handlers for how this works in practice
 * (in short, we set all bits in the upper word of the tag, and we give the
 * invalid bit precedence over other tag bits in both places).
 */
/*ARGSUSED*/
void
cpu_inv_tsb(caddr_t tsb_base, uint_t tsb_bytes)
{}

#else /* lint */

	ENTRY(cpu_inv_tsb)

	/*
	 * The following code assumes that the tsb_base (%o0) is 256 bytes
	 * aligned and the tsb_bytes count is multiple of 256 bytes.
	 */

	! Use the block-init store ASI so each 64-byte cache line is
	! initialized on the first store to it rather than being fetched
	! from memory — we are overwriting every entry anyway.
	wr	%g0, ASI_BLK_INIT_ST_QUAD_LDD_P, %asi
	set	TSBTAG_INVALID, %o2
	sllx	%o2, 32, %o2			! INV bit in upper 32 bits
						! of the tag
1:
	! Touch the start of each of the four 64-byte lines in this
	! 256-byte chunk first, so the block-init ASI allocates the
	! lines, ...
	stxa	%o2, [%o0+0x0]%asi
	stxa	%o2, [%o0+0x40]%asi
	stxa	%o2, [%o0+0x80]%asi
	stxa	%o2, [%o0+0xc0]%asi

	! ... then fill in the remaining 16-byte TSB entries within each
	! line (each tsbe is 16 bytes; only its first 8-byte tag word
	! needs the invalid pattern).
	stxa	%o2, [%o0+0x10]%asi
	stxa	%o2, [%o0+0x20]%asi
	stxa	%o2, [%o0+0x30]%asi

	stxa	%o2, [%o0+0x50]%asi
	stxa	%o2, [%o0+0x60]%asi
	stxa	%o2, [%o0+0x70]%asi

	stxa	%o2, [%o0+0x90]%asi
	stxa	%o2, [%o0+0xa0]%asi
	stxa	%o2, [%o0+0xb0]%asi

	stxa	%o2, [%o0+0xd0]%asi
	stxa	%o2, [%o0+0xe0]%asi
	stxa	%o2, [%o0+0xf0]%asi

	! Advance by one 256-byte chunk; the add executes in the branch
	! delay slot.
	subcc	%o1, 0x100, %o1
	bgu,pt	%ncc, 1b
	add	%o0, 0x100, %o0

	! Make sure all invalidation stores are globally visible before
	! returning to the caller.
	membar	#Sync
	retl
	nop

	SET_SIZE(cpu_inv_tsb)
#endif /* lint */

#if defined (lint)
/*
 * This is CPU specific delay routine for atomic backoff. It is used in case
 * of Niagara2 and VF CPUs. The rd instruction uses less resources than casx
 * on these CPUs.
 */
void
cpu_atomic_delay(void)
{}
#else /* lint */
	/*
	 * Burn a few cycles with side-effect-free %ccr reads (three in
	 * all — the third sits in the retl delay slot) as a cheap
	 * backoff between atomic retries.
	 */
	ENTRY(cpu_atomic_delay)
	rd	%ccr, %g0
	rd	%ccr, %g0
	retl
	rd	%ccr, %g0
	SET_SIZE(cpu_atomic_delay)
#endif /* lint */