/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

#ifdef _KERNEL

#include <sys/types.h>

#include <machine/psl.h>

struct thread;

#ifdef __GNUC__

static __inline void
breakpoint(void)
{

	return;
}

#endif

/* CPU register mangling inlines */

/* Write the machine state register. */
static __inline void
mtmsr(register_t value)
{

	__asm __volatile ("mtmsr %0" :: "r"(value));
}

/* Read the machine state register. */
static __inline register_t
mfmsr(void)
{
	register_t value;

	__asm __volatile ("mfmsr %0" : "=r"(value));

	return (value);
}

/* Write the segment register selected by the effective address 'va'. */
static __inline void
mtsrin(vm_offset_t va, register_t value)
{

	__asm __volatile ("mtsrin %0,%1" :: "r"(value), "r"(va));
}

/* Read the segment register selected by the effective address 'va'. */
static __inline register_t
mfsrin(vm_offset_t va)
{
	register_t value;

	__asm __volatile ("mfsrin %0,%1" : "=r"(value) : "r"(va));

	return (value);
}

/* Write the decrementer. */
static __inline void
mtdec(register_t value)
{

	__asm __volatile ("mtdec %0" :: "r"(value));
}

/* Read the decrementer. */
static __inline register_t
mfdec(void)
{
	register_t value;

	__asm __volatile ("mfdec %0" : "=r"(value));

	return (value);
}

/* Enforce in-order execution of I/O. */
static __inline void
eieio(void)
{

	__asm __volatile ("eieio");
}

/* Instruction synchronization barrier. */
static __inline void
isync(void)
{

	__asm __volatile ("isync");
}

/*
 * Disable external interrupts (and recoverable-interrupt state) and
 * return the previous MSR for a later intr_restore().
 */
static __inline register_t
intr_disable(void)
{
	register_t msr;

	msr = mfmsr();
	mtmsr(msr & ~(PSL_EE|PSL_RI));
	return (msr);
}

static __inline void
intr_restore(register_t msr)
{

	mtmsr(msr);
}

static __inline void
restore_intr(unsigned int msr)
{

	mtmsr(msr);
}

/* Memory barrier. */
static __inline void
powerpc_mb(void)
{

	__asm __volatile("eieio; sync" : : : "memory");
}

/* The per-CPU data pointer is kept in SPRG0. */
static __inline struct pcpu *
powerpc_get_pcpup(void)
{
	struct pcpu *ret;

	__asm ("mfsprg %0, 0" : "=r"(ret));

	return (ret);
}

#endif /* _KERNEL */

#endif /* !_MACHINE_CPUFUNC_H_ */
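
/*
 * Usage sketch (illustrative only): intr_disable() masks PSL_EE and PSL_RI
 * and returns the previous MSR; the caller hands that value back to
 * intr_restore() when its critical section ends.  The caller shown below
 * is hypothetical and is not defined by this header.
 *
 *	static void
 *	example_critical_section(void)
 *	{
 *		register_t msr;
 *
 *		msr = intr_disable();
 *		(update interrupt-shared state here)
 *		intr_restore(msr);
 *	}
 */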