/*-
 * Copyright (c) 2008 Joseph Koshy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Common code for handling Intel CPUs.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/systm.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

static int
intel_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
{
	(void) pc;

	PMCDBG(MDP,SWI,1, "pc=%p pp=%p enable-msr=%d", pc, pp,
	    pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS);

	/* allow the RDPMC instruction if needed */
	if (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS)
		load_cr4(rcr4() | CR4_PCE);

	PMCDBG(MDP,SWI,1, "cr4=0x%jx", (uintmax_t) rcr4());

	return 0;
}

static int
intel_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
{
	(void) pc;
	(void) pp;		/* can be NULL */

	PMCDBG(MDP,SWO,1, "pc=%p pp=%p cr4=0x%jx", pc, pp,
	    (uintmax_t) rcr4());

	/* always turn off the RDPMC instruction */
	load_cr4(rcr4() & ~CR4_PCE);

	return 0;
}

struct pmc_mdep *
pmc_intel_initialize(void)
{
	struct pmc_mdep *pmc_mdep;
	enum pmc_cputype cputype;
	int error, model, nclasses, ncpus;

	KASSERT(cpu_vendor_id == CPU_VENDOR_INTEL,
	    ("[intel,%d] Initializing non-intel processor", __LINE__));

	PMCDBG(MDP,INI,0, "intel-initialize cpuid=0x%x", cpu_id);

	cputype = -1;
	nclasses = 2;

	/* Combine the CPUID extended-model (bits 19:16) and model (bits 7:4) fields. */
	model = ((cpu_id & 0xF0000) >> 12) | ((cpu_id & 0xF0) >> 4);

	switch (cpu_id & 0xF00) {
#if	defined(__i386__)
	case 0x500:		/* Pentium family processors */
		cputype = PMC_CPU_INTEL_P5;
		break;
#endif
	case 0x600:		/* Pentium Pro, Celeron, Pentium II & III */
		switch (model) {
#if	defined(__i386__)
		case 0x1:
			cputype = PMC_CPU_INTEL_P6;
			break;
		case 0x3: case 0x5:
			cputype = PMC_CPU_INTEL_PII;
			break;
		case 0x6: case 0x16:
			cputype = PMC_CPU_INTEL_CL;
			break;
		case 0x7: case 0x8: case 0xA: case 0xB:
			cputype = PMC_CPU_INTEL_PIII;
			break;
		case 0x9: case 0xD:
			cputype = PMC_CPU_INTEL_PM;
			break;
#endif
		case 0xE:
			cputype = PMC_CPU_INTEL_CORE;
			break;
		case 0xF:
			cputype = PMC_CPU_INTEL_CORE2;
			nclasses = 3;
			break;
		case 0x17:
			cputype = PMC_CPU_INTEL_CORE2EXTREME;
			nclasses = 3;
			break;
		case 0x1C:	/* Per Intel document 320047-002. */
			cputype = PMC_CPU_INTEL_ATOM;
			nclasses = 3;
			break;
		case 0x1A:
		case 0x1E:	/* Per Intel document 253669-032 9/2009, pages A-2 and A-57 */
		case 0x1F:	/* Per Intel document 253669-032 9/2009, pages A-2 and A-57 */
			cputype = PMC_CPU_INTEL_COREI7;
			nclasses = 3;
			break;
		}
		break;
#if	defined(__i386__) || defined(__amd64__)
	case 0xF00:		/* P4 */
		if (model >= 0 && model <= 6) /* known models */
			cputype = PMC_CPU_INTEL_PIV;
		break;
#endif
	}

	if ((int) cputype == -1) {
		printf("pmc: Unknown Intel CPU.\n");
		return (NULL);
	}

	pmc_mdep = malloc(sizeof(struct pmc_mdep) + nclasses *
	    sizeof(struct pmc_classdep), M_PMC, M_WAITOK|M_ZERO);

	pmc_mdep->pmd_cputype = cputype;
	pmc_mdep->pmd_nclass = nclasses;

	pmc_mdep->pmd_switch_in = intel_switch_in;
	pmc_mdep->pmd_switch_out = intel_switch_out;

	ncpus = pmc_cpu_max();

	error = pmc_tsc_initialize(pmc_mdep, ncpus);
	if (error)
		goto error;

	switch (cputype) {
#if	defined(__i386__) || defined(__amd64__)
		/*
		 * Intel Core, Core 2 and Atom processors.
		 */
	case PMC_CPU_INTEL_ATOM:
	case PMC_CPU_INTEL_CORE:
	case PMC_CPU_INTEL_CORE2:
	case PMC_CPU_INTEL_CORE2EXTREME:
	case PMC_CPU_INTEL_COREI7:
		error = pmc_core_initialize(pmc_mdep, ncpus);
		break;

		/*
		 * Intel Pentium 4 Processors, and P4/EMT64 processors.
		 */

	case PMC_CPU_INTEL_PIV:
		error = pmc_p4_initialize(pmc_mdep, ncpus);

		KASSERT(pmc_mdep->pmd_npmc == TSC_NPMCS + P4_NPMCS,
		    ("[intel,%d] incorrect npmc count %d", __LINE__,
		    pmc_mdep->pmd_npmc));
		break;
#endif

#if	defined(__i386__)
		/*
		 * P6 Family Processors
		 */

	case PMC_CPU_INTEL_P6:
	case PMC_CPU_INTEL_CL:
	case PMC_CPU_INTEL_PII:
	case PMC_CPU_INTEL_PIII:
	case PMC_CPU_INTEL_PM:
		error = pmc_p6_initialize(pmc_mdep, ncpus);

		KASSERT(pmc_mdep->pmd_npmc == TSC_NPMCS + P6_NPMCS,
		    ("[intel,%d] incorrect npmc count %d", __LINE__,
		    pmc_mdep->pmd_npmc));
		break;

		/*
		 * Intel Pentium PMCs.
		 */

	case PMC_CPU_INTEL_P5:
		error = pmc_p5_initialize(pmc_mdep, ncpus);

		KASSERT(pmc_mdep->pmd_npmc == TSC_NPMCS + PENTIUM_NPMCS,
		    ("[intel,%d] incorrect npmc count %d", __LINE__,
		    pmc_mdep->pmd_npmc));
		break;
#endif

	default:
		KASSERT(0, ("[intel,%d] Unknown CPU type", __LINE__));
	}

	/* Reached on success as well; clean up only if initialization failed. */
 error:
	if (error) {
		free(pmc_mdep, M_PMC);
		pmc_mdep = NULL;
	}

	return (pmc_mdep);
}

void
pmc_intel_finalize(struct pmc_mdep *md)
{
	pmc_tsc_finalize(md);

	switch (md->pmd_cputype) {
#if	defined(__i386__) || defined(__amd64__)
	case PMC_CPU_INTEL_ATOM:
	case PMC_CPU_INTEL_CORE:
	case PMC_CPU_INTEL_CORE2:
	case PMC_CPU_INTEL_CORE2EXTREME:
	case PMC_CPU_INTEL_COREI7:
		pmc_core_finalize(md);
		break;

	case PMC_CPU_INTEL_PIV:
		pmc_p4_finalize(md);
		break;
#endif
#if	defined(__i386__)
	case PMC_CPU_INTEL_P6:
	case PMC_CPU_INTEL_CL:
	case PMC_CPU_INTEL_PII:
	case PMC_CPU_INTEL_PIII:
	case PMC_CPU_INTEL_PM:
		pmc_p6_finalize(md);
		break;
	case PMC_CPU_INTEL_P5:
		pmc_p5_finalize(md);
		break;
#endif
	default:
		KASSERT(0, ("[intel,%d] unknown CPU type", __LINE__));
	}
}