/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#define	PSMI_1_5
#include <sys/smp_impldefs.h>
#include <sys/psm.h>
#include <sys/psm_modctl.h>
#include <sys/pit.h>
#include <sys/cmn_err.h>
#include <sys/strlog.h>
#include <sys/clock.h>
#include <sys/debug.h>
#include <sys/rtc.h>
#include <sys/x86_archext.h>
#include <sys/cpupart.h>
#include <sys/cpuvar.h>
#include <sys/cpu.h>
#include <sys/pghw.h>
#include <sys/disp.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/promif.h>
#include <sys/mach_intr.h>
#include <vm/hat_i86.h>

#define	OFFSETOF(s, m)		(size_t)(&(((s *)0)->m))

/*
 * Local function prototypes
 */
static int mp_disable_intr(processorid_t cpun);
static void mp_enable_intr(processorid_t cpun);
static void mach_init();
static void mach_picinit();
static uint64_t mach_calchz(uint32_t pit_counter, uint64_t *processor_clks);
static int machhztomhz(uint64_t cpu_freq_hz);
static uint64_t mach_getcpufreq(void);
static void mach_fixcpufreq(void);
static int mach_clkinit(int, int *);
static void mach_smpinit(void);
static void mach_set_softintr(int ipl, struct av_softinfo *);
static int mach_softlvl_to_vect(int ipl);
static void mach_get_platform(int owner);
static void mach_construct_info();
static int mach_translate_irq(dev_info_t *dip, int irqno);
static int mach_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *,
		psm_intr_op_t, int *);
static void mach_notify_error(int level, char *errmsg);
static hrtime_t dummy_hrtime(void);
static void dummy_scalehrtime(hrtime_t *);
static void cpu_idle(void);
static void cpu_wakeup(cpu_t *, int);
static void cpu_idle_mwait(void);
static void cpu_wakeup_mwait(cpu_t *, int);
/*
 * External reference functions
 */
extern void return_instr();
extern uint64_t freq_tsc(uint32_t *);
#if defined(__i386)
extern uint64_t freq_notsc(uint32_t *);
#endif
extern void pc_gethrestime(timestruc_t *);
extern int cpuid_get_coreid(cpu_t *);
extern int cpuid_get_chipid(cpu_t *);

/*
 * PSM functions initialization
 */
void (*psm_shutdownf)(int, int)	= (void (*)(int, int))return_instr;
void (*psm_preshutdownf)(int, int) = (void (*)(int, int))return_instr;
void (*psm_notifyf)(int)	= (void (*)(int))return_instr;
void (*psm_set_idle_cpuf)(int)	= (void (*)(int))return_instr;
void (*psm_unset_idle_cpuf)(int) = (void (*)(int))return_instr;
void (*psminitf)()		= mach_init;
void (*picinitf)()		= return_instr;
int (*clkinitf)(int, int *)	= (int (*)(int, int *))return_instr;
int (*ap_mlsetup)()		= (int (*)(void))return_instr;
void (*send_dirintf)()		= return_instr;
void (*setspl)(int)		= (void (*)(int))return_instr;
int (*addspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
int (*delspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
void (*setsoftint)(int, struct av_softinfo *) =
	(void (*)(int, struct av_softinfo *))return_instr;
int (*slvltovect)(int)		= (int (*)(int))return_instr;
int (*setlvl)(int, int *)	= (int (*)(int, int *))return_instr;
void (*setlvlx)(int, int)	= (void (*)(int, int))return_instr;
int (*psm_disable_intr)(int)	= mp_disable_intr;
void (*psm_enable_intr)(int)	= mp_enable_intr;
hrtime_t (*gethrtimef)(void)	= dummy_hrtime;
hrtime_t (*gethrtimeunscaledf)(void) = dummy_hrtime;
void (*scalehrtimef)(hrtime_t *) = dummy_scalehrtime;
int (*psm_translate_irq)(dev_info_t *, int) = mach_translate_irq;
void (*gethrestimef)(timestruc_t *) = pc_gethrestime;
void (*psm_notify_error)(int, char *) = (void (*)(int, char *))NULL;
int (*psm_get_clockirq)(int) = NULL;
int (*psm_get_ipivect)(int, int) = NULL;

int (*psm_clkinit)(int) = NULL;
void (*psm_timer_reprogram)(hrtime_t) = NULL;
void (*psm_timer_enable)(void) = NULL;
void (*psm_timer_disable)(void) = NULL;
void (*psm_post_cyclic_setup)(void *arg) = NULL;
int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *, psm_intr_op_t,
	int *) = mach_intr_ops;

void (*notify_error)(int, char *) = (void (*)(int, char *))return_instr;
void (*hrtime_tick)(void) = return_instr;

int tsc_gethrtime_enable = 1;
int tsc_gethrtime_initted = 0;

/*
 * Local Static Data
 */
static struct psm_ops mach_ops;
static struct psm_ops *mach_set[4] = {&mach_ops, NULL, NULL, NULL};
static ushort_t mach_ver[4] = {0, 0, 0, 0};

/*
 * If non-zero, idle cpus will become "halted" when there's
 * no work to do.
 */
int idle_cpu_use_hlt = 1;

/*
 * If non-zero, idle cpus will use mwait if available to halt instead of hlt.
 */
int idle_cpu_prefer_mwait = 1;
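
/*
 * For illustration: both tunables above are ordinary kernel globals, so on
 * systems where mwait-based idling is undesirable they can typically be
 * tuned from /etc/system, e.g.
 *
 *	set idle_cpu_prefer_mwait = 0
 *
 * to fall back to hlt-based idling while idle_cpu_use_hlt remains 1.
 */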

/*ARGSUSED*/
int
pg_plat_hw_shared(cpu_t *cp, pghw_type_t hw)
{
	switch (hw) {
	case PGHW_IPIPE:
		if (x86_feature & (X86_HTT)) {
			/*
			 * Hyper-threading is SMT
			 */
			return (1);
		} else {
			return (0);
		}
	case PGHW_CHIP:
		if (x86_feature & (X86_CMP|X86_HTT))
			return (1);
		else
			return (0);
	default:
		return (0);
	}
}

/*
 * Compare two CPUs and see if they have a pghw_type_t sharing relationship
 * If pghw_type_t is an unsupported hardware type, then return -1
 */
int
pg_plat_cpus_share(cpu_t *cpu_a, cpu_t *cpu_b, pghw_type_t hw)
{
	id_t pgp_a, pgp_b;

	pgp_a = pg_plat_hw_instance_id(cpu_a, hw);
	pgp_b = pg_plat_hw_instance_id(cpu_b, hw);

	if (pgp_a == -1 || pgp_b == -1)
		return (-1);

	return (pgp_a == pgp_b);
}

/*
 * Return a physical instance identifier for known hardware sharing
 * relationships
 */
id_t
pg_plat_hw_instance_id(cpu_t *cpu, pghw_type_t hw)
{
	switch (hw) {
	case PGHW_IPIPE:
		return (cpuid_get_coreid(cpu));
	case PGHW_CHIP:
		return (cpuid_get_chipid(cpu));
	default:
		return (-1);
	}
}

int
pg_plat_hw_level(pghw_type_t hw)
{
	int i;
	static pghw_type_t hw_hier[] = {
		PGHW_IPIPE,
		PGHW_CHIP,
		PGHW_NUM_COMPONENTS
	};

	for (i = 0; hw_hier[i] != PGHW_NUM_COMPONENTS; i++) {
		if (hw_hier[i] == hw)
			return (i);
	}
	return (-1);
}

id_t
pg_plat_get_core_id(cpu_t *cpu)
{
	return ((id_t)cpuid_get_coreid(cpu));
}

void
cmp_set_nosteal_interval(void)
{
	/* Set the nosteal interval (used by disp_getbest()) to 100us */
	nosteal_nsec = 100000UL;
}

/*
 * Routine to ensure initial callers to hrtime get 0 as return
 */
static hrtime_t
dummy_hrtime(void)
{
	return (0);
}

/* ARGSUSED */
static void
dummy_scalehrtime(hrtime_t *ticks)
{}

/*
 * Idle the present CPU until awoken via an interrupt
 */
static void
cpu_idle(void)
{
	cpu_t		*cpup = CPU;
	processorid_t	cpun = cpup->cpu_id;
	cpupart_t	*cp = cpup->cpu_part;
	int		hset_update = 1;

	/*
	 * If this CPU is online, and there's multiple CPUs
	 * in the system, then we should notate our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmask
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpuset is checked to determine who
	 * (if anyone) should be awoken. We therefore need to first
	 * add ourselves to the halted cpuset, and then check if there
	 * is any work available.
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitmap)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		CPUSET_ATOMIC_ADD(cp->cp_mach->mc_haltset, cpun);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmask, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return. If the bit is cleared after
	 * we check then the poke will pop us out of the halted state.
	 *
	 * This means that the ordering of the poke and the clearing
	 * of the bit by cpu_wakeup is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_idle() must disable interrupts, then check for the bit.
	 */
	cli();

	if (hset_update && !CPU_IN_SET(cp->cp_mach->mc_haltset, cpun)) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		sti();
		return;
	}

	/*
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 */
	if (cpup->cpu_disp->disp_nrunnable != 0) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
		}
		sti();
		return;
	}

	mach_cpu_idle();

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
	}
}

/*
 * If "cpu" is halted, then wake it up clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t		cpu_found;
	int		result;
	cpupart_t	*cp;

	cp = cpu->cpu_part;
	if (CPU_IN_SET(cp->cp_mach->mc_haltset, cpu->cpu_id)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpu->cpu_id);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_idle().
		 * Poking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound)
		return;

	/*
	 * See if there are any other halted CPUs. If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		CPUSET_FIND(cp->cp_mach->mc_haltset, cpu_found);
		if (cpu_found == CPUSET_NOTINSET)
			return;

		ASSERT(cpu_found >= 0 && cpu_found < NCPU);
		CPUSET_ATOMIC_XDEL(cp->cp_mach->mc_haltset, cpu_found, result);
	} while (result < 0);

	if (cpu_found != CPU->cpu_id)
		poke_cpu(cpu_found);
}

/*
 * Idle the present CPU until awoken via touching its monitored line
 */
static void
cpu_idle_mwait(void)
{
	volatile uint32_t	*mcpu_mwait = CPU->cpu_m.mcpu_mwait;
	cpu_t			*cpup = CPU;
	processorid_t		cpun = cpup->cpu_id;
	cpupart_t		*cp = cpup->cpu_part;
	int			hset_update = 1;

	/*
	 * Set our mcpu_mwait here, so we can tell if anyone tries to
	 * wake us between now and when we call mwait. No other cpu will
	 * attempt to set our mcpu_mwait until we add ourself to the haltset.
	 */
	*mcpu_mwait = MWAIT_HALTED;

	/*
	 * If this CPU is online, and there's multiple CPUs
	 * in the system, then we should notate our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmask
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpuset is checked to determine who
	 * (if anyone) should be awoken. We therefore need to first
	 * add ourselves to the halted cpuset, and then check if there
	 * is any work available.
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitmap)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		CPUSET_ATOMIC_ADD(cp->cp_mach->mc_haltset, cpun);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmask, and a write to our mcpu_mwait.
	 *
	 * disp_anywork() checks disp_nrunnable, so we do not have to later.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 * To avoid a lost wakeup, arm the monitor before checking if another
	 * cpu wrote to mcpu_mwait to wake us up.
	 */
	i86_monitor(mcpu_mwait, 0, 0);
	if (*mcpu_mwait == MWAIT_HALTED) {
		tlb_going_idle();
		i86_mwait(0, 0);
		tlb_service();
	}

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
	}
}
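
/*
 * A sketch (illustrative only) of the lost-wakeup interleaving that the
 * monitor-before-check ordering in cpu_idle_mwait() guards against:
 *
 *	idle CPU				waking CPU
 *	--------				----------
 *	*mcpu_mwait = MWAIT_HALTED
 *	disp_anywork() finds nothing
 *						MWAIT_WAKEUP() writes the
 *						monitored line
 *	i86_monitor(mcpu_mwait, 0, 0)
 *	*mcpu_mwait != MWAIT_HALTED,
 *	so i86_mwait() is skipped
 *
 * Because the line is re-checked only after the monitor is armed, a wakeup
 * store either fails the check (as above) or causes the mwait to return;
 * it is never lost.
 */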

/*
 * If "cpu" is halted in mwait, then wake it up clearing its halted bit in
 * advance. Otherwise, see if other CPUs in the cpu partition are halted and
 * need to be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup_mwait(cpu_t *cp, int bound)
{
	cpupart_t	*cpu_part;
	uint_t		cpu_found;
	int		result;

	cpu_part = cp->cpu_part;

	/*
	 * Clear the halted bit for that CPU since it will be woken up
	 * in a moment.
	 */
	if (CPU_IN_SET(cpu_part->cp_mach->mc_haltset, cp->cpu_id)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		CPUSET_ATOMIC_DEL(cpu_part->cp_mach->mc_haltset, cp->cpu_id);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_idle().
		 * Waking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 *
		 * monitor/mwait wakeup via writing to our cache line is
		 * harmless and less expensive than always checking if we
		 * are waking ourself, which is an uncommon case.
		 */
		MWAIT_WAKEUP(cp);	/* write to monitored line */
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cp->cpu_thread == cp->cpu_idle_thread ||
		    cp->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound)
		return;

	/*
	 * See if there are any other halted CPUs. If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		CPUSET_FIND(cpu_part->cp_mach->mc_haltset, cpu_found);
		if (cpu_found == CPUSET_NOTINSET)
			return;

		ASSERT(cpu_found >= 0 && cpu_found < NCPU);
		CPUSET_ATOMIC_XDEL(cpu_part->cp_mach->mc_haltset, cpu_found,
		    result);
	} while (result < 0);

	/*
	 * Do not check if cpu_found is ourself as monitor/mwait wakeup is
	 * cheap.
	 */
	MWAIT_WAKEUP(cpu[cpu_found]);	/* write to monitored line */
}

void (*cpu_pause_handler)(volatile char *) = NULL;

static int
mp_disable_intr(int cpun)
{
	/*
	 * switch to the offline cpu
	 */
	affinity_set(cpun);
	/*
	 * raise ipl to just below cross call
	 */
	splx(XC_MED_PIL - 1);
	/*
	 * set base spl to prevent the next swtch to idle from
	 * lowering back to ipl 0
	 */
	CPU->cpu_intr_actv |= (1 << (XC_MED_PIL - 1));
	set_base_spl();
	affinity_clear();
	return (DDI_SUCCESS);
}

static void
mp_enable_intr(int cpun)
{
	/*
	 * switch to the online cpu
	 */
	affinity_set(cpun);
	/*
	 * clear the interrupt active mask
	 */
	CPU->cpu_intr_actv &= ~(1 << (XC_MED_PIL - 1));
	set_base_spl();
	(void) spl0();
	affinity_clear();
}

static void
mach_get_platform(int owner)
{
	void		**srv_opsp;
	void		**clt_opsp;
	int		i;
	int		total_ops;

	/* fix up psm ops */
	srv_opsp = (void **)mach_set[0];
	clt_opsp = (void **)mach_set[owner];
	if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01)
		total_ops = sizeof (struct psm_ops_ver01) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_1)
		/* no psm_notify_func */
		total_ops = OFFSETOF(struct psm_ops, psm_notify_func) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_2)
		/* no psm_timer funcs */
		total_ops = OFFSETOF(struct psm_ops, psm_timer_reprogram) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_3)
		/* no psm_preshutdown function */
		total_ops = OFFSETOF(struct psm_ops, psm_preshutdown) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_4)
		/* no psm_intr_ops function */
		total_ops = OFFSETOF(struct psm_ops, psm_intr_ops) /
		    sizeof (void (*)(void));
	else
		total_ops = sizeof (struct psm_ops) / sizeof (void (*)(void));

	/*
	 * Save the version of the PSM module, in case we need to
	 * behave differently based on version.
	 */
	mach_ver[0] = mach_ver[owner];

	for (i = 0; i < total_ops; i++)
		if (clt_opsp[i] != NULL)
			srv_opsp[i] = clt_opsp[i];
}

static void
mach_construct_info()
{
	struct psm_sw	*swp;
	int		mach_cnt[PSM_OWN_OVERRIDE+1] = {0};
	int		conflict_owner = 0;

	if (psmsw->psw_forw == psmsw)
		panic("No valid PSM modules found");
	mutex_enter(&psmsw_lock);
	for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
		if (!(swp->psw_flag & PSM_MOD_IDENTIFY))
			continue;
		mach_set[swp->psw_infop->p_owner] = swp->psw_infop->p_ops;
		mach_ver[swp->psw_infop->p_owner] = swp->psw_infop->p_version;
		mach_cnt[swp->psw_infop->p_owner]++;
	}
	mutex_exit(&psmsw_lock);

	mach_get_platform(PSM_OWN_SYS_DEFAULT);

	/* check to see if there are any conflicts */
	if (mach_cnt[PSM_OWN_EXCLUSIVE] > 1)
		conflict_owner = PSM_OWN_EXCLUSIVE;
	if (mach_cnt[PSM_OWN_OVERRIDE] > 1)
		conflict_owner = PSM_OWN_OVERRIDE;
	if (conflict_owner) {
		/* remove all psm modules except uppc */
		cmn_err(CE_WARN,
		    "Conflicts detected on the following PSM modules:");
		mutex_enter(&psmsw_lock);
		for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
			if (swp->psw_infop->p_owner == conflict_owner)
				cmn_err(CE_WARN, "%s ",
				    swp->psw_infop->p_mach_idstring);
		}
		mutex_exit(&psmsw_lock);
		cmn_err(CE_WARN,
		    "Setting the system back to SINGLE processor mode!");
		cmn_err(CE_WARN,
		    "Please edit /etc/mach to remove the invalid PSM module.");
		return;
	}

	if (mach_set[PSM_OWN_EXCLUSIVE])
		mach_get_platform(PSM_OWN_EXCLUSIVE);

	if (mach_set[PSM_OWN_OVERRIDE])
		mach_get_platform(PSM_OWN_OVERRIDE);
}

static void
mach_init()
{
	struct psm_ops	*pops;

	mach_construct_info();

	pops = mach_set[0];

	/* register the interrupt and clock initialization routines */
	picinitf = mach_picinit;
	clkinitf = mach_clkinit;
	psm_get_clockirq = pops->psm_get_clockirq;

	/* register the interrupt setup code */
	slvltovect = mach_softlvl_to_vect;
	addspl	= pops->psm_addspl;
	delspl	= pops->psm_delspl;

	if (pops->psm_translate_irq)
		psm_translate_irq = pops->psm_translate_irq;
	if (pops->psm_intr_ops)
		psm_intr_ops = pops->psm_intr_ops;

#if defined(PSMI_1_2) || defined(PSMI_1_3) || defined(PSMI_1_4)
	/*
	 * Time-of-day functionality now handled in TOD modules.
	 * (Warn about PSM modules that think that we're going to use
	 * their ops vectors.)
	 */
	if (pops->psm_tod_get)
		cmn_err(CE_WARN, "obsolete psm_tod_get op %p",
		    (void *)pops->psm_tod_get);

	if (pops->psm_tod_set)
		cmn_err(CE_WARN, "obsolete psm_tod_set op %p",
		    (void *)pops->psm_tod_set);
#endif

	if (pops->psm_notify_error) {
		psm_notify_error = mach_notify_error;
		notify_error = pops->psm_notify_error;
	}

	(*pops->psm_softinit)();

	/*
	 * Initialize the dispatcher's function hooks
	 * to enable CPU halting when idle.
	 * Do not use monitor/mwait if idle_cpu_use_hlt is not set (spin idle).
	 * Allocate monitor/mwait buffer for cpu0.
	 */
	if (idle_cpu_use_hlt) {
		if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait) {
			CPU->cpu_m.mcpu_mwait = mach_alloc_mwait(CPU);
			idle_cpu = cpu_idle_mwait;
		} else {
			idle_cpu = cpu_idle;
		}
	}

	mach_smpinit();
}
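
/*
 * An illustrative walk through the alignment logic below, assuming
 * cpuid_get_mwait_size() reports a 64-byte monitor line (the actual size
 * always comes from cpuid at runtime): if kmem_zalloc(64, KM_SLEEP) happens
 * to return a 64-byte aligned buffer, it is used as-is; otherwise that
 * buffer is freed, 128 bytes are allocated instead, and P2ROUNDUP() advances
 * the pointer to the next 64-byte boundary so the monitored line meets the
 * hardware's alignment requirement.
 */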

/*
 * Return a pointer to memory suitable for monitor/mwait use.  Memory must be
 * aligned as specified by cpuid (a cache line size).
 */
uint32_t *
mach_alloc_mwait(cpu_t *cp)
{
	size_t		mwait_size = cpuid_get_mwait_size(cp);
	uint32_t	*ret;

	if (mwait_size < sizeof (uint32_t) || !ISP2(mwait_size))
		panic("Can't handle mwait size %ld", (long)mwait_size);

	/*
	 * kmem_alloc() returns cache line size aligned data for mwait_size
	 * allocations. mwait_size is currently cache line sized. Neither
	 * of these implementation details are guaranteed to be true in the
	 * future.
	 *
	 * First try allocating mwait_size as kmem_alloc() currently returns
	 * correctly aligned memory. If kmem_alloc() does not return
	 * mwait_size aligned memory, then use mwait_size ROUNDUP.
	 */
	ret = kmem_zalloc(mwait_size, KM_SLEEP);
	if (ret == (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size)) {
		*ret = MWAIT_RUNNING;
		return (ret);
	} else {
		kmem_free(ret, mwait_size);
		ret = kmem_zalloc(mwait_size * 2, KM_SLEEP);
		ret = (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size);
		*ret = MWAIT_RUNNING;
		return (ret);
	}
}

static void
mach_smpinit(void)
{
	struct psm_ops		*pops;
	processorid_t		cpu_id;
	int			cnt;
	cpuset_t		cpumask;

	pops = mach_set[0];

	cpu_id = -1;
	cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
	for (cnt = 0, CPUSET_ZERO(cpumask); cpu_id != -1; cnt++) {
		CPUSET_ADD(cpumask, cpu_id);
		cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
	}

	mp_cpus = cpumask;

	/* MP related routines */
	ap_mlsetup = pops->psm_post_cpu_start;
	send_dirintf = pops->psm_send_ipi;

	/* optional MP related routines */
	if (pops->psm_shutdown)
		psm_shutdownf = pops->psm_shutdown;
	if (pops->psm_preshutdown)
		psm_preshutdownf = pops->psm_preshutdown;
	if (pops->psm_notify_func)
		psm_notifyf = pops->psm_notify_func;
	if (pops->psm_set_idlecpu)
		psm_set_idle_cpuf = pops->psm_set_idlecpu;
	if (pops->psm_unset_idlecpu)
		psm_unset_idle_cpuf = pops->psm_unset_idlecpu;

	psm_clkinit = pops->psm_clkinit;

	if (pops->psm_timer_reprogram)
		psm_timer_reprogram = pops->psm_timer_reprogram;

	if (pops->psm_timer_enable)
		psm_timer_enable = pops->psm_timer_enable;

	if (pops->psm_timer_disable)
		psm_timer_disable = pops->psm_timer_disable;

	if (pops->psm_post_cyclic_setup)
		psm_post_cyclic_setup = pops->psm_post_cyclic_setup;

	/* check for multiple CPUs */
	if (cnt < 2)
		return;

	/* check for MP platforms */
	if (pops->psm_cpu_start == NULL)
		return;

	/*
	 * Set the dispatcher hook to enable cpu "wake up"
	 * when a thread becomes runnable.
	 */
	if (idle_cpu_use_hlt)
		if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait)
			disp_enq_thread = cpu_wakeup_mwait;
		else
			disp_enq_thread = cpu_wakeup;

	if (pops->psm_disable_intr)
		psm_disable_intr = pops->psm_disable_intr;
	if (pops->psm_enable_intr)
		psm_enable_intr = pops->psm_enable_intr;

	psm_get_ipivect = pops->psm_get_ipivect;

	(void) add_avintr((void *)NULL, XC_HI_PIL, xc_serv, "xc_hi_intr",
	    (*pops->psm_get_ipivect)(XC_HI_PIL, PSM_INTR_IPI_HI),
	    (caddr_t)X_CALL_HIPRI, NULL, NULL, NULL);
	(void) add_avintr((void *)NULL, XC_MED_PIL, xc_serv, "xc_med_intr",
	    (*pops->psm_get_ipivect)(XC_MED_PIL, PSM_INTR_IPI_LO),
	    (caddr_t)X_CALL_MEDPRI, NULL, NULL, NULL);

	(void) (*pops->psm_get_ipivect)(XC_CPUPOKE_PIL, PSM_INTR_POKE);
}

static void
mach_picinit()
{
	struct psm_ops	*pops;

	pops = mach_set[0];

	/* register the interrupt handlers */
	setlvl = pops->psm_intr_enter;
	setlvlx = pops->psm_intr_exit;

	/* initialize the interrupt hardware */
	(*pops->psm_picinit)();

	/* set interrupt mask for current ipl */
	setspl = pops->psm_setspl;
	cli();
	setspl(CPU->cpu_pri);
}

uint_t cpu_freq;	/* MHz */
uint64_t cpu_freq_hz;	/* measured (in hertz) */

#define	MEGA_HZ		1000000

static uint64_t
mach_calchz(uint32_t pit_counter, uint64_t *processor_clks)
{
	uint64_t cpu_hz;

	if ((pit_counter == 0) || (*processor_clks == 0) ||
	    (*processor_clks > (((uint64_t)-1) / PIT_HZ)))
		return (0);

	cpu_hz = ((uint64_t)PIT_HZ * *processor_clks) / pit_counter;

	return (cpu_hz);
}

static uint64_t
mach_getcpufreq(void)
{
	uint32_t pit_counter;
	uint64_t processor_clks;

	if (x86_feature & X86_TSC) {
		/*
		 * We have a TSC. freq_tsc() knows how to measure the number
		 * of clock cycles sampled against the PIT.
		 */
		ulong_t flags = clear_int_flag();
		processor_clks = freq_tsc(&pit_counter);
		restore_int_flag(flags);
		return (mach_calchz(pit_counter, &processor_clks));
	} else if (x86_vendor == X86_VENDOR_Cyrix || x86_type == X86_TYPE_P5) {
#if defined(__amd64)
		panic("mach_getcpufreq: no TSC!");
#elif defined(__i386)
		/*
		 * We are a Cyrix based on a 6x86 core or an Intel Pentium
		 * for which freq_notsc() knows how to measure the number of
		 * elapsed clock cycles sampled against the PIT
		 */
		ulong_t flags = clear_int_flag();
		processor_clks = freq_notsc(&pit_counter);
		restore_int_flag(flags);
		return (mach_calchz(pit_counter, &processor_clks));
#endif	/* __i386 */
	}

	/* We do not know how to calculate cpu frequency for this cpu. */
	return (0);
}
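
/*
 * A worked example of the calculation above, using made-up sample values:
 * with PIT_HZ nominally 1193182 Hz, a run in which freq_tsc() counts
 * 50,000,000 TSC clocks while the PIT counts down 59,659 ticks (roughly
 * 50 ms) gives (1193182 * 50000000) / 59659, approximately 1,000,000,000 Hz,
 * which machhztomhz() then rounds to 1000 MHz.
 */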

/*
 * If the clock speed of a cpu is found to be reported incorrectly, do not add
 * to this array, instead improve the accuracy of the algorithm that determines
 * the clock speed of the processor or extend the implementation to support the
 * vendor as appropriate. This is here only to support adjusting the speed on
 * older slower processors that mach_fixcpufreq() would not be able to account
 * for otherwise.
 */
static int x86_cpu_freq[] = { 60, 75, 80, 90, 120, 160, 166, 175, 180, 233 };

/*
 * On fast processors the clock frequency that is measured may be off by
 * a few MHz from the value printed on the part.  This is a combination of
 * two factors: for such fast parts, being off by this much is within the
 * manufacturing tolerances, and the measurement itself is difficult enough
 * to introduce a small error. This function uses some heuristics in order
 * to tweak the value that was measured to match what is most likely printed
 * on the part.
 *
 * Some examples:
 *	AMD Athlon 1000 MHz measured as 998 MHz
 *	Intel Pentium III Xeon 733 MHz measured as 731 MHz
 *	Intel Pentium IV 1500 MHz measured as 1495 MHz
 *
 * If in the future this function is no longer sufficient to correct
 * for the error in the measurement, then the algorithm used to perform
 * the measurement will have to be improved in order to increase accuracy
 * rather than adding horrible and questionable kludges here.
 *
 * This is called after the cyclics subsystem because of the potential
 * that the heuristics within may give a worse estimate of the clock
 * frequency than the value that was measured.
 */
static void
mach_fixcpufreq(void)
{
	uint32_t freq, mul, near66, delta66, near50, delta50, fixed, delta, i;

	freq = (uint32_t)cpu_freq;

	/*
	 * Find the nearest integer multiple of 200/3 (about 66) MHz to the
	 * measured speed taking into account that the 667 MHz parts were
	 * the first to round-up.
	 */
	mul = (uint32_t)((3 * (uint64_t)freq + 100) / 200);
	near66 = (uint32_t)((200 * (uint64_t)mul + ((mul >= 10) ? 1 : 0)) / 3);
	delta66 = (near66 > freq) ? (near66 - freq) : (freq - near66);

	/* Find the nearest integer multiple of 50 MHz to the measured speed */
	mul = (freq + 25) / 50;
	near50 = mul * 50;
	delta50 = (near50 > freq) ? (near50 - freq) : (freq - near50);

	/* Find the closer of the two */
	if (delta66 < delta50) {
		fixed = near66;
		delta = delta66;
	} else {
		fixed = near50;
		delta = delta50;
	}

	if (fixed > INT_MAX)
		return;

	/*
	 * Some older parts have a core clock frequency that is not an
	 * integral multiple of 50 or 66 MHz. Check if one of the old
	 * clock frequencies is closer to the measured value than any
	 * of the integral multiples of 50 and 66, and if so set fixed
	 * and delta appropriately to represent the closest value.
	 */
	i = sizeof (x86_cpu_freq) / sizeof (int);
	while (i > 0) {
		i--;

		if (x86_cpu_freq[i] <= freq) {
			mul = freq - x86_cpu_freq[i];

			if (mul < delta) {
				fixed = x86_cpu_freq[i];
				delta = mul;
			}

			break;
		}

		mul = x86_cpu_freq[i] - freq;

		if (mul < delta) {
			fixed = x86_cpu_freq[i];
			delta = mul;
		}
	}

	/*
	 * Set a reasonable maximum for how much to correct the measured
	 * result by. This check is here to prevent the adjustment made
	 * by this function from being more harm than good. It is entirely
	 * possible that in the future parts will be made that are not
	 * integral multiples of 66 or 50 in clock frequency or that
	 * someone may overclock a part to some odd frequency. If the
	 * measured value is farther from the corrected value than
	 * allowed, then assume the corrected value is in error and use
	 * the measured value.
	 */
	if (6 < delta)
		return;

	cpu_freq = (int)fixed;
}

static int
machhztomhz(uint64_t cpu_freq_hz)
{
	uint64_t cpu_mhz;

	/* Round to nearest MHz */
	cpu_mhz = (cpu_freq_hz + (MEGA_HZ / 2)) / MEGA_HZ;

	if (cpu_mhz > INT_MAX)
		return (0);

	return ((int)cpu_mhz);
}

static int
mach_clkinit(int preferred_mode, int *set_mode)
{
	struct psm_ops	*pops;
	int		resolution;

	pops = mach_set[0];

	cpu_freq_hz = mach_getcpufreq();

	cpu_freq = machhztomhz(cpu_freq_hz);

	if (!(x86_feature & X86_TSC) || (cpu_freq == 0))
		tsc_gethrtime_enable = 0;

	if (tsc_gethrtime_enable) {
		tsc_hrtimeinit(cpu_freq_hz);
		gethrtimef = tsc_gethrtime;
		gethrtimeunscaledf = tsc_gethrtimeunscaled;
		scalehrtimef = tsc_scalehrtime;
		hrtime_tick = tsc_tick;
		tsc_gethrtime_initted = 1;
	} else {
		if (pops->psm_hrtimeinit)
			(*pops->psm_hrtimeinit)();
		gethrtimef = pops->psm_gethrtime;
		gethrtimeunscaledf = gethrtimef;
		/* scalehrtimef will remain dummy */
	}

	mach_fixcpufreq();

	if (mach_ver[0] >= PSM_INFO_VER01_3) {
		if ((preferred_mode == TIMER_ONESHOT) &&
		    (tsc_gethrtime_enable)) {

			resolution = (*pops->psm_clkinit)(0);
			if (resolution != 0) {
				*set_mode = TIMER_ONESHOT;
				return (resolution);
			}

		}

		/*
		 * Either periodic mode was requested or we could not set
		 * one-shot mode.
		 */
		resolution = (*pops->psm_clkinit)(hz);
		/*
		 * psm should be able to do periodic, so we do not check
		 * for return value of psm_clkinit here.
		 */
		*set_mode = TIMER_PERIODIC;
		return (resolution);
	} else {
		/*
		 * PSMI interface prior to PSMI_3 does not define a return
		 * value for psm_clkinit, so the return value is ignored.
		 */
		(void) (*pops->psm_clkinit)(hz);
		*set_mode = TIMER_PERIODIC;
		return (nsec_per_tick);
	}
}

/*ARGSUSED*/
static void
mach_psm_set_softintr(int ipl, struct av_softinfo *pending)
{
	struct psm_ops	*pops;

	/* invoke hardware interrupt */
	pops = mach_set[0];
	(*pops->psm_set_softintr)(ipl);
}

static int
mach_softlvl_to_vect(int ipl)
{
	int		softvect;
	struct psm_ops	*pops;

	pops = mach_set[0];

	/* check for null handler for set soft interrupt call */
	if (pops->psm_set_softintr == NULL) {
		setsoftint = av_set_softint_pending;
		return (PSM_SV_SOFTWARE);
	}

	softvect = (*pops->psm_softlvl_to_irq)(ipl);
	/* check for hardware scheme */
	if (softvect > PSM_SV_SOFTWARE) {
		setsoftint = mach_psm_set_softintr;
		return (softvect);
	}

	if (softvect == PSM_SV_SOFTWARE)
		setsoftint = av_set_softint_pending;
	else	/* hardware and software mixed scheme */
		setsoftint = mach_set_softintr;

	return (PSM_SV_SOFTWARE);
}

static void
mach_set_softintr(int ipl, struct av_softinfo *pending)
{
	struct psm_ops	*pops;

	/* set software pending bits */
	av_set_softint_pending(ipl, pending);

	/* check if dosoftint will be called at the end of intr */
	if (CPU_ON_INTR(CPU) || (curthread->t_intr))
		return;

	/* invoke hardware interrupt */
	pops = mach_set[0];
	(*pops->psm_set_softintr)(ipl);
}

#ifdef DEBUG
/*
 * This is here to allow us to simulate cpus that refuse to start.
 */
cpuset_t cpufailset;
#endif

int
mach_cpu_start(struct cpu *cp, void *ctx)
{
	struct psm_ops *pops = mach_set[0];
	processorid_t id = cp->cpu_id;

#ifdef DEBUG
	if (CPU_IN_SET(cpufailset, id))
		return (0);
#endif
	return ((*pops->psm_cpu_start)(id, ctx));
}

/*ARGSUSED*/
static int
mach_translate_irq(dev_info_t *dip, int irqno)
{
	return (irqno);	/* default to NO translation */
}

static void
mach_notify_error(int level, char *errmsg)
{
	/*
	 * SL_FATAL is passed in once panicstr is set, deliver it
	 * as CE_PANIC. Also, translate SL_ codes back to CE_
	 * codes for the psmi handler
	 */
	if (level & SL_FATAL)
		(*notify_error)(CE_PANIC, errmsg);
	else if (level & SL_WARN)
		(*notify_error)(CE_WARN, errmsg);
	else if (level & SL_NOTE)
		(*notify_error)(CE_NOTE, errmsg);
	else if (level & SL_CONSOLE)
		(*notify_error)(CE_CONT, errmsg);
}

/*
 * This provides the default basic intr_ops interface for the new DDI
 * interrupt framework if the PSM doesn't have one.
 *
 * Input:
 * dip     - pointer to the dev_info structure of the requested device
 * hdlp    - pointer to the internal interrupt handle structure for the
 *	     requested interrupt
 * intr_op - opcode for this call
 * result  - pointer to the integer that will hold the result to be
 *	     passed back if return value is PSM_SUCCESS
 *
 * Output:
 * return value is either PSM_SUCCESS or PSM_FAILURE
 */
static int
mach_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
	struct intrspec *ispec;

	switch (intr_op) {
	case PSM_INTR_OP_CHECK_MSI:
		*result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
		    DDI_INTR_TYPE_MSIX);
		break;
	case PSM_INTR_OP_ALLOC_VECTORS:
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			*result = 1;
		else
			*result = 0;
		break;
	case PSM_INTR_OP_FREE_VECTORS:
		break;
	case PSM_INTR_OP_NAVAIL_VECTORS:
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			*result = 1;
		else
			*result = 0;
		break;
	case PSM_INTR_OP_XLATE_VECTOR:
		ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
		*result = psm_translate_irq(dip, ispec->intrspec_vec);
		break;
	case PSM_INTR_OP_GET_CAP:
		*result = 0;
		break;
	case PSM_INTR_OP_GET_PENDING:
	case PSM_INTR_OP_CLEAR_MASK:
	case PSM_INTR_OP_SET_MASK:
	case PSM_INTR_OP_GET_SHARED:
	case PSM_INTR_OP_SET_PRI:
	case PSM_INTR_OP_SET_CAP:
	case PSM_INTR_OP_SET_CPU:
	case PSM_INTR_OP_GET_INTR:
	default:
		return (PSM_FAILURE);
	}
	return (PSM_SUCCESS);
}