/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#define PSMI_1_5
#include <sys/smp_impldefs.h>
#include <sys/psm.h>
#include <sys/psm_modctl.h>
#include <sys/pit.h>
#include <sys/cmn_err.h>
#include <sys/strlog.h>
#include <sys/clock.h>
#include <sys/debug.h>
#include <sys/rtc.h>
#include <sys/x86_archext.h>
#include <sys/cpupart.h>
#include <sys/cpuvar.h>
#include <sys/cmt.h>
#include <sys/cpu.h>
#include <sys/disp.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/promif.h>
#include <sys/mach_intr.h>
#include <vm/hat_i86.h>

#define OFFSETOF(s, m) (size_t)(&(((s *)0)->m))

/*
 * Local function prototypes
 */
static int mp_disable_intr(processorid_t cpun);
static void mp_enable_intr(processorid_t cpun);
static void mach_init();
static void mach_picinit();
static uint64_t mach_calchz(uint32_t pit_counter, uint64_t *processor_clks);
static int machhztomhz(uint64_t cpu_freq_hz);
static uint64_t mach_getcpufreq(void);
static void mach_fixcpufreq(void);
static int mach_clkinit(int, int *);
static void mach_smpinit(void);
static void mach_set_softintr(int ipl, struct av_softinfo *);
static int mach_softlvl_to_vect(int ipl);
static void mach_get_platform(int owner);
static void mach_construct_info();
static int mach_translate_irq(dev_info_t *dip, int irqno);
static int mach_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *,
    psm_intr_op_t, int *);
static void mach_notify_error(int level, char *errmsg);
static hrtime_t dummy_hrtime(void);
static void dummy_scalehrtime(hrtime_t *);
static void cpu_idle(void);
static void cpu_wakeup(cpu_t *, int);
static void cpu_idle_mwait(void);
static void cpu_wakeup_mwait(cpu_t *, int);
/*
 * External reference functions
 */
extern void return_instr();
extern uint64_t freq_tsc(uint32_t *);
#if defined(__i386)
extern uint64_t freq_notsc(uint32_t *);
#endif
extern void pc_gethrestime(timestruc_t *);
extern int cpuid_get_coreid(cpu_t *);
extern int cpuid_get_chipid(cpu_t *);

/*
 * PSM functions initialization
 */
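/*
 * Each hook below starts out pointing at a safe default (return_instr(),
 * a dummy routine, or NULL) and is overridden with the selected PSM
 * module's routines by mach_init(), mach_smpinit() and mach_clkinit()
 * once a PSM module has been chosen.
 */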
void (*psm_shutdownf)(int, int) = (void (*)(int, int))return_instr;
void (*psm_preshutdownf)(int, int) = (void (*)(int, int))return_instr;
void (*psm_notifyf)(int) = (void (*)(int))return_instr;
void (*psm_set_idle_cpuf)(int) = (void (*)(int))return_instr;
void (*psm_unset_idle_cpuf)(int) = (void (*)(int))return_instr;
void (*psminitf)() = mach_init;
void (*picinitf)() = return_instr;
int (*clkinitf)(int, int *) = (int (*)(int, int *))return_instr;
int (*ap_mlsetup)() = (int (*)(void))return_instr;
void (*send_dirintf)() = return_instr;
void (*setspl)(int) = (void (*)(int))return_instr;
int (*addspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
int (*delspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
void (*setsoftint)(int, struct av_softinfo *) =
    (void (*)(int, struct av_softinfo *))return_instr;
int (*slvltovect)(int) = (int (*)(int))return_instr;
int (*setlvl)(int, int *) = (int (*)(int, int *))return_instr;
void (*setlvlx)(int, int) = (void (*)(int, int))return_instr;
int (*psm_disable_intr)(int) = mp_disable_intr;
void (*psm_enable_intr)(int) = mp_enable_intr;
hrtime_t (*gethrtimef)(void) = dummy_hrtime;
hrtime_t (*gethrtimeunscaledf)(void) = dummy_hrtime;
void (*scalehrtimef)(hrtime_t *) = dummy_scalehrtime;
int (*psm_translate_irq)(dev_info_t *, int) = mach_translate_irq;
void (*gethrestimef)(timestruc_t *) = pc_gethrestime;
void (*psm_notify_error)(int, char *) = (void (*)(int, char *))NULL;
int (*psm_get_clockirq)(int) = NULL;
int (*psm_get_ipivect)(int, int) = NULL;

int (*psm_clkinit)(int) = NULL;
void (*psm_timer_reprogram)(hrtime_t) = NULL;
void (*psm_timer_enable)(void) = NULL;
void (*psm_timer_disable)(void) = NULL;
void (*psm_post_cyclic_setup)(void *arg) = NULL;
int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *, psm_intr_op_t,
    int *) = mach_intr_ops;

void (*notify_error)(int, char *) = (void (*)(int, char *))return_instr;
void (*hrtime_tick)(void) = return_instr;

int tsc_gethrtime_enable = 1;
int tsc_gethrtime_initted = 0;

/*
 * Local Static Data
 */
static struct psm_ops mach_ops;
static struct psm_ops *mach_set[4] = {&mach_ops, NULL, NULL, NULL};
static ushort_t mach_ver[4] = {0, 0, 0, 0};

/*
 * If non-zero, idle cpus will become "halted" when there's
 * no work to do.
 */
int idle_cpu_use_hlt = 1;

/*
 * If non-zero, idle cpus will use mwait if available to halt instead of hlt.
 */
int idle_cpu_prefer_mwait = 1;
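/*
 * The pg_plat_*() routines below implement the platform side of the CMT
 * processor group (PG) interface: they describe which hardware sharing
 * relationships (pipeline, cache, chip) exist on this platform and how
 * CPUs map onto them.
 */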
/*ARGSUSED*/
int
pg_plat_hw_shared(cpu_t *cp, pghw_type_t hw)
{
        switch (hw) {
        case PGHW_IPIPE:
                if (x86_feature & (X86_HTT)) {
                        /*
                         * Hyper-threading is SMT
                         */
                        return (1);
                } else {
                        return (0);
                }
        case PGHW_CHIP:
                if (x86_feature & (X86_CMP|X86_HTT))
                        return (1);
                else
                        return (0);
        case PGHW_CACHE:
                if (cpuid_get_ncpu_sharing_last_cache(cp) > 1)
                        return (1);
                else
                        return (0);
        default:
                return (0);
        }
}

/*
 * Compare two CPUs and see if they have a pghw_type_t sharing relationship
 * If pghw_type_t is an unsupported hardware type, then return -1
 */
int
pg_plat_cpus_share(cpu_t *cpu_a, cpu_t *cpu_b, pghw_type_t hw)
{
        id_t pgp_a, pgp_b;

        pgp_a = pg_plat_hw_instance_id(cpu_a, hw);
        pgp_b = pg_plat_hw_instance_id(cpu_b, hw);

        if (pgp_a == -1 || pgp_b == -1)
                return (-1);

        return (pgp_a == pgp_b);
}

/*
 * Return a physical instance identifier for known hardware sharing
 * relationships
 */
id_t
pg_plat_hw_instance_id(cpu_t *cpu, pghw_type_t hw)
{
        switch (hw) {
        case PGHW_IPIPE:
                return (cpuid_get_coreid(cpu));
        case PGHW_CACHE:
                return (cpuid_get_last_lvl_cacheid(cpu));
        case PGHW_CHIP:
                return (cpuid_get_chipid(cpu));
        default:
                return (-1);
        }
}
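/*
 * Return the level of the given sharing relationship in the platform's
 * CMT hierarchy (its index in hw_hier[] below, 0 being the most local
 * level), or -1 if the relationship is not part of the hierarchy.
 */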
int
pg_plat_hw_level(pghw_type_t hw)
{
        int i;
        static pghw_type_t hw_hier[] = {
                PGHW_IPIPE,
                PGHW_CACHE,
                PGHW_CHIP,
                PGHW_NUM_COMPONENTS
        };

        for (i = 0; hw_hier[i] != PGHW_NUM_COMPONENTS; i++) {
                if (hw_hier[i] == hw)
                        return (i);
        }
        return (-1);
}

/*
 * Return 1 if CMT load balancing policies should be
 * implemented across instances of the specified hardware
 * sharing relationship.
 */
int
pg_plat_cmt_load_bal_hw(pghw_type_t hw)
{
        if (hw == PGHW_IPIPE ||
            hw == PGHW_FPU ||
            hw == PGHW_CHIP ||
            hw == PGHW_CACHE)
                return (1);
        else
                return (0);
}


/*
 * Return 1 if thread affinity policies should be implemented
 * for instances of the specified hardware sharing relationship.
 */
int
pg_plat_cmt_affinity_hw(pghw_type_t hw)
{
        if (hw == PGHW_CACHE)
                return (1);
        else
                return (0);
}

id_t
pg_plat_get_core_id(cpu_t *cpu)
{
        return ((id_t)cpuid_get_coreid(cpu));
}

void
cmp_set_nosteal_interval(void)
{
        /* Set the nosteal interval (used by disp_getbest()) to 100us */
        nosteal_nsec = 100000UL;
}

/*
 * Routine to ensure initial callers to hrtime gets 0 as return
 */
static hrtime_t
dummy_hrtime(void)
{
        return (0);
}

/* ARGSUSED */
static void
dummy_scalehrtime(hrtime_t *ticks)
{}

/*
 * Idle the present CPU until awoken via an interrupt
 */
static void
cpu_idle(void)
{
        cpu_t *cpup = CPU;
        processorid_t cpun = cpup->cpu_id;
        cpupart_t *cp = cpup->cpu_part;
        int hset_update = 1;

        /*
         * If this CPU is online and there are multiple CPUs
         * in the system, then we should note our halting
         * by adding ourselves to the partition's halted CPU
         * bitmap. This allows other CPUs to find/awaken us when
         * work becomes available.
         */
        if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
                hset_update = 0;

        /*
         * Add ourselves to the partition's halted CPUs bitmask
         * and set our HALTED flag, if necessary.
         *
         * When a thread becomes runnable, it is placed on the queue
         * and then the halted cpuset is checked to determine who
         * (if anyone) should be awoken. We therefore need to first
         * add ourselves to the halted cpuset, and then check if there
         * is any work available.
         *
         * Note that memory barriers after updating the HALTED flag
         * are not necessary since an atomic operation (updating the bitmap)
         * immediately follows. On x86 the atomic operation acts as a
         * memory barrier for the update of cpu_disp_flags.
         */
        if (hset_update) {
                cpup->cpu_disp_flags |= CPU_DISP_HALTED;
                CPUSET_ATOMIC_ADD(cp->cp_mach->mc_haltset, cpun);
        }

        /*
         * Check to make sure there's really nothing to do.
         * Work destined for this CPU may become available after
         * this check. We'll be notified through the clearing of our
         * bit in the halted CPU bitmask, and a poke.
         */
        if (disp_anywork()) {
                if (hset_update) {
                        cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
                        CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
                }
                return;
        }

        /*
         * We're on our way to being halted.
         *
         * Disable interrupts now, so that we'll awaken immediately
         * after halting if someone tries to poke us between now and
         * the time we actually halt.
         *
         * We check for the presence of our bit after disabling interrupts.
         * If it's cleared, we'll return. If the bit is cleared after
         * we check then the poke will pop us out of the halted state.
         *
         * This means that the ordering of the poke and the clearing
         * of the bit by cpu_wakeup is important.
         * cpu_wakeup() must clear, then poke.
         * cpu_idle() must disable interrupts, then check for the bit.
         */
        cli();

        if (hset_update && !CPU_IN_SET(cp->cp_mach->mc_haltset, cpun)) {
                cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
                sti();
                return;
        }

        /*
         * The check for anything locally runnable is here for performance
         * and isn't needed for correctness. disp_nrunnable ought to be
         * in our cache still, so it's inexpensive to check, and if there
         * is anything runnable we won't have to wait for the poke.
         */
        if (cpup->cpu_disp->disp_nrunnable != 0) {
                if (hset_update) {
                        cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
                        CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
                }
                sti();
                return;
        }

        mach_cpu_idle();

        /*
         * We're no longer halted
         */
        if (hset_update) {
                cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
                CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
        }
}


/*
 * If "cpu" is halted, then wake it up clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
        uint_t cpu_found;
        int result;
        cpupart_t *cp;

        cp = cpu->cpu_part;
        if (CPU_IN_SET(cp->cp_mach->mc_haltset, cpu->cpu_id)) {
                /*
                 * Clear the halted bit for that CPU since it will be
                 * poked in a moment.
                 */
                CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpu->cpu_id);
                /*
                 * We may find the current CPU present in the halted cpuset
                 * if we're in the context of an interrupt that occurred
                 * before we had a chance to clear our bit in cpu_idle().
                 * Poking ourself is obviously unnecessary, since if
                 * we're here, we're not halted.
                 */
                if (cpu != CPU)
                        poke_cpu(cpu->cpu_id);
                return;
        } else {
                /*
                 * This cpu isn't halted, but it's idle or undergoing a
                 * context switch. No need to awaken anyone else.
                 */
                if (cpu->cpu_thread == cpu->cpu_idle_thread ||
                    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
                        return;
        }

        /*
         * No need to wake up other CPUs if the thread we just enqueued
         * is bound.
         */
        if (bound)
                return;


        /*
         * See if there are any other halted CPUs. If there are, then
         * select one, and awaken it.
         * It's possible that after we find a CPU, somebody else
         * will awaken it before we get the chance.
         * In that case, look again.
         */
        do {
                CPUSET_FIND(cp->cp_mach->mc_haltset, cpu_found);
                if (cpu_found == CPUSET_NOTINSET)
                        return;

                ASSERT(cpu_found >= 0 && cpu_found < NCPU);
                CPUSET_ATOMIC_XDEL(cp->cp_mach->mc_haltset, cpu_found, result);
        } while (result < 0);

        if (cpu_found != CPU->cpu_id)
                poke_cpu(cpu_found);
}
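/*
 * The monitor/mwait based routines below are drop-in replacements for
 * cpu_idle() and cpu_wakeup(). mach_init() and mach_smpinit() install them
 * together when the processor supports MONITOR/MWAIT and
 * idle_cpu_prefer_mwait is set.
 */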
/*
 * Idle the present CPU until awoken via touching its monitored line
 */
static void
cpu_idle_mwait(void)
{
        volatile uint32_t *mcpu_mwait = CPU->cpu_m.mcpu_mwait;
        cpu_t *cpup = CPU;
        processorid_t cpun = cpup->cpu_id;
        cpupart_t *cp = cpup->cpu_part;
        int hset_update = 1;

        /*
         * Set our mcpu_mwait here, so we can tell if anyone tries to
         * wake us between now and when we call mwait. No other cpu will
         * attempt to set our mcpu_mwait until we add ourself to the haltset.
         */
        *mcpu_mwait = MWAIT_HALTED;

        /*
         * If this CPU is online and there are multiple CPUs
         * in the system, then we should note our halting
         * by adding ourselves to the partition's halted CPU
         * bitmap. This allows other CPUs to find/awaken us when
         * work becomes available.
         */
        if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
                hset_update = 0;

        /*
         * Add ourselves to the partition's halted CPUs bitmask
         * and set our HALTED flag, if necessary.
         *
         * When a thread becomes runnable, it is placed on the queue
         * and then the halted cpuset is checked to determine who
         * (if anyone) should be awoken. We therefore need to first
         * add ourselves to the halted cpuset, and then check if there
         * is any work available.
         *
         * Note that memory barriers after updating the HALTED flag
         * are not necessary since an atomic operation (updating the bitmap)
         * immediately follows. On x86 the atomic operation acts as a
         * memory barrier for the update of cpu_disp_flags.
         */
        if (hset_update) {
                cpup->cpu_disp_flags |= CPU_DISP_HALTED;
                CPUSET_ATOMIC_ADD(cp->cp_mach->mc_haltset, cpun);
        }

        /*
         * Check to make sure there's really nothing to do.
         * Work destined for this CPU may become available after
         * this check. We'll be notified through the clearing of our
         * bit in the halted CPU bitmask, and a write to our mcpu_mwait.
         *
         * disp_anywork() checks disp_nrunnable, so we do not have to later.
         */
        if (disp_anywork()) {
                if (hset_update) {
                        cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
                        CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
                }
                return;
        }

        /*
         * We're on our way to being halted.
         * To avoid a lost wakeup, arm the monitor before checking if another
         * cpu wrote to mcpu_mwait to wake us up.
         */
        i86_monitor(mcpu_mwait, 0, 0);
        if (*mcpu_mwait == MWAIT_HALTED) {
                tlb_going_idle();
                i86_mwait(0, 0);
                tlb_service();
        }

        /*
         * We're no longer halted
         */
        if (hset_update) {
                cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
                CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
        }
}

/*
 * If "cpu" is halted in mwait, then wake it up clearing its halted bit in
 * advance. Otherwise, see if other CPUs in the cpu partition are halted and
 * need to be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup_mwait(cpu_t *cp, int bound)
{
        cpupart_t *cpu_part;
        uint_t cpu_found;
        int result;

        cpu_part = cp->cpu_part;

        /*
         * Clear the halted bit for that CPU since it will be woken up
         * in a moment.
         */
        if (CPU_IN_SET(cpu_part->cp_mach->mc_haltset, cp->cpu_id)) {
                /*
                 * Clear the halted bit for that CPU since it will be
                 * poked in a moment.
                 */
                CPUSET_ATOMIC_DEL(cpu_part->cp_mach->mc_haltset, cp->cpu_id);
                /*
                 * We may find the current CPU present in the halted cpuset
                 * if we're in the context of an interrupt that occurred
                 * before we had a chance to clear our bit in cpu_idle().
                 * Waking ourself is obviously unnecessary, since if
                 * we're here, we're not halted.
                 *
                 * monitor/mwait wakeup via writing to our cache line is
                 * harmless and less expensive than always checking if we
                 * are waking ourself, which is an uncommon case.
                 */
                MWAIT_WAKEUP(cp);       /* write to monitored line */
                return;
        } else {
                /*
                 * This cpu isn't halted, but it's idle or undergoing a
                 * context switch. No need to awaken anyone else.
                 */
                if (cp->cpu_thread == cp->cpu_idle_thread ||
                    cp->cpu_disp_flags & CPU_DISP_DONTSTEAL)
                        return;
        }

        /*
         * No need to wake up other CPUs if the thread we just enqueued
         * is bound.
         */
        if (bound)
                return;


        /*
         * See if there are any other halted CPUs. If there are, then
         * select one, and awaken it.
         * It's possible that after we find a CPU, somebody else
         * will awaken it before we get the chance.
         * In that case, look again.
         */
        do {
                CPUSET_FIND(cpu_part->cp_mach->mc_haltset, cpu_found);
                if (cpu_found == CPUSET_NOTINSET)
                        return;

                ASSERT(cpu_found >= 0 && cpu_found < NCPU);
                CPUSET_ATOMIC_XDEL(cpu_part->cp_mach->mc_haltset, cpu_found,
                    result);
        } while (result < 0);

        /*
         * Do not check if cpu_found is ourself, as monitor/mwait wakeup is
         * cheap.
         */
        MWAIT_WAKEUP(cpu[cpu_found]);   /* write to monitored line */
}

void (*cpu_pause_handler)(volatile char *) = NULL;
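/*
 * Default implementations for the psm_disable_intr/psm_enable_intr hooks:
 * temporarily bind to the target CPU and raise (or restore) its base SPL,
 * using the cpu_intr_actv bit for XC_MED_PIL-1, so that the CPU stops (or
 * resumes) fielding interrupts below the cross call level.
 */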
static int
mp_disable_intr(int cpun)
{
        /*
         * switch to the offline cpu
         */
        affinity_set(cpun);
        /*
         * raise ipl to just below cross call
         */
        splx(XC_MED_PIL-1);
        /*
         * set base spl to prevent the next swtch to idle from
         * lowering back to ipl 0
         */
        CPU->cpu_intr_actv |= (1 << (XC_MED_PIL-1));
        set_base_spl();
        affinity_clear();
        return (DDI_SUCCESS);
}

static void
mp_enable_intr(int cpun)
{
        /*
         * switch to the online cpu
         */
        affinity_set(cpun);
        /*
         * clear the interrupt active mask
         */
        CPU->cpu_intr_actv &= ~(1 << (XC_MED_PIL-1));
        set_base_spl();
        (void) spl0();
        affinity_clear();
}
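/*
 * Copy the client PSM module's ops into the system default ops vector.
 * The PSMI version advertised by the module determines how many psm_ops
 * entries it is expected to provide; only non-NULL entries override the
 * defaults.
 */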
static void
mach_get_platform(int owner)
{
        void **srv_opsp;
        void **clt_opsp;
        int i;
        int total_ops;

        /* fix up psm ops */
        srv_opsp = (void **)mach_set[0];
        clt_opsp = (void **)mach_set[owner];
        if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01)
                total_ops = sizeof (struct psm_ops_ver01) /
                    sizeof (void (*)(void));
        else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_1)
                /* no psm_notify_func */
                total_ops = OFFSETOF(struct psm_ops, psm_notify_func) /
                    sizeof (void (*)(void));
        else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_2)
                /* no psm_timer funcs */
                total_ops = OFFSETOF(struct psm_ops, psm_timer_reprogram) /
                    sizeof (void (*)(void));
        else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_3)
                /* no psm_preshutdown function */
                total_ops = OFFSETOF(struct psm_ops, psm_preshutdown) /
                    sizeof (void (*)(void));
        else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_4)
                /* no psm_intr_ops function */
                total_ops = OFFSETOF(struct psm_ops, psm_intr_ops) /
                    sizeof (void (*)(void));
        else
                total_ops = sizeof (struct psm_ops) / sizeof (void (*)(void));

        /*
         * Save the version of the PSM module, in case we need to
         * behave differently based on version.
         */
        mach_ver[0] = mach_ver[owner];

        for (i = 0; i < total_ops; i++)
                if (clt_opsp[i] != NULL)
                        srv_opsp[i] = clt_opsp[i];
}
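/*
 * Walk the list of PSM modules that identified themselves, recording each
 * module's ops vector and PSMI version by owner class, then layer the ops
 * onto the defaults: system default first, then the exclusive module, then
 * the override module. If more than one exclusive or override module is
 * present, warn and fall back to single processor mode.
 */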
static void
mach_construct_info()
{
        struct psm_sw *swp;
        int mach_cnt[PSM_OWN_OVERRIDE+1] = {0};
        int conflict_owner = 0;

        if (psmsw->psw_forw == psmsw)
                panic("No valid PSM modules found");
        mutex_enter(&psmsw_lock);
        for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
                if (!(swp->psw_flag & PSM_MOD_IDENTIFY))
                        continue;
                mach_set[swp->psw_infop->p_owner] = swp->psw_infop->p_ops;
                mach_ver[swp->psw_infop->p_owner] = swp->psw_infop->p_version;
                mach_cnt[swp->psw_infop->p_owner]++;
        }
        mutex_exit(&psmsw_lock);

        mach_get_platform(PSM_OWN_SYS_DEFAULT);

        /* check to see whether there are any conflicts */
        if (mach_cnt[PSM_OWN_EXCLUSIVE] > 1)
                conflict_owner = PSM_OWN_EXCLUSIVE;
        if (mach_cnt[PSM_OWN_OVERRIDE] > 1)
                conflict_owner = PSM_OWN_OVERRIDE;
        if (conflict_owner) {
                /* remove all psm modules except uppc */
                cmn_err(CE_WARN,
                    "Conflicts detected on the following PSM modules:");
                mutex_enter(&psmsw_lock);
                for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
                        if (swp->psw_infop->p_owner == conflict_owner)
                                cmn_err(CE_WARN, "%s ",
                                    swp->psw_infop->p_mach_idstring);
                }
                mutex_exit(&psmsw_lock);
                cmn_err(CE_WARN,
                    "Setting the system back to SINGLE processor mode!");
                cmn_err(CE_WARN,
                    "Please edit /etc/mach to remove the invalid PSM module.");
                return;
        }

        if (mach_set[PSM_OWN_EXCLUSIVE])
                mach_get_platform(PSM_OWN_EXCLUSIVE);

        if (mach_set[PSM_OWN_OVERRIDE])
                mach_get_platform(PSM_OWN_OVERRIDE);
}
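/*
 * mach_init() is installed as psminitf. It selects the PSM ops to use,
 * wires up the PIC, clock, spl and soft interrupt hooks, chooses the idle
 * loop (hlt or monitor/mwait), and then performs the MP specific setup in
 * mach_smpinit().
 */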
static void
mach_init()
{
        struct psm_ops *pops;

        mach_construct_info();

        pops = mach_set[0];

        /* register the interrupt and clock initialization routines */
        picinitf = mach_picinit;
        clkinitf = mach_clkinit;
        psm_get_clockirq = pops->psm_get_clockirq;

        /* register the interrupt setup code */
        slvltovect = mach_softlvl_to_vect;
        addspl = pops->psm_addspl;
        delspl = pops->psm_delspl;

        if (pops->psm_translate_irq)
                psm_translate_irq = pops->psm_translate_irq;
        if (pops->psm_intr_ops)
                psm_intr_ops = pops->psm_intr_ops;

#if defined(PSMI_1_2) || defined(PSMI_1_3) || defined(PSMI_1_4)
        /*
         * Time-of-day functionality now handled in TOD modules.
         * (Warn about PSM modules that think that we're going to use
         * their ops vectors.)
         */
        if (pops->psm_tod_get)
                cmn_err(CE_WARN, "obsolete psm_tod_get op %p",
                    (void *)pops->psm_tod_get);

        if (pops->psm_tod_set)
                cmn_err(CE_WARN, "obsolete psm_tod_set op %p",
                    (void *)pops->psm_tod_set);
#endif

        if (pops->psm_notify_error) {
                psm_notify_error = mach_notify_error;
                notify_error = pops->psm_notify_error;
        }

        (*pops->psm_softinit)();

        /*
         * Initialize the dispatcher's function hooks
         * to enable CPU halting when idle.
         * Do not use monitor/mwait if idle_cpu_use_hlt is not set (spin idle).
         * Allocate monitor/mwait buffer for cpu0.
         */
        if (idle_cpu_use_hlt) {
                if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait) {
                        CPU->cpu_m.mcpu_mwait = mach_alloc_mwait(CPU);
                        idle_cpu = cpu_idle_mwait;
                } else {
                        idle_cpu = cpu_idle;
                }
        }

        mach_smpinit();
}

/*
 * Return a pointer to memory suitable for monitor/mwait use. Memory must be
 * aligned as specified by cpuid (a cache line size).
 */
uint32_t *
mach_alloc_mwait(cpu_t *cp)
{
        size_t mwait_size = cpuid_get_mwait_size(cp);
        uint32_t *ret;

        if (mwait_size < sizeof (uint32_t) || !ISP2(mwait_size))
                panic("Can't handle mwait size %ld", (long)mwait_size);

        /*
         * kmem_alloc() returns cache line size aligned data for mwait_size
         * allocations. mwait_size is currently cache line sized. Neither
         * of these implementation details is guaranteed to be true in the
         * future.
         *
         * First try allocating mwait_size as kmem_alloc() currently returns
         * correctly aligned memory. If kmem_alloc() does not return
         * mwait_size aligned memory, then use mwait_size ROUNDUP.
         */
        ret = kmem_zalloc(mwait_size, KM_SLEEP);
        if (ret == (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size)) {
                *ret = MWAIT_RUNNING;
                return (ret);
        } else {
                kmem_free(ret, mwait_size);
                ret = kmem_zalloc(mwait_size * 2, KM_SLEEP);
                ret = (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size);
                *ret = MWAIT_RUNNING;
                return (ret);
        }
}
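/*
 * Enumerate the CPUs known to the PSM, install the MP related PSM hooks
 * (IPI send, CPU start, shutdown/notify and timer routines), and register
 * the cross call and poke interrupt handlers. If fewer than two CPUs are
 * present, or the PSM cannot start CPUs, the dispatcher wakeup hook and
 * the cross call handlers are not installed.
 */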
static void
mach_smpinit(void)
{
        struct psm_ops *pops;
        processorid_t cpu_id;
        int cnt;
        cpuset_t cpumask;

        pops = mach_set[0];

        cpu_id = -1;
        cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
        for (cnt = 0, CPUSET_ZERO(cpumask); cpu_id != -1; cnt++) {
                CPUSET_ADD(cpumask, cpu_id);
                cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
        }

        mp_cpus = cpumask;

        /* MP related routines */
        ap_mlsetup = pops->psm_post_cpu_start;
        send_dirintf = pops->psm_send_ipi;

        /* optional MP related routines */
        if (pops->psm_shutdown)
                psm_shutdownf = pops->psm_shutdown;
        if (pops->psm_preshutdown)
                psm_preshutdownf = pops->psm_preshutdown;
        if (pops->psm_notify_func)
                psm_notifyf = pops->psm_notify_func;
        if (pops->psm_set_idlecpu)
                psm_set_idle_cpuf = pops->psm_set_idlecpu;
        if (pops->psm_unset_idlecpu)
                psm_unset_idle_cpuf = pops->psm_unset_idlecpu;

        psm_clkinit = pops->psm_clkinit;

        if (pops->psm_timer_reprogram)
                psm_timer_reprogram = pops->psm_timer_reprogram;

        if (pops->psm_timer_enable)
                psm_timer_enable = pops->psm_timer_enable;

        if (pops->psm_timer_disable)
                psm_timer_disable = pops->psm_timer_disable;

        if (pops->psm_post_cyclic_setup)
                psm_post_cyclic_setup = pops->psm_post_cyclic_setup;

        /* check for multiple cpus */
        if (cnt < 2)
                return;

        /* check for MP platforms */
        if (pops->psm_cpu_start == NULL)
                return;

        /*
         * Set the dispatcher hook to enable cpu "wake up"
         * when a thread becomes runnable.
         */
        if (idle_cpu_use_hlt)
                if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait)
                        disp_enq_thread = cpu_wakeup_mwait;
                else
                        disp_enq_thread = cpu_wakeup;

        if (pops->psm_disable_intr)
                psm_disable_intr = pops->psm_disable_intr;
        if (pops->psm_enable_intr)
                psm_enable_intr = pops->psm_enable_intr;

        psm_get_ipivect = pops->psm_get_ipivect;

        (void) add_avintr((void *)NULL, XC_HI_PIL, xc_serv, "xc_hi_intr",
            (*pops->psm_get_ipivect)(XC_HI_PIL, PSM_INTR_IPI_HI),
            (caddr_t)X_CALL_HIPRI, NULL, NULL, NULL);
        (void) add_avintr((void *)NULL, XC_MED_PIL, xc_serv, "xc_med_intr",
            (*pops->psm_get_ipivect)(XC_MED_PIL, PSM_INTR_IPI_LO),
            (caddr_t)X_CALL_MEDPRI, NULL, NULL, NULL);

        (void) (*pops->psm_get_ipivect)(XC_CPUPOKE_PIL, PSM_INTR_POKE);
}
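/*
 * Installed as picinitf by mach_init(): register the PSM's interrupt
 * entry/exit and setspl handlers, initialize the interrupt controller
 * hardware, and set the interrupt mask for the current priority level.
 */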
static void
mach_picinit()
{
        struct psm_ops *pops;

        pops = mach_set[0];

        /* register the interrupt handlers */
        setlvl = pops->psm_intr_enter;
        setlvlx = pops->psm_intr_exit;

        /* initialize the interrupt hardware */
        (*pops->psm_picinit)();

        /* set interrupt mask for current ipl */
        setspl = pops->psm_setspl;
        cli();
        setspl(CPU->cpu_pri);
}

uint_t cpu_freq;        /* MHz */
uint64_t cpu_freq_hz;   /* measured (in hertz) */

#define MEGA_HZ 1000000
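/*
 * Convert a sampled (PIT tick count, processor clock count) pair into a
 * frequency in Hz: cpu_hz = PIT_HZ * processor_clks / pit_counter.
 * Returns 0 if either input is unusable or the multiplication would
 * overflow 64 bits.
 */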
static uint64_t
mach_calchz(uint32_t pit_counter, uint64_t *processor_clks)
{
        uint64_t cpu_hz;

        if ((pit_counter == 0) || (*processor_clks == 0) ||
            (*processor_clks > (((uint64_t)-1) / PIT_HZ)))
                return (0);

        cpu_hz = ((uint64_t)PIT_HZ * *processor_clks) / pit_counter;

        return (cpu_hz);
}

static uint64_t
mach_getcpufreq(void)
{
        uint32_t pit_counter;
        uint64_t processor_clks;

        if (x86_feature & X86_TSC) {
                /*
                 * We have a TSC. freq_tsc() knows how to measure the number
                 * of clock cycles sampled against the PIT.
                 */
                ulong_t flags = clear_int_flag();
                processor_clks = freq_tsc(&pit_counter);
                restore_int_flag(flags);
                return (mach_calchz(pit_counter, &processor_clks));
        } else if (x86_vendor == X86_VENDOR_Cyrix || x86_type == X86_TYPE_P5) {
#if defined(__amd64)
                panic("mach_getcpufreq: no TSC!");
#elif defined(__i386)
                /*
                 * We are a Cyrix based on a 6x86 core or an Intel Pentium
                 * for which freq_notsc() knows how to measure the number of
                 * elapsed clock cycles sampled against the PIT
                 */
                ulong_t flags = clear_int_flag();
                processor_clks = freq_notsc(&pit_counter);
                restore_int_flag(flags);
                return (mach_calchz(pit_counter, &processor_clks));
#endif  /* __i386 */
        }

        /* We do not know how to calculate cpu frequency for this cpu. */
        return (0);
}

/*
 * If the clock speed of a cpu is found to be reported incorrectly, do not add
 * to this array, instead improve the accuracy of the algorithm that determines
 * the clock speed of the processor or extend the implementation to support the
 * vendor as appropriate. This is here only to support adjusting the speed on
 * older slower processors that mach_fixcpufreq() would not be able to account
 * for otherwise.
 */
static int x86_cpu_freq[] = { 60, 75, 80, 90, 120, 160, 166, 175, 180, 233 };

/*
 * On fast processors the clock frequency that is measured may be off by
 * a few MHz from the value printed on the part. This is a combination of
 * two factors: for such fast parts, being off by this much is within the
 * manufacturing tolerances, and the measurement itself is difficult enough
 * to introduce a small error. This function uses some heuristics in order
 * to tweak the value that was measured to match what is most likely
 * printed on the part.
 *
 * Some examples:
 *      AMD Athlon 1000 MHz measured as 998 MHz
 *      Intel Pentium III Xeon 733 MHz measured as 731 MHz
 *      Intel Pentium IV 1500 MHz measured as 1495 MHz
 *
 * If in the future this function is no longer sufficient to correct
 * for the error in the measurement, then the algorithm used to perform
 * the measurement will have to be improved in order to increase accuracy
 * rather than adding horrible and questionable kludges here.
 *
 * This is called after the cyclics subsystem because of the potential
 * that the heuristics within may give a worse estimate of the clock
 * frequency than the value that was measured.
 */
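/*
 * Illustrative walk-through, using the 998 MHz Athlon example above:
 * mul = (3 * 998 + 100) / 200 = 15, so near66 = (200 * 15 + 1) / 3 = 1000
 * with delta66 = 2; near50 = ((998 + 25) / 50) * 50 = 1000 with delta50 = 2.
 * delta66 is not less than delta50, so fixed = 1000 and delta = 2; none of
 * the legacy x86_cpu_freq[] entries is closer, and since delta <= 6 the
 * reported cpu_freq becomes 1000.
 */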
static void
mach_fixcpufreq(void)
{
        uint32_t freq, mul, near66, delta66, near50, delta50, fixed, delta, i;

        freq = (uint32_t)cpu_freq;

        /*
         * Find the nearest integer multiple of 200/3 (about 66) MHz to the
         * measured speed taking into account that the 667 MHz parts were
         * the first to round-up.
         */
        mul = (uint32_t)((3 * (uint64_t)freq + 100) / 200);
        near66 = (uint32_t)((200 * (uint64_t)mul + ((mul >= 10) ? 1 : 0)) / 3);
        delta66 = (near66 > freq) ? (near66 - freq) : (freq - near66);

        /* Find the nearest integer multiple of 50 MHz to the measured speed */
        mul = (freq + 25) / 50;
        near50 = mul * 50;
        delta50 = (near50 > freq) ? (near50 - freq) : (freq - near50);

        /* Find the closer of the two */
        if (delta66 < delta50) {
                fixed = near66;
                delta = delta66;
        } else {
                fixed = near50;
                delta = delta50;
        }

        if (fixed > INT_MAX)
                return;

        /*
         * Some older parts have a core clock frequency that is not an
         * integral multiple of 50 or 66 MHz. Check if one of the old
         * clock frequencies is closer to the measured value than any
         * of the integral multiples of 50 and 66, and if so set fixed
         * and delta appropriately to represent the closest value.
         */
        i = sizeof (x86_cpu_freq) / sizeof (int);
        while (i > 0) {
                i--;

                if (x86_cpu_freq[i] <= freq) {
                        mul = freq - x86_cpu_freq[i];

                        if (mul < delta) {
                                fixed = x86_cpu_freq[i];
                                delta = mul;
                        }

                        break;
                }

                mul = x86_cpu_freq[i] - freq;

                if (mul < delta) {
                        fixed = x86_cpu_freq[i];
                        delta = mul;
                }
        }

        /*
         * Set a reasonable maximum for how much to correct the measured
         * result by. This check is here to prevent the adjustment made
         * by this function from doing more harm than good. It is entirely
         * possible that in the future parts will be made that are not
         * integral multiples of 66 or 50 in clock frequency or that
         * someone may overclock a part to some odd frequency. If the
         * measured value is farther from the corrected value than
         * allowed, then assume the corrected value is in error and use
         * the measured value.
         */
        if (6 < delta)
                return;

        cpu_freq = (int)fixed;
}


static int
machhztomhz(uint64_t cpu_freq_hz)
{
        uint64_t cpu_mhz;

        /* Round to nearest MHZ */
        cpu_mhz = (cpu_freq_hz + (MEGA_HZ / 2)) / MEGA_HZ;

        if (cpu_mhz > INT_MAX)
                return (0);

        return ((int)cpu_mhz);

}
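/*
 * Measure the CPU frequency, choose between the TSC based and PSM provided
 * hrtime routines, and initialize the PSM clock. For PSMI 1.3 and later,
 * one-shot mode is attempted when it is preferred and TSC based hrtime is
 * usable, falling back to periodic mode; older PSMI versions always run
 * the clock in periodic mode.
 */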
static int
mach_clkinit(int preferred_mode, int *set_mode)
{
        struct psm_ops *pops;
        int resolution;

        pops = mach_set[0];

        cpu_freq_hz = mach_getcpufreq();

        cpu_freq = machhztomhz(cpu_freq_hz);

        if (!(x86_feature & X86_TSC) || (cpu_freq == 0))
                tsc_gethrtime_enable = 0;

        if (tsc_gethrtime_enable) {
                tsc_hrtimeinit(cpu_freq_hz);
                gethrtimef = tsc_gethrtime;
                gethrtimeunscaledf = tsc_gethrtimeunscaled;
                scalehrtimef = tsc_scalehrtime;
                hrtime_tick = tsc_tick;
                tsc_gethrtime_initted = 1;
        } else {
                if (pops->psm_hrtimeinit)
                        (*pops->psm_hrtimeinit)();
                gethrtimef = pops->psm_gethrtime;
                gethrtimeunscaledf = gethrtimef;
                /* scalehrtimef will remain dummy */
        }

        mach_fixcpufreq();

        if (mach_ver[0] >= PSM_INFO_VER01_3) {
                if ((preferred_mode == TIMER_ONESHOT) &&
                    (tsc_gethrtime_enable)) {

                        resolution = (*pops->psm_clkinit)(0);
                        if (resolution != 0) {
                                *set_mode = TIMER_ONESHOT;
                                return (resolution);
                        }

                }

                /*
                 * Either periodic mode was requested or one-shot mode
                 * could not be set.
                 */
                resolution = (*pops->psm_clkinit)(hz);
                /*
                 * psm should be able to do periodic, so we do not check
                 * for the return value of psm_clkinit here.
                 */
                *set_mode = TIMER_PERIODIC;
                return (resolution);
        } else {
                /*
                 * PSMI interface prior to PSMI_3 does not define a return
                 * value for psm_clkinit, so the return value is ignored.
                 */
                (void) (*pops->psm_clkinit)(hz);
                *set_mode = TIMER_PERIODIC;
                return (nsec_per_tick);
        }
}

/*ARGSUSED*/
static void
mach_psm_set_softintr(int ipl, struct av_softinfo *pending)
{
        struct psm_ops *pops;

        /* invoke hardware interrupt */
        pops = mach_set[0];
        (*pops->psm_set_softintr)(ipl);
}

static int
mach_softlvl_to_vect(int ipl)
{
        int softvect;
        struct psm_ops *pops;

        pops = mach_set[0];

        /* check for null handler for set soft interrupt call */
        if (pops->psm_set_softintr == NULL) {
                setsoftint = av_set_softint_pending;
                return (PSM_SV_SOFTWARE);
        }

        softvect = (*pops->psm_softlvl_to_irq)(ipl);
        /* check for hardware scheme */
        if (softvect > PSM_SV_SOFTWARE) {
                setsoftint = mach_psm_set_softintr;
                return (softvect);
        }

        if (softvect == PSM_SV_SOFTWARE)
                setsoftint = av_set_softint_pending;
        else    /* hardware and software mixed scheme */
                setsoftint = mach_set_softintr;

        return (PSM_SV_SOFTWARE);
}

static void
mach_set_softintr(int ipl, struct av_softinfo *pending)
{
        struct psm_ops *pops;

        /* set software pending bits */
        av_set_softint_pending(ipl, pending);

        /* check if dosoftint will be called at the end of intr */
        if (CPU_ON_INTR(CPU) || (curthread->t_intr))
                return;

        /* invoke hardware interrupt */
        pops = mach_set[0];
        (*pops->psm_set_softintr)(ipl);
}

#ifdef DEBUG
/*
 * This is here to allow us to simulate cpus that refuse to start.
 */
cpuset_t cpufailset;
#endif

int
mach_cpu_start(struct cpu *cp, void *ctx)
{
        struct psm_ops *pops = mach_set[0];
        processorid_t id = cp->cpu_id;

#ifdef DEBUG
        if (CPU_IN_SET(cpufailset, id))
                return (0);
#endif
        return ((*pops->psm_cpu_start)(id, ctx));
}

/*ARGSUSED*/
static int
mach_translate_irq(dev_info_t *dip, int irqno)
{
        return (irqno); /* default to NO translation */
}

static void
mach_notify_error(int level, char *errmsg)
{
        /*
         * SL_FATAL is passed in once panicstr is set; deliver it
         * as CE_PANIC. Also, translate SL_ codes back to CE_
         * codes for the PSMI handler.
         */
        if (level & SL_FATAL)
                (*notify_error)(CE_PANIC, errmsg);
        else if (level & SL_WARN)
                (*notify_error)(CE_WARN, errmsg);
        else if (level & SL_NOTE)
                (*notify_error)(CE_NOTE, errmsg);
        else if (level & SL_CONSOLE)
                (*notify_error)(CE_CONT, errmsg);
}

/*
 * This provides the default basic intr_ops interface for the new DDI
 * interrupt framework if the PSM doesn't have one.
 *
 * Input:
 * dip     - pointer to the dev_info structure of the requested device
 * hdlp    - pointer to the internal interrupt handle structure for the
 *           requested interrupt
 * intr_op - opcode for this call
 * result  - pointer to the integer that will hold the result to be
 *           passed back if return value is PSM_SUCCESS
 *
 * Output:
 * return value is either PSM_SUCCESS or PSM_FAILURE
 */
static int
mach_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
        struct intrspec *ispec;

        switch (intr_op) {
        case PSM_INTR_OP_CHECK_MSI:
                *result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
                    DDI_INTR_TYPE_MSIX);
                break;
        case PSM_INTR_OP_ALLOC_VECTORS:
                if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
                        *result = 1;
                else
                        *result = 0;
                break;
        case PSM_INTR_OP_FREE_VECTORS:
                break;
        case PSM_INTR_OP_NAVAIL_VECTORS:
                if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
                        *result = 1;
                else
                        *result = 0;
                break;
        case PSM_INTR_OP_XLATE_VECTOR:
                ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
                *result = psm_translate_irq(dip, ispec->intrspec_vec);
                break;
        case PSM_INTR_OP_GET_CAP:
                *result = 0;
                break;
        case PSM_INTR_OP_GET_PENDING:
        case PSM_INTR_OP_CLEAR_MASK:
        case PSM_INTR_OP_SET_MASK:
        case PSM_INTR_OP_GET_SHARED:
        case PSM_INTR_OP_SET_PRI:
        case PSM_INTR_OP_SET_CAP:
        case PSM_INTR_OP_SET_CPU:
        case PSM_INTR_OP_GET_INTR:
        default:
                return (PSM_FAILURE);
        }
        return (PSM_SUCCESS);
}