/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/cpu_sgnblk_defs.h>
#include <vm/seg.h>
#include <sys/iommu.h>
#include <sys/vtrace.h>
#include <sys/intreg.h>
#include <sys/ivintr.h>
#include <sys/cpuvar.h>
#include <sys/systm.h>
#include <sys/machsystm.h>
#include <sys/cyclic.h>
#include <sys/cpu_sgn.h>

/* Per-cpu signature block pointers and the current sigblock cpu. */
extern cpu_sgnblk_t *cpu_sgnblkp[NCPU];
extern struct cpu *SIGBCPU;
extern void power_down(const char *);

/*
 * Interrupt numbers: the hw bootbus mondo interrupt (bbus_intr_inum)
 * and the PIL_13 soft interrupt used by sgnblk polling (bbus_poll_inum).
 */
uint_t bbus_intr_inum;
uint64_t bbus_poll_inum;

/*
 * Support for sgnblk polling.
 */

/* Internal function prototypes */
static void sgnblk_poll_init();
static uint_t bbus_poll(caddr_t arg1, caddr_t arg2);
static void sgnblk_poll_handler(void *unused);
#ifdef THROTTLE
static void sgnblk_poll_throttle(uint64_t interval);
#endif /* THROTTLE */

/*
 * Default sgnblk polling interval is every 5 seconds.
 */
#define	ONE_SECOND		(1000000)	/* in usecs */
#ifdef THROTTLE
#define	SGNBLK_POLL_INTERVAL	(5 * ONE_SECOND)
#define	SGNBLK_POLL_FAST	(ONE_SECOND >> 1)
#define	SGNBLK_POLL_FAST_WIN	((60 * ONE_SECOND) / \
				SGNBLK_POLL_FAST)
#else /* THROTTLE */
/*
 * Until we can find a way to throttle back to 0.5 second intervals
 * we're stuck fixed on 2.5 second intervals.
 */
#define	SGNBLK_POLL_INTERVAL	((2 * ONE_SECOND) + (ONE_SECOND >> 1))
#endif /* THROTTLE */

#define	MAX_SGNBLK_POLL_CLNT	5

/* Registered poll-client callbacks, dispatched from bbus_poll(). */
void (*pollclntfunc[MAX_SGNBLK_POLL_CLNT])();
/*
 * sgnblk_mutex		Protects juggling & sgnblk_poll_refs[].
 * sgnblk_poll_mutex	Protects pollclntfunc[].
 */
kmutex_t sgnblk_mutex;
kmutex_t sgnblk_poll_mutex;
static uint64_t sgnblk_poll_interval = SGNBLK_POLL_INTERVAL;	/* usecs */
#ifdef THROTTLE
static uint64_t sgnblk_poll_fast = SGNBLK_POLL_FAST;
static int64_t sgnblk_poll_fast_win = SGNBLK_POLL_FAST_WIN;
#endif /* THROTTLE */
/* Cpu id currently doing sgnblk polling; -1 until polling is set up. */
static processorid_t sgnblk_pollcpu = -1;
/*
 * Note that the sigblock polling depends on CY_HIGH_LEVEL
 * being higher than PIL_13 since we ultimately need to
 * dispatch a PIL_13 soft handler.
 * Also, we assume one sgnblk handler for the entire system.
 * Once upon a time we had them per-cpu. With the Cyclic stuff
 * we would have to bind our cyclic handler to a cpu and doing
 * this prevents that cpu from being offlined. Since the Cyclic
 * subsystem could indirectly juggle us without us knowing we
 * have to assume we're running from any possible cpu and not
 * always SIGBCPU.
 */
#ifdef THROTTLE
static cyclic_id_t sgnblk_poll_cycid = CYCLIC_NONE;
#endif /* THROTTLE */
/* Cyclic handler descriptor: run sgnblk_poll_handler at CY_HIGH_LEVEL. */
static cyc_handler_t sgnblk_poll_cychandler = {
	sgnblk_poll_handler,
	NULL,
	CY_HIGH_LEVEL
};
static cyc_time_t sgnblk_poll_time;

/*
 * Anybody that references the polling (SIGBCPU) can
 * register a callback function that will be called if
 * the polling cpu is juggled, e.g. during a DR operation.
 */
#define	MAX_SGNBLK_POLL_REFS	10

struct sgnblk_poll_refs {
	void (*callback)(cpu_sgnblk_t *sigbp, void *arg);
	void *arg;
} sgnblk_poll_refs[MAX_SGNBLK_POLL_REFS];

/*
 * Bootbus intr handler: Generic handler for all SSP/CBS
 * interrupt requests initiated via the hw bootbus intr
 * mechanism. This is similar to the level15
 * interrupt handling for sigb commands in the CS6400.
 * Most of these code were stolen from the sigb stuff in
 * in CS6400.
 */

extern struct cpu cpu0;

/*
 * Handle an unsolicited SSP->host mailbox command delivered via the
 * hw bootbus interrupt.  Reads the command from the current cpu's
 * sigblock host mailbox and acts on it (enter OBP, panic, or power
 * down on an environmental interrupt).  Returns 1 if the interrupt
 * was claimed (a command was present), 0 otherwise.
 */
/*ARGSUSED*/
static uint_t
bbus_intr(caddr_t arg)
{
	int cmd = 0;
	processorid_t cpu_id = CPU->cpu_id;
	int retflag;
	int resp = 0;
	proc_t *initpp;

	ASSERT(cpu_sgnblkp[cpu_id] != NULL);

	/*
	 * Check for unsolicited messages in the host's mailbox.
	 */
	retflag = cpu_sgnblkp[cpu_id]->sigb_host_mbox.flag;

	switch (retflag) {
	case CBS_TO_HOST:
		/* a response, if needed, flows back in the other direction */
		retflag = HOST_TO_CBS;
		break;
	default:
		retflag = SIGB_MBOX_EMPTY;
		break;
	}
	if (retflag == SIGB_MBOX_EMPTY)
		return (0);		/* interrupt not claimed */

	/*
	 * We only look for UNSOLICITED messages, i.e. commands.
	 * Responses to these commands are returned into the same
	 * mailbox from which the command was received, i.e. host's.
	 *
	 * If the host should solicit a message from the SSP, that
	 * message/command goes into the SSP's mailbox (sigb_ssp_mbox).
	 * The responses (from the SSP) to these messages will be
	 * read from the ssp mailbox by whomever solicited it, but
	 * will NOT be handled through this level 15 interrupt
	 * mechanism.
	 *
	 * Note that use of the flag field of the signature block mailbox
	 * structure and the mailbox protocol itself, serializes access
	 * to these mailboxes.
	 */

	resp = 0;

	/*
	 * The first sizeof (uint_t) bytes of the data field
	 * is the command.
	 */
	cmd = cpu_sgnblkp[cpu_id]->sigb_host_mbox.cmd;

	switch (cmd) {
	case SSP_GOTO_OBP:
		/*
		 * Let's set the mailbox flag to BUSY while we are in OBP
		 */
		cpu_sgnblkp[cpu_id]->sigb_host_mbox.flag = SIGB_MBOX_BUSY;

		debug_enter("SSP requested (SSP_GOTO_OBP)");
		/*
		 * This command does NOT require a response.
		 */
		resp = 0;
		break;

	case SSP_GOTO_PANIC:
		/*
		 * Let's reset the mailbox flag before we bail.
		 */
		cpu_sgnblkp[cpu_id]->sigb_host_mbox.flag = SIGB_MBOX_EMPTY;

		cmn_err(CE_PANIC, "SSP requested (SSP_GOTO_PANIC)\n");
		/* should never reach this point */

		resp = 0;
		break;
	case SSP_ENVIRON:
		/*
		 * Environmental Interrupt.
		 */

		/*
		 * Send SIGPWR to init(1) it will run rc0, which will uadmin to
		 * powerdown.
		 */

		mutex_enter(&pidlock);
		initpp = prfind(P_INITPID);
		mutex_exit(&pidlock);

		/*
		 * If we're still booting and init(1) isn't set up yet,
		 * simply halt.
		 */
		if (initpp == NULL) {
			extern void halt(char *);
			cmn_err(CE_WARN, "?Environmental Interrupt");
			power_down((char *)NULL);
			halt("Power off the System!\n");	/* just in case */
		}

		/*
		 * else, graceful shutdown with inittab and all getting involved
		 *
		 * XXX: Do we Need to modify the init process for the Cray 6400!
		 */
		psignal(initpp, SIGPWR);

		/*
		 * XXX: kick off a sanity timeout panic in case the /etc/inittab
		 * or /etc/rc0 files are hosed. The 6400 needs to hang here
		 * when we return from psignal.
		 *
		 * cmn_err(CE_PANIC, "SSP requested (SSP_ENVIRON)\n");
		 * should never reach this point
		 */

		resp = 0;
		break;
	/*
	 * Could handle more mailbox commands right here.
	 */

	default:
		resp = SIGB_BAD_MBOX_CMD;
		break;
	}

	/*
	 * If resp is non-zero then we'll automatically reset
	 * the handler_sigb lock once we've sent the response,
	 * however if no response is needed, then resetlck must
	 * be set so that the handler_sigb lock is reset.
	 */
	if (resp != 0) {
		/*
		 * Had some kind of trouble handling the mailbox
		 * command. Need to send back an error response
		 * and back out of the cpu_sgnblk handling.
		 */
		cpu_sgnblkp[cpu_id]->sigb_host_mbox.cmd = resp;
		bcopy((caddr_t)&cmd,
		    (caddr_t)&cpu_sgnblkp[cpu_id]->sigb_host_mbox.data[0],
		    sizeof (cmd));
		cpu_sgnblkp[cpu_id]->sigb_host_mbox.flag = retflag;
	} else {
		/*
		 * No response expected, but we still have to
		 * reset the flag to empty for the next person.
		 */
		cpu_sgnblkp[cpu_id]->sigb_host_mbox.flag = SIGB_MBOX_EMPTY;
	}
	return (1);		/* interrupt claimed */
}

/*
 * Install the hw bootbus interrupt handler (bbus_intr at PIL_13) and
 * allocate the PIL_13 soft interrupt used by sgnblk polling.
 */
void
register_bbus_intr()
{
	/*
	 * Starfire's ASIC have the capability to generate a mondo
	 * vector. The SSP uses this capability via the Boot Bus to
	 * send an interrupt to a domain.
	 *
	 * The SSP generates a mondo with:
	 *	ign = UPAID_TO_IGN(bootcpu_upaid)
	 *	ino = 0
	 *
	 * An interrupt handler is added for this inum.
	 */
	bbus_intr_inum = UPAID_TO_IGN(cpu0.cpu_id) * MAX_INO;
	VERIFY(add_ivintr(bbus_intr_inum, PIL_13, (intrfunc)bbus_intr,
	    NULL, NULL, NULL) == 0);


	/*
	 * Due to a HW flaw in starfire, liberal use
	 * of bootbus intrs under heavy system load
	 * may cause the machine to arbstop. The workaround
	 * is to provide a polling mechanism thru the signature
	 * block interface to allow another way for the SSP to
	 * interrupt the host. Applications like IDN which generate
	 * a high degree of SSP to host interruptions for
	 * synchronization will need to use the polling facility
	 * instead of the hw bootbus interrupt mechanism.
	 * The HW bootbus intr support is left intact as it
	 * will still be used by existing SSP applications for system
	 * recovery in the event of system hangs etc.. In such situations,
	 * HW bootbus intr is a better mechanism as it is HW generated
	 * level 15 interrupt that has a better chance of kicking
	 * a otherwise hung OS into recovery.
	 *
	 * Polling is done by scheduling a constant tick timer
	 * interrupt at a certain predefined interval.
	 * The handler will do a poll and if there is a
	 * "intr" request, scheduled a soft level 13 intr
	 * to handle it. Allocate the inum for the level
	 * 13 intr here.
	 */
	bbus_poll_inum = add_softintr(PIL_13, bbus_poll, 0, SOFTINT_ST);
}

/*
 * One-time setup of sgnblk polling: initialize the spin mutex, record
 * the polling cpu (SIGBCPU), and install the cyclic that drives
 * sgnblk_poll_handler.  Caller must hold sgnblk_mutex.
 */
static void
sgnblk_poll_init()
{
	ASSERT(MUTEX_HELD(&sgnblk_mutex));

	mutex_init(&sgnblk_poll_mutex, NULL,
	    MUTEX_SPIN, (void *)ipltospl(PIL_14));
	sgnblk_pollcpu = SIGBCPU->cpu_id;
	mutex_enter(&cpu_lock);
	sgnblk_poll_time.cyt_when = 0ull;
	/* interval is kept in usecs; cyclic interval is in nsecs */
	sgnblk_poll_time.cyt_interval = sgnblk_poll_interval * 1000ull;
#ifdef THROTTLE
	sgnblk_poll_cycid = cyclic_add(&sgnblk_poll_cychandler,
	    &sgnblk_poll_time);
#else /* THROTTLE */
	(void) cyclic_add(&sgnblk_poll_cychandler, &sgnblk_poll_time);
#endif /* THROTTLE */
	mutex_exit(&cpu_lock);
	ASSERT(sgnblk_pollcpu == SIGBCPU->cpu_id);
}

/*
 * Register a poll-client callback, called from bbus_poll() whenever a
 * poll "intr" request is seen.  Lazily initializes polling on first
 * use.  Returns 1 on success, 0 if all MAX_SGNBLK_POLL_CLNT slots are
 * in use.
 */
int
sgnblk_poll_register(void(*func)(processorid_t cpu_id,
					cpu_sgnblk_t *cpu_sgnblkp))
{
	int i;

	/*
	 * See if we need to initialize
	 * sgnblk polling
	 */
	mutex_enter(&sgnblk_mutex);
	if (sgnblk_pollcpu == -1)
		sgnblk_poll_init();
	mutex_exit(&sgnblk_mutex);

	mutex_enter(&sgnblk_poll_mutex);

	/*
	 * Look for a empty slot
	 */
	for (i = 0; i < MAX_SGNBLK_POLL_CLNT; i++) {
		if (pollclntfunc[i] == NULL) {
			pollclntfunc[i] = func;
			mutex_exit(&sgnblk_poll_mutex);
			return (1);
		}
	}
	mutex_exit(&sgnblk_poll_mutex);
	return (0);	/* failed */
}

/*
 * Remove a previously registered poll-client callback.  Returns 1 on
 * success, 0 if func was not found in pollclntfunc[].
 */
int
sgnblk_poll_unregister(void(*func)(processorid_t cpu_id,
					cpu_sgnblk_t *cpu_sgnblkp))
{
	int i;

	mutex_enter(&sgnblk_poll_mutex);

	/*
	 * Look for the slot matching the function passed in.
	 */
	for (i = 0; i < MAX_SGNBLK_POLL_CLNT; i++) {
		if (pollclntfunc[i] == func) {
			pollclntfunc[i] = NULL;
			mutex_exit(&sgnblk_poll_mutex);
			return (1);
		}
	}
	mutex_exit(&sgnblk_poll_mutex);
	return (0);	/* failed */
}


/*
 * For DR support.
 * Juggle poll tick client to another cpu
 * Assumed to be called single threaded.
 */
void
juggle_sgnblk_poll(struct cpu *cp)
{
	int i;

	mutex_enter(&sgnblk_mutex);

	/* nothing to do if polling never started or cp already polls */
	if (sgnblk_pollcpu == -1 ||
	    (cp != NULL && sgnblk_pollcpu == cp->cpu_id)) {
		mutex_exit(&sgnblk_mutex);
		return;
	}

	/*
	 * Disable by simply returning here
	 * Passing a null cp is assumed to be
	 * sgnpoll disable request.
	 */
	if (cp == NULL) {
		/* notify referencers with a NULL sigblock */
		for (i = 0; i < MAX_SGNBLK_POLL_REFS; i++) {
			void (*func)(), *arg;

			if ((func = sgnblk_poll_refs[i].callback) != NULL) {
				arg = sgnblk_poll_refs[i].arg;
				(*func)(NULL, arg);
			}
		}
		mutex_exit(&sgnblk_mutex);
		return;
	}

	sgnblk_pollcpu = cp->cpu_id;

	/* notify referencers of the new polling cpu's sigblock */
	for (i = 0; i < MAX_SGNBLK_POLL_REFS; i++) {
		void (*func)(), *arg;

		if ((func = sgnblk_poll_refs[i].callback) != NULL) {
			arg = sgnblk_poll_refs[i].arg;
			(*func)(cpu_sgnblkp[sgnblk_pollcpu], arg);
		}
	}

	mutex_exit(&sgnblk_mutex);
}

#ifdef THROTTLE
/*
 * Timeout callback: re-install the poll cyclic so the interval most
 * recently stored in sgnblk_poll_time takes effect.  An interval of 0
 * leaves the cyclic removed.
 */
/*ARGSUSED0*/
static void
_sgnblk_poll_throttle(void *unused)
{
	mutex_enter(&cpu_lock);
	if (sgnblk_poll_cycid != CYCLIC_NONE) {
		cyclic_remove(sgnblk_poll_cycid);
		sgnblk_poll_cycid = CYCLIC_NONE;
	}

	if (sgnblk_poll_time.cyt_interval > 0ull)
		sgnblk_poll_cycid = cyclic_add(&sgnblk_poll_cychandler,
		    &sgnblk_poll_time);
	mutex_exit(&cpu_lock);
}

/*
 * We don't want to remove the cyclic within the context of
 * the handler so we kick off the throttle in background
 * via a timeout call.
 */
static void
sgnblk_poll_throttle(uint64_t new_interval)
{
	mutex_enter(&cpu_lock);
	sgnblk_poll_time.cyt_when = 0ull;
	/* new_interval is in usecs; cyclic interval is in nsecs */
	sgnblk_poll_time.cyt_interval = new_interval * 1000ull;
	mutex_exit(&cpu_lock);

	(void) timeout(_sgnblk_poll_throttle, NULL, (clock_t)0);
}
#endif /* THROTTLE */

/*
 * High priority interrupt handler (PIL_14)
 * for signature block mbox polling.
 */
/*ARGSUSED0*/
static void
sgnblk_poll_handler(void *unused)
{
	processorid_t cpuid = SIGBCPU->cpu_id;
#ifdef THROTTLE
	/* remaining fast-poll ticks and the currently programmed interval */
	static int64_t sb_window = -1;
	static uint64_t sb_interval = 0;
#endif /* THROTTLE */

	if (cpu_sgnblkp[cpuid] == NULL)
		return;

	/*
	 * Poll for SSP requests
	 */
	if (cpu_sgnblkp[cpuid]->sigb_host_mbox.intr == SIGB_INTR_SEND) {
		/* reset the flag - sure hope this is atomic */
		cpu_sgnblkp[cpuid]->sigb_host_mbox.intr = SIGB_INTR_OFF;

#ifdef THROTTLE
		/*
		 * Go into fast poll mode for a short duration
		 * (SGNBLK_POLL_FAST_WIN) in SGNBLK_POLL_FAST interval.
		 * The assumption here is that we just got activity
		 * on the mbox poll, the probability of more coming down
		 * the pipe is high - so let's look more often.
		 */
		if ((sb_window < 0) && (sb_interval > sgnblk_poll_fast)) {
			sb_interval = sgnblk_poll_fast;
			sgnblk_poll_throttle(sb_interval);
		}
		sb_window = sgnblk_poll_fast_win;
#endif /* THROTTLE */

		/* schedule poll processing */
		setsoftint(bbus_poll_inum);

#ifdef THROTTLE
	} else if (sb_window >= 0) {
		/* Revert to slow polling once fast window ends */
		if ((--sb_window < 0) &&
		    (sb_interval < sgnblk_poll_interval)) {
			sb_interval = sgnblk_poll_interval;
			sgnblk_poll_throttle(sb_interval);
		}
#endif /* THROTTLE */
	}
}

/*
 * PIL_13 soft interrupt handler: dispatch the poll request to every
 * registered poll client.  Always returns 1 (claimed).
 */
/*ARGSUSED*/
static uint_t
bbus_poll(caddr_t arg1, caddr_t arg2)
{
	int i;
	processorid_t cpu_id = SIGBCPU->cpu_id;
	cpu_sgnblk_t *sgnblkp = cpu_sgnblkp[cpu_id];

	/*
	 * Go thru the poll client array and call the
	 * poll client functions one by one
	 */
	mutex_enter(&sgnblk_poll_mutex);

	for (i = 0; i < MAX_SGNBLK_POLL_CLNT; i++) {
		void (*func)(processorid_t cpuid, cpu_sgnblk_t *sgnblkp);

		if ((func = pollclntfunc[i]) != NULL) {
			/* drop the spin lock across the client callback */
			mutex_exit(&sgnblk_poll_mutex);
			(*func)(cpu_id, sgnblkp);

			mutex_enter(&sgnblk_poll_mutex);
		}
	}
	mutex_exit(&sgnblk_poll_mutex);

	return (1);
}

/*
 * Register a callback to be notified when the polling cpu is juggled
 * (e.g. during DR).  The callback is invoked immediately with the
 * current polling cpu's sigblock, or NULL if polling is not active.
 * Returns 0 on success; -1 if callback is NULL, already registered,
 * or no free slot exists.
 */
int
sgnblk_poll_reference(void (*callback)(cpu_sgnblk_t *sigb, void *arg),
		void *arg)
{
	int i, slot;
	cpu_sgnblk_t *sigbp;

	if (callback == NULL)
		return (-1);

	mutex_enter(&sgnblk_mutex);
	/*
	 * First verify caller is not already registered.
	 */
	slot = -1;
	for (i = 0; i < MAX_SGNBLK_POLL_REFS; i++) {
		if ((slot == -1) && (sgnblk_poll_refs[i].callback == NULL)) {
			slot = i;
			continue;
		}
		if (sgnblk_poll_refs[i].callback == callback) {
			mutex_exit(&sgnblk_mutex);
			return (-1);
		}
	}
	/*
	 * Now find an empty entry.
	 */
	if (slot == -1) {
		mutex_exit(&sgnblk_mutex);
		return (-1);
	}
	sgnblk_poll_refs[slot].callback = callback;
	sgnblk_poll_refs[slot].arg = arg;

	sigbp = (sgnblk_pollcpu != -1) ? cpu_sgnblkp[sgnblk_pollcpu] : NULL;

	(*callback)(sigbp, arg);

	mutex_exit(&sgnblk_mutex);

	return (0);
}

/*
 * Remove a callback registered via sgnblk_poll_reference().  The
 * callback is invoked one final time with a NULL sigblock before its
 * slot is cleared.
 */
void
sgnblk_poll_unreference(void (*callback)(cpu_sgnblk_t *sigb, void *arg))
{
	int i;

	mutex_enter(&sgnblk_mutex);
	for (i = 0; i < MAX_SGNBLK_POLL_REFS; i++) {
		if (sgnblk_poll_refs[i].callback == callback) {
			void *arg;

			arg = sgnblk_poll_refs[i].arg;
			(*callback)(NULL, arg);
			sgnblk_poll_refs[i].callback = NULL;
			sgnblk_poll_refs[i].arg = NULL;
			break;
		}
	}
	mutex_exit(&sgnblk_mutex);
}