1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * A CPR derivative specifically for sbd 31 */ 32 33 #include <sys/types.h> 34 #include <sys/systm.h> 35 #include <sys/machparam.h> 36 #include <sys/machsystm.h> 37 #include <sys/ddi.h> 38 #define SUNDDI_IMPL 39 #include <sys/sunddi.h> 40 #include <sys/sunndi.h> 41 #include <sys/devctl.h> 42 #include <sys/time.h> 43 #include <sys/kmem.h> 44 #include <nfs/lm.h> 45 #include <sys/ddi_impldefs.h> 46 #include <sys/ndi_impldefs.h> 47 #include <sys/obpdefs.h> 48 #include <sys/cmn_err.h> 49 #include <sys/debug.h> 50 #include <sys/errno.h> 51 #include <sys/callb.h> 52 #include <sys/clock.h> 53 #include <sys/x_call.h> 54 #include <sys/cpuvar.h> 55 #include <sys/epm.h> 56 #include <sys/vfs.h> 57 58 #ifdef DEBUG 59 #include <sys/note.h> 60 #endif 61 62 #include <sys/promif.h> 63 #include <sys/conf.h> 64 #include <sys/cyclic.h> 65 66 #include <sys/sbd_ioctl.h> 67 #include <sys/sbd.h> 68 #include <sys/sbdp_priv.h> 69 #include <sys/cpu_sgnblk_defs.h> 70 71 static char * 72 sbdp_get_err_buf(sbd_error_t *ep) 73 { 74 return (ep->e_rsc); 75 } 76 77 extern void e_ddi_enter_driver_list(struct devnames *dnp, int *listcnt); 78 extern void e_ddi_exit_driver_list(struct devnames *dnp, int listcnt); 79 extern int is_pseudo_device(dev_info_t *dip); 80 81 extern kmutex_t cpu_lock; 82 83 static int sbdp_is_real_device(dev_info_t *dip); 84 #ifdef DEBUG 85 static int sbdp_bypass_device(char *dname); 86 #endif 87 static int sbdp_check_dip(dev_info_t *dip, void *arg, uint_t ref); 88 89 static int sbdp_resolve_devname(dev_info_t *dip, char *buffer, 90 char *alias); 91 92 int sbdp_test_suspend(sbdp_handle_t *hp); 93 94 #define SR_STATE(srh) ((srh)->sr_suspend_state) 95 #define SR_SET_STATE(srh, state) (SR_STATE((srh)) = (state)) 96 #define SR_FAILED_DIP(srh) ((srh)->sr_failed_dip) 97 98 #define SR_FLAG_WATCHDOG 0x1 99 #define SR_CHECK_FLAG(srh, flag) ((srh)->sr_flags & (flag)) 100 #define SR_SET_FLAG(srh, flag) ((srh)->sr_flags |= (flag)) 101 #define SR_CLEAR_FLAG(srh, flag) ((srh)->sr_flags &= ~(flag)) 102 103 #ifdef DEBUG 104 /* 105 * Just for testing. List of drivers to bypass when performing a suspend. 106 */ 107 static char *sbdp_bypass_list[] = { 108 /* "sgsbbc", this is an example when needed */ 109 "" 110 }; 111 #endif 112 113 #define SKIP_SYNC /* bypass sync ops in sbdp_suspend */ 114 115 /* 116 * sbdp_skip_user_threads is used to control if user threads should 117 * be suspended. If sbdp_skip_user_threads is true, the rest of the 118 * flags are not used; if it is false, sbdp_check_user_stop_result 119 * will be used to control whether or not we need to check suspend 120 * result, and sbdp_allow_blocked_threads will be used to control 121 * whether or not we allow suspend to continue if there are blocked 122 * threads. We allow all combinations of sbdp_check_user_stop_result 123 * and sbdp_allow_block_threads, even though it might not make much 124 * sense to not allow block threads when we don't even check stop 125 * result. 126 */ 127 static int sbdp_skip_user_threads = 0; /* default to FALSE */ 128 static int sbdp_check_user_stop_result = 1; /* default to TRUE */ 129 static int sbdp_allow_blocked_threads = 1; /* default to TRUE */ 130 131 132 static void 133 sbdp_stop_intr(void) 134 { 135 kpreempt_disable(); 136 cyclic_suspend(); 137 } 138 139 static void 140 sbdp_enable_intr(void) 141 { 142 cyclic_resume(); 143 kpreempt_enable(); 144 } 145 146 sbdp_sr_handle_t * 147 sbdp_get_sr_handle(void) 148 { 149 sbdp_sr_handle_t *srh; 150 srh = kmem_zalloc(sizeof (sbdp_sr_handle_t), KM_SLEEP); 151 152 return (srh); 153 } 154 155 void 156 sbdp_release_sr_handle(sbdp_sr_handle_t *srh) 157 { 158 ASSERT(SR_FAILED_DIP(srh) == NULL); 159 kmem_free((caddr_t)srh, sizeof (sbdp_sr_handle_t)); 160 } 161 162 static int 163 sbdp_is_real_device(dev_info_t *dip) 164 { 165 struct regspec *regbuf = NULL; 166 int length = 0; 167 int rc; 168 169 if (ddi_get_driver(dip) == NULL) 170 return (0); 171 172 if (DEVI(dip)->devi_pm_flags & (PMC_NEEDS_SR|PMC_PARENTAL_SR)) 173 return (1); 174 if (DEVI(dip)->devi_pm_flags & PMC_NO_SR) 175 return (0); 176 177 /* 178 * now the general case 179 */ 180 rc = ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg", 181 (caddr_t)®buf, &length); 182 ASSERT(rc != DDI_PROP_NO_MEMORY); 183 if (rc != DDI_PROP_SUCCESS) { 184 return (0); 185 } else { 186 if ((length > 0) && (regbuf != NULL)) 187 kmem_free(regbuf, length); 188 return (1); 189 } 190 } 191 192 #ifdef DEBUG 193 static int 194 sbdp_bypass_device(char *dname) 195 { 196 int i; 197 char **lname; 198 /* check the bypass list */ 199 for (i = 0, lname = &sbdp_bypass_list[i]; **lname != '\0'; lname++) { 200 SBDP_DBG_QR("Checking %s\n", *lname); 201 if (strcmp(dname, sbdp_bypass_list[i++]) == 0) 202 return (1); 203 } 204 return (0); 205 } 206 #endif 207 208 static int 209 sbdp_resolve_devname(dev_info_t *dip, char *buffer, char *alias) 210 { 211 major_t devmajor; 212 char *aka, *name; 213 214 *buffer = *alias = 0; 215 216 if (dip == NULL) 217 return (-1); 218 219 if ((name = ddi_get_name(dip)) == NULL) 220 name = "<null name>"; 221 222 aka = name; 223 224 if ((devmajor = ddi_name_to_major(aka)) != -1) 225 aka = ddi_major_to_name(devmajor); 226 227 (void) strcpy(buffer, name); 228 229 if (strcmp(name, aka)) 230 (void) strcpy(alias, aka); 231 else 232 *alias = 0; 233 234 return (0); 235 } 236 237 typedef struct sbdp_ref { 238 int *refcount; 239 sbd_error_t *sep; 240 } sbdp_ref_t; 241 242 static int 243 sbdp_check_dip(dev_info_t *dip, void *arg, uint_t ref) 244 { 245 char *dname; 246 sbdp_ref_t *sbrp = (sbdp_ref_t *)arg; 247 248 if (dip == NULL) 249 return (DDI_WALK_CONTINUE); 250 251 ASSERT(sbrp->sep != NULL); 252 ASSERT(sbrp->refcount != NULL); 253 254 if (!sbdp_is_real_device(dip)) 255 return (DDI_WALK_CONTINUE); 256 257 dname = ddi_binding_name(dip); 258 259 if ((strcmp(dname, "pciclass,060940") == 0) || (strcmp(dname, 260 "pciclass,060980") == 0)) { 261 (void) ddi_pathname(dip, sbdp_get_err_buf(sbrp->sep)); 262 sbdp_set_err(sbrp->sep, ESBD_BUSY, NULL); 263 (*sbrp->refcount)++; 264 return (DDI_WALK_TERMINATE); 265 } 266 267 #ifdef DEBUG 268 if (sbdp_bypass_device(dname)) 269 return (DDI_WALK_CONTINUE); 270 #endif 271 272 if (ref) { 273 (*sbrp->refcount)++; 274 SBDP_DBG_QR("\n%s (major# %d) is referenced\n", 275 dname, ddi_name_to_major(dname)); 276 (void) ddi_pathname(dip, sbdp_get_err_buf(sbrp->sep)); 277 sbdp_set_err(sbrp->sep, ESBD_BUSY, NULL); 278 return (DDI_WALK_TERMINATE); 279 } 280 return (DDI_WALK_CONTINUE); 281 } 282 283 void 284 sbdp_check_devices(dev_info_t *dip, int *refcount, sbd_error_t *sep) 285 { 286 sbdp_ref_t sbr; 287 288 sbr.refcount = refcount; 289 sbr.sep = sep; 290 291 ASSERT(e_ddi_branch_held(dip)); 292 293 (void) e_ddi_branch_referenced(dip, sbdp_check_dip, &sbr); 294 } 295 296 static int 297 sbdp_suspend_devices(dev_info_t *dip, sbdp_sr_handle_t *srh) 298 { 299 int circ; 300 major_t major; 301 char *dname; 302 303 for (; dip != NULL; dip = ddi_get_next_sibling(dip)) { 304 char d_name[40], d_alias[40], *d_info; 305 306 ndi_devi_enter(dip, &circ); 307 if (sbdp_suspend_devices(ddi_get_child(dip), srh)) { 308 ndi_devi_exit(dip, circ); 309 return (ENXIO); 310 } 311 ndi_devi_exit(dip, circ); 312 313 if (!sbdp_is_real_device(dip)) 314 continue; 315 316 major = (major_t)-1; 317 if ((dname = DEVI(dip)->devi_binding_name) != NULL) 318 major = ddi_name_to_major(dname); 319 320 #ifdef DEBUG 321 if (sbdp_bypass_device(dname)) { 322 SBDP_DBG_QR("bypassed suspend of %s (major# %d)\n", 323 dname, major); 324 continue; 325 } 326 #endif 327 328 if ((d_info = ddi_get_name_addr(dip)) == NULL) 329 d_info = "<null>"; 330 331 d_name[0] = 0; 332 if (sbdp_resolve_devname(dip, d_name, d_alias) == 0) { 333 if (d_alias[0] != 0) { 334 SBDP_DBG_QR("\tsuspending %s@%s (aka %s)\n", 335 d_name, d_info, d_alias); 336 } else { 337 SBDP_DBG_QR("\tsuspending %s@%s\n", 338 d_name, d_info); 339 } 340 } else { 341 SBDP_DBG_QR("\tsuspending %s@%s\n", dname, d_info); 342 } 343 344 if (devi_detach(dip, DDI_SUSPEND) != DDI_SUCCESS) { 345 (void) sprintf(sbdp_get_err_buf(&srh->sep), 346 "%d", major); 347 348 sbdp_set_err(&srh->sep, ESGT_SUSPEND, NULL); 349 ndi_hold_devi(dip); 350 SR_FAILED_DIP(srh) = dip; 351 return (DDI_FAILURE); 352 } 353 } 354 355 return (DDI_SUCCESS); 356 } 357 358 static void 359 sbdp_resume_devices(dev_info_t *start, sbdp_sr_handle_t *srh) 360 { 361 int circ; 362 dev_info_t *dip, *next, *last = NULL; 363 char *bn; 364 sbd_error_t *sep; 365 366 sep = &srh->sep; 367 368 /* attach in reverse device tree order */ 369 while (last != start) { 370 dip = start; 371 next = ddi_get_next_sibling(dip); 372 while (next != last && dip != SR_FAILED_DIP(srh)) { 373 dip = next; 374 next = ddi_get_next_sibling(dip); 375 } 376 if (dip == SR_FAILED_DIP(srh)) { 377 /* Release hold acquired in sbdp_suspend_devices() */ 378 ndi_rele_devi(dip); 379 SR_FAILED_DIP(srh) = NULL; 380 } else if (sbdp_is_real_device(dip) && 381 SR_FAILED_DIP(srh) == NULL) { 382 383 if (DEVI(dip)->devi_binding_name != NULL) { 384 bn = ddi_binding_name(dip); 385 } 386 #ifdef DEBUG 387 if (!sbdp_bypass_device(bn)) { 388 #else 389 { 390 #endif 391 char d_name[40], d_alias[40], *d_info; 392 393 d_name[0] = 0; 394 d_info = ddi_get_name_addr(dip); 395 if (d_info == NULL) 396 d_info = "<null>"; 397 398 if (!sbdp_resolve_devname(dip, d_name, 399 d_alias)) { 400 if (d_alias[0] != 0) { 401 SBDP_DBG_QR("\tresuming " 402 "%s@%s (aka %s)\n", 403 d_name, d_info, 404 d_alias); 405 } else { 406 SBDP_DBG_QR("\tresuming " 407 "%s@%s\n", 408 d_name, d_info); 409 } 410 } else { 411 SBDP_DBG_QR("\tresuming %s@%s\n", 412 bn, d_info); 413 } 414 415 if (devi_attach(dip, DDI_RESUME) != 416 DDI_SUCCESS) { 417 /* 418 * Print a console warning, 419 * set an errno of ESGT_RESUME, 420 * and save the driver major 421 * number in the e_str. 422 */ 423 424 (void) sprintf(sbdp_get_err_buf(sep), 425 "%s@%s", 426 d_name[0] ? d_name : bn, d_info); 427 SBDP_DBG_QR("\tFAILED to resume " 428 "%s\n", sbdp_get_err_buf(sep)); 429 sbdp_set_err(sep, 430 ESGT_RESUME, NULL); 431 } 432 } 433 } 434 ndi_devi_enter(dip, &circ); 435 sbdp_resume_devices(ddi_get_child(dip), srh); 436 ndi_devi_exit(dip, circ); 437 last = dip; 438 } 439 } 440 441 /* 442 * True if thread is virtually stopped. Similar to CPR_VSTOPPED 443 * but from DR point of view. These user threads are waiting in 444 * the kernel. Once they return from kernel, they will process 445 * the stop signal and stop. 446 */ 447 #define SBDP_VSTOPPED(t) \ 448 ((t)->t_state == TS_SLEEP && \ 449 (t)->t_wchan != NULL && \ 450 (t)->t_astflag && \ 451 ((t)->t_proc_flag & TP_CHKPT)) 452 453 454 static int 455 sbdp_stop_user_threads(sbdp_sr_handle_t *srh) 456 { 457 int count; 458 char cache_psargs[PSARGSZ]; 459 kthread_id_t cache_tp; 460 uint_t cache_t_state; 461 int bailout; 462 sbd_error_t *sep; 463 kthread_id_t tp; 464 465 extern void add_one_utstop(); 466 extern void utstop_timedwait(clock_t); 467 extern void utstop_init(void); 468 469 #define SBDP_UTSTOP_RETRY 4 470 #define SBDP_UTSTOP_WAIT hz 471 472 if (sbdp_skip_user_threads) 473 return (DDI_SUCCESS); 474 475 sep = &srh->sep; 476 ASSERT(sep); 477 478 utstop_init(); 479 480 /* we need to try a few times to get past fork, etc. */ 481 for (count = 0; count < SBDP_UTSTOP_RETRY; count++) { 482 /* walk the entire threadlist */ 483 mutex_enter(&pidlock); 484 for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) { 485 proc_t *p = ttoproc(tp); 486 487 /* handle kernel threads separately */ 488 if (p->p_as == &kas || p->p_stat == SZOMB) 489 continue; 490 491 mutex_enter(&p->p_lock); 492 thread_lock(tp); 493 494 if (tp->t_state == TS_STOPPED) { 495 /* add another reason to stop this thread */ 496 tp->t_schedflag &= ~TS_RESUME; 497 } else { 498 tp->t_proc_flag |= TP_CHKPT; 499 500 thread_unlock(tp); 501 mutex_exit(&p->p_lock); 502 add_one_utstop(); 503 mutex_enter(&p->p_lock); 504 thread_lock(tp); 505 506 aston(tp); 507 508 if (tp->t_state == TS_SLEEP && 509 (tp->t_flag & T_WAKEABLE)) { 510 setrun_locked(tp); 511 } 512 513 } 514 515 /* grab thread if needed */ 516 if (tp->t_state == TS_ONPROC && tp->t_cpu != CPU) 517 poke_cpu(tp->t_cpu->cpu_id); 518 519 520 thread_unlock(tp); 521 mutex_exit(&p->p_lock); 522 } 523 mutex_exit(&pidlock); 524 525 526 /* let everything catch up */ 527 utstop_timedwait(count * count * SBDP_UTSTOP_WAIT); 528 529 530 /* now, walk the threadlist again to see if we are done */ 531 mutex_enter(&pidlock); 532 for (tp = curthread->t_next, bailout = 0; 533 tp != curthread; tp = tp->t_next) { 534 proc_t *p = ttoproc(tp); 535 536 /* handle kernel threads separately */ 537 if (p->p_as == &kas || p->p_stat == SZOMB) 538 continue; 539 540 /* 541 * If this thread didn't stop, and we don't allow 542 * unstopped blocked threads, bail. 543 */ 544 thread_lock(tp); 545 if (!CPR_ISTOPPED(tp) && 546 !(sbdp_allow_blocked_threads && 547 SBDP_VSTOPPED(tp))) { 548 549 /* nope, cache the details for later */ 550 bcopy(p->p_user.u_psargs, cache_psargs, 551 sizeof (cache_psargs)); 552 cache_tp = tp; 553 cache_t_state = tp->t_state; 554 bailout = 1; 555 } 556 thread_unlock(tp); 557 } 558 mutex_exit(&pidlock); 559 560 /* were all the threads stopped? */ 561 if (!bailout) 562 break; 563 } 564 565 /* were we unable to stop all threads after a few tries? */ 566 if (bailout) { 567 cmn_err(CE_NOTE, "process: %s id: %p state: %x\n", 568 cache_psargs, cache_tp, cache_t_state); 569 570 (void) sprintf(sbdp_get_err_buf(sep), "%s", cache_psargs); 571 sbdp_set_err(sep, ESGT_UTHREAD, NULL); 572 return (ESRCH); 573 } 574 575 return (DDI_SUCCESS); 576 } 577 578 static void 579 sbdp_start_user_threads(void) 580 { 581 kthread_id_t tp; 582 583 mutex_enter(&pidlock); 584 585 /* walk all threads and release them */ 586 for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) { 587 proc_t *p = ttoproc(tp); 588 589 /* skip kernel threads */ 590 if (ttoproc(tp)->p_as == &kas) 591 continue; 592 593 mutex_enter(&p->p_lock); 594 tp->t_proc_flag &= ~TP_CHKPT; 595 mutex_exit(&p->p_lock); 596 597 thread_lock(tp); 598 if (CPR_ISTOPPED(tp)) { 599 /* back on the runq */ 600 tp->t_schedflag |= TS_RESUME; 601 setrun_locked(tp); 602 } 603 thread_unlock(tp); 604 } 605 606 mutex_exit(&pidlock); 607 } 608 609 static void 610 sbdp_signal_user(int sig) 611 { 612 struct proc *p; 613 614 mutex_enter(&pidlock); 615 616 for (p = practive; p != NULL; p = p->p_next) { 617 /* only user threads */ 618 if (p->p_exec == NULL || p->p_stat == SZOMB || 619 p == proc_init || p == ttoproc(curthread)) 620 continue; 621 622 mutex_enter(&p->p_lock); 623 sigtoproc(p, NULL, sig); 624 mutex_exit(&p->p_lock); 625 } 626 627 mutex_exit(&pidlock); 628 629 /* add a bit of delay */ 630 delay(hz); 631 } 632 633 static uint_t saved_watchdog_seconds; 634 635 void 636 sbdp_resume(sbdp_sr_handle_t *srh) 637 { 638 /* 639 * update the signature block 640 */ 641 CPU_SIGNATURE(OS_SIG, SIGST_RESUME_INPROGRESS, SIGSUBST_NULL, 642 CPU->cpu_id); 643 644 switch (SR_STATE(srh)) { 645 case SBDP_SRSTATE_FULL: 646 647 ASSERT(MUTEX_HELD(&cpu_lock)); 648 649 sbdp_enable_intr(); /* enable intr & clock */ 650 651 /* 652 * release all the other cpus 653 * using start_cpus() vice sbdp_release_cpus() 654 */ 655 start_cpus(); 656 mutex_exit(&cpu_lock); 657 658 /* 659 * If we suspended hw watchdog at suspend, 660 * re-enable it now. 661 */ 662 663 if (SR_CHECK_FLAG(srh, SR_FLAG_WATCHDOG)) { 664 mutex_enter(&tod_lock); 665 tod_ops.tod_set_watchdog_timer( 666 saved_watchdog_seconds); 667 mutex_exit(&tod_lock); 668 } 669 670 /* FALLTHROUGH */ 671 672 case SBDP_SRSTATE_DRIVER: 673 /* 674 * resume devices: root node doesn't have to 675 * be held in any way. 676 */ 677 sbdp_resume_devices(ddi_root_node(), srh); 678 679 /* 680 * resume the lock manager 681 */ 682 lm_cprresume(); 683 684 /* FALLTHROUGH */ 685 686 case SBDP_SRSTATE_USER: 687 /* 688 * finally, resume user threads 689 */ 690 if (!sbdp_skip_user_threads) { 691 SBDP_DBG_QR("DR: resuming user threads...\n"); 692 sbdp_start_user_threads(); 693 } 694 /* FALLTHROUGH */ 695 696 case SBDP_SRSTATE_BEGIN: 697 default: 698 /* 699 * let those who care know that we've just resumed 700 */ 701 SBDP_DBG_QR("sending SIGTHAW...\n"); 702 sbdp_signal_user(SIGTHAW); 703 break; 704 } 705 706 i_ndi_allow_device_tree_changes(srh->sh_ndi); 707 708 /* 709 * update the signature block 710 */ 711 CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, CPU->cpu_id); 712 713 SBDP_DBG_QR("DR: resume COMPLETED\n"); 714 } 715 716 int 717 sbdp_suspend(sbdp_sr_handle_t *srh) 718 { 719 int force; 720 int rc = DDI_SUCCESS; 721 722 force = (srh && (srh->sr_flags & SBDP_IOCTL_FLAG_FORCE)); 723 724 /* 725 * if no force flag, check for unsafe drivers 726 */ 727 if (force) { 728 SBDP_DBG_QR("\nsbdp_suspend invoked with force flag"); 729 } 730 731 /* 732 * update the signature block 733 */ 734 CPU_SIGNATURE(OS_SIG, SIGST_QUIESCE_INPROGRESS, SIGSUBST_NULL, 735 CPU->cpu_id); 736 737 i_ndi_block_device_tree_changes(&srh->sh_ndi); 738 739 /* 740 * first, stop all user threads 741 */ 742 SBDP_DBG_QR("SBDP: suspending user threads...\n"); 743 SR_SET_STATE(srh, SBDP_SRSTATE_USER); 744 if (((rc = sbdp_stop_user_threads(srh)) != DDI_SUCCESS) && 745 sbdp_check_user_stop_result) { 746 sbdp_resume(srh); 747 return (rc); 748 } 749 750 #ifndef SKIP_SYNC 751 /* 752 * This sync swap out all user pages 753 */ 754 vfs_sync(SYNC_ALL); 755 #endif 756 757 /* 758 * special treatment for lock manager 759 */ 760 lm_cprsuspend(); 761 762 #ifndef SKIP_SYNC 763 /* 764 * sync the file system in case we never make it back 765 */ 766 sync(); 767 768 #endif 769 /* 770 * now suspend drivers 771 */ 772 SBDP_DBG_QR("SBDP: suspending drivers...\n"); 773 SR_SET_STATE(srh, SBDP_SRSTATE_DRIVER); 774 775 /* 776 * Root node doesn't have to be held in any way. 777 */ 778 if ((rc = sbdp_suspend_devices(ddi_root_node(), srh)) != DDI_SUCCESS) { 779 sbdp_resume(srh); 780 return (rc); 781 } 782 783 /* 784 * finally, grab all cpus 785 */ 786 SR_SET_STATE(srh, SBDP_SRSTATE_FULL); 787 788 /* 789 * if watchdog was activated, disable it 790 */ 791 if (watchdog_activated) { 792 mutex_enter(&tod_lock); 793 saved_watchdog_seconds = tod_ops.tod_clear_watchdog_timer(); 794 mutex_exit(&tod_lock); 795 SR_SET_FLAG(srh, SR_FLAG_WATCHDOG); 796 } else { 797 SR_CLEAR_FLAG(srh, SR_FLAG_WATCHDOG); 798 } 799 800 mutex_enter(&cpu_lock); 801 pause_cpus(NULL); 802 sbdp_stop_intr(); 803 804 /* 805 * update the signature block 806 */ 807 CPU_SIGNATURE(OS_SIG, SIGST_QUIESCED, SIGSUBST_NULL, CPU->cpu_id); 808 809 return (rc); 810 } 811 812 /*ARGSUSED*/ 813 int 814 sbdp_test_suspend(sbdp_handle_t *hp) 815 { 816 sbdp_sr_handle_t *srh; 817 int err; 818 819 SBDP_DBG_QR("%s...\n", "sbdp_test_suspend"); 820 821 srh = sbdp_get_sr_handle(); 822 823 srh->sr_flags = hp->h_flags; 824 825 if ((err = sbdp_suspend(srh)) == DDI_SUCCESS) { 826 sbdp_resume(srh); 827 } else { 828 SBDP_DBG_MISC("sbdp_suspend() failed, err = 0x%x\n", err); 829 } 830 sbdp_release_sr_handle(srh); 831 832 return (0); 833 } 834 835 #ifdef DEBUG 836 int 837 sbdp_passthru_test_quiesce(sbdp_handle_t *hp, void *arg) 838 { 839 _NOTE(ARGUNUSED(arg)) 840 841 return (sbdp_test_suspend(hp)); 842 } 843 #endif 844