/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * A CPR derivative specifically for sbd
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/ddi.h>
#define	SUNDDI_IMPL
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/devctl.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <nfs/lm.h>
#include <sys/ddi_impldefs.h>
#include <sys/ndi_impldefs.h>
#include <sys/obpdefs.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/callb.h>
#include <sys/clock.h>
#include <sys/x_call.h>
#include <sys/cpuvar.h>
#include <sys/epm.h>
#include <sys/vfs.h>

#ifdef DEBUG
#include <sys/note.h>
#endif

#include <sys/promif.h>
#include <sys/conf.h>
#include <sys/cyclic.h>

#include <sys/sbd_ioctl.h>
#include <sys/sbd.h>
#include <sys/sbdp_priv.h>
#include <sys/cpu_sgnblk_defs.h>

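/*
 * Return the error-resource string buffer embedded in the sbd error
 * structure; callers format the path or name of the failing device
 * into it.
 */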
static char *
sbdp_get_err_buf(sbd_error_t *ep)
{
	return (ep->e_rsc);
}

extern void	e_ddi_enter_driver_list(struct devnames *dnp, int *listcnt);
extern void	e_ddi_exit_driver_list(struct devnames *dnp, int listcnt);
extern int	is_pseudo_device(dev_info_t *dip);

extern kmutex_t	cpu_lock;

static int	sbdp_is_real_device(dev_info_t *dip);
#ifdef DEBUG
static int	sbdp_bypass_device(char *dname);
#endif
static int	sbdp_check_dip(dev_info_t *dip, void *arg, uint_t ref);

static int	sbdp_resolve_devname(dev_info_t *dip, char *buffer,
    char *alias);

int sbdp_test_suspend(sbdp_handle_t *hp);

#define	SR_STATE(srh)			((srh)->sr_suspend_state)
#define	SR_SET_STATE(srh, state)	(SR_STATE((srh)) = (state))
#define	SR_FAILED_DIP(srh)		((srh)->sr_failed_dip)

#define	SR_FLAG_WATCHDOG		0x1
#define	SR_CHECK_FLAG(srh, flag)	((srh)->sr_flags & (flag))
#define	SR_SET_FLAG(srh, flag)		((srh)->sr_flags |= (flag))
#define	SR_CLEAR_FLAG(srh, flag)	((srh)->sr_flags &= ~(flag))

#ifdef DEBUG
/*
 * Just for testing.  List of drivers to bypass when performing a suspend.
 */
static char *sbdp_bypass_list[] = {
	/* "sgsbbc", this is an example when needed */
	""
};
#endif

#define	SKIP_SYNC	/* bypass sync ops in sbdp_suspend */

/*
 * sbdp_skip_user_threads controls whether user threads should be
 * suspended.  If sbdp_skip_user_threads is true, the remaining flags
 * are not used; if it is false, sbdp_check_user_stop_result controls
 * whether we check the result of the suspend, and
 * sbdp_allow_blocked_threads controls whether suspend may continue
 * when some threads remain blocked.  We allow all combinations of
 * sbdp_check_user_stop_result and sbdp_allow_blocked_threads, even
 * though it makes little sense to allow blocked threads when we don't
 * even check the stop result.
 */
static int	sbdp_skip_user_threads = 0;		/* default to FALSE */
static int	sbdp_check_user_stop_result = 1;	/* default to TRUE */
static int	sbdp_allow_blocked_threads = 1;		/* default to TRUE */

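/*
 * Quiesce the clock: disable preemption on this CPU, then suspend the
 * cyclic subsystem so no cyclic handlers fire while the domain is
 * paused.  sbdp_enable_intr() undoes both steps in reverse order.
 */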
static void
sbdp_stop_intr(void)
{
	kpreempt_disable();
	cyclic_suspend();
}

static void
sbdp_enable_intr(void)
{
	cyclic_resume();
	kpreempt_enable();
}

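/*
 * Allocate a zero-filled suspend/resume handle.  It is freed by
 * sbdp_release_sr_handle() once any hold on a failed dip has been
 * dropped.
 */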
sbdp_sr_handle_t *
sbdp_get_sr_handle(void)
{
	sbdp_sr_handle_t *srh;
	srh = kmem_zalloc(sizeof (sbdp_sr_handle_t), KM_SLEEP);

	return (srh);
}

void
sbdp_release_sr_handle(sbdp_sr_handle_t *srh)
{
	ASSERT(SR_FAILED_DIP(srh) == NULL);
	kmem_free((caddr_t)srh, sizeof (sbdp_sr_handle_t));
}

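/*
 * Decide whether a dip corresponds to real hardware: it must have a
 * driver bound, and either its power-management flags or the presence
 * of a "reg" property mark it as a physical device.
 */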
static int
sbdp_is_real_device(dev_info_t *dip)
{
	struct regspec *regbuf = NULL;
	int length = 0;
	int rc;

	if (ddi_get_driver(dip) == NULL)
		return (0);

	if (DEVI(dip)->devi_pm_flags & (PMC_NEEDS_SR|PMC_PARENTAL_SR))
		return (1);
	if (DEVI(dip)->devi_pm_flags & PMC_NO_SR)
		return (0);

	/*
	 * now the general case
	 */
	rc = ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
	    (caddr_t)&regbuf, &length);
	ASSERT(rc != DDI_PROP_NO_MEMORY);
	if (rc != DDI_PROP_SUCCESS) {
		return (0);
	} else {
		if ((length > 0) && (regbuf != NULL))
			kmem_free(regbuf, length);
		return (1);
	}
}

#ifdef DEBUG
static int
sbdp_bypass_device(char *dname)
{
	int i;
	char **lname;
	/* check the bypass list */
	for (i = 0, lname = &sbdp_bypass_list[i]; **lname != '\0'; lname++) {
		SBDP_DBG_QR("Checking %s\n", *lname);
		if (strcmp(dname, sbdp_bypass_list[i++]) == 0)
			return (1);
	}
	return (0);
}
#endif

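/*
 * Copy the node name of dip into buffer.  When the name resolves to a
 * different driver binding, that alias is copied into alias; otherwise
 * alias is left empty.
 */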
static int
sbdp_resolve_devname(dev_info_t *dip, char *buffer, char *alias)
{
	major_t	devmajor;
	char	*aka, *name;

	*buffer = *alias = 0;

	if (dip == NULL)
		return (-1);

	if ((name = ddi_get_name(dip)) == NULL)
		name = "<null name>";

	aka = name;

	if ((devmajor = ddi_name_to_major(aka)) != -1)
		aka = ddi_major_to_name(devmajor);

	(void) strcpy(buffer, name);

	if (strcmp(name, aka))
		(void) strcpy(alias, aka);
	else
		*alias = 0;

	return (0);
}

typedef struct sbdp_ref {
	int *refcount;
	int *refcount_non_gldv3;
	sbd_error_t *sep;
} sbdp_ref_t;

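/*
 * Walk callback for e_ddi_branch_referenced().  Certain PCI bridge
 * nodes (pciclass,060940 and pciclass,060980) always mark the board
 * busy and terminate the walk.  Otherwise a referenced device bumps
 * the reference count; referenced non-GLDv3 network drivers are
 * counted separately and let the walk continue, while any other
 * referenced device marks the board busy and terminates the walk.
 */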
static int
sbdp_check_dip(dev_info_t *dip, void *arg, uint_t ref)
{
	char		*dname;
	sbdp_ref_t	*sbrp = (sbdp_ref_t *)arg;

	if (dip == NULL)
		return (DDI_WALK_CONTINUE);

	ASSERT(sbrp->sep != NULL);
	ASSERT(sbrp->refcount != NULL);

	if (!sbdp_is_real_device(dip))
		return (DDI_WALK_CONTINUE);

	dname = ddi_binding_name(dip);

	if ((strcmp(dname, "pciclass,060940") == 0) || (strcmp(dname,
	    "pciclass,060980") == 0)) {
		(void) ddi_pathname(dip, sbdp_get_err_buf(sbrp->sep));
		sbdp_set_err(sbrp->sep, ESBD_BUSY, NULL);
		(*sbrp->refcount)++;
		return (DDI_WALK_TERMINATE);
	}

#ifdef DEBUG
	if (sbdp_bypass_device(dname))
		return (DDI_WALK_CONTINUE);
#endif

	if (ref) {
		major_t	major;

		(*sbrp->refcount)++;
		SBDP_DBG_QR("\n%s (major# %d) is referenced\n",
		    dname, ddi_name_to_major(dname));
		(void) ddi_pathname(dip, sbdp_get_err_buf(sbrp->sep));
		major = ddi_driver_major(dip);
		if (sbrp->refcount_non_gldv3 && NETWORK_PHYSDRV(major) &&
		    !GLDV3_DRV(major)) {
			(*sbrp->refcount_non_gldv3)++;
			return (DDI_WALK_CONTINUE);
		}
		sbdp_set_err(sbrp->sep, ESBD_BUSY, NULL);
		return (DDI_WALK_TERMINATE);
	}
	return (DDI_WALK_CONTINUE);
}

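/*
 * Count outstanding references to devices on the branch rooted at dip.
 * The caller must already hold the branch; errors are reported through
 * sep and counts through refcount and refcount_non_gldv3.
 */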
void
sbdp_check_devices(dev_info_t *dip, int *refcount, sbd_error_t *sep,
    int *refcount_non_gldv3)
{
	sbdp_ref_t sbr;

	sbr.refcount = refcount;
	sbr.refcount_non_gldv3 = refcount_non_gldv3;
	sbr.sep = sep;

	ASSERT(e_ddi_branch_held(dip));

	(void) e_ddi_branch_referenced(dip, sbdp_check_dip, &sbr);
}

/*
 * Starting from the root node, suspend all devices in the device tree.
 * Assumes that all devices have already been marked busy.
 */
static int
sbdp_suspend_devices_(dev_info_t *dip, sbdp_sr_handle_t *srh)
{
	major_t	major;
	char	*dname;

	for (; dip != NULL; dip = ddi_get_next_sibling(dip)) {
		char	d_name[40], d_alias[40], *d_info;

		if (sbdp_suspend_devices_(ddi_get_child(dip), srh)) {
			return (ENXIO);
		}

		if (!sbdp_is_real_device(dip))
			continue;

		major = (major_t)-1;
		if ((dname = DEVI(dip)->devi_binding_name) != NULL)
			major = ddi_name_to_major(dname);

#ifdef DEBUG
		if (sbdp_bypass_device(dname)) {
			SBDP_DBG_QR("bypassed suspend of %s (major# %d)\n",
			    dname, major);
			continue;
		}
#endif

		if ((d_info = ddi_get_name_addr(dip)) == NULL)
			d_info = "<null>";

		d_name[0] = 0;
		if (sbdp_resolve_devname(dip, d_name, d_alias) == 0) {
			if (d_alias[0] != 0) {
				SBDP_DBG_QR("\tsuspending %s@%s (aka %s)\n",
				    d_name, d_info, d_alias);
			} else {
				SBDP_DBG_QR("\tsuspending %s@%s\n",
				    d_name, d_info);
			}
		} else {
			SBDP_DBG_QR("\tsuspending %s@%s\n", dname, d_info);
		}

		if (devi_detach(dip, DDI_SUSPEND) != DDI_SUCCESS) {
			(void) sprintf(sbdp_get_err_buf(&srh->sep),
			    "%d", major);

			sbdp_set_err(&srh->sep, ESGT_SUSPEND, NULL);
			ndi_hold_devi(dip);
			SR_FAILED_DIP(srh) = dip;
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}

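/*
 * ddi_walk_devs() callbacks used to take and later drop the per-node
 * enter locks across the whole tree around the suspend pass.
 */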
/*ARGSUSED*/
static int
sbdp_suspend_devices_enter(dev_info_t *dip, void *arg)
{
	struct dev_info *devi = DEVI(dip);
	ndi_devi_enter(dip, &devi->devi_circular);
	return (DDI_WALK_CONTINUE);
}

/*ARGSUSED*/
static int
sbdp_suspend_devices_exit(dev_info_t *dip, void *arg)
{
	struct dev_info *devi = DEVI(dip);
	ndi_devi_exit(dip, devi->devi_circular);
	return (DDI_WALK_CONTINUE);
}

/*
 * Before suspending devices, first mark all device nodes busy.  This
 * avoids a deadlock when another thread holds a device busy and
 * accesses an already suspended device.
 */
static int
sbdp_suspend_devices(dev_info_t *dip, sbdp_sr_handle_t *srh)
{
	int	rv;

	/* assumes dip is ddi_root_node so no ndi_devi_enter required */
	ASSERT(dip == ddi_root_node());
	ddi_walk_devs(dip, sbdp_suspend_devices_enter, NULL);
	rv = sbdp_suspend_devices_(dip, srh);
	ddi_walk_devs(dip, sbdp_suspend_devices_exit, NULL);
	return (rv);
}

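/*
 * Walk the siblings of start from last to first (the reverse of the
 * suspend order), reattaching each real device with DDI_RESUME and
 * recursing into its children.  The dip that failed to suspend, if
 * any, only has its hold from sbdp_suspend_devices() released; its
 * later siblings were never suspended and are not revisited.
 */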
static void
sbdp_resume_devices(dev_info_t *start, sbdp_sr_handle_t *srh)
{
	int circ;
	dev_info_t	*dip, *next, *last = NULL;
	char		*bn;
	sbd_error_t	*sep;

	sep = &srh->sep;

	/* attach in reverse device tree order */
	while (last != start) {
		dip = start;
		next = ddi_get_next_sibling(dip);
		while (next != last && dip != SR_FAILED_DIP(srh)) {
			dip = next;
			next = ddi_get_next_sibling(dip);
		}
		if (dip == SR_FAILED_DIP(srh)) {
			/* Release hold acquired in sbdp_suspend_devices() */
			ndi_rele_devi(dip);
			SR_FAILED_DIP(srh) = NULL;
		} else if (sbdp_is_real_device(dip) &&
		    SR_FAILED_DIP(srh) == NULL) {

			if (DEVI(dip)->devi_binding_name != NULL) {
				bn = ddi_binding_name(dip);
			}
#ifdef DEBUG
			if (!sbdp_bypass_device(bn)) {
#else
			{
#endif
				char	d_name[40], d_alias[40], *d_info;

				d_name[0] = 0;
				d_info = ddi_get_name_addr(dip);
				if (d_info == NULL)
					d_info = "<null>";

				if (!sbdp_resolve_devname(dip, d_name,
				    d_alias)) {
					if (d_alias[0] != 0) {
						SBDP_DBG_QR("\tresuming "
						    "%s@%s (aka %s)\n",
						    d_name, d_info,
						    d_alias);
					} else {
						SBDP_DBG_QR("\tresuming "
						    "%s@%s\n",
						    d_name, d_info);
					}
				} else {
					SBDP_DBG_QR("\tresuming %s@%s\n",
					    bn, d_info);
				}

				if (devi_attach(dip, DDI_RESUME) !=
				    DDI_SUCCESS) {
					/*
					 * Print a console warning,
					 * set an errno of ESGT_RESUME,
					 * and save the driver major
					 * number in the e_str.
					 */

					(void) sprintf(sbdp_get_err_buf(sep),
					    "%s@%s",
					    d_name[0] ? d_name : bn, d_info);
					SBDP_DBG_QR("\tFAILED to resume "
					    "%s\n", sbdp_get_err_buf(sep));
					sbdp_set_err(sep,
					    ESGT_RESUME, NULL);
				}
			}
		}
		ndi_devi_enter(dip, &circ);
		sbdp_resume_devices(ddi_get_child(dip), srh);
		ndi_devi_exit(dip, circ);
		last = dip;
	}
}

/*
 * True if the thread is virtually stopped.  Similar to CPR_VSTOPPED
 * but from the DR point of view.  These user threads are waiting in
 * the kernel; once they return from the kernel, they will process
 * the stop signal and stop.
 */
#define	SBDP_VSTOPPED(t)			\
	((t)->t_state == TS_SLEEP &&		\
	(t)->t_wchan != NULL &&			\
	(t)->t_astflag &&			\
	((t)->t_proc_flag & TP_CHKPT))

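/*
 * Ask every user thread to stop, retrying a few times so threads that
 * are mid-fork (or otherwise transient) get another chance.  A thread
 * counts as stopped if CPR_ISTOPPED() holds or, when blocked threads
 * are allowed, if it is virtually stopped per SBDP_VSTOPPED().
 */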
static int
sbdp_stop_user_threads(sbdp_sr_handle_t *srh)
{
	int		count;
	char		cache_psargs[PSARGSZ];
	kthread_id_t	cache_tp;
	uint_t		cache_t_state;
	int		bailout;
	sbd_error_t	*sep;
	kthread_id_t	tp;

	extern void add_one_utstop();
	extern void utstop_timedwait(clock_t);
	extern void utstop_init(void);

#define	SBDP_UTSTOP_RETRY	4
#define	SBDP_UTSTOP_WAIT	hz

	if (sbdp_skip_user_threads)
		return (DDI_SUCCESS);

	sep = &srh->sep;
	ASSERT(sep);

	utstop_init();

	/* we need to try a few times to get past fork, etc. */
	for (count = 0; count < SBDP_UTSTOP_RETRY; count++) {
		/* walk the entire threadlist */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			mutex_enter(&p->p_lock);
			thread_lock(tp);

			if (tp->t_state == TS_STOPPED) {
				/* add another reason to stop this thread */
				tp->t_schedflag &= ~TS_RESUME;
			} else {
				tp->t_proc_flag |= TP_CHKPT;

				thread_unlock(tp);
				mutex_exit(&p->p_lock);
				add_one_utstop();
				mutex_enter(&p->p_lock);
				thread_lock(tp);

				aston(tp);

				if (ISWAKEABLE(tp) || ISWAITING(tp)) {
					setrun_locked(tp);
				}
			}

			/* grab thread if needed */
			if (tp->t_state == TS_ONPROC && tp->t_cpu != CPU)
				poke_cpu(tp->t_cpu->cpu_id);

			thread_unlock(tp);
			mutex_exit(&p->p_lock);
		}
		mutex_exit(&pidlock);

		/* let everything catch up */
		utstop_timedwait(count * count * SBDP_UTSTOP_WAIT);

		/* now, walk the threadlist again to see if we are done */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next, bailout = 0;
		    tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			/*
			 * If this thread didn't stop, and we don't allow
			 * unstopped blocked threads, bail.
			 */
			thread_lock(tp);
			if (!CPR_ISTOPPED(tp) &&
			    !(sbdp_allow_blocked_threads &&
			    SBDP_VSTOPPED(tp))) {
				/* nope, cache the details for later */
				bcopy(p->p_user.u_psargs, cache_psargs,
				    sizeof (cache_psargs));
				cache_tp = tp;
				cache_t_state = tp->t_state;
				bailout = 1;
			}
			thread_unlock(tp);
		}
		mutex_exit(&pidlock);

		/* were all the threads stopped? */
		if (!bailout)
			break;
	}

	/* were we unable to stop all threads after a few tries? */
	if (bailout) {
		cmn_err(CE_NOTE, "process: %s id: %p state: %x\n",
		    cache_psargs, (void *)cache_tp, cache_t_state);

		(void) sprintf(sbdp_get_err_buf(sep), "%s", cache_psargs);
		sbdp_set_err(sep, ESGT_UTHREAD, NULL);
		return (ESRCH);
	}

	return (DDI_SUCCESS);
}

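/*
 * Undo sbdp_stop_user_threads(): clear TP_CHKPT on every user thread
 * and put the stopped ones back on a run queue.
 */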
static void
sbdp_start_user_threads(void)
{
	kthread_id_t tp;

	mutex_enter(&pidlock);

	/* walk all threads and release them */
	for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) {
		proc_t *p = ttoproc(tp);

		/* skip kernel threads */
		if (ttoproc(tp)->p_as == &kas)
			continue;

		mutex_enter(&p->p_lock);
		tp->t_proc_flag &= ~TP_CHKPT;
		mutex_exit(&p->p_lock);

		thread_lock(tp);
		if (CPR_ISTOPPED(tp)) {
			/* back on the runq */
			tp->t_schedflag |= TS_RESUME;
			setrun_locked(tp);
		}
		thread_unlock(tp);
	}

	mutex_exit(&pidlock);
}

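/*
 * Post sig to every active user process except init and the current
 * process, then give them a second to notice it.
 */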
static void
sbdp_signal_user(int sig)
{
	struct proc *p;

	mutex_enter(&pidlock);

	for (p = practive; p != NULL; p = p->p_next) {
		/* only user threads */
		if (p->p_exec == NULL || p->p_stat == SZOMB ||
		    p == proc_init || p == ttoproc(curthread))
			continue;

		mutex_enter(&p->p_lock);
		sigtoproc(p, NULL, sig);
		mutex_exit(&p->p_lock);
	}

	mutex_exit(&pidlock);

	/* add a bit of delay */
	delay(hz);
}

static uint_t saved_watchdog_seconds;

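/*
 * Undo a (possibly partial) suspend.  The switch falls through from
 * the deepest state reached: a full quiesce restores the TOD, clock
 * and CPUs first, then drivers are reattached, user threads are
 * restarted, and finally SIGTHAW tells processes the domain resumed.
 */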
void
sbdp_resume(sbdp_sr_handle_t *srh)
{
	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RESUME_INPROGRESS, SIGSUBST_NULL,
	    CPU->cpu_id);

	switch (SR_STATE(srh)) {
	case SBDP_SRSTATE_FULL:

		ASSERT(MUTEX_HELD(&cpu_lock));

		/*
		 * Prevent false alarm in tod_validate() due to tod
		 * value change between suspend and resume
		 */
		mutex_enter(&tod_lock);
		tod_status_set(TOD_DR_RESUME_DONE);
		mutex_exit(&tod_lock);

		sbdp_enable_intr();	/* enable intr & clock */

		/*
		 * release all the other cpus using start_cpus()
		 * rather than sbdp_release_cpus()
		 */
		start_cpus();
		mutex_exit(&cpu_lock);

		/*
		 * If we disabled the hardware watchdog at suspend time,
		 * re-enable it now.
		 */
		if (SR_CHECK_FLAG(srh, SR_FLAG_WATCHDOG)) {
			mutex_enter(&tod_lock);
			tod_ops.tod_set_watchdog_timer(
			    saved_watchdog_seconds);
			mutex_exit(&tod_lock);
		}

		/* FALLTHROUGH */

	case SBDP_SRSTATE_DRIVER:
		/*
		 * resume devices: root node doesn't have to
		 * be held in any way.
		 */
		sbdp_resume_devices(ddi_root_node(), srh);

		/*
		 * resume the lock manager
		 */
		lm_cprresume();

		/* FALLTHROUGH */

	case SBDP_SRSTATE_USER:
		/*
		 * finally, resume user threads
		 */
		if (!sbdp_skip_user_threads) {
			SBDP_DBG_QR("DR: resuming user threads...\n");
			sbdp_start_user_threads();
		}
		/* FALLTHROUGH */

	case SBDP_SRSTATE_BEGIN:
	default:
		/*
		 * let those who care know that we've just resumed
		 */
		SBDP_DBG_QR("sending SIGTHAW...\n");
		sbdp_signal_user(SIGTHAW);
		break;
	}

	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, CPU->cpu_id);

	SBDP_DBG_QR("DR: resume COMPLETED\n");
}

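/*
 * Quiesce the domain in stages: stop user threads, suspend the lock
 * manager, suspend all devices, and finally pause the other CPUs and
 * stop the clock.  (The vfs_sync()/sync() calls below are compiled
 * out because SKIP_SYNC is defined above.)  Any failure resumes
 * whatever had already been suspended and returns the error.
 */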
int
sbdp_suspend(sbdp_sr_handle_t *srh)
{
	int force;
	int rc = DDI_SUCCESS;

	force = (srh && (srh->sr_flags & SBDP_IOCTL_FLAG_FORCE));

	/*
	 * if no force flag, check for unsafe drivers
	 */
	if (force) {
		SBDP_DBG_QR("\nsbdp_suspend invoked with force flag");
	}

	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_QUIESCE_INPROGRESS, SIGSUBST_NULL,
	    CPU->cpu_id);

	/*
	 * first, stop all user threads
	 */
	SBDP_DBG_QR("SBDP: suspending user threads...\n");
	SR_SET_STATE(srh, SBDP_SRSTATE_USER);
	if (((rc = sbdp_stop_user_threads(srh)) != DDI_SUCCESS) &&
	    sbdp_check_user_stop_result) {
		sbdp_resume(srh);
		return (rc);
	}

#ifndef SKIP_SYNC
	/*
	 * This sync swaps out all user pages
	 */
	vfs_sync(SYNC_ALL);
#endif

	/*
	 * special treatment for lock manager
	 */
	lm_cprsuspend();

#ifndef SKIP_SYNC
	/*
	 * sync the file system in case we never make it back
	 */
	sync();
#endif

	/*
	 * now suspend drivers
	 */
	SBDP_DBG_QR("SBDP: suspending drivers...\n");
	SR_SET_STATE(srh, SBDP_SRSTATE_DRIVER);

	/*
	 * Root node doesn't have to be held in any way.
	 */
	if ((rc = sbdp_suspend_devices(ddi_root_node(), srh)) != DDI_SUCCESS) {
		sbdp_resume(srh);
		return (rc);
	}

	/*
	 * finally, grab all cpus
	 */
	SR_SET_STATE(srh, SBDP_SRSTATE_FULL);

	/*
	 * if watchdog was activated, disable it
	 */
	if (watchdog_activated) {
		mutex_enter(&tod_lock);
		saved_watchdog_seconds = tod_ops.tod_clear_watchdog_timer();
		mutex_exit(&tod_lock);
		SR_SET_FLAG(srh, SR_FLAG_WATCHDOG);
	} else {
		SR_CLEAR_FLAG(srh, SR_FLAG_WATCHDOG);
	}

	mutex_enter(&cpu_lock);
	pause_cpus(NULL, NULL);
	sbdp_stop_intr();

	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_QUIESCED, SIGSUBST_NULL, CPU->cpu_id);

	return (rc);
}

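/*
 * Perform a complete suspend/resume cycle without removing any
 * hardware, used to verify that the domain's devices can quiesce.
 */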
/*ARGSUSED*/
int
sbdp_test_suspend(sbdp_handle_t *hp)
{
	sbdp_sr_handle_t	*srh;
	int			err;

	SBDP_DBG_QR("%s...\n", "sbdp_test_suspend");

	srh = sbdp_get_sr_handle();

	srh->sr_flags = hp->h_flags;

	if ((err = sbdp_suspend(srh)) == DDI_SUCCESS) {
		sbdp_resume(srh);
	} else {
		SBDP_DBG_MISC("sbdp_suspend() failed, err = 0x%x\n", err);
	}
	sbdp_release_sr_handle(srh);

	return (0);
}

#ifdef DEBUG
int
sbdp_passthru_test_quiesce(sbdp_handle_t *hp, void *arg)
{
	_NOTE(ARGUNUSED(arg))

	return (sbdp_test_suspend(hp));
}
#endif