1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 /*
27 * generic scsi device watch
28 */
29
/* Build debug tracing into DEBUG and lint builds only. */
#if DEBUG || lint
#define	SWDEBUG
#endif

/*
 * debug goodies
 *
 * SW_DEBUG/SW_DEBUG2 expand to a guarded scsi_log() call; on non-debug
 * builds the `if (0)` form lets the compiler discard the call while still
 * type-checking the arguments.
 */
#ifdef SWDEBUG
static int swdebug = 0;
/*
 * Fix: gate on this module's own swdebug flag.  The original expansion
 * referenced `sddebug` (copied from the sd driver), which is not defined
 * in this file and would fail to compile if DEBUGGING were ever used.
 */
#define	DEBUGGING	((scsi_options & SCSI_DEBUG_TGT) && swdebug > 1)
#define	SW_DEBUG	if (swdebug == 1) scsi_log
#define	SW_DEBUG2	if (swdebug > 1) scsi_log
#else	/* SWDEBUG */
#define	swdebug		(0)
#define	DEBUGGING	(0)
#define	SW_DEBUG	if (0) scsi_log
#define	SW_DEBUG2	if (0) scsi_log
#endif
48
49
50
51 /*
52 * Includes, Declarations and Local Data
53 */
54
55 #include <sys/note.h>
56 #include <sys/scsi/scsi.h>
57 #include <sys/var.h>
58 #include <sys/proc.h>
59 #include <sys/thread.h>
60 #include <sys/callb.h>
61
62 /*
63 * macro for filling in lun value for scsi-1 support
64 */
65 #define FILL_SCSI1_LUN(devp, pkt) \
66 if ((devp->sd_address.a_lun > 0) && \
67 (devp->sd_inq->inq_ansi == 0x1)) { \
68 ((union scsi_cdb *)(pkt)->pkt_cdbp)->scc_lun = \
69 devp->sd_address.a_lun; \
70 }
71
72 char *sw_label = "scsi-watch";
73
74 static int scsi_watch_io_time = SCSI_WATCH_IO_TIME;
75
/*
 * all info resides in the scsi watch structure
 *
 * the monitoring is performed by one separate thread which works
 * from a linked list of scsi_watch_request packets
 */
static struct scsi_watch {
	kthread_t	*sw_thread;	/* the watch thread; created */
					/* lazily, NULL when idle */
	kmutex_t	sw_mutex;	/* mutex protecting list */
					/* and this structure */
	kcondvar_t	sw_cv;		/* cv for waking up thread */
	struct scsi_watch_request *sw_head;	/* head of linked list */
						/* of request structures */
	uchar_t		sw_state;	/* for suspend-resume */
	uchar_t		sw_flags;	/* to start at head of list */
					/* for watch thread */
	struct scsi_watch_request *swr_current;	/* the command waiting to be */
						/* processed by the watch */
						/* thread which is being */
						/* blocked */
} sw;

#if !defined(lint)
_NOTE(MUTEX_PROTECTS_DATA(scsi_watch::sw_mutex, scsi_watch))
#endif
101
/*
 * Values for sw_state (lifecycle of the single watch thread)
 */
#define	SW_RUNNING		0
#define	SW_SUSPEND_REQUESTED	1
#define	SW_SUSPENDED		2

/*
 * values for sw_flags
 */
#define	SW_START_HEAD	0x1	/* list mutated; rescan from sw_head */

/*
 * One monitoring request; nodes of the doubly linked list hanging off
 * sw.sw_head.  Protected by sw.sw_mutex except where noted by the
 * SCHEME_PROTECTS_DATA annotation below.
 */
struct scsi_watch_request {
	struct scsi_watch_request *swr_next;	/* linked request list */
	struct scsi_watch_request *swr_prev;
	clock_t		swr_interval;	/* interval between TURs */
	clock_t		swr_timeout;	/* count down */
	uchar_t		swr_busy;	/* TUR in progress */
	uchar_t		swr_what;	/* watch or stop */
	uchar_t		swr_sense_length;	/* required sense length */
	struct scsi_pkt	*swr_pkt;	/* TUR pkt itself */
	struct scsi_pkt	*swr_rqpkt;	/* request sense pkt */
	struct buf	*swr_rqbp;	/* bp for request sense data */
	struct buf	*swr_mmcbp;	/* bp for MMC command data */
	int		(*swr_callback)();	/* callback to driver */
	caddr_t		swr_callback_arg;
	kcondvar_t	swr_terminate_cv;	/* cv to wait on to cleanup */
						/* request synchronously */
	int		swr_ref;	/* refer count to the swr */
	uchar_t		suspend_destroy;	/* flag for free later */
};

/*
 * values for swr flags (suspend_destroy)
 */
#define	SUSPEND_DESTROY	1	/* watch thread frees swr after resume */

#if !defined(lint)
_NOTE(SCHEME_PROTECTS_DATA("unshared data", scsi_watch_request))
#endif

/*
 * values for sw_what
 */
#define	SWR_WATCH		0	/* device watch */
#define	SWR_STOP		1	/* stop monitoring and destroy swr */
#define	SWR_SUSPEND_REQUESTED	2	/* req. pending suspend */
#define	SWR_SUSPENDED		3	/* req. is suspended */

static opaque_t scsi_watch_request_submit_impl(struct scsi_device *devp,
    int interval, int sense_length, int (*callback)(), caddr_t cb_arg,
    boolean_t mmc);
static void scsi_watch_request_destroy(struct scsi_watch_request *swr);
static void scsi_watch_thread(void);
static void scsi_watch_request_intr(struct scsi_pkt *pkt);
157
158 /*
159 * setup, called from _init(), the thread is created when we need it
160 * and exits when there is nothing to do anymore and everything has been
161 * cleaned up (ie. resources deallocated)
162 */
163 void
scsi_watch_init()164 scsi_watch_init()
165 {
166 /* NO OTHER THREADS ARE RUNNING */
167 mutex_init(&sw.sw_mutex, NULL, MUTEX_DRIVER, NULL);
168 cv_init(&sw.sw_cv, NULL, CV_DRIVER, NULL);
169 sw.sw_state = SW_RUNNING;
170 sw.sw_flags = 0;
171 sw.swr_current = NULL;
172 }
173
174 /*
175 * cleaning up, called from _fini()
176 */
177 void
scsi_watch_fini()178 scsi_watch_fini()
179 {
180 /* NO OTHER THREADS ARE RUNNING */
181 /*
182 * hope and pray that the thread has exited
183 */
184 ASSERT(sw.sw_thread == 0);
185 mutex_destroy(&sw.sw_mutex);
186 cv_destroy(&sw.sw_cv);
187 }
188
189 /*
190 * allocate an swr (scsi watch request structure) and initialize pkts
191 */
192 #define ROUTE &devp->sd_address
193
194 opaque_t
scsi_watch_request_submit(struct scsi_device * devp,int interval,int sense_length,int (* callback)(),caddr_t cb_arg)195 scsi_watch_request_submit(
196 struct scsi_device *devp,
197 int interval,
198 int sense_length,
199 int (*callback)(), /* callback function */
200 caddr_t cb_arg) /* device number */
201 {
202 return (scsi_watch_request_submit_impl(devp, interval, sense_length,
203 callback, cb_arg, B_FALSE));
204 }
205
206 opaque_t
scsi_mmc_watch_request_submit(struct scsi_device * devp,int interval,int sense_length,int (* callback)(),caddr_t cb_arg)207 scsi_mmc_watch_request_submit(
208 struct scsi_device *devp,
209 int interval,
210 int sense_length,
211 int (*callback)(), /* callback function */
212 caddr_t cb_arg) /* device number */
213 {
214 return (scsi_watch_request_submit_impl(devp, interval, sense_length,
215 callback, cb_arg, B_TRUE));
216 }
217
/*
 * Common submit path: allocate and initialize a scsi_watch_request,
 * build its polling packet (TUR, zero-length WRITE(10), or MMC GESN)
 * plus a backup REQUEST SENSE packet, then link the request onto the
 * global list and wake the watch thread.  If a request with the same
 * (callback, cb_arg) pair already exists, its interval is refreshed and
 * its reference count bumped instead of allocating a new one.
 *
 * Returns the request as an opaque token for later terminate/suspend/
 * resume calls.  All allocations use SLEEP_FUNC/KM_SLEEP, so this
 * function does not fail; it must be called from a sleepable context.
 */
static opaque_t
scsi_watch_request_submit_impl(
	struct scsi_device	*devp,
	int			interval,
	int			sense_length,
	int			(*callback)(),	/* callback function */
	caddr_t			cb_arg,		/* device number */
	boolean_t		mmc)
{
	register struct scsi_watch_request	*swr = NULL;
	register struct scsi_watch_request	*sswr, *p;
	struct buf *bp = NULL;
	struct buf *mmcbp = NULL;
	struct scsi_pkt *rqpkt = NULL;
	struct scsi_pkt *pkt = NULL;
	uchar_t dtype;

	SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
	    "scsi_watch_request_submit: Entering ...\n");

	mutex_enter(&sw.sw_mutex);
	/* lazily create the single watch thread on first use */
	if (sw.sw_thread == 0) {
		register kthread_t	*t;

		t = thread_create((caddr_t)NULL, 0, scsi_watch_thread,
		    NULL, 0, &p0, TS_RUN, v.v_maxsyspri - 2);
		sw.sw_thread = t;
	}

	/* look for an existing request with the same callback identity */
	for (p = sw.sw_head; p != NULL; p = p->swr_next) {
		if ((p->swr_callback_arg == cb_arg) &&
		    (p->swr_callback == callback))
			break;
	}

	/* update time interval for an existing request */
	if (p) {
		if (p->swr_what != SWR_STOP) {
			p->swr_timeout = p->swr_interval
			    = drv_usectohz(interval);
			p->swr_what = SWR_WATCH;
			p->swr_ref++;
			cv_signal(&sw.sw_cv);
			mutex_exit(&sw.sw_mutex);
			return ((opaque_t)p);
		}
	}
	/* drop the lock across the sleeping allocations below */
	mutex_exit(&sw.sw_mutex);

	/*
	 * allocate space for scsi_watch_request
	 */
	swr = kmem_zalloc(sizeof (struct scsi_watch_request), KM_SLEEP);

	/*
	 * allocate request sense bp and pkt and make cmd
	 * we shouldn't really need it if ARQ is enabled but it is useful
	 * if the ARQ failed.
	 */
	bp = scsi_alloc_consistent_buf(ROUTE, NULL,
	    sense_length, B_READ, SLEEP_FUNC, NULL);

	rqpkt = scsi_init_pkt(ROUTE, (struct scsi_pkt *)NULL,
	    bp, CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);

	(void) scsi_setup_cdb((union scsi_cdb *)rqpkt->pkt_cdbp,
	    SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);
	FILL_SCSI1_LUN(devp, rqpkt);
	rqpkt->pkt_private = (opaque_t)swr;
	rqpkt->pkt_time = scsi_watch_io_time;
	rqpkt->pkt_comp = scsi_watch_request_intr;
	/* queue the sense fetch ahead of other commands */
	rqpkt->pkt_flags |= FLAG_HEAD;

	/*
	 * Create TUR pkt or GET STATUS EVENT NOTIFICATION for MMC requests or
	 * a zero byte WRITE(10) based on the disk-type for reservation state.
	 * For inq_dtype of SBC (DIRECT, dtype == 0)
	 * OR for RBC devices (dtype is 0xE) AND for
	 * ANSI version of SPC/SPC-2/SPC-3 (inq_ansi == 3-5).
	 */

	dtype = devp->sd_inq->inq_dtype & DTYPE_MASK;
	if (mmc) {
		/* 8-byte event header buffer for GESN */
		mmcbp = scsi_alloc_consistent_buf(ROUTE, NULL,
		    8, B_READ, SLEEP_FUNC, NULL);

		pkt = scsi_init_pkt(ROUTE, (struct scsi_pkt *)NULL, mmcbp,
		    CDB_GROUP1, sizeof (struct scsi_arq_status),
		    0, 0, SLEEP_FUNC, NULL);

		(void) scsi_setup_cdb((union scsi_cdb *)pkt->pkt_cdbp,
		    SCMD_GET_EVENT_STATUS_NOTIFICATION, 0, 8, 0);
		pkt->pkt_cdbp[1] = 1; /* polled */
		pkt->pkt_cdbp[4] = 1 << SD_GESN_MEDIA_CLASS;
	} else if (((dtype == 0) || (dtype == 0xE)) &&
	    (devp->sd_inq->inq_ansi > 2)) {
		/* zero-length WRITE(10): probes reservation state too */
		pkt = scsi_init_pkt(ROUTE, (struct scsi_pkt *)NULL, NULL,
		    CDB_GROUP1, sizeof (struct scsi_arq_status),
		    0, 0, SLEEP_FUNC, NULL);

		(void) scsi_setup_cdb((union scsi_cdb *)pkt->pkt_cdbp,
		    SCMD_WRITE_G1, 0, 0, 0);
	} else {
		/* default probe: TEST UNIT READY */
		pkt = scsi_init_pkt(ROUTE, (struct scsi_pkt *)NULL, NULL,
		    CDB_GROUP0, sizeof (struct scsi_arq_status),
		    0, 0, SLEEP_FUNC, NULL);

		(void) scsi_setup_cdb((union scsi_cdb *)pkt->pkt_cdbp,
		    SCMD_TEST_UNIT_READY, 0, 0, 0);
		FILL_SCSI1_LUN(devp, pkt);
	}

	pkt->pkt_private = (opaque_t)swr;
	pkt->pkt_time = scsi_watch_io_time;
	pkt->pkt_comp = scsi_watch_request_intr;
	if (scsi_ifgetcap(&pkt->pkt_address, "tagged-qing", 1) == 1) {
		pkt->pkt_flags |= FLAG_STAG;
	}

	/*
	 * set the allocated resources in swr
	 */
	swr->swr_rqbp = bp;
	swr->swr_rqpkt = rqpkt;
	swr->swr_mmcbp = mmcbp;
	swr->swr_pkt = pkt;
	swr->swr_timeout = swr->swr_interval = drv_usectohz(interval);
	swr->swr_callback = callback;
	swr->swr_callback_arg = cb_arg;
	swr->swr_what = SWR_WATCH;
	swr->swr_sense_length = (uchar_t)sense_length;
	swr->swr_ref = 1;
	cv_init(&swr->swr_terminate_cv, NULL, CV_DRIVER, NULL);

	/*
	 * add to the list and wake up the thread
	 */
	mutex_enter(&sw.sw_mutex);
	swr->swr_next = sw.sw_head;
	swr->swr_prev = NULL;
	if (sw.sw_head) {
		sw.sw_head->swr_prev = swr;
	}
	sw.sw_head = swr;

	/*
	 * reset all timeouts, so all requests are in sync again
	 * XXX there is a small window where the watch thread releases
	 * the mutex so that could upset the resyncing
	 */
	sswr = swr;
	while (sswr) {
		sswr->swr_timeout = swr->swr_interval;
		sswr = sswr->swr_next;
	}
	cv_signal(&sw.sw_cv);
	mutex_exit(&sw.sw_mutex);
	return ((opaque_t)swr);
}
377
378
/*
 * called by (eg. pwr management) to resume the scsi_watch_thread
 *
 * Marks the request identified by `token` runnable again; only when
 * every request on the list is back in SWR_WATCH state is the thread
 * itself moved to SW_RUNNING and signalled.  An unknown token is
 * silently ignored.
 */
void
scsi_watch_resume(opaque_t token)
{
	struct scsi_watch_request *swr = (struct scsi_watch_request *)NULL;
	/*
	 * Change the state to SW_RUNNING and wake up the scsi_watch_thread
	 */
	SW_DEBUG(0, sw_label, SCSI_DEBUG, "scsi_watch_resume:\n");
	mutex_enter(&sw.sw_mutex);

	if (!sw.sw_head)
		goto exit;

	/* search for token */
	for (swr = sw.sw_head; swr; swr = swr->swr_next) {
		if (swr == (struct scsi_watch_request *)token)
			break;
	}

	/* if we can't find this value, then we just do nothing */
	if (swr == (struct scsi_watch_request *)NULL)
		goto exit;

	swr->swr_what = SWR_WATCH;


	/* see if all swr's are awake, then start the thread again */
	for (swr = sw.sw_head; swr; swr = swr->swr_next) {
		if (swr->swr_what != SWR_WATCH)
			goto exit;
	}

	sw.sw_state = SW_RUNNING;
	cv_signal(&sw.sw_cv);

exit:
	mutex_exit(&sw.sw_mutex);
}
420
421
/*
 * called by clients (eg. pwr management) to suspend the scsi_watch_thread
 *
 * Polls (in half-second waits) until the request identified by `token`
 * is no longer busy, then marks it SWR_SUSPENDED.  Only when every
 * request on the list is suspended is the thread state set to
 * SW_SUSPENDED.  An unknown token is silently ignored.
 */
void
scsi_watch_suspend(opaque_t token)
{
	struct scsi_watch_request *swr = (struct scsi_watch_request *)NULL;
	clock_t halfsec_delay = drv_usectohz(500000);

	SW_DEBUG(0, sw_label, SCSI_DEBUG, "scsi_watch_suspend:\n");

	mutex_enter(&sw.sw_mutex);

	if (!sw.sw_head)
		goto exit;

	/* search for token */
	for (swr = sw.sw_head; swr; swr = swr->swr_next) {
		if (swr == (struct scsi_watch_request *)token)
			break;
	}

	/* if we can't find this value, then we just do nothing */
	if (swr == (struct scsi_watch_request *)NULL)
		goto exit;


	for (;;) {
		if (swr->swr_busy) {
			/*
			 * XXX: Assumes that this thread can rerun
			 * till all outstanding cmds are complete
			 */
			swr->swr_what = SWR_SUSPEND_REQUESTED;
			/* wait half a second and re-check swr_busy */
			(void) cv_reltimedwait(&sw.sw_cv, &sw.sw_mutex,
			    halfsec_delay, TR_CLOCK_TICK);
		} else {
			swr->swr_what = SWR_SUSPENDED;
			break;
		}
	}

	/* see if all swr's are suspended, then suspend the thread */
	for (swr = sw.sw_head; swr; swr = swr->swr_next) {
		if (swr->swr_what != SWR_SUSPENDED)
			goto exit;
	}

	sw.sw_state = SW_SUSPENDED;

exit:
	mutex_exit(&sw.sw_mutex);
}
475
/*
 * destroy swr, called for watch thread
 *
 * Unlinks the request from the list and frees its packets/buffers, then
 * signals swr_terminate_cv so a thread blocked in
 * scsi_watch_request_terminate() can complete.  A no-op while the
 * reference count is non-zero.  The swr structure itself is NOT freed
 * here; the terminator (or the watch thread, via SUSPEND_DESTROY)
 * frees it.  Caller must hold sw.sw_mutex.
 */
static void
scsi_watch_request_destroy(struct scsi_watch_request *swr)
{
	ASSERT(MUTEX_HELD(&sw.sw_mutex));
	ASSERT(swr->swr_busy == 0);

	SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
	    "scsi_watch_request_destroy: Entering ...\n");
	/* still referenced by other submitters; keep it alive */
	if (swr->swr_ref != 0)
		return;

	/*
	 * remove swr from linked list and destroy pkts
	 */
	if (swr->swr_prev) {
		swr->swr_prev->swr_next = swr->swr_next;
	}
	if (swr->swr_next) {
		swr->swr_next->swr_prev = swr->swr_prev;
	}
	if (sw.sw_head == swr) {
		sw.sw_head = swr->swr_next;
	}
	/*
	 * If the watch thread is blocked on this request, flag it so
	 * the thread frees the structure when it wakes up.
	 */
	if (sw.swr_current == swr) {
		swr->suspend_destroy = SUSPEND_DESTROY;
		sw.swr_current = NULL;
	}

	scsi_destroy_pkt(swr->swr_rqpkt);
	scsi_free_consistent_buf(swr->swr_rqbp);
	if (swr->swr_mmcbp != NULL) {
		scsi_free_consistent_buf(swr->swr_mmcbp);
	}
	scsi_destroy_pkt(swr->swr_pkt);
	cv_signal(&swr->swr_terminate_cv);
}
515
/*
 * scsi_watch_request_terminate()
 * called by requestor to terminate any pending watch request.
 * if the request is currently "busy", and the caller cannot wait, failure
 * is returned. O/w the request is cleaned up immediately.
 *
 * flags:
 *   SCSI_WATCH_TERMINATE_NOWAIT    - fail if a command is in flight;
 *                                    otherwise drop ALL references.
 *   SCSI_WATCH_TERMINATE_ALL_WAIT  - drop ALL references, waiting for an
 *                                    in-flight command if necessary.
 *   (other)                        - drop one reference; only the last
 *                                    reference actually destroys the swr.
 *
 * Returns SCSI_WATCH_TERMINATE_SUCCESS or SCSI_WATCH_TERMINATE_FAIL
 * (unknown token, or busy with NOWAIT).
 */
int
scsi_watch_request_terminate(opaque_t token, int flags)
{
	struct scsi_watch_request *swr =
	    (struct scsi_watch_request *)token;
	struct scsi_watch_request *sswr;

	int count = 0;
	int free_flag = 0;

	/*
	 * We try to clean up this request if we can. We also inform
	 * the watch thread that we mucked around the list so it has
	 * to start reading from head of list again.
	 */
	SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
	    "scsi_watch_request_terminate: Entering(0x%p) ...\n",
	    (void *)swr);
	mutex_enter(&sw.sw_mutex);

	/*
	 * check if it is still in the list
	 */
	sswr = sw.sw_head;
	while (sswr) {
		if (sswr == swr) {
			swr->swr_ref--;
			count = swr->swr_ref;

			if (swr->swr_busy) {
				/* command in flight */
				if (flags == SCSI_WATCH_TERMINATE_NOWAIT) {
					mutex_exit(&sw.sw_mutex);
					return (SCSI_WATCH_TERMINATE_FAIL);
				}
				/* other holders remain; just drop our ref */
				if (count != 0 && flags !=
				    SCSI_WATCH_TERMINATE_ALL_WAIT) {
					mutex_exit(&sw.sw_mutex);
					return (SCSI_WATCH_TERMINATE_SUCCESS);
				}
				if (SCSI_WATCH_TERMINATE_ALL_WAIT == flags) {
					swr->swr_ref = 0;
					count = 0;
				}
				/*
				 * ask the watch thread to destroy it and
				 * wait for the destroy to be signalled
				 */
				swr->swr_what = SWR_STOP;
				cv_wait(&swr->swr_terminate_cv, &sw.sw_mutex);
				free_flag = 1;
				goto done;
			} else {
				if (SCSI_WATCH_TERMINATE_NOWAIT == flags ||
				    SCSI_WATCH_TERMINATE_ALL_WAIT == flags) {
					swr->swr_ref = 0;
					count = 0;
				}
				scsi_watch_request_destroy(swr);
				if (0 == count) {
					sw.sw_flags |= SW_START_HEAD;
					free_flag = 1;
				}
				goto done;
			}
		}
		sswr = sswr->swr_next;
	}
done:
	mutex_exit(&sw.sw_mutex);
	/* fell off the list scan: token was not found */
	if (!sswr) {
		return (SCSI_WATCH_TERMINATE_FAIL);
	}
	/*
	 * Free the structure unless the watch thread was parked on it,
	 * in which case the thread frees it (SUSPEND_DESTROY).
	 */
	if (1 == free_flag &&
	    sswr->suspend_destroy != SUSPEND_DESTROY) {
		cv_destroy(&swr->swr_terminate_cv);
		kmem_free((caddr_t)swr, sizeof (struct scsi_watch_request));
	}

	return (SCSI_WATCH_TERMINATE_SUCCESS);
}
598
599
600 /*
601 * The routines scsi_watch_thread & scsi_watch_request_intr are
602 * on different threads.
603 * If there is no work to be done by the lower level driver
604 * then swr->swr_busy will not be set.
605 * In this case we will call CALLB_CPR_SAFE_BEGIN before
606 * calling cv_timedwait.
607 * In the other case where there is work to be done by
608 * the lower level driver then the flag swr->swr_busy will
609 * be set.
610 * We cannot call CALLB_CPR_SAFE_BEGIN at this point the reason
611 * is the intr thread can interfere with our operations. So
612 * we do a cv_timedwait here. Now at the completion of the
613 * lower level driver's work we will call CALLB_CPR_SAFE_BEGIN
614 * in scsi_watch_request_intr.
615 * In all the cases we will call CALLB_CPR_SAFE_END only if
616 * we already called a CALLB_CPR_SAFE_BEGIN and this is flagged
617 * by sw_cpr_flag.
618 * Warlock has a problem when we use different locks
619 * on the same type of structure in different contexts.
620 * We use callb_cpr_t in both scsi_watch and esp_callback threads.
621 * we use different mutexe's in different threads. And
622 * this is not acceptable to warlock. To avoid this
623 * problem we use the same name for the mutex in
624 * both scsi_watch & esp_callback. when __lock_lint is not defined
625 * esp_callback uses the mutex on the stack and in scsi_watch
626 * a static variable. But when __lock_lint is defined
627 * we make a mutex which is global in esp_callback and
628 * a external mutex for scsi_watch.
629 */
/* number of watch commands currently in flight (cpr_mutex) */
static int sw_cmd_count = 0;
/* 1 while the thread is inside a CALLB_CPR_SAFE_BEGIN window (cpr_mutex) */
static int sw_cpr_flag = 0;
/* CPR (suspend/resume) callback bookkeeping for the watch thread */
static callb_cpr_t cpr_info;
#ifndef __lock_lint
static kmutex_t cpr_mutex;
#else
/* warlock workaround: see block comment above */
extern kmutex_t cpr_mutex;
#endif

#if !defined(lint)
_NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, cpr_info))
_NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, sw_cmd_count))
#endif
642 #endif
643 /*
644 * the scsi watch thread:
645 * it either wakes up if there is work to do or if the cv_timeait
646 * timed out
647 * normally, it wakes up every <delay> seconds and checks the list.
648 * the interval is not very accurate if the cv was signalled but that
649 * really doesn't matter much
650 * it is more important that we fire off all TURs simulataneously so
651 * we don't have to wake up frequently
652 */
/*
 * Body of the single watch thread.  Wakes up when signalled or when the
 * shortest pending timeout expires, walks the request list firing the
 * polling packets whose countdown reached zero, and exits (clearing
 * sw.sw_thread) once the list is empty.  See the CPR discussion in the
 * block comment above for the SAFE_BEGIN/SAFE_END protocol.
 */
static void
scsi_watch_thread()
{
	struct scsi_watch_request	*swr, *next;
	clock_t				last_delay = 0;
	clock_t				next_delay = 0;
	clock_t				onesec = drv_usectohz(1000000);
	clock_t				exit_delay = 60 * onesec;

	SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
	    "scsi_watch_thread: Entering ...\n");

#if !defined(lint)
	_NOTE(NO_COMPETING_THREADS_NOW);
#endif
	mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
	CALLB_CPR_INIT(&cpr_info,
	    &cpr_mutex, callb_generic_cpr, "scsi_watch");
	sw_cpr_flag = 0;
#if !defined(lint)
	/*LINTED*/
	_NOTE(COMPETING_THREADS_NOW);
#endif
	/*
	 * grab the mutex and wait for work
	 */
	mutex_enter(&sw.sw_mutex);
	if (sw.sw_head == NULL) {
		cv_wait(&sw.sw_cv, &sw.sw_mutex);
	}

	/*
	 * now loop forever for work; if queue is empty exit
	 */
	for (;;) {
head:
		swr = sw.sw_head;
		while (swr) {

			/*
			 * If state is not running, wait for scsi_watch_resume
			 * to signal restart, but before going into cv_wait
			 * need to let the PM framework know that it is safe
			 * to stop this thread for CPR
			 */
			if (sw.sw_state != SW_RUNNING) {
				SW_DEBUG(0, sw_label, SCSI_DEBUG,
				    "scsi_watch_thread suspended\n");
				mutex_enter(&cpr_mutex);
				if (!sw_cmd_count) {
					CALLB_CPR_SAFE_BEGIN(&cpr_info);
					sw_cpr_flag = 1;
				}
				mutex_exit(&cpr_mutex);
				/* record where we parked for destroy() */
				sw.swr_current = swr;
				cv_wait(&sw.sw_cv, &sw.sw_mutex);


				/*
				 * Need to let the PM framework know that it
				 * is no longer safe to stop the thread for
				 * CPR.
				 */
				mutex_exit(&sw.sw_mutex);
				mutex_enter(&cpr_mutex);
				if (sw_cpr_flag == 1) {
					CALLB_CPR_SAFE_END(
					    &cpr_info, &cpr_mutex);
					sw_cpr_flag = 0;
				}
				mutex_exit(&cpr_mutex);
				mutex_enter(&sw.sw_mutex);
				/*
				 * The request may have been destroyed while
				 * we slept; if so we free it and rescan.
				 */
				if (SUSPEND_DESTROY == swr->suspend_destroy) {
					cv_destroy(&swr->swr_terminate_cv);
					kmem_free((caddr_t)swr,
					    sizeof (struct scsi_watch_request));
					goto head;
				} else {
					sw.swr_current = NULL;
				}
			}
			/* track the shortest timeout across the list */
			if (next_delay == 0) {
				next_delay = swr->swr_timeout;
			} else {
				next_delay = min(swr->swr_timeout, next_delay);
			}

			swr->swr_timeout -= last_delay;
			next = swr->swr_next;

			SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
			    "scsi_watch_thread: "
			    "swr(0x%p),what=%x,timeout=%lx,"
			    "interval=%lx,delay=%lx\n",
			    (void *)swr, swr->swr_what, swr->swr_timeout,
			    swr->swr_interval, last_delay);

			switch (swr->swr_what) {
			case SWR_SUSPENDED:
			case SWR_SUSPEND_REQUESTED:
				/* if we are suspended, don't do anything */
				break;

			case SWR_STOP:
				if (swr->swr_busy == 0) {
					scsi_watch_request_destroy(swr);
				}
				break;

			default:
				if (swr->swr_timeout <= 0 && !swr->swr_busy) {
					swr->swr_busy = 1;
					swr->swr_timeout = swr->swr_interval;

					/*
					 * submit the cmd and let the completion
					 * function handle the result
					 * release the mutex (good practice)
					 * this should be safe even if the list
					 * is changing
					 */
					mutex_exit(&sw.sw_mutex);
					mutex_enter(&cpr_mutex);
					sw_cmd_count++;
					mutex_exit(&cpr_mutex);
					SW_DEBUG((dev_info_t *)NULL,
					    sw_label, SCSI_DEBUG,
					    "scsi_watch_thread: "
					    "Starting TUR\n");
					if (scsi_transport(swr->swr_pkt) !=
					    TRAN_ACCEPT) {

						/*
						 * try again later
						 */
						swr->swr_busy = 0;
						SW_DEBUG((dev_info_t *)NULL,
						    sw_label, SCSI_DEBUG,
						    "scsi_watch_thread: "
						    "Transport Failed\n");
						mutex_enter(&cpr_mutex);
						sw_cmd_count--;
						mutex_exit(&cpr_mutex);
					}
					mutex_enter(&sw.sw_mutex);
				}
				break;
			}
			swr = next;
			/* terminate() changed the list under us; rescan */
			if (sw.sw_flags & SW_START_HEAD) {
				sw.sw_flags &= ~SW_START_HEAD;
				goto head;
			}
		}

		/*
		 * delay using cv_timedwait; we return when
		 * signalled or timed out
		 */
		if (sw.sw_head != NULL) {
			if (next_delay <= 0) {
				next_delay = onesec;
			}
		} else {
			/* empty list: linger a while before exiting */
			next_delay = exit_delay;
		}

		mutex_enter(&cpr_mutex);
		if (!sw_cmd_count) {
			CALLB_CPR_SAFE_BEGIN(&cpr_info);
			sw_cpr_flag = 1;
		}
		mutex_exit(&cpr_mutex);
		/*
		 * if we return from cv_timedwait because we were
		 * signalled, the delay is not accurate but that doesn't
		 * really matter
		 */
		(void) cv_reltimedwait(&sw.sw_cv, &sw.sw_mutex, next_delay,
		    TR_CLOCK_TICK);
		mutex_exit(&sw.sw_mutex);
		mutex_enter(&cpr_mutex);
		if (sw_cpr_flag == 1) {
			CALLB_CPR_SAFE_END(&cpr_info, &cpr_mutex);
			sw_cpr_flag = 0;
		}
		mutex_exit(&cpr_mutex);
		mutex_enter(&sw.sw_mutex);
		last_delay = next_delay;
		next_delay = 0;

		/*
		 * is there still work to do?
		 */
		if (sw.sw_head == NULL) {
			break;
		}
	}

	/*
	 * no more work to do, reset sw_thread and exit
	 */
	sw.sw_thread = 0;
	mutex_exit(&sw.sw_mutex);
#ifndef __lock_lint
	mutex_enter(&cpr_mutex);
	/* CALLB_CPR_EXIT releases cpr_mutex */
	CALLB_CPR_EXIT(&cpr_info);
#endif
	mutex_destroy(&cpr_mutex);
	SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
	    "scsi_watch_thread: Exiting ...\n");
}
865
/*
 * callback completion function for scsi watch pkt
 */
/* whole scsi_status struct / just the status-code byte of a packet */
#define	SCBP(pkt)	((struct scsi_status *)(pkt)->pkt_scbp)
#define	SCBP_C(pkt)	((*(pkt)->pkt_scbp) & STATUS_MASK)

/*
 * Completion handler shared by the watch packet and its backup request
 * sense packet.  On a check condition without ARQ data it fires the
 * REQUEST SENSE packet and returns, re-entering here on its completion.
 * Otherwise it gathers status + sense, invokes the client callback
 * (a non-zero return from the callback stops the watch), clears
 * swr_busy, and updates the CPR in-flight command count.
 */
static void
scsi_watch_request_intr(struct scsi_pkt *pkt)
{
	struct scsi_watch_result	result;
	struct scsi_watch_request	*swr =
	    (struct scsi_watch_request *)pkt->pkt_private;
	struct scsi_status		*rqstatusp;
	struct scsi_extended_sense	*rqsensep = NULL;
	int				amt = 0;

	SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
	    "scsi_watch_intr: Entering ...\n");

	/*
	 * first check if it is the TUR or RQS pkt
	 */
	if (pkt == swr->swr_pkt) {
		if (SCBP_C(pkt) != STATUS_GOOD &&
		    SCBP_C(pkt) != STATUS_RESERVATION_CONFLICT) {
			if (SCBP(pkt)->sts_chk &&
			    ((pkt->pkt_state & STATE_ARQ_DONE) == 0)) {

				/*
				 * submit the request sense pkt
				 */
				SW_DEBUG((dev_info_t *)NULL,
				    sw_label, SCSI_DEBUG,
				    "scsi_watch_intr: "
				    "Submitting a Request Sense "
				    "Packet\n");
				if (scsi_transport(swr->swr_rqpkt) !=
				    TRAN_ACCEPT) {

					/*
					 * just give up and try again later
					 */
					SW_DEBUG((dev_info_t *)NULL,
					    sw_label, SCSI_DEBUG,
					    "scsi_watch_intr: "
					    "Request Sense "
					    "Transport Failed\n");
					goto done;
				}

				/*
				 * wait for rqsense to complete
				 */
				return;

			} else if (SCBP(pkt)->sts_chk) {

				/*
				 * check the autorequest sense data
				 */
				struct scsi_arq_status *arqstat =
				    (struct scsi_arq_status *)pkt->pkt_scbp;

				rqstatusp = &arqstat->sts_rqpkt_status;
				rqsensep = &arqstat->sts_sensedata;
				amt = swr->swr_sense_length -
				    arqstat->sts_rqpkt_resid;
				SW_DEBUG((dev_info_t *)NULL,
				    sw_label, SCSI_DEBUG,
				    "scsi_watch_intr: "
				    "Auto Request Sense, amt=%x\n", amt);
			}
		}

	} else if (pkt == swr->swr_rqpkt) {

		/*
		 * check the request sense data
		 */
		rqstatusp = (struct scsi_status *)pkt->pkt_scbp;
		rqsensep = (struct scsi_extended_sense *)
		    swr->swr_rqbp->b_un.b_addr;
		amt = swr->swr_sense_length - pkt->pkt_resid;
		SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
		    "scsi_watch_intr: "
		    "Request Sense Completed, amt=%x\n", amt);
	} else {

		/*
		 * should not reach here!!!
		 */
		scsi_log((dev_info_t *)NULL, sw_label, CE_PANIC,
		    "scsi_watch_intr: Bad Packet(0x%p)", (void *)pkt);
	}

	if (rqsensep) {

		/*
		 * check rqsense status and data
		 */
		if (rqstatusp->sts_busy || rqstatusp->sts_chk) {

			/*
			 * try again later
			 */
			SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
			    "scsi_watch_intr: "
			    "Auto Request Sense Failed - "
			    "Busy or Check Condition\n");
			goto done;
		}

		SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
		    "scsi_watch_intr: "
		    "es_key=%x, adq=%x, amt=%x\n",
		    rqsensep->es_key, rqsensep->es_add_code, amt);
	}

	/*
	 * callback to target driver to do the real work
	 */
	result.statusp = SCBP(swr->swr_pkt);
	result.sensep = rqsensep;
	result.actual_sense_length = (uchar_t)amt;
	result.pkt = swr->swr_pkt;
	if (swr->swr_mmcbp != NULL) {
		/* hand the 8-byte GESN event header to the client */
		bcopy(swr->swr_mmcbp->b_un.b_addr, result.mmc_data, 8);
	}

	/* non-zero return from the client requests the watch to stop */
	if ((*swr->swr_callback)(swr->swr_callback_arg, &result)) {
		swr->swr_what = SWR_STOP;
	}

done:
	swr->swr_busy = 0;
	mutex_enter(&cpr_mutex);
	sw_cmd_count --;
	if (!sw_cmd_count) {
		/* last in-flight command done; safe to checkpoint again */
		CALLB_CPR_SAFE_BEGIN(&cpr_info);
		sw_cpr_flag = 1;
	}
	mutex_exit(&cpr_mutex);
}
1009
1010 /*
1011 * scsi_watch_get_ref_count
1012 * called by clients to query the reference count for a given token.
1013 * return the number of reference count or 0 if the given token is
1014 * not found.
1015 */
1016 int
scsi_watch_get_ref_count(opaque_t token)1017 scsi_watch_get_ref_count(opaque_t token)
1018 {
1019 struct scsi_watch_request *swr =
1020 (struct scsi_watch_request *)token;
1021 struct scsi_watch_request *sswr;
1022 int rval = 0;
1023
1024 SW_DEBUG((dev_info_t *)NULL, sw_label, SCSI_DEBUG,
1025 "scsi_watch_get_ref_count: Entering(0x%p) ...\n",
1026 (void *)swr);
1027 mutex_enter(&sw.sw_mutex);
1028
1029 sswr = sw.sw_head;
1030 while (sswr) {
1031 if (sswr == swr) {
1032 rval = swr->swr_ref;
1033 mutex_exit(&sw.sw_mutex);
1034 return (rval);
1035 }
1036 sswr = sswr->swr_next;
1037 }
1038
1039 mutex_exit(&sw.sw_mutex);
1040 return (rval);
1041 }
1042