/*-
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_compat.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

static u_int	camperiphnextunit(struct periph_driver *p_drv,
		    u_int newunit, bool wired,
		    path_id_t pathid, target_id_t target,
		    lun_id_t lun);
static u_int	camperiphunit(struct periph_driver *p_drv,
		    path_id_t pathid, target_id_t target,
		    lun_id_t lun,
		    const char *sn);
static void	camperiphdone(struct cam_periph *periph,
		    union ccb *done_ccb);
static void	camperiphfree(struct cam_periph *periph);
static int	camperiphscsistatuserror(union ccb *ccb,
		    union ccb **orig_ccb,
		    cam_flags camflags,
		    uint32_t sense_flags,
		    int *openings,
		    uint32_t *relsim_flags,
		    uint32_t *timeout,
		    uint32_t *action,
		    const char **action_string);
static int	camperiphscsisenseerror(union ccb *ccb,
		    union ccb **orig_ccb,
		    cam_flags camflags,
		    uint32_t sense_flags,
		    int *openings,
		    uint32_t *relsim_flags,
		    uint32_t *timeout,
		    uint32_t *action,
		    const char **action_string);
static void	cam_periph_devctl_notify(union ccb *ccb);

static int nperiph_drivers;
static int initialized = 0;
struct periph_driver **periph_drivers;

static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");

static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);

static u_int periph_mapmem_thresh = 65536;
SYSCTL_UINT(_kern_cam, OID_AUTO, mapmem_thresh, CTLFLAG_RWTUN,
    &periph_mapmem_thresh, 0, "Threshold for user-space buffer mapping");

void
periphdriver_register(void *data)
{
	struct periph_driver *drv = (struct periph_driver *)data;
	struct periph_driver **newdrivers, **old;
	int ndrivers;

again:
	ndrivers = nperiph_drivers + 2;
	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
	    M_WAITOK);
	xpt_lock_buses();
	if (ndrivers != nperiph_drivers + 2) {
		/*
		 * Lost race against itself; go around.
		 */
		xpt_unlock_buses();
		free(newdrivers, M_CAMPERIPH);
		goto again;
	}
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		    sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = drv;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	nperiph_drivers++;
	xpt_unlock_buses();
	if (old)
		free(old, M_CAMPERIPH);
	/* If driver marked as early or it is late now, initialize it. */
	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
	    initialized > 1)
		(*drv->init)();
}
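
/*
 * Illustrative sketch (not from this file): drivers normally register
 * themselves with the PERIPHDRIVER_DECLARE() macro from cam_periph.h,
 * which arranges the periphdriver_register() call via SYSINIT.  The
 * "xx" names below are hypothetical:
 *
 *	static periph_init_t xxinit;
 *
 *	static struct periph_driver xxdriver = {
 *		xxinit, "xx",
 *		TAILQ_HEAD_INITIALIZER(xxdriver.units), 0
 *	};
 *	PERIPHDRIVER_DECLARE(xx, xxdriver);
 */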

int
periphdriver_unregister(void *data)
{
	struct periph_driver *drv = (struct periph_driver *)data;
	int error, n;

	/* If driver marked as early or it is late now, deinitialize it. */
	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
	    initialized > 1) {
		if (drv->deinit == NULL) {
			printf("CAM periph driver '%s' doesn't have deinit.\n",
			    drv->driver_name);
			return (EOPNOTSUPP);
		}
		error = drv->deinit();
		if (error != 0)
			return (error);
	}

	xpt_lock_buses();
	for (n = 0; n < nperiph_drivers && periph_drivers[n] != drv; n++)
		;
	KASSERT(n < nperiph_drivers,
	    ("Periph driver '%s' was not registered", drv->driver_name));
	for (; n + 1 < nperiph_drivers; n++)
		periph_drivers[n] = periph_drivers[n + 1];
	periph_drivers[n + 1] = NULL;
	nperiph_drivers--;
	xpt_unlock_buses();
	return (0);
}

void
periphdriver_init(int level)
{
	int i, early;

	initialized = max(initialized, level);
	for (i = 0; periph_drivers[i] != NULL; i++) {
		early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
		if (early == initialized)
			(*periph_drivers[i]->init)();
	}
}

cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
    periph_oninv_t *periph_oninvalidate,
    periph_dtor_t *periph_dtor, periph_start_t *periph_start,
    char *name, cam_periph_type type, struct cam_path *path,
    ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct periph_driver **p_drv;
	struct cam_sim *sim;
	struct cam_periph *periph;
	struct cam_periph *cur_periph;
	path_id_t path_id;
	target_id_t target_id;
	lun_id_t lun_id;
	cam_status status;
	u_int init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {
		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			printf("cam_periph_alloc: attempt to re-allocate "
			    "valid device %s%d rejected flags %#x "
			    "refcount %d\n", periph->periph_name,
			    periph->unit_number, periph->flags,
			    periph->refcount);
		}
		return (CAM_REQ_INVALID);
	}

	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
	    M_NOWAIT|M_ZERO);

	if (periph == NULL)
		return (CAM_RESRC_UNAVAIL);

	init_level++;

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->scheduled_priority = CAM_PRIORITY_NONE;
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 1;		/* Dropped by invalidation. */
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;
	periph->path = path;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("cam_periph_alloc: invalid periph name '%s'\n", name);
		xpt_unlock_buses();
		xpt_free_path(periph->path);
		free(periph, M_CAMPERIPH);
		return (CAM_REQ_INVALID);
	}
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id,
	    path->device->serial_num);
	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);
	if (cur_periph != NULL) {
		KASSERT(cur_periph->unit_number != periph->unit_number,
		    ("duplicate units on periph list"));
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	} else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}
	xpt_unlock_buses();

	init_level++;

	status = xpt_add_periph(periph);
	if (status != CAM_REQ_CMP)
		goto failure;

	init_level++;
	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
		xpt_remove_periph(periph);
		/* FALLTHROUGH */
	case 2:
		xpt_lock_buses();
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_unlock_buses();
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		free(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("%s: Unknown init level", __func__);
	}
	return(status);
}
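
/*
 * Illustrative sketch: a typical caller is a driver's async callback
 * reacting to AC_FOUND_DEVICE; the "xx" names are hypothetical:
 *
 *	status = cam_periph_alloc(xxregister, xxoninvalidate, xxcleanup,
 *	    xxstart, "xx", CAM_PERIPH_BIO, path, xxasync,
 *	    AC_FOUND_DEVICE, cgd);
 *	if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
 *		printf("xxasync: unable to attach new device, "
 *		    "status %#x\n", status);
 */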

/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) name.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				xpt_unlock_buses();
				cam_periph_assert(periph, MA_OWNED);
				return(periph);
			}
		}
		if (name != NULL) {
			xpt_unlock_buses();
			return(NULL);
		}
	}
	xpt_unlock_buses();
	return(NULL);
}

/*
 * Find peripheral driver instances attached to the specified path.
 */
int
cam_periph_list(struct cam_path *path, struct sbuf *sb)
{
	struct sbuf local_sb;
	struct periph_driver **p_drv;
	struct cam_periph *periph;
	int count;
	int sbuf_alloc_len;

	sbuf_alloc_len = 16;
retry:
	sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
	count = 0;
	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) != 0)
				continue;

			if (sbuf_len(&local_sb) != 0)
				sbuf_cat(&local_sb, ",");

			sbuf_printf(&local_sb, "%s%d", periph->periph_name,
			    periph->unit_number);

			if (sbuf_error(&local_sb) == ENOMEM) {
				sbuf_alloc_len *= 2;
				xpt_unlock_buses();
				sbuf_delete(&local_sb);
				goto retry;
			}
			count++;
		}
	}
	xpt_unlock_buses();
	sbuf_finish(&local_sb);
	if (sbuf_len(sb) != 0)
		sbuf_cat(sb, ",");
	sbuf_cat(sb, sbuf_data(&local_sb));
	sbuf_delete(&local_sb);
	return (count);
}

int
cam_periph_acquire(struct cam_periph *periph)
{
	int status;

	if (periph == NULL)
		return (EINVAL);

	status = ENOENT;
	xpt_lock_buses();
	if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
		periph->refcount++;
		status = 0;
	}
	xpt_unlock_buses();

	return (status);
}

void
cam_periph_doacquire(struct cam_periph *periph)
{

	xpt_lock_buses();
	KASSERT(periph->refcount >= 1,
	    ("cam_periph_doacquire() with refcount == %d", periph->refcount));
	periph->refcount++;
	xpt_unlock_buses();
}

void
cam_periph_release_locked_buses(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);
	KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
	if (--periph->refcount == 0)
		camperiphfree(periph);
}

void
cam_periph_release_locked(struct cam_periph *periph)
{

	if (periph == NULL)
		return;

	xpt_lock_buses();
	cam_periph_release_locked_buses(periph);
	xpt_unlock_buses();
}

void
cam_periph_release(struct cam_periph *periph)
{
	struct mtx *mtx;

	if (periph == NULL)
		return;

	cam_periph_assert(periph, MA_NOTOWNED);
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);
	cam_periph_release_locked(periph);
	mtx_unlock(mtx);
}
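
/*
 * Illustrative sketch: acquire/release bracket any use of a periph
 * from a context that does not otherwise hold a reference, e.g. a
 * cdevsw open routine:
 *
 *	if (cam_periph_acquire(periph) != 0)
 *		return (ENXIO);
 *	... use the periph ...
 *	cam_periph_release(periph);
 */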

/*
 * hold/unhold act as mutual exclusion for sections of the code that
 * need to sleep and want to make sure that other sections that
 * will interfere are held off.  This only protects exclusive sections
 * from each other.
 */
int
cam_periph_hold(struct cam_periph *periph, int priority)
{
	int error;

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */

	if (cam_periph_acquire(periph) != 0)
		return (ENXIO);

	cam_periph_assert(periph, MA_OWNED);
	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = cam_periph_sleep(periph, periph, priority,
		    "caplck", 0)) != 0) {
			cam_periph_release_locked(periph);
			return (error);
		}
		if (periph->flags & CAM_PERIPH_INVALID) {
			cam_periph_release_locked(periph);
			return (ENXIO);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}

void
cam_periph_unhold(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);

	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}

	cam_periph_release_locked(periph);
}
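
/*
 * Illustrative sketch: hold/unhold serialize an exclusive, possibly
 * sleeping section such as device open, with the periph lock held on
 * entry:
 *
 *	cam_periph_lock(periph);
 *	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) {
 *		cam_periph_unlock(periph);
 *		return (error);
 *	}
 *	... exclusive section ...
 *	cam_periph_unhold(periph);
 *	cam_periph_unlock(periph);
 */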

void
cam_periph_hold_boot(struct cam_periph *periph)
{

	root_mount_hold_token(periph->periph_name, &periph->periph_rootmount);
}

void
cam_periph_release_boot(struct cam_periph *periph)
{

	root_mount_rel(&periph->periph_rootmount);
}

/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired" the
 * first time through the loop since after that we'll be looking at unit
 * numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, bool wired,
    path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct cam_periph *periph;
	char *periph_name;
	int i, val, dunit, r;
	const char *dname, *strval;

	periph_name = p_drv->driver_name;
	for (;;newunit++) {
		for (periph = TAILQ_FIRST(&p_drv->units);
		    periph != NULL && periph->unit_number != newunit;
		    periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = false;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't let the mere presence of any attributes of a device
		 * mean that it matches a wired-down entry.  Instead, insist
		 * that one of the matching criteria from camperiphunit be
		 * present for the device.
		 */
		i = 0;
		dname = periph_name;
		for (;;) {
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			if (r != 0)
				break;

			if (newunit != dunit)
				continue;
			if (resource_string_value(dname, dunit, "sn", &strval) == 0 ||
			    resource_int_value(dname, dunit, "lun", &val) == 0 ||
			    resource_int_value(dname, dunit, "target", &val) == 0 ||
			    resource_string_value(dname, dunit, "at", &strval) == 0)
				break;
		}
		if (r != 0)
			break;
	}
	return (newunit);
}

static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
    target_id_t target, lun_id_t lun, const char *sn)
{
	bool wired = false;
	u_int unit;
	int i, val, dunit;
	const char *dname, *strval;
	char pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	unit = 0;
	i = 0;
	dname = periph_name;

	for (wired = false; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
	    wired = false) {
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			wired = true;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			wired = true;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			wired = true;
		}
		if (resource_string_value(dname, dunit, "sn", &strval) == 0) {
			if (sn == NULL || strcmp(strval, sn) != 0)
				continue;
			wired = true;
		}
		if (wired) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);

	return (unit);
}
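
/*
 * Illustrative sketch: device.hints(5) entries that would wire unit 4
 * of a hypothetical "xx" driver using the "at"/"target"/"lun" criteria
 * matched above:
 *
 *	hint.xx.4.at="scbus0"
 *	hint.xx.4.target="1"
 *	hint.xx.4.lun="0"
 */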

void
cam_periph_invalidate(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);
	/*
	 * We only tear down the device the first time a peripheral is
	 * invalidated.
	 */
	if ((periph->flags & CAM_PERIPH_INVALID) != 0)
		return;

	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) {
		struct sbuf sb;
		char buffer[160];

		sbuf_new(&sb, buffer, 160, SBUF_FIXEDLEN);
		xpt_denounce_periph_sbuf(periph, &sb);
		sbuf_finish(&sb);
		sbuf_putbuf(&sb);
	}
	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
	if (periph->periph_oninval != NULL)
		periph->periph_oninval(periph);
	cam_periph_release_locked(periph);
}

static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;
	struct periph_driver *drv;

	cam_periph_assert(periph, MA_OWNED);
	KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
	    periph->periph_name, periph->unit_number));
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}
	/*
	 * Cache a pointer to the periph_driver structure.  If a
	 * periph_driver is added or removed from the array (see
	 * periphdriver_register()) while we drop the topology lock
	 * below, p_drv may change.  This doesn't protect against this
	 * particular periph_driver going away.  That will require full
	 * reference counting in the periph_driver infrastructure.
	 */
	drv = *p_drv;

	/*
	 * We need to set this flag before dropping the topology lock, to
	 * let anyone who is traversing the list know that this peripheral
	 * is about to be freed, and there will be no more reference count
	 * checks.
	 */
	periph->flags |= CAM_PERIPH_FREE;

	/*
	 * The peripheral destructor semantics dictate calling with only the
	 * SIM mutex held.  Since it might sleep, it should not be called
	 * with the topology lock held.
	 */
	xpt_unlock_buses();

	/*
	 * We need to call the peripheral destructor prior to removing the
	 * peripheral from the list.  Otherwise, we risk running into a
	 * scenario where the peripheral unit number may get reused
	 * (because it has been removed from the list), but some resources
	 * used by the peripheral are still hanging around.  In particular,
	 * the devfs nodes used by some peripherals like the pass(4) driver
	 * aren't fully cleaned up until the destructor is run.  If the
	 * unit number is reused before the devfs instance is fully gone,
	 * devfs will panic.
	 */
	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);

	/*
	 * The peripheral list is protected by the topology lock.  We have to
	 * remove the periph from the drv list before we call deferred_ac.  The
	 * AC_FOUND_DEVICE callback won't create a new periph if it's still there.
	 */
	xpt_lock_buses();

	TAILQ_REMOVE(&drv->units, periph, unit_links);
	drv->generation++;

	xpt_remove_periph(periph);

	xpt_unlock_buses();
	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
		xpt_print(periph->path, "Periph destroyed\n");
	else
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE: {
			struct ccb_getdev cgd;

			xpt_gdev_type(&cgd, periph->path);
			periph->deferred_callback(NULL, periph->deferred_ac,
			    periph->path, &cgd);
			break;
		}
		case AC_PATH_REGISTERED: {
			struct ccb_pathinq cpi;

			xpt_path_inq(&cpi, periph->path);
			periph->deferred_callback(NULL, periph->deferred_ac,
			    periph->path, &cpi);
			break;
		}
		default:
			periph->deferred_callback(NULL, periph->deferred_ac,
			    periph->path, NULL);
			break;
		}
	}
	xpt_free_path(periph->path);
	free(periph, M_CAMPERIPH);
	xpt_lock_buses();
}

/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This is now a generic function that centralizes most
 * of the sanity checks on the data flags, if any.
 * This also works only for up to maxphys memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
    u_int maxmap)
{
	int numbufs, i;
	uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	uint32_t lengths[CAM_PERIPH_MAXMAPS];
	uint32_t dirs[CAM_PERIPH_MAXMAPS];

	bzero(mapinfo, sizeof(*mapinfo));
	if (maxmap == 0)
		maxmap = DFLTPHYS;	/* traditional default */
	else if (maxmap > maxphys)
		maxmap = maxphys;	/* for safety */
	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			    "length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (uint8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (uint8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (uint8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict.  vmapbuf() is able to map up to maxphys.
		 */
		maxmap = maxphys;
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_ATA_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->ataio.data_ptr;
		lengths[0] = ccb->ataio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_MMC_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);
		/* Two mappings: one for cmd->data and one for cmd->data->data */
		data_ptrs[0] = (unsigned char **)&ccb->mmcio.cmd.data;
		lengths[0] = sizeof(struct mmc_data *);
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		data_ptrs[1] = (unsigned char **)&ccb->mmcio.cmd.data->data;
		lengths[1] = ccb->mmcio.cmd.data->len;
		dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 2;
		break;
	case XPT_SMP_IO:
		data_ptrs[0] = &ccb->smpio.smp_request;
		lengths[0] = ccb->smpio.smp_request_len;
		dirs[0] = CAM_DIR_OUT;
		data_ptrs[1] = &ccb->smpio.smp_response;
		lengths[1] = ccb->smpio.smp_response_len;
		dirs[1] = CAM_DIR_IN;
		numbufs = 2;
		break;
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return (0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->nvmeio.data_ptr;
		lengths[0] = ccb->nvmeio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_DEV_ADVINFO:
		if (ccb->cdai.bufsiz == 0)
			return (0);

		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
		lengths[0] = ccb->cdai.bufsiz;
		dirs[0] = CAM_DIR_IN;
		numbufs = 1;

		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict.  vmapbuf() is able to map up to maxphys.
		 */
		maxmap = maxphys;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {
		if (lengths[i] > maxmap) {
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			    "which is greater than %lu\n",
			    (u_long)(lengths[i]), (u_long)maxmap);
			return (E2BIG);
		}
	}

	for (i = 0; i < numbufs; i++) {
		/* Save the user's data address. */
		mapinfo->orig[i] = *data_ptrs[i];

		/*
		 * For small buffers use malloc+copyin/copyout instead of
		 * mapping to KVA to avoid expensive TLB shootdowns.  For
		 * small allocations malloc is backed by UMA, and so much
		 * cheaper on SMP systems.
		 */
		if (lengths[i] <= periph_mapmem_thresh &&
		    ccb->ccb_h.func_code != XPT_MMC_IO) {
			*data_ptrs[i] = malloc(lengths[i], M_CAMPERIPH,
			    M_WAITOK);
			if (dirs[i] != CAM_DIR_IN) {
				if (copyin(mapinfo->orig[i], *data_ptrs[i],
				    lengths[i]) != 0) {
					free(*data_ptrs[i], M_CAMPERIPH);
					*data_ptrs[i] = mapinfo->orig[i];
					goto fail;
				}
			} else
				bzero(*data_ptrs[i], lengths[i]);
			continue;
		}

		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = uma_zalloc(pbuf_zone, M_WAITOK);

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = (dirs[i] == CAM_DIR_OUT) ?
		    BIO_WRITE : BIO_READ;

		/* Map the buffer into kernel memory. */
		if (vmapbuf(mapinfo->bp[i], *data_ptrs[i], lengths[i], 1) < 0) {
			uma_zfree(pbuf_zone, mapinfo->bp[i]);
			goto fail;
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;
	}

	/*
	 * Now that we've gotten this far, change ownership to the kernel
	 * of the buffers so that we don't run afoul of returning to user
	 * space with locks (on the buffer) held.
	 */
	for (i = 0; i < numbufs; i++) {
		if (mapinfo->bp[i])
			BUF_KERNPROC(mapinfo->bp[i]);
	}

	mapinfo->num_bufs_used = numbufs;
	return(0);

fail:
	for (i--; i >= 0; i--) {
		if (mapinfo->bp[i]) {
			vunmapbuf(mapinfo->bp[i]);
			uma_zfree(pbuf_zone, mapinfo->bp[i]);
		} else
			free(*data_ptrs[i], M_CAMPERIPH);
		*data_ptrs[i] = mapinfo->orig[i];
	}
	return(EACCES);
}
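
/*
 * Illustrative sketch of the map/run/unmap pattern used by pass(4)-style
 * ioctl handlers ("xxerror" and the softc field are hypothetical; a real
 * caller also checks the unmap return value for copyout errors):
 *
 *	struct cam_periph_map_info mapinfo;
 *
 *	error = cam_periph_mapmem(ccb, &mapinfo, maxphys);
 *	if (error != 0)
 *		return (error);
 *	error = cam_periph_runccb(ccb, xxerror, CAM_RETRY_SELTO,
 *	    SF_RETRY_UA, softc->device_stats);
 *	cam_periph_unmapmem(ccb, &mapinfo);
 */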

/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
int
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int error, numbufs, i;
	uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	uint32_t lengths[CAM_PERIPH_MAXMAPS];
	uint32_t dirs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* nothing to free and the process wasn't held. */
		return (0);
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (uint8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (uint8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (uint8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_ATA_IO:
		data_ptrs[0] = &ccb->ataio.data_ptr;
		lengths[0] = ccb->ataio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_MMC_IO:
		data_ptrs[0] = (uint8_t **)&ccb->mmcio.cmd.data;
		lengths[0] = sizeof(struct mmc_data *);
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		data_ptrs[1] = (uint8_t **)&ccb->mmcio.cmd.data->data;
		lengths[1] = ccb->mmcio.cmd.data->len;
		dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 2;
		break;
	case XPT_SMP_IO:
		data_ptrs[0] = &ccb->smpio.smp_request;
		lengths[0] = ccb->smpio.smp_request_len;
		dirs[0] = CAM_DIR_OUT;
		data_ptrs[1] = &ccb->smpio.smp_response;
		lengths[1] = ccb->smpio.smp_response_len;
		dirs[1] = CAM_DIR_IN;
		numbufs = 2;
		break;
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN:
		data_ptrs[0] = &ccb->nvmeio.data_ptr;
		lengths[0] = ccb->nvmeio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_DEV_ADVINFO:
		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
		lengths[0] = ccb->cdai.bufsiz;
		dirs[0] = CAM_DIR_IN;
		numbufs = 1;
		break;
	default:
		numbufs = 0;
		break;
	}

	error = 0;
	for (i = 0; i < numbufs; i++) {
		if (mapinfo->bp[i]) {
			/* unmap the buffer */
			vunmapbuf(mapinfo->bp[i]);

			/* release the buffer */
			uma_zfree(pbuf_zone, mapinfo->bp[i]);
		} else {
			if (dirs[i] != CAM_DIR_OUT) {
				int error1;

				error1 = copyout(*data_ptrs[i], mapinfo->orig[i],
				    lengths[i]);
				if (error == 0)
					error = error1;
			}
			free(*data_ptrs[i], M_CAMPERIPH);
		}

		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->orig[i];
	}

	return (error);
}

int
cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
    int (*error_routine)(union ccb *ccb,
			 cam_flags camflags,
			 uint32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU_0x19:
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		xpt_setup_ccb(&ccb->ccb_h,
		    ccb->ccb_h.path,
		    CAM_PRIORITY_NORMAL);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}
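
/*
 * Illustrative sketch: drivers usually defer unknown ioctls to this
 * handler from their own ioctl routine ("xxerror" hypothetical):
 *
 *	default:
 *		error = cam_periph_ioctl(periph, cmd, addr, xxerror);
 *		break;
 */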

static void
cam_periph_done_panic(struct cam_periph *periph, union ccb *done_ccb)
{

	panic("%s: already done with ccb %p", __func__, done_ccb);
}

static void
cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
{

	/* Caller will release the CCB */
	xpt_path_assert(done_ccb->ccb_h.path, MA_OWNED);
	done_ccb->ccb_h.cbfcnp = cam_periph_done_panic;
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static void
cam_periph_ccbwait(union ccb *ccb)
{

	if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
		while (ccb->ccb_h.cbfcnp != cam_periph_done_panic)
			xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp,
			    PRIBIO, "cbwait", 0);
	}
	KASSERT(ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX &&
	    (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG,
	    ("%s: proceeding with incomplete ccb: ccb=%p, func_code=%#x, "
	    "status=%#x, index=%d", __func__, ccb, ccb->ccb_h.func_code,
	    ccb->ccb_h.status, ccb->ccb_h.pinfo.index));
}

/*
 * Dispatch a CCB and wait for it to complete.  If the CCB has set a
 * callback function (ccb->ccb_h.cbfcnp), it will be overwritten and lost.
 */
int
cam_periph_runccb(union ccb *ccb,
    int (*error_routine)(union ccb *ccb,
			 cam_flags camflags,
			 uint32_t sense_flags),
    cam_flags camflags, uint32_t sense_flags,
    struct devstat *ds)
{
	struct bintime *starttime;
	struct bintime ltime;
	int error;
	bool must_poll;
	uint32_t timeout = 1;

	starttime = NULL;
	xpt_path_assert(ccb->ccb_h.path, MA_OWNED);
	KASSERT((ccb->ccb_h.flags & CAM_UNLOCKED) == 0,
	    ("%s: ccb=%p, func_code=%#x, flags=%#x", __func__, ccb,
	    ccb->ccb_h.func_code, ccb->ccb_h.flags));

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if (ds != NULL &&
	    (ccb->ccb_h.func_code == XPT_SCSI_IO ||
	    ccb->ccb_h.func_code == XPT_ATA_IO ||
	    ccb->ccb_h.func_code == XPT_NVME_IO)) {
		starttime = &ltime;
		binuptime(starttime);
		devstat_start_transaction(ds, starttime);
	}

	/*
	 * We must poll the I/O while we're dumping.  The scheduler is normally
	 * stopped for dumping, except when we call doadump from ddb.  While the
	 * scheduler is running in this case, we still need to poll the I/O to
	 * avoid sleeping waiting for the ccb to complete.
	 *
	 * A panic triggered dump stops the scheduler, any callback from the
	 * shutdown_post_sync event will run with the scheduler stopped, but
	 * before we're officially dumping.  To avoid hanging in adashutdown
	 * initiated commands (or other similar situations), we have to test for
	 * either dumping or SCHEDULER_STOPPED() here.
	 *
	 * To avoid locking problems, dumping/polling callers must call
	 * without a periph lock held.
	 */
	must_poll = dumping || SCHEDULER_STOPPED();
	ccb->ccb_h.cbfcnp = cam_periph_done;

	/*
	 * If we're polling, then we need to ensure that we have ample resources
	 * in the periph.  cam_periph_error can reschedule the ccb by calling
	 * xpt_action and returning ERESTART, so we have to effect the polling
	 * in the do loop below.
	 */
	if (must_poll) {
		if (cam_sim_pollable(ccb->ccb_h.path->bus->sim))
			timeout = xpt_poll_setup(ccb);
		else
			timeout = 0;
	}

	if (timeout == 0) {
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		error = EBUSY;
	} else {
		xpt_action(ccb);
		do {
			if (must_poll) {
				xpt_pollwait(ccb, timeout);
				timeout = ccb->ccb_h.timeout * 10;
			} else {
				cam_periph_ccbwait(ccb);
			}
			if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
				error = 0;
			else if (error_routine != NULL) {
				/*
				 * cbfcnp is modified by cam_periph_ccbwait so
				 * reset it before we call the error routine
				 * which may call xpt_done.
				 */
				ccb->ccb_h.cbfcnp = cam_periph_done;
				error = (*error_routine)(ccb, camflags, sense_flags);
			} else
				error = 0;
		} while (error == ERESTART);
	}

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
		cam_release_devq(ccb->ccb_h.path,
		    /* relsim_flags */0,
		    /* openings */0,
		    /* timeout */0,
		    /* getcount_only */ FALSE);
		ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	if (ds != NULL) {
		uint32_t bytes;
		devstat_tag_type tag;
		bool valid = true;

		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			bytes = ccb->csio.dxfer_len - ccb->csio.resid;
			tag = (devstat_tag_type)(ccb->csio.tag_action & 0x3);
		} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
			bytes = ccb->ataio.dxfer_len - ccb->ataio.resid;
			tag = (devstat_tag_type)0;
		} else if (ccb->ccb_h.func_code == XPT_NVME_IO) {
			bytes = ccb->nvmeio.dxfer_len; /* NB: resid not possible */
			tag = (devstat_tag_type)0;
		} else {
			valid = false;
		}
		if (valid)
			devstat_end_transaction(ds, bytes, tag,
			    ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ?
			    DEVSTAT_NO_DATA : (ccb->ccb_h.flags & CAM_DIR_OUT) ?
			    DEVSTAT_WRITE : DEVSTAT_READ, NULL, starttime);
	}

	return(error);
}
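
/*
 * Illustrative sketch of synchronous dispatch: get a CCB at normal
 * priority, fill it, and let cam_periph_runccb() sleep until it is
 * done ("xxerror" and the softc field are hypothetical):
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	scsi_test_unit_ready(&ccb->csio, 1, NULL, MSG_SIMPLE_Q_TAG,
 *	    SSD_FULL_SIZE, 5000);
 *	error = cam_periph_runccb(ccb, xxerror, CAM_RETRY_SELTO,
 *	    SF_RETRY_UA, softc->device_stats);
 *	xpt_release_ccb(ccb);
 */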

void
cam_freeze_devq(struct cam_path *path)
{
	struct ccb_hdr ccb_h;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n"));
	memset(&ccb_h, 0, sizeof(ccb_h));
	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}

uint32_t
cam_release_devq(struct cam_path *path, uint32_t relsim_flags,
    uint32_t openings, uint32_t arg,
    int getcount_only)
{
	struct ccb_relsim crs;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n",
	    relsim_flags, openings, arg, getcount_only));
	memset(&crs, 0, sizeof(crs));
	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = arg;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}
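
/*
 * Illustrative sketch: the freeze/release pair holds the device queue
 * while a condition settles, here releasing after a 100ms timeout
 * (mirroring the pattern in cam_periph_freeze_after_event() below):
 *
 *	cam_freeze_devq(periph->path);
 *	cam_release_devq(periph->path, RELSIM_RELEASE_AFTER_TIMEOUT,
 *	    0, 100, 0);
 */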

#define saved_ccb_ptr ppriv_ptr0
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb *saved_ccb;
	cam_status status;
	struct scsi_start_stop_unit *scsi_cmd;
	int error = 0, error_code, sense_key, asc, ascq;
	uint16_t done_flags;

	scsi_cmd = (struct scsi_start_stop_unit *)
	    &done_ccb->csio.cdb_io.cdb_bytes;
	status = done_ccb->ccb_h.status;

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if (scsi_extract_sense_ccb(done_ccb,
		    &error_code, &sense_key, &asc, &ascq)) {
			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			    (asc == 0x24) && (ascq == 0x00)) {
				scsi_cmd->how &= ~SSS_LOEJ;
				if (status & CAM_DEV_QFRZN) {
					cam_release_devq(done_ccb->ccb_h.path,
					    0, 0, 0, 0);
					done_ccb->ccb_h.status &=
					    ~CAM_DEV_QFRZN;
				}
				xpt_action(done_ccb);
				goto out;
			}
		}
		error = cam_periph_error(done_ccb, 0,
		    SF_RETRY_UA | SF_NO_PRINT);
		if (error == ERESTART)
			goto out;
		if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
			cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
			done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		}
	} else {
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 */
		if (scsi_cmd->opcode == START_STOP_UNIT)
			xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
	}

	/* If we tried long wait and still failed, remember that. */
	if ((periph->flags & CAM_PERIPH_RECOVERY_WAIT) &&
	    (done_ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY)) {
		periph->flags &= ~CAM_PERIPH_RECOVERY_WAIT;
		if (error != 0 && done_ccb->ccb_h.retry_count == 0)
			periph->flags |= CAM_PERIPH_RECOVERY_WAIT_FAILED;
	}

	/*
	 * After recovery action(s) completed, return to the original CCB.
	 * If the recovery CCB has failed, considering its own possible
	 * retries and recovery, assume we are back in state where we have
	 * been originally, but without recovery hopes left.  In such case,
	 * after the final attempt below, we cancel any further retries,
	 * blocking by that also any new recovery attempts for this CCB,
	 * and the result will be the final one returned to the CCB owner.
	 */
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
	KASSERT(saved_ccb->ccb_h.func_code == XPT_SCSI_IO,
	    ("%s: saved_ccb func_code %#x != XPT_SCSI_IO",
	    __func__, saved_ccb->ccb_h.func_code));
	KASSERT(done_ccb->ccb_h.func_code == XPT_SCSI_IO,
	    ("%s: done_ccb func_code %#x != XPT_SCSI_IO",
	    __func__, done_ccb->ccb_h.func_code));
	saved_ccb->ccb_h.periph_links = done_ccb->ccb_h.periph_links;
	done_flags = done_ccb->ccb_h.alloc_flags;
	bcopy(saved_ccb, done_ccb, sizeof(struct ccb_scsiio));
	done_ccb->ccb_h.alloc_flags = done_flags;
	xpt_free_ccb(saved_ccb);
	if (done_ccb->ccb_h.cbfcnp != camperiphdone)
		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
	if (error != 0)
		done_ccb->ccb_h.retry_count = 0;
	xpt_action(done_ccb);

out:
	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
	cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
}

/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, uint32_t code,
    struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	default:
		break;
	}
}

void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	memset(&cgds, 0, sizeof(cgds));
	xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}
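
/*
 * Illustrative sketch: async handlers typically settle the bus after a
 * reset, using the global SCSI settle delay:
 *
 *	case AC_SENT_BDR:
 *	case AC_BUS_RESET:
 *		cam_periph_bus_settle(periph, scsi_delay);
 *		break;
 */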

void
cam_periph_freeze_after_event(struct cam_periph *periph,
    struct timeval *event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	if (!timevalisset(event_time))
		return;

	microtime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
		    RELSIM_RELEASE_AFTER_TIMEOUT,
		    /*reduction*/0,
		    /*timeout*/duration_ms,
		    /*getcount_only*/0);
	}
}

static int
camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
    cam_flags camflags, uint32_t sense_flags,
    int *openings, uint32_t *relsim_flags,
    uint32_t *timeout, uint32_t *action, const char **action_string)
{
	struct cam_periph *periph;
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		error = camperiphscsisenseerror(ccb, orig_ccb,
		    camflags,
		    sense_flags,
		    openings,
		    relsim_flags,
		    timeout,
		    action,
		    action_string);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		memset(&cgds, 0, sizeof(cgds));
		xpt_setup_ccb(&cgds.ccb_h,
		    ccb->ccb_h.path,
		    CAM_PRIORITY_NORMAL);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			*action &= ~SSQ_PRINT_SENSE;
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		periph = xpt_path_periph(ccb->ccb_h.path);
		if (periph->flags & CAM_PERIPH_INVALID) {
			error = ENXIO;
			*action_string = "Periph was invalidated";
		} else if ((sense_flags & SF_RETRY_BUSY) != 0 ||
		    ccb->ccb_h.retry_count > 0) {
			if ((sense_flags & SF_RETRY_BUSY) == 0)
				ccb->ccb_h.retry_count--;
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
			    | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
			*action_string = "Retries exhausted";
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
	default:
		error = EIO;
		break;
	}
	return (error);
}

static int
camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
    cam_flags camflags, uint32_t sense_flags,
    int *openings, uint32_t *relsim_flags,
    uint32_t *timeout, uint32_t *action, const char **action_string)
{
	struct cam_periph *periph;
	union ccb *orig_ccb = ccb;
	int error, recoveryccb;
	uint16_t flags;

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (ccb->ccb_h.func_code == XPT_SCSI_IO && ccb->csio.bio != NULL)
		biotrack(ccb->csio.bio, __func__);
#endif

	periph = xpt_path_periph(ccb->ccb_h.path);
	recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
	if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
		*action &= ~SSQ_PRINT_SENSE;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;

		/*
		 * Grab the inquiry data for this device.
		 */
		xpt_gdev_type(&cgd, ccb->ccb_h.path);

		err_action = scsi_error_action(&ccb->csio, &cgd.inq_data,
		    sense_flags);
		error = err_action & SS_ERRMASK;

		/*
		 * Do not autostart sequential access devices
		 * to avoid unexpected tape loading.
		 */
		if ((err_action & SS_MASK) == SS_START &&
		    SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
			*action_string = "Will not autostart a "
			    "sequential access device";
			goto sense_error_done;
		}

		/*
		 * Avoid recovery recursion if recovery action is the same.
		 */
		if ((err_action & SS_MASK) >= SS_START && recoveryccb) {
			if (((err_action & SS_MASK) == SS_START &&
			    ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) ||
			    ((err_action & SS_MASK) == SS_TUR &&
			    (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) {
				err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
				*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				*timeout = 500;
			}
		}

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0 &&
			    (periph->flags & CAM_PERIPH_INVALID) == 0)
				ccb->ccb_h.retry_count--;
			else {
				*action_string = "Retries exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			orig_ccb = xpt_alloc_ccb_nowait();
			if (orig_ccb == NULL) {
				*action_string = "Can't allocate recovery CCB";
				goto sense_error_done;
			}
			/*
			 * Clear freeze flag for original request here, as
			 * this freeze will be dropped as part of ERESTART.
			 */
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;

			KASSERT(ccb->ccb_h.func_code == XPT_SCSI_IO,
			    ("%s: ccb func_code %#x != XPT_SCSI_IO",
			    __func__, ccb->ccb_h.func_code));
			flags = orig_ccb->ccb_h.alloc_flags;
			bcopy(ccb, orig_ccb, sizeof(struct ccb_scsiio));
			orig_ccb->ccb_h.alloc_flags = flags;
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			*action_string = "No recovery action needed";
			error = 0;
			break;
		case SS_RETRY:
			*action_string = "Retrying command (per sense data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			*action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			*action_string = "Attempting to start unit";
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			scsi_start_stop(&ccb->csio,
			    /*retries*/1,
			    camperiphdone,
			    MSG_SIMPLE_Q_TAG,
			    /*start*/TRUE,
			    /*load/eject*/le,
			    /*immediate*/FALSE,
			    SSD_FULL_SIZE,
			    /*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0 && (periph->flags &
			    CAM_PERIPH_RECOVERY_WAIT_FAILED) == 0) {
				periph->flags |= CAM_PERIPH_RECOVERY_WAIT;
				*action_string = "Polling device for readiness";
				retries = 120;
			} else {
				*action_string = "Testing device for readiness";
				retries = 1;
			}
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
			scsi_test_unit_ready(&ccb->csio,
			    retries,
			    camperiphdone,
			    MSG_SIMPLE_Q_TAG,
			    SSD_FULL_SIZE,
			    /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority, so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority--;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = orig_ccb;
			error = ERESTART;
			*orig = orig_ccb;
		}

sense_error_done:
		*action = err_action;
	}
	return (error);
}
1860
1861 /*
1862 * Generic error handler. Peripheral drivers usually filter
1863 * out the errors that they handle in a unique manner, then
1864 * call this function.
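 *
 * Returns 0 when the error has been fully absorbed, ERESTART when the
 * CCB has been resubmitted for a retry, or an errno value to be passed
 * back to the caller.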
 */
int
cam_periph_error(union ccb *ccb, cam_flags camflags,
    uint32_t sense_flags)
{
	struct cam_path *newpath;
	union ccb *orig_ccb, *scan_ccb;
	struct cam_periph *periph;
	const char *action_string;
	cam_status status;
	bool frozen;
	int error, openings, devctl_err;
	uint32_t action, relsim_flags, timeout;

	action = SSQ_PRINT_SENSE;
	periph = xpt_path_periph(ccb->ccb_h.path);
	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	devctl_err = openings = relsim_flags = timeout = 0;
	orig_ccb = ccb;

	/* Filter the errors that should be reported via devctl */
	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
	case CAM_REQ_ABORTED:
	case CAM_REQ_CMP_ERR:
	case CAM_REQ_TERMIO:
	case CAM_UNREC_HBA_ERROR:
	case CAM_DATA_RUN_ERR:
	case CAM_SCSI_STATUS_ERROR:
	case CAM_ATA_STATUS_ERROR:
	case CAM_SMP_STATUS_ERROR:
	case CAM_DEV_NOT_THERE:
	case CAM_NVME_STATUS_ERROR:
		devctl_err++;
		break;
	default:
		break;
	}

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		action &= ~SSQ_PRINT_SENSE;
		break;
	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb, &orig_ccb,
		    camflags, sense_flags, &openings, &relsim_flags,
		    &timeout, &action, &action_string);
		break;
	case CAM_AUTOSENSE_FAIL:
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0 &&
			    (periph->flags & CAM_PERIPH_INVALID) == 0) {
				ccb->ccb_h.retry_count--;
				error = ERESTART;

				/*
				 * Wait a bit to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;
				break;
			}
			action_string = "Retries exhausted";
		}
		/* FALLTHROUGH */
	case CAM_DEV_NOT_THERE:
		error = ENXIO;
		action = SSQ_LOST;
		break;
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
	case CAM_FUNC_NOTAVAIL:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue if device is still there */
		if (periph->flags & CAM_PERIPH_INVALID) {
			action_string = "Periph was invalidated";
			error = ENXIO;
		} else if (sense_flags & SF_NO_RETRY) {
			error = EIO;
			action_string = "Retry was blocked";
		} else {
			error = ERESTART;
			action &= ~SSQ_PRINT_SENSE;
		}
		break;
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	case CAM_ATA_STATUS_ERROR:
	case CAM_NVME_STATUS_ERROR:
	case CAM_SMP_STATUS_ERROR:
	case CAM_REQ_CMP_ERR:
	case CAM_CMD_TIMEOUT:
	case CAM_UNEXP_BUSFREE:
	case CAM_UNCOR_PARITY:
	case CAM_DATA_RUN_ERR:
	default:
		if (periph->flags & CAM_PERIPH_INVALID) {
			error = ENXIO;
			action_string = "Periph was invalidated";
		} else if (ccb->ccb_h.retry_count == 0) {
			error = EIO;
			action_string = "Retries exhausted";
		} else if (sense_flags & SF_NO_RETRY) {
			error = EIO;
			action_string = "Retry was blocked";
		} else {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		}
		break;
	}

	if ((sense_flags & SF_PRINT_ALWAYS) ||
	    CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
		action |= SSQ_PRINT_SENSE;
	else if (sense_flags & SF_NO_PRINT)
		action &= ~SSQ_PRINT_SENSE;
	if ((action & SSQ_PRINT_SENSE) != 0)
		cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
	if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) {
		if (error != ERESTART) {
			if (action_string == NULL)
				action_string = "Unretryable error";
			xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
			    error, action_string);
		} else if (action_string != NULL)
			xpt_print(ccb->ccb_h.path, "%s\n", action_string);
		else {
			xpt_print(ccb->ccb_h.path,
			    "Retrying command, %d more tries remain\n",
			    ccb->ccb_h.retry_count);
		}
	}

	if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0))
		cam_periph_devctl_notify(orig_ccb);

	if ((action & SSQ_LOST) != 0) {
		lun_id_t lun_id;

		/*
		 * For a selection timeout, we consider all of the LUNs on
		 * the target to be gone.  If the status is CAM_DEV_NOT_THERE,
		 * then we only get rid of the device(s) specified by the
		 * path in the original CCB.
		 */
		if (status == CAM_SEL_TIMEOUT)
			lun_id = CAM_LUN_WILDCARD;
		else
			lun_id = xpt_path_lun_id(ccb->ccb_h.path);

		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, periph,
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    lun_id) == CAM_REQ_CMP) {
			/*
			 * Let peripheral drivers know that this
			 * device has gone away.
			 */
			xpt_async(AC_LOST_DEVICE, newpath, NULL);
			xpt_free_path(newpath);
		}
	}

	/* Broadcast UNIT ATTENTIONs to all periphs. */
	if ((action & SSQ_UA) != 0)
		xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);

	/* Rescan target on "Reported LUNs data has changed" */
	if ((action & SSQ_RESCAN) != 0) {
		if (xpt_create_path(&newpath, NULL,
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
			scan_ccb = xpt_alloc_ccb_nowait();
			if (scan_ccb != NULL) {
				scan_ccb->ccb_h.path = newpath;
				scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
				scan_ccb->crcn.flags = 0;
				xpt_rescan(scan_ccb);
			} else {
				xpt_print(newpath,
				    "Can't allocate CCB to rescan target\n");
				xpt_free_path(newpath);
			}
		}
	}

	/*
	 * Attempt a retry: resubmit the CCB for ERESTART and, if we froze
	 * the device queue, release it with the flags and delay computed
	 * above.
	 */
	if (error == ERESTART || error == 0) {
		if (frozen)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		if (error == ERESTART)
			xpt_action(ccb);
		if (frozen)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	return (error);
}

#define	CAM_PERIPH_DEVD_MSG_SIZE	256

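/*
 * Emit a devctl(4) notification describing a failed CCB: the periph
 * name and unit, the device serial number, the CAM status, and a
 * protocol-specific summary of the command and its error (SCSI CDB and
 * sense, ATA result, or NVMe submission/completion fields).  A line
 * such as the following is illustrative (values are hypothetical):
 *
 *	device=da0 serial="XYZ" cam_status="0x8c" scsi_status=2 ...
 */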
static void
cam_periph_devctl_notify(union ccb *ccb)
{
	struct cam_periph *periph;
	struct ccb_getdev *cgd;
	struct sbuf sb;
	int serr, sk, asc, ascq;
	char *sbmsg, *type;

	sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT);
	if (sbmsg == NULL)
		return;

	sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN);

	periph = xpt_path_periph(ccb->ccb_h.path);
	sbuf_printf(&sb, "device=%s%d ", periph->periph_name,
	    periph->unit_number);

	sbuf_cat(&sb, "serial=\"");
	if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) {
		xpt_gdev_type(cgd, ccb->ccb_h.path);
		if (cgd->ccb_h.status == CAM_REQ_CMP)
			sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len);
		xpt_free_ccb((union ccb *)cgd);
	}
	sbuf_cat(&sb, "\" ");
	sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status);

	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
		sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout);
		type = "timeout";
		break;
	case CAM_SCSI_STATUS_ERROR:
		sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status);
		if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq))
			sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ",
			    serr, sk, asc, ascq);
		type = "error";
		break;
	case CAM_ATA_STATUS_ERROR:
		sbuf_cat(&sb, "RES=\"");
		ata_res_sbuf(&ccb->ataio.res, &sb);
		sbuf_cat(&sb, "\" ");
		type = "error";
		break;
	case CAM_NVME_STATUS_ERROR:
	{
		struct ccb_nvmeio *n = &ccb->nvmeio;

		sbuf_printf(&sb, "sct=\"%02x\" sc=\"%02x\" cdw0=\"%08x\" ",
		    NVME_STATUS_GET_SCT(n->cpl.status),
		    NVME_STATUS_GET_SC(n->cpl.status), n->cpl.cdw0);
		type = "error";
		break;
	}
	default:
		type = "error";
		break;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		sbuf_cat(&sb, "CDB=\"");
		scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb);
		sbuf_cat(&sb, "\" ");
		break;
	case XPT_ATA_IO:
		sbuf_cat(&sb, "ACB=\"");
		ata_cmd_sbuf(&ccb->ataio.cmd, &sb);
		sbuf_cat(&sb, "\" ");
		break;
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN:
	{
		struct ccb_nvmeio *n = &ccb->nvmeio;
		struct nvme_command *cmd = &n->cmd;

		/* XXX Likely should be nvme_cmd_sbuf */
		sbuf_printf(&sb, "opc=\"%02x\" fuse=\"%02x\" cid=\"%04x\" "
		    "nsid=\"%08x\" cdw10=\"%08x\" cdw11=\"%08x\" cdw12=\"%08x\" "
		    "cdw13=\"%08x\" cdw14=\"%08x\" cdw15=\"%08x\" ",
		    cmd->opc, cmd->fuse, cmd->cid, cmd->nsid, cmd->cdw10,
		    cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14, cmd->cdw15);
		break;
	}
	default:
		break;
	}

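	/*
	 * sbuf_finish() fails if the message overflowed the fixed-size
	 * buffer, in which case the notification is quietly dropped.
	 */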
	if (sbuf_finish(&sb) == 0)
		devctl_notify("CAM", "periph", type, sbuf_data(&sb));
	sbuf_delete(&sb);
	free(sbmsg, M_CAMPERIPH);
}

/*
 * Sysctl to force an invalidation of the drive right now.  Can be
 * called with CTLFLAG_MPSAFE since we take periph lock.
 */
int
cam_periph_invalidate_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cam_periph *periph;
	int error, value;

	periph = arg1;
	value = 0;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL || value != 1)
		return (error);

	cam_periph_lock(periph);
	cam_periph_invalidate(periph);
	cam_periph_unlock(periph);

	return (0);
}
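
/*
 * A minimal sketch of how a periph driver might hook up the handler
 * above from its sysctl setup; "softc" and its sysctl context/tree are
 * hypothetical names, not part of this file:
 *
 *	SYSCTL_ADD_PROC(&softc->sysctl_ctx,
 *	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "invalidate",
 *	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, periph, 0,
 *	    cam_periph_invalidate_sysctl, "I",
 *	    "Write 1 to invalidate the device");
 */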