xref: /freebsd/sys/cam/scsi/scsi_target.c (revision 1b6c76a2fe091c74f08427e6c870851025a9cf67)
1 /*
2  * Implementation of a simple Target Mode SCSI Processor Target driver for CAM.
3  *
4  * Copyright (c) 1998, 1999 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 
31 #include <sys/param.h>
32 #include <sys/queue.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/types.h>
36 #include <sys/bio.h>
37 #include <sys/conf.h>
38 #include <sys/devicestat.h>
39 #include <sys/malloc.h>
40 #include <sys/poll.h>
41 #include <sys/selinfo.h>
42 #include <sys/uio.h>
43 
44 #include <cam/cam.h>
45 #include <cam/cam_ccb.h>
46 #include <cam/cam_extend.h>
47 #include <cam/cam_periph.h>
48 #include <cam/cam_queue.h>
49 #include <cam/cam_xpt_periph.h>
50 #include <cam/cam_debug.h>
51 
52 #include <cam/scsi/scsi_all.h>
53 #include <cam/scsi/scsi_pt.h>
54 #include <cam/scsi/scsi_targetio.h>
55 #include <cam/scsi/scsi_message.h>
56 
57 typedef enum {
58 	TARG_STATE_NORMAL,
59 	TARG_STATE_EXCEPTION,
60 	TARG_STATE_TEARDOWN
61 } targ_state;
62 
63 typedef enum {
64 	TARG_FLAG_NONE		 = 0x00,
65 	TARG_FLAG_SEND_EOF	 = 0x01,
66 	TARG_FLAG_RECEIVE_EOF	 = 0x02,
67 	TARG_FLAG_LUN_ENABLED	 = 0x04
68 } targ_flags;
69 
70 typedef enum {
71 	TARG_CCB_NONE		= 0x00,
72 	TARG_CCB_WAITING	= 0x01,
73 	TARG_CCB_HELDQ		= 0x02,
74 	TARG_CCB_ABORT_TO_HELDQ = 0x04
75 } targ_ccb_flags;
76 
77 #define MAX_ACCEPT	16
78 #define MAX_IMMEDIATE	16
79 #define MAX_BUF_SIZE	256	/* Max inquiry/sense/mode page transfer */
80 #define MAX_INITIATORS	256	/* includes widest fibre channel for now */
81 
82 #define MIN(a, b) (((a) > (b)) ? (b) : (a))
83 
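/*
 * Reserved unit number for the control device (targ.ctl).  The control
 * node only implements the TARGCTLIO* ioctls used to allocate and free
 * target instances; read, write, poll, and strategy reject it.
 */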
84 #define TARG_CONTROL_UNIT 0xffff00ff
85 #define TARG_IS_CONTROL_DEV(unit) ((unit) == TARG_CONTROL_UNIT)
86 
87 #define TARG_TAG_WILDCARD ((u_int)~0)
88 
89 /* Offsets into our private CCB area for storing accept information */
90 #define ccb_flags	ppriv_field0
91 #define ccb_descr	ppriv_ptr1
92 
93 /* We stick a pointer to the originating accept TIO in each continue I/O CCB */
94 #define ccb_atio	ppriv_ptr1
95 
96 struct targ_softc {
97 	/* CTIOs pending on the controller */
98 	struct		ccb_queue pending_queue;
99 
100 	/* ATIOs awaiting CTIO resources from the XPT */
101 	struct		ccb_queue work_queue;
102 
103 	/*
104 	 * ATIOs for SEND operations waiting for 'write'
105 	 * buffer resources from our userland daemon.
106 	 */
107 	struct		ccb_queue snd_ccb_queue;
108 
109 	/*
110 	 * ATIOs for RCV operations waiting for 'read'
111 	 * buffer resources from our userland daemon.
112 	 */
113 	struct		ccb_queue rcv_ccb_queue;
114 
115 	/*
116 	 * ATIOs for commands unknown to the kernel driver.
117 	 * These are queued for the userland daemon to
118 	 * consume.
119 	 */
120 	struct		ccb_queue unknown_atio_queue;
121 
122 	/*
123 	 * Userland buffers for SEND commands waiting for
124 	 * SEND ATIOs to be queued by an initiator.
125 	 */
126 	struct		bio_queue_head snd_bio_queue;
127 
128 	/*
129 	 * Userland buffers for RCV commands waiting for
130 	 * RCV ATIOs to be queued by an initiator.
131 	 */
132 	struct		bio_queue_head rcv_bio_queue;
133 	struct		devstat device_stats;
134 	dev_t		targ_dev;
135 	struct		selinfo snd_select;
136 	struct		selinfo rcv_select;
137 	targ_state	state;
138 	targ_flags	flags;
139 	targ_exception	exceptions;
140 	u_int		init_level;
141 	u_int		inq_data_len;
142 	struct		scsi_inquiry_data *inq_data;
143 	struct		ccb_accept_tio *accept_tio_list;
144 	struct		ccb_hdr_slist immed_notify_slist;
145 	struct		initiator_state istate[MAX_INITIATORS];
146 };
147 
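/*
 * Per-command descriptor hung off each accept TIO (via ccb_descr).  It
 * tracks the user buffer currently mapped to the command and how much
 * data remains, so one command can span several continue target I/Os.
 */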
148 struct targ_cmd_desc {
149 	struct	  ccb_accept_tio* atio_link;
150 	u_int	  data_resid;	/* How much left to transfer */
151 	u_int	  data_increment;/* Amount to send before next disconnect */
152 	void*	  data;		/* The data. Can be from backing_store or not */
153 	void*	  backing_store;/* Backing store allocated for this descriptor*/
154 	struct	  bio *bp;	/* Buffer for this transfer */
155 	u_int	  max_size;	/* Size of backing_store */
156 	u_int32_t timeout;
157 	u_int8_t  status;	/* Status to return to initiator */
158 };
159 
160 static	d_open_t	targopen;
161 static	d_close_t	targclose;
162 static	d_read_t	targread;
163 static	d_write_t	targwrite;
164 static	d_ioctl_t	targioctl;
165 static	d_poll_t	targpoll;
166 static	d_strategy_t	targstrategy;
167 
168 #define TARG_CDEV_MAJOR	65
169 static struct cdevsw targ_cdevsw = {
170 	/* open */	targopen,
171 	/* close */	targclose,
172 	/* read */	targread,
173 	/* write */	targwrite,
174 	/* ioctl */	targioctl,
175 	/* poll */	targpoll,
176 	/* mmap */	nommap,
177 	/* strategy */	targstrategy,
178 	/* name */	"targ",
179 	/* maj */	TARG_CDEV_MAJOR,
180 	/* dump */	nodump,
181 	/* psize */	nopsize,
182 	/* flags */	0,
183 };
184 
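/*
 * Rough sketch of the expected userland flow (illustrative only; the
 * device names come from targinit() and targctor() below, and the bus,
 * target, and lun values here are hypothetical):
 *
 *	struct ioc_alloc_unit alloc;
 *	int ctl = open("/dev/targ.ctl", O_RDWR);
 *	alloc.path_id = 0; alloc.target_id = 1; alloc.lun_id = 0;
 *	ioctl(ctl, TARGCTLIOALLOCUNIT, &alloc);
 *	...open /dev/targ<alloc.unit>, pick up data from the initiator's
 *	SEND commands with read(2), supply data for its RECEIVE commands
 *	with write(2), and watch for exceptions with poll(2) and
 *	TARGIOCFETCHEXCEPTION...
 */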
185 static int		targsendccb(struct cam_periph *periph, union ccb *ccb,
186 				    union ccb *inccb);
187 static periph_init_t	targinit;
188 static void		targasync(void *callback_arg, u_int32_t code,
189 				struct cam_path *path, void *arg);
190 static int		targallocinstance(struct ioc_alloc_unit *alloc_unit);
191 static int		targfreeinstance(struct ioc_alloc_unit *alloc_unit);
192 static cam_status	targenlun(struct cam_periph *periph);
193 static cam_status	targdislun(struct cam_periph *periph);
194 static periph_ctor_t	targctor;
195 static periph_dtor_t	targdtor;
196 static void		targrunqueue(struct cam_periph *periph,
197 				     struct targ_softc *softc);
198 static periph_start_t	targstart;
199 static void		targdone(struct cam_periph *periph,
200 				 union ccb *done_ccb);
201 static void		targfireexception(struct cam_periph *periph,
202 					  struct targ_softc *softc);
203 static void		targinoterror(struct cam_periph *periph,
204 				      struct targ_softc *softc,
205 				      struct ccb_immed_notify *inot);
206 static  int		targerror(union ccb *ccb, u_int32_t cam_flags,
207 				  u_int32_t sense_flags);
208 static struct targ_cmd_desc*	allocdescr(void);
209 static void		freedescr(struct targ_cmd_desc *buf);
210 static void		fill_sense(struct targ_softc *softc,
211 				   u_int initiator_id, u_int error_code,
212 				   u_int sense_key, u_int asc, u_int ascq);
213 static void		copy_sense(struct targ_softc *softc,
214 				   struct initiator_state *istate,
215 				   u_int8_t *sense_buffer, size_t sense_len);
216 static void	set_unit_attention_cond(struct cam_periph *periph,
217 					u_int initiator_id, ua_types ua);
218 static void	set_ca_condition(struct cam_periph *periph,
219 				 u_int initiator_id, ca_types ca);
220 static void	abort_pending_transactions(struct cam_periph *periph,
221 					   u_int initiator_id, u_int tag_id,
222 					   int errno, int to_held_queue);
223 
224 static struct periph_driver targdriver =
225 {
226 	targinit, "targ",
227 	TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
228 };
229 
230 PERIPHDRIVER_DECLARE(targ, targdriver);
231 
232 static struct extend_array *targperiphs;
233 static dev_t targ_ctl_dev;
234 
235 static void
236 targinit(void)
237 {
238 	/*
239 	 * Create our extend array for storing the devices we attach to.
240 	 */
241 	targperiphs = cam_extend_new();
242 	if (targperiphs == NULL) {
243 		printf("targ: Failed to alloc extend array!\n");
244 		return;
245 	}
246 	targ_ctl_dev = make_dev(&targ_cdevsw, TARG_CONTROL_UNIT, UID_ROOT,
247 	    GID_OPERATOR, 0600, "%s.ctl", "targ");
248 	if (targ_ctl_dev == (dev_t) 0) {
249 		printf("targ: failed to create control dev\n");
250 	}
251 }
252 
253 static void
254 targasync(void *callback_arg, u_int32_t code,
255 	  struct cam_path *path, void *arg)
256 {
257 	struct cam_periph *periph;
258 	struct targ_softc *softc;
259 
260 	periph = (struct cam_periph *)callback_arg;
261 	softc = (struct targ_softc *)periph->softc;
262 	switch (code) {
263 	case AC_PATH_DEREGISTERED:
264 	{
265 		/* XXX Implement */
266 		break;
267 	}
268 	default:
269 		break;
270 	}
271 }
272 
273 /* Attempt to enable our lun */
274 static cam_status
275 targenlun(struct cam_periph *periph)
276 {
277 	union ccb immed_ccb;
278 	struct targ_softc *softc;
279 	cam_status status;
280 	int i;
281 
282 	softc = (struct targ_softc *)periph->softc;
283 
284 	if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0)
285 		return (CAM_REQ_CMP);
286 
287 	xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, /*priority*/1);
288 	immed_ccb.ccb_h.func_code = XPT_EN_LUN;
289 
290 	/* Don't need support for any vendor specific commands */
291 	immed_ccb.cel.grp6_len = 0;
292 	immed_ccb.cel.grp7_len = 0;
293 	immed_ccb.cel.enable = 1;
294 	xpt_action(&immed_ccb);
295 	status = immed_ccb.ccb_h.status;
296 	if (status != CAM_REQ_CMP) {
297 		xpt_print_path(periph->path);
298 		printf("targenlun - Enable Lun Rejected with status 0x%x\n",
299 		       status);
300 		return (status);
301 	}
302 
303 	softc->flags |= TARG_FLAG_LUN_ENABLED;
304 
305 	/*
306 	 * Build up a buffer of accept target I/O
307 	 * operations for incoming selections.
308 	 */
309 	for (i = 0; i < MAX_ACCEPT; i++) {
310 		struct ccb_accept_tio *atio;
311 
312 		atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_DEVBUF,
313 						      M_NOWAIT);
314 		if (atio == NULL) {
315 			status = CAM_RESRC_UNAVAIL;
316 			break;
317 		}
318 
319 		atio->ccb_h.ccb_descr = allocdescr();
320 
321 		if (atio->ccb_h.ccb_descr == NULL) {
322 			free(atio, M_DEVBUF);
323 			status = CAM_RESRC_UNAVAIL;
324 			break;
325 		}
326 
327 		xpt_setup_ccb(&atio->ccb_h, periph->path, /*priority*/1);
328 		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
329 		atio->ccb_h.cbfcnp = targdone;
330 		atio->ccb_h.ccb_flags = TARG_CCB_NONE;
331 		xpt_action((union ccb *)atio);
332 		status = atio->ccb_h.status;
333 		if (status != CAM_REQ_INPROG) {
334 			xpt_print_path(periph->path);
335 			printf("Queue of atio failed\n");
336 			freedescr(atio->ccb_h.ccb_descr);
337 			free(atio, M_DEVBUF);
338 			break;
339 		}
340 		((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
341 		    softc->accept_tio_list;
342 		softc->accept_tio_list = atio;
343 	}
344 
345 	if (i == 0) {
346 		xpt_print_path(periph->path);
347 		printf("targenlun - Could not allocate accept tio CCBs: "
348 		       "status = 0x%x\n", status);
349 		targdislun(periph);
350 		return (CAM_REQ_CMP_ERR);
351 	}
352 
353 	/*
354 	 * Build up a buffer of immediate notify CCBs
355 	 * so the SIM can tell us of asynchronous target mode events.
356 	 */
357 	for (i = 0; i < MAX_ACCEPT; i++) {
358 		struct ccb_immed_notify *inot;
359 
360 		inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_DEVBUF,
361 						        M_NOWAIT);
362 
363 		if (inot == NULL) {
364 			status = CAM_RESRC_UNAVAIL;
365 			break;
366 		}
367 
368 		xpt_setup_ccb(&inot->ccb_h, periph->path, /*priority*/1);
369 		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
370 		inot->ccb_h.cbfcnp = targdone;
371 		SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
372 				  periph_links.sle);
373 		xpt_action((union ccb *)inot);
374 	}
375 
376 	if (i == 0) {
377 		xpt_print_path(periph->path);
378 		printf("targenlun - Could not allocate immediate notify CCBs: "
379 		       "status = 0x%x\n", status);
380 		targdislun(periph);
381 		return (CAM_REQ_CMP_ERR);
382 	}
383 
384 	return (CAM_REQ_CMP);
385 }
386 
387 static cam_status
388 targdislun(struct cam_periph *periph)
389 {
390 	union ccb ccb;
391 	struct targ_softc *softc;
392 	struct ccb_accept_tio* atio;
393 	struct ccb_hdr *ccb_h;
394 
395 	softc = (struct targ_softc *)periph->softc;
396 	if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0)
397 		return CAM_REQ_CMP;
398 
399 	/* XXX Block for Continue I/O completion */
400 
401 	/* Kill off all ACCEPT and IMMEDIATE CCBs */
402 	while ((atio = softc->accept_tio_list) != NULL) {
403 
404 		softc->accept_tio_list =
405 		    ((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link;
406 		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
407 		ccb.cab.ccb_h.func_code = XPT_ABORT;
408 		ccb.cab.abort_ccb = (union ccb *)atio;
409 		xpt_action(&ccb);
410 	}
411 
412 	while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) {
413 		SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle);
414 		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
415 		ccb.cab.ccb_h.func_code = XPT_ABORT;
416 		ccb.cab.abort_ccb = (union ccb *)ccb_h;
417 		xpt_action(&ccb);
418 	}
419 
420 	/*
421 	 * Disable this lun.
422 	 */
423 	xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, /*priority*/1);
424 	ccb.cel.ccb_h.func_code = XPT_EN_LUN;
425 	ccb.cel.enable = 0;
426 	xpt_action(&ccb);
427 
428 	if (ccb.cel.ccb_h.status != CAM_REQ_CMP)
429 		printf("targdislun - Disabling lun on controller failed "
430 		       "with status 0x%x\n", ccb.cel.ccb_h.status);
431 	else
432 		softc->flags &= ~TARG_FLAG_LUN_ENABLED;
433 	return (ccb.cel.ccb_h.status);
434 }
435 
436 static cam_status
437 targctor(struct cam_periph *periph, void *arg)
438 {
439 	struct ccb_pathinq *cpi;
440 	struct targ_softc *softc;
441 	int i;
442 
443 	cpi = (struct ccb_pathinq *)arg;
444 
445 	/* Allocate our per-instance private storage */
446 	softc = (struct targ_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);
447 	if (softc == NULL) {
448 		printf("targctor: unable to malloc softc\n");
449 		return (CAM_REQ_CMP_ERR);
450 	}
451 
452 	bzero(softc, sizeof(*softc));
453 	TAILQ_INIT(&softc->pending_queue);
454 	TAILQ_INIT(&softc->work_queue);
455 	TAILQ_INIT(&softc->snd_ccb_queue);
456 	TAILQ_INIT(&softc->rcv_ccb_queue);
457 	TAILQ_INIT(&softc->unknown_atio_queue);
458 	bioq_init(&softc->snd_bio_queue);
459 	bioq_init(&softc->rcv_bio_queue);
460 	softc->accept_tio_list = NULL;
461 	SLIST_INIT(&softc->immed_notify_slist);
462 	softc->state = TARG_STATE_NORMAL;
463 	periph->softc = softc;
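	/*
	 * init_level records how far construction has progressed so that
	 * targdtor() can unwind only what was actually set up.
	 */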
464 	softc->init_level++;
465 
466 	cam_extend_set(targperiphs, periph->unit_number, periph);
467 
468 	/*
469 	 * We start out life with a UA to indicate power-on/reset.
470 	 */
471 	for (i = 0; i < MAX_INITIATORS; i++)
472 		softc->istate[i].pending_ua = UA_POWER_ON;
473 
474 	/*
475 	 * Allocate an initial inquiry data buffer.  We might allow the
476 	 * user to override this later via an ioctl.
477 	 */
478 	softc->inq_data_len = sizeof(*softc->inq_data);
479 	softc->inq_data = malloc(softc->inq_data_len, M_DEVBUF, M_NOWAIT);
480 	if (softc->inq_data == NULL) {
481 		printf("targctor - Unable to malloc inquiry data\n");
482 		targdtor(periph);
483 		return (CAM_RESRC_UNAVAIL);
484 	}
485 	bzero(softc->inq_data, softc->inq_data_len);
486 	softc->inq_data->device = T_PROCESSOR | (SID_QUAL_LU_CONNECTED << 5);
487 	softc->inq_data->version = 2;
488 	softc->inq_data->response_format = 2; /* SCSI2 Inquiry Format */
489 	softc->inq_data->flags =
490 	    cpi->hba_inquiry & (PI_SDTR_ABLE|PI_WIDE_16|PI_WIDE_32|PI_TAG_ABLE);
491 	softc->inq_data->additional_length = softc->inq_data_len - 4;
492 	strncpy(softc->inq_data->vendor, "FreeBSD ", SID_VENDOR_SIZE);
493 	strncpy(softc->inq_data->product, "TM-PT           ", SID_PRODUCT_SIZE);
494 	strncpy(softc->inq_data->revision, "0.0 ", SID_REVISION_SIZE);
495 	softc->targ_dev = make_dev(&targ_cdevsw, periph->unit_number, UID_ROOT,
496 				   GID_OPERATOR, 0600, "%s%d",
497 				   periph->periph_name, periph->unit_number);
498 	softc->init_level++;
499 	return (CAM_REQ_CMP);
500 }
501 
502 static void
503 targdtor(struct cam_periph *periph)
504 {
505 	struct targ_softc *softc;
506 
507 	softc = (struct targ_softc *)periph->softc;
508 
509 	softc->state = TARG_STATE_TEARDOWN;
510 
511 	targdislun(periph);
512 
513 	cam_extend_release(targperiphs, periph->unit_number);
514 
515 	switch (softc->init_level) {
516 	default:
517 		/* FALLTHROUGH */
518 	case 2:
519 		free(softc->inq_data, M_DEVBUF);
520 		destroy_dev(softc->targ_dev);
521 		/* FALLTHROUGH */
522 	case 1:
523 		free(softc, M_DEVBUF);
524 		break;
525 	case 0:
526 		panic("targdtor - impossible init level");
527 	}
528 }
529 
530 static int
531 targopen(dev_t dev, int flags, int fmt, struct proc *p)
532 {
533 	struct cam_periph *periph;
534 	struct	targ_softc *softc;
535 	u_int unit;
536 	cam_status status;
537 	int error;
538 	int s;
539 
540 	unit = minor(dev);
541 
542 	/* An open of the control device always succeeds */
543 	if (TARG_IS_CONTROL_DEV(unit))
544 		return 0;
545 
546 	s = splsoftcam();
547 	periph = cam_extend_get(targperiphs, unit);
548 	if (periph == NULL) {
549         	splx(s);
550 		return (ENXIO);
551 	}
552 	if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0) {
553 		splx(s);
554 		return (error);
555 	}
556 
557 	softc = (struct targ_softc *)periph->softc;
558 	if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0) {
559 		if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
560 			splx(s);
561 			cam_periph_unlock(periph);
562 			return(ENXIO);
563 		}
564 	}
565         splx(s);
566 
567 	status = targenlun(periph);
568 	switch (status) {
569 	case CAM_REQ_CMP:
570 		error = 0;
571 		break;
572 	case CAM_RESRC_UNAVAIL:
573 		error = ENOMEM;
574 		break;
575 	case CAM_LUN_ALRDY_ENA:
576 		error = EADDRINUSE;
577 		break;
578 	default:
579 		error = ENXIO;
580 		break;
581 	}
582         cam_periph_unlock(periph);
583 	if (error) {
584 		cam_periph_release(periph);
585 	}
586 	return (error);
587 }
588 
589 static int
590 targclose(dev_t dev, int flag, int fmt, struct proc *p)
591 {
592 	struct	cam_periph *periph;
593 	struct	targ_softc *softc;
594 	u_int	unit;
595 	int	s;
596 	int	error;
597 
598 	unit = minor(dev);
599 
600 	/* A close of the control device always succeeds */
601 	if (TARG_IS_CONTROL_DEV(unit))
602 		return 0;
603 
604 	s = splsoftcam();
605 	periph = cam_extend_get(targperiphs, unit);
606 	if (periph == NULL) {
607 		splx(s);
608 		return (ENXIO);
609 	}
610 	if ((error = cam_periph_lock(periph, PRIBIO)) != 0)
611 		return (error);
612 	softc = (struct targ_softc *)periph->softc;
613 	splx(s);
614 
615 	targdislun(periph);
616 
617 	cam_periph_unlock(periph);
618 	cam_periph_release(periph);
619 
620 	return (0);
621 }
622 
623 static int
624 targallocinstance(struct ioc_alloc_unit *alloc_unit)
625 {
626 	struct ccb_pathinq cpi;
627 	struct cam_path *path;
628 	struct cam_periph *periph;
629 	cam_status status;
630 	int free_path_on_return;
631 	int error;
632 
633 	free_path_on_return = 0;
634 	status = xpt_create_path(&path, /*periph*/NULL,
635 				 alloc_unit->path_id,
636 				 alloc_unit->target_id,
637 				 alloc_unit->lun_id);
638 	if (status != CAM_REQ_CMP) {
639 		printf("Couldn't Allocate Path %x\n", status);
640 		goto fail;
641 	}
642 
643 	free_path_on_return++;
644 
645 
646 	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
647 	cpi.ccb_h.func_code = XPT_PATH_INQ;
648 	xpt_action((union ccb *)&cpi);
649 	status = cpi.ccb_h.status;
650 
651 	if (status != CAM_REQ_CMP) {
652 		printf("Couldn't CPI %x\n", status);
653 		goto fail;
654 	}
655 
656 	/* Can only alloc units on controllers that support target mode */
657 	if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
658 		printf("Controller does not support target mode\n");
659 		status = CAM_PATH_INVALID;
660 		goto fail;
661 	}
662 
663 	/* Ensure that we don't already have an instance for this unit. */
664 	if ((periph = cam_periph_find(path, "targ")) != NULL) {
665 		status = CAM_LUN_ALRDY_ENA;
666 		goto fail;
667 	}
668 
669 	/*
670 	 * Allocate a peripheral instance for
671 	 * this target instance.
672 	 */
673 	status = cam_periph_alloc(targctor, NULL, targdtor, targstart,
674 				  "targ", CAM_PERIPH_BIO, path, targasync,
675 				  0, &cpi);
676 
677 fail:
678 	switch (status) {
679 	case CAM_REQ_CMP:
680 	{
681 		struct cam_periph *periph;
682 
683 		if ((periph = cam_periph_find(path, "targ")) == NULL)
684 			panic("targallocinstance: Succeeded but no periph?");
685 		error = 0;
686 		alloc_unit->unit = periph->unit_number;
687 		break;
688 	}
689 	case CAM_RESRC_UNAVAIL:
690 		error = ENOMEM;
691 		break;
692 	case CAM_LUN_ALRDY_ENA:
693 		error = EADDRINUSE;
694 		break;
695 	default:
696 		printf("targallocinstance: Unexpected CAM status %x\n", status);
697 		/* FALLTHROUGH */
698 	case CAM_PATH_INVALID:
699 		error = ENXIO;
700 		break;
701 	case CAM_PROVIDE_FAIL:
702 		error = ENODEV;
703 		break;
704 	}
705 
706 	if (free_path_on_return != 0)
707 		xpt_free_path(path);
708 
709 	return (error);
710 }
711 
712 static int
713 targfreeinstance(struct ioc_alloc_unit *alloc_unit)
714 {
715 	struct cam_path *path;
716 	struct cam_periph *periph;
717 	struct targ_softc *softc;
718 	cam_status status;
719 	int free_path_on_return;
720 	int error;
721 
722 	periph = NULL;
723 	free_path_on_return = 0;
724 	status = xpt_create_path(&path, /*periph*/NULL,
725 				 alloc_unit->path_id,
726 				 alloc_unit->target_id,
727 				 alloc_unit->lun_id);
728 	free_path_on_return++;
729 
730 	if (status != CAM_REQ_CMP)
731 		goto fail;
732 
733 	/* Find our instance. */
734 	if ((periph = cam_periph_find(path, "targ")) == NULL) {
735 		xpt_print_path(path);
736 		printf("Invalid path specified for freeing target instance\n");
737 		status = CAM_PATH_INVALID;
738 		goto fail;
739 	}
740 
741 	softc = (struct targ_softc *)periph->softc;
742 
743 	if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0) {
744 		status = CAM_BUSY;
745 		goto fail;
746 	}
747 
748 fail:
749 	if (free_path_on_return != 0)
750 		xpt_free_path(path);
751 
752 	switch (status) {
753 	case CAM_REQ_CMP:
754 		if (periph != NULL)
755 			cam_periph_invalidate(periph);
756 		error = 0;
757 		break;
758 	case CAM_RESRC_UNAVAIL:
759 		error = ENOMEM;
760 		break;
761 	case CAM_LUN_ALRDY_ENA:
762 		error = EADDRINUSE;
763 		break;
764 	default:
765 		printf("targfreeinstance: Unexpected CAM status %x\n", status);
766 		/* FALLTHROUGH */
767 	case CAM_PATH_INVALID:
768 		error = ENODEV;
769 		break;
770 	}
771 	return (error);
772 }
773 
774 static int
775 targioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
776 {
777 	struct cam_periph *periph;
778 	struct targ_softc *softc;
779 	u_int  unit;
780 	int    error;
781 
782 	unit = minor(dev);
783 	error = 0;
784 	if (TARG_IS_CONTROL_DEV(unit)) {
785 		switch (cmd) {
786 		case TARGCTLIOALLOCUNIT:
787 			error = targallocinstance((struct ioc_alloc_unit*)addr);
788 			break;
789 		case TARGCTLIOFREEUNIT:
790 			error = targfreeinstance((struct ioc_alloc_unit*)addr);
791 			break;
792 		default:
793 			error = EINVAL;
794 			break;
795 		}
796 		return (error);
797 	}
798 
799 	periph = cam_extend_get(targperiphs, unit);
800 	if (periph == NULL)
801 		return (ENXIO);
802 	softc = (struct targ_softc *)periph->softc;
803 	switch (cmd) {
804 	case TARGIOCFETCHEXCEPTION:
805 		*((targ_exception *)addr) = softc->exceptions;
806 		break;
807 	case TARGIOCCLEAREXCEPTION:
808 	{
809 		targ_exception clear_mask;
810 
811 		clear_mask = *((targ_exception *)addr);
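		/*
		 * When clearing TARG_EXCEPT_UNKNOWN_ATIO, requeue one of the
		 * unknown ATIOs back to the SIM; if more remain, keep the
		 * exception asserted so the daemon will be notified again.
		 */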
812 		if ((clear_mask & TARG_EXCEPT_UNKNOWN_ATIO) != 0) {
813 			struct ccb_hdr *ccbh;
814 
815 			ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
816 			if (ccbh != NULL) {
817 				TAILQ_REMOVE(&softc->unknown_atio_queue,
818 					     ccbh, periph_links.tqe);
819 				/* Requeue the ATIO back to the controller */
820 				ccbh->ccb_flags = TARG_CCB_NONE;
821 				xpt_action((union ccb *)ccbh);
822 				ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
823 			}
824 			if (ccbh != NULL)
825 				clear_mask &= ~TARG_EXCEPT_UNKNOWN_ATIO;
826 		}
827 		softc->exceptions &= ~clear_mask;
828 		if (softc->exceptions == TARG_EXCEPT_NONE
829 		 && softc->state == TARG_STATE_EXCEPTION) {
830 			softc->state = TARG_STATE_NORMAL;
831 			targrunqueue(periph, softc);
832 		}
833 		break;
834 	}
835 	case TARGIOCFETCHATIO:
836 	{
837 		struct ccb_hdr *ccbh;
838 
839 		ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
840 		if (ccbh != NULL) {
841 			bcopy(ccbh, addr, sizeof(struct ccb_accept_tio));
842 		} else {
843 			error = ENOENT;
844 		}
845 		break;
846 	}
847 	case TARGIOCCOMMAND:
848 	{
849 		union ccb *inccb;
850 		union ccb *ccb;
851 
852 		/*
853 		 * XXX JGibbs
854 		 * This code is lifted directly from the pass-thru driver.
855 		 * Perhaps this should be moved to a library????
856 		 */
857 		inccb = (union ccb *)addr;
858 		ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority);
859 
860 		error = targsendccb(periph, ccb, inccb);
861 
862 		xpt_release_ccb(ccb);
863 
864 		break;
865 	}
866 	case TARGIOCGETISTATE:
867 	case TARGIOCSETISTATE:
868 	{
869 		struct ioc_initiator_state *ioc_istate;
870 
871 		ioc_istate = (struct ioc_initiator_state *)addr;
872 		if (ioc_istate->initiator_id >= MAX_INITIATORS) {
873 			error = EINVAL;
874 			break;
875 		}
876 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
877 			  ("GET/SETISTATE for %d\n", ioc_istate->initiator_id));
878 		if (cmd == TARGIOCGETISTATE) {
879 			bcopy(&softc->istate[ioc_istate->initiator_id],
880 			      &ioc_istate->istate, sizeof(ioc_istate->istate));
881 		} else {
882 			bcopy(&ioc_istate->istate,
883 			      &softc->istate[ioc_istate->initiator_id],
884 			      sizeof(ioc_istate->istate));
885 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
886 				  ("pending_ca now %x\n",
887 				   softc->istate[ioc_istate->initiator_id].pending_ca));
888 		}
889 		break;
890 	}
891 #ifdef	CAMDEBUG
892 	case TARGIODEBUG:
893 	{
894 		union ccb ccb;
895 		bzero (&ccb, sizeof ccb);
896 		if (xpt_create_path(&ccb.ccb_h.path, periph,
897 		    xpt_path_path_id(periph->path),
898 		    xpt_path_target_id(periph->path),
899 		    xpt_path_lun_id(periph->path)) != CAM_REQ_CMP) {
900 			error = EINVAL;
901 			break;
902 		}
903 		if (*((int *)addr)) {
904 			ccb.cdbg.flags = CAM_DEBUG_PERIPH;
905 		} else {
906 			ccb.cdbg.flags = CAM_DEBUG_NONE;
907 		}
908 		xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, 0);
909 		ccb.ccb_h.func_code = XPT_DEBUG;
910 		ccb.ccb_h.path_id = xpt_path_path_id(ccb.ccb_h.path);
911 		ccb.ccb_h.target_id = xpt_path_target_id(ccb.ccb_h.path);
912 		ccb.ccb_h.target_lun = xpt_path_lun_id(ccb.ccb_h.path);
913 		ccb.ccb_h.cbfcnp = targdone;
914 		xpt_action(&ccb);
915 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
916 			error = EIO;
917 		} else {
918 			error = 0;
919 		}
920 		xpt_free_path(ccb.ccb_h.path);
921 		break;
922 	}
923 #endif
924 	default:
925 		error = ENOTTY;
926 		break;
927 	}
928 	return (error);
929 }
930 
931 /*
932  * XXX JGibbs lifted from pass-thru driver.
933  * Generally, "ccb" should be the CCB supplied by the kernel.  "inccb"
934  * should be the CCB that is copied in from the user.
935  */
936 static int
937 targsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
938 {
939 	struct targ_softc *softc;
940 	struct cam_periph_map_info mapinfo;
941 	int error, need_unmap;
942 	int s;
943 
944 	softc = (struct targ_softc *)periph->softc;
945 
946 	need_unmap = 0;
947 
948 	/*
949 	 * There are some fields in the CCB header that need to be
950 	 * preserved, the rest we get from the user.
951 	 */
952 	xpt_merge_ccb(ccb, inccb);
953 
954 	/*
955 	 * There's no way for the user to have a completion
956 	 * function, so we put our own completion function in here.
957 	 */
958 	ccb->ccb_h.cbfcnp = targdone;
959 
960 	/*
961 	 * We only attempt to map the user memory into kernel space
962 	 * if they haven't passed in a physical memory pointer,
963 	 * and if there is actually an I/O operation to perform.
964 	 * Right now cam_periph_mapmem() only supports SCSI and device
965 	 * match CCBs.  For the SCSI CCBs, we only pass the CCB in if
966 	 * there's actually data to map.  cam_periph_mapmem() will do the
967 	 * right thing, even if there isn't data to map, but since CCBs
968  * without data are a reasonably common occurrence (e.g. test unit
969 	 * ready), it will save a few cycles if we check for it here.
970 	 */
971 	if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0)
972 	 && (((ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
973 	    && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
974 	  || (ccb->ccb_h.func_code == XPT_DEV_MATCH))) {
975 
976 		bzero(&mapinfo, sizeof(mapinfo));
977 
978 		error = cam_periph_mapmem(ccb, &mapinfo);
979 
980 		/*
981 		 * cam_periph_mapmem returned an error, we can't continue.
982 		 * Return the error to the user.
983 		 */
984 		if (error)
985 			return(error);
986 
987 		/*
988 		 * We successfully mapped the memory in, so we need to
989 		 * unmap it when the transaction is done.
990 		 */
991 		need_unmap = 1;
992 	}
993 
994 	/*
995 	 * Once queued on the pending CCB list, this CCB will be protected
996 	 * by the error recovery handling used for 'buffer I/O' ccbs.  Since
997 	 * we are in a process context here, however, the software interrupt
998 	 * for this driver may deliver an event invalidating this CCB just
999 	 * before we queue it.  Close this race condition by blocking
1000 	 * software interrupt delivery, checking for any pertinent queued
1001 	 * events, and only then queuing this CCB.
1002 	 */
1003 	s = splsoftcam();
1004 	if (softc->exceptions == 0) {
1005 		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
1006 			TAILQ_INSERT_TAIL(&softc->pending_queue, &ccb->ccb_h,
1007 					  periph_links.tqe);
1008 
1009 		/*
1010 		 * If the user wants us to perform any error recovery,
1011 		 * then honor that request.  Otherwise, it's up to the
1012 		 * user to perform any error recovery.
1013 		 */
1014 		error = cam_periph_runccb(ccb, /* error handler */NULL,
1015 					  CAM_RETRY_SELTO, SF_RETRY_UA,
1016 					  &softc->device_stats);
1017 
1018 		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
1019 			TAILQ_REMOVE(&softc->pending_queue, &ccb->ccb_h,
1020 				     periph_links.tqe);
1021 	} else {
1022 		ccb->ccb_h.status = CAM_UNACKED_EVENT;
1023 		error = 0;
1024 	}
1025 	splx(s);
1026 
1027 	if (need_unmap != 0)
1028 		cam_periph_unmapmem(ccb, &mapinfo);
1029 
1030 	ccb->ccb_h.cbfcnp = NULL;
1031 	ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
1032 	bcopy(ccb, inccb, sizeof(union ccb));
1033 
1034 	return(error);
1035 }
1036 
1037 
1038 static int
1039 targpoll(dev_t dev, int poll_events, struct proc *p)
1040 {
1041 	struct cam_periph *periph;
1042 	struct targ_softc *softc;
1043 	u_int  unit;
1044 	int    revents;
1045 	int    s;
1046 
1047 	unit = minor(dev);
1048 
1049 	/* ioctl is the only supported operation of the control device */
1050 	if (TARG_IS_CONTROL_DEV(unit))
1051 		return EINVAL;
1052 
1053 	periph = cam_extend_get(targperiphs, unit);
1054 	if (periph == NULL)
1055 		return (ENXIO);
1056 	softc = (struct targ_softc *)periph->softc;
1057 
1058 	revents = 0;
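	/*
	 * We are writable when a RECEIVE ATIO is waiting and no write buffer
	 * is already queued, and readable when a SEND ATIO is waiting and no
	 * read buffer is queued, mirroring the pairing in targrunqueue().
	 */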
1059 	s = splcam();
1060 	if ((poll_events & (POLLOUT | POLLWRNORM)) != 0) {
1061 		if (TAILQ_FIRST(&softc->rcv_ccb_queue) != NULL
1062 		 && bioq_first(&softc->rcv_bio_queue) == NULL)
1063 			revents |= poll_events & (POLLOUT | POLLWRNORM);
1064 	}
1065 	if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
1066 		if (TAILQ_FIRST(&softc->snd_ccb_queue) != NULL
1067 		 && bioq_first(&softc->snd_bio_queue) == NULL)
1068 			revents |= poll_events & (POLLIN | POLLRDNORM);
1069 	}
1070 
1071 	if (softc->state != TARG_STATE_NORMAL)
1072 		revents |= POLLERR;
1073 
1074 	if (revents == 0) {
1075 		if (poll_events & (POLLOUT | POLLWRNORM))
1076 			selrecord(p, &softc->rcv_select);
1077 		if (poll_events & (POLLIN | POLLRDNORM))
1078 			selrecord(p, &softc->snd_select);
1079 	}
1080 	splx(s);
1081 	return (revents);
1082 }
1083 
1084 static int
1085 targread(dev_t dev, struct uio *uio, int ioflag)
1086 {
1087 	u_int  unit;
1088 
1089 	unit = minor(dev);
1090 	/* ioctl is the only supported operation of the control device */
1091 	if (TARG_IS_CONTROL_DEV(unit))
1092 		return EINVAL;
1093 
1094 	if (uio->uio_iovcnt == 0
1095 	 || uio->uio_iov->iov_len == 0) {
1096 		/* EOF */
1097 		struct cam_periph *periph;
1098 		struct targ_softc *softc;
1099 		int    s;
1100 
1101 		s = splcam();
1102 		periph = cam_extend_get(targperiphs, unit);
1103 		if (periph == NULL)
1104 			return (ENXIO);
1105 		softc = (struct targ_softc *)periph->softc;
1106 		softc->flags |= TARG_FLAG_SEND_EOF;
1107 		splx(s);
1108 		targrunqueue(periph, softc);
1109 		return (0);
1110 	}
1111 	return(physread(dev, uio, ioflag));
1112 }
1113 
1114 static int
1115 targwrite(dev_t dev, struct uio *uio, int ioflag)
1116 {
1117 	u_int  unit;
1118 
1119 	unit = minor(dev);
1120 	/* ioctl is the only supported operation of the control device */
1121 	if (TARG_IS_CONTROL_DEV(unit))
1122 		return EINVAL;
1123 
1124 	if (uio->uio_iovcnt == 0
1125 	 || uio->uio_iov->iov_len == 0) {
1126 		/* EOF */
1127 		struct cam_periph *periph;
1128 		struct targ_softc *softc;
1129 		int    s;
1130 
1131 		s = splcam();
1132 		periph = cam_extend_get(targperiphs, unit);
1133 		if (periph == NULL)
1134 			return (ENXIO);
1135 		softc = (struct targ_softc *)periph->softc;
1136 		softc->flags |= TARG_FLAG_RECEIVE_EOF;
1137 		splx(s);
1138 		targrunqueue(periph, softc);
1139 		return (0);
1140 	}
1141 	return(physwrite(dev, uio, ioflag));
1142 }
1143 
1144 /*
1145  * Actually translate the requested transfer into one the physical driver
1146  * can understand.  The transfer is described by a buf and will include
1147  * only one physical transfer.
1148  */
1149 static void
1150 targstrategy(struct bio *bp)
1151 {
1152 	struct cam_periph *periph;
1153 	struct targ_softc *softc;
1154 	u_int  unit;
1155 	int    s;
1156 
1157 	unit = minor(bp->bio_dev);
1158 	bp->bio_resid = bp->bio_bcount;
1159 
1160 	/* ioctl is the only supported operation of the control device */
1161 	if (TARG_IS_CONTROL_DEV(unit)) {
1162 		biofinish(bp, NULL, EINVAL);
1163 		return;
1164 	}
1165 
1166 	periph = cam_extend_get(targperiphs, unit);
1167 	if (periph == NULL) {
1168 		biofinish(bp, NULL, ENXIO);
1169 		return;
1170 	}
1171 	softc = (struct targ_softc *)periph->softc;
1172 
1173 	/*
1174 	 * Mask interrupts so that the device cannot be invalidated until
1175 	 * after we are in the queue.  Otherwise, we might not properly
1176 	 * clean up one of the buffers.
1177 	 */
1178 	s = splbio();
1179 
1180 	/*
1181 	 * If there is an exception pending, error out
1182 	 */
1183 	if (softc->state != TARG_STATE_NORMAL) {
1184 		splx(s);
1185 		if (softc->state == TARG_STATE_EXCEPTION
1186 		 && (softc->exceptions & TARG_EXCEPT_DEVICE_INVALID) == 0)
1187 			s = EBUSY;
1188 		else
1189 			s = ENXIO;
1190 		biofinish(bp, NULL, s);
1191 		return;
1192 	}
1193 
1194 	/*
1195 	 * Place it in the queue of buffers available for either
1196 	 * SEND or RECEIVE commands.
1197 	 *
1198 	 */
1199 	bp->bio_resid = bp->bio_bcount;
1200 	if (bp->bio_cmd == BIO_READ) {
1201 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1202 			  ("Queued a SEND buffer\n"));
1203 		bioq_insert_tail(&softc->snd_bio_queue, bp);
1204 	} else {
1205 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1206 			  ("Queued a RECEIVE buffer\n"));
1207 		bioq_insert_tail(&softc->rcv_bio_queue, bp);
1208 	}
1209 
1210 	splx(s);
1211 
1212 	/*
1213 	 * Attempt to use the new buffer to service any pending
1214 	 * target commands.
1215 	 */
1216 	targrunqueue(periph, softc);
1217 
1218 	return;
1219 }
1220 
1221 static void
1222 targrunqueue(struct cam_periph *periph, struct targ_softc *softc)
1223 {
1224 	struct  ccb_queue *pending_queue;
1225 	struct	ccb_accept_tio *atio;
1226 	struct	bio_queue_head *bioq;
1227 	struct	bio *bp;
1228 	struct	targ_cmd_desc *desc;
1229 	struct	ccb_hdr *ccbh;
1230 	int	s;
1231 
1232 	s = splbio();
1233 	pending_queue = NULL;
1234 	bioq = NULL;
1235 	ccbh = NULL;
1236 	/* Only run one request at a time to maintain data ordering. */
1237 	if (softc->state != TARG_STATE_NORMAL
1238 	 || TAILQ_FIRST(&softc->work_queue) != NULL
1239 	 || TAILQ_FIRST(&softc->pending_queue) != NULL) {
1240 		splx(s);
1241 		return;
1242 	}
1243 
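	/*
	 * Pair a queued user buffer (or pending EOF) with an ATIO waiting
	 * for that direction: SEND ATIOs consume 'read' buffers and RECEIVE
	 * ATIOs consume 'write' buffers.
	 */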
1244 	if (((bp = bioq_first(&softc->snd_bio_queue)) != NULL
1245 	  || (softc->flags & TARG_FLAG_SEND_EOF) != 0)
1246 	 && (ccbh = TAILQ_FIRST(&softc->snd_ccb_queue)) != NULL) {
1247 
1248 		if (bp == NULL)
1249 			softc->flags &= ~TARG_FLAG_SEND_EOF;
1250 		else {
1251 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1252 				  ("De-Queued a SEND buffer %ld\n",
1253 				   bp->bio_bcount));
1254 		}
1255 		bioq = &softc->snd_bio_queue;
1256 		pending_queue = &softc->snd_ccb_queue;
1257 	} else if (((bp = bioq_first(&softc->rcv_bio_queue)) != NULL
1258 	  	 || (softc->flags & TARG_FLAG_RECEIVE_EOF) != 0)
1259 		&& (ccbh = TAILQ_FIRST(&softc->rcv_ccb_queue)) != NULL) {
1260 
1261 		if (bp == NULL)
1262 			softc->flags &= ~TARG_FLAG_RECEIVE_EOF;
1263 		else {
1264 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1265 				  ("De-Queued a RECEIVE buffer %ld\n",
1266 				   bp->bio_bcount));
1267 		}
1268 		bioq = &softc->rcv_bio_queue;
1269 		pending_queue = &softc->rcv_ccb_queue;
1270 	}
1271 
1272 	if (pending_queue != NULL) {
1273 		/* Process a request */
1274 		atio = (struct ccb_accept_tio *)ccbh;
1275 		TAILQ_REMOVE(pending_queue, ccbh, periph_links.tqe);
1276 		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1277 		desc->bp = bp;
1278 		if (bp == NULL) {
1279 			/* EOF */
1280 			desc->data = NULL;
1281 			desc->data_increment = 0;
1282 			desc->data_resid = 0;
1283 			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1284 			atio->ccb_h.flags |= CAM_DIR_NONE;
1285 		} else {
1286 			bioq_remove(bioq, bp);
1287 			desc->data = &bp->bio_data[bp->bio_bcount - bp->bio_resid];
1288 			desc->data_increment =
1289 			    MIN(desc->data_resid, bp->bio_resid);
1290 		}
1291 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1292 			  ("Buffer command: data %p: datacnt %d\n",
1293 			   desc->data, desc->data_increment));
1294 		TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1295 				  periph_links.tqe);
1296 	}
1297 	atio = (struct ccb_accept_tio *)TAILQ_FIRST(&softc->work_queue);
1298 	if (atio != NULL) {
1299 		int priority;
1300 
1301 		priority = (atio->ccb_h.flags & CAM_DIS_DISCONNECT) ? 0 : 1;
1302 		splx(s);
1303 		xpt_schedule(periph, priority);
1304 	} else
1305 		splx(s);
1306 }
1307 
1308 static void
1309 targstart(struct cam_periph *periph, union ccb *start_ccb)
1310 {
1311 	struct targ_softc *softc;
1312 	struct ccb_hdr *ccbh;
1313 	struct ccb_accept_tio *atio;
1314 	struct targ_cmd_desc *desc;
1315 	struct ccb_scsiio *csio;
1316 	targ_ccb_flags flags;
1317 	int    s;
1318 
1319 	softc = (struct targ_softc *)periph->softc;
1320 
1321 	s = splbio();
1322 	ccbh = TAILQ_FIRST(&softc->work_queue);
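	/*
	 * A process sleeping in cam_periph_getccb() (e.g. the TARGIOCCOMMAND
	 * ioctl path) has first claim on this CCB; hand it over and wake it.
	 */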
1323 	if (periph->immediate_priority <= periph->pinfo.priority) {
1324 		start_ccb->ccb_h.ccb_flags = TARG_CCB_WAITING;
1325 		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1326 				  periph_links.sle);
1327 		periph->immediate_priority = CAM_PRIORITY_NONE;
1328 		splx(s);
1329 		wakeup(&periph->ccb_list);
1330 	} else if (ccbh == NULL) {
1331 		splx(s);
1332 		xpt_release_ccb(start_ccb);
1333 	} else {
1334 		TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
1335 		splx(s);
1336 		atio = (struct ccb_accept_tio*)ccbh;
1337 		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1338 
1339 		/* Is this a tagged request? */
1340 		flags = atio->ccb_h.flags &
1341 		    (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
1342 
1343 		/*
1344 		 * If we are done with the transaction, tell the
1345 		 * controller to send status and perform a CMD_CMPLT.
1346 		 */
1347 		if (desc->data_resid == desc->data_increment)
1348 			flags |= CAM_SEND_STATUS;
1349 
1350 		csio = &start_ccb->csio;
1351 		cam_fill_ctio(csio,
1352 			      /*retries*/2,
1353 			      targdone,
1354 			      flags,
1355 			      (flags & CAM_TAG_ACTION_VALID)?
1356 				MSG_SIMPLE_Q_TAG : 0,
1357 			      atio->tag_id,
1358 			      atio->init_id,
1359 			      desc->status,
1360 			      /*data_ptr*/desc->data_increment == 0
1361 					  ? NULL : desc->data,
1362 			      /*dxfer_len*/desc->data_increment,
1363 			      /*timeout*/desc->timeout);
1364 
1365 		if ((flags & CAM_SEND_STATUS) != 0
1366 		 && (desc->status == SCSI_STATUS_CHECK_COND
1367 		  || desc->status == SCSI_STATUS_CMD_TERMINATED)) {
1368 			struct initiator_state *istate;
1369 
1370 			istate = &softc->istate[atio->init_id];
1371 			csio->sense_len = istate->sense_data.extra_len
1372 					+ offsetof(struct scsi_sense_data,
1373 						   extra_len);
1374 			bcopy(&istate->sense_data, &csio->sense_data,
1375 			      csio->sense_len);
1376 			csio->ccb_h.flags |= CAM_SEND_SENSE;
1377 		} else {
1378 			csio->sense_len = 0;
1379 		}
1380 
1381 		start_ccb->ccb_h.ccb_flags = TARG_CCB_NONE;
1382 		start_ccb->ccb_h.ccb_atio = atio;
1383 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1384 			  ("Sending a CTIO (flags 0x%x)\n", csio->ccb_h.flags));
1385 		TAILQ_INSERT_TAIL(&softc->pending_queue, &csio->ccb_h,
1386 				  periph_links.tqe);
1387 		xpt_action(start_ccb);
1388 		/*
1389 		 * If the queue was frozen waiting for the response
1390 		 * to this ATIO (for instance disconnection was disallowed),
1391 		 * then release it now that our response has been queued.
1392 		 */
1393 		if ((atio->ccb_h.flags & CAM_DEV_QFRZN) != 0) {
1394 			cam_release_devq(periph->path,
1395 					 /*relsim_flags*/0,
1396 					 /*reduction*/0,
1397 					 /*timeout*/0,
1398 					 /*getcount_only*/0);
1399 			atio->ccb_h.flags &= ~CAM_DEV_QFRZN;
1400 		}
1401 		s = splbio();
1402 		ccbh = TAILQ_FIRST(&softc->work_queue);
1403 		splx(s);
1404 	}
1405 	if (ccbh != NULL)
1406 		targrunqueue(periph, softc);
1407 }
1408 
1409 static void
1410 targdone(struct cam_periph *periph, union ccb *done_ccb)
1411 {
1412 	struct targ_softc *softc;
1413 
1414 	softc = (struct targ_softc *)periph->softc;
1415 
1416 	if (done_ccb->ccb_h.ccb_flags == TARG_CCB_WAITING) {
1417 		/* Caller will release the CCB */
1418 		wakeup(&done_ccb->ccb_h.cbfcnp);
1419 		return;
1420 	}
1421 
1422 	CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1423 		("targdone %x\n", done_ccb->ccb_h.func_code));
1424 
1425 	switch (done_ccb->ccb_h.func_code) {
1426 	case XPT_ACCEPT_TARGET_IO:
1427 	{
1428 		struct ccb_accept_tio *atio;
1429 		struct targ_cmd_desc *descr;
1430 		struct initiator_state *istate;
1431 		u_int8_t *cdb;
1432 		int priority;
1433 
1434 		atio = &done_ccb->atio;
1435 		descr = (struct targ_cmd_desc*)atio->ccb_h.ccb_descr;
1436 		istate = &softc->istate[atio->init_id];
1437 		cdb = atio->cdb_io.cdb_bytes;
1438 		if (softc->state == TARG_STATE_TEARDOWN
1439 		 || atio->ccb_h.status == CAM_REQ_ABORTED) {
1440 			freedescr(descr);
1441 			free(done_ccb, M_DEVBUF);
1442 			return;
1443 		}
1444 
1445 #ifdef	CAMDEBUG
1446 		{
1447 			int i, off;
1448 			char dcb[128];
1449 			for (dcb[0] = '\0', off = 0, i = 0; i < atio->cdb_len; i++) {
1450 				off += snprintf(dcb + off, sizeof(dcb) - off,
1451 				    " %02x", cdb[i] & 0xff);
1452 			}
1453 			CAM_DEBUG(periph->path,
1454 			    CAM_DEBUG_PERIPH, ("cdb:%s\n", dcb));
1455 		}
1456 #endif
1457 		if (atio->sense_len != 0) {
1458 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1459 				  ("ATIO with sense_len\n"));
1460 
1461 			/*
1462 			 * We had an error in the reception of
1463 			 * this command.  Immediately issue a CA.
1464 			 */
1465 			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1466 			atio->ccb_h.flags |= CAM_DIR_NONE;
1467 			descr->data_resid = 0;
1468 			descr->data_increment = 0;
1469 			descr->timeout = 5 * 1000;
1470 			descr->status = SCSI_STATUS_CHECK_COND;
1471 			copy_sense(softc, istate, (u_int8_t *)&atio->sense_data,
1472 				   atio->sense_len);
1473 			set_ca_condition(periph, atio->init_id, CA_CMD_SENSE);
1474 		} else if (istate->pending_ca == 0
1475 			&& istate->pending_ua != 0
1476 			&& cdb[0] != INQUIRY) {
1477 
1478 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1479 			    ("pending_ca %d pending_ua %d\n",
1480 			    istate->pending_ca, istate->pending_ua));
1481 
1482 			/* Pending UA, tell initiator */
1483 			/* Direction is always relative to the initiator */
1484 			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1485 			atio->ccb_h.flags |= CAM_DIR_NONE;
1486 			descr->data_resid = 0;
1487 			descr->data_increment = 0;
1488 			descr->timeout = 5 * 1000;
1489 			descr->status = SCSI_STATUS_CHECK_COND;
1490 			fill_sense(softc, atio->init_id,
1491 				   SSD_CURRENT_ERROR, SSD_KEY_UNIT_ATTENTION,
1492 				   0x29,
1493 				   istate->pending_ua == UA_POWER_ON ? 1 : 2);
1494 			set_ca_condition(periph, atio->init_id, CA_UNIT_ATTN);
1495 		} else {
1496 			/*
1497 			 * Save the current CA and UA status so
1498 			 * they can be used by this command.
1499 			 */
1500 			ua_types pending_ua;
1501 			ca_types pending_ca;
1502 
1503 			pending_ua = istate->pending_ua;
1504 			pending_ca = istate->pending_ca;
1505 
1506 			/*
1507 			 * As per the SCSI2 spec, any command that occurs
1508 			 * after a CA is reported clears the CA.  We must
1509 			 * also clear the UA condition, if any, that caused
1510 			 * the CA to occur assuming the UA is not for a
1511 			 * persistent condition.
1512 			 */
1513 			istate->pending_ca = CA_NONE;
1514 			if (pending_ca == CA_UNIT_ATTN)
1515 				istate->pending_ua = UA_NONE;
1516 
1517 			/*
1518 			 * Determine the type of incoming command and
1519 			 * setup our buffer for a response.
1520 			 */
1521 			switch (cdb[0]) {
1522 			case INQUIRY:
1523 			{
1524 				struct scsi_inquiry *inq;
1525 				struct scsi_sense_data *sense;
1526 
1527 				inq = (struct scsi_inquiry *)cdb;
1528 				sense = &istate->sense_data;
1529 				descr->status = SCSI_STATUS_OK;
1530 				CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1531 					  ("Saw an inquiry!\n"));
1532 				/*
1533 				 * Validate the command.  We don't
1534 				 * support any VPD pages, so complain
1535 				 * if EVPD is set.
1536 				 */
1537 				if ((inq->byte2 & SI_EVPD) != 0
1538 				 || inq->page_code != 0) {
1539 					atio->ccb_h.flags &= ~CAM_DIR_MASK;
1540 					atio->ccb_h.flags |= CAM_DIR_NONE;
1541 					descr->data_resid = 0;
1542 					descr->data_increment = 0;
1543 					descr->timeout = 5 * 1000;
1544 					descr->status = SCSI_STATUS_CHECK_COND;
1545 					fill_sense(softc, atio->init_id,
1546 						   SSD_CURRENT_ERROR,
1547 						   SSD_KEY_ILLEGAL_REQUEST,
1548 						   /*asc*/0x24, /*ascq*/0x00);
1549 					sense->extra_len =
1550 						offsetof(struct scsi_sense_data,
1551 							 extra_bytes)
1552 					      - offsetof(struct scsi_sense_data,
1553 							 extra_len);
1554 					set_ca_condition(periph, atio->init_id,
1555 							 CA_CMD_SENSE);
1556 				}
1557 
1558 				if ((inq->byte2 & SI_EVPD) != 0) {
1559 					sense->sense_key_spec[0] =
1560 					    SSD_SCS_VALID|SSD_FIELDPTR_CMD
1561 					   |SSD_BITPTR_VALID| /*bit value*/1;
1562 					sense->sense_key_spec[1] = 0;
1563 					sense->sense_key_spec[2] =
1564 					    offsetof(struct scsi_inquiry,
1565 						     byte2);
1566 				} else if (inq->page_code != 0) {
1567 					sense->sense_key_spec[0] =
1568 					    SSD_SCS_VALID|SSD_FIELDPTR_CMD;
1569 					sense->sense_key_spec[1] = 0;
1570 					sense->sense_key_spec[2] =
1571 					    offsetof(struct scsi_inquiry,
1572 						     page_code);
1573 				}
1574 				if (descr->status == SCSI_STATUS_CHECK_COND)
1575 					break;
1576 
1577 				/*
1578 				 * Direction is always relative
1579 				 * to the initiator.
1580 				 */
1581 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1582 				atio->ccb_h.flags |= CAM_DIR_IN;
1583 				descr->data = softc->inq_data;
1584 				descr->data_resid =
1585 				    MIN(softc->inq_data_len,
1586 					SCSI_CDB6_LEN(inq->length));
1587 				descr->data_increment = descr->data_resid;
1588 				descr->timeout = 5 * 1000;
1589 				break;
1590 			}
1591 			case TEST_UNIT_READY:
1592 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1593 				atio->ccb_h.flags |= CAM_DIR_NONE;
1594 				descr->data_resid = 0;
1595 				descr->data_increment = 0;
1596 				descr->timeout = 5 * 1000;
1597 				descr->status = SCSI_STATUS_OK;
1598 				break;
1599 			case REQUEST_SENSE:
1600 			{
1601 				struct scsi_request_sense *rsense;
1602 				struct scsi_sense_data *sense;
1603 
1604 				rsense = (struct scsi_request_sense *)cdb;
1605 				sense = &istate->sense_data;
1606 				if (pending_ca == 0) {
1607 					fill_sense(softc, atio->init_id,
1608 						   SSD_CURRENT_ERROR,
1609 						   SSD_KEY_NO_SENSE, 0x00,
1610 						   0x00);
1611 					CAM_DEBUG(periph->path,
1612 						  CAM_DEBUG_PERIPH,
1613 						  ("No pending CA!\n"));
1614 				}
1615 				/*
1616 				 * Direction is always relative
1617 				 * to the initiator.
1618 				 */
1619 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1620 				atio->ccb_h.flags |= CAM_DIR_IN;
1621 				descr->data = sense;
1622 				descr->data_resid =
1623 			 		offsetof(struct scsi_sense_data,
1624 						 extra_len)
1625 				      + sense->extra_len;
1626 				descr->data_resid =
1627 				    MIN(descr->data_resid,
1628 					SCSI_CDB6_LEN(rsense->length));
1629 				descr->data_increment = descr->data_resid;
1630 				descr->timeout = 5 * 1000;
1631 				descr->status = SCSI_STATUS_OK;
1632 				break;
1633 			}
1634 			case RECEIVE:
1635 			case SEND:
1636 			{
1637 				struct scsi_send_receive *sr;
1638 
1639 				sr = (struct scsi_send_receive *)cdb;
1640 
1641 				/*
1642 				 * Direction is always relative
1643 				 * to the initiator.
1644 				 */
1645 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1646 				descr->data_resid = scsi_3btoul(sr->xfer_len);
1647 				descr->timeout = 5 * 1000;
1648 				descr->status = SCSI_STATUS_OK;
1649 				if (cdb[0] == SEND) {
1650 					atio->ccb_h.flags |= CAM_DIR_OUT;
1651 					CAM_DEBUG(periph->path,
1652 						  CAM_DEBUG_PERIPH,
1653 						  ("Saw a SEND!\n"));
1655 					TAILQ_INSERT_TAIL(&softc->snd_ccb_queue,
1656 							  &atio->ccb_h,
1657 							  periph_links.tqe);
1658 					selwakeup(&softc->snd_select);
1659 				} else {
1660 					atio->ccb_h.flags |= CAM_DIR_IN;
1661 					CAM_DEBUG(periph->path,
1662 						  CAM_DEBUG_PERIPH,
1663 						  ("Saw a RECEIVE!\n"));
1664 					TAILQ_INSERT_TAIL(&softc->rcv_ccb_queue,
1665 							  &atio->ccb_h,
1666 							  periph_links.tqe);
1667 					selwakeup(&softc->rcv_select);
1668 				}
1669 				/*
1670 				 * Attempt to satisfy this request with
1671 				 * a user buffer.
1672 				 */
1673 				targrunqueue(periph, softc);
1674 				return;
1675 			}
1676 			default:
1677 				/*
1678 				 * Queue for consumption by our userland
1679 				 * counterpart and  transition to the exception
1680 				 * state.
1681 				 */
1682 				TAILQ_INSERT_TAIL(&softc->unknown_atio_queue,
1683 						  &atio->ccb_h,
1684 						  periph_links.tqe);
1685 				softc->exceptions |= TARG_EXCEPT_UNKNOWN_ATIO;
1686 				targfireexception(periph, softc);
1687 				return;
1688 			}
1689 		}
1690 
1691 		/* Queue us up to receive a Continue Target I/O ccb. */
1692 		if ((atio->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) {
1693 			TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
1694 					  periph_links.tqe);
1695 			priority = 0;
1696 		} else {
1697 			TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1698 					  periph_links.tqe);
1699 			priority = 1;
1700 		}
1701 		xpt_schedule(periph, priority);
1702 		break;
1703 	}
1704 	case XPT_CONT_TARGET_IO:
1705 	{
1706 		struct ccb_scsiio *csio;
1707 		struct ccb_accept_tio *atio;
1708 		struct targ_cmd_desc *desc;
1709 		struct bio *bp;
1710 		int    error;
1711 
1712 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1713 			  ("Received completed CTIO\n"));
1714 		csio = &done_ccb->csio;
1715 		atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio;
1716 		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1717 
1718 		TAILQ_REMOVE(&softc->pending_queue, &done_ccb->ccb_h,
1719 			     periph_links.tqe);
1720 
1721 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1722 			printf("CCB with error %x\n", done_ccb->ccb_h.status);
1723 			error = targerror(done_ccb, 0, 0);
1724 			if (error == ERESTART)
1725 				break;
1726 			/*
1727 			 * Right now we don't need to do anything
1728 			 * prior to unfreezing the queue...
1729 			 */
1730 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1731 				printf("Releasing Queue\n");
1732 				cam_release_devq(done_ccb->ccb_h.path,
1733 						 /*relsim_flags*/0,
1734 						 /*reduction*/0,
1735 						 /*timeout*/0,
1736 						 /*getcount_only*/0);
1737 			}
1738 		} else
1739 			error = 0;
1740 
1741 		/*
1742 		 * If we shipped back sense data when completing
1743 		 * this command, clear the pending CA for it.
1744 		 */
1745 		if (done_ccb->ccb_h.status & CAM_SENT_SENSE) {
1746 			struct initiator_state *istate;
1747 
1748 			istate = &softc->istate[csio->init_id];
1749 			if (istate->pending_ca == CA_UNIT_ATTN)
1750 				istate->pending_ua = UA_NONE;
1751 			istate->pending_ca = CA_NONE;
1752 			softc->istate[csio->init_id].pending_ca = CA_NONE;
1753 			done_ccb->ccb_h.status &= ~CAM_SENT_SENSE;
1754 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1755 				  ("Sent Sense\n"));
1756 		}
1757 		done_ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1758 
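		/*
		 * csio->resid is what the SIM did not move for this CTIO;
		 * shrink data_increment to the amount actually transferred
		 * before charging it against the command's data_resid.
		 */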
1759 		desc->data_increment -= csio->resid;
1760 		desc->data_resid -= desc->data_increment;
1761 		if ((bp = desc->bp) != NULL) {
1762 
1763 			bp->bio_resid -= desc->data_increment;
1764 			bp->bio_error = error;
1765 
1766 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1767 				  ("Buffer I/O Completed - Resid %ld:%d\n",
1768 				   bp->bio_resid, desc->data_resid));
1769 			/*
1770 			 * Send the buffer back to the client if
1771 			 * either the command has completed or all
1772 			 * buffer space has been consumed.
1773 			 */
1774 			if (desc->data_resid == 0
1775 			 || bp->bio_resid == 0
1776 			 || error != 0) {
1777 				if (bp->bio_resid != 0)
1778 					/* Short transfer */
1779 					bp->bio_flags |= BIO_ERROR;
1780 
1781 				CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1782 					  ("Completing a buffer\n"));
1783 				biodone(bp);
1784 				desc->bp = NULL;
1785 			}
1786 		}
1787 
1788 		xpt_release_ccb(done_ccb);
1789 		if (softc->state != TARG_STATE_TEARDOWN) {
1790 
1791 			if (desc->data_resid == 0) {
1792 				/*
1793 				 * Send the original accept TIO back to the
1794 				 * controller to handle more work.
1795 				 */
1796 				CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1797 					  ("Returning ATIO to target SIM\n"));
1798 				atio->ccb_h.ccb_flags = TARG_CCB_NONE;
1799 				xpt_action((union ccb *)atio);
1800 				break;
1801 			}
1802 
1803 			/* Queue us up for another buffer */
1804 			if (atio->cdb_io.cdb_bytes[0] == SEND) {
1805 				if (desc->bp != NULL)
1806 					TAILQ_INSERT_HEAD(
1807 						&softc->snd_bio_queue.queue,
1808 						bp, bio_queue);
1809 				TAILQ_INSERT_HEAD(&softc->snd_ccb_queue,
1810 						  &atio->ccb_h,
1811 						  periph_links.tqe);
1812 			} else {
1813 				if (desc->bp != NULL)
1814 					TAILQ_INSERT_HEAD(
1815 						&softc->rcv_bio_queue.queue,
1816 						bp, bio_queue);
1817 				TAILQ_INSERT_HEAD(&softc->rcv_ccb_queue,
1818 						  &atio->ccb_h,
1819 						  periph_links.tqe);
1820 			}
1821 			desc->bp = NULL;
1822 			targrunqueue(periph, softc);
1823 		} else {
1824 			if (desc->bp != NULL) {
1825 				bp->bio_flags |= BIO_ERROR;
1826 				bp->bio_error = ENXIO;
1827 				biodone(bp);
1828 			}
1829 			freedescr(desc);
1830 			free(atio, M_DEVBUF);
1831 		}
1832 		break;
1833 	}
1834 	case XPT_IMMED_NOTIFY:
1835 	{
1836 		int frozen;
1837 
1838 		frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
1839 		if (softc->state == TARG_STATE_TEARDOWN) {
1840 			SLIST_REMOVE(&softc->immed_notify_slist,
1841 				     &done_ccb->ccb_h, ccb_hdr,
1842 				     periph_links.sle);
1843 			free(done_ccb, M_DEVBUF);
1844 		} else if (done_ccb->ccb_h.status == CAM_REQ_ABORTED) {
1845 			free(done_ccb, M_DEVBUF);
1846 		} else {
1847 			printf("Saw event %x:%x\n", done_ccb->ccb_h.status,
1848 			       done_ccb->cin.message_args[0]);
1849 			/* Process error condition. */
1850 			targinoterror(periph, softc, &done_ccb->cin);
1851 
1852 			/* Requeue for another immediate event */
1853 			xpt_action(done_ccb);
1854 		}
1855 		if (frozen != 0)
1856 			cam_release_devq(periph->path,
1857 					 /*relsim_flags*/0,
1858 					 /*opening reduction*/0,
1859 					 /*timeout*/0,
1860 					 /*getcount_only*/0);
1861 		break;
1862 	}
1863 	case XPT_DEBUG:
1864 		wakeup(&done_ccb->ccb_h.cbfcnp);
1865 		break;
1866 	default:
1867 		panic("targdone: Impossible xpt opcode %x encountered.",
1868 		      done_ccb->ccb_h.func_code);
1869 		/* NOTREACHED */
1870 		break;
1871 	}
1872 }
1873 
1874 /*
1875  * Transition to the exception state and notify our symbiotic
1876  * userland process of the change.
1877  */
1878 static void
1879 targfireexception(struct cam_periph *periph, struct targ_softc *softc)
1880 {
1881 	/*
1882 	 * Return all pending buffers with short read/write status so our
1883 	 * process unblocks, and do a selwakeup on any process queued
1884 	 * waiting for reads or writes.  When the selwakeup is performed,
1885 	 * the waking process will wake up, call our poll routine again,
1886 	 * and pick up the exception.
1887 	 */
1888 	struct bio *bp;
1889 
1890 	if (softc->state != TARG_STATE_NORMAL)
1891 		/* Already either tearing down or in exception state */
1892 		return;
1893 
1894 	softc->state = TARG_STATE_EXCEPTION;
1895 
1896 	while ((bp = bioq_first(&softc->snd_bio_queue)) != NULL) {
1897 		bioq_remove(&softc->snd_bio_queue, bp);
1898 		bp->bio_flags |= BIO_ERROR;
1899 		biodone(bp);
1900 	}
1901 
1902 	while ((bp = bioq_first(&softc->rcv_bio_queue)) != NULL) {
1903 		bioq_remove(&softc->rcv_bio_queue, bp);
1904 		bp->bio_flags |= BIO_ERROR;
1905 		biodone(bp);
1906 	}
1907 
1908 	selwakeup(&softc->snd_select);
1909 	selwakeup(&softc->rcv_select);
1910 }
1911 
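/*
 * Process an event delivered via an immediate notify CCB.  Bus resets
 * and BDRs set a unit attention for every initiator, abort outstanding
 * work, and raise an exception for the userland process.  Received
 * messages are currently recognized but not acted upon.
 */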
1912 static void
1913 targinoterror(struct cam_periph *periph, struct targ_softc *softc,
1914 	      struct ccb_immed_notify *inot)
1915 {
1916 	cam_status status;
1917 	int sense;
1918 
1919 	status = inot->ccb_h.status;
1920 	sense = (status & CAM_AUTOSNS_VALID) != 0;
1921 	status &= CAM_STATUS_MASK;
1922 	switch (status) {
1923 	case CAM_SCSI_BUS_RESET:
1924 		set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD,
1925 					UA_BUS_RESET);
1926 		abort_pending_transactions(periph,
1927 					   /*init_id*/CAM_TARGET_WILDCARD,
1928 					   TARG_TAG_WILDCARD, EINTR,
1929 					   /*to_held_queue*/FALSE);
1930 		softc->exceptions |= TARG_EXCEPT_BUS_RESET_SEEN;
1931 		targfireexception(periph, softc);
1932 		break;
1933 	case CAM_BDR_SENT:
1934 		set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD,
1935 					UA_BDR);
1936 		abort_pending_transactions(periph, CAM_TARGET_WILDCARD,
1937 					   TARG_TAG_WILDCARD, EINTR,
1938 					   /*to_held_queue*/FALSE);
1939 		softc->exceptions |= TARG_EXCEPT_BDR_RECEIVED;
1940 		targfireexception(periph, softc);
1941 		break;
1942 	case CAM_MESSAGE_RECV:
1943 		switch (inot->message_args[0]) {
1944 		case MSG_INITIATOR_DET_ERR:
1945 			break;
1946 		case MSG_ABORT:
1947 			break;
1948 		case MSG_BUS_DEV_RESET:
1949 			break;
1950 		case MSG_ABORT_TAG:
1951 			break;
1952 		case MSG_CLEAR_QUEUE:
1953 			break;
1954 		case MSG_TERM_IO_PROC:
1955 			break;
1956 		default:
1957 			break;
1958 		}
1959 		break;
1960 	default:
1961 		break;
1962 	}
1963 }
1964 
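/*
 * Error recovery callback for completed CTIOs.  Translate the CAM
 * status into an errno value, building sense data and a pending
 * contingent allegiance condition where the initiator should see a
 * check condition.  ERESTART indicates that the CCB has been, or will
 * be, reissued rather than completed.
 */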
1965 static int
1966 targerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1967 {
1968 	struct cam_periph *periph;
1969 	struct targ_softc *softc;
1970 	struct ccb_scsiio *csio;
1971 	struct initiator_state *istate;
1972 	cam_status status;
1973 	int frozen;
1974 	int sense;
1975 	int error;
1976 	int on_held_queue;
1977 
1978 	periph = xpt_path_periph(ccb->ccb_h.path);
1979 	softc = (struct targ_softc *)periph->softc;
1980 	status = ccb->ccb_h.status;
1981 	sense = (status & CAM_AUTOSNS_VALID) != 0;
1982 	frozen = (status & CAM_DEV_QFRZN) != 0;
1983 	status &= CAM_STATUS_MASK;
1984 	on_held_queue = FALSE;
1985 	csio = &ccb->csio;
1986 	istate = &softc->istate[csio->init_id];
1987 	switch (status) {
1988 	case CAM_REQ_ABORTED:
1989 		if ((ccb->ccb_h.ccb_flags & TARG_CCB_ABORT_TO_HELDQ) != 0) {
1990 
1991 			/*
1992 			 * Place this CCB into the initiator's
1993 			 * 'held' queue until the pending CA is cleared.
1994 			 * If there is no CA pending, reissue immediately.
1995 			 */
1996 			if (istate->pending_ca == 0) {
1997 				ccb->ccb_h.ccb_flags = TARG_CCB_NONE;
1998 				xpt_action(ccb);
1999 			} else {
2000 				ccb->ccb_h.ccb_flags = TARG_CCB_HELDQ;
2001 				TAILQ_INSERT_TAIL(&softc->pending_queue,
2002 						  &ccb->ccb_h,
2003 						  periph_links.tqe);
2004 			}
2005 			/* The command will be retried at a later time. */
2006 			on_held_queue = TRUE;
2007 			error = ERESTART;
2008 			break;
2009 		}
2010 		/* FALLTHROUGH */
2011 	case CAM_SCSI_BUS_RESET:
2012 	case CAM_BDR_SENT:
2013 	case CAM_REQ_TERMIO:
2014 	case CAM_CMD_TIMEOUT:
2015 		/* Assume we did not send any data */
2016 		csio->resid = csio->dxfer_len;
2017 		error = EIO;
2018 		break;
2019 	case CAM_SEL_TIMEOUT:
2020 		if (ccb->ccb_h.retry_count > 0) {
2021 			ccb->ccb_h.retry_count--;
2022 			error = ERESTART;
2023 		} else {
2024 			/* "Select or reselect failure" */
2025 			csio->resid = csio->dxfer_len;
2026 			fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
2027 				   SSD_KEY_HARDWARE_ERROR, 0x45, 0x00);
2028 			set_ca_condition(periph, csio->init_id, CA_CMD_SENSE);
2029 			error = EIO;
2030 		}
2031 		break;
2032 	case CAM_UNCOR_PARITY:
2033 		/* "SCSI parity error" */
2034 		fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
2035 			   SSD_KEY_HARDWARE_ERROR, 0x47, 0x00);
2036 		set_ca_condition(periph, csio->init_id, CA_CMD_SENSE);
2037 		csio->resid = csio->dxfer_len;
2038 		error = EIO;
2039 		break;
2040 	case CAM_NO_HBA:
2041 		csio->resid = csio->dxfer_len;
2042 		error = ENXIO;
2043 		break;
2044 	case CAM_SEQUENCE_FAIL:
2045 		if (sense != 0) {
2046 			copy_sense(softc, istate, (u_int8_t *)&csio->sense_data,
2047 				   csio->sense_len);
2048 			set_ca_condition(periph, csio->init_id, CA_CMD_SENSE);
2049 		}
2050 		csio->resid = csio->dxfer_len;
2051 		error = EIO;
2052 		break;
2053 	case CAM_IDE:
2054 		/* "Initiator detected error message received" */
2055 		fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
2056 			   SSD_KEY_HARDWARE_ERROR, 0x48, 0x00);
2057 		set_ca_condition(periph, csio->init_id, CA_CMD_SENSE);
2058 		csio->resid = csio->dxfer_len;
2059 		error = EIO;
2060 		break;
2061 	case CAM_REQUEUE_REQ:
2062 		printf("Requeue Request!\n");
2063 		error = ERESTART;
2064 		break;
2065 	default:
2066 		csio->resid = csio->dxfer_len;
2067 		error = EIO;
2068 		panic("targerror: Unexpected status %x encounterd", status);
2069 		/* NOTREACHED */
2070 	}
2071 
2072 	if (error == ERESTART || error == 0) {
2073 		/* Clear the QFRZN flag as we will release the queue */
2074 		if (frozen != 0)
2075 			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
2076 
2077 		if (error == ERESTART && !on_held_queue)
2078 			xpt_action(ccb);
2079 
2080 		if (frozen != 0)
2081 			cam_release_devq(ccb->ccb_h.path,
2082 					 /*relsim_flags*/0,
2083 					 /*opening reduction*/0,
2084 					 /*timeout*/0,
2085 					 /*getcount_only*/0);
2086 	}
2087 	return (error);
2088 }
2089 
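/*
 * Allocate a command descriptor and its MAX_BUF_SIZE backing store.
 * Returns NULL if either allocation fails.
 */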
2090 static struct targ_cmd_desc*
2091 allocdescr(void)
2092 {
2093 	struct targ_cmd_desc* descr;
2094 
2095 	/* Allocate the targ_descr structure */
2096 	descr = (struct targ_cmd_desc *)malloc(sizeof(*descr),
2097 					       M_DEVBUF, M_NOWAIT);
2098 	if (descr == NULL)
2099 		return (NULL);
2100 
2101 	bzero(descr, sizeof(*descr));
2102 
2103 	/* Allocate buffer backing store */
2104 	descr->backing_store = malloc(MAX_BUF_SIZE, M_DEVBUF, M_NOWAIT);
2105 	if (descr->backing_store == NULL) {
2106 		free(descr, M_DEVBUF);
2107 		return (NULL);
2108 	}
2109 	descr->max_size = MAX_BUF_SIZE;
2110 	return (descr);
2111 }
2112 
2113 static void
2114 freedescr(struct targ_cmd_desc *descr)
2115 {
2116 	free(descr->backing_store, M_DEVBUF);
2117 	free(descr, M_DEVBUF);
2118 }
2119 
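/*
 * Build fixed-format sense data for the given initiator in its
 * per-initiator state.  The additional sense length is derived from
 * the structure layout so that the ASC and ASCQ fields are covered.
 */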
2120 static void
2121 fill_sense(struct targ_softc *softc, u_int initiator_id, u_int error_code,
2122 	   u_int sense_key, u_int asc, u_int ascq)
2123 {
2124 	struct initiator_state *istate;
2125 	struct scsi_sense_data *sense;
2126 
2127 	istate = &softc->istate[initiator_id];
2128 	sense = &istate->sense_data;
2129 	bzero(sense, sizeof(*sense));
2130 	sense->error_code = error_code;
2131 	sense->flags = sense_key;
2132 	sense->add_sense_code = asc;
2133 	sense->add_sense_code_qual = ascq;
2134 
2135 	sense->extra_len = offsetof(struct scsi_sense_data, fru)
2136 			 - offsetof(struct scsi_sense_data, extra_len);
2137 }
2138 
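/*
 * Save autosense data returned by the SIM into the initiator's sense
 * buffer, truncating to whichever of the two buffers is smaller.
 */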
2139 static void
2140 copy_sense(struct targ_softc *softc, struct initiator_state *istate,
2141 	   u_int8_t *sense_buffer, size_t sense_len)
2142 {
2143 	struct scsi_sense_data *sense;
2144 	size_t copylen;
2145 
2146 	sense = &istate->sense_data;
2147 	copylen = sizeof(*sense);
2148 	if (copylen > sense_len)
2149 		copylen = sense_len;
2150 	bcopy(sense_buffer, sense, copylen);
2151 }
2152 
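/*
 * Set a pending unit attention condition for a single initiator, or
 * for all initiators when CAM_TARGET_WILDCARD is given.
 */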
2153 static void
2154 set_unit_attention_cond(struct cam_periph *periph,
2155 			u_int initiator_id, ua_types ua)
2156 {
2157 	int start;
2158 	int end;
2159 	struct targ_softc *softc;
2160 
2161 	softc = (struct targ_softc *)periph->softc;
2162 	if (initiator_id == CAM_TARGET_WILDCARD) {
2163 		start = 0;
2164 		end = MAX_INITIATORS - 1;
2165 	} else
2166 		start = end = initiator_id;
2167 
2168 	while (start <= end) {
2169 		softc->istate[start].pending_ua = ua;
2170 		start++;
2171 	}
2172 }
2173 
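/*
 * Record a pending contingent allegiance condition for an initiator
 * and redirect that initiator's in-flight transactions to the held
 * queue until the condition is cleared.
 */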
2174 static void
2175 set_ca_condition(struct cam_periph *periph, u_int initiator_id, ca_types ca)
2176 {
2177 	struct targ_softc *softc;
2178 
2179 	softc = (struct targ_softc *)periph->softc;
2180 	softc->istate[initiator_id].pending_ca = ca;
2181 	abort_pending_transactions(periph, initiator_id, TARG_TAG_WILDCARD,
2182 				   /*errno*/0, /*to_held_queue*/TRUE);
2183 }
2184 
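/*
 * Abort queued ATIOs and in-flight CTIOs that match the given
 * initiator and tag (both accept wildcards).  Matching ATIOs have any
 * attached buffer completed with an error and are either returned to
 * the SIM or, on teardown, freed.  Matching CTIOs are aborted with an
 * XPT_ABORT request and, if requested, flagged to land on the held
 * queue once the abort completes.
 */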
2185 static void
2186 abort_pending_transactions(struct cam_periph *periph, u_int initiator_id,
2187 			   u_int tag_id, int errno, int to_held_queue)
2188 {
2189 	struct ccb_abort cab;
2190 	struct ccb_queue *atio_queues[3];
2191 	struct targ_softc *softc;
2192 	struct ccb_hdr *ccbh;
2193 	u_int i;
2194 
2195 	softc = (struct targ_softc *)periph->softc;
2196 
2197 	atio_queues[0] = &softc->work_queue;
2198 	atio_queues[1] = &softc->snd_ccb_queue;
2199 	atio_queues[2] = &softc->rcv_ccb_queue;
2200 
2201 	/* First address the ATIOs awaiting resources */
2202 	for (i = 0; i < (sizeof(atio_queues) / sizeof(*atio_queues)); i++) {
2203 		struct ccb_queue *atio_queue;
2204 
2205 		if (to_held_queue) {
2206 			/*
2207 			 * The device queue is frozen anyway, so there
2208 			 * is nothing for us to do.
2209 			 */
2210 			continue;
2211 		}
2212 		atio_queue = atio_queues[i];
2213 		ccbh = TAILQ_FIRST(atio_queue);
2214 		while (ccbh != NULL) {
2215 			struct ccb_accept_tio *atio;
2216 			struct targ_cmd_desc *desc;
2217 
2218 			atio = (struct ccb_accept_tio *)ccbh;
2219 			desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
2220 			ccbh = TAILQ_NEXT(ccbh, periph_links.tqe);
2221 
2222 			/* Only abort the CCBs that match */
2223 			if ((atio->init_id != initiator_id
2224 			  && initiator_id != CAM_TARGET_WILDCARD)
2225 			 || (tag_id != TARG_TAG_WILDCARD
2226 			  && ((atio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
2227 			   || atio->tag_id != tag_id)))
2228 				continue;
2229 
2230 			TAILQ_REMOVE(atio_queue, &atio->ccb_h,
2231 				     periph_links.tqe);
2232 
2233 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
2234 				  ("Aborting ATIO\n"));
2235 			if (desc->bp != NULL) {
2236 				desc->bp->bio_flags |= BIO_ERROR;
2237 				if (softc->state != TARG_STATE_TEARDOWN)
2238 					desc->bp->bio_error = errno;
2239 				else
2240 					desc->bp->bio_error = ENXIO;
2241 				biodone(desc->bp);
2242 				desc->bp = NULL;
2243 			}
2244 			if (softc->state == TARG_STATE_TEARDOWN) {
2245 				freedescr(desc);
2246 				free(atio, M_DEVBUF);
2247 			} else {
2248 				/* Return the ATIO to the controller */
2249 				atio->ccb_h.ccb_flags = TARG_CCB_NONE;
2250 				xpt_action((union ccb *)atio);
2251 			}
2252 		}
2253 	}
2254 
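	/* Now address the CTIOs that are already in flight at the SIM. */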
2255 	ccbh = TAILQ_FIRST(&softc->pending_queue);
2256 	while (ccbh != NULL) {
2257 		struct ccb_scsiio *csio;
2258 
2259 		csio = (struct ccb_scsiio *)ccbh;
2260 		ccbh = TAILQ_NEXT(ccbh, periph_links.tqe);
2261 
2262 		/* Only abort the CCBs that match */
2263 		if ((csio->init_id != initiator_id
2264 		  && initiator_id != CAM_TARGET_WILDCARD)
2265 		 || (tag_id != TARG_TAG_WILDCARD
2266 		  && ((csio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
2267 		   || csio->tag_id != tag_id)))
2268 			continue;
2269 
2270 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
2271 			  ("Aborting CTIO\n"));
2272 
2273 		TAILQ_REMOVE(&softc->pending_queue, &csio->ccb_h,
2274 			     periph_links.tqe);
2275 
2276 		if (to_held_queue != 0)
2277 			csio->ccb_h.ccb_flags |= TARG_CCB_ABORT_TO_HELDQ;
2278 		xpt_setup_ccb(&cab.ccb_h, csio->ccb_h.path, /*priority*/1);
		cab.ccb_h.func_code = XPT_ABORT;
2279 		cab.abort_ccb = (union ccb *)csio;
2280 		xpt_action((union ccb *)&cab);
2281 		if (cab.ccb_h.status != CAM_REQ_CMP) {
2282 			xpt_print_path(cab.ccb_h.path);
2283 			printf("Unable to abort CCB.  Status %x\n",
2284 			       cab.ccb_h.status);
2285 		}
2286 	}
2287 }
2288