xref: /freebsd/sys/cam/scsi/scsi_target.c (revision 0640d357f29fb1c0daaaffadd0416c5981413afd)
1 /*
2  * Implementation of a simple Target Mode SCSI Proccessor Target driver for CAM.
3  *
4  * Copyright (c) 1998 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  *      $Id: scsi_target.c,v 1.2 1998/09/15 22:05:42 gibbs Exp $
29  */
30 #include <stddef.h>	/* For offsetof */
31 
32 #include <sys/param.h>
33 #include <sys/queue.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/types.h>
37 #include <sys/buf.h>
38 #include <sys/conf.h>
39 #include <sys/devicestat.h>
40 #include <sys/malloc.h>
41 #include <sys/poll.h>
42 #include <sys/select.h>	/* For struct selinfo. */
43 #include <sys/uio.h>
44 
45 #include <cam/cam.h>
46 #include <cam/cam_ccb.h>
47 #include <cam/cam_extend.h>
48 #include <cam/cam_periph.h>
49 #include <cam/cam_xpt_periph.h>
50 #include <cam/cam_debug.h>
51 
52 #include <cam/scsi/scsi_all.h>
53 #include <cam/scsi/scsi_pt.h>
54 #include <cam/scsi/scsi_targetio.h>
55 #include <cam/scsi/scsi_message.h>
56 
/* Overall driver state for a target instance. */
typedef enum {
	TARG_STATE_NORMAL,
	TARG_STATE_EXCEPTION,
	TARG_STATE_TEARDOWN
} targ_state;

/* Per-instance flag bits: EOF markers for the send/receive paths. */
typedef enum {
	TARG_FLAG_NONE		 = 0x00,
	TARG_FLAG_SEND_EOF	 = 0x01,
	TARG_FLAG_RECEIVE_EOF	 = 0x02
} targ_flags;

/* Discriminates CCBs on our private lists (stored in ccb_type). */
typedef enum {
	TARG_CCB_WORKQ,
	TARG_CCB_WAITING
} targ_ccb_types;

#define MAX_ACCEPT	16
#define MAX_IMMEDIATE	16
#define MAX_BUF_SIZE	256	/* Max inquiry/sense/mode page transfer */
#define MAX_INITIATORS	16	/* XXX More for Fibre-Channel */

/*
 * Fully parenthesize the macro arguments so expressions such as
 * MIN(a + b, c) expand correctly.  (Arguments are still evaluated
 * twice; avoid side effects in the operands.)
 */
#define MIN(a, b) (((a) > (b)) ? (b) : (a))

/* Offsets into our private CCB area for storing accept information */
#define ccb_type	ppriv_field0
#define ccb_descr	ppriv_ptr1

/* We stick a pointer to the originating accept TIO in each continue I/O CCB */
#define ccb_atio	ppriv_ptr1

TAILQ_HEAD(ccb_queue, ccb_hdr);
89 
/*
 * Per-instance driver state, hung off periph->softc by targctor().
 */
struct targ_softc {
	struct		ccb_queue pending_queue;  /* ATIOs whose CTIO is in flight */
	struct		ccb_queue work_queue;     /* ATIOs paired with a buffer, awaiting a CTIO */
	struct		ccb_queue snd_ccb_queue;  /* SEND ATIOs waiting for user read(2) data */
	struct		ccb_queue rcv_ccb_queue;  /* RECEIVE ATIOs waiting for user write(2) data */
	struct		ccb_queue unknown_atio_queue; /* ATIOs with CDBs we punt to the user */
	struct		buf_queue_head snd_buf_queue; /* buffers queued by targread/targstrategy */
	struct		buf_queue_head rcv_buf_queue; /* buffers queued by targwrite/targstrategy */
	struct		devstat device_stats;
	struct		selinfo snd_select;       /* poll(2) waiters for the SEND side */
	struct		selinfo rcv_select;       /* poll(2) waiters for the RECEIVE side */
	targ_state	state;                    /* NORMAL, EXCEPTION, or TEARDOWN */
	targ_flags	flags;                    /* EOF markers */
	targ_exception	exceptions;               /* pending exception bits for the user */
	u_int		init_level;               /* how far targctor() got; drives targdtor() */
	u_int		inq_data_len;
	struct		scsi_inquiry_data *inq_data; /* canned INQUIRY response */
	struct		initiator_state istate[MAX_INITIATORS]; /* per-initiator CA/UA state */
};

/*
 * Bookkeeping attached to each accept TIO through the ccb_descr
 * private CCB field.
 */
struct targ_cmd_desc {
	SLIST_ENTRY(targ_cmd_desc) links;
	u_int	  data_resid;	/* How much left to transfer */
	u_int	  data_increment;/* Amount to send before next disconnect */
	void*	  data;		/* The data. Can be from backing_store or not */
	void*	  backing_store;/* Backing store allocated for this descriptor*/
	struct	  buf *bp;	/* Buffer for this transfer */
	u_int	  max_size;	/* Size of backing_store */
	u_int32_t timeout;	/* passed to cam_fill_ctio for the CTIO */
	u_int8_t  status;	/* Status to return to initiator */
};
121 
/* Forward declarations for the character device entry points. */
static	d_open_t	targopen;
static	d_close_t	targclose;
static	d_read_t	targread;
static	d_write_t	targwrite;
static	d_ioctl_t	targioctl;
static	d_poll_t	targpoll;
static	d_strategy_t	targstrategy;

/*
 * Character device switch table.  Registered from targinit() once the
 * master async callback has been installed.
 */
#define TARG_CDEV_MAJOR	65
static struct cdevsw targ_cdevsw = {
	/*d_open*/	targopen,
	/*d_close*/	targclose,
	/*d_read*/	targread,
	/*d_write*/	targwrite,
	/*d_ioctl*/	targioctl,
	/*d_stop*/	nostop,
	/*d_reset*/	noreset,
	/*d_devtotty*/	nodevtotty,
	/*d_poll*/	targpoll,
	/*d_mmap*/	nommap,
	/*d_strategy*/	targstrategy,
	/*d_name*/	"targ",
	/*d_spare*/	NULL,
	/*d_maj*/	-1,
	/*d_dump*/	nodump,
	/*d_psize*/	nopsize,
	/*d_flags*/	0,
	/*d_maxio*/	0,
	/*b_maj*/	-1
};
152 
/* Internal function prototypes. */
static int		targsendccb(struct cam_periph *periph, union ccb *ccb,
				    union ccb *inccb);
static periph_init_t	targinit;
static void		targasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static periph_ctor_t	targctor;
static periph_dtor_t	targdtor;
static void		targrunqueue(struct cam_periph *periph,
				     struct targ_softc *softc);
static periph_start_t	targstart;
static void		targdone(struct cam_periph *periph,
				 union ccb *done_ccb);
static void		targfireexception(struct cam_periph *periph,
					  struct targ_softc *softc);
static  int		targerror(union ccb *ccb, u_int32_t cam_flags,
				  u_int32_t sense_flags);
static struct targ_cmd_desc*	allocdescr(void);
static void		freedescr(struct targ_cmd_desc *buf);
static void		fill_sense(struct scsi_sense_data *sense,
				   u_int error_code, u_int sense_key,
				   u_int asc, u_int ascq);

/* CAM peripheral driver registration record. */
static struct periph_driver targdriver =
{
	targinit, "targ",
	TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
};

DATA_SET(periphdriver_set, targdriver);

/* Maps device minor numbers to cam_periph instances. */
static struct extend_array *targperiphs;
184 
185 static void
186 targinit(void)
187 {
188 	cam_status status;
189 	struct cam_path *path;
190 
191 	/*
192 	 * Create our extend array for storing the devices we attach to.
193 	 */
194 	targperiphs = cam_extend_new();
195 	if (targperiphs == NULL) {
196 		printf("targ: Failed to alloc extend array!\n");
197 		return;
198 	}
199 
200 	/*
201 	 * Install a global async callback.  This callback will
202 	 * receive async callbacks like "new path registered".
203 	 */
204 	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
205 				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
206 
207 	if (status == CAM_REQ_CMP) {
208 		struct ccb_setasync csa;
209 
210 		xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
211 		csa.ccb_h.func_code = XPT_SASYNC_CB;
212 		csa.event_enable = AC_PATH_REGISTERED;
213 		csa.callback = targasync;
214 		csa.callback_arg = NULL;
215 		xpt_action((union ccb *)&csa);
216 		status = csa.ccb_h.status;
217 		xpt_free_path(path);
218         }
219 
220 	if (status != CAM_REQ_CMP) {
221 		printf("targ: Failed to attach master async callback "
222 		       "due to status 0x%x!\n", status);
223 	} else {
224 		/* If we were successfull, register our devsw */
225 		dev_t dev;
226 
227 		dev = makedev(TARG_CDEV_MAJOR, 0);
228 		cdevsw_add(&dev,&targ_cdevsw, NULL);
229 	}
230 }
231 
/*
 * Async event callback.  Registered globally from targinit() for
 * AC_PATH_REGISTERED, and passed again to cam_periph_alloc() for
 * per-periph events.
 *
 * NOTE(review): the global registration in targinit() passes a NULL
 * callback_arg, so `periph' may be NULL here; the AC_PATH_REGISTERED
 * arm does not dereference it, but any future arm must check first.
 */
static void
targasync(void *callback_arg, u_int32_t code,
	  struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_PATH_REGISTERED:
	{
		struct ccb_pathinq *cpi;
		struct cam_path *new_path;
		cam_status status;

		cpi = (struct ccb_pathinq *)arg;

		/* Only attach to controllers that support target mode */
		if ((cpi->target_sprt & PIT_PROCESSOR) == 0)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this target instance.
		 */
		status = xpt_create_path(&new_path, NULL,
					 xpt_path_path_id(path),
					 cpi->initiator_id, /*lun*/0);
		if (status != CAM_REQ_CMP) {
			printf("targasync: Unable to create path "
				"due to status 0x%x\n", status);
			break;
		}
		/*
		 * NOTE(review): new_path is freed immediately after the
		 * alloc call — this assumes cam_periph_alloc() duplicates
		 * the path; verify against the CAM periph API.
		 */
		status = cam_periph_alloc(targctor, NULL, targdtor, targstart,
					  "targ", CAM_PERIPH_BIO,
					  new_path, targasync,
					  AC_PATH_REGISTERED,
					  cpi);
		xpt_free_path(new_path);
		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("targasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		break;
	}
	case AC_PATH_DEREGISTERED:
	{
		/* XXX Implement */
		break;
	}
	case AC_BUS_RESET:
	{
		/* Flush transaction queue */
		/* FALLTHROUGH */
	}
	default:
		break;
	}
}
289 
290 static cam_status
291 targctor(struct cam_periph *periph, void *arg)
292 {
293 	union ccb immed_ccb;
294 	struct targ_softc *softc;
295 	cam_status status;
296 	int i;
297 
298 	/* Allocate our per-instance private storage */
299 	softc = (struct targ_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);
300 	if (softc == NULL) {
301 		printf("targctor: unable to malloc softc\n");
302 		return (CAM_REQ_CMP_ERR);
303 	}
304 
305 	bzero(softc, sizeof(softc));
306 	TAILQ_INIT(&softc->pending_queue);
307 	TAILQ_INIT(&softc->work_queue);
308 	TAILQ_INIT(&softc->snd_ccb_queue);
309 	TAILQ_INIT(&softc->rcv_ccb_queue);
310 	TAILQ_INIT(&softc->unknown_atio_queue);
311 	bufq_init(&softc->snd_buf_queue);
312 	bufq_init(&softc->rcv_buf_queue);
313 	softc->state = TARG_STATE_NORMAL;
314 	periph->softc = softc;
315 	softc->init_level++;
316 
317 	cam_extend_set(targperiphs, periph->unit_number, periph);
318 
319 	/*
320 	 * We start out life with a UA to indicate power-on/reset.
321 	 */
322 	for (i = 0; i < MAX_INITIATORS; i++)
323 		softc->istate[i].pending_ua = UA_POWER_ON;
324 
325 	/*
326 	 * Allocate an initial inquiry data buffer.  We might allow the
327 	 * user to override this later via an ioctl.
328 	 */
329 	softc->inq_data_len = sizeof(*softc->inq_data);
330 	softc->inq_data = malloc(softc->inq_data_len, M_DEVBUF, M_NOWAIT);
331 	if (softc->inq_data == NULL) {
332 		printf("targctor - Unable to malloc inquiry data\n");
333 		targdtor(periph);
334 	}
335 	bzero(softc->inq_data, softc->inq_data_len);
336 	softc->inq_data->device = T_PROCESSOR | (SID_QUAL_LU_CONNECTED << 5);
337 	softc->inq_data->version = 2;
338 	softc->inq_data->response_format = 2; /* SCSI2 Inquiry Format */
339 	softc->inq_data->additional_length = softc->inq_data_len - 4;
340 	strncpy(softc->inq_data->vendor, "FreeBSD ", SID_VENDOR_SIZE);
341 	strncpy(softc->inq_data->product, "TM-PT           ", SID_PRODUCT_SIZE);
342 	strncpy(softc->inq_data->revision, "0.0 ", SID_REVISION_SIZE);
343 	softc->init_level++;
344 
345 	/* Attempt to enable the lun of interrest */
346 	xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, /*priority*/1);
347 	immed_ccb.ccb_h.func_code = XPT_EN_LUN;
348 
349 	/* Don't need support for any vendor specific commands */
350 	immed_ccb.cel.grp6_len = 0;
351 	immed_ccb.cel.grp7_len = 0;
352 	immed_ccb.cel.enable = 1;
353 	xpt_action(&immed_ccb);
354 	status = immed_ccb.ccb_h.status;
355 
356 	if (status != CAM_REQ_CMP) {
357 		xpt_print_path(periph->path);
358 		printf("targctor - Enable Lun Rejected for status 0x%x\n",
359 		       status);
360 		targdtor(periph);
361 		return (status);
362 	}
363 
364 	softc->init_level++;
365 
366 	/*
367 	 * Build up a buffer of accept target I/O
368 	 * operations for incoming selections.
369 	 */
370 	for (i = 0; i < MAX_ACCEPT; i++) {
371 		struct ccb_accept_tio *atio;
372 
373 		atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_DEVBUF,
374 						      M_NOWAIT);
375 		if (atio == NULL) {
376 			status = CAM_RESRC_UNAVAIL;
377 			break;
378 		}
379 
380 		atio->ccb_h.ccb_descr = allocdescr();
381 
382 		if (atio->ccb_h.ccb_descr == NULL) {
383 			free(atio, M_DEVBUF);
384 			status = CAM_RESRC_UNAVAIL;
385 			break;
386 		}
387 
388 		xpt_setup_ccb(&atio->ccb_h, periph->path, /*priority*/1);
389 		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
390 		atio->ccb_h.cbfcnp = targdone;
391 		xpt_action((union ccb *)atio);
392 		status = atio->ccb_h.status;
393 		if (status != CAM_REQ_INPROG) {
394 			free(atio, M_DEVBUF);
395 			break;
396 		}
397 	}
398 
399 	if (i == 0) {
400 		xpt_print_path(periph->path);
401 		printf("targctor - Could not allocate accept tio CCBs: "
402 		       "status = 0x%x\n", status);
403 		targdtor(periph);
404 		return (CAM_REQ_CMP_ERR);
405 	}
406 
407 	/*
408 	 * Build up a buffer of immediate notify CCBs
409 	 * so the SIM can tell us of asynchronous target mode events.
410 	 */
411 	for (i = 0; i < MAX_ACCEPT; i++) {
412 		struct ccb_immed_notify *inot;
413 
414 		inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_DEVBUF,
415 						        M_NOWAIT);
416 
417 		if (inot == NULL) {
418 			status = CAM_RESRC_UNAVAIL;
419 			break;
420 		}
421 
422 		xpt_setup_ccb(&inot->ccb_h, periph->path, /*priority*/1);
423 		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
424 		inot->ccb_h.cbfcnp = targdone;
425 		xpt_action((union ccb *)inot);
426 		status = inot->ccb_h.status;
427 		if (status != CAM_REQ_INPROG) {
428 			free(inot, M_DEVBUF);
429 			break;
430 		}
431 	}
432 
433 	if (i == 0) {
434 		xpt_print_path(periph->path);
435 		printf("targctor - Could not allocate immediate notify CCBs: "
436 		       "status = 0x%x\n", status);
437 		targdtor(periph);
438 		return (CAM_REQ_CMP_ERR);
439 	}
440 
441 	return (CAM_REQ_CMP);
442 }
443 
/*
 * Instance destructor.  Also used by targctor() to unwind a partial
 * construction: softc->init_level records how far construction got,
 * and each switch case below falls through to undo earlier stages.
 * Level 1 = softc allocated, 2 = inq_data allocated, 3 = lun enabled.
 */
static void
targdtor(struct cam_periph *periph)
{
	struct targ_softc *softc;

	softc = (struct targ_softc *)periph->softc;

	/* Make completion paths (e.g. targdone) free resources and bail. */
	softc->state = TARG_STATE_TEARDOWN;

	switch (softc->init_level) {
	default:
		/* FALLTHROUGH */
	case 3:
	{
		struct ccb_en_lun cel;
		/*
		 * XXX Spec requires abort of all ACCEPT and
		 * IMMEDIATE CCBS first.  Act accordingly.
		 */
		/*
		 * Disable this lun.
		 */
		xpt_setup_ccb(&cel.ccb_h, periph->path, /*priority*/1);
		cel.ccb_h.func_code = XPT_EN_LUN;
		cel.enable = 0;
		xpt_action((union ccb *)&cel);
		/* FALLTHROUGH */
	}
	case 2:
		free(softc->inq_data, M_DEVBUF);
		/* FALLTHROUGH */
	case 1:
		free(softc, M_DEVBUF);
		break;
	case 0:
		/* init_level is bumped to 1 before this can be called. */
		panic("targdtor - impossible init level");;
	}
}
482 
483 static int
484 targopen(dev_t dev, int flags, int fmt, struct proc *p)
485 {
486 	struct cam_periph *periph;
487 	struct targ_softc *softc;
488 	u_int  unit;
489 	int    s;
490 
491 	unit = minor(dev);
492 	periph = cam_extend_get(targperiphs, unit);
493 	if (periph == NULL)
494 		return (ENXIO);
495 	softc = (struct targ_softc *)periph->softc;
496 
497 	return (0);
498 }
499 
500 static int
501 targclose(dev_t dev, int flag, int fmt, struct proc *p)
502 {
503 	struct cam_periph *periph;
504 	struct targ_softc *softc;
505 	u_int  unit;
506 	int    s;
507 
508 	unit = minor(dev);
509 	periph = cam_extend_get(targperiphs, unit);
510 	if (periph == NULL)
511 		return (ENXIO);
512 	softc = (struct targ_softc *)periph->softc;
513 
514 	return (0);
515 }
516 
517 static int
518 targioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
519 {
520 	struct cam_periph *periph;
521 	struct targ_softc *softc;
522 	u_int  unit;
523 	int    error;
524 	int    s;
525 
526 	unit = minor(dev);
527 	periph = cam_extend_get(targperiphs, unit);
528 	if (periph == NULL)
529 		return (ENXIO);
530 	softc = (struct targ_softc *)periph->softc;
531 	error = 0;
532 	switch (cmd) {
533 	case TARGIOCFETCHEXCEPTION:
534 		*((targ_exception *)addr) = softc->exceptions;
535 		break;
536 	case TARGIOCCLEAREXCEPTION:
537 	{
538 		targ_exception clear_mask;
539 
540 		clear_mask = *((targ_exception *)addr);
541 		if ((clear_mask & TARG_EXCEPT_UNKNOWN_ATIO) != 0) {
542 			struct ccb_hdr *ccbh;
543 
544 			ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
545 			if (ccbh != NULL) {
546 				TAILQ_REMOVE(&softc->unknown_atio_queue,
547 					     ccbh, periph_links.tqe);
548 				ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
549 			}
550 			if (ccbh != NULL)
551 				clear_mask &= ~TARG_EXCEPT_UNKNOWN_ATIO;
552 		}
553 		softc->exceptions &= ~clear_mask;
554 		if (softc->exceptions == TARG_EXCEPT_NONE
555 		 && softc->state == TARG_STATE_EXCEPTION) {
556 			softc->state = TARG_STATE_NORMAL;
557 			targrunqueue(periph, softc);
558 		}
559 		break;
560 	}
561 	case TARGIOCFETCHATIO:
562 	{
563 		struct ccb_hdr *ccbh;
564 
565 		ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
566 		if (ccbh != NULL) {
567 			bcopy(ccbh, addr, sizeof(struct ccb_accept_tio));
568 		} else {
569 			error = ENOENT;
570 		}
571 		break;
572 	}
573 	case TARGIOCCOMMAND:
574 	{
575 		union ccb *inccb;
576 		union ccb *ccb;
577 
578 		/*
579 		 * XXX JGibbs
580 		 * This code is lifted directly from the pass-thru driver.
581 		 * Perhaps this should be moved to a library????
582 		 */
583 		inccb = (union ccb *)addr;
584 		ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority);
585 
586 		error = targsendccb(periph, ccb, inccb);
587 
588 		xpt_release_ccb(ccb);
589 
590 		break;
591 	}
592 	case TARGIOCGETISTATE:
593 	case TARGIOCSETISTATE:
594 	{
595 		struct ioc_initiator_state *ioc_istate;
596 
597 		ioc_istate = (struct ioc_initiator_state *)addr;
598 		if (ioc_istate->initiator_id > MAX_INITIATORS) {
599 			error = EINVAL;
600 			break;
601 		}
602 		xpt_print_path(periph->path);
603 		printf("GET/SETISTATE for %d\n", ioc_istate->initiator_id);
604 		if (cmd == TARGIOCGETISTATE) {
605 			bcopy(&softc->istate[ioc_istate->initiator_id],
606 			      &ioc_istate->istate, sizeof(ioc_istate->istate));
607 		} else {
608 			bcopy(&ioc_istate->istate,
609 			      &softc->istate[ioc_istate->initiator_id],
610 			      sizeof(ioc_istate->istate));
611 			xpt_print_path(periph->path);
612 			printf("pending_ca now %x\n",
613 			       softc->istate[ioc_istate->initiator_id].pending_ca);
614 		}
615 		break;
616 	}
617 	default:
618 		error = ENOTTY;
619 		break;
620 	}
621 	return (error);
622 }
623 
/*
 * XXX JGibbs lifted from pass-thru driver.
 * Generally, "ccb" should be the CCB supplied by the kernel.  "inccb"
 * should be the CCB that is copied in from the user.
 *
 * Merges the user's CCB into the kernel CCB, maps user data buffers
 * into kernel space if needed, runs the CCB synchronously, and copies
 * the completed CCB back to the user's buffer.  Returns 0 or an errno.
 */
static int
targsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
{
	struct buf *bp[2];	/* NOTE(review): never used below */
	struct targ_softc *softc;
	struct cam_periph_map_info mapinfo;
	int error, need_unmap;

	softc = (struct targ_softc *)periph->softc;

	need_unmap = 0;

	/*
	 * There are some fields in the CCB header that need to be
	 * preserved, the rest we get from the user.
	 */
	xpt_merge_ccb(ccb, inccb);

	/*
	 * There's no way for the user to have a completion
	 * function, so we put our own completion function in here.
	 */
	ccb->ccb_h.cbfcnp = targdone;

	/*
	 * We only attempt to map the user memory into kernel space
	 * if they haven't passed in a physical memory pointer,
	 * and if there is actually an I/O operation to perform.
	 * Right now cam_periph_mapmem() only supports SCSI and device
	 * match CCBs.  For the SCSI CCBs, we only pass the CCB in if
	 * there's actually data to map.  cam_periph_mapmem() will do the
	 * right thing, even if there isn't data to map, but since CCBs
	 * without data are a reasonably common occurance (e.g. test unit
	 * ready), it will save a few cycles if we check for it here.
	 */
	if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0)
	 && (((ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
	    && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
	  || (ccb->ccb_h.func_code == XPT_DEV_MATCH))) {

		bzero(&mapinfo, sizeof(mapinfo));

		error = cam_periph_mapmem(ccb, &mapinfo);

		/*
		 * cam_periph_mapmem returned an error, we can't continue.
		 * Return the error to the user.
		 */
		if (error)
			return(error);

		/*
		 * We successfully mapped the memory in, so we need to
		 * unmap it when the transaction is done.
		 */
		need_unmap = 1;
	}

	/*
	 * If the user wants us to perform any error recovery, then honor
	 * that request.  Otherwise, it's up to the user to perform any
	 * error recovery.
	 */
	error = cam_periph_runccb(ccb,
				  (ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) ?
				  targerror : NULL,
				  /* cam_flags */ 0,
				  /* sense_flags */SF_RETRY_UA,
				  &softc->device_stats);

	if (need_unmap != 0)
		cam_periph_unmapmem(ccb, &mapinfo);

	/*
	 * Clear our private completion function, restore the user's
	 * private area, and copy the finished CCB back out.
	 */
	ccb->ccb_h.cbfcnp = NULL;
	ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
	bcopy(ccb, inccb, sizeof(union ccb));

	return(error);
}
708 
709 
710 static int
711 targpoll(dev_t dev, int poll_events, struct proc *p)
712 {
713 	struct cam_periph *periph;
714 	struct targ_softc *softc;
715 	u_int  unit;
716 	int    revents;
717 	int    s;
718 
719 	unit = minor(dev);
720 	periph = cam_extend_get(targperiphs, unit);
721 	if (periph == NULL)
722 		return (ENXIO);
723 	softc = (struct targ_softc *)periph->softc;
724 
725 	revents = 0;
726 	s = splcam();
727 	if ((poll_events & (POLLOUT | POLLWRNORM)) != 0) {
728 		if (TAILQ_FIRST(&softc->rcv_ccb_queue) != NULL
729 		 && bufq_first(&softc->rcv_buf_queue) == NULL)
730 			revents |= poll_events & (POLLOUT | POLLWRNORM);
731 	}
732 	if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
733 		if (TAILQ_FIRST(&softc->snd_ccb_queue) != NULL
734 		 && bufq_first(&softc->snd_buf_queue) == NULL)
735 			revents |= poll_events & (POLLIN | POLLRDNORM);
736 	}
737 
738 	if (softc->state != TARG_STATE_NORMAL)
739 		revents |= POLLERR;
740 
741 	if (revents == 0) {
742 		if (poll_events & (POLLOUT | POLLWRNORM))
743 			selrecord(p, &softc->rcv_select);
744 		if (poll_events & (POLLIN | POLLRDNORM))
745 			selrecord(p, &softc->snd_select);
746 	}
747 	splx(s);
748 	return (revents);
749 }
750 
751 static int
752 targread(dev_t dev, struct uio *uio, int ioflag)
753 {
754 	if (uio->uio_iovcnt == 0
755 	 || uio->uio_iov->iov_len == 0) {
756 		/* EOF */
757 		struct cam_periph *periph;
758 		struct targ_softc *softc;
759 		u_int  unit;
760 		int    s;
761 
762 		s = splcam();
763 		unit = minor(dev);
764 		periph = cam_extend_get(targperiphs, unit);
765 		if (periph == NULL)
766 			return (ENXIO);
767 		softc = (struct targ_softc *)periph->softc;
768 		softc->flags |= TARG_FLAG_SEND_EOF;
769 		splx(s);
770 		targrunqueue(periph, softc);
771 		return (0);
772 	}
773 	return(physio(targstrategy, NULL, dev, 1, minphys, uio));
774 }
775 
776 static int
777 targwrite(dev_t dev, struct uio *uio, int ioflag)
778 {
779 	if (uio->uio_iovcnt == 0
780 	 || uio->uio_iov->iov_len == 0) {
781 		/* EOF */
782 		struct cam_periph *periph;
783 		struct targ_softc *softc;
784 		u_int  unit;
785 		int    s;
786 
787 		s = splcam();
788 		unit = minor(dev);
789 		periph = cam_extend_get(targperiphs, unit);
790 		if (periph == NULL)
791 			return (ENXIO);
792 		softc = (struct targ_softc *)periph->softc;
793 		softc->flags |= TARG_FLAG_RECEIVE_EOF;
794 		splx(s);
795 		targrunqueue(periph, softc);
796 		return (0);
797 	}
798 	return(physio(targstrategy, NULL, dev, 0, minphys, uio));
799 }
800 
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
targstrategy(struct buf *bp)
{
	struct cam_periph *periph;
	struct targ_softc *softc;
	u_int  unit;
	int    s;

	unit = minor(bp->b_dev);
	periph = cam_extend_get(targperiphs, unit);
	if (periph == NULL) {
		bp->b_error = ENXIO;
		goto bad;
	}
	softc = (struct targ_softc *)periph->softc;

	/*
	 * Mask interrupts so that the device cannot be invalidated until
	 * after we are in the queue.  Otherwise, we might not properly
	 * clean up one of the buffers.
	 */
	s = splbio();

	/*
	 * If there is an exception pending, error out
	 */
	if (softc->state != TARG_STATE_NORMAL) {
		splx(s);
		/* EBUSY: recoverable exception; ENXIO: device is gone. */
		if (softc->state == TARG_STATE_EXCEPTION
		 && (softc->exceptions & TARG_EXCEPT_DEVICE_INVALID) == 0)
			bp->b_error = EBUSY;
		else
			bp->b_error = ENXIO;
		goto bad;
	}

	/*
	 * Place it in the queue of buffers available for either
	 * SEND or RECEIVE commands.
	 *
	 */
	bp->b_resid = bp->b_bcount;
	if ((bp->b_flags & B_READ) != 0) {
		/* read(2) path: data flows to the initiator via SEND. */
		xpt_print_path(periph->path);
		printf("Queued a SEND buffer\n");
		bufq_insert_tail(&softc->snd_buf_queue, bp);
	} else {
		/* write(2) path: data arrives from the initiator via RECEIVE. */
		xpt_print_path(periph->path);
		printf("Queued a RECEIVE buffer\n");
		bufq_insert_tail(&softc->rcv_buf_queue, bp);
	}

	splx(s);

	/*
	 * Attempt to use the new buffer to service any pending
	 * target commands.
	 */
	targrunqueue(periph, softc);

	return;
bad:
	bp->b_flags |= B_ERROR;

	/*
	 * Correctly set the buf to indicate a completed xfer
	 */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}
876 
/*
 * Pair a queued user buffer (or a pending EOF flag) with a queued
 * ATIO on the matching side, move the result onto the work queue,
 * and schedule the periph so targstart() can issue the CTIO.  Only
 * one request is run at a time to maintain data ordering.
 */
static void
targrunqueue(struct cam_periph *periph, struct targ_softc *softc)
{
	struct  ccb_queue *pending_queue;
	struct	ccb_accept_tio *atio;
	struct	buf_queue_head *bufq;
	struct	buf *bp;
	struct	targ_cmd_desc *desc;
	struct	ccb_hdr *ccbh;
	int	added;	/* NOTE(review): never assigned or read */
	int	s;

	s = splbio();
	pending_queue = NULL;
	bufq = NULL;
	ccbh = NULL;
	/* Only run one request at a time to maintain data ordering. */
	if (softc->state != TARG_STATE_NORMAL
	 || TAILQ_FIRST(&softc->work_queue) != NULL
	 || TAILQ_FIRST(&softc->pending_queue) != NULL) {
		splx(s);
		return;
	}

	/* SEND side: a buffer (or EOF) and an ATIO must both be present. */
	if (((bp = bufq_first(&softc->snd_buf_queue)) != NULL
	  || (softc->flags & TARG_FLAG_SEND_EOF) != 0)
	 && (ccbh = TAILQ_FIRST(&softc->snd_ccb_queue)) != NULL) {

		if (bp == NULL)
			softc->flags &= ~TARG_FLAG_SEND_EOF;
		else {
			xpt_print_path(periph->path);
			printf("De-Queued a SEND buffer %ld\n",
			       bp->b_bcount);
		}
		bufq = &softc->snd_buf_queue;
		pending_queue = &softc->snd_ccb_queue;
	} else if (((bp = bufq_first(&softc->rcv_buf_queue)) != NULL
	  	 || (softc->flags & TARG_FLAG_RECEIVE_EOF) != 0)
		&& (ccbh = TAILQ_FIRST(&softc->rcv_ccb_queue)) != NULL) {

		if (bp == NULL)
			softc->flags &= ~TARG_FLAG_RECEIVE_EOF;
		else {
			xpt_print_path(periph->path);
			printf("De-Queued a RECEIVE buffer %ld\n",
			       bp->b_bcount);
		}
		bufq = &softc->rcv_buf_queue;
		pending_queue = &softc->rcv_ccb_queue;
	}

	if (pending_queue != NULL) {
		/* Process a request */
		atio = (struct ccb_accept_tio *)ccbh;
		TAILQ_REMOVE(pending_queue, ccbh, periph_links.tqe);
		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
		desc->bp = bp;
		if (bp == NULL) {
			/* EOF: zero-length transfer, no data movement. */
			desc->data = NULL;
			desc->data_increment = 0;
			desc->data_resid = 0;
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_NONE;
		} else {
			/* Resume mid-buffer if a prior increment was done. */
			bufq_remove(bufq, bp);
			desc->data = &bp->b_data[bp->b_bcount - bp->b_resid];
			desc->data_increment =
			    MIN(desc->data_resid, bp->b_resid);
		}
		xpt_print_path(periph->path);
		/*
		 * NOTE(review): "%x" with an intptr_t argument is only
		 * correct where intptr_t is int-sized — verify/convert.
		 */
		printf("Buffer command: data %x: datacnt %d\n",
		       (intptr_t)desc->data, desc->data_increment);
		TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
				  periph_links.tqe);
	}
	if (TAILQ_FIRST(&softc->work_queue) != NULL) {
		splx(s);
		xpt_schedule(periph, /*XXX priority*/1);
	} else
		splx(s);
}
960 
/*
 * Periph start routine: the XPT hands us a CCB after xpt_schedule().
 * A pending TARGIOCCOMMAND ioctl (signalled via immediate_priority)
 * gets the CCB first; otherwise the head of the work queue is turned
 * into a continue-target-I/O and sent to the SIM.
 */
static void
targstart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct targ_softc *softc;
	struct ccb_hdr *ccbh;
	struct ccb_accept_tio *atio;
	struct targ_cmd_desc *desc;
	struct ccb_scsiio *csio;
	ccb_flags flags;
	int    s;

	softc = (struct targ_softc *)periph->softc;

	s = splbio();
	ccbh = TAILQ_FIRST(&softc->work_queue);
	if (periph->immediate_priority <= periph->pinfo.priority) {
		/* Hand the CCB to the waiting ioctl thread. */
		start_ccb->ccb_h.ccb_type = TARG_CCB_WAITING;
		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
				  periph_links.sle);
		periph->immediate_priority = CAM_PRIORITY_NONE;
		splx(s);
		wakeup(&periph->ccb_list);
	} else if (ccbh == NULL) {
		/* Nothing queued after all; return the CCB. */
		splx(s);
		xpt_release_ccb(start_ccb);
	} else {
		/* Move the ATIO from the work queue to the pending queue. */
		TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
		TAILQ_INSERT_HEAD(&softc->pending_queue, ccbh,
				  periph_links.tqe);
		splx(s);
		atio = (struct ccb_accept_tio*)ccbh;
		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;

		/* Is this a tagged request? */
		flags = atio->ccb_h.flags & (CAM_TAG_ACTION_VALID|CAM_DIR_MASK);

		/*
		 * If we are done with the transaction, tell the
		 * controller to send status and perform a CMD_CMPLT.
		 */
		if (desc->data_resid == desc->data_increment)
			flags |= CAM_SEND_STATUS;

		csio = &start_ccb->csio;
		cam_fill_ctio(csio,
			      /*retries*/2,
			      targdone,
			      flags,
			      /*tag_action*/MSG_SIMPLE_Q_TAG,
			      atio->tag_id,
			      atio->init_id,
			      desc->status,
			      /*data_ptr*/desc->data_increment == 0
					  ? NULL : desc->data,
			      /*dxfer_len*/desc->data_increment,
			      /*timeout*/desc->timeout);

		/* Remember the originating ATIO for targdone(). */
		start_ccb->ccb_h.ccb_type = TARG_CCB_WORKQ;
		start_ccb->ccb_h.ccb_atio = atio;
		xpt_print_path(periph->path);
		printf("Sending a CTIO\n");
		xpt_action(start_ccb);
		s = splbio();
		ccbh = TAILQ_FIRST(&softc->work_queue);
		splx(s);
	}
	/* If more work was queued while we ran, reschedule ourselves. */
	if (ccbh != NULL)
		targrunqueue(periph, softc);
}
1030 
1031 static void
1032 targdone(struct cam_periph *periph, union ccb *done_ccb)
1033 {
1034 	struct targ_softc *softc;
1035 
1036 	softc = (struct targ_softc *)periph->softc;
1037 
1038 	if (done_ccb->ccb_h.ccb_type == TARG_CCB_WAITING) {
1039 		/* Caller will release the CCB */
1040 		wakeup(&done_ccb->ccb_h.cbfcnp);
1041 		return;
1042 	}
1043 
1044 	switch (done_ccb->ccb_h.func_code) {
1045 	case XPT_ACCEPT_TARGET_IO:
1046 	{
1047 		struct ccb_accept_tio *atio;
1048 		struct targ_cmd_desc *descr;
1049 		struct initiator_state *istate;
1050 		u_int8_t *cdb;
1051 
1052 		atio = &done_ccb->atio;
1053 		descr = (struct targ_cmd_desc*)atio->ccb_h.ccb_descr;
1054 		istate = &softc->istate[atio->init_id];
1055 		cdb = atio->cdb_io.cdb_bytes;
1056 		if (softc->state == TARG_STATE_TEARDOWN) {
1057 			freedescr(descr);
1058 			free(done_ccb, M_DEVBUF);
1059 			return;
1060 		}
1061 
1062 		if (istate->pending_ca == 0
1063 		 && istate->pending_ua != 0
1064 		 && cdb[0] != INQUIRY) {
1065 			/* Pending UA, tell initiator */
1066 			/* Direction is always relative to the initator */
1067 			istate->pending_ca = CA_UNIT_ATTN;
1068 			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1069 			atio->ccb_h.flags |= CAM_DIR_NONE;
1070 			descr->data_resid = 0;
1071 			descr->data_increment = 0;
1072 			descr->timeout = 5 * 1000;
1073 			descr->status = SCSI_STATUS_CHECK_COND;
1074 		} else {
1075 			/*
1076 			 * Save the current CA and UA status so
1077 			 * they can be used by this command.
1078 			 */
1079 			ua_types pending_ua;
1080 			ca_types pending_ca;
1081 
1082 			pending_ua = istate->pending_ua;
1083 			pending_ca = istate->pending_ca;
1084 
1085 			/*
1086 			 * As per the SCSI2 spec, any command that occurs
1087 			 * after a CA is reported, clears the CA.  If the
1088 			 * command is not an inquiry, we are also supposed
1089 			 * to clear the UA condition, if any, that caused
1090 			 * the CA to occur assuming the UA is not a
1091 			 * persistant state.
1092 			 */
1093 			istate->pending_ca = CA_NONE;
1094 			if ((pending_ca
1095 			   & (CA_CMD_SENSE|CA_UNIT_ATTN)) == CA_UNIT_ATTN
1096 			 && cdb[0] != INQUIRY)
1097 				istate->pending_ua = UA_NONE;
1098 
1099 			/*
1100 			 * Determine the type of incoming command and
1101 			 * setup our buffer for a response.
1102 			 */
1103 			switch (cdb[0]) {
1104 			case INQUIRY:
1105 			{
1106 				struct scsi_inquiry *inq;
1107 				struct scsi_sense_data *sense;
1108 
1109 				inq = (struct scsi_inquiry *)cdb;
1110 				sense = &istate->sense_data;
1111 				xpt_print_path(periph->path);
1112 				printf("Saw an inquiry!\n");
1113 				/*
1114 				 * Validate the command.  We don't
1115 				 * support any VPD pages, so complain
1116 				 * if EVPD is set.
1117 				 */
1118 				if ((inq->byte2 & SI_EVPD) != 0
1119 				 || inq->page_code != 0) {
1120 					istate->pending_ca = CA_CMD_SENSE;
1121 					atio->ccb_h.flags &= ~CAM_DIR_MASK;
1122 					atio->ccb_h.flags |= CAM_DIR_NONE;
1123 					descr->data_resid = 0;
1124 					descr->data_increment = 0;
1125 					descr->status = SCSI_STATUS_CHECK_COND;
1126 					fill_sense(sense,
1127 						   SSD_CURRENT_ERROR,
1128 						   SSD_KEY_ILLEGAL_REQUEST,
1129 						   /*asc*/0x24, /*ascq*/0x00);
1130 					sense->extra_len =
1131 						offsetof(struct scsi_sense_data,
1132 							 extra_bytes)
1133 					      - offsetof(struct scsi_sense_data,
1134 							 extra_len);
1135 				}
1136 
1137 				if ((inq->byte2 & SI_EVPD) != 0) {
1138 					sense->sense_key_spec[0] =
1139 					    SSD_SCS_VALID|SSD_FIELDPTR_CMD
1140 					   |SSD_BITPTR_VALID| /*bit value*/1;
1141 					sense->sense_key_spec[1] = 0;
1142 					sense->sense_key_spec[2] =
1143 					    offsetof(struct scsi_inquiry,
1144 						     byte2);
1145 					break;
1146 				} else if (inq->page_code != 0) {
1147 					sense->sense_key_spec[0] =
1148 					    SSD_SCS_VALID|SSD_FIELDPTR_CMD;
1149 					sense->sense_key_spec[1] = 0;
1150 					sense->sense_key_spec[2] =
1151 					    offsetof(struct scsi_inquiry,
1152 						     page_code);
1153 					break;
1154 				}
1155 				/*
1156 				 * Direction is always relative
1157 				 * to the initator.
1158 				 */
1159 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1160 				atio->ccb_h.flags |= CAM_DIR_IN;
1161 				descr->data = softc->inq_data;
1162 				descr->data_resid = MIN(softc->inq_data_len,
1163 						       inq->length);
1164 				descr->data_increment = descr->data_resid;
1165 				descr->timeout = 5 * 1000;
1166 				descr->status = SCSI_STATUS_OK;
1167 				break;
1168 			}
1169 			case TEST_UNIT_READY:
1170 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1171 				atio->ccb_h.flags |= CAM_DIR_NONE;
1172 				descr->data_resid = 0;
1173 				descr->data_increment = 0;
1174 				descr->status = SCSI_STATUS_OK;
1175 				break;
1176 			case REQUEST_SENSE:
1177 			{
1178 				struct scsi_request_sense *rsense;
1179 				struct scsi_sense_data *sense;
1180 
1181 				rsense = (struct scsi_request_sense *)cdb;
1182 				sense = &istate->sense_data;
1183 				if (pending_ca == 0) {
1184 					fill_sense(sense, SSD_CURRENT_ERROR,
1185 						   SSD_KEY_NO_SENSE, 0x00,
1186 						   0x00);
1187 					xpt_print_path(periph->path);
1188 					printf("No pending CA!\n");
1189 				} else if (pending_ca == CA_UNIT_ATTN) {
1190 					u_int ascq;
1191 
1192 					if (pending_ua == UA_POWER_ON)
1193 						ascq = 0x1;
1194 					else
1195 						ascq = 0x2;
1196 					fill_sense(sense, SSD_CURRENT_ERROR,
1197 						   SSD_KEY_UNIT_ATTENTION,
1198 						   0x29, ascq);
1199 					xpt_print_path(periph->path);
1200 					printf("Pending UA!\n");
1201 				}
1202 				/*
1203 				 * Direction is always relative
1204 				 * to the initator.
1205 				 */
1206 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1207 				atio->ccb_h.flags |= CAM_DIR_IN;
1208 				descr->data = sense;
1209 				descr->data_resid =
1210 			 		offsetof(struct scsi_sense_data,
1211 						 extra_len)
1212 				      + sense->extra_len;
1213 				descr->data_resid = MIN(descr->data_resid,
1214 						       rsense->length);
1215 				descr->data_increment = descr->data_resid;
1216 				descr->timeout = 5 * 1000;
1217 				descr->status = SCSI_STATUS_OK;
1218 				break;
1219 			}
1220 			case RECEIVE:
1221 			case SEND:
1222 			{
1223 				struct scsi_send_receive *sr;
1224 
1225 				sr = (struct scsi_send_receive *)cdb;
1226 
1227 				/*
1228 				 * Direction is always relative
1229 				 * to the initator.
1230 				 */
1231 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1232 				if (cdb[0] == SEND) {
1233 					atio->ccb_h.flags |= CAM_DIR_OUT;
1234 					xpt_print_path(periph->path);
1235 					printf("Saw a SEND!\n");
1236 					atio->ccb_h.flags |= CAM_DIR_OUT;
1237 					TAILQ_INSERT_TAIL(&softc->snd_ccb_queue,
1238 							  &atio->ccb_h,
1239 							  periph_links.tqe);
1240 					selwakeup(&softc->snd_select);
1241 				} else {
1242 					atio->ccb_h.flags |= CAM_DIR_IN;
1243 					xpt_print_path(periph->path);
1244 					printf("Saw a RECEIVE!\n");
1245 					TAILQ_INSERT_TAIL(&softc->rcv_ccb_queue,
1246 							  &atio->ccb_h,
1247 							  periph_links.tqe);
1248 					selwakeup(&softc->rcv_select);
1249 				}
1250 				descr->data_resid = scsi_3btoul(sr->xfer_len);
1251 				descr->timeout = 5 * 1000;
1252 				descr->status = SCSI_STATUS_OK;
1253 				/*
1254 				 * Attempt to satisfy this request with
1255 				 * a user buffer.
1256 				 */
1257 				targrunqueue(periph, softc);
1258 				return;
1259 			}
1260 			default:
1261 				/*
1262 				 * Queue for consumption by our userland
1263 				 * counterpart and  transition to the exception
1264 				 * state.
1265 				 */
1266 				TAILQ_INSERT_TAIL(&softc->unknown_atio_queue,
1267 						  &atio->ccb_h,
1268 						  periph_links.tqe);
1269 				softc->exceptions |= TARG_EXCEPT_UNKNOWN_ATIO;
1270 				targfireexception(periph, softc);
1271 				return;
1272 			}
1273 		}
1274 
1275 		/* Queue us up to receive a Continue Target I/O ccb. */
1276 		TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1277 				  periph_links.tqe);
1278 		xpt_schedule(periph, /*priority*/1);
1279 		break;
1280 	}
1281 	case XPT_CONT_TARGET_IO:
1282 	{
1283 		struct ccb_accept_tio *atio;
1284 		struct targ_cmd_desc *desc;
1285 		struct buf *bp;
1286 
1287 		xpt_print_path(done_ccb->ccb_h.path);
1288 		printf("Received completed CTIO\n");
1289 		atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio;
1290 		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1291 
1292 		TAILQ_REMOVE(&softc->pending_queue, &atio->ccb_h,
1293 			     periph_links.tqe);
1294 
1295 		/* XXX Check for errors */
1296 		desc->data_resid -= desc->data_increment;
1297 		if ((bp = desc->bp) != NULL) {
1298 
1299 			bp->b_resid -= desc->data_increment;
1300 			bp->b_error = 0;
1301 
1302 			xpt_print_path(done_ccb->ccb_h.path);
1303 			printf("Buffer I/O Completed - Resid %ld:%d\n",
1304 			       bp->b_resid, desc->data_resid);
1305 			/*
1306 			 * Send the buffer back to the client if
1307 			 * either the command has completed or all
1308 			 * buffer space has been consumed.
1309 			 */
1310 			if (desc->data_resid == 0
1311 			 || bp->b_resid == 0) {
1312 				if (bp->b_resid != 0)
1313 					/* Short transfer */
1314 					bp->b_flags |= B_ERROR;
1315 
1316 				xpt_print_path(done_ccb->ccb_h.path);
1317 				printf("Completing a buffer\n");
1318 				biodone(bp);
1319 				desc->bp = NULL;
1320 			}
1321 		}
1322 
1323 		xpt_release_ccb(done_ccb);
1324 		if (softc->state != TARG_STATE_TEARDOWN) {
1325 
1326 			if (desc->data_resid == 0) {
1327 				/*
1328 				 * Send the original accept TIO back to the
1329 				 * controller to handle more work.
1330 				 */
1331 				xpt_print_path(atio->ccb_h.path);
1332 				printf("Returning ATIO to target\n");
1333 				xpt_action((union ccb *)atio);
1334 				break;
1335 			}
1336 
1337 			if (desc->bp != NULL)
1338 				panic("targ%d: desc->bp should be NULL",
1339 				      periph->unit_number);
1340 
1341 			/* Queue us up for another buffer */
1342 			if (atio->cdb_io.cdb_bytes[0] == SEND) {
1343 				TAILQ_INSERT_HEAD(&softc->snd_ccb_queue,
1344 						  &atio->ccb_h,
1345 						  periph_links.tqe);
1346 			} else {
1347 				TAILQ_INSERT_HEAD(&softc->rcv_ccb_queue,
1348 						  &atio->ccb_h,
1349 						  periph_links.tqe);
1350 			}
1351 			desc->bp = NULL;
1352 			targrunqueue(periph, softc);
1353 		} else {
1354 			if (desc->bp != NULL) {
1355 				bp->b_flags |= B_ERROR;
1356 				bp->b_error = ENXIO;
1357 				biodone(bp);
1358 			}
1359 			freedescr(desc);
1360 			free(atio, M_DEVBUF);
1361 		}
1362 		break;
1363 	}
1364 	case XPT_IMMED_NOTIFY:
1365 	{
1366 		if (softc->state == TARG_STATE_TEARDOWN) {
1367 			free(done_ccb, M_DEVBUF);
1368 		}
1369 		break;
1370 	}
1371 	}
1372 }
1373 
1374 /*
1375  * Transition to the exception state and notify our symbiotic
1376  * userland process of the change.
1377  */
1378 static void
1379 targfireexception(struct cam_periph *periph, struct targ_softc *softc)
1380 {
1381 	/*
1382 	 * return all pending buffers with short read/write status so our
1383 	 * process unblocks, and do a selwakeup on any process queued
1384 	 * waiting for reads or writes.  When the selwakeup is performed,
1385 	 * the waking process will wakeup, call our poll routine again,
1386 	 * and pick up the exception.
1387 	 */
1388 	struct buf *bp;
1389 
1390 	if (softc->state != TARG_STATE_NORMAL)
1391 		/* Already either tearing down or in exception state */
1392 		return;
1393 
1394 	softc->state = TARG_STATE_EXCEPTION;
1395 
1396 	while ((bp = bufq_first(&softc->snd_buf_queue)) != NULL) {
1397 		bufq_remove(&softc->snd_buf_queue, bp);
1398 		bp->b_flags |= B_ERROR;
1399 		biodone(bp);
1400 	}
1401 
1402 	while ((bp = bufq_first(&softc->rcv_buf_queue)) != NULL) {
1403 		bufq_remove(&softc->snd_buf_queue, bp);
1404 		bp->b_flags |= B_ERROR;
1405 		biodone(bp);
1406 	}
1407 
1408 	selwakeup(&softc->snd_select);
1409 	selwakeup(&softc->rcv_select);
1410 }
1411 
1412 static int
1413 targerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1414 {
1415 	return 0;
1416 }
1417 
1418 static struct targ_cmd_desc*
1419 allocdescr()
1420 {
1421 	struct targ_cmd_desc* descr;
1422 
1423 	/* Allocate the targ_descr structure */
1424 	descr = (struct targ_cmd_desc *)malloc(sizeof(*descr),
1425 					       M_DEVBUF, M_NOWAIT);
1426 	if (descr == NULL)
1427 		return (NULL);
1428 
1429 	bzero(descr, sizeof(*descr));
1430 
1431 	/* Allocate buffer backing store */
1432 	descr->backing_store = malloc(MAX_BUF_SIZE, M_DEVBUF, M_NOWAIT);
1433 	if (descr->backing_store == NULL) {
1434 		free(descr, M_DEVBUF);
1435 		return (NULL);
1436 	}
1437 	descr->max_size = MAX_BUF_SIZE;
1438 	return (descr);
1439 }
1440 
1441 static void
1442 freedescr(struct targ_cmd_desc *descr)
1443 {
1444 	free(descr->data, M_DEVBUF);
1445 	free(descr, M_DEVBUF);
1446 }
1447 
1448 static void
1449 fill_sense(struct scsi_sense_data *sense, u_int error_code, u_int sense_key,
1450 	   u_int asc, u_int ascq)
1451 {
1452 	bzero(sense, sizeof(*sense));
1453 	sense->error_code = error_code;
1454 	sense->flags = sense_key;
1455 	sense->add_sense_code = asc;
1456 	sense->add_sense_code_qual = ascq;
1457 
1458 	sense->extra_len = offsetof(struct scsi_sense_data, fru)
1459 			 - offsetof(struct scsi_sense_data, extra_len);
1460 }
1461