xref: /freebsd/sys/cam/scsi/scsi_target.c (revision 7f9d26bd9d1b2754da8429257edbde0a8237f84f)
1 /*
2  * Implementation of a simple Target Mode SCSI Processor Target driver for CAM.
3  *
4  * Copyright (c) 1998, 1999 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 #include <stddef.h>	/* For offsetof */
31 
32 #include <sys/param.h>
33 #include <sys/queue.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/types.h>
37 #include <sys/buf.h>
38 #include <sys/conf.h>
39 #include <sys/devicestat.h>
40 #include <sys/malloc.h>
41 #include <sys/poll.h>
42 #include <sys/select.h>	/* For struct selinfo. */
43 #include <sys/uio.h>
44 
45 #include <cam/cam.h>
46 #include <cam/cam_ccb.h>
47 #include <cam/cam_extend.h>
48 #include <cam/cam_periph.h>
49 #include <cam/cam_queue.h>
50 #include <cam/cam_xpt_periph.h>
51 #include <cam/cam_debug.h>
52 
53 #include <cam/scsi/scsi_all.h>
54 #include <cam/scsi/scsi_pt.h>
55 #include <cam/scsi/scsi_targetio.h>
56 #include <cam/scsi/scsi_message.h>
57 
58 typedef enum {
59 	TARG_STATE_NORMAL,
60 	TARG_STATE_EXCEPTION,
61 	TARG_STATE_TEARDOWN
62 } targ_state;
63 
64 typedef enum {
65 	TARG_FLAG_NONE		 = 0x00,
66 	TARG_FLAG_SEND_EOF	 = 0x01,
67 	TARG_FLAG_RECEIVE_EOF	 = 0x02,
68 	TARG_FLAG_LUN_ENABLED	 = 0x04
69 } targ_flags;
70 
71 typedef enum {
72 	TARG_CCB_NONE		= 0x00,
73 	TARG_CCB_WAITING	= 0x01,
74 	TARG_CCB_HELDQ		= 0x02,
75 	TARG_CCB_ABORT_TO_HELDQ = 0x04
76 } targ_ccb_flags;
77 
78 #define MAX_ACCEPT	16
79 #define MAX_IMMEDIATE	16
80 #define MAX_BUF_SIZE	256	/* Max inquiry/sense/mode page transfer */
81 #define MAX_INITIATORS	16	/* XXX More for Fibre-Channel */
82 
83 #define MIN(a, b) (((a) < (b)) ? (a) : (b))
84 
85 #define TARG_CONTROL_UNIT 0xffff00ff
86 #define TARG_IS_CONTROL_DEV(unit) ((unit) == TARG_CONTROL_UNIT)
87 
88 #define TARG_TAG_WILDCARD ((u_int)~0)
89 
90 /* Offsets into our private CCB area for storing accept information */
91 #define ccb_flags	ppriv_field0
92 #define ccb_descr	ppriv_ptr1
93 
94 /* We stick a pointer to the originating accept TIO in each continue I/O CCB */
95 #define ccb_atio	ppriv_ptr1
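/*
 * For example, as done in targenlun() and targstart() below:
 *
 *	atio->ccb_h.ccb_descr = allocdescr();	(per-command descriptor)
 *	start_ccb->ccb_h.ccb_atio = atio;	(CTIO points back at its ATIO)
 */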
96 
97 struct targ_softc {
98 	/* CTIOs pending on the controller */
99 	struct		ccb_queue pending_queue;
100 
101 	/* ATIOs awaiting CTIO resources from the XPT */
102 	struct		ccb_queue work_queue;
103 
104 	/*
105 	 * ATIOs for SEND operations waiting for 'read'
106 	 * buffer resources from our userland daemon.
107 	 */
108 	struct		ccb_queue snd_ccb_queue;
109 
110 	/*
111 	 * ATIOs for RCV operations waiting for 'write'
112 	 * buffer resources from our userland daemon.
113 	 */
114 	struct		ccb_queue rcv_ccb_queue;
115 
116 	/*
117 	 * ATIOs for commands unknown to the kernel driver.
118 	 * These are queued for the userland daemon to
119 	 * consume.
120 	 */
121 	struct		ccb_queue unknown_atio_queue;
122 
123 	/*
124 	 * Userland buffers for SEND commands waiting for
125 	 * SEND ATIOs to be queued by an initiator.
126 	 */
127 	struct		buf_queue_head snd_buf_queue;
128 
129 	/*
130 	 * Userland buffers for RCV commands waiting for
131 	 * RCV ATIOs to be queued by an initiator.
132 	 */
133 	struct		buf_queue_head rcv_buf_queue;
134 	struct		devstat device_stats;
135 	struct		selinfo snd_select;
136 	struct		selinfo rcv_select;
137 	targ_state	state;
138 	targ_flags	flags;
139 	targ_exception	exceptions;
140 	u_int		init_level;
141 	u_int		inq_data_len;
142 	struct		scsi_inquiry_data *inq_data;
143 	struct		ccb_accept_tio *accept_tio_list;
144 	struct		ccb_hdr_slist immed_notify_slist;
145 	struct		initiator_state istate[MAX_INITIATORS];
146 };
147 
148 struct targ_cmd_desc {
149 	struct	  ccb_accept_tio* atio_link;
150 	u_int	  data_resid;	/* How much left to transfer */
151 	u_int	  data_increment;/* Amount to send before next disconnect */
152 	void*	  data;		/* The data. Can be from backing_store or not */
153 	void*	  backing_store;/* Backing store allocated for this descriptor*/
154 	struct	  buf *bp;	/* Buffer for this transfer */
155 	u_int	  max_size;	/* Size of backing_store */
156 	u_int32_t timeout;
157 	u_int8_t  status;	/* Status to return to initiator */
158 };
159 
160 static	d_open_t	targopen;
161 static	d_close_t	targclose;
162 static	d_read_t	targread;
163 static	d_write_t	targwrite;
164 static	d_ioctl_t	targioctl;
165 static	d_poll_t	targpoll;
166 static	d_strategy_t	targstrategy;
167 
168 #define TARG_CDEV_MAJOR	65
169 static struct cdevsw targ_cdevsw = {
170 	/* open */	targopen,
171 	/* close */	targclose,
172 	/* read */	targread,
173 	/* write */	targwrite,
174 	/* ioctl */	targioctl,
175 	/* poll */	targpoll,
176 	/* mmap */	nommap,
177 	/* strategy */	targstrategy,
178 	/* name */	"targ",
179 	/* maj */	TARG_CDEV_MAJOR,
180 	/* dump */	nodump,
181 	/* psize */	nopsize,
182 	/* flags */	0,
183 	/* bmaj */	-1
184 };
185 
186 static int		targsendccb(struct cam_periph *periph, union ccb *ccb,
187 				    union ccb *inccb);
188 static periph_init_t	targinit;
189 static void		targasync(void *callback_arg, u_int32_t code,
190 				struct cam_path *path, void *arg);
191 static int		targallocinstance(struct ioc_alloc_unit *alloc_unit);
192 static int		targfreeinstance(struct ioc_alloc_unit *alloc_unit);
193 static cam_status	targenlun(struct cam_periph *periph);
194 static cam_status	targdislun(struct cam_periph *periph);
195 static periph_ctor_t	targctor;
196 static periph_dtor_t	targdtor;
197 static void		targrunqueue(struct cam_periph *periph,
198 				     struct targ_softc *softc);
199 static periph_start_t	targstart;
200 static void		targdone(struct cam_periph *periph,
201 				 union ccb *done_ccb);
202 static void		targfireexception(struct cam_periph *periph,
203 					  struct targ_softc *softc);
204 static void		targinoterror(struct cam_periph *periph,
205 				      struct targ_softc *softc,
206 				      struct ccb_immed_notify *inot);
207 static  int		targerror(union ccb *ccb, u_int32_t cam_flags,
208 				  u_int32_t sense_flags);
209 static struct targ_cmd_desc*	allocdescr(void);
210 static void		freedescr(struct targ_cmd_desc *buf);
211 static void		fill_sense(struct targ_softc *softc,
212 				   u_int initiator_id, u_int error_code,
213 				   u_int sense_key, u_int asc, u_int ascq);
214 static void		copy_sense(struct targ_softc *softc,
215 				   struct ccb_scsiio *csio);
216 static void	set_unit_attention_cond(struct cam_periph *periph,
217 					u_int initiator_id, ua_types ua);
218 static void	set_contingent_allegiance_cond(struct cam_periph *periph,
219 					       u_int initiator_id, ca_types ca);
220 static void	abort_pending_transactions(struct cam_periph *periph,
221 					   u_int initiator_id, u_int tag_id,
222 					   int errno, int to_held_queue);
223 
224 static struct periph_driver targdriver =
225 {
226 	targinit, "targ",
227 	TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
228 };
229 
230 DATA_SET(periphdriver_set, targdriver);
231 
232 static struct extend_array *targperiphs;
233 
234 static void
235 targinit(void)
236 {
237 
238 	/*
239 	 * Create our extend array for storing the devices we attach to.
240 	 */
241 	targperiphs = cam_extend_new();
242 	if (targperiphs == NULL) {
243 		printf("targ: Failed to alloc extend array!\n");
244 		return;
245 	}
246 
247 	/* If we were successful, register our devsw */
248 	cdevsw_add(&targ_cdevsw);
249 }
250 
251 static void
252 targasync(void *callback_arg, u_int32_t code,
253 	  struct cam_path *path, void *arg)
254 {
255 	struct cam_periph *periph;
256 	struct targ_softc *softc;
257 
258 	periph = (struct cam_periph *)callback_arg;
259 	softc = (struct targ_softc *)periph->softc;
260 	switch (code) {
261 	case AC_PATH_DEREGISTERED:
262 	{
263 		/* XXX Implement */
264 		break;
265 	}
266 	default:
267 		break;
268 	}
269 }
270 
271 /* Attempt to enable our lun */
272 static cam_status
273 targenlun(struct cam_periph *periph)
274 {
275 	union ccb immed_ccb;
276 	struct targ_softc *softc;
277 	cam_status status;
278 	int i;
279 
280 	softc = (struct targ_softc *)periph->softc;
281 
282 	if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0)
283 		return (CAM_REQ_CMP);
284 
285 	xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, /*priority*/1);
286 	immed_ccb.ccb_h.func_code = XPT_EN_LUN;
287 
288 	/* Don't need support for any vendor specific commands */
289 	immed_ccb.cel.grp6_len = 0;
290 	immed_ccb.cel.grp7_len = 0;
291 	immed_ccb.cel.enable = 1;
292 	xpt_action(&immed_ccb);
293 	status = immed_ccb.ccb_h.status;
294 	if (status != CAM_REQ_CMP) {
295 		xpt_print_path(periph->path);
296 		printf("targenlun - Enable Lun Rejected for status 0x%x\n",
297 		       status);
298 		return (status);
299 	}
300 
301 	softc->flags |= TARG_FLAG_LUN_ENABLED;
302 
303 	/*
304 	 * Build up a buffer of accept target I/O
305 	 * operations for incoming selections.
306 	 */
307 	for (i = 0; i < MAX_ACCEPT; i++) {
308 		struct ccb_accept_tio *atio;
309 
310 		atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_DEVBUF,
311 						      M_NOWAIT);
312 		if (atio == NULL) {
313 			status = CAM_RESRC_UNAVAIL;
314 			break;
315 		}
316 
317 		atio->ccb_h.ccb_descr = allocdescr();
318 
319 		if (atio->ccb_h.ccb_descr == NULL) {
320 			free(atio, M_DEVBUF);
321 			status = CAM_RESRC_UNAVAIL;
322 			break;
323 		}
324 
325 		xpt_setup_ccb(&atio->ccb_h, periph->path, /*priority*/1);
326 		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
327 		atio->ccb_h.cbfcnp = targdone;
328 		xpt_action((union ccb *)atio);
329 		status = atio->ccb_h.status;
330 		if (status != CAM_REQ_INPROG) {
331 			xpt_print_path(periph->path);
332 			printf("Queue of atio failed\n");
333 			freedescr(atio->ccb_h.ccb_descr);
334 			free(atio, M_DEVBUF);
335 			break;
336 		}
337 		((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
338 		    softc->accept_tio_list;
339 		softc->accept_tio_list = atio;
340 	}
341 
342 	if (i == 0) {
343 		xpt_print_path(periph->path);
344 		printf("targenlun - Could not allocate accept tio CCBs: "
345 		       "status = 0x%x\n", status);
346 		targdislun(periph);
347 		return (CAM_REQ_CMP_ERR);
348 	}
349 
350 	/*
351 	 * Build up a buffer of immediate notify CCBs
352 	 * so the SIM can tell us of asynchronous target mode events.
353 	 */
354 	for (i = 0; i < MAX_ACCEPT; i++) {
355 		struct ccb_immed_notify *inot;
356 
357 		inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_DEVBUF,
358 						        M_NOWAIT);
359 
360 		if (inot == NULL) {
361 			status = CAM_RESRC_UNAVAIL;
362 			break;
363 		}
364 
365 		xpt_setup_ccb(&inot->ccb_h, periph->path, /*priority*/1);
366 		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
367 		inot->ccb_h.cbfcnp = targdone;
368 		SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
369 				  periph_links.sle);
370 		xpt_action((union ccb *)inot);
371 	}
372 
373 	if (i == 0) {
374 		xpt_print_path(periph->path);
375 		printf("targenlun - Could not allocate immediate notify CCBs: "
376 		       "status = 0x%x\n", status);
377 		targdislun(periph);
378 		return (CAM_REQ_CMP_ERR);
379 	}
380 
381 	return (CAM_REQ_CMP);
382 }
383 
384 static cam_status
385 targdislun(struct cam_periph *periph)
386 {
387 	union ccb ccb;
388 	struct targ_softc *softc;
389 	struct ccb_accept_tio* atio;
390 	struct ccb_hdr *ccb_h;
391 
392 	softc = (struct targ_softc *)periph->softc;
393 	if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0)
394 		return CAM_REQ_CMP;
395 
396 	/* XXX Block for Continue I/O completion */
397 
398 	/* Kill off all ACCEPT and IMMEDIATE CCBs */
399 	while ((atio = softc->accept_tio_list) != NULL) {
400 
401 		softc->accept_tio_list =
402 		    ((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link;
403 		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
404 		ccb.cab.ccb_h.func_code = XPT_ABORT;
405 		ccb.cab.abort_ccb = (union ccb *)atio;
406 		xpt_action(&ccb);
407 	}
408 
409 	while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) {
410 		SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle);
411 		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
412 		ccb.cab.ccb_h.func_code = XPT_ABORT;
413 		ccb.cab.abort_ccb = (union ccb *)ccb_h;
414 		xpt_action(&ccb);
415 	}
416 
417 	/*
418 	 * Disable this lun.
419 	 */
420 	xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, /*priority*/1);
421 	ccb.cel.ccb_h.func_code = XPT_EN_LUN;
422 	ccb.cel.enable = 0;
423 	xpt_action(&ccb);
424 
425 	if (ccb.cel.ccb_h.status != CAM_REQ_CMP)
426 		printf("targdislun - Disabling lun on controller failed "
427 		       "with status 0x%x\n", ccb.cel.ccb_h.status);
428 	else
429 		softc->flags &= ~TARG_FLAG_LUN_ENABLED;
430 	return (ccb.cel.ccb_h.status);
431 }
432 
433 static cam_status
434 targctor(struct cam_periph *periph, void *arg)
435 {
436 	struct ccb_pathinq *cpi;
437 	struct targ_softc *softc;
438 	int i;
439 
440 	cpi = (struct ccb_pathinq *)arg;
441 
442 	/* Allocate our per-instance private storage */
443 	softc = (struct targ_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);
444 	if (softc == NULL) {
445 		printf("targctor: unable to malloc softc\n");
446 		return (CAM_REQ_CMP_ERR);
447 	}
448 
449 	bzero(softc, sizeof(*softc));
450 	TAILQ_INIT(&softc->pending_queue);
451 	TAILQ_INIT(&softc->work_queue);
452 	TAILQ_INIT(&softc->snd_ccb_queue);
453 	TAILQ_INIT(&softc->rcv_ccb_queue);
454 	TAILQ_INIT(&softc->unknown_atio_queue);
455 	bufq_init(&softc->snd_buf_queue);
456 	bufq_init(&softc->rcv_buf_queue);
457 	softc->accept_tio_list = NULL;
458 	SLIST_INIT(&softc->immed_notify_slist);
459 	softc->state = TARG_STATE_NORMAL;
460 	periph->softc = softc;
461 	softc->init_level++;
462 
463 	cam_extend_set(targperiphs, periph->unit_number, periph);
464 
465 	/*
466 	 * We start out life with a UA to indicate power-on/reset.
467 	 */
468 	for (i = 0; i < MAX_INITIATORS; i++)
469 		softc->istate[i].pending_ua = UA_POWER_ON;
470 
471 	/*
472 	 * Allocate an initial inquiry data buffer.  We might allow the
473 	 * user to override this later via an ioctl.
474 	 */
475 	softc->inq_data_len = sizeof(*softc->inq_data);
476 	softc->inq_data = malloc(softc->inq_data_len, M_DEVBUF, M_NOWAIT);
477 	if (softc->inq_data == NULL) {
478 		printf("targctor - Unable to malloc inquiry data\n");
479 		targdtor(periph);
480 		return (CAM_RESRC_UNAVAIL);
481 	}
482 	bzero(softc->inq_data, softc->inq_data_len);
483 	softc->inq_data->device = T_PROCESSOR | (SID_QUAL_LU_CONNECTED << 5);
484 	softc->inq_data->version = 2;
485 	softc->inq_data->response_format = 2; /* SCSI2 Inquiry Format */
486 	softc->inq_data->flags =
487 	    cpi->hba_inquiry & (PI_SDTR_ABLE|PI_WIDE_16|PI_WIDE_32);
488 	softc->inq_data->additional_length = softc->inq_data_len - 4;
489 	strncpy(softc->inq_data->vendor, "FreeBSD ", SID_VENDOR_SIZE);
490 	strncpy(softc->inq_data->product, "TM-PT           ", SID_PRODUCT_SIZE);
491 	strncpy(softc->inq_data->revision, "0.0 ", SID_REVISION_SIZE);
492 	softc->init_level++;
493 	return (CAM_REQ_CMP);
494 }
495 
496 static void
497 targdtor(struct cam_periph *periph)
498 {
499 	struct targ_softc *softc;
500 
501 	softc = (struct targ_softc *)periph->softc;
502 
503 	softc->state = TARG_STATE_TEARDOWN;
504 
505 	targdislun(periph);
506 
507 	cam_extend_release(targperiphs, periph->unit_number);
508 
509 	switch (softc->init_level) {
510 	default:
511 		/* FALLTHROUGH */
512 	case 2:
513 		free(softc->inq_data, M_DEVBUF);
514 		/* FALLTHROUGH */
515 	case 1:
516 		free(softc, M_DEVBUF);
517 		break;
518 	case 0:
519 		panic("targdtor - impossible init level");;
520 	}
521 }
522 
523 static int
524 targopen(dev_t dev, int flags, int fmt, struct proc *p)
525 {
526 	struct cam_periph *periph;
527 	struct	targ_softc *softc;
528 	u_int unit;
529 	cam_status status;
530 	int error;
531 	int s;
532 
533 	unit = minor(dev);
534 
535 	/* An open of the control device always succeeds */
536 	if (TARG_IS_CONTROL_DEV(unit))
537 		return 0;
538 
539 	s = splsoftcam();
540 	periph = cam_extend_get(targperiphs, unit);
541 	if (periph == NULL) {
542 		splx(s);
543 		return (ENXIO);
544 	}
545 	if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0) {
546 		splx(s);
547 		return (error);
548 	}
549 
550 	softc = (struct targ_softc *)periph->softc;
551 	if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0) {
552 		if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
553 			splx(s);
554 			cam_periph_unlock(periph);
555 			return(ENXIO);
556 		}
557 	}
558         splx(s);
559 
560 	status = targenlun(periph);
561 	switch (status) {
562 	case CAM_REQ_CMP:
563 		error = 0;
564 		break;
565 	case CAM_RESRC_UNAVAIL:
566 		error = ENOMEM;
567 		break;
568 	case CAM_LUN_ALRDY_ENA:
569 		error = EADDRINUSE;
570 		break;
571 	default:
572 		error = ENXIO;
573 		break;
574 	}
575         cam_periph_unlock(periph);
576 	return (error);
577 }
578 
579 static int
580 targclose(dev_t dev, int flag, int fmt, struct proc *p)
581 {
582 	struct	cam_periph *periph;
583 	struct	targ_softc *softc;
584 	u_int	unit;
585 	int	s;
586 	int	error;
587 
588 	unit = minor(dev);
589 
590 	/* A close of the control device always succeeds */
591 	if (TARG_IS_CONTROL_DEV(unit))
592 		return 0;
593 
594 	s = splsoftcam();
595 	periph = cam_extend_get(targperiphs, unit);
596 	if (periph == NULL) {
597 		splx(s);
598 		return (ENXIO);
599 	}
600 	if ((error = cam_periph_lock(periph, PRIBIO)) != 0)
601 		return (error);
602 	softc = (struct targ_softc *)periph->softc;
603 	splx(s);
604 
605 	targdislun(periph);
606 
607 	cam_periph_unlock(periph);
608 	cam_periph_release(periph);
609 
610 	return (0);
611 }
612 
613 static int
614 targallocinstance(struct ioc_alloc_unit *alloc_unit)
615 {
616 	struct ccb_pathinq cpi;
617 	struct cam_path *path;
618 	struct cam_periph *periph;
619 	cam_status status;
620 	int free_path_on_return;
621 	int error;
622 
623 	free_path_on_return = 0;
624 	status = xpt_create_path(&path, /*periph*/NULL,
625 				 alloc_unit->path_id,
626 				 alloc_unit->target_id,
627 				 alloc_unit->lun_id);
628 	if (status != CAM_REQ_CMP) {
629 		printf("Couldn't Allocate Path %x\n", status);
630 		goto fail;
631 	}
632 
633 	free_path_on_return++;
634 
635 
636 	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
637 	cpi.ccb_h.func_code = XPT_PATH_INQ;
638 	xpt_action((union ccb *)&cpi);
639 	status = cpi.ccb_h.status;
640 
641 	if (status != CAM_REQ_CMP) {
642 		printf("Couldn't CPI %x\n", status);
643 		goto fail;
644 	}
645 
646 	/* Can only alloc units on controllers that support target mode */
647 	if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
648 		printf("Controller does not support target mode%x\n", status);
649 		status = CAM_PATH_INVALID;
650 		goto fail;
651 	}
652 
653 	/* Ensure that we don't already have an instance for this unit. */
654 	if ((periph = cam_periph_find(path, "targ")) != NULL) {
655 		status = CAM_LUN_ALRDY_ENA;
656 		goto fail;
657 	}
658 
659 	/*
660 	 * Allocate a peripheral instance for
661 	 * this target instance.
662 	 */
663 	status = cam_periph_alloc(targctor, NULL, targdtor, targstart,
664 				  "targ", CAM_PERIPH_BIO, path, targasync,
665 				  0, &cpi);
666 
667 fail:
668 	switch (status) {
669 	case CAM_REQ_CMP:
670 	{
671 		struct cam_periph *periph;
672 
673 		if ((periph = cam_periph_find(path, "targ")) == NULL)
674 			panic("targallocinstance: Succeeded but no periph?");
675 		error = 0;
676 		alloc_unit->unit = periph->unit_number;
677 		break;
678 	}
679 	case CAM_RESRC_UNAVAIL:
680 		error = ENOMEM;
681 		break;
682 	case CAM_LUN_ALRDY_ENA:
683 		error = EADDRINUSE;
684 		break;
685 	default:
686 		printf("targallocinstance: Unexpected CAM status %x\n", status);
687 		/* FALLTHROUGH */
688 	case CAM_PATH_INVALID:
689 		error = ENXIO;
690 		break;
691 	case CAM_PROVIDE_FAIL:
692 		error = ENODEV;
693 		break;
694 	}
695 
696 	if (free_path_on_return != 0)
697 		xpt_free_path(path);
698 
699 	return (error);
700 }
701 
702 static int
703 targfreeinstance(struct ioc_alloc_unit *alloc_unit)
704 {
705 	struct cam_path *path;
706 	struct cam_periph *periph;
707 	struct targ_softc *softc;
708 	cam_status status;
709 	int free_path_on_return;
710 	int error;
711 
712 	periph = NULL;
713 	free_path_on_return = 0;
714 	status = xpt_create_path(&path, /*periph*/NULL,
715 				 alloc_unit->path_id,
716 				 alloc_unit->target_id,
717 				 alloc_unit->lun_id);
718 	free_path_on_return++;
719 
720 	if (status != CAM_REQ_CMP)
721 		goto fail;
722 
723 	/* Find our instance. */
724 	if ((periph = cam_periph_find(path, "targ")) == NULL) {
725 		xpt_print_path(path);
726 		status = CAM_PATH_INVALID;
727 		goto fail;
728 	}
729 
730         softc = (struct targ_softc *)periph->softc;
731 
732         if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0) {
733 		status = CAM_BUSY;
734 		goto fail;
735 	}
736 
737 fail:
738 	if (free_path_on_return != 0)
739 		xpt_free_path(path);
740 
741 	switch (status) {
742 	case CAM_REQ_CMP:
743 		if (periph != NULL)
744 			cam_periph_invalidate(periph);
745 		error = 0;
746 		break;
747 	case CAM_RESRC_UNAVAIL:
748 		error = ENOMEM;
749 		break;
750 	case CAM_LUN_ALRDY_ENA:
751 		error = EADDRINUSE;
752 		break;
753 	default:
754 		printf("targfreeinstance: Unexpected CAM status %x\n", status);
755 		/* FALLTHROUGH */
756 	case CAM_PATH_INVALID:
757 		error = ENODEV;
758 		break;
759 	}
760 	return (error);
761 }
762 
763 static int
764 targioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
765 {
766 	struct cam_periph *periph;
767 	struct targ_softc *softc;
768 	u_int  unit;
769 	int    error;
770 
771 	unit = minor(dev);
772 	error = 0;
773 	if (TARG_IS_CONTROL_DEV(unit)) {
774 		switch (cmd) {
775 		case TARGCTLIOALLOCUNIT:
776 			error = targallocinstance((struct ioc_alloc_unit*)addr);
777 			break;
778 		case TARGCTLIOFREEUNIT:
779 			error = targfreeinstance((struct ioc_alloc_unit*)addr);
780 			break;
781 		default:
782 			error = EINVAL;
783 			break;
784 		}
785 		return (error);
786 	}
787 
788 	periph = cam_extend_get(targperiphs, unit);
789 	if (periph == NULL)
790 		return (ENXIO);
791 	softc = (struct targ_softc *)periph->softc;
792 	switch (cmd) {
793 	case TARGIOCFETCHEXCEPTION:
794 		*((targ_exception *)addr) = softc->exceptions;
795 		break;
796 	case TARGIOCCLEAREXCEPTION:
797 	{
798 		targ_exception clear_mask;
799 
800 		clear_mask = *((targ_exception *)addr);
801 		if ((clear_mask & TARG_EXCEPT_UNKNOWN_ATIO) != 0) {
802 			struct ccb_hdr *ccbh;
803 
804 			ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
805 			if (ccbh != NULL) {
806 				TAILQ_REMOVE(&softc->unknown_atio_queue,
807 					     ccbh, periph_links.tqe);
808 				/* Requeue the ATIO back to the controller */
809 				xpt_action((union ccb *)ccbh);
810 				ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
811 			}
812 			if (ccbh != NULL)
813 				clear_mask &= ~TARG_EXCEPT_UNKNOWN_ATIO;
814 		}
815 		softc->exceptions &= ~clear_mask;
816 		if (softc->exceptions == TARG_EXCEPT_NONE
817 		 && softc->state == TARG_STATE_EXCEPTION) {
818 			softc->state = TARG_STATE_NORMAL;
819 			targrunqueue(periph, softc);
820 		}
821 		break;
822 	}
823 	case TARGIOCFETCHATIO:
824 	{
825 		struct ccb_hdr *ccbh;
826 
827 		ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
828 		if (ccbh != NULL) {
829 			bcopy(ccbh, addr, sizeof(struct ccb_accept_tio));
830 		} else {
831 			error = ENOENT;
832 		}
833 		break;
834 	}
835 	case TARGIOCCOMMAND:
836 	{
837 		union ccb *inccb;
838 		union ccb *ccb;
839 
840 		/*
841 		 * XXX JGibbs
842 		 * This code is lifted directly from the pass-thru driver.
843 		 * Perhaps this should be moved to a library????
844 		 */
845 		inccb = (union ccb *)addr;
846 		ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority);
847 
848 		error = targsendccb(periph, ccb, inccb);
849 
850 		xpt_release_ccb(ccb);
851 
852 		break;
853 	}
854 	case TARGIOCGETISTATE:
855 	case TARGIOCSETISTATE:
856 	{
857 		struct ioc_initiator_state *ioc_istate;
858 
859 		ioc_istate = (struct ioc_initiator_state *)addr;
860 		if (ioc_istate->initiator_id >= MAX_INITIATORS) {
861 			error = EINVAL;
862 			break;
863 		}
864 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
865 			  ("GET/SETISTATE for %d\n", ioc_istate->initiator_id));
866 		if (cmd == TARGIOCGETISTATE) {
867 			bcopy(&softc->istate[ioc_istate->initiator_id],
868 			      &ioc_istate->istate, sizeof(ioc_istate->istate));
869 		} else {
870 			bcopy(&ioc_istate->istate,
871 			      &softc->istate[ioc_istate->initiator_id],
872 			      sizeof(ioc_istate->istate));
873 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
874 				  ("pending_ca now %x\n",
875 				   softc->istate[ioc_istate->initiator_id].pending_ca));
876 		}
877 		break;
878 	}
879 	default:
880 		error = ENOTTY;
881 		break;
882 	}
883 	return (error);
884 }
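/*
 * Illustrative sketch (editorial, not part of the driver): a userland
 * daemon might create a target instance through the control device and
 * then manage exceptions on the per-unit device roughly as follows.
 * The device node names are assumptions; the ioctls, targ_exception and
 * struct ioc_alloc_unit come from <cam/scsi/scsi_targetio.h>.
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <cam/scsi/scsi_targetio.h>
 *
 *	struct ioc_alloc_unit alloc;
 *	targ_exception excep;
 *	int ctl_fd, targ_fd;
 *
 *	ctl_fd = open("/dev/targ.ctl", O_RDWR);	(assumed control node)
 *	alloc.path_id = bus;
 *	alloc.target_id = target;
 *	alloc.lun_id = lun;
 *	if (ioctl(ctl_fd, TARGCTLIOALLOCUNIT, &alloc) < 0)
 *		... fail ...
 *	targ_fd = open("/dev/targ0", O_RDWR);	(assumed node for alloc.unit)
 *
 *	if (ioctl(targ_fd, TARGIOCFETCHEXCEPTION, &excep) == 0
 *	 && excep != TARG_EXCEPT_NONE)
 *		ioctl(targ_fd, TARGIOCCLEAREXCEPTION, &excep);
 */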
885 
886 /*
887  * XXX JGibbs lifted from pass-thru driver.
888  * Generally, "ccb" should be the CCB supplied by the kernel.  "inccb"
889  * should be the CCB that is copied in from the user.
890  */
891 static int
892 targsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
893 {
894 	struct targ_softc *softc;
895 	struct cam_periph_map_info mapinfo;
896 	int error, need_unmap;
897 	int s;
898 
899 	softc = (struct targ_softc *)periph->softc;
900 
901 	need_unmap = 0;
902 
903 	/*
904 	 * There are some fields in the CCB header that need to be
905 	 * preserved, the rest we get from the user.
906 	 */
907 	xpt_merge_ccb(ccb, inccb);
908 
909 	/*
910 	 * There's no way for the user to have a completion
911 	 * function, so we put our own completion function in here.
912 	 */
913 	ccb->ccb_h.cbfcnp = targdone;
914 
915 	/*
916 	 * We only attempt to map the user memory into kernel space
917 	 * if they haven't passed in a physical memory pointer,
918 	 * and if there is actually an I/O operation to perform.
919 	 * Right now cam_periph_mapmem() only supports SCSI and device
920 	 * match CCBs.  For the SCSI CCBs, we only pass the CCB in if
921 	 * there's actually data to map.  cam_periph_mapmem() will do the
922 	 * right thing, even if there isn't data to map, but since CCBs
923 	 * without data are a reasonably common occurrence (e.g. test unit
924 	 * ready), it will save a few cycles if we check for it here.
925 	 */
926 	if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0)
927 	 && (((ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
928 	    && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
929 	  || (ccb->ccb_h.func_code == XPT_DEV_MATCH))) {
930 
931 		bzero(&mapinfo, sizeof(mapinfo));
932 
933 		error = cam_periph_mapmem(ccb, &mapinfo);
934 
935 		/*
936 		 * cam_periph_mapmem returned an error, we can't continue.
937 		 * Return the error to the user.
938 		 */
939 		if (error)
940 			return(error);
941 
942 		/*
943 		 * We successfully mapped the memory in, so we need to
944 		 * unmap it when the transaction is done.
945 		 */
946 		need_unmap = 1;
947 	}
948 
949 	/*
950 	 * Once queued on the pending CCB list, this CCB will be protected
951 	 * by the error recovery handling used for 'buffer I/O' ccbs.  Since
952 	 * we are in a process context here, however, the software interrupt
953 	 * for this driver may deliver an event invalidating this CCB just
954 	 * before we queue it.  Close this race condition by blocking
955 	 * software interrupt delivery, checking for any pertinent queued
956 	 * events, and only then queuing this CCB.
957 	 */
958 	s = splsoftcam();
959 	if (softc->exceptions == 0) {
960 		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
961 			TAILQ_INSERT_TAIL(&softc->pending_queue, &ccb->ccb_h,
962 					  periph_links.tqe);
963 
964 		/*
965 		 * If the user wants us to perform any error recovery,
966 		 * then honor that request.  Otherwise, it's up to the
967 		 * user to perform any error recovery.
968 		 */
969 		error = cam_periph_runccb(ccb,
970 					  /* error handler */NULL,
971 					  /* cam_flags */ 0,
972 					  /* sense_flags */SF_RETRY_UA,
973 					  &softc->device_stats);
974 
975 		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
976 			TAILQ_REMOVE(&softc->pending_queue, &ccb->ccb_h,
977 				     periph_links.tqe);
978 	} else {
979 		ccb->ccb_h.status = CAM_UNACKED_EVENT;
980 		error = 0;
981 	}
982 	splx(s);
983 
984 	if (need_unmap != 0)
985 		cam_periph_unmapmem(ccb, &mapinfo);
986 
987 	ccb->ccb_h.cbfcnp = NULL;
988 	ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
989 	bcopy(ccb, inccb, sizeof(union ccb));
990 
991 	return(error);
992 }
993 
994 
995 static int
996 targpoll(dev_t dev, int poll_events, struct proc *p)
997 {
998 	struct cam_periph *periph;
999 	struct targ_softc *softc;
1000 	u_int  unit;
1001 	int    revents;
1002 	int    s;
1003 
1004 	unit = minor(dev);
1005 
1006 	/* ioctl is the only supported operation of the control device */
1007 	if (TARG_IS_CONTROL_DEV(unit))
1008 		return EINVAL;
1009 
1010 	periph = cam_extend_get(targperiphs, unit);
1011 	if (periph == NULL)
1012 		return (ENXIO);
1013 	softc = (struct targ_softc *)periph->softc;
1014 
1015 	revents = 0;
1016 	s = splcam();
1017 	if ((poll_events & (POLLOUT | POLLWRNORM)) != 0) {
1018 		if (TAILQ_FIRST(&softc->rcv_ccb_queue) != NULL
1019 		 && bufq_first(&softc->rcv_buf_queue) == NULL)
1020 			revents |= poll_events & (POLLOUT | POLLWRNORM);
1021 	}
1022 	if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
1023 		if (TAILQ_FIRST(&softc->snd_ccb_queue) != NULL
1024 		 && bufq_first(&softc->snd_buf_queue) == NULL)
1025 			revents |= poll_events & (POLLIN | POLLRDNORM);
1026 	}
1027 
1028 	if (softc->state != TARG_STATE_NORMAL)
1029 		revents |= POLLERR;
1030 
1031 	if (revents == 0) {
1032 		if (poll_events & (POLLOUT | POLLWRNORM))
1033 			selrecord(p, &softc->rcv_select);
1034 		if (poll_events & (POLLIN | POLLRDNORM))
1035 			selrecord(p, &softc->snd_select);
1036 	}
1037 	splx(s);
1038 	return (revents);
1039 }
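/*
 * Illustrative sketch (editorial, not part of the driver): the poll
 * semantics above suggest a daemon event loop along these lines.
 * POLLOUT/POLLWRNORM means RECEIVE ATIOs are waiting for 'write' buffers,
 * POLLIN/POLLRDNORM means SEND ATIOs are waiting for 'read' buffers, and
 * POLLERR means an exception should be fetched with TARGIOCFETCHEXCEPTION.
 * Buffer sizing, partial transfers and error handling are omitted, and
 * handle_exception() is a hypothetical helper.
 *
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	struct pollfd pfd;
 *	char buf[65536];
 *
 *	pfd.fd = targ_fd;
 *	pfd.events = POLLIN | POLLOUT;
 *	for (;;) {
 *		if (poll(&pfd, 1, INFTIM) < 0)
 *			break;
 *		if (pfd.revents & POLLERR)
 *			handle_exception(targ_fd);
 *		if (pfd.revents & (POLLIN | POLLRDNORM))
 *			read(targ_fd, buf, sizeof(buf));	(SEND data)
 *		if (pfd.revents & (POLLOUT | POLLWRNORM))
 *			write(targ_fd, buf, sizeof(buf));	(RECEIVE data)
 *	}
 */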
1040 
1041 static int
1042 targread(dev_t dev, struct uio *uio, int ioflag)
1043 {
1044 	u_int  unit;
1045 
1046 	unit = minor(dev);
1047 	/* ioctl is the only supported operation of the control device */
1048 	if (TARG_IS_CONTROL_DEV(unit))
1049 		return EINVAL;
1050 
1051 	if (uio->uio_iovcnt == 0
1052 	 || uio->uio_iov->iov_len == 0) {
1053 		/* EOF */
1054 		struct cam_periph *periph;
1055 		struct targ_softc *softc;
1056 		int    s;
1057 
1058 		s = splcam();
1059 		periph = cam_extend_get(targperiphs, unit);
1060 		if (periph == NULL)
1061 			return (ENXIO);
1062 		softc = (struct targ_softc *)periph->softc;
1063 		softc->flags |= TARG_FLAG_SEND_EOF;
1064 		splx(s);
1065 		targrunqueue(periph, softc);
1066 		return (0);
1067 	}
1068 	return(physread(dev, uio, ioflag));
1069 }
1070 
1071 static int
1072 targwrite(dev_t dev, struct uio *uio, int ioflag)
1073 {
1074 	u_int  unit;
1075 
1076 	unit = minor(dev);
1077 	/* ioctl is the only supported operation of the control device */
1078 	if (TARG_IS_CONTROL_DEV(unit))
1079 		return EINVAL;
1080 
1081 	if (uio->uio_iovcnt == 0
1082 	 || uio->uio_iov->iov_len == 0) {
1083 		/* EOF */
1084 		struct cam_periph *periph;
1085 		struct targ_softc *softc;
1086 		int    s;
1087 
1088 		s = splcam();
1089 		periph = cam_extend_get(targperiphs, unit);
1090 		if (periph == NULL)
1091 			return (ENXIO);
1092 		softc = (struct targ_softc *)periph->softc;
1093 		softc->flags |= TARG_FLAG_RECEIVE_EOF;
1094 		splx(s);
1095 		targrunqueue(periph, softc);
1096 		return (0);
1097 	}
1098 	return(physwrite(dev, uio, ioflag));
1099 }
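/*
 * Note (editorial): as the checks above show, a zero-length transfer is
 * how the daemon signals end-of-file for the current command stream,
 * e.g. (assuming targ_fd is the open unit device):
 *
 *	read(targ_fd, NULL, 0);		(sets TARG_FLAG_SEND_EOF)
 *	write(targ_fd, NULL, 0);	(sets TARG_FLAG_RECEIVE_EOF)
 */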
1100 
1101 /*
1102  * Queue a user buffer so that it can be used to satisfy incoming
1103  * SEND (read buffers) or RECEIVE (write buffers) target commands.
1104  * Each buf describes a single transfer.
1105  */
1106 static void
1107 targstrategy(struct buf *bp)
1108 {
1109 	struct cam_periph *periph;
1110 	struct targ_softc *softc;
1111 	u_int  unit;
1112 	int    s;
1113 
1114 	unit = minor(bp->b_dev);
1115 
1116 	/* ioctl is the only supported operation of the control device */
1117 	if (TARG_IS_CONTROL_DEV(unit)) {
1118 		bp->b_error = EINVAL;
1119 		goto bad;
1120 	}
1121 
1122 	periph = cam_extend_get(targperiphs, unit);
1123 	if (periph == NULL) {
1124 		bp->b_error = ENXIO;
1125 		goto bad;
1126 	}
1127 	softc = (struct targ_softc *)periph->softc;
1128 
1129 	/*
1130 	 * Mask interrupts so that the device cannot be invalidated until
1131 	 * after we are in the queue.  Otherwise, we might not properly
1132 	 * clean up one of the buffers.
1133 	 */
1134 	s = splbio();
1135 
1136 	/*
1137 	 * If there is an exception pending, error out
1138 	 */
1139 	if (softc->state != TARG_STATE_NORMAL) {
1140 		splx(s);
1141 		if (softc->state == TARG_STATE_EXCEPTION
1142 		 && (softc->exceptions & TARG_EXCEPT_DEVICE_INVALID) == 0)
1143 			bp->b_error = EBUSY;
1144 		else
1145 			bp->b_error = ENXIO;
1146 		goto bad;
1147 	}
1148 
1149 	/*
1150 	 * Place it in the queue of buffers available for either
1151 	 * SEND or RECEIVE commands.
1152 	 *
1153 	 */
1154 	bp->b_resid = bp->b_bcount;
1155 	if ((bp->b_flags & B_READ) != 0) {
1156 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1157 			  ("Queued a SEND buffer\n"));
1158 		bufq_insert_tail(&softc->snd_buf_queue, bp);
1159 	} else {
1160 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1161 			  ("Queued a RECEIVE buffer\n"));
1162 		bufq_insert_tail(&softc->rcv_buf_queue, bp);
1163 	}
1164 
1165 	splx(s);
1166 
1167 	/*
1168 	 * Attempt to use the new buffer to service any pending
1169 	 * target commands.
1170 	 */
1171 	targrunqueue(periph, softc);
1172 
1173 	return;
1174 bad:
1175 	bp->b_flags |= B_ERROR;
1176 
1177 	/*
1178 	 * Correctly set the buf to indicate a completed xfer
1179 	 */
1180 	bp->b_resid = bp->b_bcount;
1181 	biodone(bp);
1182 }
1183 
1184 static void
1185 targrunqueue(struct cam_periph *periph, struct targ_softc *softc)
1186 {
1187 	struct  ccb_queue *pending_queue;
1188 	struct	ccb_accept_tio *atio;
1189 	struct	buf_queue_head *bufq;
1190 	struct	buf *bp;
1191 	struct	targ_cmd_desc *desc;
1192 	struct	ccb_hdr *ccbh;
1193 	int	s;
1194 
1195 	s = splbio();
1196 	pending_queue = NULL;
1197 	bufq = NULL;
1198 	ccbh = NULL;
1199 	/* Only run one request at a time to maintain data ordering. */
1200 	if (softc->state != TARG_STATE_NORMAL
1201 	 || TAILQ_FIRST(&softc->work_queue) != NULL
1202 	 || TAILQ_FIRST(&softc->pending_queue) != NULL) {
1203 		splx(s);
1204 		return;
1205 	}
1206 
1207 	if (((bp = bufq_first(&softc->snd_buf_queue)) != NULL
1208 	  || (softc->flags & TARG_FLAG_SEND_EOF) != 0)
1209 	 && (ccbh = TAILQ_FIRST(&softc->snd_ccb_queue)) != NULL) {
1210 
1211 		if (bp == NULL)
1212 			softc->flags &= ~TARG_FLAG_SEND_EOF;
1213 		else {
1214 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1215 				  ("De-Queued a SEND buffer %ld\n",
1216 				   bp->b_bcount));
1217 		}
1218 		bufq = &softc->snd_buf_queue;
1219 		pending_queue = &softc->snd_ccb_queue;
1220 	} else if (((bp = bufq_first(&softc->rcv_buf_queue)) != NULL
1221 	  	 || (softc->flags & TARG_FLAG_RECEIVE_EOF) != 0)
1222 		&& (ccbh = TAILQ_FIRST(&softc->rcv_ccb_queue)) != NULL) {
1223 
1224 		if (bp == NULL)
1225 			softc->flags &= ~TARG_FLAG_RECEIVE_EOF;
1226 		else {
1227 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1228 				  ("De-Queued a RECEIVE buffer %ld\n",
1229 				   bp->b_bcount));
1230 		}
1231 		bufq = &softc->rcv_buf_queue;
1232 		pending_queue = &softc->rcv_ccb_queue;
1233 	}
1234 
1235 	if (pending_queue != NULL) {
1236 		/* Process a request */
1237 		atio = (struct ccb_accept_tio *)ccbh;
1238 		TAILQ_REMOVE(pending_queue, ccbh, periph_links.tqe);
1239 		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1240 		desc->bp = bp;
1241 		if (bp == NULL) {
1242 			/* EOF */
1243 			desc->data = NULL;
1244 			desc->data_increment = 0;
1245 			desc->data_resid = 0;
1246 			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1247 			atio->ccb_h.flags |= CAM_DIR_NONE;
1248 		} else {
1249 			bufq_remove(bufq, bp);
1250 			desc->data = &bp->b_data[bp->b_bcount - bp->b_resid];
1251 			desc->data_increment =
1252 			    MIN(desc->data_resid, bp->b_resid);
1253 		}
1254 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1255 			  ("Buffer command: data %x: datacnt %d\n",
1256 			   (intptr_t)desc->data, desc->data_increment));
1257 		TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1258 				  periph_links.tqe);
1259 	}
1260 	if (TAILQ_FIRST(&softc->work_queue) != NULL) {
1261 		splx(s);
1262 		xpt_schedule(periph, /*XXX priority*/1);
1263 	} else
1264 		splx(s);
1265 }
1266 
1267 static void
1268 targstart(struct cam_periph *periph, union ccb *start_ccb)
1269 {
1270 	struct targ_softc *softc;
1271 	struct ccb_hdr *ccbh;
1272 	struct ccb_accept_tio *atio;
1273 	struct targ_cmd_desc *desc;
1274 	struct ccb_scsiio *csio;
1275 	targ_ccb_flags flags;
1276 	int    s;
1277 
1278 	softc = (struct targ_softc *)periph->softc;
1279 
1280 	s = splbio();
1281 	ccbh = TAILQ_FIRST(&softc->work_queue);
1282 	if (periph->immediate_priority <= periph->pinfo.priority) {
1283 		start_ccb->ccb_h.ccb_flags = TARG_CCB_WAITING;
1284 		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1285 				  periph_links.sle);
1286 		periph->immediate_priority = CAM_PRIORITY_NONE;
1287 		splx(s);
1288 		wakeup(&periph->ccb_list);
1289 	} else if (ccbh == NULL) {
1290 		splx(s);
1291 		xpt_release_ccb(start_ccb);
1292 	} else {
1293 		TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
1294 		splx(s);
1295 		atio = (struct ccb_accept_tio*)ccbh;
1296 		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1297 
1298 		/* Is this a tagged request? */
1299 		flags = atio->ccb_h.flags & (CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
1300 
1301 		/*
1302 		 * If we are done with the transaction, tell the
1303 		 * controller to send status and perform a CMD_CMPLT.
1304 		 */
1305 		if (desc->data_resid == desc->data_increment)
1306 			flags |= CAM_SEND_STATUS;
1307 
1308 		csio = &start_ccb->csio;
1309 		cam_fill_ctio(csio,
1310 			      /*retries*/2,
1311 			      targdone,
1312 			      flags,
1313 			      /*tag_action*/MSG_SIMPLE_Q_TAG,
1314 			      atio->tag_id,
1315 			      atio->init_id,
1316 			      desc->status,
1317 			      /*data_ptr*/desc->data_increment == 0
1318 					  ? NULL : desc->data,
1319 			      /*dxfer_len*/desc->data_increment,
1320 			      /*timeout*/desc->timeout);
1321 
1322 		start_ccb->ccb_h.ccb_flags = TARG_CCB_NONE;
1323 		start_ccb->ccb_h.ccb_atio = atio;
1324 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1325 			  ("Sending a CTIO\n"));
1326 		TAILQ_INSERT_TAIL(&softc->pending_queue, &csio->ccb_h,
1327 				  periph_links.tqe);
1328 		xpt_action(start_ccb);
1329 		s = splbio();
1330 		ccbh = TAILQ_FIRST(&softc->work_queue);
1331 		splx(s);
1332 	}
1333 	if (ccbh != NULL)
1334 		targrunqueue(periph, softc);
1335 }
1336 
1337 static void
1338 targdone(struct cam_periph *periph, union ccb *done_ccb)
1339 {
1340 	struct targ_softc *softc;
1341 
1342 	softc = (struct targ_softc *)periph->softc;
1343 
1344 	if (done_ccb->ccb_h.ccb_flags == TARG_CCB_WAITING) {
1345 		/* Caller will release the CCB */
1346 		wakeup(&done_ccb->ccb_h.cbfcnp);
1347 		return;
1348 	}
1349 
1350 	switch (done_ccb->ccb_h.func_code) {
1351 	case XPT_ACCEPT_TARGET_IO:
1352 	{
1353 		struct ccb_accept_tio *atio;
1354 		struct targ_cmd_desc *descr;
1355 		struct initiator_state *istate;
1356 		u_int8_t *cdb;
1357 
1358 		atio = &done_ccb->atio;
1359 		descr = (struct targ_cmd_desc*)atio->ccb_h.ccb_descr;
1360 		istate = &softc->istate[atio->init_id];
1361 		cdb = atio->cdb_io.cdb_bytes;
1362 		if (softc->state == TARG_STATE_TEARDOWN
1363 		 || atio->ccb_h.status == CAM_REQ_ABORTED) {
1364 			freedescr(descr);
1365 			free(done_ccb, M_DEVBUF);
1366 			return;
1367 		}
1368 
1369 		if (istate->pending_ca == 0
1370 		 && istate->pending_ua != 0
1371 		 && cdb[0] != INQUIRY) {
1372 			/* Pending UA, tell initiator */
1373 			/* Direction is always relative to the initiator */
1374 			istate->pending_ca = CA_UNIT_ATTN;
1375 			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1376 			atio->ccb_h.flags |= CAM_DIR_NONE;
1377 			descr->data_resid = 0;
1378 			descr->data_increment = 0;
1379 			descr->timeout = 5 * 1000;
1380 			descr->status = SCSI_STATUS_CHECK_COND;
1381 		} else {
1382 			/*
1383 			 * Save the current CA and UA status so
1384 			 * they can be used by this command.
1385 			 */
1386 			ua_types pending_ua;
1387 			ca_types pending_ca;
1388 
1389 			pending_ua = istate->pending_ua;
1390 			pending_ca = istate->pending_ca;
1391 
1392 			/*
1393 			 * As per the SCSI2 spec, any command that occurs
1394 			 * after a CA is reported clears the CA.  If the
1395 			 * command is not an inquiry, we are also supposed
1396 			 * to clear the UA condition, if any, that caused
1397 			 * the CA to occur assuming the UA is not a
1398 			 * persistent state.
1399 			 */
1400 			istate->pending_ca = CA_NONE;
1401 			if ((pending_ca
1402 			   & (CA_CMD_SENSE|CA_UNIT_ATTN)) == CA_UNIT_ATTN
1403 			 && cdb[0] != INQUIRY)
1404 				istate->pending_ua = UA_NONE;
1405 
1406 			/*
1407 			 * Determine the type of incoming command and
1408 			 * setup our buffer for a response.
1409 			 */
1410 			switch (cdb[0]) {
1411 			case INQUIRY:
1412 			{
1413 				struct scsi_inquiry *inq;
1414 				struct scsi_sense_data *sense;
1415 
1416 				inq = (struct scsi_inquiry *)cdb;
1417 				sense = &istate->sense_data;
1418 				CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1419 					  ("Saw an inquiry!\n"));
1420 				/*
1421 				 * Validate the command.  We don't
1422 				 * support any VPD pages, so complain
1423 				 * if EVPD is set.
1424 				 */
1425 				if ((inq->byte2 & SI_EVPD) != 0
1426 				 || inq->page_code != 0) {
1427 					istate->pending_ca = CA_CMD_SENSE;
1428 					atio->ccb_h.flags &= ~CAM_DIR_MASK;
1429 					atio->ccb_h.flags |= CAM_DIR_NONE;
1430 					descr->data_resid = 0;
1431 					descr->data_increment = 0;
1432 					descr->status = SCSI_STATUS_CHECK_COND;
1433 					fill_sense(softc, atio->init_id,
1434 						   SSD_CURRENT_ERROR,
1435 						   SSD_KEY_ILLEGAL_REQUEST,
1436 						   /*asc*/0x24, /*ascq*/0x00);
1437 					sense->extra_len =
1438 						offsetof(struct scsi_sense_data,
1439 							 extra_bytes)
1440 					      - offsetof(struct scsi_sense_data,
1441 							 extra_len);
1442 				}
1443 
1444 				if ((inq->byte2 & SI_EVPD) != 0) {
1445 					sense->sense_key_spec[0] =
1446 					    SSD_SCS_VALID|SSD_FIELDPTR_CMD
1447 					   |SSD_BITPTR_VALID| /*bit value*/1;
1448 					sense->sense_key_spec[1] = 0;
1449 					sense->sense_key_spec[2] =
1450 					    offsetof(struct scsi_inquiry,
1451 						     byte2);
1452 					break;
1453 				} else if (inq->page_code != 0) {
1454 					sense->sense_key_spec[0] =
1455 					    SSD_SCS_VALID|SSD_FIELDPTR_CMD;
1456 					sense->sense_key_spec[1] = 0;
1457 					sense->sense_key_spec[2] =
1458 					    offsetof(struct scsi_inquiry,
1459 						     page_code);
1460 					break;
1461 				}
1462 				/*
1463 				 * Direction is always relative
1464 				 * to the initiator.
1465 				 */
1466 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1467 				atio->ccb_h.flags |= CAM_DIR_IN;
1468 				descr->data = softc->inq_data;
1469 				descr->data_resid = MIN(softc->inq_data_len,
1470 						       inq->length);
1471 				descr->data_increment = descr->data_resid;
1472 				descr->timeout = 5 * 1000;
1473 				descr->status = SCSI_STATUS_OK;
1474 				break;
1475 			}
1476 			case TEST_UNIT_READY:
1477 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1478 				atio->ccb_h.flags |= CAM_DIR_NONE;
1479 				descr->data_resid = 0;
1480 				descr->data_increment = 0;
1481 				descr->timeout = 5 * 1000;
1482 				descr->status = SCSI_STATUS_OK;
1483 				break;
1484 			case REQUEST_SENSE:
1485 			{
1486 				struct scsi_request_sense *rsense;
1487 				struct scsi_sense_data *sense;
1488 
1489 				rsense = (struct scsi_request_sense *)cdb;
1490 				sense = &istate->sense_data;
1491 				if (pending_ca == 0) {
1492 					fill_sense(softc, atio->init_id,
1493 						   SSD_CURRENT_ERROR,
1494 						   SSD_KEY_NO_SENSE, 0x00,
1495 						   0x00);
1496 					CAM_DEBUG(periph->path,
1497 						  CAM_DEBUG_PERIPH,
1498 						  ("No pending CA!\n"));
1499 				} else if (pending_ca == CA_UNIT_ATTN) {
1500 					u_int ascq;
1501 
1502 					if (pending_ua == UA_POWER_ON)
1503 						ascq = 0x1;
1504 					else
1505 						ascq = 0x2;
1506 					fill_sense(softc, atio->init_id,
1507 						   SSD_CURRENT_ERROR,
1508 						   SSD_KEY_UNIT_ATTENTION,
1509 						   0x29, ascq);
1510 					CAM_DEBUG(periph->path,
1511 						  CAM_DEBUG_PERIPH,
1512 						  ("Pending UA!\n"));
1513 				}
1514 				/*
1515 				 * Direction is always relative
1516 				 * to the initiator.
1517 				 */
1518 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1519 				atio->ccb_h.flags |= CAM_DIR_IN;
1520 				descr->data = sense;
1521 				descr->data_resid =
1522 			 		offsetof(struct scsi_sense_data,
1523 						 extra_len)
1524 				      + sense->extra_len;
1525 				descr->data_resid = MIN(descr->data_resid,
1526 						       rsense->length);
1527 				descr->data_increment = descr->data_resid;
1528 				descr->timeout = 5 * 1000;
1529 				descr->status = SCSI_STATUS_OK;
1530 				break;
1531 			}
1532 			case RECEIVE:
1533 			case SEND:
1534 			{
1535 				struct scsi_send_receive *sr;
1536 
1537 				sr = (struct scsi_send_receive *)cdb;
1538 
1539 				/*
1540 				 * Direction is always relative
1541 				 * to the initiator.
1542 				 */
1543 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1544 				descr->data_resid = scsi_3btoul(sr->xfer_len);
1545 				descr->timeout = 5 * 1000;
1546 				descr->status = SCSI_STATUS_OK;
1547 				if (cdb[0] == SEND) {
1548 					atio->ccb_h.flags |= CAM_DIR_OUT;
1549 					CAM_DEBUG(periph->path,
1550 						  CAM_DEBUG_PERIPH,
1551 						  ("Saw a SEND!\n"));
1553 					TAILQ_INSERT_TAIL(&softc->snd_ccb_queue,
1554 							  &atio->ccb_h,
1555 							  periph_links.tqe);
1556 					selwakeup(&softc->snd_select);
1557 				} else {
1558 					atio->ccb_h.flags |= CAM_DIR_IN;
1559 					CAM_DEBUG(periph->path,
1560 						  CAM_DEBUG_PERIPH,
1561 						  ("Saw a RECEIVE!\n"));
1562 					TAILQ_INSERT_TAIL(&softc->rcv_ccb_queue,
1563 							  &atio->ccb_h,
1564 							  periph_links.tqe);
1565 					selwakeup(&softc->rcv_select);
1566 				}
1567 				/*
1568 				 * Attempt to satisfy this request with
1569 				 * a user buffer.
1570 				 */
1571 				targrunqueue(periph, softc);
1572 				return;
1573 			}
1574 			default:
1575 				/*
1576 				 * Queue for consumption by our userland
1577 				 * counterpart and  transition to the exception
1578 				 * state.
1579 				 */
1580 				TAILQ_INSERT_TAIL(&softc->unknown_atio_queue,
1581 						  &atio->ccb_h,
1582 						  periph_links.tqe);
1583 				softc->exceptions |= TARG_EXCEPT_UNKNOWN_ATIO;
1584 				targfireexception(periph, softc);
1585 				return;
1586 			}
1587 		}
1588 
1589 		/* Queue us up to receive a Continue Target I/O ccb. */
1590 		TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1591 				  periph_links.tqe);
1592 		xpt_schedule(periph, /*priority*/1);
1593 		break;
1594 	}
1595 	case XPT_CONT_TARGET_IO:
1596 	{
1597 		struct ccb_scsiio *csio;
1598 		struct ccb_accept_tio *atio;
1599 		struct targ_cmd_desc *desc;
1600 		struct buf *bp;
1601 		int    error;
1602 
1603 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1604 			  ("Received completed CTIO\n"));
1605 		csio = &done_ccb->csio;
1606 		atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio;
1607 		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1608 
1609 		TAILQ_REMOVE(&softc->pending_queue, &done_ccb->ccb_h,
1610 			     periph_links.tqe);
1611 
1612 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1613 			printf("CCB with error %x\n", done_ccb->ccb_h.status);
1614 			error = targerror(done_ccb, 0, 0);
1615 			if (error == ERESTART)
1616 				break;
1617 			/*
1618 			 * Right now we don't need to do anything
1619 			 * prior to unfreezing the queue...
1620 			 */
1621 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1622 				printf("Releasing Queue\n");
1623 				cam_release_devq(done_ccb->ccb_h.path,
1624 						 /*relsim_flags*/0,
1625 						 /*reduction*/0,
1626 						 /*timeout*/0,
1627 						 /*getcount_only*/0);
1628 			}
1629 		} else
1630 			error = 0;
1631 		desc->data_increment -= csio->resid;
1632 		desc->data_resid -= desc->data_increment;
1633 		if ((bp = desc->bp) != NULL) {
1634 
1635 			bp->b_resid -= desc->data_increment;
1636 			bp->b_error = error;
1637 
1638 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1639 				  ("Buffer I/O Completed - Resid %ld:%d\n",
1640 				   bp->b_resid, desc->data_resid));
1641 			/*
1642 			 * Send the buffer back to the client if
1643 			 * either the command has completed or all
1644 			 * buffer space has been consumed.
1645 			 */
1646 			if (desc->data_resid == 0
1647 			 || bp->b_resid == 0
1648 			 || error != 0) {
1649 				if (bp->b_resid != 0)
1650 					/* Short transfer */
1651 					bp->b_flags |= B_ERROR;
1652 
1653 				CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1654 					  ("Completing a buffer\n"));
1655 				biodone(bp);
1656 				desc->bp = NULL;
1657 			}
1658 		}
1659 
1660 		xpt_release_ccb(done_ccb);
1661 		if (softc->state != TARG_STATE_TEARDOWN) {
1662 
1663 			if (desc->data_resid == 0) {
1664 				/*
1665 				 * Send the original accept TIO back to the
1666 				 * controller to handle more work.
1667 				 */
1668 				CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1669 					  ("Returning ATIO to target\n"));
1670 				xpt_action((union ccb *)atio);
1671 				break;
1672 			}
1673 
1674 			/* Queue us up for another buffer */
1675 			if (atio->cdb_io.cdb_bytes[0] == SEND) {
1676 				if (desc->bp != NULL)
1677 					TAILQ_INSERT_HEAD(
1678 						&softc->snd_buf_queue.queue,
1679 						bp, b_act);
1680 				TAILQ_INSERT_HEAD(&softc->snd_ccb_queue,
1681 						  &atio->ccb_h,
1682 						  periph_links.tqe);
1683 			} else {
1684 				if (desc->bp != NULL)
1685 					TAILQ_INSERT_HEAD(
1686 						&softc->rcv_buf_queue.queue,
1687 						bp, b_act);
1688 				TAILQ_INSERT_HEAD(&softc->rcv_ccb_queue,
1689 						  &atio->ccb_h,
1690 						  periph_links.tqe);
1691 			}
1692 			desc->bp = NULL;
1693 			targrunqueue(periph, softc);
1694 		} else {
1695 			if (desc->bp != NULL) {
1696 				bp->b_flags |= B_ERROR;
1697 				bp->b_error = ENXIO;
1698 				biodone(bp);
1699 			}
1700 			freedescr(desc);
1701 			free(atio, M_DEVBUF);
1702 		}
1703 		break;
1704 	}
1705 	case XPT_IMMED_NOTIFY:
1706 	{
1707 		int frozen;
1708 
1709 		frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
1710 		if (softc->state == TARG_STATE_TEARDOWN) {
1711 			SLIST_REMOVE(&softc->immed_notify_slist,
1712 				     &done_ccb->ccb_h, ccb_hdr,
1713 				     periph_links.sle);
1714 			free(done_ccb, M_DEVBUF);
1715 		} else if (done_ccb->ccb_h.status == CAM_REQ_ABORTED) {
1716 			free(done_ccb, M_DEVBUF);
1717 		} else {
1718 			printf("Saw event %x:%x\n", done_ccb->ccb_h.status,
1719 			       done_ccb->cin.message_args[0]);
1720 			/* Process error condition. */
1721 			targinoterror(periph, softc, &done_ccb->cin);
1722 
1723 			/* Requeue for another immediate event */
1724 			xpt_action(done_ccb);
1725 		}
1726 		if (frozen != 0)
1727 			cam_release_devq(periph->path,
1728 					 /*relsim_flags*/0,
1729 					 /*opening reduction*/0,
1730 					 /*timeout*/0,
1731 					 /*getcount_only*/0);
1732 		break;
1733 	}
1734 	default:
1735 		panic("targdone: Impossible xpt opcode %x encountered.",
1736 		      done_ccb->ccb_h.func_code);
1737 		/* NOTREACHED */
1738 		break;
1739 	}
1740 }
1741 
1742 /*
1743  * Transition to the exception state and notify our symbiotic
1744  * userland process of the change.
1745  */
1746 static void
1747 targfireexception(struct cam_periph *periph, struct targ_softc *softc)
1748 {
1749 	/*
1750 	 * return all pending buffers with short read/write status so our
1751 	 * process unblocks, and do a selwakeup on any process queued
1752 	 * waiting for reads or writes.  When the selwakeup is performed,
1753 	 * the waking process will wakeup, call our poll routine again,
1754 	 * and pick up the exception.
1755 	 */
1756 	struct buf *bp;
1757 
1758 	if (softc->state != TARG_STATE_NORMAL)
1759 		/* Already either tearing down or in exception state */
1760 		return;
1761 
1762 	softc->state = TARG_STATE_EXCEPTION;
1763 
1764 	while ((bp = bufq_first(&softc->snd_buf_queue)) != NULL) {
1765 		bufq_remove(&softc->snd_buf_queue, bp);
1766 		bp->b_flags |= B_ERROR;
1767 		biodone(bp);
1768 	}
1769 
1770 	while ((bp = bufq_first(&softc->rcv_buf_queue)) != NULL) {
1771 		bufq_remove(&softc->rcv_buf_queue, bp);
1772 		bp->b_flags |= B_ERROR;
1773 		biodone(bp);
1774 	}
1775 
1776 	selwakeup(&softc->snd_select);
1777 	selwakeup(&softc->rcv_select);
1778 }
1779 
1780 static void
1781 targinoterror(struct cam_periph *periph, struct targ_softc *softc,
1782 	      struct ccb_immed_notify *inot)
1783 {
1784 	cam_status status;
1785 	int sense;
1786 
1787 	status = inot->ccb_h.status;
1788 	sense = (status & CAM_AUTOSNS_VALID) != 0;
1789 	status &= CAM_STATUS_MASK;
1790 	switch (status) {
1791 	case CAM_SCSI_BUS_RESET:
1792 		set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD,
1793 					UA_BUS_RESET);
1794 		abort_pending_transactions(periph,
1795 					   /*init_id*/CAM_TARGET_WILDCARD,
1796 					   TARG_TAG_WILDCARD, EINTR,
1797 					   /*to_held_queue*/FALSE);
1798 		softc->exceptions |= TARG_EXCEPT_BUS_RESET_SEEN;
1799 		targfireexception(periph, softc);
1800 		break;
1801 	case CAM_BDR_SENT:
1802 		set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD,
1803 					UA_BDR);
1804 		abort_pending_transactions(periph, CAM_TARGET_WILDCARD,
1805 					   TARG_TAG_WILDCARD, EINTR,
1806 					   /*to_held_queue*/FALSE);
1807 		softc->exceptions |= TARG_EXCEPT_BDR_RECEIVED;
1808 		targfireexception(periph, softc);
1809 		break;
1810 	case CAM_MESSAGE_RECV:
1811 		switch (inot->message_args[0]) {
1812 		case MSG_INITIATOR_DET_ERR:
1813 			break;
1814 		case MSG_ABORT:
1815 			break;
1816 		case MSG_BUS_DEV_RESET:
1817 			break;
1818 		case MSG_ABORT_TAG:
1819 			break;
1820 		case MSG_CLEAR_QUEUE:
1821 			break;
1822 		case MSG_TERM_IO_PROC:
1823 			break;
1824 		default:
1825 			break;
1826 		}
1827 		break;
1828 	default:
1829 		break;
1830 	}
1831 }
1832 
1833 static int
1834 targerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1835 {
1836 	struct cam_periph *periph;
1837 	struct targ_softc *softc;
1838 	struct ccb_scsiio *csio;
1839 	cam_status status;
1840 	int frozen;
1841 	int sense;
1842 	int error;
1843 	int on_held_queue;
1844 
1845 	periph = xpt_path_periph(ccb->ccb_h.path);
1846 	softc = (struct targ_softc *)periph->softc;
1847 	status = ccb->ccb_h.status;
1848 	sense = (status & CAM_AUTOSNS_VALID) != 0;
1849 	frozen = (status & CAM_DEV_QFRZN) != 0;
1850 	status &= CAM_STATUS_MASK;
1851 	on_held_queue = FALSE;
1852 	csio = &ccb->csio;
1853 	switch (status) {
1854 	case CAM_REQ_ABORTED:
1855 		printf("Request Aborted!\n");
1856 		if ((ccb->ccb_h.ccb_flags & TARG_CCB_ABORT_TO_HELDQ) != 0) {
1857 			struct initiator_state *istate;
1858 
1859 			/*
1860 			 * Place this CCB into the initiators
1861 			 * 'held' queue until the pending CA is cleared.
1862 			 * If there is no CA pending, reissue immediately.
1863 			 */
1864 			istate = &softc->istate[ccb->csio.init_id];
1865 			if (istate->pending_ca == 0) {
1866 				ccb->ccb_h.ccb_flags = TARG_CCB_NONE;
1867 				xpt_action(ccb);
1868 			} else {
1869 				ccb->ccb_h.ccb_flags = TARG_CCB_HELDQ;
1870 				TAILQ_INSERT_TAIL(&softc->pending_queue,
1871 						  &ccb->ccb_h,
1872 						  periph_links.tqe);
1873 			}
1874 			/* The command will be retried at a later time. */
1875 			on_held_queue = TRUE;
1876 			error = ERESTART;
1877 			break;
1878 		}
1879 		/* FALLTHROUGH */
1880 	case CAM_SCSI_BUS_RESET:
1881 	case CAM_BDR_SENT:
1882 	case CAM_REQ_TERMIO:
1883 	case CAM_CMD_TIMEOUT:
1884 		/* Assume we did not send any data */
1885 		csio->resid = csio->dxfer_len;
1886 		error = EIO;
1887 		break;
1888 	case CAM_SEL_TIMEOUT:
1889 		if (ccb->ccb_h.retry_count > 0) {
1890 			ccb->ccb_h.retry_count--;
1891 			error = ERESTART;
1892 		} else {
1893 			/* "Select or reselect failure" */
1894 			csio->resid = csio->dxfer_len;
1895 			fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
1896 				   SSD_KEY_HARDWARE_ERROR, 0x45, 0x00);
1897 			set_contingent_allegiance_cond(periph,
1898 						       csio->init_id,
1899 						       CA_CMD_SENSE);
1900 			error = EIO;
1901 		}
1902 		break;
1903 	case CAM_UNCOR_PARITY:
1904 		/* "SCSI parity error" */
1905 		fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
1906 			   SSD_KEY_HARDWARE_ERROR, 0x47, 0x00);
1907 		set_contingent_allegiance_cond(periph, csio->init_id,
1908 					       CA_CMD_SENSE);
1909 		csio->resid = csio->dxfer_len;
1910 		error = EIO;
1911 		break;
1912 	case CAM_NO_HBA:
1913 		csio->resid = csio->dxfer_len;
1914 		error = ENXIO;
1915 		break;
1916 	case CAM_SEQUENCE_FAIL:
1917 		if (sense != 0) {
1918 			copy_sense(softc, csio);
1919 			set_contingent_allegiance_cond(periph,
1920 						       csio->init_id,
1921 						       CA_CMD_SENSE);
1922 		}
1923 		csio->resid = csio->dxfer_len;
1924 		error = EIO;
1925 		break;
1926 	case CAM_IDE:
1927 		/* "Initiator detected error message received" */
1928 		fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
1929 			   SSD_KEY_HARDWARE_ERROR, 0x48, 0x00);
1930 		set_contingent_allegiance_cond(periph, csio->init_id,
1931 					       CA_CMD_SENSE);
1932 		csio->resid = csio->dxfer_len;
1933 		error = EIO;
1934 		break;
1935 	case CAM_REQUEUE_REQ:
1936 		printf("Requeue Request!\n");
1937 		error = ERESTART;
1938 		break;
1939 	default:
1940 		csio->resid = csio->dxfer_len;
1941 		error = EIO;
1942 		panic("targerror: Unexpected status %x encountered", status);
1943 		/* NOTREACHED */
1944 	}
1945 
1946 	if (error == ERESTART || error == 0) {
1947 		/* Clear the QFRZN flag as we will release the queue */
1948 		if (frozen != 0)
1949 			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1950 
1951 		if (error == ERESTART && !on_held_queue)
1952 			xpt_action(ccb);
1953 
1954 		if (frozen != 0)
1955 			cam_release_devq(ccb->ccb_h.path,
1956 					 /*relsim_flags*/0,
1957 					 /*opening reduction*/0,
1958 					 /*timeout*/0,
1959 					 /*getcount_only*/0);
1960 	}
1961 	return (error);
1962 }
1963 
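/*
 * Allocate a zeroed command descriptor and its MAX_BUF_SIZE backing store.
 * Returns NULL if either allocation fails.
 */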
1964 static struct targ_cmd_desc*
1965 allocdescr(void)
1966 {
1967 	struct targ_cmd_desc* descr;
1968 
1969 	/* Allocate the targ_descr structure */
1970 	descr = (struct targ_cmd_desc *)malloc(sizeof(*descr),
1971 					       M_DEVBUF, M_NOWAIT);
1972 	if (descr == NULL)
1973 		return (NULL);
1974 
1975 	bzero(descr, sizeof(*descr));
1976 
1977 	/* Allocate buffer backing store */
1978 	descr->backing_store = malloc(MAX_BUF_SIZE, M_DEVBUF, M_NOWAIT);
1979 	if (descr->backing_store == NULL) {
1980 		free(descr, M_DEVBUF);
1981 		return (NULL);
1982 	}
1983 	descr->max_size = MAX_BUF_SIZE;
1984 	return (descr);
1985 }
1986 
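/* Release a command descriptor and its backing store. */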
1987 static void
1988 freedescr(struct targ_cmd_desc *descr)
1989 {
1990 	free(descr->backing_store, M_DEVBUF);
1991 	free(descr, M_DEVBUF);
1992 }
1993 
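/*
 * Build fixed-format sense data for the given initiator in its
 * per-initiator state.
 */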
1994 static void
1995 fill_sense(struct targ_softc *softc, u_int initiator_id, u_int error_code,
1996 	   u_int sense_key, u_int asc, u_int ascq)
1997 {
1998 	struct initiator_state *istate;
1999 	struct scsi_sense_data *sense;
2000 
2001 	istate = &softc->istate[initiator_id];
2002 	sense = &istate->sense_data;
2003 	bzero(sense, sizeof(*sense));
2004 	sense->error_code = error_code;
2005 	sense->flags = sense_key;
2006 	sense->add_sense_code = asc;
2007 	sense->add_sense_code_qual = ascq;
2008 
2009 	sense->extra_len = offsetof(struct scsi_sense_data, fru)
2010 			 - offsetof(struct scsi_sense_data, extra_len);
2011 }
2012 
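/*
 * Save the autosense data returned in a CSIO into the per-initiator
 * state, truncating to the smaller of the two sense buffers.
 */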
2013 static void
2014 copy_sense(struct targ_softc *softc, struct ccb_scsiio *csio)
2015 {
2016 	struct initiator_state *istate;
2017 	struct scsi_sense_data *sense;
2018 	size_t copylen;
2019 
2020 	istate = &softc->istate[csio->init_id];
2021 	sense = &istate->sense_data;
2022 	copylen = sizeof(*sense);
2023 	if (copylen > csio->sense_len)
2024 		copylen = csio->sense_len;
2025 	bcopy(&csio->sense_data, sense, copylen);
2026 }
2027 
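/*
 * Post a unit attention condition to a single initiator, or to all
 * initiators when called with the wildcard id.
 */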
2028 static void
2029 set_unit_attention_cond(struct cam_periph *periph,
2030 			u_int initiator_id, ua_types ua)
2031 {
2032 	int start;
2033 	int end;
2034 	struct targ_softc *softc;
2035 
2036 	softc = (struct targ_softc *)periph->softc;
2037 	if (initiator_id == CAM_TARGET_WILDCARD) {
2038 		start = 0;
2039 		end = MAX_INITIATORS - 1;
2040 	} else
2041 		start = end = initiator_id;
2042 
2043 	while (start <= end) {
2044 		softc->istate[start].pending_ua = ua;
2045 		start++;
2046 	}
2047 }
2048 
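/*
 * Post a contingent allegiance condition for an initiator and move its
 * outstanding transactions to the held queue until the condition clears.
 */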
2049 static void
2050 set_contingent_allegiance_cond(struct cam_periph *periph,
2051 			       u_int initiator_id, ca_types ca)
2052 {
2053 	struct targ_softc *softc;
2054 
2055 	softc = (struct targ_softc *)periph->softc;
2056 	softc->istate[initiator_id].pending_ca = ca;
2057 	abort_pending_transactions(periph, initiator_id, TARG_TAG_WILDCARD,
2058 				   /* errno */0, /*to_held_queue*/TRUE);
2059 }
2060 
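/*
 * Abort queued and in-flight transactions that match the given initiator
 * and tag (either may be a wildcard).  Waiting ATIOs are completed with an
 * error or returned to the controller; CTIOs already with the controller
 * are aborted with XPT_ABORT.  When to_held_queue is set, only the
 * in-flight CTIOs are touched and they are flagged so that targerror()
 * parks them on the held queue once the abort completes.
 */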
2061 static void
2062 abort_pending_transactions(struct cam_periph *periph, u_int initiator_id,
2063 			   u_int tag_id, int errno, int to_held_queue)
2064 {
2065 	struct ccb_abort cab;
2066 	struct ccb_queue *atio_queues[3];
2067 	struct targ_softc *softc;
2068 	struct ccb_hdr *ccbh;
2069 	u_int i;
2070 
2071 	softc = (struct targ_softc *)periph->softc;
2072 
2073 	atio_queues[0] = &softc->work_queue;
2074 	atio_queues[1] = &softc->snd_ccb_queue;
2075 	atio_queues[2] = &softc->rcv_ccb_queue;
2076 
2077 	/* First address the ATIOs awaiting resources */
2078 	for (i = 0; i < (sizeof(atio_queues) / sizeof(*atio_queues)); i++) {
2079 		struct ccb_queue *atio_queue;
2080 
2081 		if (to_held_queue) {
2082 			/*
2083 			 * The device queue is frozen anyway, so there
2084 			 * is nothing for us to do.
2085 			 */
2086 			continue;
2087 		}
2088 		atio_queue = atio_queues[i];
2089 		ccbh = TAILQ_FIRST(atio_queue);
2090 		while (ccbh != NULL) {
2091 			struct ccb_accept_tio *atio;
2092 			struct targ_cmd_desc *desc;
2093 
2094 			atio = (struct ccb_accept_tio *)ccbh;
2095 			desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
2096 			ccbh = TAILQ_NEXT(ccbh, periph_links.tqe);
2097 
2098 			/* Only abort the CCBs that match */
2099 			if ((atio->init_id != initiator_id
2100 			  && initiator_id != CAM_TARGET_WILDCARD)
2101 			 || (tag_id != TARG_TAG_WILDCARD
2102 			  && ((atio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
2103 			   || atio->tag_id != tag_id)))
2104 				continue;
2105 
2106 			TAILQ_REMOVE(atio_queue, &atio->ccb_h,
2107 				     periph_links.tqe);
2108 
2109 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
2110 				  ("Aborting ATIO\n"));
2111 			if (desc->bp != NULL) {
2112 				desc->bp->b_flags |= B_ERROR;
2113 				if (softc->state != TARG_STATE_TEARDOWN)
2114 					desc->bp->b_error = errno;
2115 				else
2116 					desc->bp->b_error = ENXIO;
2117 				biodone(desc->bp);
2118 				desc->bp = NULL;
2119 			}
2120 			if (softc->state == TARG_STATE_TEARDOWN) {
2121 				freedescr(desc);
2122 				free(atio, M_DEVBUF);
2123 			} else {
2124 				/* Return the ATIO back to the controller */
2125 				xpt_action((union ccb *)atio);
2126 			}
2127 		}
2128 	}
2129 
2130 	ccbh = TAILQ_FIRST(&softc->pending_queue);
2131 	while (ccbh != NULL) {
2132 		struct ccb_scsiio *csio;
2133 
2134 		csio = (struct ccb_scsiio *)ccbh;
2135 		ccbh = TAILQ_NEXT(ccbh, periph_links.tqe);
2136 
2137 		/* Only abort the CCBs that match */
2138 		if ((csio->init_id != initiator_id
2139 		  && initiator_id != CAM_TARGET_WILDCARD)
2140 		 || (tag_id != TARG_TAG_WILDCARD
2141 		  && ((csio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
2142 		   || csio->tag_id != tag_id)))
2143 			continue;
2144 
2145 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
2146 			  ("Aborting CTIO\n"));
2147 
2148 		TAILQ_REMOVE(&softc->pending_queue, &csio->ccb_h,
2149 			     periph_links.tqe);
2150 
2151 		if (to_held_queue != 0)
2152 			csio->ccb_h.ccb_flags |= TARG_CCB_ABORT_TO_HELDQ;
2153 		xpt_setup_ccb(&cab.ccb_h, csio->ccb_h.path, /*priority*/1);
2154 		cab.abort_ccb = (union ccb *)csio;
2155 		xpt_action((union ccb *)&cab);
2156 		if (cab.ccb_h.status != CAM_REQ_CMP) {
2157 			xpt_print_path(cab.ccb_h.path);
2158 			printf("Unable to abort CCB.  Status %x\n",
2159 			       cab.ccb_h.status);
2160 		}
2161 	}
2162 }
2163