xref: /freebsd/sys/cam/scsi/scsi_target.c (revision 3ff369fed2a08f32dda232c10470b949bef9489f)
1 /*
2  * Implementation of a simple Target Mode SCSI Processor Target driver for CAM.
3  *
4  * Copyright (c) 1998, 1999, 2001 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 
31 #include <sys/param.h>
32 #include <sys/queue.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/types.h>
36 #include <sys/bio.h>
37 #include <sys/conf.h>
38 #include <sys/devicestat.h>
39 #include <sys/malloc.h>
40 #include <sys/poll.h>
41 #include <sys/selinfo.h>
42 #include <sys/uio.h>
43 
44 #include <cam/cam.h>
45 #include <cam/cam_ccb.h>
46 #include <cam/cam_extend.h>
47 #include <cam/cam_periph.h>
48 #include <cam/cam_queue.h>
49 #include <cam/cam_xpt_periph.h>
50 #include <cam/cam_debug.h>
51 
52 #include <cam/scsi/scsi_all.h>
53 #include <cam/scsi/scsi_pt.h>
54 #include <cam/scsi/scsi_targetio.h>
55 #include <cam/scsi/scsi_message.h>
56 
57 typedef enum {
58 	TARG_STATE_NORMAL,
59 	TARG_STATE_EXCEPTION,
60 	TARG_STATE_TEARDOWN
61 } targ_state;
62 
63 typedef enum {
64 	TARG_FLAG_NONE		 = 0x00,
65 	TARG_FLAG_SEND_EOF	 = 0x01,
66 	TARG_FLAG_RECEIVE_EOF	 = 0x02,
67 	TARG_FLAG_LUN_ENABLED	 = 0x04
68 } targ_flags;
69 
70 typedef enum {
71 	TARG_CCB_NONE		= 0x00,
72 	TARG_CCB_WAITING	= 0x01,
73 	TARG_CCB_HELDQ		= 0x02,
74 	TARG_CCB_ABORT_TO_HELDQ = 0x04
75 } targ_ccb_flags;
76 
77 #define MAX_ACCEPT	16
78 #define MAX_IMMEDIATE	16
79 #define MAX_BUF_SIZE	256	/* Max inquiry/sense/mode page transfer */
80 #define MAX_INITIATORS	256	/* includes widest fibre channel for now */
81 
82 #define MIN(a, b) (((a) > (b)) ? (b) : (a))
83 
84 #define TARG_CONTROL_UNIT 0xffff00ff
85 #define TARG_IS_CONTROL_DEV(unit) ((unit) == TARG_CONTROL_UNIT)
86 
87 #define TARG_TAG_WILDCARD ((u_int)~0)
88 
89 /* Offsets into our private CCB area for storing accept information */
90 #define ccb_flags	ppriv_field0
91 #define ccb_descr	ppriv_ptr1
92 
93 /* We stick a pointer to the originating accept TIO in each continue I/O CCB */
94 #define ccb_atio	ppriv_ptr1
95 
96 /*
97  * When we're constructing a unit, we point to the passed-in user inquiry data here.
98  */
99 #define ccb_inq		ppriv_ptr1
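/*
 * All four macros above alias the peripheral-private words of the CCB
 * header; ccb_descr, ccb_atio, and ccb_inq share ppriv_ptr1, so each alias
 * is only meaningful for the CCB type it is used with: ccb_descr on accept
 * TIOs, ccb_atio on continue target I/O CCBs, and ccb_inq on the path
 * inquiry CCB handed to targctor().
 */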
100 
101 struct targ_softc {
102 	/* CTIOs pending on the controller */
103 	struct		ccb_queue pending_queue;
104 
105 	/* ATIOs awaiting CTIO resources from the XPT */
106 	struct		ccb_queue work_queue;
107 
108 	/*
109 	 * ATIOs for SEND operations waiting for 'write'
110 	 * buffer resources from our userland daemon.
111 	 */
112 	struct		ccb_queue snd_ccb_queue;
113 
114 	/*
115 	 * ATIOs for RCV operations waiting for 'read'
116 	 * buffer resources from our userland daemon.
117 	 */
118 	struct		ccb_queue rcv_ccb_queue;
119 
120 	/*
121 	 * ATIOs for commands unknown to the kernel driver.
122 	 * These are queued for the userland daemon to
123 	 * consume.
124 	 */
125 	struct		ccb_queue unknown_atio_queue;
126 
127 	/*
128 	 * Userland buffers for SEND commands waiting for
129 	 * SEND ATIOs to be queued by an initiator.
130 	 */
131 	struct		bio_queue_head snd_bio_queue;
132 
133 	/*
134 	 * Userland buffers for RCV commands waiting for
135 	 * RCV ATIOs to be queued by an initiator.
136 	 */
137 	struct		bio_queue_head rcv_bio_queue;
138 	struct		devstat device_stats;
139 	dev_t		targ_dev;
140 	struct		selinfo snd_select;
141 	struct		selinfo rcv_select;
142 	targ_state	state;
143 	targ_flags	flags;
144 	targ_exception	exceptions;
145 	u_int		init_level;
146 	u_int		inq_data_len;
147 	struct		scsi_inquiry_data *inq_data;
148 	struct		ccb_accept_tio *accept_tio_list;
149 	struct		ccb_hdr_slist immed_notify_slist;
150 	struct		initiator_state istate[MAX_INITIATORS];
151 };
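/*
 * Command flow through the queues above: targdone() classifies each
 * completed accept TIO; INQUIRY, TEST UNIT READY, and REQUEST SENSE are
 * emulated in the kernel and go straight to work_queue, SEND and RECEIVE
 * wait on snd_ccb_queue/rcv_ccb_queue for a matching user buffer, and
 * anything else lands on unknown_atio_queue for the userland daemon.
 * targrunqueue() pairs waiting ATIOs with buffers from the bio queues, and
 * targstart() converts them into continue target I/O CCBs that remain on
 * pending_queue until the SIM completes them.
 */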
152 
153 struct targ_cmd_desc {
154 	struct	  ccb_accept_tio* atio_link;
155 	u_int	  data_resid;	/* How much left to transfer */
156 	u_int	  data_increment;/* Amount to send before next disconnect */
157 	void*	  data;		/* The data. Can be from backing_store or not */
158 	void*	  backing_store;/* Backing store allocated for this descriptor*/
159 	struct	  bio *bp;	/* Buffer for this transfer */
160 	u_int	  max_size;	/* Size of backing_store */
161 	u_int32_t timeout;
162 	u_int32_t
163 		user_atio	: 1, /* user ATIO (will define last CTIO) */
164 		status		: 8; /* Status to return to initiator */
165 };
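/*
 * For a descriptor, data_resid is what remains for the whole command while
 * data_increment is the amount the next CTIO will carry (at most one user
 * buffer's worth).  For kernel-emulated commands (user_atio == 0),
 * targstart() sets CAM_SEND_STATUS once data_resid == data_increment,
 * since the next transfer completes the command.
 */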
166 
167 static	d_open_t	targopen;
168 static	d_close_t	targclose;
169 static	d_read_t	targread;
170 static	d_write_t	targwrite;
171 static	d_ioctl_t	targioctl;
172 static	d_poll_t	targpoll;
173 static	d_strategy_t	targstrategy;
174 
175 #define TARG_CDEV_MAJOR	65
176 static struct cdevsw targ_cdevsw = {
177 	/* open */	targopen,
178 	/* close */	targclose,
179 	/* read */	targread,
180 	/* write */	targwrite,
181 	/* ioctl */	targioctl,
182 	/* poll */	targpoll,
183 	/* mmap */	nommap,
184 	/* strategy */	targstrategy,
185 	/* name */	"targ",
186 	/* maj */	TARG_CDEV_MAJOR,
187 	/* dump */	nodump,
188 	/* psize */	nopsize,
189 	/* flags */	0,
190 };
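/*
 * Two kinds of nodes share this cdevsw: the control node "targ.ctl"
 * (minor TARG_CONTROL_UNIT) created in targinit(), which only services the
 * instance allocation/free ioctls, and one "targ%d" node per allocated
 * instance, created in targctor(), which carries the read/write/poll/ioctl
 * traffic of the userland daemon.
 */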
191 
192 static int		targsendccb(struct cam_periph *periph, union ccb *ccb,
193 				    union ccb *inccb);
194 static periph_init_t	targinit;
195 static void		targasync(void *callback_arg, u_int32_t code,
196 				struct cam_path *path, void *arg);
197 static int		targallocinstance(void *, u_long);
198 static int		targfreeinstance(struct ioc_alloc_unit *);
199 static cam_status	targenlun(struct cam_periph *periph);
200 static cam_status	targdislun(struct cam_periph *periph);
201 static periph_ctor_t	targctor;
202 static periph_dtor_t	targdtor;
203 static void		targrunqueue(struct cam_periph *periph,
204 				     struct targ_softc *softc);
205 static periph_start_t	targstart;
206 static void		targdone(struct cam_periph *periph,
207 				 union ccb *done_ccb);
208 static void		targfireexception(struct cam_periph *periph,
209 					  struct targ_softc *softc);
210 static void		targinoterror(struct cam_periph *periph,
211 				      struct targ_softc *softc,
212 				      struct ccb_immed_notify *inot);
213 static  int		targerror(union ccb *ccb, u_int32_t cam_flags,
214 				  u_int32_t sense_flags);
215 static struct targ_cmd_desc*	allocdescr(void);
216 static void		freedescr(struct targ_cmd_desc *buf);
217 static void		fill_sense(struct targ_softc *softc,
218 				   u_int initiator_id, u_int error_code,
219 				   u_int sense_key, u_int asc, u_int ascq);
220 static void		copy_sense(struct targ_softc *softc,
221 				   struct initiator_state *istate,
222 				   u_int8_t *sense_buffer, size_t sense_len);
223 static void	set_unit_attention_cond(struct cam_periph *periph,
224 					u_int initiator_id, ua_types ua);
225 static void	set_ca_condition(struct cam_periph *periph,
226 				 u_int initiator_id, ca_types ca);
227 static void	abort_pending_transactions(struct cam_periph *periph,
228 					   u_int initiator_id, u_int tag_id,
229 					   int errno, int to_held_queue);
230 
231 static struct periph_driver targdriver =
232 {
233 	targinit, "targ",
234 	TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
235 };
236 
237 PERIPHDRIVER_DECLARE(targ, targdriver);
238 
239 static struct extend_array *targperiphs;
240 static dev_t targ_ctl_dev;
241 
242 static void
243 targinit(void)
244 {
245 	/*
246 	 * Create our extend array for storing the devices we attach to.
247 	 */
248 	targperiphs = cam_extend_new();
249 	if (targperiphs == NULL) {
250 		printf("targ: Failed to alloc extend array!\n");
251 		return;
252 	}
253 	targ_ctl_dev = make_dev(&targ_cdevsw, TARG_CONTROL_UNIT, UID_ROOT,
254 	    GID_OPERATOR, 0600, "%s.ctl", "targ");
255 	if (targ_ctl_dev == (dev_t) 0) {
256 		printf("targ: failed to create control dev\n");
257 	}
258 }
259 
260 static void
261 targasync(void *callback_arg, u_int32_t code,
262 	  struct cam_path *path, void *arg)
263 {
264 	struct cam_periph *periph;
265 	struct targ_softc *softc;
266 
267 	periph = (struct cam_periph *)callback_arg;
268 	softc = (struct targ_softc *)periph->softc;
269 	switch (code) {
270 	case AC_PATH_DEREGISTERED:
271 	{
272 		/* XXX Implement */
273 		break;
274 	}
275 	default:
276 		break;
277 	}
278 }
279 
280 /* Attempt to enable our lun */
281 static cam_status
282 targenlun(struct cam_periph *periph)
283 {
284 	union ccb immed_ccb;
285 	struct targ_softc *softc;
286 	cam_status status;
287 	int i;
288 
289 	softc = (struct targ_softc *)periph->softc;
290 
291 	if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0)
292 		return (CAM_REQ_CMP);
293 
294 	xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, /*priority*/1);
295 	immed_ccb.ccb_h.func_code = XPT_EN_LUN;
296 
297 	/* Don't need support for any vendor specific commands */
298 	immed_ccb.cel.grp6_len = 0;
299 	immed_ccb.cel.grp7_len = 0;
300 	immed_ccb.cel.enable = 1;
301 	xpt_action(&immed_ccb);
302 	status = immed_ccb.ccb_h.status;
303 	if (status != CAM_REQ_CMP) {
304 		xpt_print_path(periph->path);
305 		printf("targenlun - Enable Lun Rejected with status 0x%x\n",
306 		       status);
307 		return (status);
308 	}
309 
310 	softc->flags |= TARG_FLAG_LUN_ENABLED;
311 
312 	/*
313 	 * Build up a buffer of accept target I/O
314 	 * operations for incoming selections.
315 	 */
316 	for (i = 0; i < MAX_ACCEPT; i++) {
317 		struct ccb_accept_tio *atio;
318 
319 		atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_DEVBUF,
320 						      M_NOWAIT);
321 		if (atio == NULL) {
322 			status = CAM_RESRC_UNAVAIL;
323 			break;
324 		}
325 
326 		atio->ccb_h.ccb_descr = allocdescr();
327 
328 		if (atio->ccb_h.ccb_descr == NULL) {
329 			free(atio, M_DEVBUF);
330 			status = CAM_RESRC_UNAVAIL;
331 			break;
332 		}
333 
334 		xpt_setup_ccb(&atio->ccb_h, periph->path, /*priority*/1);
335 		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
336 		atio->ccb_h.cbfcnp = targdone;
337 		atio->ccb_h.ccb_flags = TARG_CCB_NONE;
338 		xpt_action((union ccb *)atio);
339 		status = atio->ccb_h.status;
340 		if (status != CAM_REQ_INPROG) {
341 			xpt_print_path(periph->path);
342 			printf("Queue of atio failed\n");
343 			freedescr(atio->ccb_h.ccb_descr);
344 			free(atio, M_DEVBUF);
345 			break;
346 		}
347 		((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
348 		    softc->accept_tio_list;
349 		softc->accept_tio_list = atio;
350 	}
351 
352 	if (i == 0) {
353 		xpt_print_path(periph->path);
354 		printf("targenlun - Could not allocate accept tio CCBs: "
355 		       "status = 0x%x\n", status);
356 		targdislun(periph);
357 		return (CAM_REQ_CMP_ERR);
358 	}
359 
360 	/*
361 	 * Build up a buffer of immediate notify CCBs
362 	 * so the SIM can tell us of asynchronous target mode events.
363 	 */
364 	for (i = 0; i < MAX_ACCEPT; i++) {
365 		struct ccb_immed_notify *inot;
366 
367 		inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_DEVBUF,
368 						        M_NOWAIT);
369 
370 		if (inot == NULL) {
371 			status = CAM_RESRC_UNAVAIL;
372 			break;
373 		}
374 
375 		xpt_setup_ccb(&inot->ccb_h, periph->path, /*priority*/1);
376 		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
377 		inot->ccb_h.cbfcnp = targdone;
378 		SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
379 				  periph_links.sle);
380 		xpt_action((union ccb *)inot);
381 	}
382 
383 	if (i == 0) {
384 		xpt_print_path(periph->path);
385 		printf("targenlun - Could not allocate immediate notify CCBs: "
386 		       "status = 0x%x\n", status);
387 		targdislun(periph);
388 		return (CAM_REQ_CMP_ERR);
389 	}
390 
391 	return (CAM_REQ_CMP);
392 }
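/*
 * The accept TIOs and immediate notify CCBs queued above stay with the SIM
 * for as long as the LUN is enabled; completed ATIOs are resubmitted by
 * targdone() and the whole pool is reclaimed via XPT_ABORT in targdislun().
 */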
393 
394 static cam_status
395 targdislun(struct cam_periph *periph)
396 {
397 	union ccb ccb;
398 	struct targ_softc *softc;
399 	struct ccb_accept_tio* atio;
400 	struct ccb_hdr *ccb_h;
401 
402 	softc = (struct targ_softc *)periph->softc;
403 	if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0)
404 		return CAM_REQ_CMP;
405 
406 	/* XXX Block for Continue I/O completion */
407 
408 	/* Kill off all ACCEPT and IMMEDIATE CCBs */
409 	while ((atio = softc->accept_tio_list) != NULL) {
410 
411 		softc->accept_tio_list =
412 		    ((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link;
413 		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
414 		ccb.cab.ccb_h.func_code = XPT_ABORT;
415 		ccb.cab.abort_ccb = (union ccb *)atio;
416 		xpt_action(&ccb);
417 	}
418 
419 	while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) {
420 		SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle);
421 		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
422 		ccb.cab.ccb_h.func_code = XPT_ABORT;
423 		ccb.cab.abort_ccb = (union ccb *)ccb_h;
424 		xpt_action(&ccb);
425 	}
426 
427 	/*
428 	 * Disable this lun.
429 	 */
430 	xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, /*priority*/1);
431 	ccb.cel.ccb_h.func_code = XPT_EN_LUN;
432 	ccb.cel.enable = 0;
433 	xpt_action(&ccb);
434 
435 	if (ccb.cel.ccb_h.status != CAM_REQ_CMP)
436 		printf("targdislun - Disabling lun on controller failed "
437 		       "with status 0x%x\n", ccb.cel.ccb_h.status);
438 	else
439 		softc->flags &= ~TARG_FLAG_LUN_ENABLED;
440 	return (ccb.cel.ccb_h.status);
441 }
442 
443 static cam_status
444 targctor(struct cam_periph *periph, void *arg)
445 {
446 	struct ccb_pathinq *cpi;
447 	struct targ_softc *softc;
448 	int i;
449 
450 	cpi = (struct ccb_pathinq *)arg;
451 
452 	/* Allocate our per-instance private storage */
453 	softc = (struct targ_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);
454 	if (softc == NULL) {
455 		printf("targctor: unable to malloc softc\n");
456 		return (CAM_REQ_CMP_ERR);
457 	}
458 
459 	bzero(softc, sizeof(*softc));
460 	TAILQ_INIT(&softc->pending_queue);
461 	TAILQ_INIT(&softc->work_queue);
462 	TAILQ_INIT(&softc->snd_ccb_queue);
463 	TAILQ_INIT(&softc->rcv_ccb_queue);
464 	TAILQ_INIT(&softc->unknown_atio_queue);
465 	bioq_init(&softc->snd_bio_queue);
466 	bioq_init(&softc->rcv_bio_queue);
467 	softc->accept_tio_list = NULL;
468 	SLIST_INIT(&softc->immed_notify_slist);
469 	softc->state = TARG_STATE_NORMAL;
470 	periph->softc = softc;
471 	softc->init_level++;
472 
473 	cam_extend_set(targperiphs, periph->unit_number, periph);
474 
475 	/*
476 	 * We start out life with a UA to indicate power-on/reset.
477 	 */
478 	for (i = 0; i < MAX_INITIATORS; i++)
479 		softc->istate[i].pending_ua = UA_POWER_ON;
480 
481 	/*
482 	 * Allocate an inquiry data buffer.
483 	 * We let the user override this if desired.
484 	 */
485 	softc->inq_data_len = sizeof(*softc->inq_data);
486 	softc->inq_data = malloc(softc->inq_data_len, M_DEVBUF, M_NOWAIT);
487 	if (softc->inq_data == NULL) {
488 		printf("targctor - Unable to malloc inquiry data\n");
489 		targdtor(periph);
490 		return (CAM_RESRC_UNAVAIL);
491 	}
492 	if (cpi->ccb_h.ccb_inq) {
493 		bcopy(cpi->ccb_h.ccb_inq, softc->inq_data, softc->inq_data_len);
494 	} else {
495 		bzero(softc->inq_data, softc->inq_data_len);
496 		softc->inq_data->device =
497 		    T_PROCESSOR | (SID_QUAL_LU_CONNECTED << 5);
498 		softc->inq_data->version = 2;
499 		softc->inq_data->response_format = 2; /* SCSI2 Inquiry Format */
500 		softc->inq_data->additional_length = softc->inq_data_len - 4;
501 		strncpy(softc->inq_data->vendor, "FreeBSD ", SID_VENDOR_SIZE);
502 		strncpy(softc->inq_data->product,
503 		    "TM-PT           ", SID_PRODUCT_SIZE);
504 		strncpy(softc->inq_data->revision, "0.0 ", SID_REVISION_SIZE);
505 	}
506 
507 	/*
508 	 * Preserve the SIM's capabilities here. Don't let user applications
509 	 * do something dumb.
510 	 */
511 	if (softc->inq_data->version >= 2) {
512 		softc->inq_data->flags &=
513 		    ~(PI_SDTR_ABLE|PI_WIDE_16|PI_WIDE_32|PI_TAG_ABLE);
514 		softc->inq_data->flags |= (cpi->hba_inquiry &
515 		    (PI_SDTR_ABLE|PI_WIDE_16|PI_WIDE_32|PI_TAG_ABLE));
516 	}
517 	softc->targ_dev = make_dev(&targ_cdevsw, periph->unit_number, UID_ROOT,
518 				   GID_OPERATOR, 0600, "%s%d",
519 				   periph->periph_name, periph->unit_number);
520 
521 	softc->init_level++;
522 	return (CAM_REQ_CMP);
523 }
524 
525 static void
526 targdtor(struct cam_periph *periph)
527 {
528 	struct targ_softc *softc;
529 
530 	softc = (struct targ_softc *)periph->softc;
531 
532 	softc->state = TARG_STATE_TEARDOWN;
533 
534 	targdislun(periph);
535 
536 	cam_extend_release(targperiphs, periph->unit_number);
537 
538 	switch (softc->init_level) {
539 	default:
540 		/* FALLTHROUGH */
541 	case 2:
542 		free(softc->inq_data, M_DEVBUF);
543 		destroy_dev(softc->targ_dev);
544 		/* FALLTHROUGH */
545 	case 1:
546 		free(softc, M_DEVBUF);
547 		break;
548 	case 0:
549 		panic("targdtor - impossible init level");
550 	}
551 }
552 
553 static int
554 targopen(dev_t dev, int flags, int fmt, struct thread *td)
555 {
556 	struct cam_periph *periph;
557 	struct	targ_softc *softc;
558 	u_int unit;
559 	cam_status status;
560 	int error;
561 	int s;
562 
563 	unit = minor(dev);
564 
565 	/* An open of the control device always succeeds */
566 	if (TARG_IS_CONTROL_DEV(unit))
567 		return 0;
568 
569 	s = splsoftcam();
570 	periph = cam_extend_get(targperiphs, unit);
571 	if (periph == NULL) {
572         	splx(s);
573 		return (ENXIO);
574 	}
575 	if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0) {
576 		splx(s);
577 		return (error);
578 	}
579 
580 	softc = (struct targ_softc *)periph->softc;
581 	if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0) {
582 		if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
583 			splx(s);
584 			cam_periph_unlock(periph);
585 			return(ENXIO);
586 		}
587 	}
588         splx(s);
589 
590 	status = targenlun(periph);
591 	switch (status) {
592 	case CAM_REQ_CMP:
593 		error = 0;
594 		break;
595 	case CAM_RESRC_UNAVAIL:
596 		error = ENOMEM;
597 		break;
598 	case CAM_LUN_ALRDY_ENA:
599 		error = EADDRINUSE;
600 		break;
601 	default:
602 		error = ENXIO;
603 		break;
604 	}
605         cam_periph_unlock(periph);
606 	if (error) {
607 		cam_periph_release(periph);
608 	}
609 	return (error);
610 }
611 
612 static int
613 targclose(dev_t dev, int flag, int fmt, struct thread *td)
614 {
615 	struct	cam_periph *periph;
616 	struct	targ_softc *softc;
617 	u_int	unit;
618 	int	s;
619 	int	error;
620 
621 	unit = minor(dev);
622 
623 	/* A close of the control device always succeeds */
624 	if (TARG_IS_CONTROL_DEV(unit))
625 		return 0;
626 
627 	s = splsoftcam();
628 	periph = cam_extend_get(targperiphs, unit);
629 	if (periph == NULL) {
630 		splx(s);
631 		return (ENXIO);
632 	}
633 	if ((error = cam_periph_lock(periph, PRIBIO)) != 0)
634 		return (error);
635 	softc = (struct targ_softc *)periph->softc;
636 	splx(s);
637 
638 	targdislun(periph);
639 
640 	cam_periph_unlock(periph);
641 	cam_periph_release(periph);
642 
643 	return (0);
644 }
645 
646 static int
647 targallocinstance(void *arg, u_long cmd)
648 {
649 	struct ioc_alloc_unit *alloc_unit = arg;
650 	struct scsi_inquiry_data local;
651 	struct ccb_pathinq cpi;
652 	struct cam_path *path;
653 	struct cam_periph *periph;
654 	cam_status status;
655 	int free_path_on_return;
656 	int error;
657 
658 	free_path_on_return = 0;
659 	status = xpt_create_path(&path, /*periph*/NULL,
660 				 alloc_unit->path_id,
661 				 alloc_unit->target_id,
662 				 alloc_unit->lun_id);
663 	if (status != CAM_REQ_CMP) {
664 		printf("Couldn't Allocate Path %x\n", status);
665 		goto fail;
666 	}
667 
668 	free_path_on_return++;
669 
670 	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
671 	cpi.ccb_h.func_code = XPT_PATH_INQ;
672 	xpt_action((union ccb *)&cpi);
673 	status = cpi.ccb_h.status;
674 
675 	if (status != CAM_REQ_CMP) {
676 		printf("Couldn't CPI %x\n", status);
677 		goto fail;
678 	}
679 
680 	/* Can only alloc units on controllers that support target mode */
681 	if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
682 		printf("Controller does not support target mode - status %x\n",
683 		       status);
684 		status = CAM_PATH_INVALID;
685 		goto fail;
686 	}
687 
688 	/* Ensure that we don't already have an instance for this unit. */
689 	if ((periph = cam_periph_find(path, "targ")) != NULL) {
690 		status = CAM_LUN_ALRDY_ENA;
691 		goto fail;
692 	}
693 
694 	if (cmd == TARGCTLIOALLOCUNIT) {
695 		status = copyin(alloc_unit->inquiry_data, &local, sizeof local);
696 		if (status)
697 			goto fail;
698 		cpi.ccb_h.ccb_inq = &local;
699 	} else {
700 		cpi.ccb_h.ccb_inq = NULL;
701 	}
702 
703 
704 	/*
705 	 * Allocate a peripheral instance for
706 	 * this target instance.
707 	 */
708 	status = cam_periph_alloc(targctor, NULL, targdtor, targstart,
709 				  "targ", CAM_PERIPH_BIO, path, targasync,
710 				  0, &cpi);
711 
712 fail:
713 	switch (status) {
714 	case CAM_REQ_CMP:
715 	{
716 		struct cam_periph *periph;
717 
718 		if ((periph = cam_periph_find(path, "targ")) == NULL)
719 			panic("targallocinstance: Succeeded but no periph?");
720 		error = 0;
721 		alloc_unit->unit = periph->unit_number;
722 		break;
723 	}
724 	case CAM_RESRC_UNAVAIL:
725 		error = ENOMEM;
726 		break;
727 	case CAM_LUN_ALRDY_ENA:
728 		error = EADDRINUSE;
729 		break;
730 	default:
731 		printf("targallocinstance: Unexpected CAM status %x\n", status);
732 		/* FALLTHROUGH */
733 	case CAM_PATH_INVALID:
734 		error = ENXIO;
735 		break;
736 	case CAM_PROVIDE_FAIL:
737 		error = ENODEV;
738 		break;
739 	}
740 
741 	if (free_path_on_return != 0)
742 		xpt_free_path(path);
743 
744 	return (error);
745 }
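/*
 * Illustrative sketch (assumed userland code, not part of this file) of how
 * a daemon would reach targallocinstance() through the control node.  The
 * ioctl and field names come from scsi_targetio.h as used above; the chosen
 * bus/target/lun, the open flags, the inquiry contents, and any casts the
 * field types may require are assumptions, and error handling is elided:
 *
 *	struct scsi_inquiry_data inq;
 *	struct ioc_alloc_unit au;
 *	int ctl;
 *
 *	ctl = open("/dev/targ.ctl", O_RDWR);
 *	bzero(&au, sizeof(au));
 *	au.path_id = 0;
 *	au.target_id = 1;
 *	au.lun_id = 0;
 *	au.inquiry_data = &inq;
 *	if (ioctl(ctl, TARGCTLIOALLOCUNIT, &au) == 0)
 *		printf("allocated targ%d\n", au.unit);
 */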
746 
747 static int
748 targfreeinstance(struct ioc_alloc_unit *alloc_unit)
749 {
750 	struct cam_path *path;
751 	struct cam_periph *periph;
752 	struct targ_softc *softc;
753 	cam_status status;
754 	int free_path_on_return;
755 	int error;
756 
757 	periph = NULL;
758 	free_path_on_return = 0;
759 	status = xpt_create_path(&path, /*periph*/NULL,
760 				 alloc_unit->path_id,
761 				 alloc_unit->target_id,
762 				 alloc_unit->lun_id);
763 	free_path_on_return++;
764 
765 	if (status != CAM_REQ_CMP)
766 		goto fail;
767 
768 	/* Find our instance. */
769 	if ((periph = cam_periph_find(path, "targ")) == NULL) {
770 		xpt_print_path(path);
771 		printf("Invalid path specified for freeing target instance\n");
772 		status = CAM_PATH_INVALID;
773 		goto fail;
774 	}
775 
776         softc = (struct targ_softc *)periph->softc;
777 
778         if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0) {
779 		status = CAM_BUSY;
780 		goto fail;
781 	}
782 
783 fail:
784 	if (free_path_on_return != 0)
785 		xpt_free_path(path);
786 
787 	switch (status) {
788 	case CAM_REQ_CMP:
789 		if (periph != NULL)
790 			cam_periph_invalidate(periph);
791 		error = 0;
792 		break;
793 	case CAM_RESRC_UNAVAIL:
794 		error = ENOMEM;
795 		break;
796 	case CAM_LUN_ALRDY_ENA:
797 		error = EADDRINUSE;
798 		break;
799 	default:
800 		printf("targfreeinstance: Unexpected CAM status %x\n", status);
801 		/* FALLTHROUGH */
802 	case CAM_PATH_INVALID:
803 		error = ENODEV;
804 		break;
805 	}
806 	return (error);
807 }
808 
809 static int
810 targioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
811 {
812 	struct cam_periph *periph;
813 	struct targ_softc *softc;
814 	u_int  unit;
815 	int    error;
816 
817 	unit = minor(dev);
818 	error = 0;
819 	if (TARG_IS_CONTROL_DEV(unit)) {
820 		switch (cmd) {
821 		case OTARGCTLIOALLOCUNIT:
822 		case TARGCTLIOALLOCUNIT:
823 			error = targallocinstance(addr, cmd);
824 			break;
825 		case OTARGCTLIOFREEUNIT:
826 		case TARGCTLIOFREEUNIT:
827 			/*
828 			 * Old_ioc_alloc_unit and ioc_alloc_unit are the
829 			 * same with respect to what we need from the structure
830 			 * for this function.
831 			 */
832 			error = targfreeinstance((struct ioc_alloc_unit*)addr);
833 			break;
834 		default:
835 			error = EINVAL;
836 			break;
837 		}
838 		return (error);
839 	}
840 
841 	periph = cam_extend_get(targperiphs, unit);
842 	if (periph == NULL)
843 		return (ENXIO);
844 	softc = (struct targ_softc *)periph->softc;
845 	switch (cmd) {
846 	case TARGIOCFETCHEXCEPTION:
847 		*((targ_exception *)addr) = softc->exceptions;
848 		break;
849 	case TARGIOCCLEAREXCEPTION:
850 	{
851 		targ_exception clear_mask;
852 
853 		clear_mask = *((targ_exception *)addr);
854 		if ((clear_mask & TARG_EXCEPT_UNKNOWN_ATIO) != 0) {
855 			struct ccb_hdr *ccbh;
856 
857 			ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
858 			if (ccbh != NULL) {
859 				TAILQ_REMOVE(&softc->unknown_atio_queue,
860 					     ccbh, periph_links.tqe);
861 				/* Requeue the ATIO back to the controller */
862 				ccbh->ccb_flags = TARG_CCB_NONE;
863 				xpt_action((union ccb *)ccbh);
864 				ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
865 			}
866 			if (ccbh != NULL)
867 				clear_mask &= ~TARG_EXCEPT_UNKNOWN_ATIO;
868 		}
869 		softc->exceptions &= ~clear_mask;
870 		if (softc->exceptions == TARG_EXCEPT_NONE
871 		 && softc->state == TARG_STATE_EXCEPTION) {
872 			softc->state = TARG_STATE_NORMAL;
873 			targrunqueue(periph, softc);
874 		}
875 		break;
876 	}
877 	case TARGIOCFETCHATIO:
878 	{
879 		struct ccb_hdr *ccbh;
880 
881 		ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
882 		if (ccbh != NULL) {
883 			bcopy(ccbh, addr, sizeof(struct ccb_accept_tio));
884 		} else {
885 			error = ENOENT;
886 		}
887 		break;
888 	}
889 	case TARGIOCCOMMAND:
890 	{
891 		union ccb *inccb;
892 		union ccb *ccb;
893 
894 		/*
895 		 * XXX JGibbs
896 		 * This code is lifted directly from the pass-thru driver.
897 		 * Perhaps this should be moved to a library????
898 		 */
899 		inccb = (union ccb *)addr;
900 		ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority);
901 
902 		error = targsendccb(periph, ccb, inccb);
903 
904 		xpt_release_ccb(ccb);
905 
906 		break;
907 	}
908 	case TARGIOCGETISTATE:
909 	case TARGIOCSETISTATE:
910 	{
911 		struct ioc_initiator_state *ioc_istate;
912 
913 		ioc_istate = (struct ioc_initiator_state *)addr;
914 		if (ioc_istate->initiator_id >= MAX_INITIATORS) {
915 			error = EINVAL;
916 			break;
917 		}
918 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
919 			  ("GET/SETISTATE for %d\n", ioc_istate->initiator_id));
920 		if (cmd == TARGIOCGETISTATE) {
921 			bcopy(&softc->istate[ioc_istate->initiator_id],
922 			      &ioc_istate->istate, sizeof(ioc_istate->istate));
923 		} else {
924 			bcopy(&ioc_istate->istate,
925 			      &softc->istate[ioc_istate->initiator_id],
926 			      sizeof(ioc_istate->istate));
927 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
928 				  ("pending_ca now %x\n",
929 				   softc->istate[ioc_istate->initiator_id].pending_ca));
930 		}
931 		break;
932 	}
933 	case TARGIODEBUG:
934 	{
935 #ifdef	CAMDEBUG
936 		union ccb ccb;
937 		bzero (&ccb, sizeof ccb);
938 		if (xpt_create_path(&ccb.ccb_h.path, periph,
939 		    xpt_path_path_id(periph->path),
940 		    xpt_path_target_id(periph->path),
941 		    xpt_path_lun_id(periph->path)) != CAM_REQ_CMP) {
942 			error = EINVAL;
943 			break;
944 		}
945 		if (*((int *)addr)) {
946 			ccb.cdbg.flags = CAM_DEBUG_PERIPH;
947 		} else {
948 			ccb.cdbg.flags = CAM_DEBUG_NONE;
949 		}
950 		xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, 0);
951 		ccb.ccb_h.func_code = XPT_DEBUG;
952 		ccb.ccb_h.path_id = xpt_path_path_id(ccb.ccb_h.path);
953 		ccb.ccb_h.target_id = xpt_path_target_id(ccb.ccb_h.path);
954 		ccb.ccb_h.target_lun = xpt_path_lun_id(ccb.ccb_h.path);
955 		ccb.ccb_h.cbfcnp = targdone;
956 		xpt_action(&ccb);
957 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
958 			error = EIO;
959 		} else {
960 			error = 0;
961 		}
962 		xpt_free_path(ccb.ccb_h.path);
963 #else
964 		error = 0;
965 #endif
966 		break;
967 	}
968 	default:
969 		error = ENOTTY;
970 		break;
971 	}
972 	return (error);
973 }
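/*
 * Illustrative sketch (assumed userland code, not part of this file) of the
 * exception handshake implied by the per-instance ioctls above: poll()
 * reports POLLERR while the instance is in the exception state, after which
 * the daemon inspects the exception mask, drains any unknown ATIO, and
 * clears the bits it handled so targrunqueue() can resume.  Error handling
 * and the CTIO construction are elided:
 *
 *	targ_exception excep;
 *	struct ccb_accept_tio atio;
 *
 *	ioctl(fd, TARGIOCFETCHEXCEPTION, &excep);
 *	if (excep & TARG_EXCEPT_UNKNOWN_ATIO) {
 *		ioctl(fd, TARGIOCFETCHATIO, &atio);
 *		... decode atio.cdb_io.cdb_bytes and respond via
 *		    TARGIOCCOMMAND ...
 *	}
 *	ioctl(fd, TARGIOCCLEAREXCEPTION, &excep);
 */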
974 
975 /*
976  * XXX JGibbs lifted from pass-thru driver.
977  * Generally, "ccb" should be the CCB supplied by the kernel.  "inccb"
978  * should be the CCB that is copied in from the user.
979  */
980 static int
981 targsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
982 {
983 	struct targ_softc *softc;
984 	struct cam_periph_map_info mapinfo;
985 	int error, need_unmap;
986 	int s;
987 
988 	softc = (struct targ_softc *)periph->softc;
989 
990 	need_unmap = 0;
991 
992 	/*
993 	 * There are some fields in the CCB header that need to be
994 	 * preserved, the rest we get from the user.
995 	 */
996 	xpt_merge_ccb(ccb, inccb);
997 
998 	/*
999 	 * There's no way for the user to have a completion
1000 	 * function, so we put our own completion function in here.
1001 	 */
1002 	ccb->ccb_h.cbfcnp = targdone;
1003 
1004 	/*
1005 	 * We only attempt to map the user memory into kernel space
1006 	 * if they haven't passed in a physical memory pointer,
1007 	 * and if there is actually an I/O operation to perform.
1008 	 * Right now cam_periph_mapmem() only supports SCSI and device
1009 	 * match CCBs.  For the SCSI CCBs, we only pass the CCB in if
1010 	 * there's actually data to map.  cam_periph_mapmem() will do the
1011 	 * right thing, even if there isn't data to map, but since CCBs
1012 	 * without data are a reasonably common occurrence (e.g. test unit
1013 	 * ready), it will save a few cycles if we check for it here.
1014 	 */
1015 	if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0)
1016 	 && (((ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
1017 	    && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
1018 	  || (ccb->ccb_h.func_code == XPT_DEV_MATCH))) {
1019 
1020 		bzero(&mapinfo, sizeof(mapinfo));
1021 
1022 		error = cam_periph_mapmem(ccb, &mapinfo);
1023 
1024 		/*
1025 		 * cam_periph_mapmem returned an error, we can't continue.
1026 		 * Return the error to the user.
1027 		 */
1028 		if (error)
1029 			return(error);
1030 
1031 		/*
1032 		 * We successfully mapped the memory in, so we need to
1033 		 * unmap it when the transaction is done.
1034 		 */
1035 		need_unmap = 1;
1036 	}
1037 
1038 	/*
1039 	 * Once queued on the pending CCB list, this CCB will be protected
1040 	 * by the error recovery handling used for 'buffer I/O' ccbs.  Since
1041 	 * we are in a process context here, however, the software interrupt
1042 	 * for this driver may deliver an event invalidating this CCB just
1043 	 * before we queue it.  Close this race condition by blocking
1044 	 * software interrupt delivery, checking for any pertinent queued
1045 	 * events, and only then queuing this CCB.
1046 	 */
1047 	s = splsoftcam();
1048 	if (softc->exceptions == 0) {
1049 		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
1050 			TAILQ_INSERT_TAIL(&softc->pending_queue, &ccb->ccb_h,
1051 					  periph_links.tqe);
1052 
1053 		/*
1054 		 * If the user wants us to perform any error recovery,
1055 		 * then honor that request.  Otherwise, it's up to the
1056 		 * user to perform any error recovery.
1057 		 */
1058 		error = cam_periph_runccb(ccb, /* error handler */NULL,
1059 					  CAM_RETRY_SELTO, SF_RETRY_UA,
1060 					  &softc->device_stats);
1061 
1062 		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
1063 			TAILQ_REMOVE(&softc->pending_queue, &ccb->ccb_h,
1064 				     periph_links.tqe);
1065 	} else {
1066 		ccb->ccb_h.status = CAM_UNACKED_EVENT;
1067 		error = 0;
1068 	}
1069 	splx(s);
1070 
1071 	if (need_unmap != 0)
1072 		cam_periph_unmapmem(ccb, &mapinfo);
1073 
1074 	ccb->ccb_h.cbfcnp = NULL;
1075 	ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
1076 	bcopy(ccb, inccb, sizeof(union ccb));
1077 
1078 	return(error);
1079 }
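/*
 * Note that targsendccb() runs the user's CCB synchronously: the supplied
 * CCB is merged over a freshly allocated one, any user data is mapped for
 * the duration of the request, and XPT_CONT_TARGET_IO CCBs are parked on
 * pending_queue so the normal error-recovery handling covers them.  If an
 * exception is pending, the CCB is returned with CAM_UNACKED_EVENT rather
 * than being issued.
 */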
1080 
1081 
1082 static int
1083 targpoll(dev_t dev, int poll_events, struct thread *td)
1084 {
1085 	struct cam_periph *periph;
1086 	struct targ_softc *softc;
1087 	u_int  unit;
1088 	int    revents;
1089 	int    s;
1090 
1091 	unit = minor(dev);
1092 
1093 	/* ioctl is the only supported operation of the control device */
1094 	if (TARG_IS_CONTROL_DEV(unit))
1095 		return EINVAL;
1096 
1097 	periph = cam_extend_get(targperiphs, unit);
1098 	if (periph == NULL)
1099 		return (ENXIO);
1100 	softc = (struct targ_softc *)periph->softc;
1101 
1102 	revents = 0;
1103 	s = splcam();
1104 	if ((poll_events & (POLLOUT | POLLWRNORM)) != 0) {
1105 		if (TAILQ_FIRST(&softc->rcv_ccb_queue) != NULL
1106 		 && bioq_first(&softc->rcv_bio_queue) == NULL)
1107 			revents |= poll_events & (POLLOUT | POLLWRNORM);
1108 	}
1109 	if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
1110 		if (TAILQ_FIRST(&softc->snd_ccb_queue) != NULL
1111 		 && bioq_first(&softc->snd_bio_queue) == NULL)
1112 			revents |= poll_events & (POLLIN | POLLRDNORM);
1113 	}
1114 
1115 	if (softc->state != TARG_STATE_NORMAL)
1116 		revents |= POLLERR;
1117 
1118 	if (revents == 0) {
1119 		if (poll_events & (POLLOUT | POLLWRNORM))
1120 			selrecord(td, &softc->rcv_select);
1121 		if (poll_events & (POLLIN | POLLRDNORM))
1122 			selrecord(td, &softc->snd_select);
1123 	}
1124 	splx(s);
1125 	return (revents);
1126 }
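/*
 * The sense of the poll events above follows the data flow rather than the
 * device: the node is readable (POLLIN) when an initiator SEND is waiting
 * on snd_ccb_queue with no user read buffer queued, and writable (POLLOUT)
 * when a RECEIVE is waiting on rcv_ccb_queue for a user write buffer.
 * POLLERR reports the exception or teardown state.
 */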
1127 
1128 static int
1129 targread(dev_t dev, struct uio *uio, int ioflag)
1130 {
1131 	u_int  unit;
1132 
1133 	unit = minor(dev);
1134 	/* ioctl is the only supported operation of the control device */
1135 	if (TARG_IS_CONTROL_DEV(unit))
1136 		return EINVAL;
1137 
1138 	if (uio->uio_iovcnt == 0
1139 	 || uio->uio_iov->iov_len == 0) {
1140 		/* EOF */
1141 		struct cam_periph *periph;
1142 		struct targ_softc *softc;
1143 		int    s;
1144 
1145 		s = splcam();
1146 		periph = cam_extend_get(targperiphs, unit);
1147 		if (periph == NULL)
1148 			return (ENXIO);
1149 		softc = (struct targ_softc *)periph->softc;
1150 		softc->flags |= TARG_FLAG_SEND_EOF;
1151 		splx(s);
1152 		targrunqueue(periph, softc);
1153 		return (0);
1154 	}
1155 	return(physread(dev, uio, ioflag));
1156 }
1157 
1158 static int
1159 targwrite(dev_t dev, struct uio *uio, int ioflag)
1160 {
1161 	u_int  unit;
1162 
1163 	unit = minor(dev);
1164 	/* ioctl is the only supported operation of the control device */
1165 	if (TARG_IS_CONTROL_DEV(unit))
1166 		return EINVAL;
1167 
1168 	if (uio->uio_iovcnt == 0
1169 	 || uio->uio_iov->iov_len == 0) {
1170 		/* EOF */
1171 		struct cam_periph *periph;
1172 		struct targ_softc *softc;
1173 		int    s;
1174 
1175 		s = splcam();
1176 		periph = cam_extend_get(targperiphs, unit);
1177 		if (periph == NULL)
1178 			return (ENXIO);
1179 		softc = (struct targ_softc *)periph->softc;
1180 		softc->flags |= TARG_FLAG_RECEIVE_EOF;
1181 		splx(s);
1182 		targrunqueue(periph, softc);
1183 		return (0);
1184 	}
1185 	return(physwrite(dev, uio, ioflag));
1186 }
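/*
 * Both targread() and targwrite() treat a zero-length uio as an EOF marker
 * rather than an error: the matching EOF flag is latched in the softc and
 * targrunqueue() completes the waiting SEND or RECEIVE ATIO as a
 * zero-length, CAM_DIR_NONE transfer that just carries status.  Non-empty
 * requests go through physread()/physwrite(), which hand the backing bio to
 * targstrategy() below.
 */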
1187 
1188 /*
1189  * Actually translate the requested transfer into one the physical driver
1190  * can understand.  The transfer is described by a bio and will include
1191  * only one physical transfer.
1192  */
1193 static void
1194 targstrategy(struct bio *bp)
1195 {
1196 	struct cam_periph *periph;
1197 	struct targ_softc *softc;
1198 	u_int  unit;
1199 	int    s;
1200 
1201 	unit = minor(bp->bio_dev);
1202 	bp->bio_resid = bp->bio_bcount;
1203 
1204 	/* ioctl is the only supported operation of the control device */
1205 	if (TARG_IS_CONTROL_DEV(unit)) {
1206 		biofinish(bp, NULL, EINVAL);
1207 		return;
1208 	}
1209 
1210 	periph = cam_extend_get(targperiphs, unit);
1211 	if (periph == NULL) {
1212 		biofinish(bp, NULL, ENXIO);
1213 		return;
1214 	}
1215 	softc = (struct targ_softc *)periph->softc;
1216 
1217 	/*
1218 	 * Mask interrupts so that the device cannot be invalidated until
1219 	 * after we are in the queue.  Otherwise, we might not properly
1220 	 * clean up one of the buffers.
1221 	 */
1222 	s = splbio();
1223 
1224 	/*
1225 	 * If there is an exception pending, error out
1226 	 */
1227 	if (softc->state != TARG_STATE_NORMAL) {
1228 		splx(s);
1229 		if (softc->state == TARG_STATE_EXCEPTION
1230 		 && (softc->exceptions & TARG_EXCEPT_DEVICE_INVALID) == 0)
1231 			s = EBUSY;
1232 		else
1233 			s = ENXIO;
1234 		biofinish(bp, NULL, s);
1235 		return;
1236 	}
1237 
1238 	/*
1239 	 * Place it in the queue of buffers available for either
1240 	 * SEND or RECEIVE commands.
1241 	 *
1242 	 */
1243 	bp->bio_resid = bp->bio_bcount;
1244 	if (bp->bio_cmd == BIO_READ) {
1245 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1246 			  ("Queued a SEND buffer\n"));
1247 		bioq_insert_tail(&softc->snd_bio_queue, bp);
1248 	} else {
1249 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1250 			  ("Queued a RECEIVE buffer\n"));
1251 		bioq_insert_tail(&softc->rcv_bio_queue, bp);
1252 	}
1253 
1254 	splx(s);
1255 
1256 	/*
1257 	 * Attempt to use the new buffer to service any pending
1258 	 * target commands.
1259 	 */
1260 	targrunqueue(periph, softc);
1261 
1262 	return;
1263 }
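/*
 * targrunqueue() is the matchmaker between the initiator and userland
 * sides: it only runs when work_queue and pending_queue are empty (to keep
 * data ordered), picks a SEND or RECEIVE ATIO that is waiting for a buffer,
 * pairs it with the first bio on the corresponding queue (or with a latched
 * EOF), sizes the next data_increment, and schedules targstart() to build
 * the CTIO.
 */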
1264 
1265 static void
1266 targrunqueue(struct cam_periph *periph, struct targ_softc *softc)
1267 {
1268 	struct  ccb_queue *pending_queue;
1269 	struct	ccb_accept_tio *atio;
1270 	struct	bio_queue_head *bioq;
1271 	struct	bio *bp;
1272 	struct	targ_cmd_desc *desc;
1273 	struct	ccb_hdr *ccbh;
1274 	int	s;
1275 
1276 	s = splbio();
1277 	pending_queue = NULL;
1278 	bioq = NULL;
1279 	ccbh = NULL;
1280 	/* Only run one request at a time to maintain data ordering. */
1281 	if (softc->state != TARG_STATE_NORMAL
1282 	 || TAILQ_FIRST(&softc->work_queue) != NULL
1283 	 || TAILQ_FIRST(&softc->pending_queue) != NULL) {
1284 		splx(s);
1285 		return;
1286 	}
1287 
1288 	if (((bp = bioq_first(&softc->snd_bio_queue)) != NULL
1289 	  || (softc->flags & TARG_FLAG_SEND_EOF) != 0)
1290 	 && (ccbh = TAILQ_FIRST(&softc->snd_ccb_queue)) != NULL) {
1291 
1292 		if (bp == NULL)
1293 			softc->flags &= ~TARG_FLAG_SEND_EOF;
1294 		else {
1295 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1296 				  ("De-Queued a SEND buffer %ld\n",
1297 				   bp->bio_bcount));
1298 		}
1299 		bioq = &softc->snd_bio_queue;
1300 		pending_queue = &softc->snd_ccb_queue;
1301 	} else if (((bp = bioq_first(&softc->rcv_bio_queue)) != NULL
1302 	  	 || (softc->flags & TARG_FLAG_RECEIVE_EOF) != 0)
1303 		&& (ccbh = TAILQ_FIRST(&softc->rcv_ccb_queue)) != NULL) {
1304 
1305 		if (bp == NULL)
1306 			softc->flags &= ~TARG_FLAG_RECEIVE_EOF;
1307 		else {
1308 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1309 				  ("De-Queued a RECEIVE buffer %ld\n",
1310 				   bp->bio_bcount));
1311 		}
1312 		bioq = &softc->rcv_bio_queue;
1313 		pending_queue = &softc->rcv_ccb_queue;
1314 	}
1315 
1316 	if (pending_queue != NULL) {
1317 		/* Process a request */
1318 		atio = (struct ccb_accept_tio *)ccbh;
1319 		TAILQ_REMOVE(pending_queue, ccbh, periph_links.tqe);
1320 		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1321 		desc->bp = bp;
1322 		if (bp == NULL) {
1323 			/* EOF */
1324 			desc->data = NULL;
1325 			desc->data_increment = 0;
1326 			desc->data_resid = 0;
1327 			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1328 			atio->ccb_h.flags |= CAM_DIR_NONE;
1329 		} else {
1330 			bioq_remove(bioq, bp);
1331 			desc->data = &bp->bio_data[bp->bio_bcount - bp->bio_resid];
1332 			desc->data_increment =
1333 			    MIN(desc->data_resid, bp->bio_resid);
1334 		}
1335 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1336 			  ("Buffer command: data %p: datacnt %d\n",
1337 			   desc->data, desc->data_increment));
1338 		TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1339 				  periph_links.tqe);
1340 	}
1341 	atio = (struct ccb_accept_tio *)TAILQ_FIRST(&softc->work_queue);
1342 	if (atio != NULL) {
1343 		int priority;
1344 
1345 		priority = (atio->ccb_h.flags & CAM_DIS_DISCONNECT) ? 0 : 1;
1346 		splx(s);
1347 		xpt_schedule(periph, priority);
1348 	} else
1349 		splx(s);
1350 }
1351 
1352 static void
1353 targstart(struct cam_periph *periph, union ccb *start_ccb)
1354 {
1355 	struct targ_softc *softc;
1356 	struct ccb_hdr *ccbh;
1357 	struct ccb_accept_tio *atio;
1358 	struct targ_cmd_desc *desc;
1359 	struct ccb_scsiio *csio;
1360 	targ_ccb_flags flags;
1361 	int    s;
1362 
1363 	softc = (struct targ_softc *)periph->softc;
1364 
1365 	s = splbio();
1366 	ccbh = TAILQ_FIRST(&softc->work_queue);
1367 	if (periph->immediate_priority <= periph->pinfo.priority) {
1368 		start_ccb->ccb_h.ccb_flags = TARG_CCB_WAITING;
1369 		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1370 				  periph_links.sle);
1371 		periph->immediate_priority = CAM_PRIORITY_NONE;
1372 		splx(s);
1373 		wakeup(&periph->ccb_list);
1374 	} else if (ccbh == NULL) {
1375 		splx(s);
1376 		xpt_release_ccb(start_ccb);
1377 	} else {
1378 		TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
1379 		splx(s);
1380 		atio = (struct ccb_accept_tio*)ccbh;
1381 		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1382 
1383 		/* Is this a tagged request? */
1384 		flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT |
1385 		    CAM_TAG_ACTION_VALID | CAM_DIR_MASK | CAM_SEND_STATUS);
1386 
1387 		/*
1388 		 * If we are done with the transaction, tell the
1389 		 * controller to send status and perform a CMD_CMPLT.
1390 		 */
1391 		if (desc->user_atio == 0 &&
1392 		    desc->data_resid == desc->data_increment) {
1393 			flags |= CAM_SEND_STATUS;
1394 		}
1395 
1396 		csio = &start_ccb->csio;
1397 		cam_fill_ctio(csio,
1398 			      /*retries*/2,
1399 			      targdone,
1400 			      flags,
1401 			      (flags & CAM_TAG_ACTION_VALID) ?
1402 				MSG_SIMPLE_Q_TAG : 0,
1403 			      atio->tag_id,
1404 			      atio->init_id,
1405 			      desc->status,
1406 			      /*data_ptr*/desc->data_increment == 0
1407 					  ? NULL : desc->data,
1408 			      /*dxfer_len*/desc->data_increment,
1409 			      /*timeout*/desc->timeout);
1410 
1411 		if ((flags & CAM_SEND_STATUS) != 0
1412 		 && (desc->status == SCSI_STATUS_CHECK_COND
1413 		  || desc->status == SCSI_STATUS_CMD_TERMINATED)) {
1414 			struct initiator_state *istate;
1415 
1416 			istate = &softc->istate[atio->init_id];
1417 			csio->sense_len = istate->sense_data.extra_len
1418 					+ offsetof(struct scsi_sense_data,
1419 						   extra_len);
1420 			bcopy(&istate->sense_data, &csio->sense_data,
1421 			      csio->sense_len);
1422 			csio->ccb_h.flags |= CAM_SEND_SENSE;
1423 		} else {
1424 			csio->sense_len = 0;
1425 		}
1426 
1427 		start_ccb->ccb_h.ccb_flags = TARG_CCB_NONE;
1428 		start_ccb->ccb_h.ccb_atio = atio;
1429 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1430 			  ("Sending a CTIO (flags 0x%x)\n", csio->ccb_h.flags));
1431 		TAILQ_INSERT_TAIL(&softc->pending_queue, &csio->ccb_h,
1432 				  periph_links.tqe);
1433 		xpt_action(start_ccb);
1434 		/*
1435 		 * If the queue was frozen waiting for the response
1436 		 * to this ATIO (for instance disconnection was disallowed),
1437 		 * then release it now that our response has been queued.
1438 		 */
1439 		if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1440 			cam_release_devq(periph->path,
1441 					 /*relsim_flags*/0,
1442 					 /*reduction*/0,
1443 					 /*timeout*/0,
1444 					 /*getcount_only*/0);
1445 			atio->ccb_h.status &= ~CAM_DEV_QFRZN;
1446 		}
1447 		s = splbio();
1448 		ccbh = TAILQ_FIRST(&softc->work_queue);
1449 		splx(s);
1450 	}
1451 	if (ccbh != NULL)
1452 		targrunqueue(periph, softc);
1453 }
1454 
1455 static void
1456 targdone(struct cam_periph *periph, union ccb *done_ccb)
1457 {
1458 	struct targ_softc *softc;
1459 
1460 	softc = (struct targ_softc *)periph->softc;
1461 
1462 	if (done_ccb->ccb_h.ccb_flags == TARG_CCB_WAITING) {
1463 		/* Caller will release the CCB */
1464 		wakeup(&done_ccb->ccb_h.cbfcnp);
1465 		return;
1466 	}
1467 
1468 	CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1469 		("targdone %x\n", done_ccb->ccb_h.func_code));
1470 
1471 	switch (done_ccb->ccb_h.func_code) {
1472 	case XPT_ACCEPT_TARGET_IO:
1473 	{
1474 		struct ccb_accept_tio *atio;
1475 		struct targ_cmd_desc *descr;
1476 		struct initiator_state *istate;
1477 		u_int8_t *cdb;
1478 		int priority;
1479 
1480 		atio = &done_ccb->atio;
1481 		descr = (struct targ_cmd_desc*)atio->ccb_h.ccb_descr;
1482 		istate = &softc->istate[atio->init_id];
1483 		cdb = atio->cdb_io.cdb_bytes;
1484 		if (softc->state == TARG_STATE_TEARDOWN
1485 		 || atio->ccb_h.status == CAM_REQ_ABORTED) {
1486 			freedescr(descr);
1487 			free(done_ccb, M_DEVBUF);
1488 			return;
1489 		}
1490 		descr->data_resid = 0;
1491 		descr->data_increment = 0;
1492 		descr->user_atio = 0;
1493 
1494 #ifdef	CAMDEBUG
1495 		{
1496 			int i;
1497 			char dcb[128];
1498 			for (dcb[0] = 0, i = 0; i < atio->cdb_len; i++) {
1499 				snprintf(dcb + strlen(dcb), sizeof(dcb) -
1500 				    strlen(dcb), " %02x", cdb[i] & 0xff);
1501 			}
1502 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1503 			    ("flags %x cdb:%s\n", atio->ccb_h.flags, dcb));
1504 		}
1505 #endif
1506 		if (atio->sense_len != 0) {
1507 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1508 				  ("ATIO with sense_len\n"));
1509 
1510 			/*
1511 			 * We had an error in the reception of
1512 			 * this command.  Immediately issue a CA.
1513 			 */
1514 			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1515 			atio->ccb_h.flags |= CAM_DIR_NONE;
1516 			descr->timeout = 5 * 1000;
1517 			descr->status = SCSI_STATUS_CHECK_COND;
1518 			copy_sense(softc, istate, (u_int8_t *)&atio->sense_data,
1519 				   atio->sense_len);
1520 			set_ca_condition(periph, atio->init_id, CA_CMD_SENSE);
1521 		} else if (istate->pending_ca == 0
1522 			&& istate->pending_ua != 0
1523 			&& cdb[0] != INQUIRY) {
1524 
1525 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1526 			    ("pending_ca %d pending_ua %d\n",
1527 			    istate->pending_ca, istate->pending_ua));
1528 
1529 			/* Pending UA, tell initiator */
1530 			/* Direction is always relative to the initiator */
1531 			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1532 			atio->ccb_h.flags |= CAM_DIR_NONE;
1533 			descr->timeout = 5 * 1000;
1534 			descr->status = SCSI_STATUS_CHECK_COND;
1535 			fill_sense(softc, atio->init_id,
1536 				   SSD_CURRENT_ERROR, SSD_KEY_UNIT_ATTENTION,
1537 				   0x29,
1538 				   istate->pending_ua == UA_POWER_ON ? 1 : 2);
1539 			set_ca_condition(periph, atio->init_id, CA_UNIT_ATTN);
1540 		} else {
1541 			/*
1542 			 * Save the current CA and UA status so
1543 			 * they can be used by this command.
1544 			 */
1545 			ua_types pending_ua;
1546 			ca_types pending_ca;
1547 
1548 			pending_ua = istate->pending_ua;
1549 			pending_ca = istate->pending_ca;
1550 
1551 			/*
1552 			 * As per the SCSI2 spec, any command that occurs
1553 			 * after a CA is reported clears the CA.  We must
1554 			 * also clear the UA condition, if any, that caused
1555 			 * the CA to occur, assuming the UA is not for a
1556 			 * persistent condition.
1557 			 */
1558 			istate->pending_ca = CA_NONE;
1559 			if (pending_ca == CA_UNIT_ATTN)
1560 				istate->pending_ua = UA_NONE;
1561 
1562 			/*
1563 			 * Determine the type of incoming command and
1564 			 * setup our buffer for a response.
1565 			 */
1566 			switch (cdb[0]) {
1567 			case INQUIRY:
1568 			{
1569 				struct scsi_inquiry *inq;
1570 				struct scsi_sense_data *sense;
1571 
1572 				inq = (struct scsi_inquiry *)cdb;
1573 				sense = &istate->sense_data;
1574 				descr->status = SCSI_STATUS_OK;
1575 				CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1576 					  ("Saw an inquiry!\n"));
1577 				/*
1578 				 * Validate the command.  We don't
1579 				 * support any VPD pages, so complain
1580 				 * if EVPD is set.
1581 				 */
1582 				if ((inq->byte2 & SI_EVPD) != 0
1583 				 || inq->page_code != 0) {
1584 					atio->ccb_h.flags &= ~CAM_DIR_MASK;
1585 					atio->ccb_h.flags |= CAM_DIR_NONE;
1586 					descr->timeout = 5 * 1000;
1587 					descr->status = SCSI_STATUS_CHECK_COND;
1588 					fill_sense(softc, atio->init_id,
1589 						   SSD_CURRENT_ERROR,
1590 						   SSD_KEY_ILLEGAL_REQUEST,
1591 						   /*asc*/0x24, /*ascq*/0x00);
1592 					sense->extra_len =
1593 						offsetof(struct scsi_sense_data,
1594 							 extra_bytes)
1595 					      - offsetof(struct scsi_sense_data,
1596 							 extra_len);
1597 					set_ca_condition(periph, atio->init_id,
1598 							 CA_CMD_SENSE);
1599 				}
1600 
1601 				if ((inq->byte2 & SI_EVPD) != 0) {
1602 					sense->sense_key_spec[0] =
1603 					    SSD_SCS_VALID|SSD_FIELDPTR_CMD
1604 					   |SSD_BITPTR_VALID| /*bit value*/1;
1605 					sense->sense_key_spec[1] = 0;
1606 					sense->sense_key_spec[2] =
1607 					    offsetof(struct scsi_inquiry,
1608 						     byte2);
1609 				} else if (inq->page_code != 0) {
1610 					sense->sense_key_spec[0] =
1611 					    SSD_SCS_VALID|SSD_FIELDPTR_CMD;
1612 					sense->sense_key_spec[1] = 0;
1613 					sense->sense_key_spec[2] =
1614 					    offsetof(struct scsi_inquiry,
1615 						     page_code);
1616 				}
1617 				if (descr->status == SCSI_STATUS_CHECK_COND)
1618 					break;
1619 
1620 				/*
1621 				 * Direction is always relative
1622 				 * to the initiator.
1623 				 */
1624 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1625 				atio->ccb_h.flags |= CAM_DIR_IN;
1626 				descr->data = softc->inq_data;
1627 				descr->data_resid =
1628 				    MIN(softc->inq_data_len,
1629 					SCSI_CDB6_LEN(inq->length));
1630 				descr->data_increment = descr->data_resid;
1631 				descr->timeout = 5 * 1000;
1632 				break;
1633 			}
1634 			case TEST_UNIT_READY:
1635 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1636 				atio->ccb_h.flags |= CAM_DIR_NONE;
1637 				descr->timeout = 5 * 1000;
1638 				descr->status = SCSI_STATUS_OK;
1639 				break;
1640 			case REQUEST_SENSE:
1641 			{
1642 				struct scsi_request_sense *rsense;
1643 				struct scsi_sense_data *sense;
1644 
1645 				rsense = (struct scsi_request_sense *)cdb;
1646 				sense = &istate->sense_data;
1647 				if (pending_ca == 0) {
1648 					fill_sense(softc, atio->init_id,
1649 						   SSD_CURRENT_ERROR,
1650 						   SSD_KEY_NO_SENSE, 0x00,
1651 						   0x00);
1652 					CAM_DEBUG(periph->path,
1653 						  CAM_DEBUG_PERIPH,
1654 						  ("No pending CA!\n"));
1655 				}
1656 				/*
1657 				 * Direction is always relative
1658 				 * to the initiator.
1659 				 */
1660 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1661 				atio->ccb_h.flags |= CAM_DIR_IN;
1662 				descr->data = sense;
1663 				descr->data_resid =
1664 			 		offsetof(struct scsi_sense_data,
1665 						 extra_len)
1666 				      + sense->extra_len;
1667 				descr->data_resid =
1668 				    MIN(descr->data_resid,
1669 					SCSI_CDB6_LEN(rsense->length));
1670 				descr->data_increment = descr->data_resid;
1671 				descr->timeout = 5 * 1000;
1672 				descr->status = SCSI_STATUS_OK;
1673 				break;
1674 			}
1675 			case RECEIVE:
1676 			case SEND:
1677 			if (SID_TYPE(softc->inq_data) == T_PROCESSOR) {
1678 				struct scsi_send_receive *sr;
1679 
1680 				sr = (struct scsi_send_receive *)cdb;
1681 
1682 				/*
1683 				 * Direction is always relative
1684 				 * to the initiator.
1685 				 */
1686 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1687 				descr->data_resid = scsi_3btoul(sr->xfer_len);
1688 				descr->timeout = 5 * 1000;
1689 				descr->status = SCSI_STATUS_OK;
1690 				if (cdb[0] == SEND) {
1691 					atio->ccb_h.flags |= CAM_DIR_OUT;
1692 					CAM_DEBUG(periph->path,
1693 						  CAM_DEBUG_PERIPH,
1694 						  ("Saw a SEND!\n"));
1695 					atio->ccb_h.flags |= CAM_DIR_OUT;
1696 					TAILQ_INSERT_TAIL(&softc->snd_ccb_queue,
1697 							  &atio->ccb_h,
1698 							  periph_links.tqe);
1699 					selwakeup(&softc->snd_select);
1700 				} else {
1701 					atio->ccb_h.flags |= CAM_DIR_IN;
1702 					CAM_DEBUG(periph->path,
1703 						  CAM_DEBUG_PERIPH,
1704 						  ("Saw a RECEIVE!\n"));
1705 					TAILQ_INSERT_TAIL(&softc->rcv_ccb_queue,
1706 							  &atio->ccb_h,
1707 							  periph_links.tqe);
1708 					selwakeup(&softc->rcv_select);
1709 				}
1710 				/*
1711 				 * Attempt to satisfy this request with
1712 				 * a user buffer.
1713 				 */
1714 				targrunqueue(periph, softc);
1715 				return;
1716 			}
1717 			default:
1718 				/*
1719 				 * Queue for consumption by our userland
1720 				 * counterpart and  transition to the exception
1721 				 * state.
1722 				 */
1723 				descr->data_resid = 0;
1724 				descr->data_increment = 0;
1725 				descr->user_atio = 1;
1726 				TAILQ_INSERT_TAIL(&softc->unknown_atio_queue,
1727 						  &atio->ccb_h,
1728 						  periph_links.tqe);
1729 				softc->exceptions |= TARG_EXCEPT_UNKNOWN_ATIO;
1730 				targfireexception(periph, softc);
1731 				return;
1732 			}
1733 		}
1734 
1735 		/* Queue us up to receive a Continue Target I/O ccb. */
1736 		if ((atio->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) {
1737 			TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
1738 					  periph_links.tqe);
1739 			priority = 0;
1740 		} else {
1741 			TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1742 					  periph_links.tqe);
1743 			priority = 1;
1744 		}
1745 		xpt_schedule(periph, priority);
1746 		break;
1747 	}
1748 	case XPT_CONT_TARGET_IO:
1749 	{
1750 		struct ccb_scsiio *csio;
1751 		struct ccb_accept_tio *atio;
1752 		struct targ_cmd_desc *desc;
1753 		struct bio *bp;
1754 		int    error, lastctio;
1755 
1756 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1757 			  ("Received completed CTIO\n"));
1758 		csio = &done_ccb->csio;
1759 		atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio;
1760 		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1761 
1762 		TAILQ_REMOVE(&softc->pending_queue, &done_ccb->ccb_h,
1763 			     periph_links.tqe);
1764 
1765 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1766 			printf("CCB with error %x\n", done_ccb->ccb_h.status);
1767 			error = targerror(done_ccb, 0, 0);
1768 			if (error == ERESTART)
1769 				break;
1770 			/*
1771 			 * Right now we don't need to do anything
1772 			 * prior to unfreezing the queue.  This may
1773 			 * change if certain errors are reported while
1774 			 * we are in a connected state.
1775 			 */
1776 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1777 				printf("Releasing Queue\n");
1778 				cam_release_devq(done_ccb->ccb_h.path,
1779 						 /*relsim_flags*/0,
1780 						 /*reduction*/0,
1781 						 /*timeout*/0,
1782 						 /*getcount_only*/0);
1783 				done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1784 			}
1785 		} else
1786 			error = 0;
1787 
1788 		/*
1789 		 * If we shipped back sense data when completing
1790 		 * this command, clear the pending CA for it.
1791 		 */
1792 		if (done_ccb->ccb_h.status & CAM_SENT_SENSE) {
1793 			struct initiator_state *istate;
1794 
1795 			istate = &softc->istate[csio->init_id];
1796 			if (istate->pending_ca == CA_UNIT_ATTN)
1797 				istate->pending_ua = UA_NONE;
1798 			istate->pending_ca = CA_NONE;
1799 			softc->istate[csio->init_id].pending_ca = CA_NONE;
1800 			done_ccb->ccb_h.status &= ~CAM_SENT_SENSE;
1801 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1802 				  ("Sent Sense\n"));
1803 			done_ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1804 		}
1805 
1806 		if (done_ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
1807 			struct initiator_state *istate;
1808 
1809 			istate = &softc->istate[csio->init_id];
1810 			copy_sense(softc, istate, (u_int8_t *)&csio->sense_data,
1811 				   csio->sense_len);
1812 			set_ca_condition(periph, csio->init_id, CA_CMD_SENSE);
1813 			done_ccb->ccb_h.status &= ~CAM_AUTOSNS_VALID;
1814 		}
1815 		/*
1816 		 * Was this the last CTIO?
1817 		 */
1818 		lastctio = done_ccb->ccb_h.status & CAM_SEND_STATUS;
1819 
1820 		desc->data_increment -= csio->resid;
1821 		desc->data_resid -= desc->data_increment;
1822 		if ((bp = desc->bp) != NULL) {
1823 
1824 			bp->bio_resid -= desc->data_increment;
1825 			bp->bio_error = error;
1826 
1827 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1828 				  ("Buffer I/O Completed - Resid %ld:%d\n",
1829 				   bp->bio_resid, desc->data_resid));
1830 			/*
1831 			 * Send the buffer back to the client if
1832 			 * either the command has completed or all
1833 			 * buffer space has been consumed.
1834 			 */
1835 			if (desc->data_resid == 0
1836 			 || bp->bio_resid == 0
1837 			 || error != 0) {
1838 				if (bp->bio_resid != 0)
1839 					/* Short transfer */
1840 					bp->bio_flags |= BIO_ERROR;
1841 
1842 				CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1843 					  ("Completing a buffer\n"));
1844 				biodone(bp);
1845 				desc->bp = NULL;
1846 			}
1847 		}
1848 
1849 		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1850 			atio->ccb_h.status |= CAM_DEV_QFRZN;
1851 		xpt_release_ccb(done_ccb);
1852 		if (softc->state != TARG_STATE_TEARDOWN) {
1853 			if (lastctio) {
1854 				/*
1855 				 * Send the original accept TIO back to the
1856 				 * controller to handle more work.
1857 				 */
1858 				CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1859 					  ("Returning ATIO to target SIM\n"));
1860 				atio->ccb_h.ccb_flags = TARG_CCB_NONE;
1861 				xpt_action((union ccb *)atio);
1862 				break;
1863 			}
1864 
1865 			if (SID_TYPE(softc->inq_data) == T_PROCESSOR) {
1866 				/* Queue us up for another buffer */
1867 				if (atio->cdb_io.cdb_bytes[0] == SEND) {
1868 					if (desc->bp != NULL)
1869 						TAILQ_INSERT_HEAD(
1870 						    &softc->snd_bio_queue.queue,
1871 						    bp, bio_queue);
1872 					TAILQ_INSERT_HEAD(&softc->snd_ccb_queue,
1873 					    &atio->ccb_h, periph_links.tqe);
1874 				} else {
1875 					if (desc->bp != NULL)
1876 						TAILQ_INSERT_HEAD(
1877 						    &softc->rcv_bio_queue.queue,
1878 						    bp, bio_queue);
1879 					TAILQ_INSERT_HEAD(&softc->rcv_ccb_queue,
1880 					    &atio->ccb_h, periph_links.tqe);
1881 				}
1882 				desc->bp = NULL;
1883 			}
1884 			targrunqueue(periph, softc);
1885 		} else {
1886 			if (desc->bp != NULL) {
1887 				bp->bio_flags |= BIO_ERROR;
1888 				bp->bio_error = ENXIO;
1889 				biodone(bp);
1890 			}
1891 			freedescr(desc);
1892 			free(atio, M_DEVBUF);
1893 		}
1894 		break;
1895 	}
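	/*
	 * The SIM delivered an immediate notify: an asynchronous event such
	 * as a bus reset, bus device reset, or an incoming message from an
	 * initiator.  Unless we are tearing down, record the condition and
	 * requeue the CCB so we keep listening for further events.
	 */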
1896 	case XPT_IMMED_NOTIFY:
1897 	{
1898 		int frozen;
1899 
1900 		frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
1901 		if (softc->state == TARG_STATE_TEARDOWN) {
1902 			SLIST_REMOVE(&softc->immed_notify_slist,
1903 				     &done_ccb->ccb_h, ccb_hdr,
1904 				     periph_links.sle);
1905 			free(done_ccb, M_DEVBUF);
1906 		} else if (done_ccb->ccb_h.status == CAM_REQ_ABORTED) {
1907 			free(done_ccb, M_DEVBUF);
1908 		} else {
1909 			printf("Saw event %x:%x\n", done_ccb->ccb_h.status,
1910 			       done_ccb->cin.message_args[0]);
1911 			/* Process error condition. */
1912 			targinoterror(periph, softc, &done_ccb->cin);
1913 
1914 			/* Requeue for another immediate event */
1915 			xpt_action(done_ccb);
1916 		}
1917 		if (frozen != 0)
1918 			cam_release_devq(periph->path,
1919 					 /*relsim_flags*/0,
1920 					 /*opening reduction*/0,
1921 					 /*timeout*/0,
1922 					 /*getcount_only*/0);
1923 		break;
1924 	}
1925 	case XPT_DEBUG:
1926 		wakeup(&done_ccb->ccb_h.cbfcnp);
1927 		break;
1928 	default:
1929 		panic("targdone: Impossible xpt opcode %x encountered.",
1930 		      done_ccb->ccb_h.func_code);
1931 		/* NOTREACHED */
1932 		break;
1933 	}
1934 }
1935 
1936 /*
1937  * Transition to the exception state and notify our symbiotic
1938  * userland process of the change.
1939  */
1940 static void
1941 targfireexception(struct cam_periph *periph, struct targ_softc *softc)
1942 {
1943 	/*
1944 	 * Return all pending buffers with short read/write status so our
1945 	 * process unblocks, and do a selwakeup on any process queued
1946 	 * waiting for reads or writes.  When the selwakeup is performed,
1947 	 * the waking process will wake up, call our poll routine again,
1948 	 * and pick up the exception.
1949 	 */
1950 	struct bio *bp;
1951 
1952 	if (softc->state != TARG_STATE_NORMAL)
1953 		/* Already either tearing down or in exception state */
1954 		return;
1955 
1956 	softc->state = TARG_STATE_EXCEPTION;
1957 
1958 	while ((bp = bioq_first(&softc->snd_bio_queue)) != NULL) {
1959 		bioq_remove(&softc->snd_bio_queue, bp);
1960 		bp->bio_flags |= BIO_ERROR;
1961 		biodone(bp);
1962 	}
1963 
1964 	while ((bp = bioq_first(&softc->rcv_bio_queue)) != NULL) {
1965 		bioq_remove(&softc->rcv_bio_queue, bp);
1966 		bp->bio_flags |= BIO_ERROR;
1967 		biodone(bp);
1968 	}
1969 
1970 	selwakeup(&softc->snd_select);
1971 	selwakeup(&softc->rcv_select);
1972 }
1973 
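/*
 * Process the event reported by an immediate notify CCB, setting unit
 * attention conditions and aborting affected transactions as needed.
 */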
1974 static void
1975 targinoterror(struct cam_periph *periph, struct targ_softc *softc,
1976 	      struct ccb_immed_notify *inot)
1977 {
1978 	cam_status status;
1979 	int sense;
1980 
1981 	status = inot->ccb_h.status;
1982 	sense = (status & CAM_AUTOSNS_VALID) != 0;
1983 	status &= CAM_STATUS_MASK;
1984 	switch (status) {
1985 	case CAM_SCSI_BUS_RESET:
1986 		set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD,
1987 					UA_BUS_RESET);
1988 		abort_pending_transactions(periph,
1989 					   /*init_id*/CAM_TARGET_WILDCARD,
1990 					   TARG_TAG_WILDCARD, EINTR,
1991 					   /*to_held_queue*/FALSE);
1992 		softc->exceptions |= TARG_EXCEPT_BUS_RESET_SEEN;
1993 		targfireexception(periph, softc);
1994 		break;
1995 	case CAM_BDR_SENT:
1996 		set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD,
1997 					UA_BDR);
1998 		abort_pending_transactions(periph, CAM_TARGET_WILDCARD,
1999 					   TARG_TAG_WILDCARD, EINTR,
2000 					   /*to_held_queue*/FALSE);
2001 		softc->exceptions |= TARG_EXCEPT_BDR_RECEIVED;
2002 		targfireexception(periph, softc);
2003 		break;
2004 	case CAM_MESSAGE_RECV:
2005 		switch (inot->message_args[0]) {
2006 		case MSG_INITIATOR_DET_ERR:
2007 			break;
2008 		case MSG_ABORT:
2009 			break;
2010 		case MSG_BUS_DEV_RESET:
2011 			break;
2012 		case MSG_ABORT_TAG:
2013 			break;
2014 		case MSG_CLEAR_QUEUE:
2015 			break;
2016 		case MSG_TERM_IO_PROC:
2017 			break;
2018 		default:
2019 			break;
2020 		}
2021 		break;
2022 	default:
2023 		break;
2024 	}
2025 }
2026 
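/*
 * Error recovery for a failed CTIO.  Returns ERESTART if the CCB has been
 * reissued or parked on the held queue; otherwise an errno value is returned
 * and, where appropriate, sense data and a contingent allegiance condition
 * are set up for the initiator.
 */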
2027 static int
2028 targerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
2029 {
2030 	struct cam_periph *periph;
2031 	struct targ_softc *softc;
2032 	struct ccb_scsiio *csio;
2033 	struct initiator_state *istate;
2034 	cam_status status;
2035 	int frozen;
2036 	int sense;
2037 	int error;
2038 	int on_held_queue;
2039 
2040 	periph = xpt_path_periph(ccb->ccb_h.path);
2041 	softc = (struct targ_softc *)periph->softc;
2042 	status = ccb->ccb_h.status;
2043 	sense = (status & CAM_AUTOSNS_VALID) != 0;
2044 	frozen = (status & CAM_DEV_QFRZN) != 0;
2045 	status &= CAM_STATUS_MASK;
2046 	on_held_queue = FALSE;
2047 	csio = &ccb->csio;
2048 	istate = &softc->istate[csio->init_id];
2049 	switch (status) {
2050 	case CAM_REQ_ABORTED:
2051 		if ((ccb->ccb_h.ccb_flags & TARG_CCB_ABORT_TO_HELDQ) != 0) {
2052 
2053 			/*
2054 			 * Place this CCB into the initiator's
2055 			 * 'held' queue until the pending CA is cleared.
2056 			 * If there is no CA pending, reissue immediately.
2057 			 */
2058 			if (istate->pending_ca == 0) {
2059 				ccb->ccb_h.ccb_flags = TARG_CCB_NONE;
2060 				xpt_action(ccb);
2061 			} else {
2062 				ccb->ccb_h.ccb_flags = TARG_CCB_HELDQ;
2063 				TAILQ_INSERT_TAIL(&softc->pending_queue,
2064 						  &ccb->ccb_h,
2065 						  periph_links.tqe);
2066 			}
2067 			/* The command will be retried at a later time. */
2068 			on_held_queue = TRUE;
2069 			error = ERESTART;
2070 			break;
2071 		}
2072 		/* FALLTHROUGH */
2073 	case CAM_SCSI_BUS_RESET:
2074 	case CAM_BDR_SENT:
2075 	case CAM_REQ_TERMIO:
2076 	case CAM_CMD_TIMEOUT:
2077 		/* Assume we did not send any data */
2078 		csio->resid = csio->dxfer_len;
2079 		error = EIO;
2080 		break;
2081 	case CAM_SEL_TIMEOUT:
2082 		if (ccb->ccb_h.retry_count > 0) {
2083 			ccb->ccb_h.retry_count--;
2084 			error = ERESTART;
2085 		} else {
2086 			/* "Select or reselect failure" */
2087 			csio->resid = csio->dxfer_len;
2088 			fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
2089 				   SSD_KEY_HARDWARE_ERROR, 0x45, 0x00);
2090 			set_ca_condition(periph, csio->init_id, CA_CMD_SENSE);
2091 			error = EIO;
2092 		}
2093 		break;
2094 	case CAM_UNCOR_PARITY:
2095 		/* "SCSI parity error" */
2096 		fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
2097 			   SSD_KEY_HARDWARE_ERROR, 0x47, 0x00);
2098 		set_ca_condition(periph, csio->init_id, CA_CMD_SENSE);
2099 		csio->resid = csio->dxfer_len;
2100 		error = EIO;
2101 		break;
2102 	case CAM_NO_HBA:
2103 		csio->resid = csio->dxfer_len;
2104 		error = ENXIO;
2105 		break;
2106 	case CAM_SEQUENCE_FAIL:
2107 		if (sense != 0) {
2108 			copy_sense(softc, istate, (u_int8_t *)&csio->sense_data,
2109 				   csio->sense_len);
2110 			set_ca_condition(periph, csio->init_id, CA_CMD_SENSE);
2111 		}
2112 		csio->resid = csio->dxfer_len;
2113 		error = EIO;
2114 		break;
2115 	case CAM_IDE:
2116 		/* "Initiator detected error message received" */
2117 		fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
2118 			   SSD_KEY_HARDWARE_ERROR, 0x48, 0x00);
2119 		set_ca_condition(periph, csio->init_id, CA_CMD_SENSE);
2120 		csio->resid = csio->dxfer_len;
2121 		error = EIO;
2122 		break;
2123 	case CAM_REQUEUE_REQ:
2124 		printf("Requeue Request!\n");
2125 		error = ERESTART;
2126 		break;
2127 	default:
2128 		csio->resid = csio->dxfer_len;
2129 		error = EIO;
2130 		panic("targerror: Unexpected status %x encountered", status);
2131 		/* NOTREACHED */
2132 	}
2133 
2134 	if (error == ERESTART || error == 0) {
2135 		/* Clear the QFRZN flag as we will release the queue */
2136 		if (frozen != 0)
2137 			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
2138 
2139 		if (error == ERESTART && !on_held_queue)
2140 			xpt_action(ccb);
2141 
2142 		if (frozen != 0)
2143 			cam_release_devq(ccb->ccb_h.path,
2144 					 /*relsim_flags*/0,
2145 					 /*opening reduction*/0,
2146 					 /*timeout*/0,
2147 					 /*getcount_only*/0);
2148 	}
2149 	return (error);
2150 }
2151 
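/*
 * Allocate a command descriptor and its backing store (MAX_BUF_SIZE bytes).
 * Returns NULL if either allocation fails.
 */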
2152 static struct targ_cmd_desc*
2153 allocdescr(void)
2154 {
2155 	struct targ_cmd_desc* descr;
2156 
2157 	/* Allocate the targ_descr structure */
2158 	descr = (struct targ_cmd_desc *)
2159 	    malloc(sizeof(*descr), M_DEVBUF, M_NOWAIT);
2160 	if (descr == NULL)
2161 		return (NULL);
2162 
2163 	bzero(descr, sizeof(*descr));
2164 
2165 	/* Allocate buffer backing store */
2166 	descr->backing_store = malloc(MAX_BUF_SIZE, M_DEVBUF, M_NOWAIT);
2167 	if (descr->backing_store == NULL) {
2168 		free(descr, M_DEVBUF);
2169 		return (NULL);
2170 	}
2171 	descr->max_size = MAX_BUF_SIZE;
2172 	return (descr);
2173 }
2174 
2175 static void
2176 freedescr(struct targ_cmd_desc *descr)
2177 {
2178 	free(descr->backing_store, M_DEVBUF);
2179 	free(descr, M_DEVBUF);
2180 }
2181 
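/*
 * Construct fixed-format sense data for the given initiator.  The extra
 * length only covers the fields up to, but not including, the FRU code.
 */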
2182 static void
2183 fill_sense(struct targ_softc *softc, u_int initiator_id, u_int error_code,
2184 	   u_int sense_key, u_int asc, u_int ascq)
2185 {
2186 	struct initiator_state *istate;
2187 	struct scsi_sense_data *sense;
2188 
2189 	istate = &softc->istate[initiator_id];
2190 	sense = &istate->sense_data;
2191 	bzero(sense, sizeof(*sense));
2192 	sense->error_code = error_code;
2193 	sense->flags = sense_key;
2194 	sense->add_sense_code = asc;
2195 	sense->add_sense_code_qual = ascq;
2196 
2197 	sense->extra_len = offsetof(struct scsi_sense_data, fru)
2198 			 - offsetof(struct scsi_sense_data, extra_len);
2199 }
2200 
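/*
 * Save sense data returned by the SIM for an initiator, truncating it to
 * the size of our stored sense structure if necessary.
 */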
2201 static void
2202 copy_sense(struct targ_softc *softc, struct initiator_state *istate,
2203 	   u_int8_t *sense_buffer, size_t sense_len)
2204 {
2205 	struct scsi_sense_data *sense;
2206 	size_t copylen;
2207 
2208 	sense = &istate->sense_data;
2209 	copylen = sizeof(*sense);
2210 	if (copylen > sense_len)
2211 		copylen = sense_len;
2212 	bcopy(sense_buffer, sense, copylen);
2213 }
2214 
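/*
 * Record a pending unit attention condition for one initiator, or for every
 * initiator when CAM_TARGET_WILDCARD is passed.
 */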
2215 static void
2216 set_unit_attention_cond(struct cam_periph *periph,
2217 			u_int initiator_id, ua_types ua)
2218 {
2219 	int start;
2220 	int end;
2221 	struct targ_softc *softc;
2222 
2223 	softc = (struct targ_softc *)periph->softc;
2224 	if (initiator_id == CAM_TARGET_WILDCARD) {
2225 		start = 0;
2226 		end = MAX_INITIATORS - 1;
2227 	} else
2228 		start = end = initiator_id;
2229 
2230 	while (start <= end) {
2231 		softc->istate[start].pending_ua = ua;
2232 		start++;
2233 	}
2234 }
2235 
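/*
 * Enter a contingent allegiance condition for this initiator and move any
 * of its in-flight CTIOs to the held queue until the condition is cleared.
 */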
2236 static void
2237 set_ca_condition(struct cam_periph *periph, u_int initiator_id, ca_types ca)
2238 {
2239 	struct targ_softc *softc;
2240 
2241 	softc = (struct targ_softc *)periph->softc;
2242 	softc->istate[initiator_id].pending_ca = ca;
2243 	abort_pending_transactions(periph, initiator_id, TARG_TAG_WILDCARD,
2244 				   /*errno*/0, /*to_held_queue*/TRUE);
2245 }
2246 
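/*
 * Abort queued and in-flight work for the given initiator and tag (both
 * accept wildcards).  ATIOs still waiting for resources are completed or
 * handed back to the SIM; CTIOs already submitted are aborted through an
 * XPT_ABORT request and finish up in targdone.
 */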
2247 static void
2248 abort_pending_transactions(struct cam_periph *periph, u_int initiator_id,
2249 			   u_int tag_id, int errno, int to_held_queue)
2250 {
2251 	struct ccb_abort cab;
2252 	struct ccb_queue *atio_queues[3];
2253 	struct targ_softc *softc;
2254 	struct ccb_hdr *ccbh;
2255 	u_int i;
2256 
2257 	softc = (struct targ_softc *)periph->softc;
2258 
2259 	atio_queues[0] = &softc->work_queue;
2260 	atio_queues[1] = &softc->snd_ccb_queue;
2261 	atio_queues[2] = &softc->rcv_ccb_queue;
2262 
2263 	/* First address the ATIOs awaiting resources */
2264 	for (i = 0; i < (sizeof(atio_queues) / sizeof(*atio_queues)); i++) {
2265 		struct ccb_queue *atio_queue;
2266 
2267 		if (to_held_queue) {
2268 			/*
2269 			 * The device queue is frozen anyway, so there
2270 			 * is nothing for us to do.
2271 			 */
2272 			continue;
2273 		}
2274 		atio_queue = atio_queues[i];
2275 		ccbh = TAILQ_FIRST(atio_queue);
2276 		while (ccbh != NULL) {
2277 			struct ccb_accept_tio *atio;
2278 			struct targ_cmd_desc *desc;
2279 
2280 			atio = (struct ccb_accept_tio *)ccbh;
2281 			desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
2282 			ccbh = TAILQ_NEXT(ccbh, periph_links.tqe);
2283 
2284 			/* Only abort the CCBs that match */
2285 			if ((atio->init_id != initiator_id
2286 			  && initiator_id != CAM_TARGET_WILDCARD)
2287 			 || (tag_id != TARG_TAG_WILDCARD
2288 			  && ((atio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
2289 			   || atio->tag_id != tag_id)))
2290 				continue;
2291 
2292 			TAILQ_REMOVE(atio_queue, &atio->ccb_h,
2293 				     periph_links.tqe);
2294 
2295 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
2296 				  ("Aborting ATIO\n"));
2297 			if (desc->bp != NULL) {
2298 				desc->bp->bio_flags |= BIO_ERROR;
2299 				if (softc->state != TARG_STATE_TEARDOWN)
2300 					desc->bp->bio_error = errno;
2301 				else
2302 					desc->bp->bio_error = ENXIO;
2303 				biodone(desc->bp);
2304 				desc->bp = NULL;
2305 			}
2306 			if (softc->state == TARG_STATE_TEARDOWN) {
2307 				freedescr(desc);
2308 				free(atio, M_DEVBUF);
2309 			} else {
2310 				/* Return the ATIO back to the controller */
2311 				atio->ccb_h.ccb_flags = TARG_CCB_NONE;
2312 				xpt_action((union ccb *)atio);
2313 			}
2314 		}
2315 	}
2316 
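	/*
	 * Now deal with CTIOs that have already been sent to the SIM: ask
	 * the transport to abort each matching one, optionally flagging it
	 * to be moved to the held queue when the abort completes.
	 */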
2317 	ccbh = TAILQ_FIRST(&softc->pending_queue);
2318 	while (ccbh != NULL) {
2319 		struct ccb_scsiio *csio;
2320 
2321 		csio = (struct ccb_scsiio *)ccbh;
2322 		ccbh = TAILQ_NEXT(ccbh, periph_links.tqe);
2323 
2324 		/* Only abort the CCBs that match */
2325 		if ((csio->init_id != initiator_id
2326 		  && initiator_id != CAM_TARGET_WILDCARD)
2327 		 || (tag_id != TARG_TAG_WILDCARD
2328 		  && ((csio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
2329 		   || csio->tag_id != tag_id)))
2330 			continue;
2331 
2332 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
2333 			  ("Aborting CTIO\n"));
2334 
2335 		TAILQ_REMOVE(&softc->pending_queue, &csio->ccb_h,
2336 			     periph_links.tqe);
2337 
2338 		if (to_held_queue != 0)
2339 			csio->ccb_h.ccb_flags |= TARG_CCB_ABORT_TO_HELDQ;
2340 		xpt_setup_ccb(&cab.ccb_h, csio->ccb_h.path, /*priority*/1);
		cab.ccb_h.func_code = XPT_ABORT;
2341 		cab.abort_ccb = (union ccb *)csio;
2342 		xpt_action((union ccb *)&cab);
2343 		if (cab.ccb_h.status != CAM_REQ_CMP) {
2344 			xpt_print_path(cab.ccb_h.path);
2345 			printf("Unable to abort CCB.  Status %x\n",
2346 			       cab.ccb_h.status);
2347 		}
2348 	}
2349 }
2350