xref: /freebsd/sys/cam/scsi/scsi_target.c (revision daf1cffce2e07931f27c6c6998652e90df6ba87e)
1 /*
2  * Implementation of a simple Target Mode SCSI Processor Target driver for CAM.
3  *
4  * Copyright (c) 1998, 1999 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 #include <stddef.h>	/* For offsetof */
31 
32 #include <sys/param.h>
33 #include <sys/queue.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/types.h>
37 #include <sys/buf.h>
38 #include <sys/conf.h>
39 #include <sys/devicestat.h>
40 #include <sys/malloc.h>
41 #include <sys/poll.h>
42 #include <sys/select.h>	/* For struct selinfo. */
43 #include <sys/uio.h>
44 
45 #include <cam/cam.h>
46 #include <cam/cam_ccb.h>
47 #include <cam/cam_extend.h>
48 #include <cam/cam_periph.h>
49 #include <cam/cam_queue.h>
50 #include <cam/cam_xpt_periph.h>
51 #include <cam/cam_debug.h>
52 
53 #include <cam/scsi/scsi_all.h>
54 #include <cam/scsi/scsi_pt.h>
55 #include <cam/scsi/scsi_targetio.h>
56 #include <cam/scsi/scsi_message.h>
57 
58 typedef enum {
59 	TARG_STATE_NORMAL,
60 	TARG_STATE_EXCEPTION,
61 	TARG_STATE_TEARDOWN
62 } targ_state;
63 
64 typedef enum {
65 	TARG_FLAG_NONE		 = 0x00,
66 	TARG_FLAG_SEND_EOF	 = 0x01,
67 	TARG_FLAG_RECEIVE_EOF	 = 0x02,
68 	TARG_FLAG_LUN_ENABLED	 = 0x04
69 } targ_flags;
70 
71 typedef enum {
72 	TARG_CCB_NONE		= 0x00,
73 	TARG_CCB_WAITING	= 0x01,
74 	TARG_CCB_HELDQ		= 0x02,
75 	TARG_CCB_ABORT_TO_HELDQ = 0x04
76 } targ_ccb_flags;
77 
78 #define MAX_ACCEPT	16
79 #define MAX_IMMEDIATE	16
80 #define MAX_BUF_SIZE	256	/* Max inquiry/sense/mode page transfer */
81 #define MAX_INITIATORS	256	/* includes widest fibre channel for now */
82 
83 #define MIN(a, b) (((a) > (b)) ? (b) : (a))
84 
85 #define TARG_CONTROL_UNIT 0xffff00ff
86 #define TARG_IS_CONTROL_DEV(unit) ((unit) == TARG_CONTROL_UNIT)
87 
88 #define TARG_TAG_WILDCARD ((u_int)~0)
89 
90 /* Offsets into our private CCB area for storing accept information */
91 #define ccb_flags	ppriv_field0
92 #define ccb_descr	ppriv_ptr1
93 
94 /* We stick a pointer to the originating accept TIO in each continue I/O CCB */
95 #define ccb_atio	ppriv_ptr1
96 
97 struct targ_softc {
98 	/* CTIOs pending on the controller */
99 	struct		ccb_queue pending_queue;
100 
101 	/* ATIOs awaiting CTIO resources from the XPT */
102 	struct		ccb_queue work_queue;
103 
104 	/*
105 	 * ATIOs for SEND operations waiting for 'write'
106 	 * buffer resources from our userland daemon.
107 	 */
108 	struct		ccb_queue snd_ccb_queue;
109 
110 	/*
111 	 * ATIOs for RCV operations waiting for 'read'
112 	 * buffer resources from our userland daemon.
113 	 */
114 	struct		ccb_queue rcv_ccb_queue;
115 
116 	/*
117 	 * ATIOs for commands unknown to the kernel driver.
118 	 * These are queued for the userland daemon to
119 	 * consume.
120 	 */
121 	struct		ccb_queue unknown_atio_queue;
122 
123 	/*
124 	 * Userland buffers for SEND commands waiting for
125 	 * SEND ATIOs to be queued by an initiator.
126 	 */
127 	struct		buf_queue_head snd_buf_queue;
128 
129 	/*
130 	 * Userland buffers for RCV commands waiting for
131 	 * RCV ATIOs to be queued by an initiator.
132 	 */
133 	struct		buf_queue_head rcv_buf_queue;
134 	struct		devstat device_stats;
135 	dev_t		targ_dev;
136 	struct		selinfo snd_select;
137 	struct		selinfo rcv_select;
138 	targ_state	state;
139 	targ_flags	flags;
140 	targ_exception	exceptions;
141 	u_int		init_level;
142 	u_int		inq_data_len;
143 	struct		scsi_inquiry_data *inq_data;
144 	struct		ccb_accept_tio *accept_tio_list;
145 	struct		ccb_hdr_slist immed_notify_slist;
146 	struct		initiator_state istate[MAX_INITIATORS];
147 };
148 
149 struct targ_cmd_desc {
150 	struct	  ccb_accept_tio* atio_link;
151 	u_int	  data_resid;	/* How much left to transfer */
152 	u_int	  data_increment;/* Amount to send before next disconnect */
153 	void*	  data;		/* The data. Can be from backing_store or not */
154 	void*	  backing_store;/* Backing store allocated for this descriptor*/
155 	struct	  buf *bp;	/* Buffer for this transfer */
156 	u_int	  max_size;	/* Size of backing_store */
157 	u_int32_t timeout;
158 	u_int8_t  status;	/* Status to return to initiator */
159 };
160 
161 static	d_open_t	targopen;
162 static	d_close_t	targclose;
163 static	d_read_t	targread;
164 static	d_write_t	targwrite;
165 static	d_ioctl_t	targioctl;
166 static	d_poll_t	targpoll;
167 static	d_strategy_t	targstrategy;
168 
169 #define TARG_CDEV_MAJOR	65
170 static struct cdevsw targ_cdevsw = {
171 	/* open */	targopen,
172 	/* close */	targclose,
173 	/* read */	targread,
174 	/* write */	targwrite,
175 	/* ioctl */	targioctl,
176 	/* poll */	targpoll,
177 	/* mmap */	nommap,
178 	/* strategy */	targstrategy,
179 	/* name */	"targ",
180 	/* maj */	TARG_CDEV_MAJOR,
181 	/* dump */	nodump,
182 	/* psize */	nopsize,
183 	/* flags */	0,
184 	/* bmaj */	-1
185 };
186 
187 static int		targsendccb(struct cam_periph *periph, union ccb *ccb,
188 				    union ccb *inccb);
189 static periph_init_t	targinit;
190 static void		targasync(void *callback_arg, u_int32_t code,
191 				struct cam_path *path, void *arg);
192 static int		targallocinstance(struct ioc_alloc_unit *alloc_unit);
193 static int		targfreeinstance(struct ioc_alloc_unit *alloc_unit);
194 static cam_status	targenlun(struct cam_periph *periph);
195 static cam_status	targdislun(struct cam_periph *periph);
196 static periph_ctor_t	targctor;
197 static periph_dtor_t	targdtor;
198 static void		targrunqueue(struct cam_periph *periph,
199 				     struct targ_softc *softc);
200 static periph_start_t	targstart;
201 static void		targdone(struct cam_periph *periph,
202 				 union ccb *done_ccb);
203 static void		targfireexception(struct cam_periph *periph,
204 					  struct targ_softc *softc);
205 static void		targinoterror(struct cam_periph *periph,
206 				      struct targ_softc *softc,
207 				      struct ccb_immed_notify *inot);
208 static  int		targerror(union ccb *ccb, u_int32_t cam_flags,
209 				  u_int32_t sense_flags);
210 static struct targ_cmd_desc*	allocdescr(void);
211 static void		freedescr(struct targ_cmd_desc *buf);
212 static void		fill_sense(struct targ_softc *softc,
213 				   u_int initiator_id, u_int error_code,
214 				   u_int sense_key, u_int asc, u_int ascq);
215 static void		copy_sense(struct targ_softc *softc,
216 				   struct initiator_state *istate,
217 				   u_int8_t *sense_buffer, size_t sense_len);
218 static void	set_unit_attention_cond(struct cam_periph *periph,
219 					u_int initiator_id, ua_types ua);
220 static void	set_ca_condition(struct cam_periph *periph,
221 				 u_int initiator_id, ca_types ca);
222 static void	abort_pending_transactions(struct cam_periph *periph,
223 					   u_int initiator_id, u_int tag_id,
224 					   int errno, int to_held_queue);
225 
226 static struct periph_driver targdriver =
227 {
228 	targinit, "targ",
229 	TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
230 };
231 
232 DATA_SET(periphdriver_set, targdriver);
233 
234 static struct extend_array *targperiphs;
235 static dev_t targ_ctl_dev;
236 
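/*
 * Periph driver initialization: allocate the extend array used to map
 * unit numbers to periph instances and create the "targ.ctl" control
 * device through which userland allocates and frees target instances.
 */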
237 static void
238 targinit(void)
239 {
240 	/*
241 	 * Create our extend array for storing the devices we attach to.
242 	 */
243 	targperiphs = cam_extend_new();
244 	if (targperiphs == NULL) {
245 		printf("targ: Failed to alloc extend array!\n");
246 		return;
247 	}
248 	targ_ctl_dev = make_dev(&targ_cdevsw, TARG_CONTROL_UNIT, UID_ROOT,
249 	    GID_OPERATOR, 0600, "%s.ctl", "targ");
250 	if (targ_ctl_dev == (dev_t) 0) {
251 		printf("targ: failed to create control dev\n");
252 	}
253 }
254 
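/*
 * Async callback registered with the XPT.  Currently only a stub;
 * path deregistration is noted but not yet handled (see the XXX below).
 */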
255 static void
256 targasync(void *callback_arg, u_int32_t code,
257 	  struct cam_path *path, void *arg)
258 {
259 	struct cam_periph *periph;
260 	struct targ_softc *softc;
261 
262 	periph = (struct cam_periph *)callback_arg;
263 	softc = (struct targ_softc *)periph->softc;
264 	switch (code) {
265 	case AC_PATH_DEREGISTERED:
266 	{
267 		/* XXX Implement */
268 		break;
269 	}
270 	default:
271 		break;
272 	}
273 }
274 
275 /* Attempt to enable our lun */
276 static cam_status
277 targenlun(struct cam_periph *periph)
278 {
279 	union ccb immed_ccb;
280 	struct targ_softc *softc;
281 	cam_status status;
282 	int i;
283 
284 	softc = (struct targ_softc *)periph->softc;
285 
286 	if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0)
287 		return (CAM_REQ_CMP);
288 
289 	xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, /*priority*/1);
290 	immed_ccb.ccb_h.func_code = XPT_EN_LUN;
291 
292 	/* Don't need support for any vendor specific commands */
293 	immed_ccb.cel.grp6_len = 0;
294 	immed_ccb.cel.grp7_len = 0;
295 	immed_ccb.cel.enable = 1;
296 	xpt_action(&immed_ccb);
297 	status = immed_ccb.ccb_h.status;
298 	if (status != CAM_REQ_CMP) {
299 		xpt_print_path(periph->path);
300 		printf("targenlun - Enable Lun Rejected with status 0x%x\n",
301 		       status);
302 		return (status);
303 	}
304 
305 	softc->flags |= TARG_FLAG_LUN_ENABLED;
306 
307 	/*
308 	 * Build up a buffer of accept target I/O
309 	 * operations for incoming selections.
310 	 */
311 	for (i = 0; i < MAX_ACCEPT; i++) {
312 		struct ccb_accept_tio *atio;
313 
314 		atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_DEVBUF,
315 						      M_NOWAIT);
316 		if (atio == NULL) {
317 			status = CAM_RESRC_UNAVAIL;
318 			break;
319 		}
320 
321 		atio->ccb_h.ccb_descr = allocdescr();
322 
323 		if (atio->ccb_h.ccb_descr == NULL) {
324 			free(atio, M_DEVBUF);
325 			status = CAM_RESRC_UNAVAIL;
326 			break;
327 		}
328 
329 		xpt_setup_ccb(&atio->ccb_h, periph->path, /*priority*/1);
330 		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
331 		atio->ccb_h.cbfcnp = targdone;
332 		xpt_action((union ccb *)atio);
333 		status = atio->ccb_h.status;
334 		if (status != CAM_REQ_INPROG) {
335 			xpt_print_path(periph->path);
336 			printf("Queue of atio failed\n");
337 			freedescr(atio->ccb_h.ccb_descr);
338 			free(atio, M_DEVBUF);
339 			break;
340 		}
341 		((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
342 		    softc->accept_tio_list;
343 		softc->accept_tio_list = atio;
344 	}
345 
346 	if (i == 0) {
347 		xpt_print_path(periph->path);
348 		printf("targenlun - Could not allocate accept tio CCBs: "
349 		       "status = 0x%x\n", status);
350 		targdislun(periph);
351 		return (CAM_REQ_CMP_ERR);
352 	}
353 
354 	/*
355 	 * Build up a buffer of immediate notify CCBs
356 	 * so the SIM can tell us of asynchronous target mode events.
357 	 */
358 	for (i = 0; i < MAX_ACCEPT; i++) {
359 		struct ccb_immed_notify *inot;
360 
361 		inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_DEVBUF,
362 						        M_NOWAIT);
363 
364 		if (inot == NULL) {
365 			status = CAM_RESRC_UNAVAIL;
366 			break;
367 		}
368 
369 		xpt_setup_ccb(&inot->ccb_h, periph->path, /*priority*/1);
370 		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
371 		inot->ccb_h.cbfcnp = targdone;
372 		SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
373 				  periph_links.sle);
374 		xpt_action((union ccb *)inot);
375 	}
376 
377 	if (i == 0) {
378 		xpt_print_path(periph->path);
379 		printf("targenlun - Could not allocate immediate notify CCBs: "
380 		       "status = 0x%x\n", status);
381 		targdislun(periph);
382 		return (CAM_REQ_CMP_ERR);
383 	}
384 
385 	return (CAM_REQ_CMP);
386 }
387 
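/*
 * Disable our lun: abort all outstanding accept and immediate notify
 * CCBs and then issue an XPT_EN_LUN with enable cleared.
 */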
388 static cam_status
389 targdislun(struct cam_periph *periph)
390 {
391 	union ccb ccb;
392 	struct targ_softc *softc;
393 	struct ccb_accept_tio* atio;
394 	struct ccb_hdr *ccb_h;
395 
396 	softc = (struct targ_softc *)periph->softc;
397 	if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0)
398 		return CAM_REQ_CMP;
399 
400 	/* XXX Block for Continue I/O completion */
401 
402 	/* Kill off all ACCEPT and IMMEDIATE CCBs */
403 	while ((atio = softc->accept_tio_list) != NULL) {
404 
405 		softc->accept_tio_list =
406 		    ((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link;
407 		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
408 		ccb.cab.ccb_h.func_code = XPT_ABORT;
409 		ccb.cab.abort_ccb = (union ccb *)atio;
410 		xpt_action(&ccb);
411 	}
412 
413 	while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) {
414 		SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle);
415 		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
416 		ccb.cab.ccb_h.func_code = XPT_ABORT;
417 		ccb.cab.abort_ccb = (union ccb *)ccb_h;
418 		xpt_action(&ccb);
419 	}
420 
421 	/*
422 	 * Disable this lun.
423 	 */
424 	xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, /*priority*/1);
425 	ccb.cel.ccb_h.func_code = XPT_EN_LUN;
426 	ccb.cel.enable = 0;
427 	xpt_action(&ccb);
428 
429 	if (ccb.cel.ccb_h.status != CAM_REQ_CMP)
430 		printf("targdislun - Disabling lun on controller failed "
431 		       "with status 0x%x\n", ccb.cel.ccb_h.status);
432 	else
433 		softc->flags &= ~TARG_FLAG_LUN_ENABLED;
434 	return (ccb.cel.ccb_h.status);
435 }
436 
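/*
 * Periph constructor.  Allocates and initializes the per-instance
 * softc, seeds each initiator with a power-on unit attention, builds
 * default SCSI-2 processor-device inquiry data, and creates the
 * per-unit character device node.
 */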
437 static cam_status
438 targctor(struct cam_periph *periph, void *arg)
439 {
440 	struct ccb_pathinq *cpi;
441 	struct targ_softc *softc;
442 	int i;
443 
444 	cpi = (struct ccb_pathinq *)arg;
445 
446 	/* Allocate our per-instance private storage */
447 	softc = (struct targ_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);
448 	if (softc == NULL) {
449 		printf("targctor: unable to malloc softc\n");
450 		return (CAM_REQ_CMP_ERR);
451 	}
452 
453 	bzero(softc, sizeof(*softc));
454 	TAILQ_INIT(&softc->pending_queue);
455 	TAILQ_INIT(&softc->work_queue);
456 	TAILQ_INIT(&softc->snd_ccb_queue);
457 	TAILQ_INIT(&softc->rcv_ccb_queue);
458 	TAILQ_INIT(&softc->unknown_atio_queue);
459 	bufq_init(&softc->snd_buf_queue);
460 	bufq_init(&softc->rcv_buf_queue);
461 	softc->accept_tio_list = NULL;
462 	SLIST_INIT(&softc->immed_notify_slist);
463 	softc->state = TARG_STATE_NORMAL;
464 	periph->softc = softc;
465 	softc->init_level++;
466 
467 	cam_extend_set(targperiphs, periph->unit_number, periph);
468 
469 	/*
470 	 * We start out life with a UA to indicate power-on/reset.
471 	 */
472 	for (i = 0; i < MAX_INITIATORS; i++)
473 		softc->istate[i].pending_ua = UA_POWER_ON;
474 
475 	/*
476 	 * Allocate an initial inquiry data buffer.  We might allow the
477 	 * user to override this later via an ioctl.
478 	 */
479 	softc->inq_data_len = sizeof(*softc->inq_data);
480 	softc->inq_data = malloc(softc->inq_data_len, M_DEVBUF, M_NOWAIT);
481 	if (softc->inq_data == NULL) {
482 		printf("targctor - Unable to malloc inquiry data\n");
483 		targdtor(periph);
484 		return (CAM_RESRC_UNAVAIL);
485 	}
486 	bzero(softc->inq_data, softc->inq_data_len);
487 	softc->inq_data->device = T_PROCESSOR | (SID_QUAL_LU_CONNECTED << 5);
488 	softc->inq_data->version = 2;
489 	softc->inq_data->response_format = 2; /* SCSI2 Inquiry Format */
490 	softc->inq_data->flags =
491 	    cpi->hba_inquiry & (PI_SDTR_ABLE|PI_WIDE_16|PI_WIDE_32);
492 	softc->inq_data->additional_length = softc->inq_data_len - 4;
493 	strncpy(softc->inq_data->vendor, "FreeBSD ", SID_VENDOR_SIZE);
494 	strncpy(softc->inq_data->product, "TM-PT           ", SID_PRODUCT_SIZE);
495 	strncpy(softc->inq_data->revision, "0.0 ", SID_REVISION_SIZE);
496 	softc->targ_dev = make_dev(&targ_cdevsw, periph->unit_number, UID_ROOT,
497 				   GID_OPERATOR, 0600, "%s%d",
498 				   periph->periph_name, periph->unit_number);
499 	softc->init_level++;
500 	return (CAM_REQ_CMP);
501 }
502 
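/*
 * Periph destructor.  Disables the lun and releases resources in the
 * reverse order of their allocation, as tracked by init_level.
 */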
503 static void
504 targdtor(struct cam_periph *periph)
505 {
506 	struct targ_softc *softc;
507 
508 	softc = (struct targ_softc *)periph->softc;
509 
510 	softc->state = TARG_STATE_TEARDOWN;
511 
512 	targdislun(periph);
513 
514 	cam_extend_release(targperiphs, periph->unit_number);
515 
516 	switch (softc->init_level) {
517 	default:
518 		/* FALLTHROUGH */
519 	case 2:
520 		free(softc->inq_data, M_DEVBUF);
521 		destroy_dev(softc->targ_dev);
522 		/* FALLTHROUGH */
523 	case 1:
524 		free(softc, M_DEVBUF);
525 		break;
526 	case 0:
527 		panic("targdtor - impossible init level");
528 	}
529 }
530 
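/*
 * Open entry point.  Opens of the control device always succeed;
 * opening a unit device acquires a reference on the periph and
 * attempts to enable the lun for target mode.
 */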
531 static int
532 targopen(dev_t dev, int flags, int fmt, struct proc *p)
533 {
534 	struct cam_periph *periph;
535 	struct	targ_softc *softc;
536 	u_int unit;
537 	cam_status status;
538 	int error;
539 	int s;
540 
541 	unit = minor(dev);
542 
543 	/* An open of the control device always succeeds */
544 	if (TARG_IS_CONTROL_DEV(unit))
545 		return 0;
546 
547 	s = splsoftcam();
548 	periph = cam_extend_get(targperiphs, unit);
549 	if (periph == NULL) {
550 		splx(s);
551 		return (ENXIO);
552 	}
553 	if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0) {
554 		splx(s);
555 		return (error);
556 	}
557 
558 	softc = (struct targ_softc *)periph->softc;
559 	if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0) {
560 		if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
561 			splx(s);
562 			cam_periph_unlock(periph);
563 			return(ENXIO);
564 		}
565 	}
566         splx(s);
567 
568 	status = targenlun(periph);
569 	switch (status) {
570 	case CAM_REQ_CMP:
571 		error = 0;
572 		break;
573 	case CAM_RESRC_UNAVAIL:
574 		error = ENOMEM;
575 		break;
576 	case CAM_LUN_ALRDY_ENA:
577 		error = EADDRINUSE;
578 		break;
579 	default:
580 		error = ENXIO;
581 		break;
582 	}
583         cam_periph_unlock(periph);
584 	if (error) {
585 		cam_periph_release(periph);
586 	}
587 	return (error);
588 }
589 
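/*
 * Close entry point.  Disables the lun and drops the reference taken
 * at open time.
 */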
590 static int
591 targclose(dev_t dev, int flag, int fmt, struct proc *p)
592 {
593 	struct	cam_periph *periph;
594 	struct	targ_softc *softc;
595 	u_int	unit;
596 	int	s;
597 	int	error;
598 
599 	unit = minor(dev);
600 
601 	/* A close of the control device always succeeds */
602 	if (TARG_IS_CONTROL_DEV(unit))
603 		return 0;
604 
605 	s = splsoftcam();
606 	periph = cam_extend_get(targperiphs, unit);
607 	if (periph == NULL) {
608 		splx(s);
609 		return (ENXIO);
610 	}
611 	softc = (struct targ_softc *)periph->softc;
612 	splx(s);
613 	if ((error = cam_periph_lock(periph, PRIBIO)) != 0)
614 		return (error);
615 
616 	targdislun(periph);
617 
618 	cam_periph_unlock(periph);
619 	cam_periph_release(periph);
620 
621 	return (0);
622 }
623 
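/*
 * Handle TARGCTLIOALLOCUNIT: create a path to the requested bus,
 * target and lun, verify that the controller supports processor
 * target mode, and allocate a new periph instance for it.
 */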
624 static int
625 targallocinstance(struct ioc_alloc_unit *alloc_unit)
626 {
627 	struct ccb_pathinq cpi;
628 	struct cam_path *path;
629 	struct cam_periph *periph;
630 	cam_status status;
631 	int free_path_on_return;
632 	int error;
633 
634 	free_path_on_return = 0;
635 	status = xpt_create_path(&path, /*periph*/NULL,
636 				 alloc_unit->path_id,
637 				 alloc_unit->target_id,
638 				 alloc_unit->lun_id);
639 	if (status != CAM_REQ_CMP) {
640 		printf("Couldn't Allocate Path %x\n", status);
641 		goto fail;
642 	}
643 
644 	free_path_on_return++;
645 
646 
647 	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
648 	cpi.ccb_h.func_code = XPT_PATH_INQ;
649 	xpt_action((union ccb *)&cpi);
650 	status = cpi.ccb_h.status;
651 
652 	if (status != CAM_REQ_CMP) {
653 		printf("Couldn't CPI %x\n", status);
654 		goto fail;
655 	}
656 
657 	/* Can only alloc units on controllers that support target mode */
658 	if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
659 		printf("Controller does not support target mode\n");
660 		status = CAM_PATH_INVALID;
661 		goto fail;
662 	}
663 
664 	/* Ensure that we don't already have an instance for this unit. */
665 	if ((periph = cam_periph_find(path, "targ")) != NULL) {
666 		status = CAM_LUN_ALRDY_ENA;
667 		goto fail;
668 	}
669 
670 	/*
671 	 * Allocate a peripheral instance for
672 	 * this target instance.
673 	 */
674 	status = cam_periph_alloc(targctor, NULL, targdtor, targstart,
675 				  "targ", CAM_PERIPH_BIO, path, targasync,
676 				  0, &cpi);
677 
678 fail:
679 	switch (status) {
680 	case CAM_REQ_CMP:
681 	{
682 		struct cam_periph *periph;
683 
684 		if ((periph = cam_periph_find(path, "targ")) == NULL)
685 			panic("targallocinstance: Succeeded but no periph?");
686 		error = 0;
687 		alloc_unit->unit = periph->unit_number;
688 		break;
689 	}
690 	case CAM_RESRC_UNAVAIL:
691 		error = ENOMEM;
692 		break;
693 	case CAM_LUN_ALRDY_ENA:
694 		error = EADDRINUSE;
695 		break;
696 	default:
697 		printf("targallocinstance: Unexpected CAM status %x\n", status);
698 		/* FALLTHROUGH */
699 	case CAM_PATH_INVALID:
700 		error = ENXIO;
701 		break;
702 	case CAM_PROVIDE_FAIL:
703 		error = ENODEV;
704 		break;
705 	}
706 
707 	if (free_path_on_return != 0)
708 		xpt_free_path(path);
709 
710 	return (error);
711 }
712 
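/*
 * Handle TARGCTLIOFREEUNIT: locate the periph for the requested
 * bus/target/lun and invalidate it, provided its lun is not still
 * enabled.
 */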
713 static int
714 targfreeinstance(struct ioc_alloc_unit *alloc_unit)
715 {
716 	struct cam_path *path;
717 	struct cam_periph *periph;
718 	struct targ_softc *softc;
719 	cam_status status;
720 	int free_path_on_return;
721 	int error;
722 
723 	periph = NULL;
724 	free_path_on_return = 0;
725 	status = xpt_create_path(&path, /*periph*/NULL,
726 				 alloc_unit->path_id,
727 				 alloc_unit->target_id,
728 				 alloc_unit->lun_id);
729 	free_path_on_return++;
730 
731 	if (status != CAM_REQ_CMP)
732 		goto fail;
733 
734 	/* Find our instance. */
735 	if ((periph = cam_periph_find(path, "targ")) == NULL) {
736 		xpt_print_path(path);
737 		status = CAM_PATH_INVALID;
738 		goto fail;
739 	}
740 
741         softc = (struct targ_softc *)periph->softc;
742 
743         if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0) {
744 		status = CAM_BUSY;
745 		goto fail;
746 	}
747 
748 fail:
749 	if (free_path_on_return != 0)
750 		xpt_free_path(path);
751 
752 	switch (status) {
753 	case CAM_REQ_CMP:
754 		if (periph != NULL)
755 			cam_periph_invalidate(periph);
756 		error = 0;
757 		break;
758 	case CAM_RESRC_UNAVAIL:
759 		error = ENOMEM;
760 		break;
761 	case CAM_LUN_ALRDY_ENA:
762 		error = EADDRINUSE;
763 		break;
764 	default:
765 		printf("targfreeinstance: Unexpected CAM status %x\n", status);
766 		/* FALLTHROUGH */
767 	case CAM_PATH_INVALID:
768 		error = ENODEV;
769 		break;
770 	}
771 	return (error);
772 }
773 
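/*
 * Ioctl entry point.  The control device accepts only the unit
 * allocation ioctls; per-unit devices accept the TARGIOC* commands
 * from scsi_targetio.h for exception handling, ATIO retrieval,
 * pass-through CCBs, and initiator state manipulation.
 *
 * Userland sketch (illustrative only; error handling omitted and
 * ctl_fd assumed to be an open descriptor for the targ.ctl device):
 *
 *	struct ioc_alloc_unit alloc;
 *
 *	alloc.path_id = 0;
 *	alloc.target_id = 1;
 *	alloc.lun_id = 0;
 *	if (ioctl(ctl_fd, TARGCTLIOALLOCUNIT, &alloc) == 0)
 *		printf("allocated targ%d\n", alloc.unit);
 */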
774 static int
775 targioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
776 {
777 	struct cam_periph *periph;
778 	struct targ_softc *softc;
779 	u_int  unit;
780 	int    error;
781 
782 	unit = minor(dev);
783 	error = 0;
784 	if (TARG_IS_CONTROL_DEV(unit)) {
785 		switch (cmd) {
786 		case TARGCTLIOALLOCUNIT:
787 			error = targallocinstance((struct ioc_alloc_unit*)addr);
788 			break;
789 		case TARGCTLIOFREEUNIT:
790 			error = targfreeinstance((struct ioc_alloc_unit*)addr);
791 			break;
792 		default:
793 			error = EINVAL;
794 			break;
795 		}
796 		return (error);
797 	}
798 
799 	periph = cam_extend_get(targperiphs, unit);
800 	if (periph == NULL)
801 		return (ENXIO);
802 	softc = (struct targ_softc *)periph->softc;
803 	switch (cmd) {
804 	case TARGIOCFETCHEXCEPTION:
805 		*((targ_exception *)addr) = softc->exceptions;
806 		break;
807 	case TARGIOCCLEAREXCEPTION:
808 	{
809 		targ_exception clear_mask;
810 
811 		clear_mask = *((targ_exception *)addr);
812 		if ((clear_mask & TARG_EXCEPT_UNKNOWN_ATIO) != 0) {
813 			struct ccb_hdr *ccbh;
814 
815 			ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
816 			if (ccbh != NULL) {
817 				TAILQ_REMOVE(&softc->unknown_atio_queue,
818 					     ccbh, periph_links.tqe);
819 				/* Requeue the ATIO back to the controller */
820 				xpt_action((union ccb *)ccbh);
821 				ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
822 			}
823 			if (ccbh != NULL)
824 				clear_mask &= ~TARG_EXCEPT_UNKNOWN_ATIO;
825 		}
826 		softc->exceptions &= ~clear_mask;
827 		if (softc->exceptions == TARG_EXCEPT_NONE
828 		 && softc->state == TARG_STATE_EXCEPTION) {
829 			softc->state = TARG_STATE_NORMAL;
830 			targrunqueue(periph, softc);
831 		}
832 		break;
833 	}
834 	case TARGIOCFETCHATIO:
835 	{
836 		struct ccb_hdr *ccbh;
837 
838 		ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
839 		if (ccbh != NULL) {
840 			bcopy(ccbh, addr, sizeof(struct ccb_accept_tio));
841 		} else {
842 			error = ENOENT;
843 		}
844 		break;
845 	}
846 	case TARGIOCCOMMAND:
847 	{
848 		union ccb *inccb;
849 		union ccb *ccb;
850 
851 		/*
852 		 * XXX JGibbs
853 		 * This code is lifted directly from the pass-thru driver.
854 		 * Perhaps this should be moved to a library????
855 		 */
856 		inccb = (union ccb *)addr;
857 		ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority);
858 
859 		error = targsendccb(periph, ccb, inccb);
860 
861 		xpt_release_ccb(ccb);
862 
863 		break;
864 	}
865 	case TARGIOCGETISTATE:
866 	case TARGIOCSETISTATE:
867 	{
868 		struct ioc_initiator_state *ioc_istate;
869 
870 		ioc_istate = (struct ioc_initiator_state *)addr;
871 		if (ioc_istate->initiator_id >= MAX_INITIATORS) {
872 			error = EINVAL;
873 			break;
874 		}
875 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
876 			  ("GET/SETISTATE for %d\n", ioc_istate->initiator_id));
877 		if (cmd == TARGIOCGETISTATE) {
878 			bcopy(&softc->istate[ioc_istate->initiator_id],
879 			      &ioc_istate->istate, sizeof(ioc_istate->istate));
880 		} else {
881 			bcopy(&ioc_istate->istate,
882 			      &softc->istate[ioc_istate->initiator_id],
883 			      sizeof(ioc_istate->istate));
884 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
885 				  ("pending_ca now %x\n",
886 				   softc->istate[ioc_istate->initiator_id].pending_ca));
887 		}
888 		break;
889 	}
890 	default:
891 		error = ENOTTY;
892 		break;
893 	}
894 	return (error);
895 }
896 
897 /*
898  * XXX JGibbs lifted from pass-thru driver.
899  * Generally, "ccb" should be the CCB supplied by the kernel.  "inccb"
900  * should be the CCB that is copied in from the user.
901  */
902 static int
903 targsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
904 {
905 	struct targ_softc *softc;
906 	struct cam_periph_map_info mapinfo;
907 	int error, need_unmap;
908 	int s;
909 
910 	softc = (struct targ_softc *)periph->softc;
911 
912 	need_unmap = 0;
913 
914 	/*
915 	 * There are some fields in the CCB header that need to be
916 	 * preserved, the rest we get from the user.
917 	 */
918 	xpt_merge_ccb(ccb, inccb);
919 
920 	/*
921 	 * There's no way for the user to have a completion
922 	 * function, so we put our own completion function in here.
923 	 */
924 	ccb->ccb_h.cbfcnp = targdone;
925 
926 	/*
927 	 * We only attempt to map the user memory into kernel space
928 	 * if they haven't passed in a physical memory pointer,
929 	 * and if there is actually an I/O operation to perform.
930 	 * Right now cam_periph_mapmem() only supports SCSI and device
931 	 * match CCBs.  For the SCSI CCBs, we only pass the CCB in if
932 	 * there's actually data to map.  cam_periph_mapmem() will do the
933 	 * right thing, even if there isn't data to map, but since CCBs
934 	 * without data are a reasonably common occurrence (e.g. test unit
935 	 * ready), it will save a few cycles if we check for it here.
936 	 */
937 	if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0)
938 	 && (((ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
939 	    && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
940 	  || (ccb->ccb_h.func_code == XPT_DEV_MATCH))) {
941 
942 		bzero(&mapinfo, sizeof(mapinfo));
943 
944 		error = cam_periph_mapmem(ccb, &mapinfo);
945 
946 		/*
947 		 * cam_periph_mapmem returned an error, we can't continue.
948 		 * Return the error to the user.
949 		 */
950 		if (error)
951 			return(error);
952 
953 		/*
954 		 * We successfully mapped the memory in, so we need to
955 		 * unmap it when the transaction is done.
956 		 */
957 		need_unmap = 1;
958 	}
959 
960 	/*
961 	 * Once queued on the pending CCB list, this CCB will be protected
962 	 * by the error recovery handling used for 'buffer I/O' ccbs.  Since
963 	 * we are in a process context here, however, the software interrupt
964 	 * for this driver may deliver an event invalidating this CCB just
965 	 * before we queue it.  Close this race condition by blocking
966 	 * software interrupt delivery, checking for any pertinent queued
967 	 * events, and only then queuing this CCB.
968 	 */
969 	s = splsoftcam();
970 	if (softc->exceptions == 0) {
971 		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
972 			TAILQ_INSERT_TAIL(&softc->pending_queue, &ccb->ccb_h,
973 					  periph_links.tqe);
974 
975 		/*
976 		 * If the user wants us to perform any error recovery,
977 		 * then honor that request.  Otherwise, it's up to the
978 		 * user to perform any error recovery.
979 		 */
980 		error = cam_periph_runccb(ccb,
981 					  /* error handler */NULL,
982 					  /* cam_flags */ 0,
983 					  /* sense_flags */SF_RETRY_UA,
984 					  &softc->device_stats);
985 
986 		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
987 			TAILQ_REMOVE(&softc->pending_queue, &ccb->ccb_h,
988 				     periph_links.tqe);
989 	} else {
990 		ccb->ccb_h.status = CAM_UNACKED_EVENT;
991 		error = 0;
992 	}
993 	splx(s);
994 
995 	if (need_unmap != 0)
996 		cam_periph_unmapmem(ccb, &mapinfo);
997 
998 	ccb->ccb_h.cbfcnp = NULL;
999 	ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
1000 	bcopy(ccb, inccb, sizeof(union ccb));
1001 
1002 	return(error);
1003 }
1004 
1005 
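/*
 * Poll entry point.  A unit becomes writable when a RECEIVE ATIO is
 * awaiting a userland buffer and readable when a SEND ATIO is awaiting
 * one; exception or teardown state is reported as POLLERR.
 */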
1006 static int
1007 targpoll(dev_t dev, int poll_events, struct proc *p)
1008 {
1009 	struct cam_periph *periph;
1010 	struct targ_softc *softc;
1011 	u_int  unit;
1012 	int    revents;
1013 	int    s;
1014 
1015 	unit = minor(dev);
1016 
1017 	/* ioctl is the only supported operation of the control device */
1018 	if (TARG_IS_CONTROL_DEV(unit))
1019 		return EINVAL;
1020 
1021 	periph = cam_extend_get(targperiphs, unit);
1022 	if (periph == NULL)
1023 		return (ENXIO);
1024 	softc = (struct targ_softc *)periph->softc;
1025 
1026 	revents = 0;
1027 	s = splcam();
1028 	if ((poll_events & (POLLOUT | POLLWRNORM)) != 0) {
1029 		if (TAILQ_FIRST(&softc->rcv_ccb_queue) != NULL
1030 		 && bufq_first(&softc->rcv_buf_queue) == NULL)
1031 			revents |= poll_events & (POLLOUT | POLLWRNORM);
1032 	}
1033 	if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
1034 		if (TAILQ_FIRST(&softc->snd_ccb_queue) != NULL
1035 		 && bufq_first(&softc->snd_buf_queue) == NULL)
1036 			revents |= poll_events & (POLLIN | POLLRDNORM);
1037 	}
1038 
1039 	if (softc->state != TARG_STATE_NORMAL)
1040 		revents |= POLLERR;
1041 
1042 	if (revents == 0) {
1043 		if (poll_events & (POLLOUT | POLLWRNORM))
1044 			selrecord(p, &softc->rcv_select);
1045 		if (poll_events & (POLLIN | POLLRDNORM))
1046 			selrecord(p, &softc->snd_select);
1047 	}
1048 	splx(s);
1049 	return (revents);
1050 }
1051 
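/*
 * Read entry point.  Data arriving from an initiator via SEND commands
 * is handed to the userland daemon here; a zero-length read marks the
 * SEND stream with an EOF.
 */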
1052 static int
1053 targread(dev_t dev, struct uio *uio, int ioflag)
1054 {
1055 	u_int  unit;
1056 
1057 	unit = minor(dev);
1058 	/* ioctl is the only supported operation of the control device */
1059 	if (TARG_IS_CONTROL_DEV(unit))
1060 		return EINVAL;
1061 
1062 	if (uio->uio_iovcnt == 0
1063 	 || uio->uio_iov->iov_len == 0) {
1064 		/* EOF */
1065 		struct cam_periph *periph;
1066 		struct targ_softc *softc;
1067 		int    s;
1068 
1069 		s = splcam();
1070 		periph = cam_extend_get(targperiphs, unit);
1071 		if (periph == NULL)
1072 			return (ENXIO);
1073 		softc = (struct targ_softc *)periph->softc;
1074 		softc->flags |= TARG_FLAG_SEND_EOF;
1075 		splx(s);
1076 		targrunqueue(periph, softc);
1077 		return (0);
1078 	}
1079 	return(physread(dev, uio, ioflag));
1080 }
1081 
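/*
 * Write entry point.  Buffers supplied by the userland daemon satisfy
 * initiator RECEIVE commands; a zero-length write marks the RECEIVE
 * stream with an EOF.
 */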
1082 static int
1083 targwrite(dev_t dev, struct uio *uio, int ioflag)
1084 {
1085 	u_int  unit;
1086 
1087 	unit = minor(dev);
1088 	/* ioctl is the only supported operation of the control device */
1089 	if (TARG_IS_CONTROL_DEV(unit))
1090 		return EINVAL;
1091 
1092 	if (uio->uio_iovcnt == 0
1093 	 || uio->uio_iov->iov_len == 0) {
1094 		/* EOF */
1095 		struct cam_periph *periph;
1096 		struct targ_softc *softc;
1097 		int    s;
1098 
1099 		s = splcam();
1100 		periph = cam_extend_get(targperiphs, unit);
1101 		if (periph == NULL)
1102 			return (ENXIO);
1103 		softc = (struct targ_softc *)periph->softc;
1104 		softc->flags |= TARG_FLAG_RECEIVE_EOF;
1105 		splx(s);
1106 		targrunqueue(periph, softc);
1107 		return (0);
1108 	}
1109 	return(physwrite(dev, uio, ioflag));
1110 }
1111 
1112 /*
1113  * Actually translate the requested transfer into one the physical driver
1114  * can understand.  The transfer is described by a buf and will include
1115  * only one physical transfer.
1116  */
1117 static void
1118 targstrategy(struct buf *bp)
1119 {
1120 	struct cam_periph *periph;
1121 	struct targ_softc *softc;
1122 	u_int  unit;
1123 	int    s;
1124 
1125 	unit = minor(bp->b_dev);
1126 
1127 	/* ioctl is the only supported operation of the control device */
1128 	if (TARG_IS_CONTROL_DEV(unit)) {
1129 		bp->b_error = EINVAL;
1130 		goto bad;
1131 	}
1132 
1133 	periph = cam_extend_get(targperiphs, unit);
1134 	if (periph == NULL) {
1135 		bp->b_error = ENXIO;
1136 		goto bad;
1137 	}
1138 	softc = (struct targ_softc *)periph->softc;
1139 
1140 	/*
1141 	 * Mask interrupts so that the device cannot be invalidated until
1142 	 * after we are in the queue.  Otherwise, we might not properly
1143 	 * clean up one of the buffers.
1144 	 */
1145 	s = splbio();
1146 
1147 	/*
1148 	 * If there is an exception pending, error out
1149 	 */
1150 	if (softc->state != TARG_STATE_NORMAL) {
1151 		splx(s);
1152 		if (softc->state == TARG_STATE_EXCEPTION
1153 		 && (softc->exceptions & TARG_EXCEPT_DEVICE_INVALID) == 0)
1154 			bp->b_error = EBUSY;
1155 		else
1156 			bp->b_error = ENXIO;
1157 		goto bad;
1158 	}
1159 
1160 	/*
1161 	 * Place it in the queue of buffers available for either
1162 	 * SEND or RECEIVE commands.
1164 	 */
1165 	bp->b_resid = bp->b_bcount;
1166 	if ((bp->b_flags & B_READ) != 0) {
1167 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1168 			  ("Queued a SEND buffer\n"));
1169 		bufq_insert_tail(&softc->snd_buf_queue, bp);
1170 	} else {
1171 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1172 			  ("Queued a RECEIVE buffer\n"));
1173 		bufq_insert_tail(&softc->rcv_buf_queue, bp);
1174 	}
1175 
1176 	splx(s);
1177 
1178 	/*
1179 	 * Attempt to use the new buffer to service any pending
1180 	 * target commands.
1181 	 */
1182 	targrunqueue(periph, softc);
1183 
1184 	return;
1185 bad:
1186 	bp->b_flags |= B_ERROR;
1187 
1188 	/*
1189 	 * Correctly set the buf to indicate a completed xfer
1190 	 */
1191 	bp->b_resid = bp->b_bcount;
1192 	biodone(bp);
1193 }
1194 
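/*
 * Match queued user buffers (or a pending EOF) with ATIOs awaiting
 * them, set up the transfer description, and schedule the periph to
 * issue a CTIO.  Only one request is started at a time to preserve
 * data ordering.
 */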
1195 static void
1196 targrunqueue(struct cam_periph *periph, struct targ_softc *softc)
1197 {
1198 	struct  ccb_queue *pending_queue;
1199 	struct	ccb_accept_tio *atio;
1200 	struct	buf_queue_head *bufq;
1201 	struct	buf *bp;
1202 	struct	targ_cmd_desc *desc;
1203 	struct	ccb_hdr *ccbh;
1204 	int	s;
1205 
1206 	s = splbio();
1207 	pending_queue = NULL;
1208 	bufq = NULL;
1209 	ccbh = NULL;
1210 	/* Only run one request at a time to maintain data ordering. */
1211 	if (softc->state != TARG_STATE_NORMAL
1212 	 || TAILQ_FIRST(&softc->work_queue) != NULL
1213 	 || TAILQ_FIRST(&softc->pending_queue) != NULL) {
1214 		splx(s);
1215 		return;
1216 	}
1217 
1218 	if (((bp = bufq_first(&softc->snd_buf_queue)) != NULL
1219 	  || (softc->flags & TARG_FLAG_SEND_EOF) != 0)
1220 	 && (ccbh = TAILQ_FIRST(&softc->snd_ccb_queue)) != NULL) {
1221 
1222 		if (bp == NULL)
1223 			softc->flags &= ~TARG_FLAG_SEND_EOF;
1224 		else {
1225 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1226 				  ("De-Queued a SEND buffer %ld\n",
1227 				   bp->b_bcount));
1228 		}
1229 		bufq = &softc->snd_buf_queue;
1230 		pending_queue = &softc->snd_ccb_queue;
1231 	} else if (((bp = bufq_first(&softc->rcv_buf_queue)) != NULL
1232 	  	 || (softc->flags & TARG_FLAG_RECEIVE_EOF) != 0)
1233 		&& (ccbh = TAILQ_FIRST(&softc->rcv_ccb_queue)) != NULL) {
1234 
1235 		if (bp == NULL)
1236 			softc->flags &= ~TARG_FLAG_RECEIVE_EOF;
1237 		else {
1238 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1239 				  ("De-Queued a RECEIVE buffer %ld\n",
1240 				   bp->b_bcount));
1241 		}
1242 		bufq = &softc->rcv_buf_queue;
1243 		pending_queue = &softc->rcv_ccb_queue;
1244 	}
1245 
1246 	if (pending_queue != NULL) {
1247 		/* Process a request */
1248 		atio = (struct ccb_accept_tio *)ccbh;
1249 		TAILQ_REMOVE(pending_queue, ccbh, periph_links.tqe);
1250 		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1251 		desc->bp = bp;
1252 		if (bp == NULL) {
1253 			/* EOF */
1254 			desc->data = NULL;
1255 			desc->data_increment = 0;
1256 			desc->data_resid = 0;
1257 			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1258 			atio->ccb_h.flags |= CAM_DIR_NONE;
1259 		} else {
1260 			bufq_remove(bufq, bp);
1261 			desc->data = &bp->b_data[bp->b_bcount - bp->b_resid];
1262 			desc->data_increment =
1263 			    MIN(desc->data_resid, bp->b_resid);
1264 			desc->data_increment =
1265 			    MIN(desc->data_increment, 32);
1266 		}
1267 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1268 			  ("Buffer command: data %x: datacnt %d\n",
1269 			   (intptr_t)desc->data, desc->data_increment));
1270 		TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1271 				  periph_links.tqe);
1272 	}
1273 	if (TAILQ_FIRST(&softc->work_queue) != NULL) {
1274 		splx(s);
1275 		xpt_schedule(periph, /*XXX priority*/1);
1276 	} else
1277 		splx(s);
1278 }
1279 
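/*
 * Periph start routine.  Converts the ATIO at the head of the work
 * queue into a continue target I/O (CTIO), attaching status and sense
 * data when the transfer completes the command.
 */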
1280 static void
1281 targstart(struct cam_periph *periph, union ccb *start_ccb)
1282 {
1283 	struct targ_softc *softc;
1284 	struct ccb_hdr *ccbh;
1285 	struct ccb_accept_tio *atio;
1286 	struct targ_cmd_desc *desc;
1287 	struct ccb_scsiio *csio;
1288 	targ_ccb_flags flags;
1289 	int    s;
1290 
1291 	softc = (struct targ_softc *)periph->softc;
1292 
1293 	s = splbio();
1294 	ccbh = TAILQ_FIRST(&softc->work_queue);
1295 	if (periph->immediate_priority <= periph->pinfo.priority) {
1296 		start_ccb->ccb_h.ccb_flags = TARG_CCB_WAITING;
1297 		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1298 				  periph_links.sle);
1299 		periph->immediate_priority = CAM_PRIORITY_NONE;
1300 		splx(s);
1301 		wakeup(&periph->ccb_list);
1302 	} else if (ccbh == NULL) {
1303 		splx(s);
1304 		xpt_release_ccb(start_ccb);
1305 	} else {
1306 		TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
1307 		splx(s);
1308 		atio = (struct ccb_accept_tio*)ccbh;
1309 		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1310 
1311 		/* Is this a tagged request? */
1312 		flags = atio->ccb_h.flags & (CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
1313 
1314 		/*
1315 		 * If we are done with the transaction, tell the
1316 		 * controller to send status and perform a CMD_CMPLT.
1317 		 */
1318 		if (desc->data_resid == desc->data_increment)
1319 			flags |= CAM_SEND_STATUS;
1320 
1321 		csio = &start_ccb->csio;
1322 		cam_fill_ctio(csio,
1323 			      /*retries*/2,
1324 			      targdone,
1325 			      flags,
1326 			      /*tag_action*/MSG_SIMPLE_Q_TAG,
1327 			      atio->tag_id,
1328 			      atio->init_id,
1329 			      desc->status,
1330 			      /*data_ptr*/desc->data_increment == 0
1331 					  ? NULL : desc->data,
1332 			      /*dxfer_len*/desc->data_increment,
1333 			      /*timeout*/desc->timeout);
1334 
1335 		if ((flags & CAM_SEND_STATUS) != 0
1336 		 && (desc->status == SCSI_STATUS_CHECK_COND
1337 		  || desc->status == SCSI_STATUS_CMD_TERMINATED)) {
1338 			struct initiator_state *istate;
1339 
1340 			istate = &softc->istate[atio->init_id];
1341 			csio->sense_len = istate->sense_data.extra_len
1342 					+ offsetof(struct scsi_sense_data,
1343 						   extra_len);
1344 			bcopy(&istate->sense_data, &csio->sense_data,
1345 			      csio->sense_len);
1346 			csio->ccb_h.flags |= CAM_SEND_SENSE;
1347 		} else {
1348 			csio->sense_len = 0;
1349 		}
1350 
1351 		start_ccb->ccb_h.ccb_flags = TARG_CCB_NONE;
1352 		start_ccb->ccb_h.ccb_atio = atio;
1353 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1354 			  ("Sending a CTIO\n"));
1355 		TAILQ_INSERT_TAIL(&softc->pending_queue, &csio->ccb_h,
1356 				  periph_links.tqe);
1357 		xpt_action(start_ccb);
1358 		s = splbio();
1359 		ccbh = TAILQ_FIRST(&softc->work_queue);
1360 		splx(s);
1361 	}
1362 	if (ccbh != NULL)
1363 		targrunqueue(periph, softc);
1364 }
1365 
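/*
 * Completion handler for all CCBs we issue: ATIOs carrying new
 * commands from an initiator, CTIOs completing data transfer, and
 * immediate notify CCBs reporting asynchronous events.
 */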
1366 static void
1367 targdone(struct cam_periph *periph, union ccb *done_ccb)
1368 {
1369 	struct targ_softc *softc;
1370 
1371 	softc = (struct targ_softc *)periph->softc;
1372 
1373 	if (done_ccb->ccb_h.ccb_flags == TARG_CCB_WAITING) {
1374 		/* Caller will release the CCB */
1375 		wakeup(&done_ccb->ccb_h.cbfcnp);
1376 		return;
1377 	}
1378 
1379 	switch (done_ccb->ccb_h.func_code) {
1380 	case XPT_ACCEPT_TARGET_IO:
1381 	{
1382 		struct ccb_accept_tio *atio;
1383 		struct targ_cmd_desc *descr;
1384 		struct initiator_state *istate;
1385 		u_int8_t *cdb;
1386 
1387 		atio = &done_ccb->atio;
1388 		descr = (struct targ_cmd_desc*)atio->ccb_h.ccb_descr;
1389 		istate = &softc->istate[atio->init_id];
1390 		cdb = atio->cdb_io.cdb_bytes;
1391 		if (softc->state == TARG_STATE_TEARDOWN
1392 		 || atio->ccb_h.status == CAM_REQ_ABORTED) {
1393 			freedescr(descr);
1394 			free(done_ccb, M_DEVBUF);
1395 			return;
1396 		}
1397 
1398 		if (atio->sense_len != 0) {
1399 
1400 			/*
1401 			 * We had an error in the reception of
1402 			 * this command.  Immediately issue a CA.
1403 			 */
1404 			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1405 			atio->ccb_h.flags |= CAM_DIR_NONE;
1406 			descr->data_resid = 0;
1407 			descr->data_increment = 0;
1408 			descr->status = SCSI_STATUS_CHECK_COND;
1409 			copy_sense(softc, istate, (u_int8_t *)&atio->sense_data,
1410 				   atio->sense_len);
1411 			set_ca_condition(periph, atio->init_id, CA_CMD_SENSE);
1412 		} else if (istate->pending_ca == 0
1413 			&& istate->pending_ua != 0
1414 			&& cdb[0] != INQUIRY) {
1415 
1416 			/* Pending UA, tell initiator */
1417 			/* Direction is always relative to the initiator */
1418 			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1419 			atio->ccb_h.flags |= CAM_DIR_NONE;
1420 			descr->data_resid = 0;
1421 			descr->data_increment = 0;
1422 			descr->timeout = 5 * 1000;
1423 			descr->status = SCSI_STATUS_CHECK_COND;
1424 			fill_sense(softc, atio->init_id,
1425 				   SSD_CURRENT_ERROR, SSD_KEY_UNIT_ATTENTION,
1426 				   0x29,
1427 				   istate->pending_ua == UA_POWER_ON ? 1 : 2);
1428 			set_ca_condition(periph, atio->init_id, CA_UNIT_ATTN);
1429 		} else {
1430 			/*
1431 			 * Save the current CA and UA status so
1432 			 * they can be used by this command.
1433 			 */
1434 			ua_types pending_ua;
1435 			ca_types pending_ca;
1436 
1437 			pending_ua = istate->pending_ua;
1438 			pending_ca = istate->pending_ca;
1439 
1440 			/*
1441 			 * As per the SCSI2 spec, any command that occurs
1442 			 * after a CA is reported, clears the CA.  We must
1443 			 * also clear the UA condition, if any, that caused
1444 			 * the CA to occur, assuming the UA is not for a
1445 			 * persistent condition.
1446 			 */
1447 			istate->pending_ca = CA_NONE;
1448 			if (pending_ca == CA_UNIT_ATTN)
1449 				istate->pending_ua = UA_NONE;
1450 
1451 			/*
1452 			 * Determine the type of incoming command and
1453 			 * setup our buffer for a response.
1454 			 */
1455 			switch (cdb[0]) {
1456 			case INQUIRY:
1457 			{
1458 				struct scsi_inquiry *inq;
1459 				struct scsi_sense_data *sense;
1460 
1461 				inq = (struct scsi_inquiry *)cdb;
1462 				sense = &istate->sense_data;
1463 				CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1464 					  ("Saw an inquiry!\n"));
1465 				/*
1466 				 * Validate the command.  We don't
1467 				 * support any VPD pages, so complain
1468 				 * if EVPD is set.
1469 				 */
1470 				if ((inq->byte2 & SI_EVPD) != 0
1471 				 || inq->page_code != 0) {
1472 					atio->ccb_h.flags &= ~CAM_DIR_MASK;
1473 					atio->ccb_h.flags |= CAM_DIR_NONE;
1474 					descr->data_resid = 0;
1475 					descr->data_increment = 0;
1476 					descr->status = SCSI_STATUS_CHECK_COND;
1477 					fill_sense(softc, atio->init_id,
1478 						   SSD_CURRENT_ERROR,
1479 						   SSD_KEY_ILLEGAL_REQUEST,
1480 						   /*asc*/0x24, /*ascq*/0x00);
1481 					sense->extra_len =
1482 						offsetof(struct scsi_sense_data,
1483 							 extra_bytes)
1484 					      - offsetof(struct scsi_sense_data,
1485 							 extra_len);
1486 					set_ca_condition(periph, atio->init_id,
1487 							 CA_CMD_SENSE);
1488 				}
1489 
1490 				if ((inq->byte2 & SI_EVPD) != 0) {
1491 					sense->sense_key_spec[0] =
1492 					    SSD_SCS_VALID|SSD_FIELDPTR_CMD
1493 					   |SSD_BITPTR_VALID| /*bit value*/1;
1494 					sense->sense_key_spec[1] = 0;
1495 					sense->sense_key_spec[2] =
1496 					    offsetof(struct scsi_inquiry,
1497 						     byte2);
1498 				} else if (inq->page_code != 0) {
1499 					sense->sense_key_spec[0] =
1500 					    SSD_SCS_VALID|SSD_FIELDPTR_CMD;
1501 					sense->sense_key_spec[1] = 0;
1502 					sense->sense_key_spec[2] =
1503 					    offsetof(struct scsi_inquiry,
1504 						     page_code);
1505 				}
1506 				if (descr->status == SCSI_STATUS_CHECK_COND)
1507 					break;
1508 
1509 				/*
1510 				 * Direction is always relative
1511 				 * to the initiator.
1512 				 */
1513 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1514 				atio->ccb_h.flags |= CAM_DIR_IN;
1515 				descr->data = softc->inq_data;
1516 				descr->data_resid = MIN(softc->inq_data_len,
1517 						       inq->length);
1518 				descr->data_increment = descr->data_resid;
1519 				descr->timeout = 5 * 1000;
1520 				descr->status = SCSI_STATUS_OK;
1521 				break;
1522 			}
1523 			case TEST_UNIT_READY:
1524 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1525 				atio->ccb_h.flags |= CAM_DIR_NONE;
1526 				descr->data_resid = 0;
1527 				descr->data_increment = 0;
1528 				descr->timeout = 5 * 1000;
1529 				descr->status = SCSI_STATUS_OK;
1530 				break;
1531 			case REQUEST_SENSE:
1532 			{
1533 				struct scsi_request_sense *rsense;
1534 				struct scsi_sense_data *sense;
1535 
1536 				rsense = (struct scsi_request_sense *)cdb;
1537 				sense = &istate->sense_data;
1538 				if (pending_ca == 0) {
1539 					fill_sense(softc, atio->init_id,
1540 						   SSD_CURRENT_ERROR,
1541 						   SSD_KEY_NO_SENSE, 0x00,
1542 						   0x00);
1543 					CAM_DEBUG(periph->path,
1544 						  CAM_DEBUG_PERIPH,
1545 						  ("No pending CA!\n"));
1546 				}
1547 				/*
1548 				 * Direction is always relative
1549 				 * to the initiator.
1550 				 */
1551 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1552 				atio->ccb_h.flags |= CAM_DIR_IN;
1553 				descr->data = sense;
1554 				descr->data_resid =
1555 			 		offsetof(struct scsi_sense_data,
1556 						 extra_len)
1557 				      + sense->extra_len;
1558 				descr->data_resid = MIN(descr->data_resid,
1559 						       rsense->length);
1560 				descr->data_increment = descr->data_resid;
1561 				descr->timeout = 5 * 1000;
1562 				descr->status = SCSI_STATUS_OK;
1563 				break;
1564 			}
1565 			case RECEIVE:
1566 			case SEND:
1567 			{
1568 				struct scsi_send_receive *sr;
1569 
1570 				sr = (struct scsi_send_receive *)cdb;
1571 
1572 				/*
1573 				 * Direction is always relative
1574 				 * to the initiator.
1575 				 */
1576 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1577 				descr->data_resid = scsi_3btoul(sr->xfer_len);
1578 				descr->timeout = 5 * 1000;
1579 				descr->status = SCSI_STATUS_OK;
1580 				if (cdb[0] == SEND) {
1581 					atio->ccb_h.flags |= CAM_DIR_OUT;
1582 					CAM_DEBUG(periph->path,
1583 						  CAM_DEBUG_PERIPH,
1584 						  ("Saw a SEND!\n"));
1586 					TAILQ_INSERT_TAIL(&softc->snd_ccb_queue,
1587 							  &atio->ccb_h,
1588 							  periph_links.tqe);
1589 					selwakeup(&softc->snd_select);
1590 				} else {
1591 					atio->ccb_h.flags |= CAM_DIR_IN;
1592 					CAM_DEBUG(periph->path,
1593 						  CAM_DEBUG_PERIPH,
1594 						  ("Saw a RECEIVE!\n"));
1595 					TAILQ_INSERT_TAIL(&softc->rcv_ccb_queue,
1596 							  &atio->ccb_h,
1597 							  periph_links.tqe);
1598 					selwakeup(&softc->rcv_select);
1599 				}
1600 				/*
1601 				 * Attempt to satisfy this request with
1602 				 * a user buffer.
1603 				 */
1604 				targrunqueue(periph, softc);
1605 				return;
1606 			}
1607 			default:
1608 				/*
1609 				 * Queue for consumption by our userland
1610 				 * counterpart and  transition to the exception
1611 				 * state.
1612 				 */
1613 				TAILQ_INSERT_TAIL(&softc->unknown_atio_queue,
1614 						  &atio->ccb_h,
1615 						  periph_links.tqe);
1616 				softc->exceptions |= TARG_EXCEPT_UNKNOWN_ATIO;
1617 				targfireexception(periph, softc);
1618 				return;
1619 			}
1620 		}
1621 
1622 		/* Queue us up to receive a Continue Target I/O ccb. */
1623 		TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1624 				  periph_links.tqe);
1625 		xpt_schedule(periph, /*priority*/1);
1626 		break;
1627 	}
1628 	case XPT_CONT_TARGET_IO:
1629 	{
1630 		struct ccb_scsiio *csio;
1631 		struct ccb_accept_tio *atio;
1632 		struct targ_cmd_desc *desc;
1633 		struct buf *bp;
1634 		int    error;
1635 
1636 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1637 			  ("Received completed CTIO\n"));
1638 		csio = &done_ccb->csio;
1639 		atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio;
1640 		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1641 
1642 		TAILQ_REMOVE(&softc->pending_queue, &done_ccb->ccb_h,
1643 			     periph_links.tqe);
1644 
1645 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1646 			printf("CCB with error %x\n", done_ccb->ccb_h.status);
1647 			error = targerror(done_ccb, 0, 0);
1648 			if (error == ERESTART)
1649 				break;
1650 			/*
1651 			 * Right now we don't need to do anything
1652 			 * prior to unfreezing the queue...
1653 			 */
1654 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1655 				printf("Releasing Queue\n");
1656 				cam_release_devq(done_ccb->ccb_h.path,
1657 						 /*relsim_flags*/0,
1658 						 /*reduction*/0,
1659 						 /*timeout*/0,
1660 						 /*getcount_only*/0);
1661 			}
1662 		} else
1663 			error = 0;
1664 
1665 		/*
1666 		 * If we shipped back sense data when completing
1667 		 * this command, clear the pending CA for it.
1668 		 */
1669 		if (done_ccb->ccb_h.status & CAM_SENT_SENSE) {
1670 			struct initiator_state *istate;
1671 
1672 			istate = &softc->istate[csio->init_id];
1673 			if (istate->pending_ca == CA_UNIT_ATTN)
1674 				istate->pending_ua = UA_NONE;
1675 			istate->pending_ca = CA_NONE;
1677 			done_ccb->ccb_h.status &= ~CAM_SENT_SENSE;
1678 		}
1679 		done_ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1680 
1681 		desc->data_increment -= csio->resid;
1682 		desc->data_resid -= desc->data_increment;
1683 		if ((bp = desc->bp) != NULL) {
1684 
1685 			bp->b_resid -= desc->data_increment;
1686 			bp->b_error = error;
1687 
1688 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1689 				  ("Buffer I/O Completed - Resid %ld:%d\n",
1690 				   bp->b_resid, desc->data_resid));
1691 			/*
1692 			 * Send the buffer back to the client if
1693 			 * either the command has completed or all
1694 			 * buffer space has been consumed.
1695 			 */
1696 			if (desc->data_resid == 0
1697 			 || bp->b_resid == 0
1698 			 || error != 0) {
1699 				if (bp->b_resid != 0)
1700 					/* Short transfer */
1701 					bp->b_flags |= B_ERROR;
1702 
1703 				CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1704 					  ("Completing a buffer\n"));
1705 				biodone(bp);
1706 				desc->bp = NULL;
1707 			}
1708 		}
1709 
1710 		xpt_release_ccb(done_ccb);
1711 		if (softc->state != TARG_STATE_TEARDOWN) {
1712 
1713 			if (desc->data_resid == 0) {
1714 				/*
1715 				 * Send the original accept TIO back to the
1716 				 * controller to handle more work.
1717 				 */
1718 				CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1719 					  ("Returning ATIO to target\n"));
1720 				xpt_action((union ccb *)atio);
1721 				break;
1722 			}
1723 
1724 			/* Queue us up for another buffer */
1725 			if (atio->cdb_io.cdb_bytes[0] == SEND) {
1726 				if (desc->bp != NULL)
1727 					TAILQ_INSERT_HEAD(
1728 						&softc->snd_buf_queue.queue,
1729 						bp, b_act);
1730 				TAILQ_INSERT_HEAD(&softc->snd_ccb_queue,
1731 						  &atio->ccb_h,
1732 						  periph_links.tqe);
1733 			} else {
1734 				if (desc->bp != NULL)
1735 					TAILQ_INSERT_HEAD(
1736 						&softc->rcv_buf_queue.queue,
1737 						bp, b_act);
1738 				TAILQ_INSERT_HEAD(&softc->rcv_ccb_queue,
1739 						  &atio->ccb_h,
1740 						  periph_links.tqe);
1741 			}
1742 			desc->bp = NULL;
1743 			targrunqueue(periph, softc);
1744 		} else {
1745 			if (desc->bp != NULL) {
1746 				bp->b_flags |= B_ERROR;
1747 				bp->b_error = ENXIO;
1748 				biodone(bp);
1749 			}
1750 			freedescr(desc);
1751 			free(atio, M_DEVBUF);
1752 		}
1753 		break;
1754 	}
1755 	case XPT_IMMED_NOTIFY:
1756 	{
1757 		int frozen;
1758 
1759 		frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
1760 		if (softc->state == TARG_STATE_TEARDOWN) {
1761 			SLIST_REMOVE(&softc->immed_notify_slist,
1762 				     &done_ccb->ccb_h, ccb_hdr,
1763 				     periph_links.sle);
1764 			free(done_ccb, M_DEVBUF);
1765 		} else if (done_ccb->ccb_h.status == CAM_REQ_ABORTED) {
1766 			free(done_ccb, M_DEVBUF);
1767 		} else {
1768 			printf("Saw event %x:%x\n", done_ccb->ccb_h.status,
1769 			       done_ccb->cin.message_args[0]);
1770 			/* Process error condition. */
1771 			targinoterror(periph, softc, &done_ccb->cin);
1772 
1773 			/* Requeue for another immediate event */
1774 			xpt_action(done_ccb);
1775 		}
1776 		if (frozen != 0)
1777 			cam_release_devq(periph->path,
1778 					 /*relsim_flags*/0,
1779 					 /*opening reduction*/0,
1780 					 /*timeout*/0,
1781 					 /*getcount_only*/0);
1782 		break;
1783 	}
1784 	default:
1785 		panic("targdone: Impossible xpt opcode %x encountered.",
1786 		      done_ccb->ccb_h.func_code);
1787 		/* NOTREACHED */
1788 		break;
1789 	}
1790 }
1791 
1792 /*
1793  * Transition to the exception state and notify our symbiotic
1794  * userland process of the change.
1795  */
1796 static void
1797 targfireexception(struct cam_periph *periph, struct targ_softc *softc)
1798 {
1799 	/*
1800 	 * return all pending buffers with short read/write status so our
1801 	 * process unblocks, and do a selwakeup on any process queued
1802 	 * waiting for reads or writes.  When the selwakeup is performed,
1803 	 * the waking process will wakeup, call our poll routine again,
1804 	 * and pick up the exception.
1805 	 */
1806 	struct buf *bp;
1807 
1808 	if (softc->state != TARG_STATE_NORMAL)
1809 		/* Already either tearing down or in exception state */
1810 		return;
1811 
1812 	softc->state = TARG_STATE_EXCEPTION;
1813 
1814 	while ((bp = bufq_first(&softc->snd_buf_queue)) != NULL) {
1815 		bufq_remove(&softc->snd_buf_queue, bp);
1816 		bp->b_flags |= B_ERROR;
1817 		biodone(bp);
1818 	}
1819 
1820 	while ((bp = bufq_first(&softc->rcv_buf_queue)) != NULL) {
1821 		bufq_remove(&softc->rcv_buf_queue, bp);
1822 		bp->b_flags |= B_ERROR;
1823 		biodone(bp);
1824 	}
1825 
1826 	selwakeup(&softc->snd_select);
1827 	selwakeup(&softc->rcv_select);
1828 }
1829 
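/*
 * Process an immediate notify event such as a bus reset, bus device
 * reset, or incoming message, raising unit attentions and exceptions
 * as appropriate.
 */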
1830 static void
1831 targinoterror(struct cam_periph *periph, struct targ_softc *softc,
1832 	      struct ccb_immed_notify *inot)
1833 {
1834 	cam_status status;
1835 	int sense;
1836 
1837 	status = inot->ccb_h.status;
1838 	sense = (status & CAM_AUTOSNS_VALID) != 0;
1839 	status &= CAM_STATUS_MASK;
1840 	switch (status) {
1841 	case CAM_SCSI_BUS_RESET:
1842 		set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD,
1843 					UA_BUS_RESET);
1844 		abort_pending_transactions(periph,
1845 					   /*init_id*/CAM_TARGET_WILDCARD,
1846 					   TARG_TAG_WILDCARD, EINTR,
1847 					   /*to_held_queue*/FALSE);
1848 		softc->exceptions |= TARG_EXCEPT_BUS_RESET_SEEN;
1849 		targfireexception(periph, softc);
1850 		break;
1851 	case CAM_BDR_SENT:
1852 		set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD,
1853 					UA_BDR);
1854 		abort_pending_transactions(periph, CAM_TARGET_WILDCARD,
1855 					   TARG_TAG_WILDCARD, EINTR,
1856 					   /*to_held_queue*/FALSE);
1857 		softc->exceptions |= TARG_EXCEPT_BDR_RECEIVED;
1858 		targfireexception(periph, softc);
1859 		break;
1860 	case CAM_MESSAGE_RECV:
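		/*
		 * XXX None of the messages below is acted upon yet; each
		 * case falls through to a no-op, so targdone() simply
		 * requeues the immediate notify CCB.
		 */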
1861 		switch (inot->message_args[0]) {
1862 		case MSG_INITIATOR_DET_ERR:
1863 			break;
1864 		case MSG_ABORT:
1865 			break;
1866 		case MSG_BUS_DEV_RESET:
1867 			break;
1868 		case MSG_ABORT_TAG:
1869 			break;
1870 		case MSG_CLEAR_QUEUE:
1871 			break;
1872 		case MSG_TERM_IO_PROC:
1873 			break;
1874 		default:
1875 			break;
1876 		}
1877 		break;
1878 	default:
1879 		break;
1880 	}
1881 }
1882 
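/*
 * Error recovery for completed CTIOs.  Returns ERESTART when the CCB has
 * been (or will be) reissued, or an errno value (EIO/ENXIO here) when the
 * transaction must be failed.  For failures the initiator should learn
 * about through REQUEST SENSE, sense data is recorded and a contingent
 * allegiance condition is set for that initiator before returning.  Any
 * device queue freeze is released when the request is restarted rather
 * than failed.
 */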
1883 static int
1884 targerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1885 {
1886 	struct cam_periph *periph;
1887 	struct targ_softc *softc;
1888 	struct ccb_scsiio *csio;
1889 	struct initiator_state *istate;
1890 	cam_status status;
1891 	int frozen;
1892 	int sense;
1893 	int error;
1894 	int on_held_queue;
1895 
1896 	periph = xpt_path_periph(ccb->ccb_h.path);
1897 	softc = (struct targ_softc *)periph->softc;
1898 	status = ccb->ccb_h.status;
1899 	sense = (status & CAM_AUTOSNS_VALID) != 0;
1900 	frozen = (status & CAM_DEV_QFRZN) != 0;
1901 	status &= CAM_STATUS_MASK;
1902 	on_held_queue = FALSE;
1903 	csio = &ccb->csio;
1904 	istate = &softc->istate[csio->init_id];
1905 	switch (status) {
1906 	case CAM_REQ_ABORTED:
1907 		printf("Request Aborted!\n");
1908 		if ((ccb->ccb_h.ccb_flags & TARG_CCB_ABORT_TO_HELDQ) != 0) {
1909 
1910 			/*
1911 			 * Place this CCB into the initiator's
1912 			 * 'held' queue until the pending CA is cleared.
1913 			 * If there is no CA pending, reissue immediately.
1914 			 */
1915 			if (istate->pending_ca == 0) {
1916 				ccb->ccb_h.ccb_flags = TARG_CCB_NONE;
1917 				xpt_action(ccb);
1918 			} else {
1919 				ccb->ccb_h.ccb_flags = TARG_CCB_HELDQ;
1920 				TAILQ_INSERT_TAIL(&softc->pending_queue,
1921 						  &ccb->ccb_h,
1922 						  periph_links.tqe);
1923 			}
1924 			/* The command will be retried at a later time. */
1925 			on_held_queue = TRUE;
1926 			error = ERESTART;
1927 			break;
1928 		}
1929 		/* FALLTHROUGH */
1930 	case CAM_SCSI_BUS_RESET:
1931 	case CAM_BDR_SENT:
1932 	case CAM_REQ_TERMIO:
1933 	case CAM_CMD_TIMEOUT:
1934 		/* Assume we did not send any data */
1935 		csio->resid = csio->dxfer_len;
1936 		error = EIO;
1937 		break;
1938 	case CAM_SEL_TIMEOUT:
1939 		if (ccb->ccb_h.retry_count > 0) {
1940 			ccb->ccb_h.retry_count--;
1941 			error = ERESTART;
1942 		} else {
1943 			/* "Select or reselect failure" */
1944 			csio->resid = csio->dxfer_len;
1945 			fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
1946 				   SSD_KEY_HARDWARE_ERROR, 0x45, 0x00);
1947 			set_ca_condition(periph, csio->init_id, CA_CMD_SENSE);
1948 			error = EIO;
1949 		}
1950 		break;
1951 	case CAM_UNCOR_PARITY:
1952 		/* "SCSI parity error" */
1953 		fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
1954 			   SSD_KEY_HARDWARE_ERROR, 0x47, 0x00);
1955 		set_ca_condition(periph, csio->init_id,
1956 				 CA_CMD_SENSE);
1957 		csio->resid = csio->dxfer_len;
1958 		error = EIO;
1959 		break;
1960 	case CAM_NO_HBA:
1961 		csio->resid = csio->dxfer_len;
1962 		error = ENXIO;
1963 		break;
1964 	case CAM_SEQUENCE_FAIL:
1965 		if (sense != 0) {
1966 			copy_sense(softc, istate, (u_int8_t *)&csio->sense_data,
1967 				   csio->sense_len);
1968 			set_ca_condition(periph,
1969 					 csio->init_id,
1970 					 CA_CMD_SENSE);
1971 		}
1972 		csio->resid = csio->dxfer_len;
1973 		error = EIO;
1974 		break;
1975 	case CAM_IDE:
1976 		/* "Initiator detected error message received" */
1977 		fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
1978 			   SSD_KEY_HARDWARE_ERROR, 0x48, 0x00);
1979 		set_ca_condition(periph, csio->init_id,
1980 				 CA_CMD_SENSE);
1981 		csio->resid = csio->dxfer_len;
1982 		error = EIO;
1983 		break;
1984 	case CAM_REQUEUE_REQ:
1985 		printf("Requeue Request!\n");
1986 		error = ERESTART;
1987 		break;
1988 	default:
1989 		csio->resid = csio->dxfer_len;
1990 		error = EIO;
1991 		panic("targerror: Unexpected status %x encountered", status);
1992 		/* NOTREACHED */
1993 	}
1994 
1995 	if (error == ERESTART || error == 0) {
1996 		/* Clear the QFRZN flag as we will release the queue */
1997 		if (frozen != 0)
1998 			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1999 
2000 		if (error == ERESTART && !on_held_queue)
2001 			xpt_action(ccb);
2002 
2003 		if (frozen != 0)
2004 			cam_release_devq(ccb->ccb_h.path,
2005 					 /*relsim_flags*/0,
2006 					 /*opening reduction*/0,
2007 					 /*timeout*/0,
2008 					 /*getcount_only*/0);
2009 	}
2010 	return (error);
2011 }
2012 
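/*
 * Allocate a zeroed command descriptor together with its MAX_BUF_SIZE
 * backing store.  Both allocations are M_NOWAIT, so a NULL return simply
 * means memory was not immediately available; nothing is leaked in that
 * case.
 */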
2013 static struct targ_cmd_desc*
2014 allocdescr(void)
2015 {
2016 	struct targ_cmd_desc* descr;
2017 
2018 	/* Allocate the targ_descr structure */
2019 	descr = (struct targ_cmd_desc *)malloc(sizeof(*descr),
2020 					       M_DEVBUF, M_NOWAIT);
2021 	if (descr == NULL)
2022 		return (NULL);
2023 
2024 	bzero(descr, sizeof(*descr));
2025 
2026 	/* Allocate buffer backing store */
2027 	descr->backing_store = malloc(MAX_BUF_SIZE, M_DEVBUF, M_NOWAIT);
2028 	if (descr->backing_store == NULL) {
2029 		free(descr, M_DEVBUF);
2030 		return (NULL);
2031 	}
2032 	descr->max_size = MAX_BUF_SIZE;
2033 	return (descr);
2034 }
2035 
2036 static void
2037 freedescr(struct targ_cmd_desc *descr)
2038 {
2039 	free(descr->backing_store, M_DEVBUF);
2040 	free(descr, M_DEVBUF);
2041 }
2042 
2043 static void
2044 fill_sense(struct targ_softc *softc, u_int initiator_id, u_int error_code,
2045 	   u_int sense_key, u_int asc, u_int ascq)
2046 {
2047 	struct initiator_state *istate;
2048 	struct scsi_sense_data *sense;
2049 
2050 	istate = &softc->istate[initiator_id];
2051 	sense = &istate->sense_data;
2052 	bzero(sense, sizeof(*sense));
2053 	sense->error_code = error_code;
2054 	sense->flags = sense_key;
2055 	sense->add_sense_code = asc;
2056 	sense->add_sense_code_qual = ascq;
2057 
2058 	sense->extra_len = offsetof(struct scsi_sense_data, fru)
2059 			 - offsetof(struct scsi_sense_data, extra_len);
2060 }
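
/*
 * For example, the select/reselect failure path in targerror() calls
 *
 *	fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
 *		   SSD_KEY_HARDWARE_ERROR, 0x45, 0x00);
 *
 * which leaves a fixed-format ("current error", 0x70) sense block in that
 * initiator's istate with sense key HARDWARE ERROR and ASC/ASCQ 0x45/0x00,
 * and sets the additional sense length from the structure layout so the
 * ASC/ASCQ bytes are covered.  The stored sense can then be returned to
 * that initiator once the accompanying contingent allegiance condition is
 * serviced.
 */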
2061 
2062 static void
2063 copy_sense(struct targ_softc *softc, struct initiator_state *istate,
2064 	   u_int8_t *sense_buffer, size_t sense_len)
2065 {
2066 	struct scsi_sense_data *sense;
2067 	size_t copylen;
2068 
2069 	sense = &istate->sense_data;
2070 	copylen = sizeof(*sense);
2071 	if (copylen > sense_len)
2072 		copylen = sense_len;
2073 	bcopy(sense_buffer, sense, copylen);
2074 }
2075 
2076 static void
2077 set_unit_attention_cond(struct cam_periph *periph,
2078 			u_int initiator_id, ua_types ua)
2079 {
2080 	int start;
2081 	int end;
2082 	struct targ_softc *softc;
2083 
2084 	softc = (struct targ_softc *)periph->softc;
2085 	if (initiator_id == CAM_TARGET_WILDCARD) {
2086 		start = 0;
2087 		end = MAX_INITIATORS - 1;
2088 	} else
2089 		start = end = initiator_id;
2090 
2091 	while (start <= end) {
2092 		softc->istate[start].pending_ua = ua;
2093 		start++;
2094 	}
2095 }
2096 
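/*
 * Record a pending contingent allegiance (CA) condition for an initiator.
 * While the CA is pending, targerror() parks CTIOs for that initiator that
 * come back with CAM_REQ_ABORTED on the held queue instead of reissuing
 * them; abort_pending_transactions() is invoked here with to_held_queue set
 * so in-flight transactions are flagged accordingly before being aborted.
 */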
2097 static void
2098 set_ca_condition(struct cam_periph *periph, u_int initiator_id, ca_types ca)
2099 {
2100 	struct targ_softc *softc;
2101 
2102 	softc = (struct targ_softc *)periph->softc;
2103 	softc->istate[initiator_id].pending_ca = ca;
2104 	abort_pending_transactions(periph, initiator_id, TARG_TAG_WILDCARD,
2105 				   /* errno */0, /*to_held_queue*/TRUE);
2106 }
2107 
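/*
 * Abort every transaction matching the given initiator and tag (either may
 * be a wildcard).  ATIOs still on the driver's staging queues have any
 * attached buffer failed with 'errno' (ENXIO during teardown) and are then
 * returned to the controller or freed; CTIOs already handed to the
 * controller are aborted with an XPT_ABORT request.  When to_held_queue is
 * set, the staging queues are left alone (the device queue is frozen) and
 * the in-flight CTIOs are marked TARG_CCB_ABORT_TO_HELDQ so targerror()
 * holds them until the pending CA condition is cleared.
 */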
2108 static void
2109 abort_pending_transactions(struct cam_periph *periph, u_int initiator_id,
2110 			   u_int tag_id, int errno, int to_held_queue)
2111 {
2112 	struct ccb_abort cab;
2113 	struct ccb_queue *atio_queues[3];
2114 	struct targ_softc *softc;
2115 	struct ccb_hdr *ccbh;
2116 	u_int i;
2117 
2118 	softc = (struct targ_softc *)periph->softc;
2119 
2120 	atio_queues[0] = &softc->work_queue;
2121 	atio_queues[1] = &softc->snd_ccb_queue;
2122 	atio_queues[2] = &softc->rcv_ccb_queue;
2123 
2124 	/* First address the ATIOs awaiting resources */
2125 	for (i = 0; i < (sizeof(atio_queues) / sizeof(*atio_queues)); i++) {
2126 		struct ccb_queue *atio_queue;
2127 
2128 		if (to_held_queue) {
2129 			/*
2130 			 * The device queue is frozen anyway, so there
2131 			 * is nothing for us to do.
2132 			 */
2133 			continue;
2134 		}
2135 		atio_queue = atio_queues[i];
2136 		ccbh = TAILQ_FIRST(atio_queue);
2137 		while (ccbh != NULL) {
2138 			struct ccb_accept_tio *atio;
2139 			struct targ_cmd_desc *desc;
2140 
2141 			atio = (struct ccb_accept_tio *)ccbh;
2142 			desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
2143 			ccbh = TAILQ_NEXT(ccbh, periph_links.tqe);
2144 
2145 			/* Only abort the CCBs that match */
2146 			if ((atio->init_id != initiator_id
2147 			  && initiator_id != CAM_TARGET_WILDCARD)
2148 			 || (tag_id != TARG_TAG_WILDCARD
2149 			  && ((atio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
2150 			   || atio->tag_id != tag_id)))
2151 				continue;
2152 
2153 			TAILQ_REMOVE(atio_queue, &atio->ccb_h,
2154 				     periph_links.tqe);
2155 
2156 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
2157 				  ("Aborting ATIO\n"));
2158 			if (desc->bp != NULL) {
2159 				desc->bp->b_flags |= B_ERROR;
2160 				if (softc->state != TARG_STATE_TEARDOWN)
2161 					desc->bp->b_error = errno;
2162 				else
2163 					desc->bp->b_error = ENXIO;
2164 				biodone(desc->bp);
2165 				desc->bp = NULL;
2166 			}
2167 			if (softc->state == TARG_STATE_TEARDOWN) {
2168 				freedescr(desc);
2169 				free(atio, M_DEVBUF);
2170 			} else {
2171 				/* Return the ATIO back to the controller */
2172 				xpt_action((union ccb *)atio);
2173 			}
2174 		}
2175 	}
2176 
2177 	ccbh = TAILQ_FIRST(&softc->pending_queue);
2178 	while (ccbh != NULL) {
2179 		struct ccb_scsiio *csio;
2180 
2181 		csio = (struct ccb_scsiio *)ccbh;
2182 		ccbh = TAILQ_NEXT(ccbh, periph_links.tqe);
2183 
2184 		/* Only abort the CCBs that match */
2185 		if ((csio->init_id != initiator_id
2186 		  && initiator_id != CAM_TARGET_WILDCARD)
2187 		 || (tag_id != TARG_TAG_WILDCARD
2188 		  && ((csio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
2189 		   || csio->tag_id != tag_id)))
2190 			continue;
2191 
2192 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
2193 			  ("Aborting CTIO\n"));
2194 
2195 		TAILQ_REMOVE(&softc->pending_queue, &csio->ccb_h,
2196 			     periph_links.tqe);
2197 
2198 		if (to_held_queue != 0)
2199 			csio->ccb_h.ccb_flags |= TARG_CCB_ABORT_TO_HELDQ;
2200 		xpt_setup_ccb(&cab.ccb_h, csio->ccb_h.path, /*priority*/1);
2201 		cab.abort_ccb = (union ccb *)csio;
2202 		xpt_action((union ccb *)&cab);
2203 		if (cab.ccb_h.status != CAM_REQ_CMP) {
2204 			xpt_print_path(cab.ccb_h.path);
2205 			printf("Unable to abort CCB.  Status %x\n",
2206 			       cab.ccb_h.status);
2207 		}
2208 	}
2209 }
2210