xref: /freebsd/sys/cam/scsi/scsi_target.c (revision 5129159789cc9d7bc514e4546b88e3427695002d)
1 /*
2  * Implementation of a simple Target Mode SCSI Proccessor Target driver for CAM.
3  *
4  * Copyright (c) 1998, 1999 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 #include <stddef.h>	/* For offsetof */
31 
32 #include <sys/param.h>
33 #include <sys/queue.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/types.h>
37 #include <sys/buf.h>
38 #include <sys/conf.h>
39 #include <sys/devicestat.h>
40 #include <sys/malloc.h>
41 #include <sys/poll.h>
42 #include <sys/select.h>	/* For struct selinfo. */
43 #include <sys/uio.h>
44 
45 #include <cam/cam.h>
46 #include <cam/cam_ccb.h>
47 #include <cam/cam_extend.h>
48 #include <cam/cam_periph.h>
49 #include <cam/cam_queue.h>
50 #include <cam/cam_xpt_periph.h>
51 #include <cam/cam_debug.h>
52 
53 #include <cam/scsi/scsi_all.h>
54 #include <cam/scsi/scsi_pt.h>
55 #include <cam/scsi/scsi_targetio.h>
56 #include <cam/scsi/scsi_message.h>
57 
/* Overall state of a driver instance. */
typedef enum {
	TARG_STATE_NORMAL,	/* Normal operation; queues are serviced */
	TARG_STATE_EXCEPTION,	/* Exception pending; queues stalled until the
				 * user clears it (see TARGIOCCLEAREXCEPTION) */
	TARG_STATE_TEARDOWN	/* Instance is being destroyed (targdtor) */
} targ_state;
63 
/* Miscellaneous per-instance flag bits. */
typedef enum {
	TARG_FLAG_NONE		 = 0x00,
	TARG_FLAG_SEND_EOF	 = 0x01, /* zero-length read(2) seen */
	TARG_FLAG_RECEIVE_EOF	 = 0x02, /* zero-length write(2) seen */
	TARG_FLAG_LUN_ENABLED	 = 0x04	 /* lun enabled on the controller */
} targ_flags;
70 
/* Per-CCB flag bits kept in the CCB's private ccb_flags field. */
typedef enum {
	TARG_CCB_NONE		= 0x00,
	TARG_CCB_WAITING	= 0x01,
	TARG_CCB_HELDQ		= 0x02,
	TARG_CCB_ABORT_TO_HELDQ = 0x04
} targ_ccb_flags;
77 
#define MAX_ACCEPT	16	/* ATIOs/INOTs pre-queued to the SIM */
#define MAX_IMMEDIATE	16
#define MAX_BUF_SIZE	256	/* Max inquiry/sense/mode page transfer */
#define MAX_INITIATORS	16	/* XXX More for Fibre-Channel */

/*
 * Fully parenthesize the macro arguments so that operands containing
 * operators of lower precedence than '>' (e.g. MIN(x | 1, y)) expand
 * correctly.  The old ((a > b) ? b : a) form mis-parsed such operands.
 */
#define MIN(a, b) (((a) > (b)) ? (b) : (a))

#define TARG_CONTROL_UNIT 0xffff00ff
#define TARG_IS_CONTROL_DEV(unit) ((unit) == TARG_CONTROL_UNIT)

#define TARG_TAG_WILDCARD ((u_int)~0)

/* Offsets into our private CCB area for storing accept information */
#define ccb_flags	ppriv_field0
#define ccb_descr	ppriv_ptr1

/* We stick a pointer to the originating accept TIO in each continue I/O CCB */
#define ccb_atio	ppriv_ptr1
96 
/*
 * Per-instance driver state; one is allocated by targctor() for each
 * target mode lun instance.
 */
struct targ_softc {
	/* CTIOs pending on the controller */
	struct		ccb_queue pending_queue;

	/* ATIOs awaiting CTIO resources from the XPT */
	struct		ccb_queue work_queue;

	/*
	 * ATIOs for SEND operations waiting for 'write'
	 * buffer resources from our userland daemon.
	 */
	struct		ccb_queue snd_ccb_queue;

	/*
	 * ATIOs for RCV operations waiting for 'read'
	 * buffer resources from our userland daemon.
	 */
	struct		ccb_queue rcv_ccb_queue;

	/*
	 * ATIOs for commands unknown to the kernel driver.
	 * These are queued for the userland daemon to
	 * consume.
	 */
	struct		ccb_queue unknown_atio_queue;

	/*
	 * Userland buffers for SEND commands waiting for
	 * SEND ATIOs to be queued by an initiator.
	 */
	struct		buf_queue_head snd_buf_queue;

	/*
	 * Userland buffers for RCV commands waiting for
	 * RCV ATIOs to be queued by an initiator.
	 */
	struct		buf_queue_head rcv_buf_queue;
	struct		devstat device_stats; /* devstat(9) I/O statistics */
	dev_t		targ_dev;	/* per-instance character device */
	dev_t		ctl_dev;	/* "<name><unit>.ctl" control device */
	struct		selinfo snd_select; /* poll(2) waiters for POLLIN */
	struct		selinfo rcv_select; /* poll(2) waiters for POLLOUT */
	targ_state	state;		/* NORMAL/EXCEPTION/TEARDOWN */
	targ_flags	flags;
	targ_exception	exceptions;	/* pending exceptions for the daemon */
	u_int		init_level;	/* how far targctor() progressed */
	u_int		inq_data_len;	/* size of inq_data buffer */
	struct		scsi_inquiry_data *inq_data;
	struct		ccb_accept_tio *accept_tio_list; /* linked via ccb_descr->atio_link */
	struct		ccb_hdr_slist immed_notify_slist;
	struct		initiator_state istate[MAX_INITIATORS];
};
149 
/*
 * Per-ATIO command descriptor, hung off the CCB's private ccb_descr
 * field (see allocdescr()/freedescr()).
 */
struct targ_cmd_desc {
	struct	  ccb_accept_tio* atio_link; /* next ATIO in accept_tio_list */
	u_int	  data_resid;	/* How much left to transfer */
	u_int	  data_increment;/* Amount to send before next disconnect */
	void*	  data;		/* The data. Can be from backing_store or not */
	void*	  backing_store;/* Backing store allocated for this descriptor*/
	struct	  buf *bp;	/* Buffer for this transfer */
	u_int	  max_size;	/* Size of backing_store */
	u_int32_t timeout;	/* CCB timeout; presumably ms -- TODO confirm */
	u_int8_t  status;	/* Status to return to initiator */
};
161 
162 static	d_open_t	targopen;
163 static	d_close_t	targclose;
164 static	d_read_t	targread;
165 static	d_write_t	targwrite;
166 static	d_ioctl_t	targioctl;
167 static	d_poll_t	targpoll;
168 static	d_strategy_t	targstrategy;
169 
/* Character device major number for targ devices. */
#define TARG_CDEV_MAJOR	65

/* Character device entry points for both instance and control devices. */
static struct cdevsw targ_cdevsw = {
	/* open */	targopen,
	/* close */	targclose,
	/* read */	targread,
	/* write */	targwrite,
	/* ioctl */	targioctl,
	/* poll */	targpoll,
	/* mmap */	nommap,
	/* strategy */	targstrategy,
	/* name */	"targ",
	/* maj */	TARG_CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* bmaj */	-1
};
187 
188 static int		targsendccb(struct cam_periph *periph, union ccb *ccb,
189 				    union ccb *inccb);
190 static periph_init_t	targinit;
191 static void		targasync(void *callback_arg, u_int32_t code,
192 				struct cam_path *path, void *arg);
193 static int		targallocinstance(struct ioc_alloc_unit *alloc_unit);
194 static int		targfreeinstance(struct ioc_alloc_unit *alloc_unit);
195 static cam_status	targenlun(struct cam_periph *periph);
196 static cam_status	targdislun(struct cam_periph *periph);
197 static periph_ctor_t	targctor;
198 static periph_dtor_t	targdtor;
199 static void		targrunqueue(struct cam_periph *periph,
200 				     struct targ_softc *softc);
201 static periph_start_t	targstart;
202 static void		targdone(struct cam_periph *periph,
203 				 union ccb *done_ccb);
204 static void		targfireexception(struct cam_periph *periph,
205 					  struct targ_softc *softc);
206 static void		targinoterror(struct cam_periph *periph,
207 				      struct targ_softc *softc,
208 				      struct ccb_immed_notify *inot);
209 static  int		targerror(union ccb *ccb, u_int32_t cam_flags,
210 				  u_int32_t sense_flags);
211 static struct targ_cmd_desc*	allocdescr(void);
212 static void		freedescr(struct targ_cmd_desc *buf);
213 static void		fill_sense(struct targ_softc *softc,
214 				   u_int initiator_id, u_int error_code,
215 				   u_int sense_key, u_int asc, u_int ascq);
216 static void		copy_sense(struct targ_softc *softc,
217 				   struct ccb_scsiio *csio);
218 static void	set_unit_attention_cond(struct cam_periph *periph,
219 					u_int initiator_id, ua_types ua);
220 static void	set_contingent_allegiance_cond(struct cam_periph *periph,
221 					       u_int initiator_id, ca_types ca);
222 static void	abort_pending_transactions(struct cam_periph *periph,
223 					   u_int initiator_id, u_int tag_id,
224 					   int errno, int to_held_queue);
225 
/* Registration record for the CAM peripheral driver framework. */
static struct periph_driver targdriver =
{
	targinit, "targ",
	TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
};
231 
232 DATA_SET(periphdriver_set, targdriver);
233 
234 static struct extend_array *targperiphs;
235 
236 static void
237 targinit(void)
238 {
239 
240 	/*
241 	 * Create our extend array for storing the devices we attach to.
242 	 */
243 	targperiphs = cam_extend_new();
244 	if (targperiphs == NULL) {
245 		printf("targ: Failed to alloc extend array!\n");
246 		return;
247 	}
248 }
249 
250 static void
251 targasync(void *callback_arg, u_int32_t code,
252 	  struct cam_path *path, void *arg)
253 {
254 	struct cam_periph *periph;
255 	struct targ_softc *softc;
256 
257 	periph = (struct cam_periph *)callback_arg;
258 	softc = (struct targ_softc *)periph->softc;
259 	switch (code) {
260 	case AC_PATH_DEREGISTERED:
261 	{
262 		/* XXX Implement */
263 		break;
264 	}
265 	default:
266 		break;
267 	}
268 }
269 
/*
 * Attempt to enable our lun for target mode and pre-queue a pool of
 * accept-target-I/O and immediate-notify CCBs to the SIM.  Returns
 * CAM_REQ_CMP on success; partial CCB pools are tolerated as long as
 * at least one CCB of each kind was queued.
 */
static cam_status
targenlun(struct cam_periph *periph)
{
	union ccb immed_ccb;
	struct targ_softc *softc;
	cam_status status;
	int i;

	softc = (struct targ_softc *)periph->softc;

	/* Nothing to do if the lun is already enabled */
	if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0)
		return (CAM_REQ_CMP);

	xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, /*priority*/1);
	immed_ccb.ccb_h.func_code = XPT_EN_LUN;

	/* Don't need support for any vendor specific commands */
	immed_ccb.cel.grp6_len = 0;
	immed_ccb.cel.grp7_len = 0;
	immed_ccb.cel.enable = 1;
	xpt_action(&immed_ccb);
	status = immed_ccb.ccb_h.status;
	if (status != CAM_REQ_CMP) {
		xpt_print_path(periph->path);
		printf("targenlun - Enable Lun Rejected for status 0x%x\n",
		       status);
		return (status);
	}

	softc->flags |= TARG_FLAG_LUN_ENABLED;

	/*
	 * Build up a buffer of accept target I/O
	 * operations for incoming selections.
	 */
	for (i = 0; i < MAX_ACCEPT; i++) {
		struct ccb_accept_tio *atio;

		atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_DEVBUF,
						      M_NOWAIT);
		if (atio == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		/* Each ATIO carries a command descriptor for its transfer */
		atio->ccb_h.ccb_descr = allocdescr();

		if (atio->ccb_h.ccb_descr == NULL) {
			free(atio, M_DEVBUF);
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		xpt_setup_ccb(&atio->ccb_h, periph->path, /*priority*/1);
		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		atio->ccb_h.cbfcnp = targdone;
		xpt_action((union ccb *)atio);
		status = atio->ccb_h.status;
		if (status != CAM_REQ_INPROG) {
			xpt_print_path(periph->path);
			printf("Queue of atio failed\n");
			freedescr(atio->ccb_h.ccb_descr);
			free(atio, M_DEVBUF);
			break;
		}
		/* Track this ATIO so targdislun() can abort it later */
		((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
		    softc->accept_tio_list;
		softc->accept_tio_list = atio;
	}

	/* Fail only if we could not queue a single ATIO */
	if (i == 0) {
		xpt_print_path(periph->path);
		printf("targenlun - Could not allocate accept tio CCBs: "
		       "status = 0x%x\n", status);
		targdislun(periph);
		return (CAM_REQ_CMP_ERR);
	}

	/*
	 * Build up a buffer of immediate notify CCBs
	 * so the SIM can tell us of asynchronous target mode events.
	 */
	for (i = 0; i < MAX_ACCEPT; i++) {
		struct ccb_immed_notify *inot;

		inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_DEVBUF,
						        M_NOWAIT);

		if (inot == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		xpt_setup_ccb(&inot->ccb_h, periph->path, /*priority*/1);
		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
		inot->ccb_h.cbfcnp = targdone;
		/* Track this INOT so targdislun() can abort it later */
		SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
				  periph_links.sle);
		xpt_action((union ccb *)inot);
	}

	/* Fail only if we could not queue a single INOT */
	if (i == 0) {
		xpt_print_path(periph->path);
		printf("targenlun - Could not allocate immediate notify CCBs: "
		       "status = 0x%x\n", status);
		targdislun(periph);
		return (CAM_REQ_CMP_ERR);
	}

	return (CAM_REQ_CMP);
}
382 
/*
 * Abort all ATIO/INOT CCBs queued to the controller and disable
 * target mode for our lun.  Returns the status of the XPT_EN_LUN
 * (disable) request.
 */
static cam_status
targdislun(struct cam_periph *periph)
{
	union ccb ccb;
	struct targ_softc *softc;
	struct ccb_accept_tio* atio;
	struct ccb_hdr *ccb_h;

	softc = (struct targ_softc *)periph->softc;
	if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0)
		return CAM_REQ_CMP;

	/* XXX Block for Continue I/O completion */

	/* Kill off all ACCEPT and IMMEDIATE CCBs */
	while ((atio = softc->accept_tio_list) != NULL) {

		softc->accept_tio_list =
		    ((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link;
		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
		ccb.cab.ccb_h.func_code = XPT_ABORT;
		ccb.cab.abort_ccb = (union ccb *)atio;
		xpt_action(&ccb);
	}

	while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) {
		SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle);
		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
		ccb.cab.ccb_h.func_code = XPT_ABORT;
		ccb.cab.abort_ccb = (union ccb *)ccb_h;
		xpt_action(&ccb);
	}

	/*
	 * Disable this lun.
	 */
	xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, /*priority*/1);
	ccb.cel.ccb_h.func_code = XPT_EN_LUN;
	ccb.cel.enable = 0;
	xpt_action(&ccb);

	if (ccb.cel.ccb_h.status != CAM_REQ_CMP)
		printf("targdislun - Disabling lun on controller failed "
		       "with status 0x%x\n", ccb.cel.ccb_h.status);
	else
		softc->flags &= ~TARG_FLAG_LUN_ENABLED;
	return (ccb.cel.ccb_h.status);
}
431 
/*
 * Peripheral constructor.  Allocates and initializes the per-instance
 * softc, seeds a power-on unit attention for every initiator, builds
 * the default inquiry data, and creates the character devices.
 * 'arg' is the XPT_PATH_INQ result for the underlying controller.
 */
static cam_status
targctor(struct cam_periph *periph, void *arg)
{
	struct ccb_pathinq *cpi;
	struct targ_softc *softc;
	int i;

	cpi = (struct ccb_pathinq *)arg;

	/* Allocate our per-instance private storage */
	softc = (struct targ_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);
	if (softc == NULL) {
		printf("targctor: unable to malloc softc\n");
		return (CAM_REQ_CMP_ERR);
	}

	bzero(softc, sizeof(*softc));
	TAILQ_INIT(&softc->pending_queue);
	TAILQ_INIT(&softc->work_queue);
	TAILQ_INIT(&softc->snd_ccb_queue);
	TAILQ_INIT(&softc->rcv_ccb_queue);
	TAILQ_INIT(&softc->unknown_atio_queue);
	bufq_init(&softc->snd_buf_queue);
	bufq_init(&softc->rcv_buf_queue);
	softc->accept_tio_list = NULL;
	SLIST_INIT(&softc->immed_notify_slist);
	softc->state = TARG_STATE_NORMAL;
	periph->softc = softc;
	/* init_level 1: softc allocated and linked to the periph */
	softc->init_level++;

	cam_extend_set(targperiphs, periph->unit_number, periph);

	/*
	 * We start out life with a UA to indicate power-on/reset.
	 */
	for (i = 0; i < MAX_INITIATORS; i++)
		softc->istate[i].pending_ua = UA_POWER_ON;

	/*
	 * Allocate an initial inquiry data buffer.  We might allow the
	 * user to override this later via an ioctl.
	 */
	softc->inq_data_len = sizeof(*softc->inq_data);
	softc->inq_data = malloc(softc->inq_data_len, M_DEVBUF, M_NOWAIT);
	if (softc->inq_data == NULL) {
		printf("targctor - Unable to malloc inquiry data\n");
		targdtor(periph);
		return (CAM_RESRC_UNAVAIL);
	}
	bzero(softc->inq_data, softc->inq_data_len);
	softc->inq_data->device = T_PROCESSOR | (SID_QUAL_LU_CONNECTED << 5);
	softc->inq_data->version = 2;
	softc->inq_data->response_format = 2; /* SCSI2 Inquiry Format */
	/* Advertise only the transfer capabilities the controller supports */
	softc->inq_data->flags =
	    cpi->hba_inquiry & (PI_SDTR_ABLE|PI_WIDE_16|PI_WIDE_32);
	softc->inq_data->additional_length = softc->inq_data_len - 4;
	/* SCSI inquiry strings are fixed width, space padded (no NUL) */
	strncpy(softc->inq_data->vendor, "FreeBSD ", SID_VENDOR_SIZE);
	strncpy(softc->inq_data->product, "TM-PT           ", SID_PRODUCT_SIZE);
	strncpy(softc->inq_data->revision, "0.0 ", SID_REVISION_SIZE);
	softc->targ_dev = make_dev(&targ_cdevsw, periph->unit_number, UID_ROOT,
				   GID_OPERATOR, 0600, "%s%d",
				   periph->periph_name, periph->unit_number);
	softc->ctl_dev = make_dev(&targ_cdevsw, TARG_CONTROL_UNIT, UID_ROOT,
				   GID_OPERATOR, 0600, "%s%d.ctl",
				   periph->periph_name, periph->unit_number);

	/* init_level 2: inquiry data and devices exist */
	softc->init_level++;
	return (CAM_REQ_CMP);
}
501 
502 static void
503 targdtor(struct cam_periph *periph)
504 {
505 	struct targ_softc *softc;
506 
507 	softc = (struct targ_softc *)periph->softc;
508 
509 	softc->state = TARG_STATE_TEARDOWN;
510 
511 	targdislun(periph);
512 
513 	cam_extend_release(targperiphs, periph->unit_number);
514 
515 	switch (softc->init_level) {
516 	default:
517 		/* FALLTHROUGH */
518 	case 2:
519 		free(softc->inq_data, M_DEVBUF);
520 		destroy_dev(softc->targ_dev);
521 		destroy_dev(softc->ctl_dev);
522 		/* FALLTHROUGH */
523 	case 1:
524 		free(softc, M_DEVBUF);
525 		break;
526 	case 0:
527 		panic("targdtor - impossible init level");;
528 	}
529 }
530 
531 static int
532 targopen(dev_t dev, int flags, int fmt, struct proc *p)
533 {
534 	struct cam_periph *periph;
535 	struct	targ_softc *softc;
536 	u_int unit;
537 	cam_status status;
538 	int error;
539 	int s;
540 
541 	unit = minor(dev);
542 
543 	/* An open of the control device always succeeds */
544 	if (TARG_IS_CONTROL_DEV(unit))
545 		return 0;
546 
547 	s = splsoftcam();
548 	periph = cam_extend_get(targperiphs, unit);
549 	if (periph == NULL) {
550 		return (ENXIO);
551         	splx(s);
552 	}
553 	if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0) {
554 		splx(s);
555 		return (error);
556 	}
557 
558 	softc = (struct targ_softc *)periph->softc;
559 	if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0) {
560 		if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
561 			splx(s);
562 			cam_periph_unlock(periph);
563 			return(ENXIO);
564 		}
565 	}
566         splx(s);
567 
568 	status = targenlun(periph);
569 	switch (status) {
570 	case CAM_REQ_CMP:
571 		error = 0;
572 		break;
573 	case CAM_RESRC_UNAVAIL:
574 		error = ENOMEM;
575 		break;
576 	case CAM_LUN_ALRDY_ENA:
577 		error = EADDRINUSE;
578 		break;
579 	default:
580 		error = ENXIO;
581 		break;
582 	}
583         cam_periph_unlock(periph);
584 	return (error);
585 }
586 
587 static int
588 targclose(dev_t dev, int flag, int fmt, struct proc *p)
589 {
590 	struct	cam_periph *periph;
591 	struct	targ_softc *softc;
592 	u_int	unit;
593 	int	s;
594 	int	error;
595 
596 	unit = minor(dev);
597 
598 	/* A close of the control device always succeeds */
599 	if (TARG_IS_CONTROL_DEV(unit))
600 		return 0;
601 
602 	s = splsoftcam();
603 	periph = cam_extend_get(targperiphs, unit);
604 	if (periph == NULL) {
605 		splx(s);
606 		return (ENXIO);
607 	}
608 	if ((error = cam_periph_lock(periph, PRIBIO)) != 0)
609 		return (error);
610 	softc = (struct targ_softc *)periph->softc;
611 	splx(s);
612 
613 	targdislun(periph);
614 
615 	cam_periph_unlock(periph);
616 	cam_periph_release(periph);
617 
618 	return (0);
619 }
620 
/*
 * TARGCTLIOALLOCUNIT handler: create a new "targ" peripheral instance
 * on the path described by alloc_unit, after verifying the controller
 * supports processor target mode.  On success the assigned unit number
 * is returned in alloc_unit->unit.
 */
static int
targallocinstance(struct ioc_alloc_unit *alloc_unit)
{
	struct ccb_pathinq cpi;
	struct cam_path *path;
	struct cam_periph *periph;
	cam_status status;
	int free_path_on_return;
	int error;

	free_path_on_return = 0;
	status = xpt_create_path(&path, /*periph*/NULL,
				 alloc_unit->path_id,
				 alloc_unit->target_id,
				 alloc_unit->lun_id);
	if (status != CAM_REQ_CMP) {
		printf("Couldn't Allocate Path %x\n", status);
		goto fail;
	}

	free_path_on_return++;


	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	status = cpi.ccb_h.status;

	if (status != CAM_REQ_CMP) {
		printf("Couldn't CPI %x\n", status);
		goto fail;
	}

	/* Can only alloc units on controllers that support target mode */
	if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
		printf("Controller does not support target mode%x\n", status);
		status = CAM_PATH_INVALID;
		goto fail;
	}

	/* Ensure that we don't already have an instance for this unit. */
	if ((periph = cam_periph_find(path, "targ")) != NULL) {
		status = CAM_LUN_ALRDY_ENA;
		goto fail;
	}

	/*
	 * Allocate a peripheral instance for
	 * this target instance.
	 */
	status = cam_periph_alloc(targctor, NULL, targdtor, targstart,
				  "targ", CAM_PERIPH_BIO, path, targasync,
				  0, &cpi);

fail:
	/* Map the final CAM status to an errno for the ioctl caller */
	switch (status) {
	case CAM_REQ_CMP:
	{
		struct cam_periph *periph;

		if ((periph = cam_periph_find(path, "targ")) == NULL)
			panic("targallocinstance: Succeeded but no periph?");
		error = 0;
		alloc_unit->unit = periph->unit_number;
		break;
	}
	case CAM_RESRC_UNAVAIL:
		error = ENOMEM;
		break;
	case CAM_LUN_ALRDY_ENA:
		error = EADDRINUSE;
		break;
	default:
		printf("targallocinstance: Unexpected CAM status %x\n", status);
		/* FALLTHROUGH */
	case CAM_PATH_INVALID:
		error = ENXIO;
		break;
	case CAM_PROVIDE_FAIL:
		error = ENODEV;
		break;
	}

	if (free_path_on_return != 0)
		xpt_free_path(path);

	return (error);
}
709 
/*
 * TARGCTLIOFREEUNIT handler: invalidate the "targ" peripheral instance
 * on the path described by alloc_unit.  Fails while the lun is still
 * enabled.
 */
static int
targfreeinstance(struct ioc_alloc_unit *alloc_unit)
{
	struct cam_path *path;
	struct cam_periph *periph;
	struct targ_softc *softc;
	cam_status status;
	int free_path_on_return;
	int error;

	periph = NULL;
	free_path_on_return = 0;
	status = xpt_create_path(&path, /*periph*/NULL,
				 alloc_unit->path_id,
				 alloc_unit->target_id,
				 alloc_unit->lun_id);
	free_path_on_return++;

	if (status != CAM_REQ_CMP)
		goto fail;

	/* Find our instance. */
	if ((periph = cam_periph_find(path, "targ")) == NULL) {
		xpt_print_path(path);
		status = CAM_PATH_INVALID;
		goto fail;
	}

        softc = (struct targ_softc *)periph->softc;

	/* Refuse to free an instance whose lun is still enabled */
        if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0) {
		status = CAM_BUSY;
		goto fail;
	}

fail:
	if (free_path_on_return != 0)
		xpt_free_path(path);

	/* Map the final CAM status to an errno for the ioctl caller */
	switch (status) {
	case CAM_REQ_CMP:
		if (periph != NULL)
			cam_periph_invalidate(periph);
		error = 0;
		break;
	case CAM_RESRC_UNAVAIL:
		error = ENOMEM;
		break;
	case CAM_LUN_ALRDY_ENA:
		error = EADDRINUSE;
		break;
	default:
		printf("targfreeinstance: Unexpected CAM status %x\n", status);
		/* FALLTHROUGH */
	case CAM_PATH_INVALID:
		error = ENODEV;
		break;
	}
	return (error);
}
769 }
770 
771 static int
772 targioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
773 {
774 	struct cam_periph *periph;
775 	struct targ_softc *softc;
776 	u_int  unit;
777 	int    error;
778 
779 	unit = minor(dev);
780 	error = 0;
781 	if (TARG_IS_CONTROL_DEV(unit)) {
782 		switch (cmd) {
783 		case TARGCTLIOALLOCUNIT:
784 			error = targallocinstance((struct ioc_alloc_unit*)addr);
785 			break;
786 		case TARGCTLIOFREEUNIT:
787 			error = targfreeinstance((struct ioc_alloc_unit*)addr);
788 			break;
789 		default:
790 			error = EINVAL;
791 			break;
792 		}
793 		return (error);
794 	}
795 
796 	periph = cam_extend_get(targperiphs, unit);
797 	if (periph == NULL)
798 		return (ENXIO);
799 	softc = (struct targ_softc *)periph->softc;
800 	switch (cmd) {
801 	case TARGIOCFETCHEXCEPTION:
802 		*((targ_exception *)addr) = softc->exceptions;
803 		break;
804 	case TARGIOCCLEAREXCEPTION:
805 	{
806 		targ_exception clear_mask;
807 
808 		clear_mask = *((targ_exception *)addr);
809 		if ((clear_mask & TARG_EXCEPT_UNKNOWN_ATIO) != 0) {
810 			struct ccb_hdr *ccbh;
811 
812 			ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
813 			if (ccbh != NULL) {
814 				TAILQ_REMOVE(&softc->unknown_atio_queue,
815 					     ccbh, periph_links.tqe);
816 				/* Requeue the ATIO back to the controller */
817 				xpt_action((union ccb *)ccbh);
818 				ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
819 			}
820 			if (ccbh != NULL)
821 				clear_mask &= ~TARG_EXCEPT_UNKNOWN_ATIO;
822 		}
823 		softc->exceptions &= ~clear_mask;
824 		if (softc->exceptions == TARG_EXCEPT_NONE
825 		 && softc->state == TARG_STATE_EXCEPTION) {
826 			softc->state = TARG_STATE_NORMAL;
827 			targrunqueue(periph, softc);
828 		}
829 		break;
830 	}
831 	case TARGIOCFETCHATIO:
832 	{
833 		struct ccb_hdr *ccbh;
834 
835 		ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
836 		if (ccbh != NULL) {
837 			bcopy(ccbh, addr, sizeof(struct ccb_accept_tio));
838 		} else {
839 			error = ENOENT;
840 		}
841 		break;
842 	}
843 	case TARGIOCCOMMAND:
844 	{
845 		union ccb *inccb;
846 		union ccb *ccb;
847 
848 		/*
849 		 * XXX JGibbs
850 		 * This code is lifted directly from the pass-thru driver.
851 		 * Perhaps this should be moved to a library????
852 		 */
853 		inccb = (union ccb *)addr;
854 		ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority);
855 
856 		error = targsendccb(periph, ccb, inccb);
857 
858 		xpt_release_ccb(ccb);
859 
860 		break;
861 	}
862 	case TARGIOCGETISTATE:
863 	case TARGIOCSETISTATE:
864 	{
865 		struct ioc_initiator_state *ioc_istate;
866 
867 		ioc_istate = (struct ioc_initiator_state *)addr;
868 		if (ioc_istate->initiator_id > MAX_INITIATORS) {
869 			error = EINVAL;
870 			break;
871 		}
872 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
873 			  ("GET/SETISTATE for %d\n", ioc_istate->initiator_id));
874 		if (cmd == TARGIOCGETISTATE) {
875 			bcopy(&softc->istate[ioc_istate->initiator_id],
876 			      &ioc_istate->istate, sizeof(ioc_istate->istate));
877 		} else {
878 			bcopy(&ioc_istate->istate,
879 			      &softc->istate[ioc_istate->initiator_id],
880 			      sizeof(ioc_istate->istate));
881 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
882 			  ("pending_ca now %x\n",
883 			   softc->istate[ioc_istate->initiator_id].pending_ca));
884 		}
885 		break;
886 	}
887 	default:
888 		error = ENOTTY;
889 		break;
890 	}
891 	return (error);
892 }
893 
894 /*
895  * XXX JGibbs lifted from pass-thru driver.
896  * Generally, "ccb" should be the CCB supplied by the kernel.  "inccb"
897  * should be the CCB that is copied in from the user.
898  */
899 static int
900 targsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
901 {
902 	struct targ_softc *softc;
903 	struct cam_periph_map_info mapinfo;
904 	int error, need_unmap;
905 	int s;
906 
907 	softc = (struct targ_softc *)periph->softc;
908 
909 	need_unmap = 0;
910 
911 	/*
912 	 * There are some fields in the CCB header that need to be
913 	 * preserved, the rest we get from the user.
914 	 */
915 	xpt_merge_ccb(ccb, inccb);
916 
917 	/*
918 	 * There's no way for the user to have a completion
919 	 * function, so we put our own completion function in here.
920 	 */
921 	ccb->ccb_h.cbfcnp = targdone;
922 
923 	/*
924 	 * We only attempt to map the user memory into kernel space
925 	 * if they haven't passed in a physical memory pointer,
926 	 * and if there is actually an I/O operation to perform.
927 	 * Right now cam_periph_mapmem() only supports SCSI and device
928 	 * match CCBs.  For the SCSI CCBs, we only pass the CCB in if
929 	 * there's actually data to map.  cam_periph_mapmem() will do the
930 	 * right thing, even if there isn't data to map, but since CCBs
931 	 * without data are a reasonably common occurance (e.g. test unit
932 	 * ready), it will save a few cycles if we check for it here.
933 	 */
934 	if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0)
935 	 && (((ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
936 	    && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
937 	  || (ccb->ccb_h.func_code == XPT_DEV_MATCH))) {
938 
939 		bzero(&mapinfo, sizeof(mapinfo));
940 
941 		error = cam_periph_mapmem(ccb, &mapinfo);
942 
943 		/*
944 		 * cam_periph_mapmem returned an error, we can't continue.
945 		 * Return the error to the user.
946 		 */
947 		if (error)
948 			return(error);
949 
950 		/*
951 		 * We successfully mapped the memory in, so we need to
952 		 * unmap it when the transaction is done.
953 		 */
954 		need_unmap = 1;
955 	}
956 
957 	/*
958 	 * Once queued on the pending CCB list, this CCB will be protected
959 	 * by the error recovery handling used for 'buffer I/O' ccbs.  Since
960 	 * we are in a process context here, however, the software interrupt
961 	 * for this driver may deliver an event invalidating this CCB just
962 	 * before we queue it.  Close this race condition by blocking
963 	 * software interrupt delivery, checking for any pertinent queued
964 	 * events, and only then queuing this CCB.
965 	 */
966 	s = splsoftcam();
967 	if (softc->exceptions == 0) {
968 		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
969 			TAILQ_INSERT_TAIL(&softc->pending_queue, &ccb->ccb_h,
970 					  periph_links.tqe);
971 
972 		/*
973 		 * If the user wants us to perform any error recovery,
974 		 * then honor that request.  Otherwise, it's up to the
975 		 * user to perform any error recovery.
976 		 */
977 		error = cam_periph_runccb(ccb,
978 					  /* error handler */NULL,
979 					  /* cam_flags */ 0,
980 					  /* sense_flags */SF_RETRY_UA,
981 					  &softc->device_stats);
982 
983 		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
984 			TAILQ_INSERT_TAIL(&softc->pending_queue, &ccb->ccb_h,
985 					  periph_links.tqe);
986 	} else {
987 		ccb->ccb_h.status = CAM_UNACKED_EVENT;
988 		error = 0;
989 	}
990 	splx(s);
991 
992 	if (need_unmap != 0)
993 		cam_periph_unmapmem(ccb, &mapinfo);
994 
995 	ccb->ccb_h.cbfcnp = NULL;
996 	ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
997 	bcopy(ccb, inccb, sizeof(union ccb));
998 
999 	return(error);
1000 }
1001 
1002 
/*
 * Poll entry point.  A write is "ready" when an initiator RCV ATIO is
 * queued and no user buffer is already pending for it; a read is
 * "ready" in the symmetric SND case.  Any non-normal state is
 * reported as POLLERR.
 */
static int
targpoll(dev_t dev, int poll_events, struct proc *p)
{
	struct cam_periph *periph;
	struct targ_softc *softc;
	u_int  unit;
	int    revents;
	int    s;

	unit = minor(dev);

	/* ioctl is the only supported operation of the control device */
	if (TARG_IS_CONTROL_DEV(unit))
		return EINVAL;

	periph = cam_extend_get(targperiphs, unit);
	if (periph == NULL)
		return (ENXIO);
	softc = (struct targ_softc *)periph->softc;

	revents = 0;
	s = splcam();
	if ((poll_events & (POLLOUT | POLLWRNORM)) != 0) {
		if (TAILQ_FIRST(&softc->rcv_ccb_queue) != NULL
		 && bufq_first(&softc->rcv_buf_queue) == NULL)
			revents |= poll_events & (POLLOUT | POLLWRNORM);
	}
	if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
		if (TAILQ_FIRST(&softc->snd_ccb_queue) != NULL
		 && bufq_first(&softc->snd_buf_queue) == NULL)
			revents |= poll_events & (POLLIN | POLLRDNORM);
	}

	if (softc->state != TARG_STATE_NORMAL)
		revents |= POLLERR;

	/* Nothing ready: record this thread for selwakeup */
	if (revents == 0) {
		if (poll_events & (POLLOUT | POLLWRNORM))
			selrecord(p, &softc->rcv_select);
		if (poll_events & (POLLIN | POLLRDNORM))
			selrecord(p, &softc->snd_select);
	}
	splx(s);
	return (revents);
}
1048 
1049 static int
1050 targread(dev_t dev, struct uio *uio, int ioflag)
1051 {
1052 	u_int  unit;
1053 
1054 	unit = minor(dev);
1055 	/* ioctl is the only supported operation of the control device */
1056 	if (TARG_IS_CONTROL_DEV(unit))
1057 		return EINVAL;
1058 
1059 	if (uio->uio_iovcnt == 0
1060 	 || uio->uio_iov->iov_len == 0) {
1061 		/* EOF */
1062 		struct cam_periph *periph;
1063 		struct targ_softc *softc;
1064 		int    s;
1065 
1066 		s = splcam();
1067 		periph = cam_extend_get(targperiphs, unit);
1068 		if (periph == NULL)
1069 			return (ENXIO);
1070 		softc = (struct targ_softc *)periph->softc;
1071 		softc->flags |= TARG_FLAG_SEND_EOF;
1072 		splx(s);
1073 		targrunqueue(periph, softc);
1074 		return (0);
1075 	}
1076 	return(physread(dev, uio, ioflag));
1077 }
1078 
1079 static int
1080 targwrite(dev_t dev, struct uio *uio, int ioflag)
1081 {
1082 	u_int  unit;
1083 
1084 	unit = minor(dev);
1085 	/* ioctl is the only supported operation of the control device */
1086 	if (TARG_IS_CONTROL_DEV(unit))
1087 		return EINVAL;
1088 
1089 	if (uio->uio_iovcnt == 0
1090 	 || uio->uio_iov->iov_len == 0) {
1091 		/* EOF */
1092 		struct cam_periph *periph;
1093 		struct targ_softc *softc;
1094 		int    s;
1095 
1096 		s = splcam();
1097 		periph = cam_extend_get(targperiphs, unit);
1098 		if (periph == NULL)
1099 			return (ENXIO);
1100 		softc = (struct targ_softc *)periph->softc;
1101 		softc->flags |= TARG_FLAG_RECEIVE_EOF;
1102 		splx(s);
1103 		targrunqueue(periph, softc);
1104 		return (0);
1105 	}
1106 	return(physwrite(dev, uio, ioflag));
1107 }
1108 
1109 /*
1110  * Actually translate the requested transfer into one the physical driver
1111  * can understand.  The transfer is described by a buf and will include
1112  * only one physical transfer.
1113  */
1114 static void
1115 targstrategy(struct buf *bp)
1116 {
1117 	struct cam_periph *periph;
1118 	struct targ_softc *softc;
1119 	u_int  unit;
1120 	int    s;
1121 
1122 	unit = minor(bp->b_dev);
1123 
1124 	/* ioctl is the only supported operation of the control device */
1125 	if (TARG_IS_CONTROL_DEV(unit)) {
1126 		bp->b_error = EINVAL;
1127 		goto bad;
1128 	}
1129 
1130 	periph = cam_extend_get(targperiphs, unit);
1131 	if (periph == NULL) {
1132 		bp->b_error = ENXIO;
1133 		goto bad;
1134 	}
1135 	softc = (struct targ_softc *)periph->softc;
1136 
1137 	/*
1138 	 * Mask interrupts so that the device cannot be invalidated until
1139 	 * after we are in the queue.  Otherwise, we might not properly
1140 	 * clean up one of the buffers.
1141 	 */
1142 	s = splbio();
1143 
1144 	/*
1145 	 * If there is an exception pending, error out
1146 	 */
1147 	if (softc->state != TARG_STATE_NORMAL) {
1148 		splx(s);
1149 		if (softc->state == TARG_STATE_EXCEPTION
1150 		 && (softc->exceptions & TARG_EXCEPT_DEVICE_INVALID) == 0)
1151 			bp->b_error = EBUSY;
1152 		else
1153 			bp->b_error = ENXIO;
1154 		goto bad;
1155 	}
1156 
1157 	/*
1158 	 * Place it in the queue of buffers available for either
1159 	 * SEND or RECEIVE commands.
1160 	 *
1161 	 */
1162 	bp->b_resid = bp->b_bcount;
1163 	if ((bp->b_flags & B_READ) != 0) {
1164 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1165 			  ("Queued a SEND buffer\n"));
1166 		bufq_insert_tail(&softc->snd_buf_queue, bp);
1167 	} else {
1168 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1169 			  ("Queued a RECEIVE buffer\n"));
1170 		bufq_insert_tail(&softc->rcv_buf_queue, bp);
1171 	}
1172 
1173 	splx(s);
1174 
1175 	/*
1176 	 * Attempt to use the new buffer to service any pending
1177 	 * target commands.
1178 	 */
1179 	targrunqueue(periph, softc);
1180 
1181 	return;
1182 bad:
1183 	bp->b_flags |= B_ERROR;
1184 
1185 	/*
1186 	 * Correctly set the buf to indicate a completed xfer
1187 	 */
1188 	bp->b_resid = bp->b_bcount;
1189 	biodone(bp);
1190 }
1191 
/*
 * Pair the first available user buffer (or a pending EOF flag) with
 * the oldest queued SEND or RECEIVE ATIO, and move the result onto the
 * work queue for targstart() to turn into a CTIO.  At most one request
 * is set up per call to preserve data ordering.
 */
static void
targrunqueue(struct cam_periph *periph, struct targ_softc *softc)
{
	struct  ccb_queue *pending_queue;
	struct	ccb_accept_tio *atio;
	struct	buf_queue_head *bufq;
	struct	buf *bp;
	struct	targ_cmd_desc *desc;
	struct	ccb_hdr *ccbh;
	int	s;

	s = splbio();
	pending_queue = NULL;
	bufq = NULL;
	ccbh = NULL;
	/* Only run one request at a time to maintain data ordering. */
	if (softc->state != TARG_STATE_NORMAL
	 || TAILQ_FIRST(&softc->work_queue) != NULL
	 || TAILQ_FIRST(&softc->pending_queue) != NULL) {
		splx(s);
		return;
	}

	/* SEND: a user read buffer (or SEND EOF) plus a queued SEND ATIO. */
	if (((bp = bufq_first(&softc->snd_buf_queue)) != NULL
	  || (softc->flags & TARG_FLAG_SEND_EOF) != 0)
	 && (ccbh = TAILQ_FIRST(&softc->snd_ccb_queue)) != NULL) {

		if (bp == NULL)
			softc->flags &= ~TARG_FLAG_SEND_EOF;
		else {
			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
				  ("De-Queued a SEND buffer %ld\n",
				   bp->b_bcount));
		}
		bufq = &softc->snd_buf_queue;
		pending_queue = &softc->snd_ccb_queue;
	} else if (((bp = bufq_first(&softc->rcv_buf_queue)) != NULL
	  	 || (softc->flags & TARG_FLAG_RECEIVE_EOF) != 0)
		&& (ccbh = TAILQ_FIRST(&softc->rcv_ccb_queue)) != NULL) {
		/* RECEIVE: a user write buffer (or RECEIVE EOF) plus ATIO. */
		if (bp == NULL)
			softc->flags &= ~TARG_FLAG_RECEIVE_EOF;
		else {
			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
				  ("De-Queued a RECEIVE buffer %ld\n",
				   bp->b_bcount));
		}
		bufq = &softc->rcv_buf_queue;
		pending_queue = &softc->rcv_ccb_queue;
	}

	if (pending_queue != NULL) {
		/* Process a request */
		atio = (struct ccb_accept_tio *)ccbh;
		TAILQ_REMOVE(pending_queue, ccbh, periph_links.tqe);
		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
		desc->bp = bp;
		if (bp == NULL) {
			/* EOF */
			/* Zero-length transfer: report status only. */
			desc->data = NULL;
			desc->data_increment = 0;
			desc->data_resid = 0;
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_NONE;
		} else {
			bufq_remove(bufq, bp);
			/* Resume where any previous partial transfer ended. */
			desc->data = &bp->b_data[bp->b_bcount - bp->b_resid];
			desc->data_increment =
			    MIN(desc->data_resid, bp->b_resid);
		}
		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
			  ("Buffer command: data %x: datacnt %d\n",
			   (intptr_t)desc->data, desc->data_increment));
		TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
				  periph_links.tqe);
	}
	if (TAILQ_FIRST(&softc->work_queue) != NULL) {
		splx(s);
		/* Ask CAM for a CCB; targstart() will build the CTIO. */
		xpt_schedule(periph, /*XXX priority*/1);
	} else
		splx(s);
}
1274 
/*
 * CAM scheduler callback: consume the CCB we were granted.  Either
 * hand it to a sleeping internal requester (TARG_CCB_WAITING), or
 * convert the ATIO at the head of the work queue into a CTIO and
 * dispatch it to the controller.
 */
static void
targstart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct targ_softc *softc;
	struct ccb_hdr *ccbh;
	struct ccb_accept_tio *atio;
	struct targ_cmd_desc *desc;
	struct ccb_scsiio *csio;
	targ_ccb_flags flags;
	int    s;

	softc = (struct targ_softc *)periph->softc;

	s = splbio();
	ccbh = TAILQ_FIRST(&softc->work_queue);
	if (periph->immediate_priority <= periph->pinfo.priority) {
		/* Someone is sleeping for a CCB; hand this one over. */
		start_ccb->ccb_h.ccb_flags = TARG_CCB_WAITING;
		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
				  periph_links.sle);
		periph->immediate_priority = CAM_PRIORITY_NONE;
		splx(s);
		wakeup(&periph->ccb_list);
	} else if (ccbh == NULL) {
		/* No work after all; return the CCB. */
		splx(s);
		xpt_release_ccb(start_ccb);
	} else {
		TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
		splx(s);
		atio = (struct ccb_accept_tio*)ccbh;
		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;

		/* Is this a tagged request? */
		flags = atio->ccb_h.flags & (CAM_TAG_ACTION_VALID|CAM_DIR_MASK);

		/*
		 * If we are done with the transaction, tell the
		 * controller to send status and perform a CMD_CMPLT.
		 */
		if (desc->data_resid == desc->data_increment)
			flags |= CAM_SEND_STATUS;

		csio = &start_ccb->csio;
		cam_fill_ctio(csio,
			      /*retries*/2,
			      targdone,
			      flags,
			      /*tag_action*/MSG_SIMPLE_Q_TAG,
			      atio->tag_id,
			      atio->init_id,
			      desc->status,
			      /*data_ptr*/desc->data_increment == 0
					  ? NULL : desc->data,
			      /*dxfer_len*/desc->data_increment,
			      /*timeout*/desc->timeout);

		/* Remember the originating ATIO for completion handling. */
		start_ccb->ccb_h.ccb_flags = TARG_CCB_NONE;
		start_ccb->ccb_h.ccb_atio = atio;
		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
			  ("Sending a CTIO\n"));
		TAILQ_INSERT_TAIL(&softc->pending_queue, &csio->ccb_h,
				  periph_links.tqe);
		xpt_action(start_ccb);
		/* Re-check for work that arrived while we dispatched. */
		s = splbio();
		ccbh = TAILQ_FIRST(&softc->work_queue);
		splx(s);
	}
	if (ccbh != NULL)
		targrunqueue(periph, softc);
}
1344 
1345 static void
1346 targdone(struct cam_periph *periph, union ccb *done_ccb)
1347 {
1348 	struct targ_softc *softc;
1349 
1350 	softc = (struct targ_softc *)periph->softc;
1351 
1352 	if (done_ccb->ccb_h.ccb_flags == TARG_CCB_WAITING) {
1353 		/* Caller will release the CCB */
1354 		wakeup(&done_ccb->ccb_h.cbfcnp);
1355 		return;
1356 	}
1357 
1358 	switch (done_ccb->ccb_h.func_code) {
1359 	case XPT_ACCEPT_TARGET_IO:
1360 	{
1361 		struct ccb_accept_tio *atio;
1362 		struct targ_cmd_desc *descr;
1363 		struct initiator_state *istate;
1364 		u_int8_t *cdb;
1365 
1366 		atio = &done_ccb->atio;
1367 		descr = (struct targ_cmd_desc*)atio->ccb_h.ccb_descr;
1368 		istate = &softc->istate[atio->init_id];
1369 		cdb = atio->cdb_io.cdb_bytes;
1370 		if (softc->state == TARG_STATE_TEARDOWN
1371 		 || atio->ccb_h.status == CAM_REQ_ABORTED) {
1372 			freedescr(descr);
1373 			free(done_ccb, M_DEVBUF);
1374 			return;
1375 		}
1376 
1377 		if (istate->pending_ca == 0
1378 		 && istate->pending_ua != 0
1379 		 && cdb[0] != INQUIRY) {
1380 			/* Pending UA, tell initiator */
1381 			/* Direction is always relative to the initator */
1382 			istate->pending_ca = CA_UNIT_ATTN;
1383 			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1384 			atio->ccb_h.flags |= CAM_DIR_NONE;
1385 			descr->data_resid = 0;
1386 			descr->data_increment = 0;
1387 			descr->timeout = 5 * 1000;
1388 			descr->status = SCSI_STATUS_CHECK_COND;
1389 		} else {
1390 			/*
1391 			 * Save the current CA and UA status so
1392 			 * they can be used by this command.
1393 			 */
1394 			ua_types pending_ua;
1395 			ca_types pending_ca;
1396 
1397 			pending_ua = istate->pending_ua;
1398 			pending_ca = istate->pending_ca;
1399 
1400 			/*
1401 			 * As per the SCSI2 spec, any command that occurs
1402 			 * after a CA is reported, clears the CA.  If the
1403 			 * command is not an inquiry, we are also supposed
1404 			 * to clear the UA condition, if any, that caused
1405 			 * the CA to occur assuming the UA is not a
1406 			 * persistant state.
1407 			 */
1408 			istate->pending_ca = CA_NONE;
1409 			if ((pending_ca
1410 			   & (CA_CMD_SENSE|CA_UNIT_ATTN)) == CA_UNIT_ATTN
1411 			 && cdb[0] != INQUIRY)
1412 				istate->pending_ua = UA_NONE;
1413 
1414 			/*
1415 			 * Determine the type of incoming command and
1416 			 * setup our buffer for a response.
1417 			 */
1418 			switch (cdb[0]) {
1419 			case INQUIRY:
1420 			{
1421 				struct scsi_inquiry *inq;
1422 				struct scsi_sense_data *sense;
1423 
1424 				inq = (struct scsi_inquiry *)cdb;
1425 				sense = &istate->sense_data;
1426 				CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1427 					  ("Saw an inquiry!\n"));
1428 				/*
1429 				 * Validate the command.  We don't
1430 				 * support any VPD pages, so complain
1431 				 * if EVPD is set.
1432 				 */
1433 				if ((inq->byte2 & SI_EVPD) != 0
1434 				 || inq->page_code != 0) {
1435 					istate->pending_ca = CA_CMD_SENSE;
1436 					atio->ccb_h.flags &= ~CAM_DIR_MASK;
1437 					atio->ccb_h.flags |= CAM_DIR_NONE;
1438 					descr->data_resid = 0;
1439 					descr->data_increment = 0;
1440 					descr->status = SCSI_STATUS_CHECK_COND;
1441 					fill_sense(softc, atio->init_id,
1442 						   SSD_CURRENT_ERROR,
1443 						   SSD_KEY_ILLEGAL_REQUEST,
1444 						   /*asc*/0x24, /*ascq*/0x00);
1445 					sense->extra_len =
1446 						offsetof(struct scsi_sense_data,
1447 							 extra_bytes)
1448 					      - offsetof(struct scsi_sense_data,
1449 							 extra_len);
1450 				}
1451 
1452 				if ((inq->byte2 & SI_EVPD) != 0) {
1453 					sense->sense_key_spec[0] =
1454 					    SSD_SCS_VALID|SSD_FIELDPTR_CMD
1455 					   |SSD_BITPTR_VALID| /*bit value*/1;
1456 					sense->sense_key_spec[1] = 0;
1457 					sense->sense_key_spec[2] =
1458 					    offsetof(struct scsi_inquiry,
1459 						     byte2);
1460 					break;
1461 				} else if (inq->page_code != 0) {
1462 					sense->sense_key_spec[0] =
1463 					    SSD_SCS_VALID|SSD_FIELDPTR_CMD;
1464 					sense->sense_key_spec[1] = 0;
1465 					sense->sense_key_spec[2] =
1466 					    offsetof(struct scsi_inquiry,
1467 						     page_code);
1468 					break;
1469 				}
1470 				/*
1471 				 * Direction is always relative
1472 				 * to the initator.
1473 				 */
1474 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1475 				atio->ccb_h.flags |= CAM_DIR_IN;
1476 				descr->data = softc->inq_data;
1477 				descr->data_resid = MIN(softc->inq_data_len,
1478 						       inq->length);
1479 				descr->data_increment = descr->data_resid;
1480 				descr->timeout = 5 * 1000;
1481 				descr->status = SCSI_STATUS_OK;
1482 				break;
1483 			}
1484 			case TEST_UNIT_READY:
1485 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1486 				atio->ccb_h.flags |= CAM_DIR_NONE;
1487 				descr->data_resid = 0;
1488 				descr->data_increment = 0;
1489 				descr->timeout = 5 * 1000;
1490 				descr->status = SCSI_STATUS_OK;
1491 				break;
1492 			case REQUEST_SENSE:
1493 			{
1494 				struct scsi_request_sense *rsense;
1495 				struct scsi_sense_data *sense;
1496 
1497 				rsense = (struct scsi_request_sense *)cdb;
1498 				sense = &istate->sense_data;
1499 				if (pending_ca == 0) {
1500 					fill_sense(softc, atio->init_id,
1501 						   SSD_CURRENT_ERROR,
1502 						   SSD_KEY_NO_SENSE, 0x00,
1503 						   0x00);
1504 					CAM_DEBUG(periph->path,
1505 						  CAM_DEBUG_PERIPH,
1506 						  ("No pending CA!\n"));
1507 				} else if (pending_ca == CA_UNIT_ATTN) {
1508 					u_int ascq;
1509 
1510 					if (pending_ua == UA_POWER_ON)
1511 						ascq = 0x1;
1512 					else
1513 						ascq = 0x2;
1514 					fill_sense(softc, atio->init_id,
1515 						   SSD_CURRENT_ERROR,
1516 						   SSD_KEY_UNIT_ATTENTION,
1517 						   0x29, ascq);
1518 					CAM_DEBUG(periph->path,
1519 						  CAM_DEBUG_PERIPH,
1520 						  ("Pending UA!\n"));
1521 				}
1522 				/*
1523 				 * Direction is always relative
1524 				 * to the initator.
1525 				 */
1526 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1527 				atio->ccb_h.flags |= CAM_DIR_IN;
1528 				descr->data = sense;
1529 				descr->data_resid =
1530 			 		offsetof(struct scsi_sense_data,
1531 						 extra_len)
1532 				      + sense->extra_len;
1533 				descr->data_resid = MIN(descr->data_resid,
1534 						       rsense->length);
1535 				descr->data_increment = descr->data_resid;
1536 				descr->timeout = 5 * 1000;
1537 				descr->status = SCSI_STATUS_OK;
1538 				break;
1539 			}
1540 			case RECEIVE:
1541 			case SEND:
1542 			{
1543 				struct scsi_send_receive *sr;
1544 
1545 				sr = (struct scsi_send_receive *)cdb;
1546 
1547 				/*
1548 				 * Direction is always relative
1549 				 * to the initator.
1550 				 */
1551 				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1552 				descr->data_resid = scsi_3btoul(sr->xfer_len);
1553 				descr->timeout = 5 * 1000;
1554 				descr->status = SCSI_STATUS_OK;
1555 				if (cdb[0] == SEND) {
1556 					atio->ccb_h.flags |= CAM_DIR_OUT;
1557 					CAM_DEBUG(periph->path,
1558 						  CAM_DEBUG_PERIPH,
1559 						  ("Saw a SEND!\n"));
1560 					atio->ccb_h.flags |= CAM_DIR_OUT;
1561 					TAILQ_INSERT_TAIL(&softc->snd_ccb_queue,
1562 							  &atio->ccb_h,
1563 							  periph_links.tqe);
1564 					selwakeup(&softc->snd_select);
1565 				} else {
1566 					atio->ccb_h.flags |= CAM_DIR_IN;
1567 					CAM_DEBUG(periph->path,
1568 						  CAM_DEBUG_PERIPH,
1569 						  ("Saw a RECEIVE!\n"));
1570 					TAILQ_INSERT_TAIL(&softc->rcv_ccb_queue,
1571 							  &atio->ccb_h,
1572 							  periph_links.tqe);
1573 					selwakeup(&softc->rcv_select);
1574 				}
1575 				/*
1576 				 * Attempt to satisfy this request with
1577 				 * a user buffer.
1578 				 */
1579 				targrunqueue(periph, softc);
1580 				return;
1581 			}
1582 			default:
1583 				/*
1584 				 * Queue for consumption by our userland
1585 				 * counterpart and  transition to the exception
1586 				 * state.
1587 				 */
1588 				TAILQ_INSERT_TAIL(&softc->unknown_atio_queue,
1589 						  &atio->ccb_h,
1590 						  periph_links.tqe);
1591 				softc->exceptions |= TARG_EXCEPT_UNKNOWN_ATIO;
1592 				targfireexception(periph, softc);
1593 				return;
1594 			}
1595 		}
1596 
1597 		/* Queue us up to receive a Continue Target I/O ccb. */
1598 		TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1599 				  periph_links.tqe);
1600 		xpt_schedule(periph, /*priority*/1);
1601 		break;
1602 	}
1603 	case XPT_CONT_TARGET_IO:
1604 	{
1605 		struct ccb_scsiio *csio;
1606 		struct ccb_accept_tio *atio;
1607 		struct targ_cmd_desc *desc;
1608 		struct buf *bp;
1609 		int    error;
1610 
1611 		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1612 			  ("Received completed CTIO\n"));
1613 		csio = &done_ccb->csio;
1614 		atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio;
1615 		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1616 
1617 		TAILQ_REMOVE(&softc->pending_queue, &done_ccb->ccb_h,
1618 			     periph_links.tqe);
1619 
1620 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1621 			printf("CCB with error %x\n", done_ccb->ccb_h.status);
1622 			error = targerror(done_ccb, 0, 0);
1623 			if (error == ERESTART)
1624 				break;
1625 			/*
1626 			 * Right now we don't need to do anything
1627 			 * prior to unfreezing the queue...
1628 			 */
1629 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1630 				printf("Releasing Queue\n");
1631 				cam_release_devq(done_ccb->ccb_h.path,
1632 						 /*relsim_flags*/0,
1633 						 /*reduction*/0,
1634 						 /*timeout*/0,
1635 						 /*getcount_only*/0);
1636 			}
1637 		} else
1638 			error = 0;
1639 		desc->data_increment -= csio->resid;
1640 		desc->data_resid -= desc->data_increment;
1641 		if ((bp = desc->bp) != NULL) {
1642 
1643 			bp->b_resid -= desc->data_increment;
1644 			bp->b_error = error;
1645 
1646 			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1647 				  ("Buffer I/O Completed - Resid %ld:%d\n",
1648 				   bp->b_resid, desc->data_resid));
1649 			/*
1650 			 * Send the buffer back to the client if
1651 			 * either the command has completed or all
1652 			 * buffer space has been consumed.
1653 			 */
1654 			if (desc->data_resid == 0
1655 			 || bp->b_resid == 0
1656 			 || error != 0) {
1657 				if (bp->b_resid != 0)
1658 					/* Short transfer */
1659 					bp->b_flags |= B_ERROR;
1660 
1661 				CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1662 					  ("Completing a buffer\n"));
1663 				biodone(bp);
1664 				desc->bp = NULL;
1665 			}
1666 		}
1667 
1668 		xpt_release_ccb(done_ccb);
1669 		if (softc->state != TARG_STATE_TEARDOWN) {
1670 
1671 			if (desc->data_resid == 0) {
1672 				/*
1673 				 * Send the original accept TIO back to the
1674 				 * controller to handle more work.
1675 				 */
1676 				CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1677 					  ("Returning ATIO to target\n"));
1678 				xpt_action((union ccb *)atio);
1679 				break;
1680 			}
1681 
1682 			/* Queue us up for another buffer */
1683 			if (atio->cdb_io.cdb_bytes[0] == SEND) {
1684 				if (desc->bp != NULL)
1685 					TAILQ_INSERT_HEAD(
1686 						&softc->snd_buf_queue.queue,
1687 						bp, b_act);
1688 				TAILQ_INSERT_HEAD(&softc->snd_ccb_queue,
1689 						  &atio->ccb_h,
1690 						  periph_links.tqe);
1691 			} else {
1692 				if (desc->bp != NULL)
1693 					TAILQ_INSERT_HEAD(
1694 						&softc->rcv_buf_queue.queue,
1695 						bp, b_act);
1696 				TAILQ_INSERT_HEAD(&softc->rcv_ccb_queue,
1697 						  &atio->ccb_h,
1698 						  periph_links.tqe);
1699 			}
1700 			desc->bp = NULL;
1701 			targrunqueue(periph, softc);
1702 		} else {
1703 			if (desc->bp != NULL) {
1704 				bp->b_flags |= B_ERROR;
1705 				bp->b_error = ENXIO;
1706 				biodone(bp);
1707 			}
1708 			freedescr(desc);
1709 			free(atio, M_DEVBUF);
1710 		}
1711 		break;
1712 	}
1713 	case XPT_IMMED_NOTIFY:
1714 	{
1715 		int frozen;
1716 
1717 		frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
1718 		if (softc->state == TARG_STATE_TEARDOWN) {
1719 			SLIST_REMOVE(&softc->immed_notify_slist,
1720 				     &done_ccb->ccb_h, ccb_hdr,
1721 				     periph_links.sle);
1722 			free(done_ccb, M_DEVBUF);
1723 		} else if (done_ccb->ccb_h.status == CAM_REQ_ABORTED) {
1724 			free(done_ccb, M_DEVBUF);
1725 		} else {
1726 			printf("Saw event %x:%x\n", done_ccb->ccb_h.status,
1727 			       done_ccb->cin.message_args[0]);
1728 			/* Process error condition. */
1729 			targinoterror(periph, softc, &done_ccb->cin);
1730 
1731 			/* Requeue for another immediate event */
1732 			xpt_action(done_ccb);
1733 		}
1734 		if (frozen != 0)
1735 			cam_release_devq(periph->path,
1736 					 /*relsim_flags*/0,
1737 					 /*opening reduction*/0,
1738 					 /*timeout*/0,
1739 					 /*getcount_only*/0);
1740 		break;
1741 	}
1742 	default:
1743 		panic("targdone: Impossible xpt opcode %x encountered.",
1744 		      done_ccb->ccb_h.func_code);
1745 		/* NOTREACHED */
1746 		break;
1747 	}
1748 }
1749 
1750 /*
1751  * Transition to the exception state and notify our symbiotic
1752  * userland process of the change.
1753  */
1754 static void
1755 targfireexception(struct cam_periph *periph, struct targ_softc *softc)
1756 {
1757 	/*
1758 	 * return all pending buffers with short read/write status so our
1759 	 * process unblocks, and do a selwakeup on any process queued
1760 	 * waiting for reads or writes.  When the selwakeup is performed,
1761 	 * the waking process will wakeup, call our poll routine again,
1762 	 * and pick up the exception.
1763 	 */
1764 	struct buf *bp;
1765 
1766 	if (softc->state != TARG_STATE_NORMAL)
1767 		/* Already either tearing down or in exception state */
1768 		return;
1769 
1770 	softc->state = TARG_STATE_EXCEPTION;
1771 
1772 	while ((bp = bufq_first(&softc->snd_buf_queue)) != NULL) {
1773 		bufq_remove(&softc->snd_buf_queue, bp);
1774 		bp->b_flags |= B_ERROR;
1775 		biodone(bp);
1776 	}
1777 
1778 	while ((bp = bufq_first(&softc->rcv_buf_queue)) != NULL) {
1779 		bufq_remove(&softc->snd_buf_queue, bp);
1780 		bp->b_flags |= B_ERROR;
1781 		biodone(bp);
1782 	}
1783 
1784 	selwakeup(&softc->snd_select);
1785 	selwakeup(&softc->rcv_select);
1786 }
1787 
1788 static void
1789 targinoterror(struct cam_periph *periph, struct targ_softc *softc,
1790 	      struct ccb_immed_notify *inot)
1791 {
1792 	cam_status status;
1793 	int sense;
1794 
1795 	status = inot->ccb_h.status;
1796 	sense = (status & CAM_AUTOSNS_VALID) != 0;
1797 	status &= CAM_STATUS_MASK;
1798 	switch (status) {
1799 	case CAM_SCSI_BUS_RESET:
1800 		set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD,
1801 					UA_BUS_RESET);
1802 		abort_pending_transactions(periph,
1803 					   /*init_id*/CAM_TARGET_WILDCARD,
1804 					   TARG_TAG_WILDCARD, EINTR,
1805 					   /*to_held_queue*/FALSE);
1806 		softc->exceptions |= TARG_EXCEPT_BUS_RESET_SEEN;
1807 		targfireexception(periph, softc);
1808 		break;
1809 	case CAM_BDR_SENT:
1810 		set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD,
1811 					UA_BDR);
1812 		abort_pending_transactions(periph, CAM_TARGET_WILDCARD,
1813 					   TARG_TAG_WILDCARD, EINTR,
1814 					   /*to_held_queue*/FALSE);
1815 		softc->exceptions |= TARG_EXCEPT_BDR_RECEIVED;
1816 		targfireexception(periph, softc);
1817 		break;
1818 	case CAM_MESSAGE_RECV:
1819 		switch (inot->message_args[0]) {
1820 		case MSG_INITIATOR_DET_ERR:
1821 			break;
1822 		case MSG_ABORT:
1823 			break;
1824 		case MSG_BUS_DEV_RESET:
1825 			break;
1826 		case MSG_ABORT_TAG:
1827 			break;
1828 		case MSG_CLEAR_QUEUE:
1829 			break;
1830 		case MSG_TERM_IO_PROC:
1831 			break;
1832 		default:
1833 			break;
1834 		}
1835 		break;
1836 	default:
1837 		break;
1838 	}
1839 }
1840 
/*
 * Recover from a failed CTIO.  Translates the CAM status into an
 * errno, posting sense data and a contingent allegiance condition to
 * the initiator where called for.  Returns ERESTART when the command
 * has been (or will be) reissued; releases the device queue on the
 * caller's behalf when the CCB was frozen and we are retrying.
 */
static int
targerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct cam_periph *periph;
	struct targ_softc *softc;
	struct ccb_scsiio *csio;
	cam_status status;
	int frozen;
	int sense;
	int error;
	int on_held_queue;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct targ_softc *)periph->softc;
	status = ccb->ccb_h.status;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	on_held_queue = FALSE;
	csio = &ccb->csio;
	switch (status) {
	case CAM_REQ_ABORTED:
		printf("Request Aborted!\n");
		if ((ccb->ccb_h.ccb_flags & TARG_CCB_ABORT_TO_HELDQ) != 0) {
			struct initiator_state *istate;

			/*
			 * Place this CCB into the initiators
			 * 'held' queue until the pending CA is cleared.
			 * If there is no CA pending, reissue immediately.
			 */
			istate = &softc->istate[ccb->csio.init_id];
			if (istate->pending_ca == 0) {
				ccb->ccb_h.ccb_flags = TARG_CCB_NONE;
				xpt_action(ccb);
			} else {
				ccb->ccb_h.ccb_flags = TARG_CCB_HELDQ;
				TAILQ_INSERT_TAIL(&softc->pending_queue,
						  &ccb->ccb_h,
						  periph_links.tqe);
			}
			/* The command will be retried at a later time. */
			on_held_queue = TRUE;
			error = ERESTART;
			break;
		}
		/* FALLTHROUGH */
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
	case CAM_REQ_TERMIO:
	case CAM_CMD_TIMEOUT:
		/* Assume we did not send any data */
		csio->resid = csio->dxfer_len;
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
		/* Retry selection a bounded number of times. */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		} else {
			/* "Select or reselect failure" */
			csio->resid = csio->dxfer_len;
			fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
				   SSD_KEY_HARDWARE_ERROR, 0x45, 0x00);
			set_contingent_allegiance_cond(periph,
						       csio->init_id,
						       CA_CMD_SENSE);
			error = EIO;
		}
		break;
	case CAM_UNCOR_PARITY:
		/* "SCSI parity error" */
		fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
			   SSD_KEY_HARDWARE_ERROR, 0x47, 0x00);
		set_contingent_allegiance_cond(periph, csio->init_id,
					       CA_CMD_SENSE);
		csio->resid = csio->dxfer_len;
		error = EIO;
		break;
	case CAM_NO_HBA:
		csio->resid = csio->dxfer_len;
		error = ENXIO;
		break;
	case CAM_SEQUENCE_FAIL:
		/* Forward any autosense data to the initiator state. */
		if (sense != 0) {
			copy_sense(softc, csio);
			set_contingent_allegiance_cond(periph,
						       csio->init_id,
						       CA_CMD_SENSE);
		}
		csio->resid = csio->dxfer_len;
		error = EIO;
		break;
	case CAM_IDE:
		/* "Initiator detected error message received" */
		fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
			   SSD_KEY_HARDWARE_ERROR, 0x48, 0x00);
		set_contingent_allegiance_cond(periph, csio->init_id,
					       CA_CMD_SENSE);
		csio->resid = csio->dxfer_len;
		error = EIO;
		break;
	case CAM_REQUEUE_REQ:
		printf("Requeue Request!\n");
		error = ERESTART;
		break;
	default:
		csio->resid = csio->dxfer_len;
		error = EIO;
		panic("targerror: Unexpected status %x encounterd", status);
		/* NOTREACHED */
	}

	if (error == ERESTART || error == 0) {
		/* Clear the QFRZN flag as we will release the queue */
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;

		/* Reissue now unless it was parked on the held queue. */
		if (error == ERESTART && !on_held_queue)
			xpt_action(ccb);

		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*opening reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
	}
	return (error);
}
1971 
1972 static struct targ_cmd_desc*
1973 allocdescr()
1974 {
1975 	struct targ_cmd_desc* descr;
1976 
1977 	/* Allocate the targ_descr structure */
1978 	descr = (struct targ_cmd_desc *)malloc(sizeof(*descr),
1979 					       M_DEVBUF, M_NOWAIT);
1980 	if (descr == NULL)
1981 		return (NULL);
1982 
1983 	bzero(descr, sizeof(*descr));
1984 
1985 	/* Allocate buffer backing store */
1986 	descr->backing_store = malloc(MAX_BUF_SIZE, M_DEVBUF, M_NOWAIT);
1987 	if (descr->backing_store == NULL) {
1988 		free(descr, M_DEVBUF);
1989 		return (NULL);
1990 	}
1991 	descr->max_size = MAX_BUF_SIZE;
1992 	return (descr);
1993 }
1994 
1995 static void
1996 freedescr(struct targ_cmd_desc *descr)
1997 {
1998 	free(descr->backing_store, M_DEVBUF);
1999 	free(descr, M_DEVBUF);
2000 }
2001 
2002 static void
2003 fill_sense(struct targ_softc *softc, u_int initiator_id, u_int error_code,
2004 	   u_int sense_key, u_int asc, u_int ascq)
2005 {
2006 	struct initiator_state *istate;
2007 	struct scsi_sense_data *sense;
2008 
2009 	istate = &softc->istate[initiator_id];
2010 	sense = &istate->sense_data;
2011 	bzero(sense, sizeof(*sense));
2012 	sense->error_code = error_code;
2013 	sense->flags = sense_key;
2014 	sense->add_sense_code = asc;
2015 	sense->add_sense_code_qual = ascq;
2016 
2017 	sense->extra_len = offsetof(struct scsi_sense_data, fru)
2018 			 - offsetof(struct scsi_sense_data, extra_len);
2019 }
2020 
2021 static void
2022 copy_sense(struct targ_softc *softc, struct ccb_scsiio *csio)
2023 {
2024 	struct initiator_state *istate;
2025 	struct scsi_sense_data *sense;
2026 	size_t copylen;
2027 
2028 	istate = &softc->istate[csio->init_id];
2029 	sense = &istate->sense_data;
2030 	copylen = sizeof(*sense);
2031 	if (copylen > csio->sense_len)
2032 		copylen = csio->sense_len;
2033 	bcopy(&csio->sense_data, sense, copylen);
2034 }
2035 
2036 static void
2037 set_unit_attention_cond(struct cam_periph *periph,
2038 			u_int initiator_id, ua_types ua)
2039 {
2040 	int start;
2041 	int end;
2042 	struct targ_softc *softc;
2043 
2044 	softc = (struct targ_softc *)periph->softc;
2045 	if (initiator_id == CAM_TARGET_WILDCARD) {
2046 		start = 0;
2047 		end = MAX_INITIATORS - 1;
2048 	} else
2049 		start = end = initiator_id;
2050 
2051 	while (start <= end) {
2052 		softc->istate[start].pending_ua = ua;
2053 		start++;
2054 	}
2055 }
2056 
2057 static void
2058 set_contingent_allegiance_cond(struct cam_periph *periph,
2059 			       u_int initiator_id, ca_types ca)
2060 {
2061 	struct targ_softc *softc;
2062 
2063 	softc = (struct targ_softc *)periph->softc;
2064 	softc->istate[initiator_id].pending_ca = ca;
2065 	abort_pending_transactions(periph, initiator_id, TARG_TAG_WILDCARD,
2066 				   /* errno */0, /*to_held_queue*/TRUE);
2067 }
2068 
/*
 * Abort queued and in-flight transactions matching the given initiator
 * and tag (either may be a wildcard).  ATIOs still waiting for
 * resources have their buffers failed and are returned to the
 * controller (or freed during teardown); CTIOs already dispatched are
 * aborted via an XPT_ABORT CCB.  With to_held_queue set, matching
 * CTIOs are instead marked so targerror() re-queues them onto the
 * held queue when the abort completes.
 */
static void
abort_pending_transactions(struct cam_periph *periph, u_int initiator_id,
			   u_int tag_id, int errno, int to_held_queue)
{
	struct ccb_abort cab;
	struct ccb_queue *atio_queues[3];
	struct targ_softc *softc;
	struct ccb_hdr *ccbh;
	u_int i;

	softc = (struct targ_softc *)periph->softc;

	atio_queues[0] = &softc->work_queue;
	atio_queues[1] = &softc->snd_ccb_queue;
	atio_queues[2] = &softc->rcv_ccb_queue;

	/* First address the ATIOs awaiting resources */
	for (i = 0; i < (sizeof(atio_queues) / sizeof(*atio_queues)); i++) {
		struct ccb_queue *atio_queue;

		if (to_held_queue) {
			/*
			 * The device queue is frozen anyway, so there
			 * is nothing for us to do.
			 */
			continue;
		}
		atio_queue = atio_queues[i];
		ccbh = TAILQ_FIRST(atio_queue);
		while (ccbh != NULL) {
			struct ccb_accept_tio *atio;
			struct targ_cmd_desc *desc;

			atio = (struct ccb_accept_tio *)ccbh;
			desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
			/* Advance before a possible removal below. */
			ccbh = TAILQ_NEXT(ccbh, periph_links.tqe);

			/* Only abort the CCBs that match */
			if ((atio->init_id != initiator_id
			  && initiator_id != CAM_TARGET_WILDCARD)
			 || (tag_id != TARG_TAG_WILDCARD
			  && ((atio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
			   || atio->tag_id != tag_id)))
				continue;

			TAILQ_REMOVE(atio_queue, &atio->ccb_h,
				     periph_links.tqe);

			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
				  ("Aborting ATIO\n"));
			if (desc->bp != NULL) {
				desc->bp->b_flags |= B_ERROR;
				if (softc->state != TARG_STATE_TEARDOWN)
					desc->bp->b_error = errno;
				else
					desc->bp->b_error = ENXIO;
				biodone(desc->bp);
				desc->bp = NULL;
			}
			if (softc->state == TARG_STATE_TEARDOWN) {
				freedescr(desc);
				free(atio, M_DEVBUF);
			} else {
				/* Return the ATIO back to the controller */
				xpt_action((union ccb *)atio);
			}
		}
	}

	/* Now deal with CTIOs already handed to the controller. */
	ccbh = TAILQ_FIRST(&softc->pending_queue);
	while (ccbh != NULL) {
		struct ccb_scsiio *csio;

		csio = (struct ccb_scsiio *)ccbh;
		ccbh = TAILQ_NEXT(ccbh, periph_links.tqe);

		/* Only abort the CCBs that match */
		if ((csio->init_id != initiator_id
		  && initiator_id != CAM_TARGET_WILDCARD)
		 || (tag_id != TARG_TAG_WILDCARD
		  && ((csio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
		   || csio->tag_id != tag_id)))
			continue;

		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
			  ("Aborting CTIO\n"));

		/*
		 * NOTE(review): this removes a CCB taken from the
		 * pending_queue using the work_queue head.  Passing the
		 * wrong queue head to TAILQ_REMOVE can corrupt that
		 * queue's tail pointer when the element is last --
		 * verify whether &softc->pending_queue was intended.
		 */
		TAILQ_REMOVE(&softc->work_queue, &csio->ccb_h,
			     periph_links.tqe);

		if (to_held_queue != 0)
			csio->ccb_h.ccb_flags |= TARG_CCB_ABORT_TO_HELDQ;
		xpt_setup_ccb(&cab.ccb_h, csio->ccb_h.path, /*priority*/1);
		cab.abort_ccb = (union ccb *)csio;
		xpt_action((union ccb *)&cab);
		if (cab.ccb_h.status != CAM_REQ_CMP) {
			xpt_print_path(cab.ccb_h.path);
			printf("Unable to abort CCB.  Status %x\n",
			       cab.ccb_h.status);
		}
	}
}
2171