xref: /freebsd/sys/cam/ctl/ctl.c (revision ddd5b8e9b4d8957fce018c520657cdfa4ecffad3)
1 /*-
2  * Copyright (c) 2003-2009 Silicon Graphics International Corp.
3  * Copyright (c) 2012 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by Edward Tomasz Napierala
7  * under sponsorship from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions, and the following disclaimer,
14  *    without modification.
15  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16  *    substantially similar to the "NO WARRANTY" disclaimer below
17  *    ("Disclaimer") and any redistribution must be conditioned upon
18  *    including a substantially similar Disclaimer requirement for further
19  *    binary redistribution.
20  *
21  * NO WARRANTY
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
25  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
30  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
31  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGES.
33  *
34  * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl.c#8 $
35  */
36 /*
37  * CAM Target Layer, a SCSI device emulation subsystem.
38  *
39  * Author: Ken Merry <ken@FreeBSD.org>
40  */
41 
42 #define _CTL_C
43 
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/types.h>
51 #include <sys/kthread.h>
52 #include <sys/bio.h>
53 #include <sys/fcntl.h>
54 #include <sys/lock.h>
55 #include <sys/module.h>
56 #include <sys/mutex.h>
57 #include <sys/condvar.h>
58 #include <sys/malloc.h>
59 #include <sys/conf.h>
60 #include <sys/ioccom.h>
61 #include <sys/queue.h>
62 #include <sys/sbuf.h>
63 #include <sys/endian.h>
64 #include <sys/sysctl.h>
65 
66 #include <cam/cam.h>
67 #include <cam/scsi/scsi_all.h>
68 #include <cam/scsi/scsi_da.h>
69 #include <cam/ctl/ctl_io.h>
70 #include <cam/ctl/ctl.h>
71 #include <cam/ctl/ctl_frontend.h>
72 #include <cam/ctl/ctl_frontend_internal.h>
73 #include <cam/ctl/ctl_util.h>
74 #include <cam/ctl/ctl_backend.h>
75 #include <cam/ctl/ctl_ioctl.h>
76 #include <cam/ctl/ctl_ha.h>
77 #include <cam/ctl/ctl_private.h>
78 #include <cam/ctl/ctl_debug.h>
79 #include <cam/ctl/ctl_scsi_all.h>
80 #include <cam/ctl/ctl_error.h>
81 
82 #include "opt_ctl.h"
83 
84 struct ctl_softc *control_softc = NULL;
85 
86 /*
87  * The default is to run with CTL_DONE_THREAD turned on.  Completed
88  * transactions are queued for processing by the CTL work thread.  When
89  * CTL_DONE_THREAD is not defined, completed transactions are processed in
90  * the caller's context.
91  */
92 #define CTL_DONE_THREAD
93 
94 /*
95  * Use the serial number and device ID provided by the backend, rather than
96  * making up our own.
97  */
98 #define CTL_USE_BACKEND_SN
99 
100 /*
101  * Size and alignment macros needed for Copan-specific HA hardware.  These
102  * can go away when the HA code is re-written, and uses busdma for any
103  * hardware.
104  */
105 #define	CTL_ALIGN_8B(target, source, type)				\
106 	if (((uint32_t)source & 0x7) != 0)				\
107 		target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\
108 	else								\
109 		target = (type)source;
110 
111 #define	CTL_SIZE_8B(target, size)					\
112 	if ((size & 0x7) != 0)						\
113 		target = size + (0x8 - (size & 0x7));			\
114 	else								\
115 		target = size;
116 
117 #define CTL_ALIGN_8B_MARGIN	16
118 
119 /*
120  * Template mode pages.
121  */
122 
123 /*
124  * Note that these are default values only.  The actual values will be
125  * filled in when the user does a mode sense.
126  */
127 static struct copan_power_subpage power_page_default = {
128 	/*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
129 	/*subpage*/ PWR_SUBPAGE_CODE,
130 	/*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00,
131 			 (sizeof(struct copan_power_subpage) - 4) & 0x00ff},
132 	/*page_version*/ PWR_VERSION,
133 	/* total_luns */ 26,
134 	/* max_active_luns*/ PWR_DFLT_MAX_LUNS,
135 	/*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
136 		      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
137 		      0, 0, 0, 0, 0, 0}
138 };
139 
140 static struct copan_power_subpage power_page_changeable = {
141 	/*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
142 	/*subpage*/ PWR_SUBPAGE_CODE,
143 	/*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00,
144 			 (sizeof(struct copan_power_subpage) - 4) & 0x00ff},
145 	/*page_version*/ 0,
146 	/* total_luns */ 0,
147 	/* max_active_luns*/ 0,
148 	/*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
149 		      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
150 		      0, 0, 0, 0, 0, 0}
151 };
152 
153 static struct copan_aps_subpage aps_page_default = {
154 	APS_PAGE_CODE | SMPH_SPF, //page_code
155 	APS_SUBPAGE_CODE, //subpage
156 	{(sizeof(struct copan_aps_subpage) - 4) & 0xff00,
157 	 (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
158 	APS_VERSION, //page_version
159 	0, //lock_active
160 	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
161 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
162 	0, 0, 0, 0, 0} //reserved
163 };
164 
165 static struct copan_aps_subpage aps_page_changeable = {
166 	APS_PAGE_CODE | SMPH_SPF, //page_code
167 	APS_SUBPAGE_CODE, //subpage
168 	{(sizeof(struct copan_aps_subpage) - 4) & 0xff00,
169 	 (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
170 	0, //page_version
171 	0, //lock_active
172 	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
173 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
174 	0, 0, 0, 0, 0} //reserved
175 };
176 
177 static struct copan_debugconf_subpage debugconf_page_default = {
178 	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
179 	DBGCNF_SUBPAGE_CODE,		/* subpage */
180 	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
181 	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
182 	DBGCNF_VERSION,			/* page_version */
183 	{CTL_TIME_IO_DEFAULT_SECS>>8,
184 	 CTL_TIME_IO_DEFAULT_SECS>>0},	/* ctl_time_io_secs */
185 };
186 
187 static struct copan_debugconf_subpage debugconf_page_changeable = {
188 	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
189 	DBGCNF_SUBPAGE_CODE,		/* subpage */
190 	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
191 	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
192 	0,				/* page_version */
193 	{0xff,0xff},			/* ctl_time_io_secs */
194 };
195 
196 static struct scsi_format_page format_page_default = {
197 	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
198 	/*page_length*/sizeof(struct scsi_format_page) - 2,
199 	/*tracks_per_zone*/ {0, 0},
200 	/*alt_sectors_per_zone*/ {0, 0},
201 	/*alt_tracks_per_zone*/ {0, 0},
202 	/*alt_tracks_per_lun*/ {0, 0},
203 	/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
204 			        CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
205 	/*bytes_per_sector*/ {0, 0},
206 	/*interleave*/ {0, 0},
207 	/*track_skew*/ {0, 0},
208 	/*cylinder_skew*/ {0, 0},
209 	/*flags*/ SFP_HSEC,
210 	/*reserved*/ {0, 0, 0}
211 };
212 
213 static struct scsi_format_page format_page_changeable = {
214 	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
215 	/*page_length*/sizeof(struct scsi_format_page) - 2,
216 	/*tracks_per_zone*/ {0, 0},
217 	/*alt_sectors_per_zone*/ {0, 0},
218 	/*alt_tracks_per_zone*/ {0, 0},
219 	/*alt_tracks_per_lun*/ {0, 0},
220 	/*sectors_per_track*/ {0, 0},
221 	/*bytes_per_sector*/ {0, 0},
222 	/*interleave*/ {0, 0},
223 	/*track_skew*/ {0, 0},
224 	/*cylinder_skew*/ {0, 0},
225 	/*flags*/ 0,
226 	/*reserved*/ {0, 0, 0}
227 };
228 
229 static struct scsi_rigid_disk_page rigid_disk_page_default = {
230 	/*page_code*/SMS_RIGID_DISK_PAGE,
231 	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
232 	/*cylinders*/ {0, 0, 0},
233 	/*heads*/ CTL_DEFAULT_HEADS,
234 	/*start_write_precomp*/ {0, 0, 0},
235 	/*start_reduced_current*/ {0, 0, 0},
236 	/*step_rate*/ {0, 0},
237 	/*landing_zone_cylinder*/ {0, 0, 0},
238 	/*rpl*/ SRDP_RPL_DISABLED,
239 	/*rotational_offset*/ 0,
240 	/*reserved1*/ 0,
241 	/*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
242 			   CTL_DEFAULT_ROTATION_RATE & 0xff},
243 	/*reserved2*/ {0, 0}
244 };
245 
246 static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
247 	/*page_code*/SMS_RIGID_DISK_PAGE,
248 	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
249 	/*cylinders*/ {0, 0, 0},
250 	/*heads*/ 0,
251 	/*start_write_precomp*/ {0, 0, 0},
252 	/*start_reduced_current*/ {0, 0, 0},
253 	/*step_rate*/ {0, 0},
254 	/*landing_zone_cylinder*/ {0, 0, 0},
255 	/*rpl*/ 0,
256 	/*rotational_offset*/ 0,
257 	/*reserved1*/ 0,
258 	/*rotation_rate*/ {0, 0},
259 	/*reserved2*/ {0, 0}
260 };
261 
262 static struct scsi_caching_page caching_page_default = {
263 	/*page_code*/SMS_CACHING_PAGE,
264 	/*page_length*/sizeof(struct scsi_caching_page) - 2,
265 	/*flags1*/ SCP_DISC | SCP_WCE,
266 	/*ret_priority*/ 0,
267 	/*disable_pf_transfer_len*/ {0xff, 0xff},
268 	/*min_prefetch*/ {0, 0},
269 	/*max_prefetch*/ {0xff, 0xff},
270 	/*max_pf_ceiling*/ {0xff, 0xff},
271 	/*flags2*/ 0,
272 	/*cache_segments*/ 0,
273 	/*cache_seg_size*/ {0, 0},
274 	/*reserved*/ 0,
275 	/*non_cache_seg_size*/ {0, 0, 0}
276 };
277 
278 static struct scsi_caching_page caching_page_changeable = {
279 	/*page_code*/SMS_CACHING_PAGE,
280 	/*page_length*/sizeof(struct scsi_caching_page) - 2,
281 	/*flags1*/ 0,
282 	/*ret_priority*/ 0,
283 	/*disable_pf_transfer_len*/ {0, 0},
284 	/*min_prefetch*/ {0, 0},
285 	/*max_prefetch*/ {0, 0},
286 	/*max_pf_ceiling*/ {0, 0},
287 	/*flags2*/ 0,
288 	/*cache_segments*/ 0,
289 	/*cache_seg_size*/ {0, 0},
290 	/*reserved*/ 0,
291 	/*non_cache_seg_size*/ {0, 0, 0}
292 };
293 
294 static struct scsi_control_page control_page_default = {
295 	/*page_code*/SMS_CONTROL_MODE_PAGE,
296 	/*page_length*/sizeof(struct scsi_control_page) - 2,
297 	/*rlec*/0,
298 	/*queue_flags*/0,
299 	/*eca_and_aen*/0,
300 	/*reserved*/0,
301 	/*aen_holdoff_period*/{0, 0}
302 };
303 
304 static struct scsi_control_page control_page_changeable = {
305 	/*page_code*/SMS_CONTROL_MODE_PAGE,
306 	/*page_length*/sizeof(struct scsi_control_page) - 2,
307 	/*rlec*/SCP_DSENSE,
308 	/*queue_flags*/0,
309 	/*eca_and_aen*/0,
310 	/*reserved*/0,
311 	/*aen_holdoff_period*/{0, 0}
312 };
313 
314 
315 /*
316  * XXX KDM move these into the softc.
317  */
318 static int rcv_sync_msg;
319 static int persis_offset;
320 static uint8_t ctl_pause_rtr;
321 static int     ctl_is_single = 1;
322 static int     index_to_aps_page;
323 #ifdef CTL_DISABLE
324 int	   ctl_disable = 1;
325 #else
326 int	   ctl_disable = 0;
327 #endif
328 
329 SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
330 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, disable, CTLFLAG_RDTUN, &ctl_disable, 0,
331 	   "Disable CTL");
332 TUNABLE_INT("kern.cam.ctl.disable", &ctl_disable);
333 
334 /*
335  * Serial number (0x80), device id (0x83), and supported pages (0x00)
336  */
337 #define SCSI_EVPD_NUM_SUPPORTED_PAGES	3
338 
339 static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event,
340 				  int param);
341 static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
342 static int ctl_init(void);
343 void ctl_shutdown(void);
344 static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
345 static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
346 static void ctl_ioctl_online(void *arg);
347 static void ctl_ioctl_offline(void *arg);
348 static int ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id);
349 static int ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id);
350 static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id);
351 static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id);
352 static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio);
353 static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock);
354 static int ctl_ioctl_submit_wait(union ctl_io *io);
355 static void ctl_ioctl_datamove(union ctl_io *io);
356 static void ctl_ioctl_done(union ctl_io *io);
357 static void ctl_ioctl_hard_startstop_callback(void *arg,
358 					      struct cfi_metatask *metatask);
359 static void ctl_ioctl_bbrread_callback(void *arg,struct cfi_metatask *metatask);
360 static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
361 			      struct ctl_ooa *ooa_hdr,
362 			      struct ctl_ooa_entry *kern_entries);
363 static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
364 		     struct thread *td);
365 uint32_t ctl_get_resindex(struct ctl_nexus *nexus);
366 uint32_t ctl_port_idx(int port_num);
367 #ifdef unused
368 static union ctl_io *ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port,
369 				   uint32_t targ_target, uint32_t targ_lun,
370 				   int can_wait);
371 static void ctl_kfree_io(union ctl_io *io);
372 #endif /* unused */
373 static void ctl_free_io_internal(union ctl_io *io, int have_lock);
374 static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
375 			 struct ctl_be_lun *be_lun, struct ctl_id target_id);
376 static int ctl_free_lun(struct ctl_lun *lun);
377 static void ctl_create_lun(struct ctl_be_lun *be_lun);
378 /**
379 static void ctl_failover_change_pages(struct ctl_softc *softc,
380 				      struct ctl_scsiio *ctsio, int master);
381 **/
382 
383 static int ctl_do_mode_select(union ctl_io *io);
384 static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
385 			   uint64_t res_key, uint64_t sa_res_key,
386 			   uint8_t type, uint32_t residx,
387 			   struct ctl_scsiio *ctsio,
388 			   struct scsi_per_res_out *cdb,
389 			   struct scsi_per_res_out_parms* param);
390 static void ctl_pro_preempt_other(struct ctl_lun *lun,
391 				  union ctl_ha_msg *msg);
392 static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
393 static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
394 static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
395 static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
396 static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
397 static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
398 static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len);
399 static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2);
400 static ctl_action ctl_check_for_blockage(union ctl_io *pending_io,
401 					 union ctl_io *ooa_io);
402 static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
403 				union ctl_io *starting_io);
404 static int ctl_check_blocked(struct ctl_lun *lun);
405 static int ctl_scsiio_lun_check(struct ctl_softc *ctl_softc,
406 				struct ctl_lun *lun,
407 				struct ctl_cmd_entry *entry,
408 				struct ctl_scsiio *ctsio);
409 //static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
410 static void ctl_failover(void);
411 static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
412 			       struct ctl_scsiio *ctsio);
413 static int ctl_scsiio(struct ctl_scsiio *ctsio);
414 
415 static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
416 static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
417 			    ctl_ua_type ua_type);
418 static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
419 			 ctl_ua_type ua_type);
420 static int ctl_abort_task(union ctl_io *io);
421 static void ctl_run_task_queue(struct ctl_softc *ctl_softc);
422 #ifdef CTL_IO_DELAY
423 static void ctl_datamove_timer_wakeup(void *arg);
424 static void ctl_done_timer_wakeup(void *arg);
425 #endif /* CTL_IO_DELAY */
426 
427 static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
428 static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
429 static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
430 static void ctl_datamove_remote_write(union ctl_io *io);
431 static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
432 static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
433 static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
434 static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
435 				    ctl_ha_dt_cb callback);
436 static void ctl_datamove_remote_read(union ctl_io *io);
437 static void ctl_datamove_remote(union ctl_io *io);
438 static int ctl_process_done(union ctl_io *io, int have_lock);
439 static void ctl_work_thread(void *arg);
440 
441 /*
442  * Load the serialization table.  This isn't very pretty, but is probably
443  * the easiest way to do it.
444  */
445 #include "ctl_ser_table.c"
446 
447 /*
448  * We only need to define open, close and ioctl routines for this driver.
449  */
450 static struct cdevsw ctl_cdevsw = {
451 	.d_version =	D_VERSION,
452 	.d_flags =	0,
453 	.d_open =	ctl_open,
454 	.d_close =	ctl_close,
455 	.d_ioctl =	ctl_ioctl,
456 	.d_name =	"ctl",
457 };
458 
459 
460 MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");
461 
462 static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);
463 
464 static moduledata_t ctl_moduledata = {
465 	"ctl",
466 	ctl_module_event_handler,
467 	NULL
468 };
469 
470 DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
471 MODULE_VERSION(ctl, 1);
472 
473 static void
474 ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
475 			    union ctl_ha_msg *msg_info)
476 {
477 	struct ctl_scsiio *ctsio;
478 
479 	if (msg_info->hdr.original_sc == NULL) {
480 		printf("%s: original_sc == NULL!\n", __func__);
481 		/* XXX KDM now what? */
482 		return;
483 	}
484 
485 	ctsio = &msg_info->hdr.original_sc->scsiio;
486 	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
487 	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
488 	ctsio->io_hdr.status = msg_info->hdr.status;
489 	ctsio->scsi_status = msg_info->scsi.scsi_status;
490 	ctsio->sense_len = msg_info->scsi.sense_len;
491 	ctsio->sense_residual = msg_info->scsi.sense_residual;
492 	ctsio->residual = msg_info->scsi.residual;
493 	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
494 	       sizeof(ctsio->sense_data));
495 	memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
496 	       &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
497 	STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, &ctsio->io_hdr, links);
498 	ctl_wakeup_thread();
499 }
500 
501 static void
502 ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
503 				union ctl_ha_msg *msg_info)
504 {
505 	struct ctl_scsiio *ctsio;
506 
507 	if (msg_info->hdr.serializing_sc == NULL) {
508 		printf("%s: serializing_sc == NULL!\n", __func__);
509 		/* XXX KDM now what? */
510 		return;
511 	}
512 
513 	ctsio = &msg_info->hdr.serializing_sc->scsiio;
514 #if 0
515 	/*
516 	 * Attempt to catch the situation where an I/O has
517 	 * been freed, and we're using it again.
518 	 */
519 	if (ctsio->io_hdr.io_type == 0xff) {
520 		union ctl_io *tmp_io;
521 		tmp_io = (union ctl_io *)ctsio;
522 		printf("%s: %p use after free!\n", __func__,
523 		       ctsio);
524 		printf("%s: type %d msg %d cdb %x iptl: "
525 		       "%d:%d:%d:%d tag 0x%04x "
526 		       "flag %#x status %x\n",
527 			__func__,
528 			tmp_io->io_hdr.io_type,
529 			tmp_io->io_hdr.msg_type,
530 			tmp_io->scsiio.cdb[0],
531 			tmp_io->io_hdr.nexus.initid.id,
532 			tmp_io->io_hdr.nexus.targ_port,
533 			tmp_io->io_hdr.nexus.targ_target.id,
534 			tmp_io->io_hdr.nexus.targ_lun,
535 			(tmp_io->io_hdr.io_type ==
536 			CTL_IO_TASK) ?
537 			tmp_io->taskio.tag_num :
538 			tmp_io->scsiio.tag_num,
539 		        tmp_io->io_hdr.flags,
540 			tmp_io->io_hdr.status);
541 	}
542 #endif
543 	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
544 	STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, &ctsio->io_hdr, links);
545 	ctl_wakeup_thread();
546 }
547 
548 /*
549  * ISC (Inter Shelf Communication) event handler.  Events from the HA
550  * subsystem come in here.
551  */
552 static void
553 ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
554 {
555 	struct ctl_softc *ctl_softc;
556 	union ctl_io *io;
557 	struct ctl_prio *presio;
558 	ctl_ha_status isc_status;
559 
560 	ctl_softc = control_softc;
561 	io = NULL;
562 
563 
564 #if 0
565 	printf("CTL: Isc Msg event %d\n", event);
566 #endif
567 	if (event == CTL_HA_EVT_MSG_RECV) {
568 		union ctl_ha_msg msg_info;
569 
570 		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
571 					     sizeof(msg_info), /*wait*/ 0);
572 #if 0
573 		printf("CTL: msg_type %d\n", msg_info.msg_type);
574 #endif
575 		if (isc_status != 0) {
576 			printf("Error receiving message, status = %d\n",
577 			       isc_status);
578 			return;
579 		}
580 		mtx_lock(&ctl_softc->ctl_lock);
581 
582 		switch (msg_info.hdr.msg_type) {
583 		case CTL_MSG_SERIALIZE:
584 #if 0
585 			printf("Serialize\n");
586 #endif
587 			io = ctl_alloc_io((void *)ctl_softc->othersc_pool);
588 			if (io == NULL) {
589 				printf("ctl_isc_event_handler: can't allocate "
590 				       "ctl_io!\n");
591 				/* Bad Juju */
592 				/* Need to set busy and send msg back */
593 				mtx_unlock(&ctl_softc->ctl_lock);
594 				msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
595 				msg_info.hdr.status = CTL_SCSI_ERROR;
596 				msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
597 				msg_info.scsi.sense_len = 0;
598 			        if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
599 				    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){
600 				}
601 				goto bailout;
602 			}
603 			ctl_zero_io(io);
604 			// populate ctsio from msg_info
605 			io->io_hdr.io_type = CTL_IO_SCSI;
606 			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
607 			io->io_hdr.original_sc = msg_info.hdr.original_sc;
608 #if 0
609 			printf("pOrig %x\n", (int)msg_info.original_sc);
610 #endif
611 			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
612 					    CTL_FLAG_IO_ACTIVE;
613 			/*
614 			 * If we're in serialization-only mode, we don't
615 			 * want to go through full done processing.  Thus
616 			 * the COPY flag.
617 			 *
618 			 * XXX KDM add another flag that is more specific.
619 			 */
620 			if (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)
621 				io->io_hdr.flags |= CTL_FLAG_INT_COPY;
622 			io->io_hdr.nexus = msg_info.hdr.nexus;
623 #if 0
624 			printf("targ %d, port %d, iid %d, lun %d\n",
625 			       io->io_hdr.nexus.targ_target.id,
626 			       io->io_hdr.nexus.targ_port,
627 			       io->io_hdr.nexus.initid.id,
628 			       io->io_hdr.nexus.targ_lun);
629 #endif
630 			io->scsiio.tag_num = msg_info.scsi.tag_num;
631 			io->scsiio.tag_type = msg_info.scsi.tag_type;
632 			memcpy(io->scsiio.cdb, msg_info.scsi.cdb,
633 			       CTL_MAX_CDBLEN);
634 			if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
635 				struct ctl_cmd_entry *entry;
636 				uint8_t opcode;
637 
638 				opcode = io->scsiio.cdb[0];
639 				entry = &ctl_cmd_table[opcode];
640 				io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
641 				io->io_hdr.flags |=
642 					entry->flags & CTL_FLAG_DATA_MASK;
643 			}
644 			STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
645 					   &io->io_hdr, links);
646 			ctl_wakeup_thread();
647 			break;
648 
649 		/* Performed on the Originating SC, XFER mode only */
650 		case CTL_MSG_DATAMOVE: {
651 			struct ctl_sg_entry *sgl;
652 			int i, j;
653 
654 			io = msg_info.hdr.original_sc;
655 			if (io == NULL) {
656 				printf("%s: original_sc == NULL!\n", __func__);
657 				/* XXX KDM do something here */
658 				break;
659 			}
660 			io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
661 			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
662 			/*
663 			 * Keep track of this, we need to send it back over
664 			 * when the datamove is complete.
665 			 */
666 			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
667 
668 			if (msg_info.dt.sg_sequence == 0) {
669 				/*
670 				 * XXX KDM we use the preallocated S/G list
671 				 * here, but we'll need to change this to
672 				 * dynamic allocation if we need larger S/G
673 				 * lists.
674 				 */
675 				if (msg_info.dt.kern_sg_entries >
676 				    sizeof(io->io_hdr.remote_sglist) /
677 				    sizeof(io->io_hdr.remote_sglist[0])) {
678 					printf("%s: number of S/G entries "
679 					    "needed %u > allocated num %zd\n",
680 					    __func__,
681 					    msg_info.dt.kern_sg_entries,
682 					    sizeof(io->io_hdr.remote_sglist)/
683 					    sizeof(io->io_hdr.remote_sglist[0]));
684 
685 					/*
686 					 * XXX KDM send a message back to
687 					 * the other side to shut down the
688 					 * DMA.  The error will come back
689 					 * through via the normal channel.
690 					 */
691 					break;
692 				}
693 				sgl = io->io_hdr.remote_sglist;
694 				memset(sgl, 0,
695 				       sizeof(io->io_hdr.remote_sglist));
696 
697 				io->scsiio.kern_data_ptr = (uint8_t *)sgl;
698 
699 				io->scsiio.kern_sg_entries =
700 					msg_info.dt.kern_sg_entries;
701 				io->scsiio.rem_sg_entries =
702 					msg_info.dt.kern_sg_entries;
703 				io->scsiio.kern_data_len =
704 					msg_info.dt.kern_data_len;
705 				io->scsiio.kern_total_len =
706 					msg_info.dt.kern_total_len;
707 				io->scsiio.kern_data_resid =
708 					msg_info.dt.kern_data_resid;
709 				io->scsiio.kern_rel_offset =
710 					msg_info.dt.kern_rel_offset;
711 				/*
712 				 * Clear out per-DMA flags.
713 				 */
714 				io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK;
715 				/*
716 				 * Add per-DMA flags that are set for this
717 				 * particular DMA request.
718 				 */
719 				io->io_hdr.flags |= msg_info.dt.flags &
720 						    CTL_FLAG_RDMA_MASK;
721 			} else
722 				sgl = (struct ctl_sg_entry *)
723 					io->scsiio.kern_data_ptr;
724 
725 			for (i = msg_info.dt.sent_sg_entries, j = 0;
726 			     i < (msg_info.dt.sent_sg_entries +
727 			     msg_info.dt.cur_sg_entries); i++, j++) {
728 				sgl[i].addr = msg_info.dt.sg_list[j].addr;
729 				sgl[i].len = msg_info.dt.sg_list[j].len;
730 
731 #if 0
732 				printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n",
733 				       __func__,
734 				       msg_info.dt.sg_list[j].addr,
735 				       msg_info.dt.sg_list[j].len,
736 				       sgl[i].addr, sgl[i].len, j, i);
737 #endif
738 			}
739 #if 0
740 			memcpy(&sgl[msg_info.dt.sent_sg_entries],
741 			       msg_info.dt.sg_list,
742 			       sizeof(*sgl) * msg_info.dt.cur_sg_entries);
743 #endif
744 
745 			/*
746 			 * If this is the last piece of the I/O, we've got
747 			 * the full S/G list.  Queue processing in the thread.
748 			 * Otherwise wait for the next piece.
749 			 */
750 			if (msg_info.dt.sg_last != 0) {
751 				STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
752 						   &io->io_hdr, links);
753 				ctl_wakeup_thread();
754 			}
755 			break;
756 		}
757 		/* Performed on the Serializing (primary) SC, XFER mode only */
758 		case CTL_MSG_DATAMOVE_DONE: {
759 			if (msg_info.hdr.serializing_sc == NULL) {
760 				printf("%s: serializing_sc == NULL!\n",
761 				       __func__);
762 				/* XXX KDM now what? */
763 				break;
764 			}
765 			/*
766 			 * We grab the sense information here in case
767 			 * there was a failure, so we can return status
768 			 * back to the initiator.
769 			 */
770 			io = msg_info.hdr.serializing_sc;
771 			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
772 			io->io_hdr.status = msg_info.hdr.status;
773 			io->scsiio.scsi_status = msg_info.scsi.scsi_status;
774 			io->scsiio.sense_len = msg_info.scsi.sense_len;
775 			io->scsiio.sense_residual =msg_info.scsi.sense_residual;
776 			io->io_hdr.port_status = msg_info.scsi.fetd_status;
777 			io->scsiio.residual = msg_info.scsi.residual;
778 			memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data,
779 			       sizeof(io->scsiio.sense_data));
780 
781 			STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
782 					   &io->io_hdr, links);
783 			ctl_wakeup_thread();
784 			break;
785 		}
786 
787 		/* Preformed on Originating SC, SER_ONLY mode */
788 		case CTL_MSG_R2R:
789 			io = msg_info.hdr.original_sc;
790 			if (io == NULL) {
791 				printf("%s: Major Bummer\n", __func__);
792 				mtx_unlock(&ctl_softc->ctl_lock);
793 				return;
794 			} else {
795 #if 0
796 				printf("pOrig %x\n",(int) ctsio);
797 #endif
798 			}
799 			io->io_hdr.msg_type = CTL_MSG_R2R;
800 			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
801 			STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
802 					   &io->io_hdr, links);
803 			ctl_wakeup_thread();
804 			break;
805 
806 		/*
807 		 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY
808 		 * mode.
809 		 * Performed on the Originating (i.e. secondary) SC in XFER
810 		 * mode
811 		 */
812 		case CTL_MSG_FINISH_IO:
813 			if (ctl_softc->ha_mode == CTL_HA_MODE_XFER)
814 				ctl_isc_handler_finish_xfer(ctl_softc,
815 							    &msg_info);
816 			else
817 				ctl_isc_handler_finish_ser_only(ctl_softc,
818 								&msg_info);
819 			break;
820 
821 		/* Preformed on Originating SC */
822 		case CTL_MSG_BAD_JUJU:
823 			io = msg_info.hdr.original_sc;
824 			if (io == NULL) {
825 				printf("%s: Bad JUJU!, original_sc is NULL!\n",
826 				       __func__);
827 				break;
828 			}
829 			ctl_copy_sense_data(&msg_info, io);
830 			/*
831 			 * IO should have already been cleaned up on other
832 			 * SC so clear this flag so we won't send a message
833 			 * back to finish the IO there.
834 			 */
835 			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
836 			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
837 
838 			/* io = msg_info.hdr.serializing_sc; */
839 			io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
840 		        STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
841 					   &io->io_hdr, links);
842 			ctl_wakeup_thread();
843 			break;
844 
845 		/* Handle resets sent from the other side */
846 		case CTL_MSG_MANAGE_TASKS: {
847 			struct ctl_taskio *taskio;
848 			taskio = (struct ctl_taskio *)ctl_alloc_io(
849 				(void *)ctl_softc->othersc_pool);
850 			if (taskio == NULL) {
851 				printf("ctl_isc_event_handler: can't allocate "
852 				       "ctl_io!\n");
853 				/* Bad Juju */
854 				/* should I just call the proper reset func
855 				   here??? */
856 				mtx_unlock(&ctl_softc->ctl_lock);
857 				goto bailout;
858 			}
859 			ctl_zero_io((union ctl_io *)taskio);
860 			taskio->io_hdr.io_type = CTL_IO_TASK;
861 			taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
862 			taskio->io_hdr.nexus = msg_info.hdr.nexus;
863 			taskio->task_action = msg_info.task.task_action;
864 			taskio->tag_num = msg_info.task.tag_num;
865 			taskio->tag_type = msg_info.task.tag_type;
866 #ifdef CTL_TIME_IO
867 			taskio->io_hdr.start_time = time_uptime;
868 			getbintime(&taskio->io_hdr.start_bt);
869 #if 0
870 			cs_prof_gettime(&taskio->io_hdr.start_ticks);
871 #endif
872 #endif /* CTL_TIME_IO */
873 		        STAILQ_INSERT_TAIL(&ctl_softc->task_queue,
874 					   &taskio->io_hdr, links);
875 			ctl_softc->flags |= CTL_FLAG_TASK_PENDING;
876 			ctl_wakeup_thread();
877 			break;
878 		}
879 		/* Persistent Reserve action which needs attention */
880 		case CTL_MSG_PERS_ACTION:
881 			presio = (struct ctl_prio *)ctl_alloc_io(
882 				(void *)ctl_softc->othersc_pool);
883 			if (presio == NULL) {
884 				printf("ctl_isc_event_handler: can't allocate "
885 				       "ctl_io!\n");
886 				/* Bad Juju */
887 				/* Need to set busy and send msg back */
888 				mtx_unlock(&ctl_softc->ctl_lock);
889 				goto bailout;
890 			}
891 			ctl_zero_io((union ctl_io *)presio);
892 			presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
893 			presio->pr_msg = msg_info.pr;
894 		        STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
895 					   &presio->io_hdr, links);
896 			ctl_wakeup_thread();
897 			break;
898 		case CTL_MSG_SYNC_FE:
899 			rcv_sync_msg = 1;
900 			break;
901 		case CTL_MSG_APS_LOCK: {
902 			// It's quicker to execute this then to
903 			// queue it.
904 			struct ctl_lun *lun;
905 			struct ctl_page_index *page_index;
906 			struct copan_aps_subpage *current_sp;
907 
908 			lun = ctl_softc->ctl_luns[msg_info.hdr.nexus.targ_lun];
909 			page_index = &lun->mode_pages.index[index_to_aps_page];
910 			current_sp = (struct copan_aps_subpage *)
911 				     (page_index->page_data +
912 				     (page_index->page_len * CTL_PAGE_CURRENT));
913 
914 			current_sp->lock_active = msg_info.aps.lock_flag;
915 		        break;
916 		}
917 		default:
918 		        printf("How did I get here?\n");
919 		}
920 		mtx_unlock(&ctl_softc->ctl_lock);
921 	} else if (event == CTL_HA_EVT_MSG_SENT) {
922 		if (param != CTL_HA_STATUS_SUCCESS) {
923 			printf("Bad status from ctl_ha_msg_send status %d\n",
924 			       param);
925 		}
926 		return;
927 	} else if (event == CTL_HA_EVT_DISCONNECT) {
928 		printf("CTL: Got a disconnect from Isc\n");
929 		return;
930 	} else {
931 		printf("ctl_isc_event_handler: Unknown event %d\n", event);
932 		return;
933 	}
934 
935 bailout:
936 	return;
937 }
938 
939 static void
940 ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
941 {
942 	struct scsi_sense_data *sense;
943 
944 	sense = &dest->scsiio.sense_data;
945 	bcopy(&src->scsi.sense_data, sense, sizeof(*sense));
946 	dest->scsiio.scsi_status = src->scsi.scsi_status;
947 	dest->scsiio.sense_len = src->scsi.sense_len;
948 	dest->io_hdr.status = src->hdr.status;
949 }
950 
951 static int
952 ctl_init(void)
953 {
954 	struct ctl_softc *softc;
955 	struct ctl_io_pool *internal_pool, *emergency_pool, *other_pool;
956 	struct ctl_frontend *fe;
957 	struct ctl_lun *lun;
958         uint8_t sc_id =0;
959 #if 0
960 	int i;
961 #endif
962 	int error, retval;
963 	//int isc_retval;
964 
965 	retval = 0;
966 	ctl_pause_rtr = 0;
967         rcv_sync_msg = 0;
968 
969 	/* If we're disabled, don't initialize. */
970 	if (ctl_disable != 0)
971 		return (0);
972 
973 	control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
974 			       M_WAITOK | M_ZERO);
975 	softc = control_softc;
976 
977 	softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600,
978 			      "cam/ctl");
979 
980 	softc->dev->si_drv1 = softc;
981 
982 	/*
983 	 * By default, return a "bad LUN" peripheral qualifier for unknown
984 	 * LUNs.  The user can override this default using the tunable or
985 	 * sysctl.  See the comment in ctl_inquiry_std() for more details.
986 	 */
987 	softc->inquiry_pq_no_lun = 1;
988 	TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun",
989 			  &softc->inquiry_pq_no_lun);
990 	sysctl_ctx_init(&softc->sysctl_ctx);
991 	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
992 		SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
993 		CTLFLAG_RD, 0, "CAM Target Layer");
994 
995 	if (softc->sysctl_tree == NULL) {
996 		printf("%s: unable to allocate sysctl tree\n", __func__);
997 		destroy_dev(softc->dev);
998 		free(control_softc, M_DEVBUF);
999 		control_softc = NULL;
1000 		return (ENOMEM);
1001 	}
1002 
1003 	SYSCTL_ADD_INT(&softc->sysctl_ctx,
1004 		       SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
1005 		       "inquiry_pq_no_lun", CTLFLAG_RW,
1006 		       &softc->inquiry_pq_no_lun, 0,
1007 		       "Report no lun possible for invalid LUNs");
1008 
1009 	mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
1010 	softc->open_count = 0;
1011 
1012 	/*
1013 	 * Default to actually sending a SYNCHRONIZE CACHE command down to
1014 	 * the drive.
1015 	 */
1016 	softc->flags = CTL_FLAG_REAL_SYNC;
1017 
1018 	/*
1019 	 * In Copan's HA scheme, the "master" and "slave" roles are
1020 	 * figured out through the slot the controller is in.  Although it
1021 	 * is an active/active system, someone has to be in charge.
1022  	 */
1023 #ifdef NEEDTOPORT
1024         scmicro_rw(SCMICRO_GET_SHELF_ID, &sc_id);
1025 #endif
1026 
1027         if (sc_id == 0) {
1028 		softc->flags |= CTL_FLAG_MASTER_SHELF;
1029 		persis_offset = 0;
1030 	} else
1031 		persis_offset = CTL_MAX_INITIATORS;
1032 
1033 	/*
1034 	 * XXX KDM need to figure out where we want to get our target ID
1035 	 * and WWID.  Is it different on each port?
1036 	 */
1037 	softc->target.id = 0;
1038 	softc->target.wwid[0] = 0x12345678;
1039 	softc->target.wwid[1] = 0x87654321;
1040 	STAILQ_INIT(&softc->lun_list);
1041 	STAILQ_INIT(&softc->pending_lun_queue);
1042 	STAILQ_INIT(&softc->task_queue);
1043 	STAILQ_INIT(&softc->incoming_queue);
1044 	STAILQ_INIT(&softc->rtr_queue);
1045 	STAILQ_INIT(&softc->done_queue);
1046 	STAILQ_INIT(&softc->isc_queue);
1047 	STAILQ_INIT(&softc->fe_list);
1048 	STAILQ_INIT(&softc->be_list);
1049 	STAILQ_INIT(&softc->io_pools);
1050 
1051 	lun = &softc->lun;
1052 
1053 	/*
1054 	 * We don't bother calling these with ctl_lock held here, because,
1055 	 * in theory, no one else can try to do anything while we're in our
1056 	 * module init routine.
1057 	 */
1058 	if (ctl_pool_create(softc, CTL_POOL_INTERNAL, CTL_POOL_ENTRIES_INTERNAL,
1059 			    &internal_pool)!= 0){
1060 		printf("ctl: can't allocate %d entry internal pool, "
1061 		       "exiting\n", CTL_POOL_ENTRIES_INTERNAL);
1062 		return (ENOMEM);
1063 	}
1064 
1065 	if (ctl_pool_create(softc, CTL_POOL_EMERGENCY,
1066 			    CTL_POOL_ENTRIES_EMERGENCY, &emergency_pool) != 0) {
1067 		printf("ctl: can't allocate %d entry emergency pool, "
1068 		       "exiting\n", CTL_POOL_ENTRIES_EMERGENCY);
1069 		ctl_pool_free(softc, internal_pool);
1070 		return (ENOMEM);
1071 	}
1072 
1073 	if (ctl_pool_create(softc, CTL_POOL_4OTHERSC, CTL_POOL_ENTRIES_OTHER_SC,
1074 	                    &other_pool) != 0)
1075 	{
1076 		printf("ctl: can't allocate %d entry other SC pool, "
1077 		       "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
1078 		ctl_pool_free(softc, internal_pool);
1079 		ctl_pool_free(softc, emergency_pool);
1080 		return (ENOMEM);
1081 	}
1082 
1083 	softc->internal_pool = internal_pool;
1084 	softc->emergency_pool = emergency_pool;
1085 	softc->othersc_pool = other_pool;
1086 
1087 	ctl_pool_acquire(internal_pool);
1088 	ctl_pool_acquire(emergency_pool);
1089 	ctl_pool_acquire(other_pool);
1090 
1091 	/*
1092 	 * We used to allocate a processor LUN here.  The new scheme is to
1093 	 * just let the user allocate LUNs as he sees fit.
1094 	 */
1095 #if 0
1096 	mtx_lock(&softc->ctl_lock);
1097 	ctl_alloc_lun(softc, lun, /*be_lun*/NULL, /*target*/softc->target);
1098 	mtx_unlock(&softc->ctl_lock);
1099 #endif
1100 
1101 	error = kproc_create(ctl_work_thread, softc, &softc->work_thread, 0, 0,
1102 			 "ctl_thrd");
1103 	if (error != 0) {
1104 		printf("error creating CTL work thread!\n");
1105 		ctl_free_lun(lun);
1106 		ctl_pool_free(softc, internal_pool);
1107 		ctl_pool_free(softc, emergency_pool);
1108 		ctl_pool_free(softc, other_pool);
1109 		return (error);
1110 	}
1111 	printf("ctl: CAM Target Layer loaded\n");
1112 
1113 	/*
1114 	 * Initialize the initiator and portname mappings
1115 	 */
1116 	memset(softc->wwpn_iid, 0, sizeof(softc->wwpn_iid));
1117 
1118 	/*
1119 	 * Initialize the ioctl front end.
1120 	 */
1121 	fe = &softc->ioctl_info.fe;
1122 	sprintf(softc->ioctl_info.port_name, "CTL ioctl");
1123 	fe->port_type = CTL_PORT_IOCTL;
1124 	fe->num_requested_ctl_io = 100;
1125 	fe->port_name = softc->ioctl_info.port_name;
1126 	fe->port_online = ctl_ioctl_online;
1127 	fe->port_offline = ctl_ioctl_offline;
1128 	fe->onoff_arg = &softc->ioctl_info;
1129 	fe->targ_enable = ctl_ioctl_targ_enable;
1130 	fe->targ_disable = ctl_ioctl_targ_disable;
1131 	fe->lun_enable = ctl_ioctl_lun_enable;
1132 	fe->lun_disable = ctl_ioctl_lun_disable;
1133 	fe->targ_lun_arg = &softc->ioctl_info;
1134 	fe->fe_datamove = ctl_ioctl_datamove;
1135 	fe->fe_done = ctl_ioctl_done;
1136 	fe->max_targets = 15;
1137 	fe->max_target_id = 15;
1138 
1139 	if (ctl_frontend_register(&softc->ioctl_info.fe,
1140 	                  (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0) {
1141 		printf("ctl: ioctl front end registration failed, will "
1142 		       "continue anyway\n");
1143 	}
1144 
1145 #ifdef CTL_IO_DELAY
1146 	if (sizeof(struct callout) > CTL_TIMER_BYTES) {
1147 		printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n",
1148 		       sizeof(struct callout), CTL_TIMER_BYTES);
1149 		return (EINVAL);
1150 	}
1151 #endif /* CTL_IO_DELAY */
1152 
1153 	return (0);
1154 }
1155 
1156 void
1157 ctl_shutdown(void)
1158 {
1159 	struct ctl_softc *softc;
1160 	struct ctl_lun *lun, *next_lun;
1161 	struct ctl_io_pool *pool, *next_pool;
1162 
1163 	softc = (struct ctl_softc *)control_softc;
1164 
1165 	if (ctl_frontend_deregister(&softc->ioctl_info.fe) != 0)
1166 		printf("ctl: ioctl front end deregistration failed\n");
1167 
1168 	mtx_lock(&softc->ctl_lock);
1169 
1170 	/*
1171 	 * Free up each LUN.
1172 	 */
1173 	for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
1174 		next_lun = STAILQ_NEXT(lun, links);
1175 		ctl_free_lun(lun);
1176 	}
1177 
1178 	/*
1179 	 * This will rip the rug out from under any FETDs or anyone else
1180 	 * that has a pool allocated.  Since we increment our module
1181 	 * refcount any time someone outside the main CTL module allocates
1182 	 * a pool, we shouldn't have any problems here.  The user won't be
1183 	 * able to unload the CTL module until client modules have
1184 	 * successfully unloaded.
1185 	 */
1186 	for (pool = STAILQ_FIRST(&softc->io_pools); pool != NULL;
1187 	     pool = next_pool) {
1188 		next_pool = STAILQ_NEXT(pool, links);
1189 		ctl_pool_free(softc, pool);
1190 	}
1191 
1192 	mtx_unlock(&softc->ctl_lock);
1193 
1194 #if 0
1195 	ctl_shutdown_thread(softc->work_thread);
1196 #endif
1197 
1198 	mtx_destroy(&softc->ctl_lock);
1199 
1200 	destroy_dev(softc->dev);
1201 
1202 	sysctl_ctx_free(&softc->sysctl_ctx);
1203 
1204 	free(control_softc, M_DEVBUF);
1205 	control_softc = NULL;
1206 
1207 	printf("ctl: CAM Target Layer unloaded\n");
1208 }
1209 
1210 static int
1211 ctl_module_event_handler(module_t mod, int what, void *arg)
1212 {
1213 
1214 	switch (what) {
1215 	case MOD_LOAD:
1216 		return (ctl_init());
1217 	case MOD_UNLOAD:
1218 		return (EBUSY);
1219 	default:
1220 		return (EOPNOTSUPP);
1221 	}
1222 }
1223 
1224 /*
1225  * XXX KDM should we do some access checks here?  Bump a reference count to
1226  * prevent a CTL module from being unloaded while someone has it open?
1227  */
1228 static int
1229 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
1230 {
1231 	return (0);
1232 }
1233 
1234 static int
1235 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
1236 {
1237 	return (0);
1238 }
1239 
1240 int
1241 ctl_port_enable(ctl_port_type port_type)
1242 {
1243 	struct ctl_softc *softc;
1244 	struct ctl_frontend *fe;
1245 
1246 	if (ctl_is_single == 0) {
1247 		union ctl_ha_msg msg_info;
1248 		int isc_retval;
1249 
1250 #if 0
1251 		printf("%s: HA mode, synchronizing frontend enable\n",
1252 		        __func__);
1253 #endif
1254 		msg_info.hdr.msg_type = CTL_MSG_SYNC_FE;
1255 	        if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1256 		        sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) {
1257 			printf("Sync msg send error retval %d\n", isc_retval);
1258 		}
1259 		if (!rcv_sync_msg) {
1260 			isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
1261 			        sizeof(msg_info), 1);
1262 		}
1263 #if 0
1264         	printf("CTL:Frontend Enable\n");
1265 	} else {
1266 		printf("%s: single mode, skipping frontend synchronization\n",
1267 		        __func__);
1268 #endif
1269 	}
1270 
1271 	softc = control_softc;
1272 
1273 	STAILQ_FOREACH(fe, &softc->fe_list, links) {
1274 		if (port_type & fe->port_type)
1275 		{
1276 #if 0
1277 			printf("port %d\n", fe->targ_port);
1278 #endif
1279 			ctl_frontend_online(fe);
1280 		}
1281 	}
1282 
1283 	return (0);
1284 }
1285 
1286 int
1287 ctl_port_disable(ctl_port_type port_type)
1288 {
1289 	struct ctl_softc *softc;
1290 	struct ctl_frontend *fe;
1291 
1292 	softc = control_softc;
1293 
1294 	STAILQ_FOREACH(fe, &softc->fe_list, links) {
1295 		if (port_type & fe->port_type)
1296 			ctl_frontend_offline(fe);
1297 	}
1298 
1299 	return (0);
1300 }
1301 
1302 /*
1303  * Returns 0 for success, 1 for failure.
1304  * Currently the only failure mode is if there aren't enough entries
1305  * allocated.  So, in case of a failure, look at num_entries_dropped,
1306  * reallocate and try again.
1307  */
1308 int
1309 ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
1310 	      int *num_entries_filled, int *num_entries_dropped,
1311 	      ctl_port_type port_type, int no_virtual)
1312 {
1313 	struct ctl_softc *softc;
1314 	struct ctl_frontend *fe;
1315 	int entries_dropped, entries_filled;
1316 	int retval;
1317 	int i;
1318 
1319 	softc = control_softc;
1320 
1321 	retval = 0;
1322 	entries_filled = 0;
1323 	entries_dropped = 0;
1324 
1325 	i = 0;
1326 	mtx_lock(&softc->ctl_lock);
1327 	STAILQ_FOREACH(fe, &softc->fe_list, links) {
1328 		struct ctl_port_entry *entry;
1329 
1330 		if ((fe->port_type & port_type) == 0)
1331 			continue;
1332 
1333 		if ((no_virtual != 0)
1334 		 && (fe->virtual_port != 0))
1335 			continue;
1336 
1337 		if (entries_filled >= num_entries_alloced) {
1338 			entries_dropped++;
1339 			continue;
1340 		}
1341 		entry = &entries[i];
1342 
1343 		entry->port_type = fe->port_type;
1344 		strlcpy(entry->port_name, fe->port_name,
1345 			sizeof(entry->port_name));
1346 		entry->physical_port = fe->physical_port;
1347 		entry->virtual_port = fe->virtual_port;
1348 		entry->wwnn = fe->wwnn;
1349 		entry->wwpn = fe->wwpn;
1350 
1351 		i++;
1352 		entries_filled++;
1353 	}
1354 
1355 	mtx_unlock(&softc->ctl_lock);
1356 
1357 	if (entries_dropped > 0)
1358 		retval = 1;
1359 
1360 	*num_entries_dropped = entries_dropped;
1361 	*num_entries_filled = entries_filled;
1362 
1363 	return (retval);
1364 }
1365 
1366 static void
1367 ctl_ioctl_online(void *arg)
1368 {
1369 	struct ctl_ioctl_info *ioctl_info;
1370 
1371 	ioctl_info = (struct ctl_ioctl_info *)arg;
1372 
1373 	ioctl_info->flags |= CTL_IOCTL_FLAG_ENABLED;
1374 }
1375 
1376 static void
1377 ctl_ioctl_offline(void *arg)
1378 {
1379 	struct ctl_ioctl_info *ioctl_info;
1380 
1381 	ioctl_info = (struct ctl_ioctl_info *)arg;
1382 
1383 	ioctl_info->flags &= ~CTL_IOCTL_FLAG_ENABLED;
1384 }
1385 
1386 /*
1387  * Remove an initiator by port number and initiator ID.
1388  * Returns 0 for success, 1 for failure.
1389  * Assumes the caller does NOT hold the CTL lock.
1390  */
1391 int
1392 ctl_remove_initiator(int32_t targ_port, uint32_t iid)
1393 {
1394 	struct ctl_softc *softc;
1395 
1396 	softc = control_softc;
1397 
1398 	if ((targ_port < 0)
1399 	 || (targ_port > CTL_MAX_PORTS)) {
1400 		printf("%s: invalid port number %d\n", __func__, targ_port);
1401 		return (1);
1402 	}
1403 	if (iid > CTL_MAX_INIT_PER_PORT) {
1404 		printf("%s: initiator ID %u > maximun %u!\n",
1405 		       __func__, iid, CTL_MAX_INIT_PER_PORT);
1406 		return (1);
1407 	}
1408 
1409 	mtx_lock(&softc->ctl_lock);
1410 
1411 	softc->wwpn_iid[targ_port][iid].in_use = 0;
1412 
1413 	mtx_unlock(&softc->ctl_lock);
1414 
1415 	return (0);
1416 }
1417 
1418 /*
1419  * Add an initiator to the initiator map.
1420  * Returns 0 for success, 1 for failure.
1421  * Assumes the caller does NOT hold the CTL lock.
1422  */
1423 int
1424 ctl_add_initiator(uint64_t wwpn, int32_t targ_port, uint32_t iid)
1425 {
1426 	struct ctl_softc *softc;
1427 	int retval;
1428 
1429 	softc = control_softc;
1430 
1431 	retval = 0;
1432 
1433 	if ((targ_port < 0)
1434 	 || (targ_port > CTL_MAX_PORTS)) {
1435 		printf("%s: invalid port number %d\n", __func__, targ_port);
1436 		return (1);
1437 	}
1438 	if (iid > CTL_MAX_INIT_PER_PORT) {
1439 		printf("%s: WWPN %#jx initiator ID %u > maximun %u!\n",
1440 		       __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT);
1441 		return (1);
1442 	}
1443 
1444 	mtx_lock(&softc->ctl_lock);
1445 
1446 	if (softc->wwpn_iid[targ_port][iid].in_use != 0) {
1447 		/*
1448 		 * We don't treat this as an error.
1449 		 */
1450 		if (softc->wwpn_iid[targ_port][iid].wwpn == wwpn) {
1451 			printf("%s: port %d iid %u WWPN %#jx arrived again?\n",
1452 			       __func__, targ_port, iid, (uintmax_t)wwpn);
1453 			goto bailout;
1454 		}
1455 
1456 		/*
1457 		 * This is an error, but what do we do about it?  The
1458 		 * driver is telling us we have a new WWPN for this
1459 		 * initiator ID, so we pretty much need to use it.
1460 		 */
1461 		printf("%s: port %d iid %u WWPN %#jx arrived, WWPN %#jx is "
1462 		       "still at that address\n", __func__, targ_port, iid,
1463 		       (uintmax_t)wwpn,
1464 		       (uintmax_t)softc->wwpn_iid[targ_port][iid].wwpn);
1465 
1466 		/*
1467 		 * XXX KDM clear have_ca and ua_pending on each LUN for
1468 		 * this initiator.
1469 		 */
1470 	}
1471 	softc->wwpn_iid[targ_port][iid].in_use = 1;
1472 	softc->wwpn_iid[targ_port][iid].iid = iid;
1473 	softc->wwpn_iid[targ_port][iid].wwpn = wwpn;
1474 	softc->wwpn_iid[targ_port][iid].port = targ_port;
1475 
1476 bailout:
1477 
1478 	mtx_unlock(&softc->ctl_lock);
1479 
1480 	return (retval);
1481 }
1482 
1483 /*
1484  * XXX KDM should we pretend to do something in the target/lun
1485  * enable/disable functions?
1486  */
1487 static int
1488 ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id)
1489 {
1490 	return (0);
1491 }
1492 
1493 static int
1494 ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id)
1495 {
1496 	return (0);
1497 }
1498 
1499 static int
1500 ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
1501 {
1502 	return (0);
1503 }
1504 
1505 static int
1506 ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
1507 {
1508 	return (0);
1509 }
1510 
1511 /*
1512  * Data movement routine for the CTL ioctl frontend port.
1513  */
1514 static int
1515 ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
1516 {
1517 	struct ctl_sg_entry *ext_sglist, *kern_sglist;
1518 	struct ctl_sg_entry ext_entry, kern_entry;
1519 	int ext_sglen, ext_sg_entries, kern_sg_entries;
1520 	int ext_sg_start, ext_offset;
1521 	int len_to_copy, len_copied;
1522 	int kern_watermark, ext_watermark;
1523 	int ext_sglist_malloced;
1524 	int i, j;
1525 
1526 	ext_sglist_malloced = 0;
1527 	ext_sg_start = 0;
1528 	ext_offset = 0;
1529 
1530 	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));
1531 
1532 	/*
1533 	 * If this flag is set, fake the data transfer.
1534 	 */
1535 	if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
1536 		ctsio->ext_data_filled = ctsio->ext_data_len;
1537 		goto bailout;
1538 	}
1539 
1540 	/*
1541 	 * To simplify things here, if we have a single buffer, stick it in
1542 	 * a S/G entry and just make it a single entry S/G list.
1543 	 */
1544 	if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
1545 		int len_seen;
1546 
1547 		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
1548 
1549 		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
1550 							   M_WAITOK);
1551 		ext_sglist_malloced = 1;
1552 		if (copyin(ctsio->ext_data_ptr, ext_sglist,
1553 				   ext_sglen) != 0) {
1554 			ctl_set_internal_failure(ctsio,
1555 						 /*sks_valid*/ 0,
1556 						 /*retry_count*/ 0);
1557 			goto bailout;
1558 		}
1559 		ext_sg_entries = ctsio->ext_sg_entries;
1560 		len_seen = 0;
1561 		for (i = 0; i < ext_sg_entries; i++) {
1562 			if ((len_seen + ext_sglist[i].len) >=
1563 			     ctsio->ext_data_filled) {
1564 				ext_sg_start = i;
1565 				ext_offset = ctsio->ext_data_filled - len_seen;
1566 				break;
1567 			}
1568 			len_seen += ext_sglist[i].len;
1569 		}
1570 	} else {
1571 		ext_sglist = &ext_entry;
1572 		ext_sglist->addr = ctsio->ext_data_ptr;
1573 		ext_sglist->len = ctsio->ext_data_len;
1574 		ext_sg_entries = 1;
1575 		ext_sg_start = 0;
1576 		ext_offset = ctsio->ext_data_filled;
1577 	}
1578 
1579 	if (ctsio->kern_sg_entries > 0) {
1580 		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
1581 		kern_sg_entries = ctsio->kern_sg_entries;
1582 	} else {
1583 		kern_sglist = &kern_entry;
1584 		kern_sglist->addr = ctsio->kern_data_ptr;
1585 		kern_sglist->len = ctsio->kern_data_len;
1586 		kern_sg_entries = 1;
1587 	}
1588 
1589 
1590 	kern_watermark = 0;
1591 	ext_watermark = ext_offset;
1592 	len_copied = 0;
1593 	for (i = ext_sg_start, j = 0;
1594 	     i < ext_sg_entries && j < kern_sg_entries;) {
1595 		uint8_t *ext_ptr, *kern_ptr;
1596 
1597 		len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark,
1598 				      kern_sglist[j].len - kern_watermark);
1599 
1600 		ext_ptr = (uint8_t *)ext_sglist[i].addr;
1601 		ext_ptr = ext_ptr + ext_watermark;
1602 		if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
1603 			/*
1604 			 * XXX KDM fix this!
1605 			 */
1606 			panic("need to implement bus address support");
1607 #if 0
1608 			kern_ptr = bus_to_virt(kern_sglist[j].addr);
1609 #endif
1610 		} else
1611 			kern_ptr = (uint8_t *)kern_sglist[j].addr;
1612 		kern_ptr = kern_ptr + kern_watermark;
1613 
1614 		kern_watermark += len_to_copy;
1615 		ext_watermark += len_to_copy;
1616 
1617 		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
1618 		     CTL_FLAG_DATA_IN) {
1619 			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
1620 					 "bytes to user\n", len_to_copy));
1621 			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
1622 					 "to %p\n", kern_ptr, ext_ptr));
1623 			if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
1624 				ctl_set_internal_failure(ctsio,
1625 							 /*sks_valid*/ 0,
1626 							 /*retry_count*/ 0);
1627 				goto bailout;
1628 			}
1629 		} else {
1630 			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
1631 					 "bytes from user\n", len_to_copy));
1632 			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
1633 					 "to %p\n", ext_ptr, kern_ptr));
1634 			if (copyin(ext_ptr, kern_ptr, len_to_copy)!= 0){
1635 				ctl_set_internal_failure(ctsio,
1636 							 /*sks_valid*/ 0,
1637 							 /*retry_count*/0);
1638 				goto bailout;
1639 			}
1640 		}
1641 
1642 		len_copied += len_to_copy;
1643 
1644 		if (ext_sglist[i].len == ext_watermark) {
1645 			i++;
1646 			ext_watermark = 0;
1647 		}
1648 
1649 		if (kern_sglist[j].len == kern_watermark) {
1650 			j++;
1651 			kern_watermark = 0;
1652 		}
1653 	}
1654 
1655 	ctsio->ext_data_filled += len_copied;
1656 
1657 	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
1658 			 "kern_sg_entries: %d\n", ext_sg_entries,
1659 			 kern_sg_entries));
1660 	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
1661 			 "kern_data_len = %d\n", ctsio->ext_data_len,
1662 			 ctsio->kern_data_len));
1663 
1664 
1665 	/* XXX KDM set residual?? */
1666 bailout:
1667 
1668 	if (ext_sglist_malloced != 0)
1669 		free(ext_sglist, M_CTL);
1670 
1671 	return (CTL_RETVAL_COMPLETE);
1672 }
1673 
1674 /*
1675  * Serialize a command that went down the "wrong" side, and so was sent to
1676  * this controller for execution.  The logic is a little different than the
1677  * standard case in ctl_scsiio_precheck().  Errors in this case need to get
1678  * sent back to the other side, but in the success case, we execute the
1679  * command on this side (XFER mode) or tell the other side to execute it
1680  * (SER_ONLY mode).
1681  */
1682 static int
1683 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock)
1684 {
1685 	struct ctl_softc *ctl_softc;
1686 	union ctl_ha_msg msg_info;
1687 	struct ctl_lun *lun;
1688 	int retval = 0;
1689 
1690 	ctl_softc = control_softc;
1691 	if (have_lock == 0)
1692 		mtx_lock(&ctl_softc->ctl_lock);
1693 
1694 	lun = ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun];
1695 	if (lun==NULL)
1696 	{
1697 		/*
1698 		 * Why isn't LUN defined? The other side wouldn't
1699 		 * send a cmd if the LUN is undefined.
1700 		 */
1701 		printf("%s: Bad JUJU!, LUN is NULL!\n", __func__);
1702 
1703 		/* "Logical unit not supported" */
1704 		ctl_set_sense_data(&msg_info.scsi.sense_data,
1705 				   lun,
1706 				   /*sense_format*/SSD_TYPE_NONE,
1707 				   /*current_error*/ 1,
1708 				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1709 				   /*asc*/ 0x25,
1710 				   /*ascq*/ 0x00,
1711 				   SSD_ELEM_NONE);
1712 
1713 		msg_info.scsi.sense_len = SSD_FULL_SIZE;
1714 		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
1715 		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
1716 		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1717 		msg_info.hdr.serializing_sc = NULL;
1718 		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
1719 	        if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1720 				sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
1721 		}
1722 		if (have_lock == 0)
1723 			mtx_unlock(&ctl_softc->ctl_lock);
1724 		return(1);
1725 
1726 	}
1727 
1728     	TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1729 
1730 	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
1731 		(union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
1732 		 ooa_links))) {
1733 	case CTL_ACTION_BLOCK:
1734 		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
1735 		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
1736 				  blocked_links);
1737 		break;
1738 	case CTL_ACTION_PASS:
1739 	case CTL_ACTION_SKIP:
1740 		if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
1741 			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
1742 			STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
1743 					   &ctsio->io_hdr, links);
1744 		} else {
1745 
1746 			/* send msg back to other side */
1747 			msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1748 			msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
1749 			msg_info.hdr.msg_type = CTL_MSG_R2R;
1750 #if 0
1751 			printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc);
1752 #endif
1753 		        if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1754 			    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
1755 			}
1756 		}
1757 		break;
1758 	case CTL_ACTION_OVERLAP:
1759 		/* OVERLAPPED COMMANDS ATTEMPTED */
1760 		ctl_set_sense_data(&msg_info.scsi.sense_data,
1761 				   lun,
1762 				   /*sense_format*/SSD_TYPE_NONE,
1763 				   /*current_error*/ 1,
1764 				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1765 				   /*asc*/ 0x4E,
1766 				   /*ascq*/ 0x00,
1767 				   SSD_ELEM_NONE);
1768 
1769 		msg_info.scsi.sense_len = SSD_FULL_SIZE;
1770 		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
1771 		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
1772 		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1773 		msg_info.hdr.serializing_sc = NULL;
1774 		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
1775 #if 0
1776 		printf("BAD JUJU:Major Bummer Overlap\n");
1777 #endif
1778 		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1779 		retval = 1;
1780 		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1781 		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
1782 		}
1783 		break;
1784 	case CTL_ACTION_OVERLAP_TAG:
1785 		/* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */
1786 		ctl_set_sense_data(&msg_info.scsi.sense_data,
1787 				   lun,
1788 				   /*sense_format*/SSD_TYPE_NONE,
1789 				   /*current_error*/ 1,
1790 				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1791 				   /*asc*/ 0x4D,
1792 				   /*ascq*/ ctsio->tag_num & 0xff,
1793 				   SSD_ELEM_NONE);
1794 
1795 		msg_info.scsi.sense_len = SSD_FULL_SIZE;
1796 		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
1797 		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
1798 		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1799 		msg_info.hdr.serializing_sc = NULL;
1800 		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
1801 #if 0
1802 		printf("BAD JUJU:Major Bummer Overlap Tag\n");
1803 #endif
1804 		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1805 		retval = 1;
1806 		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1807 		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
1808 		}
1809 		break;
1810 	case CTL_ACTION_ERROR:
1811 	default:
1812 		/* "Internal target failure" */
1813 		ctl_set_sense_data(&msg_info.scsi.sense_data,
1814 				   lun,
1815 				   /*sense_format*/SSD_TYPE_NONE,
1816 				   /*current_error*/ 1,
1817 				   /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
1818 				   /*asc*/ 0x44,
1819 				   /*ascq*/ 0x00,
1820 				   SSD_ELEM_NONE);
1821 
1822 		msg_info.scsi.sense_len = SSD_FULL_SIZE;
1823 		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
1824 		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
1825 		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1826 		msg_info.hdr.serializing_sc = NULL;
1827 		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
1828 #if 0
1829 		printf("BAD JUJU:Major Bummer HW Error\n");
1830 #endif
1831 		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1832 		retval = 1;
1833 		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1834 		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
1835 		}
1836 		break;
1837 	}
1838 	if (have_lock == 0)
1839 		mtx_unlock(&ctl_softc->ctl_lock);
1840 	return (retval);
1841 }
1842 
1843 static int
1844 ctl_ioctl_submit_wait(union ctl_io *io)
1845 {
1846 	struct ctl_fe_ioctl_params params;
1847 	ctl_fe_ioctl_state last_state;
1848 	int done, retval;
1849 
1850 	retval = 0;
1851 
1852 	bzero(&params, sizeof(params));
1853 
1854 	mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
1855 	cv_init(&params.sem, "ctlioccv");
1856 	params.state = CTL_IOCTL_INPROG;
1857 	last_state = params.state;
1858 
1859 	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;
1860 
1861 	CTL_DEBUG_PRINT(("ctl_ioctl_submit_wait\n"));
1862 
1863 	/* This shouldn't happen */
1864 	if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE)
1865 		return (retval);
1866 
1867 	done = 0;
1868 
1869 	do {
1870 		mtx_lock(&params.ioctl_mtx);
1871 		/*
1872 		 * Check the state here, and don't sleep if the state has
1873 		 * already changed (i.e. wakeup has already occured, but we
1874 		 * weren't waiting yet).
1875 		 */
1876 		if (params.state == last_state) {
1877 			/* XXX KDM cv_wait_sig instead? */
1878 			cv_wait(&params.sem, &params.ioctl_mtx);
1879 		}
1880 		last_state = params.state;
1881 
1882 		switch (params.state) {
1883 		case CTL_IOCTL_INPROG:
1884 			/* Why did we wake up? */
1885 			/* XXX KDM error here? */
1886 			mtx_unlock(&params.ioctl_mtx);
1887 			break;
1888 		case CTL_IOCTL_DATAMOVE:
1889 			CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));
1890 
1891 			/*
1892 			 * change last_state back to INPROG to avoid
1893 			 * deadlock on subsequent data moves.
1894 			 */
1895 			params.state = last_state = CTL_IOCTL_INPROG;
1896 
1897 			mtx_unlock(&params.ioctl_mtx);
1898 			ctl_ioctl_do_datamove(&io->scsiio);
1899 			/*
1900 			 * Note that in some cases, most notably writes,
1901 			 * this will queue the I/O and call us back later.
1902 			 * In other cases, generally reads, this routine
1903 			 * will immediately call back and wake us up,
1904 			 * probably using our own context.
1905 			 */
1906 			io->scsiio.be_move_done(io);
1907 			break;
1908 		case CTL_IOCTL_DONE:
1909 			mtx_unlock(&params.ioctl_mtx);
1910 			CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
1911 			done = 1;
1912 			break;
1913 		default:
1914 			mtx_unlock(&params.ioctl_mtx);
1915 			/* XXX KDM error here? */
1916 			break;
1917 		}
1918 	} while (done == 0);
1919 
1920 	mtx_destroy(&params.ioctl_mtx);
1921 	cv_destroy(&params.sem);
1922 
1923 	return (CTL_RETVAL_COMPLETE);
1924 }
1925 
1926 static void
1927 ctl_ioctl_datamove(union ctl_io *io)
1928 {
1929 	struct ctl_fe_ioctl_params *params;
1930 
1931 	params = (struct ctl_fe_ioctl_params *)
1932 		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1933 
1934 	mtx_lock(&params->ioctl_mtx);
1935 	params->state = CTL_IOCTL_DATAMOVE;
1936 	cv_broadcast(&params->sem);
1937 	mtx_unlock(&params->ioctl_mtx);
1938 }
1939 
1940 static void
1941 ctl_ioctl_done(union ctl_io *io)
1942 {
1943 	struct ctl_fe_ioctl_params *params;
1944 
1945 	params = (struct ctl_fe_ioctl_params *)
1946 		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1947 
1948 	mtx_lock(&params->ioctl_mtx);
1949 	params->state = CTL_IOCTL_DONE;
1950 	cv_broadcast(&params->sem);
1951 	mtx_unlock(&params->ioctl_mtx);
1952 }
1953 
1954 static void
1955 ctl_ioctl_hard_startstop_callback(void *arg, struct cfi_metatask *metatask)
1956 {
1957 	struct ctl_fe_ioctl_startstop_info *sd_info;
1958 
1959 	sd_info = (struct ctl_fe_ioctl_startstop_info *)arg;
1960 
1961 	sd_info->hs_info.status = metatask->status;
1962 	sd_info->hs_info.total_luns = metatask->taskinfo.startstop.total_luns;
1963 	sd_info->hs_info.luns_complete =
1964 		metatask->taskinfo.startstop.luns_complete;
1965 	sd_info->hs_info.luns_failed = metatask->taskinfo.startstop.luns_failed;
1966 
1967 	cv_broadcast(&sd_info->sem);
1968 }
1969 
1970 static void
1971 ctl_ioctl_bbrread_callback(void *arg, struct cfi_metatask *metatask)
1972 {
1973 	struct ctl_fe_ioctl_bbrread_info *fe_bbr_info;
1974 
1975 	fe_bbr_info = (struct ctl_fe_ioctl_bbrread_info *)arg;
1976 
1977 	mtx_lock(fe_bbr_info->lock);
1978 	fe_bbr_info->bbr_info->status = metatask->status;
1979 	fe_bbr_info->bbr_info->bbr_status = metatask->taskinfo.bbrread.status;
1980 	fe_bbr_info->wakeup_done = 1;
1981 	mtx_unlock(fe_bbr_info->lock);
1982 
1983 	cv_broadcast(&fe_bbr_info->sem);
1984 }
1985 
1986 /*
1987  * Must be called with the ctl_lock held.
1988  * Returns 0 for success, errno for failure.
1989  */
1990 static int
1991 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
1992 		   struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
1993 {
1994 	union ctl_io *io;
1995 	int retval;
1996 
1997 	retval = 0;
1998 
1999 	for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
2000 	     (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
2001 	     ooa_links)) {
2002 		struct ctl_ooa_entry *entry;
2003 
2004 		/*
2005 		 * If we've got more than we can fit, just count the
2006 		 * remaining entries.
2007 		 */
2008 		if (*cur_fill_num >= ooa_hdr->alloc_num)
2009 			continue;
2010 
2011 		entry = &kern_entries[*cur_fill_num];
2012 
2013 		entry->tag_num = io->scsiio.tag_num;
2014 		entry->lun_num = lun->lun;
2015 #ifdef CTL_TIME_IO
2016 		entry->start_bt = io->io_hdr.start_bt;
2017 #endif
2018 		bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len);
2019 		entry->cdb_len = io->scsiio.cdb_len;
2020 		if (io->io_hdr.flags & CTL_FLAG_BLOCKED)
2021 			entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;
2022 
2023 		if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
2024 			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA;
2025 
2026 		if (io->io_hdr.flags & CTL_FLAG_ABORT)
2027 			entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT;
2028 
2029 		if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR)
2030 			entry->cmd_flags |= CTL_OOACMD_FLAG_RTR;
2031 
2032 		if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
2033 			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
2034 	}
2035 
2036 	return (retval);
2037 }
2038 
2039 static void *
2040 ctl_copyin_alloc(void *user_addr, int len, char *error_str,
2041 		 size_t error_str_len)
2042 {
2043 	void *kptr;
2044 
2045 	kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO);
2046 
2047 	if (copyin(user_addr, kptr, len) != 0) {
2048 		snprintf(error_str, error_str_len, "Error copying %d bytes "
2049 			 "from user address %p to kernel address %p", len,
2050 			 user_addr, kptr);
2051 		free(kptr, M_CTL);
2052 		return (NULL);
2053 	}
2054 
2055 	return (kptr);
2056 }
2057 
2058 static void
2059 ctl_free_args(int num_be_args, struct ctl_be_arg *be_args)
2060 {
2061 	int i;
2062 
2063 	if (be_args == NULL)
2064 		return;
2065 
2066 	for (i = 0; i < num_be_args; i++) {
2067 		free(be_args[i].kname, M_CTL);
2068 		free(be_args[i].kvalue, M_CTL);
2069 	}
2070 
2071 	free(be_args, M_CTL);
2072 }
2073 
2074 static struct ctl_be_arg *
2075 ctl_copyin_args(int num_be_args, struct ctl_be_arg *be_args,
2076 		char *error_str, size_t error_str_len)
2077 {
2078 	struct ctl_be_arg *args;
2079 	int i;
2080 
2081 	args = ctl_copyin_alloc(be_args, num_be_args * sizeof(*be_args),
2082 				error_str, error_str_len);
2083 
2084 	if (args == NULL)
2085 		goto bailout;
2086 
2087 	for (i = 0; i < num_be_args; i++) {
2088 		args[i].kname = NULL;
2089 		args[i].kvalue = NULL;
2090 	}
2091 
2092 	for (i = 0; i < num_be_args; i++) {
2093 		uint8_t *tmpptr;
2094 
2095 		args[i].kname = ctl_copyin_alloc(args[i].name,
2096 			args[i].namelen, error_str, error_str_len);
2097 		if (args[i].kname == NULL)
2098 			goto bailout;
2099 
2100 		if (args[i].kname[args[i].namelen - 1] != '\0') {
2101 			snprintf(error_str, error_str_len, "Argument %d "
2102 				 "name is not NUL-terminated", i);
2103 			goto bailout;
2104 		}
2105 
2106 		args[i].kvalue = NULL;
2107 
2108 		tmpptr = ctl_copyin_alloc(args[i].value,
2109 			args[i].vallen, error_str, error_str_len);
2110 		if (tmpptr == NULL)
2111 			goto bailout;
2112 
2113 		args[i].kvalue = tmpptr;
2114 
2115 		if ((args[i].flags & CTL_BEARG_ASCII)
2116 		 && (tmpptr[args[i].vallen - 1] != '\0')) {
2117 			snprintf(error_str, error_str_len, "Argument %d "
2118 				 "value is not NUL-terminated", i);
2119 			goto bailout;
2120 		}
2121 	}
2122 
2123 	return (args);
2124 bailout:
2125 
2126 	ctl_free_args(num_be_args, args);
2127 
2128 	return (NULL);
2129 }
2130 
2131 /*
2132  * Escape characters that are illegal or not recommended in XML.
2133  */
2134 int
2135 ctl_sbuf_printf_esc(struct sbuf *sb, char *str)
2136 {
2137 	int retval;
2138 
2139 	retval = 0;
2140 
2141 	for (; *str; str++) {
2142 		switch (*str) {
2143 		case '&':
2144 			retval = sbuf_printf(sb, "&amp;");
2145 			break;
2146 		case '>':
2147 			retval = sbuf_printf(sb, "&gt;");
2148 			break;
2149 		case '<':
2150 			retval = sbuf_printf(sb, "&lt;");
2151 			break;
2152 		default:
2153 			retval = sbuf_putc(sb, *str);
2154 			break;
2155 		}
2156 
2157 		if (retval != 0)
2158 			break;
2159 
2160 	}
2161 
2162 	return (retval);
2163 }
2164 
2165 static int
2166 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
2167 	  struct thread *td)
2168 {
2169 	struct ctl_softc *softc;
2170 	int retval;
2171 
2172 	softc = control_softc;
2173 
2174 	retval = 0;
2175 
2176 	switch (cmd) {
2177 	case CTL_IO: {
2178 		union ctl_io *io;
2179 		void *pool_tmp;
2180 
2181 		/*
2182 		 * If we haven't been "enabled", don't allow any SCSI I/O
2183 		 * to this FETD.
2184 		 */
2185 		if ((softc->ioctl_info.flags & CTL_IOCTL_FLAG_ENABLED) == 0) {
2186 			retval = -EPERM;
2187 			break;
2188 		}
2189 
2190 		io = ctl_alloc_io(softc->ioctl_info.fe.ctl_pool_ref);
2191 		if (io == NULL) {
2192 			printf("ctl_ioctl: can't allocate ctl_io!\n");
2193 			retval = -ENOSPC;
2194 			break;
2195 		}
2196 
2197 		/*
2198 		 * Need to save the pool reference so it doesn't get
2199 		 * spammed by the user's ctl_io.
2200 		 */
2201 		pool_tmp = io->io_hdr.pool;
2202 
2203 		memcpy(io, (void *)addr, sizeof(*io));
2204 
2205 		io->io_hdr.pool = pool_tmp;
2206 		/*
2207 		 * No status yet, so make sure the status is set properly.
2208 		 */
2209 		io->io_hdr.status = CTL_STATUS_NONE;
2210 
2211 		/*
2212 		 * The user sets the initiator ID, target and LUN IDs.
2213 		 */
2214 		io->io_hdr.nexus.targ_port = softc->ioctl_info.fe.targ_port;
2215 		io->io_hdr.flags |= CTL_FLAG_USER_REQ;
2216 		if ((io->io_hdr.io_type == CTL_IO_SCSI)
2217 		 && (io->scsiio.tag_type != CTL_TAG_UNTAGGED))
2218 			io->scsiio.tag_num = softc->ioctl_info.cur_tag_num++;
2219 
2220 		retval = ctl_ioctl_submit_wait(io);
2221 
2222 		if (retval != 0) {
2223 			ctl_free_io(io);
2224 			break;
2225 		}
2226 
2227 		memcpy((void *)addr, io, sizeof(*io));
2228 
2229 		/* return this to our pool */
2230 		ctl_free_io(io);
2231 
2232 		break;
2233 	}
2234 	case CTL_ENABLE_PORT:
2235 	case CTL_DISABLE_PORT:
2236 	case CTL_SET_PORT_WWNS: {
2237 		struct ctl_frontend *fe;
2238 		struct ctl_port_entry *entry;
2239 
2240 		entry = (struct ctl_port_entry *)addr;
2241 
2242 		mtx_lock(&softc->ctl_lock);
2243 		STAILQ_FOREACH(fe, &softc->fe_list, links) {
2244 			int action, done;
2245 
2246 			action = 0;
2247 			done = 0;
2248 
2249 			if ((entry->port_type == CTL_PORT_NONE)
2250 			 && (entry->targ_port == fe->targ_port)) {
2251 				/*
2252 				 * If the user only wants to enable or
2253 				 * disable or set WWNs on a specific port,
2254 				 * do the operation and we're done.
2255 				 */
2256 				action = 1;
2257 				done = 1;
2258 			} else if (entry->port_type & fe->port_type) {
2259 				/*
2260 				 * Compare the user's type mask with the
2261 				 * particular frontend type to see if we
2262 				 * have a match.
2263 				 */
2264 				action = 1;
2265 				done = 0;
2266 
2267 				/*
2268 				 * Make sure the user isn't trying to set
2269 				 * WWNs on multiple ports at the same time.
2270 				 */
2271 				if (cmd == CTL_SET_PORT_WWNS) {
2272 					printf("%s: Can't set WWNs on "
2273 					       "multiple ports\n", __func__);
2274 					retval = EINVAL;
2275 					break;
2276 				}
2277 			}
2278 			if (action != 0) {
2279 				/*
2280 				 * XXX KDM we have to drop the lock here,
2281 				 * because the online/offline operations
2282 				 * can potentially block.  We need to
2283 				 * reference count the frontends so they
2284 				 * can't go away,
2285 				 */
2286 				mtx_unlock(&softc->ctl_lock);
2287 
2288 				if (cmd == CTL_ENABLE_PORT) {
2289 					struct ctl_lun *lun;
2290 
2291 					STAILQ_FOREACH(lun, &softc->lun_list,
2292 						       links) {
2293 						fe->lun_enable(fe->targ_lun_arg,
2294 						    lun->target,
2295 						    lun->lun);
2296 					}
2297 
2298 					ctl_frontend_online(fe);
2299 				} else if (cmd == CTL_DISABLE_PORT) {
2300 					struct ctl_lun *lun;
2301 
2302 					ctl_frontend_offline(fe);
2303 
2304 					STAILQ_FOREACH(lun, &softc->lun_list,
2305 						       links) {
2306 						fe->lun_disable(
2307 						    fe->targ_lun_arg,
2308 						    lun->target,
2309 						    lun->lun);
2310 					}
2311 				}
2312 
2313 				mtx_lock(&softc->ctl_lock);
2314 
2315 				if (cmd == CTL_SET_PORT_WWNS)
2316 					ctl_frontend_set_wwns(fe,
2317 					    (entry->flags & CTL_PORT_WWNN_VALID) ?
2318 					    1 : 0, entry->wwnn,
2319 					    (entry->flags & CTL_PORT_WWPN_VALID) ?
2320 					    1 : 0, entry->wwpn);
2321 			}
2322 			if (done != 0)
2323 				break;
2324 		}
2325 		mtx_unlock(&softc->ctl_lock);
2326 		break;
2327 	}
2328 	case CTL_GET_PORT_LIST: {
2329 		struct ctl_frontend *fe;
2330 		struct ctl_port_list *list;
2331 		int i;
2332 
2333 		list = (struct ctl_port_list *)addr;
2334 
2335 		if (list->alloc_len != (list->alloc_num *
2336 		    sizeof(struct ctl_port_entry))) {
2337 			printf("%s: CTL_GET_PORT_LIST: alloc_len %u != "
2338 			       "alloc_num %u * sizeof(struct ctl_port_entry) "
2339 			       "%zu\n", __func__, list->alloc_len,
2340 			       list->alloc_num, sizeof(struct ctl_port_entry));
2341 			retval = EINVAL;
2342 			break;
2343 		}
2344 		list->fill_len = 0;
2345 		list->fill_num = 0;
2346 		list->dropped_num = 0;
2347 		i = 0;
2348 		mtx_lock(&softc->ctl_lock);
2349 		STAILQ_FOREACH(fe, &softc->fe_list, links) {
2350 			struct ctl_port_entry entry, *list_entry;
2351 
2352 			if (list->fill_num >= list->alloc_num) {
2353 				list->dropped_num++;
2354 				continue;
2355 			}
2356 
2357 			entry.port_type = fe->port_type;
2358 			strlcpy(entry.port_name, fe->port_name,
2359 				sizeof(entry.port_name));
2360 			entry.targ_port = fe->targ_port;
2361 			entry.physical_port = fe->physical_port;
2362 			entry.virtual_port = fe->virtual_port;
2363 			entry.wwnn = fe->wwnn;
2364 			entry.wwpn = fe->wwpn;
2365 			if (fe->status & CTL_PORT_STATUS_ONLINE)
2366 				entry.online = 1;
2367 			else
2368 				entry.online = 0;
2369 
2370 			list_entry = &list->entries[i];
2371 
2372 			retval = copyout(&entry, list_entry, sizeof(entry));
2373 			if (retval != 0) {
2374 				printf("%s: CTL_GET_PORT_LIST: copyout "
2375 				       "returned %d\n", __func__, retval);
2376 				break;
2377 			}
2378 			i++;
2379 			list->fill_num++;
2380 			list->fill_len += sizeof(entry);
2381 		}
2382 		mtx_unlock(&softc->ctl_lock);
2383 
2384 		/*
2385 		 * If this is non-zero, we had a copyout fault, so there's
2386 		 * probably no point in attempting to set the status inside
2387 		 * the structure.
2388 		 */
2389 		if (retval != 0)
2390 			break;
2391 
2392 		if (list->dropped_num > 0)
2393 			list->status = CTL_PORT_LIST_NEED_MORE_SPACE;
2394 		else
2395 			list->status = CTL_PORT_LIST_OK;
2396 		break;
2397 	}
2398 	case CTL_DUMP_OOA: {
2399 		struct ctl_lun *lun;
2400 		union ctl_io *io;
2401 		char printbuf[128];
2402 		struct sbuf sb;
2403 
2404 		mtx_lock(&softc->ctl_lock);
2405 		printf("Dumping OOA queues:\n");
2406 		STAILQ_FOREACH(lun, &softc->lun_list, links) {
2407 			for (io = (union ctl_io *)TAILQ_FIRST(
2408 			     &lun->ooa_queue); io != NULL;
2409 			     io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
2410 			     ooa_links)) {
2411 				sbuf_new(&sb, printbuf, sizeof(printbuf),
2412 					 SBUF_FIXEDLEN);
2413 				sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ",
2414 					    (intmax_t)lun->lun,
2415 					    io->scsiio.tag_num,
2416 					    (io->io_hdr.flags &
2417 					    CTL_FLAG_BLOCKED) ? "" : " BLOCKED",
2418 					    (io->io_hdr.flags &
2419 					    CTL_FLAG_DMA_INPROG) ? " DMA" : "",
2420 					    (io->io_hdr.flags &
2421 					    CTL_FLAG_ABORT) ? " ABORT" : "",
2422 			                    (io->io_hdr.flags &
2423 		                        CTL_FLAG_IS_WAS_ON_RTR) ? " RTR" : "");
2424 				ctl_scsi_command_string(&io->scsiio, NULL, &sb);
2425 				sbuf_finish(&sb);
2426 				printf("%s\n", sbuf_data(&sb));
2427 			}
2428 		}
2429 		printf("OOA queues dump done\n");
2430 		mtx_unlock(&softc->ctl_lock);
2431 		break;
2432 	}
2433 	case CTL_GET_OOA: {
2434 		struct ctl_lun *lun;
2435 		struct ctl_ooa *ooa_hdr;
2436 		struct ctl_ooa_entry *entries;
2437 		uint32_t cur_fill_num;
2438 
2439 		ooa_hdr = (struct ctl_ooa *)addr;
2440 
2441 		if ((ooa_hdr->alloc_len == 0)
2442 		 || (ooa_hdr->alloc_num == 0)) {
2443 			printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u "
2444 			       "must be non-zero\n", __func__,
2445 			       ooa_hdr->alloc_len, ooa_hdr->alloc_num);
2446 			retval = EINVAL;
2447 			break;
2448 		}
2449 
2450 		if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num *
2451 		    sizeof(struct ctl_ooa_entry))) {
2452 			printf("%s: CTL_GET_OOA: alloc len %u must be alloc "
2453 			       "num %d * sizeof(struct ctl_ooa_entry) %zd\n",
2454 			       __func__, ooa_hdr->alloc_len,
2455 			       ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry));
2456 			retval = EINVAL;
2457 			break;
2458 		}
2459 
2460 		entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO);
2461 		if (entries == NULL) {
2462 			printf("%s: could not allocate %d bytes for OOA "
2463 			       "dump\n", __func__, ooa_hdr->alloc_len);
2464 			retval = ENOMEM;
2465 			break;
2466 		}
2467 
2468 		mtx_lock(&softc->ctl_lock);
2469 		if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0)
2470 		 && ((ooa_hdr->lun_num > CTL_MAX_LUNS)
2471 		  || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) {
2472 			mtx_unlock(&softc->ctl_lock);
2473 			free(entries, M_CTL);
2474 			printf("%s: CTL_GET_OOA: invalid LUN %ju\n",
2475 			       __func__, (uintmax_t)ooa_hdr->lun_num);
2476 			retval = EINVAL;
2477 			break;
2478 		}
2479 
2480 		cur_fill_num = 0;
2481 
2482 		if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) {
2483 			STAILQ_FOREACH(lun, &softc->lun_list, links) {
2484 				retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,
2485 					ooa_hdr, entries);
2486 				if (retval != 0)
2487 					break;
2488 			}
2489 			if (retval != 0) {
2490 				mtx_unlock(&softc->ctl_lock);
2491 				free(entries, M_CTL);
2492 				break;
2493 			}
2494 		} else {
2495 			lun = softc->ctl_luns[ooa_hdr->lun_num];
2496 
2497 			retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr,
2498 						    entries);
2499 		}
2500 		mtx_unlock(&softc->ctl_lock);
2501 
2502 		ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num);
2503 		ooa_hdr->fill_len = ooa_hdr->fill_num *
2504 			sizeof(struct ctl_ooa_entry);
2505 		retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len);
2506 		if (retval != 0) {
2507 			printf("%s: error copying out %d bytes for OOA dump\n",
2508 			       __func__, ooa_hdr->fill_len);
2509 		}
2510 
2511 		getbintime(&ooa_hdr->cur_bt);
2512 
2513 		if (cur_fill_num > ooa_hdr->alloc_num) {
2514 			ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num;
2515 			ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE;
2516 		} else {
2517 			ooa_hdr->dropped_num = 0;
2518 			ooa_hdr->status = CTL_OOA_OK;
2519 		}
2520 
2521 		free(entries, M_CTL);
2522 		break;
2523 	}
2524 	case CTL_CHECK_OOA: {
2525 		union ctl_io *io;
2526 		struct ctl_lun *lun;
2527 		struct ctl_ooa_info *ooa_info;
2528 
2529 
2530 		ooa_info = (struct ctl_ooa_info *)addr;
2531 
2532 		if (ooa_info->lun_id >= CTL_MAX_LUNS) {
2533 			ooa_info->status = CTL_OOA_INVALID_LUN;
2534 			break;
2535 		}
2536 		mtx_lock(&softc->ctl_lock);
2537 		lun = softc->ctl_luns[ooa_info->lun_id];
2538 		if (lun == NULL) {
2539 			mtx_unlock(&softc->ctl_lock);
2540 			ooa_info->status = CTL_OOA_INVALID_LUN;
2541 			break;
2542 		}
2543 
2544 		ooa_info->num_entries = 0;
2545 		for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
2546 		     io != NULL; io = (union ctl_io *)TAILQ_NEXT(
2547 		     &io->io_hdr, ooa_links)) {
2548 			ooa_info->num_entries++;
2549 		}
2550 
2551 		mtx_unlock(&softc->ctl_lock);
2552 		ooa_info->status = CTL_OOA_SUCCESS;
2553 
2554 		break;
2555 	}
2556 	case CTL_HARD_START:
2557 	case CTL_HARD_STOP: {
2558 		struct ctl_fe_ioctl_startstop_info ss_info;
2559 		struct cfi_metatask *metatask;
2560 		struct mtx hs_mtx;
2561 
2562 		mtx_init(&hs_mtx, "HS Mutex", NULL, MTX_DEF);
2563 
2564 		cv_init(&ss_info.sem, "hard start/stop cv" );
2565 
2566 		metatask = cfi_alloc_metatask(/*can_wait*/ 1);
2567 		if (metatask == NULL) {
2568 			retval = ENOMEM;
2569 			mtx_destroy(&hs_mtx);
2570 			break;
2571 		}
2572 
2573 		if (cmd == CTL_HARD_START)
2574 			metatask->tasktype = CFI_TASK_STARTUP;
2575 		else
2576 			metatask->tasktype = CFI_TASK_SHUTDOWN;
2577 
2578 		metatask->callback = ctl_ioctl_hard_startstop_callback;
2579 		metatask->callback_arg = &ss_info;
2580 
2581 		cfi_action(metatask);
2582 
2583 		/* Wait for the callback */
2584 		mtx_lock(&hs_mtx);
2585 		cv_wait_sig(&ss_info.sem, &hs_mtx);
2586 		mtx_unlock(&hs_mtx);
2587 
2588 		/*
2589 		 * All information has been copied from the metatask by the
2590 		 * time cv_broadcast() is called, so we free the metatask here.
2591 		 */
2592 		cfi_free_metatask(metatask);
2593 
2594 		memcpy((void *)addr, &ss_info.hs_info, sizeof(ss_info.hs_info));
2595 
2596 		mtx_destroy(&hs_mtx);
2597 		break;
2598 	}
2599 	case CTL_BBRREAD: {
2600 		struct ctl_bbrread_info *bbr_info;
2601 		struct ctl_fe_ioctl_bbrread_info fe_bbr_info;
2602 		struct mtx bbr_mtx;
2603 		struct cfi_metatask *metatask;
2604 
2605 		bbr_info = (struct ctl_bbrread_info *)addr;
2606 
2607 		bzero(&fe_bbr_info, sizeof(fe_bbr_info));
2608 
2609 		bzero(&bbr_mtx, sizeof(bbr_mtx));
2610 		mtx_init(&bbr_mtx, "BBR Mutex", NULL, MTX_DEF);
2611 
2612 		fe_bbr_info.bbr_info = bbr_info;
2613 		fe_bbr_info.lock = &bbr_mtx;
2614 
2615 		cv_init(&fe_bbr_info.sem, "BBR read cv");
2616 		metatask = cfi_alloc_metatask(/*can_wait*/ 1);
2617 
2618 		if (metatask == NULL) {
2619 			mtx_destroy(&bbr_mtx);
2620 			cv_destroy(&fe_bbr_info.sem);
2621 			retval = ENOMEM;
2622 			break;
2623 		}
2624 		metatask->tasktype = CFI_TASK_BBRREAD;
2625 		metatask->callback = ctl_ioctl_bbrread_callback;
2626 		metatask->callback_arg = &fe_bbr_info;
2627 		metatask->taskinfo.bbrread.lun_num = bbr_info->lun_num;
2628 		metatask->taskinfo.bbrread.lba = bbr_info->lba;
2629 		metatask->taskinfo.bbrread.len = bbr_info->len;
2630 
2631 		cfi_action(metatask);
2632 
2633 		mtx_lock(&bbr_mtx);
2634 		while (fe_bbr_info.wakeup_done == 0)
2635 			cv_wait_sig(&fe_bbr_info.sem, &bbr_mtx);
2636 		mtx_unlock(&bbr_mtx);
2637 
2638 		bbr_info->status = metatask->status;
2639 		bbr_info->bbr_status = metatask->taskinfo.bbrread.status;
2640 		bbr_info->scsi_status = metatask->taskinfo.bbrread.scsi_status;
2641 		memcpy(&bbr_info->sense_data,
2642 		       &metatask->taskinfo.bbrread.sense_data,
2643 		       ctl_min(sizeof(bbr_info->sense_data),
2644 			       sizeof(metatask->taskinfo.bbrread.sense_data)));
2645 
2646 		cfi_free_metatask(metatask);
2647 
2648 		mtx_destroy(&bbr_mtx);
2649 		cv_destroy(&fe_bbr_info.sem);
2650 
2651 		break;
2652 	}
2653 	case CTL_DELAY_IO: {
2654 		struct ctl_io_delay_info *delay_info;
2655 #ifdef CTL_IO_DELAY
2656 		struct ctl_lun *lun;
2657 #endif /* CTL_IO_DELAY */
2658 
2659 		delay_info = (struct ctl_io_delay_info *)addr;
2660 
2661 #ifdef CTL_IO_DELAY
2662 		mtx_lock(&softc->ctl_lock);
2663 
2664 		if ((delay_info->lun_id > CTL_MAX_LUNS)
2665 		 || (softc->ctl_luns[delay_info->lun_id] == NULL)) {
2666 			delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
2667 		} else {
2668 			lun = softc->ctl_luns[delay_info->lun_id];
2669 
2670 			delay_info->status = CTL_DELAY_STATUS_OK;
2671 
2672 			switch (delay_info->delay_type) {
2673 			case CTL_DELAY_TYPE_CONT:
2674 				break;
2675 			case CTL_DELAY_TYPE_ONESHOT:
2676 				break;
2677 			default:
2678 				delay_info->status =
2679 					CTL_DELAY_STATUS_INVALID_TYPE;
2680 				break;
2681 			}
2682 
2683 			switch (delay_info->delay_loc) {
2684 			case CTL_DELAY_LOC_DATAMOVE:
2685 				lun->delay_info.datamove_type =
2686 					delay_info->delay_type;
2687 				lun->delay_info.datamove_delay =
2688 					delay_info->delay_secs;
2689 				break;
2690 			case CTL_DELAY_LOC_DONE:
2691 				lun->delay_info.done_type =
2692 					delay_info->delay_type;
2693 				lun->delay_info.done_delay =
2694 					delay_info->delay_secs;
2695 				break;
2696 			default:
2697 				delay_info->status =
2698 					CTL_DELAY_STATUS_INVALID_LOC;
2699 				break;
2700 			}
2701 		}
2702 
2703 		mtx_unlock(&softc->ctl_lock);
2704 #else
2705 		delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED;
2706 #endif /* CTL_IO_DELAY */
2707 		break;
2708 	}
2709 	case CTL_REALSYNC_SET: {
2710 		int *syncstate;
2711 
2712 		syncstate = (int *)addr;
2713 
2714 		mtx_lock(&softc->ctl_lock);
2715 		switch (*syncstate) {
2716 		case 0:
2717 			softc->flags &= ~CTL_FLAG_REAL_SYNC;
2718 			break;
2719 		case 1:
2720 			softc->flags |= CTL_FLAG_REAL_SYNC;
2721 			break;
2722 		default:
2723 			retval = -EINVAL;
2724 			break;
2725 		}
2726 		mtx_unlock(&softc->ctl_lock);
2727 		break;
2728 	}
2729 	case CTL_REALSYNC_GET: {
2730 		int *syncstate;
2731 
2732 		syncstate = (int*)addr;
2733 
2734 		mtx_lock(&softc->ctl_lock);
2735 		if (softc->flags & CTL_FLAG_REAL_SYNC)
2736 			*syncstate = 1;
2737 		else
2738 			*syncstate = 0;
2739 		mtx_unlock(&softc->ctl_lock);
2740 
2741 		break;
2742 	}
2743 	case CTL_SETSYNC:
2744 	case CTL_GETSYNC: {
2745 		struct ctl_sync_info *sync_info;
2746 		struct ctl_lun *lun;
2747 
2748 		sync_info = (struct ctl_sync_info *)addr;
2749 
2750 		mtx_lock(&softc->ctl_lock);
2751 		lun = softc->ctl_luns[sync_info->lun_id];
2752 		if (lun == NULL) {
2753 			mtx_unlock(&softc->ctl_lock);
2754 			sync_info->status = CTL_GS_SYNC_NO_LUN;
2755 		}
2756 		/*
2757 		 * Get or set the sync interval.  We're not bounds checking
2758 		 * in the set case, hopefully the user won't do something
2759 		 * silly.
2760 		 */
2761 		if (cmd == CTL_GETSYNC)
2762 			sync_info->sync_interval = lun->sync_interval;
2763 		else
2764 			lun->sync_interval = sync_info->sync_interval;
2765 
2766 		mtx_unlock(&softc->ctl_lock);
2767 
2768 		sync_info->status = CTL_GS_SYNC_OK;
2769 
2770 		break;
2771 	}
2772 	case CTL_GETSTATS: {
2773 		struct ctl_stats *stats;
2774 		struct ctl_lun *lun;
2775 		int i;
2776 
2777 		stats = (struct ctl_stats *)addr;
2778 
2779 		if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) >
2780 		     stats->alloc_len) {
2781 			stats->status = CTL_SS_NEED_MORE_SPACE;
2782 			stats->num_luns = softc->num_luns;
2783 			break;
2784 		}
2785 		/*
2786 		 * XXX KDM no locking here.  If the LUN list changes,
2787 		 * things can blow up.
2788 		 */
2789 		for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL;
2790 		     i++, lun = STAILQ_NEXT(lun, links)) {
2791 			retval = copyout(&lun->stats, &stats->lun_stats[i],
2792 					 sizeof(lun->stats));
2793 			if (retval != 0)
2794 				break;
2795 		}
2796 		stats->num_luns = softc->num_luns;
2797 		stats->fill_len = sizeof(struct ctl_lun_io_stats) *
2798 				 softc->num_luns;
2799 		stats->status = CTL_SS_OK;
2800 #ifdef CTL_TIME_IO
2801 		stats->flags = CTL_STATS_FLAG_TIME_VALID;
2802 #else
2803 		stats->flags = CTL_STATS_FLAG_NONE;
2804 #endif
2805 		getnanouptime(&stats->timestamp);
2806 		break;
2807 	}
2808 	case CTL_ERROR_INJECT: {
2809 		struct ctl_error_desc *err_desc, *new_err_desc;
2810 		struct ctl_lun *lun;
2811 
2812 		err_desc = (struct ctl_error_desc *)addr;
2813 
2814 		new_err_desc = malloc(sizeof(*new_err_desc), M_CTL,
2815 				      M_WAITOK | M_ZERO);
2816 		bcopy(err_desc, new_err_desc, sizeof(*new_err_desc));
2817 
2818 		mtx_lock(&softc->ctl_lock);
2819 		lun = softc->ctl_luns[err_desc->lun_id];
2820 		if (lun == NULL) {
2821 			mtx_unlock(&softc->ctl_lock);
2822 			printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n",
2823 			       __func__, (uintmax_t)err_desc->lun_id);
2824 			retval = EINVAL;
2825 			break;
2826 		}
2827 
2828 		/*
2829 		 * We could do some checking here to verify the validity
2830 		 * of the request, but given the complexity of error
2831 		 * injection requests, the checking logic would be fairly
2832 		 * complex.
2833 		 *
2834 		 * For now, if the request is invalid, it just won't get
2835 		 * executed and might get deleted.
2836 		 */
2837 		STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links);
2838 
2839 		/*
2840 		 * XXX KDM check to make sure the serial number is unique,
2841 		 * in case we somehow manage to wrap.  That shouldn't
2842 		 * happen for a very long time, but it's the right thing to
2843 		 * do.
2844 		 */
2845 		new_err_desc->serial = lun->error_serial;
2846 		err_desc->serial = lun->error_serial;
2847 		lun->error_serial++;
2848 
2849 		mtx_unlock(&softc->ctl_lock);
2850 		break;
2851 	}
2852 	case CTL_ERROR_INJECT_DELETE: {
2853 		struct ctl_error_desc *delete_desc, *desc, *desc2;
2854 		struct ctl_lun *lun;
2855 		int delete_done;
2856 
2857 		delete_desc = (struct ctl_error_desc *)addr;
2858 		delete_done = 0;
2859 
2860 		mtx_lock(&softc->ctl_lock);
2861 		lun = softc->ctl_luns[delete_desc->lun_id];
2862 		if (lun == NULL) {
2863 			mtx_unlock(&softc->ctl_lock);
2864 			printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n",
2865 			       __func__, (uintmax_t)delete_desc->lun_id);
2866 			retval = EINVAL;
2867 			break;
2868 		}
2869 		STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
2870 			if (desc->serial != delete_desc->serial)
2871 				continue;
2872 
2873 			STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc,
2874 				      links);
2875 			free(desc, M_CTL);
2876 			delete_done = 1;
2877 		}
2878 		mtx_unlock(&softc->ctl_lock);
2879 		if (delete_done == 0) {
2880 			printf("%s: CTL_ERROR_INJECT_DELETE: can't find "
2881 			       "error serial %ju on LUN %u\n", __func__,
2882 			       delete_desc->serial, delete_desc->lun_id);
2883 			retval = EINVAL;
2884 			break;
2885 		}
2886 		break;
2887 	}
2888 	case CTL_DUMP_STRUCTS: {
2889 		int i, j, k;
2890 		struct ctl_frontend *fe;
2891 
2892 		printf("CTL IID to WWPN map start:\n");
2893 		for (i = 0; i < CTL_MAX_PORTS; i++) {
2894 			for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
2895 				if (softc->wwpn_iid[i][j].in_use == 0)
2896 					continue;
2897 
2898 				printf("port %d iid %u WWPN %#jx\n",
2899 				       softc->wwpn_iid[i][j].port,
2900 				       softc->wwpn_iid[i][j].iid,
2901 				       (uintmax_t)softc->wwpn_iid[i][j].wwpn);
2902 			}
2903 		}
2904 		printf("CTL IID to WWPN map end\n");
2905 		printf("CTL Persistent Reservation information start:\n");
2906 		for (i = 0; i < CTL_MAX_LUNS; i++) {
2907 			struct ctl_lun *lun;
2908 
2909 			lun = softc->ctl_luns[i];
2910 
2911 			if ((lun == NULL)
2912 			 || ((lun->flags & CTL_LUN_DISABLED) != 0))
2913 				continue;
2914 
2915 			for (j = 0; j < (CTL_MAX_PORTS * 2); j++) {
2916 				for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){
2917 					if (lun->per_res[j+k].registered == 0)
2918 						continue;
2919 					printf("LUN %d port %d iid %d key "
2920 					       "%#jx\n", i, j, k,
2921 					       (uintmax_t)scsi_8btou64(
2922 					       lun->per_res[j+k].res_key.key));
2923 				}
2924 			}
2925 		}
2926 		printf("CTL Persistent Reservation information end\n");
2927 		printf("CTL Frontends:\n");
2928 		/*
2929 		 * XXX KDM calling this without a lock.  We'd likely want
2930 		 * to drop the lock before calling the frontend's dump
2931 		 * routine anyway.
2932 		 */
2933 		STAILQ_FOREACH(fe, &softc->fe_list, links) {
2934 			printf("Frontend %s Type %u pport %d vport %d WWNN "
2935 			       "%#jx WWPN %#jx\n", fe->port_name, fe->port_type,
2936 			       fe->physical_port, fe->virtual_port,
2937 			       (uintmax_t)fe->wwnn, (uintmax_t)fe->wwpn);
2938 
2939 			/*
2940 			 * Frontends are not required to support the dump
2941 			 * routine.
2942 			 */
2943 			if (fe->fe_dump == NULL)
2944 				continue;
2945 
2946 			fe->fe_dump();
2947 		}
2948 		printf("CTL Frontend information end\n");
2949 		break;
2950 	}
2951 	case CTL_LUN_REQ: {
2952 		struct ctl_lun_req *lun_req;
2953 		struct ctl_backend_driver *backend;
2954 
2955 		lun_req = (struct ctl_lun_req *)addr;
2956 
2957 		backend = ctl_backend_find(lun_req->backend);
2958 		if (backend == NULL) {
2959 			lun_req->status = CTL_LUN_ERROR;
2960 			snprintf(lun_req->error_str,
2961 				 sizeof(lun_req->error_str),
2962 				 "Backend \"%s\" not found.",
2963 				 lun_req->backend);
2964 			break;
2965 		}
2966 		if (lun_req->num_be_args > 0) {
2967 			lun_req->kern_be_args = ctl_copyin_args(
2968 				lun_req->num_be_args,
2969 				lun_req->be_args,
2970 				lun_req->error_str,
2971 				sizeof(lun_req->error_str));
2972 			if (lun_req->kern_be_args == NULL) {
2973 				lun_req->status = CTL_LUN_ERROR;
2974 				break;
2975 			}
2976 		}
2977 
2978 		retval = backend->ioctl(dev, cmd, addr, flag, td);
2979 
2980 		if (lun_req->num_be_args > 0) {
2981 			ctl_free_args(lun_req->num_be_args,
2982 				      lun_req->kern_be_args);
2983 		}
2984 		break;
2985 	}
2986 	case CTL_LUN_LIST: {
2987 		struct sbuf *sb;
2988 		struct ctl_lun *lun;
2989 		struct ctl_lun_list *list;
2990 
2991 		list = (struct ctl_lun_list *)addr;
2992 
2993 		/*
2994 		 * Allocate a fixed length sbuf here, based on the length
2995 		 * of the user's buffer.  We could allocate an auto-extending
2996 		 * buffer, and then tell the user how much larger our
2997 		 * amount of data is than his buffer, but that presents
2998 		 * some problems:
2999 		 *
3000 		 * 1.  The sbuf(9) routines use a blocking malloc, and so
3001 		 *     we can't hold a lock while calling them with an
3002 		 *     auto-extending buffer.
3003  		 *
3004 		 * 2.  There is not currently a LUN reference counting
3005 		 *     mechanism, outside of outstanding transactions on
3006 		 *     the LUN's OOA queue.  So a LUN could go away on us
3007 		 *     while we're getting the LUN number, backend-specific
3008 		 *     information, etc.  Thus, given the way things
3009 		 *     currently work, we need to hold the CTL lock while
3010 		 *     grabbing LUN information.
3011 		 *
3012 		 * So, from the user's standpoint, the best thing to do is
3013 		 * allocate what he thinks is a reasonable buffer length,
3014 		 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error,
3015 		 * double the buffer length and try again.  (And repeat
3016 		 * that until he succeeds.)
3017 		 */
3018 		sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
3019 		if (sb == NULL) {
3020 			list->status = CTL_LUN_LIST_ERROR;
3021 			snprintf(list->error_str, sizeof(list->error_str),
3022 				 "Unable to allocate %d bytes for LUN list",
3023 				 list->alloc_len);
3024 			break;
3025 		}
3026 
3027 		sbuf_printf(sb, "<ctllunlist>\n");
3028 
3029 		mtx_lock(&softc->ctl_lock);
3030 
3031 		STAILQ_FOREACH(lun, &softc->lun_list, links) {
3032 			retval = sbuf_printf(sb, "<lun id=\"%ju\">\n",
3033 					     (uintmax_t)lun->lun);
3034 
3035 			/*
3036 			 * Bail out as soon as we see that we've overfilled
3037 			 * the buffer.
3038 			 */
3039 			if (retval != 0)
3040 				break;
3041 
3042 			retval = sbuf_printf(sb, "<backend_type>%s"
3043 					     "</backend_type>\n",
3044 					     (lun->backend == NULL) ?  "none" :
3045 					     lun->backend->name);
3046 
3047 			if (retval != 0)
3048 				break;
3049 
3050 			retval = sbuf_printf(sb, "<lun_type>%d</lun_type>\n",
3051 					     lun->be_lun->lun_type);
3052 
3053 			if (retval != 0)
3054 				break;
3055 
3056 			if (lun->backend == NULL) {
3057 				retval = sbuf_printf(sb, "</lun>\n");
3058 				if (retval != 0)
3059 					break;
3060 				continue;
3061 			}
3062 
3063 			retval = sbuf_printf(sb, "<size>%ju</size>\n",
3064 					     (lun->be_lun->maxlba > 0) ?
3065 					     lun->be_lun->maxlba + 1 : 0);
3066 
3067 			if (retval != 0)
3068 				break;
3069 
3070 			retval = sbuf_printf(sb, "<blocksize>%u</blocksize>\n",
3071 					     lun->be_lun->blocksize);
3072 
3073 			if (retval != 0)
3074 				break;
3075 
3076 			retval = sbuf_printf(sb, "<serial_number>");
3077 
3078 			if (retval != 0)
3079 				break;
3080 
3081 			retval = ctl_sbuf_printf_esc(sb,
3082 						     lun->be_lun->serial_num);
3083 
3084 			if (retval != 0)
3085 				break;
3086 
3087 			retval = sbuf_printf(sb, "</serial_number>\n");
3088 
3089 			if (retval != 0)
3090 				break;
3091 
3092 			retval = sbuf_printf(sb, "<device_id>");
3093 
3094 			if (retval != 0)
3095 				break;
3096 
3097 			retval = ctl_sbuf_printf_esc(sb,lun->be_lun->device_id);
3098 
3099 			if (retval != 0)
3100 				break;
3101 
3102 			retval = sbuf_printf(sb, "</device_id>\n");
3103 
3104 			if (retval != 0)
3105 				break;
3106 
3107 			if (lun->backend->lun_info == NULL) {
3108 				retval = sbuf_printf(sb, "</lun>\n");
3109 				if (retval != 0)
3110 					break;
3111 				continue;
3112 			}
3113 
3114 			retval =lun->backend->lun_info(lun->be_lun->be_lun, sb);
3115 
3116 			if (retval != 0)
3117 				break;
3118 
3119 			retval = sbuf_printf(sb, "</lun>\n");
3120 
3121 			if (retval != 0)
3122 				break;
3123 		}
3124 		mtx_unlock(&softc->ctl_lock);
3125 
3126 		if ((retval != 0)
3127 		 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) {
3128 			retval = 0;
3129 			sbuf_delete(sb);
3130 			list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
3131 			snprintf(list->error_str, sizeof(list->error_str),
3132 				 "Out of space, %d bytes is too small",
3133 				 list->alloc_len);
3134 			break;
3135 		}
3136 
3137 		sbuf_finish(sb);
3138 
3139 		retval = copyout(sbuf_data(sb), list->lun_xml,
3140 				 sbuf_len(sb) + 1);
3141 
3142 		list->fill_len = sbuf_len(sb) + 1;
3143 		list->status = CTL_LUN_LIST_OK;
3144 		sbuf_delete(sb);
3145 		break;
3146 	}
3147 	default: {
3148 		/* XXX KDM should we fix this? */
3149 #if 0
3150 		struct ctl_backend_driver *backend;
3151 		unsigned int type;
3152 		int found;
3153 
3154 		found = 0;
3155 
3156 		/*
3157 		 * We encode the backend type as the ioctl type for backend
3158 		 * ioctls.  So parse it out here, and then search for a
3159 		 * backend of this type.
3160 		 */
3161 		type = _IOC_TYPE(cmd);
3162 
3163 		STAILQ_FOREACH(backend, &softc->be_list, links) {
3164 			if (backend->type == type) {
3165 				found = 1;
3166 				break;
3167 			}
3168 		}
3169 		if (found == 0) {
3170 			printf("ctl: unknown ioctl command %#lx or backend "
3171 			       "%d\n", cmd, type);
3172 			retval = -EINVAL;
3173 			break;
3174 		}
3175 		retval = backend->ioctl(dev, cmd, addr, flag, td);
3176 #endif
3177 		retval = ENOTTY;
3178 		break;
3179 	}
3180 	}
3181 	return (retval);
3182 }
3183 
3184 uint32_t
3185 ctl_get_initindex(struct ctl_nexus *nexus)
3186 {
3187 	if (nexus->targ_port < CTL_MAX_PORTS)
3188 		return (nexus->initid.id +
3189 			(nexus->targ_port * CTL_MAX_INIT_PER_PORT));
3190 	else
3191 		return (nexus->initid.id +
3192 		       ((nexus->targ_port - CTL_MAX_PORTS) *
3193 			CTL_MAX_INIT_PER_PORT));
3194 }
3195 
3196 uint32_t
3197 ctl_get_resindex(struct ctl_nexus *nexus)
3198 {
3199 	return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
3200 }
3201 
3202 uint32_t
3203 ctl_port_idx(int port_num)
3204 {
3205 	if (port_num < CTL_MAX_PORTS)
3206 		return(port_num);
3207 	else
3208 		return(port_num - CTL_MAX_PORTS);
3209 }
3210 
3211 /*
3212  * Note:  This only works for bitmask sizes that are at least 32 bits, and
3213  * that are a power of 2.
3214  */
3215 int
3216 ctl_ffz(uint32_t *mask, uint32_t size)
3217 {
3218 	uint32_t num_chunks, num_pieces;
3219 	int i, j;
3220 
3221 	num_chunks = (size >> 5);
3222 	if (num_chunks == 0)
3223 		num_chunks++;
3224 	num_pieces = ctl_min((sizeof(uint32_t) * 8), size);
3225 
3226 	for (i = 0; i < num_chunks; i++) {
3227 		for (j = 0; j < num_pieces; j++) {
3228 			if ((mask[i] & (1 << j)) == 0)
3229 				return ((i << 5) + j);
3230 		}
3231 	}
3232 
3233 	return (-1);
3234 }
3235 
3236 int
3237 ctl_set_mask(uint32_t *mask, uint32_t bit)
3238 {
3239 	uint32_t chunk, piece;
3240 
3241 	chunk = bit >> 5;
3242 	piece = bit % (sizeof(uint32_t) * 8);
3243 
3244 	if ((mask[chunk] & (1 << piece)) != 0)
3245 		return (-1);
3246 	else
3247 		mask[chunk] |= (1 << piece);
3248 
3249 	return (0);
3250 }
3251 
3252 int
3253 ctl_clear_mask(uint32_t *mask, uint32_t bit)
3254 {
3255 	uint32_t chunk, piece;
3256 
3257 	chunk = bit >> 5;
3258 	piece = bit % (sizeof(uint32_t) * 8);
3259 
3260 	if ((mask[chunk] & (1 << piece)) == 0)
3261 		return (-1);
3262 	else
3263 		mask[chunk] &= ~(1 << piece);
3264 
3265 	return (0);
3266 }
3267 
3268 int
3269 ctl_is_set(uint32_t *mask, uint32_t bit)
3270 {
3271 	uint32_t chunk, piece;
3272 
3273 	chunk = bit >> 5;
3274 	piece = bit % (sizeof(uint32_t) * 8);
3275 
3276 	if ((mask[chunk] & (1 << piece)) == 0)
3277 		return (0);
3278 	else
3279 		return (1);
3280 }
3281 
3282 #ifdef unused
3283 /*
3284  * The bus, target and lun are optional, they can be filled in later.
3285  * can_wait is used to determine whether we can wait on the malloc or not.
3286  */
3287 union ctl_io*
3288 ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port, uint32_t targ_target,
3289 	      uint32_t targ_lun, int can_wait)
3290 {
3291 	union ctl_io *io;
3292 
3293 	if (can_wait)
3294 		io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_WAITOK);
3295 	else
3296 		io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT);
3297 
3298 	if (io != NULL) {
3299 		io->io_hdr.io_type = io_type;
3300 		io->io_hdr.targ_port = targ_port;
3301 		/*
3302 		 * XXX KDM this needs to change/go away.  We need to move
3303 		 * to a preallocated pool of ctl_scsiio structures.
3304 		 */
3305 		io->io_hdr.nexus.targ_target.id = targ_target;
3306 		io->io_hdr.nexus.targ_lun = targ_lun;
3307 	}
3308 
3309 	return (io);
3310 }
3311 
3312 void
3313 ctl_kfree_io(union ctl_io *io)
3314 {
3315 	free(io, M_CTL);
3316 }
3317 #endif /* unused */
3318 
3319 /*
3320  * ctl_softc, pool_type, total_ctl_io are passed in.
3321  * npool is passed out.
3322  */
3323 int
3324 ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
3325 		uint32_t total_ctl_io, struct ctl_io_pool **npool)
3326 {
3327 	uint32_t i;
3328 	union ctl_io *cur_io, *next_io;
3329 	struct ctl_io_pool *pool;
3330 	int retval;
3331 
3332 	retval = 0;
3333 
3334 	pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
3335 					    M_NOWAIT | M_ZERO);
3336 	if (pool == NULL) {
3337 		retval = -ENOMEM;
3338 		goto bailout;
3339 	}
3340 
3341 	pool->type = pool_type;
3342 	pool->ctl_softc = ctl_softc;
3343 
3344 	mtx_lock(&ctl_softc->ctl_lock);
3345 	pool->id = ctl_softc->cur_pool_id++;
3346 	mtx_unlock(&ctl_softc->ctl_lock);
3347 
3348 	pool->flags = CTL_POOL_FLAG_NONE;
3349 	STAILQ_INIT(&pool->free_queue);
3350 
3351 	/*
3352 	 * XXX KDM other options here:
3353 	 * - allocate a page at a time
3354 	 * - allocate one big chunk of memory.
3355 	 * Page allocation might work well, but would take a little more
3356 	 * tracking.
3357 	 */
3358 	for (i = 0; i < total_ctl_io; i++) {
3359 		cur_io = (union ctl_io *)malloc(sizeof(*cur_io), M_CTL,
3360 						M_NOWAIT);
3361 		if (cur_io == NULL) {
3362 			retval = ENOMEM;
3363 			break;
3364 		}
3365 		cur_io->io_hdr.pool = pool;
3366 		STAILQ_INSERT_TAIL(&pool->free_queue, &cur_io->io_hdr, links);
3367 		pool->total_ctl_io++;
3368 		pool->free_ctl_io++;
3369 	}
3370 
3371 	if (retval != 0) {
3372 		for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
3373 		     cur_io != NULL; cur_io = next_io) {
3374 			next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr,
3375 							      links);
3376 			STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr,
3377 				      ctl_io_hdr, links);
3378 			free(cur_io, M_CTL);
3379 		}
3380 
3381 		free(pool, M_CTL);
3382 		goto bailout;
3383 	}
3384 	mtx_lock(&ctl_softc->ctl_lock);
3385 	ctl_softc->num_pools++;
3386 	STAILQ_INSERT_TAIL(&ctl_softc->io_pools, pool, links);
3387 	/*
3388 	 * Increment our usage count if this is an external consumer, so we
3389 	 * can't get unloaded until the external consumer (most likely a
3390 	 * FETD) unloads and frees his pool.
3391 	 *
3392 	 * XXX KDM will this increment the caller's module use count, or
3393 	 * mine?
3394 	 */
3395 #if 0
3396 	if ((pool_type != CTL_POOL_EMERGENCY)
3397 	 && (pool_type != CTL_POOL_INTERNAL)
3398 	 && (pool_type != CTL_POOL_IOCTL)
3399 	 && (pool_type != CTL_POOL_4OTHERSC))
3400 		MOD_INC_USE_COUNT;
3401 #endif
3402 
3403 	mtx_unlock(&ctl_softc->ctl_lock);
3404 
3405 	*npool = pool;
3406 
3407 bailout:
3408 
3409 	return (retval);
3410 }
3411 
3412 /*
3413  * Caller must hold ctl_softc->ctl_lock.
3414  */
3415 int
3416 ctl_pool_acquire(struct ctl_io_pool *pool)
3417 {
3418 	if (pool == NULL)
3419 		return (-EINVAL);
3420 
3421 	if (pool->flags & CTL_POOL_FLAG_INVALID)
3422 		return (-EINVAL);
3423 
3424 	pool->refcount++;
3425 
3426 	return (0);
3427 }
3428 
3429 /*
3430  * Caller must hold ctl_softc->ctl_lock.
3431  */
3432 int
3433 ctl_pool_invalidate(struct ctl_io_pool *pool)
3434 {
3435 	if (pool == NULL)
3436 		return (-EINVAL);
3437 
3438 	pool->flags |= CTL_POOL_FLAG_INVALID;
3439 
3440 	return (0);
3441 }
3442 
3443 /*
3444  * Caller must hold ctl_softc->ctl_lock.
3445  */
3446 int
3447 ctl_pool_release(struct ctl_io_pool *pool)
3448 {
3449 	if (pool == NULL)
3450 		return (-EINVAL);
3451 
3452 	if ((--pool->refcount == 0)
3453 	 && (pool->flags & CTL_POOL_FLAG_INVALID)) {
3454 		ctl_pool_free(pool->ctl_softc, pool);
3455 	}
3456 
3457 	return (0);
3458 }
3459 
3460 /*
3461  * Must be called with ctl_softc->ctl_lock held.
3462  */
3463 void
3464 ctl_pool_free(struct ctl_softc *ctl_softc, struct ctl_io_pool *pool)
3465 {
3466 	union ctl_io *cur_io, *next_io;
3467 
3468 	for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
3469 	     cur_io != NULL; cur_io = next_io) {
3470 		next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr,
3471 						      links);
3472 		STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr, ctl_io_hdr,
3473 			      links);
3474 		free(cur_io, M_CTL);
3475 	}
3476 
3477 	STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links);
3478 	ctl_softc->num_pools--;
3479 
3480 	/*
3481 	 * XXX KDM will this decrement the caller's usage count or mine?
3482 	 */
3483 #if 0
3484 	if ((pool->type != CTL_POOL_EMERGENCY)
3485 	 && (pool->type != CTL_POOL_INTERNAL)
3486 	 && (pool->type != CTL_POOL_IOCTL))
3487 		MOD_DEC_USE_COUNT;
3488 #endif
3489 
3490 	free(pool, M_CTL);
3491 }
3492 
3493 /*
3494  * This routine does not block (except for spinlocks of course).
3495  * It tries to allocate a ctl_io union from the caller's pool as quickly as
3496  * possible.
3497  */
3498 union ctl_io *
3499 ctl_alloc_io(void *pool_ref)
3500 {
3501 	union ctl_io *io;
3502 	struct ctl_softc *ctl_softc;
3503 	struct ctl_io_pool *pool, *npool;
3504 	struct ctl_io_pool *emergency_pool;
3505 
3506 	pool = (struct ctl_io_pool *)pool_ref;
3507 
3508 	if (pool == NULL) {
3509 		printf("%s: pool is NULL\n", __func__);
3510 		return (NULL);
3511 	}
3512 
3513 	emergency_pool = NULL;
3514 
3515 	ctl_softc = pool->ctl_softc;
3516 
3517 	mtx_lock(&ctl_softc->ctl_lock);
3518 	/*
3519 	 * First, try to get the io structure from the user's pool.
3520 	 */
3521 	if (ctl_pool_acquire(pool) == 0) {
3522 		io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
3523 		if (io != NULL) {
3524 			STAILQ_REMOVE_HEAD(&pool->free_queue, links);
3525 			pool->total_allocated++;
3526 			pool->free_ctl_io--;
3527 			mtx_unlock(&ctl_softc->ctl_lock);
3528 			return (io);
3529 		} else
3530 			ctl_pool_release(pool);
3531 	}
3532 	/*
3533 	 * If he doesn't have any io structures left, search for an
3534 	 * emergency pool and grab one from there.
3535 	 */
3536 	STAILQ_FOREACH(npool, &ctl_softc->io_pools, links) {
3537 		if (npool->type != CTL_POOL_EMERGENCY)
3538 			continue;
3539 
3540 		if (ctl_pool_acquire(npool) != 0)
3541 			continue;
3542 
3543 		emergency_pool = npool;
3544 
3545 		io = (union ctl_io *)STAILQ_FIRST(&npool->free_queue);
3546 		if (io != NULL) {
3547 			STAILQ_REMOVE_HEAD(&npool->free_queue, links);
3548 			npool->total_allocated++;
3549 			npool->free_ctl_io--;
3550 			mtx_unlock(&ctl_softc->ctl_lock);
3551 			return (io);
3552 		} else
3553 			ctl_pool_release(npool);
3554 	}
3555 
3556 	/* Drop the spinlock before we malloc */
3557 	mtx_unlock(&ctl_softc->ctl_lock);
3558 
3559 	/*
3560 	 * The emergency pool (if it exists) didn't have one, so try an
3561 	 * atomic (i.e. nonblocking) malloc and see if we get lucky.
3562 	 */
3563 	io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT);
3564 	if (io != NULL) {
3565 		/*
3566 		 * If the emergency pool exists but is empty, add this
3567 		 * ctl_io to its list when it gets freed.
3568 		 */
3569 		if (emergency_pool != NULL) {
3570 			mtx_lock(&ctl_softc->ctl_lock);
3571 			if (ctl_pool_acquire(emergency_pool) == 0) {
3572 				io->io_hdr.pool = emergency_pool;
3573 				emergency_pool->total_ctl_io++;
3574 				/*
3575 				 * Need to bump this, otherwise
3576 				 * total_allocated and total_freed won't
3577 				 * match when we no longer have anything
3578 				 * outstanding.
3579 				 */
3580 				emergency_pool->total_allocated++;
3581 			}
3582 			mtx_unlock(&ctl_softc->ctl_lock);
3583 		} else
3584 			io->io_hdr.pool = NULL;
3585 	}
3586 
3587 	return (io);
3588 }
3589 
3590 static void
3591 ctl_free_io_internal(union ctl_io *io, int have_lock)
3592 {
3593 	if (io == NULL)
3594 		return;
3595 
3596 	/*
3597 	 * If this ctl_io has a pool, return it to that pool.
3598 	 */
3599 	if (io->io_hdr.pool != NULL) {
3600 		struct ctl_io_pool *pool;
3601 #if 0
3602 		struct ctl_softc *ctl_softc;
3603 		union ctl_io *tmp_io;
3604 		unsigned long xflags;
3605 		int i;
3606 
3607 		ctl_softc = control_softc;
3608 #endif
3609 
3610 		pool = (struct ctl_io_pool *)io->io_hdr.pool;
3611 
3612 		if (have_lock == 0)
3613 			mtx_lock(&pool->ctl_softc->ctl_lock);
3614 #if 0
3615 		save_flags(xflags);
3616 
3617 		for (i = 0, tmp_io = (union ctl_io *)STAILQ_FIRST(
3618 		     &ctl_softc->task_queue); tmp_io != NULL; i++,
3619 		     tmp_io = (union ctl_io *)STAILQ_NEXT(&tmp_io->io_hdr,
3620 		     links)) {
3621 			if (tmp_io == io) {
3622 				printf("%s: %p is still on the task queue!\n",
3623 				       __func__, tmp_io);
3624 				printf("%s: (%d): type %d "
3625 				       "msg %d cdb %x iptl: "
3626 				       "%d:%d:%d:%d tag 0x%04x "
3627 				       "flg %#lx\n",
3628 					__func__, i,
3629 					tmp_io->io_hdr.io_type,
3630 					tmp_io->io_hdr.msg_type,
3631 					tmp_io->scsiio.cdb[0],
3632 					tmp_io->io_hdr.nexus.initid.id,
3633 					tmp_io->io_hdr.nexus.targ_port,
3634 					tmp_io->io_hdr.nexus.targ_target.id,
3635 					tmp_io->io_hdr.nexus.targ_lun,
3636 					(tmp_io->io_hdr.io_type ==
3637 					CTL_IO_TASK) ?
3638 					tmp_io->taskio.tag_num :
3639 					tmp_io->scsiio.tag_num,
3640 					xflags);
3641 				panic("I/O still on the task queue!");
3642 			}
3643 		}
3644 #endif
3645 		io->io_hdr.io_type = 0xff;
3646 		STAILQ_INSERT_TAIL(&pool->free_queue, &io->io_hdr, links);
3647 		pool->total_freed++;
3648 		pool->free_ctl_io++;
3649 		ctl_pool_release(pool);
3650 		if (have_lock == 0)
3651 			mtx_unlock(&pool->ctl_softc->ctl_lock);
3652 	} else {
3653 		/*
3654 		 * Otherwise, just free it.  We probably malloced it and
3655 		 * the emergency pool wasn't available.
3656 		 */
3657 		free(io, M_CTL);
3658 	}
3659 
3660 }
3661 
3662 void
3663 ctl_free_io(union ctl_io *io)
3664 {
3665 	ctl_free_io_internal(io, /*have_lock*/ 0);
3666 }
3667 
3668 void
3669 ctl_zero_io(union ctl_io *io)
3670 {
3671 	void *pool_ref;
3672 
3673 	if (io == NULL)
3674 		return;
3675 
3676 	/*
3677 	 * May need to preserve linked list pointers at some point too.
3678 	 */
3679 	pool_ref = io->io_hdr.pool;
3680 
3681 	memset(io, 0, sizeof(*io));
3682 
3683 	io->io_hdr.pool = pool_ref;
3684 }
3685 
3686 /*
3687  * This routine is currently used for internal copies of ctl_ios that need
3688  * to persist for some reason after we've already returned status to the
3689  * FETD.  (Thus the flag set.)
3690  *
3691  * XXX XXX
3692  * Note that this makes a blind copy of all fields in the ctl_io, except
3693  * for the pool reference.  This includes any memory that has been
3694  * allocated!  That memory will no longer be valid after done has been
3695  * called, so this would be VERY DANGEROUS for command that actually does
3696  * any reads or writes.  Right now (11/7/2005), this is only used for immediate
3697  * start and stop commands, which don't transfer any data, so this is not a
3698  * problem.  If it is used for anything else, the caller would also need to
3699  * allocate data buffer space and this routine would need to be modified to
3700  * copy the data buffer(s) as well.
3701  */
3702 void
3703 ctl_copy_io(union ctl_io *src, union ctl_io *dest)
3704 {
3705 	void *pool_ref;
3706 
3707 	if ((src == NULL)
3708 	 || (dest == NULL))
3709 		return;
3710 
3711 	/*
3712 	 * May need to preserve linked list pointers at some point too.
3713 	 */
3714 	pool_ref = dest->io_hdr.pool;
3715 
3716 	memcpy(dest, src, ctl_min(sizeof(*src), sizeof(*dest)));
3717 
3718 	dest->io_hdr.pool = pool_ref;
3719 	/*
3720 	 * We need to know that this is an internal copy, and doesn't need
3721 	 * to get passed back to the FETD that allocated it.
3722 	 */
3723 	dest->io_hdr.flags |= CTL_FLAG_INT_COPY;
3724 }
3725 
3726 #ifdef NEEDTOPORT
3727 static void
3728 ctl_update_power_subpage(struct copan_power_subpage *page)
3729 {
3730 	int num_luns, num_partitions, config_type;
3731 	struct ctl_softc *softc;
3732 	cs_BOOL_t aor_present, shelf_50pct_power;
3733 	cs_raidset_personality_t rs_type;
3734 	int max_active_luns;
3735 
3736 	softc = control_softc;
3737 
3738 	/* subtract out the processor LUN */
3739 	num_luns = softc->num_luns - 1;
3740 	/*
3741 	 * Default to 7 LUNs active, which was the only number we allowed
3742 	 * in the past.
3743 	 */
3744 	max_active_luns = 7;
3745 
3746 	num_partitions = config_GetRsPartitionInfo();
3747 	config_type = config_GetConfigType();
3748 	shelf_50pct_power = config_GetShelfPowerMode();
3749 	aor_present = config_IsAorRsPresent();
3750 
3751 	rs_type = ddb_GetRsRaidType(1);
3752 	if ((rs_type != CS_RAIDSET_PERSONALITY_RAID5)
3753 	 && (rs_type != CS_RAIDSET_PERSONALITY_RAID1)) {
3754 		EPRINT(0, "Unsupported RS type %d!", rs_type);
3755 	}
3756 
3757 
3758 	page->total_luns = num_luns;
3759 
3760 	switch (config_type) {
3761 	case 40:
3762 		/*
3763 		 * In a 40 drive configuration, it doesn't matter what DC
3764 		 * cards we have, whether we have AOR enabled or not,
3765 		 * partitioning or not, or what type of RAIDset we have.
3766 		 * In that scenario, we can power up every LUN we present
3767 		 * to the user.
3768 		 */
3769 		max_active_luns = num_luns;
3770 
3771 		break;
3772 	case 64:
3773 		if (shelf_50pct_power == CS_FALSE) {
3774 			/* 25% power */
3775 			if (aor_present == CS_TRUE) {
3776 				if (rs_type ==
3777 				     CS_RAIDSET_PERSONALITY_RAID5) {
3778 					max_active_luns = 7;
3779 				} else if (rs_type ==
3780 					 CS_RAIDSET_PERSONALITY_RAID1){
3781 					max_active_luns = 14;
3782 				} else {
3783 					/* XXX KDM now what?? */
3784 				}
3785 			} else {
3786 				if (rs_type ==
3787 				     CS_RAIDSET_PERSONALITY_RAID5) {
3788 					max_active_luns = 8;
3789 				} else if (rs_type ==
3790 					 CS_RAIDSET_PERSONALITY_RAID1){
3791 					max_active_luns = 16;
3792 				} else {
3793 					/* XXX KDM now what?? */
3794 				}
3795 			}
3796 		} else {
3797 			/* 50% power */
3798 			/*
3799 			 * With 50% power in a 64 drive configuration, we
3800 			 * can power all LUNs we present.
3801 			 */
3802 			max_active_luns = num_luns;
3803 		}
3804 		break;
3805 	case 112:
3806 		if (shelf_50pct_power == CS_FALSE) {
3807 			/* 25% power */
3808 			if (aor_present == CS_TRUE) {
3809 				if (rs_type ==
3810 				     CS_RAIDSET_PERSONALITY_RAID5) {
3811 					max_active_luns = 7;
3812 				} else if (rs_type ==
3813 					 CS_RAIDSET_PERSONALITY_RAID1){
3814 					max_active_luns = 14;
3815 				} else {
3816 					/* XXX KDM now what?? */
3817 				}
3818 			} else {
3819 				if (rs_type ==
3820 				     CS_RAIDSET_PERSONALITY_RAID5) {
3821 					max_active_luns = 8;
3822 				} else if (rs_type ==
3823 					 CS_RAIDSET_PERSONALITY_RAID1){
3824 					max_active_luns = 16;
3825 				} else {
3826 					/* XXX KDM now what?? */
3827 				}
3828 			}
3829 		} else {
3830 			/* 50% power */
3831 			if (aor_present == CS_TRUE) {
3832 				if (rs_type ==
3833 				     CS_RAIDSET_PERSONALITY_RAID5) {
3834 					max_active_luns = 14;
3835 				} else if (rs_type ==
3836 					 CS_RAIDSET_PERSONALITY_RAID1){
3837 					/*
3838 					 * We're assuming here that disk
3839 					 * caching is enabled, and so we're
3840 					 * able to power up half of each
3841 					 * LUN, and cache all writes.
3842 					 */
3843 					max_active_luns = num_luns;
3844 				} else {
3845 					/* XXX KDM now what?? */
3846 				}
3847 			} else {
3848 				if (rs_type ==
3849 				     CS_RAIDSET_PERSONALITY_RAID5) {
3850 					max_active_luns = 15;
3851 				} else if (rs_type ==
3852 					 CS_RAIDSET_PERSONALITY_RAID1){
3853 					max_active_luns = 30;
3854 				} else {
3855 					/* XXX KDM now what?? */
3856 				}
3857 			}
3858 		}
3859 		break;
3860 	default:
3861 		/*
3862 		 * In this case, we have an unknown configuration, so we
3863 		 * just use the default from above.
3864 		 */
3865 		break;
3866 	}
3867 
3868 	page->max_active_luns = max_active_luns;
3869 #if 0
3870 	printk("%s: total_luns = %d, max_active_luns = %d\n", __func__,
3871 	       page->total_luns, page->max_active_luns);
3872 #endif
3873 }
3874 #endif /* NEEDTOPORT */
3875 
3876 /*
3877  * This routine could be used in the future to load default and/or saved
3878  * mode page parameters for a particuar lun.
3879  */
3880 static int
3881 ctl_init_page_index(struct ctl_lun *lun)
3882 {
3883 	int i;
3884 	struct ctl_page_index *page_index;
3885 	struct ctl_softc *softc;
3886 
3887 	memcpy(&lun->mode_pages.index, page_index_template,
3888 	       sizeof(page_index_template));
3889 
3890 	softc = lun->ctl_softc;
3891 
3892 	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
3893 
3894 		page_index = &lun->mode_pages.index[i];
3895 		/*
3896 		 * If this is a disk-only mode page, there's no point in
3897 		 * setting it up.  For some pages, we have to have some
3898 		 * basic information about the disk in order to calculate the
3899 		 * mode page data.
3900 		 */
3901 		if ((lun->be_lun->lun_type != T_DIRECT)
3902 		 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY))
3903 			continue;
3904 
3905 		switch (page_index->page_code & SMPH_PC_MASK) {
3906 		case SMS_FORMAT_DEVICE_PAGE: {
3907 			struct scsi_format_page *format_page;
3908 
3909 			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
3910 				panic("subpage is incorrect!");
3911 
3912 			/*
3913 			 * Sectors per track are set above.  Bytes per
3914 			 * sector need to be set here on a per-LUN basis.
3915 			 */
3916 			memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT],
3917 			       &format_page_default,
3918 			       sizeof(format_page_default));
3919 			memcpy(&lun->mode_pages.format_page[
3920 			       CTL_PAGE_CHANGEABLE], &format_page_changeable,
3921 			       sizeof(format_page_changeable));
3922 			memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT],
3923 			       &format_page_default,
3924 			       sizeof(format_page_default));
3925 			memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED],
3926 			       &format_page_default,
3927 			       sizeof(format_page_default));
3928 
3929 			format_page = &lun->mode_pages.format_page[
3930 				CTL_PAGE_CURRENT];
3931 			scsi_ulto2b(lun->be_lun->blocksize,
3932 				    format_page->bytes_per_sector);
3933 
3934 			format_page = &lun->mode_pages.format_page[
3935 				CTL_PAGE_DEFAULT];
3936 			scsi_ulto2b(lun->be_lun->blocksize,
3937 				    format_page->bytes_per_sector);
3938 
3939 			format_page = &lun->mode_pages.format_page[
3940 				CTL_PAGE_SAVED];
3941 			scsi_ulto2b(lun->be_lun->blocksize,
3942 				    format_page->bytes_per_sector);
3943 
3944 			page_index->page_data =
3945 				(uint8_t *)lun->mode_pages.format_page;
3946 			break;
3947 		}
3948 		case SMS_RIGID_DISK_PAGE: {
3949 			struct scsi_rigid_disk_page *rigid_disk_page;
3950 			uint32_t sectors_per_cylinder;
3951 			uint64_t cylinders;
3952 #ifndef	__XSCALE__
3953 			int shift;
3954 #endif /* !__XSCALE__ */
3955 
3956 			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
3957 				panic("invalid subpage value %d",
3958 				      page_index->subpage);
3959 
3960 			/*
3961 			 * Rotation rate and sectors per track are set
3962 			 * above.  We calculate the cylinders here based on
3963 			 * capacity.  Due to the number of heads and
3964 			 * sectors per track we're using, smaller arrays
3965 			 * may turn out to have 0 cylinders.  Linux and
3966 			 * FreeBSD don't pay attention to these mode pages
3967 			 * to figure out capacity, but Solaris does.  It
3968 			 * seems to deal with 0 cylinders just fine, and
3969 			 * works out a fake geometry based on the capacity.
3970 			 */
3971 			memcpy(&lun->mode_pages.rigid_disk_page[
3972 			       CTL_PAGE_CURRENT], &rigid_disk_page_default,
3973 			       sizeof(rigid_disk_page_default));
3974 			memcpy(&lun->mode_pages.rigid_disk_page[
3975 			       CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable,
3976 			       sizeof(rigid_disk_page_changeable));
3977 			memcpy(&lun->mode_pages.rigid_disk_page[
3978 			       CTL_PAGE_DEFAULT], &rigid_disk_page_default,
3979 			       sizeof(rigid_disk_page_default));
3980 			memcpy(&lun->mode_pages.rigid_disk_page[
3981 			       CTL_PAGE_SAVED], &rigid_disk_page_default,
3982 			       sizeof(rigid_disk_page_default));
3983 
3984 			sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK *
3985 				CTL_DEFAULT_HEADS;
3986 
3987 			/*
3988 			 * The divide method here will be more accurate,
3989 			 * probably, but results in floating point being
3990 			 * used in the kernel on i386 (__udivdi3()).  On the
3991 			 * XScale, though, __udivdi3() is implemented in
3992 			 * software.
3993 			 *
3994 			 * The shift method for cylinder calculation is
3995 			 * accurate if sectors_per_cylinder is a power of
3996 			 * 2.  Otherwise it might be slightly off -- you
3997 			 * might have a bit of a truncation problem.
3998 			 */
3999 #ifdef	__XSCALE__
4000 			cylinders = (lun->be_lun->maxlba + 1) /
4001 				sectors_per_cylinder;
4002 #else
4003 			for (shift = 31; shift > 0; shift--) {
4004 				if (sectors_per_cylinder & (1 << shift))
4005 					break;
4006 			}
4007 			cylinders = (lun->be_lun->maxlba + 1) >> shift;
4008 #endif
4009 
4010 			/*
4011 			 * We've basically got 3 bytes, or 24 bits for the
4012 			 * cylinder size in the mode page.  If we're over,
4013 			 * just round down to 2^24.
4014 			 */
4015 			if (cylinders > 0xffffff)
4016 				cylinders = 0xffffff;
4017 
4018 			rigid_disk_page = &lun->mode_pages.rigid_disk_page[
4019 				CTL_PAGE_CURRENT];
4020 			scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
4021 
4022 			rigid_disk_page = &lun->mode_pages.rigid_disk_page[
4023 				CTL_PAGE_DEFAULT];
4024 			scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
4025 
4026 			rigid_disk_page = &lun->mode_pages.rigid_disk_page[
4027 				CTL_PAGE_SAVED];
4028 			scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
4029 
4030 			page_index->page_data =
4031 				(uint8_t *)lun->mode_pages.rigid_disk_page;
4032 			break;
4033 		}
4034 		case SMS_CACHING_PAGE: {
4035 
4036 			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
4037 				panic("invalid subpage value %d",
4038 				      page_index->subpage);
4039 			/*
4040 			 * Defaults should be okay here, no calculations
4041 			 * needed.
4042 			 */
4043 			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT],
4044 			       &caching_page_default,
4045 			       sizeof(caching_page_default));
4046 			memcpy(&lun->mode_pages.caching_page[
4047 			       CTL_PAGE_CHANGEABLE], &caching_page_changeable,
4048 			       sizeof(caching_page_changeable));
4049 			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT],
4050 			       &caching_page_default,
4051 			       sizeof(caching_page_default));
4052 			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED],
4053 			       &caching_page_default,
4054 			       sizeof(caching_page_default));
4055 			page_index->page_data =
4056 				(uint8_t *)lun->mode_pages.caching_page;
4057 			break;
4058 		}
4059 		case SMS_CONTROL_MODE_PAGE: {
4060 
4061 			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
4062 				panic("invalid subpage value %d",
4063 				      page_index->subpage);
4064 
4065 			/*
4066 			 * Defaults should be okay here, no calculations
4067 			 * needed.
4068 			 */
4069 			memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT],
4070 			       &control_page_default,
4071 			       sizeof(control_page_default));
4072 			memcpy(&lun->mode_pages.control_page[
4073 			       CTL_PAGE_CHANGEABLE], &control_page_changeable,
4074 			       sizeof(control_page_changeable));
4075 			memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT],
4076 			       &control_page_default,
4077 			       sizeof(control_page_default));
4078 			memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED],
4079 			       &control_page_default,
4080 			       sizeof(control_page_default));
4081 			page_index->page_data =
4082 				(uint8_t *)lun->mode_pages.control_page;
4083 			break;
4084 
4085 		}
4086 		case SMS_VENDOR_SPECIFIC_PAGE:{
4087 			switch (page_index->subpage) {
4088 			case PWR_SUBPAGE_CODE: {
4089 				struct copan_power_subpage *current_page,
4090 							   *saved_page;
4091 
4092 				memcpy(&lun->mode_pages.power_subpage[
4093 				       CTL_PAGE_CURRENT],
4094 				       &power_page_default,
4095 				       sizeof(power_page_default));
4096 				memcpy(&lun->mode_pages.power_subpage[
4097 				       CTL_PAGE_CHANGEABLE],
4098 				       &power_page_changeable,
4099 				       sizeof(power_page_changeable));
4100 				memcpy(&lun->mode_pages.power_subpage[
4101 				       CTL_PAGE_DEFAULT],
4102 				       &power_page_default,
4103 				       sizeof(power_page_default));
4104 				memcpy(&lun->mode_pages.power_subpage[
4105 				       CTL_PAGE_SAVED],
4106 				       &power_page_default,
4107 				       sizeof(power_page_default));
4108 				page_index->page_data =
4109 				    (uint8_t *)lun->mode_pages.power_subpage;
4110 
4111 				current_page = (struct copan_power_subpage *)
4112 					(page_index->page_data +
4113 					 (page_index->page_len *
4114 					  CTL_PAGE_CURRENT));
4115 			        saved_page = (struct copan_power_subpage *)
4116 				        (page_index->page_data +
4117 					 (page_index->page_len *
4118 					  CTL_PAGE_SAVED));
4119 				break;
4120 			}
4121 			case APS_SUBPAGE_CODE: {
4122 				struct copan_aps_subpage *current_page,
4123 							 *saved_page;
4124 
4125 				// This gets set multiple times but
4126 				// it should always be the same. It's
4127 				// only done during init so who cares.
4128 				index_to_aps_page = i;
4129 
4130 				memcpy(&lun->mode_pages.aps_subpage[
4131 				       CTL_PAGE_CURRENT],
4132 				       &aps_page_default,
4133 				       sizeof(aps_page_default));
4134 				memcpy(&lun->mode_pages.aps_subpage[
4135 				       CTL_PAGE_CHANGEABLE],
4136 				       &aps_page_changeable,
4137 				       sizeof(aps_page_changeable));
4138 				memcpy(&lun->mode_pages.aps_subpage[
4139 				       CTL_PAGE_DEFAULT],
4140 				       &aps_page_default,
4141 				       sizeof(aps_page_default));
4142 				memcpy(&lun->mode_pages.aps_subpage[
4143 				       CTL_PAGE_SAVED],
4144 				       &aps_page_default,
4145 				       sizeof(aps_page_default));
4146 				page_index->page_data =
4147 					(uint8_t *)lun->mode_pages.aps_subpage;
4148 
4149 				current_page = (struct copan_aps_subpage *)
4150 					(page_index->page_data +
4151 					 (page_index->page_len *
4152 					  CTL_PAGE_CURRENT));
4153 				saved_page = (struct copan_aps_subpage *)
4154 					(page_index->page_data +
4155 					 (page_index->page_len *
4156 					  CTL_PAGE_SAVED));
4157 				break;
4158 			}
4159 			case DBGCNF_SUBPAGE_CODE: {
4160 				struct copan_debugconf_subpage *current_page,
4161 							       *saved_page;
4162 
4163 				memcpy(&lun->mode_pages.debugconf_subpage[
4164 				       CTL_PAGE_CURRENT],
4165 				       &debugconf_page_default,
4166 				       sizeof(debugconf_page_default));
4167 				memcpy(&lun->mode_pages.debugconf_subpage[
4168 				       CTL_PAGE_CHANGEABLE],
4169 				       &debugconf_page_changeable,
4170 				       sizeof(debugconf_page_changeable));
4171 				memcpy(&lun->mode_pages.debugconf_subpage[
4172 				       CTL_PAGE_DEFAULT],
4173 				       &debugconf_page_default,
4174 				       sizeof(debugconf_page_default));
4175 				memcpy(&lun->mode_pages.debugconf_subpage[
4176 				       CTL_PAGE_SAVED],
4177 				       &debugconf_page_default,
4178 				       sizeof(debugconf_page_default));
4179 				page_index->page_data =
4180 					(uint8_t *)lun->mode_pages.debugconf_subpage;
4181 
4182 				current_page = (struct copan_debugconf_subpage *)
4183 					(page_index->page_data +
4184 					 (page_index->page_len *
4185 					  CTL_PAGE_CURRENT));
4186 				saved_page = (struct copan_debugconf_subpage *)
4187 					(page_index->page_data +
4188 					 (page_index->page_len *
4189 					  CTL_PAGE_SAVED));
4190 				break;
4191 			}
4192 			default:
4193 				panic("invalid subpage value %d",
4194 				      page_index->subpage);
4195 				break;
4196 			}
4197    			break;
4198 		}
4199 		default:
4200 			panic("invalid page value %d",
4201 			      page_index->page_code & SMPH_PC_MASK);
4202 			break;
4203     	}
4204 	}
4205 
4206 	return (CTL_RETVAL_COMPLETE);
4207 }
4208 
4209 /*
4210  * LUN allocation.
4211  *
4212  * Requirements:
4213  * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he
4214  *   wants us to allocate the LUN and he can block.
4215  * - ctl_softc is always set
4216  * - be_lun is set if the LUN has a backend (needed for disk LUNs)
4217  *
4218  * Returns 0 for success, non-zero (errno) for failure.
4219  */
4220 static int
4221 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
4222 	      struct ctl_be_lun *const be_lun, struct ctl_id target_id)
4223 {
4224 	struct ctl_lun *nlun, *lun;
4225 	struct ctl_frontend *fe;
4226 	int lun_number, i, lun_malloced;
4227 
4228 	if (be_lun == NULL)
4229 		return (EINVAL);
4230 
4231 	/*
4232 	 * We currently only support Direct Access or Processor LUN types.
4233 	 */
4234 	switch (be_lun->lun_type) {
4235 	case T_DIRECT:
4236 		break;
4237 	case T_PROCESSOR:
4238 		break;
4239 	case T_SEQUENTIAL:
4240 	case T_CHANGER:
4241 	default:
4242 		be_lun->lun_config_status(be_lun->be_lun,
4243 					  CTL_LUN_CONFIG_FAILURE);
4244 		break;
4245 	}
4246 	if (ctl_lun == NULL) {
4247 		lun = malloc(sizeof(*lun), M_CTL, M_WAITOK);
4248 		lun_malloced = 1;
4249 	} else {
4250 		lun_malloced = 0;
4251 		lun = ctl_lun;
4252 	}
4253 
4254 	memset(lun, 0, sizeof(*lun));
4255 	if (lun_malloced)
4256 		lun->flags = CTL_LUN_MALLOCED;
4257 
4258 	mtx_lock(&ctl_softc->ctl_lock);
4259 	/*
4260 	 * See if the caller requested a particular LUN number.  If so, see
4261 	 * if it is available.  Otherwise, allocate the first available LUN.
4262 	 */
4263 	if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) {
4264 		if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1))
4265 		 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) {
4266 			mtx_unlock(&ctl_softc->ctl_lock);
4267 			if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) {
4268 				printf("ctl: requested LUN ID %d is higher "
4269 				       "than CTL_MAX_LUNS - 1 (%d)\n",
4270 				       be_lun->req_lun_id, CTL_MAX_LUNS - 1);
4271 			} else {
4272 				/*
4273 				 * XXX KDM return an error, or just assign
4274 				 * another LUN ID in this case??
4275 				 */
4276 				printf("ctl: requested LUN ID %d is already "
4277 				       "in use\n", be_lun->req_lun_id);
4278 			}
4279 			if (lun->flags & CTL_LUN_MALLOCED)
4280 				free(lun, M_CTL);
4281 			be_lun->lun_config_status(be_lun->be_lun,
4282 						  CTL_LUN_CONFIG_FAILURE);
4283 			return (ENOSPC);
4284 		}
4285 		lun_number = be_lun->req_lun_id;
4286 	} else {
4287 		lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS);
4288 		if (lun_number == -1) {
4289 			mtx_unlock(&ctl_softc->ctl_lock);
4290 			printf("ctl: can't allocate LUN on target %ju, out of "
4291 			       "LUNs\n", (uintmax_t)target_id.id);
4292 			if (lun->flags & CTL_LUN_MALLOCED)
4293 				free(lun, M_CTL);
4294 			be_lun->lun_config_status(be_lun->be_lun,
4295 						  CTL_LUN_CONFIG_FAILURE);
4296 			return (ENOSPC);
4297 		}
4298 	}
4299 	ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number);
4300 
4301 	lun->target = target_id;
4302 	lun->lun = lun_number;
4303 	lun->be_lun = be_lun;
4304 	/*
4305 	 * The processor LUN is always enabled.  Disk LUNs come on line
4306 	 * disabled, and must be enabled by the backend.
4307 	 */
4308 	lun->flags |= CTL_LUN_DISABLED;
4309 	lun->backend = be_lun->be;
4310 	be_lun->ctl_lun = lun;
4311 	be_lun->lun_id = lun_number;
4312 	atomic_add_int(&be_lun->be->num_luns, 1);
4313 	if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF)
4314 		lun->flags |= CTL_LUN_STOPPED;
4315 
4316 	if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE)
4317 		lun->flags |= CTL_LUN_INOPERABLE;
4318 
4319 	if (be_lun->flags & CTL_LUN_FLAG_PRIMARY)
4320 		lun->flags |= CTL_LUN_PRIMARY_SC;
4321 
4322 	lun->ctl_softc = ctl_softc;
4323 	TAILQ_INIT(&lun->ooa_queue);
4324 	TAILQ_INIT(&lun->blocked_queue);
4325 	STAILQ_INIT(&lun->error_list);
4326 
4327 	/*
4328 	 * Initialize the mode page index.
4329 	 */
4330 	ctl_init_page_index(lun);
4331 
4332 	/*
4333 	 * Set the poweron UA for all initiators on this LUN only.
4334 	 */
4335 	for (i = 0; i < CTL_MAX_INITIATORS; i++)
4336 		lun->pending_sense[i].ua_pending = CTL_UA_POWERON;
4337 
4338 	/*
4339 	 * Now, before we insert this lun on the lun list, set the lun
4340 	 * inventory changed UA for all other luns.
4341 	 */
4342 	STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) {
4343 		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
4344 			nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE;
4345 		}
4346 	}
4347 
4348 	STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links);
4349 
4350 	ctl_softc->ctl_luns[lun_number] = lun;
4351 
4352 	ctl_softc->num_luns++;
4353 
4354 	/* Setup statistics gathering */
4355 	lun->stats.device_type = be_lun->lun_type;
4356 	lun->stats.lun_number = lun_number;
4357 	if (lun->stats.device_type == T_DIRECT)
4358 		lun->stats.blocksize = be_lun->blocksize;
4359 	else
4360 		lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE;
4361 	for (i = 0;i < CTL_MAX_PORTS;i++)
4362 		lun->stats.ports[i].targ_port = i;
4363 
4364 	mtx_unlock(&ctl_softc->ctl_lock);
4365 
4366 	lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK);
4367 
4368 	/*
4369 	 * Run through each registered FETD and bring it online if it isn't
4370 	 * already.  Enable the target ID if it hasn't been enabled, and
4371 	 * enable this particular LUN.
4372 	 */
4373 	STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) {
4374 		int retval;
4375 
4376 		/*
4377 		 * XXX KDM this only works for ONE TARGET ID.  We'll need
4378 		 * to do things differently if we go to a multiple target
4379 		 * ID scheme.
4380 		 */
4381 		if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) == 0) {
4382 
4383 			retval = fe->targ_enable(fe->targ_lun_arg, target_id);
4384 			if (retval != 0) {
4385 				printf("ctl_alloc_lun: FETD %s port %d "
4386 				       "returned error %d for targ_enable on "
4387 				       "target %ju\n", fe->port_name,
4388 				       fe->targ_port, retval,
4389 				       (uintmax_t)target_id.id);
4390 			} else
4391 				fe->status |= CTL_PORT_STATUS_TARG_ONLINE;
4392 		}
4393 
4394 		retval = fe->lun_enable(fe->targ_lun_arg, target_id,lun_number);
4395 		if (retval != 0) {
4396 			printf("ctl_alloc_lun: FETD %s port %d returned error "
4397 			       "%d for lun_enable on target %ju lun %d\n",
4398 			       fe->port_name, fe->targ_port, retval,
4399 			       (uintmax_t)target_id.id, lun_number);
4400 		} else
4401 			fe->status |= CTL_PORT_STATUS_LUN_ONLINE;
4402 	}
4403 	return (0);
4404 }
4405 
4406 /*
4407  * Delete a LUN.
4408  * Assumptions:
4409  * - caller holds ctl_softc->ctl_lock.
4410  * - LUN has already been marked invalid and any pending I/O has been taken
4411  *   care of.
4412  */
4413 static int
4414 ctl_free_lun(struct ctl_lun *lun)
4415 {
4416 	struct ctl_softc *softc;
4417 #if 0
4418 	struct ctl_frontend *fe;
4419 #endif
4420 	struct ctl_lun *nlun;
4421 	union ctl_io *io, *next_io;
4422 	int i;
4423 
4424 	softc = lun->ctl_softc;
4425 
4426 	STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links);
4427 
4428 	ctl_clear_mask(softc->ctl_lun_mask, lun->lun);
4429 
4430 	softc->ctl_luns[lun->lun] = NULL;
4431 
4432 	if (TAILQ_FIRST(&lun->ooa_queue) != NULL) {
4433 		printf("ctl_free_lun: aieee!! freeing a LUN with "
4434 		       "outstanding I/O!!\n");
4435 	}
4436 
4437 	/*
4438 	 * If we have anything pending on the RtR queue, remove it.
4439 	 */
4440 	for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue); io != NULL;
4441 	     io = next_io) {
4442 		next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
4443 		if ((io->io_hdr.nexus.targ_target.id == lun->target.id)
4444 		 && (io->io_hdr.nexus.targ_lun == lun->lun))
4445 			STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr,
4446 				      ctl_io_hdr, links);
4447 	}
4448 
4449 	/*
4450 	 * Then remove everything from the blocked queue.
4451 	 */
4452 	for (io = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); io != NULL;
4453 	     io = next_io) {
4454 		next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,blocked_links);
4455 		TAILQ_REMOVE(&lun->blocked_queue, &io->io_hdr, blocked_links);
4456 		io->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
4457 	}
4458 
4459 	/*
4460 	 * Now clear out the OOA queue, and free all the I/O.
4461 	 * XXX KDM should we notify the FETD here?  We probably need to
4462 	 * quiesce the LUN before deleting it.
4463 	 */
4464 	for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); io != NULL;
4465 	     io = next_io) {
4466 		next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, ooa_links);
4467 		TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
4468 		ctl_free_io_internal(io, /*have_lock*/ 1);
4469 	}
4470 
4471 	softc->num_luns--;
4472 
4473 	/*
4474 	 * XXX KDM this scheme only works for a single target/multiple LUN
4475 	 * setup.  It needs to be revamped for a multiple target scheme.
4476 	 *
4477 	 * XXX KDM this results in fe->lun_disable() getting called twice,
4478 	 * once when ctl_disable_lun() is called, and a second time here.
4479 	 * We really need to re-think the LUN disable semantics.  There
4480 	 * should probably be several steps/levels to LUN removal:
4481 	 *  - disable
4482 	 *  - invalidate
4483 	 *  - free
4484  	 *
4485 	 * Right now we only have a disable method when communicating to
4486 	 * the front end ports, at least for individual LUNs.
4487 	 */
4488 #if 0
4489 	STAILQ_FOREACH(fe, &softc->fe_list, links) {
4490 		int retval;
4491 
4492 		retval = fe->lun_disable(fe->targ_lun_arg, lun->target,
4493 					 lun->lun);
4494 		if (retval != 0) {
4495 			printf("ctl_free_lun: FETD %s port %d returned error "
4496 			       "%d for lun_disable on target %ju lun %jd\n",
4497 			       fe->port_name, fe->targ_port, retval,
4498 			       (uintmax_t)lun->target.id, (intmax_t)lun->lun);
4499 		}
4500 
4501 		if (STAILQ_FIRST(&softc->lun_list) == NULL) {
4502 			fe->status &= ~CTL_PORT_STATUS_LUN_ONLINE;
4503 
4504 			retval = fe->targ_disable(fe->targ_lun_arg,lun->target);
4505 			if (retval != 0) {
4506 				printf("ctl_free_lun: FETD %s port %d "
4507 				       "returned error %d for targ_disable on "
4508 				       "target %ju\n", fe->port_name,
4509 				       fe->targ_port, retval,
4510 				       (uintmax_t)lun->target.id);
4511 			} else
4512 				fe->status &= ~CTL_PORT_STATUS_TARG_ONLINE;
4513 
4514 			if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) != 0)
4515 				continue;
4516 
4517 #if 0
4518 			fe->port_offline(fe->onoff_arg);
4519 			fe->status &= ~CTL_PORT_STATUS_ONLINE;
4520 #endif
4521 		}
4522 	}
4523 #endif
4524 
4525 	/*
4526 	 * Tell the backend to free resources, if this LUN has a backend.
4527 	 */
4528 	atomic_subtract_int(&lun->be_lun->be->num_luns, 1);
4529 	lun->be_lun->lun_shutdown(lun->be_lun->be_lun);
4530 
4531 	if (lun->flags & CTL_LUN_MALLOCED)
4532 		free(lun, M_CTL);
4533 
4534 	STAILQ_FOREACH(nlun, &softc->lun_list, links) {
4535 		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
4536 			nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE;
4537 		}
4538 	}
4539 
4540 	return (0);
4541 }
4542 
4543 static void
4544 ctl_create_lun(struct ctl_be_lun *be_lun)
4545 {
4546 	struct ctl_softc *ctl_softc;
4547 
4548 	ctl_softc = control_softc;
4549 
4550 	/*
4551 	 * ctl_alloc_lun() should handle all potential failure cases.
4552 	 */
4553 	ctl_alloc_lun(ctl_softc, NULL, be_lun, ctl_softc->target);
4554 }
4555 
4556 int
4557 ctl_add_lun(struct ctl_be_lun *be_lun)
4558 {
4559 	struct ctl_softc *ctl_softc;
4560 
4561 	ctl_softc = control_softc;
4562 
4563 	mtx_lock(&ctl_softc->ctl_lock);
4564 	STAILQ_INSERT_TAIL(&ctl_softc->pending_lun_queue, be_lun, links);
4565 	mtx_unlock(&ctl_softc->ctl_lock);
4566 
4567 	ctl_wakeup_thread();
4568 
4569 	return (0);
4570 }
4571 
4572 int
4573 ctl_enable_lun(struct ctl_be_lun *be_lun)
4574 {
4575 	struct ctl_softc *ctl_softc;
4576 	struct ctl_frontend *fe, *nfe;
4577 	struct ctl_lun *lun;
4578 	int retval;
4579 
4580 	ctl_softc = control_softc;
4581 
4582 	lun = (struct ctl_lun *)be_lun->ctl_lun;
4583 
4584 	mtx_lock(&ctl_softc->ctl_lock);
4585 	if ((lun->flags & CTL_LUN_DISABLED) == 0) {
4586 		/*
4587 		 * eh?  Why did we get called if the LUN is already
4588 		 * enabled?
4589 		 */
4590 		mtx_unlock(&ctl_softc->ctl_lock);
4591 		return (0);
4592 	}
4593 	lun->flags &= ~CTL_LUN_DISABLED;
4594 
4595 	for (fe = STAILQ_FIRST(&ctl_softc->fe_list); fe != NULL; fe = nfe) {
4596 		nfe = STAILQ_NEXT(fe, links);
4597 
4598 		/*
4599 		 * Drop the lock while we call the FETD's enable routine.
4600 		 * This can lead to a callback into CTL (at least in the
4601 		 * case of the internal initiator frontend.
4602 		 */
4603 		mtx_unlock(&ctl_softc->ctl_lock);
4604 		retval = fe->lun_enable(fe->targ_lun_arg, lun->target,lun->lun);
4605 		mtx_lock(&ctl_softc->ctl_lock);
4606 		if (retval != 0) {
4607 			printf("%s: FETD %s port %d returned error "
4608 			       "%d for lun_enable on target %ju lun %jd\n",
4609 			       __func__, fe->port_name, fe->targ_port, retval,
4610 			       (uintmax_t)lun->target.id, (intmax_t)lun->lun);
4611 		}
4612 #if 0
4613 		 else {
4614             /* NOTE:  TODO:  why does lun enable affect port status? */
4615 			fe->status |= CTL_PORT_STATUS_LUN_ONLINE;
4616 		}
4617 #endif
4618 	}
4619 
4620 	mtx_unlock(&ctl_softc->ctl_lock);
4621 
4622 	return (0);
4623 }
4624 
4625 int
4626 ctl_disable_lun(struct ctl_be_lun *be_lun)
4627 {
4628 	struct ctl_softc *ctl_softc;
4629 	struct ctl_frontend *fe;
4630 	struct ctl_lun *lun;
4631 	int retval;
4632 
4633 	ctl_softc = control_softc;
4634 
4635 	lun = (struct ctl_lun *)be_lun->ctl_lun;
4636 
4637 	mtx_lock(&ctl_softc->ctl_lock);
4638 
4639 	if (lun->flags & CTL_LUN_DISABLED) {
4640 		mtx_unlock(&ctl_softc->ctl_lock);
4641 		return (0);
4642 	}
4643 	lun->flags |= CTL_LUN_DISABLED;
4644 
4645 	STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) {
4646 		mtx_unlock(&ctl_softc->ctl_lock);
4647 		/*
4648 		 * Drop the lock before we call the frontend's disable
4649 		 * routine, to avoid lock order reversals.
4650 		 *
4651 		 * XXX KDM what happens if the frontend list changes while
4652 		 * we're traversing it?  It's unlikely, but should be handled.
4653 		 */
4654 		retval = fe->lun_disable(fe->targ_lun_arg, lun->target,
4655 					 lun->lun);
4656 		mtx_lock(&ctl_softc->ctl_lock);
4657 		if (retval != 0) {
4658 			printf("ctl_alloc_lun: FETD %s port %d returned error "
4659 			       "%d for lun_disable on target %ju lun %jd\n",
4660 			       fe->port_name, fe->targ_port, retval,
4661 			       (uintmax_t)lun->target.id, (intmax_t)lun->lun);
4662 		}
4663 	}
4664 
4665 	mtx_unlock(&ctl_softc->ctl_lock);
4666 
4667 	return (0);
4668 }
4669 
4670 int
4671 ctl_start_lun(struct ctl_be_lun *be_lun)
4672 {
4673 	struct ctl_softc *ctl_softc;
4674 	struct ctl_lun *lun;
4675 
4676 	ctl_softc = control_softc;
4677 
4678 	lun = (struct ctl_lun *)be_lun->ctl_lun;
4679 
4680 	mtx_lock(&ctl_softc->ctl_lock);
4681 	lun->flags &= ~CTL_LUN_STOPPED;
4682 	mtx_unlock(&ctl_softc->ctl_lock);
4683 
4684 	return (0);
4685 }
4686 
4687 int
4688 ctl_stop_lun(struct ctl_be_lun *be_lun)
4689 {
4690 	struct ctl_softc *ctl_softc;
4691 	struct ctl_lun *lun;
4692 
4693 	ctl_softc = control_softc;
4694 
4695 	lun = (struct ctl_lun *)be_lun->ctl_lun;
4696 
4697 	mtx_lock(&ctl_softc->ctl_lock);
4698 	lun->flags |= CTL_LUN_STOPPED;
4699 	mtx_unlock(&ctl_softc->ctl_lock);
4700 
4701 	return (0);
4702 }
4703 
4704 int
4705 ctl_lun_offline(struct ctl_be_lun *be_lun)
4706 {
4707 	struct ctl_softc *ctl_softc;
4708 	struct ctl_lun *lun;
4709 
4710 	ctl_softc = control_softc;
4711 
4712 	lun = (struct ctl_lun *)be_lun->ctl_lun;
4713 
4714 	mtx_lock(&ctl_softc->ctl_lock);
4715 	lun->flags |= CTL_LUN_OFFLINE;
4716 	mtx_unlock(&ctl_softc->ctl_lock);
4717 
4718 	return (0);
4719 }
4720 
4721 int
4722 ctl_lun_online(struct ctl_be_lun *be_lun)
4723 {
4724 	struct ctl_softc *ctl_softc;
4725 	struct ctl_lun *lun;
4726 
4727 	ctl_softc = control_softc;
4728 
4729 	lun = (struct ctl_lun *)be_lun->ctl_lun;
4730 
4731 	mtx_lock(&ctl_softc->ctl_lock);
4732 	lun->flags &= ~CTL_LUN_OFFLINE;
4733 	mtx_unlock(&ctl_softc->ctl_lock);
4734 
4735 	return (0);
4736 }
4737 
4738 int
4739 ctl_invalidate_lun(struct ctl_be_lun *be_lun)
4740 {
4741 	struct ctl_softc *ctl_softc;
4742 	struct ctl_lun *lun;
4743 
4744 	ctl_softc = control_softc;
4745 
4746 	lun = (struct ctl_lun *)be_lun->ctl_lun;
4747 
4748 	mtx_lock(&ctl_softc->ctl_lock);
4749 
4750 	/*
4751 	 * The LUN needs to be disabled before it can be marked invalid.
4752 	 */
4753 	if ((lun->flags & CTL_LUN_DISABLED) == 0) {
4754 		mtx_unlock(&ctl_softc->ctl_lock);
4755 		return (-1);
4756 	}
4757 	/*
4758 	 * Mark the LUN invalid.
4759 	 */
4760 	lun->flags |= CTL_LUN_INVALID;
4761 
4762 	/*
4763 	 * If there is nothing in the OOA queue, go ahead and free the LUN.
4764 	 * If we have something in the OOA queue, we'll free it when the
4765 	 * last I/O completes.
4766 	 */
4767 	if (TAILQ_FIRST(&lun->ooa_queue) == NULL)
4768 		ctl_free_lun(lun);
4769 	mtx_unlock(&ctl_softc->ctl_lock);
4770 
4771 	return (0);
4772 }
4773 
4774 int
4775 ctl_lun_inoperable(struct ctl_be_lun *be_lun)
4776 {
4777 	struct ctl_softc *ctl_softc;
4778 	struct ctl_lun *lun;
4779 
4780 	ctl_softc = control_softc;
4781 	lun = (struct ctl_lun *)be_lun->ctl_lun;
4782 
4783 	mtx_lock(&ctl_softc->ctl_lock);
4784 	lun->flags |= CTL_LUN_INOPERABLE;
4785 	mtx_unlock(&ctl_softc->ctl_lock);
4786 
4787 	return (0);
4788 }
4789 
4790 int
4791 ctl_lun_operable(struct ctl_be_lun *be_lun)
4792 {
4793 	struct ctl_softc *ctl_softc;
4794 	struct ctl_lun *lun;
4795 
4796 	ctl_softc = control_softc;
4797 	lun = (struct ctl_lun *)be_lun->ctl_lun;
4798 
4799 	mtx_lock(&ctl_softc->ctl_lock);
4800 	lun->flags &= ~CTL_LUN_INOPERABLE;
4801 	mtx_unlock(&ctl_softc->ctl_lock);
4802 
4803 	return (0);
4804 }
4805 
4806 int
4807 ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus,
4808 		   int lock)
4809 {
4810 	struct ctl_softc *softc;
4811 	struct ctl_lun *lun;
4812 	struct copan_aps_subpage *current_sp;
4813 	struct ctl_page_index *page_index;
4814 	int i;
4815 
4816 	softc = control_softc;
4817 
4818 	mtx_lock(&softc->ctl_lock);
4819 
4820 	lun = (struct ctl_lun *)be_lun->ctl_lun;
4821 
4822 	page_index = NULL;
4823 	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
4824 		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) !=
4825 		     APS_PAGE_CODE)
4826 			continue;
4827 
4828 		if (lun->mode_pages.index[i].subpage != APS_SUBPAGE_CODE)
4829 			continue;
4830 		page_index = &lun->mode_pages.index[i];
4831 	}
4832 
4833 	if (page_index == NULL) {
4834 		mtx_unlock(&softc->ctl_lock);
4835 		printf("%s: APS subpage not found for lun %ju!\n", __func__,
4836 		       (uintmax_t)lun->lun);
4837 		return (1);
4838 	}
4839 #if 0
4840 	if ((softc->aps_locked_lun != 0)
4841 	 && (softc->aps_locked_lun != lun->lun)) {
4842 		printf("%s: attempt to lock LUN %llu when %llu is already "
4843 		       "locked\n");
4844 		mtx_unlock(&softc->ctl_lock);
4845 		return (1);
4846 	}
4847 #endif
4848 
4849 	current_sp = (struct copan_aps_subpage *)(page_index->page_data +
4850 		(page_index->page_len * CTL_PAGE_CURRENT));
4851 
4852 	if (lock != 0) {
4853 		current_sp->lock_active = APS_LOCK_ACTIVE;
4854 		softc->aps_locked_lun = lun->lun;
4855 	} else {
4856 		current_sp->lock_active = 0;
4857 		softc->aps_locked_lun = 0;
4858 	}
4859 
4860 
4861 	/*
4862 	 * If we're in HA mode, try to send the lock message to the other
4863 	 * side.
4864 	 */
4865 	if (ctl_is_single == 0) {
4866 		int isc_retval;
4867 		union ctl_ha_msg lock_msg;
4868 
4869 		lock_msg.hdr.nexus = *nexus;
4870 		lock_msg.hdr.msg_type = CTL_MSG_APS_LOCK;
4871 		if (lock != 0)
4872 			lock_msg.aps.lock_flag = 1;
4873 		else
4874 			lock_msg.aps.lock_flag = 0;
4875 		isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &lock_msg,
4876 					 sizeof(lock_msg), 0);
4877 		if (isc_retval > CTL_HA_STATUS_SUCCESS) {
4878 			printf("%s: APS (lock=%d) error returned from "
4879 			       "ctl_ha_msg_send: %d\n", __func__, lock, isc_retval);
4880 			mtx_unlock(&softc->ctl_lock);
4881 			return (1);
4882 		}
4883 	}
4884 
4885 	mtx_unlock(&softc->ctl_lock);
4886 
4887 	return (0);
4888 }
4889 
4890 void
4891 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
4892 {
4893 	struct ctl_lun *lun;
4894 	struct ctl_softc *softc;
4895 	int i;
4896 
4897 	softc = control_softc;
4898 
4899 	mtx_lock(&softc->ctl_lock);
4900 
4901 	lun = (struct ctl_lun *)be_lun->ctl_lun;
4902 
4903 	for (i = 0; i < CTL_MAX_INITIATORS; i++)
4904 		lun->pending_sense[i].ua_pending |= CTL_UA_CAPACITY_CHANGED;
4905 
4906 	mtx_unlock(&softc->ctl_lock);
4907 }
4908 
4909 /*
4910  * Backend "memory move is complete" callback for requests that never
4911  * make it down to say RAIDCore's configuration code.
4912  */
4913 int
4914 ctl_config_move_done(union ctl_io *io)
4915 {
4916 	int retval;
4917 
4918 	retval = CTL_RETVAL_COMPLETE;
4919 
4920 
4921 	CTL_DEBUG_PRINT(("ctl_config_move_done\n"));
4922 	/*
4923 	 * XXX KDM this shouldn't happen, but what if it does?
4924 	 */
4925 	if (io->io_hdr.io_type != CTL_IO_SCSI)
4926 		panic("I/O type isn't CTL_IO_SCSI!");
4927 
4928 	if ((io->io_hdr.port_status == 0)
4929 	 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
4930 	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
4931 		io->io_hdr.status = CTL_SUCCESS;
4932 	else if ((io->io_hdr.port_status != 0)
4933 	      && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
4934 	      && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)){
4935 		/*
4936 		 * For hardware error sense keys, the sense key
4937 		 * specific value is defined to be a retry count,
4938 		 * but we use it to pass back an internal FETD
4939 		 * error code.  XXX KDM  Hopefully the FETD is only
4940 		 * using 16 bits for an error code, since that's
4941 		 * all the space we have in the sks field.
4942 		 */
4943 		ctl_set_internal_failure(&io->scsiio,
4944 					 /*sks_valid*/ 1,
4945 					 /*retry_count*/
4946 					 io->io_hdr.port_status);
4947 		free(io->scsiio.kern_data_ptr, M_CTL);
4948 		ctl_done(io);
4949 		goto bailout;
4950 	}
4951 
4952 	if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
4953 	 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
4954 	 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) {
4955 		/*
4956 		 * XXX KDM just assuming a single pointer here, and not a
4957 		 * S/G list.  If we start using S/G lists for config data,
4958 		 * we'll need to know how to clean them up here as well.
4959 		 */
4960 		free(io->scsiio.kern_data_ptr, M_CTL);
4961 		/* Hopefully the user has already set the status... */
4962 		ctl_done(io);
4963 	} else {
4964 		/*
4965 		 * XXX KDM now we need to continue data movement.  Some
4966 		 * options:
4967 		 * - call ctl_scsiio() again?  We don't do this for data
4968 		 *   writes, because for those at least we know ahead of
4969 		 *   time where the write will go and how long it is.  For
4970 		 *   config writes, though, that information is largely
4971 		 *   contained within the write itself, thus we need to
4972 		 *   parse out the data again.
4973 		 *
4974 		 * - Call some other function once the data is in?
4975 		 */
4976 
4977 		/*
4978 		 * XXX KDM call ctl_scsiio() again for now, and check flag
4979 		 * bits to see whether we're allocated or not.
4980 		 */
4981 		retval = ctl_scsiio(&io->scsiio);
4982 	}
4983 bailout:
4984 	return (retval);
4985 }
4986 
4987 /*
4988  * This gets called by a backend driver when it is done with a
4989  * configuration write.
4990  */
4991 void
4992 ctl_config_write_done(union ctl_io *io)
4993 {
4994 	/*
4995 	 * If the IO_CONT flag is set, we need to call the supplied
4996 	 * function to continue processing the I/O, instead of completing
4997 	 * the I/O just yet.
4998 	 *
4999 	 * If there is an error, though, we don't want to keep processing.
5000 	 * Instead, just send status back to the initiator.
5001 	 */
5002 	if ((io->io_hdr.flags & CTL_FLAG_IO_CONT)
5003 	 && (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)
5004 	  || ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS))) {
5005 		io->scsiio.io_cont(io);
5006 		return;
5007 	}
5008 	/*
5009 	 * Since a configuration write can be done for commands that actually
5010 	 * have data allocated, like write buffer, and commands that have
5011 	 * no data, like start/stop unit, we need to check here.
5012 	 */
5013 	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT)
5014 		free(io->scsiio.kern_data_ptr, M_CTL);
5015 	ctl_done(io);
5016 }
5017 
5018 /*
5019  * SCSI release command.
5020  */
5021 int
5022 ctl_scsi_release(struct ctl_scsiio *ctsio)
5023 {
5024 	int length, longid, thirdparty_id, resv_id;
5025 	struct ctl_softc *ctl_softc;
5026 	struct ctl_lun *lun;
5027 
5028 	length = 0;
5029 	resv_id = 0;
5030 
5031 	CTL_DEBUG_PRINT(("ctl_scsi_release\n"));
5032 
5033 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5034 	ctl_softc = control_softc;
5035 
5036 	switch (ctsio->cdb[0]) {
5037 	case RELEASE: {
5038 		struct scsi_release *cdb;
5039 
5040 		cdb = (struct scsi_release *)ctsio->cdb;
5041 		if ((cdb->byte2 & 0x1f) != 0) {
5042 			ctl_set_invalid_field(ctsio,
5043 					      /*sks_valid*/ 1,
5044 					      /*command*/ 1,
5045 					      /*field*/ 1,
5046 					      /*bit_valid*/ 0,
5047 					      /*bit*/ 0);
5048 			ctl_done((union ctl_io *)ctsio);
5049 			return (CTL_RETVAL_COMPLETE);
5050 		}
5051 		break;
5052 	}
5053 	case RELEASE_10: {
5054 		struct scsi_release_10 *cdb;
5055 
5056 		cdb = (struct scsi_release_10 *)ctsio->cdb;
5057 
5058 		if ((cdb->byte2 & SR10_EXTENT) != 0) {
5059 			ctl_set_invalid_field(ctsio,
5060 					      /*sks_valid*/ 1,
5061 					      /*command*/ 1,
5062 					      /*field*/ 1,
5063 					      /*bit_valid*/ 1,
5064 					      /*bit*/ 0);
5065 			ctl_done((union ctl_io *)ctsio);
5066 			return (CTL_RETVAL_COMPLETE);
5067 
5068 		}
5069 
5070 		if ((cdb->byte2 & SR10_3RDPTY) != 0) {
5071 			ctl_set_invalid_field(ctsio,
5072 					      /*sks_valid*/ 1,
5073 					      /*command*/ 1,
5074 					      /*field*/ 1,
5075 					      /*bit_valid*/ 1,
5076 					      /*bit*/ 4);
5077 			ctl_done((union ctl_io *)ctsio);
5078 			return (CTL_RETVAL_COMPLETE);
5079 		}
5080 
5081 		if (cdb->byte2 & SR10_LONGID)
5082 			longid = 1;
5083 		else
5084 			thirdparty_id = cdb->thirdparty_id;
5085 
5086 		resv_id = cdb->resv_id;
5087 		length = scsi_2btoul(cdb->length);
5088 		break;
5089 	}
5090 	}
5091 
5092 
5093 	/*
5094 	 * XXX KDM right now, we only support LUN reservation.  We don't
5095 	 * support 3rd party reservations, or extent reservations, which
5096 	 * might actually need the parameter list.  If we've gotten this
5097 	 * far, we've got a LUN reservation.  Anything else got kicked out
5098 	 * above.  So, according to SPC, ignore the length.
5099 	 */
5100 	length = 0;
5101 
5102 	if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
5103 	 && (length > 0)) {
5104 		ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
5105 		ctsio->kern_data_len = length;
5106 		ctsio->kern_total_len = length;
5107 		ctsio->kern_data_resid = 0;
5108 		ctsio->kern_rel_offset = 0;
5109 		ctsio->kern_sg_entries = 0;
5110 		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5111 		ctsio->be_move_done = ctl_config_move_done;
5112 		ctl_datamove((union ctl_io *)ctsio);
5113 
5114 		return (CTL_RETVAL_COMPLETE);
5115 	}
5116 
5117 	if (length > 0)
5118 		thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
5119 
5120 	mtx_lock(&ctl_softc->ctl_lock);
5121 
5122 	/*
5123 	 * According to SPC, it is not an error for an intiator to attempt
5124 	 * to release a reservation on a LUN that isn't reserved, or that
5125 	 * is reserved by another initiator.  The reservation can only be
5126 	 * released, though, by the initiator who made it or by one of
5127 	 * several reset type events.
5128 	 */
5129 	if (lun->flags & CTL_LUN_RESERVED) {
5130 		if ((ctsio->io_hdr.nexus.initid.id == lun->rsv_nexus.initid.id)
5131 		 && (ctsio->io_hdr.nexus.targ_port == lun->rsv_nexus.targ_port)
5132 		 && (ctsio->io_hdr.nexus.targ_target.id ==
5133 		     lun->rsv_nexus.targ_target.id)) {
5134 			lun->flags &= ~CTL_LUN_RESERVED;
5135 		}
5136 	}
5137 
5138 	ctsio->scsi_status = SCSI_STATUS_OK;
5139 	ctsio->io_hdr.status = CTL_SUCCESS;
5140 
5141 	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5142 		free(ctsio->kern_data_ptr, M_CTL);
5143 		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5144 	}
5145 
5146 	mtx_unlock(&ctl_softc->ctl_lock);
5147 
5148 	ctl_done((union ctl_io *)ctsio);
5149 	return (CTL_RETVAL_COMPLETE);
5150 }
5151 
5152 int
5153 ctl_scsi_reserve(struct ctl_scsiio *ctsio)
5154 {
5155 	int extent, thirdparty, longid;
5156 	int resv_id, length;
5157 	uint64_t thirdparty_id;
5158 	struct ctl_softc *ctl_softc;
5159 	struct ctl_lun *lun;
5160 
5161 	extent = 0;
5162 	thirdparty = 0;
5163 	longid = 0;
5164 	resv_id = 0;
5165 	length = 0;
5166 	thirdparty_id = 0;
5167 
5168 	CTL_DEBUG_PRINT(("ctl_reserve\n"));
5169 
5170 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5171 	ctl_softc = control_softc;
5172 
5173 	switch (ctsio->cdb[0]) {
5174 	case RESERVE: {
5175 		struct scsi_reserve *cdb;
5176 
5177 		cdb = (struct scsi_reserve *)ctsio->cdb;
5178 		if ((cdb->byte2 & 0x1f) != 0) {
5179 			ctl_set_invalid_field(ctsio,
5180 					      /*sks_valid*/ 1,
5181 					      /*command*/ 1,
5182 					      /*field*/ 1,
5183 					      /*bit_valid*/ 0,
5184 					      /*bit*/ 0);
5185 			ctl_done((union ctl_io *)ctsio);
5186 			return (CTL_RETVAL_COMPLETE);
5187 		}
5188 		resv_id = cdb->resv_id;
5189 		length = scsi_2btoul(cdb->length);
5190 		break;
5191 	}
5192 	case RESERVE_10: {
5193 		struct scsi_reserve_10 *cdb;
5194 
5195 		cdb = (struct scsi_reserve_10 *)ctsio->cdb;
5196 
5197 		if ((cdb->byte2 & SR10_EXTENT) != 0) {
5198 			ctl_set_invalid_field(ctsio,
5199 					      /*sks_valid*/ 1,
5200 					      /*command*/ 1,
5201 					      /*field*/ 1,
5202 					      /*bit_valid*/ 1,
5203 					      /*bit*/ 0);
5204 			ctl_done((union ctl_io *)ctsio);
5205 			return (CTL_RETVAL_COMPLETE);
5206 		}
5207 		if ((cdb->byte2 & SR10_3RDPTY) != 0) {
5208 			ctl_set_invalid_field(ctsio,
5209 					      /*sks_valid*/ 1,
5210 					      /*command*/ 1,
5211 					      /*field*/ 1,
5212 					      /*bit_valid*/ 1,
5213 					      /*bit*/ 4);
5214 			ctl_done((union ctl_io *)ctsio);
5215 			return (CTL_RETVAL_COMPLETE);
5216 		}
5217 		if (cdb->byte2 & SR10_LONGID)
5218 			longid = 1;
5219 		else
5220 			thirdparty_id = cdb->thirdparty_id;
5221 
5222 		resv_id = cdb->resv_id;
5223 		length = scsi_2btoul(cdb->length);
5224 		break;
5225 	}
5226 	}
5227 
5228 	/*
5229 	 * XXX KDM right now, we only support LUN reservation.  We don't
5230 	 * support 3rd party reservations, or extent reservations, which
5231 	 * might actually need the parameter list.  If we've gotten this
5232 	 * far, we've got a LUN reservation.  Anything else got kicked out
5233 	 * above.  So, according to SPC, ignore the length.
5234 	 */
5235 	length = 0;
5236 
5237 	if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
5238 	 && (length > 0)) {
5239 		ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
5240 		ctsio->kern_data_len = length;
5241 		ctsio->kern_total_len = length;
5242 		ctsio->kern_data_resid = 0;
5243 		ctsio->kern_rel_offset = 0;
5244 		ctsio->kern_sg_entries = 0;
5245 		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5246 		ctsio->be_move_done = ctl_config_move_done;
5247 		ctl_datamove((union ctl_io *)ctsio);
5248 
5249 		return (CTL_RETVAL_COMPLETE);
5250 	}
5251 
5252 	if (length > 0)
5253 		thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
5254 
5255 	mtx_lock(&ctl_softc->ctl_lock);
5256 	if (lun->flags & CTL_LUN_RESERVED) {
5257 		if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id)
5258 		 || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port)
5259 		 || (ctsio->io_hdr.nexus.targ_target.id !=
5260 		     lun->rsv_nexus.targ_target.id)) {
5261 			ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
5262 			ctsio->io_hdr.status = CTL_SCSI_ERROR;
5263 			goto bailout;
5264 		}
5265 	}
5266 
5267 	lun->flags |= CTL_LUN_RESERVED;
5268 	lun->rsv_nexus = ctsio->io_hdr.nexus;
5269 
5270 	ctsio->scsi_status = SCSI_STATUS_OK;
5271 	ctsio->io_hdr.status = CTL_SUCCESS;
5272 
5273 bailout:
5274 	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5275 		free(ctsio->kern_data_ptr, M_CTL);
5276 		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5277 	}
5278 
5279 	mtx_unlock(&ctl_softc->ctl_lock);
5280 
5281 	ctl_done((union ctl_io *)ctsio);
5282 	return (CTL_RETVAL_COMPLETE);
5283 }
5284 
5285 int
5286 ctl_start_stop(struct ctl_scsiio *ctsio)
5287 {
5288 	struct scsi_start_stop_unit *cdb;
5289 	struct ctl_lun *lun;
5290 	struct ctl_softc *ctl_softc;
5291 	int retval;
5292 
5293 	CTL_DEBUG_PRINT(("ctl_start_stop\n"));
5294 
5295 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5296 	ctl_softc = control_softc;
5297 	retval = 0;
5298 
5299 	cdb = (struct scsi_start_stop_unit *)ctsio->cdb;
5300 
5301 	/*
5302 	 * XXX KDM
5303 	 * We don't support the immediate bit on a stop unit.  In order to
5304 	 * do that, we would need to code up a way to know that a stop is
5305 	 * pending, and hold off any new commands until it completes, one
5306 	 * way or another.  Then we could accept or reject those commands
5307 	 * depending on its status.  We would almost need to do the reverse
5308 	 * of what we do below for an immediate start -- return the copy of
5309 	 * the ctl_io to the FETD with status to send to the host (and to
5310 	 * free the copy!) and then free the original I/O once the stop
5311 	 * actually completes.  That way, the OOA queue mechanism can work
5312 	 * to block commands that shouldn't proceed.  Another alternative
5313 	 * would be to put the copy in the queue in place of the original,
5314 	 * and return the original back to the caller.  That could be
5315 	 * slightly safer..
5316 	 */
5317 	if ((cdb->byte2 & SSS_IMMED)
5318 	 && ((cdb->how & SSS_START) == 0)) {
5319 		ctl_set_invalid_field(ctsio,
5320 				      /*sks_valid*/ 1,
5321 				      /*command*/ 1,
5322 				      /*field*/ 1,
5323 				      /*bit_valid*/ 1,
5324 				      /*bit*/ 0);
5325 		ctl_done((union ctl_io *)ctsio);
5326 		return (CTL_RETVAL_COMPLETE);
5327 	}
5328 
5329 	/*
5330 	 * We don't support the power conditions field.  We need to check
5331 	 * this prior to checking the load/eject and start/stop bits.
5332 	 */
5333 	if ((cdb->how & SSS_PC_MASK) != SSS_PC_START_VALID) {
5334 		ctl_set_invalid_field(ctsio,
5335 				      /*sks_valid*/ 1,
5336 				      /*command*/ 1,
5337 				      /*field*/ 4,
5338 				      /*bit_valid*/ 1,
5339 				      /*bit*/ 4);
5340 		ctl_done((union ctl_io *)ctsio);
5341 		return (CTL_RETVAL_COMPLETE);
5342 	}
5343 
5344 	/*
5345 	 * Media isn't removable, so we can't load or eject it.
5346 	 */
5347 	if ((cdb->how & SSS_LOEJ) != 0) {
5348 		ctl_set_invalid_field(ctsio,
5349 				      /*sks_valid*/ 1,
5350 				      /*command*/ 1,
5351 				      /*field*/ 4,
5352 				      /*bit_valid*/ 1,
5353 				      /*bit*/ 1);
5354 		ctl_done((union ctl_io *)ctsio);
5355 		return (CTL_RETVAL_COMPLETE);
5356 	}
5357 
5358 	if ((lun->flags & CTL_LUN_PR_RESERVED)
5359 	 && ((cdb->how & SSS_START)==0)) {
5360 		uint32_t residx;
5361 
5362 		residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
5363 		if (!lun->per_res[residx].registered
5364 		 || (lun->pr_res_idx!=residx && lun->res_type < 4)) {
5365 
5366 			ctl_set_reservation_conflict(ctsio);
5367 			ctl_done((union ctl_io *)ctsio);
5368 			return (CTL_RETVAL_COMPLETE);
5369 		}
5370 	}
5371 
5372 	/*
5373 	 * If there is no backend on this device, we can't start or stop
5374 	 * it.  In theory we shouldn't get any start/stop commands in the
5375 	 * first place at this level if the LUN doesn't have a backend.
5376 	 * That should get stopped by the command decode code.
5377 	 */
5378 	if (lun->backend == NULL) {
5379 		ctl_set_invalid_opcode(ctsio);
5380 		ctl_done((union ctl_io *)ctsio);
5381 		return (CTL_RETVAL_COMPLETE);
5382 	}
5383 
5384 	/*
5385 	 * XXX KDM Copan-specific offline behavior.
5386 	 * Figure out a reasonable way to port this?
5387 	 */
5388 #ifdef NEEDTOPORT
5389 	mtx_lock(&ctl_softc->ctl_lock);
5390 
5391 	if (((cdb->byte2 & SSS_ONOFFLINE) == 0)
5392 	 && (lun->flags & CTL_LUN_OFFLINE)) {
5393 		/*
5394 		 * If the LUN is offline, and the on/offline bit isn't set,
5395 		 * reject the start or stop.  Otherwise, let it through.
5396 		 */
5397 		mtx_unlock(&ctl_softc->ctl_lock);
5398 		ctl_set_lun_not_ready(ctsio);
5399 		ctl_done((union ctl_io *)ctsio);
5400 	} else {
5401 		mtx_unlock(&ctl_softc->ctl_lock);
5402 #endif /* NEEDTOPORT */
5403 		/*
5404 		 * This could be a start or a stop when we're online,
5405 		 * or a stop/offline or start/online.  A start or stop when
5406 		 * we're offline is covered in the case above.
5407 		 */
5408 		/*
5409 		 * In the non-immediate case, we send the request to
5410 		 * the backend and return status to the user when
5411 		 * it is done.
5412 		 *
5413 		 * In the immediate case, we allocate a new ctl_io
5414 		 * to hold a copy of the request, and send that to
5415 		 * the backend.  We then set good status on the
5416 		 * user's request and return it immediately.
5417 		 */
5418 		if (cdb->byte2 & SSS_IMMED) {
5419 			union ctl_io *new_io;
5420 
5421 			new_io = ctl_alloc_io(ctsio->io_hdr.pool);
5422 			if (new_io == NULL) {
5423 				ctl_set_busy(ctsio);
5424 				ctl_done((union ctl_io *)ctsio);
5425 			} else {
5426 				ctl_copy_io((union ctl_io *)ctsio,
5427 					    new_io);
5428 				retval = lun->backend->config_write(new_io);
5429 				ctl_set_success(ctsio);
5430 				ctl_done((union ctl_io *)ctsio);
5431 			}
5432 		} else {
5433 			retval = lun->backend->config_write(
5434 				(union ctl_io *)ctsio);
5435 		}
5436 #ifdef NEEDTOPORT
5437 	}
5438 #endif
5439 	return (retval);
5440 }
5441 
5442 /*
5443  * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but
5444  * we don't really do anything with the LBA and length fields if the user
5445  * passes them in.  Instead we'll just flush out the cache for the entire
5446  * LUN.
5447  */
5448 int
5449 ctl_sync_cache(struct ctl_scsiio *ctsio)
5450 {
5451 	struct ctl_lun *lun;
5452 	struct ctl_softc *ctl_softc;
5453 	uint64_t starting_lba;
5454 	uint32_t block_count;
5455 	int reladr, immed;
5456 	int retval;
5457 
5458 	CTL_DEBUG_PRINT(("ctl_sync_cache\n"));
5459 
5460 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5461 	ctl_softc = control_softc;
5462 	retval = 0;
5463 	reladr = 0;
5464 	immed = 0;
5465 
5466 	switch (ctsio->cdb[0]) {
5467 	case SYNCHRONIZE_CACHE: {
5468 		struct scsi_sync_cache *cdb;
5469 		cdb = (struct scsi_sync_cache *)ctsio->cdb;
5470 
5471 		if (cdb->byte2 & SSC_RELADR)
5472 			reladr = 1;
5473 
5474 		if (cdb->byte2 & SSC_IMMED)
5475 			immed = 1;
5476 
5477 		starting_lba = scsi_4btoul(cdb->begin_lba);
5478 		block_count = scsi_2btoul(cdb->lb_count);
5479 		break;
5480 	}
5481 	case SYNCHRONIZE_CACHE_16: {
5482 		struct scsi_sync_cache_16 *cdb;
5483 		cdb = (struct scsi_sync_cache_16 *)ctsio->cdb;
5484 
5485 		if (cdb->byte2 & SSC_RELADR)
5486 			reladr = 1;
5487 
5488 		if (cdb->byte2 & SSC_IMMED)
5489 			immed = 1;
5490 
5491 		starting_lba = scsi_8btou64(cdb->begin_lba);
5492 		block_count = scsi_4btoul(cdb->lb_count);
5493 		break;
5494 	}
5495 	default:
5496 		ctl_set_invalid_opcode(ctsio);
5497 		ctl_done((union ctl_io *)ctsio);
5498 		goto bailout;
5499 		break; /* NOTREACHED */
5500 	}
5501 
5502 	if (immed) {
5503 		/*
5504 		 * We don't support the immediate bit.  Since it's in the
5505 		 * same place for the 10 and 16 byte SYNCHRONIZE CACHE
5506 		 * commands, we can just return the same error in either
5507 		 * case.
5508 		 */
5509 		ctl_set_invalid_field(ctsio,
5510 				      /*sks_valid*/ 1,
5511 				      /*command*/ 1,
5512 				      /*field*/ 1,
5513 				      /*bit_valid*/ 1,
5514 				      /*bit*/ 1);
5515 		ctl_done((union ctl_io *)ctsio);
5516 		goto bailout;
5517 	}
5518 
5519 	if (reladr) {
5520 		/*
5521 		 * We don't support the reladr bit either.  It can only be
5522 		 * used with linked commands, and we don't support linked
5523 		 * commands.  Since the bit is in the same place for the
5524 		 * 10 and 16 byte SYNCHRONIZE CACHE * commands, we can
5525 		 * just return the same error in either case.
5526 		 */
5527 		ctl_set_invalid_field(ctsio,
5528 				      /*sks_valid*/ 1,
5529 				      /*command*/ 1,
5530 				      /*field*/ 1,
5531 				      /*bit_valid*/ 1,
5532 				      /*bit*/ 0);
5533 		ctl_done((union ctl_io *)ctsio);
5534 		goto bailout;
5535 	}
5536 
5537 	/*
5538 	 * We check the LBA and length, but don't do anything with them.
5539 	 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to
5540 	 * get flushed.  This check will just help satisfy anyone who wants
5541 	 * to see an error for an out of range LBA.
5542 	 */
5543 	if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) {
5544 		ctl_set_lba_out_of_range(ctsio);
5545 		ctl_done((union ctl_io *)ctsio);
5546 		goto bailout;
5547 	}
5548 
5549 	/*
5550 	 * If this LUN has no backend, we can't flush the cache anyway.
5551 	 */
5552 	if (lun->backend == NULL) {
5553 		ctl_set_invalid_opcode(ctsio);
5554 		ctl_done((union ctl_io *)ctsio);
5555 		goto bailout;
5556 	}
5557 
5558 	/*
5559 	 * Check to see whether we're configured to send the SYNCHRONIZE
5560 	 * CACHE command directly to the back end.
5561 	 */
5562 	mtx_lock(&ctl_softc->ctl_lock);
5563 	if ((ctl_softc->flags & CTL_FLAG_REAL_SYNC)
5564 	 && (++(lun->sync_count) >= lun->sync_interval)) {
5565 		lun->sync_count = 0;
5566 		mtx_unlock(&ctl_softc->ctl_lock);
5567 		retval = lun->backend->config_write((union ctl_io *)ctsio);
5568 	} else {
5569 		mtx_unlock(&ctl_softc->ctl_lock);
5570 		ctl_set_success(ctsio);
5571 		ctl_done((union ctl_io *)ctsio);
5572 	}
5573 
5574 bailout:
5575 
5576 	return (retval);
5577 }
5578 
5579 int
5580 ctl_format(struct ctl_scsiio *ctsio)
5581 {
5582 	struct scsi_format *cdb;
5583 	struct ctl_lun *lun;
5584 	struct ctl_softc *ctl_softc;
5585 	int length, defect_list_len;
5586 
5587 	CTL_DEBUG_PRINT(("ctl_format\n"));
5588 
5589 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5590 	ctl_softc = control_softc;
5591 
5592 	cdb = (struct scsi_format *)ctsio->cdb;
5593 
5594 	length = 0;
5595 	if (cdb->byte2 & SF_FMTDATA) {
5596 		if (cdb->byte2 & SF_LONGLIST)
5597 			length = sizeof(struct scsi_format_header_long);
5598 		else
5599 			length = sizeof(struct scsi_format_header_short);
5600 	}
5601 
5602 	if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
5603 	 && (length > 0)) {
5604 		ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
5605 		ctsio->kern_data_len = length;
5606 		ctsio->kern_total_len = length;
5607 		ctsio->kern_data_resid = 0;
5608 		ctsio->kern_rel_offset = 0;
5609 		ctsio->kern_sg_entries = 0;
5610 		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5611 		ctsio->be_move_done = ctl_config_move_done;
5612 		ctl_datamove((union ctl_io *)ctsio);
5613 
5614 		return (CTL_RETVAL_COMPLETE);
5615 	}
5616 
5617 	defect_list_len = 0;
5618 
5619 	if (cdb->byte2 & SF_FMTDATA) {
5620 		if (cdb->byte2 & SF_LONGLIST) {
5621 			struct scsi_format_header_long *header;
5622 
5623 			header = (struct scsi_format_header_long *)
5624 				ctsio->kern_data_ptr;
5625 
5626 			defect_list_len = scsi_4btoul(header->defect_list_len);
5627 			if (defect_list_len != 0) {
5628 				ctl_set_invalid_field(ctsio,
5629 						      /*sks_valid*/ 1,
5630 						      /*command*/ 0,
5631 						      /*field*/ 2,
5632 						      /*bit_valid*/ 0,
5633 						      /*bit*/ 0);
5634 				goto bailout;
5635 			}
5636 		} else {
5637 			struct scsi_format_header_short *header;
5638 
5639 			header = (struct scsi_format_header_short *)
5640 				ctsio->kern_data_ptr;
5641 
5642 			defect_list_len = scsi_2btoul(header->defect_list_len);
5643 			if (defect_list_len != 0) {
5644 				ctl_set_invalid_field(ctsio,
5645 						      /*sks_valid*/ 1,
5646 						      /*command*/ 0,
5647 						      /*field*/ 2,
5648 						      /*bit_valid*/ 0,
5649 						      /*bit*/ 0);
5650 				goto bailout;
5651 			}
5652 		}
5653 	}
5654 
5655 	/*
5656 	 * The format command will clear out the "Medium format corrupted"
5657 	 * status if set by the configuration code.  That status is really
5658 	 * just a way to notify the host that we have lost the media, and
5659 	 * get them to issue a command that will basically make them think
5660 	 * they're blowing away the media.
5661 	 */
5662 	mtx_lock(&ctl_softc->ctl_lock);
5663 	lun->flags &= ~CTL_LUN_INOPERABLE;
5664 	mtx_unlock(&ctl_softc->ctl_lock);
5665 
5666 	ctsio->scsi_status = SCSI_STATUS_OK;
5667 	ctsio->io_hdr.status = CTL_SUCCESS;
5668 bailout:
5669 
5670 	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5671 		free(ctsio->kern_data_ptr, M_CTL);
5672 		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5673 	}
5674 
5675 	ctl_done((union ctl_io *)ctsio);
5676 	return (CTL_RETVAL_COMPLETE);
5677 }
5678 
5679 int
5680 ctl_write_buffer(struct ctl_scsiio *ctsio)
5681 {
5682 	struct scsi_write_buffer *cdb;
5683 	struct copan_page_header *header;
5684 	struct ctl_lun *lun;
5685 	struct ctl_softc *ctl_softc;
5686 	int buffer_offset, len;
5687 	int retval;
5688 
5689 	header = NULL;
5690 
5691 	retval = CTL_RETVAL_COMPLETE;
5692 
5693 	CTL_DEBUG_PRINT(("ctl_write_buffer\n"));
5694 
5695 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5696 	ctl_softc = control_softc;
5697 	cdb = (struct scsi_write_buffer *)ctsio->cdb;
5698 
5699 	if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) {
5700 		ctl_set_invalid_field(ctsio,
5701 				      /*sks_valid*/ 1,
5702 				      /*command*/ 1,
5703 				      /*field*/ 1,
5704 				      /*bit_valid*/ 1,
5705 				      /*bit*/ 4);
5706 		ctl_done((union ctl_io *)ctsio);
5707 		return (CTL_RETVAL_COMPLETE);
5708 	}
5709 	if (cdb->buffer_id != 0) {
5710 		ctl_set_invalid_field(ctsio,
5711 				      /*sks_valid*/ 1,
5712 				      /*command*/ 1,
5713 				      /*field*/ 2,
5714 				      /*bit_valid*/ 0,
5715 				      /*bit*/ 0);
5716 		ctl_done((union ctl_io *)ctsio);
5717 		return (CTL_RETVAL_COMPLETE);
5718 	}
5719 
5720 	len = scsi_3btoul(cdb->length);
5721 	buffer_offset = scsi_3btoul(cdb->offset);
5722 
5723 	if (len > sizeof(lun->write_buffer)) {
5724 		ctl_set_invalid_field(ctsio,
5725 				      /*sks_valid*/ 1,
5726 				      /*command*/ 1,
5727 				      /*field*/ 6,
5728 				      /*bit_valid*/ 0,
5729 				      /*bit*/ 0);
5730 		ctl_done((union ctl_io *)ctsio);
5731 		return (CTL_RETVAL_COMPLETE);
5732 	}
5733 
5734 	if (buffer_offset != 0) {
5735 		ctl_set_invalid_field(ctsio,
5736 				      /*sks_valid*/ 1,
5737 				      /*command*/ 1,
5738 				      /*field*/ 3,
5739 				      /*bit_valid*/ 0,
5740 				      /*bit*/ 0);
5741 		ctl_done((union ctl_io *)ctsio);
5742 		return (CTL_RETVAL_COMPLETE);
5743 	}
5744 
5745 	/*
5746 	 * If we've got a kernel request that hasn't been malloced yet,
5747 	 * malloc it and tell the caller the data buffer is here.
5748 	 */
5749 	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5750 		ctsio->kern_data_ptr = lun->write_buffer;
5751 		ctsio->kern_data_len = len;
5752 		ctsio->kern_total_len = len;
5753 		ctsio->kern_data_resid = 0;
5754 		ctsio->kern_rel_offset = 0;
5755 		ctsio->kern_sg_entries = 0;
5756 		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5757 		ctsio->be_move_done = ctl_config_move_done;
5758 		ctl_datamove((union ctl_io *)ctsio);
5759 
5760 		return (CTL_RETVAL_COMPLETE);
5761 	}
5762 
5763 	ctl_done((union ctl_io *)ctsio);
5764 
5765 	return (CTL_RETVAL_COMPLETE);
5766 }
5767 
5768 /*
5769  * Note that this function currently doesn't actually do anything inside
5770  * CTL to enforce things if the DQue bit is turned on.
5771  *
5772  * Also note that this function can't be used in the default case, because
5773  * the DQue bit isn't set in the changeable mask for the control mode page
5774  * anyway.  This is just here as an example for how to implement a page
5775  * handler, and a placeholder in case we want to allow the user to turn
5776  * tagged queueing on and off.
5777  *
5778  * The D_SENSE bit handling is functional, however, and will turn
5779  * descriptor sense on and off for a given LUN.
5780  */
5781 int
5782 ctl_control_page_handler(struct ctl_scsiio *ctsio,
5783 			 struct ctl_page_index *page_index, uint8_t *page_ptr)
5784 {
5785 	struct scsi_control_page *current_cp, *saved_cp, *user_cp;
5786 	struct ctl_lun *lun;
5787 	struct ctl_softc *softc;
5788 	int set_ua;
5789 	uint32_t initidx;
5790 
5791 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5792 	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5793 	set_ua = 0;
5794 
5795 	user_cp = (struct scsi_control_page *)page_ptr;
5796 	current_cp = (struct scsi_control_page *)
5797 		(page_index->page_data + (page_index->page_len *
5798 		CTL_PAGE_CURRENT));
5799 	saved_cp = (struct scsi_control_page *)
5800 		(page_index->page_data + (page_index->page_len *
5801 		CTL_PAGE_SAVED));
5802 
5803 	softc = control_softc;
5804 
5805 	mtx_lock(&softc->ctl_lock);
5806 	if (((current_cp->rlec & SCP_DSENSE) == 0)
5807 	 && ((user_cp->rlec & SCP_DSENSE) != 0)) {
5808 		/*
5809 		 * Descriptor sense is currently turned off and the user
5810 		 * wants to turn it on.
5811 		 */
5812 		current_cp->rlec |= SCP_DSENSE;
5813 		saved_cp->rlec |= SCP_DSENSE;
5814 		lun->flags |= CTL_LUN_SENSE_DESC;
5815 		set_ua = 1;
5816 	} else if (((current_cp->rlec & SCP_DSENSE) != 0)
5817 		&& ((user_cp->rlec & SCP_DSENSE) == 0)) {
5818 		/*
5819 		 * Descriptor sense is currently turned on, and the user
5820 		 * wants to turn it off.
5821 		 */
5822 		current_cp->rlec &= ~SCP_DSENSE;
5823 		saved_cp->rlec &= ~SCP_DSENSE;
5824 		lun->flags &= ~CTL_LUN_SENSE_DESC;
5825 		set_ua = 1;
5826 	}
5827 	if (current_cp->queue_flags & SCP_QUEUE_DQUE) {
5828 		if (user_cp->queue_flags & SCP_QUEUE_DQUE) {
5829 #ifdef NEEDTOPORT
5830 			csevent_log(CSC_CTL | CSC_SHELF_SW |
5831 				    CTL_UNTAG_TO_UNTAG,
5832 				    csevent_LogType_Trace,
5833 				    csevent_Severity_Information,
5834 				    csevent_AlertLevel_Green,
5835 				    csevent_FRU_Firmware,
5836 				    csevent_FRU_Unknown,
5837 				    "Received untagged to untagged transition");
5838 #endif /* NEEDTOPORT */
5839 		} else {
5840 #ifdef NEEDTOPORT
5841 			csevent_log(CSC_CTL | CSC_SHELF_SW |
5842 				    CTL_UNTAG_TO_TAG,
5843 				    csevent_LogType_ConfigChange,
5844 				    csevent_Severity_Information,
5845 				    csevent_AlertLevel_Green,
5846 				    csevent_FRU_Firmware,
5847 				    csevent_FRU_Unknown,
5848 				    "Received untagged to tagged "
5849 				    "queueing transition");
5850 #endif /* NEEDTOPORT */
5851 
5852 			current_cp->queue_flags &= ~SCP_QUEUE_DQUE;
5853 			saved_cp->queue_flags &= ~SCP_QUEUE_DQUE;
5854 			set_ua = 1;
5855 		}
5856 	} else {
5857 		if (user_cp->queue_flags & SCP_QUEUE_DQUE) {
5858 #ifdef NEEDTOPORT
5859 			csevent_log(CSC_CTL | CSC_SHELF_SW |
5860 				    CTL_TAG_TO_UNTAG,
5861 				    csevent_LogType_ConfigChange,
5862 				    csevent_Severity_Warning,
5863 				    csevent_AlertLevel_Yellow,
5864 				    csevent_FRU_Firmware,
5865 				    csevent_FRU_Unknown,
5866 				    "Received tagged queueing to untagged "
5867 				    "transition");
5868 #endif /* NEEDTOPORT */
5869 
5870 			current_cp->queue_flags |= SCP_QUEUE_DQUE;
5871 			saved_cp->queue_flags |= SCP_QUEUE_DQUE;
5872 			set_ua = 1;
5873 		} else {
5874 #ifdef NEEDTOPORT
5875 			csevent_log(CSC_CTL | CSC_SHELF_SW |
5876 				    CTL_TAG_TO_TAG,
5877 				    csevent_LogType_Trace,
5878 				    csevent_Severity_Information,
5879 				    csevent_AlertLevel_Green,
5880 				    csevent_FRU_Firmware,
5881 				    csevent_FRU_Unknown,
5882 				    "Received tagged queueing to tagged "
5883 				    "queueing transition");
5884 #endif /* NEEDTOPORT */
5885 		}
5886 	}
5887 	if (set_ua != 0) {
5888 		int i;
5889 		/*
5890 		 * Let other initiators know that the mode
5891 		 * parameters for this LUN have changed.
5892 		 */
5893 		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
5894 			if (i == initidx)
5895 				continue;
5896 
5897 			lun->pending_sense[i].ua_pending |=
5898 				CTL_UA_MODE_CHANGE;
5899 		}
5900 	}
5901 	mtx_unlock(&softc->ctl_lock);
5902 
5903 	return (0);
5904 }
5905 
5906 int
5907 ctl_power_sp_handler(struct ctl_scsiio *ctsio,
5908 		     struct ctl_page_index *page_index, uint8_t *page_ptr)
5909 {
5910 	return (0);
5911 }
5912 
5913 int
5914 ctl_power_sp_sense_handler(struct ctl_scsiio *ctsio,
5915 			   struct ctl_page_index *page_index, int pc)
5916 {
5917 	struct copan_power_subpage *page;
5918 
5919 	page = (struct copan_power_subpage *)page_index->page_data +
5920 		(page_index->page_len * pc);
5921 
5922 	switch (pc) {
5923 	case SMS_PAGE_CTRL_CHANGEABLE >> 6:
5924 		/*
5925 		 * We don't update the changable bits for this page.
5926 		 */
5927 		break;
5928 	case SMS_PAGE_CTRL_CURRENT >> 6:
5929 	case SMS_PAGE_CTRL_DEFAULT >> 6:
5930 	case SMS_PAGE_CTRL_SAVED >> 6:
5931 #ifdef NEEDTOPORT
5932 		ctl_update_power_subpage(page);
5933 #endif
5934 		break;
5935 	default:
5936 #ifdef NEEDTOPORT
5937 		EPRINT(0, "Invalid PC %d!!", pc);
5938 #endif
5939 		break;
5940 	}
5941 	return (0);
5942 }
5943 
5944 
5945 int
5946 ctl_aps_sp_handler(struct ctl_scsiio *ctsio,
5947 		   struct ctl_page_index *page_index, uint8_t *page_ptr)
5948 {
5949 	struct copan_aps_subpage *user_sp;
5950 	struct copan_aps_subpage *current_sp;
5951 	union ctl_modepage_info *modepage_info;
5952 	struct ctl_softc *softc;
5953 	struct ctl_lun *lun;
5954 	int retval;
5955 
5956 	retval = CTL_RETVAL_COMPLETE;
5957 	current_sp = (struct copan_aps_subpage *)(page_index->page_data +
5958 		     (page_index->page_len * CTL_PAGE_CURRENT));
5959 	softc = control_softc;
5960 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5961 
5962 	user_sp = (struct copan_aps_subpage *)page_ptr;
5963 
5964 	modepage_info = (union ctl_modepage_info *)
5965 		ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
5966 
5967 	modepage_info->header.page_code = page_index->page_code & SMPH_PC_MASK;
5968 	modepage_info->header.subpage = page_index->subpage;
5969 	modepage_info->aps.lock_active = user_sp->lock_active;
5970 
5971 	mtx_lock(&softc->ctl_lock);
5972 
5973 	/*
5974 	 * If there is a request to lock the LUN and another LUN is locked
5975 	 * this is an error. If the requested LUN is already locked ignore
5976 	 * the request. If no LUN is locked attempt to lock it.
5977 	 * if there is a request to unlock the LUN and the LUN is currently
5978 	 * locked attempt to unlock it. Otherwise ignore the request. i.e.
5979 	 * if another LUN is locked or no LUN is locked.
5980 	 */
5981 	if (user_sp->lock_active & APS_LOCK_ACTIVE) {
5982 		if (softc->aps_locked_lun == lun->lun) {
5983 			/*
5984 			 * This LUN is already locked, so we're done.
5985 			 */
5986 			retval = CTL_RETVAL_COMPLETE;
5987 		} else if (softc->aps_locked_lun == 0) {
5988 			/*
5989 			 * No one has the lock, pass the request to the
5990 			 * backend.
5991 			 */
5992 			retval = lun->backend->config_write(
5993 				(union ctl_io *)ctsio);
5994 		} else {
5995 			/*
5996 			 * Someone else has the lock, throw out the request.
5997 			 */
5998 			ctl_set_already_locked(ctsio);
5999 			free(ctsio->kern_data_ptr, M_CTL);
6000 			ctl_done((union ctl_io *)ctsio);
6001 
6002 			/*
6003 			 * Set the return value so that ctl_do_mode_select()
6004 			 * won't try to complete the command.  We already
6005 			 * completed it here.
6006 			 */
6007 			retval = CTL_RETVAL_ERROR;
6008 		}
6009 	} else if (softc->aps_locked_lun == lun->lun) {
6010 		/*
6011 		 * This LUN is locked, so pass the unlock request to the
6012 		 * backend.
6013 		 */
6014 		retval = lun->backend->config_write((union ctl_io *)ctsio);
6015 	}
6016 	mtx_unlock(&softc->ctl_lock);
6017 
6018 	return (retval);
6019 }
6020 
6021 int
6022 ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
6023 				struct ctl_page_index *page_index,
6024 				uint8_t *page_ptr)
6025 {
6026 	uint8_t *c;
6027 	int i;
6028 
6029 	c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs;
6030 	ctl_time_io_secs =
6031 		(c[0] << 8) |
6032 		(c[1] << 0) |
6033 		0;
6034 	CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs));
6035 	printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs);
6036 	printf("page data:");
6037 	for (i=0; i<8; i++)
6038 		printf(" %.2x",page_ptr[i]);
6039 	printf("\n");
6040 	return (0);
6041 }
6042 
6043 int
6044 ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
6045 			       struct ctl_page_index *page_index,
6046 			       int pc)
6047 {
6048 	struct copan_debugconf_subpage *page;
6049 
6050 	page = (struct copan_debugconf_subpage *)page_index->page_data +
6051 		(page_index->page_len * pc);
6052 
6053 	switch (pc) {
6054 	case SMS_PAGE_CTRL_CHANGEABLE >> 6:
6055 	case SMS_PAGE_CTRL_DEFAULT >> 6:
6056 	case SMS_PAGE_CTRL_SAVED >> 6:
6057 		/*
6058 		 * We don't update the changable or default bits for this page.
6059 		 */
6060 		break;
6061 	case SMS_PAGE_CTRL_CURRENT >> 6:
6062 		page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8;
6063 		page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0;
6064 		break;
6065 	default:
6066 #ifdef NEEDTOPORT
6067 		EPRINT(0, "Invalid PC %d!!", pc);
6068 #endif /* NEEDTOPORT */
6069 		break;
6070 	}
6071 	return (0);
6072 }
6073 
6074 
6075 static int
6076 ctl_do_mode_select(union ctl_io *io)
6077 {
6078 	struct scsi_mode_page_header *page_header;
6079 	struct ctl_page_index *page_index;
6080 	struct ctl_scsiio *ctsio;
6081 	int control_dev, page_len;
6082 	int page_len_offset, page_len_size;
6083 	union ctl_modepage_info *modepage_info;
6084 	struct ctl_lun *lun;
6085 	int *len_left, *len_used;
6086 	int retval, i;
6087 
6088 	ctsio = &io->scsiio;
6089 	page_index = NULL;
6090 	page_len = 0;
6091 	retval = CTL_RETVAL_COMPLETE;
6092 
6093 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6094 
6095 	if (lun->be_lun->lun_type != T_DIRECT)
6096 		control_dev = 1;
6097 	else
6098 		control_dev = 0;
6099 
6100 	modepage_info = (union ctl_modepage_info *)
6101 		ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
6102 	len_left = &modepage_info->header.len_left;
6103 	len_used = &modepage_info->header.len_used;
6104 
6105 do_next_page:
6106 
6107 	page_header = (struct scsi_mode_page_header *)
6108 		(ctsio->kern_data_ptr + *len_used);
6109 
6110 	if (*len_left == 0) {
6111 		free(ctsio->kern_data_ptr, M_CTL);
6112 		ctl_set_success(ctsio);
6113 		ctl_done((union ctl_io *)ctsio);
6114 		return (CTL_RETVAL_COMPLETE);
6115 	} else if (*len_left < sizeof(struct scsi_mode_page_header)) {
6116 
6117 		free(ctsio->kern_data_ptr, M_CTL);
6118 		ctl_set_param_len_error(ctsio);
6119 		ctl_done((union ctl_io *)ctsio);
6120 		return (CTL_RETVAL_COMPLETE);
6121 
6122 	} else if ((page_header->page_code & SMPH_SPF)
6123 		&& (*len_left < sizeof(struct scsi_mode_page_header_sp))) {
6124 
6125 		free(ctsio->kern_data_ptr, M_CTL);
6126 		ctl_set_param_len_error(ctsio);
6127 		ctl_done((union ctl_io *)ctsio);
6128 		return (CTL_RETVAL_COMPLETE);
6129 	}
6130 
6131 
6132 	/*
6133 	 * XXX KDM should we do something with the block descriptor?
6134 	 */
6135 	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6136 
6137 		if ((control_dev != 0)
6138 		 && (lun->mode_pages.index[i].page_flags &
6139 		     CTL_PAGE_FLAG_DISK_ONLY))
6140 			continue;
6141 
6142 		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) !=
6143 		    (page_header->page_code & SMPH_PC_MASK))
6144 			continue;
6145 
6146 		/*
6147 		 * If neither page has a subpage code, then we've got a
6148 		 * match.
6149 		 */
6150 		if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0)
6151 		 && ((page_header->page_code & SMPH_SPF) == 0)) {
6152 			page_index = &lun->mode_pages.index[i];
6153 			page_len = page_header->page_length;
6154 			break;
6155 		}
6156 
6157 		/*
6158 		 * If both pages have subpages, then the subpage numbers
6159 		 * have to match.
6160 		 */
6161 		if ((lun->mode_pages.index[i].page_code & SMPH_SPF)
6162 		  && (page_header->page_code & SMPH_SPF)) {
6163 			struct scsi_mode_page_header_sp *sph;
6164 
6165 			sph = (struct scsi_mode_page_header_sp *)page_header;
6166 
6167 			if (lun->mode_pages.index[i].subpage ==
6168 			    sph->subpage) {
6169 				page_index = &lun->mode_pages.index[i];
6170 				page_len = scsi_2btoul(sph->page_length);
6171 				break;
6172 			}
6173 		}
6174 	}
6175 
6176 	/*
6177 	 * If we couldn't find the page, or if we don't have a mode select
6178 	 * handler for it, send back an error to the user.
6179 	 */
6180 	if ((page_index == NULL)
6181 	 || (page_index->select_handler == NULL)) {
6182 		ctl_set_invalid_field(ctsio,
6183 				      /*sks_valid*/ 1,
6184 				      /*command*/ 0,
6185 				      /*field*/ *len_used,
6186 				      /*bit_valid*/ 0,
6187 				      /*bit*/ 0);
6188 		free(ctsio->kern_data_ptr, M_CTL);
6189 		ctl_done((union ctl_io *)ctsio);
6190 		return (CTL_RETVAL_COMPLETE);
6191 	}
6192 
6193 	if (page_index->page_code & SMPH_SPF) {
6194 		page_len_offset = 2;
6195 		page_len_size = 2;
6196 	} else {
6197 		page_len_size = 1;
6198 		page_len_offset = 1;
6199 	}
6200 
6201 	/*
6202 	 * If the length the initiator gives us isn't the one we specify in
6203 	 * the mode page header, or if they didn't specify enough data in
6204 	 * the CDB to avoid truncating this page, kick out the request.
6205 	 */
6206 	if ((page_len != (page_index->page_len - page_len_offset -
6207 			  page_len_size))
6208 	 || (*len_left < page_index->page_len)) {
6209 
6210 
6211 		ctl_set_invalid_field(ctsio,
6212 				      /*sks_valid*/ 1,
6213 				      /*command*/ 0,
6214 				      /*field*/ *len_used + page_len_offset,
6215 				      /*bit_valid*/ 0,
6216 				      /*bit*/ 0);
6217 		free(ctsio->kern_data_ptr, M_CTL);
6218 		ctl_done((union ctl_io *)ctsio);
6219 		return (CTL_RETVAL_COMPLETE);
6220 	}
6221 
6222 	/*
6223 	 * Run through the mode page, checking to make sure that the bits
6224 	 * the user changed are actually legal for him to change.
6225 	 */
6226 	for (i = 0; i < page_index->page_len; i++) {
6227 		uint8_t *user_byte, *change_mask, *current_byte;
6228 		int bad_bit;
6229 		int j;
6230 
6231 		user_byte = (uint8_t *)page_header + i;
6232 		change_mask = page_index->page_data +
6233 			      (page_index->page_len * CTL_PAGE_CHANGEABLE) + i;
6234 		current_byte = page_index->page_data +
6235 			       (page_index->page_len * CTL_PAGE_CURRENT) + i;
6236 
6237 		/*
6238 		 * Check to see whether the user set any bits in this byte
6239 		 * that he is not allowed to set.
6240 		 */
6241 		if ((*user_byte & ~(*change_mask)) ==
6242 		    (*current_byte & ~(*change_mask)))
6243 			continue;
6244 
6245 		/*
6246 		 * Go through bit by bit to determine which one is illegal.
6247 		 */
6248 		bad_bit = 0;
6249 		for (j = 7; j >= 0; j--) {
6250 			if ((((1 << i) & ~(*change_mask)) & *user_byte) !=
6251 			    (((1 << i) & ~(*change_mask)) & *current_byte)) {
6252 				bad_bit = i;
6253 				break;
6254 			}
6255 		}
6256 		ctl_set_invalid_field(ctsio,
6257 				      /*sks_valid*/ 1,
6258 				      /*command*/ 0,
6259 				      /*field*/ *len_used + i,
6260 				      /*bit_valid*/ 1,
6261 				      /*bit*/ bad_bit);
6262 		free(ctsio->kern_data_ptr, M_CTL);
6263 		ctl_done((union ctl_io *)ctsio);
6264 		return (CTL_RETVAL_COMPLETE);
6265 	}
6266 
6267 	/*
6268 	 * Decrement these before we call the page handler, since we may
6269 	 * end up getting called back one way or another before the handler
6270 	 * returns to this context.
6271 	 */
6272 	*len_left -= page_index->page_len;
6273 	*len_used += page_index->page_len;
6274 
6275 	retval = page_index->select_handler(ctsio, page_index,
6276 					    (uint8_t *)page_header);
6277 
6278 	/*
6279 	 * If the page handler returns CTL_RETVAL_QUEUED, then we need to
6280 	 * wait until this queued command completes to finish processing
6281 	 * the mode page.  If it returns anything other than
6282 	 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
6283 	 * already set the sense information, freed the data pointer, and
6284 	 * completed the io for us.
6285 	 */
6286 	if (retval != CTL_RETVAL_COMPLETE)
6287 		goto bailout_no_done;
6288 
6289 	/*
6290 	 * If the initiator sent us more than one page, parse the next one.
6291 	 */
6292 	if (*len_left > 0)
6293 		goto do_next_page;
6294 
6295 	ctl_set_success(ctsio);
6296 	free(ctsio->kern_data_ptr, M_CTL);
6297 	ctl_done((union ctl_io *)ctsio);
6298 
6299 bailout_no_done:
6300 
6301 	return (CTL_RETVAL_COMPLETE);
6302 
6303 }
6304 
6305 int
6306 ctl_mode_select(struct ctl_scsiio *ctsio)
6307 {
6308 	int param_len, pf, sp;
6309 	int header_size, bd_len;
6310 	int len_left, len_used;
6311 	struct ctl_page_index *page_index;
6312 	struct ctl_lun *lun;
6313 	int control_dev, page_len;
6314 	union ctl_modepage_info *modepage_info;
6315 	int retval;
6316 
6317 	pf = 0;
6318 	sp = 0;
6319 	page_len = 0;
6320 	len_used = 0;
6321 	len_left = 0;
6322 	retval = 0;
6323 	bd_len = 0;
6324 	page_index = NULL;
6325 
6326 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6327 
6328 	if (lun->be_lun->lun_type != T_DIRECT)
6329 		control_dev = 1;
6330 	else
6331 		control_dev = 0;
6332 
6333 	switch (ctsio->cdb[0]) {
6334 	case MODE_SELECT_6: {
6335 		struct scsi_mode_select_6 *cdb;
6336 
6337 		cdb = (struct scsi_mode_select_6 *)ctsio->cdb;
6338 
6339 		pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
6340 		sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
6341 
6342 		param_len = cdb->length;
6343 		header_size = sizeof(struct scsi_mode_header_6);
6344 		break;
6345 	}
6346 	case MODE_SELECT_10: {
6347 		struct scsi_mode_select_10 *cdb;
6348 
6349 		cdb = (struct scsi_mode_select_10 *)ctsio->cdb;
6350 
6351 		pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
6352 		sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
6353 
6354 		param_len = scsi_2btoul(cdb->length);
6355 		header_size = sizeof(struct scsi_mode_header_10);
6356 		break;
6357 	}
6358 	default:
6359 		ctl_set_invalid_opcode(ctsio);
6360 		ctl_done((union ctl_io *)ctsio);
6361 		return (CTL_RETVAL_COMPLETE);
6362 		break; /* NOTREACHED */
6363 	}
6364 
6365 	/*
6366 	 * From SPC-3:
6367 	 * "A parameter list length of zero indicates that the Data-Out Buffer
6368 	 * shall be empty. This condition shall not be considered as an error."
6369 	 */
6370 	if (param_len == 0) {
6371 		ctl_set_success(ctsio);
6372 		ctl_done((union ctl_io *)ctsio);
6373 		return (CTL_RETVAL_COMPLETE);
6374 	}
6375 
6376 	/*
6377 	 * Since we'll hit this the first time through, prior to
6378 	 * allocation, we don't need to free a data buffer here.
6379 	 */
6380 	if (param_len < header_size) {
6381 		ctl_set_param_len_error(ctsio);
6382 		ctl_done((union ctl_io *)ctsio);
6383 		return (CTL_RETVAL_COMPLETE);
6384 	}
6385 
6386 	/*
6387 	 * Allocate the data buffer and grab the user's data.  In theory,
6388 	 * we shouldn't have to sanity check the parameter list length here
6389 	 * because the maximum size is 64K.  We should be able to malloc
6390 	 * that much without too many problems.
6391 	 */
6392 	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
6393 		ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
6394 		ctsio->kern_data_len = param_len;
6395 		ctsio->kern_total_len = param_len;
6396 		ctsio->kern_data_resid = 0;
6397 		ctsio->kern_rel_offset = 0;
6398 		ctsio->kern_sg_entries = 0;
6399 		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6400 		ctsio->be_move_done = ctl_config_move_done;
6401 		ctl_datamove((union ctl_io *)ctsio);
6402 
6403 		return (CTL_RETVAL_COMPLETE);
6404 	}
6405 
6406 	switch (ctsio->cdb[0]) {
6407 	case MODE_SELECT_6: {
6408 		struct scsi_mode_header_6 *mh6;
6409 
6410 		mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr;
6411 		bd_len = mh6->blk_desc_len;
6412 		break;
6413 	}
6414 	case MODE_SELECT_10: {
6415 		struct scsi_mode_header_10 *mh10;
6416 
6417 		mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr;
6418 		bd_len = scsi_2btoul(mh10->blk_desc_len);
6419 		break;
6420 	}
6421 	default:
6422 		panic("Invalid CDB type %#x", ctsio->cdb[0]);
6423 		break;
6424 	}
6425 
6426 	if (param_len < (header_size + bd_len)) {
6427 		free(ctsio->kern_data_ptr, M_CTL);
6428 		ctl_set_param_len_error(ctsio);
6429 		ctl_done((union ctl_io *)ctsio);
6430 		return (CTL_RETVAL_COMPLETE);
6431 	}
6432 
6433 	/*
6434 	 * Set the IO_CONT flag, so that if this I/O gets passed to
6435 	 * ctl_config_write_done(), it'll get passed back to
6436 	 * ctl_do_mode_select() for further processing, or completion if
6437 	 * we're all done.
6438 	 */
6439 	ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
6440 	ctsio->io_cont = ctl_do_mode_select;
6441 
6442 	modepage_info = (union ctl_modepage_info *)
6443 		ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
6444 
6445 	memset(modepage_info, 0, sizeof(*modepage_info));
6446 
6447 	len_left = param_len - header_size - bd_len;
6448 	len_used = header_size + bd_len;
6449 
6450 	modepage_info->header.len_left = len_left;
6451 	modepage_info->header.len_used = len_used;
6452 
6453 	return (ctl_do_mode_select((union ctl_io *)ctsio));
6454 }
6455 
6456 int
6457 ctl_mode_sense(struct ctl_scsiio *ctsio)
6458 {
6459 	struct ctl_lun *lun;
6460 	int pc, page_code, dbd, llba, subpage;
6461 	int alloc_len, page_len, header_len, total_len;
6462 	struct scsi_mode_block_descr *block_desc;
6463 	struct ctl_page_index *page_index;
6464 	int control_dev;
6465 
6466 	dbd = 0;
6467 	llba = 0;
6468 	block_desc = NULL;
6469 	page_index = NULL;
6470 
6471 	CTL_DEBUG_PRINT(("ctl_mode_sense\n"));
6472 
6473 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6474 
6475 	if (lun->be_lun->lun_type != T_DIRECT)
6476 		control_dev = 1;
6477 	else
6478 		control_dev = 0;
6479 
6480 	switch (ctsio->cdb[0]) {
6481 	case MODE_SENSE_6: {
6482 		struct scsi_mode_sense_6 *cdb;
6483 
6484 		cdb = (struct scsi_mode_sense_6 *)ctsio->cdb;
6485 
6486 		header_len = sizeof(struct scsi_mode_hdr_6);
6487 		if (cdb->byte2 & SMS_DBD)
6488 			dbd = 1;
6489 		else
6490 			header_len += sizeof(struct scsi_mode_block_descr);
6491 
6492 		pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
6493 		page_code = cdb->page & SMS_PAGE_CODE;
6494 		subpage = cdb->subpage;
6495 		alloc_len = cdb->length;
6496 		break;
6497 	}
6498 	case MODE_SENSE_10: {
6499 		struct scsi_mode_sense_10 *cdb;
6500 
6501 		cdb = (struct scsi_mode_sense_10 *)ctsio->cdb;
6502 
6503 		header_len = sizeof(struct scsi_mode_hdr_10);
6504 
6505 		if (cdb->byte2 & SMS_DBD)
6506 			dbd = 1;
6507 		else
6508 			header_len += sizeof(struct scsi_mode_block_descr);
6509 		if (cdb->byte2 & SMS10_LLBAA)
6510 			llba = 1;
6511 		pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
6512 		page_code = cdb->page & SMS_PAGE_CODE;
6513 		subpage = cdb->subpage;
6514 		alloc_len = scsi_2btoul(cdb->length);
6515 		break;
6516 	}
6517 	default:
6518 		ctl_set_invalid_opcode(ctsio);
6519 		ctl_done((union ctl_io *)ctsio);
6520 		return (CTL_RETVAL_COMPLETE);
6521 		break; /* NOTREACHED */
6522 	}
6523 
6524 	/*
6525 	 * We have to make a first pass through to calculate the size of
6526 	 * the pages that match the user's query.  Then we allocate enough
6527 	 * memory to hold it, and actually copy the data into the buffer.
6528 	 */
6529 	switch (page_code) {
6530 	case SMS_ALL_PAGES_PAGE: {
6531 		int i;
6532 
6533 		page_len = 0;
6534 
6535 		/*
6536 		 * At the moment, values other than 0 and 0xff here are
6537 		 * reserved according to SPC-3.
6538 		 */
6539 		if ((subpage != SMS_SUBPAGE_PAGE_0)
6540 		 && (subpage != SMS_SUBPAGE_ALL)) {
6541 			ctl_set_invalid_field(ctsio,
6542 					      /*sks_valid*/ 1,
6543 					      /*command*/ 1,
6544 					      /*field*/ 3,
6545 					      /*bit_valid*/ 0,
6546 					      /*bit*/ 0);
6547 			ctl_done((union ctl_io *)ctsio);
6548 			return (CTL_RETVAL_COMPLETE);
6549 		}
6550 
6551 		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6552 			if ((control_dev != 0)
6553 			 && (lun->mode_pages.index[i].page_flags &
6554 			     CTL_PAGE_FLAG_DISK_ONLY))
6555 				continue;
6556 
6557 			/*
6558 			 * We don't use this subpage if the user didn't
6559 			 * request all subpages.
6560 			 */
6561 			if ((lun->mode_pages.index[i].subpage != 0)
6562 			 && (subpage == SMS_SUBPAGE_PAGE_0))
6563 				continue;
6564 
6565 #if 0
6566 			printf("found page %#x len %d\n",
6567 			       lun->mode_pages.index[i].page_code &
6568 			       SMPH_PC_MASK,
6569 			       lun->mode_pages.index[i].page_len);
6570 #endif
6571 			page_len += lun->mode_pages.index[i].page_len;
6572 		}
6573 		break;
6574 	}
6575 	default: {
6576 		int i;
6577 
6578 		page_len = 0;
6579 
6580 		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6581 			/* Look for the right page code */
6582 			if ((lun->mode_pages.index[i].page_code &
6583 			     SMPH_PC_MASK) != page_code)
6584 				continue;
6585 
6586 			/* Look for the right subpage or the subpage wildcard*/
6587 			if ((lun->mode_pages.index[i].subpage != subpage)
6588 			 && (subpage != SMS_SUBPAGE_ALL))
6589 				continue;
6590 
6591 			/* Make sure the page is supported for this dev type */
6592 			if ((control_dev != 0)
6593 			 && (lun->mode_pages.index[i].page_flags &
6594 			     CTL_PAGE_FLAG_DISK_ONLY))
6595 				continue;
6596 
6597 #if 0
6598 			printf("found page %#x len %d\n",
6599 			       lun->mode_pages.index[i].page_code &
6600 			       SMPH_PC_MASK,
6601 			       lun->mode_pages.index[i].page_len);
6602 #endif
6603 
6604 			page_len += lun->mode_pages.index[i].page_len;
6605 		}
6606 
6607 		if (page_len == 0) {
6608 			ctl_set_invalid_field(ctsio,
6609 					      /*sks_valid*/ 1,
6610 					      /*command*/ 1,
6611 					      /*field*/ 2,
6612 					      /*bit_valid*/ 1,
6613 					      /*bit*/ 5);
6614 			ctl_done((union ctl_io *)ctsio);
6615 			return (CTL_RETVAL_COMPLETE);
6616 		}
6617 		break;
6618 	}
6619 	}
6620 
6621 	total_len = header_len + page_len;
6622 #if 0
6623 	printf("header_len = %d, page_len = %d, total_len = %d\n",
6624 	       header_len, page_len, total_len);
6625 #endif
6626 
6627 	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
6628 	ctsio->kern_sg_entries = 0;
6629 	ctsio->kern_data_resid = 0;
6630 	ctsio->kern_rel_offset = 0;
6631 	if (total_len < alloc_len) {
6632 		ctsio->residual = alloc_len - total_len;
6633 		ctsio->kern_data_len = total_len;
6634 		ctsio->kern_total_len = total_len;
6635 	} else {
6636 		ctsio->residual = 0;
6637 		ctsio->kern_data_len = alloc_len;
6638 		ctsio->kern_total_len = alloc_len;
6639 	}
6640 
6641 	switch (ctsio->cdb[0]) {
6642 	case MODE_SENSE_6: {
6643 		struct scsi_mode_hdr_6 *header;
6644 
6645 		header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr;
6646 
6647 		header->datalen = ctl_min(total_len - 1, 254);
6648 
6649 		if (dbd)
6650 			header->block_descr_len = 0;
6651 		else
6652 			header->block_descr_len =
6653 				sizeof(struct scsi_mode_block_descr);
6654 		block_desc = (struct scsi_mode_block_descr *)&header[1];
6655 		break;
6656 	}
6657 	case MODE_SENSE_10: {
6658 		struct scsi_mode_hdr_10 *header;
6659 		int datalen;
6660 
6661 		header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr;
6662 
6663 		datalen = ctl_min(total_len - 2, 65533);
6664 		scsi_ulto2b(datalen, header->datalen);
6665 		if (dbd)
6666 			scsi_ulto2b(0, header->block_descr_len);
6667 		else
6668 			scsi_ulto2b(sizeof(struct scsi_mode_block_descr),
6669 				    header->block_descr_len);
6670 		block_desc = (struct scsi_mode_block_descr *)&header[1];
6671 		break;
6672 	}
6673 	default:
6674 		panic("invalid CDB type %#x", ctsio->cdb[0]);
6675 		break; /* NOTREACHED */
6676 	}
6677 
6678 	/*
6679 	 * If we've got a disk, use its blocksize in the block
6680 	 * descriptor.  Otherwise, just set it to 0.
6681 	 */
6682 	if (dbd == 0) {
6683 		if (control_dev != 0)
6684 			scsi_ulto3b(lun->be_lun->blocksize,
6685 				    block_desc->block_len);
6686 		else
6687 			scsi_ulto3b(0, block_desc->block_len);
6688 	}
6689 
6690 	switch (page_code) {
6691 	case SMS_ALL_PAGES_PAGE: {
6692 		int i, data_used;
6693 
6694 		data_used = header_len;
6695 		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6696 			struct ctl_page_index *page_index;
6697 
6698 			page_index = &lun->mode_pages.index[i];
6699 
6700 			if ((control_dev != 0)
6701 			 && (page_index->page_flags &
6702 			    CTL_PAGE_FLAG_DISK_ONLY))
6703 				continue;
6704 
6705 			/*
6706 			 * We don't use this subpage if the user didn't
6707 			 * request all subpages.  We already checked (above)
6708 			 * to make sure the user only specified a subpage
6709 			 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case.
6710 			 */
6711 			if ((page_index->subpage != 0)
6712 			 && (subpage == SMS_SUBPAGE_PAGE_0))
6713 				continue;
6714 
6715 			/*
6716 			 * Call the handler, if it exists, to update the
6717 			 * page to the latest values.
6718 			 */
6719 			if (page_index->sense_handler != NULL)
6720 				page_index->sense_handler(ctsio, page_index,pc);
6721 
6722 			memcpy(ctsio->kern_data_ptr + data_used,
6723 			       page_index->page_data +
6724 			       (page_index->page_len * pc),
6725 			       page_index->page_len);
6726 			data_used += page_index->page_len;
6727 		}
6728 		break;
6729 	}
6730 	default: {
6731 		int i, data_used;
6732 
6733 		data_used = header_len;
6734 
6735 		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6736 			struct ctl_page_index *page_index;
6737 
6738 			page_index = &lun->mode_pages.index[i];
6739 
6740 			/* Look for the right page code */
6741 			if ((page_index->page_code & SMPH_PC_MASK) != page_code)
6742 				continue;
6743 
6744 			/* Look for the right subpage or the subpage wildcard*/
6745 			if ((page_index->subpage != subpage)
6746 			 && (subpage != SMS_SUBPAGE_ALL))
6747 				continue;
6748 
6749 			/* Make sure the page is supported for this dev type */
6750 			if ((control_dev != 0)
6751 			 && (page_index->page_flags &
6752 			     CTL_PAGE_FLAG_DISK_ONLY))
6753 				continue;
6754 
6755 			/*
6756 			 * Call the handler, if it exists, to update the
6757 			 * page to the latest values.
6758 			 */
6759 			if (page_index->sense_handler != NULL)
6760 				page_index->sense_handler(ctsio, page_index,pc);
6761 
6762 			memcpy(ctsio->kern_data_ptr + data_used,
6763 			       page_index->page_data +
6764 			       (page_index->page_len * pc),
6765 			       page_index->page_len);
6766 			data_used += page_index->page_len;
6767 		}
6768 		break;
6769 	}
6770 	}
6771 
6772 	ctsio->scsi_status = SCSI_STATUS_OK;
6773 
6774 	ctsio->be_move_done = ctl_config_move_done;
6775 	ctl_datamove((union ctl_io *)ctsio);
6776 
6777 	return (CTL_RETVAL_COMPLETE);
6778 }
6779 
6780 int
6781 ctl_read_capacity(struct ctl_scsiio *ctsio)
6782 {
6783 	struct scsi_read_capacity *cdb;
6784 	struct scsi_read_capacity_data *data;
6785 	struct ctl_lun *lun;
6786 	uint32_t lba;
6787 
6788 	CTL_DEBUG_PRINT(("ctl_read_capacity\n"));
6789 
6790 	cdb = (struct scsi_read_capacity *)ctsio->cdb;
6791 
6792 	lba = scsi_4btoul(cdb->addr);
6793 	if (((cdb->pmi & SRC_PMI) == 0)
6794 	 && (lba != 0)) {
6795 		ctl_set_invalid_field(/*ctsio*/ ctsio,
6796 				      /*sks_valid*/ 1,
6797 				      /*command*/ 1,
6798 				      /*field*/ 2,
6799 				      /*bit_valid*/ 0,
6800 				      /*bit*/ 0);
6801 		ctl_done((union ctl_io *)ctsio);
6802 		return (CTL_RETVAL_COMPLETE);
6803 	}
6804 
6805 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6806 
6807 	ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
6808 	data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
6809 	ctsio->residual = 0;
6810 	ctsio->kern_data_len = sizeof(*data);
6811 	ctsio->kern_total_len = sizeof(*data);
6812 	ctsio->kern_data_resid = 0;
6813 	ctsio->kern_rel_offset = 0;
6814 	ctsio->kern_sg_entries = 0;
6815 
6816 	/*
6817 	 * If the maximum LBA is greater than 0xfffffffe, the user must
6818 	 * issue a SERVICE ACTION IN (16) command, with the read capacity
6819 	 * serivce action set.
6820 	 */
6821 	if (lun->be_lun->maxlba > 0xfffffffe)
6822 		scsi_ulto4b(0xffffffff, data->addr);
6823 	else
6824 		scsi_ulto4b(lun->be_lun->maxlba, data->addr);
6825 
6826 	/*
6827 	 * XXX KDM this may not be 512 bytes...
6828 	 */
6829 	scsi_ulto4b(lun->be_lun->blocksize, data->length);
6830 
6831 	ctsio->scsi_status = SCSI_STATUS_OK;
6832 
6833 	ctsio->be_move_done = ctl_config_move_done;
6834 	ctl_datamove((union ctl_io *)ctsio);
6835 
6836 	return (CTL_RETVAL_COMPLETE);
6837 }
6838 
6839 static int
6840 ctl_read_capacity_16(struct ctl_scsiio *ctsio)
6841 {
6842 	struct scsi_read_capacity_16 *cdb;
6843 	struct scsi_read_capacity_data_long *data;
6844 	struct ctl_lun *lun;
6845 	uint64_t lba;
6846 	uint32_t alloc_len;
6847 
6848 	CTL_DEBUG_PRINT(("ctl_read_capacity_16\n"));
6849 
6850 	cdb = (struct scsi_read_capacity_16 *)ctsio->cdb;
6851 
6852 	alloc_len = scsi_4btoul(cdb->alloc_len);
6853 	lba = scsi_8btou64(cdb->addr);
6854 
6855 	if ((cdb->reladr & SRC16_PMI)
6856 	 && (lba != 0)) {
6857 		ctl_set_invalid_field(/*ctsio*/ ctsio,
6858 				      /*sks_valid*/ 1,
6859 				      /*command*/ 1,
6860 				      /*field*/ 2,
6861 				      /*bit_valid*/ 0,
6862 				      /*bit*/ 0);
6863 		ctl_done((union ctl_io *)ctsio);
6864 		return (CTL_RETVAL_COMPLETE);
6865 	}
6866 
6867 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6868 
6869 	ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
6870 	data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr;
6871 
6872 	if (sizeof(*data) < alloc_len) {
6873 		ctsio->residual = alloc_len - sizeof(*data);
6874 		ctsio->kern_data_len = sizeof(*data);
6875 		ctsio->kern_total_len = sizeof(*data);
6876 	} else {
6877 		ctsio->residual = 0;
6878 		ctsio->kern_data_len = alloc_len;
6879 		ctsio->kern_total_len = alloc_len;
6880 	}
6881 	ctsio->kern_data_resid = 0;
6882 	ctsio->kern_rel_offset = 0;
6883 	ctsio->kern_sg_entries = 0;
6884 
6885 	scsi_u64to8b(lun->be_lun->maxlba, data->addr);
6886 	/* XXX KDM this may not be 512 bytes... */
6887 	scsi_ulto4b(lun->be_lun->blocksize, data->length);
6888 
6889 	ctsio->scsi_status = SCSI_STATUS_OK;
6890 
6891 	ctsio->be_move_done = ctl_config_move_done;
6892 	ctl_datamove((union ctl_io *)ctsio);
6893 
6894 	return (CTL_RETVAL_COMPLETE);
6895 }
6896 
6897 int
6898 ctl_service_action_in(struct ctl_scsiio *ctsio)
6899 {
6900 	struct scsi_service_action_in *cdb;
6901 	int retval;
6902 
6903 	CTL_DEBUG_PRINT(("ctl_service_action_in\n"));
6904 
6905 	cdb = (struct scsi_service_action_in *)ctsio->cdb;
6906 
6907 	retval = CTL_RETVAL_COMPLETE;
6908 
6909 	switch (cdb->service_action) {
6910 	case SRC16_SERVICE_ACTION:
6911 		retval = ctl_read_capacity_16(ctsio);
6912 		break;
6913 	default:
6914 		ctl_set_invalid_field(/*ctsio*/ ctsio,
6915 				      /*sks_valid*/ 1,
6916 				      /*command*/ 1,
6917 				      /*field*/ 1,
6918 				      /*bit_valid*/ 1,
6919 				      /*bit*/ 4);
6920 		ctl_done((union ctl_io *)ctsio);
6921 		break;
6922 	}
6923 
6924 	return (retval);
6925 }
6926 
6927 int
6928 ctl_maintenance_in(struct ctl_scsiio *ctsio)
6929 {
6930 	struct scsi_maintenance_in *cdb;
6931 	int retval;
6932 	int alloc_len, total_len = 0;
6933 	int num_target_port_groups, single;
6934 	struct ctl_lun *lun;
6935 	struct ctl_softc *softc;
6936 	struct scsi_target_group_data *rtg_ptr;
6937 	struct scsi_target_port_group_descriptor *tpg_desc_ptr1, *tpg_desc_ptr2;
6938 	struct scsi_target_port_descriptor  *tp_desc_ptr1_1, *tp_desc_ptr1_2,
6939 	                                    *tp_desc_ptr2_1, *tp_desc_ptr2_2;
6940 
6941 	CTL_DEBUG_PRINT(("ctl_maintenance_in\n"));
6942 
6943 	cdb = (struct scsi_maintenance_in *)ctsio->cdb;
6944 	softc = control_softc;
6945 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6946 
6947 	retval = CTL_RETVAL_COMPLETE;
6948 
6949 	if ((cdb->byte2 & SERVICE_ACTION_MASK) != SA_RPRT_TRGT_GRP) {
6950 		ctl_set_invalid_field(/*ctsio*/ ctsio,
6951 				      /*sks_valid*/ 1,
6952 				      /*command*/ 1,
6953 				      /*field*/ 1,
6954 				      /*bit_valid*/ 1,
6955 				      /*bit*/ 4);
6956 		ctl_done((union ctl_io *)ctsio);
6957 		return(retval);
6958 	}
6959 
6960 	mtx_lock(&softc->ctl_lock);
6961 	single = ctl_is_single;
6962 	mtx_unlock(&softc->ctl_lock);
6963 
6964 	if (single)
6965         	num_target_port_groups = NUM_TARGET_PORT_GROUPS - 1;
6966 	else
6967         	num_target_port_groups = NUM_TARGET_PORT_GROUPS;
6968 
6969 	total_len = sizeof(struct scsi_target_group_data) +
6970 		sizeof(struct scsi_target_port_group_descriptor) *
6971 		num_target_port_groups +
6972 		sizeof(struct scsi_target_port_descriptor) *
6973 		NUM_PORTS_PER_GRP * num_target_port_groups;
6974 
6975 	alloc_len = scsi_4btoul(cdb->length);
6976 
6977 	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
6978 
6979 	ctsio->kern_sg_entries = 0;
6980 
6981 	if (total_len < alloc_len) {
6982 		ctsio->residual = alloc_len - total_len;
6983 		ctsio->kern_data_len = total_len;
6984 		ctsio->kern_total_len = total_len;
6985 	} else {
6986 		ctsio->residual = 0;
6987 		ctsio->kern_data_len = alloc_len;
6988 		ctsio->kern_total_len = alloc_len;
6989 	}
6990 	ctsio->kern_data_resid = 0;
6991 	ctsio->kern_rel_offset = 0;
6992 
6993 	rtg_ptr = (struct scsi_target_group_data *)ctsio->kern_data_ptr;
6994 
6995 	tpg_desc_ptr1 = &rtg_ptr->groups[0];
6996 	tp_desc_ptr1_1 = &tpg_desc_ptr1->descriptors[0];
6997 	tp_desc_ptr1_2 = (struct scsi_target_port_descriptor *)
6998 	        &tp_desc_ptr1_1->desc_list[0];
6999 
7000 	if (single == 0) {
7001 		tpg_desc_ptr2 = (struct scsi_target_port_group_descriptor *)
7002 	                &tp_desc_ptr1_2->desc_list[0];
7003 		tp_desc_ptr2_1 = &tpg_desc_ptr2->descriptors[0];
7004 		tp_desc_ptr2_2 = (struct scsi_target_port_descriptor *)
7005 	        	&tp_desc_ptr2_1->desc_list[0];
7006         } else {
7007 		tpg_desc_ptr2 = NULL;
7008 		tp_desc_ptr2_1 = NULL;
7009 		tp_desc_ptr2_2 = NULL;
7010 	}
7011 
7012 	scsi_ulto4b(total_len - 4, rtg_ptr->length);
7013 	if (single == 0) {
7014         	if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS) {
7015 			if (lun->flags & CTL_LUN_PRIMARY_SC) {
7016 				tpg_desc_ptr1->pref_state = TPG_PRIMARY;
7017 				tpg_desc_ptr2->pref_state =
7018 					TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
7019 			} else {
7020 				tpg_desc_ptr1->pref_state =
7021 					TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
7022 				tpg_desc_ptr2->pref_state = TPG_PRIMARY;
7023 			}
7024 		} else {
7025 			if (lun->flags & CTL_LUN_PRIMARY_SC) {
7026 				tpg_desc_ptr1->pref_state =
7027 					TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
7028 				tpg_desc_ptr2->pref_state = TPG_PRIMARY;
7029 			} else {
7030 				tpg_desc_ptr1->pref_state = TPG_PRIMARY;
7031 				tpg_desc_ptr2->pref_state =
7032 					TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
7033 			}
7034 		}
7035 	} else {
7036 		tpg_desc_ptr1->pref_state = TPG_PRIMARY;
7037 	}
7038 	tpg_desc_ptr1->support = 0;
7039 	tpg_desc_ptr1->target_port_group[1] = 1;
7040 	tpg_desc_ptr1->status = TPG_IMPLICIT;
7041 	tpg_desc_ptr1->target_port_count= NUM_PORTS_PER_GRP;
7042 
7043 	if (single == 0) {
7044 		tpg_desc_ptr2->support = 0;
7045 		tpg_desc_ptr2->target_port_group[1] = 2;
7046 		tpg_desc_ptr2->status = TPG_IMPLICIT;
7047 		tpg_desc_ptr2->target_port_count = NUM_PORTS_PER_GRP;
7048 
7049 		tp_desc_ptr1_1->relative_target_port_identifier[1] = 1;
7050 		tp_desc_ptr1_2->relative_target_port_identifier[1] = 2;
7051 
7052 		tp_desc_ptr2_1->relative_target_port_identifier[1] = 9;
7053 		tp_desc_ptr2_2->relative_target_port_identifier[1] = 10;
7054 	} else {
7055         	if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS) {
7056 			tp_desc_ptr1_1->relative_target_port_identifier[1] = 1;
7057 			tp_desc_ptr1_2->relative_target_port_identifier[1] = 2;
7058 		} else {
7059 			tp_desc_ptr1_1->relative_target_port_identifier[1] = 9;
7060 			tp_desc_ptr1_2->relative_target_port_identifier[1] = 10;
7061 		}
7062 	}
7063 
7064 	ctsio->be_move_done = ctl_config_move_done;
7065 
7066 	CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n",
7067 			 ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1],
7068 			 ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3],
7069 			 ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5],
7070 			 ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7]));
7071 
7072 	ctl_datamove((union ctl_io *)ctsio);
7073 	return(retval);
7074 }
7075 
7076 int
7077 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
7078 {
7079 	struct scsi_per_res_in *cdb;
7080 	int alloc_len, total_len = 0;
7081 	/* struct scsi_per_res_in_rsrv in_data; */
7082 	struct ctl_lun *lun;
7083 	struct ctl_softc *softc;
7084 
7085 	CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n"));
7086 
7087 	softc = control_softc;
7088 
7089 	cdb = (struct scsi_per_res_in *)ctsio->cdb;
7090 
7091 	alloc_len = scsi_2btoul(cdb->length);
7092 
7093 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7094 
7095 retry:
7096 	mtx_lock(&softc->ctl_lock);
7097 	switch (cdb->action) {
7098 	case SPRI_RK: /* read keys */
7099 		total_len = sizeof(struct scsi_per_res_in_keys) +
7100 			lun->pr_key_count *
7101 			sizeof(struct scsi_per_res_key);
7102 		break;
7103 	case SPRI_RR: /* read reservation */
7104 		if (lun->flags & CTL_LUN_PR_RESERVED)
7105 			total_len = sizeof(struct scsi_per_res_in_rsrv);
7106 		else
7107 			total_len = sizeof(struct scsi_per_res_in_header);
7108 		break;
7109 	case SPRI_RC: /* report capabilities */
7110 		total_len = sizeof(struct scsi_per_res_cap);
7111 		break;
7112 	case SPRI_RS: /* read full status */
7113 	default:
7114 		mtx_unlock(&softc->ctl_lock);
7115 		ctl_set_invalid_field(ctsio,
7116 				      /*sks_valid*/ 1,
7117 				      /*command*/ 1,
7118 				      /*field*/ 1,
7119 				      /*bit_valid*/ 1,
7120 				      /*bit*/ 0);
7121 		ctl_done((union ctl_io *)ctsio);
7122 		return (CTL_RETVAL_COMPLETE);
7123 		break; /* NOTREACHED */
7124 	}
7125 	mtx_unlock(&softc->ctl_lock);
7126 
7127 	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7128 
7129 	if (total_len < alloc_len) {
7130 		ctsio->residual = alloc_len - total_len;
7131 		ctsio->kern_data_len = total_len;
7132 		ctsio->kern_total_len = total_len;
7133 	} else {
7134 		ctsio->residual = 0;
7135 		ctsio->kern_data_len = alloc_len;
7136 		ctsio->kern_total_len = alloc_len;
7137 	}
7138 
7139 	ctsio->kern_data_resid = 0;
7140 	ctsio->kern_rel_offset = 0;
7141 	ctsio->kern_sg_entries = 0;
7142 
7143 	mtx_lock(&softc->ctl_lock);
7144 	switch (cdb->action) {
7145 	case SPRI_RK: { // read keys
7146         struct scsi_per_res_in_keys *res_keys;
7147 		int i, key_count;
7148 
7149 		res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr;
7150 
7151 		/*
7152 		 * We had to drop the lock to allocate our buffer, which
7153 		 * leaves time for someone to come in with another
7154 		 * persistent reservation.  (That is unlikely, though,
7155 		 * since this should be the only persistent reservation
7156 		 * command active right now.)
7157 		 */
7158 		if (total_len != (sizeof(struct scsi_per_res_in_keys) +
7159 		    (lun->pr_key_count *
7160 		     sizeof(struct scsi_per_res_key)))){
7161 			mtx_unlock(&softc->ctl_lock);
7162 			free(ctsio->kern_data_ptr, M_CTL);
7163 			printf("%s: reservation length changed, retrying\n",
7164 			       __func__);
7165 			goto retry;
7166 		}
7167 
7168 		scsi_ulto4b(lun->PRGeneration, res_keys->header.generation);
7169 
7170 		scsi_ulto4b(sizeof(struct scsi_per_res_key) *
7171 			     lun->pr_key_count, res_keys->header.length);
7172 
7173 		for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) {
7174 			if (!lun->per_res[i].registered)
7175 				continue;
7176 
7177 			/*
7178 			 * We used lun->pr_key_count to calculate the
7179 			 * size to allocate.  If it turns out the number of
7180 			 * initiators with the registered flag set is
7181 			 * larger than that (i.e. they haven't been kept in
7182 			 * sync), we've got a problem.
7183 			 */
7184 			if (key_count >= lun->pr_key_count) {
7185 #ifdef NEEDTOPORT
7186 				csevent_log(CSC_CTL | CSC_SHELF_SW |
7187 					    CTL_PR_ERROR,
7188 					    csevent_LogType_Fault,
7189 					    csevent_AlertLevel_Yellow,
7190 					    csevent_FRU_ShelfController,
7191 					    csevent_FRU_Firmware,
7192 				        csevent_FRU_Unknown,
7193 					    "registered keys %d >= key "
7194 					    "count %d", key_count,
7195 					    lun->pr_key_count);
7196 #endif
7197 				key_count++;
7198 				continue;
7199 			}
7200 			memcpy(res_keys->keys[key_count].key,
7201 			       lun->per_res[i].res_key.key,
7202 			       ctl_min(sizeof(res_keys->keys[key_count].key),
7203 			       sizeof(lun->per_res[i].res_key)));
7204 			key_count++;
7205 		}
7206 		break;
7207 	}
7208 	case SPRI_RR: { // read reservation
7209 		struct scsi_per_res_in_rsrv *res;
7210 		int tmp_len, header_only;
7211 
7212 		res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr;
7213 
7214 		scsi_ulto4b(lun->PRGeneration, res->header.generation);
7215 
7216 		if (lun->flags & CTL_LUN_PR_RESERVED)
7217 		{
7218 			tmp_len = sizeof(struct scsi_per_res_in_rsrv);
7219 			scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data),
7220 				    res->header.length);
7221 			header_only = 0;
7222 		} else {
7223 			tmp_len = sizeof(struct scsi_per_res_in_header);
7224 			scsi_ulto4b(0, res->header.length);
7225 			header_only = 1;
7226 		}
7227 
7228 		/*
7229 		 * We had to drop the lock to allocate our buffer, which
7230 		 * leaves time for someone to come in with another
7231 		 * persistent reservation.  (That is unlikely, though,
7232 		 * since this should be the only persistent reservation
7233 		 * command active right now.)
7234 		 */
7235 		if (tmp_len != total_len) {
7236 			mtx_unlock(&softc->ctl_lock);
7237 			free(ctsio->kern_data_ptr, M_CTL);
7238 			printf("%s: reservation status changed, retrying\n",
7239 			       __func__);
7240 			goto retry;
7241 		}
7242 
7243 		/*
7244 		 * No reservation held, so we're done.
7245 		 */
7246 		if (header_only != 0)
7247 			break;
7248 
7249 		/*
7250 		 * If the registration is an All Registrants type, the key
7251 		 * is 0, since it doesn't really matter.
7252 		 */
7253 		if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
7254 			memcpy(res->data.reservation,
7255 			       &lun->per_res[lun->pr_res_idx].res_key,
7256 			       sizeof(struct scsi_per_res_key));
7257 		}
7258 		res->data.scopetype = lun->res_type;
7259 		break;
7260 	}
7261 	case SPRI_RC:     //report capabilities
7262 	{
7263 		struct scsi_per_res_cap *res_cap;
7264 		uint16_t type_mask;
7265 
7266 		res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr;
7267 		scsi_ulto2b(sizeof(*res_cap), res_cap->length);
7268 		res_cap->flags2 |= SPRI_TMV;
7269 		type_mask = SPRI_TM_WR_EX_AR |
7270 			    SPRI_TM_EX_AC_RO |
7271 			    SPRI_TM_WR_EX_RO |
7272 			    SPRI_TM_EX_AC |
7273 			    SPRI_TM_WR_EX |
7274 			    SPRI_TM_EX_AC_AR;
7275 		scsi_ulto2b(type_mask, res_cap->type_mask);
7276 		break;
7277 	}
7278 	case SPRI_RS: //read full status
7279 	default:
7280 		/*
7281 		 * This is a bug, because we just checked for this above,
7282 		 * and should have returned an error.
7283 		 */
7284 		panic("Invalid PR type %x", cdb->action);
7285 		break; /* NOTREACHED */
7286 	}
7287 	mtx_unlock(&softc->ctl_lock);
7288 
7289 	ctsio->be_move_done = ctl_config_move_done;
7290 
7291 	CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n",
7292 			 ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1],
7293 			 ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3],
7294 			 ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5],
7295 			 ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7]));
7296 
7297 	ctl_datamove((union ctl_io *)ctsio);
7298 
7299 	return (CTL_RETVAL_COMPLETE);
7300 }
7301 
7302 /*
7303  * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if
7304  * it should return.
7305  */
7306 static int
7307 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
7308 		uint64_t sa_res_key, uint8_t type, uint32_t residx,
7309 		struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb,
7310 		struct scsi_per_res_out_parms* param)
7311 {
7312 	union ctl_ha_msg persis_io;
7313 	int retval, i;
7314 	int isc_retval;
7315 
7316 	retval = 0;
7317 
7318 	if (sa_res_key == 0) {
7319 		mtx_lock(&softc->ctl_lock);
7320 		if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
7321 			/* validate scope and type */
7322 			if ((cdb->scope_type & SPR_SCOPE_MASK) !=
7323 			     SPR_LU_SCOPE) {
7324 				mtx_unlock(&softc->ctl_lock);
7325 				ctl_set_invalid_field(/*ctsio*/ ctsio,
7326 						      /*sks_valid*/ 1,
7327 						      /*command*/ 1,
7328 						      /*field*/ 2,
7329 						      /*bit_valid*/ 1,
7330 						      /*bit*/ 4);
7331 				ctl_done((union ctl_io *)ctsio);
7332 				return (1);
7333 			}
7334 
7335 		        if (type>8 || type==2 || type==4 || type==0) {
7336 				mtx_unlock(&softc->ctl_lock);
7337 				ctl_set_invalid_field(/*ctsio*/ ctsio,
7338        	           				      /*sks_valid*/ 1,
7339 						      /*command*/ 1,
7340 						      /*field*/ 2,
7341 						      /*bit_valid*/ 1,
7342 						      /*bit*/ 0);
7343 				ctl_done((union ctl_io *)ctsio);
7344 				return (1);
7345 		        }
7346 
7347 			/* temporarily unregister this nexus */
7348 			lun->per_res[residx].registered = 0;
7349 
7350 			/*
7351 			 * Unregister everybody else and build UA for
7352 			 * them
7353 			 */
7354 			for(i=0; i < 2*CTL_MAX_INITIATORS; i++) {
7355 				if (lun->per_res[i].registered == 0)
7356 					continue;
7357 
7358 				if (!persis_offset
7359 				 && i <CTL_MAX_INITIATORS)
7360 					lun->pending_sense[i].ua_pending |=
7361 						CTL_UA_REG_PREEMPT;
7362 				else if (persis_offset
7363 				      && i >= persis_offset)
7364 					lun->pending_sense[i-persis_offset
7365 						].ua_pending |=
7366 						CTL_UA_REG_PREEMPT;
7367 				lun->per_res[i].registered = 0;
7368 				memset(&lun->per_res[i].res_key, 0,
7369 				       sizeof(struct scsi_per_res_key));
7370 			}
7371 			lun->per_res[residx].registered = 1;
7372 			lun->pr_key_count = 1;
7373 			lun->res_type = type;
7374 			if (lun->res_type != SPR_TYPE_WR_EX_AR
7375 			 && lun->res_type != SPR_TYPE_EX_AC_AR)
7376 				lun->pr_res_idx = residx;
7377 
7378 			mtx_unlock(&softc->ctl_lock);
7379 			/* send msg to other side */
7380 			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7381 			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7382 			persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7383 			persis_io.pr.pr_info.residx = lun->pr_res_idx;
7384 			persis_io.pr.pr_info.res_type = type;
7385 			memcpy(persis_io.pr.pr_info.sa_res_key,
7386 			       param->serv_act_res_key,
7387 			       sizeof(param->serv_act_res_key));
7388 			if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
7389 			     &persis_io, sizeof(persis_io), 0)) >
7390 			     CTL_HA_STATUS_SUCCESS) {
7391 				printf("CTL:Persis Out error returned "
7392 				       "from ctl_ha_msg_send %d\n",
7393 				       isc_retval);
7394 			}
7395 		} else {
7396 			/* not all registrants */
7397 			mtx_unlock(&softc->ctl_lock);
7398 			free(ctsio->kern_data_ptr, M_CTL);
7399 			ctl_set_invalid_field(ctsio,
7400 					      /*sks_valid*/ 1,
7401 					      /*command*/ 0,
7402 					      /*field*/ 8,
7403 					      /*bit_valid*/ 0,
7404 					      /*bit*/ 0);
7405 			ctl_done((union ctl_io *)ctsio);
7406 			return (1);
7407 		}
7408 	} else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
7409 		|| !(lun->flags & CTL_LUN_PR_RESERVED)) {
7410 		int found = 0;
7411 
7412 		mtx_lock(&softc->ctl_lock);
7413 		if (res_key == sa_res_key) {
7414 			/* special case */
7415 			/*
7416 			 * The spec implies this is not good but doesn't
7417 			 * say what to do. There are two choices either
7418 			 * generate a res conflict or check condition
7419 			 * with illegal field in parameter data. Since
7420 			 * that is what is done when the sa_res_key is
7421 			 * zero I'll take that approach since this has
7422 			 * to do with the sa_res_key.
7423 			 */
7424 			mtx_unlock(&softc->ctl_lock);
7425 			free(ctsio->kern_data_ptr, M_CTL);
7426 			ctl_set_invalid_field(ctsio,
7427 					      /*sks_valid*/ 1,
7428 					      /*command*/ 0,
7429 					      /*field*/ 8,
7430 					      /*bit_valid*/ 0,
7431 					      /*bit*/ 0);
7432 			ctl_done((union ctl_io *)ctsio);
7433 			return (1);
7434 		}
7435 
7436 		for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
7437 			if (lun->per_res[i].registered
7438 			 && memcmp(param->serv_act_res_key,
7439 			    lun->per_res[i].res_key.key,
7440 			    sizeof(struct scsi_per_res_key)) != 0)
7441 				continue;
7442 
7443 			found = 1;
7444 			lun->per_res[i].registered = 0;
7445 			memset(&lun->per_res[i].res_key, 0,
7446 			       sizeof(struct scsi_per_res_key));
7447 			lun->pr_key_count--;
7448 
7449 			if (!persis_offset
7450 			 && i < CTL_MAX_INITIATORS)
7451 				lun->pending_sense[i].ua_pending |=
7452 					CTL_UA_REG_PREEMPT;
7453 			else if (persis_offset
7454 			      && i >= persis_offset)
7455 				lun->pending_sense[i-persis_offset].ua_pending|=
7456 					CTL_UA_REG_PREEMPT;
7457 		}
7458 		mtx_unlock(&softc->ctl_lock);
7459 		if (!found) {
7460 			free(ctsio->kern_data_ptr, M_CTL);
7461 			ctl_set_reservation_conflict(ctsio);
7462 			ctl_done((union ctl_io *)ctsio);
7463 			return (CTL_RETVAL_COMPLETE);
7464 		}
7465 		/* send msg to other side */
7466 		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7467 		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7468 		persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7469 		persis_io.pr.pr_info.residx = lun->pr_res_idx;
7470 		persis_io.pr.pr_info.res_type = type;
7471 		memcpy(persis_io.pr.pr_info.sa_res_key,
7472 		       param->serv_act_res_key,
7473 		       sizeof(param->serv_act_res_key));
7474 		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
7475 		     &persis_io, sizeof(persis_io), 0)) >
7476 		     CTL_HA_STATUS_SUCCESS) {
7477 			printf("CTL:Persis Out error returned from "
7478 			       "ctl_ha_msg_send %d\n", isc_retval);
7479 		}
7480 	} else {
7481 		/* Reserved but not all registrants */
7482 		/* sa_res_key is res holder */
7483 		if (memcmp(param->serv_act_res_key,
7484                    lun->per_res[lun->pr_res_idx].res_key.key,
7485                    sizeof(struct scsi_per_res_key)) == 0) {
7486 			/* validate scope and type */
7487 			if ((cdb->scope_type & SPR_SCOPE_MASK) !=
7488 			     SPR_LU_SCOPE) {
7489 				ctl_set_invalid_field(/*ctsio*/ ctsio,
7490 						      /*sks_valid*/ 1,
7491 						      /*command*/ 1,
7492 						      /*field*/ 2,
7493 						      /*bit_valid*/ 1,
7494 						      /*bit*/ 4);
7495 				ctl_done((union ctl_io *)ctsio);
7496 				return (1);
7497 			}
7498 
7499 			if (type>8 || type==2 || type==4 || type==0) {
7500 				ctl_set_invalid_field(/*ctsio*/ ctsio,
7501 						      /*sks_valid*/ 1,
7502 						      /*command*/ 1,
7503 						      /*field*/ 2,
7504 						      /*bit_valid*/ 1,
7505 						      /*bit*/ 0);
7506 				ctl_done((union ctl_io *)ctsio);
7507 				return (1);
7508 			}
7509 
7510 			/*
7511 			 * Do the following:
7512 			 * if sa_res_key != res_key remove all
7513 			 * registrants w/sa_res_key and generate UA
7514 			 * for these registrants(Registrations
7515 			 * Preempted) if it wasn't an exclusive
7516 			 * reservation generate UA(Reservations
7517 			 * Preempted) for all other registered nexuses
7518 			 * if the type has changed. Establish the new
7519 			 * reservation and holder. If res_key and
7520 			 * sa_res_key are the same do the above
7521 			 * except don't unregister the res holder.
7522 			 */
7523 
7524 			/*
7525 			 * Temporarily unregister so it won't get
7526 			 * removed or UA generated
7527 			 */
7528 			lun->per_res[residx].registered = 0;
7529 			for(i=0; i < 2*CTL_MAX_INITIATORS; i++) {
7530 				if (lun->per_res[i].registered == 0)
7531 					continue;
7532 
7533 				if (memcmp(param->serv_act_res_key,
7534 				    lun->per_res[i].res_key.key,
7535 				    sizeof(struct scsi_per_res_key)) == 0) {
7536 					lun->per_res[i].registered = 0;
7537 					memset(&lun->per_res[i].res_key,
7538 					       0,
7539 					       sizeof(struct scsi_per_res_key));
7540 					lun->pr_key_count--;
7541 
7542 					if (!persis_offset
7543 					 && i < CTL_MAX_INITIATORS)
7544 						lun->pending_sense[i
7545 							].ua_pending |=
7546 							CTL_UA_REG_PREEMPT;
7547 					else if (persis_offset
7548 					      && i >= persis_offset)
7549 						lun->pending_sense[
7550 						  i-persis_offset].ua_pending |=
7551 						  CTL_UA_REG_PREEMPT;
7552 				} else if (type != lun->res_type
7553 					&& (lun->res_type == SPR_TYPE_WR_EX_RO
7554 					 || lun->res_type ==SPR_TYPE_EX_AC_RO)){
7555 						if (!persis_offset
7556 						 && i < CTL_MAX_INITIATORS)
7557 							lun->pending_sense[i
7558 							].ua_pending |=
7559 							CTL_UA_RES_RELEASE;
7560 						else if (persis_offset
7561 						      && i >= persis_offset)
7562 							lun->pending_sense[
7563 							i-persis_offset
7564 							].ua_pending |=
7565 							CTL_UA_RES_RELEASE;
7566 				}
7567 			}
7568 			lun->per_res[residx].registered = 1;
7569 			lun->res_type = type;
7570 			if (lun->res_type != SPR_TYPE_WR_EX_AR
7571 			 && lun->res_type != SPR_TYPE_EX_AC_AR)
7572 				lun->pr_res_idx = residx;
7573 			else
7574 				lun->pr_res_idx =
7575 					CTL_PR_ALL_REGISTRANTS;
7576 
7577 			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7578 			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7579 			persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7580 			persis_io.pr.pr_info.residx = lun->pr_res_idx;
7581 			persis_io.pr.pr_info.res_type = type;
7582 			memcpy(persis_io.pr.pr_info.sa_res_key,
7583 			       param->serv_act_res_key,
7584 			       sizeof(param->serv_act_res_key));
7585 			if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
7586 			     &persis_io, sizeof(persis_io), 0)) >
7587 			     CTL_HA_STATUS_SUCCESS) {
7588 				printf("CTL:Persis Out error returned "
7589 				       "from ctl_ha_msg_send %d\n",
7590 				       isc_retval);
7591 			}
7592 		} else {
7593 			/*
7594 			 * sa_res_key is not the res holder just
7595 			 * remove registrants
7596 			 */
7597 			int found=0;
7598 			mtx_lock(&softc->ctl_lock);
7599 
7600 			for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
7601 				if (memcmp(param->serv_act_res_key,
7602 				    lun->per_res[i].res_key.key,
7603 				    sizeof(struct scsi_per_res_key)) != 0)
7604 					continue;
7605 
7606 				found = 1;
7607 				lun->per_res[i].registered = 0;
7608 				memset(&lun->per_res[i].res_key, 0,
7609 				       sizeof(struct scsi_per_res_key));
7610 				lun->pr_key_count--;
7611 
7612 				if (!persis_offset
7613 				 && i < CTL_MAX_INITIATORS)
7614 					lun->pending_sense[i].ua_pending |=
7615 						CTL_UA_REG_PREEMPT;
7616 				else if (persis_offset
7617 				      && i >= persis_offset)
7618 					lun->pending_sense[
7619 						i-persis_offset].ua_pending |=
7620 						CTL_UA_REG_PREEMPT;
7621 			}
7622 
7623 			if (!found) {
7624 				mtx_unlock(&softc->ctl_lock);
7625 				free(ctsio->kern_data_ptr, M_CTL);
7626 				ctl_set_reservation_conflict(ctsio);
7627 				ctl_done((union ctl_io *)ctsio);
7628 		        	return (1);
7629 			}
7630 			mtx_unlock(&softc->ctl_lock);
7631 			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7632 			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7633 			persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7634 			persis_io.pr.pr_info.residx = lun->pr_res_idx;
7635 			persis_io.pr.pr_info.res_type = type;
7636 			memcpy(persis_io.pr.pr_info.sa_res_key,
7637 			       param->serv_act_res_key,
7638 			       sizeof(param->serv_act_res_key));
7639 			if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
7640 			     &persis_io, sizeof(persis_io), 0)) >
7641 			     CTL_HA_STATUS_SUCCESS) {
7642 				printf("CTL:Persis Out error returned "
7643 				       "from ctl_ha_msg_send %d\n",
7644 				isc_retval);
7645 			}
7646 		}
7647 	}
7648 
7649 	lun->PRGeneration++;
7650 
7651 	return (retval);
7652 }
7653 
7654 static void
7655 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
7656 {
7657 	int i;
7658 
7659 	if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
7660 	 || lun->pr_res_idx == CTL_PR_NO_RESERVATION
7661 	 || memcmp(&lun->per_res[lun->pr_res_idx].res_key,
7662 		   msg->pr.pr_info.sa_res_key,
7663 		   sizeof(struct scsi_per_res_key)) != 0) {
7664 		uint64_t sa_res_key;
7665 		sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key);
7666 
7667 		if (sa_res_key == 0) {
7668 			/* temporarily unregister this nexus */
7669 			lun->per_res[msg->pr.pr_info.residx].registered = 0;
7670 
7671 			/*
7672 			 * Unregister everybody else and build UA for
7673 			 * them
7674 			 */
7675 			for(i=0; i < 2*CTL_MAX_INITIATORS; i++) {
7676 				if (lun->per_res[i].registered == 0)
7677 					continue;
7678 
7679 				if (!persis_offset
7680 				 && i < CTL_MAX_INITIATORS)
7681 					lun->pending_sense[i].ua_pending |=
7682 						CTL_UA_REG_PREEMPT;
7683 				else if (persis_offset && i >= persis_offset)
7684 					lun->pending_sense[i -
7685 						persis_offset].ua_pending |=
7686 						CTL_UA_REG_PREEMPT;
7687 				lun->per_res[i].registered = 0;
7688 				memset(&lun->per_res[i].res_key, 0,
7689 				       sizeof(struct scsi_per_res_key));
7690 			}
7691 
7692 			lun->per_res[msg->pr.pr_info.residx].registered = 1;
7693 			lun->pr_key_count = 1;
7694 			lun->res_type = msg->pr.pr_info.res_type;
7695 			if (lun->res_type != SPR_TYPE_WR_EX_AR
7696 			 && lun->res_type != SPR_TYPE_EX_AC_AR)
7697 				lun->pr_res_idx = msg->pr.pr_info.residx;
7698 		} else {
7699 		        for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
7700 				if (memcmp(msg->pr.pr_info.sa_res_key,
7701 		                   lun->per_res[i].res_key.key,
7702 		                   sizeof(struct scsi_per_res_key)) != 0)
7703 					continue;
7704 
7705 				lun->per_res[i].registered = 0;
7706 				memset(&lun->per_res[i].res_key, 0,
7707 				       sizeof(struct scsi_per_res_key));
7708 				lun->pr_key_count--;
7709 
7710 				if (!persis_offset
7711 				 && i < persis_offset)
7712 					lun->pending_sense[i].ua_pending |=
7713 						CTL_UA_REG_PREEMPT;
7714 				else if (persis_offset
7715 				      && i >= persis_offset)
7716 					lun->pending_sense[i -
7717 						persis_offset].ua_pending |=
7718 						CTL_UA_REG_PREEMPT;
7719 			}
7720 		}
7721 	} else {
7722 		/*
7723 		 * Temporarily unregister so it won't get removed
7724 		 * or UA generated
7725 		 */
7726 		lun->per_res[msg->pr.pr_info.residx].registered = 0;
7727 		for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
7728 			if (lun->per_res[i].registered == 0)
7729 				continue;
7730 
7731 			if (memcmp(msg->pr.pr_info.sa_res_key,
7732 	                   lun->per_res[i].res_key.key,
7733 	                   sizeof(struct scsi_per_res_key)) == 0) {
7734 				lun->per_res[i].registered = 0;
7735 				memset(&lun->per_res[i].res_key, 0,
7736 				       sizeof(struct scsi_per_res_key));
7737 				lun->pr_key_count--;
7738 				if (!persis_offset
7739 				 && i < CTL_MAX_INITIATORS)
7740 					lun->pending_sense[i].ua_pending |=
7741 						CTL_UA_REG_PREEMPT;
7742 				else if (persis_offset
7743 				      && i >= persis_offset)
7744 					lun->pending_sense[i -
7745 						persis_offset].ua_pending |=
7746 						CTL_UA_REG_PREEMPT;
7747 			} else if (msg->pr.pr_info.res_type != lun->res_type
7748 				&& (lun->res_type == SPR_TYPE_WR_EX_RO
7749 				 || lun->res_type == SPR_TYPE_EX_AC_RO)) {
7750 					if (!persis_offset
7751 					 && i < persis_offset)
7752 						lun->pending_sense[i
7753 							].ua_pending |=
7754 							CTL_UA_RES_RELEASE;
7755 					else if (persis_offset
7756 					      && i >= persis_offset)
7757 					lun->pending_sense[i -
7758 						persis_offset].ua_pending |=
7759 						CTL_UA_RES_RELEASE;
7760 			}
7761 		}
7762 		lun->per_res[msg->pr.pr_info.residx].registered = 1;
7763 		lun->res_type = msg->pr.pr_info.res_type;
7764 		if (lun->res_type != SPR_TYPE_WR_EX_AR
7765 		 && lun->res_type != SPR_TYPE_EX_AC_AR)
7766 			lun->pr_res_idx = msg->pr.pr_info.residx;
7767 		else
7768 			lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
7769 	}
7770 	lun->PRGeneration++;
7771 
7772 }
7773 
7774 
7775 int
7776 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
7777 {
7778 	int retval;
7779 	int isc_retval;
7780 	u_int32_t param_len;
7781 	struct scsi_per_res_out *cdb;
7782 	struct ctl_lun *lun;
7783 	struct scsi_per_res_out_parms* param;
7784 	struct ctl_softc *softc;
7785 	uint32_t residx;
7786 	uint64_t res_key, sa_res_key;
7787 	uint8_t type;
7788 	union ctl_ha_msg persis_io;
7789 	int    i;
7790 
7791 	CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n"));
7792 
7793 	retval = CTL_RETVAL_COMPLETE;
7794 
7795 	softc = control_softc;
7796 
7797 	cdb = (struct scsi_per_res_out *)ctsio->cdb;
7798 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7799 
7800 	/*
7801 	 * We only support whole-LUN scope.  The scope & type are ignored for
7802 	 * register, register and ignore existing key and clear.
7803 	 * We sometimes ignore scope and type on preempts too!!
7804 	 * Verify reservation type here as well.
7805 	 */
7806 	type = cdb->scope_type & SPR_TYPE_MASK;
7807 	if ((cdb->action == SPRO_RESERVE)
7808 	 || (cdb->action == SPRO_RELEASE)) {
7809 		if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) {
7810 			ctl_set_invalid_field(/*ctsio*/ ctsio,
7811 					      /*sks_valid*/ 1,
7812 					      /*command*/ 1,
7813 					      /*field*/ 2,
7814 					      /*bit_valid*/ 1,
7815 					      /*bit*/ 4);
7816 			ctl_done((union ctl_io *)ctsio);
7817 			return (CTL_RETVAL_COMPLETE);
7818 		}
7819 
7820 		if (type>8 || type==2 || type==4 || type==0) {
7821 			ctl_set_invalid_field(/*ctsio*/ ctsio,
7822 					      /*sks_valid*/ 1,
7823 					      /*command*/ 1,
7824 					      /*field*/ 2,
7825 					      /*bit_valid*/ 1,
7826 					      /*bit*/ 0);
7827 			ctl_done((union ctl_io *)ctsio);
7828 			return (CTL_RETVAL_COMPLETE);
7829 		}
7830 	}
7831 
7832 	switch (cdb->action & SPRO_ACTION_MASK) {
7833 	case SPRO_REGISTER:
7834 	case SPRO_RESERVE:
7835 	case SPRO_RELEASE:
7836 	case SPRO_CLEAR:
7837 	case SPRO_PREEMPT:
7838 	case SPRO_REG_IGNO:
7839 		break;
7840 	case SPRO_REG_MOVE:
7841 	case SPRO_PRE_ABO:
7842 	default:
7843 		ctl_set_invalid_field(/*ctsio*/ ctsio,
7844 				      /*sks_valid*/ 1,
7845 				      /*command*/ 1,
7846 				      /*field*/ 1,
7847 				      /*bit_valid*/ 1,
7848 				      /*bit*/ 0);
7849 		ctl_done((union ctl_io *)ctsio);
7850 		return (CTL_RETVAL_COMPLETE);
7851 		break; /* NOTREACHED */
7852 	}
7853 
7854 	param_len = scsi_4btoul(cdb->length);
7855 
7856 	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
7857 		ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
7858 		ctsio->kern_data_len = param_len;
7859 		ctsio->kern_total_len = param_len;
7860 		ctsio->kern_data_resid = 0;
7861 		ctsio->kern_rel_offset = 0;
7862 		ctsio->kern_sg_entries = 0;
7863 		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7864 		ctsio->be_move_done = ctl_config_move_done;
7865 		ctl_datamove((union ctl_io *)ctsio);
7866 
7867 		return (CTL_RETVAL_COMPLETE);
7868 	}
7869 
7870 	param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr;
7871 
7872 	residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
7873 	res_key = scsi_8btou64(param->res_key.key);
7874 	sa_res_key = scsi_8btou64(param->serv_act_res_key);
7875 
7876 	/*
7877 	 * Validate the reservation key here except for SPRO_REG_IGNO
7878 	 * This must be done for all other service actions
7879 	 */
7880 	if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) {
7881 		mtx_lock(&softc->ctl_lock);
7882 		if (lun->per_res[residx].registered) {
7883 		    if (memcmp(param->res_key.key,
7884 			       lun->per_res[residx].res_key.key,
7885 			       ctl_min(sizeof(param->res_key),
7886 			       sizeof(lun->per_res[residx].res_key))) != 0) {
7887 				/*
7888 				 * The current key passed in doesn't match
7889 				 * the one the initiator previously
7890 				 * registered.
7891 				 */
7892 				mtx_unlock(&softc->ctl_lock);
7893 				free(ctsio->kern_data_ptr, M_CTL);
7894 				ctl_set_reservation_conflict(ctsio);
7895 				ctl_done((union ctl_io *)ctsio);
7896 				return (CTL_RETVAL_COMPLETE);
7897 			}
7898 		} else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) {
7899 			/*
7900 			 * We are not registered
7901 			 */
7902 			mtx_unlock(&softc->ctl_lock);
7903 			free(ctsio->kern_data_ptr, M_CTL);
7904 			ctl_set_reservation_conflict(ctsio);
7905 			ctl_done((union ctl_io *)ctsio);
7906 			return (CTL_RETVAL_COMPLETE);
7907 		} else if (res_key != 0) {
7908 			/*
7909 			 * We are not registered and trying to register but
7910 			 * the register key isn't zero.
7911 			 */
7912 			mtx_unlock(&softc->ctl_lock);
7913 			free(ctsio->kern_data_ptr, M_CTL);
7914 			ctl_set_reservation_conflict(ctsio);
7915 			ctl_done((union ctl_io *)ctsio);
7916 			return (CTL_RETVAL_COMPLETE);
7917 		}
7918 		mtx_unlock(&softc->ctl_lock);
7919 	}
7920 
7921 	switch (cdb->action & SPRO_ACTION_MASK) {
7922 	case SPRO_REGISTER:
7923 	case SPRO_REG_IGNO: {
7924 
7925 #if 0
7926 		printf("Registration received\n");
7927 #endif
7928 
7929 		/*
7930 		 * We don't support any of these options, as we report in
7931 		 * the read capabilities request (see
7932 		 * ctl_persistent_reserve_in(), above).
7933 		 */
7934 		if ((param->flags & SPR_SPEC_I_PT)
7935 		 || (param->flags & SPR_ALL_TG_PT)
7936 		 || (param->flags & SPR_APTPL)) {
7937 			int bit_ptr;
7938 
7939 			if (param->flags & SPR_APTPL)
7940 				bit_ptr = 0;
7941 			else if (param->flags & SPR_ALL_TG_PT)
7942 				bit_ptr = 2;
7943 			else /* SPR_SPEC_I_PT */
7944 				bit_ptr = 3;
7945 
7946 			free(ctsio->kern_data_ptr, M_CTL);
7947 			ctl_set_invalid_field(ctsio,
7948 					      /*sks_valid*/ 1,
7949 					      /*command*/ 0,
7950 					      /*field*/ 20,
7951 					      /*bit_valid*/ 1,
7952 					      /*bit*/ bit_ptr);
7953 			ctl_done((union ctl_io *)ctsio);
7954 			return (CTL_RETVAL_COMPLETE);
7955 		}
7956 
7957 		mtx_lock(&softc->ctl_lock);
7958 
7959 		/*
7960 		 * The initiator wants to clear the
7961 		 * key/unregister.
7962 		 */
7963 		if (sa_res_key == 0) {
7964 			if ((res_key == 0
7965 			  && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER)
7966 			 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO
7967 			  && !lun->per_res[residx].registered)) {
7968 				mtx_unlock(&softc->ctl_lock);
7969 				goto done;
7970 			}
7971 
7972 			lun->per_res[residx].registered = 0;
7973 			memset(&lun->per_res[residx].res_key,
7974 			       0, sizeof(lun->per_res[residx].res_key));
7975 			lun->pr_key_count--;
7976 
7977 			if (residx == lun->pr_res_idx) {
7978 				lun->flags &= ~CTL_LUN_PR_RESERVED;
7979 				lun->pr_res_idx = CTL_PR_NO_RESERVATION;
7980 
7981 				if ((lun->res_type == SPR_TYPE_WR_EX_RO
7982 				  || lun->res_type == SPR_TYPE_EX_AC_RO)
7983 				 && lun->pr_key_count) {
7984 					/*
7985 					 * If the reservation is a registrants
7986 					 * only type we need to generate a UA
7987 					 * for other registered inits.  The
7988 					 * sense code should be RESERVATIONS
7989 					 * RELEASED
7990 					 */
7991 
7992 					for (i = 0; i < CTL_MAX_INITIATORS;i++){
7993 						if (lun->per_res[
7994 						    i+persis_offset].registered
7995 						    == 0)
7996 							continue;
7997 						lun->pending_sense[i
7998 							].ua_pending |=
7999 							CTL_UA_RES_RELEASE;
8000 					}
8001 				}
8002 				lun->res_type = 0;
8003 			} else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
8004 				if (lun->pr_key_count==0) {
8005 					lun->flags &= ~CTL_LUN_PR_RESERVED;
8006 					lun->res_type = 0;
8007 					lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8008 				}
8009 			}
8010 			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8011 			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8012 			persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY;
8013 			persis_io.pr.pr_info.residx = residx;
8014 			if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8015 			     &persis_io, sizeof(persis_io), 0 )) >
8016 			     CTL_HA_STATUS_SUCCESS) {
8017 				printf("CTL:Persis Out error returned from "
8018 				       "ctl_ha_msg_send %d\n", isc_retval);
8019 			}
8020 			mtx_unlock(&softc->ctl_lock);
8021 		} else /* sa_res_key != 0 */ {
8022 
8023 			/*
8024 			 * If we aren't registered currently then increment
8025 			 * the key count and set the registered flag.
8026 			 */
8027 			if (!lun->per_res[residx].registered) {
8028 				lun->pr_key_count++;
8029 				lun->per_res[residx].registered = 1;
8030 			}
8031 
8032 			memcpy(&lun->per_res[residx].res_key,
8033 			       param->serv_act_res_key,
8034 			       ctl_min(sizeof(param->serv_act_res_key),
8035 			       sizeof(lun->per_res[residx].res_key)));
8036 
8037 			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8038 			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8039 			persis_io.pr.pr_info.action = CTL_PR_REG_KEY;
8040 			persis_io.pr.pr_info.residx = residx;
8041 			memcpy(persis_io.pr.pr_info.sa_res_key,
8042 			       param->serv_act_res_key,
8043 			       sizeof(param->serv_act_res_key));
8044 			mtx_unlock(&softc->ctl_lock);
8045 			if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8046 			     &persis_io, sizeof(persis_io), 0)) >
8047 			     CTL_HA_STATUS_SUCCESS) {
8048 				printf("CTL:Persis Out error returned from "
8049 				       "ctl_ha_msg_send %d\n", isc_retval);
8050 			}
8051 		}
8052 		lun->PRGeneration++;
8053 
8054 		break;
8055 	}
8056 	case SPRO_RESERVE:
8057 #if 0
8058                 printf("Reserve executed type %d\n", type);
8059 #endif
8060 		mtx_lock(&softc->ctl_lock);
8061 		if (lun->flags & CTL_LUN_PR_RESERVED) {
8062 			/*
8063 			 * if this isn't the reservation holder and it's
8064 			 * not a "all registrants" type or if the type is
8065 			 * different then we have a conflict
8066 			 */
8067 			if ((lun->pr_res_idx != residx
8068 			  && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS)
8069 			 || lun->res_type != type) {
8070 				mtx_unlock(&softc->ctl_lock);
8071 				free(ctsio->kern_data_ptr, M_CTL);
8072 				ctl_set_reservation_conflict(ctsio);
8073 				ctl_done((union ctl_io *)ctsio);
8074 				return (CTL_RETVAL_COMPLETE);
8075 			}
8076 		} else /* create a reservation */ {
8077 			/*
8078 			 * If it's not an "all registrants" type record
8079 			 * reservation holder
8080 			 */
8081 			if (type != SPR_TYPE_WR_EX_AR
8082 			 && type != SPR_TYPE_EX_AC_AR)
8083 				lun->pr_res_idx = residx; /* Res holder */
8084 			else
8085 				lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
8086 
8087 			lun->flags |= CTL_LUN_PR_RESERVED;
8088 			lun->res_type = type;
8089 
8090 			mtx_unlock(&softc->ctl_lock);
8091 
8092 			/* send msg to other side */
8093 			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8094 			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8095 			persis_io.pr.pr_info.action = CTL_PR_RESERVE;
8096 			persis_io.pr.pr_info.residx = lun->pr_res_idx;
8097 			persis_io.pr.pr_info.res_type = type;
8098 			if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8099 			     &persis_io, sizeof(persis_io), 0)) >
8100 			     CTL_HA_STATUS_SUCCESS) {
8101 				printf("CTL:Persis Out error returned from "
8102 				       "ctl_ha_msg_send %d\n", isc_retval);
8103 			}
8104 		}
8105 		break;
8106 
8107 	case SPRO_RELEASE:
8108 		mtx_lock(&softc->ctl_lock);
8109 		if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) {
8110 			/* No reservation exists return good status */
8111 			mtx_unlock(&softc->ctl_lock);
8112 			goto done;
8113 		}
8114 		/*
8115 		 * Is this nexus a reservation holder?
8116 		 */
8117 		if (lun->pr_res_idx != residx
8118 		 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
8119 			/*
8120 			 * not a res holder return good status but
8121 			 * do nothing
8122 			 */
8123 			mtx_unlock(&softc->ctl_lock);
8124 			goto done;
8125 		}
8126 
8127 		if (lun->res_type != type) {
8128 			mtx_unlock(&softc->ctl_lock);
8129 			free(ctsio->kern_data_ptr, M_CTL);
8130 			ctl_set_illegal_pr_release(ctsio);
8131 			ctl_done((union ctl_io *)ctsio);
8132 			return (CTL_RETVAL_COMPLETE);
8133 		}
8134 
8135 		/* okay to release */
8136 		lun->flags &= ~CTL_LUN_PR_RESERVED;
8137 		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8138 		lun->res_type = 0;
8139 
8140 		/*
8141 		 * if this isn't an exclusive access
8142 		 * res generate UA for all other
8143 		 * registrants.
8144 		 */
8145 		if (type != SPR_TYPE_EX_AC
8146 		 && type != SPR_TYPE_WR_EX) {
8147 			/*
8148 			 * temporarily unregister so we don't generate UA
8149 			 */
8150 			lun->per_res[residx].registered = 0;
8151 
8152 			for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8153 				if (lun->per_res[i+persis_offset].registered
8154 				    == 0)
8155 					continue;
8156 				lun->pending_sense[i].ua_pending |=
8157 					CTL_UA_RES_RELEASE;
8158 			}
8159 
8160 			lun->per_res[residx].registered = 1;
8161 		}
8162 		mtx_unlock(&softc->ctl_lock);
8163 		/* Send msg to other side */
8164 		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8165 		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8166 		persis_io.pr.pr_info.action = CTL_PR_RELEASE;
8167 		if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io,
8168 		     sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
8169 			printf("CTL:Persis Out error returned from "
8170 			       "ctl_ha_msg_send %d\n", isc_retval);
8171 		}
8172 		break;
8173 
8174 	case SPRO_CLEAR:
8175 		/* send msg to other side */
8176 
8177 		mtx_lock(&softc->ctl_lock);
8178 		lun->flags &= ~CTL_LUN_PR_RESERVED;
8179 		lun->res_type = 0;
8180 		lun->pr_key_count = 0;
8181 		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8182 
8183 
8184 		memset(&lun->per_res[residx].res_key,
8185 		       0, sizeof(lun->per_res[residx].res_key));
8186 		lun->per_res[residx].registered = 0;
8187 
8188 		for (i=0; i < 2*CTL_MAX_INITIATORS; i++)
8189 			if (lun->per_res[i].registered) {
8190 				if (!persis_offset && i < CTL_MAX_INITIATORS)
8191 					lun->pending_sense[i].ua_pending |=
8192 						CTL_UA_RES_PREEMPT;
8193 				else if (persis_offset && i >= persis_offset)
8194 					lun->pending_sense[i-persis_offset
8195 					    ].ua_pending |= CTL_UA_RES_PREEMPT;
8196 
8197 				memset(&lun->per_res[i].res_key,
8198 				       0, sizeof(struct scsi_per_res_key));
8199 				lun->per_res[i].registered = 0;
8200 			}
8201 		lun->PRGeneration++;
8202 		mtx_unlock(&softc->ctl_lock);
8203 		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8204 		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8205 		persis_io.pr.pr_info.action = CTL_PR_CLEAR;
8206 		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8207 		     sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
8208 			printf("CTL:Persis Out error returned from "
8209 			       "ctl_ha_msg_send %d\n", isc_retval);
8210 		}
8211 		break;
8212 
8213 	case SPRO_PREEMPT: {
8214 		int nretval;
8215 
8216 		nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type,
8217 					  residx, ctsio, cdb, param);
8218 		if (nretval != 0)
8219 			return (CTL_RETVAL_COMPLETE);
8220 		break;
8221 	}
8222 	case SPRO_REG_MOVE:
8223 	case SPRO_PRE_ABO:
8224 	default:
8225 		free(ctsio->kern_data_ptr, M_CTL);
8226 		ctl_set_invalid_field(/*ctsio*/ ctsio,
8227 				      /*sks_valid*/ 1,
8228 				      /*command*/ 1,
8229 				      /*field*/ 1,
8230 				      /*bit_valid*/ 1,
8231 				      /*bit*/ 0);
8232 		ctl_done((union ctl_io *)ctsio);
8233 		return (CTL_RETVAL_COMPLETE);
8234 		break; /* NOTREACHED */
8235 	}
8236 
8237 done:
8238 	free(ctsio->kern_data_ptr, M_CTL);
8239 	ctl_set_success(ctsio);
8240 	ctl_done((union ctl_io *)ctsio);
8241 
8242 	return (retval);
8243 }
8244 
8245 /*
8246  * This routine is for handling a message from the other SC pertaining to
8247  * persistent reserve out. All the error checking will have been done
8248  * so only perorming the action need be done here to keep the two
8249  * in sync.
8250  */
8251 static void
8252 ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
8253 {
8254 	struct ctl_lun *lun;
8255 	struct ctl_softc *softc;
8256 	int i;
8257 
8258 	softc = control_softc;
8259 
8260 	mtx_lock(&softc->ctl_lock);
8261 
8262 	lun = softc->ctl_luns[msg->hdr.nexus.targ_lun];
8263 	switch(msg->pr.pr_info.action) {
8264 	case CTL_PR_REG_KEY:
8265 		if (!lun->per_res[msg->pr.pr_info.residx].registered) {
8266 			lun->per_res[msg->pr.pr_info.residx].registered = 1;
8267 			lun->pr_key_count++;
8268 		}
8269 		lun->PRGeneration++;
8270 		memcpy(&lun->per_res[msg->pr.pr_info.residx].res_key,
8271 		       msg->pr.pr_info.sa_res_key,
8272 		       sizeof(struct scsi_per_res_key));
8273 		break;
8274 
8275 	case CTL_PR_UNREG_KEY:
8276 		lun->per_res[msg->pr.pr_info.residx].registered = 0;
8277 		memset(&lun->per_res[msg->pr.pr_info.residx].res_key,
8278 		       0, sizeof(struct scsi_per_res_key));
8279 		lun->pr_key_count--;
8280 
8281 		/* XXX Need to see if the reservation has been released */
8282 		/* if so do we need to generate UA? */
8283 		if (msg->pr.pr_info.residx == lun->pr_res_idx) {
8284 			lun->flags &= ~CTL_LUN_PR_RESERVED;
8285 			lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8286 
8287 			if ((lun->res_type == SPR_TYPE_WR_EX_RO
8288 			  || lun->res_type == SPR_TYPE_EX_AC_RO)
8289 			 && lun->pr_key_count) {
8290 				/*
8291 				 * If the reservation is a registrants
8292 				 * only type we need to generate a UA
8293 				 * for other registered inits.  The
8294 				 * sense code should be RESERVATIONS
8295 				 * RELEASED
8296 				 */
8297 
8298 				for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8299 					if (lun->per_res[i+
8300 					    persis_offset].registered == 0)
8301 						continue;
8302 
8303 					lun->pending_sense[i
8304 						].ua_pending |=
8305 						CTL_UA_RES_RELEASE;
8306 				}
8307 			}
8308 			lun->res_type = 0;
8309 		} else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
8310 			if (lun->pr_key_count==0) {
8311 				lun->flags &= ~CTL_LUN_PR_RESERVED;
8312 				lun->res_type = 0;
8313 				lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8314 			}
8315 		}
8316 		lun->PRGeneration++;
8317 		break;
8318 
8319 	case CTL_PR_RESERVE:
8320 		lun->flags |= CTL_LUN_PR_RESERVED;
8321 		lun->res_type = msg->pr.pr_info.res_type;
8322 		lun->pr_res_idx = msg->pr.pr_info.residx;
8323 
8324 		break;
8325 
8326 	case CTL_PR_RELEASE:
8327 		/*
8328 		 * if this isn't an exclusive access res generate UA for all
8329 		 * other registrants.
8330 		 */
8331 		if (lun->res_type != SPR_TYPE_EX_AC
8332 		 && lun->res_type != SPR_TYPE_WR_EX) {
8333 			for (i = 0; i < CTL_MAX_INITIATORS; i++)
8334 				if (lun->per_res[i+persis_offset].registered)
8335 					lun->pending_sense[i].ua_pending |=
8336 						CTL_UA_RES_RELEASE;
8337 		}
8338 
8339 		lun->flags &= ~CTL_LUN_PR_RESERVED;
8340 		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8341 		lun->res_type = 0;
8342 		break;
8343 
8344 	case CTL_PR_PREEMPT:
8345 		ctl_pro_preempt_other(lun, msg);
8346 		break;
8347 	case CTL_PR_CLEAR:
8348 		lun->flags &= ~CTL_LUN_PR_RESERVED;
8349 		lun->res_type = 0;
8350 		lun->pr_key_count = 0;
8351 		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8352 
8353 		for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
8354 			if (lun->per_res[i].registered == 0)
8355 				continue;
8356 			if (!persis_offset
8357 			 && i < CTL_MAX_INITIATORS)
8358 				lun->pending_sense[i].ua_pending |=
8359 					CTL_UA_RES_PREEMPT;
8360 			else if (persis_offset
8361 			      && i >= persis_offset)
8362    				lun->pending_sense[i-persis_offset].ua_pending|=
8363 					CTL_UA_RES_PREEMPT;
8364 			memset(&lun->per_res[i].res_key, 0,
8365 			       sizeof(struct scsi_per_res_key));
8366 			lun->per_res[i].registered = 0;
8367 		}
8368 		lun->PRGeneration++;
8369 		break;
8370 	}
8371 
8372 	mtx_unlock(&softc->ctl_lock);
8373 }
8374 
8375 int
8376 ctl_read_write(struct ctl_scsiio *ctsio)
8377 {
8378 	struct ctl_lun *lun;
8379 	struct ctl_lba_len lbalen;
8380 	uint64_t lba;
8381 	uint32_t num_blocks;
8382 	int reladdr, fua, dpo, ebp;
8383 	int retval;
8384 	int isread;
8385 
8386 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8387 
8388 	CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));
8389 
8390 	reladdr = 0;
8391 	fua = 0;
8392 	dpo = 0;
8393 	ebp = 0;
8394 
8395 	retval = CTL_RETVAL_COMPLETE;
8396 
8397 	isread = ctsio->cdb[0] == READ_6  || ctsio->cdb[0] == READ_10
8398 	      || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16;
8399 	if (lun->flags & CTL_LUN_PR_RESERVED && isread) {
8400 		uint32_t residx;
8401 
8402 		/*
8403 		 * XXX KDM need a lock here.
8404 		 */
8405 		residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
8406 		if ((lun->res_type == SPR_TYPE_EX_AC
8407 		  && residx != lun->pr_res_idx)
8408 		 || ((lun->res_type == SPR_TYPE_EX_AC_RO
8409 		   || lun->res_type == SPR_TYPE_EX_AC_AR)
8410 		  && !lun->per_res[residx].registered)) {
8411 			ctl_set_reservation_conflict(ctsio);
8412 			ctl_done((union ctl_io *)ctsio);
8413 			return (CTL_RETVAL_COMPLETE);
8414 	        }
8415 	}
8416 
8417 	switch (ctsio->cdb[0]) {
8418 	case READ_6:
8419 	case WRITE_6: {
8420 		struct scsi_rw_6 *cdb;
8421 
8422 		cdb = (struct scsi_rw_6 *)ctsio->cdb;
8423 
8424 		lba = scsi_3btoul(cdb->addr);
8425 		/* only 5 bits are valid in the most significant address byte */
8426 		lba &= 0x1fffff;
8427 		num_blocks = cdb->length;
8428 		/*
8429 		 * This is correct according to SBC-2.
8430 		 */
8431 		if (num_blocks == 0)
8432 			num_blocks = 256;
8433 		break;
8434 	}
8435 	case READ_10:
8436 	case WRITE_10: {
8437 		struct scsi_rw_10 *cdb;
8438 
8439 		cdb = (struct scsi_rw_10 *)ctsio->cdb;
8440 
8441 		if (cdb->byte2 & SRW10_RELADDR)
8442 			reladdr = 1;
8443 		if (cdb->byte2 & SRW10_FUA)
8444 			fua = 1;
8445 		if (cdb->byte2 & SRW10_DPO)
8446 			dpo = 1;
8447 
8448 		if ((cdb->opcode == WRITE_10)
8449 		 && (cdb->byte2 & SRW10_EBP))
8450 			ebp = 1;
8451 
8452 		lba = scsi_4btoul(cdb->addr);
8453 		num_blocks = scsi_2btoul(cdb->length);
8454 		break;
8455 	}
8456 	case WRITE_VERIFY_10: {
8457 		struct scsi_write_verify_10 *cdb;
8458 
8459 		cdb = (struct scsi_write_verify_10 *)ctsio->cdb;
8460 
8461 		/*
8462 		 * XXX KDM we should do actual write verify support at some
8463 		 * point.  This is obviously fake, we're just translating
8464 		 * things to a write.  So we don't even bother checking the
8465 		 * BYTCHK field, since we don't do any verification.  If
8466 		 * the user asks for it, we'll just pretend we did it.
8467 		 */
8468 		if (cdb->byte2 & SWV_DPO)
8469 			dpo = 1;
8470 
8471 		lba = scsi_4btoul(cdb->addr);
8472 		num_blocks = scsi_2btoul(cdb->length);
8473 		break;
8474 	}
8475 	case READ_12:
8476 	case WRITE_12: {
8477 		struct scsi_rw_12 *cdb;
8478 
8479 		cdb = (struct scsi_rw_12 *)ctsio->cdb;
8480 
8481 		if (cdb->byte2 & SRW12_RELADDR)
8482 			reladdr = 1;
8483 		if (cdb->byte2 & SRW12_FUA)
8484 			fua = 1;
8485 		if (cdb->byte2 & SRW12_DPO)
8486 			dpo = 1;
8487 		lba = scsi_4btoul(cdb->addr);
8488 		num_blocks = scsi_4btoul(cdb->length);
8489 		break;
8490 	}
8491 	case WRITE_VERIFY_12: {
8492 		struct scsi_write_verify_12 *cdb;
8493 
8494 		cdb = (struct scsi_write_verify_12 *)ctsio->cdb;
8495 
8496 		if (cdb->byte2 & SWV_DPO)
8497 			dpo = 1;
8498 
8499 		lba = scsi_4btoul(cdb->addr);
8500 		num_blocks = scsi_4btoul(cdb->length);
8501 
8502 		break;
8503 	}
8504 	case READ_16:
8505 	case WRITE_16: {
8506 		struct scsi_rw_16 *cdb;
8507 
8508 		cdb = (struct scsi_rw_16 *)ctsio->cdb;
8509 
8510 		if (cdb->byte2 & SRW12_RELADDR)
8511 			reladdr = 1;
8512 		if (cdb->byte2 & SRW12_FUA)
8513 			fua = 1;
8514 		if (cdb->byte2 & SRW12_DPO)
8515 			dpo = 1;
8516 
8517 		lba = scsi_8btou64(cdb->addr);
8518 		num_blocks = scsi_4btoul(cdb->length);
8519 		break;
8520 	}
8521 	case WRITE_VERIFY_16: {
8522 		struct scsi_write_verify_16 *cdb;
8523 
8524 		cdb = (struct scsi_write_verify_16 *)ctsio->cdb;
8525 
8526 		if (cdb->byte2 & SWV_DPO)
8527 			dpo = 1;
8528 
8529 		lba = scsi_8btou64(cdb->addr);
8530 		num_blocks = scsi_4btoul(cdb->length);
8531 		break;
8532 	}
8533 	default:
8534 		/*
8535 		 * We got a command we don't support.  This shouldn't
8536 		 * happen, commands should be filtered out above us.
8537 		 */
8538 		ctl_set_invalid_opcode(ctsio);
8539 		ctl_done((union ctl_io *)ctsio);
8540 
8541 		return (CTL_RETVAL_COMPLETE);
8542 		break; /* NOTREACHED */
8543 	}
8544 
8545 	/*
8546 	 * XXX KDM what do we do with the DPO and FUA bits?  FUA might be
8547 	 * interesting for us, but if RAIDCore is in write-back mode,
8548 	 * getting it to do write-through for a particular transaction may
8549 	 * not be possible.
8550 	 */
8551 	/*
8552 	 * We don't support relative addressing.  That also requires
8553 	 * supporting linked commands, which we don't do.
8554 	 */
8555 	if (reladdr != 0) {
8556 		ctl_set_invalid_field(ctsio,
8557 				      /*sks_valid*/ 1,
8558 				      /*command*/ 1,
8559 				      /*field*/ 1,
8560 				      /*bit_valid*/ 1,
8561 				      /*bit*/ 0);
8562 		ctl_done((union ctl_io *)ctsio);
8563 		return (CTL_RETVAL_COMPLETE);
8564 	}
8565 
8566 	/*
8567 	 * The first check is to make sure we're in bounds, the second
8568 	 * check is to catch wrap-around problems.  If the lba + num blocks
8569 	 * is less than the lba, then we've wrapped around and the block
8570 	 * range is invalid anyway.
8571 	 */
8572 	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
8573 	 || ((lba + num_blocks) < lba)) {
8574 		ctl_set_lba_out_of_range(ctsio);
8575 		ctl_done((union ctl_io *)ctsio);
8576 		return (CTL_RETVAL_COMPLETE);
8577 	}
8578 
8579 	/*
8580 	 * According to SBC-3, a transfer length of 0 is not an error.
8581 	 * Note that this cannot happen with WRITE(6) or READ(6), since 0
8582 	 * translates to 256 blocks for those commands.
8583 	 */
8584 	if (num_blocks == 0) {
8585 		ctl_set_success(ctsio);
8586 		ctl_done((union ctl_io *)ctsio);
8587 		return (CTL_RETVAL_COMPLETE);
8588 	}
8589 
8590 	lbalen.lba = lba;
8591 	lbalen.len = num_blocks;
8592 	memcpy(ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, &lbalen,
8593 	       sizeof(lbalen));
8594 
8595 	CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));
8596 
8597 	retval = lun->backend->data_submit((union ctl_io *)ctsio);
8598 
8599 	return (retval);
8600 }
8601 
8602 int
8603 ctl_report_luns(struct ctl_scsiio *ctsio)
8604 {
8605 	struct scsi_report_luns *cdb;
8606 	struct scsi_report_luns_data *lun_data;
8607 	struct ctl_lun *lun, *request_lun;
8608 	int num_luns, retval;
8609 	uint32_t alloc_len, lun_datalen;
8610 	int num_filled, well_known;
8611 	uint32_t initidx;
8612 
8613 	retval = CTL_RETVAL_COMPLETE;
8614 	well_known = 0;
8615 
8616 	cdb = (struct scsi_report_luns *)ctsio->cdb;
8617 
8618 	CTL_DEBUG_PRINT(("ctl_report_luns\n"));
8619 
8620 	mtx_lock(&control_softc->ctl_lock);
8621 	num_luns = control_softc->num_luns;
8622 	mtx_unlock(&control_softc->ctl_lock);
8623 
8624 	switch (cdb->select_report) {
8625 	case RPL_REPORT_DEFAULT:
8626 	case RPL_REPORT_ALL:
8627 		break;
8628 	case RPL_REPORT_WELLKNOWN:
8629 		well_known = 1;
8630 		num_luns = 0;
8631 		break;
8632 	default:
8633 		ctl_set_invalid_field(ctsio,
8634 				      /*sks_valid*/ 1,
8635 				      /*command*/ 1,
8636 				      /*field*/ 2,
8637 				      /*bit_valid*/ 0,
8638 				      /*bit*/ 0);
8639 		ctl_done((union ctl_io *)ctsio);
8640 		return (retval);
8641 		break; /* NOTREACHED */
8642 	}
8643 
8644 	alloc_len = scsi_4btoul(cdb->length);
8645 	/*
8646 	 * The initiator has to allocate at least 16 bytes for this request,
8647 	 * so he can at least get the header and the first LUN.  Otherwise
8648 	 * we reject the request (per SPC-3 rev 14, section 6.21).
8649 	 */
8650 	if (alloc_len < (sizeof(struct scsi_report_luns_data) +
8651 	    sizeof(struct scsi_report_luns_lundata))) {
8652 		ctl_set_invalid_field(ctsio,
8653 				      /*sks_valid*/ 1,
8654 				      /*command*/ 1,
8655 				      /*field*/ 6,
8656 				      /*bit_valid*/ 0,
8657 				      /*bit*/ 0);
8658 		ctl_done((union ctl_io *)ctsio);
8659 		return (retval);
8660 	}
8661 
8662 	request_lun = (struct ctl_lun *)
8663 		ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8664 
8665 	lun_datalen = sizeof(*lun_data) +
8666 		(num_luns * sizeof(struct scsi_report_luns_lundata));
8667 
8668 	ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO);
8669 	lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr;
8670 	ctsio->kern_sg_entries = 0;
8671 
8672 	if (lun_datalen < alloc_len) {
8673 		ctsio->residual = alloc_len - lun_datalen;
8674 		ctsio->kern_data_len = lun_datalen;
8675 		ctsio->kern_total_len = lun_datalen;
8676 	} else {
8677 		ctsio->residual = 0;
8678 		ctsio->kern_data_len = alloc_len;
8679 		ctsio->kern_total_len = alloc_len;
8680 	}
8681 	ctsio->kern_data_resid = 0;
8682 	ctsio->kern_rel_offset = 0;
8683 	ctsio->kern_sg_entries = 0;
8684 
8685 	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
8686 
8687 	/*
8688 	 * We set this to the actual data length, regardless of how much
8689 	 * space we actually have to return results.  If the user looks at
8690 	 * this value, he'll know whether or not he allocated enough space
8691 	 * and reissue the command if necessary.  We don't support well
8692 	 * known logical units, so if the user asks for that, return none.
8693 	 */
8694 	scsi_ulto4b(lun_datalen - 8, lun_data->length);
8695 
8696 	mtx_lock(&control_softc->ctl_lock);
8697 	for (num_filled = 0, lun = STAILQ_FIRST(&control_softc->lun_list);
8698 	     (lun != NULL) && (num_filled < num_luns);
8699 	     lun = STAILQ_NEXT(lun, links)) {
8700 
8701 		if (lun->lun <= 0xff) {
8702 			/*
8703 			 * Peripheral addressing method, bus number 0.
8704 			 */
8705 			lun_data->luns[num_filled].lundata[0] =
8706 				RPL_LUNDATA_ATYP_PERIPH;
8707 			lun_data->luns[num_filled].lundata[1] = lun->lun;
8708 			num_filled++;
8709 		} else if (lun->lun <= 0x3fff) {
8710 			/*
8711 			 * Flat addressing method.
8712 			 */
8713 			lun_data->luns[num_filled].lundata[0] =
8714 				RPL_LUNDATA_ATYP_FLAT |
8715 				(lun->lun & RPL_LUNDATA_FLAT_LUN_MASK);
8716 #ifdef OLDCTLHEADERS
8717 				(SRLD_ADDR_FLAT << SRLD_ADDR_SHIFT) |
8718 				(lun->lun & SRLD_BUS_LUN_MASK);
8719 #endif
8720 			lun_data->luns[num_filled].lundata[1] =
8721 #ifdef OLDCTLHEADERS
8722 				lun->lun >> SRLD_BUS_LUN_BITS;
8723 #endif
8724 				lun->lun >> RPL_LUNDATA_FLAT_LUN_BITS;
8725 			num_filled++;
8726 		} else {
8727 			printf("ctl_report_luns: bogus LUN number %jd, "
8728 			       "skipping\n", (intmax_t)lun->lun);
8729 		}
8730 		/*
8731 		 * According to SPC-3, rev 14 section 6.21:
8732 		 *
8733 		 * "The execution of a REPORT LUNS command to any valid and
8734 		 * installed logical unit shall clear the REPORTED LUNS DATA
8735 		 * HAS CHANGED unit attention condition for all logical
8736 		 * units of that target with respect to the requesting
8737 		 * initiator. A valid and installed logical unit is one
8738 		 * having a PERIPHERAL QUALIFIER of 000b in the standard
8739 		 * INQUIRY data (see 6.4.2)."
8740 		 *
8741 		 * If request_lun is NULL, the LUN this report luns command
8742 		 * was issued to is either disabled or doesn't exist. In that
8743 		 * case, we shouldn't clear any pending lun change unit
8744 		 * attention.
8745 		 */
8746 		if (request_lun != NULL)
8747 			lun->pending_sense[initidx].ua_pending &=
8748 				~CTL_UA_LUN_CHANGE;
8749 	}
8750 	mtx_unlock(&control_softc->ctl_lock);
8751 
8752 	/*
8753 	 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy
8754 	 * this request.
8755 	 */
8756 	ctsio->scsi_status = SCSI_STATUS_OK;
8757 
8758 	ctsio->be_move_done = ctl_config_move_done;
8759 	ctl_datamove((union ctl_io *)ctsio);
8760 
8761 	return (retval);
8762 }
8763 
8764 int
8765 ctl_request_sense(struct ctl_scsiio *ctsio)
8766 {
8767 	struct scsi_request_sense *cdb;
8768 	struct scsi_sense_data *sense_ptr;
8769 	struct ctl_lun *lun;
8770 	uint32_t initidx;
8771 	int have_error;
8772 	scsi_sense_data_type sense_format;
8773 
8774 	cdb = (struct scsi_request_sense *)ctsio->cdb;
8775 
8776 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8777 
8778 	CTL_DEBUG_PRINT(("ctl_request_sense\n"));
8779 
8780 	/*
8781 	 * Determine which sense format the user wants.
8782 	 */
8783 	if (cdb->byte2 & SRS_DESC)
8784 		sense_format = SSD_TYPE_DESC;
8785 	else
8786 		sense_format = SSD_TYPE_FIXED;
8787 
8788 	ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK);
8789 	sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr;
8790 	ctsio->kern_sg_entries = 0;
8791 
8792 	/*
8793 	 * struct scsi_sense_data, which is currently set to 256 bytes, is
8794 	 * larger than the largest allowed value for the length field in the
8795 	 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4.
8796 	 */
8797 	ctsio->residual = 0;
8798 	ctsio->kern_data_len = cdb->length;
8799 	ctsio->kern_total_len = cdb->length;
8800 
8801 	ctsio->kern_data_resid = 0;
8802 	ctsio->kern_rel_offset = 0;
8803 	ctsio->kern_sg_entries = 0;
8804 
8805 	/*
8806 	 * If we don't have a LUN, we don't have any pending sense.
8807 	 */
8808 	if (lun == NULL)
8809 		goto no_sense;
8810 
8811 	have_error = 0;
8812 	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
8813 	/*
8814 	 * Check for pending sense, and then for pending unit attentions.
8815 	 * Pending sense gets returned first, then pending unit attentions.
8816 	 */
8817 	mtx_lock(&lun->ctl_softc->ctl_lock);
8818 	if (ctl_is_set(lun->have_ca, initidx)) {
8819 		scsi_sense_data_type stored_format;
8820 
8821 		/*
8822 		 * Check to see which sense format was used for the stored
8823 		 * sense data.
8824 		 */
8825 		stored_format = scsi_sense_type(
8826 		    &lun->pending_sense[initidx].sense);
8827 
8828 		/*
8829 		 * If the user requested a different sense format than the
8830 		 * one we stored, then we need to convert it to the other
8831 		 * format.  If we're going from descriptor to fixed format
8832 		 * sense data, we may lose things in translation, depending
8833 		 * on what options were used.
8834 		 *
8835 		 * If the stored format is SSD_TYPE_NONE (i.e. invalid),
8836 		 * for some reason we'll just copy it out as-is.
8837 		 */
8838 		if ((stored_format == SSD_TYPE_FIXED)
8839 		 && (sense_format == SSD_TYPE_DESC))
8840 			ctl_sense_to_desc((struct scsi_sense_data_fixed *)
8841 			    &lun->pending_sense[initidx].sense,
8842 			    (struct scsi_sense_data_desc *)sense_ptr);
8843 		else if ((stored_format == SSD_TYPE_DESC)
8844 		      && (sense_format == SSD_TYPE_FIXED))
8845 			ctl_sense_to_fixed((struct scsi_sense_data_desc *)
8846 			    &lun->pending_sense[initidx].sense,
8847 			    (struct scsi_sense_data_fixed *)sense_ptr);
8848 		else
8849 			memcpy(sense_ptr, &lun->pending_sense[initidx].sense,
8850 			       ctl_min(sizeof(*sense_ptr),
8851 			       sizeof(lun->pending_sense[initidx].sense)));
8852 
8853 		ctl_clear_mask(lun->have_ca, initidx);
8854 		have_error = 1;
8855 	} else if (lun->pending_sense[initidx].ua_pending != CTL_UA_NONE) {
8856 		ctl_ua_type ua_type;
8857 
8858 		ua_type = ctl_build_ua(lun->pending_sense[initidx].ua_pending,
8859 				       sense_ptr, sense_format);
8860 		if (ua_type != CTL_UA_NONE) {
8861 			have_error = 1;
8862 			/* We're reporting this UA, so clear it */
8863 			lun->pending_sense[initidx].ua_pending &= ~ua_type;
8864 		}
8865 	}
8866 	mtx_unlock(&lun->ctl_softc->ctl_lock);
8867 
8868 	/*
8869 	 * We already have a pending error, return it.
8870 	 */
8871 	if (have_error != 0) {
8872 		/*
8873 		 * We report the SCSI status as OK, since the status of the
8874 		 * request sense command itself is OK.
8875 		 */
8876 		ctsio->scsi_status = SCSI_STATUS_OK;
8877 
8878 		/*
8879 		 * We report 0 for the sense length, because we aren't doing
8880 		 * autosense in this case.  We're reporting sense as
8881 		 * parameter data.
8882 		 */
8883 		ctsio->sense_len = 0;
8884 
8885 		ctsio->be_move_done = ctl_config_move_done;
8886 		ctl_datamove((union ctl_io *)ctsio);
8887 
8888 		return (CTL_RETVAL_COMPLETE);
8889 	}
8890 
8891 no_sense:
8892 
8893 	/*
8894 	 * No sense information to report, so we report that everything is
8895 	 * okay.
8896 	 */
8897 	ctl_set_sense_data(sense_ptr,
8898 			   lun,
8899 			   sense_format,
8900 			   /*current_error*/ 1,
8901 			   /*sense_key*/ SSD_KEY_NO_SENSE,
8902 			   /*asc*/ 0x00,
8903 			   /*ascq*/ 0x00,
8904 			   SSD_ELEM_NONE);
8905 
8906 	ctsio->scsi_status = SCSI_STATUS_OK;
8907 
8908 	/*
8909 	 * We report 0 for the sense length, because we aren't doing
8910 	 * autosense in this case.  We're reporting sense as parameter data.
8911 	 */
8912 	ctsio->sense_len = 0;
8913 	ctsio->be_move_done = ctl_config_move_done;
8914 	ctl_datamove((union ctl_io *)ctsio);
8915 
8916 	return (CTL_RETVAL_COMPLETE);
8917 }
8918 
8919 int
8920 ctl_tur(struct ctl_scsiio *ctsio)
8921 {
8922 	struct ctl_lun *lun;
8923 
8924 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8925 
8926 	CTL_DEBUG_PRINT(("ctl_tur\n"));
8927 
8928 	if (lun == NULL)
8929 		return (-EINVAL);
8930 
8931 	ctsio->scsi_status = SCSI_STATUS_OK;
8932 	ctsio->io_hdr.status = CTL_SUCCESS;
8933 
8934 	ctl_done((union ctl_io *)ctsio);
8935 
8936 	return (CTL_RETVAL_COMPLETE);
8937 }
8938 
8939 #ifdef notyet
8940 static int
8941 ctl_cmddt_inquiry(struct ctl_scsiio *ctsio)
8942 {
8943 
8944 }
8945 #endif
8946 
8947 static int
8948 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
8949 {
8950 	struct scsi_vpd_supported_pages *pages;
8951 	int sup_page_size;
8952 	struct ctl_lun *lun;
8953 
8954 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8955 
8956 	sup_page_size = sizeof(struct scsi_vpd_supported_pages) +
8957 		SCSI_EVPD_NUM_SUPPORTED_PAGES;
8958 	/*
8959 	 * XXX KDM GFP_???  We probably don't want to wait here,
8960 	 * unless we end up having a process/thread context.
8961 	 */
8962 	ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO);
8963 	if (ctsio->kern_data_ptr == NULL) {
8964 		ctsio->io_hdr.status = CTL_SCSI_ERROR;
8965 		ctsio->scsi_status = SCSI_STATUS_BUSY;
8966 		ctl_done((union ctl_io *)ctsio);
8967 		return (CTL_RETVAL_COMPLETE);
8968 	}
8969 	pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr;
8970 	ctsio->kern_sg_entries = 0;
8971 
8972 	if (sup_page_size < alloc_len) {
8973 		ctsio->residual = alloc_len - sup_page_size;
8974 		ctsio->kern_data_len = sup_page_size;
8975 		ctsio->kern_total_len = sup_page_size;
8976 	} else {
8977 		ctsio->residual = 0;
8978 		ctsio->kern_data_len = alloc_len;
8979 		ctsio->kern_total_len = alloc_len;
8980 	}
8981 	ctsio->kern_data_resid = 0;
8982 	ctsio->kern_rel_offset = 0;
8983 	ctsio->kern_sg_entries = 0;
8984 
8985 	/*
8986 	 * The control device is always connected.  The disk device, on the
8987 	 * other hand, may not be online all the time.  Need to change this
8988 	 * to figure out whether the disk device is actually online or not.
8989 	 */
8990 	if (lun != NULL)
8991 		pages->device = (SID_QUAL_LU_CONNECTED << 5) |
8992 				lun->be_lun->lun_type;
8993 	else
8994 		pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
8995 
8996 	pages->length = SCSI_EVPD_NUM_SUPPORTED_PAGES;
8997 	/* Supported VPD pages */
8998 	pages->page_list[0] = SVPD_SUPPORTED_PAGES;
8999 	/* Serial Number */
9000 	pages->page_list[1] = SVPD_UNIT_SERIAL_NUMBER;
9001 	/* Device Identification */
9002 	pages->page_list[2] = SVPD_DEVICE_ID;
9003 
9004 	ctsio->scsi_status = SCSI_STATUS_OK;
9005 
9006 	ctsio->be_move_done = ctl_config_move_done;
9007 	ctl_datamove((union ctl_io *)ctsio);
9008 
9009 	return (CTL_RETVAL_COMPLETE);
9010 }
9011 
9012 static int
9013 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
9014 {
9015 	struct scsi_vpd_unit_serial_number *sn_ptr;
9016 	struct ctl_lun *lun;
9017 #ifndef CTL_USE_BACKEND_SN
9018 	char tmpstr[32];
9019 #endif
9020 
9021 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9022 
9023 	/* XXX KDM which malloc flags here?? */
9024 	ctsio->kern_data_ptr = malloc(sizeof(*sn_ptr), M_CTL, M_WAITOK | M_ZERO);
9025 	if (ctsio->kern_data_ptr == NULL) {
9026 		ctsio->io_hdr.status = CTL_SCSI_ERROR;
9027 		ctsio->scsi_status = SCSI_STATUS_BUSY;
9028 		ctl_done((union ctl_io *)ctsio);
9029 		return (CTL_RETVAL_COMPLETE);
9030 	}
9031 	sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr;
9032 	ctsio->kern_sg_entries = 0;
9033 
9034 	if (sizeof(*sn_ptr) < alloc_len) {
9035 		ctsio->residual = alloc_len - sizeof(*sn_ptr);
9036 		ctsio->kern_data_len = sizeof(*sn_ptr);
9037 		ctsio->kern_total_len = sizeof(*sn_ptr);
9038 	} else {
9039 		ctsio->residual = 0;
9040 		ctsio->kern_data_len = alloc_len;
9041 		ctsio->kern_total_len = alloc_len;
9042 	}
9043 	ctsio->kern_data_resid = 0;
9044 	ctsio->kern_rel_offset = 0;
9045 	ctsio->kern_sg_entries = 0;
9046 
9047 	/*
9048 	 * The control device is always connected.  The disk device, on the
9049 	 * other hand, may not be online all the time.  Need to change this
9050 	 * to figure out whether the disk device is actually online or not.
9051 	 */
9052 	if (lun != NULL)
9053 		sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9054 				  lun->be_lun->lun_type;
9055 	else
9056 		sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9057 
9058 	sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER;
9059 	sn_ptr->length = ctl_min(sizeof(*sn_ptr) - 4, CTL_SN_LEN);
9060 #ifdef CTL_USE_BACKEND_SN
9061 	/*
9062 	 * If we don't have a LUN, we just leave the serial number as
9063 	 * all spaces.
9064 	 */
9065 	memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num));
9066 	if (lun != NULL) {
9067 		strncpy((char *)sn_ptr->serial_num,
9068 			(char *)lun->be_lun->serial_num, CTL_SN_LEN);
9069 	}
9070 #else
9071 	/*
9072 	 * Note that we're using a non-unique serial number here,
9073 	 */
9074 	snprintf(tmpstr, sizeof(tmpstr), "MYSERIALNUMIS000");
9075 	memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num));
9076 	strncpy(sn_ptr->serial_num, tmpstr, ctl_min(CTL_SN_LEN,
9077 		ctl_min(sizeof(tmpstr), sizeof(*sn_ptr) - 4)));
9078 #endif
9079 	ctsio->scsi_status = SCSI_STATUS_OK;
9080 
9081 	ctsio->be_move_done = ctl_config_move_done;
9082 	ctl_datamove((union ctl_io *)ctsio);
9083 
9084 	return (CTL_RETVAL_COMPLETE);
9085 }
9086 
9087 
9088 static int
9089 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
9090 {
9091 	struct scsi_vpd_device_id *devid_ptr;
9092 	struct scsi_vpd_id_descriptor *desc, *desc1;
9093 	struct scsi_vpd_id_descriptor *desc2, *desc3; /* for types 4h and 5h */
9094 	struct scsi_vpd_id_t10 *t10id;
9095 	struct ctl_softc *ctl_softc;
9096 	struct ctl_lun *lun;
9097 	struct ctl_frontend *fe;
9098 #ifndef CTL_USE_BACKEND_SN
9099 	char tmpstr[32];
9100 #endif /* CTL_USE_BACKEND_SN */
9101 	int devid_len;
9102 
9103 	ctl_softc = control_softc;
9104 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9105 
9106 	devid_len = sizeof(struct scsi_vpd_device_id) +
9107 		sizeof(struct scsi_vpd_id_descriptor) +
9108 		sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN +
9109 		sizeof(struct scsi_vpd_id_descriptor) + CTL_WWPN_LEN +
9110 		sizeof(struct scsi_vpd_id_descriptor) +
9111 		sizeof(struct scsi_vpd_id_rel_trgt_port_id) +
9112 		sizeof(struct scsi_vpd_id_descriptor) +
9113 		sizeof(struct scsi_vpd_id_trgt_port_grp_id);
9114 
9115 	/* XXX KDM which malloc flags here ?? */
9116 	ctsio->kern_data_ptr = malloc(devid_len, M_CTL, M_WAITOK | M_ZERO);
9117 	if (ctsio->kern_data_ptr == NULL) {
9118 		ctsio->io_hdr.status = CTL_SCSI_ERROR;
9119 		ctsio->scsi_status = SCSI_STATUS_BUSY;
9120 		ctl_done((union ctl_io *)ctsio);
9121 		return (CTL_RETVAL_COMPLETE);
9122 	}
9123 	devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr;
9124 	ctsio->kern_sg_entries = 0;
9125 
9126 	if (devid_len < alloc_len) {
9127 		ctsio->residual = alloc_len - devid_len;
9128 		ctsio->kern_data_len = devid_len;
9129 		ctsio->kern_total_len = devid_len;
9130 	} else {
9131 		ctsio->residual = 0;
9132 		ctsio->kern_data_len = alloc_len;
9133 		ctsio->kern_total_len = alloc_len;
9134 	}
9135 	ctsio->kern_data_resid = 0;
9136 	ctsio->kern_rel_offset = 0;
9137 	ctsio->kern_sg_entries = 0;
9138 
9139 	desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;
9140 	t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0];
9141 	desc1 = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
9142 		sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN);
9143 	desc2 = (struct scsi_vpd_id_descriptor *)(&desc1->identifier[0] +
9144 	          CTL_WWPN_LEN);
9145 	desc3 = (struct scsi_vpd_id_descriptor *)(&desc2->identifier[0] +
9146 	         sizeof(struct scsi_vpd_id_rel_trgt_port_id));
9147 
9148 	/*
9149 	 * The control device is always connected.  The disk device, on the
9150 	 * other hand, may not be online all the time.
9151 	 */
9152 	if (lun != NULL)
9153 		devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9154 				     lun->be_lun->lun_type;
9155 	else
9156 		devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9157 
9158 	devid_ptr->page_code = SVPD_DEVICE_ID;
9159 
9160 	scsi_ulto2b(devid_len - 4, devid_ptr->length);
9161 
9162 	mtx_lock(&ctl_softc->ctl_lock);
9163 
9164 	fe = ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
9165 
9166 	/*
9167 	 * For Fibre channel,
9168 	 */
9169 	if (fe->port_type == CTL_PORT_FC)
9170 	{
9171 		desc->proto_codeset = (SCSI_PROTO_FC << 4) |
9172 				      SVPD_ID_CODESET_ASCII;
9173         	desc1->proto_codeset = (SCSI_PROTO_FC << 4) |
9174 		              SVPD_ID_CODESET_BINARY;
9175 	}
9176 	else
9177 	{
9178 		desc->proto_codeset = (SCSI_PROTO_SPI << 4) |
9179 				      SVPD_ID_CODESET_ASCII;
9180         	desc1->proto_codeset = (SCSI_PROTO_SPI << 4) |
9181 		              SVPD_ID_CODESET_BINARY;
9182 	}
9183 	desc2->proto_codeset = desc3->proto_codeset = desc1->proto_codeset;
9184 	mtx_unlock(&ctl_softc->ctl_lock);
9185 
9186 	/*
9187 	 * We're using a LUN association here.  i.e., this device ID is a
9188 	 * per-LUN identifier.
9189 	 */
9190 	desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
9191 	desc->length = sizeof(*t10id) + CTL_DEVID_LEN;
9192 	strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor));
9193 
9194 	/*
9195 	 * desc1 is for the WWPN which is a port asscociation.
9196 	 */
9197 	desc1->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_NAA;
9198 	desc1->length = CTL_WWPN_LEN;
9199 	/* XXX Call Reggie's get_WWNN func here then add port # to the end */
9200 	/* For testing just create the WWPN */
9201 #if 0
9202 	ddb_GetWWNN((char *)desc1->identifier);
9203 
9204 	/* NOTE: if the port is 0 or 8 we don't want to subtract 1 */
9205 	/* This is so Copancontrol will return something sane */
9206 	if (ctsio->io_hdr.nexus.targ_port!=0 &&
9207 	    ctsio->io_hdr.nexus.targ_port!=8)
9208 		desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port-1;
9209 	else
9210 		desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port;
9211 #endif
9212 
9213 	be64enc(desc1->identifier, fe->wwpn);
9214 
9215 	/*
9216 	 * desc2 is for the Relative Target Port(type 4h) identifier
9217 	 */
9218 	desc2->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT
9219 	                 | SVPD_ID_TYPE_RELTARG;
9220 	desc2->length = 4;
9221 //#if 0
9222 	/* NOTE: if the port is 0 or 8 we don't want to subtract 1 */
9223 	/* This is so Copancontrol will return something sane */
9224 	if (ctsio->io_hdr.nexus.targ_port!=0 &&
9225 	    ctsio->io_hdr.nexus.targ_port!=8)
9226 		desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port - 1;
9227 	else
9228 	        desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port;
9229 //#endif
9230 
9231 	/*
9232 	 * desc3 is for the Target Port Group(type 5h) identifier
9233 	 */
9234 	desc3->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT
9235 	                 | SVPD_ID_TYPE_TPORTGRP;
9236 	desc3->length = 4;
9237 	if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS || ctl_is_single)
9238 		desc3->identifier[3] = 1;
9239 	else
9240 		desc3->identifier[3] = 2;
9241 
9242 #ifdef CTL_USE_BACKEND_SN
9243 	/*
9244 	 * If we've actually got a backend, copy the device id from the
9245 	 * per-LUN data.  Otherwise, set it to all spaces.
9246 	 */
9247 	if (lun != NULL) {
9248 		/*
9249 		 * Copy the backend's LUN ID.
9250 		 */
9251 		strncpy((char *)t10id->vendor_spec_id,
9252 			(char *)lun->be_lun->device_id, CTL_DEVID_LEN);
9253 	} else {
9254 		/*
9255 		 * No backend, set this to spaces.
9256 		 */
9257 		memset(t10id->vendor_spec_id, 0x20, CTL_DEVID_LEN);
9258 	}
9259 #else
9260 	snprintf(tmpstr, sizeof(tmpstr), "MYDEVICEIDIS%4d",
9261 		 (lun != NULL) ?  (int)lun->lun : 0);
9262 	strncpy(t10id->vendor_spec_id, tmpstr, ctl_min(CTL_DEVID_LEN,
9263 		sizeof(tmpstr)));
9264 #endif
9265 
9266 	ctsio->scsi_status = SCSI_STATUS_OK;
9267 
9268 	ctsio->be_move_done = ctl_config_move_done;
9269 	ctl_datamove((union ctl_io *)ctsio);
9270 
9271 	return (CTL_RETVAL_COMPLETE);
9272 }
9273 
9274 static int
9275 ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
9276 {
9277 	struct scsi_inquiry *cdb;
9278 	int alloc_len, retval;
9279 
9280 	cdb = (struct scsi_inquiry *)ctsio->cdb;
9281 
9282 	retval = CTL_RETVAL_COMPLETE;
9283 
9284 	alloc_len = scsi_2btoul(cdb->length);
9285 
9286 	switch (cdb->page_code) {
9287 	case SVPD_SUPPORTED_PAGES:
9288 		retval = ctl_inquiry_evpd_supported(ctsio, alloc_len);
9289 		break;
9290 	case SVPD_UNIT_SERIAL_NUMBER:
9291 		retval = ctl_inquiry_evpd_serial(ctsio, alloc_len);
9292 		break;
9293 	case SVPD_DEVICE_ID:
9294 		retval = ctl_inquiry_evpd_devid(ctsio, alloc_len);
9295 		break;
9296 	default:
9297 		ctl_set_invalid_field(ctsio,
9298 				      /*sks_valid*/ 1,
9299 				      /*command*/ 1,
9300 				      /*field*/ 2,
9301 				      /*bit_valid*/ 0,
9302 				      /*bit*/ 0);
9303 		ctl_done((union ctl_io *)ctsio);
9304 		retval = CTL_RETVAL_COMPLETE;
9305 		break;
9306 	}
9307 
9308 	return (retval);
9309 }
9310 
9311 static int
9312 ctl_inquiry_std(struct ctl_scsiio *ctsio)
9313 {
9314 	struct scsi_inquiry_data *inq_ptr;
9315 	struct scsi_inquiry *cdb;
9316 	struct ctl_softc *ctl_softc;
9317 	struct ctl_lun *lun;
9318 	uint32_t alloc_len;
9319 	int is_fc;
9320 
9321 	ctl_softc = control_softc;
9322 
9323 	/*
9324 	 * Figure out whether we're talking to a Fibre Channel port or not.
9325 	 * We treat the ioctl front end, and any SCSI adapters, as packetized
9326 	 * SCSI front ends.
9327 	 */
9328 	mtx_lock(&ctl_softc->ctl_lock);
9329 	if (ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type !=
9330 	    CTL_PORT_FC)
9331 		is_fc = 0;
9332 	else
9333 		is_fc = 1;
9334 	mtx_unlock(&ctl_softc->ctl_lock);
9335 
9336 	lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9337 	cdb = (struct scsi_inquiry *)ctsio->cdb;
9338 	alloc_len = scsi_2btoul(cdb->length);
9339 
9340 	/*
9341 	 * We malloc the full inquiry data size here and fill it
9342 	 * in.  If the user only asks for less, we'll give him
9343 	 * that much.
9344 	 */
9345 	/* XXX KDM what malloc flags should we use here?? */
9346 	ctsio->kern_data_ptr = malloc(sizeof(*inq_ptr), M_CTL, M_WAITOK | M_ZERO);
9347 	if (ctsio->kern_data_ptr == NULL) {
9348 		ctsio->io_hdr.status = CTL_SCSI_ERROR;
9349 		ctsio->scsi_status = SCSI_STATUS_BUSY;
9350 		ctl_done((union ctl_io *)ctsio);
9351 		return (CTL_RETVAL_COMPLETE);
9352 	}
9353 	inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
9354 	ctsio->kern_sg_entries = 0;
9355 	ctsio->kern_data_resid = 0;
9356 	ctsio->kern_rel_offset = 0;
9357 
9358 	if (sizeof(*inq_ptr) < alloc_len) {
9359 		ctsio->residual = alloc_len - sizeof(*inq_ptr);
9360 		ctsio->kern_data_len = sizeof(*inq_ptr);
9361 		ctsio->kern_total_len = sizeof(*inq_ptr);
9362 	} else {
9363 		ctsio->residual = 0;
9364 		ctsio->kern_data_len = alloc_len;
9365 		ctsio->kern_total_len = alloc_len;
9366 	}
9367 
9368 	/*
9369 	 * If we have a LUN configured, report it as connected.  Otherwise,
9370 	 * report that it is offline or no device is supported, depending
9371 	 * on the value of inquiry_pq_no_lun.
9372 	 *
9373 	 * According to the spec (SPC-4 r34), the peripheral qualifier
9374 	 * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario:
9375 	 *
9376 	 * "A peripheral device having the specified peripheral device type
9377 	 * is not connected to this logical unit. However, the device
9378 	 * server is capable of supporting the specified peripheral device
9379 	 * type on this logical unit."
9380 	 *
9381 	 * According to the same spec, the peripheral qualifier
9382 	 * SID_QUAL_BAD_LU (011b) is used in this scenario:
9383 	 *
9384 	 * "The device server is not capable of supporting a peripheral
9385 	 * device on this logical unit. For this peripheral qualifier the
9386 	 * peripheral device type shall be set to 1Fh. All other peripheral
9387 	 * device type values are reserved for this peripheral qualifier."
9388 	 *
9389 	 * Given the text, it would seem that we probably want to report that
9390 	 * the LUN is offline here.  There is no LUN connected, but we can
9391 	 * support a LUN at the given LUN number.
9392 	 *
9393 	 * In the real world, though, it sounds like things are a little
9394 	 * different:
9395 	 *
9396 	 * - Linux, when presented with a LUN with the offline peripheral
9397 	 *   qualifier, will create an sg driver instance for it.  So when
9398 	 *   you attach it to CTL, you wind up with a ton of sg driver
9399 	 *   instances.  (One for every LUN that Linux bothered to probe.)
9400 	 *   Linux does this despite the fact that it issues a REPORT LUNs
9401 	 *   to LUN 0 to get the inventory of supported LUNs.
9402 	 *
9403 	 * - There is other anecdotal evidence (from Emulex folks) about
9404 	 *   arrays that use the offline peripheral qualifier for LUNs that
9405 	 *   are on the "passive" path in an active/passive array.
9406 	 *
9407 	 * So the solution is provide a hopefully reasonable default
9408 	 * (return bad/no LUN) and allow the user to change the behavior
9409 	 * with a tunable/sysctl variable.
9410 	 */
9411 	if (lun != NULL)
9412 		inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9413 				  lun->be_lun->lun_type;
9414 	else if (ctl_softc->inquiry_pq_no_lun == 0)
9415 		inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9416 	else
9417 		inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;
9418 
9419 	/* RMB in byte 2 is 0 */
9420 	inq_ptr->version = SCSI_REV_SPC3;
9421 
9422 	/*
9423 	 * According to SAM-3, even if a device only supports a single
9424 	 * level of LUN addressing, it should still set the HISUP bit:
9425 	 *
9426 	 * 4.9.1 Logical unit numbers overview
9427 	 *
9428 	 * All logical unit number formats described in this standard are
9429 	 * hierarchical in structure even when only a single level in that
9430 	 * hierarchy is used. The HISUP bit shall be set to one in the
9431 	 * standard INQUIRY data (see SPC-2) when any logical unit number
9432 	 * format described in this standard is used.  Non-hierarchical
9433 	 * formats are outside the scope of this standard.
9434 	 *
9435 	 * Therefore we set the HiSup bit here.
9436 	 *
9437 	 * The reponse format is 2, per SPC-3.
9438 	 */
9439 	inq_ptr->response_format = SID_HiSup | 2;
9440 
9441 	inq_ptr->additional_length = sizeof(*inq_ptr) - 4;
9442 	CTL_DEBUG_PRINT(("additional_length = %d\n",
9443 			 inq_ptr->additional_length));
9444 
9445 	inq_ptr->spc3_flags = SPC3_SID_TPGS_IMPLICIT;
9446 	/* 16 bit addressing */
9447 	if (is_fc == 0)
9448 		inq_ptr->spc2_flags = SPC2_SID_ADDR16;
9449 	/* XXX set the SID_MultiP bit here if we're actually going to
9450 	   respond on multiple ports */
9451 	inq_ptr->spc2_flags |= SPC2_SID_MultiP;
9452 
9453 	/* 16 bit data bus, synchronous transfers */
9454 	/* XXX these flags don't apply for FC */
9455 	if (is_fc == 0)
9456 		inq_ptr->flags = SID_WBus16 | SID_Sync;
9457 	/*
9458 	 * XXX KDM do we want to support tagged queueing on the control
9459 	 * device at all?
9460 	 */
9461 	if ((lun == NULL)
9462 	 || (lun->be_lun->lun_type != T_PROCESSOR))
9463 		inq_ptr->flags |= SID_CmdQue;
9464 	/*
9465 	 * Per SPC-3, unused bytes in ASCII strings are filled with spaces.
9466 	 * We have 8 bytes for the vendor name, and 16 bytes for the device
9467 	 * name and 4 bytes for the revision.
9468 	 */
9469 	strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
9470 	if (lun == NULL) {
9471 		strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT);
9472 	} else {
9473 		switch (lun->be_lun->lun_type) {
9474 		case T_DIRECT:
9475 			strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT);
9476 			break;
9477 		case T_PROCESSOR:
9478 			strcpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT);
9479 			break;
9480 		default:
9481 			strcpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT);
9482 			break;
9483 		}
9484 	}
9485 
9486 	/*
9487 	 * XXX make this a macro somewhere so it automatically gets
9488 	 * incremented when we make changes.
9489 	 */
9490 	strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
9491 
9492 	/*
9493 	 * For parallel SCSI, we support double transition and single
9494 	 * transition clocking.  We also support QAS (Quick Arbitration
9495 	 * and Selection) and Information Unit transfers on both the
9496 	 * control and array devices.
9497 	 */
9498 	if (is_fc == 0)
9499 		inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS |
9500 				    SID_SPI_IUS;
9501 
9502 	/* SAM-3 */
9503 	scsi_ulto2b(0x0060, inq_ptr->version1);
9504 	/* SPC-3 (no version claimed) XXX should we claim a version? */
9505 	scsi_ulto2b(0x0300, inq_ptr->version2);
9506 	if (is_fc) {
9507 		/* FCP-2 ANSI INCITS.350:2003 */
9508 		scsi_ulto2b(0x0917, inq_ptr->version3);
9509 	} else {
9510 		/* SPI-4 ANSI INCITS.362:200x */
9511 		scsi_ulto2b(0x0B56, inq_ptr->version3);
9512 	}
9513 
9514 	if (lun == NULL) {
9515 		/* SBC-2 (no version claimed) XXX should we claim a version? */
9516 		scsi_ulto2b(0x0320, inq_ptr->version4);
9517 	} else {
9518 		switch (lun->be_lun->lun_type) {
9519 		case T_DIRECT:
9520 			/*
9521 			 * SBC-2 (no version claimed) XXX should we claim a
9522 			 * version?
9523 			 */
9524 			scsi_ulto2b(0x0320, inq_ptr->version4);
9525 			break;
9526 		case T_PROCESSOR:
9527 		default:
9528 			break;
9529 		}
9530 	}
9531 
9532 	ctsio->scsi_status = SCSI_STATUS_OK;
9533 	if (ctsio->kern_data_len > 0) {
9534 		ctsio->be_move_done = ctl_config_move_done;
9535 		ctl_datamove((union ctl_io *)ctsio);
9536 	} else {
9537 		ctsio->io_hdr.status = CTL_SUCCESS;
9538 		ctl_done((union ctl_io *)ctsio);
9539 	}
9540 
9541 	return (CTL_RETVAL_COMPLETE);
9542 }
9543 
9544 int
9545 ctl_inquiry(struct ctl_scsiio *ctsio)
9546 {
9547 	struct scsi_inquiry *cdb;
9548 	int retval;
9549 
9550 	cdb = (struct scsi_inquiry *)ctsio->cdb;
9551 
9552 	retval = 0;
9553 
9554 	CTL_DEBUG_PRINT(("ctl_inquiry\n"));
9555 
9556 	/*
9557 	 * Right now, we don't support the CmdDt inquiry information.
9558 	 * This would be nice to support in the future.  When we do
9559 	 * support it, we should change this test so that it checks to make
9560 	 * sure SI_EVPD and SI_CMDDT aren't both set at the same time.
9561 	 */
9562 #ifdef notyet
9563 	if (((cdb->byte2 & SI_EVPD)
9564 	 && (cdb->byte2 & SI_CMDDT)))
9565 #endif
9566 	if (cdb->byte2 & SI_CMDDT) {
9567 		/*
9568 		 * Point to the SI_CMDDT bit.  We might change this
9569 		 * when we support SI_CMDDT, but since both bits would be
9570 		 * "wrong", this should probably just stay as-is then.
9571 		 */
9572 		ctl_set_invalid_field(ctsio,
9573 				      /*sks_valid*/ 1,
9574 				      /*command*/ 1,
9575 				      /*field*/ 1,
9576 				      /*bit_valid*/ 1,
9577 				      /*bit*/ 1);
9578 		ctl_done((union ctl_io *)ctsio);
9579 		return (CTL_RETVAL_COMPLETE);
9580 	}
9581 	if (cdb->byte2 & SI_EVPD)
9582 		retval = ctl_inquiry_evpd(ctsio);
9583 #ifdef notyet
9584 	else if (cdb->byte2 & SI_CMDDT)
9585 		retval = ctl_inquiry_cmddt(ctsio);
9586 #endif
9587 	else
9588 		retval = ctl_inquiry_std(ctsio);
9589 
9590 	return (retval);
9591 }
9592 
9593 /*
9594  * For known CDB types, parse the LBA and length.
9595  */
9596 static int
9597 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len)
9598 {
9599 	if (io->io_hdr.io_type != CTL_IO_SCSI)
9600 		return (1);
9601 
9602 	switch (io->scsiio.cdb[0]) {
9603 	case READ_6:
9604 	case WRITE_6: {
9605 		struct scsi_rw_6 *cdb;
9606 
9607 		cdb = (struct scsi_rw_6 *)io->scsiio.cdb;
9608 
9609 		*lba = scsi_3btoul(cdb->addr);
9610 		/* only 5 bits are valid in the most significant address byte */
9611 		*lba &= 0x1fffff;
9612 		*len = cdb->length;
9613 		break;
9614 	}
9615 	case READ_10:
9616 	case WRITE_10: {
9617 		struct scsi_rw_10 *cdb;
9618 
9619 		cdb = (struct scsi_rw_10 *)io->scsiio.cdb;
9620 
9621 		*lba = scsi_4btoul(cdb->addr);
9622 		*len = scsi_2btoul(cdb->length);
9623 		break;
9624 	}
9625 	case WRITE_VERIFY_10: {
9626 		struct scsi_write_verify_10 *cdb;
9627 
9628 		cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb;
9629 
9630 		*lba = scsi_4btoul(cdb->addr);
9631 		*len = scsi_2btoul(cdb->length);
9632 		break;
9633 	}
9634 	case READ_12:
9635 	case WRITE_12: {
9636 		struct scsi_rw_12 *cdb;
9637 
9638 		cdb = (struct scsi_rw_12 *)io->scsiio.cdb;
9639 
9640 		*lba = scsi_4btoul(cdb->addr);
9641 		*len = scsi_4btoul(cdb->length);
9642 		break;
9643 	}
9644 	case WRITE_VERIFY_12: {
9645 		struct scsi_write_verify_12 *cdb;
9646 
9647 		cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb;
9648 
9649 		*lba = scsi_4btoul(cdb->addr);
9650 		*len = scsi_4btoul(cdb->length);
9651 		break;
9652 	}
9653 	case READ_16:
9654 	case WRITE_16: {
9655 		struct scsi_rw_16 *cdb;
9656 
9657 		cdb = (struct scsi_rw_16 *)io->scsiio.cdb;
9658 
9659 		*lba = scsi_8btou64(cdb->addr);
9660 		*len = scsi_4btoul(cdb->length);
9661 		break;
9662 	}
9663 	case WRITE_VERIFY_16: {
9664 		struct scsi_write_verify_16 *cdb;
9665 
9666 		cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb;
9667 
9668 
9669 		*lba = scsi_8btou64(cdb->addr);
9670 		*len = scsi_4btoul(cdb->length);
9671 		break;
9672 	}
9673 	default:
9674 		return (1);
9675 		break; /* NOTREACHED */
9676 	}
9677 
9678 	return (0);
9679 }
9680 
9681 static ctl_action
9682 ctl_extent_check_lba(uint64_t lba1, uint32_t len1, uint64_t lba2, uint32_t len2)
9683 {
9684 	uint64_t endlba1, endlba2;
9685 
9686 	endlba1 = lba1 + len1 - 1;
9687 	endlba2 = lba2 + len2 - 1;
9688 
9689 	if ((endlba1 < lba2)
9690 	 || (endlba2 < lba1))
9691 		return (CTL_ACTION_PASS);
9692 	else
9693 		return (CTL_ACTION_BLOCK);
9694 }
9695 
9696 static ctl_action
9697 ctl_extent_check(union ctl_io *io1, union ctl_io *io2)
9698 {
9699 	uint64_t lba1, lba2;
9700 	uint32_t len1, len2;
9701 	int retval;
9702 
9703 	retval = ctl_get_lba_len(io1, &lba1, &len1);
9704 	if (retval != 0)
9705 		return (CTL_ACTION_ERROR);
9706 
9707 	retval = ctl_get_lba_len(io2, &lba2, &len2);
9708 	if (retval != 0)
9709 		return (CTL_ACTION_ERROR);
9710 
9711 	return (ctl_extent_check_lba(lba1, len1, lba2, len2));
9712 }
9713 
9714 static ctl_action
9715 ctl_check_for_blockage(union ctl_io *pending_io, union ctl_io *ooa_io)
9716 {
9717 	struct ctl_cmd_entry *pending_entry, *ooa_entry;
9718 	ctl_serialize_action *serialize_row;
9719 
9720 	/*
9721 	 * The initiator attempted multiple untagged commands at the same
9722 	 * time.  Can't do that.
9723 	 */
9724 	if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
9725 	 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
9726 	 && ((pending_io->io_hdr.nexus.targ_port ==
9727 	      ooa_io->io_hdr.nexus.targ_port)
9728 	  && (pending_io->io_hdr.nexus.initid.id ==
9729 	      ooa_io->io_hdr.nexus.initid.id))
9730 	 && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0))
9731 		return (CTL_ACTION_OVERLAP);
9732 
9733 	/*
9734 	 * The initiator attempted to send multiple tagged commands with
9735 	 * the same ID.  (It's fine if different initiators have the same
9736 	 * tag ID.)
9737 	 *
9738 	 * Even if all of those conditions are true, we don't kill the I/O
9739 	 * if the command ahead of us has been aborted.  We won't end up
9740 	 * sending it to the FETD, and it's perfectly legal to resend a
9741 	 * command with the same tag number as long as the previous
9742 	 * instance of this tag number has been aborted somehow.
9743 	 */
9744 	if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
9745 	 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
9746 	 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num)
9747 	 && ((pending_io->io_hdr.nexus.targ_port ==
9748 	      ooa_io->io_hdr.nexus.targ_port)
9749 	  && (pending_io->io_hdr.nexus.initid.id ==
9750 	      ooa_io->io_hdr.nexus.initid.id))
9751 	 && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0))
9752 		return (CTL_ACTION_OVERLAP_TAG);
9753 
9754 	/*
9755 	 * If we get a head of queue tag, SAM-3 says that we should
9756 	 * immediately execute it.
9757 	 *
9758 	 * What happens if this command would normally block for some other
9759 	 * reason?  e.g. a request sense with a head of queue tag
9760 	 * immediately after a write.  Normally that would block, but this
9761 	 * will result in its getting executed immediately...
9762 	 *
9763 	 * We currently return "pass" instead of "skip", so we'll end up
9764 	 * going through the rest of the queue to check for overlapped tags.
9765 	 *
9766 	 * XXX KDM check for other types of blockage first??
9767 	 */
9768 	if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
9769 		return (CTL_ACTION_PASS);
9770 
9771 	/*
9772 	 * Ordered tags have to block until all items ahead of them
9773 	 * have completed.  If we get called with an ordered tag, we always
9774 	 * block, if something else is ahead of us in the queue.
9775 	 */
9776 	if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED)
9777 		return (CTL_ACTION_BLOCK);
9778 
9779 	/*
9780 	 * Simple tags get blocked until all head of queue and ordered tags
9781 	 * ahead of them have completed.  I'm lumping untagged commands in
9782 	 * with simple tags here.  XXX KDM is that the right thing to do?
9783 	 */
9784 	if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
9785 	  || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE))
9786 	 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
9787 	  || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
9788 		return (CTL_ACTION_BLOCK);
9789 
9790 	pending_entry = &ctl_cmd_table[pending_io->scsiio.cdb[0]];
9791 	ooa_entry = &ctl_cmd_table[ooa_io->scsiio.cdb[0]];
9792 
9793 	serialize_row = ctl_serialize_table[ooa_entry->seridx];
9794 
9795 	switch (serialize_row[pending_entry->seridx]) {
9796 	case CTL_SER_BLOCK:
9797 		return (CTL_ACTION_BLOCK);
9798 		break; /* NOTREACHED */
9799 	case CTL_SER_EXTENT:
9800 		return (ctl_extent_check(pending_io, ooa_io));
9801 		break; /* NOTREACHED */
9802 	case CTL_SER_PASS:
9803 		return (CTL_ACTION_PASS);
9804 		break; /* NOTREACHED */
9805 	case CTL_SER_SKIP:
9806 		return (CTL_ACTION_SKIP);
9807 		break;
9808 	default:
9809 		panic("invalid serialization value %d",
9810 		      serialize_row[pending_entry->seridx]);
9811 		break; /* NOTREACHED */
9812 	}
9813 
9814 	return (CTL_ACTION_ERROR);
9815 }
9816 
9817 /*
9818  * Check for blockage or overlaps against the OOA (Order Of Arrival) queue.
9819  * Assumptions:
9820  * - caller holds ctl_lock
9821  * - pending_io is generally either incoming, or on the blocked queue
9822  * - starting I/O is the I/O we want to start the check with.
9823  */
9824 static ctl_action
9825 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
9826 	      union ctl_io *starting_io)
9827 {
9828 	union ctl_io *ooa_io;
9829 	ctl_action action;
9830 
9831 	/*
9832 	 * Run back along the OOA queue, starting with the current
9833 	 * blocked I/O and going through every I/O before it on the
9834 	 * queue.  If starting_io is NULL, we'll just end up returning
9835 	 * CTL_ACTION_PASS.
9836 	 */
9837 	for (ooa_io = starting_io; ooa_io != NULL;
9838 	     ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq,
9839 	     ooa_links)){
9840 
9841 		/*
9842 		 * This routine just checks to see whether
9843 		 * cur_blocked is blocked by ooa_io, which is ahead
9844 		 * of it in the queue.  It doesn't queue/dequeue
9845 		 * cur_blocked.
9846 		 */
9847 		action = ctl_check_for_blockage(pending_io, ooa_io);
9848 		switch (action) {
9849 		case CTL_ACTION_BLOCK:
9850 		case CTL_ACTION_OVERLAP:
9851 		case CTL_ACTION_OVERLAP_TAG:
9852 		case CTL_ACTION_SKIP:
9853 		case CTL_ACTION_ERROR:
9854 			return (action);
9855 			break; /* NOTREACHED */
9856 		case CTL_ACTION_PASS:
9857 			break;
9858 		default:
9859 			panic("invalid action %d", action);
9860 			break;  /* NOTREACHED */
9861 		}
9862 	}
9863 
9864 	return (CTL_ACTION_PASS);
9865 }
9866 
9867 /*
9868  * Assumptions:
9869  * - An I/O has just completed, and has been removed from the per-LUN OOA
9870  *   queue, so some items on the blocked queue may now be unblocked.
9871  * - The caller holds ctl_softc->ctl_lock
9872  */
9873 static int
9874 ctl_check_blocked(struct ctl_lun *lun)
9875 {
9876 	union ctl_io *cur_blocked, *next_blocked;
9877 
9878 	/*
9879 	 * Run forward from the head of the blocked queue, checking each
9880 	 * entry against the I/Os prior to it on the OOA queue to see if
9881 	 * there is still any blockage.
9882 	 *
9883 	 * We cannot use the TAILQ_FOREACH() macro, because it can't deal
9884 	 * with our removing a variable on it while it is traversing the
9885 	 * list.
9886 	 */
9887 	for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue);
9888 	     cur_blocked != NULL; cur_blocked = next_blocked) {
9889 		union ctl_io *prev_ooa;
9890 		ctl_action action;
9891 
9892 		next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr,
9893 							  blocked_links);
9894 
9895 		prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr,
9896 						      ctl_ooaq, ooa_links);
9897 
9898 		/*
9899 		 * If cur_blocked happens to be the first item in the OOA
9900 		 * queue now, prev_ooa will be NULL, and the action
9901 		 * returned will just be CTL_ACTION_PASS.
9902 		 */
9903 		action = ctl_check_ooa(lun, cur_blocked, prev_ooa);
9904 
9905 		switch (action) {
9906 		case CTL_ACTION_BLOCK:
9907 			/* Nothing to do here, still blocked */
9908 			break;
9909 		case CTL_ACTION_OVERLAP:
9910 		case CTL_ACTION_OVERLAP_TAG:
9911 			/*
9912 			 * This shouldn't happen!  In theory we've already
9913 			 * checked this command for overlap...
9914 			 */
9915 			break;
9916 		case CTL_ACTION_PASS:
9917 		case CTL_ACTION_SKIP: {
9918 			struct ctl_softc *softc;
9919 			struct ctl_cmd_entry *entry;
9920 			uint32_t initidx;
9921 			uint8_t opcode;
9922 			int isc_retval;
9923 
9924 			/*
9925 			 * The skip case shouldn't happen, this transaction
9926 			 * should have never made it onto the blocked queue.
9927 			 */
9928 			/*
9929 			 * This I/O is no longer blocked, we can remove it
9930 			 * from the blocked queue.  Since this is a TAILQ
9931 			 * (doubly linked list), we can do O(1) removals
9932 			 * from any place on the list.
9933 			 */
9934 			TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr,
9935 				     blocked_links);
9936 			cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
9937 
9938 			if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){
9939 				/*
9940 				 * Need to send IO back to original side to
9941 				 * run
9942 				 */
9943 				union ctl_ha_msg msg_info;
9944 
9945 				msg_info.hdr.original_sc =
9946 					cur_blocked->io_hdr.original_sc;
9947 				msg_info.hdr.serializing_sc = cur_blocked;
9948 				msg_info.hdr.msg_type = CTL_MSG_R2R;
9949 				if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
9950 				     &msg_info, sizeof(msg_info), 0)) >
9951 				     CTL_HA_STATUS_SUCCESS) {
9952 					printf("CTL:Check Blocked error from "
9953 					       "ctl_ha_msg_send %d\n",
9954 					       isc_retval);
9955 				}
9956 				break;
9957 			}
9958 			opcode = cur_blocked->scsiio.cdb[0];
9959 			entry = &ctl_cmd_table[opcode];
9960 			softc = control_softc;
9961 
9962 			initidx = ctl_get_initindex(&cur_blocked->io_hdr.nexus);
9963 
9964 			/*
9965 			 * Check this I/O for LUN state changes that may
9966 			 * have happened while this command was blocked.
9967 			 * The LUN state may have been changed by a command
9968 			 * ahead of us in the queue, so we need to re-check
9969 			 * for any states that can be caused by SCSI
9970 			 * commands.
9971 			 */
9972 			if (ctl_scsiio_lun_check(softc, lun, entry,
9973 						 &cur_blocked->scsiio) == 0) {
9974 				cur_blocked->io_hdr.flags |=
9975 				                      CTL_FLAG_IS_WAS_ON_RTR;
9976 				STAILQ_INSERT_TAIL(&lun->ctl_softc->rtr_queue,
9977 						   &cur_blocked->io_hdr, links);
9978 				/*
9979 				 * In the non CTL_DONE_THREAD case, we need
9980 				 * to wake up the work thread here.  When
9981 				 * we're processing completed requests from
9982 				 * the work thread context, we'll pop back
9983 				 * around and end up pulling things off the
9984 				 * RtR queue.  When we aren't processing
9985 				 * things from the work thread context,
9986 				 * though, we won't ever check the RtR queue.
9987 				 * So we need to wake up the thread to clear
9988 				 * things off the queue.  Otherwise this
9989 				 * transaction will just sit on the RtR queue
9990 				 * until a new I/O comes in.  (Which may or
9991 				 * may not happen...)
9992 				 */
9993 #ifndef CTL_DONE_THREAD
9994 				ctl_wakeup_thread();
9995 #endif
9996 			} else
9997 				ctl_done_lock(cur_blocked, /*have_lock*/ 1);
9998 			break;
9999 		}
10000 		default:
10001 			/*
10002 			 * This probably shouldn't happen -- we shouldn't
10003 			 * get CTL_ACTION_ERROR, or anything else.
10004 			 */
10005 			break;
10006 		}
10007 	}
10008 
10009 	return (CTL_RETVAL_COMPLETE);
10010 }
10011 
10012 /*
10013  * This routine (with one exception) checks LUN flags that can be set by
10014  * commands ahead of us in the OOA queue.  These flags have to be checked
10015  * when a command initially comes in, and when we pull a command off the
10016  * blocked queue and are preparing to execute it.  The reason we have to
10017  * check these flags for commands on the blocked queue is that the LUN
10018  * state may have been changed by a command ahead of us while we're on the
10019  * blocked queue.
10020  *
10021  * Ordering is somewhat important with these checks, so please pay
10022  * careful attention to the placement of any new checks.
10023  */
10024 static int
10025 ctl_scsiio_lun_check(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
10026 		     struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
10027 {
10028 	int retval;
10029 
10030 	retval = 0;
10031 
10032 	/*
10033 	 * If this shelf is a secondary shelf controller, we have to reject
10034 	 * any media access commands.
10035 	 */
10036 #if 0
10037 	/* No longer needed for HA */
10038 	if (((ctl_softc->flags & CTL_FLAG_MASTER_SHELF) == 0)
10039 	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0)) {
10040 		ctl_set_lun_standby(ctsio);
10041 		retval = 1;
10042 		goto bailout;
10043 	}
10044 #endif
10045 
10046 	/*
10047 	 * Check for a reservation conflict.  If this command isn't allowed
10048 	 * even on reserved LUNs, and if this initiator isn't the one who
10049 	 * reserved us, reject the command with a reservation conflict.
10050 	 */
10051 	if ((lun->flags & CTL_LUN_RESERVED)
10052 	 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
10053 		if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id)
10054 		 || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port)
10055 		 || (ctsio->io_hdr.nexus.targ_target.id !=
10056 		     lun->rsv_nexus.targ_target.id)) {
10057 			ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
10058 			ctsio->io_hdr.status = CTL_SCSI_ERROR;
10059 			retval = 1;
10060 			goto bailout;
10061 		}
10062 	}
10063 
10064 	if ( (lun->flags & CTL_LUN_PR_RESERVED)
10065 	 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV) == 0)) {
10066 		uint32_t residx;
10067 
10068 		residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
10069 		/*
10070 		 * if we aren't registered or it's a res holder type
10071 		 * reservation and this isn't the res holder then set a
10072 		 * conflict.
10073 		 * NOTE: Commands which might be allowed on write exclusive
10074 		 * type reservations are checked in the particular command
10075 		 * for a conflict. Read and SSU are the only ones.
10076 		 */
10077 		if (!lun->per_res[residx].registered
10078 		 || (residx != lun->pr_res_idx && lun->res_type < 4)) {
10079 			ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
10080 			ctsio->io_hdr.status = CTL_SCSI_ERROR;
10081 			retval = 1;
10082 			goto bailout;
10083 		}
10084 
10085 	}
10086 
10087 	if ((lun->flags & CTL_LUN_OFFLINE)
10088 	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) {
10089 		ctl_set_lun_not_ready(ctsio);
10090 		retval = 1;
10091 		goto bailout;
10092 	}
10093 
10094 	/*
10095 	 * If the LUN is stopped, see if this particular command is allowed
10096 	 * for a stopped lun.  Otherwise, reject it with 0x04,0x02.
10097 	 */
10098 	if ((lun->flags & CTL_LUN_STOPPED)
10099 	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) {
10100 		/* "Logical unit not ready, initializing cmd. required" */
10101 		ctl_set_lun_stopped(ctsio);
10102 		retval = 1;
10103 		goto bailout;
10104 	}
10105 
10106 	if ((lun->flags & CTL_LUN_INOPERABLE)
10107 	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) {
10108 		/* "Medium format corrupted" */
10109 		ctl_set_medium_format_corrupted(ctsio);
10110 		retval = 1;
10111 		goto bailout;
10112 	}
10113 
10114 bailout:
10115 	return (retval);
10116 
10117 }
10118 
10119 static void
10120 ctl_failover_io(union ctl_io *io, int have_lock)
10121 {
10122 	ctl_set_busy(&io->scsiio);
10123 	ctl_done_lock(io, have_lock);
10124 }
10125 
10126 static void
10127 ctl_failover(void)
10128 {
10129 	struct ctl_lun *lun;
10130 	struct ctl_softc *ctl_softc;
10131 	union ctl_io *next_io, *pending_io;
10132 	union ctl_io *io;
10133 	int lun_idx;
10134 	int i;
10135 
10136 	ctl_softc = control_softc;
10137 
10138 	mtx_lock(&ctl_softc->ctl_lock);
10139 	/*
10140 	 * Remove any cmds from the other SC from the rtr queue.  These
10141 	 * will obviously only be for LUNs for which we're the primary.
10142 	 * We can't send status or get/send data for these commands.
10143 	 * Since they haven't been executed yet, we can just remove them.
10144 	 * We'll either abort them or delete them below, depending on
10145 	 * which HA mode we're in.
10146 	 */
10147 	for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->rtr_queue);
10148 	     io != NULL; io = next_io) {
10149 		next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
10150 		if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
10151 			STAILQ_REMOVE(&ctl_softc->rtr_queue, &io->io_hdr,
10152 				      ctl_io_hdr, links);
10153 	}
10154 
10155 	for (lun_idx=0; lun_idx < ctl_softc->num_luns; lun_idx++) {
10156 		lun = ctl_softc->ctl_luns[lun_idx];
10157 		if (lun==NULL)
10158 			continue;
10159 
10160 		/*
10161 		 * Processor LUNs are primary on both sides.
10162 		 * XXX will this always be true?
10163 		 */
10164 		if (lun->be_lun->lun_type == T_PROCESSOR)
10165 			continue;
10166 
10167 		if ((lun->flags & CTL_LUN_PRIMARY_SC)
10168 		 && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
10169 			printf("FAILOVER: primary lun %d\n", lun_idx);
10170 		        /*
10171 			 * Remove all commands from the other SC. First from the
10172 			 * blocked queue then from the ooa queue. Once we have
10173 			 * removed them. Call ctl_check_blocked to see if there
10174 			 * is anything that can run.
10175 			 */
10176 			for (io = (union ctl_io *)TAILQ_FIRST(
10177 			     &lun->blocked_queue); io != NULL; io = next_io) {
10178 
10179 		        	next_io = (union ctl_io *)TAILQ_NEXT(
10180 				    &io->io_hdr, blocked_links);
10181 
10182 				if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
10183 					TAILQ_REMOVE(&lun->blocked_queue,
10184 						     &io->io_hdr,blocked_links);
10185 					io->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
10186 					TAILQ_REMOVE(&lun->ooa_queue,
10187 						     &io->io_hdr, ooa_links);
10188 
10189 					ctl_free_io_internal(io, 1);
10190 				}
10191 			}
10192 
10193 			for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
10194 	     		     io != NULL; io = next_io) {
10195 
10196 		        	next_io = (union ctl_io *)TAILQ_NEXT(
10197 				    &io->io_hdr, ooa_links);
10198 
10199 				if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
10200 
10201 					TAILQ_REMOVE(&lun->ooa_queue,
10202 						&io->io_hdr,
10203 					     	ooa_links);
10204 
10205 					ctl_free_io_internal(io, 1);
10206 				}
10207 			}
10208 			ctl_check_blocked(lun);
10209 		} else if ((lun->flags & CTL_LUN_PRIMARY_SC)
10210 			&& (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) {
10211 
10212 			printf("FAILOVER: primary lun %d\n", lun_idx);
10213 			/*
10214 			 * Abort all commands from the other SC.  We can't
10215 			 * send status back for them now.  These should get
10216 			 * cleaned up when they are completed or come out
10217 			 * for a datamove operation.
10218 			 */
10219 			for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
10220 	     		     io != NULL; io = next_io) {
10221 		        	next_io = (union ctl_io *)TAILQ_NEXT(
10222 					&io->io_hdr, ooa_links);
10223 
10224 				if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
10225 					io->io_hdr.flags |= CTL_FLAG_ABORT;
10226 			}
10227 		} else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
10228 			&& (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) {
10229 
10230 			printf("FAILOVER: secondary lun %d\n", lun_idx);
10231 
10232 			lun->flags |= CTL_LUN_PRIMARY_SC;
10233 
10234 			/*
10235 			 * We send all I/O that was sent to this controller
10236 			 * and redirected to the other side back with
10237 			 * busy status, and have the initiator retry it.
10238 			 * Figuring out how much data has been transferred,
10239 			 * etc. and picking up where we left off would be
10240 			 * very tricky.
10241 			 *
10242 			 * XXX KDM need to remove I/O from the blocked
10243 			 * queue as well!
10244 			 */
10245 			for (pending_io = (union ctl_io *)TAILQ_FIRST(
10246 			     &lun->ooa_queue); pending_io != NULL;
10247 			     pending_io = next_io) {
10248 
10249 				next_io =  (union ctl_io *)TAILQ_NEXT(
10250 					&pending_io->io_hdr, ooa_links);
10251 
10252 				pending_io->io_hdr.flags &=
10253 					~CTL_FLAG_SENT_2OTHER_SC;
10254 
10255 				if (pending_io->io_hdr.flags &
10256 				    CTL_FLAG_IO_ACTIVE) {
10257 					pending_io->io_hdr.flags |=
10258 						CTL_FLAG_FAILOVER;
10259 				} else {
10260 					ctl_set_busy(&pending_io->scsiio);
10261 					ctl_done_lock(pending_io,
10262 						      /*have_lock*/1);
10263 				}
10264 			}
10265 
10266 			/*
10267 			 * Build Unit Attention
10268 			 */
10269 			for (i = 0; i < CTL_MAX_INITIATORS; i++) {
10270 				lun->pending_sense[i].ua_pending |=
10271 				                     CTL_UA_ASYM_ACC_CHANGE;
10272 			}
10273 		} else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
10274 			&& (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
10275 			printf("FAILOVER: secondary lun %d\n", lun_idx);
10276 			/*
10277 			 * if the first io on the OOA is not on the RtR queue
10278 			 * add it.
10279 			 */
10280 			lun->flags |= CTL_LUN_PRIMARY_SC;
10281 
10282 			pending_io = (union ctl_io *)TAILQ_FIRST(
10283 			    &lun->ooa_queue);
10284 			if (pending_io==NULL) {
10285 				printf("Nothing on OOA queue\n");
10286 				continue;
10287 			}
10288 
10289 			pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
10290 			if ((pending_io->io_hdr.flags &
10291 			     CTL_FLAG_IS_WAS_ON_RTR) == 0) {
10292 				pending_io->io_hdr.flags |=
10293 				    CTL_FLAG_IS_WAS_ON_RTR;
10294 				STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
10295 						   &pending_io->io_hdr, links);
10296 			}
10297 #if 0
10298 			else
10299 			{
10300 				printf("Tag 0x%04x is running\n",
10301 				      pending_io->scsiio.tag_num);
10302 			}
10303 #endif
10304 
10305 			next_io = (union ctl_io *)TAILQ_NEXT(
10306 			    &pending_io->io_hdr, ooa_links);
10307 			for (pending_io=next_io; pending_io != NULL;
10308 			     pending_io = next_io) {
10309 				pending_io->io_hdr.flags &=
10310 				    ~CTL_FLAG_SENT_2OTHER_SC;
10311 				next_io = (union ctl_io *)TAILQ_NEXT(
10312 					&pending_io->io_hdr, ooa_links);
10313 				if (pending_io->io_hdr.flags &
10314 				    CTL_FLAG_IS_WAS_ON_RTR) {
10315 #if 0
10316 				        printf("Tag 0x%04x is running\n",
10317 				      		pending_io->scsiio.tag_num);
10318 #endif
10319 					continue;
10320 				}
10321 
10322 				switch (ctl_check_ooa(lun, pending_io,
10323 			            (union ctl_io *)TAILQ_PREV(
10324 				    &pending_io->io_hdr, ctl_ooaq,
10325 				    ooa_links))) {
10326 
10327 				case CTL_ACTION_BLOCK:
10328 					TAILQ_INSERT_TAIL(&lun->blocked_queue,
10329 							  &pending_io->io_hdr,
10330 							  blocked_links);
10331 					pending_io->io_hdr.flags |=
10332 					    CTL_FLAG_BLOCKED;
10333 					break;
10334 				case CTL_ACTION_PASS:
10335 				case CTL_ACTION_SKIP:
10336 					pending_io->io_hdr.flags |=
10337 					    CTL_FLAG_IS_WAS_ON_RTR;
10338 					STAILQ_INSERT_TAIL(
10339 					    &ctl_softc->rtr_queue,
10340 					    &pending_io->io_hdr, links);
10341 					break;
10342 				case CTL_ACTION_OVERLAP:
10343 					ctl_set_overlapped_cmd(
10344 					    (struct ctl_scsiio *)pending_io);
10345 					ctl_done_lock(pending_io,
10346 						      /*have_lock*/ 1);
10347 					break;
10348 				case CTL_ACTION_OVERLAP_TAG:
10349 					ctl_set_overlapped_tag(
10350 					    (struct ctl_scsiio *)pending_io,
10351 					    pending_io->scsiio.tag_num & 0xff);
10352 					ctl_done_lock(pending_io,
10353 						      /*have_lock*/ 1);
10354 					break;
10355 				case CTL_ACTION_ERROR:
10356 				default:
10357 					ctl_set_internal_failure(
10358 						(struct ctl_scsiio *)pending_io,
10359 						0,  // sks_valid
10360 						0); //retry count
10361 					ctl_done_lock(pending_io,
10362 						      /*have_lock*/ 1);
10363 					break;
10364 				}
10365 			}
10366 
10367 			/*
10368 			 * Build Unit Attention
10369 			 */
10370 			for (i = 0; i < CTL_MAX_INITIATORS; i++) {
10371 				lun->pending_sense[i].ua_pending |=
10372 				                     CTL_UA_ASYM_ACC_CHANGE;
10373 			}
10374 		} else {
10375 			panic("Unhandled HA mode failover, LUN flags = %#x, "
10376 			      "ha_mode = #%x", lun->flags, ctl_softc->ha_mode);
10377 		}
10378 	}
10379 	ctl_pause_rtr = 0;
10380 	mtx_unlock(&ctl_softc->ctl_lock);
10381 }
10382 
10383 static int
10384 ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
10385 {
10386 	struct ctl_lun *lun;
10387 	struct ctl_cmd_entry *entry;
10388 	uint8_t opcode;
10389 	uint32_t initidx;
10390 	int retval;
10391 
10392 	retval = 0;
10393 
10394 	lun = NULL;
10395 
10396 	opcode = ctsio->cdb[0];
10397 
10398 	mtx_lock(&ctl_softc->ctl_lock);
10399 
10400 	if ((ctsio->io_hdr.nexus.targ_lun < CTL_MAX_LUNS)
10401 	 && (ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun] != NULL)) {
10402 		lun = ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun];
10403 		/*
10404 		 * If the LUN is invalid, pretend that it doesn't exist.
10405 		 * It will go away as soon as all pending I/O has been
10406 		 * completed.
10407 		 */
10408 		if (lun->flags & CTL_LUN_DISABLED) {
10409 			lun = NULL;
10410 		} else {
10411 			ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
10412 			ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
10413 				lun->be_lun;
10414 			if (lun->be_lun->lun_type == T_PROCESSOR) {
10415 				ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV;
10416 			}
10417 		}
10418 	} else {
10419 		ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
10420 		ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
10421 	}
10422 
10423 	entry = &ctl_cmd_table[opcode];
10424 
10425 	ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
10426 	ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;
10427 
10428 	/*
10429 	 * Check to see whether we can send this command to LUNs that don't
10430 	 * exist.  This should pretty much only be the case for inquiry
10431 	 * and request sense.  Further checks, below, really require having
10432 	 * a LUN, so we can't really check the command anymore.  Just put
10433 	 * it on the rtr queue.
10434 	 */
10435 	if (lun == NULL) {
10436 		if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS)
10437 			goto queue_rtr;
10438 
10439 		ctl_set_unsupported_lun(ctsio);
10440 		mtx_unlock(&ctl_softc->ctl_lock);
10441 		ctl_done((union ctl_io *)ctsio);
10442 		goto bailout;
10443 	} else {
10444 		/*
10445 		 * Every I/O goes into the OOA queue for a particular LUN, and
10446 		 * stays there until completion.
10447 		 */
10448 		TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
10449 
10450 		/*
10451 		 * Make sure we support this particular command on this LUN.
10452 		 * e.g., we don't support writes to the control LUN.
10453 		 */
10454 		switch (lun->be_lun->lun_type) {
10455 		case T_PROCESSOR:
10456 		 	if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0)
10457 			 && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS)
10458 			      == 0)) {
10459 				ctl_set_invalid_opcode(ctsio);
10460 				mtx_unlock(&ctl_softc->ctl_lock);
10461 				ctl_done((union ctl_io *)ctsio);
10462 				goto bailout;
10463 			}
10464 			break;
10465 		case T_DIRECT:
10466 			if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0)
10467 			 && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS)
10468 			      == 0)){
10469 				ctl_set_invalid_opcode(ctsio);
10470 				mtx_unlock(&ctl_softc->ctl_lock);
10471 				ctl_done((union ctl_io *)ctsio);
10472 				goto bailout;
10473 			}
10474 			break;
10475 		default:
10476 			printf("Unsupported CTL LUN type %d\n",
10477 			       lun->be_lun->lun_type);
10478 			panic("Unsupported CTL LUN type %d\n",
10479 			      lun->be_lun->lun_type);
10480 			break; /* NOTREACHED */
10481 		}
10482 	}
10483 
10484 	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
10485 
10486 	/*
10487 	 * If we've got a request sense, it'll clear the contingent
10488 	 * allegiance condition.  Otherwise, if we have a CA condition for
10489 	 * this initiator, clear it, because it sent down a command other
10490 	 * than request sense.
10491 	 */
10492 	if ((opcode != REQUEST_SENSE)
10493 	 && (ctl_is_set(lun->have_ca, initidx)))
10494 		ctl_clear_mask(lun->have_ca, initidx);
10495 
10496 	/*
10497 	 * If the command has this flag set, it handles its own unit
10498 	 * attention reporting, we shouldn't do anything.  Otherwise we
10499 	 * check for any pending unit attentions, and send them back to the
10500 	 * initiator.  We only do this when a command initially comes in,
10501 	 * not when we pull it off the blocked queue.
10502 	 *
10503 	 * According to SAM-3, section 5.3.2, the order that things get
10504 	 * presented back to the host is basically unit attentions caused
10505 	 * by some sort of reset event, busy status, reservation conflicts
10506 	 * or task set full, and finally any other status.
10507 	 *
10508 	 * One issue here is that some of the unit attentions we report
10509 	 * don't fall into the "reset" category (e.g. "reported luns data
10510 	 * has changed").  So reporting it here, before the reservation
10511 	 * check, may be technically wrong.  I guess the only thing to do
10512 	 * would be to check for and report the reset events here, and then
10513 	 * check for the other unit attention types after we check for a
10514 	 * reservation conflict.
10515 	 *
10516 	 * XXX KDM need to fix this
10517 	 */
10518 	if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
10519 		ctl_ua_type ua_type;
10520 
10521 		ua_type = lun->pending_sense[initidx].ua_pending;
10522 		if (ua_type != CTL_UA_NONE) {
10523 			scsi_sense_data_type sense_format;
10524 
10525 			if (lun != NULL)
10526 				sense_format = (lun->flags &
10527 				    CTL_LUN_SENSE_DESC) ? SSD_TYPE_DESC :
10528 				    SSD_TYPE_FIXED;
10529 			else
10530 				sense_format = SSD_TYPE_FIXED;
10531 
10532 			ua_type = ctl_build_ua(ua_type, &ctsio->sense_data,
10533 					       sense_format);
10534 			if (ua_type != CTL_UA_NONE) {
10535 				ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
10536 				ctsio->io_hdr.status = CTL_SCSI_ERROR |
10537 						       CTL_AUTOSENSE;
10538 				ctsio->sense_len = SSD_FULL_SIZE;
10539 				lun->pending_sense[initidx].ua_pending &=
10540 					~ua_type;
10541 				mtx_unlock(&ctl_softc->ctl_lock);
10542 				ctl_done((union ctl_io *)ctsio);
10543 				goto bailout;
10544 			}
10545 		}
10546 	}
10547 
10548 
10549 	if (ctl_scsiio_lun_check(ctl_softc, lun, entry, ctsio) != 0) {
10550 		mtx_unlock(&ctl_softc->ctl_lock);
10551 		ctl_done((union ctl_io *)ctsio);
10552 		goto bailout;
10553 	}
10554 
10555 	/*
10556 	 * XXX CHD this is where we want to send IO to other side if
10557 	 * this LUN is secondary on this SC. We will need to make a copy
10558 	 * of the IO and flag the IO on this side as SENT_2OTHER and the flag
10559 	 * the copy we send as FROM_OTHER.
10560 	 * We also need to stuff the address of the original IO so we can
10561 	 * find it easily. Something similar will need be done on the other
10562 	 * side so when we are done we can find the copy.
10563 	 */
10564 	if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
10565 		union ctl_ha_msg msg_info;
10566 		int isc_retval;
10567 
10568 		ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
10569 
10570 		msg_info.hdr.msg_type = CTL_MSG_SERIALIZE;
10571 		msg_info.hdr.original_sc = (union ctl_io *)ctsio;
10572 #if 0
10573 		printf("1. ctsio %p\n", ctsio);
10574 #endif
10575 		msg_info.hdr.serializing_sc = NULL;
10576 		msg_info.hdr.nexus = ctsio->io_hdr.nexus;
10577 		msg_info.scsi.tag_num = ctsio->tag_num;
10578 		msg_info.scsi.tag_type = ctsio->tag_type;
10579 		memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN);
10580 
10581 		ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
10582 
10583 		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
10584 		    (void *)&msg_info, sizeof(msg_info), 0)) >
10585 		    CTL_HA_STATUS_SUCCESS) {
10586 			printf("CTL:precheck, ctl_ha_msg_send returned %d\n",
10587 			       isc_retval);
10588 			printf("CTL:opcode is %x\n",opcode);
10589 		} else {
10590 #if 0
10591 			printf("CTL:Precheck sent msg, opcode is %x\n",opcode);
10592 #endif
10593 		}
10594 
10595 		/*
10596 		 * XXX KDM this I/O is off the incoming queue, but hasn't
10597 		 * been inserted on any other queue.  We may need to come
10598 		 * up with a holding queue while we wait for serialization
10599 		 * so that we have an idea of what we're waiting for from
10600 		 * the other side.
10601 		 */
10602 		goto bailout_unlock;
10603 	}
10604 
10605 	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
10606 			      (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr,
10607 			      ctl_ooaq, ooa_links))) {
10608 	case CTL_ACTION_BLOCK:
10609 		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
10610 		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
10611 				  blocked_links);
10612 		goto bailout_unlock;
10613 		break; /* NOTREACHED */
10614 	case CTL_ACTION_PASS:
10615 	case CTL_ACTION_SKIP:
10616 		goto queue_rtr;
10617 		break; /* NOTREACHED */
10618 	case CTL_ACTION_OVERLAP:
10619 		ctl_set_overlapped_cmd(ctsio);
10620 		mtx_unlock(&ctl_softc->ctl_lock);
10621 		ctl_done((union ctl_io *)ctsio);
10622 		goto bailout;
10623 		break; /* NOTREACHED */
10624 	case CTL_ACTION_OVERLAP_TAG:
10625 		ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff);
10626 		mtx_unlock(&ctl_softc->ctl_lock);
10627 		ctl_done((union ctl_io *)ctsio);
10628 		goto bailout;
10629 		break; /* NOTREACHED */
10630 	case CTL_ACTION_ERROR:
10631 	default:
10632 		ctl_set_internal_failure(ctsio,
10633 					 /*sks_valid*/ 0,
10634 					 /*retry_count*/ 0);
10635 		mtx_unlock(&ctl_softc->ctl_lock);
10636 		ctl_done((union ctl_io *)ctsio);
10637 		goto bailout;
10638 		break; /* NOTREACHED */
10639 	}
10640 
10641 	goto bailout_unlock;
10642 
10643 queue_rtr:
10644 	ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
10645 	STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue, &ctsio->io_hdr, links);
10646 
10647 bailout_unlock:
10648 	mtx_unlock(&ctl_softc->ctl_lock);
10649 
10650 bailout:
10651 	return (retval);
10652 }
10653 
10654 static int
10655 ctl_scsiio(struct ctl_scsiio *ctsio)
10656 {
10657 	int retval;
10658 	struct ctl_cmd_entry *entry;
10659 
10660 	retval = CTL_RETVAL_COMPLETE;
10661 
10662 	CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0]));
10663 
10664 	entry = &ctl_cmd_table[ctsio->cdb[0]];
10665 
10666 	/*
10667 	 * If this I/O has been aborted, just send it straight to
10668 	 * ctl_done() without executing it.
10669 	 */
10670 	if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) {
10671 		ctl_done((union ctl_io *)ctsio);
10672 		goto bailout;
10673 	}
10674 
10675 	/*
10676 	 * All the checks should have been handled by ctl_scsiio_precheck().
10677 	 * We should be clear now to just execute the I/O.
10678 	 */
10679 	retval = entry->execute(ctsio);
10680 
10681 bailout:
10682 	return (retval);
10683 }
10684 
10685 /*
10686  * Since we only implement one target right now, a bus reset simply resets
10687  * our single target.
10688  */
10689 static int
10690 ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io)
10691 {
10692 	return(ctl_target_reset(ctl_softc, io, CTL_UA_BUS_RESET));
10693 }
10694 
10695 static int
10696 ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
10697 		 ctl_ua_type ua_type)
10698 {
10699 	struct ctl_lun *lun;
10700 	int retval;
10701 
10702 	if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
10703 		union ctl_ha_msg msg_info;
10704 
10705 		io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
10706 		msg_info.hdr.nexus = io->io_hdr.nexus;
10707 		if (ua_type==CTL_UA_TARG_RESET)
10708 			msg_info.task.task_action = CTL_TASK_TARGET_RESET;
10709 		else
10710 			msg_info.task.task_action = CTL_TASK_BUS_RESET;
10711 		msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
10712 		msg_info.hdr.original_sc = NULL;
10713 		msg_info.hdr.serializing_sc = NULL;
10714 		if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL,
10715 		    (void *)&msg_info, sizeof(msg_info), 0)) {
10716 		}
10717 	}
10718 	retval = 0;
10719 
10720 	STAILQ_FOREACH(lun, &ctl_softc->lun_list, links)
10721 		retval += ctl_lun_reset(lun, io, ua_type);
10722 
10723 	return (retval);
10724 }
10725 
10726 /*
10727  * The LUN should always be set.  The I/O is optional, and is used to
10728  * distinguish between I/Os sent by this initiator, and by other
10729  * initiators.  We set unit attention for initiators other than this one.
10730  * SAM-3 is vague on this point.  It does say that a unit attention should
10731  * be established for other initiators when a LUN is reset (see section
10732  * 5.7.3), but it doesn't specifically say that the unit attention should
10733  * be established for this particular initiator when a LUN is reset.  Here
10734  * is the relevant text, from SAM-3 rev 8:
10735  *
10736  * 5.7.2 When a SCSI initiator port aborts its own tasks
10737  *
10738  * When a SCSI initiator port causes its own task(s) to be aborted, no
10739  * notification that the task(s) have been aborted shall be returned to
10740  * the SCSI initiator port other than the completion response for the
10741  * command or task management function action that caused the task(s) to
10742  * be aborted and notification(s) associated with related effects of the
10743  * action (e.g., a reset unit attention condition).
10744  *
10745  * XXX KDM for now, we're setting unit attention for all initiators.
10746  */
10747 static int
10748 ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
10749 {
10750 	union ctl_io *xio;
10751 #if 0
10752 	uint32_t initindex;
10753 #endif
10754 	int i;
10755 
10756 	/*
10757 	 * Run through the OOA queue and abort each I/O.
10758 	 */
10759 #if 0
10760 	TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) {
10761 #endif
10762 	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
10763 	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
10764 		xio->io_hdr.flags |= CTL_FLAG_ABORT;
10765 	}
10766 
10767 	/*
10768 	 * This version sets unit attention for every
10769 	 */
10770 #if 0
10771 	initindex = ctl_get_initindex(&io->io_hdr.nexus);
10772 	for (i = 0; i < CTL_MAX_INITIATORS; i++) {
10773 		if (initindex == i)
10774 			continue;
10775 		lun->pending_sense[i].ua_pending |= ua_type;
10776 	}
10777 #endif
10778 
10779 	/*
10780 	 * A reset (any kind, really) clears reservations established with
10781 	 * RESERVE/RELEASE.  It does not clear reservations established
10782 	 * with PERSISTENT RESERVE OUT, but we don't support that at the
10783 	 * moment anyway.  See SPC-2, section 5.6.  SPC-3 doesn't address
10784 	 * reservations made with the RESERVE/RELEASE commands, because
10785 	 * those commands are obsolete in SPC-3.
10786 	 */
10787 	lun->flags &= ~CTL_LUN_RESERVED;
10788 
10789 	for (i = 0; i < CTL_MAX_INITIATORS; i++) {
10790 		ctl_clear_mask(lun->have_ca, i);
10791 		lun->pending_sense[i].ua_pending |= ua_type;
10792 	}
10793 
10794 	return (0);
10795 }
10796 
10797 static int
10798 ctl_abort_task(union ctl_io *io)
10799 {
10800 	union ctl_io *xio;
10801 	struct ctl_lun *lun;
10802 	struct ctl_softc *ctl_softc;
10803 #if 0
10804 	struct sbuf sb;
10805 	char printbuf[128];
10806 #endif
10807 	int found;
10808 
10809 	ctl_softc = control_softc;
10810 	found = 0;
10811 
10812 	/*
10813 	 * Look up the LUN.
10814 	 */
10815 	if ((io->io_hdr.nexus.targ_lun < CTL_MAX_LUNS)
10816 	 && (ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun] != NULL))
10817 		lun = ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun];
10818 	else
10819 		goto bailout;
10820 
10821 #if 0
10822 	printf("ctl_abort_task: called for lun %lld, tag %d type %d\n",
10823 	       lun->lun, io->taskio.tag_num, io->taskio.tag_type);
10824 #endif
10825 
10826 	/*
10827 	 * Run through the OOA queue and attempt to find the given I/O.
10828 	 * The target port, initiator ID, tag type and tag number have to
10829 	 * match the values that we got from the initiator.  If we have an
10830 	 * untagged command to abort, simply abort the first untagged command
10831 	 * we come to.  We only allow one untagged command at a time of course.
10832 	 */
10833 #if 0
10834 	TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) {
10835 #endif
10836 	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
10837 	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
10838 #if 0
10839 		sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN);
10840 
10841 		sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ",
10842 			    lun->lun, xio->scsiio.tag_num,
10843 			    xio->scsiio.tag_type,
10844 			    (xio->io_hdr.blocked_links.tqe_prev
10845 			    == NULL) ? "" : " BLOCKED",
10846 			    (xio->io_hdr.flags &
10847 			    CTL_FLAG_DMA_INPROG) ? " DMA" : "",
10848 			    (xio->io_hdr.flags &
10849 			    CTL_FLAG_ABORT) ? " ABORT" : "",
10850 			    (xio->io_hdr.flags &
10851 			    CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : ""));
10852 		ctl_scsi_command_string(&xio->scsiio, NULL, &sb);
10853 		sbuf_finish(&sb);
10854 		printf("%s\n", sbuf_data(&sb));
10855 #endif
10856 
10857 		if ((xio->io_hdr.nexus.targ_port == io->io_hdr.nexus.targ_port)
10858 		 && (xio->io_hdr.nexus.initid.id ==
10859 		     io->io_hdr.nexus.initid.id)) {
10860 			/*
10861 			 * If the abort says that the task is untagged, the
10862 			 * task in the queue must be untagged.  Otherwise,
10863 			 * we just check to see whether the tag numbers
10864 			 * match.  This is because the QLogic firmware
10865 			 * doesn't pass back the tag type in an abort
10866 			 * request.
10867 			 */
10868 #if 0
10869 			if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED)
10870 			  && (io->taskio.tag_type == CTL_TAG_UNTAGGED))
10871 			 || (xio->scsiio.tag_num == io->taskio.tag_num)) {
10872 #endif
10873 			/*
10874 			 * XXX KDM we've got problems with FC, because it
10875 			 * doesn't send down a tag type with aborts.  So we
10876 			 * can only really go by the tag number...
10877 			 * This may cause problems with parallel SCSI.
10878 			 * Need to figure that out!!
10879 			 */
10880 			if (xio->scsiio.tag_num == io->taskio.tag_num) {
10881 				xio->io_hdr.flags |= CTL_FLAG_ABORT;
10882 				found = 1;
10883 				if ((io->io_hdr.flags &
10884 				     CTL_FLAG_FROM_OTHER_SC) == 0 &&
10885 				    !(lun->flags & CTL_LUN_PRIMARY_SC)) {
10886 					union ctl_ha_msg msg_info;
10887 
10888 					io->io_hdr.flags |=
10889 					                CTL_FLAG_SENT_2OTHER_SC;
10890 					msg_info.hdr.nexus = io->io_hdr.nexus;
10891 					msg_info.task.task_action =
10892 						CTL_TASK_ABORT_TASK;
10893 					msg_info.task.tag_num =
10894 						io->taskio.tag_num;
10895 					msg_info.task.tag_type =
10896 						io->taskio.tag_type;
10897 					msg_info.hdr.msg_type =
10898 						CTL_MSG_MANAGE_TASKS;
10899 					msg_info.hdr.original_sc = NULL;
10900 					msg_info.hdr.serializing_sc = NULL;
10901 #if 0
10902 					printf("Sent Abort to other side\n");
10903 #endif
10904 					if (CTL_HA_STATUS_SUCCESS !=
10905 					        ctl_ha_msg_send(CTL_HA_CHAN_CTL,
10906 		    				(void *)&msg_info,
10907 						sizeof(msg_info), 0)) {
10908 					}
10909 				}
10910 #if 0
10911 				printf("ctl_abort_task: found I/O to abort\n");
10912 #endif
10913 				break;
10914 			}
10915 		}
10916 	}
10917 
10918 bailout:
10919 
10920 	if (found == 0) {
10921 		/*
10922 		 * This isn't really an error.  It's entirely possible for
10923 		 * the abort and command completion to cross on the wire.
10924 		 * This is more of an informative/diagnostic error.
10925 		 */
10926 #if 0
10927 		printf("ctl_abort_task: ABORT sent for nonexistent I/O: "
10928 		       "%d:%d:%d:%d tag %d type %d\n",
10929 		       io->io_hdr.nexus.initid.id,
10930 		       io->io_hdr.nexus.targ_port,
10931 		       io->io_hdr.nexus.targ_target.id,
10932 		       io->io_hdr.nexus.targ_lun, io->taskio.tag_num,
10933 		       io->taskio.tag_type);
10934 #endif
10935 		return (1);
10936 	} else
10937 		return (0);
10938 }
10939 
10940 /*
10941  * Assumptions:  caller holds ctl_softc->ctl_lock
10942  *
10943  * This routine cannot block!  It must be callable from an interrupt
10944  * handler as well as from the work thread.
10945  */
10946 static void
10947 ctl_run_task_queue(struct ctl_softc *ctl_softc)
10948 {
10949 	union ctl_io *io, *next_io;
10950 
10951 	CTL_DEBUG_PRINT(("ctl_run_task_queue\n"));
10952 
10953 	for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->task_queue);
10954 	     io != NULL; io = next_io) {
10955 		int retval;
10956 		const char *task_desc;
10957 
10958 		next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
10959 
10960 		retval = 0;
10961 
10962 		switch (io->io_hdr.io_type) {
10963 		case CTL_IO_TASK: {
10964 			task_desc = ctl_scsi_task_string(&io->taskio);
10965 			if (task_desc != NULL) {
10966 #ifdef NEEDTOPORT
10967 				csevent_log(CSC_CTL | CSC_SHELF_SW |
10968 					    CTL_TASK_REPORT,
10969 					    csevent_LogType_Trace,
10970 					    csevent_Severity_Information,
10971 					    csevent_AlertLevel_Green,
10972 					    csevent_FRU_Firmware,
10973 					    csevent_FRU_Unknown,
10974 					    "CTL: received task: %s",task_desc);
10975 #endif
10976 			} else {
10977 #ifdef NEEDTOPORT
10978 				csevent_log(CSC_CTL | CSC_SHELF_SW |
10979 					    CTL_TASK_REPORT,
10980 					    csevent_LogType_Trace,
10981 					    csevent_Severity_Information,
10982 					    csevent_AlertLevel_Green,
10983 					    csevent_FRU_Firmware,
10984 					    csevent_FRU_Unknown,
10985 					    "CTL: received unknown task "
10986 					    "type: %d (%#x)",
10987 					    io->taskio.task_action,
10988 					    io->taskio.task_action);
10989 #endif
10990 			}
10991 			switch (io->taskio.task_action) {
10992 			case CTL_TASK_ABORT_TASK:
10993 				retval = ctl_abort_task(io);
10994 				break;
10995 			case CTL_TASK_ABORT_TASK_SET:
10996 				break;
10997 			case CTL_TASK_CLEAR_ACA:
10998 				break;
10999 			case CTL_TASK_CLEAR_TASK_SET:
11000 				break;
11001 			case CTL_TASK_LUN_RESET: {
11002 				struct ctl_lun *lun;
11003 				uint32_t targ_lun;
11004 				int retval;
11005 
11006 				targ_lun = io->io_hdr.nexus.targ_lun;
11007 
11008 				if ((targ_lun < CTL_MAX_LUNS)
11009 				 && (ctl_softc->ctl_luns[targ_lun] != NULL))
11010 					lun = ctl_softc->ctl_luns[targ_lun];
11011 				else {
11012 					retval = 1;
11013 					break;
11014 				}
11015 
11016 				if (!(io->io_hdr.flags &
11017 				    CTL_FLAG_FROM_OTHER_SC)) {
11018 					union ctl_ha_msg msg_info;
11019 
11020 					io->io_hdr.flags |=
11021 						CTL_FLAG_SENT_2OTHER_SC;
11022 					msg_info.hdr.msg_type =
11023 						CTL_MSG_MANAGE_TASKS;
11024 					msg_info.hdr.nexus = io->io_hdr.nexus;
11025 					msg_info.task.task_action =
11026 						CTL_TASK_LUN_RESET;
11027 					msg_info.hdr.original_sc = NULL;
11028 					msg_info.hdr.serializing_sc = NULL;
11029 					if (CTL_HA_STATUS_SUCCESS !=
11030 					    ctl_ha_msg_send(CTL_HA_CHAN_CTL,
11031 					    (void *)&msg_info,
11032 					    sizeof(msg_info), 0)) {
11033 					}
11034 				}
11035 
11036 				retval = ctl_lun_reset(lun, io,
11037 						       CTL_UA_LUN_RESET);
11038 				break;
11039 			}
11040 			case CTL_TASK_TARGET_RESET:
11041 				retval = ctl_target_reset(ctl_softc, io,
11042 							  CTL_UA_TARG_RESET);
11043 				break;
11044 			case CTL_TASK_BUS_RESET:
11045 				retval = ctl_bus_reset(ctl_softc, io);
11046 				break;
11047 			case CTL_TASK_PORT_LOGIN:
11048 				break;
11049 			case CTL_TASK_PORT_LOGOUT:
11050 				break;
11051 			default:
11052 				printf("ctl_run_task_queue: got unknown task "
11053 				       "management event %d\n",
11054 				       io->taskio.task_action);
11055 				break;
11056 			}
11057 			if (retval == 0)
11058 				io->io_hdr.status = CTL_SUCCESS;
11059 			else
11060 				io->io_hdr.status = CTL_ERROR;
11061 
11062 			STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr,
11063 				      ctl_io_hdr, links);
11064 			/*
11065 			 * This will queue this I/O to the done queue, but the
11066 			 * work thread won't be able to process it until we
11067 			 * return and the lock is released.
11068 			 */
11069 			ctl_done_lock(io, /*have_lock*/ 1);
11070 			break;
11071 		}
11072 		default: {
11073 
11074 			printf("%s: invalid I/O type %d msg %d cdb %x"
11075 			       " iptl: %ju:%d:%ju:%d tag 0x%04x\n",
11076 			       __func__, io->io_hdr.io_type,
11077 			       io->io_hdr.msg_type, io->scsiio.cdb[0],
11078 			       (uintmax_t)io->io_hdr.nexus.initid.id,
11079 			       io->io_hdr.nexus.targ_port,
11080 			       (uintmax_t)io->io_hdr.nexus.targ_target.id,
11081 			       io->io_hdr.nexus.targ_lun,
11082 			       (io->io_hdr.io_type == CTL_IO_TASK) ?
11083 			       io->taskio.tag_num : io->scsiio.tag_num);
11084 			STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr,
11085 				      ctl_io_hdr, links);
11086 			ctl_free_io_internal(io, 1);
11087 			break;
11088 		}
11089 		}
11090 	}
11091 
11092 	ctl_softc->flags &= ~CTL_FLAG_TASK_PENDING;
11093 }
11094 
11095 /*
11096  * For HA operation.  Handle commands that come in from the other
11097  * controller.
11098  */
11099 static void
11100 ctl_handle_isc(union ctl_io *io)
11101 {
11102 	int free_io;
11103 	struct ctl_lun *lun;
11104 	struct ctl_softc *ctl_softc;
11105 
11106 	ctl_softc = control_softc;
11107 
11108 	lun = ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun];
11109 
11110 	switch (io->io_hdr.msg_type) {
11111 	case CTL_MSG_SERIALIZE:
11112 		free_io = ctl_serialize_other_sc_cmd(&io->scsiio,
11113 						     /*have_lock*/ 0);
11114 		break;
11115 	case CTL_MSG_R2R: {
11116 		uint8_t opcode;
11117 		struct ctl_cmd_entry *entry;
11118 
11119 		/*
11120 		 * This is only used in SER_ONLY mode.
11121 		 */
11122 		free_io = 0;
11123 		opcode = io->scsiio.cdb[0];
11124 		entry = &ctl_cmd_table[opcode];
11125 		mtx_lock(&ctl_softc->ctl_lock);
11126 		if (ctl_scsiio_lun_check(ctl_softc, lun,
11127 		    entry, (struct ctl_scsiio *)io) != 0) {
11128 			ctl_done_lock(io, /*have_lock*/ 1);
11129 			mtx_unlock(&ctl_softc->ctl_lock);
11130 			break;
11131 		}
11132 		io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11133 		STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
11134 				   &io->io_hdr, links);
11135 		mtx_unlock(&ctl_softc->ctl_lock);
11136 		break;
11137 	}
11138 	case CTL_MSG_FINISH_IO:
11139 		if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
11140 			free_io = 0;
11141 			ctl_done_lock(io, /*have_lock*/ 0);
11142 		} else {
11143 			free_io = 1;
11144 			mtx_lock(&ctl_softc->ctl_lock);
11145 			TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr,
11146 				     ooa_links);
11147 			STAILQ_REMOVE(&ctl_softc->task_queue,
11148 				      &io->io_hdr, ctl_io_hdr, links);
11149 			ctl_check_blocked(lun);
11150 			mtx_unlock(&ctl_softc->ctl_lock);
11151 		}
11152 		break;
11153 	case CTL_MSG_PERS_ACTION:
11154 		ctl_hndl_per_res_out_on_other_sc(
11155 			(union ctl_ha_msg *)&io->presio.pr_msg);
11156 		free_io = 1;
11157 		break;
11158 	case CTL_MSG_BAD_JUJU:
11159 		free_io = 0;
11160 		ctl_done_lock(io, /*have_lock*/ 0);
11161 		break;
11162 	case CTL_MSG_DATAMOVE:
11163 		/* Only used in XFER mode */
11164 		free_io = 0;
11165 		ctl_datamove_remote(io);
11166 		break;
11167 	case CTL_MSG_DATAMOVE_DONE:
11168 		/* Only used in XFER mode */
11169 		free_io = 0;
11170 		io->scsiio.be_move_done(io);
11171 		break;
11172 	default:
11173 		free_io = 1;
11174 		printf("%s: Invalid message type %d\n",
11175 		       __func__, io->io_hdr.msg_type);
11176 		break;
11177 	}
11178 	if (free_io)
11179 		ctl_free_io_internal(io, 0);
11180 
11181 }
11182 
11183 
11184 /*
11185  * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if
11186  * there is no match.
11187  */
11188 static ctl_lun_error_pattern
11189 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
11190 {
11191 	struct ctl_cmd_entry *entry;
11192 	ctl_lun_error_pattern filtered_pattern, pattern;
11193 	uint8_t opcode;
11194 
11195 	pattern = desc->error_pattern;
11196 
11197 	/*
11198 	 * XXX KDM we need more data passed into this function to match a
11199 	 * custom pattern, and we actually need to implement custom pattern
11200 	 * matching.
11201 	 */
11202 	if (pattern & CTL_LUN_PAT_CMD)
11203 		return (CTL_LUN_PAT_CMD);
11204 
11205 	if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY)
11206 		return (CTL_LUN_PAT_ANY);
11207 
11208 	opcode = ctsio->cdb[0];
11209 	entry = &ctl_cmd_table[opcode];
11210 
11211 	filtered_pattern = entry->pattern & pattern;
11212 
11213 	/*
11214 	 * If the user requested specific flags in the pattern (e.g.
11215 	 * CTL_LUN_PAT_RANGE), make sure the command supports all of those
11216 	 * flags.
11217 	 *
11218 	 * If the user did not specify any flags, it doesn't matter whether
11219 	 * or not the command supports the flags.
11220 	 */
11221 	if ((filtered_pattern & ~CTL_LUN_PAT_MASK) !=
11222 	     (pattern & ~CTL_LUN_PAT_MASK))
11223 		return (CTL_LUN_PAT_NONE);
11224 
11225 	/*
11226 	 * If the user asked for a range check, see if the requested LBA
11227 	 * range overlaps with this command's LBA range.
11228 	 */
11229 	if (filtered_pattern & CTL_LUN_PAT_RANGE) {
11230 		uint64_t lba1;
11231 		uint32_t len1;
11232 		ctl_action action;
11233 		int retval;
11234 
11235 		retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1);
11236 		if (retval != 0)
11237 			return (CTL_LUN_PAT_NONE);
11238 
11239 		action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba,
11240 					      desc->lba_range.len);
11241 		/*
11242 		 * A "pass" means that the LBA ranges don't overlap, so
11243 		 * this doesn't match the user's range criteria.
11244 		 */
11245 		if (action == CTL_ACTION_PASS)
11246 			return (CTL_LUN_PAT_NONE);
11247 	}
11248 
11249 	return (filtered_pattern);
11250 }
11251 
11252 /*
11253  * Called with the CTL lock held.
11254  */
11255 static void
11256 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io)
11257 {
11258 	struct ctl_error_desc *desc, *desc2;
11259 
11260 	STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
11261 		ctl_lun_error_pattern pattern;
11262 		/*
11263 		 * Check to see whether this particular command matches
11264 		 * the pattern in the descriptor.
11265 		 */
11266 		pattern = ctl_cmd_pattern_match(&io->scsiio, desc);
11267 		if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE)
11268 			continue;
11269 
11270 		switch (desc->lun_error & CTL_LUN_INJ_TYPE) {
11271 		case CTL_LUN_INJ_ABORTED:
11272 			ctl_set_aborted(&io->scsiio);
11273 			break;
11274 		case CTL_LUN_INJ_MEDIUM_ERR:
11275 			ctl_set_medium_error(&io->scsiio);
11276 			break;
11277 		case CTL_LUN_INJ_UA:
11278 			/* 29h/00h  POWER ON, RESET, OR BUS DEVICE RESET
11279 			 * OCCURRED */
11280 			ctl_set_ua(&io->scsiio, 0x29, 0x00);
11281 			break;
11282 		case CTL_LUN_INJ_CUSTOM:
11283 			/*
11284 			 * We're assuming the user knows what he is doing.
11285 			 * Just copy the sense information without doing
11286 			 * checks.
11287 			 */
11288 			bcopy(&desc->custom_sense, &io->scsiio.sense_data,
11289 			      ctl_min(sizeof(desc->custom_sense),
11290 				      sizeof(io->scsiio.sense_data)));
11291 			io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND;
11292 			io->scsiio.sense_len = SSD_FULL_SIZE;
11293 			io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
11294 			break;
11295 		case CTL_LUN_INJ_NONE:
11296 		default:
11297 			/*
11298 			 * If this is an error injection type we don't know
11299 			 * about, clear the continuous flag (if it is set)
11300 			 * so it will get deleted below.
11301 			 */
11302 			desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS;
11303 			break;
11304 		}
11305 		/*
11306 		 * By default, each error injection action is a one-shot
11307 		 */
11308 		if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS)
11309 			continue;
11310 
11311 		STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links);
11312 
11313 		free(desc, M_CTL);
11314 	}
11315 }
11316 
11317 #ifdef CTL_IO_DELAY
11318 static void
11319 ctl_datamove_timer_wakeup(void *arg)
11320 {
11321 	union ctl_io *io;
11322 
11323 	io = (union ctl_io *)arg;
11324 
11325 	ctl_datamove(io);
11326 }
11327 #endif /* CTL_IO_DELAY */
11328 
11329 /*
11330  * Assumption:  caller does NOT hold ctl_lock
11331  */
11332 void
11333 ctl_datamove(union ctl_io *io)
11334 {
11335 	void (*fe_datamove)(union ctl_io *io);
11336 
11337 	CTL_DEBUG_PRINT(("ctl_datamove\n"));
11338 
11339 #ifdef CTL_TIME_IO
11340 	if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
11341 		char str[256];
11342 		char path_str[64];
11343 		struct sbuf sb;
11344 
11345 		ctl_scsi_path_string(io, path_str, sizeof(path_str));
11346 		sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
11347 
11348 		sbuf_cat(&sb, path_str);
11349 		switch (io->io_hdr.io_type) {
11350 		case CTL_IO_SCSI:
11351 			ctl_scsi_command_string(&io->scsiio, NULL, &sb);
11352 			sbuf_printf(&sb, "\n");
11353 			sbuf_cat(&sb, path_str);
11354 			sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
11355 				    io->scsiio.tag_num, io->scsiio.tag_type);
11356 			break;
11357 		case CTL_IO_TASK:
11358 			sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
11359 				    "Tag Type: %d\n", io->taskio.task_action,
11360 				    io->taskio.tag_num, io->taskio.tag_type);
11361 			break;
11362 		default:
11363 			printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
11364 			panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
11365 			break;
11366 		}
11367 		sbuf_cat(&sb, path_str);
11368 		sbuf_printf(&sb, "ctl_datamove: %jd seconds\n",
11369 			    (intmax_t)time_uptime - io->io_hdr.start_time);
11370 		sbuf_finish(&sb);
11371 		printf("%s", sbuf_data(&sb));
11372 	}
11373 #endif /* CTL_TIME_IO */
11374 
11375 	mtx_lock(&control_softc->ctl_lock);
11376 #ifdef CTL_IO_DELAY
11377 	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
11378 		struct ctl_lun *lun;
11379 
11380 		lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
11381 
11382 		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
11383 	} else {
11384 		struct ctl_lun *lun;
11385 
11386 		lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
11387 		if ((lun != NULL)
11388 		 && (lun->delay_info.datamove_delay > 0)) {
11389 			struct callout *callout;
11390 
11391 			callout = (struct callout *)&io->io_hdr.timer_bytes;
11392 			callout_init(callout, /*mpsafe*/ 1);
11393 			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
11394 			callout_reset(callout,
11395 				      lun->delay_info.datamove_delay * hz,
11396 				      ctl_datamove_timer_wakeup, io);
11397 			if (lun->delay_info.datamove_type ==
11398 			    CTL_DELAY_TYPE_ONESHOT)
11399 				lun->delay_info.datamove_delay = 0;
11400 			mtx_unlock(&control_softc->ctl_lock);
11401 			return;
11402 		}
11403 	}
11404 #endif
11405 	/*
11406 	 * If we have any pending task management commands, process them
11407 	 * first.  This is necessary to eliminate a race condition with the
11408 	 * FETD:
11409 	 *
11410 	 * - FETD submits a task management command, like an abort.
11411 	 * - Back end calls fe_datamove() to move the data for the aborted
11412 	 *   command.  The FETD can't really accept it, but if it did, it
11413 	 *   would end up transmitting data for a command that the initiator
11414 	 *   told us to abort.
11415 	 *
11416 	 * We close the race by processing all pending task management
11417 	 * commands here (we can't block!), and then check this I/O to see
11418 	 * if it has been aborted.  If so, return it to the back end with
11419 	 * bad status, so the back end can say return an error to the back end
11420 	 * and then when the back end returns an error, we can return the
11421 	 * aborted command to the FETD, so it can clean up its resources.
11422 	 */
11423 	if (control_softc->flags & CTL_FLAG_TASK_PENDING)
11424 		ctl_run_task_queue(control_softc);
11425 
11426 	/*
11427 	 * This command has been aborted.  Set the port status, so we fail
11428 	 * the data move.
11429 	 */
11430 	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
11431 		printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n",
11432 		       io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id,
11433 		       io->io_hdr.nexus.targ_port,
11434 		       (uintmax_t)io->io_hdr.nexus.targ_target.id,
11435 		       io->io_hdr.nexus.targ_lun);
11436 		io->io_hdr.status = CTL_CMD_ABORTED;
11437 		io->io_hdr.port_status = 31337;
11438 		mtx_unlock(&control_softc->ctl_lock);
11439 		/*
11440 		 * Note that the backend, in this case, will get the
11441 		 * callback in its context.  In other cases it may get
11442 		 * called in the frontend's interrupt thread context.
11443 		 */
11444 		io->scsiio.be_move_done(io);
11445 		return;
11446 	}
11447 
11448 	/*
11449 	 * If we're in XFER mode and this I/O is from the other shelf
11450 	 * controller, we need to send the DMA to the other side to
11451 	 * actually transfer the data to/from the host.  In serialize only
11452 	 * mode the transfer happens below CTL and ctl_datamove() is only
11453 	 * called on the machine that originally received the I/O.
11454 	 */
11455 	if ((control_softc->ha_mode == CTL_HA_MODE_XFER)
11456 	 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
11457 		union ctl_ha_msg msg;
11458 		uint32_t sg_entries_sent;
11459 		int do_sg_copy;
11460 		int i;
11461 
11462 		memset(&msg, 0, sizeof(msg));
11463 		msg.hdr.msg_type = CTL_MSG_DATAMOVE;
11464 		msg.hdr.original_sc = io->io_hdr.original_sc;
11465 		msg.hdr.serializing_sc = io;
11466 		msg.hdr.nexus = io->io_hdr.nexus;
11467 		msg.dt.flags = io->io_hdr.flags;
11468 		/*
11469 		 * We convert everything into a S/G list here.  We can't
11470 		 * pass by reference, only by value between controllers.
11471 		 * So we can't pass a pointer to the S/G list, only as many
11472 		 * S/G entries as we can fit in here.  If it's possible for
11473 		 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
11474 		 * then we need to break this up into multiple transfers.
11475 		 */
11476 		if (io->scsiio.kern_sg_entries == 0) {
11477 			msg.dt.kern_sg_entries = 1;
11478 			/*
11479 			 * If this is in cached memory, flush the cache
11480 			 * before we send the DMA request to the other
11481 			 * controller.  We want to do this in either the
11482 			 * read or the write case.  The read case is
11483 			 * straightforward.  In the write case, we want to
11484 			 * make sure nothing is in the local cache that
11485 			 * could overwrite the DMAed data.
11486 			 */
11487 			if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
11488 				/*
11489 				 * XXX KDM use bus_dmamap_sync() here.
11490 				 */
11491 			}
11492 
11493 			/*
11494 			 * Convert to a physical address if this is a
11495 			 * virtual address.
11496 			 */
11497 			if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
11498 				msg.dt.sg_list[0].addr =
11499 					io->scsiio.kern_data_ptr;
11500 			} else {
11501 				/*
11502 				 * XXX KDM use busdma here!
11503 				 */
11504 #if 0
11505 				msg.dt.sg_list[0].addr = (void *)
11506 					vtophys(io->scsiio.kern_data_ptr);
11507 #endif
11508 			}
11509 
11510 			msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
11511 			do_sg_copy = 0;
11512 		} else {
11513 			struct ctl_sg_entry *sgl;
11514 
11515 			do_sg_copy = 1;
11516 			msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
11517 			sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
11518 			if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
11519 				/*
11520 				 * XXX KDM use bus_dmamap_sync() here.
11521 				 */
11522 			}
11523 		}
11524 
11525 		msg.dt.kern_data_len = io->scsiio.kern_data_len;
11526 		msg.dt.kern_total_len = io->scsiio.kern_total_len;
11527 		msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
11528 		msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
11529 		msg.dt.sg_sequence = 0;
11530 
11531 		/*
11532 		 * Loop until we've sent all of the S/G entries.  On the
11533 		 * other end, we'll recompose these S/G entries into one
11534 		 * contiguous list before passing it to the
11535 		 */
11536 		for (sg_entries_sent = 0; sg_entries_sent <
11537 		     msg.dt.kern_sg_entries; msg.dt.sg_sequence++) {
11538 			msg.dt.cur_sg_entries = ctl_min((sizeof(msg.dt.sg_list)/
11539 				sizeof(msg.dt.sg_list[0])),
11540 				msg.dt.kern_sg_entries - sg_entries_sent);
11541 
11542 			if (do_sg_copy != 0) {
11543 				struct ctl_sg_entry *sgl;
11544 				int j;
11545 
11546 				sgl = (struct ctl_sg_entry *)
11547 					io->scsiio.kern_data_ptr;
11548 				/*
11549 				 * If this is in cached memory, flush the cache
11550 				 * before we send the DMA request to the other
11551 				 * controller.  We want to do this in either
11552 				 * the * read or the write case.  The read
11553 				 * case is straightforward.  In the write
11554 				 * case, we want to make sure nothing is
11555 				 * in the local cache that could overwrite
11556 				 * the DMAed data.
11557 				 */
11558 
11559 				for (i = sg_entries_sent, j = 0;
11560 				     i < msg.dt.cur_sg_entries; i++, j++) {
11561 					if ((io->io_hdr.flags &
11562 					     CTL_FLAG_NO_DATASYNC) == 0) {
11563 						/*
11564 						 * XXX KDM use bus_dmamap_sync()
11565 						 */
11566 					}
11567 					if ((io->io_hdr.flags &
11568 					     CTL_FLAG_BUS_ADDR) == 0) {
11569 						/*
11570 						 * XXX KDM use busdma.
11571 						 */
11572 #if 0
11573 						msg.dt.sg_list[j].addr =(void *)
11574 						       vtophys(sgl[i].addr);
11575 #endif
11576 					} else {
11577 						msg.dt.sg_list[j].addr =
11578 							sgl[i].addr;
11579 					}
11580 					msg.dt.sg_list[j].len = sgl[i].len;
11581 				}
11582 			}
11583 
11584 			sg_entries_sent += msg.dt.cur_sg_entries;
11585 			if (sg_entries_sent >= msg.dt.kern_sg_entries)
11586 				msg.dt.sg_last = 1;
11587 			else
11588 				msg.dt.sg_last = 0;
11589 
11590 			/*
11591 			 * XXX KDM drop and reacquire the lock here?
11592 			 */
11593 			if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
11594 			    sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
11595 				/*
11596 				 * XXX do something here.
11597 				 */
11598 			}
11599 
11600 			msg.dt.sent_sg_entries = sg_entries_sent;
11601 		}
11602 		io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
11603 		if (io->io_hdr.flags & CTL_FLAG_FAILOVER)
11604 			ctl_failover_io(io, /*have_lock*/ 1);
11605 
11606 	} else {
11607 
11608 		/*
11609 		 * Lookup the fe_datamove() function for this particular
11610 		 * front end.
11611 		 */
11612 		fe_datamove =
11613 		    control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
11614 		mtx_unlock(&control_softc->ctl_lock);
11615 
11616 		fe_datamove(io);
11617 	}
11618 }
11619 
11620 static void
11621 ctl_send_datamove_done(union ctl_io *io, int have_lock)
11622 {
11623 	union ctl_ha_msg msg;
11624 	int isc_status;
11625 
11626 	memset(&msg, 0, sizeof(msg));
11627 
11628 	msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
11629 	msg.hdr.original_sc = io;
11630 	msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
11631 	msg.hdr.nexus = io->io_hdr.nexus;
11632 	msg.hdr.status = io->io_hdr.status;
11633 	msg.scsi.tag_num = io->scsiio.tag_num;
11634 	msg.scsi.tag_type = io->scsiio.tag_type;
11635 	msg.scsi.scsi_status = io->scsiio.scsi_status;
11636 	memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
11637 	       sizeof(io->scsiio.sense_data));
11638 	msg.scsi.sense_len = io->scsiio.sense_len;
11639 	msg.scsi.sense_residual = io->scsiio.sense_residual;
11640 	msg.scsi.fetd_status = io->io_hdr.port_status;
11641 	msg.scsi.residual = io->scsiio.residual;
11642 	io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
11643 
11644 	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
11645 		ctl_failover_io(io, /*have_lock*/ have_lock);
11646 		return;
11647 	}
11648 
11649 	isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0);
11650 	if (isc_status > CTL_HA_STATUS_SUCCESS) {
11651 		/* XXX do something if this fails */
11652 	}
11653 
11654 }
11655 
11656 /*
11657  * The DMA to the remote side is done, now we need to tell the other side
11658  * we're done so it can continue with its data movement.
11659  */
11660 static void
11661 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
11662 {
11663 	union ctl_io *io;
11664 
11665 	io = rq->context;
11666 
11667 	if (rq->ret != CTL_HA_STATUS_SUCCESS) {
11668 		printf("%s: ISC DMA write failed with error %d", __func__,
11669 		       rq->ret);
11670 		ctl_set_internal_failure(&io->scsiio,
11671 					 /*sks_valid*/ 1,
11672 					 /*retry_count*/ rq->ret);
11673 	}
11674 
11675 	ctl_dt_req_free(rq);
11676 
11677 	/*
11678 	 * In this case, we had to malloc the memory locally.  Free it.
11679 	 */
11680 	if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
11681 		int i;
11682 		for (i = 0; i < io->scsiio.kern_sg_entries; i++)
11683 			free(io->io_hdr.local_sglist[i].addr, M_CTL);
11684 	}
11685 	/*
11686 	 * The data is in local and remote memory, so now we need to send
11687 	 * status (good or back) back to the other side.
11688 	 */
11689 	ctl_send_datamove_done(io, /*have_lock*/ 0);
11690 }
11691 
11692 /*
11693  * We've moved the data from the host/controller into local memory.  Now we
11694  * need to push it over to the remote controller's memory.
11695  */
11696 static int
11697 ctl_datamove_remote_dm_write_cb(union ctl_io *io)
11698 {
11699 	int retval;
11700 
11701 	retval = 0;
11702 
11703 	retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE,
11704 					  ctl_datamove_remote_write_cb);
11705 
11706 	return (retval);
11707 }
11708 
11709 static void
11710 ctl_datamove_remote_write(union ctl_io *io)
11711 {
11712 	int retval;
11713 	void (*fe_datamove)(union ctl_io *io);
11714 
11715 	/*
11716 	 * - Get the data from the host/HBA into local memory.
11717 	 * - DMA memory from the local controller to the remote controller.
11718 	 * - Send status back to the remote controller.
11719 	 */
11720 
11721 	retval = ctl_datamove_remote_sgl_setup(io);
11722 	if (retval != 0)
11723 		return;
11724 
11725 	/* Switch the pointer over so the FETD knows what to do */
11726 	io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
11727 
11728 	/*
11729 	 * Use a custom move done callback, since we need to send completion
11730 	 * back to the other controller, not to the backend on this side.
11731 	 */
11732 	io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;
11733 
11734 	fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
11735 
11736 	fe_datamove(io);
11737 
11738 	return;
11739 
11740 }
11741 
11742 static int
11743 ctl_datamove_remote_dm_read_cb(union ctl_io *io)
11744 {
11745 #if 0
11746 	char str[256];
11747 	char path_str[64];
11748 	struct sbuf sb;
11749 #endif
11750 
11751 	/*
11752 	 * In this case, we had to malloc the memory locally.  Free it.
11753 	 */
11754 	if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
11755 		int i;
11756 		for (i = 0; i < io->scsiio.kern_sg_entries; i++)
11757 			free(io->io_hdr.local_sglist[i].addr, M_CTL);
11758 	}
11759 
11760 #if 0
11761 	scsi_path_string(io, path_str, sizeof(path_str));
11762 	sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
11763 	sbuf_cat(&sb, path_str);
11764 	scsi_command_string(&io->scsiio, NULL, &sb);
11765 	sbuf_printf(&sb, "\n");
11766 	sbuf_cat(&sb, path_str);
11767 	sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
11768 		    io->scsiio.tag_num, io->scsiio.tag_type);
11769 	sbuf_cat(&sb, path_str);
11770 	sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__,
11771 		    io->io_hdr.flags, io->io_hdr.status);
11772 	sbuf_finish(&sb);
11773 	printk("%s", sbuf_data(&sb));
11774 #endif
11775 
11776 
11777 	/*
11778 	 * The read is done, now we need to send status (good or bad) back
11779 	 * to the other side.
11780 	 */
11781 	ctl_send_datamove_done(io, /*have_lock*/ 0);
11782 
11783 	return (0);
11784 }
11785 
11786 static void
11787 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
11788 {
11789 	union ctl_io *io;
11790 	void (*fe_datamove)(union ctl_io *io);
11791 
11792 	io = rq->context;
11793 
11794 	if (rq->ret != CTL_HA_STATUS_SUCCESS) {
11795 		printf("%s: ISC DMA read failed with error %d", __func__,
11796 		       rq->ret);
11797 		ctl_set_internal_failure(&io->scsiio,
11798 					 /*sks_valid*/ 1,
11799 					 /*retry_count*/ rq->ret);
11800 	}
11801 
11802 	ctl_dt_req_free(rq);
11803 
11804 	/* Switch the pointer over so the FETD knows what to do */
11805 	io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
11806 
11807 	/*
11808 	 * Use a custom move done callback, since we need to send completion
11809 	 * back to the other controller, not to the backend on this side.
11810 	 */
11811 	io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb;
11812 
11813 	/* XXX KDM add checks like the ones in ctl_datamove? */
11814 
11815 	fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
11816 
11817 	fe_datamove(io);
11818 }
11819 
11820 static int
11821 ctl_datamove_remote_sgl_setup(union ctl_io *io)
11822 {
11823 	struct ctl_sg_entry *local_sglist, *remote_sglist;
11824 	struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist;
11825 	struct ctl_softc *softc;
11826 	int retval;
11827 	int i;
11828 
11829 	retval = 0;
11830 	softc = control_softc;
11831 
11832 	local_sglist = io->io_hdr.local_sglist;
11833 	local_dma_sglist = io->io_hdr.local_dma_sglist;
11834 	remote_sglist = io->io_hdr.remote_sglist;
11835 	remote_dma_sglist = io->io_hdr.remote_dma_sglist;
11836 
11837 	if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) {
11838 		for (i = 0; i < io->scsiio.kern_sg_entries; i++) {
11839 			local_sglist[i].len = remote_sglist[i].len;
11840 
11841 			/*
11842 			 * XXX Detect the situation where the RS-level I/O
11843 			 * redirector on the other side has already read the
11844 			 * data off of the AOR RS on this side, and
11845 			 * transferred it to remote (mirror) memory on the
11846 			 * other side.  Since we already have the data in
11847 			 * memory here, we just need to use it.
11848 			 *
11849 			 * XXX KDM this can probably be removed once we
11850 			 * get the cache device code in and take the
11851 			 * current AOR implementation out.
11852 			 */
11853 #ifdef NEEDTOPORT
11854 			if ((remote_sglist[i].addr >=
11855 			     (void *)vtophys(softc->mirr->addr))
11856 			 && (remote_sglist[i].addr <
11857 			     ((void *)vtophys(softc->mirr->addr) +
11858 			     CacheMirrorOffset))) {
11859 				local_sglist[i].addr = remote_sglist[i].addr -
11860 					CacheMirrorOffset;
11861 				if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
11862 				     CTL_FLAG_DATA_IN)
11863 					io->io_hdr.flags |= CTL_FLAG_REDIR_DONE;
11864 			} else {
11865 				local_sglist[i].addr = remote_sglist[i].addr +
11866 					CacheMirrorOffset;
11867 			}
11868 #endif
11869 #if 0
11870 			printf("%s: local %p, remote %p, len %d\n",
11871 			       __func__, local_sglist[i].addr,
11872 			       remote_sglist[i].addr, local_sglist[i].len);
11873 #endif
11874 		}
11875 	} else {
11876 		uint32_t len_to_go;
11877 
11878 		/*
11879 		 * In this case, we don't have automatically allocated
11880 		 * memory for this I/O on this controller.  This typically
11881 		 * happens with internal CTL I/O -- e.g. inquiry, mode
11882 		 * sense, etc.  Anything coming from RAIDCore will have
11883 		 * a mirror area available.
11884 		 */
11885 		len_to_go = io->scsiio.kern_data_len;
11886 
11887 		/*
11888 		 * Clear the no datasync flag, we have to use malloced
11889 		 * buffers.
11890 		 */
11891 		io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC;
11892 
11893 		/*
11894 		 * The difficult thing here is that the size of the various
11895 		 * S/G segments may be different than the size from the
11896 		 * remote controller.  That'll make it harder when DMAing
11897 		 * the data back to the other side.
11898 		 */
11899 		for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) /
11900 		     sizeof(io->io_hdr.remote_sglist[0])) &&
11901 		     (len_to_go > 0); i++) {
11902 			local_sglist[i].len = ctl_min(len_to_go, 131072);
11903 			CTL_SIZE_8B(local_dma_sglist[i].len,
11904 				    local_sglist[i].len);
11905 			local_sglist[i].addr =
11906 				malloc(local_dma_sglist[i].len, M_CTL,M_WAITOK);
11907 
11908 			local_dma_sglist[i].addr = local_sglist[i].addr;
11909 
11910 			if (local_sglist[i].addr == NULL) {
11911 				int j;
11912 
11913 				printf("malloc failed for %zd bytes!",
11914 				       local_dma_sglist[i].len);
11915 				for (j = 0; j < i; j++) {
11916 					free(local_sglist[j].addr, M_CTL);
11917 				}
11918 				ctl_set_internal_failure(&io->scsiio,
11919 							 /*sks_valid*/ 1,
11920 							 /*retry_count*/ 4857);
11921 				retval = 1;
11922 				goto bailout_error;
11923 
11924 			}
11925 			/* XXX KDM do we need a sync here? */
11926 
11927 			len_to_go -= local_sglist[i].len;
11928 		}
11929 		/*
11930 		 * Reset the number of S/G entries accordingly.  The
11931 		 * original number of S/G entries is available in
11932 		 * rem_sg_entries.
11933 		 */
11934 		io->scsiio.kern_sg_entries = i;
11935 
11936 #if 0
11937 		printf("%s: kern_sg_entries = %d\n", __func__,
11938 		       io->scsiio.kern_sg_entries);
11939 		for (i = 0; i < io->scsiio.kern_sg_entries; i++)
11940 			printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i,
11941 			       local_sglist[i].addr, local_sglist[i].len,
11942 			       local_dma_sglist[i].len);
11943 #endif
11944 	}
11945 
11946 
11947 	return (retval);
11948 
11949 bailout_error:
11950 
11951 	ctl_send_datamove_done(io, /*have_lock*/ 0);
11952 
11953 	return (retval);
11954 }
11955 
11956 static int
11957 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
11958 			 ctl_ha_dt_cb callback)
11959 {
11960 	struct ctl_ha_dt_req *rq;
11961 	struct ctl_sg_entry *remote_sglist, *local_sglist;
11962 	struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist;
11963 	uint32_t local_used, remote_used, total_used;
11964 	int retval;
11965 	int i, j;
11966 
11967 	retval = 0;
11968 
11969 	rq = ctl_dt_req_alloc();
11970 
11971 	/*
11972 	 * If we failed to allocate the request, and if the DMA didn't fail
11973 	 * anyway, set busy status.  This is just a resource allocation
11974 	 * failure.
11975 	 */
11976 	if ((rq == NULL)
11977 	 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE))
11978 		ctl_set_busy(&io->scsiio);
11979 
11980 	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) {
11981 
11982 		if (rq != NULL)
11983 			ctl_dt_req_free(rq);
11984 
11985 		/*
11986 		 * The data move failed.  We need to return status back
11987 		 * to the other controller.  No point in trying to DMA
11988 		 * data to the remote controller.
11989 		 */
11990 
11991 		ctl_send_datamove_done(io, /*have_lock*/ 0);
11992 
11993 		retval = 1;
11994 
11995 		goto bailout;
11996 	}
11997 
11998 	local_sglist = io->io_hdr.local_sglist;
11999 	local_dma_sglist = io->io_hdr.local_dma_sglist;
12000 	remote_sglist = io->io_hdr.remote_sglist;
12001 	remote_dma_sglist = io->io_hdr.remote_dma_sglist;
12002 	local_used = 0;
12003 	remote_used = 0;
12004 	total_used = 0;
12005 
12006 	if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) {
12007 		rq->ret = CTL_HA_STATUS_SUCCESS;
12008 		rq->context = io;
12009 		callback(rq);
12010 		goto bailout;
12011 	}
12012 
12013 	/*
12014 	 * Pull/push the data over the wire from/to the other controller.
12015 	 * This takes into account the possibility that the local and
12016 	 * remote sglists may not be identical in terms of the size of
12017 	 * the elements and the number of elements.
12018 	 *
12019 	 * One fundamental assumption here is that the length allocated for
12020 	 * both the local and remote sglists is identical.  Otherwise, we've
12021 	 * essentially got a coding error of some sort.
12022 	 */
12023 	for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) {
12024 		int isc_ret;
12025 		uint32_t cur_len, dma_length;
12026 		uint8_t *tmp_ptr;
12027 
12028 		rq->id = CTL_HA_DATA_CTL;
12029 		rq->command = command;
12030 		rq->context = io;
12031 
12032 		/*
12033 		 * Both pointers should be aligned.  But it is possible
12034 		 * that the allocation length is not.  They should both
12035 		 * also have enough slack left over at the end, though,
12036 		 * to round up to the next 8 byte boundary.
12037 		 */
12038 		cur_len = ctl_min(local_sglist[i].len - local_used,
12039 				  remote_sglist[j].len - remote_used);
12040 
12041 		/*
12042 		 * In this case, we have a size issue and need to decrease
12043 		 * the size, except in the case where we actually have less
12044 		 * than 8 bytes left.  In that case, we need to increase
12045 		 * the DMA length to get the last bit.
12046 		 */
12047 		if ((cur_len & 0x7) != 0) {
12048 			if (cur_len > 0x7) {
12049 				cur_len = cur_len - (cur_len & 0x7);
12050 				dma_length = cur_len;
12051 			} else {
12052 				CTL_SIZE_8B(dma_length, cur_len);
12053 			}
12054 
12055 		} else
12056 			dma_length = cur_len;
12057 
12058 		/*
12059 		 * If we had to allocate memory for this I/O, instead of using
12060 		 * the non-cached mirror memory, we'll need to flush the cache
12061 		 * before trying to DMA to the other controller.
12062 		 *
12063 		 * We could end up doing this multiple times for the same
12064 		 * segment if we have a larger local segment than remote
12065 		 * segment.  That shouldn't be an issue.
12066 		 */
12067 		if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
12068 			/*
12069 			 * XXX KDM use bus_dmamap_sync() here.
12070 			 */
12071 		}
12072 
12073 		rq->size = dma_length;
12074 
12075 		tmp_ptr = (uint8_t *)local_sglist[i].addr;
12076 		tmp_ptr += local_used;
12077 
12078 		/* Use physical addresses when talking to ISC hardware */
12079 		if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) {
12080 			/* XXX KDM use busdma */
12081 #if 0
12082 			rq->local = vtophys(tmp_ptr);
12083 #endif
12084 		} else
12085 			rq->local = tmp_ptr;
12086 
12087 		tmp_ptr = (uint8_t *)remote_sglist[j].addr;
12088 		tmp_ptr += remote_used;
12089 		rq->remote = tmp_ptr;
12090 
12091 		rq->callback = NULL;
12092 
12093 		local_used += cur_len;
12094 		if (local_used >= local_sglist[i].len) {
12095 			i++;
12096 			local_used = 0;
12097 		}
12098 
12099 		remote_used += cur_len;
12100 		if (remote_used >= remote_sglist[j].len) {
12101 			j++;
12102 			remote_used = 0;
12103 		}
12104 		total_used += cur_len;
12105 
12106 		if (total_used >= io->scsiio.kern_data_len)
12107 			rq->callback = callback;
12108 
12109 		if ((rq->size & 0x7) != 0) {
12110 			printf("%s: warning: size %d is not on 8b boundary\n",
12111 			       __func__, rq->size);
12112 		}
12113 		if (((uintptr_t)rq->local & 0x7) != 0) {
12114 			printf("%s: warning: local %p not on 8b boundary\n",
12115 			       __func__, rq->local);
12116 		}
12117 		if (((uintptr_t)rq->remote & 0x7) != 0) {
12118 			printf("%s: warning: remote %p not on 8b boundary\n",
12119 			       __func__, rq->local);
12120 		}
12121 #if 0
12122 		printf("%s: %s: local %#x remote %#x size %d\n", __func__,
12123 		       (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ",
12124 		       rq->local, rq->remote, rq->size);
12125 #endif
12126 
12127 		isc_ret = ctl_dt_single(rq);
12128 		if (isc_ret == CTL_HA_STATUS_WAIT)
12129 			continue;
12130 
12131 		if (isc_ret == CTL_HA_STATUS_DISCONNECT) {
12132 			rq->ret = CTL_HA_STATUS_SUCCESS;
12133 		} else {
12134 			rq->ret = isc_ret;
12135 		}
12136 		callback(rq);
12137 		goto bailout;
12138 	}
12139 
12140 bailout:
12141 	return (retval);
12142 
12143 }
12144 
12145 static void
12146 ctl_datamove_remote_read(union ctl_io *io)
12147 {
12148 	int retval;
12149 	int i;
12150 
12151 	/*
12152 	 * This will send an error to the other controller in the case of a
12153 	 * failure.
12154 	 */
12155 	retval = ctl_datamove_remote_sgl_setup(io);
12156 	if (retval != 0)
12157 		return;
12158 
12159 	retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ,
12160 					  ctl_datamove_remote_read_cb);
12161 	if ((retval != 0)
12162 	 && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) {
12163 		/*
12164 		 * Make sure we free memory if there was an error..  The
12165 		 * ctl_datamove_remote_xfer() function will send the
12166 		 * datamove done message, or call the callback with an
12167 		 * error if there is a problem.
12168 		 */
12169 		for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12170 			free(io->io_hdr.local_sglist[i].addr, M_CTL);
12171 	}
12172 
12173 	return;
12174 }
12175 
12176 /*
12177  * Process a datamove request from the other controller.  This is used for
12178  * XFER mode only, not SER_ONLY mode.  For writes, we DMA into local memory
12179  * first.  Once that is complete, the data gets DMAed into the remote
12180  * controller's memory.  For reads, we DMA from the remote controller's
12181  * memory into our memory first, and then move it out to the FETD.
12182  *
12183  * Should be called without the ctl_lock held.
12184  */
12185 static void
12186 ctl_datamove_remote(union ctl_io *io)
12187 {
12188 	struct ctl_softc *softc;
12189 
12190 	softc = control_softc;
12191 
12192 	/*
12193 	 * Note that we look for an aborted I/O here, but don't do some of
12194 	 * the other checks that ctl_datamove() normally does.  We don't
12195 	 * need to run the task queue, because this I/O is on the ISC
12196 	 * queue, which is executed by the work thread after the task queue.
12197 	 * We don't need to run the datamove delay code, since that should
12198 	 * have been done if need be on the other controller.
12199 	 */
12200 	mtx_lock(&softc->ctl_lock);
12201 
12202 	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
12203 
12204 		printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__,
12205 		       io->scsiio.tag_num, io->io_hdr.nexus.initid.id,
12206 		       io->io_hdr.nexus.targ_port,
12207 		       io->io_hdr.nexus.targ_target.id,
12208 		       io->io_hdr.nexus.targ_lun);
12209 		io->io_hdr.status = CTL_CMD_ABORTED;
12210 		io->io_hdr.port_status = 31338;
12211 
12212 		mtx_unlock(&softc->ctl_lock);
12213 
12214 		ctl_send_datamove_done(io, /*have_lock*/ 0);
12215 
12216 		return;
12217 	}
12218 
12219 	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) {
12220 		mtx_unlock(&softc->ctl_lock);
12221 		ctl_datamove_remote_write(io);
12222 	} else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){
12223 		mtx_unlock(&softc->ctl_lock);
12224 		ctl_datamove_remote_read(io);
12225 	} else {
12226 		union ctl_ha_msg msg;
12227 		struct scsi_sense_data *sense;
12228 		uint8_t sks[3];
12229 		int retry_count;
12230 
12231 		memset(&msg, 0, sizeof(msg));
12232 
12233 		msg.hdr.msg_type = CTL_MSG_BAD_JUJU;
12234 		msg.hdr.status = CTL_SCSI_ERROR;
12235 		msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
12236 
12237 		retry_count = 4243;
12238 
12239 		sense = &msg.scsi.sense_data;
12240 		sks[0] = SSD_SCS_VALID;
12241 		sks[1] = (retry_count >> 8) & 0xff;
12242 		sks[2] = retry_count & 0xff;
12243 
12244 		/* "Internal target failure" */
12245 		scsi_set_sense_data(sense,
12246 				    /*sense_format*/ SSD_TYPE_NONE,
12247 				    /*current_error*/ 1,
12248 				    /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
12249 				    /*asc*/ 0x44,
12250 				    /*ascq*/ 0x00,
12251 				    /*type*/ SSD_ELEM_SKS,
12252 				    /*size*/ sizeof(sks),
12253 				    /*data*/ sks,
12254 				    SSD_ELEM_NONE);
12255 
12256 		io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
12257 		if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
12258 			ctl_failover_io(io, /*have_lock*/ 1);
12259 			mtx_unlock(&softc->ctl_lock);
12260 			return;
12261 		}
12262 
12263 		mtx_unlock(&softc->ctl_lock);
12264 
12265 		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) >
12266 		    CTL_HA_STATUS_SUCCESS) {
12267 			/* XXX KDM what to do if this fails? */
12268 		}
12269 		return;
12270 	}
12271 
12272 }
12273 
12274 static int
12275 ctl_process_done(union ctl_io *io, int have_lock)
12276 {
12277 	struct ctl_lun *lun;
12278 	struct ctl_softc *ctl_softc;
12279 	void (*fe_done)(union ctl_io *io);
12280 	uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port);
12281 
12282 	CTL_DEBUG_PRINT(("ctl_process_done\n"));
12283 
12284 	fe_done =
12285 	    control_softc->ctl_ports[targ_port]->fe_done;
12286 
12287 #ifdef CTL_TIME_IO
12288 	if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
12289 		char str[256];
12290 		char path_str[64];
12291 		struct sbuf sb;
12292 
12293 		ctl_scsi_path_string(io, path_str, sizeof(path_str));
12294 		sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
12295 
12296 		sbuf_cat(&sb, path_str);
12297 		switch (io->io_hdr.io_type) {
12298 		case CTL_IO_SCSI:
12299 			ctl_scsi_command_string(&io->scsiio, NULL, &sb);
12300 			sbuf_printf(&sb, "\n");
12301 			sbuf_cat(&sb, path_str);
12302 			sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
12303 				    io->scsiio.tag_num, io->scsiio.tag_type);
12304 			break;
12305 		case CTL_IO_TASK:
12306 			sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
12307 				    "Tag Type: %d\n", io->taskio.task_action,
12308 				    io->taskio.tag_num, io->taskio.tag_type);
12309 			break;
12310 		default:
12311 			printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
12312 			panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
12313 			break;
12314 		}
12315 		sbuf_cat(&sb, path_str);
12316 		sbuf_printf(&sb, "ctl_process_done: %jd seconds\n",
12317 			    (intmax_t)time_uptime - io->io_hdr.start_time);
12318 		sbuf_finish(&sb);
12319 		printf("%s", sbuf_data(&sb));
12320 	}
12321 #endif /* CTL_TIME_IO */
12322 
12323 	switch (io->io_hdr.io_type) {
12324 	case CTL_IO_SCSI:
12325 		break;
12326 	case CTL_IO_TASK:
12327 		ctl_io_error_print(io, NULL);
12328 		if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
12329 			ctl_free_io_internal(io, /*have_lock*/ 0);
12330 		else
12331 			fe_done(io);
12332 		return (CTL_RETVAL_COMPLETE);
12333 		break;
12334 	default:
12335 		printf("ctl_process_done: invalid io type %d\n",
12336 		       io->io_hdr.io_type);
12337 		panic("ctl_process_done: invalid io type %d\n",
12338 		      io->io_hdr.io_type);
12339 		break; /* NOTREACHED */
12340 	}
12341 
12342 	lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
12343 	if (lun == NULL) {
12344 		CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
12345 				 io->io_hdr.nexus.targ_lun));
12346 		fe_done(io);
12347 		goto bailout;
12348 	}
12349 	ctl_softc = lun->ctl_softc;
12350 
12351 	/*
12352 	 * Remove this from the OOA queue.
12353 	 */
12354 	if (have_lock == 0)
12355 		mtx_lock(&ctl_softc->ctl_lock);
12356 
12357 	/*
12358 	 * Check to see if we have any errors to inject here.  We only
12359 	 * inject errors for commands that don't already have errors set.
12360 	 */
12361 	if ((STAILQ_FIRST(&lun->error_list) != NULL)
12362 	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS))
12363 		ctl_inject_error(lun, io);
12364 
12365 	/*
12366 	 * XXX KDM how do we treat commands that aren't completed
12367 	 * successfully?
12368 	 *
12369 	 * XXX KDM should we also track I/O latency?
12370 	 */
12371 	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
12372 		uint32_t blocksize;
12373 #ifdef CTL_TIME_IO
12374 		struct bintime cur_bt;
12375 #endif
12376 
12377 		if ((lun->be_lun != NULL)
12378 		 && (lun->be_lun->blocksize != 0))
12379 			blocksize = lun->be_lun->blocksize;
12380 		else
12381 			blocksize = 512;
12382 
12383 		switch (io->io_hdr.io_type) {
12384 		case CTL_IO_SCSI: {
12385 			int isread;
12386 			struct ctl_lba_len lbalen;
12387 
12388 			isread = 0;
12389 			switch (io->scsiio.cdb[0]) {
12390 			case READ_6:
12391 			case READ_10:
12392 			case READ_12:
12393 			case READ_16:
12394 				isread = 1;
12395 				/* FALLTHROUGH */
12396 			case WRITE_6:
12397 			case WRITE_10:
12398 			case WRITE_12:
12399 			case WRITE_16:
12400 			case WRITE_VERIFY_10:
12401 			case WRITE_VERIFY_12:
12402 			case WRITE_VERIFY_16:
12403 				memcpy(&lbalen, io->io_hdr.ctl_private[
12404 				       CTL_PRIV_LBA_LEN].bytes, sizeof(lbalen));
12405 
12406 				if (isread) {
12407 					lun->stats.ports[targ_port].bytes[CTL_STATS_READ] +=
12408 						lbalen.len * blocksize;
12409 					lun->stats.ports[targ_port].operations[CTL_STATS_READ]++;
12410 
12411 #ifdef CTL_TIME_IO
12412 					bintime_add(
12413 					   &lun->stats.ports[targ_port].dma_time[CTL_STATS_READ],
12414 					   &io->io_hdr.dma_bt);
12415 					lun->stats.ports[targ_port].num_dmas[CTL_STATS_READ] +=
12416 						io->io_hdr.num_dmas;
12417 					getbintime(&cur_bt);
12418 					bintime_sub(&cur_bt,
12419 						    &io->io_hdr.start_bt);
12420 
12421 					bintime_add(
12422 					    &lun->stats.ports[targ_port].time[CTL_STATS_READ],
12423 					    &cur_bt);
12424 
12425 #if 0
12426 					cs_prof_gettime(&cur_ticks);
12427 					lun->stats.time[CTL_STATS_READ] +=
12428 						cur_ticks -
12429 						io->io_hdr.start_ticks;
12430 #endif
12431 #if 0
12432 					lun->stats.time[CTL_STATS_READ] +=
12433 						jiffies - io->io_hdr.start_time;
12434 #endif
12435 #endif /* CTL_TIME_IO */
12436 				} else {
12437 					lun->stats.ports[targ_port].bytes[CTL_STATS_WRITE] +=
12438 						lbalen.len * blocksize;
12439 					lun->stats.ports[targ_port].operations[
12440 						CTL_STATS_WRITE]++;
12441 
12442 #ifdef CTL_TIME_IO
12443 					bintime_add(
12444 					  &lun->stats.ports[targ_port].dma_time[CTL_STATS_WRITE],
12445 					  &io->io_hdr.dma_bt);
12446 					lun->stats.ports[targ_port].num_dmas[CTL_STATS_WRITE] +=
12447 						io->io_hdr.num_dmas;
12448 					getbintime(&cur_bt);
12449 					bintime_sub(&cur_bt,
12450 						    &io->io_hdr.start_bt);
12451 
12452 					bintime_add(
12453 					    &lun->stats.ports[targ_port].time[CTL_STATS_WRITE],
12454 					    &cur_bt);
12455 #if 0
12456 					cs_prof_gettime(&cur_ticks);
12457 					lun->stats.ports[targ_port].time[CTL_STATS_WRITE] +=
12458 						cur_ticks -
12459 						io->io_hdr.start_ticks;
12460 					lun->stats.ports[targ_port].time[CTL_STATS_WRITE] +=
12461 						jiffies - io->io_hdr.start_time;
12462 #endif
12463 #endif /* CTL_TIME_IO */
12464 				}
12465 				break;
12466 			default:
12467 				lun->stats.ports[targ_port].operations[CTL_STATS_NO_IO]++;
12468 
12469 #ifdef CTL_TIME_IO
12470 				bintime_add(
12471 				  &lun->stats.ports[targ_port].dma_time[CTL_STATS_NO_IO],
12472 				  &io->io_hdr.dma_bt);
12473 				lun->stats.ports[targ_port].num_dmas[CTL_STATS_NO_IO] +=
12474 					io->io_hdr.num_dmas;
12475 				getbintime(&cur_bt);
12476 				bintime_sub(&cur_bt, &io->io_hdr.start_bt);
12477 
12478 				bintime_add(&lun->stats.ports[targ_port].time[CTL_STATS_NO_IO],
12479 					    &cur_bt);
12480 
12481 #if 0
12482 				cs_prof_gettime(&cur_ticks);
12483 				lun->stats.ports[targ_port].time[CTL_STATS_NO_IO] +=
12484 					cur_ticks -
12485 					io->io_hdr.start_ticks;
12486 				lun->stats.ports[targ_port].time[CTL_STATS_NO_IO] +=
12487 					jiffies - io->io_hdr.start_time;
12488 #endif
12489 #endif /* CTL_TIME_IO */
12490 				break;
12491 			}
12492 			break;
12493 		}
12494 		default:
12495 			break;
12496 		}
12497 	}
12498 
12499 	TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
12500 
12501 	/*
12502 	 * Run through the blocked queue on this LUN and see if anything
12503 	 * has become unblocked, now that this transaction is done.
12504 	 */
12505 	ctl_check_blocked(lun);
12506 
12507 	/*
12508 	 * If the LUN has been invalidated, free it if there is nothing
12509 	 * left on its OOA queue.
12510 	 */
12511 	if ((lun->flags & CTL_LUN_INVALID)
12512 	 && (TAILQ_FIRST(&lun->ooa_queue) == NULL))
12513 		ctl_free_lun(lun);
12514 
12515 	/*
12516 	 * If this command has been aborted, make sure we set the status
12517 	 * properly.  The FETD is responsible for freeing the I/O and doing
12518 	 * whatever it needs to do to clean up its state.
12519 	 */
12520 	if (io->io_hdr.flags & CTL_FLAG_ABORT)
12521 		io->io_hdr.status = CTL_CMD_ABORTED;
12522 
12523 	/*
12524 	 * We print out status for every task management command.  For SCSI
12525 	 * commands, we filter out any unit attention errors; they happen
12526 	 * on every boot, and would clutter up the log.  Note:  task
12527 	 * management commands aren't printed here, they are printed above,
12528 	 * since they should never even make it down here.
12529 	 */
12530 	switch (io->io_hdr.io_type) {
12531 	case CTL_IO_SCSI: {
12532 		int error_code, sense_key, asc, ascq;
12533 
12534 		sense_key = 0;
12535 
12536 		if (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR)
12537 		 && (io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND)) {
12538 			/*
12539 			 * Since this is just for printing, no need to
12540 			 * show errors here.
12541 			 */
12542 			scsi_extract_sense_len(&io->scsiio.sense_data,
12543 					       io->scsiio.sense_len,
12544 					       &error_code,
12545 					       &sense_key,
12546 					       &asc,
12547 					       &ascq,
12548 					       /*show_errors*/ 0);
12549 		}
12550 
12551 		if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
12552 		 && (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR)
12553 		  || (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND)
12554 		  || (sense_key != SSD_KEY_UNIT_ATTENTION))) {
12555 
12556 			if ((time_uptime - ctl_softc->last_print_jiffies) <= 0){
12557 				ctl_softc->skipped_prints++;
12558 				if (have_lock == 0)
12559 					mtx_unlock(&ctl_softc->ctl_lock);
12560 			} else {
12561 				uint32_t skipped_prints;
12562 
12563 				skipped_prints = ctl_softc->skipped_prints;
12564 
12565 				ctl_softc->skipped_prints = 0;
12566 				ctl_softc->last_print_jiffies = time_uptime;
12567 
12568 				if (have_lock == 0)
12569 					mtx_unlock(&ctl_softc->ctl_lock);
12570 				if (skipped_prints > 0) {
12571 #ifdef NEEDTOPORT
12572 					csevent_log(CSC_CTL | CSC_SHELF_SW |
12573 					    CTL_ERROR_REPORT,
12574 					    csevent_LogType_Trace,
12575 					    csevent_Severity_Information,
12576 					    csevent_AlertLevel_Green,
12577 					    csevent_FRU_Firmware,
12578 					    csevent_FRU_Unknown,
12579 					    "High CTL error volume, %d prints "
12580 					    "skipped", skipped_prints);
12581 #endif
12582 				}
12583 				ctl_io_error_print(io, NULL);
12584 			}
12585 		} else {
12586 			if (have_lock == 0)
12587 				mtx_unlock(&ctl_softc->ctl_lock);
12588 		}
12589 		break;
12590 	}
12591 	case CTL_IO_TASK:
12592 		if (have_lock == 0)
12593 			mtx_unlock(&ctl_softc->ctl_lock);
12594 		ctl_io_error_print(io, NULL);
12595 		break;
12596 	default:
12597 		if (have_lock == 0)
12598 			mtx_unlock(&ctl_softc->ctl_lock);
12599 		break;
12600 	}
12601 
12602 	/*
12603 	 * Tell the FETD or the other shelf controller we're done with this
12604 	 * command.  Note that only SCSI commands get to this point.  Task
12605 	 * management commands are completed above.
12606 	 *
12607 	 * We only send status to the other controller if we're in XFER
12608 	 * mode.  In SER_ONLY mode, the I/O is done on the controller that
12609 	 * received the I/O (from CTL's perspective), and so the status is
12610 	 * generated there.
12611 	 *
12612 	 * XXX KDM if we hold the lock here, we could cause a deadlock
12613 	 * if the frontend comes back in in this context to queue
12614 	 * something.
12615 	 */
12616 	if ((ctl_softc->ha_mode == CTL_HA_MODE_XFER)
12617 	 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
12618 		union ctl_ha_msg msg;
12619 
12620 		memset(&msg, 0, sizeof(msg));
12621 		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
12622 		msg.hdr.original_sc = io->io_hdr.original_sc;
12623 		msg.hdr.nexus = io->io_hdr.nexus;
12624 		msg.hdr.status = io->io_hdr.status;
12625 		msg.scsi.scsi_status = io->scsiio.scsi_status;
12626 		msg.scsi.tag_num = io->scsiio.tag_num;
12627 		msg.scsi.tag_type = io->scsiio.tag_type;
12628 		msg.scsi.sense_len = io->scsiio.sense_len;
12629 		msg.scsi.sense_residual = io->scsiio.sense_residual;
12630 		msg.scsi.residual = io->scsiio.residual;
12631 		memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
12632 		       sizeof(io->scsiio.sense_data));
12633 		/*
12634 		 * We copy this whether or not this is an I/O-related
12635 		 * command.  Otherwise, we'd have to go and check to see
12636 		 * whether it's a read/write command, and it really isn't
12637 		 * worth it.
12638 		 */
12639 		memcpy(&msg.scsi.lbalen,
12640 		       &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
12641 		       sizeof(msg.scsi.lbalen));
12642 
12643 		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
12644 				sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
12645 			/* XXX do something here */
12646 		}
12647 
12648 		ctl_free_io_internal(io, /*have_lock*/ 0);
12649 	} else
12650 		fe_done(io);
12651 
12652 bailout:
12653 
12654 	return (CTL_RETVAL_COMPLETE);
12655 }
12656 
12657 /*
12658  * Front end should call this if it doesn't do autosense.  When the request
12659  * sense comes back in from the initiator, we'll dequeue this and send it.
12660  */
12661 int
12662 ctl_queue_sense(union ctl_io *io)
12663 {
12664 	struct ctl_lun *lun;
12665 	struct ctl_softc *ctl_softc;
12666 	uint32_t initidx;
12667 
12668 	ctl_softc = control_softc;
12669 
12670 	CTL_DEBUG_PRINT(("ctl_queue_sense\n"));
12671 
12672 	/*
12673 	 * LUN lookup will likely move to the ctl_work_thread() once we
12674 	 * have our new queueing infrastructure (that doesn't put things on
12675 	 * a per-LUN queue initially).  That is so that we can handle
12676 	 * things like an INQUIRY to a LUN that we don't have enabled.  We
12677 	 * can't deal with that right now.
12678 	 */
12679 	mtx_lock(&ctl_softc->ctl_lock);
12680 
12681 	/*
12682 	 * If we don't have a LUN for this, just toss the sense
12683 	 * information.
12684 	 */
12685 	if ((io->io_hdr.nexus.targ_lun < CTL_MAX_LUNS)
12686 	 && (ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun] != NULL))
12687 		lun = ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun];
12688 	else
12689 		goto bailout;
12690 
12691 	initidx = ctl_get_initindex(&io->io_hdr.nexus);
12692 
12693 	/*
12694 	 * Already have CA set for this LUN...toss the sense information.
12695 	 */
12696 	if (ctl_is_set(lun->have_ca, initidx))
12697 		goto bailout;
12698 
12699 	memcpy(&lun->pending_sense[initidx].sense, &io->scsiio.sense_data,
12700 	       ctl_min(sizeof(lun->pending_sense[initidx].sense),
12701 	       sizeof(io->scsiio.sense_data)));
12702 	ctl_set_mask(lun->have_ca, initidx);
12703 
12704 bailout:
12705 	mtx_unlock(&ctl_softc->ctl_lock);
12706 
12707 	ctl_free_io(io);
12708 
12709 	return (CTL_RETVAL_COMPLETE);
12710 }
12711 
12712 /*
12713  * Primary command inlet from frontend ports.  All SCSI and task I/O
12714  * requests must go through this function.
12715  */
12716 int
12717 ctl_queue(union ctl_io *io)
12718 {
12719 	struct ctl_softc *ctl_softc;
12720 
12721 	CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));
12722 
12723 	ctl_softc = control_softc;
12724 
12725 #ifdef CTL_TIME_IO
12726 	io->io_hdr.start_time = time_uptime;
12727 	getbintime(&io->io_hdr.start_bt);
12728 #endif /* CTL_TIME_IO */
12729 
12730 	mtx_lock(&ctl_softc->ctl_lock);
12731 
12732 	switch (io->io_hdr.io_type) {
12733 	case CTL_IO_SCSI:
12734 		STAILQ_INSERT_TAIL(&ctl_softc->incoming_queue, &io->io_hdr,
12735 				   links);
12736 		break;
12737 	case CTL_IO_TASK:
12738 		STAILQ_INSERT_TAIL(&ctl_softc->task_queue, &io->io_hdr, links);
12739 		/*
12740 		 * Set the task pending flag.  This is necessary to close a
12741 		 * race condition with the FETD:
12742 		 *
12743 		 * - FETD submits a task management command, like an abort.
12744 		 * - Back end calls fe_datamove() to move the data for the
12745 		 *   aborted command.  The FETD can't really accept it, but
12746 		 *   if it did, it would end up transmitting data for a
12747 		 *   command that the initiator told us to abort.
12748 		 *
12749 		 * We close the race condition by setting the flag here,
12750 		 * and checking it in ctl_datamove(), before calling the
12751 		 * FETD's fe_datamove routine.  If we've got a task
12752 		 * pending, we run the task queue and then check to see
12753 		 * whether our particular I/O has been aborted.
12754 		 */
12755 		ctl_softc->flags |= CTL_FLAG_TASK_PENDING;
12756 		break;
12757 	default:
12758 		mtx_unlock(&ctl_softc->ctl_lock);
12759 		printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
12760 		return (-EINVAL);
12761 		break; /* NOTREACHED */
12762 	}
12763 	mtx_unlock(&ctl_softc->ctl_lock);
12764 
12765 	ctl_wakeup_thread();
12766 
12767 	return (CTL_RETVAL_COMPLETE);
12768 }
12769 
12770 #ifdef CTL_IO_DELAY
12771 static void
12772 ctl_done_timer_wakeup(void *arg)
12773 {
12774 	union ctl_io *io;
12775 
12776 	io = (union ctl_io *)arg;
12777 	ctl_done_lock(io, /*have_lock*/ 0);
12778 }
12779 #endif /* CTL_IO_DELAY */
12780 
12781 void
12782 ctl_done_lock(union ctl_io *io, int have_lock)
12783 {
12784 	struct ctl_softc *ctl_softc;
12785 #ifndef CTL_DONE_THREAD
12786 	union ctl_io *xio;
12787 #endif /* !CTL_DONE_THREAD */
12788 
12789 	ctl_softc = control_softc;
12790 
12791 	if (have_lock == 0)
12792 		mtx_lock(&ctl_softc->ctl_lock);
12793 
12794 	/*
12795 	 * Enable this to catch duplicate completion issues.
12796 	 */
12797 #if 0
12798 	if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
12799 		printf("%s: type %d msg %d cdb %x iptl: "
12800 		       "%d:%d:%d:%d tag 0x%04x "
12801 		       "flag %#x status %x\n",
12802 			__func__,
12803 			io->io_hdr.io_type,
12804 			io->io_hdr.msg_type,
12805 			io->scsiio.cdb[0],
12806 			io->io_hdr.nexus.initid.id,
12807 			io->io_hdr.nexus.targ_port,
12808 			io->io_hdr.nexus.targ_target.id,
12809 			io->io_hdr.nexus.targ_lun,
12810 			(io->io_hdr.io_type ==
12811 			CTL_IO_TASK) ?
12812 			io->taskio.tag_num :
12813 			io->scsiio.tag_num,
12814 		        io->io_hdr.flags,
12815 			io->io_hdr.status);
12816 	} else
12817 		io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
12818 #endif
12819 
12820 	/*
12821 	 * This is an internal copy of an I/O, and should not go through
12822 	 * the normal done processing logic.
12823 	 */
12824 	if (io->io_hdr.flags & CTL_FLAG_INT_COPY) {
12825 		if (have_lock == 0)
12826 			mtx_unlock(&ctl_softc->ctl_lock);
12827 		return;
12828 	}
12829 
12830 	/*
12831 	 * We need to send a msg to the serializing shelf to finish the IO
12832 	 * as well.  We don't send a finish message to the other shelf if
12833 	 * this is a task management command.  Task management commands
12834 	 * aren't serialized in the OOA queue, but rather just executed on
12835 	 * both shelf controllers for commands that originated on that
12836 	 * controller.
12837 	 */
12838 	if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)
12839 	 && (io->io_hdr.io_type != CTL_IO_TASK)) {
12840 		union ctl_ha_msg msg_io;
12841 
12842 		msg_io.hdr.msg_type = CTL_MSG_FINISH_IO;
12843 		msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc;
12844 		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io,
12845 		    sizeof(msg_io), 0 ) != CTL_HA_STATUS_SUCCESS) {
12846 		}
12847 		/* continue on to finish IO */
12848 	}
12849 #ifdef CTL_IO_DELAY
12850 	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
12851 		struct ctl_lun *lun;
12852 
12853 		lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
12854 
12855 		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
12856 	} else {
12857 		struct ctl_lun *lun;
12858 
12859 		lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
12860 
12861 		if ((lun != NULL)
12862 		 && (lun->delay_info.done_delay > 0)) {
12863 			struct callout *callout;
12864 
12865 			callout = (struct callout *)&io->io_hdr.timer_bytes;
12866 			callout_init(callout, /*mpsafe*/ 1);
12867 			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
12868 			callout_reset(callout,
12869 				      lun->delay_info.done_delay * hz,
12870 				      ctl_done_timer_wakeup, io);
12871 			if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
12872 				lun->delay_info.done_delay = 0;
12873 			if (have_lock == 0)
12874 				mtx_unlock(&ctl_softc->ctl_lock);
12875 			return;
12876 		}
12877 	}
12878 #endif /* CTL_IO_DELAY */
12879 
12880 	STAILQ_INSERT_TAIL(&ctl_softc->done_queue, &io->io_hdr, links);
12881 
12882 #ifdef CTL_DONE_THREAD
12883 	if (have_lock == 0)
12884 		mtx_unlock(&ctl_softc->ctl_lock);
12885 
12886 	ctl_wakeup_thread();
12887 #else /* CTL_DONE_THREAD */
12888 	for (xio = (union ctl_io *)STAILQ_FIRST(&ctl_softc->done_queue);
12889 	     xio != NULL;
12890 	     xio =(union ctl_io *)STAILQ_FIRST(&ctl_softc->done_queue)) {
12891 
12892 		STAILQ_REMOVE_HEAD(&ctl_softc->done_queue, links);
12893 
12894 		ctl_process_done(xio, /*have_lock*/ 1);
12895 	}
12896 	if (have_lock == 0)
12897 		mtx_unlock(&ctl_softc->ctl_lock);
12898 #endif /* CTL_DONE_THREAD */
12899 }
12900 
12901 void
12902 ctl_done(union ctl_io *io)
12903 {
12904 	ctl_done_lock(io, /*have_lock*/ 0);
12905 }
12906 
12907 int
12908 ctl_isc(struct ctl_scsiio *ctsio)
12909 {
12910 	struct ctl_lun *lun;
12911 	int retval;
12912 
12913 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
12914 
12915 	CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0]));
12916 
12917 	CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n"));
12918 
12919 	retval = lun->backend->data_submit((union ctl_io *)ctsio);
12920 
12921 	return (retval);
12922 }
12923 
12924 
12925 static void
12926 ctl_work_thread(void *arg)
12927 {
12928 	struct ctl_softc *softc;
12929 	union ctl_io *io;
12930 	struct ctl_be_lun *be_lun;
12931 	int retval;
12932 
12933 	CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
12934 
12935 	softc = (struct ctl_softc *)arg;
12936 	if (softc == NULL)
12937 		return;
12938 
12939 	mtx_lock(&softc->ctl_lock);
12940 	for (;;) {
12941 		retval = 0;
12942 
12943 		/*
12944 		 * We handle the queues in this order:
12945 		 * - task management
12946 		 * - ISC
12947 		 * - done queue (to free up resources, unblock other commands)
12948 		 * - RtR queue
12949 		 * - incoming queue
12950 		 *
12951 		 * If those queues are empty, we break out of the loop and
12952 		 * go to sleep.
12953 		 */
12954 		io = (union ctl_io *)STAILQ_FIRST(&softc->task_queue);
12955 		if (io != NULL) {
12956 			ctl_run_task_queue(softc);
12957 			continue;
12958 		}
12959 		io = (union ctl_io *)STAILQ_FIRST(&softc->isc_queue);
12960 		if (io != NULL) {
12961 			STAILQ_REMOVE_HEAD(&softc->isc_queue, links);
12962 			ctl_handle_isc(io);
12963 			continue;
12964 		}
12965 		io = (union ctl_io *)STAILQ_FIRST(&softc->done_queue);
12966 		if (io != NULL) {
12967 			STAILQ_REMOVE_HEAD(&softc->done_queue, links);
12968 			/* clear any blocked commands, call fe_done */
12969 			mtx_unlock(&softc->ctl_lock);
12970 			/*
12971 			 * XXX KDM
12972 			 * Call this without a lock for now.  This will
12973 			 * depend on whether there is any way the FETD can
12974 			 * sleep or deadlock if called with the CTL lock
12975 			 * held.
12976 			 */
12977 			retval = ctl_process_done(io, /*have_lock*/ 0);
12978 			mtx_lock(&softc->ctl_lock);
12979 			continue;
12980 		}
12981 		if (!ctl_pause_rtr) {
12982 			io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue);
12983 			if (io != NULL) {
12984 				STAILQ_REMOVE_HEAD(&softc->rtr_queue, links);
12985 				mtx_unlock(&softc->ctl_lock);
12986 				goto execute;
12987 			}
12988 		}
12989 		io = (union ctl_io *)STAILQ_FIRST(&softc->incoming_queue);
12990 		if (io != NULL) {
12991 			STAILQ_REMOVE_HEAD(&softc->incoming_queue, links);
12992 			mtx_unlock(&softc->ctl_lock);
12993 			ctl_scsiio_precheck(softc, &io->scsiio);
12994 			mtx_lock(&softc->ctl_lock);
12995 			continue;
12996 		}
12997 		/*
12998 		 * We might want to move this to a separate thread, so that
12999 		 * configuration requests (in this case LUN creations)
13000 		 * won't impact the I/O path.
13001 		 */
13002 		be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
13003 		if (be_lun != NULL) {
13004 			STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
13005 			mtx_unlock(&softc->ctl_lock);
13006 			ctl_create_lun(be_lun);
13007 			mtx_lock(&softc->ctl_lock);
13008 			continue;
13009 		}
13010 
13011 		/* XXX KDM use the PDROP flag?? */
13012 		/* Sleep until we have something to do. */
13013 		mtx_sleep(softc, &softc->ctl_lock, PRIBIO, "ctl_work", 0);
13014 
13015 		/* Back to the top of the loop to see what woke us up. */
13016 		continue;
13017 
13018 execute:
13019 		retval = ctl_scsiio(&io->scsiio);
13020 		switch (retval) {
13021 		case CTL_RETVAL_COMPLETE:
13022 			break;
13023 		default:
13024 			/*
13025 			 * Probably need to make sure this doesn't happen.
13026 			 */
13027 			break;
13028 		}
13029 		mtx_lock(&softc->ctl_lock);
13030 	}
13031 }
13032 
13033 void
13034 ctl_wakeup_thread()
13035 {
13036 	struct ctl_softc *softc;
13037 
13038 	softc = control_softc;
13039 
13040 	wakeup(softc);
13041 }
13042 
13043 /* Initialization and failover */
13044 
13045 void
13046 ctl_init_isc_msg(void)
13047 {
13048 	printf("CTL: Still calling this thing\n");
13049 }
13050 
13051 /*
13052  * Init component
13053  * 	Initializes component into configuration defined by bootMode
13054  *	(see hasc-sv.c)
13055  *  	returns hasc_Status:
13056  * 		OK
13057  *		ERROR - fatal error
13058  */
13059 static ctl_ha_comp_status
13060 ctl_isc_init(struct ctl_ha_component *c)
13061 {
13062 	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
13063 
13064 	c->status = ret;
13065 	return ret;
13066 }
13067 
13068 /* Start component
13069  * 	Starts component in state requested. If component starts successfully,
13070  *	it must set its own state to the requestrd state
13071  *	When requested state is HASC_STATE_HA, the component may refine it
13072  * 	by adding _SLAVE or _MASTER flags.
13073  *	Currently allowed state transitions are:
13074  *	UNKNOWN->HA		- initial startup
13075  *	UNKNOWN->SINGLE - initial startup when no parter detected
13076  *	HA->SINGLE		- failover
13077  * returns ctl_ha_comp_status:
13078  * 		OK	- component successfully started in requested state
13079  *		FAILED  - could not start the requested state, failover may
13080  * 			  be possible
13081  *		ERROR	- fatal error detected, no future startup possible
13082  */
13083 static ctl_ha_comp_status
13084 ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state)
13085 {
13086 	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
13087 
13088 	// UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap)
13089 	if (c->state == CTL_HA_STATE_UNKNOWN ) {
13090 		ctl_is_single = 0;
13091 		if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
13092 		    != CTL_HA_STATUS_SUCCESS) {
13093 			printf("ctl_isc_start: ctl_ha_msg_create failed.\n");
13094 			ret = CTL_HA_COMP_STATUS_ERROR;
13095 		}
13096 	} else if (CTL_HA_STATE_IS_HA(c->state)
13097 		&& CTL_HA_STATE_IS_SINGLE(state)){
13098 		// HA->SINGLE transition
13099 	        ctl_failover();
13100 		ctl_is_single = 1;
13101 	} else {
13102 		printf("ctl_isc_start:Invalid state transition %X->%X\n",
13103 		       c->state, state);
13104 		ret = CTL_HA_COMP_STATUS_ERROR;
13105 	}
13106 	if (CTL_HA_STATE_IS_SINGLE(state))
13107 		ctl_is_single = 1;
13108 
13109 	c->state = state;
13110 	c->status = ret;
13111 	return ret;
13112 }
13113 
13114 /*
13115  * Quiesce component
13116  * The component must clear any error conditions (set status to OK) and
13117  * prepare itself to another Start call
13118  * returns ctl_ha_comp_status:
13119  * 	OK
13120  *	ERROR
13121  */
13122 static ctl_ha_comp_status
13123 ctl_isc_quiesce(struct ctl_ha_component *c)
13124 {
13125 	int ret = CTL_HA_COMP_STATUS_OK;
13126 
13127 	ctl_pause_rtr = 1;
13128 	c->status = ret;
13129 	return ret;
13130 }
13131 
13132 struct ctl_ha_component ctl_ha_component_ctlisc =
13133 {
13134 	.name = "CTL ISC",
13135 	.state = CTL_HA_STATE_UNKNOWN,
13136 	.init = ctl_isc_init,
13137 	.start = ctl_isc_start,
13138 	.quiesce = ctl_isc_quiesce
13139 };
13140 
13141 /*
13142  *  vim: ts=8
13143  */
13144