xref: /freebsd/sys/cam/ctl/ctl_tpc.c (revision a7c09f5c127c481d29d1899cf83157a460d40d64)
1 /*-
2  * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/types.h>
34 #include <sys/lock.h>
35 #include <sys/module.h>
36 #include <sys/mutex.h>
37 #include <sys/condvar.h>
38 #include <sys/malloc.h>
39 #include <sys/conf.h>
40 #include <sys/queue.h>
41 #include <sys/sysctl.h>
42 #include <machine/atomic.h>
43 
44 #include <cam/cam.h>
45 #include <cam/scsi/scsi_all.h>
46 #include <cam/scsi/scsi_da.h>
47 #include <cam/ctl/ctl_io.h>
48 #include <cam/ctl/ctl.h>
49 #include <cam/ctl/ctl_frontend.h>
50 #include <cam/ctl/ctl_frontend_internal.h>
51 #include <cam/ctl/ctl_util.h>
52 #include <cam/ctl/ctl_backend.h>
53 #include <cam/ctl/ctl_ioctl.h>
54 #include <cam/ctl/ctl_ha.h>
55 #include <cam/ctl/ctl_private.h>
56 #include <cam/ctl/ctl_debug.h>
57 #include <cam/ctl/ctl_scsi_all.h>
58 #include <cam/ctl/ctl_tpc.h>
59 #include <cam/ctl/ctl_error.h>
60 
61 #define	TPC_MAX_CSCDS	64
62 #define	TPC_MAX_SEGS	64
63 #define	TPC_MAX_SEG	0
64 #define	TPC_MAX_LIST	8192
65 #define	TPC_MAX_INLINE	0
66 #define	TPC_MAX_LISTS	255
67 #define	TPC_MAX_IO_SIZE	(1024 * 1024)
68 
69 MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");
70 
/*
 * Recovery action for an I/O that failed while executing a copy segment.
 * The low byte (TPC_ERR_MASK) selects the action itself; TPC_ERR_NO_DECREMENT
 * is a flag ORed in when the failure should not consume a retry (see the
 * comment above tpc_checkcond_parse() regarding unit attentions).
 */
typedef enum {
	TPC_ERR_RETRY		= 0x000,
	TPC_ERR_FAIL		= 0x001,
	TPC_ERR_MASK		= 0x0ff,
	TPC_ERR_NO_DECREMENT	= 0x100
} tpc_error_action;
77 
struct tpc_list;
TAILQ_HEAD(runl, tpc_io);
/*
 * One CTL I/O issued on behalf of a copy segment (a read, write, TUR or
 * persistent-reservation-out), plus the chain of I/Os to launch after it.
 */
struct tpc_io {
	union ctl_io		*io;		/* The CTL I/O itself. */
	uint64_t		 lun;		/* LUN the I/O is queued to. */
	struct tpc_list		*list;		/* Owning copy list. */
	struct runl		 run;		/* I/Os to run on completion. */
	TAILQ_ENTRY(tpc_io)	 rlinks;	/* Linkage on a run list. */
	TAILQ_ENTRY(tpc_io)	 links;		/* Linkage on list->allio. */
};
88 
/*
 * State of one EXTENDED COPY command: the parsed parameter list (CSCDs,
 * segments, inline data), progress counters, the in-flight I/O set for the
 * current segment, and the final status kept for RECEIVE COPY STATUS.
 */
struct tpc_list {
	uint8_t			 service_action; /* EC_EC_LID1 or EC_EC_LID4. */
	int			 init_port;	/* Initiator port of origin. */
	uint32_t		 init_idx;	/* Initiator residency index. */
	uint32_t		 list_id;	/* List identifier from the CDB. */
	uint8_t			 flags;		/* Incl. EC_LIST_ID_USAGE bits. */
	uint8_t			*params;	/* Raw parameter data buffer. */
	struct scsi_ec_cscd	*cscd;		/* CSCD descriptor array. */
	struct scsi_ec_segment	*seg[TPC_MAX_SEGS]; /* Segment descriptors. */
	uint8_t			*inl;		/* Inline data, if any. */
	int			 ncscd;		/* Number of CSCDs. */
	int			 nseg;		/* Number of segments. */
	int			 leninl;	/* Length of inline data. */
	int			 curseg;	/* Segment being processed. */
	off_t			 curbytes;	/* Bytes copied so far. */
	int			 curops;	/* Operations done so far. */
	int			 stage;		/* Per-segment state machine. */
	uint8_t			*buf;		/* Bounce buffer for B2B copy. */
	int			 segbytes;	/* Bytes in current segment. */
	int			 tbdio;		/* I/Os outstanding (flag). */
	int			 error;		/* Segment failed. */
	int			 abort;		/* Abort requested (COA). */
	int			 completed;	/* Whole list finished. */
	TAILQ_HEAD(, tpc_io)	 allio;		/* All I/Os of the segment. */
	struct scsi_sense_data	 sense_data;	/* Final sense, for RCS. */
	uint8_t			 sense_len;
	uint8_t			 scsi_status;
	struct ctl_scsiio	*ctsio;		/* Originating XCOPY command. */
	struct ctl_lun		*lun;		/* LUN the command arrived on. */
	TAILQ_ENTRY(tpc_list)	 links;		/* Linkage on lun->tpc_lists. */
};
120 
/*
 * Per-LUN TPC initialization: start with an empty list of copy operations.
 */
void
ctl_tpc_init(struct ctl_lun *lun)
{

	TAILQ_INIT(&lun->tpc_lists);
}
127 
128 void
129 ctl_tpc_shutdown(struct ctl_lun *lun)
130 {
131 	struct tpc_list *list;
132 
133 	while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
134 		TAILQ_REMOVE(&lun->tpc_lists, list, links);
135 		KASSERT(list->completed,
136 		    ("Not completed TPC (%p) on shutdown", list));
137 		free(list, M_CTL);
138 	}
139 }
140 
/*
 * Build the Third-Party Copy VPD page (SVPD_SCSI_TPC) for INQUIRY EVPD.
 * The page is a sequence of variable-length descriptors: Supported Commands,
 * Parameter Data, Supported Descriptors, Supported CSCD Descriptor IDs and
 * General Copy Operations.  Each descriptor is found by stepping past the
 * previous one using its desc_length field, so the length math below must
 * stay in sync with the data actually written.
 */
int
ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_tpc *tpc_ptr;
	struct scsi_vpd_tpc_descriptor *d_ptr;
	struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
	struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
	struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
	struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
	struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
	struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
	struct ctl_lun *lun;
	int data_len;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	/*
	 * Total size of the page: header plus all five descriptors.  The
	 * Supported Commands and the two list-style descriptors are padded
	 * to 4-byte multiples, matching the roundup2() on desc_length below.
	 */
	data_len = sizeof(struct scsi_vpd_tpc) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
	     2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 7, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_pd) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_gco);

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	/* Transfer at most alloc_len bytes; report the rest as residual. */
	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				     lun->be_lun->lun_type;
	else
		tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	tpc_ptr->page_code = SVPD_SCSI_TPC;
	scsi_ulto2b(data_len - 4, tpc_ptr->page_length);

	/* Supported commands */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
	sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
	/* Two command descriptors: 3 + 4 supported service action bytes. */
	sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 7;
	scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
	scd_ptr = &sc_ptr->descr[0];
	scd_ptr->opcode = EXTENDED_COPY;
	scd_ptr->sa_length = 3;
	scd_ptr->supported_service_actions[0] = EC_EC_LID1;
	scd_ptr->supported_service_actions[1] = EC_EC_LID4;
	scd_ptr->supported_service_actions[2] = EC_COA;
	/* Second command descriptor follows the first's service actions. */
	scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
	    &scd_ptr->supported_service_actions[scd_ptr->sa_length];
	scd_ptr->opcode = RECEIVE_COPY_STATUS;
	scd_ptr->sa_length = 4;
	scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
	scd_ptr->supported_service_actions[1] = RCS_RCFD;
	scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
	scd_ptr->supported_service_actions[3] = RCS_RCOP;

	/* Parameter data. */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
	scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);

	/* Supported Descriptors */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
	sd_ptr->list_length = 4;
	sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
	sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
	sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
	sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;

	/* Supported CSCD Descriptor IDs */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
	scsi_ulto2b(2, sdid_ptr->list_length);
	/* 0xffff: the "this LUN" CSCD ID handled by tpc_resolve(). */
	scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);

	/* General Copy Operations */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
	scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
	scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
	scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
	gco_ptr->data_segment_granularity = 0;
	gco_ptr->inline_data_granularity = 0;

	ctsio->scsi_status = SCSI_STATUS_OK;
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}
266 
267 int
268 ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
269 {
270 	struct ctl_lun *lun;
271 	struct scsi_receive_copy_operating_parameters *cdb;
272 	struct scsi_receive_copy_operating_parameters_data *data;
273 	int retval;
274 	int alloc_len, total_len;
275 
276 	CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n"));
277 
278 	cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;
279 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
280 
281 	retval = CTL_RETVAL_COMPLETE;
282 
283 	total_len = sizeof(*data) + 4;
284 	alloc_len = scsi_4btoul(cdb->length);
285 
286 	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
287 
288 	ctsio->kern_sg_entries = 0;
289 
290 	if (total_len < alloc_len) {
291 		ctsio->residual = alloc_len - total_len;
292 		ctsio->kern_data_len = total_len;
293 		ctsio->kern_total_len = total_len;
294 	} else {
295 		ctsio->residual = 0;
296 		ctsio->kern_data_len = alloc_len;
297 		ctsio->kern_total_len = alloc_len;
298 	}
299 	ctsio->kern_data_resid = 0;
300 	ctsio->kern_rel_offset = 0;
301 
302 	data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
303 	scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
304 	data->snlid = RCOP_SNLID;
305 	scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
306 	scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
307 	scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
308 	scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
309 	scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
310 	scsi_ulto4b(0, data->held_data_limit);
311 	scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
312 	scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
313 	data->maximum_concurrent_copies = TPC_MAX_LISTS;
314 	data->data_segment_granularity = 0;
315 	data->inline_data_granularity = 0;
316 	data->held_data_granularity = 0;
317 	data->implemented_descriptor_list_length = 4;
318 	data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
319 	data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
320 	data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
321 	data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;
322 
323 	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
324 	ctsio->be_move_done = ctl_config_move_done;
325 
326 	ctl_datamove((union ctl_io *)ctsio);
327 	return (retval);
328 }
329 
330 static struct tpc_list *
331 tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx)
332 {
333 	struct tpc_list *list;
334 
335 	mtx_assert(&lun->lun_lock, MA_OWNED);
336 	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
337 		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
338 		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id &&
339 		    list->init_idx == init_idx)
340 			break;
341 	}
342 	return (list);
343 }
344 
/*
 * RECEIVE COPY STATUS (LID1): report progress or final status of the copy
 * identified by the one-byte list identifier.  A completed list is consumed
 * (removed and freed) by this query; a snapshot of it is taken under the
 * LUN lock so the data can be formatted after the lock is dropped.
 */
int
ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_status_lid1 *cdb;
	struct scsi_receive_copy_status_lid1_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));

	cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		/* Unknown list ID: invalid field in CDB byte 2. */
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	/* Snapshot under the lock; free the original if it is finished. */
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	/* Transfer at most alloc_len bytes; report the rest as residual. */
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4, data->available_data);
	if (list_copy.completed) {
		if (list_copy.error || list_copy.abort)
			data->copy_command_status = RCS_CCS_ERROR;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG;
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	/* The transfer count field is 32-bit: switch to MB units if needed. */
	if (list_copy.curbytes <= UINT32_MAX) {
		data->transfer_count_units = RCS_TC_BYTES;
		scsi_ulto4b(list_copy.curbytes, data->transfer_count);
	} else {
		data->transfer_count_units = RCS_TC_MBYTES;
		scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
	}

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
426 
/*
 * RECEIVE COPY FAILURE DETAILS: return the sense data recorded for a
 * completed copy.  Only completed lists qualify; the matched list is
 * consumed (removed and freed) after being snapshotted under the LUN lock.
 */
int
ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_failure_details *cdb;
	struct scsi_receive_copy_failure_details_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));

	cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL || !list->completed) {
		mtx_unlock(&lun->lun_lock);
		/* Unknown or still-running list: invalid field in CDB. */
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	/* Snapshot under the lock, then consume the list. */
	list_copy = *list;
	TAILQ_REMOVE(&lun->tpc_lists, list, links);
	free(list, M_CTL);
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	/* Transfer at most alloc_len bytes; report the rest as residual. */
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
	if (list_copy.completed && (list_copy.error || list_copy.abort)) {
		scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
		    data->available_data);
		data->copy_command_status = RCS_CCS_ERROR;
	} else
		scsi_ulto4b(0, data->available_data);
	scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
498 
/*
 * RECEIVE COPY STATUS (LID4): like LID1 but with a four-byte list
 * identifier, a richer status encoding (distinguishing aborted from error)
 * and the recorded sense data appended.  A completed list is consumed.
 */
int
ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_status_lid4 *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));

	cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		/* Unknown list ID: invalid field in CDB byte 2. */
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	/* Snapshot under the lock; free the original if it is finished. */
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	/* Transfer at most alloc_len bytes; report the rest as residual. */
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
	    data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	data->transfer_count_units = RCS_TC_BYTES;
	scsi_u64to8b(list_copy.curbytes, data->transfer_count);
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->length_of_the_sense_data_field = list_copy.sense_len;
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
584 
585 int
586 ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
587 {
588 	struct ctl_lun *lun;
589 	struct scsi_copy_operation_abort *cdb;
590 	struct tpc_list *list;
591 	int retval;
592 	uint32_t list_id;
593 
594 	CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));
595 
596 	cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
597 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
598 
599 	retval = CTL_RETVAL_COMPLETE;
600 
601 	list_id = scsi_4btoul(cdb->list_identifier);
602 	mtx_lock(&lun->lun_lock);
603 	list = tpc_find_list(lun, list_id,
604 	    ctl_get_resindex(&ctsio->io_hdr.nexus));
605 	if (list == NULL) {
606 		mtx_unlock(&lun->lun_lock);
607 		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
608 		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
609 		    /*bit*/ 0);
610 		ctl_done((union ctl_io *)ctsio);
611 		return (retval);
612 	}
613 	list->abort = 1;
614 	mtx_unlock(&lun->lun_lock);
615 
616 	ctl_set_success(ctsio);
617 	ctl_done((union ctl_io *)ctsio);
618 	return (retval);
619 }
620 
621 static uint64_t
622 tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss)
623 {
624 
625 	if (idx == 0xffff) {
626 		if (ss && list->lun->be_lun)
627 			*ss = list->lun->be_lun->blocksize;
628 		return (list->lun->lun);
629 	}
630 	if (idx >= list->ncscd)
631 		return (UINT64_MAX);
632 	return (tpcl_resolve(list->init_port, &list->cscd[idx], ss));
633 }
634 
635 static int
636 tpc_process_b2b(struct tpc_list *list)
637 {
638 	struct scsi_ec_segment_b2b *seg;
639 	struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
640 	struct tpc_io *tior, *tiow;
641 	struct runl run, *prun;
642 	uint64_t sl, dl;
643 	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
644 	int numlba;
645 	uint32_t srcblock, dstblock;
646 
647 	if (list->stage == 1) {
648 complete:
649 		while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
650 			TAILQ_REMOVE(&list->allio, tior, links);
651 			ctl_free_io(tior->io);
652 			free(tior, M_CTL);
653 		}
654 		free(list->buf, M_CTL);
655 		if (list->abort) {
656 			ctl_set_task_aborted(list->ctsio);
657 			return (CTL_RETVAL_ERROR);
658 		} else if (list->error) {
659 			ctl_set_sense(list->ctsio, /*current_error*/ 1,
660 			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
661 			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
662 			return (CTL_RETVAL_ERROR);
663 		} else {
664 			list->curbytes += list->segbytes;
665 			return (CTL_RETVAL_COMPLETE);
666 		}
667 	}
668 
669 	TAILQ_INIT(&list->allio);
670 	seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
671 	sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), &srcblock);
672 	dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), &dstblock);
673 	if (sl >= CTL_MAX_LUNS || dl >= CTL_MAX_LUNS) {
674 		ctl_set_sense(list->ctsio, /*current_error*/ 1,
675 		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
676 		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
677 		return (CTL_RETVAL_ERROR);
678 	}
679 	sdstp = &list->cscd[scsi_2btoul(seg->src_cscd)].dtsp;
680 	if (scsi_3btoul(sdstp->block_length) != 0)
681 		srcblock = scsi_3btoul(sdstp->block_length);
682 	ddstp = &list->cscd[scsi_2btoul(seg->dst_cscd)].dtsp;
683 	if (scsi_3btoul(ddstp->block_length) != 0)
684 		dstblock = scsi_3btoul(ddstp->block_length);
685 	numlba = scsi_2btoul(seg->number_of_blocks);
686 	if (seg->flags & EC_SEG_DC)
687 		numbytes = (off_t)numlba * dstblock;
688 	else
689 		numbytes = (off_t)numlba * srcblock;
690 	srclba = scsi_8btou64(seg->src_lba);
691 	dstlba = scsi_8btou64(seg->dst_lba);
692 
693 //	printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
694 //	    (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
695 //	    dl, scsi_8btou64(seg->dst_lba));
696 
697 	if (numbytes == 0)
698 		return (CTL_RETVAL_COMPLETE);
699 
700 	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
701 		ctl_set_sense(list->ctsio, /*current_error*/ 1,
702 		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
703 		    /*asc*/ 0x26, /*ascq*/ 0x0A, SSD_ELEM_NONE);
704 		return (CTL_RETVAL_ERROR);
705 	}
706 
707 	list->buf = malloc(numbytes, M_CTL, M_WAITOK);
708 	list->segbytes = numbytes;
709 	donebytes = 0;
710 	TAILQ_INIT(&run);
711 	prun = &run;
712 	list->tbdio = 1;
713 	while (donebytes < numbytes) {
714 		roundbytes = MIN(numbytes - donebytes, TPC_MAX_IO_SIZE);
715 
716 		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
717 		TAILQ_INIT(&tior->run);
718 		tior->list = list;
719 		TAILQ_INSERT_TAIL(&list->allio, tior, links);
720 		tior->io = tpcl_alloc_io();
721 		if (tior->io == NULL) {
722 			list->error = 1;
723 			goto complete;
724 		}
725 		ctl_scsi_read_write(tior->io,
726 				    /*data_ptr*/ &list->buf[donebytes],
727 				    /*data_len*/ roundbytes,
728 				    /*read_op*/ 1,
729 				    /*byte2*/ 0,
730 				    /*minimum_cdb_size*/ 0,
731 				    /*lba*/ srclba + donebytes / srcblock,
732 				    /*num_blocks*/ roundbytes / srcblock,
733 				    /*tag_type*/ CTL_TAG_SIMPLE,
734 				    /*control*/ 0);
735 		tior->io->io_hdr.retries = 3;
736 		tior->lun = sl;
737 		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;
738 
739 		tiow = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
740 		TAILQ_INIT(&tiow->run);
741 		tiow->list = list;
742 		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
743 		tiow->io = tpcl_alloc_io();
744 		if (tiow->io == NULL) {
745 			list->error = 1;
746 			goto complete;
747 		}
748 		ctl_scsi_read_write(tiow->io,
749 				    /*data_ptr*/ &list->buf[donebytes],
750 				    /*data_len*/ roundbytes,
751 				    /*read_op*/ 0,
752 				    /*byte2*/ 0,
753 				    /*minimum_cdb_size*/ 0,
754 				    /*lba*/ dstlba + donebytes / dstblock,
755 				    /*num_blocks*/ roundbytes / dstblock,
756 				    /*tag_type*/ CTL_TAG_SIMPLE,
757 				    /*control*/ 0);
758 		tiow->io->io_hdr.retries = 3;
759 		tiow->lun = dl;
760 		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;
761 
762 		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
763 		TAILQ_INSERT_TAIL(prun, tior, rlinks);
764 		prun = &tior->run;
765 		donebytes += roundbytes;
766 	}
767 
768 	while ((tior = TAILQ_FIRST(&run)) != NULL) {
769 		TAILQ_REMOVE(&run, tior, rlinks);
770 		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
771 			panic("tpcl_queue() error");
772 	}
773 
774 	list->stage++;
775 	return (CTL_RETVAL_QUEUED);
776 }
777 
/*
 * Process a "verify CSCD" segment.  If the TUR bit is set, a TEST UNIT
 * READY is issued to the resolved LUN (stage 0) and the result evaluated
 * on reentry (stage 1); otherwise the segment completes immediately.
 */
static int
tpc_process_verify(struct tpc_list *list)
{
	struct scsi_ec_segment_verify *seg;
	struct tpc_io *tio;
	uint64_t sl;

	if (list->stage == 1) {
complete:
		/* Stage 1: I/O done (or setup failed) -- clean up. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
	sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), NULL);
	if (sl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Verify %ju\n", sl);

	/* Without the TUR bit there is nothing to do. */
	if ((seg->tur & 0x01) == 0)
		return (CTL_RETVAL_COMPLETE);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	if (tio->io == NULL) {
		list->error = 1;
		goto complete;
	}
	ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = sl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}
838 
/*
 * Process a "register key" segment: issue PERSISTENT RESERVE OUT
 * (REGISTER) with the keys from the segment descriptor to the resolved
 * destination LUN.  Stage 0 builds and queues the I/O; stage 1 evaluates
 * the result and releases resources.
 */
static int
tpc_process_register_key(struct tpc_list *list)
{
	struct scsi_ec_segment_register_key *seg;
	struct tpc_io *tio;
	uint64_t dl;
	int datalen;

	if (list->stage == 1) {
complete:
		/* Stage 1: I/O done (or setup failed) -- clean up. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
	dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), NULL);
	if (dl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Register Key %ju\n", dl);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	if (tio->io == NULL) {
		list->error = 1;
		goto complete;
	}
	/* Parameter buffer for the PR OUT command; freed in stage 1. */
	datalen = sizeof(struct scsi_per_res_out_parms);
	list->buf = malloc(datalen, M_CTL, M_WAITOK);
	ctl_scsi_persistent_res_out(tio->io,
	    list->buf, datalen, SPRO_REGISTER, -1,
	    scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
	    /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = dl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}
903 
/*
 * Main copy-list state machine.  Dispatches segments in order until one
 * queues I/O (CTL_RETVAL_QUEUED; we will be re-entered on completion),
 * fails, or the list is exhausted.  On finish, either frees the list
 * (list ID usage "none": no status retention) or marks it completed and
 * records the final status for later RECEIVE COPY STATUS queries, then
 * completes the originating XCOPY command.
 */
static void
tpc_process(struct tpc_list *list)
{
	struct ctl_lun *lun = list->lun;
	struct scsi_ec_segment *seg;
	struct ctl_scsiio *ctsio = list->ctsio;
	int retval = CTL_RETVAL_COMPLETE;

//printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
	while (list->curseg < list->nseg) {
		seg = list->seg[list->curseg];
		switch (seg->type_code) {
		case EC_SEG_B2B:
			retval = tpc_process_b2b(list);
			break;
		case EC_SEG_VERIFY:
			retval = tpc_process_verify(list);
			break;
		case EC_SEG_REGISTER_KEY:
			retval = tpc_process_register_key(list);
			break;
		default:
			/* Unsupported segment descriptor type code. */
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
			goto done;
		}
		if (retval == CTL_RETVAL_QUEUED)
			return;
		if (retval == CTL_RETVAL_ERROR) {
			list->error = 1;
			goto done;
		}
		/* Segment done: advance and reset the per-segment stage. */
		list->curseg++;
		list->stage = 0;
	}

	ctl_set_success(ctsio);

done:
//printf("ZZZ done\n");
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
		/* No status retention requested: discard immediately. */
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	} else {
		/* Keep final status for RECEIVE COPY STATUS / FAILURE. */
		list->completed = 1;
		list->sense_data = ctsio->sense_data;
		list->sense_len = ctsio->sense_len;
		list->scsi_status = ctsio->scsi_status;
	}
	mtx_unlock(&lun->lun_lock);

	ctl_done((union ctl_io *)ctsio);
}
959 
960 /*
961  * For any sort of check condition, busy, etc., we just retry.  We do not
962  * decrement the retry count for unit attention type errors.  These are
963  * normal, and we want to save the retry count for "real" errors.  Otherwise,
964  * we could end up with situations where a command will succeed in some
965  * situations and fail in others, depending on whether a unit attention is
966  * pending.  Also, some of our error recovery actions, most notably the
967  * LUN reset action, will cause a unit attention.
968  *
969  * We can add more detail here later if necessary.
970  */
971 static tpc_error_action
972 tpc_checkcond_parse(union ctl_io *io)
973 {
974 	tpc_error_action error_action;
975 	int error_code, sense_key, asc, ascq;
976 
977 	/*
978 	 * Default to retrying the command.
979 	 */
980 	error_action = TPC_ERR_RETRY;
981 
982 	scsi_extract_sense_len(&io->scsiio.sense_data,
983 			       io->scsiio.sense_len,
984 			       &error_code,
985 			       &sense_key,
986 			       &asc,
987 			       &ascq,
988 			       /*show_errors*/ 1);
989 
990 	switch (error_code) {
991 	case SSD_DEFERRED_ERROR:
992 	case SSD_DESC_DEFERRED_ERROR:
993 		error_action |= TPC_ERR_NO_DECREMENT;
994 		break;
995 	case SSD_CURRENT_ERROR:
996 	case SSD_DESC_CURRENT_ERROR:
997 	default:
998 		switch (sense_key) {
999 		case SSD_KEY_UNIT_ATTENTION:
1000 			error_action |= TPC_ERR_NO_DECREMENT;
1001 			break;
1002 		case SSD_KEY_HARDWARE_ERROR:
1003 			/*
1004 			 * This is our generic "something bad happened"
1005 			 * error code.  It often isn't recoverable.
1006 			 */
1007 			if ((asc == 0x44) && (ascq == 0x00))
1008 				error_action = TPC_ERR_FAIL;
1009 			break;
1010 		case SSD_KEY_NOT_READY:
1011 			/*
1012 			 * If the LUN is powered down, there likely isn't
1013 			 * much point in retrying right now.
1014 			 */
1015 			if ((asc == 0x04) && (ascq == 0x02))
1016 				error_action = TPC_ERR_FAIL;
1017 			/*
1018 			 * If the LUN is offline, there probably isn't much
1019 			 * point in retrying, either.
1020 			 */
1021 			if ((asc == 0x04) && (ascq == 0x03))
1022 				error_action = TPC_ERR_FAIL;
1023 			break;
1024 		}
1025 	}
1026 	return (error_action);
1027 }
1028 
1029 static tpc_error_action
1030 tpc_error_parse(union ctl_io *io)
1031 {
1032 	tpc_error_action error_action = TPC_ERR_RETRY;
1033 
1034 	switch (io->io_hdr.io_type) {
1035 	case CTL_IO_SCSI:
1036 		switch (io->io_hdr.status & CTL_STATUS_MASK) {
1037 		case CTL_SCSI_ERROR:
1038 			switch (io->scsiio.scsi_status) {
1039 			case SCSI_STATUS_CHECK_COND:
1040 				error_action = tpc_checkcond_parse(io);
1041 				break;
1042 			default:
1043 				break;
1044 			}
1045 			break;
1046 		default:
1047 			break;
1048 		}
1049 		break;
1050 	case CTL_IO_TASK:
1051 		break;
1052 	default:
1053 		panic("%s: invalid ctl_io type %d\n", __func__,
1054 		      io->io_hdr.io_type);
1055 		break;
1056 	}
1057 	return (error_action);
1058 }
1059 
/*
 * Completion callback for an I/O issued on behalf of an EXTENDED COPY
 * segment.  Retries recoverable errors, launches any I/Os that were
 * chained to run after this one, and resumes list processing once the
 * last outstanding I/O of the current stage has completed.
 */
void
tpc_done(union ctl_io *io)
{
	struct tpc_io *tio, *tior;

	/*
	 * Very minimal retry logic.  We basically retry if we got an error
	 * back, and the retry count is greater than 0.  If we ever want
	 * more sophisticated initiator type behavior, the CAM error
	 * recovery code in ../common might be helpful.
	 */
//	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
//		ctl_io_error_print(io, NULL);
	tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
	if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
	 && (io->io_hdr.retries > 0)) {
		ctl_io_status old_status;
		tpc_error_action error_action;

		error_action = tpc_error_parse(io);
		switch (error_action & TPC_ERR_MASK) {
		case TPC_ERR_FAIL:
			break;
		case TPC_ERR_RETRY:
		default:
			/* Unit attentions etc. don't consume a retry. */
			if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
				io->io_hdr.retries--;
			old_status = io->io_hdr.status;
			/* Reset the I/O so it can be resubmitted as new. */
			io->io_hdr.status = CTL_STATUS_NONE;
			io->io_hdr.flags &= ~CTL_FLAG_ABORT;
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
				printf("%s: error returned from ctl_queue()!\n",
				       __func__);
				/* Requeue failed; restore the error status
				 * and fall through to normal completion. */
				io->io_hdr.status = old_status;
			} else
				return;
		}
	}

	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
		tio->list->error = 1;
	else
		/* Account a successfully completed operation. */
		atomic_add_int(&tio->list->curops, 1);
	if (!tio->list->error && !tio->list->abort) {
		/*
		 * Queue the dependent I/Os chained on this one, bumping
		 * tbdio for each before it can possibly complete.
		 */
		while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
			TAILQ_REMOVE(&tio->run, tior, rlinks);
			atomic_add_int(&tio->list->tbdio, 1);
			if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
				panic("tpcl_queue() error");
		}
	}
	/* Last outstanding I/O of this stage: resume segment processing. */
	if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
		tpc_process(tio->list);
}
1115 
1116 int
1117 ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
1118 {
1119 	struct scsi_extended_copy *cdb;
1120 	struct scsi_extended_copy_lid1_data *data;
1121 	struct ctl_lun *lun;
1122 	struct tpc_list *list, *tlist;
1123 	uint8_t *ptr;
1124 	char *value;
1125 	int len, off, lencscd, lenseg, leninl, nseg;
1126 
1127 	CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));
1128 
1129 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
1130 	cdb = (struct scsi_extended_copy *)ctsio->cdb;
1131 	len = scsi_4btoul(cdb->length);
1132 
1133 	if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
1134 	    len > sizeof(struct scsi_extended_copy_lid1_data) +
1135 	    TPC_MAX_LIST + TPC_MAX_INLINE) {
1136 		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
1137 		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
1138 		goto done;
1139 	}
1140 
1141 	/*
1142 	 * If we've got a kernel request that hasn't been malloced yet,
1143 	 * malloc it and tell the caller the data buffer is here.
1144 	 */
1145 	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
1146 		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
1147 		ctsio->kern_data_len = len;
1148 		ctsio->kern_total_len = len;
1149 		ctsio->kern_data_resid = 0;
1150 		ctsio->kern_rel_offset = 0;
1151 		ctsio->kern_sg_entries = 0;
1152 		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
1153 		ctsio->be_move_done = ctl_config_move_done;
1154 		ctl_datamove((union ctl_io *)ctsio);
1155 
1156 		return (CTL_RETVAL_COMPLETE);
1157 	}
1158 
1159 	data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr;
1160 	lencscd = scsi_2btoul(data->cscd_list_length);
1161 	lenseg = scsi_4btoul(data->segment_list_length);
1162 	leninl = scsi_4btoul(data->inline_data_length);
1163 	if (len < sizeof(struct scsi_extended_copy_lid1_data) +
1164 	    lencscd + lenseg + leninl ||
1165 	    leninl > TPC_MAX_INLINE) {
1166 		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
1167 		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
1168 		goto done;
1169 	}
1170 	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
1171 		ctl_set_sense(ctsio, /*current_error*/ 1,
1172 		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1173 		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
1174 		goto done;
1175 	}
1176 	if (lencscd + lenseg > TPC_MAX_LIST) {
1177 		ctl_set_param_len_error(ctsio);
1178 		goto done;
1179 	}
1180 
1181 	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
1182 	list->service_action = cdb->service_action;
1183 	value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
1184 	if (value != NULL && strcmp(value, "on") == 0)
1185 		list->init_port = -1;
1186 	else
1187 		list->init_port = ctsio->io_hdr.nexus.targ_port;
1188 	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
1189 	list->list_id = data->list_identifier;
1190 	list->flags = data->flags;
1191 	list->params = ctsio->kern_data_ptr;
1192 	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
1193 	ptr = &data->data[lencscd];
1194 	for (nseg = 0, off = 0; off < lenseg; nseg++) {
1195 		if (nseg >= TPC_MAX_SEGS) {
1196 			free(list, M_CTL);
1197 			ctl_set_sense(ctsio, /*current_error*/ 1,
1198 			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1199 			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
1200 			goto done;
1201 		}
1202 		list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
1203 		off += sizeof(struct scsi_ec_segment) +
1204 		    scsi_2btoul(list->seg[nseg]->descr_length);
1205 	}
1206 	list->inl = &data->data[lencscd + lenseg];
1207 	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
1208 	list->nseg = nseg;
1209 	list->leninl = leninl;
1210 	list->ctsio = ctsio;
1211 	list->lun = lun;
1212 	mtx_lock(&lun->lun_lock);
1213 	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
1214 		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
1215 		if (tlist != NULL && !tlist->completed) {
1216 			mtx_unlock(&lun->lun_lock);
1217 			free(list, M_CTL);
1218 			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
1219 			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
1220 			    /*bit*/ 0);
1221 			goto done;
1222 		}
1223 		if (tlist != NULL) {
1224 			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
1225 			free(tlist, M_CTL);
1226 		}
1227 	}
1228 	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
1229 	mtx_unlock(&lun->lun_lock);
1230 
1231 	tpc_process(list);
1232 	return (CTL_RETVAL_COMPLETE);
1233 
1234 done:
1235 	ctl_done((union ctl_io *)ctsio);
1236 	return (CTL_RETVAL_COMPLETE);
1237 }
1238 
1239 int
1240 ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
1241 {
1242 	struct scsi_extended_copy *cdb;
1243 	struct scsi_extended_copy_lid4_data *data;
1244 	struct ctl_lun *lun;
1245 	struct tpc_list *list, *tlist;
1246 	uint8_t *ptr;
1247 	char *value;
1248 	int len, off, lencscd, lenseg, leninl, nseg;
1249 
1250 	CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));
1251 
1252 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
1253 	cdb = (struct scsi_extended_copy *)ctsio->cdb;
1254 	len = scsi_4btoul(cdb->length);
1255 
1256 	if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
1257 	    len > sizeof(struct scsi_extended_copy_lid4_data) +
1258 	    TPC_MAX_LIST + TPC_MAX_INLINE) {
1259 		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
1260 		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
1261 		goto done;
1262 	}
1263 
1264 	/*
1265 	 * If we've got a kernel request that hasn't been malloced yet,
1266 	 * malloc it and tell the caller the data buffer is here.
1267 	 */
1268 	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
1269 		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
1270 		ctsio->kern_data_len = len;
1271 		ctsio->kern_total_len = len;
1272 		ctsio->kern_data_resid = 0;
1273 		ctsio->kern_rel_offset = 0;
1274 		ctsio->kern_sg_entries = 0;
1275 		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
1276 		ctsio->be_move_done = ctl_config_move_done;
1277 		ctl_datamove((union ctl_io *)ctsio);
1278 
1279 		return (CTL_RETVAL_COMPLETE);
1280 	}
1281 
1282 	data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr;
1283 	lencscd = scsi_2btoul(data->cscd_list_length);
1284 	lenseg = scsi_2btoul(data->segment_list_length);
1285 	leninl = scsi_2btoul(data->inline_data_length);
1286 	if (len < sizeof(struct scsi_extended_copy_lid4_data) +
1287 	    lencscd + lenseg + leninl ||
1288 	    leninl > TPC_MAX_INLINE) {
1289 		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
1290 		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
1291 		goto done;
1292 	}
1293 	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
1294 		ctl_set_sense(ctsio, /*current_error*/ 1,
1295 		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1296 		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
1297 		goto done;
1298 	}
1299 	if (lencscd + lenseg > TPC_MAX_LIST) {
1300 		ctl_set_param_len_error(ctsio);
1301 		goto done;
1302 	}
1303 
1304 	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
1305 	list->service_action = cdb->service_action;
1306 	value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
1307 	if (value != NULL && strcmp(value, "on") == 0)
1308 		list->init_port = -1;
1309 	else
1310 		list->init_port = ctsio->io_hdr.nexus.targ_port;
1311 	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
1312 	list->list_id = scsi_4btoul(data->list_identifier);
1313 	list->flags = data->flags;
1314 	list->params = ctsio->kern_data_ptr;
1315 	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
1316 	ptr = &data->data[lencscd];
1317 	for (nseg = 0, off = 0; off < lenseg; nseg++) {
1318 		if (nseg >= TPC_MAX_SEGS) {
1319 			free(list, M_CTL);
1320 			ctl_set_sense(ctsio, /*current_error*/ 1,
1321 			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1322 			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
1323 			goto done;
1324 		}
1325 		list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
1326 		off += sizeof(struct scsi_ec_segment) +
1327 		    scsi_2btoul(list->seg[nseg]->descr_length);
1328 	}
1329 	list->inl = &data->data[lencscd + lenseg];
1330 	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
1331 	list->nseg = nseg;
1332 	list->leninl = leninl;
1333 	list->ctsio = ctsio;
1334 	list->lun = lun;
1335 	mtx_lock(&lun->lun_lock);
1336 	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
1337 		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
1338 		if (tlist != NULL && !tlist->completed) {
1339 			mtx_unlock(&lun->lun_lock);
1340 			free(list, M_CTL);
1341 			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
1342 			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
1343 			    /*bit*/ 0);
1344 			goto done;
1345 		}
1346 		if (tlist != NULL) {
1347 			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
1348 			free(tlist, M_CTL);
1349 		}
1350 	}
1351 	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
1352 	mtx_unlock(&lun->lun_lock);
1353 
1354 	tpc_process(list);
1355 	return (CTL_RETVAL_COMPLETE);
1356 
1357 done:
1358 	ctl_done((union ctl_io *)ctsio);
1359 	return (CTL_RETVAL_COMPLETE);
1360 }
1361 
1362