xref: /freebsd/sys/cam/ctl/ctl_tpc.c (revision c36e54bb328697af1e6113812caecbd3bac89fe0)
/*-
 * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <machine/atomic.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_tpc.h>
#include <cam/ctl/ctl_error.h>

#define	TPC_MAX_CSCDS	64
#define	TPC_MAX_SEGS	64
#define	TPC_MAX_SEG	0
#define	TPC_MAX_LIST	8192
#define	TPC_MAX_INLINE	0
#define	TPC_MAX_LISTS	255
#define	TPC_MAX_IO_SIZE	(1024 * 1024)
#define	TPC_MAX_IOCHUNK_SIZE	(TPC_MAX_IO_SIZE * 16)
#define	TPC_MIN_TOKEN_TIMEOUT	1
#define	TPC_DFL_TOKEN_TIMEOUT	60
#define	TPC_MAX_TOKEN_TIMEOUT	600

MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");

typedef enum {
	TPC_ERR_RETRY		= 0x000,
	TPC_ERR_FAIL		= 0x001,
	TPC_ERR_MASK		= 0x0ff,
	TPC_ERR_NO_DECREMENT	= 0x100
} tpc_error_action;

struct tpc_list;
TAILQ_HEAD(runl, tpc_io);
struct tpc_io {
	union ctl_io		*io;
	uint64_t		 lun;
	struct tpc_list		*list;
	struct runl		 run;
	TAILQ_ENTRY(tpc_io)	 rlinks;
	TAILQ_ENTRY(tpc_io)	 links;
};

struct tpc_token {
	uint8_t			 token[512];
	uint64_t		 lun;
	uint32_t		 blocksize;
	uint8_t			*params;
	struct scsi_range_desc	*range;
	int			 nrange;
	int			 active;
	time_t			 last_active;
	uint32_t		 timeout;
	TAILQ_ENTRY(tpc_token)	 links;
};

struct tpc_list {
	uint8_t			 service_action;
	int			 init_port;
	uint32_t		 init_idx;
	uint32_t		 list_id;
	uint8_t			 flags;
	uint8_t			*params;
	struct scsi_ec_cscd	*cscd;
	struct scsi_ec_segment	*seg[TPC_MAX_SEGS];
	uint8_t			*inl;
	int			 ncscd;
	int			 nseg;
	int			 leninl;
	struct tpc_token	*token;
	struct scsi_range_desc	*range;
	int			 nrange;
	off_t			 offset_into_rod;

	int			 curseg;
	off_t			 cursectors;
	off_t			 curbytes;
	int			 curops;
	int			 stage;
	uint8_t			*buf;
	off_t			 segsectors;
	off_t			 segbytes;
	int			 tbdio;
	int			 error;
	int			 abort;
	int			 completed;
	time_t			 last_active;
	TAILQ_HEAD(, tpc_io)	 allio;
	struct scsi_sense_data	 sense_data;
	uint8_t			 sense_len;
	uint8_t			 scsi_status;
	struct ctl_scsiio	*ctsio;
	struct ctl_lun		*lun;
	int			 res_token_valid;
	uint8_t			 res_token[512];
	TAILQ_ENTRY(tpc_list)	 links;
};

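/*
 * Periodic housekeeping: reap completed copy lists and inactive ROD tokens
 * whose inactivity timeouts have expired, then reschedule ourselves to run
 * again in one second.
 */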
static void
tpc_timeout(void *arg)
{
	struct ctl_softc *softc = arg;
	struct ctl_lun *lun;
	struct tpc_token *token, *ttoken;
	struct tpc_list *list, *tlist;

	/* Free completed lists with expired timeout. */
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
			if (!list->completed || time_uptime < list->last_active +
			    TPC_DFL_TOKEN_TIMEOUT)
				continue;
			TAILQ_REMOVE(&lun->tpc_lists, list, links);
			free(list, M_CTL);
		}
		mtx_unlock(&lun->lun_lock);
	}

	/* Free inactive ROD tokens with expired timeout. */
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
		if (token->active ||
		    time_uptime < token->last_active + token->timeout + 1)
			continue;
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
	callout_schedule(&softc->tpc_timeout, hz);
}

void
ctl_tpc_init(struct ctl_softc *softc)
{

	mtx_init(&softc->tpc_lock, "CTL TPC mutex", NULL, MTX_DEF);
	TAILQ_INIT(&softc->tpc_tokens);
	callout_init_mtx(&softc->tpc_timeout, &softc->ctl_lock, 0);
	callout_reset(&softc->tpc_timeout, hz, tpc_timeout, softc);
}

void
ctl_tpc_shutdown(struct ctl_softc *softc)
{
	struct tpc_token *token;

	callout_drain(&softc->tpc_timeout);

	/* Free ROD tokens. */
	mtx_lock(&softc->tpc_lock);
	while ((token = TAILQ_FIRST(&softc->tpc_tokens)) != NULL) {
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
	mtx_destroy(&softc->tpc_lock);
}

void
ctl_tpc_lun_init(struct ctl_lun *lun)
{

	TAILQ_INIT(&lun->tpc_lists);
}

void
ctl_tpc_lun_shutdown(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	struct tpc_list *list;
	struct tpc_token *token, *ttoken;

	/* Free lists for this LUN. */
	while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		KASSERT(list->completed,
		    ("Not completed TPC (%p) on shutdown", list));
		free(list, M_CTL);
	}

	/* Free ROD tokens for this LUN. */
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
		if (token->lun != lun->lun || token->active)
			continue;
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
}

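/*
 * Report the Third Party Copy VPD page (SPC-4 page 0x8f): a sequence of
 * descriptors advertising this target's copy limits, supported commands,
 * supported descriptor types, ROD token features and general copy
 * operation parameters.
 */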
int
ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_tpc *tpc_ptr;
	struct scsi_vpd_tpc_descriptor *d_ptr;
	struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr;
	struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
	struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
	struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
	struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
	struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
	struct scsi_vpd_tpc_descriptor_rtf *rtf_ptr;
	struct scsi_vpd_tpc_descriptor_rtf_block *rtfb_ptr;
	struct scsi_vpd_tpc_descriptor_srt *srt_ptr;
	struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr;
	struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
	struct ctl_lun *lun;
	int data_len;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	data_len = sizeof(struct scsi_vpd_tpc) +
	    sizeof(struct scsi_vpd_tpc_descriptor_bdrl) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
	     2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 11, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_pd) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_rtf) +
	     sizeof(struct scsi_vpd_tpc_descriptor_rtf_block) +
	    sizeof(struct scsi_vpd_tpc_descriptor_srt) +
	     2*sizeof(struct scsi_vpd_tpc_descriptor_srtd) +
	    sizeof(struct scsi_vpd_tpc_descriptor_gco);

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				     lun->be_lun->lun_type;
	else
		tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	tpc_ptr->page_code = SVPD_SCSI_TPC;
	scsi_ulto2b(data_len - 4, tpc_ptr->page_length);

	/* Block Device ROD Limits */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
	bdrl_ptr = (struct scsi_vpd_tpc_descriptor_bdrl *)d_ptr;
	scsi_ulto2b(SVPD_TPC_BDRL, bdrl_ptr->desc_type);
	scsi_ulto2b(sizeof(*bdrl_ptr) - 4, bdrl_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_SEGS, bdrl_ptr->maximum_ranges);
	scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
	    bdrl_ptr->maximum_inactivity_timeout);
	scsi_ulto4b(TPC_DFL_TOKEN_TIMEOUT,
	    bdrl_ptr->default_inactivity_timeout);
	scsi_u64to8b(0, bdrl_ptr->maximum_token_transfer_size);
	scsi_u64to8b(0, bdrl_ptr->optimal_transfer_count);

	/* Supported commands */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
	sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 11;
	scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
	scd_ptr = &sc_ptr->descr[0];
	scd_ptr->opcode = EXTENDED_COPY;
	scd_ptr->sa_length = 5;
	scd_ptr->supported_service_actions[0] = EC_EC_LID1;
	scd_ptr->supported_service_actions[1] = EC_EC_LID4;
	scd_ptr->supported_service_actions[2] = EC_PT;
	scd_ptr->supported_service_actions[3] = EC_WUT;
	scd_ptr->supported_service_actions[4] = EC_COA;
	scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
	    &scd_ptr->supported_service_actions[scd_ptr->sa_length];
	scd_ptr->opcode = RECEIVE_COPY_STATUS;
	scd_ptr->sa_length = 6;
	scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
	scd_ptr->supported_service_actions[1] = RCS_RCFD;
	scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
	scd_ptr->supported_service_actions[3] = RCS_RCOP;
	scd_ptr->supported_service_actions[4] = RCS_RRTI;
	scd_ptr->supported_service_actions[5] = RCS_RART;

	/* Parameter data. */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
	scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);

	/* Supported Descriptors */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
	sd_ptr->list_length = 4;
	sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
	sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
	sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
	sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;

	/* Supported CSCD Descriptor IDs */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
	scsi_ulto2b(2, sdid_ptr->list_length);
	scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);

	/* ROD Token Features */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	rtf_ptr = (struct scsi_vpd_tpc_descriptor_rtf *)d_ptr;
	scsi_ulto2b(SVPD_TPC_RTF, rtf_ptr->desc_type);
	scsi_ulto2b(sizeof(*rtf_ptr) - 4 + sizeof(*rtfb_ptr), rtf_ptr->desc_length);
	rtf_ptr->remote_tokens = 0;
	scsi_ulto4b(TPC_MIN_TOKEN_TIMEOUT, rtf_ptr->minimum_token_lifetime);
	scsi_ulto4b(UINT32_MAX, rtf_ptr->maximum_token_lifetime);
	scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
	    rtf_ptr->maximum_token_inactivity_timeout);
	scsi_ulto2b(sizeof(*rtfb_ptr), rtf_ptr->type_specific_features_length);
	rtfb_ptr = (struct scsi_vpd_tpc_descriptor_rtf_block *)
	    &rtf_ptr->type_specific_features;
	rtfb_ptr->type_format = SVPD_TPC_RTF_BLOCK;
	scsi_ulto2b(sizeof(*rtfb_ptr) - 4, rtfb_ptr->desc_length);
	scsi_ulto2b(0, rtfb_ptr->optimal_length_granularity);
	scsi_u64to8b(0, rtfb_ptr->maximum_bytes);
	scsi_u64to8b(0, rtfb_ptr->optimal_bytes);
	scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
	    rtfb_ptr->optimal_bytes_to_token_per_segment);
	scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
	    rtfb_ptr->optimal_bytes_from_token_per_segment);

	/* Supported ROD Tokens */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	srt_ptr = (struct scsi_vpd_tpc_descriptor_srt *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SRT, srt_ptr->desc_type);
	scsi_ulto2b(sizeof(*srt_ptr) - 4 + 2*sizeof(*srtd_ptr), srt_ptr->desc_length);
	scsi_ulto2b(2*sizeof(*srtd_ptr), srt_ptr->rod_type_descriptors_length);
	srtd_ptr = (struct scsi_vpd_tpc_descriptor_srtd *)
	    &srt_ptr->rod_type_descriptors;
	scsi_ulto4b(ROD_TYPE_AUR, srtd_ptr->rod_type);
	srtd_ptr->flags = SVPD_TPC_SRTD_TIN | SVPD_TPC_SRTD_TOUT;
	scsi_ulto2b(0, srtd_ptr->preference_indicator);
	srtd_ptr++;
	scsi_ulto4b(ROD_TYPE_BLOCK_ZERO, srtd_ptr->rod_type);
	srtd_ptr->flags = SVPD_TPC_SRTD_TIN;
	scsi_ulto2b(0, srtd_ptr->preference_indicator);

	/* General Copy Operations */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
	scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
	scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
	scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
	gco_ptr->data_segment_granularity = 0;
	gco_ptr->inline_data_granularity = 0;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}

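/*
 * RECEIVE COPY OPERATING PARAMETERS: return the static limits (CSCD and
 * segment counts, list lengths, concurrency) that EXTENDED COPY clients
 * need in order to build valid parameter lists.
 */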
int
ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
{
	struct scsi_receive_copy_operating_parameters *cdb;
	struct scsi_receive_copy_operating_parameters_data *data;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_receive_copy_operating_parameters\n"));

	cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;

	retval = CTL_RETVAL_COMPLETE;

	total_len = sizeof(*data) + 4;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
	data->snlid = RCOP_SNLID;
	scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
	scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
	scsi_ulto4b(0, data->held_data_limit);
	scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
	scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
	data->maximum_concurrent_copies = TPC_MAX_LISTS;
	data->data_segment_granularity = 0;
	data->inline_data_granularity = 0;
	data->held_data_granularity = 0;
	data->implemented_descriptor_list_length = 4;
	data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
	data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
	data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
	data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

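/*
 * Look up a copy list on the LUN by list ID and initiator index.  Lists
 * created with list ID usage "none" are not addressable and are skipped.
 * Returns NULL if no matching list exists.
 */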
static struct tpc_list *
tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx)
{
	struct tpc_list *list;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id &&
		    list->init_idx == init_idx)
			break;
	}
	return (list);
}

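/*
 * RECEIVE COPY STATUS (LID1): report progress or final status for the
 * addressed copy list.  A completed list is freed once its status has
 * been returned.  The failure-details and LID4 variants below follow
 * the same pattern with their respective parameter data formats.
 */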
int
ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_status_lid1 *cdb;
	struct scsi_receive_copy_status_lid1_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));

	cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4, data->available_data);
	if (list_copy.completed) {
		if (list_copy.error || list_copy.abort)
			data->copy_command_status = RCS_CCS_ERROR;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG;
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	if (list_copy.curbytes <= UINT32_MAX) {
		data->transfer_count_units = RCS_TC_BYTES;
		scsi_ulto4b(list_copy.curbytes, data->transfer_count);
	} else {
		data->transfer_count_units = RCS_TC_MBYTES;
		scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

int
ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_failure_details *cdb;
	struct scsi_receive_copy_failure_details_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));

	cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL || !list->completed) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	TAILQ_REMOVE(&lun->tpc_lists, list, links);
	free(list, M_CTL);
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
	if (list_copy.completed && (list_copy.error || list_copy.abort)) {
		scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
		    data->available_data);
		data->copy_command_status = RCS_CCS_ERROR;
	} else
		scsi_ulto4b(0, data->available_data);
	scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

int
ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_status_lid4 *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));

	cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
	    data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	data->transfer_count_units = RCS_TC_BYTES;
	scsi_u64to8b(list_copy.curbytes, data->transfer_count);
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->length_of_the_sense_data_field = list_copy.sense_len;
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

int
ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_copy_operation_abort *cdb;
	struct tpc_list *list;
	int retval;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));

	cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list->abort = 1;
	mtx_unlock(&lun->lun_lock);

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (retval);
}

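/*
 * Resolve a CSCD descriptor index to a LUN number, optionally returning
 * the logical block size and the physical block size and offset.  Index
 * 0xffff designates the LUN the EXTENDED COPY command itself was sent to.
 * Returns UINT64_MAX if the index is out of range.
 */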
static uint64_t
tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss,
    uint32_t *pb, uint32_t *pbo)
{

	if (idx == 0xffff) {
		if (ss && list->lun->be_lun)
			*ss = list->lun->be_lun->blocksize;
		if (pb && list->lun->be_lun)
			*pb = list->lun->be_lun->blocksize <<
			    list->lun->be_lun->pblockexp;
		if (pbo && list->lun->be_lun)
			*pbo = list->lun->be_lun->blocksize *
			    list->lun->be_lun->pblockoff;
		return (list->lun->lun);
	}
	if (idx >= list->ncscd)
		return (UINT64_MAX);
	return (tpcl_resolve(list->lun->ctl_softc,
	    list->init_port, &list->cscd[idx], ss, pb, pbo));
}

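/*
 * Process one block-to-block copy segment.  On the first pass (stage 0)
 * allocate a bounce buffer and issue chained read/write I/O pairs of at
 * most TPC_MAX_IO_SIZE each, trimmed to the destination's physical block
 * boundaries where possible; on reentry (stage 1) reap the completed I/Os
 * and report the segment's result.
 */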
static int
tpc_process_b2b(struct tpc_list *list)
{
	struct scsi_ec_segment_b2b *seg;
	struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
	struct tpc_io *tior, *tiow;
	struct runl run, *prun;
	uint64_t sl, dl;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	int numlba;
	uint32_t srcblock, dstblock, pb, pbo, adj;

	if (list->stage == 1) {
		while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tior, links);
			ctl_free_io(tior->io);
			free(tior, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
		return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
	sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), &srcblock, NULL, NULL);
	dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), &dstblock, &pb, &pbo);
	if (sl >= CTL_MAX_LUNS || dl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}
	if (pbo > 0)
		pbo = pb - pbo;
	sdstp = &list->cscd[scsi_2btoul(seg->src_cscd)].dtsp;
	if (scsi_3btoul(sdstp->block_length) != 0)
		srcblock = scsi_3btoul(sdstp->block_length);
	ddstp = &list->cscd[scsi_2btoul(seg->dst_cscd)].dtsp;
	if (scsi_3btoul(ddstp->block_length) != 0)
		dstblock = scsi_3btoul(ddstp->block_length);
	numlba = scsi_2btoul(seg->number_of_blocks);
	if (seg->flags & EC_SEG_DC)
		numbytes = (off_t)numlba * dstblock;
	else
		numbytes = (off_t)numlba * srcblock;
	srclba = scsi_8btou64(seg->src_lba);
	dstlba = scsi_8btou64(seg->dst_lba);

//	printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
//	    (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
//	    dl, scsi_8btou64(seg->dst_lba));

	if (numbytes == 0)
		return (CTL_RETVAL_COMPLETE);

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x26, /*ascq*/ 0x0A, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	list->buf = malloc(numbytes, M_CTL, M_WAITOK);
	list->segbytes = numbytes;
	list->segsectors = numbytes / dstblock;
	donebytes = 0;
	TAILQ_INIT(&run);
	prun = &run;
	list->tbdio = 1;
	while (donebytes < numbytes) {
		roundbytes = numbytes - donebytes;
		if (roundbytes > TPC_MAX_IO_SIZE) {
			roundbytes = TPC_MAX_IO_SIZE;
			roundbytes -= roundbytes % dstblock;
			if (pb > dstblock) {
				adj = (dstlba * dstblock + roundbytes - pbo) % pb;
				if (roundbytes > adj)
					roundbytes -= adj;
			}
		}

		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		ctl_scsi_read_write(tior->io,
				    /*data_ptr*/ &list->buf[donebytes],
				    /*data_len*/ roundbytes,
				    /*read_op*/ 1,
				    /*byte2*/ 0,
				    /*minimum_cdb_size*/ 0,
				    /*lba*/ srclba,
				    /*num_blocks*/ roundbytes / srcblock,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->lun = sl;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_read_write(tiow->io,
				    /*data_ptr*/ &list->buf[donebytes],
				    /*data_len*/ roundbytes,
				    /*read_op*/ 0,
				    /*byte2*/ 0,
				    /*minimum_cdb_size*/ 0,
				    /*lba*/ dstlba,
				    /*num_blocks*/ roundbytes / dstblock,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = dl;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(prun, tior, rlinks);
		prun = &tior->run;
		donebytes += roundbytes;
		srclba += roundbytes / srcblock;
		dstlba += roundbytes / dstblock;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}

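/*
 * Process a "verify CSCD" segment: if the TUR bit is set, send TEST UNIT
 * READY to the resolved LUN to confirm it is accessible.
 */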
static int
tpc_process_verify(struct tpc_list *list)
{
	struct scsi_ec_segment_verify *seg;
	struct tpc_io *tio;
	uint64_t sl;

	if (list->stage == 1) {
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
	sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), NULL, NULL, NULL);
	if (sl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Verify %ju\n", sl);

	if ((seg->tur & 0x01) == 0)
		return (CTL_RETVAL_COMPLETE);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = sl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}

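/*
 * Process a "register key" segment: issue PERSISTENT RESERVE OUT
 * (REGISTER) on the resolved destination LUN with the reservation keys
 * carried in the segment descriptor.
 */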
static int
tpc_process_register_key(struct tpc_list *list)
{
	struct scsi_ec_segment_register_key *seg;
	struct tpc_io *tio;
	uint64_t dl;
	int datalen;

	if (list->stage == 1) {
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
	dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), NULL, NULL, NULL);
	if (dl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Register Key %ju\n", dl);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	datalen = sizeof(struct scsi_per_res_out_parms);
	list->buf = malloc(datalen, M_CTL, M_WAITOK);
	ctl_scsi_persistent_res_out(tio->io,
	    list->buf, datalen, SPRO_REGISTER, -1,
	    scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
	    /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = dl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}

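/* Sum of the lengths (in blocks) of all descriptors in a range list. */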
static off_t
tpc_ranges_length(struct scsi_range_desc *range, int nrange)
{
	off_t length = 0;
	int r;

	for (r = 0; r < nrange; r++)
		length += scsi_4btoul(range[r].length);
	return (length);
}

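/*
 * Translate a linear block offset into a (range index, offset-in-range)
 * pair.  Returns -1 if the offset lies beyond the end of the range list.
 */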
static int
tpc_skip_ranges(struct scsi_range_desc *range, int nrange, off_t skip,
    int *srange, off_t *soffset)
{
	off_t off;
	int r;

	r = 0;
	off = 0;
	while (r < nrange) {
		if (skip - off < scsi_4btoul(range[r].length)) {
			*srange = r;
			*soffset = skip - off;
			return (0);
		}
		off += scsi_4btoul(range[r].length);
		r++;
	}
	return (-1);
}

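/*
 * Process one chunk of a WRITE USING TOKEN operation: map the current
 * progress onto the source (token) and destination range lists, then
 * copy up to TPC_MAX_IOCHUNK_SIZE bytes via chained read/write pairs.
 * Called repeatedly until the destination ranges are exhausted.
 */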
static int
tpc_process_wut(struct tpc_list *list)
{
	struct tpc_io *tio, *tior, *tiow;
	struct runl run, *prun;
	int drange, srange;
	off_t doffset, soffset;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	uint32_t srcblock, dstblock, pb, pbo, adj;

	if (list->stage > 0) {
		/* Cleanup after previous rounds. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
	}

	/* Check where we are on destination ranges list. */
	if (tpc_skip_ranges(list->range, list->nrange, list->cursectors,
	    &drange, &doffset) != 0)
		return (CTL_RETVAL_COMPLETE);
	dstblock = list->lun->be_lun->blocksize;
	pb = dstblock << list->lun->be_lun->pblockexp;
	if (list->lun->be_lun->pblockoff > 0)
		pbo = pb - dstblock * list->lun->be_lun->pblockoff;
	else
		pbo = 0;

	/* Check where we are on source ranges list. */
	srcblock = list->token->blocksize;
	if (tpc_skip_ranges(list->token->range, list->token->nrange,
	    list->offset_into_rod + list->cursectors * dstblock / srcblock,
	    &srange, &soffset) != 0) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x0d, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	srclba = scsi_8btou64(list->token->range[srange].lba) + soffset;
	dstlba = scsi_8btou64(list->range[drange].lba) + doffset;
	numbytes = srcblock *
	    (scsi_4btoul(list->token->range[srange].length) - soffset);
	numbytes = omin(numbytes, dstblock *
	    (scsi_4btoul(list->range[drange].length) - doffset));
	if (numbytes > TPC_MAX_IOCHUNK_SIZE) {
		numbytes = TPC_MAX_IOCHUNK_SIZE;
		numbytes -= numbytes % dstblock;
		if (pb > dstblock) {
			adj = (dstlba * dstblock + numbytes - pbo) % pb;
			if (numbytes > adj)
				numbytes -= adj;
		}
	}

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x26, /*ascq*/ 0x0A, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	list->buf = malloc(numbytes, M_CTL, M_WAITOK |
	    (list->token == NULL ? M_ZERO : 0));
	list->segbytes = numbytes;
	list->segsectors = numbytes / dstblock;
//printf("Copy chunk of %ju sectors from %ju to %ju\n", list->segsectors,
//    srclba, dstlba);
	donebytes = 0;
	TAILQ_INIT(&run);
	prun = &run;
	list->tbdio = 1;
	TAILQ_INIT(&list->allio);
	while (donebytes < numbytes) {
		roundbytes = numbytes - donebytes;
		if (roundbytes > TPC_MAX_IO_SIZE) {
			roundbytes = TPC_MAX_IO_SIZE;
			roundbytes -= roundbytes % dstblock;
			if (pb > dstblock) {
				adj = (dstlba * dstblock + roundbytes - pbo) % pb;
				if (roundbytes > adj)
					roundbytes -= adj;
			}
		}

		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		ctl_scsi_read_write(tior->io,
				    /*data_ptr*/ &list->buf[donebytes],
				    /*data_len*/ roundbytes,
				    /*read_op*/ 1,
				    /*byte2*/ 0,
				    /*minimum_cdb_size*/ 0,
				    /*lba*/ srclba,
				    /*num_blocks*/ roundbytes / srcblock,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->lun = list->token->lun;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_read_write(tiow->io,
				    /*data_ptr*/ &list->buf[donebytes],
				    /*data_len*/ roundbytes,
				    /*read_op*/ 0,
				    /*byte2*/ 0,
				    /*minimum_cdb_size*/ 0,
				    /*lba*/ dstlba,
				    /*num_blocks*/ roundbytes / dstblock,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = list->lun->lun;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(prun, tior, rlinks);
		prun = &tior->run;
		donebytes += roundbytes;
		srclba += roundbytes / srcblock;
		dstlba += roundbytes / dstblock;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}

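/*
 * WRITE USING TOKEN without a ROD token (the block-zero case): no source
 * data exists, so issue WRITE SAME with a zeroed block to every
 * destination range in a single round.
 */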
static int
tpc_process_zero_wut(struct tpc_list *list)
{
	struct tpc_io *tio, *tiow;
	struct runl run, *prun;
	int r;
	uint32_t dstblock, len;

	if (list->stage > 0) {
complete:
		/* Cleanup after previous rounds. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
		return (CTL_RETVAL_COMPLETE);
	}

	dstblock = list->lun->be_lun->blocksize;
	list->buf = malloc(dstblock, M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&run);
	prun = &run;
	list->tbdio = 1;
	TAILQ_INIT(&list->allio);
	list->segsectors = 0;
	for (r = 0; r < list->nrange; r++) {
		len = scsi_4btoul(list->range[r].length);
		if (len == 0)
			continue;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_write_same(tiow->io,
				    /*data_ptr*/ list->buf,
				    /*data_len*/ dstblock,
				    /*byte2*/ 0,
				    /*lba*/ scsi_8btou64(list->range[r].lba),
				    /*num_blocks*/ len,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = list->lun->lun;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(prun, tiow, rlinks);
		prun = &tiow->run;
		list->segsectors += len;
	}
	list->segbytes = list->segsectors * dstblock;

	if (TAILQ_EMPTY(&run))
		goto complete;

	while ((tiow = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tiow, rlinks);
		if (tpcl_queue(tiow->io, tiow->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}

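/*
 * Main copy state machine: run the list's segments (or WUT ranges) until
 * one of them queues I/O, fails, or the list completes.  On completion,
 * record the final status and either free the list or keep it around for
 * later status retrieval, depending on its list ID usage.
 */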
static void
tpc_process(struct tpc_list *list)
{
	struct ctl_lun *lun = list->lun;
	struct ctl_softc *softc = lun->ctl_softc;
	struct scsi_ec_segment *seg;
	struct ctl_scsiio *ctsio = list->ctsio;
	int retval = CTL_RETVAL_COMPLETE;

	if (list->service_action == EC_WUT) {
		if (list->token != NULL)
			retval = tpc_process_wut(list);
		else
			retval = tpc_process_zero_wut(list);
		if (retval == CTL_RETVAL_QUEUED)
			return;
		if (retval == CTL_RETVAL_ERROR) {
			list->error = 1;
			goto done;
		}
	} else {
//printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
		while (list->curseg < list->nseg) {
			seg = list->seg[list->curseg];
			switch (seg->type_code) {
			case EC_SEG_B2B:
				retval = tpc_process_b2b(list);
				break;
			case EC_SEG_VERIFY:
				retval = tpc_process_verify(list);
				break;
			case EC_SEG_REGISTER_KEY:
				retval = tpc_process_register_key(list);
				break;
			default:
				ctl_set_sense(ctsio, /*current_error*/ 1,
				    /*sense_key*/ SSD_KEY_COPY_ABORTED,
				    /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
				goto done;
			}
			if (retval == CTL_RETVAL_QUEUED)
				return;
			if (retval == CTL_RETVAL_ERROR) {
				list->error = 1;
				goto done;
			}
			list->curseg++;
			list->stage = 0;
		}
	}

	ctl_set_success(ctsio);

done:
//printf("ZZZ done\n");
	free(list->params, M_CTL);
	list->params = NULL;
	if (list->token) {
		mtx_lock(&softc->tpc_lock);
		if (--list->token->active == 0)
			list->token->last_active = time_uptime;
		mtx_unlock(&softc->tpc_lock);
		list->token = NULL;
	}
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	} else {
		list->completed = 1;
		list->last_active = time_uptime;
		list->sense_data = ctsio->sense_data;
		list->sense_len = ctsio->sense_len;
		list->scsi_status = ctsio->scsi_status;
	}
	mtx_unlock(&lun->lun_lock);

	ctl_done((union ctl_io *)ctsio);
}

/*
 * For any sort of check condition, busy, etc., we just retry.  We do not
 * decrement the retry count for unit attention type errors.  These are
 * normal, and we want to save the retry count for "real" errors.  Otherwise,
 * we could end up with situations where a command will succeed in some
 * situations and fail in others, depending on whether a unit attention is
 * pending.  Also, some of our error recovery actions, most notably the
 * LUN reset action, will cause a unit attention.
 *
 * We can add more detail here later if necessary.
 */
static tpc_error_action
tpc_checkcond_parse(union ctl_io *io)
{
	tpc_error_action error_action;
	int error_code, sense_key, asc, ascq;

	/*
	 * Default to retrying the command.
	 */
	error_action = TPC_ERR_RETRY;

	scsi_extract_sense_len(&io->scsiio.sense_data,
			       io->scsiio.sense_len,
			       &error_code,
			       &sense_key,
			       &asc,
			       &ascq,
			       /*show_errors*/ 1);

	switch (error_code) {
	case SSD_DEFERRED_ERROR:
	case SSD_DESC_DEFERRED_ERROR:
		error_action |= TPC_ERR_NO_DECREMENT;
		break;
	case SSD_CURRENT_ERROR:
	case SSD_DESC_CURRENT_ERROR:
	default:
		switch (sense_key) {
		case SSD_KEY_UNIT_ATTENTION:
			error_action |= TPC_ERR_NO_DECREMENT;
			break;
		case SSD_KEY_HARDWARE_ERROR:
			/*
			 * This is our generic "something bad happened"
			 * error code.  It often isn't recoverable.
			 */
			if ((asc == 0x44) && (ascq == 0x00))
				error_action = TPC_ERR_FAIL;
			break;
		case SSD_KEY_NOT_READY:
			/*
			 * If the LUN is powered down, there likely isn't
			 * much point in retrying right now.
			 */
			if ((asc == 0x04) && (ascq == 0x02))
				error_action = TPC_ERR_FAIL;
			/*
			 * If the LUN is offline, there probably isn't much
			 * point in retrying, either.
			 */
			if ((asc == 0x04) && (ascq == 0x03))
				error_action = TPC_ERR_FAIL;
			break;
		}
	}
	return (error_action);
}

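/*
 * Map an I/O completion status to a retry/fail decision; only SCSI
 * check conditions get the detailed treatment above.
 */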
static tpc_error_action
tpc_error_parse(union ctl_io *io)
{
	tpc_error_action error_action = TPC_ERR_RETRY;

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		switch (io->io_hdr.status & CTL_STATUS_MASK) {
		case CTL_SCSI_ERROR:
			switch (io->scsiio.scsi_status) {
			case SCSI_STATUS_CHECK_COND:
				error_action = tpc_checkcond_parse(io);
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
		break;
	case CTL_IO_TASK:
		break;
	default:
		panic("%s: invalid ctl_io type %d\n", __func__,
		      io->io_hdr.io_type);
		break;
	}
	return (error_action);
}

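/*
 * Completion callback for copy I/O.  Retries transient errors, records
 * failures, launches the I/Os chained behind the finished one, and kicks
 * the state machine again once the last outstanding I/O of the round
 * completes.
 */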
void
tpc_done(union ctl_io *io)
{
	struct tpc_io *tio, *tior;

	/*
	 * Very minimal retry logic.  We basically retry if we got an error
	 * back, and the retry count is greater than 0.  If we ever want
	 * more sophisticated initiator type behavior, the CAM error
	 * recovery code in ../common might be helpful.
	 */
	tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
	if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
	 && (io->io_hdr.retries > 0)) {
		ctl_io_status old_status;
		tpc_error_action error_action;

		error_action = tpc_error_parse(io);
		switch (error_action & TPC_ERR_MASK) {
		case TPC_ERR_FAIL:
			break;
		case TPC_ERR_RETRY:
		default:
			if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
				io->io_hdr.retries--;
			old_status = io->io_hdr.status;
			io->io_hdr.status = CTL_STATUS_NONE;
			io->io_hdr.flags &= ~CTL_FLAG_ABORT;
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
				printf("%s: error returned from tpcl_queue()!\n",
				       __func__);
				io->io_hdr.status = old_status;
			} else
				return;
		}
	}

	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
		tio->list->error = 1;
	else
		atomic_add_int(&tio->list->curops, 1);
	if (!tio->list->error && !tio->list->abort) {
		while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
			TAILQ_REMOVE(&tio->run, tior, rlinks);
			atomic_add_int(&tio->list->tbdio, 1);
			if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
				panic("tpcl_queue() error");
		}
	}
	if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
		tpc_process(tio->list);
}

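/*
 * EXTENDED COPY (LID1): fetch and validate the parameter list, parse the
 * CSCD and segment descriptors into a struct tpc_list, register it on
 * the LUN and start processing it.
 */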
int
ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
{
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid1_data *data;
	struct ctl_lun *lun;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
	    len > sizeof(struct scsi_extended_copy_lid1_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr;
	lencscd = scsi_2btoul(data->cscd_list_length);
	lenseg = scsi_4btoul(data->segment_list_length);
	leninl = scsi_4btoul(data->inline_data_length);
	if (len < sizeof(struct scsi_extended_copy_lid1_data) +
	    lencscd + lenseg + leninl ||
	    leninl > TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
		goto done;
	}
	if (lencscd + lenseg > TPC_MAX_LIST) {
		ctl_set_param_len_error(ctsio);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
	if (value != NULL && strcmp(value, "on") == 0)
		list->init_port = -1;
	else
		list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	list->list_id = data->list_identifier;
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
	ptr = &data->data[lencscd];
	for (nseg = 0, off = 0; off < lenseg; nseg++) {
		if (nseg >= TPC_MAX_SEGS) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
			goto done;
		}
		list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
		off += sizeof(struct scsi_ec_segment) +
		    scsi_2btoul(list->seg[nseg]->descr_length);
	}
	list->inl = &data->data[lencscd + lenseg];
	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
	list->nseg = nseg;
	list->leninl = leninl;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
		if (tlist != NULL && !tlist->completed) {
			mtx_unlock(&lun->lun_lock);
			free(list, M_CTL);
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
			    /*bit*/ 0);
			goto done;
		}
		if (tlist != NULL) {
			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
			free(tlist, M_CTL);
		}
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

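/*
 * EXTENDED COPY (LID4) service action.  Identical in structure to the
 * LID1 handler above, but parses the LID4 parameter header, in which
 * the list identifier is a four-byte field and the segment and inline
 * data lengths are two-byte fields.
 */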
int
ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
{
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid4_data *data;
	struct ctl_lun *lun;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
	    len > sizeof(struct scsi_extended_copy_lid4_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr;
	lencscd = scsi_2btoul(data->cscd_list_length);
	lenseg = scsi_2btoul(data->segment_list_length);
	leninl = scsi_2btoul(data->inline_data_length);
	if (len < sizeof(struct scsi_extended_copy_lid4_data) +
	    lencscd + lenseg + leninl ||
	    leninl > TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
		goto done;
	}
	if (lencscd + lenseg > TPC_MAX_LIST) {
		ctl_set_param_len_error(ctsio);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
	if (value != NULL && strcmp(value, "on") == 0)
		list->init_port = -1;
	else
		list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(data->list_identifier);
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
	ptr = &data->data[lencscd];
	for (nseg = 0, off = 0; off < lenseg; nseg++) {
		if (nseg >= TPC_MAX_SEGS) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
			goto done;
		}
		list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
		off += sizeof(struct scsi_ec_segment) +
		    scsi_2btoul(list->seg[nseg]->descr_length);
	}
	list->inl = &data->data[lencscd + lenseg];
	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
	list->nseg = nseg;
	list->leninl = leninl;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
		if (tlist != NULL && !tlist->completed) {
			mtx_unlock(&lun->lun_lock);
			free(list, M_CTL);
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
			    /*bit*/ 0);
			goto done;
		}
		if (tlist != NULL) {
			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
			free(tlist, M_CTL);
		}
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

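/*
 * Build a 512-byte ROD token for POPULATE TOKEN.  The body carries a
 * locally unique token id, a CSCD ID descriptor naming this LUN (NAA
 * preferred, EUI-64 as fallback), the represented data length, device
 * type specific data shaped like a READ CAPACITY(16) payload, the
 * target device ID, and random fill to make tokens hard to guess.
 */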
static void
tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len,
    struct scsi_token *token)
{
	static int id = 0;
	struct scsi_vpd_id_descriptor *idd = NULL;
	struct scsi_ec_cscd_id *cscd;
	struct scsi_read_capacity_data_long *dtsd;
	int targid_len;

	scsi_ulto4b(ROD_TYPE_AUR, token->type);
	scsi_ulto2b(0x01f8, token->length);
	scsi_u64to8b(atomic_fetchadd_int(&id, 1), &token->body[0]);
	if (lun->lun_devid)
		idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
		    lun->lun_devid->data, lun->lun_devid->len,
		    scsi_devid_is_lun_naa);
	if (idd == NULL && lun->lun_devid)
		idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
		    lun->lun_devid->data, lun->lun_devid->len,
		    scsi_devid_is_lun_eui64);
	if (idd != NULL) {
		cscd = (struct scsi_ec_cscd_id *)&token->body[8];
		cscd->type_code = EC_CSCD_ID;
		cscd->luidt_pdt = T_DIRECT;
		memcpy(&cscd->codeset, idd, 4 + idd->length);
		scsi_ulto3b(lun->be_lun->blocksize, cscd->dtsp.block_length);
	}
	scsi_u64to8b(0, &token->body[40]); /* XXX: Should be 128bit value. */
	scsi_u64to8b(len, &token->body[48]);

	/* ROD token device type specific data (RC16 without first field) */
	dtsd = (struct scsi_read_capacity_data_long *)&token->body[88 - 8];
	scsi_ulto4b(lun->be_lun->blocksize, dtsd->length);
	dtsd->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
	scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, dtsd->lalba_lbp);
	if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
		dtsd->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;

	if (port->target_devid) {
		targid_len = port->target_devid->len;
		memcpy(&token->body[120], port->target_devid->data, targid_len);
	} else
		targid_len = 32;
	arc4rand(&token->body[120 + targid_len], 384 - targid_len, 0);
}

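/*
 * POPULATE TOKEN service action.  Validates the range descriptors,
 * creates a ROD token covering the described blocks, records it both
 * in a completed tpc_list (so RECEIVE ROD TOKEN INFORMATION can report
 * it) and on the softc-wide token list, and arms the inactivity
 * timeout that will eventually expire the token.
 */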
int
ctl_populate_token(struct ctl_scsiio *ctsio)
{
	struct scsi_populate_token *cdb;
	struct scsi_populate_token_data *data;
	struct ctl_softc *softc;
	struct ctl_lun *lun;
	struct ctl_port *port;
	struct tpc_list *list, *tlist;
	struct tpc_token *token;
	int len, lendesc;

	CTL_DEBUG_PRINT(("ctl_populate_token\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	softc = lun->ctl_softc;
	port = softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
	cdb = (struct scsi_populate_token *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_populate_token_data) ||
	    len > sizeof(struct scsi_populate_token_data) +
	     TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_populate_token_data *)ctsio->kern_data_ptr;
	lendesc = scsi_2btoul(data->range_descriptor_length);
	if (len < sizeof(struct scsi_populate_token_data) + lendesc) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
/*
	printf("PT(list=%u) flags=%x to=%d rt=%x len=%x\n",
	    scsi_4btoul(cdb->list_identifier),
	    data->flags, scsi_4btoul(data->inactivity_timeout),
	    scsi_4btoul(data->rod_type),
	    scsi_2btoul(data->range_descriptor_length));
*/
	if ((data->flags & EC_PT_RTV) &&
	    scsi_4btoul(data->rod_type) != ROD_TYPE_AUR) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(cdb->list_identifier);
	list->flags = data->flags;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
	if (tlist != NULL && !tlist->completed) {
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}
	if (tlist != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
		free(tlist, M_CTL);
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	token = malloc(sizeof(*token), M_CTL, M_WAITOK | M_ZERO);
	token->lun = lun->lun;
	token->blocksize = lun->be_lun->blocksize;
	token->params = ctsio->kern_data_ptr;
	token->range = &data->desc[0];
	token->nrange = scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc);
	list->cursectors = tpc_ranges_length(token->range, token->nrange);
	list->curbytes = (off_t)list->cursectors * lun->be_lun->blocksize;
	tpc_create_token(lun, port, list->curbytes,
	    (struct scsi_token *)token->token);
	token->active = 0;
	token->last_active = time_uptime;
	token->timeout = scsi_4btoul(data->inactivity_timeout);
	if (token->timeout == 0)
		token->timeout = TPC_DFL_TOKEN_TIMEOUT;
	else if (token->timeout < TPC_MIN_TOKEN_TIMEOUT)
		token->timeout = TPC_MIN_TOKEN_TIMEOUT;
	else if (token->timeout > TPC_MAX_TOKEN_TIMEOUT) {
		/* Unsupported timeout: undo the allocations and fail. */
		mtx_lock(&lun->lun_lock);
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		free(token, M_CTL);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 4, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}
	memcpy(list->res_token, token->token, sizeof(list->res_token));
	list->res_token_valid = 1;
	list->curseg = 0;
	list->completed = 1;
	list->last_active = time_uptime;
	mtx_lock(&softc->tpc_lock);
	TAILQ_INSERT_TAIL(&softc->tpc_tokens, token, links);
	mtx_unlock(&softc->tpc_lock);
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

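/*
 * WRITE USING TOKEN service action.  Looks up the ROD token supplied
 * by the initiator (the well-known block-device zero token is handled
 * without a lookup) and queues a tpc_list that writes the described
 * ranges from the token's data.  EC_WUT_DEL_TKN zeroes the token's
 * inactivity timeout so it expires as soon as it becomes inactive.
 */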
int
ctl_write_using_token(struct ctl_scsiio *ctsio)
{
	struct scsi_write_using_token *cdb;
	struct scsi_write_using_token_data *data;
	struct ctl_softc *softc;
	struct ctl_lun *lun;
	struct tpc_list *list, *tlist;
	struct tpc_token *token;
	int len, lendesc;

	CTL_DEBUG_PRINT(("ctl_write_using_token\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	softc = lun->ctl_softc;
	cdb = (struct scsi_write_using_token *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_write_using_token_data) ||
	    len > sizeof(struct scsi_write_using_token_data) +
	     TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr;
	lendesc = scsi_2btoul(data->range_descriptor_length);
	if (len < sizeof(struct scsi_write_using_token_data) + lendesc) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
/*
	printf("WUT(list=%u) flags=%x off=%ju len=%x\n",
	    scsi_4btoul(cdb->list_identifier),
	    data->flags, scsi_8btou64(data->offset_into_rod),
	    scsi_2btoul(data->range_descriptor_length));
*/
	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(cdb->list_identifier);
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->range = &data->desc[0];
	list->nrange = scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc);
	list->offset_into_rod = scsi_8btou64(data->offset_into_rod);
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
	if (tlist != NULL && !tlist->completed) {
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}
	if (tlist != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
		free(tlist, M_CTL);
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	/* Block device zero ROD token -> no token. */
	if (scsi_4btoul(data->rod_token) == ROD_TYPE_BLOCK_ZERO) {
		tpc_process(list);
		return (CTL_RETVAL_COMPLETE);
	}

	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
		if (memcmp(token->token, data->rod_token,
		    sizeof(data->rod_token)) == 0)
			break;
	}
	if (token != NULL) {
		token->active++;
		list->token = token;
		if (data->flags & EC_WUT_DEL_TKN)
			token->timeout = 0;
	}
	mtx_unlock(&softc->tpc_lock);
	if (token == NULL) {
		mtx_lock(&lun->lun_lock);
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x23, /*ascq*/ 0x04, SSD_ELEM_NONE);
		goto done;
	}

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

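/*
 * RECEIVE ROD TOKEN INFORMATION service action.  Reports the status of
 * the PT/WUT list matching the given list identifier, including any
 * saved sense data and, if one was created, the ROD token itself; a
 * completed list is removed from the LUN once it has been reported.
 */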
int
ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_rod_token_information *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	uint8_t *ptr;
	int retval;
	int alloc_len, total_len, token_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n"));

	cdb = (struct scsi_receive_rod_token_information *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	token_len = list_copy.res_token_valid ? 2 + sizeof(list_copy.res_token) : 0;
	total_len = sizeof(*data) + list_copy.sense_len + 4 + token_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len +
	    4 + token_len, data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	data->transfer_count_units = RCS_TC_LBAS;
	scsi_u64to8b(list_copy.cursectors, data->transfer_count);
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->length_of_the_sense_data_field = list_copy.sense_len;
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ptr = &data->sense_data[data->length_of_the_sense_data_field];
	scsi_ulto4b(token_len, &ptr[0]);
	if (list_copy.res_token_valid) {
		scsi_ulto2b(0, &ptr[4]);
		memcpy(&ptr[6], list_copy.res_token, sizeof(list_copy.res_token));
	}
/*
	printf("RRTI(list=%u) valid=%d\n",
	    scsi_4btoul(cdb->list_identifier), list_copy.res_token_valid);
*/
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

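/*
 * REPORT ALL ROD TOKENS service action.  Returns the 96-byte
 * management portion of every token currently known to the softc,
 * capped at 512 entries.
 */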
int
ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun;
	struct scsi_report_all_rod_tokens *cdb;
	struct scsi_report_all_rod_tokens_data *data;
	struct tpc_token *token;
	int retval;
	int alloc_len, total_len, tokens, i;

	CTL_DEBUG_PRINT(("ctl_report_all_rod_tokens\n"));

	cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	softc = lun->ctl_softc;

	retval = CTL_RETVAL_COMPLETE;

	tokens = 0;
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links)
		tokens++;
	mtx_unlock(&softc->tpc_lock);
	if (tokens > 512)
		tokens = 512;

	total_len = sizeof(*data) + tokens * 96;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr;
	i = 0;
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
		if (i >= tokens)
			break;
		memcpy(&data->rod_management_token_list[i * 96],
		    token->token, 96);
		i++;
	}
	mtx_unlock(&softc->tpc_lock);
	scsi_ulto4b(sizeof(*data) - 4 + i * 96, data->available_data);
/*
	printf("RART tokens=%d\n", i);
*/
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}