/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2014-2021 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>
#include <machine/atomic.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_tpc.h>
#include <cam/ctl/ctl_error.h>

#define	TPC_MAX_CSCDS	64
#define	TPC_MAX_SEGS	64
#define	TPC_MAX_SEG	0
#define	TPC_MAX_LIST	8192
#define	TPC_MAX_INLINE	0
#define	TPC_MAX_LISTS	255
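/*
 * Each backing I/O is capped at 8 x maxphys, with maxphys itself clamped
 * to the [128KB, 1MB] range, i.e. 1MB to 8MB per I/O (8MB with the usual
 * 1MB maxphys).  A WRITE USING TOKEN chunk covers four such I/Os.
 */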
#define	TPC_MAX_IO_SIZE	(8 * MIN(1024 * 1024, MAX(128 * 1024, maxphys)))
#define	TPC_MAX_IOCHUNK_SIZE	(TPC_MAX_IO_SIZE * 4)
#define	TPC_MIN_TOKEN_TIMEOUT	1
#define	TPC_DFL_TOKEN_TIMEOUT	60
#define	TPC_MAX_TOKEN_TIMEOUT	600

MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");

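/*
 * Disposition for a failed backing I/O: the low byte selects between
 * retrying and failing the copy, while TPC_ERR_NO_DECREMENT tells
 * tpc_done() to retry without consuming one of the I/O's retries.
 */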
typedef enum {
	TPC_ERR_RETRY		= 0x000,
	TPC_ERR_FAIL		= 0x001,
	TPC_ERR_MASK		= 0x0ff,
	TPC_ERR_NO_DECREMENT	= 0x100
} tpc_error_action;

struct tpc_list;
TAILQ_HEAD(runl, tpc_io);
struct tpc_io {
	union ctl_io		*io;
	uint8_t			 target;
	uint32_t		 cscd;
	uint64_t		 lun;
	uint8_t			*buf;
	struct tpc_list		*list;
	struct runl		 run;
	TAILQ_ENTRY(tpc_io)	 rlinks;
	TAILQ_ENTRY(tpc_io)	 links;
};

struct tpc_token {
	uint8_t			 token[512];
	uint64_t		 lun;
	uint32_t		 blocksize;
	uint8_t			*params;
	struct scsi_range_desc	*range;
	int			 nrange;
	int			 active;
	time_t			 last_active;
	uint32_t		 timeout;
	TAILQ_ENTRY(tpc_token)	 links;
};

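/*
 * State of a single EXTENDED COPY or WRITE USING TOKEN operation: the
 * parsed parameter list up front, followed by progress counters for the
 * segment currently being processed.
 */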
struct tpc_list {
	uint8_t			 service_action;
	int			 init_port;
	uint32_t		 init_idx;
	uint32_t		 list_id;
	uint8_t			 flags;
	uint8_t			*params;
	struct scsi_ec_cscd	*cscd;
	struct scsi_ec_segment	*seg[TPC_MAX_SEGS];
	uint8_t			*inl;
	int			 ncscd;
	int			 nseg;
	int			 leninl;
	struct tpc_token	*token;
	struct scsi_range_desc	*range;
	int			 nrange;
	off_t			 offset_into_rod;

	int			 curseg;
	off_t			 cursectors;
	off_t			 curbytes;
	int			 curops;
	int			 stage;
	off_t			 segsectors;
	off_t			 segbytes;
	int			 tbdio;
	int			 error;
	int			 abort;
	int			 completed;
	time_t			 last_active;
	TAILQ_HEAD(, tpc_io)	 allio;
	struct scsi_sense_data	 fwd_sense_data;
	uint8_t			 fwd_sense_len;
	uint8_t			 fwd_scsi_status;
	uint8_t			 fwd_target;
	uint16_t		 fwd_cscd;
	struct scsi_sense_data	 sense_data;
	uint8_t			 sense_len;
	uint8_t			 scsi_status;
	struct ctl_scsiio	*ctsio;
	struct ctl_lun		*lun;
	int			 res_token_valid;
	uint8_t			 res_token[512];
	TAILQ_ENTRY(tpc_list)	 links;
};

static void
tpc_timeout(void *arg)
{
	struct ctl_softc *softc = arg;
	struct ctl_lun *lun;
	struct tpc_token *token, *ttoken;
	struct tpc_list *list, *tlist;

	/* Free completed lists with expired timeout. */
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
			if (!list->completed || time_uptime < list->last_active +
			    TPC_DFL_TOKEN_TIMEOUT)
				continue;
			TAILQ_REMOVE(&lun->tpc_lists, list, links);
			free(list, M_CTL);
		}
		mtx_unlock(&lun->lun_lock);
	}

	/* Free inactive ROD tokens with expired timeout. */
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
		if (token->active ||
		    time_uptime < token->last_active + token->timeout + 1)
			continue;
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
	callout_schedule_sbt(&softc->tpc_timeout, SBT_1S, SBT_1S, 0);
}

void
ctl_tpc_init(struct ctl_softc *softc)
{

	mtx_init(&softc->tpc_lock, "CTL TPC mutex", NULL, MTX_DEF);
	TAILQ_INIT(&softc->tpc_tokens);
	callout_init_mtx(&softc->tpc_timeout, &softc->ctl_lock, 0);
	callout_reset_sbt(&softc->tpc_timeout, SBT_1S, SBT_1S,
	    tpc_timeout, softc, 0);
}

void
ctl_tpc_shutdown(struct ctl_softc *softc)
{
	struct tpc_token *token;

	callout_drain(&softc->tpc_timeout);

	/* Free ROD tokens. */
	mtx_lock(&softc->tpc_lock);
	while ((token = TAILQ_FIRST(&softc->tpc_tokens)) != NULL) {
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
	mtx_destroy(&softc->tpc_lock);
}

void
ctl_tpc_lun_init(struct ctl_lun *lun)
{

	TAILQ_INIT(&lun->tpc_lists);
}

void
ctl_tpc_lun_clear(struct ctl_lun *lun, uint32_t initidx)
{
	struct tpc_list *list, *tlist;

	TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
		if (initidx != -1 && list->init_idx != initidx)
			continue;
		if (!list->completed)
			continue;
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
}

void
ctl_tpc_lun_shutdown(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	struct tpc_list *list;
	struct tpc_token *token, *ttoken;

	/* Free lists for this LUN. */
	while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		KASSERT(list->completed,
		    ("Not completed TPC (%p) on shutdown", list));
		free(list, M_CTL);
	}

	/* Free ROD tokens for this LUN. */
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
		if (token->lun != lun->lun || token->active)
			continue;
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
}

int
ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_vpd_tpc *tpc_ptr;
	struct scsi_vpd_tpc_descriptor *d_ptr;
	struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr;
	struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
	struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
	struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
	struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
	struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
	struct scsi_vpd_tpc_descriptor_rtf *rtf_ptr;
	struct scsi_vpd_tpc_descriptor_rtf_block *rtfb_ptr;
	struct scsi_vpd_tpc_descriptor_srt *srt_ptr;
	struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr;
	struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
	int data_len;

	data_len = sizeof(struct scsi_vpd_tpc) +
	    sizeof(struct scsi_vpd_tpc_descriptor_bdrl) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
	     2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 11, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_pd) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_rtf) +
	    sizeof(struct scsi_vpd_tpc_descriptor_rtf_block) +
	    sizeof(struct scsi_vpd_tpc_descriptor_srt) +
	    2*sizeof(struct scsi_vpd_tpc_descriptor_srtd) +
	    sizeof(struct scsi_vpd_tpc_descriptor_gco);

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	tpc_ptr->page_code = SVPD_SCSI_TPC;
	scsi_ulto2b(data_len - 4, tpc_ptr->page_length);

	/* Block Device ROD Limits */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
	bdrl_ptr = (struct scsi_vpd_tpc_descriptor_bdrl *)d_ptr;
	scsi_ulto2b(SVPD_TPC_BDRL, bdrl_ptr->desc_type);
	scsi_ulto2b(sizeof(*bdrl_ptr) - 4, bdrl_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_SEGS, bdrl_ptr->maximum_ranges);
	scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
	    bdrl_ptr->maximum_inactivity_timeout);
	scsi_ulto4b(TPC_DFL_TOKEN_TIMEOUT,
	    bdrl_ptr->default_inactivity_timeout);
	scsi_u64to8b(0, bdrl_ptr->maximum_token_transfer_size);
	scsi_u64to8b(0, bdrl_ptr->optimal_transfer_count);

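	/*
	 * Descriptors are laid out back to back; advancing by desc_length
	 * past the parameters steps to the next one.
	 */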
	/* Supported commands */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
	sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 11;
	scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
	scd_ptr = &sc_ptr->descr[0];
	scd_ptr->opcode = EXTENDED_COPY;
	scd_ptr->sa_length = 5;
	scd_ptr->supported_service_actions[0] = EC_EC_LID1;
	scd_ptr->supported_service_actions[1] = EC_EC_LID4;
	scd_ptr->supported_service_actions[2] = EC_PT;
	scd_ptr->supported_service_actions[3] = EC_WUT;
	scd_ptr->supported_service_actions[4] = EC_COA;
	scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
	    &scd_ptr->supported_service_actions[scd_ptr->sa_length];
	scd_ptr->opcode = RECEIVE_COPY_STATUS;
	scd_ptr->sa_length = 6;
	scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
	scd_ptr->supported_service_actions[1] = RCS_RCFD;
	scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
	scd_ptr->supported_service_actions[3] = RCS_RCOP;
	scd_ptr->supported_service_actions[4] = RCS_RRTI;
	scd_ptr->supported_service_actions[5] = RCS_RART;

	/* Parameter data. */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
	scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);

	/* Supported Descriptors */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
	sd_ptr->list_length = 4;
	sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
	sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
	sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
	sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;

	/* Supported CSCD Descriptor IDs */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
	scsi_ulto2b(2, sdid_ptr->list_length);
	scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);

	/* ROD Token Features */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	rtf_ptr = (struct scsi_vpd_tpc_descriptor_rtf *)d_ptr;
	scsi_ulto2b(SVPD_TPC_RTF, rtf_ptr->desc_type);
	scsi_ulto2b(sizeof(*rtf_ptr) - 4 + sizeof(*rtfb_ptr), rtf_ptr->desc_length);
	rtf_ptr->remote_tokens = 0;
	scsi_ulto4b(TPC_MIN_TOKEN_TIMEOUT, rtf_ptr->minimum_token_lifetime);
	scsi_ulto4b(UINT32_MAX, rtf_ptr->maximum_token_lifetime);
	scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
	    rtf_ptr->maximum_token_inactivity_timeout);
	scsi_ulto2b(sizeof(*rtfb_ptr), rtf_ptr->type_specific_features_length);
	rtfb_ptr = (struct scsi_vpd_tpc_descriptor_rtf_block *)
	    &rtf_ptr->type_specific_features;
	rtfb_ptr->type_format = SVPD_TPC_RTF_BLOCK;
	scsi_ulto2b(sizeof(*rtfb_ptr) - 4, rtfb_ptr->desc_length);
	scsi_ulto2b(0, rtfb_ptr->optimal_length_granularity);
	scsi_u64to8b(0, rtfb_ptr->maximum_bytes);
	scsi_u64to8b(0, rtfb_ptr->optimal_bytes);
	scsi_u64to8b(UINT64_MAX, rtfb_ptr->optimal_bytes_to_token_per_segment);
	scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
	    rtfb_ptr->optimal_bytes_from_token_per_segment);

	/* Supported ROD Tokens */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	srt_ptr = (struct scsi_vpd_tpc_descriptor_srt *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SRT, srt_ptr->desc_type);
	scsi_ulto2b(sizeof(*srt_ptr) - 4 + 2*sizeof(*srtd_ptr), srt_ptr->desc_length);
	scsi_ulto2b(2*sizeof(*srtd_ptr), srt_ptr->rod_type_descriptors_length);
	srtd_ptr = (struct scsi_vpd_tpc_descriptor_srtd *)
	    &srt_ptr->rod_type_descriptors;
	scsi_ulto4b(ROD_TYPE_AUR, srtd_ptr->rod_type);
	srtd_ptr->flags = SVPD_TPC_SRTD_TIN | SVPD_TPC_SRTD_TOUT;
	scsi_ulto2b(0, srtd_ptr->preference_indicator);
	srtd_ptr++;
	scsi_ulto4b(ROD_TYPE_BLOCK_ZERO, srtd_ptr->rod_type);
	srtd_ptr->flags = SVPD_TPC_SRTD_TIN;
	scsi_ulto2b(0, srtd_ptr->preference_indicator);

	/* General Copy Operations */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
	scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
	scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
	scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
	gco_ptr->data_segment_granularity = 0;
	gco_ptr->inline_data_granularity = 0;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}

int
ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
{
	struct scsi_receive_copy_operating_parameters *cdb;
	struct scsi_receive_copy_operating_parameters_data *data;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_receive_copy_operating_parameters\n"));

	cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;

	retval = CTL_RETVAL_COMPLETE;

	total_len = sizeof(*data) + 4;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
	data->snlid = RCOP_SNLID;
	scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
	scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
	scsi_ulto4b(0, data->held_data_limit);
	scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
	scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
	data->maximum_concurrent_copies = TPC_MAX_LISTS;
	data->data_segment_granularity = 0;
	data->inline_data_granularity = 0;
	data->held_data_granularity = 0;
	data->implemented_descriptor_list_length = 4;
	data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
	data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
	data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
	data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

static struct tpc_list *
tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx)
{
	struct tpc_list *list;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id &&
		    list->init_idx == init_idx)
			break;
	}
	return (list);
}

int
ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_receive_copy_status_lid1 *cdb;
	struct scsi_receive_copy_status_lid1_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));

	cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4, data->available_data);
	if (list_copy.completed) {
		if (list_copy.error || list_copy.abort)
			data->copy_command_status = RCS_CCS_ERROR;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG;
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	if (list_copy.curbytes <= UINT32_MAX) {
		data->transfer_count_units = RCS_TC_BYTES;
		scsi_ulto4b(list_copy.curbytes, data->transfer_count);
	} else {
		data->transfer_count_units = RCS_TC_MBYTES;
		scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

int
ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_receive_copy_failure_details *cdb;
	struct scsi_receive_copy_failure_details_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));

	cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL || !list->completed) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	TAILQ_REMOVE(&lun->tpc_lists, list, links);
	free(list, M_CTL);
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
	if (list_copy.completed && (list_copy.error || list_copy.abort)) {
		scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
		    data->available_data);
		data->copy_command_status = RCS_CCS_ERROR;
	} else
		scsi_ulto4b(0, data->available_data);
	scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

int
ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_receive_copy_status_lid4 *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));

	cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
	    data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	data->transfer_count_units = RCS_TC_BYTES;
	scsi_u64to8b(list_copy.curbytes, data->transfer_count);
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->length_of_the_sense_data_field = list_copy.sense_len;
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

int
ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_copy_operation_abort *cdb;
	struct tpc_list *list;
	int retval;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));

	cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list->abort = 1;
	mtx_unlock(&lun->lun_lock);

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (retval);
}

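/*
 * Resolve a CSCD descriptor index to a LUN number, reporting the logical
 * block size (ss) and, if requested, the physical block size (pb) and
 * offset (pbo).  Index 0xffff names the LUN addressed by the copy
 * command itself.
 */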
static uint64_t
tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss,
    uint32_t *pb, uint32_t *pbo)
{

	if (idx == 0xffff) {
		if (ss)
			*ss = list->lun->be_lun->blocksize;
		if (pb)
			*pb = list->lun->be_lun->blocksize <<
			    list->lun->be_lun->pblockexp;
		if (pbo)
			*pbo = list->lun->be_lun->blocksize *
			    list->lun->be_lun->pblockoff;
		return (list->lun->lun);
	}
	if (idx >= list->ncscd)
		return (UINT64_MAX);
	return (tpcl_resolve(list->lun->ctl_softc,
	    list->init_port, &list->cscd[idx], ss, pb, pbo));
}

static void
tpc_set_io_error_sense(struct tpc_list *list)
{
	int flen;
	uint8_t csi[4];
	uint8_t sks[3];
	uint8_t fbuf[4 + 64];

	scsi_ulto4b(list->curseg, csi);
	if (list->fwd_cscd <= 0x07ff) {
		sks[0] = SSD_SKS_SEGMENT_VALID;
		scsi_ulto2b((uint8_t *)&list->cscd[list->fwd_cscd] -
		    list->params, &sks[1]);
	} else
		sks[0] = 0;
	if (list->fwd_scsi_status) {
		fbuf[0] = 0x0c;
		fbuf[2] = list->fwd_target;
		flen = list->fwd_sense_len;
		if (flen > 64) {
			flen = 64;
			fbuf[2] |= SSD_FORWARDED_FSDT;
		}
		fbuf[1] = 2 + flen;
		fbuf[3] = list->fwd_scsi_status;
		bcopy(&list->fwd_sense_data, &fbuf[4], flen);
		flen += 4;
	} else
		flen = 0;
	ctl_set_sense(list->ctsio, /*current_error*/ 1,
	    /*sense_key*/ SSD_KEY_COPY_ABORTED,
	    /*asc*/ 0x0d, /*ascq*/ 0x01,
	    SSD_ELEM_COMMAND, sizeof(csi), csi,
	    sks[0] ? SSD_ELEM_SKS : SSD_ELEM_SKIP, sizeof(sks), sks,
	    flen ? SSD_ELEM_DESC : SSD_ELEM_SKIP, flen, fbuf,
	    SSD_ELEM_NONE);
}

static int
tpc_process_b2b(struct tpc_list *list)
{
	struct scsi_ec_segment_b2b *seg;
	struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
	struct tpc_io *tior, *tiow;
	struct runl run;
	uint64_t sl, dl;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	int numlba;
	uint32_t srcblock, dstblock, pb, pbo, adj;
	uint16_t scscd, dcscd;
	uint8_t csi[4];

	scsi_ulto4b(list->curseg, csi);
	if (list->stage == 1) {
		while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tior, links);
			ctl_free_io(tior->io);
			free(tior->buf, M_CTL);
			free(tior, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			tpc_set_io_error_sense(list);
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
		return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
	scscd = scsi_2btoul(seg->src_cscd);
	dcscd = scsi_2btoul(seg->dst_cscd);
	sl = tpc_resolve(list, scscd, &srcblock, NULL, NULL);
	dl = tpc_resolve(list, dcscd, &dstblock, &pb, &pbo);
	if (sl == UINT64_MAX || dl == UINT64_MAX) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04,
		    SSD_ELEM_COMMAND, sizeof(csi), csi,
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}
	if (pbo > 0)
		pbo = pb - pbo;
	sdstp = &list->cscd[scscd].dtsp;
	if (scsi_3btoul(sdstp->block_length) != 0)
		srcblock = scsi_3btoul(sdstp->block_length);
	ddstp = &list->cscd[dcscd].dtsp;
	if (scsi_3btoul(ddstp->block_length) != 0)
		dstblock = scsi_3btoul(ddstp->block_length);
	numlba = scsi_2btoul(seg->number_of_blocks);
	if (seg->flags & EC_SEG_DC)
		numbytes = (off_t)numlba * dstblock;
	else
		numbytes = (off_t)numlba * srcblock;
	srclba = scsi_8btou64(seg->src_lba);
	dstlba = scsi_8btou64(seg->dst_lba);

//	printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
//	    (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
//	    dl, scsi_8btou64(seg->dst_lba));

	if (numbytes == 0)
		return (CTL_RETVAL_COMPLETE);

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x26, /*ascq*/ 0x0A,
		    SSD_ELEM_COMMAND, sizeof(csi), csi,
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	list->segbytes = numbytes;
	list->segsectors = numbytes / dstblock;
	donebytes = 0;
	TAILQ_INIT(&run);
	list->tbdio = 0;
	while (donebytes < numbytes) {
		roundbytes = numbytes - donebytes;
		if (roundbytes > TPC_MAX_IO_SIZE) {
			roundbytes = TPC_MAX_IO_SIZE;
			roundbytes -= roundbytes % dstblock;
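			/*
			 * Trim the round so it ends on a physical-block
			 * boundary where possible; adj is the tail that
			 * would spill past the last pb boundary (shifted
			 * by pbo), so later rounds stay aligned.
			 */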
			if (pb > dstblock) {
				adj = (dstlba * dstblock + roundbytes - pbo) % pb;
				if (roundbytes > adj)
					roundbytes -= adj;
			}
		}

		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->buf = malloc(roundbytes, M_CTL, M_WAITOK);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		ctl_scsi_read_write(tior->io,
		    /*data_ptr*/ tior->buf,
		    /*data_len*/ roundbytes,
		    /*read_op*/ 1,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ srclba,
		    /*num_blocks*/ roundbytes / srcblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->target = SSD_FORWARDED_SDS_EXSRC;
		tior->cscd = scscd;
		tior->lun = sl;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_read_write(tiow->io,
		    /*data_ptr*/ tior->buf,
		    /*data_len*/ roundbytes,
		    /*read_op*/ 0,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ dstlba,
		    /*num_blocks*/ roundbytes / dstblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->target = SSD_FORWARDED_SDS_EXDST;
		tiow->cscd = dcscd;
		tiow->lun = dl;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(&run, tior, rlinks);
		list->tbdio++;
		donebytes += roundbytes;
		srclba += roundbytes / srcblock;
		dstlba += roundbytes / dstblock;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}

static int
tpc_process_verify(struct tpc_list *list)
{
	struct scsi_ec_segment_verify *seg;
	struct tpc_io *tio;
	uint64_t sl;
	uint16_t cscd;
	uint8_t csi[4];

	scsi_ulto4b(list->curseg, csi);
	if (list->stage == 1) {
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			tpc_set_io_error_sense(list);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
	cscd = scsi_2btoul(seg->src_cscd);
	sl = tpc_resolve(list, cscd, NULL, NULL, NULL);
	if (sl == UINT64_MAX) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04,
		    SSD_ELEM_COMMAND, sizeof(csi), csi,
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Verify %ju\n", sl);

	if ((seg->tur & 0x01) == 0)
		return (CTL_RETVAL_COMPLETE);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->target = SSD_FORWARDED_SDS_EXSRC;
	tio->cscd = cscd;
	tio->lun = sl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}

static int
tpc_process_register_key(struct tpc_list *list)
{
	struct scsi_ec_segment_register_key *seg;
	struct tpc_io *tio;
	uint64_t dl;
	int datalen;
	uint16_t cscd;
	uint8_t csi[4];

	scsi_ulto4b(list->curseg, csi);
	if (list->stage == 1) {
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio->buf, M_CTL);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			tpc_set_io_error_sense(list);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
	cscd = scsi_2btoul(seg->dst_cscd);
	dl = tpc_resolve(list, cscd, NULL, NULL, NULL);
	if (dl == UINT64_MAX) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04,
		    SSD_ELEM_COMMAND, sizeof(csi), csi,
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Register Key %ju\n", dl);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	datalen = sizeof(struct scsi_per_res_out_parms);
	tio->buf = malloc(datalen, M_CTL, M_WAITOK);
	ctl_scsi_persistent_res_out(tio->io,
	    tio->buf, datalen, SPRO_REGISTER, -1,
	    scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
	    /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->target = SSD_FORWARDED_SDS_EXDST;
	tio->cscd = cscd;
	tio->lun = dl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}

static off_t
tpc_ranges_length(struct scsi_range_desc *range, int nrange)
{
	off_t length = 0;
	int r;

	for (r = 0; r < nrange; r++)
		length += scsi_4btoul(range[r].length);
	return (length);
}

static int
tpc_check_ranges_l(struct scsi_range_desc *range, int nrange, uint64_t maxlba,
    uint64_t *lba)
{
	uint64_t b1;
	uint32_t l1;
	int i;

	for (i = 0; i < nrange; i++) {
		b1 = scsi_8btou64(range[i].lba);
		l1 = scsi_4btoul(range[i].length);
		if (b1 + l1 < b1 || b1 + l1 > maxlba + 1) {
			*lba = MAX(b1, maxlba + 1);
			return (-1);
		}
	}
	return (0);
}

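/*
 * Reject parameter lists with overlapping ranges: [b1, b1 + l1) and
 * [b2, b2 + l2) intersect exactly when b1 + l1 > b2 && b2 + l2 > b1.
 */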
static int
tpc_check_ranges_x(struct scsi_range_desc *range, int nrange)
{
	uint64_t b1, b2;
	uint32_t l1, l2;
	int i, j;

	for (i = 0; i < nrange - 1; i++) {
		b1 = scsi_8btou64(range[i].lba);
		l1 = scsi_4btoul(range[i].length);
		for (j = i + 1; j < nrange; j++) {
			b2 = scsi_8btou64(range[j].lba);
			l2 = scsi_4btoul(range[j].length);
			if (b1 + l1 > b2 && b2 + l2 > b1)
				return (-1);
		}
	}
	return (0);
}

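/*
 * Translate a flat block offset ("skip") across a range descriptor list
 * into a range index and an offset within that range; returns -1 once
 * skip runs past the end of all ranges.
 */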
static int
tpc_skip_ranges(struct scsi_range_desc *range, int nrange, off_t skip,
    int *srange, off_t *soffset)
{
	off_t off;
	int r;

	r = 0;
	off = 0;
	while (r < nrange) {
		if (skip - off < scsi_4btoul(range[r].length)) {
			*srange = r;
			*soffset = skip - off;
			return (0);
		}
		off += scsi_4btoul(range[r].length);
		r++;
	}
	return (-1);
}

static int
tpc_process_wut(struct tpc_list *list)
{
	struct tpc_io *tio, *tior, *tiow;
	struct runl run;
	int drange, srange;
	off_t doffset, soffset;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	uint32_t srcblock, dstblock, pb, pbo, adj;

	if (list->stage > 0) {
		/* Cleanup after previous rounds. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio->buf, M_CTL);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			if (list->fwd_scsi_status) {
				list->ctsio->io_hdr.status =
				    CTL_SCSI_ERROR | CTL_AUTOSENSE;
				list->ctsio->scsi_status = list->fwd_scsi_status;
				list->ctsio->sense_data = list->fwd_sense_data;
				list->ctsio->sense_len = list->fwd_sense_len;
			} else {
				ctl_set_invalid_field(list->ctsio,
				    /*sks_valid*/ 0, /*command*/ 0,
				    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
			}
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
	}

	/* Check where we are on destination ranges list. */
	if (tpc_skip_ranges(list->range, list->nrange, list->cursectors,
	    &drange, &doffset) != 0)
		return (CTL_RETVAL_COMPLETE);
	dstblock = list->lun->be_lun->blocksize;
	pb = dstblock << list->lun->be_lun->pblockexp;
	if (list->lun->be_lun->pblockoff > 0)
		pbo = pb - dstblock * list->lun->be_lun->pblockoff;
	else
		pbo = 0;

	/* Check where we are on source ranges list. */
	srcblock = list->token->blocksize;
	if (tpc_skip_ranges(list->token->range, list->token->nrange,
	    list->offset_into_rod + list->cursectors * dstblock / srcblock,
	    &srange, &soffset) != 0) {
		ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
		return (CTL_RETVAL_ERROR);
	}

	srclba = scsi_8btou64(list->token->range[srange].lba) + soffset;
	dstlba = scsi_8btou64(list->range[drange].lba) + doffset;
	numbytes = srcblock *
	    (scsi_4btoul(list->token->range[srange].length) - soffset);
	numbytes = omin(numbytes, dstblock *
	    (scsi_4btoul(list->range[drange].length) - doffset));
	if (numbytes > TPC_MAX_IOCHUNK_SIZE) {
		numbytes = TPC_MAX_IOCHUNK_SIZE;
		numbytes -= numbytes % dstblock;
		if (pb > dstblock) {
			adj = (dstlba * dstblock + numbytes - pbo) % pb;
			if (numbytes > adj)
				numbytes -= adj;
		}
	}

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
		return (CTL_RETVAL_ERROR);
	}

	list->segbytes = numbytes;
	list->segsectors = numbytes / dstblock;
//	printf("Copy chunk of %ju sectors from %ju to %ju\n", list->segsectors,
//	    srclba, dstlba);
	donebytes = 0;
	TAILQ_INIT(&run);
	list->tbdio = 0;
	TAILQ_INIT(&list->allio);
	while (donebytes < numbytes) {
		roundbytes = numbytes - donebytes;
		if (roundbytes > TPC_MAX_IO_SIZE) {
			roundbytes = TPC_MAX_IO_SIZE;
			roundbytes -= roundbytes % dstblock;
			if (pb > dstblock) {
				adj = (dstlba * dstblock + roundbytes - pbo) % pb;
				if (roundbytes > adj)
					roundbytes -= adj;
			}
		}

		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->buf = malloc(roundbytes, M_CTL, M_WAITOK);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		ctl_scsi_read_write(tior->io,
		    /*data_ptr*/ tior->buf,
		    /*data_len*/ roundbytes,
		    /*read_op*/ 1,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ srclba,
		    /*num_blocks*/ roundbytes / srcblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->lun = list->token->lun;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_read_write(tiow->io,
		    /*data_ptr*/ tior->buf,
		    /*data_len*/ roundbytes,
		    /*read_op*/ 0,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ dstlba,
		    /*num_blocks*/ roundbytes / dstblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = list->lun->lun;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(&run, tior, rlinks);
		list->tbdio++;
		donebytes += roundbytes;
		srclba += roundbytes / srcblock;
		dstlba += roundbytes / dstblock;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}

static int
tpc_process_zero_wut(struct tpc_list *list)
{
	struct tpc_io *tio, *tiow;
	struct runl run, *prun;
	int r;
	uint32_t dstblock, len;

	if (list->stage > 0) {
complete:
		/* Cleanup after previous rounds. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			if (list->fwd_scsi_status) {
				list->ctsio->io_hdr.status =
				    CTL_SCSI_ERROR | CTL_AUTOSENSE;
				list->ctsio->scsi_status = list->fwd_scsi_status;
				list->ctsio->sense_data = list->fwd_sense_data;
				list->ctsio->sense_len = list->fwd_sense_len;
			} else {
				ctl_set_invalid_field(list->ctsio,
				    /*sks_valid*/ 0, /*command*/ 0,
				    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
			}
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
		return (CTL_RETVAL_COMPLETE);
	}

	dstblock = list->lun->be_lun->blocksize;
	TAILQ_INIT(&run);
	prun = &run;
	list->tbdio = 1;
	TAILQ_INIT(&list->allio);
	list->segsectors = 0;
	for (r = 0; r < list->nrange; r++) {
		len = scsi_4btoul(list->range[r].length);
		if (len == 0)
			continue;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_write_same(tiow->io,
		    /*data_ptr*/ NULL,
		    /*data_len*/ 0,
		    /*byte2*/ SWS_NDOB,
		    /*lba*/ scsi_8btou64(list->range[r].lba),
		    /*num_blocks*/ len,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = list->lun->lun;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(prun, tiow, rlinks);
		prun = &tiow->run;
		list->segsectors += len;
	}
	list->segbytes = list->segsectors * dstblock;

	if (TAILQ_EMPTY(&run))
		goto complete;

	while ((tiow = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tiow, rlinks);
		if (tpcl_queue(tiow->io, tiow->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}

static void
tpc_process(struct tpc_list *list)
{
	struct ctl_lun *lun = list->lun;
	struct ctl_softc *softc = lun->ctl_softc;
	struct scsi_ec_segment *seg;
	struct ctl_scsiio *ctsio = list->ctsio;
	int retval = CTL_RETVAL_COMPLETE;
	uint8_t csi[4];

	if (list->service_action == EC_WUT) {
		if (list->token != NULL)
			retval = tpc_process_wut(list);
		else
			retval = tpc_process_zero_wut(list);
		if (retval == CTL_RETVAL_QUEUED)
			return;
		if (retval == CTL_RETVAL_ERROR) {
			list->error = 1;
			goto done;
		}
	} else {
//		printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
		while (list->curseg < list->nseg) {
			seg = list->seg[list->curseg];
			switch (seg->type_code) {
			case EC_SEG_B2B:
				retval = tpc_process_b2b(list);
				break;
			case EC_SEG_VERIFY:
				retval = tpc_process_verify(list);
				break;
			case EC_SEG_REGISTER_KEY:
				retval = tpc_process_register_key(list);
				break;
			default:
				scsi_ulto4b(list->curseg, csi);
				ctl_set_sense(ctsio, /*current_error*/ 1,
				    /*sense_key*/ SSD_KEY_COPY_ABORTED,
				    /*asc*/ 0x26, /*ascq*/ 0x09,
				    SSD_ELEM_COMMAND, sizeof(csi), csi,
				    SSD_ELEM_NONE);
				goto done;
			}
			if (retval == CTL_RETVAL_QUEUED)
				return;
			if (retval == CTL_RETVAL_ERROR) {
				list->error = 1;
				goto done;
			}
			list->curseg++;
			list->stage = 0;
		}
	}

	ctl_set_success(ctsio);

done:
//	printf("ZZZ done\n");
	free(list->params, M_CTL);
	list->params = NULL;
	if (list->token) {
		mtx_lock(&softc->tpc_lock);
		if (--list->token->active == 0)
			list->token->last_active = time_uptime;
		mtx_unlock(&softc->tpc_lock);
		list->token = NULL;
	}
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	} else {
		list->completed = 1;
		list->last_active = time_uptime;
		list->sense_data = ctsio->sense_data;
		list->sense_len = ctsio->sense_len;
		list->scsi_status = ctsio->scsi_status;
	}
	mtx_unlock(&lun->lun_lock);

	ctl_done((union ctl_io *)ctsio);
}

/*
 * For any sort of check condition, busy, etc., we just retry.  We do not
 * decrement the retry count for unit attention type errors.  These are
 * normal, and we want to save the retry count for "real" errors.  Otherwise,
 * we could end up with situations where a command will succeed in some
 * situations and fail in others, depending on whether a unit attention is
 * pending.  Also, some of our error recovery actions, most notably the
 * LUN reset action, will cause a unit attention.
 *
 * We can add more detail here later if necessary.
 */
static tpc_error_action
tpc_checkcond_parse(union ctl_io *io)
{
	tpc_error_action error_action;
	int error_code, sense_key, asc, ascq;

	/*
	 * Default to retrying the command.
	 */
	error_action = TPC_ERR_RETRY;

	scsi_extract_sense_len(&io->scsiio.sense_data,
	    io->scsiio.sense_len,
	    &error_code,
	    &sense_key,
	    &asc,
	    &ascq,
	    /*show_errors*/ 1);

	switch (error_code) {
	case SSD_DEFERRED_ERROR:
	case SSD_DESC_DEFERRED_ERROR:
		error_action |= TPC_ERR_NO_DECREMENT;
		break;
	case SSD_CURRENT_ERROR:
	case SSD_DESC_CURRENT_ERROR:
	default:
		switch (sense_key) {
		case SSD_KEY_UNIT_ATTENTION:
			error_action |= TPC_ERR_NO_DECREMENT;
			break;
		case SSD_KEY_HARDWARE_ERROR:
			/*
			 * This is our generic "something bad happened"
			 * error code.  It often isn't recoverable.
			 */
			if ((asc == 0x44) && (ascq == 0x00))
				error_action = TPC_ERR_FAIL;
			break;
		case SSD_KEY_NOT_READY:
			/*
			 * If the LUN is powered down, there likely isn't
			 * much point in retrying right now.
			 */
			if ((asc == 0x04) && (ascq == 0x02))
				error_action = TPC_ERR_FAIL;
			/*
			 * If the LUN is offline, there probably isn't much
			 * point in retrying, either.
			 */
			if ((asc == 0x04) && (ascq == 0x03))
				error_action = TPC_ERR_FAIL;
			break;
		}
	}
	return (error_action);
}

static tpc_error_action
tpc_error_parse(union ctl_io *io)
{
	tpc_error_action error_action = TPC_ERR_RETRY;

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		switch (io->io_hdr.status & CTL_STATUS_MASK) {
		case CTL_SCSI_ERROR:
			switch (io->scsiio.scsi_status) {
			case SCSI_STATUS_CHECK_COND:
				error_action = tpc_checkcond_parse(io);
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
		break;
	case CTL_IO_TASK:
		break;
	default:
		panic("%s: invalid ctl_io type %d\n", __func__,
		    io->io_hdr.io_type);
		break;
	}
	return (error_action);
}

void
tpc_done(union ctl_io *io)
{
	struct tpc_io *tio, *tior;

	/*
	 * Very minimal retry logic.  We basically retry if we got an error
	 * back, and the retry count is greater than 0.  If we ever want
	 * more sophisticated initiator type behavior, the CAM error
	 * recovery code in ../common might be helpful.
	 */
	tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
	if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
	 && (io->io_hdr.retries > 0)) {
		ctl_io_status old_status;
		tpc_error_action error_action;

		error_action = tpc_error_parse(io);
		switch (error_action & TPC_ERR_MASK) {
		case TPC_ERR_FAIL:
			break;
		case TPC_ERR_RETRY:
		default:
			if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
				io->io_hdr.retries--;
			old_status = io->io_hdr.status;
			io->io_hdr.status = CTL_STATUS_NONE;
			io->io_hdr.flags &= ~CTL_FLAG_ABORT;
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
				printf("%s: error returned from tpcl_queue()!\n",
				    __func__);
				io->io_hdr.status = old_status;
			} else
				return;
		}
	}

	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
		tio->list->error = 1;
		if (io->io_hdr.io_type == CTL_IO_SCSI &&
		    (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR) {
			tio->list->fwd_scsi_status = io->scsiio.scsi_status;
			tio->list->fwd_sense_data = io->scsiio.sense_data;
			tio->list->fwd_sense_len = io->scsiio.sense_len;
			tio->list->fwd_target = tio->target;
			tio->list->fwd_cscd = tio->cscd;
		}
	} else
		atomic_add_int(&tio->list->curops, 1);
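	/*
	 * On success, queue any I/Os chained behind this one (for B2B
	 * segments, the write reusing the completed read's buffer); the
	 * final completion, seen as tbdio dropping to zero, advances the
	 * list to its next round.
	 */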
1649 if (!tio->list->error && !tio->list->abort) {
1650 while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
1651 TAILQ_REMOVE(&tio->run, tior, rlinks);
1652 atomic_add_int(&tio->list->tbdio, 1);
1653 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
1654 panic("tpcl_queue() error");
1655 }
1656 }
1657 if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
1658 tpc_process(tio->list);
1659 }
1660
int
ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid1_data *data;
	struct scsi_ec_cscd *cscd;
	struct scsi_ec_segment *seg;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	const char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));

	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len == 0) {
		ctl_set_success(ctsio);
		goto done;
	}
	if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
	    len > sizeof(struct scsi_extended_copy_lid1_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr;
	lencscd = scsi_2btoul(data->cscd_list_length);
	lenseg = scsi_4btoul(data->segment_list_length);
	leninl = scsi_4btoul(data->inline_data_length);
	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
		goto done;
	}
	if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
		goto done;
	}
	if (lencscd + lenseg > TPC_MAX_LIST ||
	    leninl > TPC_MAX_INLINE ||
	    len < sizeof(struct scsi_extended_copy_lid1_data) +
	    lencscd + lenseg + leninl) {
		ctl_set_param_len_error(ctsio);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	value = dnvlist_get_string(lun->be_lun->options, "insecure_tpc", NULL);
	if (value != NULL && strcmp(value, "on") == 0)
		list->init_port = -1;
	else
		list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = data->list_identifier;
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
	ptr = &data->data[0];
	for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) {
		cscd = (struct scsi_ec_cscd *)(ptr + off);
		if (cscd->type_code != EC_CSCD_ID) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE);
			goto done;
		}
	}
	ptr = &data->data[lencscd];
	for (nseg = 0, off = 0; off < lenseg; nseg++) {
		if (nseg >= TPC_MAX_SEGS) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
			goto done;
		}
		seg = (struct scsi_ec_segment *)(ptr + off);
		if (seg->type_code != EC_SEG_B2B &&
		    seg->type_code != EC_SEG_VERIFY &&
		    seg->type_code != EC_SEG_REGISTER_KEY) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
			goto done;
		}
		list->seg[nseg] = seg;
		off += sizeof(struct scsi_ec_segment) +
		    scsi_2btoul(seg->descr_length);
	}
	list->inl = &data->data[lencscd + lenseg];
	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
	list->nseg = nseg;
	list->leninl = leninl;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
		if (tlist != NULL && !tlist->completed) {
			mtx_unlock(&lun->lun_lock);
			free(list, M_CTL);
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
			    /*bit*/ 0);
			goto done;
		}
		if (tlist != NULL) {
			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
			free(tlist, M_CTL);
		}
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

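/*
 * Implement the EXTENDED COPY (LID4) command.  Identical in structure
 * to the LID1 variant above, except that the parameter list header
 * carries a 4-byte list identifier and 2-byte segment and inline data
 * lengths.
 */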
int
ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid4_data *data;
	struct scsi_ec_cscd *cscd;
	struct scsi_ec_segment *seg;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	const char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));

	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len == 0) {
		ctl_set_success(ctsio);
		goto done;
	}
	if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
	    len > sizeof(struct scsi_extended_copy_lid4_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr;
	lencscd = scsi_2btoul(data->cscd_list_length);
	lenseg = scsi_2btoul(data->segment_list_length);
	leninl = scsi_2btoul(data->inline_data_length);
	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
		goto done;
	}
	if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
		goto done;
	}
	if (lencscd + lenseg > TPC_MAX_LIST ||
	    leninl > TPC_MAX_INLINE ||
	    len < sizeof(struct scsi_extended_copy_lid4_data) +
	    lencscd + lenseg + leninl) {
		ctl_set_param_len_error(ctsio);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	value = dnvlist_get_string(lun->be_lun->options, "insecure_tpc", NULL);
	if (value != NULL && strcmp(value, "on") == 0)
		list->init_port = -1;
	else
		list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(data->list_identifier);
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
	ptr = &data->data[0];
	for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) {
		cscd = (struct scsi_ec_cscd *)(ptr + off);
		if (cscd->type_code != EC_CSCD_ID) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE);
			goto done;
		}
	}
	ptr = &data->data[lencscd];
	for (nseg = 0, off = 0; off < lenseg; nseg++) {
		if (nseg >= TPC_MAX_SEGS) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
			goto done;
		}
		seg = (struct scsi_ec_segment *)(ptr + off);
		if (seg->type_code != EC_SEG_B2B &&
		    seg->type_code != EC_SEG_VERIFY &&
		    seg->type_code != EC_SEG_REGISTER_KEY) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
			goto done;
		}
		list->seg[nseg] = seg;
		off += sizeof(struct scsi_ec_segment) +
		    scsi_2btoul(seg->descr_length);
	}
	list->inl = &data->data[lencscd + lenseg];
	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
	list->nseg = nseg;
	list->leninl = leninl;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
		if (tlist != NULL && !tlist->completed) {
			mtx_unlock(&lun->lun_lock);
			free(list, M_CTL);
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
			    /*bit*/ 0);
			goto done;
		}
		if (tlist != NULL) {
			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
			free(tlist, M_CTL);
		}
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

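/*
 * Build a 512-byte AUR ROD token.  The token body carries a unique
 * identifier, the LUN's NAA or EUI64 designator (if any), the
 * represented data length, device type specific data (READ
 * CAPACITY(16) payload without its first field) and the target device
 * identifier; the remainder is filled with random bytes so that every
 * token is distinct.
 */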
static void
tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len,
    struct scsi_token *token)
{
	static int id = 0;
	struct scsi_vpd_id_descriptor *idd = NULL;
	struct scsi_ec_cscd_id *cscd;
	struct scsi_read_capacity_data_long *dtsd;
	int targid_len;

	scsi_ulto4b(ROD_TYPE_AUR, token->type);
	scsi_ulto2b(0x01f8, token->length);
	scsi_u64to8b(atomic_fetchadd_int(&id, 1), &token->body[0]);
	if (lun->lun_devid)
		idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
		    lun->lun_devid->data, lun->lun_devid->len,
		    scsi_devid_is_lun_naa);
	if (idd == NULL && lun->lun_devid)
		idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
		    lun->lun_devid->data, lun->lun_devid->len,
		    scsi_devid_is_lun_eui64);
	if (idd != NULL) {
		cscd = (struct scsi_ec_cscd_id *)&token->body[8];
		cscd->type_code = EC_CSCD_ID;
		cscd->luidt_pdt = T_DIRECT;
		memcpy(&cscd->codeset, idd, 4 + idd->length);
		scsi_ulto3b(lun->be_lun->blocksize, cscd->dtsp.block_length);
	}
	scsi_u64to8b(0, &token->body[40]); /* XXX: Should be 128bit value. */
	scsi_u64to8b(len, &token->body[48]);

	/* ROD token device type specific data (RC16 without first field) */
	dtsd = (struct scsi_read_capacity_data_long *)&token->body[88 - 8];
	scsi_ulto4b(lun->be_lun->blocksize, dtsd->length);
	dtsd->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
	scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, dtsd->lalba_lbp);
	if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
		dtsd->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;

	if (port->target_devid) {
		targid_len = port->target_devid->len;
		memcpy(&token->body[120], port->target_devid->data, targid_len);
	} else
		targid_len = 32;
	arc4rand(&token->body[120 + targid_len], 384 - targid_len, 0);
}

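/*
 * Implement the POPULATE TOKEN command: validate the range
 * descriptors, create a ROD token representing the described blocks
 * and complete the command, leaving the token behind for later WRITE
 * USING TOKEN commands.
 */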
int
ctl_populate_token(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct ctl_port *port = CTL_PORT(ctsio);
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_populate_token *cdb;
	struct scsi_populate_token_data *data;
	struct tpc_list *list, *tlist;
	struct tpc_token *token;
	uint64_t lba;
	int len, lendata, lendesc;

	CTL_DEBUG_PRINT(("ctl_populate_token\n"));

	cdb = (struct scsi_populate_token *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_populate_token_data) ||
	    len > sizeof(struct scsi_populate_token_data) +
	    TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_populate_token_data *)ctsio->kern_data_ptr;
	lendata = scsi_2btoul(data->length);
	if (lendata < sizeof(struct scsi_populate_token_data) - 2 +
	    sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	lendesc = scsi_2btoul(data->range_descriptor_length);
	if (lendesc < sizeof(struct scsi_range_desc) ||
	    len < sizeof(struct scsi_populate_token_data) + lendesc ||
	    lendata < sizeof(struct scsi_populate_token_data) - 2 + lendesc) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 14, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	/*
	printf("PT(list=%u) flags=%x to=%d rt=%x len=%x\n",
	    scsi_4btoul(cdb->list_identifier),
	    data->flags, scsi_4btoul(data->inactivity_timeout),
	    scsi_4btoul(data->rod_type),
	    scsi_2btoul(data->range_descriptor_length));
	*/

	/* Validate INACTIVITY TIMEOUT field */
	if (scsi_4btoul(data->inactivity_timeout) > TPC_MAX_TOKEN_TIMEOUT) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 4, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}

	/* Validate ROD TYPE field */
	if ((data->flags & EC_PT_RTV) &&
	    scsi_4btoul(data->rod_type) != ROD_TYPE_AUR) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/* Validate list of ranges */
	if (tpc_check_ranges_l(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc),
	    lun->be_lun->maxlba, &lba) != 0) {
		ctl_set_lba_out_of_range(ctsio, lba);
		goto done;
	}
	if (tpc_check_ranges_x(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc)) != 0) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(cdb->list_identifier);
	list->flags = data->flags;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
	if (tlist != NULL && !tlist->completed) {
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}
	if (tlist != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
		free(tlist, M_CTL);
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

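	/*
	 * Create the ROD token for the validated ranges and register it
	 * on the softc-wide token list, where WRITE USING TOKEN can
	 * later find it; a copy becomes this list's response token,
	 * reported via RECEIVE ROD TOKEN INFORMATION.
	 */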
	token = malloc(sizeof(*token), M_CTL, M_WAITOK | M_ZERO);
	token->lun = lun->lun;
	token->blocksize = lun->be_lun->blocksize;
	token->params = ctsio->kern_data_ptr;
	token->range = &data->desc[0];
	token->nrange = scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc);
	list->cursectors = tpc_ranges_length(token->range, token->nrange);
	list->curbytes = (off_t)list->cursectors * lun->be_lun->blocksize;
	tpc_create_token(lun, port, list->curbytes,
	    (struct scsi_token *)token->token);
	token->active = 0;
	token->last_active = time_uptime;
	token->timeout = scsi_4btoul(data->inactivity_timeout);
	if (token->timeout == 0)
		token->timeout = TPC_DFL_TOKEN_TIMEOUT;
	else if (token->timeout < TPC_MIN_TOKEN_TIMEOUT)
		token->timeout = TPC_MIN_TOKEN_TIMEOUT;
	memcpy(list->res_token, token->token, sizeof(list->res_token));
	list->res_token_valid = 1;
	list->curseg = 0;
	list->completed = 1;
	list->last_active = time_uptime;
	mtx_lock(&softc->tpc_lock);
	TAILQ_INSERT_TAIL(&softc->tpc_tokens, token, links);
	mtx_unlock(&softc->tpc_lock);
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

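/*
 * Implement the WRITE USING TOKEN command: validate the destination
 * ranges, look up the ROD token supplied in the parameter data and
 * queue a tpc_list that copies the token's blocks into those ranges.
 */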
int
ctl_write_using_token(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_write_using_token *cdb;
	struct scsi_write_using_token_data *data;
	struct tpc_list *list, *tlist;
	struct tpc_token *token;
	uint64_t lba;
	int len, lendata, lendesc;

	CTL_DEBUG_PRINT(("ctl_write_using_token\n"));

	cdb = (struct scsi_write_using_token *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_write_using_token_data) ||
	    len > sizeof(struct scsi_write_using_token_data) +
	    TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr;
	lendata = scsi_2btoul(data->length);
	if (lendata < sizeof(struct scsi_write_using_token_data) - 2 +
	    sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	lendesc = scsi_2btoul(data->range_descriptor_length);
	if (lendesc < sizeof(struct scsi_range_desc) ||
	    len < sizeof(struct scsi_write_using_token_data) + lendesc ||
	    lendata < sizeof(struct scsi_write_using_token_data) - 2 + lendesc) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 534, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	/*
	printf("WUT(list=%u) flags=%x off=%ju len=%x\n",
	    scsi_4btoul(cdb->list_identifier),
	    data->flags, scsi_8btou64(data->offset_into_rod),
	    scsi_2btoul(data->range_descriptor_length));
	*/

	/* Validate list of ranges */
	if (tpc_check_ranges_l(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc),
	    lun->be_lun->maxlba, &lba) != 0) {
		ctl_set_lba_out_of_range(ctsio, lba);
		goto done;
	}
	if (tpc_check_ranges_x(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc)) != 0) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(cdb->list_identifier);
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->range = &data->desc[0];
	list->nrange = scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc);
	list->offset_into_rod = scsi_8btou64(data->offset_into_rod);
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
	if (tlist != NULL && !tlist->completed) {
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}
	if (tlist != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
		free(tlist, M_CTL);
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	/* Block device zero ROD token -> no token. */
	if (scsi_4btoul(data->rod_token) == ROD_TYPE_BLOCK_ZERO) {
		tpc_process(list);
		return (CTL_RETVAL_COMPLETE);
	}

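	/*
	 * Look the token up among those created by POPULATE TOKEN;
	 * tokens are matched on the full 512-byte body.  An unknown
	 * token fails the command with INVALID TOKEN OPERATION, TOKEN
	 * UNKNOWN.
	 */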
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
		if (memcmp(token->token, data->rod_token,
		    sizeof(data->rod_token)) == 0)
			break;
	}
	if (token != NULL) {
		token->active++;
		list->token = token;
		if (data->flags & EC_WUT_DEL_TKN)
			token->timeout = 0;
	}
	mtx_unlock(&softc->tpc_lock);
	if (token == NULL) {
		mtx_lock(&lun->lun_lock);
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x23, /*ascq*/ 0x04, SSD_ELEM_NONE);
		goto done;
	}

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

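/*
 * Implement the RECEIVE ROD TOKEN INFORMATION command: report the
 * status of the identified copy operation and, if a ROD token was
 * created, return it.  A completed list is freed once its status has
 * been fetched.
 */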
int
ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_receive_rod_token_information *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	uint8_t *ptr;
	int retval;
	int alloc_len, total_len, token_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n"));

	cdb = (struct scsi_receive_rod_token_information *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

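	/*
	 * Build the LID4 copy status response, followed by the sense
	 * data and the ROD token area.  AVAILABLE DATA counts
	 * everything past its own field, while the transfer itself is
	 * truncated to the allocation length from the CDB.
	 */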
	token_len = list_copy.res_token_valid ? 2 + sizeof(list_copy.res_token) : 0;
	total_len = sizeof(*data) + list_copy.sense_len + 4 + token_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len +
	    4 + token_len, data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	data->transfer_count_units = RCS_TC_LBAS;
	scsi_u64to8b(list_copy.cursectors, data->transfer_count);
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->length_of_the_sense_data_field = list_copy.sense_len;
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ptr = &data->sense_data[data->length_of_the_sense_data_field];
	scsi_ulto4b(token_len, &ptr[0]);
	if (list_copy.res_token_valid) {
		scsi_ulto2b(0, &ptr[4]);
		memcpy(&ptr[6], list_copy.res_token, sizeof(list_copy.res_token));
	}
	/*
	printf("RRTI(list=%u) valid=%d\n",
	    scsi_4btoul(cdb->list_identifier), list_copy.res_token_valid);
	*/
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

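/*
 * Implement the REPORT ALL ROD TOKENS command: return the 96-byte
 * ROD token management entries for up to 512 currently registered
 * tokens, truncated to the allocation length from the CDB.
 */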
int
ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct scsi_report_all_rod_tokens *cdb;
	struct scsi_report_all_rod_tokens_data *data;
	struct tpc_token *token;
	int retval;
	int alloc_len, total_len, tokens, i;

	CTL_DEBUG_PRINT(("ctl_report_all_rod_tokens\n"));

	cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	tokens = 0;
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links)
		tokens++;
	mtx_unlock(&softc->tpc_lock);
	if (tokens > 512)
		tokens = 512;

	total_len = sizeof(*data) + tokens * 96;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr;
	i = 0;
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
		if (i >= tokens)
			break;
		memcpy(&data->rod_management_token_list[i * 96],
		    token->token, 96);
		i++;
	}
	mtx_unlock(&softc->tpc_lock);
	scsi_ulto4b(sizeof(*data) - 4 + i * 96, data->available_data);
	/*
	printf("RART tokens=%d\n", i);
	*/
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
