1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer, 12 * without modification, immediately at the beginning of the file. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/kernel.h> 35 #include <sys/types.h> 36 #include <sys/lock.h> 37 #include <sys/module.h> 38 #include <sys/mutex.h> 39 #include <sys/condvar.h> 40 #include <sys/malloc.h> 41 #include <sys/conf.h> 42 #include <sys/queue.h> 43 #include <sys/sysctl.h> 44 #include <sys/nv.h> 45 #include <sys/dnv.h> 46 #include <machine/atomic.h> 47 48 #include <cam/cam.h> 49 #include <cam/scsi/scsi_all.h> 50 #include <cam/scsi/scsi_da.h> 51 #include <cam/ctl/ctl_io.h> 52 #include <cam/ctl/ctl.h> 53 #include <cam/ctl/ctl_frontend.h> 54 #include <cam/ctl/ctl_util.h> 55 #include <cam/ctl/ctl_backend.h> 56 #include <cam/ctl/ctl_ioctl.h> 57 #include <cam/ctl/ctl_ha.h> 58 #include <cam/ctl/ctl_private.h> 59 #include <cam/ctl/ctl_debug.h> 60 #include <cam/ctl/ctl_scsi_all.h> 61 #include <cam/ctl/ctl_tpc.h> 62 #include <cam/ctl/ctl_error.h> 63 64 #define TPC_MAX_CSCDS 64 65 #define TPC_MAX_SEGS 64 66 #define TPC_MAX_SEG 0 67 #define TPC_MAX_LIST 8192 68 #define TPC_MAX_INLINE 0 69 #define TPC_MAX_LISTS 255 70 #define TPC_MAX_IO_SIZE (1024 * 1024) 71 #define TPC_MAX_IOCHUNK_SIZE (TPC_MAX_IO_SIZE * 16) 72 #define TPC_MIN_TOKEN_TIMEOUT 1 73 #define TPC_DFL_TOKEN_TIMEOUT 60 74 #define TPC_MAX_TOKEN_TIMEOUT 600 75 76 MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC"); 77 78 typedef enum { 79 TPC_ERR_RETRY = 0x000, 80 TPC_ERR_FAIL = 0x001, 81 TPC_ERR_MASK = 0x0ff, 82 TPC_ERR_NO_DECREMENT = 0x100 83 } tpc_error_action; 84 85 struct tpc_list; 86 TAILQ_HEAD(runl, tpc_io); 87 struct tpc_io { 88 union ctl_io *io; 89 uint8_t target; 90 uint32_t cscd; 91 uint64_t lun; 92 uint8_t *buf; 93 struct tpc_list *list; 94 struct runl run; 95 TAILQ_ENTRY(tpc_io) rlinks; 96 TAILQ_ENTRY(tpc_io) links; 97 }; 98 99 struct tpc_token { 100 uint8_t token[512]; 101 uint64_t lun; 102 uint32_t blocksize; 103 uint8_t *params; 104 struct scsi_range_desc *range; 105 int nrange; 106 int active; 107 time_t last_active; 108 uint32_t timeout; 109 TAILQ_ENTRY(tpc_token) links; 110 }; 111 112 struct tpc_list { 113 uint8_t service_action; 114 int init_port; 115 uint32_t init_idx; 116 uint32_t list_id; 117 uint8_t flags; 118 uint8_t *params; 119 struct scsi_ec_cscd *cscd; 120 struct scsi_ec_segment *seg[TPC_MAX_SEGS]; 121 uint8_t *inl; 122 int ncscd; 123 int nseg; 124 int leninl; 125 struct tpc_token *token; 126 struct scsi_range_desc *range; 127 int nrange; 128 off_t offset_into_rod; 129 130 int curseg; 131 off_t cursectors; 132 off_t curbytes; 133 int curops; 134 int stage; 135 off_t segsectors; 136 off_t segbytes; 137 int tbdio; 138 int error; 139 int abort; 140 int completed; 141 time_t last_active; 142 TAILQ_HEAD(, tpc_io) allio; 143 struct scsi_sense_data fwd_sense_data; 144 uint8_t fwd_sense_len; 145 uint8_t fwd_scsi_status; 146 uint8_t fwd_target; 147 uint16_t fwd_cscd; 148 struct scsi_sense_data sense_data; 149 uint8_t sense_len; 150 uint8_t scsi_status; 151 struct ctl_scsiio *ctsio; 152 struct ctl_lun *lun; 153 int res_token_valid; 154 uint8_t res_token[512]; 155 TAILQ_ENTRY(tpc_list) links; 156 }; 157 158 static void 159 tpc_timeout(void *arg) 160 { 161 struct ctl_softc *softc = arg; 162 struct ctl_lun *lun; 163 struct tpc_token *token, *ttoken; 164 struct tpc_list *list, *tlist; 165 166 /* Free completed lists with expired timeout. */ 167 STAILQ_FOREACH(lun, &softc->lun_list, links) { 168 mtx_lock(&lun->lun_lock); 169 TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) { 170 if (!list->completed || time_uptime < list->last_active + 171 TPC_DFL_TOKEN_TIMEOUT) 172 continue; 173 TAILQ_REMOVE(&lun->tpc_lists, list, links); 174 free(list, M_CTL); 175 } 176 mtx_unlock(&lun->lun_lock); 177 } 178 179 /* Free inactive ROD tokens with expired timeout. */ 180 mtx_lock(&softc->tpc_lock); 181 TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) { 182 if (token->active || 183 time_uptime < token->last_active + token->timeout + 1) 184 continue; 185 TAILQ_REMOVE(&softc->tpc_tokens, token, links); 186 free(token->params, M_CTL); 187 free(token, M_CTL); 188 } 189 mtx_unlock(&softc->tpc_lock); 190 callout_schedule(&softc->tpc_timeout, hz); 191 } 192 193 void 194 ctl_tpc_init(struct ctl_softc *softc) 195 { 196 197 mtx_init(&softc->tpc_lock, "CTL TPC mutex", NULL, MTX_DEF); 198 TAILQ_INIT(&softc->tpc_tokens); 199 callout_init_mtx(&softc->tpc_timeout, &softc->ctl_lock, 0); 200 callout_reset(&softc->tpc_timeout, hz, tpc_timeout, softc); 201 } 202 203 void 204 ctl_tpc_shutdown(struct ctl_softc *softc) 205 { 206 struct tpc_token *token; 207 208 callout_drain(&softc->tpc_timeout); 209 210 /* Free ROD tokens. */ 211 mtx_lock(&softc->tpc_lock); 212 while ((token = TAILQ_FIRST(&softc->tpc_tokens)) != NULL) { 213 TAILQ_REMOVE(&softc->tpc_tokens, token, links); 214 free(token->params, M_CTL); 215 free(token, M_CTL); 216 } 217 mtx_unlock(&softc->tpc_lock); 218 mtx_destroy(&softc->tpc_lock); 219 } 220 221 void 222 ctl_tpc_lun_init(struct ctl_lun *lun) 223 { 224 225 TAILQ_INIT(&lun->tpc_lists); 226 } 227 228 void 229 ctl_tpc_lun_clear(struct ctl_lun *lun, uint32_t initidx) 230 { 231 struct tpc_list *list, *tlist; 232 233 TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) { 234 if (initidx != -1 && list->init_idx != initidx) 235 continue; 236 if (!list->completed) 237 continue; 238 TAILQ_REMOVE(&lun->tpc_lists, list, links); 239 free(list, M_CTL); 240 } 241 } 242 243 void 244 ctl_tpc_lun_shutdown(struct ctl_lun *lun) 245 { 246 struct ctl_softc *softc = lun->ctl_softc; 247 struct tpc_list *list; 248 struct tpc_token *token, *ttoken; 249 250 /* Free lists for this LUN. */ 251 while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) { 252 TAILQ_REMOVE(&lun->tpc_lists, list, links); 253 KASSERT(list->completed, 254 ("Not completed TPC (%p) on shutdown", list)); 255 free(list, M_CTL); 256 } 257 258 /* Free ROD tokens for this LUN. */ 259 mtx_lock(&softc->tpc_lock); 260 TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) { 261 if (token->lun != lun->lun || token->active) 262 continue; 263 TAILQ_REMOVE(&softc->tpc_tokens, token, links); 264 free(token->params, M_CTL); 265 free(token, M_CTL); 266 } 267 mtx_unlock(&softc->tpc_lock); 268 } 269 270 int 271 ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len) 272 { 273 struct ctl_lun *lun = CTL_LUN(ctsio); 274 struct scsi_vpd_tpc *tpc_ptr; 275 struct scsi_vpd_tpc_descriptor *d_ptr; 276 struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr; 277 struct scsi_vpd_tpc_descriptor_sc *sc_ptr; 278 struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr; 279 struct scsi_vpd_tpc_descriptor_pd *pd_ptr; 280 struct scsi_vpd_tpc_descriptor_sd *sd_ptr; 281 struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr; 282 struct scsi_vpd_tpc_descriptor_rtf *rtf_ptr; 283 struct scsi_vpd_tpc_descriptor_rtf_block *rtfb_ptr; 284 struct scsi_vpd_tpc_descriptor_srt *srt_ptr; 285 struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr; 286 struct scsi_vpd_tpc_descriptor_gco *gco_ptr; 287 int data_len; 288 289 data_len = sizeof(struct scsi_vpd_tpc) + 290 sizeof(struct scsi_vpd_tpc_descriptor_bdrl) + 291 roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) + 292 2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 11, 4) + 293 sizeof(struct scsi_vpd_tpc_descriptor_pd) + 294 roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) + 295 roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) + 296 sizeof(struct scsi_vpd_tpc_descriptor_rtf) + 297 sizeof(struct scsi_vpd_tpc_descriptor_rtf_block) + 298 sizeof(struct scsi_vpd_tpc_descriptor_srt) + 299 2*sizeof(struct scsi_vpd_tpc_descriptor_srtd) + 300 sizeof(struct scsi_vpd_tpc_descriptor_gco); 301 302 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 303 tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr; 304 ctsio->kern_rel_offset = 0; 305 ctsio->kern_sg_entries = 0; 306 ctsio->kern_data_len = min(data_len, alloc_len); 307 ctsio->kern_total_len = ctsio->kern_data_len; 308 309 /* 310 * The control device is always connected. The disk device, on the 311 * other hand, may not be online all the time. 312 */ 313 if (lun != NULL) 314 tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 315 lun->be_lun->lun_type; 316 else 317 tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 318 tpc_ptr->page_code = SVPD_SCSI_TPC; 319 scsi_ulto2b(data_len - 4, tpc_ptr->page_length); 320 321 /* Block Device ROD Limits */ 322 d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0]; 323 bdrl_ptr = (struct scsi_vpd_tpc_descriptor_bdrl *)d_ptr; 324 scsi_ulto2b(SVPD_TPC_BDRL, bdrl_ptr->desc_type); 325 scsi_ulto2b(sizeof(*bdrl_ptr) - 4, bdrl_ptr->desc_length); 326 scsi_ulto2b(TPC_MAX_SEGS, bdrl_ptr->maximum_ranges); 327 scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT, 328 bdrl_ptr->maximum_inactivity_timeout); 329 scsi_ulto4b(TPC_DFL_TOKEN_TIMEOUT, 330 bdrl_ptr->default_inactivity_timeout); 331 scsi_u64to8b(0, bdrl_ptr->maximum_token_transfer_size); 332 scsi_u64to8b(0, bdrl_ptr->optimal_transfer_count); 333 334 /* Supported commands */ 335 d_ptr = (struct scsi_vpd_tpc_descriptor *) 336 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 337 sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr; 338 scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type); 339 sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 11; 340 scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length); 341 scd_ptr = &sc_ptr->descr[0]; 342 scd_ptr->opcode = EXTENDED_COPY; 343 scd_ptr->sa_length = 5; 344 scd_ptr->supported_service_actions[0] = EC_EC_LID1; 345 scd_ptr->supported_service_actions[1] = EC_EC_LID4; 346 scd_ptr->supported_service_actions[2] = EC_PT; 347 scd_ptr->supported_service_actions[3] = EC_WUT; 348 scd_ptr->supported_service_actions[4] = EC_COA; 349 scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *) 350 &scd_ptr->supported_service_actions[scd_ptr->sa_length]; 351 scd_ptr->opcode = RECEIVE_COPY_STATUS; 352 scd_ptr->sa_length = 6; 353 scd_ptr->supported_service_actions[0] = RCS_RCS_LID1; 354 scd_ptr->supported_service_actions[1] = RCS_RCFD; 355 scd_ptr->supported_service_actions[2] = RCS_RCS_LID4; 356 scd_ptr->supported_service_actions[3] = RCS_RCOP; 357 scd_ptr->supported_service_actions[4] = RCS_RRTI; 358 scd_ptr->supported_service_actions[5] = RCS_RART; 359 360 /* Parameter data. */ 361 d_ptr = (struct scsi_vpd_tpc_descriptor *) 362 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 363 pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr; 364 scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type); 365 scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length); 366 scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count); 367 scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count); 368 scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length); 369 scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length); 370 371 /* Supported Descriptors */ 372 d_ptr = (struct scsi_vpd_tpc_descriptor *) 373 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 374 sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr; 375 scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type); 376 scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length); 377 sd_ptr->list_length = 4; 378 sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B; 379 sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY; 380 sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY; 381 sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID; 382 383 /* Supported CSCD Descriptor IDs */ 384 d_ptr = (struct scsi_vpd_tpc_descriptor *) 385 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 386 sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr; 387 scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type); 388 scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length); 389 scsi_ulto2b(2, sdid_ptr->list_length); 390 scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]); 391 392 /* ROD Token Features */ 393 d_ptr = (struct scsi_vpd_tpc_descriptor *) 394 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 395 rtf_ptr = (struct scsi_vpd_tpc_descriptor_rtf *)d_ptr; 396 scsi_ulto2b(SVPD_TPC_RTF, rtf_ptr->desc_type); 397 scsi_ulto2b(sizeof(*rtf_ptr) - 4 + sizeof(*rtfb_ptr), rtf_ptr->desc_length); 398 rtf_ptr->remote_tokens = 0; 399 scsi_ulto4b(TPC_MIN_TOKEN_TIMEOUT, rtf_ptr->minimum_token_lifetime); 400 scsi_ulto4b(UINT32_MAX, rtf_ptr->maximum_token_lifetime); 401 scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT, 402 rtf_ptr->maximum_token_inactivity_timeout); 403 scsi_ulto2b(sizeof(*rtfb_ptr), rtf_ptr->type_specific_features_length); 404 rtfb_ptr = (struct scsi_vpd_tpc_descriptor_rtf_block *) 405 &rtf_ptr->type_specific_features; 406 rtfb_ptr->type_format = SVPD_TPC_RTF_BLOCK; 407 scsi_ulto2b(sizeof(*rtfb_ptr) - 4, rtfb_ptr->desc_length); 408 scsi_ulto2b(0, rtfb_ptr->optimal_length_granularity); 409 scsi_u64to8b(0, rtfb_ptr->maximum_bytes); 410 scsi_u64to8b(0, rtfb_ptr->optimal_bytes); 411 scsi_u64to8b(UINT64_MAX, rtfb_ptr->optimal_bytes_to_token_per_segment); 412 scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE, 413 rtfb_ptr->optimal_bytes_from_token_per_segment); 414 415 /* Supported ROD Tokens */ 416 d_ptr = (struct scsi_vpd_tpc_descriptor *) 417 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 418 srt_ptr = (struct scsi_vpd_tpc_descriptor_srt *)d_ptr; 419 scsi_ulto2b(SVPD_TPC_SRT, srt_ptr->desc_type); 420 scsi_ulto2b(sizeof(*srt_ptr) - 4 + 2*sizeof(*srtd_ptr), srt_ptr->desc_length); 421 scsi_ulto2b(2*sizeof(*srtd_ptr), srt_ptr->rod_type_descriptors_length); 422 srtd_ptr = (struct scsi_vpd_tpc_descriptor_srtd *) 423 &srt_ptr->rod_type_descriptors; 424 scsi_ulto4b(ROD_TYPE_AUR, srtd_ptr->rod_type); 425 srtd_ptr->flags = SVPD_TPC_SRTD_TIN | SVPD_TPC_SRTD_TOUT; 426 scsi_ulto2b(0, srtd_ptr->preference_indicator); 427 srtd_ptr++; 428 scsi_ulto4b(ROD_TYPE_BLOCK_ZERO, srtd_ptr->rod_type); 429 srtd_ptr->flags = SVPD_TPC_SRTD_TIN; 430 scsi_ulto2b(0, srtd_ptr->preference_indicator); 431 432 /* General Copy Operations */ 433 d_ptr = (struct scsi_vpd_tpc_descriptor *) 434 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 435 gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr; 436 scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type); 437 scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length); 438 scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies); 439 scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies); 440 scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length); 441 gco_ptr->data_segment_granularity = 0; 442 gco_ptr->inline_data_granularity = 0; 443 444 ctl_set_success(ctsio); 445 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 446 ctsio->be_move_done = ctl_config_move_done; 447 ctl_datamove((union ctl_io *)ctsio); 448 449 return (CTL_RETVAL_COMPLETE); 450 } 451 452 int 453 ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio) 454 { 455 struct scsi_receive_copy_operating_parameters *cdb; 456 struct scsi_receive_copy_operating_parameters_data *data; 457 int retval; 458 int alloc_len, total_len; 459 460 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 461 462 cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb; 463 464 retval = CTL_RETVAL_COMPLETE; 465 466 total_len = sizeof(*data) + 4; 467 alloc_len = scsi_4btoul(cdb->length); 468 469 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 470 ctsio->kern_sg_entries = 0; 471 ctsio->kern_rel_offset = 0; 472 ctsio->kern_data_len = min(total_len, alloc_len); 473 ctsio->kern_total_len = ctsio->kern_data_len; 474 475 data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr; 476 scsi_ulto4b(sizeof(*data) - 4 + 4, data->length); 477 data->snlid = RCOP_SNLID; 478 scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count); 479 scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count); 480 scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length); 481 scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length); 482 scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length); 483 scsi_ulto4b(0, data->held_data_limit); 484 scsi_ulto4b(0, data->maximum_stream_device_transfer_size); 485 scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies); 486 data->maximum_concurrent_copies = TPC_MAX_LISTS; 487 data->data_segment_granularity = 0; 488 data->inline_data_granularity = 0; 489 data->held_data_granularity = 0; 490 data->implemented_descriptor_list_length = 4; 491 data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B; 492 data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY; 493 data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY; 494 data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID; 495 496 ctl_set_success(ctsio); 497 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 498 ctsio->be_move_done = ctl_config_move_done; 499 ctl_datamove((union ctl_io *)ctsio); 500 return (retval); 501 } 502 503 static struct tpc_list * 504 tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx) 505 { 506 struct tpc_list *list; 507 508 mtx_assert(&lun->lun_lock, MA_OWNED); 509 TAILQ_FOREACH(list, &lun->tpc_lists, links) { 510 if ((list->flags & EC_LIST_ID_USAGE_MASK) != 511 EC_LIST_ID_USAGE_NONE && list->list_id == list_id && 512 list->init_idx == init_idx) 513 break; 514 } 515 return (list); 516 } 517 518 int 519 ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio) 520 { 521 struct ctl_lun *lun = CTL_LUN(ctsio); 522 struct scsi_receive_copy_status_lid1 *cdb; 523 struct scsi_receive_copy_status_lid1_data *data; 524 struct tpc_list *list; 525 struct tpc_list list_copy; 526 int retval; 527 int alloc_len, total_len; 528 uint32_t list_id; 529 530 CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n")); 531 532 cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb; 533 retval = CTL_RETVAL_COMPLETE; 534 535 list_id = cdb->list_identifier; 536 mtx_lock(&lun->lun_lock); 537 list = tpc_find_list(lun, list_id, 538 ctl_get_initindex(&ctsio->io_hdr.nexus)); 539 if (list == NULL) { 540 mtx_unlock(&lun->lun_lock); 541 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 542 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, 543 /*bit*/ 0); 544 ctl_done((union ctl_io *)ctsio); 545 return (retval); 546 } 547 list_copy = *list; 548 if (list->completed) { 549 TAILQ_REMOVE(&lun->tpc_lists, list, links); 550 free(list, M_CTL); 551 } 552 mtx_unlock(&lun->lun_lock); 553 554 total_len = sizeof(*data); 555 alloc_len = scsi_4btoul(cdb->length); 556 557 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 558 ctsio->kern_sg_entries = 0; 559 ctsio->kern_rel_offset = 0; 560 ctsio->kern_data_len = min(total_len, alloc_len); 561 ctsio->kern_total_len = ctsio->kern_data_len; 562 563 data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr; 564 scsi_ulto4b(sizeof(*data) - 4, data->available_data); 565 if (list_copy.completed) { 566 if (list_copy.error || list_copy.abort) 567 data->copy_command_status = RCS_CCS_ERROR; 568 else 569 data->copy_command_status = RCS_CCS_COMPLETED; 570 } else 571 data->copy_command_status = RCS_CCS_INPROG; 572 scsi_ulto2b(list_copy.curseg, data->segments_processed); 573 if (list_copy.curbytes <= UINT32_MAX) { 574 data->transfer_count_units = RCS_TC_BYTES; 575 scsi_ulto4b(list_copy.curbytes, data->transfer_count); 576 } else { 577 data->transfer_count_units = RCS_TC_MBYTES; 578 scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count); 579 } 580 581 ctl_set_success(ctsio); 582 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 583 ctsio->be_move_done = ctl_config_move_done; 584 ctl_datamove((union ctl_io *)ctsio); 585 return (retval); 586 } 587 588 int 589 ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio) 590 { 591 struct ctl_lun *lun = CTL_LUN(ctsio); 592 struct scsi_receive_copy_failure_details *cdb; 593 struct scsi_receive_copy_failure_details_data *data; 594 struct tpc_list *list; 595 struct tpc_list list_copy; 596 int retval; 597 int alloc_len, total_len; 598 uint32_t list_id; 599 600 CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n")); 601 602 cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb; 603 retval = CTL_RETVAL_COMPLETE; 604 605 list_id = cdb->list_identifier; 606 mtx_lock(&lun->lun_lock); 607 list = tpc_find_list(lun, list_id, 608 ctl_get_initindex(&ctsio->io_hdr.nexus)); 609 if (list == NULL || !list->completed) { 610 mtx_unlock(&lun->lun_lock); 611 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 612 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, 613 /*bit*/ 0); 614 ctl_done((union ctl_io *)ctsio); 615 return (retval); 616 } 617 list_copy = *list; 618 TAILQ_REMOVE(&lun->tpc_lists, list, links); 619 free(list, M_CTL); 620 mtx_unlock(&lun->lun_lock); 621 622 total_len = sizeof(*data) + list_copy.sense_len; 623 alloc_len = scsi_4btoul(cdb->length); 624 625 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 626 ctsio->kern_sg_entries = 0; 627 ctsio->kern_rel_offset = 0; 628 ctsio->kern_data_len = min(total_len, alloc_len); 629 ctsio->kern_total_len = ctsio->kern_data_len; 630 631 data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr; 632 if (list_copy.completed && (list_copy.error || list_copy.abort)) { 633 scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len, 634 data->available_data); 635 data->copy_command_status = RCS_CCS_ERROR; 636 } else 637 scsi_ulto4b(0, data->available_data); 638 scsi_ulto2b(list_copy.sense_len, data->sense_data_length); 639 memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len); 640 641 ctl_set_success(ctsio); 642 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 643 ctsio->be_move_done = ctl_config_move_done; 644 ctl_datamove((union ctl_io *)ctsio); 645 return (retval); 646 } 647 648 int 649 ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio) 650 { 651 struct ctl_lun *lun = CTL_LUN(ctsio); 652 struct scsi_receive_copy_status_lid4 *cdb; 653 struct scsi_receive_copy_status_lid4_data *data; 654 struct tpc_list *list; 655 struct tpc_list list_copy; 656 int retval; 657 int alloc_len, total_len; 658 uint32_t list_id; 659 660 CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n")); 661 662 cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb; 663 retval = CTL_RETVAL_COMPLETE; 664 665 list_id = scsi_4btoul(cdb->list_identifier); 666 mtx_lock(&lun->lun_lock); 667 list = tpc_find_list(lun, list_id, 668 ctl_get_initindex(&ctsio->io_hdr.nexus)); 669 if (list == NULL) { 670 mtx_unlock(&lun->lun_lock); 671 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 672 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, 673 /*bit*/ 0); 674 ctl_done((union ctl_io *)ctsio); 675 return (retval); 676 } 677 list_copy = *list; 678 if (list->completed) { 679 TAILQ_REMOVE(&lun->tpc_lists, list, links); 680 free(list, M_CTL); 681 } 682 mtx_unlock(&lun->lun_lock); 683 684 total_len = sizeof(*data) + list_copy.sense_len; 685 alloc_len = scsi_4btoul(cdb->length); 686 687 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 688 ctsio->kern_sg_entries = 0; 689 ctsio->kern_rel_offset = 0; 690 ctsio->kern_data_len = min(total_len, alloc_len); 691 ctsio->kern_total_len = ctsio->kern_data_len; 692 693 data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr; 694 scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len, 695 data->available_data); 696 data->response_to_service_action = list_copy.service_action; 697 if (list_copy.completed) { 698 if (list_copy.error) 699 data->copy_command_status = RCS_CCS_ERROR; 700 else if (list_copy.abort) 701 data->copy_command_status = RCS_CCS_ABORTED; 702 else 703 data->copy_command_status = RCS_CCS_COMPLETED; 704 } else 705 data->copy_command_status = RCS_CCS_INPROG_FG; 706 scsi_ulto2b(list_copy.curops, data->operation_counter); 707 scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay); 708 data->transfer_count_units = RCS_TC_BYTES; 709 scsi_u64to8b(list_copy.curbytes, data->transfer_count); 710 scsi_ulto2b(list_copy.curseg, data->segments_processed); 711 data->length_of_the_sense_data_field = list_copy.sense_len; 712 data->sense_data_length = list_copy.sense_len; 713 memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len); 714 715 ctl_set_success(ctsio); 716 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 717 ctsio->be_move_done = ctl_config_move_done; 718 ctl_datamove((union ctl_io *)ctsio); 719 return (retval); 720 } 721 722 int 723 ctl_copy_operation_abort(struct ctl_scsiio *ctsio) 724 { 725 struct ctl_lun *lun = CTL_LUN(ctsio); 726 struct scsi_copy_operation_abort *cdb; 727 struct tpc_list *list; 728 int retval; 729 uint32_t list_id; 730 731 CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n")); 732 733 cdb = (struct scsi_copy_operation_abort *)ctsio->cdb; 734 retval = CTL_RETVAL_COMPLETE; 735 736 list_id = scsi_4btoul(cdb->list_identifier); 737 mtx_lock(&lun->lun_lock); 738 list = tpc_find_list(lun, list_id, 739 ctl_get_initindex(&ctsio->io_hdr.nexus)); 740 if (list == NULL) { 741 mtx_unlock(&lun->lun_lock); 742 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 743 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, 744 /*bit*/ 0); 745 ctl_done((union ctl_io *)ctsio); 746 return (retval); 747 } 748 list->abort = 1; 749 mtx_unlock(&lun->lun_lock); 750 751 ctl_set_success(ctsio); 752 ctl_done((union ctl_io *)ctsio); 753 return (retval); 754 } 755 756 static uint64_t 757 tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss, 758 uint32_t *pb, uint32_t *pbo) 759 { 760 761 if (idx == 0xffff) { 762 if (ss) 763 *ss = list->lun->be_lun->blocksize; 764 if (pb) 765 *pb = list->lun->be_lun->blocksize << 766 list->lun->be_lun->pblockexp; 767 if (pbo) 768 *pbo = list->lun->be_lun->blocksize * 769 list->lun->be_lun->pblockoff; 770 return (list->lun->lun); 771 } 772 if (idx >= list->ncscd) 773 return (UINT64_MAX); 774 return (tpcl_resolve(list->lun->ctl_softc, 775 list->init_port, &list->cscd[idx], ss, pb, pbo)); 776 } 777 778 static void 779 tpc_set_io_error_sense(struct tpc_list *list) 780 { 781 int flen; 782 uint8_t csi[4]; 783 uint8_t sks[3]; 784 uint8_t fbuf[4 + 64]; 785 786 scsi_ulto4b(list->curseg, csi); 787 if (list->fwd_cscd <= 0x07ff) { 788 sks[0] = SSD_SKS_SEGMENT_VALID; 789 scsi_ulto2b((uint8_t *)&list->cscd[list->fwd_cscd] - 790 list->params, &sks[1]); 791 } else 792 sks[0] = 0; 793 if (list->fwd_scsi_status) { 794 fbuf[0] = 0x0c; 795 fbuf[2] = list->fwd_target; 796 flen = list->fwd_sense_len; 797 if (flen > 64) { 798 flen = 64; 799 fbuf[2] |= SSD_FORWARDED_FSDT; 800 } 801 fbuf[1] = 2 + flen; 802 fbuf[3] = list->fwd_scsi_status; 803 bcopy(&list->fwd_sense_data, &fbuf[4], flen); 804 flen += 4; 805 } else 806 flen = 0; 807 ctl_set_sense(list->ctsio, /*current_error*/ 1, 808 /*sense_key*/ SSD_KEY_COPY_ABORTED, 809 /*asc*/ 0x0d, /*ascq*/ 0x01, 810 SSD_ELEM_COMMAND, sizeof(csi), csi, 811 sks[0] ? SSD_ELEM_SKS : SSD_ELEM_SKIP, sizeof(sks), sks, 812 flen ? SSD_ELEM_DESC : SSD_ELEM_SKIP, flen, fbuf, 813 SSD_ELEM_NONE); 814 } 815 816 static int 817 tpc_process_b2b(struct tpc_list *list) 818 { 819 struct scsi_ec_segment_b2b *seg; 820 struct scsi_ec_cscd_dtsp *sdstp, *ddstp; 821 struct tpc_io *tior, *tiow; 822 struct runl run; 823 uint64_t sl, dl; 824 off_t srclba, dstlba, numbytes, donebytes, roundbytes; 825 int numlba; 826 uint32_t srcblock, dstblock, pb, pbo, adj; 827 uint16_t scscd, dcscd; 828 uint8_t csi[4]; 829 830 scsi_ulto4b(list->curseg, csi); 831 if (list->stage == 1) { 832 while ((tior = TAILQ_FIRST(&list->allio)) != NULL) { 833 TAILQ_REMOVE(&list->allio, tior, links); 834 ctl_free_io(tior->io); 835 free(tior->buf, M_CTL); 836 free(tior, M_CTL); 837 } 838 if (list->abort) { 839 ctl_set_task_aborted(list->ctsio); 840 return (CTL_RETVAL_ERROR); 841 } else if (list->error) { 842 tpc_set_io_error_sense(list); 843 return (CTL_RETVAL_ERROR); 844 } 845 list->cursectors += list->segsectors; 846 list->curbytes += list->segbytes; 847 return (CTL_RETVAL_COMPLETE); 848 } 849 850 TAILQ_INIT(&list->allio); 851 seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg]; 852 scscd = scsi_2btoul(seg->src_cscd); 853 dcscd = scsi_2btoul(seg->dst_cscd); 854 sl = tpc_resolve(list, scscd, &srcblock, NULL, NULL); 855 dl = tpc_resolve(list, dcscd, &dstblock, &pb, &pbo); 856 if (sl == UINT64_MAX || dl == UINT64_MAX) { 857 ctl_set_sense(list->ctsio, /*current_error*/ 1, 858 /*sense_key*/ SSD_KEY_COPY_ABORTED, 859 /*asc*/ 0x08, /*ascq*/ 0x04, 860 SSD_ELEM_COMMAND, sizeof(csi), csi, 861 SSD_ELEM_NONE); 862 return (CTL_RETVAL_ERROR); 863 } 864 if (pbo > 0) 865 pbo = pb - pbo; 866 sdstp = &list->cscd[scscd].dtsp; 867 if (scsi_3btoul(sdstp->block_length) != 0) 868 srcblock = scsi_3btoul(sdstp->block_length); 869 ddstp = &list->cscd[dcscd].dtsp; 870 if (scsi_3btoul(ddstp->block_length) != 0) 871 dstblock = scsi_3btoul(ddstp->block_length); 872 numlba = scsi_2btoul(seg->number_of_blocks); 873 if (seg->flags & EC_SEG_DC) 874 numbytes = (off_t)numlba * dstblock; 875 else 876 numbytes = (off_t)numlba * srcblock; 877 srclba = scsi_8btou64(seg->src_lba); 878 dstlba = scsi_8btou64(seg->dst_lba); 879 880 // printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n", 881 // (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba), 882 // dl, scsi_8btou64(seg->dst_lba)); 883 884 if (numbytes == 0) 885 return (CTL_RETVAL_COMPLETE); 886 887 if (numbytes % srcblock != 0 || numbytes % dstblock != 0) { 888 ctl_set_sense(list->ctsio, /*current_error*/ 1, 889 /*sense_key*/ SSD_KEY_COPY_ABORTED, 890 /*asc*/ 0x26, /*ascq*/ 0x0A, 891 SSD_ELEM_COMMAND, sizeof(csi), csi, 892 SSD_ELEM_NONE); 893 return (CTL_RETVAL_ERROR); 894 } 895 896 list->segbytes = numbytes; 897 list->segsectors = numbytes / dstblock; 898 donebytes = 0; 899 TAILQ_INIT(&run); 900 list->tbdio = 0; 901 while (donebytes < numbytes) { 902 roundbytes = numbytes - donebytes; 903 if (roundbytes > TPC_MAX_IO_SIZE) { 904 roundbytes = TPC_MAX_IO_SIZE; 905 roundbytes -= roundbytes % dstblock; 906 if (pb > dstblock) { 907 adj = (dstlba * dstblock + roundbytes - pbo) % pb; 908 if (roundbytes > adj) 909 roundbytes -= adj; 910 } 911 } 912 913 tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO); 914 TAILQ_INIT(&tior->run); 915 tior->buf = malloc(roundbytes, M_CTL, M_WAITOK); 916 tior->list = list; 917 TAILQ_INSERT_TAIL(&list->allio, tior, links); 918 tior->io = tpcl_alloc_io(); 919 ctl_scsi_read_write(tior->io, 920 /*data_ptr*/ tior->buf, 921 /*data_len*/ roundbytes, 922 /*read_op*/ 1, 923 /*byte2*/ 0, 924 /*minimum_cdb_size*/ 0, 925 /*lba*/ srclba, 926 /*num_blocks*/ roundbytes / srcblock, 927 /*tag_type*/ CTL_TAG_SIMPLE, 928 /*control*/ 0); 929 tior->io->io_hdr.retries = 3; 930 tior->target = SSD_FORWARDED_SDS_EXSRC; 931 tior->cscd = scscd; 932 tior->lun = sl; 933 tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior; 934 935 tiow = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO); 936 TAILQ_INIT(&tiow->run); 937 tiow->list = list; 938 TAILQ_INSERT_TAIL(&list->allio, tiow, links); 939 tiow->io = tpcl_alloc_io(); 940 ctl_scsi_read_write(tiow->io, 941 /*data_ptr*/ tior->buf, 942 /*data_len*/ roundbytes, 943 /*read_op*/ 0, 944 /*byte2*/ 0, 945 /*minimum_cdb_size*/ 0, 946 /*lba*/ dstlba, 947 /*num_blocks*/ roundbytes / dstblock, 948 /*tag_type*/ CTL_TAG_SIMPLE, 949 /*control*/ 0); 950 tiow->io->io_hdr.retries = 3; 951 tiow->target = SSD_FORWARDED_SDS_EXDST; 952 tiow->cscd = dcscd; 953 tiow->lun = dl; 954 tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow; 955 956 TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks); 957 TAILQ_INSERT_TAIL(&run, tior, rlinks); 958 list->tbdio++; 959 donebytes += roundbytes; 960 srclba += roundbytes / srcblock; 961 dstlba += roundbytes / dstblock; 962 } 963 964 while ((tior = TAILQ_FIRST(&run)) != NULL) { 965 TAILQ_REMOVE(&run, tior, rlinks); 966 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE) 967 panic("tpcl_queue() error"); 968 } 969 970 list->stage++; 971 return (CTL_RETVAL_QUEUED); 972 } 973 974 static int 975 tpc_process_verify(struct tpc_list *list) 976 { 977 struct scsi_ec_segment_verify *seg; 978 struct tpc_io *tio; 979 uint64_t sl; 980 uint16_t cscd; 981 uint8_t csi[4]; 982 983 scsi_ulto4b(list->curseg, csi); 984 if (list->stage == 1) { 985 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) { 986 TAILQ_REMOVE(&list->allio, tio, links); 987 ctl_free_io(tio->io); 988 free(tio, M_CTL); 989 } 990 if (list->abort) { 991 ctl_set_task_aborted(list->ctsio); 992 return (CTL_RETVAL_ERROR); 993 } else if (list->error) { 994 tpc_set_io_error_sense(list); 995 return (CTL_RETVAL_ERROR); 996 } else 997 return (CTL_RETVAL_COMPLETE); 998 } 999 1000 TAILQ_INIT(&list->allio); 1001 seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg]; 1002 cscd = scsi_2btoul(seg->src_cscd); 1003 sl = tpc_resolve(list, cscd, NULL, NULL, NULL); 1004 if (sl == UINT64_MAX) { 1005 ctl_set_sense(list->ctsio, /*current_error*/ 1, 1006 /*sense_key*/ SSD_KEY_COPY_ABORTED, 1007 /*asc*/ 0x08, /*ascq*/ 0x04, 1008 SSD_ELEM_COMMAND, sizeof(csi), csi, 1009 SSD_ELEM_NONE); 1010 return (CTL_RETVAL_ERROR); 1011 } 1012 1013 // printf("Verify %ju\n", sl); 1014 1015 if ((seg->tur & 0x01) == 0) 1016 return (CTL_RETVAL_COMPLETE); 1017 1018 list->tbdio = 1; 1019 tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO); 1020 TAILQ_INIT(&tio->run); 1021 tio->list = list; 1022 TAILQ_INSERT_TAIL(&list->allio, tio, links); 1023 tio->io = tpcl_alloc_io(); 1024 ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); 1025 tio->io->io_hdr.retries = 3; 1026 tio->target = SSD_FORWARDED_SDS_EXSRC; 1027 tio->cscd = cscd; 1028 tio->lun = sl; 1029 tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio; 1030 list->stage++; 1031 if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE) 1032 panic("tpcl_queue() error"); 1033 return (CTL_RETVAL_QUEUED); 1034 } 1035 1036 static int 1037 tpc_process_register_key(struct tpc_list *list) 1038 { 1039 struct scsi_ec_segment_register_key *seg; 1040 struct tpc_io *tio; 1041 uint64_t dl; 1042 int datalen; 1043 uint16_t cscd; 1044 uint8_t csi[4]; 1045 1046 scsi_ulto4b(list->curseg, csi); 1047 if (list->stage == 1) { 1048 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) { 1049 TAILQ_REMOVE(&list->allio, tio, links); 1050 ctl_free_io(tio->io); 1051 free(tio->buf, M_CTL); 1052 free(tio, M_CTL); 1053 } 1054 if (list->abort) { 1055 ctl_set_task_aborted(list->ctsio); 1056 return (CTL_RETVAL_ERROR); 1057 } else if (list->error) { 1058 tpc_set_io_error_sense(list); 1059 return (CTL_RETVAL_ERROR); 1060 } else 1061 return (CTL_RETVAL_COMPLETE); 1062 } 1063 1064 TAILQ_INIT(&list->allio); 1065 seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg]; 1066 cscd = scsi_2btoul(seg->dst_cscd); 1067 dl = tpc_resolve(list, cscd, NULL, NULL, NULL); 1068 if (dl == UINT64_MAX) { 1069 ctl_set_sense(list->ctsio, /*current_error*/ 1, 1070 /*sense_key*/ SSD_KEY_COPY_ABORTED, 1071 /*asc*/ 0x08, /*ascq*/ 0x04, 1072 SSD_ELEM_COMMAND, sizeof(csi), csi, 1073 SSD_ELEM_NONE); 1074 return (CTL_RETVAL_ERROR); 1075 } 1076 1077 // printf("Register Key %ju\n", dl); 1078 1079 list->tbdio = 1; 1080 tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO); 1081 TAILQ_INIT(&tio->run); 1082 tio->list = list; 1083 TAILQ_INSERT_TAIL(&list->allio, tio, links); 1084 tio->io = tpcl_alloc_io(); 1085 datalen = sizeof(struct scsi_per_res_out_parms); 1086 tio->buf = malloc(datalen, M_CTL, M_WAITOK); 1087 ctl_scsi_persistent_res_out(tio->io, 1088 tio->buf, datalen, SPRO_REGISTER, -1, 1089 scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key), 1090 /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); 1091 tio->io->io_hdr.retries = 3; 1092 tio->target = SSD_FORWARDED_SDS_EXDST; 1093 tio->cscd = cscd; 1094 tio->lun = dl; 1095 tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio; 1096 list->stage++; 1097 if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE) 1098 panic("tpcl_queue() error"); 1099 return (CTL_RETVAL_QUEUED); 1100 } 1101 1102 static off_t 1103 tpc_ranges_length(struct scsi_range_desc *range, int nrange) 1104 { 1105 off_t length = 0; 1106 int r; 1107 1108 for (r = 0; r < nrange; r++) 1109 length += scsi_4btoul(range[r].length); 1110 return (length); 1111 } 1112 1113 static int 1114 tpc_check_ranges_l(struct scsi_range_desc *range, int nrange, uint64_t maxlba, 1115 uint64_t *lba) 1116 { 1117 uint64_t b1; 1118 uint32_t l1; 1119 int i; 1120 1121 for (i = 0; i < nrange; i++) { 1122 b1 = scsi_8btou64(range[i].lba); 1123 l1 = scsi_4btoul(range[i].length); 1124 if (b1 + l1 < b1 || b1 + l1 > maxlba + 1) { 1125 *lba = MAX(b1, maxlba + 1); 1126 return (-1); 1127 } 1128 } 1129 return (0); 1130 } 1131 1132 static int 1133 tpc_check_ranges_x(struct scsi_range_desc *range, int nrange) 1134 { 1135 uint64_t b1, b2; 1136 uint32_t l1, l2; 1137 int i, j; 1138 1139 for (i = 0; i < nrange - 1; i++) { 1140 b1 = scsi_8btou64(range[i].lba); 1141 l1 = scsi_4btoul(range[i].length); 1142 for (j = i + 1; j < nrange; j++) { 1143 b2 = scsi_8btou64(range[j].lba); 1144 l2 = scsi_4btoul(range[j].length); 1145 if (b1 + l1 > b2 && b2 + l2 > b1) 1146 return (-1); 1147 } 1148 } 1149 return (0); 1150 } 1151 1152 static int 1153 tpc_skip_ranges(struct scsi_range_desc *range, int nrange, off_t skip, 1154 int *srange, off_t *soffset) 1155 { 1156 off_t off; 1157 int r; 1158 1159 r = 0; 1160 off = 0; 1161 while (r < nrange) { 1162 if (skip - off < scsi_4btoul(range[r].length)) { 1163 *srange = r; 1164 *soffset = skip - off; 1165 return (0); 1166 } 1167 off += scsi_4btoul(range[r].length); 1168 r++; 1169 } 1170 return (-1); 1171 } 1172 1173 static int 1174 tpc_process_wut(struct tpc_list *list) 1175 { 1176 struct tpc_io *tio, *tior, *tiow; 1177 struct runl run; 1178 int drange, srange; 1179 off_t doffset, soffset; 1180 off_t srclba, dstlba, numbytes, donebytes, roundbytes; 1181 uint32_t srcblock, dstblock, pb, pbo, adj; 1182 1183 if (list->stage > 0) { 1184 /* Cleanup after previous rounds. */ 1185 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) { 1186 TAILQ_REMOVE(&list->allio, tio, links); 1187 ctl_free_io(tio->io); 1188 free(tio->buf, M_CTL); 1189 free(tio, M_CTL); 1190 } 1191 if (list->abort) { 1192 ctl_set_task_aborted(list->ctsio); 1193 return (CTL_RETVAL_ERROR); 1194 } else if (list->error) { 1195 if (list->fwd_scsi_status) { 1196 list->ctsio->io_hdr.status = 1197 CTL_SCSI_ERROR | CTL_AUTOSENSE; 1198 list->ctsio->scsi_status = list->fwd_scsi_status; 1199 list->ctsio->sense_data = list->fwd_sense_data; 1200 list->ctsio->sense_len = list->fwd_sense_len; 1201 } else { 1202 ctl_set_invalid_field(list->ctsio, 1203 /*sks_valid*/ 0, /*command*/ 0, 1204 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); 1205 } 1206 return (CTL_RETVAL_ERROR); 1207 } 1208 list->cursectors += list->segsectors; 1209 list->curbytes += list->segbytes; 1210 } 1211 1212 /* Check where we are on destination ranges list. */ 1213 if (tpc_skip_ranges(list->range, list->nrange, list->cursectors, 1214 &drange, &doffset) != 0) 1215 return (CTL_RETVAL_COMPLETE); 1216 dstblock = list->lun->be_lun->blocksize; 1217 pb = dstblock << list->lun->be_lun->pblockexp; 1218 if (list->lun->be_lun->pblockoff > 0) 1219 pbo = pb - dstblock * list->lun->be_lun->pblockoff; 1220 else 1221 pbo = 0; 1222 1223 /* Check where we are on source ranges list. */ 1224 srcblock = list->token->blocksize; 1225 if (tpc_skip_ranges(list->token->range, list->token->nrange, 1226 list->offset_into_rod + list->cursectors * dstblock / srcblock, 1227 &srange, &soffset) != 0) { 1228 ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0, 1229 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); 1230 return (CTL_RETVAL_ERROR); 1231 } 1232 1233 srclba = scsi_8btou64(list->token->range[srange].lba) + soffset; 1234 dstlba = scsi_8btou64(list->range[drange].lba) + doffset; 1235 numbytes = srcblock * 1236 (scsi_4btoul(list->token->range[srange].length) - soffset); 1237 numbytes = omin(numbytes, dstblock * 1238 (scsi_4btoul(list->range[drange].length) - doffset)); 1239 if (numbytes > TPC_MAX_IOCHUNK_SIZE) { 1240 numbytes = TPC_MAX_IOCHUNK_SIZE; 1241 numbytes -= numbytes % dstblock; 1242 if (pb > dstblock) { 1243 adj = (dstlba * dstblock + numbytes - pbo) % pb; 1244 if (numbytes > adj) 1245 numbytes -= adj; 1246 } 1247 } 1248 1249 if (numbytes % srcblock != 0 || numbytes % dstblock != 0) { 1250 ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0, 1251 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); 1252 return (CTL_RETVAL_ERROR); 1253 } 1254 1255 list->segbytes = numbytes; 1256 list->segsectors = numbytes / dstblock; 1257 //printf("Copy chunk of %ju sectors from %ju to %ju\n", list->segsectors, 1258 // srclba, dstlba); 1259 donebytes = 0; 1260 TAILQ_INIT(&run); 1261 list->tbdio = 0; 1262 TAILQ_INIT(&list->allio); 1263 while (donebytes < numbytes) { 1264 roundbytes = numbytes - donebytes; 1265 if (roundbytes > TPC_MAX_IO_SIZE) { 1266 roundbytes = TPC_MAX_IO_SIZE; 1267 roundbytes -= roundbytes % dstblock; 1268 if (pb > dstblock) { 1269 adj = (dstlba * dstblock + roundbytes - pbo) % pb; 1270 if (roundbytes > adj) 1271 roundbytes -= adj; 1272 } 1273 } 1274 1275 tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO); 1276 TAILQ_INIT(&tior->run); 1277 tior->buf = malloc(roundbytes, M_CTL, M_WAITOK); 1278 tior->list = list; 1279 TAILQ_INSERT_TAIL(&list->allio, tior, links); 1280 tior->io = tpcl_alloc_io(); 1281 ctl_scsi_read_write(tior->io, 1282 /*data_ptr*/ tior->buf, 1283 /*data_len*/ roundbytes, 1284 /*read_op*/ 1, 1285 /*byte2*/ 0, 1286 /*minimum_cdb_size*/ 0, 1287 /*lba*/ srclba, 1288 /*num_blocks*/ roundbytes / srcblock, 1289 /*tag_type*/ CTL_TAG_SIMPLE, 1290 /*control*/ 0); 1291 tior->io->io_hdr.retries = 3; 1292 tior->lun = list->token->lun; 1293 tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior; 1294 1295 tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO); 1296 TAILQ_INIT(&tiow->run); 1297 tiow->list = list; 1298 TAILQ_INSERT_TAIL(&list->allio, tiow, links); 1299 tiow->io = tpcl_alloc_io(); 1300 ctl_scsi_read_write(tiow->io, 1301 /*data_ptr*/ tior->buf, 1302 /*data_len*/ roundbytes, 1303 /*read_op*/ 0, 1304 /*byte2*/ 0, 1305 /*minimum_cdb_size*/ 0, 1306 /*lba*/ dstlba, 1307 /*num_blocks*/ roundbytes / dstblock, 1308 /*tag_type*/ CTL_TAG_SIMPLE, 1309 /*control*/ 0); 1310 tiow->io->io_hdr.retries = 3; 1311 tiow->lun = list->lun->lun; 1312 tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow; 1313 1314 TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks); 1315 TAILQ_INSERT_TAIL(&run, tior, rlinks); 1316 list->tbdio++; 1317 donebytes += roundbytes; 1318 srclba += roundbytes / srcblock; 1319 dstlba += roundbytes / dstblock; 1320 } 1321 1322 while ((tior = TAILQ_FIRST(&run)) != NULL) { 1323 TAILQ_REMOVE(&run, tior, rlinks); 1324 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE) 1325 panic("tpcl_queue() error"); 1326 } 1327 1328 list->stage++; 1329 return (CTL_RETVAL_QUEUED); 1330 } 1331 1332 static int 1333 tpc_process_zero_wut(struct tpc_list *list) 1334 { 1335 struct tpc_io *tio, *tiow; 1336 struct runl run, *prun; 1337 int r; 1338 uint32_t dstblock, len; 1339 1340 if (list->stage > 0) { 1341 complete: 1342 /* Cleanup after previous rounds. */ 1343 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) { 1344 TAILQ_REMOVE(&list->allio, tio, links); 1345 ctl_free_io(tio->io); 1346 free(tio, M_CTL); 1347 } 1348 if (list->abort) { 1349 ctl_set_task_aborted(list->ctsio); 1350 return (CTL_RETVAL_ERROR); 1351 } else if (list->error) { 1352 if (list->fwd_scsi_status) { 1353 list->ctsio->io_hdr.status = 1354 CTL_SCSI_ERROR | CTL_AUTOSENSE; 1355 list->ctsio->scsi_status = list->fwd_scsi_status; 1356 list->ctsio->sense_data = list->fwd_sense_data; 1357 list->ctsio->sense_len = list->fwd_sense_len; 1358 } else { 1359 ctl_set_invalid_field(list->ctsio, 1360 /*sks_valid*/ 0, /*command*/ 0, 1361 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); 1362 } 1363 return (CTL_RETVAL_ERROR); 1364 } 1365 list->cursectors += list->segsectors; 1366 list->curbytes += list->segbytes; 1367 return (CTL_RETVAL_COMPLETE); 1368 } 1369 1370 dstblock = list->lun->be_lun->blocksize; 1371 TAILQ_INIT(&run); 1372 prun = &run; 1373 list->tbdio = 1; 1374 TAILQ_INIT(&list->allio); 1375 list->segsectors = 0; 1376 for (r = 0; r < list->nrange; r++) { 1377 len = scsi_4btoul(list->range[r].length); 1378 if (len == 0) 1379 continue; 1380 1381 tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO); 1382 TAILQ_INIT(&tiow->run); 1383 tiow->list = list; 1384 TAILQ_INSERT_TAIL(&list->allio, tiow, links); 1385 tiow->io = tpcl_alloc_io(); 1386 ctl_scsi_write_same(tiow->io, 1387 /*data_ptr*/ NULL, 1388 /*data_len*/ 0, 1389 /*byte2*/ SWS_NDOB, 1390 /*lba*/ scsi_8btou64(list->range[r].lba), 1391 /*num_blocks*/ len, 1392 /*tag_type*/ CTL_TAG_SIMPLE, 1393 /*control*/ 0); 1394 tiow->io->io_hdr.retries = 3; 1395 tiow->lun = list->lun->lun; 1396 tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow; 1397 1398 TAILQ_INSERT_TAIL(prun, tiow, rlinks); 1399 prun = &tiow->run; 1400 list->segsectors += len; 1401 } 1402 list->segbytes = list->segsectors * dstblock; 1403 1404 if (TAILQ_EMPTY(&run)) 1405 goto complete; 1406 1407 while ((tiow = TAILQ_FIRST(&run)) != NULL) { 1408 TAILQ_REMOVE(&run, tiow, rlinks); 1409 if (tpcl_queue(tiow->io, tiow->lun) != CTL_RETVAL_COMPLETE) 1410 panic("tpcl_queue() error"); 1411 } 1412 1413 list->stage++; 1414 return (CTL_RETVAL_QUEUED); 1415 } 1416 1417 static void 1418 tpc_process(struct tpc_list *list) 1419 { 1420 struct ctl_lun *lun = list->lun; 1421 struct ctl_softc *softc = lun->ctl_softc; 1422 struct scsi_ec_segment *seg; 1423 struct ctl_scsiio *ctsio = list->ctsio; 1424 int retval = CTL_RETVAL_COMPLETE; 1425 uint8_t csi[4]; 1426 1427 if (list->service_action == EC_WUT) { 1428 if (list->token != NULL) 1429 retval = tpc_process_wut(list); 1430 else 1431 retval = tpc_process_zero_wut(list); 1432 if (retval == CTL_RETVAL_QUEUED) 1433 return; 1434 if (retval == CTL_RETVAL_ERROR) { 1435 list->error = 1; 1436 goto done; 1437 } 1438 } else { 1439 //printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg); 1440 while (list->curseg < list->nseg) { 1441 seg = list->seg[list->curseg]; 1442 switch (seg->type_code) { 1443 case EC_SEG_B2B: 1444 retval = tpc_process_b2b(list); 1445 break; 1446 case EC_SEG_VERIFY: 1447 retval = tpc_process_verify(list); 1448 break; 1449 case EC_SEG_REGISTER_KEY: 1450 retval = tpc_process_register_key(list); 1451 break; 1452 default: 1453 scsi_ulto4b(list->curseg, csi); 1454 ctl_set_sense(ctsio, /*current_error*/ 1, 1455 /*sense_key*/ SSD_KEY_COPY_ABORTED, 1456 /*asc*/ 0x26, /*ascq*/ 0x09, 1457 SSD_ELEM_COMMAND, sizeof(csi), csi, 1458 SSD_ELEM_NONE); 1459 goto done; 1460 } 1461 if (retval == CTL_RETVAL_QUEUED) 1462 return; 1463 if (retval == CTL_RETVAL_ERROR) { 1464 list->error = 1; 1465 goto done; 1466 } 1467 list->curseg++; 1468 list->stage = 0; 1469 } 1470 } 1471 1472 ctl_set_success(ctsio); 1473 1474 done: 1475 //printf("ZZZ done\n"); 1476 free(list->params, M_CTL); 1477 list->params = NULL; 1478 if (list->token) { 1479 mtx_lock(&softc->tpc_lock); 1480 if (--list->token->active == 0) 1481 list->token->last_active = time_uptime; 1482 mtx_unlock(&softc->tpc_lock); 1483 list->token = NULL; 1484 } 1485 mtx_lock(&lun->lun_lock); 1486 if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) { 1487 TAILQ_REMOVE(&lun->tpc_lists, list, links); 1488 free(list, M_CTL); 1489 } else { 1490 list->completed = 1; 1491 list->last_active = time_uptime; 1492 list->sense_data = ctsio->sense_data; 1493 list->sense_len = ctsio->sense_len; 1494 list->scsi_status = ctsio->scsi_status; 1495 } 1496 mtx_unlock(&lun->lun_lock); 1497 1498 ctl_done((union ctl_io *)ctsio); 1499 } 1500 1501 /* 1502 * For any sort of check condition, busy, etc., we just retry. We do not 1503 * decrement the retry count for unit attention type errors. These are 1504 * normal, and we want to save the retry count for "real" errors. Otherwise, 1505 * we could end up with situations where a command will succeed in some 1506 * situations and fail in others, depending on whether a unit attention is 1507 * pending. Also, some of our error recovery actions, most notably the 1508 * LUN reset action, will cause a unit attention. 1509 * 1510 * We can add more detail here later if necessary. 1511 */ 1512 static tpc_error_action 1513 tpc_checkcond_parse(union ctl_io *io) 1514 { 1515 tpc_error_action error_action; 1516 int error_code, sense_key, asc, ascq; 1517 1518 /* 1519 * Default to retrying the command. 1520 */ 1521 error_action = TPC_ERR_RETRY; 1522 1523 scsi_extract_sense_len(&io->scsiio.sense_data, 1524 io->scsiio.sense_len, 1525 &error_code, 1526 &sense_key, 1527 &asc, 1528 &ascq, 1529 /*show_errors*/ 1); 1530 1531 switch (error_code) { 1532 case SSD_DEFERRED_ERROR: 1533 case SSD_DESC_DEFERRED_ERROR: 1534 error_action |= TPC_ERR_NO_DECREMENT; 1535 break; 1536 case SSD_CURRENT_ERROR: 1537 case SSD_DESC_CURRENT_ERROR: 1538 default: 1539 switch (sense_key) { 1540 case SSD_KEY_UNIT_ATTENTION: 1541 error_action |= TPC_ERR_NO_DECREMENT; 1542 break; 1543 case SSD_KEY_HARDWARE_ERROR: 1544 /* 1545 * This is our generic "something bad happened" 1546 * error code. It often isn't recoverable. 1547 */ 1548 if ((asc == 0x44) && (ascq == 0x00)) 1549 error_action = TPC_ERR_FAIL; 1550 break; 1551 case SSD_KEY_NOT_READY: 1552 /* 1553 * If the LUN is powered down, there likely isn't 1554 * much point in retrying right now. 1555 */ 1556 if ((asc == 0x04) && (ascq == 0x02)) 1557 error_action = TPC_ERR_FAIL; 1558 /* 1559 * If the LUN is offline, there probably isn't much 1560 * point in retrying, either. 1561 */ 1562 if ((asc == 0x04) && (ascq == 0x03)) 1563 error_action = TPC_ERR_FAIL; 1564 break; 1565 } 1566 } 1567 return (error_action); 1568 } 1569 1570 static tpc_error_action 1571 tpc_error_parse(union ctl_io *io) 1572 { 1573 tpc_error_action error_action = TPC_ERR_RETRY; 1574 1575 switch (io->io_hdr.io_type) { 1576 case CTL_IO_SCSI: 1577 switch (io->io_hdr.status & CTL_STATUS_MASK) { 1578 case CTL_SCSI_ERROR: 1579 switch (io->scsiio.scsi_status) { 1580 case SCSI_STATUS_CHECK_COND: 1581 error_action = tpc_checkcond_parse(io); 1582 break; 1583 default: 1584 break; 1585 } 1586 break; 1587 default: 1588 break; 1589 } 1590 break; 1591 case CTL_IO_TASK: 1592 break; 1593 default: 1594 panic("%s: invalid ctl_io type %d\n", __func__, 1595 io->io_hdr.io_type); 1596 break; 1597 } 1598 return (error_action); 1599 } 1600 1601 void 1602 tpc_done(union ctl_io *io) 1603 { 1604 struct tpc_io *tio, *tior; 1605 1606 /* 1607 * Very minimal retry logic. We basically retry if we got an error 1608 * back, and the retry count is greater than 0. If we ever want 1609 * more sophisticated initiator type behavior, the CAM error 1610 * recovery code in ../common might be helpful. 1611 */ 1612 tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 1613 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) 1614 && (io->io_hdr.retries > 0)) { 1615 ctl_io_status old_status; 1616 tpc_error_action error_action; 1617 1618 error_action = tpc_error_parse(io); 1619 switch (error_action & TPC_ERR_MASK) { 1620 case TPC_ERR_FAIL: 1621 break; 1622 case TPC_ERR_RETRY: 1623 default: 1624 if ((error_action & TPC_ERR_NO_DECREMENT) == 0) 1625 io->io_hdr.retries--; 1626 old_status = io->io_hdr.status; 1627 io->io_hdr.status = CTL_STATUS_NONE; 1628 io->io_hdr.flags &= ~CTL_FLAG_ABORT; 1629 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 1630 if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) { 1631 printf("%s: error returned from tpcl_queue()!\n", 1632 __func__); 1633 io->io_hdr.status = old_status; 1634 } else 1635 return; 1636 } 1637 } 1638 1639 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { 1640 tio->list->error = 1; 1641 if (io->io_hdr.io_type == CTL_IO_SCSI && 1642 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR) { 1643 tio->list->fwd_scsi_status = io->scsiio.scsi_status; 1644 tio->list->fwd_sense_data = io->scsiio.sense_data; 1645 tio->list->fwd_sense_len = io->scsiio.sense_len; 1646 tio->list->fwd_target = tio->target; 1647 tio->list->fwd_cscd = tio->cscd; 1648 } 1649 } else 1650 atomic_add_int(&tio->list->curops, 1); 1651 if (!tio->list->error && !tio->list->abort) { 1652 while ((tior = TAILQ_FIRST(&tio->run)) != NULL) { 1653 TAILQ_REMOVE(&tio->run, tior, rlinks); 1654 atomic_add_int(&tio->list->tbdio, 1); 1655 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE) 1656 panic("tpcl_queue() error"); 1657 } 1658 } 1659 if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1) 1660 tpc_process(tio->list); 1661 } 1662 1663 int 1664 ctl_extended_copy_lid1(struct ctl_scsiio *ctsio) 1665 { 1666 struct ctl_lun *lun = CTL_LUN(ctsio); 1667 struct scsi_extended_copy *cdb; 1668 struct scsi_extended_copy_lid1_data *data; 1669 struct scsi_ec_cscd *cscd; 1670 struct scsi_ec_segment *seg; 1671 struct tpc_list *list, *tlist; 1672 uint8_t *ptr; 1673 const char *value; 1674 int len, off, lencscd, lenseg, leninl, nseg; 1675 1676 CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n")); 1677 1678 cdb = (struct scsi_extended_copy *)ctsio->cdb; 1679 len = scsi_4btoul(cdb->length); 1680 1681 if (len == 0) { 1682 ctl_set_success(ctsio); 1683 goto done; 1684 } 1685 if (len < sizeof(struct scsi_extended_copy_lid1_data) || 1686 len > sizeof(struct scsi_extended_copy_lid1_data) + 1687 TPC_MAX_LIST + TPC_MAX_INLINE) { 1688 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 1689 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0); 1690 goto done; 1691 } 1692 1693 /* 1694 * If we've got a kernel request that hasn't been malloced yet, 1695 * malloc it and tell the caller the data buffer is here. 1696 */ 1697 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 1698 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 1699 ctsio->kern_data_len = len; 1700 ctsio->kern_total_len = len; 1701 ctsio->kern_rel_offset = 0; 1702 ctsio->kern_sg_entries = 0; 1703 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 1704 ctsio->be_move_done = ctl_config_move_done; 1705 ctl_datamove((union ctl_io *)ctsio); 1706 1707 return (CTL_RETVAL_COMPLETE); 1708 } 1709 1710 data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr; 1711 lencscd = scsi_2btoul(data->cscd_list_length); 1712 lenseg = scsi_4btoul(data->segment_list_length); 1713 leninl = scsi_4btoul(data->inline_data_length); 1714 if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) { 1715 ctl_set_sense(ctsio, /*current_error*/ 1, 1716 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1717 /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE); 1718 goto done; 1719 } 1720 if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) { 1721 ctl_set_sense(ctsio, /*current_error*/ 1, 1722 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1723 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE); 1724 goto done; 1725 } 1726 if (lencscd + lenseg > TPC_MAX_LIST || 1727 leninl > TPC_MAX_INLINE || 1728 len < sizeof(struct scsi_extended_copy_lid1_data) + 1729 lencscd + lenseg + leninl) { 1730 ctl_set_param_len_error(ctsio); 1731 goto done; 1732 } 1733 1734 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO); 1735 list->service_action = cdb->service_action; 1736 value = dnvlist_get_string(lun->be_lun->options, "insecure_tpc", NULL); 1737 if (value != NULL && strcmp(value, "on") == 0) 1738 list->init_port = -1; 1739 else 1740 list->init_port = ctsio->io_hdr.nexus.targ_port; 1741 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus); 1742 list->list_id = data->list_identifier; 1743 list->flags = data->flags; 1744 list->params = ctsio->kern_data_ptr; 1745 list->cscd = (struct scsi_ec_cscd *)&data->data[0]; 1746 ptr = &data->data[0]; 1747 for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) { 1748 cscd = (struct scsi_ec_cscd *)(ptr + off); 1749 if (cscd->type_code != EC_CSCD_ID) { 1750 free(list, M_CTL); 1751 ctl_set_sense(ctsio, /*current_error*/ 1, 1752 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1753 /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE); 1754 goto done; 1755 } 1756 } 1757 ptr = &data->data[lencscd]; 1758 for (nseg = 0, off = 0; off < lenseg; nseg++) { 1759 if (nseg >= TPC_MAX_SEGS) { 1760 free(list, M_CTL); 1761 ctl_set_sense(ctsio, /*current_error*/ 1, 1762 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1763 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE); 1764 goto done; 1765 } 1766 seg = (struct scsi_ec_segment *)(ptr + off); 1767 if (seg->type_code != EC_SEG_B2B && 1768 seg->type_code != EC_SEG_VERIFY && 1769 seg->type_code != EC_SEG_REGISTER_KEY) { 1770 free(list, M_CTL); 1771 ctl_set_sense(ctsio, /*current_error*/ 1, 1772 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1773 /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE); 1774 goto done; 1775 } 1776 list->seg[nseg] = seg; 1777 off += sizeof(struct scsi_ec_segment) + 1778 scsi_2btoul(seg->descr_length); 1779 } 1780 list->inl = &data->data[lencscd + lenseg]; 1781 list->ncscd = lencscd / sizeof(struct scsi_ec_cscd); 1782 list->nseg = nseg; 1783 list->leninl = leninl; 1784 list->ctsio = ctsio; 1785 list->lun = lun; 1786 mtx_lock(&lun->lun_lock); 1787 if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) { 1788 tlist = tpc_find_list(lun, list->list_id, list->init_idx); 1789 if (tlist != NULL && !tlist->completed) { 1790 mtx_unlock(&lun->lun_lock); 1791 free(list, M_CTL); 1792 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 1793 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 1794 /*bit*/ 0); 1795 goto done; 1796 } 1797 if (tlist != NULL) { 1798 TAILQ_REMOVE(&lun->tpc_lists, tlist, links); 1799 free(tlist, M_CTL); 1800 } 1801 } 1802 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links); 1803 mtx_unlock(&lun->lun_lock); 1804 1805 tpc_process(list); 1806 return (CTL_RETVAL_COMPLETE); 1807 1808 done: 1809 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 1810 free(ctsio->kern_data_ptr, M_CTL); 1811 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 1812 } 1813 ctl_done((union ctl_io *)ctsio); 1814 return (CTL_RETVAL_COMPLETE); 1815 } 1816 1817 int 1818 ctl_extended_copy_lid4(struct ctl_scsiio *ctsio) 1819 { 1820 struct ctl_lun *lun = CTL_LUN(ctsio); 1821 struct scsi_extended_copy *cdb; 1822 struct scsi_extended_copy_lid4_data *data; 1823 struct scsi_ec_cscd *cscd; 1824 struct scsi_ec_segment *seg; 1825 struct tpc_list *list, *tlist; 1826 uint8_t *ptr; 1827 const char *value; 1828 int len, off, lencscd, lenseg, leninl, nseg; 1829 1830 CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n")); 1831 1832 cdb = (struct scsi_extended_copy *)ctsio->cdb; 1833 len = scsi_4btoul(cdb->length); 1834 1835 if (len == 0) { 1836 ctl_set_success(ctsio); 1837 goto done; 1838 } 1839 if (len < sizeof(struct scsi_extended_copy_lid4_data) || 1840 len > sizeof(struct scsi_extended_copy_lid4_data) + 1841 TPC_MAX_LIST + TPC_MAX_INLINE) { 1842 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 1843 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0); 1844 goto done; 1845 } 1846 1847 /* 1848 * If we've got a kernel request that hasn't been malloced yet, 1849 * malloc it and tell the caller the data buffer is here. 1850 */ 1851 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 1852 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 1853 ctsio->kern_data_len = len; 1854 ctsio->kern_total_len = len; 1855 ctsio->kern_rel_offset = 0; 1856 ctsio->kern_sg_entries = 0; 1857 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 1858 ctsio->be_move_done = ctl_config_move_done; 1859 ctl_datamove((union ctl_io *)ctsio); 1860 1861 return (CTL_RETVAL_COMPLETE); 1862 } 1863 1864 data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr; 1865 lencscd = scsi_2btoul(data->cscd_list_length); 1866 lenseg = scsi_2btoul(data->segment_list_length); 1867 leninl = scsi_2btoul(data->inline_data_length); 1868 if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) { 1869 ctl_set_sense(ctsio, /*current_error*/ 1, 1870 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1871 /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE); 1872 goto done; 1873 } 1874 if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) { 1875 ctl_set_sense(ctsio, /*current_error*/ 1, 1876 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1877 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE); 1878 goto done; 1879 } 1880 if (lencscd + lenseg > TPC_MAX_LIST || 1881 leninl > TPC_MAX_INLINE || 1882 len < sizeof(struct scsi_extended_copy_lid1_data) + 1883 lencscd + lenseg + leninl) { 1884 ctl_set_param_len_error(ctsio); 1885 goto done; 1886 } 1887 1888 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO); 1889 list->service_action = cdb->service_action; 1890 value = dnvlist_get_string(lun->be_lun->options, "insecure_tpc", NULL); 1891 if (value != NULL && strcmp(value, "on") == 0) 1892 list->init_port = -1; 1893 else 1894 list->init_port = ctsio->io_hdr.nexus.targ_port; 1895 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus); 1896 list->list_id = scsi_4btoul(data->list_identifier); 1897 list->flags = data->flags; 1898 list->params = ctsio->kern_data_ptr; 1899 list->cscd = (struct scsi_ec_cscd *)&data->data[0]; 1900 ptr = &data->data[0]; 1901 for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) { 1902 cscd = (struct scsi_ec_cscd *)(ptr + off); 1903 if (cscd->type_code != EC_CSCD_ID) { 1904 free(list, M_CTL); 1905 ctl_set_sense(ctsio, /*current_error*/ 1, 1906 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1907 /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE); 1908 goto done; 1909 } 1910 } 1911 ptr = &data->data[lencscd]; 1912 for (nseg = 0, off = 0; off < lenseg; nseg++) { 1913 if (nseg >= TPC_MAX_SEGS) { 1914 free(list, M_CTL); 1915 ctl_set_sense(ctsio, /*current_error*/ 1, 1916 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1917 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE); 1918 goto done; 1919 } 1920 seg = (struct scsi_ec_segment *)(ptr + off); 1921 if (seg->type_code != EC_SEG_B2B && 1922 seg->type_code != EC_SEG_VERIFY && 1923 seg->type_code != EC_SEG_REGISTER_KEY) { 1924 free(list, M_CTL); 1925 ctl_set_sense(ctsio, /*current_error*/ 1, 1926 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1927 /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE); 1928 goto done; 1929 } 1930 list->seg[nseg] = seg; 1931 off += sizeof(struct scsi_ec_segment) + 1932 scsi_2btoul(seg->descr_length); 1933 } 1934 list->inl = &data->data[lencscd + lenseg]; 1935 list->ncscd = lencscd / sizeof(struct scsi_ec_cscd); 1936 list->nseg = nseg; 1937 list->leninl = leninl; 1938 list->ctsio = ctsio; 1939 list->lun = lun; 1940 mtx_lock(&lun->lun_lock); 1941 if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) { 1942 tlist = tpc_find_list(lun, list->list_id, list->init_idx); 1943 if (tlist != NULL && !tlist->completed) { 1944 mtx_unlock(&lun->lun_lock); 1945 free(list, M_CTL); 1946 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 1947 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 1948 /*bit*/ 0); 1949 goto done; 1950 } 1951 if (tlist != NULL) { 1952 TAILQ_REMOVE(&lun->tpc_lists, tlist, links); 1953 free(tlist, M_CTL); 1954 } 1955 } 1956 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links); 1957 mtx_unlock(&lun->lun_lock); 1958 1959 tpc_process(list); 1960 return (CTL_RETVAL_COMPLETE); 1961 1962 done: 1963 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 1964 free(ctsio->kern_data_ptr, M_CTL); 1965 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 1966 } 1967 ctl_done((union ctl_io *)ctsio); 1968 return (CTL_RETVAL_COMPLETE); 1969 } 1970 1971 static void 1972 tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len, 1973 struct scsi_token *token) 1974 { 1975 static int id = 0; 1976 struct scsi_vpd_id_descriptor *idd = NULL; 1977 struct scsi_ec_cscd_id *cscd; 1978 struct scsi_read_capacity_data_long *dtsd; 1979 int targid_len; 1980 1981 scsi_ulto4b(ROD_TYPE_AUR, token->type); 1982 scsi_ulto2b(0x01f8, token->length); 1983 scsi_u64to8b(atomic_fetchadd_int(&id, 1), &token->body[0]); 1984 if (lun->lun_devid) 1985 idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *) 1986 lun->lun_devid->data, lun->lun_devid->len, 1987 scsi_devid_is_lun_naa); 1988 if (idd == NULL && lun->lun_devid) 1989 idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *) 1990 lun->lun_devid->data, lun->lun_devid->len, 1991 scsi_devid_is_lun_eui64); 1992 if (idd != NULL) { 1993 cscd = (struct scsi_ec_cscd_id *)&token->body[8]; 1994 cscd->type_code = EC_CSCD_ID; 1995 cscd->luidt_pdt = T_DIRECT; 1996 memcpy(&cscd->codeset, idd, 4 + idd->length); 1997 scsi_ulto3b(lun->be_lun->blocksize, cscd->dtsp.block_length); 1998 } 1999 scsi_u64to8b(0, &token->body[40]); /* XXX: Should be 128bit value. */ 2000 scsi_u64to8b(len, &token->body[48]); 2001 2002 /* ROD token device type specific data (RC16 without first field) */ 2003 dtsd = (struct scsi_read_capacity_data_long *)&token->body[88 - 8]; 2004 scsi_ulto4b(lun->be_lun->blocksize, dtsd->length); 2005 dtsd->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 2006 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, dtsd->lalba_lbp); 2007 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 2008 dtsd->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 2009 2010 if (port->target_devid) { 2011 targid_len = port->target_devid->len; 2012 memcpy(&token->body[120], port->target_devid->data, targid_len); 2013 } else 2014 targid_len = 32; 2015 arc4rand(&token->body[120 + targid_len], 384 - targid_len, 0); 2016 }; 2017 2018 int 2019 ctl_populate_token(struct ctl_scsiio *ctsio) 2020 { 2021 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2022 struct ctl_port *port = CTL_PORT(ctsio); 2023 struct ctl_lun *lun = CTL_LUN(ctsio); 2024 struct scsi_populate_token *cdb; 2025 struct scsi_populate_token_data *data; 2026 struct tpc_list *list, *tlist; 2027 struct tpc_token *token; 2028 uint64_t lba; 2029 int len, lendata, lendesc; 2030 2031 CTL_DEBUG_PRINT(("ctl_populate_token\n")); 2032 2033 cdb = (struct scsi_populate_token *)ctsio->cdb; 2034 len = scsi_4btoul(cdb->length); 2035 2036 if (len < sizeof(struct scsi_populate_token_data) || 2037 len > sizeof(struct scsi_populate_token_data) + 2038 TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) { 2039 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 2040 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0); 2041 goto done; 2042 } 2043 2044 /* 2045 * If we've got a kernel request that hasn't been malloced yet, 2046 * malloc it and tell the caller the data buffer is here. 2047 */ 2048 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 2049 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 2050 ctsio->kern_data_len = len; 2051 ctsio->kern_total_len = len; 2052 ctsio->kern_rel_offset = 0; 2053 ctsio->kern_sg_entries = 0; 2054 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 2055 ctsio->be_move_done = ctl_config_move_done; 2056 ctl_datamove((union ctl_io *)ctsio); 2057 2058 return (CTL_RETVAL_COMPLETE); 2059 } 2060 2061 data = (struct scsi_populate_token_data *)ctsio->kern_data_ptr; 2062 lendata = scsi_2btoul(data->length); 2063 if (lendata < sizeof(struct scsi_populate_token_data) - 2 + 2064 sizeof(struct scsi_range_desc)) { 2065 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 2066 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); 2067 goto done; 2068 } 2069 lendesc = scsi_2btoul(data->range_descriptor_length); 2070 if (lendesc < sizeof(struct scsi_range_desc) || 2071 len < sizeof(struct scsi_populate_token_data) + lendesc || 2072 lendata < sizeof(struct scsi_populate_token_data) - 2 + lendesc) { 2073 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 2074 /*field*/ 14, /*bit_valid*/ 0, /*bit*/ 0); 2075 goto done; 2076 } 2077 /* 2078 printf("PT(list=%u) flags=%x to=%d rt=%x len=%x\n", 2079 scsi_4btoul(cdb->list_identifier), 2080 data->flags, scsi_4btoul(data->inactivity_timeout), 2081 scsi_4btoul(data->rod_type), 2082 scsi_2btoul(data->range_descriptor_length)); 2083 */ 2084 2085 /* Validate INACTIVITY TIMEOUT field */ 2086 if (scsi_4btoul(data->inactivity_timeout) > TPC_MAX_TOKEN_TIMEOUT) { 2087 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 2088 /*command*/ 0, /*field*/ 4, /*bit_valid*/ 0, 2089 /*bit*/ 0); 2090 goto done; 2091 } 2092 2093 /* Validate ROD TYPE field */ 2094 if ((data->flags & EC_PT_RTV) && 2095 scsi_4btoul(data->rod_type) != ROD_TYPE_AUR) { 2096 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 2097 /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0); 2098 goto done; 2099 } 2100 2101 /* Validate list of ranges */ 2102 if (tpc_check_ranges_l(&data->desc[0], 2103 scsi_2btoul(data->range_descriptor_length) / 2104 sizeof(struct scsi_range_desc), 2105 lun->be_lun->maxlba, &lba) != 0) { 2106 ctl_set_lba_out_of_range(ctsio, lba); 2107 goto done; 2108 } 2109 if (tpc_check_ranges_x(&data->desc[0], 2110 scsi_2btoul(data->range_descriptor_length) / 2111 sizeof(struct scsi_range_desc)) != 0) { 2112 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, 2113 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 2114 /*bit*/ 0); 2115 goto done; 2116 } 2117 2118 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO); 2119 list->service_action = cdb->service_action; 2120 list->init_port = ctsio->io_hdr.nexus.targ_port; 2121 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus); 2122 list->list_id = scsi_4btoul(cdb->list_identifier); 2123 list->flags = data->flags; 2124 list->ctsio = ctsio; 2125 list->lun = lun; 2126 mtx_lock(&lun->lun_lock); 2127 tlist = tpc_find_list(lun, list->list_id, list->init_idx); 2128 if (tlist != NULL && !tlist->completed) { 2129 mtx_unlock(&lun->lun_lock); 2130 free(list, M_CTL); 2131 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 2132 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 2133 /*bit*/ 0); 2134 goto done; 2135 } 2136 if (tlist != NULL) { 2137 TAILQ_REMOVE(&lun->tpc_lists, tlist, links); 2138 free(tlist, M_CTL); 2139 } 2140 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links); 2141 mtx_unlock(&lun->lun_lock); 2142 2143 token = malloc(sizeof(*token), M_CTL, M_WAITOK | M_ZERO); 2144 token->lun = lun->lun; 2145 token->blocksize = lun->be_lun->blocksize; 2146 token->params = ctsio->kern_data_ptr; 2147 token->range = &data->desc[0]; 2148 token->nrange = scsi_2btoul(data->range_descriptor_length) / 2149 sizeof(struct scsi_range_desc); 2150 list->cursectors = tpc_ranges_length(token->range, token->nrange); 2151 list->curbytes = (off_t)list->cursectors * lun->be_lun->blocksize; 2152 tpc_create_token(lun, port, list->curbytes, 2153 (struct scsi_token *)token->token); 2154 token->active = 0; 2155 token->last_active = time_uptime; 2156 token->timeout = scsi_4btoul(data->inactivity_timeout); 2157 if (token->timeout == 0) 2158 token->timeout = TPC_DFL_TOKEN_TIMEOUT; 2159 else if (token->timeout < TPC_MIN_TOKEN_TIMEOUT) 2160 token->timeout = TPC_MIN_TOKEN_TIMEOUT; 2161 memcpy(list->res_token, token->token, sizeof(list->res_token)); 2162 list->res_token_valid = 1; 2163 list->curseg = 0; 2164 list->completed = 1; 2165 list->last_active = time_uptime; 2166 mtx_lock(&softc->tpc_lock); 2167 TAILQ_INSERT_TAIL(&softc->tpc_tokens, token, links); 2168 mtx_unlock(&softc->tpc_lock); 2169 ctl_set_success(ctsio); 2170 ctl_done((union ctl_io *)ctsio); 2171 return (CTL_RETVAL_COMPLETE); 2172 2173 done: 2174 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 2175 free(ctsio->kern_data_ptr, M_CTL); 2176 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 2177 } 2178 ctl_done((union ctl_io *)ctsio); 2179 return (CTL_RETVAL_COMPLETE); 2180 } 2181 2182 int 2183 ctl_write_using_token(struct ctl_scsiio *ctsio) 2184 { 2185 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2186 struct ctl_lun *lun = CTL_LUN(ctsio); 2187 struct scsi_write_using_token *cdb; 2188 struct scsi_write_using_token_data *data; 2189 struct tpc_list *list, *tlist; 2190 struct tpc_token *token; 2191 uint64_t lba; 2192 int len, lendata, lendesc; 2193 2194 CTL_DEBUG_PRINT(("ctl_write_using_token\n")); 2195 2196 cdb = (struct scsi_write_using_token *)ctsio->cdb; 2197 len = scsi_4btoul(cdb->length); 2198 2199 if (len < sizeof(struct scsi_write_using_token_data) || 2200 len > sizeof(struct scsi_write_using_token_data) + 2201 TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) { 2202 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 2203 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0); 2204 goto done; 2205 } 2206 2207 /* 2208 * If we've got a kernel request that hasn't been malloced yet, 2209 * malloc it and tell the caller the data buffer is here. 2210 */ 2211 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 2212 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 2213 ctsio->kern_data_len = len; 2214 ctsio->kern_total_len = len; 2215 ctsio->kern_rel_offset = 0; 2216 ctsio->kern_sg_entries = 0; 2217 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 2218 ctsio->be_move_done = ctl_config_move_done; 2219 ctl_datamove((union ctl_io *)ctsio); 2220 2221 return (CTL_RETVAL_COMPLETE); 2222 } 2223 2224 data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr; 2225 lendata = scsi_2btoul(data->length); 2226 if (lendata < sizeof(struct scsi_write_using_token_data) - 2 + 2227 sizeof(struct scsi_range_desc)) { 2228 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 2229 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); 2230 goto done; 2231 } 2232 lendesc = scsi_2btoul(data->range_descriptor_length); 2233 if (lendesc < sizeof(struct scsi_range_desc) || 2234 len < sizeof(struct scsi_write_using_token_data) + lendesc || 2235 lendata < sizeof(struct scsi_write_using_token_data) - 2 + lendesc) { 2236 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 2237 /*field*/ 534, /*bit_valid*/ 0, /*bit*/ 0); 2238 goto done; 2239 } 2240 /* 2241 printf("WUT(list=%u) flags=%x off=%ju len=%x\n", 2242 scsi_4btoul(cdb->list_identifier), 2243 data->flags, scsi_8btou64(data->offset_into_rod), 2244 scsi_2btoul(data->range_descriptor_length)); 2245 */ 2246 2247 /* Validate list of ranges */ 2248 if (tpc_check_ranges_l(&data->desc[0], 2249 scsi_2btoul(data->range_descriptor_length) / 2250 sizeof(struct scsi_range_desc), 2251 lun->be_lun->maxlba, &lba) != 0) { 2252 ctl_set_lba_out_of_range(ctsio, lba); 2253 goto done; 2254 } 2255 if (tpc_check_ranges_x(&data->desc[0], 2256 scsi_2btoul(data->range_descriptor_length) / 2257 sizeof(struct scsi_range_desc)) != 0) { 2258 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, 2259 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 2260 /*bit*/ 0); 2261 goto done; 2262 } 2263 2264 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO); 2265 list->service_action = cdb->service_action; 2266 list->init_port = ctsio->io_hdr.nexus.targ_port; 2267 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus); 2268 list->list_id = scsi_4btoul(cdb->list_identifier); 2269 list->flags = data->flags; 2270 list->params = ctsio->kern_data_ptr; 2271 list->range = &data->desc[0]; 2272 list->nrange = scsi_2btoul(data->range_descriptor_length) / 2273 sizeof(struct scsi_range_desc); 2274 list->offset_into_rod = scsi_8btou64(data->offset_into_rod); 2275 list->ctsio = ctsio; 2276 list->lun = lun; 2277 mtx_lock(&lun->lun_lock); 2278 tlist = tpc_find_list(lun, list->list_id, list->init_idx); 2279 if (tlist != NULL && !tlist->completed) { 2280 mtx_unlock(&lun->lun_lock); 2281 free(list, M_CTL); 2282 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 2283 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 2284 /*bit*/ 0); 2285 goto done; 2286 } 2287 if (tlist != NULL) { 2288 TAILQ_REMOVE(&lun->tpc_lists, tlist, links); 2289 free(tlist, M_CTL); 2290 } 2291 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links); 2292 mtx_unlock(&lun->lun_lock); 2293 2294 /* Block device zero ROD token -> no token. */ 2295 if (scsi_4btoul(data->rod_token) == ROD_TYPE_BLOCK_ZERO) { 2296 tpc_process(list); 2297 return (CTL_RETVAL_COMPLETE); 2298 } 2299 2300 mtx_lock(&softc->tpc_lock); 2301 TAILQ_FOREACH(token, &softc->tpc_tokens, links) { 2302 if (memcmp(token->token, data->rod_token, 2303 sizeof(data->rod_token)) == 0) 2304 break; 2305 } 2306 if (token != NULL) { 2307 token->active++; 2308 list->token = token; 2309 if (data->flags & EC_WUT_DEL_TKN) 2310 token->timeout = 0; 2311 } 2312 mtx_unlock(&softc->tpc_lock); 2313 if (token == NULL) { 2314 mtx_lock(&lun->lun_lock); 2315 TAILQ_REMOVE(&lun->tpc_lists, list, links); 2316 mtx_unlock(&lun->lun_lock); 2317 free(list, M_CTL); 2318 ctl_set_sense(ctsio, /*current_error*/ 1, 2319 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 2320 /*asc*/ 0x23, /*ascq*/ 0x04, SSD_ELEM_NONE); 2321 goto done; 2322 } 2323 2324 tpc_process(list); 2325 return (CTL_RETVAL_COMPLETE); 2326 2327 done: 2328 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 2329 free(ctsio->kern_data_ptr, M_CTL); 2330 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 2331 } 2332 ctl_done((union ctl_io *)ctsio); 2333 return (CTL_RETVAL_COMPLETE); 2334 } 2335 2336 int 2337 ctl_receive_rod_token_information(struct ctl_scsiio *ctsio) 2338 { 2339 struct ctl_lun *lun = CTL_LUN(ctsio); 2340 struct scsi_receive_rod_token_information *cdb; 2341 struct scsi_receive_copy_status_lid4_data *data; 2342 struct tpc_list *list; 2343 struct tpc_list list_copy; 2344 uint8_t *ptr; 2345 int retval; 2346 int alloc_len, total_len, token_len; 2347 uint32_t list_id; 2348 2349 CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n")); 2350 2351 cdb = (struct scsi_receive_rod_token_information *)ctsio->cdb; 2352 retval = CTL_RETVAL_COMPLETE; 2353 2354 list_id = scsi_4btoul(cdb->list_identifier); 2355 mtx_lock(&lun->lun_lock); 2356 list = tpc_find_list(lun, list_id, 2357 ctl_get_initindex(&ctsio->io_hdr.nexus)); 2358 if (list == NULL) { 2359 mtx_unlock(&lun->lun_lock); 2360 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 2361 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, 2362 /*bit*/ 0); 2363 ctl_done((union ctl_io *)ctsio); 2364 return (retval); 2365 } 2366 list_copy = *list; 2367 if (list->completed) { 2368 TAILQ_REMOVE(&lun->tpc_lists, list, links); 2369 free(list, M_CTL); 2370 } 2371 mtx_unlock(&lun->lun_lock); 2372 2373 token_len = list_copy.res_token_valid ? 2 + sizeof(list_copy.res_token) : 0; 2374 total_len = sizeof(*data) + list_copy.sense_len + 4 + token_len; 2375 alloc_len = scsi_4btoul(cdb->length); 2376 2377 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 2378 ctsio->kern_sg_entries = 0; 2379 ctsio->kern_rel_offset = 0; 2380 ctsio->kern_data_len = min(total_len, alloc_len); 2381 ctsio->kern_total_len = ctsio->kern_data_len; 2382 2383 data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr; 2384 scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len + 2385 4 + token_len, data->available_data); 2386 data->response_to_service_action = list_copy.service_action; 2387 if (list_copy.completed) { 2388 if (list_copy.error) 2389 data->copy_command_status = RCS_CCS_ERROR; 2390 else if (list_copy.abort) 2391 data->copy_command_status = RCS_CCS_ABORTED; 2392 else 2393 data->copy_command_status = RCS_CCS_COMPLETED; 2394 } else 2395 data->copy_command_status = RCS_CCS_INPROG_FG; 2396 scsi_ulto2b(list_copy.curops, data->operation_counter); 2397 scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay); 2398 data->transfer_count_units = RCS_TC_LBAS; 2399 scsi_u64to8b(list_copy.cursectors, data->transfer_count); 2400 scsi_ulto2b(list_copy.curseg, data->segments_processed); 2401 data->length_of_the_sense_data_field = list_copy.sense_len; 2402 data->sense_data_length = list_copy.sense_len; 2403 memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len); 2404 2405 ptr = &data->sense_data[data->length_of_the_sense_data_field]; 2406 scsi_ulto4b(token_len, &ptr[0]); 2407 if (list_copy.res_token_valid) { 2408 scsi_ulto2b(0, &ptr[4]); 2409 memcpy(&ptr[6], list_copy.res_token, sizeof(list_copy.res_token)); 2410 } 2411 /* 2412 printf("RRTI(list=%u) valid=%d\n", 2413 scsi_4btoul(cdb->list_identifier), list_copy.res_token_valid); 2414 */ 2415 ctl_set_success(ctsio); 2416 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 2417 ctsio->be_move_done = ctl_config_move_done; 2418 ctl_datamove((union ctl_io *)ctsio); 2419 return (retval); 2420 } 2421 2422 int 2423 ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio) 2424 { 2425 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2426 struct scsi_report_all_rod_tokens *cdb; 2427 struct scsi_report_all_rod_tokens_data *data; 2428 struct tpc_token *token; 2429 int retval; 2430 int alloc_len, total_len, tokens, i; 2431 2432 CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n")); 2433 2434 cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb; 2435 retval = CTL_RETVAL_COMPLETE; 2436 2437 tokens = 0; 2438 mtx_lock(&softc->tpc_lock); 2439 TAILQ_FOREACH(token, &softc->tpc_tokens, links) 2440 tokens++; 2441 mtx_unlock(&softc->tpc_lock); 2442 if (tokens > 512) 2443 tokens = 512; 2444 2445 total_len = sizeof(*data) + tokens * 96; 2446 alloc_len = scsi_4btoul(cdb->length); 2447 2448 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 2449 ctsio->kern_sg_entries = 0; 2450 ctsio->kern_rel_offset = 0; 2451 ctsio->kern_data_len = min(total_len, alloc_len); 2452 ctsio->kern_total_len = ctsio->kern_data_len; 2453 2454 data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr; 2455 i = 0; 2456 mtx_lock(&softc->tpc_lock); 2457 TAILQ_FOREACH(token, &softc->tpc_tokens, links) { 2458 if (i >= tokens) 2459 break; 2460 memcpy(&data->rod_management_token_list[i * 96], 2461 token->token, 96); 2462 i++; 2463 } 2464 mtx_unlock(&softc->tpc_lock); 2465 scsi_ulto4b(sizeof(*data) - 4 + i * 96, data->available_data); 2466 /* 2467 printf("RART tokens=%d\n", i); 2468 */ 2469 ctl_set_success(ctsio); 2470 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 2471 ctsio->be_move_done = ctl_config_move_done; 2472 ctl_datamove((union ctl_io *)ctsio); 2473 return (retval); 2474 } 2475