1 /*- 2 * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/kernel.h> 33 #include <sys/types.h> 34 #include <sys/lock.h> 35 #include <sys/module.h> 36 #include <sys/mutex.h> 37 #include <sys/condvar.h> 38 #include <sys/malloc.h> 39 #include <sys/conf.h> 40 #include <sys/queue.h> 41 #include <sys/sysctl.h> 42 #include <machine/atomic.h> 43 44 #include <cam/cam.h> 45 #include <cam/scsi/scsi_all.h> 46 #include <cam/scsi/scsi_da.h> 47 #include <cam/ctl/ctl_io.h> 48 #include <cam/ctl/ctl.h> 49 #include <cam/ctl/ctl_frontend.h> 50 #include <cam/ctl/ctl_util.h> 51 #include <cam/ctl/ctl_backend.h> 52 #include <cam/ctl/ctl_ioctl.h> 53 #include <cam/ctl/ctl_ha.h> 54 #include <cam/ctl/ctl_private.h> 55 #include <cam/ctl/ctl_debug.h> 56 #include <cam/ctl/ctl_scsi_all.h> 57 #include <cam/ctl/ctl_tpc.h> 58 #include <cam/ctl/ctl_error.h> 59 60 #define TPC_MAX_CSCDS 64 61 #define TPC_MAX_SEGS 64 62 #define TPC_MAX_SEG 0 63 #define TPC_MAX_LIST 8192 64 #define TPC_MAX_INLINE 0 65 #define TPC_MAX_LISTS 255 66 #define TPC_MAX_IO_SIZE (1024 * 1024) 67 #define TPC_MAX_IOCHUNK_SIZE (TPC_MAX_IO_SIZE * 16) 68 #define TPC_MIN_TOKEN_TIMEOUT 1 69 #define TPC_DFL_TOKEN_TIMEOUT 60 70 #define TPC_MAX_TOKEN_TIMEOUT 600 71 72 MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC"); 73 74 typedef enum { 75 TPC_ERR_RETRY = 0x000, 76 TPC_ERR_FAIL = 0x001, 77 TPC_ERR_MASK = 0x0ff, 78 TPC_ERR_NO_DECREMENT = 0x100 79 } tpc_error_action; 80 81 struct tpc_list; 82 TAILQ_HEAD(runl, tpc_io); 83 struct tpc_io { 84 union ctl_io *io; 85 uint8_t target; 86 uint32_t cscd; 87 uint64_t lun; 88 uint8_t *buf; 89 struct tpc_list *list; 90 struct runl run; 91 TAILQ_ENTRY(tpc_io) rlinks; 92 TAILQ_ENTRY(tpc_io) links; 93 }; 94 95 struct tpc_token { 96 uint8_t token[512]; 97 uint64_t lun; 98 uint32_t blocksize; 99 uint8_t *params; 100 struct scsi_range_desc *range; 101 int nrange; 102 int active; 103 time_t last_active; 104 uint32_t timeout; 105 TAILQ_ENTRY(tpc_token) links; 106 }; 107 108 struct tpc_list { 109 uint8_t service_action; 110 int init_port; 111 uint32_t init_idx; 112 uint32_t list_id; 113 uint8_t flags; 114 uint8_t *params; 115 struct scsi_ec_cscd *cscd; 116 struct scsi_ec_segment *seg[TPC_MAX_SEGS]; 117 uint8_t *inl; 118 int ncscd; 119 int nseg; 120 int leninl; 121 struct tpc_token *token; 122 struct scsi_range_desc *range; 123 int nrange; 124 off_t offset_into_rod; 125 126 int curseg; 127 off_t cursectors; 128 off_t curbytes; 129 int curops; 130 int stage; 131 off_t segsectors; 132 off_t segbytes; 133 int tbdio; 134 int error; 135 int abort; 136 int completed; 137 time_t last_active; 138 TAILQ_HEAD(, tpc_io) allio; 139 struct scsi_sense_data fwd_sense_data; 140 uint8_t fwd_sense_len; 141 uint8_t fwd_scsi_status; 142 uint8_t fwd_target; 143 uint16_t fwd_cscd; 144 struct scsi_sense_data sense_data; 145 uint8_t sense_len; 146 uint8_t scsi_status; 147 struct ctl_scsiio *ctsio; 148 struct ctl_lun *lun; 149 int res_token_valid; 150 uint8_t res_token[512]; 151 TAILQ_ENTRY(tpc_list) links; 152 }; 153 154 static void 155 tpc_timeout(void *arg) 156 { 157 struct ctl_softc *softc = arg; 158 struct ctl_lun *lun; 159 struct tpc_token *token, *ttoken; 160 struct tpc_list *list, *tlist; 161 162 /* Free completed lists with expired timeout. */ 163 STAILQ_FOREACH(lun, &softc->lun_list, links) { 164 mtx_lock(&lun->lun_lock); 165 TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) { 166 if (!list->completed || time_uptime < list->last_active + 167 TPC_DFL_TOKEN_TIMEOUT) 168 continue; 169 TAILQ_REMOVE(&lun->tpc_lists, list, links); 170 free(list, M_CTL); 171 } 172 mtx_unlock(&lun->lun_lock); 173 } 174 175 /* Free inactive ROD tokens with expired timeout. */ 176 mtx_lock(&softc->tpc_lock); 177 TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) { 178 if (token->active || 179 time_uptime < token->last_active + token->timeout + 1) 180 continue; 181 TAILQ_REMOVE(&softc->tpc_tokens, token, links); 182 free(token->params, M_CTL); 183 free(token, M_CTL); 184 } 185 mtx_unlock(&softc->tpc_lock); 186 callout_schedule(&softc->tpc_timeout, hz); 187 } 188 189 void 190 ctl_tpc_init(struct ctl_softc *softc) 191 { 192 193 mtx_init(&softc->tpc_lock, "CTL TPC mutex", NULL, MTX_DEF); 194 TAILQ_INIT(&softc->tpc_tokens); 195 callout_init_mtx(&softc->tpc_timeout, &softc->ctl_lock, 0); 196 callout_reset(&softc->tpc_timeout, hz, tpc_timeout, softc); 197 } 198 199 void 200 ctl_tpc_shutdown(struct ctl_softc *softc) 201 { 202 struct tpc_token *token; 203 204 callout_drain(&softc->tpc_timeout); 205 206 /* Free ROD tokens. */ 207 mtx_lock(&softc->tpc_lock); 208 while ((token = TAILQ_FIRST(&softc->tpc_tokens)) != NULL) { 209 TAILQ_REMOVE(&softc->tpc_tokens, token, links); 210 free(token->params, M_CTL); 211 free(token, M_CTL); 212 } 213 mtx_unlock(&softc->tpc_lock); 214 mtx_destroy(&softc->tpc_lock); 215 } 216 217 void 218 ctl_tpc_lun_init(struct ctl_lun *lun) 219 { 220 221 TAILQ_INIT(&lun->tpc_lists); 222 } 223 224 void 225 ctl_tpc_lun_clear(struct ctl_lun *lun, uint32_t initidx) 226 { 227 struct tpc_list *list, *tlist; 228 229 TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) { 230 if (initidx != -1 && list->init_idx != initidx) 231 continue; 232 if (!list->completed) 233 continue; 234 TAILQ_REMOVE(&lun->tpc_lists, list, links); 235 free(list, M_CTL); 236 } 237 } 238 239 void 240 ctl_tpc_lun_shutdown(struct ctl_lun *lun) 241 { 242 struct ctl_softc *softc = lun->ctl_softc; 243 struct tpc_list *list; 244 struct tpc_token *token, *ttoken; 245 246 /* Free lists for this LUN. */ 247 while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) { 248 TAILQ_REMOVE(&lun->tpc_lists, list, links); 249 KASSERT(list->completed, 250 ("Not completed TPC (%p) on shutdown", list)); 251 free(list, M_CTL); 252 } 253 254 /* Free ROD tokens for this LUN. */ 255 mtx_lock(&softc->tpc_lock); 256 TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) { 257 if (token->lun != lun->lun || token->active) 258 continue; 259 TAILQ_REMOVE(&softc->tpc_tokens, token, links); 260 free(token->params, M_CTL); 261 free(token, M_CTL); 262 } 263 mtx_unlock(&softc->tpc_lock); 264 } 265 266 int 267 ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len) 268 { 269 struct ctl_lun *lun = CTL_LUN(ctsio); 270 struct scsi_vpd_tpc *tpc_ptr; 271 struct scsi_vpd_tpc_descriptor *d_ptr; 272 struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr; 273 struct scsi_vpd_tpc_descriptor_sc *sc_ptr; 274 struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr; 275 struct scsi_vpd_tpc_descriptor_pd *pd_ptr; 276 struct scsi_vpd_tpc_descriptor_sd *sd_ptr; 277 struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr; 278 struct scsi_vpd_tpc_descriptor_rtf *rtf_ptr; 279 struct scsi_vpd_tpc_descriptor_rtf_block *rtfb_ptr; 280 struct scsi_vpd_tpc_descriptor_srt *srt_ptr; 281 struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr; 282 struct scsi_vpd_tpc_descriptor_gco *gco_ptr; 283 int data_len; 284 285 data_len = sizeof(struct scsi_vpd_tpc) + 286 sizeof(struct scsi_vpd_tpc_descriptor_bdrl) + 287 roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) + 288 2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 11, 4) + 289 sizeof(struct scsi_vpd_tpc_descriptor_pd) + 290 roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) + 291 roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) + 292 sizeof(struct scsi_vpd_tpc_descriptor_rtf) + 293 sizeof(struct scsi_vpd_tpc_descriptor_rtf_block) + 294 sizeof(struct scsi_vpd_tpc_descriptor_srt) + 295 2*sizeof(struct scsi_vpd_tpc_descriptor_srtd) + 296 sizeof(struct scsi_vpd_tpc_descriptor_gco); 297 298 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 299 tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr; 300 ctsio->kern_rel_offset = 0; 301 ctsio->kern_sg_entries = 0; 302 ctsio->kern_data_len = min(data_len, alloc_len); 303 ctsio->kern_total_len = ctsio->kern_data_len; 304 305 /* 306 * The control device is always connected. The disk device, on the 307 * other hand, may not be online all the time. 308 */ 309 if (lun != NULL) 310 tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 311 lun->be_lun->lun_type; 312 else 313 tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 314 tpc_ptr->page_code = SVPD_SCSI_TPC; 315 scsi_ulto2b(data_len - 4, tpc_ptr->page_length); 316 317 /* Block Device ROD Limits */ 318 d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0]; 319 bdrl_ptr = (struct scsi_vpd_tpc_descriptor_bdrl *)d_ptr; 320 scsi_ulto2b(SVPD_TPC_BDRL, bdrl_ptr->desc_type); 321 scsi_ulto2b(sizeof(*bdrl_ptr) - 4, bdrl_ptr->desc_length); 322 scsi_ulto2b(TPC_MAX_SEGS, bdrl_ptr->maximum_ranges); 323 scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT, 324 bdrl_ptr->maximum_inactivity_timeout); 325 scsi_ulto4b(TPC_DFL_TOKEN_TIMEOUT, 326 bdrl_ptr->default_inactivity_timeout); 327 scsi_u64to8b(0, bdrl_ptr->maximum_token_transfer_size); 328 scsi_u64to8b(0, bdrl_ptr->optimal_transfer_count); 329 330 /* Supported commands */ 331 d_ptr = (struct scsi_vpd_tpc_descriptor *) 332 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 333 sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr; 334 scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type); 335 sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 11; 336 scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length); 337 scd_ptr = &sc_ptr->descr[0]; 338 scd_ptr->opcode = EXTENDED_COPY; 339 scd_ptr->sa_length = 5; 340 scd_ptr->supported_service_actions[0] = EC_EC_LID1; 341 scd_ptr->supported_service_actions[1] = EC_EC_LID4; 342 scd_ptr->supported_service_actions[2] = EC_PT; 343 scd_ptr->supported_service_actions[3] = EC_WUT; 344 scd_ptr->supported_service_actions[4] = EC_COA; 345 scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *) 346 &scd_ptr->supported_service_actions[scd_ptr->sa_length]; 347 scd_ptr->opcode = RECEIVE_COPY_STATUS; 348 scd_ptr->sa_length = 6; 349 scd_ptr->supported_service_actions[0] = RCS_RCS_LID1; 350 scd_ptr->supported_service_actions[1] = RCS_RCFD; 351 scd_ptr->supported_service_actions[2] = RCS_RCS_LID4; 352 scd_ptr->supported_service_actions[3] = RCS_RCOP; 353 scd_ptr->supported_service_actions[4] = RCS_RRTI; 354 scd_ptr->supported_service_actions[5] = RCS_RART; 355 356 /* Parameter data. */ 357 d_ptr = (struct scsi_vpd_tpc_descriptor *) 358 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 359 pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr; 360 scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type); 361 scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length); 362 scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count); 363 scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count); 364 scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length); 365 scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length); 366 367 /* Supported Descriptors */ 368 d_ptr = (struct scsi_vpd_tpc_descriptor *) 369 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 370 sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr; 371 scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type); 372 scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length); 373 sd_ptr->list_length = 4; 374 sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B; 375 sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY; 376 sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY; 377 sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID; 378 379 /* Supported CSCD Descriptor IDs */ 380 d_ptr = (struct scsi_vpd_tpc_descriptor *) 381 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 382 sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr; 383 scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type); 384 scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length); 385 scsi_ulto2b(2, sdid_ptr->list_length); 386 scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]); 387 388 /* ROD Token Features */ 389 d_ptr = (struct scsi_vpd_tpc_descriptor *) 390 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 391 rtf_ptr = (struct scsi_vpd_tpc_descriptor_rtf *)d_ptr; 392 scsi_ulto2b(SVPD_TPC_RTF, rtf_ptr->desc_type); 393 scsi_ulto2b(sizeof(*rtf_ptr) - 4 + sizeof(*rtfb_ptr), rtf_ptr->desc_length); 394 rtf_ptr->remote_tokens = 0; 395 scsi_ulto4b(TPC_MIN_TOKEN_TIMEOUT, rtf_ptr->minimum_token_lifetime); 396 scsi_ulto4b(UINT32_MAX, rtf_ptr->maximum_token_lifetime); 397 scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT, 398 rtf_ptr->maximum_token_inactivity_timeout); 399 scsi_ulto2b(sizeof(*rtfb_ptr), rtf_ptr->type_specific_features_length); 400 rtfb_ptr = (struct scsi_vpd_tpc_descriptor_rtf_block *) 401 &rtf_ptr->type_specific_features; 402 rtfb_ptr->type_format = SVPD_TPC_RTF_BLOCK; 403 scsi_ulto2b(sizeof(*rtfb_ptr) - 4, rtfb_ptr->desc_length); 404 scsi_ulto2b(0, rtfb_ptr->optimal_length_granularity); 405 scsi_u64to8b(0, rtfb_ptr->maximum_bytes); 406 scsi_u64to8b(0, rtfb_ptr->optimal_bytes); 407 scsi_u64to8b(UINT64_MAX, rtfb_ptr->optimal_bytes_to_token_per_segment); 408 scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE, 409 rtfb_ptr->optimal_bytes_from_token_per_segment); 410 411 /* Supported ROD Tokens */ 412 d_ptr = (struct scsi_vpd_tpc_descriptor *) 413 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 414 srt_ptr = (struct scsi_vpd_tpc_descriptor_srt *)d_ptr; 415 scsi_ulto2b(SVPD_TPC_SRT, srt_ptr->desc_type); 416 scsi_ulto2b(sizeof(*srt_ptr) - 4 + 2*sizeof(*srtd_ptr), srt_ptr->desc_length); 417 scsi_ulto2b(2*sizeof(*srtd_ptr), srt_ptr->rod_type_descriptors_length); 418 srtd_ptr = (struct scsi_vpd_tpc_descriptor_srtd *) 419 &srt_ptr->rod_type_descriptors; 420 scsi_ulto4b(ROD_TYPE_AUR, srtd_ptr->rod_type); 421 srtd_ptr->flags = SVPD_TPC_SRTD_TIN | SVPD_TPC_SRTD_TOUT; 422 scsi_ulto2b(0, srtd_ptr->preference_indicator); 423 srtd_ptr++; 424 scsi_ulto4b(ROD_TYPE_BLOCK_ZERO, srtd_ptr->rod_type); 425 srtd_ptr->flags = SVPD_TPC_SRTD_TIN; 426 scsi_ulto2b(0, srtd_ptr->preference_indicator); 427 428 /* General Copy Operations */ 429 d_ptr = (struct scsi_vpd_tpc_descriptor *) 430 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 431 gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr; 432 scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type); 433 scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length); 434 scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies); 435 scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies); 436 scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length); 437 gco_ptr->data_segment_granularity = 0; 438 gco_ptr->inline_data_granularity = 0; 439 440 ctl_set_success(ctsio); 441 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 442 ctsio->be_move_done = ctl_config_move_done; 443 ctl_datamove((union ctl_io *)ctsio); 444 445 return (CTL_RETVAL_COMPLETE); 446 } 447 448 int 449 ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio) 450 { 451 struct scsi_receive_copy_operating_parameters *cdb; 452 struct scsi_receive_copy_operating_parameters_data *data; 453 int retval; 454 int alloc_len, total_len; 455 456 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 457 458 cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb; 459 460 retval = CTL_RETVAL_COMPLETE; 461 462 total_len = sizeof(*data) + 4; 463 alloc_len = scsi_4btoul(cdb->length); 464 465 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 466 ctsio->kern_sg_entries = 0; 467 ctsio->kern_rel_offset = 0; 468 ctsio->kern_data_len = min(total_len, alloc_len); 469 ctsio->kern_total_len = ctsio->kern_data_len; 470 471 data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr; 472 scsi_ulto4b(sizeof(*data) - 4 + 4, data->length); 473 data->snlid = RCOP_SNLID; 474 scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count); 475 scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count); 476 scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length); 477 scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length); 478 scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length); 479 scsi_ulto4b(0, data->held_data_limit); 480 scsi_ulto4b(0, data->maximum_stream_device_transfer_size); 481 scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies); 482 data->maximum_concurrent_copies = TPC_MAX_LISTS; 483 data->data_segment_granularity = 0; 484 data->inline_data_granularity = 0; 485 data->held_data_granularity = 0; 486 data->implemented_descriptor_list_length = 4; 487 data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B; 488 data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY; 489 data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY; 490 data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID; 491 492 ctl_set_success(ctsio); 493 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 494 ctsio->be_move_done = ctl_config_move_done; 495 ctl_datamove((union ctl_io *)ctsio); 496 return (retval); 497 } 498 499 static struct tpc_list * 500 tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx) 501 { 502 struct tpc_list *list; 503 504 mtx_assert(&lun->lun_lock, MA_OWNED); 505 TAILQ_FOREACH(list, &lun->tpc_lists, links) { 506 if ((list->flags & EC_LIST_ID_USAGE_MASK) != 507 EC_LIST_ID_USAGE_NONE && list->list_id == list_id && 508 list->init_idx == init_idx) 509 break; 510 } 511 return (list); 512 } 513 514 int 515 ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio) 516 { 517 struct ctl_lun *lun = CTL_LUN(ctsio); 518 struct scsi_receive_copy_status_lid1 *cdb; 519 struct scsi_receive_copy_status_lid1_data *data; 520 struct tpc_list *list; 521 struct tpc_list list_copy; 522 int retval; 523 int alloc_len, total_len; 524 uint32_t list_id; 525 526 CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n")); 527 528 cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb; 529 retval = CTL_RETVAL_COMPLETE; 530 531 list_id = cdb->list_identifier; 532 mtx_lock(&lun->lun_lock); 533 list = tpc_find_list(lun, list_id, 534 ctl_get_initindex(&ctsio->io_hdr.nexus)); 535 if (list == NULL) { 536 mtx_unlock(&lun->lun_lock); 537 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 538 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, 539 /*bit*/ 0); 540 ctl_done((union ctl_io *)ctsio); 541 return (retval); 542 } 543 list_copy = *list; 544 if (list->completed) { 545 TAILQ_REMOVE(&lun->tpc_lists, list, links); 546 free(list, M_CTL); 547 } 548 mtx_unlock(&lun->lun_lock); 549 550 total_len = sizeof(*data); 551 alloc_len = scsi_4btoul(cdb->length); 552 553 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 554 ctsio->kern_sg_entries = 0; 555 ctsio->kern_rel_offset = 0; 556 ctsio->kern_data_len = min(total_len, alloc_len); 557 ctsio->kern_total_len = ctsio->kern_data_len; 558 559 data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr; 560 scsi_ulto4b(sizeof(*data) - 4, data->available_data); 561 if (list_copy.completed) { 562 if (list_copy.error || list_copy.abort) 563 data->copy_command_status = RCS_CCS_ERROR; 564 else 565 data->copy_command_status = RCS_CCS_COMPLETED; 566 } else 567 data->copy_command_status = RCS_CCS_INPROG; 568 scsi_ulto2b(list_copy.curseg, data->segments_processed); 569 if (list_copy.curbytes <= UINT32_MAX) { 570 data->transfer_count_units = RCS_TC_BYTES; 571 scsi_ulto4b(list_copy.curbytes, data->transfer_count); 572 } else { 573 data->transfer_count_units = RCS_TC_MBYTES; 574 scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count); 575 } 576 577 ctl_set_success(ctsio); 578 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 579 ctsio->be_move_done = ctl_config_move_done; 580 ctl_datamove((union ctl_io *)ctsio); 581 return (retval); 582 } 583 584 int 585 ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio) 586 { 587 struct ctl_lun *lun = CTL_LUN(ctsio); 588 struct scsi_receive_copy_failure_details *cdb; 589 struct scsi_receive_copy_failure_details_data *data; 590 struct tpc_list *list; 591 struct tpc_list list_copy; 592 int retval; 593 int alloc_len, total_len; 594 uint32_t list_id; 595 596 CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n")); 597 598 cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb; 599 retval = CTL_RETVAL_COMPLETE; 600 601 list_id = cdb->list_identifier; 602 mtx_lock(&lun->lun_lock); 603 list = tpc_find_list(lun, list_id, 604 ctl_get_initindex(&ctsio->io_hdr.nexus)); 605 if (list == NULL || !list->completed) { 606 mtx_unlock(&lun->lun_lock); 607 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 608 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, 609 /*bit*/ 0); 610 ctl_done((union ctl_io *)ctsio); 611 return (retval); 612 } 613 list_copy = *list; 614 TAILQ_REMOVE(&lun->tpc_lists, list, links); 615 free(list, M_CTL); 616 mtx_unlock(&lun->lun_lock); 617 618 total_len = sizeof(*data) + list_copy.sense_len; 619 alloc_len = scsi_4btoul(cdb->length); 620 621 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 622 ctsio->kern_sg_entries = 0; 623 ctsio->kern_rel_offset = 0; 624 ctsio->kern_data_len = min(total_len, alloc_len); 625 ctsio->kern_total_len = ctsio->kern_data_len; 626 627 data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr; 628 if (list_copy.completed && (list_copy.error || list_copy.abort)) { 629 scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len, 630 data->available_data); 631 data->copy_command_status = RCS_CCS_ERROR; 632 } else 633 scsi_ulto4b(0, data->available_data); 634 scsi_ulto2b(list_copy.sense_len, data->sense_data_length); 635 memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len); 636 637 ctl_set_success(ctsio); 638 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 639 ctsio->be_move_done = ctl_config_move_done; 640 ctl_datamove((union ctl_io *)ctsio); 641 return (retval); 642 } 643 644 int 645 ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio) 646 { 647 struct ctl_lun *lun = CTL_LUN(ctsio); 648 struct scsi_receive_copy_status_lid4 *cdb; 649 struct scsi_receive_copy_status_lid4_data *data; 650 struct tpc_list *list; 651 struct tpc_list list_copy; 652 int retval; 653 int alloc_len, total_len; 654 uint32_t list_id; 655 656 CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n")); 657 658 cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb; 659 retval = CTL_RETVAL_COMPLETE; 660 661 list_id = scsi_4btoul(cdb->list_identifier); 662 mtx_lock(&lun->lun_lock); 663 list = tpc_find_list(lun, list_id, 664 ctl_get_initindex(&ctsio->io_hdr.nexus)); 665 if (list == NULL) { 666 mtx_unlock(&lun->lun_lock); 667 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 668 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, 669 /*bit*/ 0); 670 ctl_done((union ctl_io *)ctsio); 671 return (retval); 672 } 673 list_copy = *list; 674 if (list->completed) { 675 TAILQ_REMOVE(&lun->tpc_lists, list, links); 676 free(list, M_CTL); 677 } 678 mtx_unlock(&lun->lun_lock); 679 680 total_len = sizeof(*data) + list_copy.sense_len; 681 alloc_len = scsi_4btoul(cdb->length); 682 683 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 684 ctsio->kern_sg_entries = 0; 685 ctsio->kern_rel_offset = 0; 686 ctsio->kern_data_len = min(total_len, alloc_len); 687 ctsio->kern_total_len = ctsio->kern_data_len; 688 689 data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr; 690 scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len, 691 data->available_data); 692 data->response_to_service_action = list_copy.service_action; 693 if (list_copy.completed) { 694 if (list_copy.error) 695 data->copy_command_status = RCS_CCS_ERROR; 696 else if (list_copy.abort) 697 data->copy_command_status = RCS_CCS_ABORTED; 698 else 699 data->copy_command_status = RCS_CCS_COMPLETED; 700 } else 701 data->copy_command_status = RCS_CCS_INPROG_FG; 702 scsi_ulto2b(list_copy.curops, data->operation_counter); 703 scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay); 704 data->transfer_count_units = RCS_TC_BYTES; 705 scsi_u64to8b(list_copy.curbytes, data->transfer_count); 706 scsi_ulto2b(list_copy.curseg, data->segments_processed); 707 data->length_of_the_sense_data_field = list_copy.sense_len; 708 data->sense_data_length = list_copy.sense_len; 709 memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len); 710 711 ctl_set_success(ctsio); 712 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 713 ctsio->be_move_done = ctl_config_move_done; 714 ctl_datamove((union ctl_io *)ctsio); 715 return (retval); 716 } 717 718 int 719 ctl_copy_operation_abort(struct ctl_scsiio *ctsio) 720 { 721 struct ctl_lun *lun = CTL_LUN(ctsio); 722 struct scsi_copy_operation_abort *cdb; 723 struct tpc_list *list; 724 int retval; 725 uint32_t list_id; 726 727 CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n")); 728 729 cdb = (struct scsi_copy_operation_abort *)ctsio->cdb; 730 retval = CTL_RETVAL_COMPLETE; 731 732 list_id = scsi_4btoul(cdb->list_identifier); 733 mtx_lock(&lun->lun_lock); 734 list = tpc_find_list(lun, list_id, 735 ctl_get_initindex(&ctsio->io_hdr.nexus)); 736 if (list == NULL) { 737 mtx_unlock(&lun->lun_lock); 738 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 739 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, 740 /*bit*/ 0); 741 ctl_done((union ctl_io *)ctsio); 742 return (retval); 743 } 744 list->abort = 1; 745 mtx_unlock(&lun->lun_lock); 746 747 ctl_set_success(ctsio); 748 ctl_done((union ctl_io *)ctsio); 749 return (retval); 750 } 751 752 static uint64_t 753 tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss, 754 uint32_t *pb, uint32_t *pbo) 755 { 756 757 if (idx == 0xffff) { 758 if (ss && list->lun->be_lun) 759 *ss = list->lun->be_lun->blocksize; 760 if (pb && list->lun->be_lun) 761 *pb = list->lun->be_lun->blocksize << 762 list->lun->be_lun->pblockexp; 763 if (pbo && list->lun->be_lun) 764 *pbo = list->lun->be_lun->blocksize * 765 list->lun->be_lun->pblockoff; 766 return (list->lun->lun); 767 } 768 if (idx >= list->ncscd) 769 return (UINT64_MAX); 770 return (tpcl_resolve(list->lun->ctl_softc, 771 list->init_port, &list->cscd[idx], ss, pb, pbo)); 772 } 773 774 static void 775 tpc_set_io_error_sense(struct tpc_list *list) 776 { 777 int flen; 778 uint8_t csi[4]; 779 uint8_t sks[3]; 780 uint8_t fbuf[4 + 64]; 781 782 scsi_ulto4b(list->curseg, csi); 783 if (list->fwd_cscd <= 0x07ff) { 784 sks[0] = SSD_SKS_SEGMENT_VALID; 785 scsi_ulto2b((uint8_t *)&list->cscd[list->fwd_cscd] - 786 list->params, &sks[1]); 787 } else 788 sks[0] = 0; 789 if (list->fwd_scsi_status) { 790 fbuf[0] = 0x0c; 791 fbuf[2] = list->fwd_target; 792 flen = list->fwd_sense_len; 793 if (flen > 64) { 794 flen = 64; 795 fbuf[2] |= SSD_FORWARDED_FSDT; 796 } 797 fbuf[1] = 2 + flen; 798 fbuf[3] = list->fwd_scsi_status; 799 bcopy(&list->fwd_sense_data, &fbuf[4], flen); 800 flen += 4; 801 } else 802 flen = 0; 803 ctl_set_sense(list->ctsio, /*current_error*/ 1, 804 /*sense_key*/ SSD_KEY_COPY_ABORTED, 805 /*asc*/ 0x0d, /*ascq*/ 0x01, 806 SSD_ELEM_COMMAND, sizeof(csi), csi, 807 sks[0] ? SSD_ELEM_SKS : SSD_ELEM_SKIP, sizeof(sks), sks, 808 flen ? SSD_ELEM_DESC : SSD_ELEM_SKIP, flen, fbuf, 809 SSD_ELEM_NONE); 810 } 811 812 static int 813 tpc_process_b2b(struct tpc_list *list) 814 { 815 struct scsi_ec_segment_b2b *seg; 816 struct scsi_ec_cscd_dtsp *sdstp, *ddstp; 817 struct tpc_io *tior, *tiow; 818 struct runl run; 819 uint64_t sl, dl; 820 off_t srclba, dstlba, numbytes, donebytes, roundbytes; 821 int numlba; 822 uint32_t srcblock, dstblock, pb, pbo, adj; 823 uint16_t scscd, dcscd; 824 uint8_t csi[4]; 825 826 scsi_ulto4b(list->curseg, csi); 827 if (list->stage == 1) { 828 while ((tior = TAILQ_FIRST(&list->allio)) != NULL) { 829 TAILQ_REMOVE(&list->allio, tior, links); 830 ctl_free_io(tior->io); 831 free(tior->buf, M_CTL); 832 free(tior, M_CTL); 833 } 834 if (list->abort) { 835 ctl_set_task_aborted(list->ctsio); 836 return (CTL_RETVAL_ERROR); 837 } else if (list->error) { 838 tpc_set_io_error_sense(list); 839 return (CTL_RETVAL_ERROR); 840 } 841 list->cursectors += list->segsectors; 842 list->curbytes += list->segbytes; 843 return (CTL_RETVAL_COMPLETE); 844 } 845 846 TAILQ_INIT(&list->allio); 847 seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg]; 848 scscd = scsi_2btoul(seg->src_cscd); 849 dcscd = scsi_2btoul(seg->dst_cscd); 850 sl = tpc_resolve(list, scscd, &srcblock, NULL, NULL); 851 dl = tpc_resolve(list, dcscd, &dstblock, &pb, &pbo); 852 if (sl == UINT64_MAX || dl == UINT64_MAX) { 853 ctl_set_sense(list->ctsio, /*current_error*/ 1, 854 /*sense_key*/ SSD_KEY_COPY_ABORTED, 855 /*asc*/ 0x08, /*ascq*/ 0x04, 856 SSD_ELEM_COMMAND, sizeof(csi), csi, 857 SSD_ELEM_NONE); 858 return (CTL_RETVAL_ERROR); 859 } 860 if (pbo > 0) 861 pbo = pb - pbo; 862 sdstp = &list->cscd[scscd].dtsp; 863 if (scsi_3btoul(sdstp->block_length) != 0) 864 srcblock = scsi_3btoul(sdstp->block_length); 865 ddstp = &list->cscd[dcscd].dtsp; 866 if (scsi_3btoul(ddstp->block_length) != 0) 867 dstblock = scsi_3btoul(ddstp->block_length); 868 numlba = scsi_2btoul(seg->number_of_blocks); 869 if (seg->flags & EC_SEG_DC) 870 numbytes = (off_t)numlba * dstblock; 871 else 872 numbytes = (off_t)numlba * srcblock; 873 srclba = scsi_8btou64(seg->src_lba); 874 dstlba = scsi_8btou64(seg->dst_lba); 875 876 // printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n", 877 // (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba), 878 // dl, scsi_8btou64(seg->dst_lba)); 879 880 if (numbytes == 0) 881 return (CTL_RETVAL_COMPLETE); 882 883 if (numbytes % srcblock != 0 || numbytes % dstblock != 0) { 884 ctl_set_sense(list->ctsio, /*current_error*/ 1, 885 /*sense_key*/ SSD_KEY_COPY_ABORTED, 886 /*asc*/ 0x26, /*ascq*/ 0x0A, 887 SSD_ELEM_COMMAND, sizeof(csi), csi, 888 SSD_ELEM_NONE); 889 return (CTL_RETVAL_ERROR); 890 } 891 892 list->segbytes = numbytes; 893 list->segsectors = numbytes / dstblock; 894 donebytes = 0; 895 TAILQ_INIT(&run); 896 list->tbdio = 0; 897 while (donebytes < numbytes) { 898 roundbytes = numbytes - donebytes; 899 if (roundbytes > TPC_MAX_IO_SIZE) { 900 roundbytes = TPC_MAX_IO_SIZE; 901 roundbytes -= roundbytes % dstblock; 902 if (pb > dstblock) { 903 adj = (dstlba * dstblock + roundbytes - pbo) % pb; 904 if (roundbytes > adj) 905 roundbytes -= adj; 906 } 907 } 908 909 tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO); 910 TAILQ_INIT(&tior->run); 911 tior->buf = malloc(roundbytes, M_CTL, M_WAITOK); 912 tior->list = list; 913 TAILQ_INSERT_TAIL(&list->allio, tior, links); 914 tior->io = tpcl_alloc_io(); 915 ctl_scsi_read_write(tior->io, 916 /*data_ptr*/ tior->buf, 917 /*data_len*/ roundbytes, 918 /*read_op*/ 1, 919 /*byte2*/ 0, 920 /*minimum_cdb_size*/ 0, 921 /*lba*/ srclba, 922 /*num_blocks*/ roundbytes / srcblock, 923 /*tag_type*/ CTL_TAG_SIMPLE, 924 /*control*/ 0); 925 tior->io->io_hdr.retries = 3; 926 tior->target = SSD_FORWARDED_SDS_EXSRC; 927 tior->cscd = scscd; 928 tior->lun = sl; 929 tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior; 930 931 tiow = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO); 932 TAILQ_INIT(&tiow->run); 933 tiow->list = list; 934 TAILQ_INSERT_TAIL(&list->allio, tiow, links); 935 tiow->io = tpcl_alloc_io(); 936 ctl_scsi_read_write(tiow->io, 937 /*data_ptr*/ tior->buf, 938 /*data_len*/ roundbytes, 939 /*read_op*/ 0, 940 /*byte2*/ 0, 941 /*minimum_cdb_size*/ 0, 942 /*lba*/ dstlba, 943 /*num_blocks*/ roundbytes / dstblock, 944 /*tag_type*/ CTL_TAG_SIMPLE, 945 /*control*/ 0); 946 tiow->io->io_hdr.retries = 3; 947 tiow->target = SSD_FORWARDED_SDS_EXDST; 948 tiow->cscd = dcscd; 949 tiow->lun = dl; 950 tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow; 951 952 TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks); 953 TAILQ_INSERT_TAIL(&run, tior, rlinks); 954 list->tbdio++; 955 donebytes += roundbytes; 956 srclba += roundbytes / srcblock; 957 dstlba += roundbytes / dstblock; 958 } 959 960 while ((tior = TAILQ_FIRST(&run)) != NULL) { 961 TAILQ_REMOVE(&run, tior, rlinks); 962 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE) 963 panic("tpcl_queue() error"); 964 } 965 966 list->stage++; 967 return (CTL_RETVAL_QUEUED); 968 } 969 970 static int 971 tpc_process_verify(struct tpc_list *list) 972 { 973 struct scsi_ec_segment_verify *seg; 974 struct tpc_io *tio; 975 uint64_t sl; 976 uint16_t cscd; 977 uint8_t csi[4]; 978 979 scsi_ulto4b(list->curseg, csi); 980 if (list->stage == 1) { 981 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) { 982 TAILQ_REMOVE(&list->allio, tio, links); 983 ctl_free_io(tio->io); 984 free(tio, M_CTL); 985 } 986 if (list->abort) { 987 ctl_set_task_aborted(list->ctsio); 988 return (CTL_RETVAL_ERROR); 989 } else if (list->error) { 990 tpc_set_io_error_sense(list); 991 return (CTL_RETVAL_ERROR); 992 } else 993 return (CTL_RETVAL_COMPLETE); 994 } 995 996 TAILQ_INIT(&list->allio); 997 seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg]; 998 cscd = scsi_2btoul(seg->src_cscd); 999 sl = tpc_resolve(list, cscd, NULL, NULL, NULL); 1000 if (sl == UINT64_MAX) { 1001 ctl_set_sense(list->ctsio, /*current_error*/ 1, 1002 /*sense_key*/ SSD_KEY_COPY_ABORTED, 1003 /*asc*/ 0x08, /*ascq*/ 0x04, 1004 SSD_ELEM_COMMAND, sizeof(csi), csi, 1005 SSD_ELEM_NONE); 1006 return (CTL_RETVAL_ERROR); 1007 } 1008 1009 // printf("Verify %ju\n", sl); 1010 1011 if ((seg->tur & 0x01) == 0) 1012 return (CTL_RETVAL_COMPLETE); 1013 1014 list->tbdio = 1; 1015 tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO); 1016 TAILQ_INIT(&tio->run); 1017 tio->list = list; 1018 TAILQ_INSERT_TAIL(&list->allio, tio, links); 1019 tio->io = tpcl_alloc_io(); 1020 ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); 1021 tio->io->io_hdr.retries = 3; 1022 tio->target = SSD_FORWARDED_SDS_EXSRC; 1023 tio->cscd = cscd; 1024 tio->lun = sl; 1025 tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio; 1026 list->stage++; 1027 if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE) 1028 panic("tpcl_queue() error"); 1029 return (CTL_RETVAL_QUEUED); 1030 } 1031 1032 static int 1033 tpc_process_register_key(struct tpc_list *list) 1034 { 1035 struct scsi_ec_segment_register_key *seg; 1036 struct tpc_io *tio; 1037 uint64_t dl; 1038 int datalen; 1039 uint16_t cscd; 1040 uint8_t csi[4]; 1041 1042 scsi_ulto4b(list->curseg, csi); 1043 if (list->stage == 1) { 1044 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) { 1045 TAILQ_REMOVE(&list->allio, tio, links); 1046 ctl_free_io(tio->io); 1047 free(tio->buf, M_CTL); 1048 free(tio, M_CTL); 1049 } 1050 if (list->abort) { 1051 ctl_set_task_aborted(list->ctsio); 1052 return (CTL_RETVAL_ERROR); 1053 } else if (list->error) { 1054 tpc_set_io_error_sense(list); 1055 return (CTL_RETVAL_ERROR); 1056 } else 1057 return (CTL_RETVAL_COMPLETE); 1058 } 1059 1060 TAILQ_INIT(&list->allio); 1061 seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg]; 1062 cscd = scsi_2btoul(seg->dst_cscd); 1063 dl = tpc_resolve(list, cscd, NULL, NULL, NULL); 1064 if (dl == UINT64_MAX) { 1065 ctl_set_sense(list->ctsio, /*current_error*/ 1, 1066 /*sense_key*/ SSD_KEY_COPY_ABORTED, 1067 /*asc*/ 0x08, /*ascq*/ 0x04, 1068 SSD_ELEM_COMMAND, sizeof(csi), csi, 1069 SSD_ELEM_NONE); 1070 return (CTL_RETVAL_ERROR); 1071 } 1072 1073 // printf("Register Key %ju\n", dl); 1074 1075 list->tbdio = 1; 1076 tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO); 1077 TAILQ_INIT(&tio->run); 1078 tio->list = list; 1079 TAILQ_INSERT_TAIL(&list->allio, tio, links); 1080 tio->io = tpcl_alloc_io(); 1081 datalen = sizeof(struct scsi_per_res_out_parms); 1082 tio->buf = malloc(datalen, M_CTL, M_WAITOK); 1083 ctl_scsi_persistent_res_out(tio->io, 1084 tio->buf, datalen, SPRO_REGISTER, -1, 1085 scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key), 1086 /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); 1087 tio->io->io_hdr.retries = 3; 1088 tio->target = SSD_FORWARDED_SDS_EXDST; 1089 tio->cscd = cscd; 1090 tio->lun = dl; 1091 tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio; 1092 list->stage++; 1093 if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE) 1094 panic("tpcl_queue() error"); 1095 return (CTL_RETVAL_QUEUED); 1096 } 1097 1098 static off_t 1099 tpc_ranges_length(struct scsi_range_desc *range, int nrange) 1100 { 1101 off_t length = 0; 1102 int r; 1103 1104 for (r = 0; r < nrange; r++) 1105 length += scsi_4btoul(range[r].length); 1106 return (length); 1107 } 1108 1109 static int 1110 tpc_check_ranges_l(struct scsi_range_desc *range, int nrange, uint64_t maxlba, 1111 uint64_t *lba) 1112 { 1113 uint64_t b1; 1114 uint32_t l1; 1115 int i; 1116 1117 for (i = 0; i < nrange; i++) { 1118 b1 = scsi_8btou64(range[i].lba); 1119 l1 = scsi_4btoul(range[i].length); 1120 if (b1 + l1 < b1 || b1 + l1 > maxlba + 1) { 1121 *lba = MAX(b1, maxlba + 1); 1122 return (-1); 1123 } 1124 } 1125 return (0); 1126 } 1127 1128 static int 1129 tpc_check_ranges_x(struct scsi_range_desc *range, int nrange) 1130 { 1131 uint64_t b1, b2; 1132 uint32_t l1, l2; 1133 int i, j; 1134 1135 for (i = 0; i < nrange - 1; i++) { 1136 b1 = scsi_8btou64(range[i].lba); 1137 l1 = scsi_4btoul(range[i].length); 1138 for (j = i + 1; j < nrange; j++) { 1139 b2 = scsi_8btou64(range[j].lba); 1140 l2 = scsi_4btoul(range[j].length); 1141 if (b1 + l1 > b2 && b2 + l2 > b1) 1142 return (-1); 1143 } 1144 } 1145 return (0); 1146 } 1147 1148 static int 1149 tpc_skip_ranges(struct scsi_range_desc *range, int nrange, off_t skip, 1150 int *srange, off_t *soffset) 1151 { 1152 off_t off; 1153 int r; 1154 1155 r = 0; 1156 off = 0; 1157 while (r < nrange) { 1158 if (skip - off < scsi_4btoul(range[r].length)) { 1159 *srange = r; 1160 *soffset = skip - off; 1161 return (0); 1162 } 1163 off += scsi_4btoul(range[r].length); 1164 r++; 1165 } 1166 return (-1); 1167 } 1168 1169 static int 1170 tpc_process_wut(struct tpc_list *list) 1171 { 1172 struct tpc_io *tio, *tior, *tiow; 1173 struct runl run; 1174 int drange, srange; 1175 off_t doffset, soffset; 1176 off_t srclba, dstlba, numbytes, donebytes, roundbytes; 1177 uint32_t srcblock, dstblock, pb, pbo, adj; 1178 1179 if (list->stage > 0) { 1180 /* Cleanup after previous rounds. */ 1181 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) { 1182 TAILQ_REMOVE(&list->allio, tio, links); 1183 ctl_free_io(tio->io); 1184 free(tio->buf, M_CTL); 1185 free(tio, M_CTL); 1186 } 1187 if (list->abort) { 1188 ctl_set_task_aborted(list->ctsio); 1189 return (CTL_RETVAL_ERROR); 1190 } else if (list->error) { 1191 if (list->fwd_scsi_status) { 1192 list->ctsio->io_hdr.status = 1193 CTL_SCSI_ERROR | CTL_AUTOSENSE; 1194 list->ctsio->scsi_status = list->fwd_scsi_status; 1195 list->ctsio->sense_data = list->fwd_sense_data; 1196 list->ctsio->sense_len = list->fwd_sense_len; 1197 } else { 1198 ctl_set_invalid_field(list->ctsio, 1199 /*sks_valid*/ 0, /*command*/ 0, 1200 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); 1201 } 1202 return (CTL_RETVAL_ERROR); 1203 } 1204 list->cursectors += list->segsectors; 1205 list->curbytes += list->segbytes; 1206 } 1207 1208 /* Check where we are on destination ranges list. */ 1209 if (tpc_skip_ranges(list->range, list->nrange, list->cursectors, 1210 &drange, &doffset) != 0) 1211 return (CTL_RETVAL_COMPLETE); 1212 dstblock = list->lun->be_lun->blocksize; 1213 pb = dstblock << list->lun->be_lun->pblockexp; 1214 if (list->lun->be_lun->pblockoff > 0) 1215 pbo = pb - dstblock * list->lun->be_lun->pblockoff; 1216 else 1217 pbo = 0; 1218 1219 /* Check where we are on source ranges list. */ 1220 srcblock = list->token->blocksize; 1221 if (tpc_skip_ranges(list->token->range, list->token->nrange, 1222 list->offset_into_rod + list->cursectors * dstblock / srcblock, 1223 &srange, &soffset) != 0) { 1224 ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0, 1225 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); 1226 return (CTL_RETVAL_ERROR); 1227 } 1228 1229 srclba = scsi_8btou64(list->token->range[srange].lba) + soffset; 1230 dstlba = scsi_8btou64(list->range[drange].lba) + doffset; 1231 numbytes = srcblock * 1232 (scsi_4btoul(list->token->range[srange].length) - soffset); 1233 numbytes = omin(numbytes, dstblock * 1234 (scsi_4btoul(list->range[drange].length) - doffset)); 1235 if (numbytes > TPC_MAX_IOCHUNK_SIZE) { 1236 numbytes = TPC_MAX_IOCHUNK_SIZE; 1237 numbytes -= numbytes % dstblock; 1238 if (pb > dstblock) { 1239 adj = (dstlba * dstblock + numbytes - pbo) % pb; 1240 if (numbytes > adj) 1241 numbytes -= adj; 1242 } 1243 } 1244 1245 if (numbytes % srcblock != 0 || numbytes % dstblock != 0) { 1246 ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0, 1247 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); 1248 return (CTL_RETVAL_ERROR); 1249 } 1250 1251 list->segbytes = numbytes; 1252 list->segsectors = numbytes / dstblock; 1253 //printf("Copy chunk of %ju sectors from %ju to %ju\n", list->segsectors, 1254 // srclba, dstlba); 1255 donebytes = 0; 1256 TAILQ_INIT(&run); 1257 list->tbdio = 0; 1258 TAILQ_INIT(&list->allio); 1259 while (donebytes < numbytes) { 1260 roundbytes = numbytes - donebytes; 1261 if (roundbytes > TPC_MAX_IO_SIZE) { 1262 roundbytes = TPC_MAX_IO_SIZE; 1263 roundbytes -= roundbytes % dstblock; 1264 if (pb > dstblock) { 1265 adj = (dstlba * dstblock + roundbytes - pbo) % pb; 1266 if (roundbytes > adj) 1267 roundbytes -= adj; 1268 } 1269 } 1270 1271 tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO); 1272 TAILQ_INIT(&tior->run); 1273 tior->buf = malloc(roundbytes, M_CTL, M_WAITOK); 1274 tior->list = list; 1275 TAILQ_INSERT_TAIL(&list->allio, tior, links); 1276 tior->io = tpcl_alloc_io(); 1277 ctl_scsi_read_write(tior->io, 1278 /*data_ptr*/ tior->buf, 1279 /*data_len*/ roundbytes, 1280 /*read_op*/ 1, 1281 /*byte2*/ 0, 1282 /*minimum_cdb_size*/ 0, 1283 /*lba*/ srclba, 1284 /*num_blocks*/ roundbytes / srcblock, 1285 /*tag_type*/ CTL_TAG_SIMPLE, 1286 /*control*/ 0); 1287 tior->io->io_hdr.retries = 3; 1288 tior->lun = list->token->lun; 1289 tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior; 1290 1291 tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO); 1292 TAILQ_INIT(&tiow->run); 1293 tiow->list = list; 1294 TAILQ_INSERT_TAIL(&list->allio, tiow, links); 1295 tiow->io = tpcl_alloc_io(); 1296 ctl_scsi_read_write(tiow->io, 1297 /*data_ptr*/ tior->buf, 1298 /*data_len*/ roundbytes, 1299 /*read_op*/ 0, 1300 /*byte2*/ 0, 1301 /*minimum_cdb_size*/ 0, 1302 /*lba*/ dstlba, 1303 /*num_blocks*/ roundbytes / dstblock, 1304 /*tag_type*/ CTL_TAG_SIMPLE, 1305 /*control*/ 0); 1306 tiow->io->io_hdr.retries = 3; 1307 tiow->lun = list->lun->lun; 1308 tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow; 1309 1310 TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks); 1311 TAILQ_INSERT_TAIL(&run, tior, rlinks); 1312 list->tbdio++; 1313 donebytes += roundbytes; 1314 srclba += roundbytes / srcblock; 1315 dstlba += roundbytes / dstblock; 1316 } 1317 1318 while ((tior = TAILQ_FIRST(&run)) != NULL) { 1319 TAILQ_REMOVE(&run, tior, rlinks); 1320 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE) 1321 panic("tpcl_queue() error"); 1322 } 1323 1324 list->stage++; 1325 return (CTL_RETVAL_QUEUED); 1326 } 1327 1328 static int 1329 tpc_process_zero_wut(struct tpc_list *list) 1330 { 1331 struct tpc_io *tio, *tiow; 1332 struct runl run, *prun; 1333 int r; 1334 uint32_t dstblock, len; 1335 1336 if (list->stage > 0) { 1337 complete: 1338 /* Cleanup after previous rounds. */ 1339 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) { 1340 TAILQ_REMOVE(&list->allio, tio, links); 1341 ctl_free_io(tio->io); 1342 free(tio, M_CTL); 1343 } 1344 if (list->abort) { 1345 ctl_set_task_aborted(list->ctsio); 1346 return (CTL_RETVAL_ERROR); 1347 } else if (list->error) { 1348 if (list->fwd_scsi_status) { 1349 list->ctsio->io_hdr.status = 1350 CTL_SCSI_ERROR | CTL_AUTOSENSE; 1351 list->ctsio->scsi_status = list->fwd_scsi_status; 1352 list->ctsio->sense_data = list->fwd_sense_data; 1353 list->ctsio->sense_len = list->fwd_sense_len; 1354 } else { 1355 ctl_set_invalid_field(list->ctsio, 1356 /*sks_valid*/ 0, /*command*/ 0, 1357 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); 1358 } 1359 return (CTL_RETVAL_ERROR); 1360 } 1361 list->cursectors += list->segsectors; 1362 list->curbytes += list->segbytes; 1363 return (CTL_RETVAL_COMPLETE); 1364 } 1365 1366 dstblock = list->lun->be_lun->blocksize; 1367 TAILQ_INIT(&run); 1368 prun = &run; 1369 list->tbdio = 1; 1370 TAILQ_INIT(&list->allio); 1371 list->segsectors = 0; 1372 for (r = 0; r < list->nrange; r++) { 1373 len = scsi_4btoul(list->range[r].length); 1374 if (len == 0) 1375 continue; 1376 1377 tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO); 1378 TAILQ_INIT(&tiow->run); 1379 tiow->list = list; 1380 TAILQ_INSERT_TAIL(&list->allio, tiow, links); 1381 tiow->io = tpcl_alloc_io(); 1382 ctl_scsi_write_same(tiow->io, 1383 /*data_ptr*/ NULL, 1384 /*data_len*/ 0, 1385 /*byte2*/ SWS_NDOB, 1386 /*lba*/ scsi_8btou64(list->range[r].lba), 1387 /*num_blocks*/ len, 1388 /*tag_type*/ CTL_TAG_SIMPLE, 1389 /*control*/ 0); 1390 tiow->io->io_hdr.retries = 3; 1391 tiow->lun = list->lun->lun; 1392 tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow; 1393 1394 TAILQ_INSERT_TAIL(prun, tiow, rlinks); 1395 prun = &tiow->run; 1396 list->segsectors += len; 1397 } 1398 list->segbytes = list->segsectors * dstblock; 1399 1400 if (TAILQ_EMPTY(&run)) 1401 goto complete; 1402 1403 while ((tiow = TAILQ_FIRST(&run)) != NULL) { 1404 TAILQ_REMOVE(&run, tiow, rlinks); 1405 if (tpcl_queue(tiow->io, tiow->lun) != CTL_RETVAL_COMPLETE) 1406 panic("tpcl_queue() error"); 1407 } 1408 1409 list->stage++; 1410 return (CTL_RETVAL_QUEUED); 1411 } 1412 1413 static void 1414 tpc_process(struct tpc_list *list) 1415 { 1416 struct ctl_lun *lun = list->lun; 1417 struct ctl_softc *softc = lun->ctl_softc; 1418 struct scsi_ec_segment *seg; 1419 struct ctl_scsiio *ctsio = list->ctsio; 1420 int retval = CTL_RETVAL_COMPLETE; 1421 uint8_t csi[4]; 1422 1423 if (list->service_action == EC_WUT) { 1424 if (list->token != NULL) 1425 retval = tpc_process_wut(list); 1426 else 1427 retval = tpc_process_zero_wut(list); 1428 if (retval == CTL_RETVAL_QUEUED) 1429 return; 1430 if (retval == CTL_RETVAL_ERROR) { 1431 list->error = 1; 1432 goto done; 1433 } 1434 } else { 1435 //printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg); 1436 while (list->curseg < list->nseg) { 1437 seg = list->seg[list->curseg]; 1438 switch (seg->type_code) { 1439 case EC_SEG_B2B: 1440 retval = tpc_process_b2b(list); 1441 break; 1442 case EC_SEG_VERIFY: 1443 retval = tpc_process_verify(list); 1444 break; 1445 case EC_SEG_REGISTER_KEY: 1446 retval = tpc_process_register_key(list); 1447 break; 1448 default: 1449 scsi_ulto4b(list->curseg, csi); 1450 ctl_set_sense(ctsio, /*current_error*/ 1, 1451 /*sense_key*/ SSD_KEY_COPY_ABORTED, 1452 /*asc*/ 0x26, /*ascq*/ 0x09, 1453 SSD_ELEM_COMMAND, sizeof(csi), csi, 1454 SSD_ELEM_NONE); 1455 goto done; 1456 } 1457 if (retval == CTL_RETVAL_QUEUED) 1458 return; 1459 if (retval == CTL_RETVAL_ERROR) { 1460 list->error = 1; 1461 goto done; 1462 } 1463 list->curseg++; 1464 list->stage = 0; 1465 } 1466 } 1467 1468 ctl_set_success(ctsio); 1469 1470 done: 1471 //printf("ZZZ done\n"); 1472 free(list->params, M_CTL); 1473 list->params = NULL; 1474 if (list->token) { 1475 mtx_lock(&softc->tpc_lock); 1476 if (--list->token->active == 0) 1477 list->token->last_active = time_uptime; 1478 mtx_unlock(&softc->tpc_lock); 1479 list->token = NULL; 1480 } 1481 mtx_lock(&lun->lun_lock); 1482 if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) { 1483 TAILQ_REMOVE(&lun->tpc_lists, list, links); 1484 free(list, M_CTL); 1485 } else { 1486 list->completed = 1; 1487 list->last_active = time_uptime; 1488 list->sense_data = ctsio->sense_data; 1489 list->sense_len = ctsio->sense_len; 1490 list->scsi_status = ctsio->scsi_status; 1491 } 1492 mtx_unlock(&lun->lun_lock); 1493 1494 ctl_done((union ctl_io *)ctsio); 1495 } 1496 1497 /* 1498 * For any sort of check condition, busy, etc., we just retry. We do not 1499 * decrement the retry count for unit attention type errors. These are 1500 * normal, and we want to save the retry count for "real" errors. Otherwise, 1501 * we could end up with situations where a command will succeed in some 1502 * situations and fail in others, depending on whether a unit attention is 1503 * pending. Also, some of our error recovery actions, most notably the 1504 * LUN reset action, will cause a unit attention. 1505 * 1506 * We can add more detail here later if necessary. 1507 */ 1508 static tpc_error_action 1509 tpc_checkcond_parse(union ctl_io *io) 1510 { 1511 tpc_error_action error_action; 1512 int error_code, sense_key, asc, ascq; 1513 1514 /* 1515 * Default to retrying the command. 1516 */ 1517 error_action = TPC_ERR_RETRY; 1518 1519 scsi_extract_sense_len(&io->scsiio.sense_data, 1520 io->scsiio.sense_len, 1521 &error_code, 1522 &sense_key, 1523 &asc, 1524 &ascq, 1525 /*show_errors*/ 1); 1526 1527 switch (error_code) { 1528 case SSD_DEFERRED_ERROR: 1529 case SSD_DESC_DEFERRED_ERROR: 1530 error_action |= TPC_ERR_NO_DECREMENT; 1531 break; 1532 case SSD_CURRENT_ERROR: 1533 case SSD_DESC_CURRENT_ERROR: 1534 default: 1535 switch (sense_key) { 1536 case SSD_KEY_UNIT_ATTENTION: 1537 error_action |= TPC_ERR_NO_DECREMENT; 1538 break; 1539 case SSD_KEY_HARDWARE_ERROR: 1540 /* 1541 * This is our generic "something bad happened" 1542 * error code. It often isn't recoverable. 1543 */ 1544 if ((asc == 0x44) && (ascq == 0x00)) 1545 error_action = TPC_ERR_FAIL; 1546 break; 1547 case SSD_KEY_NOT_READY: 1548 /* 1549 * If the LUN is powered down, there likely isn't 1550 * much point in retrying right now. 1551 */ 1552 if ((asc == 0x04) && (ascq == 0x02)) 1553 error_action = TPC_ERR_FAIL; 1554 /* 1555 * If the LUN is offline, there probably isn't much 1556 * point in retrying, either. 1557 */ 1558 if ((asc == 0x04) && (ascq == 0x03)) 1559 error_action = TPC_ERR_FAIL; 1560 break; 1561 } 1562 } 1563 return (error_action); 1564 } 1565 1566 static tpc_error_action 1567 tpc_error_parse(union ctl_io *io) 1568 { 1569 tpc_error_action error_action = TPC_ERR_RETRY; 1570 1571 switch (io->io_hdr.io_type) { 1572 case CTL_IO_SCSI: 1573 switch (io->io_hdr.status & CTL_STATUS_MASK) { 1574 case CTL_SCSI_ERROR: 1575 switch (io->scsiio.scsi_status) { 1576 case SCSI_STATUS_CHECK_COND: 1577 error_action = tpc_checkcond_parse(io); 1578 break; 1579 default: 1580 break; 1581 } 1582 break; 1583 default: 1584 break; 1585 } 1586 break; 1587 case CTL_IO_TASK: 1588 break; 1589 default: 1590 panic("%s: invalid ctl_io type %d\n", __func__, 1591 io->io_hdr.io_type); 1592 break; 1593 } 1594 return (error_action); 1595 } 1596 1597 void 1598 tpc_done(union ctl_io *io) 1599 { 1600 struct tpc_io *tio, *tior; 1601 1602 /* 1603 * Very minimal retry logic. We basically retry if we got an error 1604 * back, and the retry count is greater than 0. If we ever want 1605 * more sophisticated initiator type behavior, the CAM error 1606 * recovery code in ../common might be helpful. 1607 */ 1608 tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 1609 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) 1610 && (io->io_hdr.retries > 0)) { 1611 ctl_io_status old_status; 1612 tpc_error_action error_action; 1613 1614 error_action = tpc_error_parse(io); 1615 switch (error_action & TPC_ERR_MASK) { 1616 case TPC_ERR_FAIL: 1617 break; 1618 case TPC_ERR_RETRY: 1619 default: 1620 if ((error_action & TPC_ERR_NO_DECREMENT) == 0) 1621 io->io_hdr.retries--; 1622 old_status = io->io_hdr.status; 1623 io->io_hdr.status = CTL_STATUS_NONE; 1624 io->io_hdr.flags &= ~CTL_FLAG_ABORT; 1625 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 1626 if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) { 1627 printf("%s: error returned from ctl_queue()!\n", 1628 __func__); 1629 io->io_hdr.status = old_status; 1630 } else 1631 return; 1632 } 1633 } 1634 1635 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { 1636 tio->list->error = 1; 1637 if (io->io_hdr.io_type == CTL_IO_SCSI && 1638 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR) { 1639 tio->list->fwd_scsi_status = io->scsiio.scsi_status; 1640 tio->list->fwd_sense_data = io->scsiio.sense_data; 1641 tio->list->fwd_sense_len = io->scsiio.sense_len; 1642 tio->list->fwd_target = tio->target; 1643 tio->list->fwd_cscd = tio->cscd; 1644 } 1645 } else 1646 atomic_add_int(&tio->list->curops, 1); 1647 if (!tio->list->error && !tio->list->abort) { 1648 while ((tior = TAILQ_FIRST(&tio->run)) != NULL) { 1649 TAILQ_REMOVE(&tio->run, tior, rlinks); 1650 atomic_add_int(&tio->list->tbdio, 1); 1651 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE) 1652 panic("tpcl_queue() error"); 1653 } 1654 } 1655 if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1) 1656 tpc_process(tio->list); 1657 } 1658 1659 int 1660 ctl_extended_copy_lid1(struct ctl_scsiio *ctsio) 1661 { 1662 struct ctl_lun *lun = CTL_LUN(ctsio); 1663 struct scsi_extended_copy *cdb; 1664 struct scsi_extended_copy_lid1_data *data; 1665 struct scsi_ec_cscd *cscd; 1666 struct scsi_ec_segment *seg; 1667 struct tpc_list *list, *tlist; 1668 uint8_t *ptr; 1669 char *value; 1670 int len, off, lencscd, lenseg, leninl, nseg; 1671 1672 CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n")); 1673 1674 cdb = (struct scsi_extended_copy *)ctsio->cdb; 1675 len = scsi_4btoul(cdb->length); 1676 1677 if (len == 0) { 1678 ctl_set_success(ctsio); 1679 goto done; 1680 } 1681 if (len < sizeof(struct scsi_extended_copy_lid1_data) || 1682 len > sizeof(struct scsi_extended_copy_lid1_data) + 1683 TPC_MAX_LIST + TPC_MAX_INLINE) { 1684 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 1685 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0); 1686 goto done; 1687 } 1688 1689 /* 1690 * If we've got a kernel request that hasn't been malloced yet, 1691 * malloc it and tell the caller the data buffer is here. 1692 */ 1693 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 1694 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 1695 ctsio->kern_data_len = len; 1696 ctsio->kern_total_len = len; 1697 ctsio->kern_rel_offset = 0; 1698 ctsio->kern_sg_entries = 0; 1699 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 1700 ctsio->be_move_done = ctl_config_move_done; 1701 ctl_datamove((union ctl_io *)ctsio); 1702 1703 return (CTL_RETVAL_COMPLETE); 1704 } 1705 1706 data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr; 1707 lencscd = scsi_2btoul(data->cscd_list_length); 1708 lenseg = scsi_4btoul(data->segment_list_length); 1709 leninl = scsi_4btoul(data->inline_data_length); 1710 if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) { 1711 ctl_set_sense(ctsio, /*current_error*/ 1, 1712 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1713 /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE); 1714 goto done; 1715 } 1716 if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) { 1717 ctl_set_sense(ctsio, /*current_error*/ 1, 1718 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1719 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE); 1720 goto done; 1721 } 1722 if (lencscd + lenseg > TPC_MAX_LIST || 1723 leninl > TPC_MAX_INLINE || 1724 len < sizeof(struct scsi_extended_copy_lid1_data) + 1725 lencscd + lenseg + leninl) { 1726 ctl_set_param_len_error(ctsio); 1727 goto done; 1728 } 1729 1730 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO); 1731 list->service_action = cdb->service_action; 1732 value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc"); 1733 if (value != NULL && strcmp(value, "on") == 0) 1734 list->init_port = -1; 1735 else 1736 list->init_port = ctsio->io_hdr.nexus.targ_port; 1737 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus); 1738 list->list_id = data->list_identifier; 1739 list->flags = data->flags; 1740 list->params = ctsio->kern_data_ptr; 1741 list->cscd = (struct scsi_ec_cscd *)&data->data[0]; 1742 ptr = &data->data[0]; 1743 for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) { 1744 cscd = (struct scsi_ec_cscd *)(ptr + off); 1745 if (cscd->type_code != EC_CSCD_ID) { 1746 free(list, M_CTL); 1747 ctl_set_sense(ctsio, /*current_error*/ 1, 1748 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1749 /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE); 1750 goto done; 1751 } 1752 } 1753 ptr = &data->data[lencscd]; 1754 for (nseg = 0, off = 0; off < lenseg; nseg++) { 1755 if (nseg >= TPC_MAX_SEGS) { 1756 free(list, M_CTL); 1757 ctl_set_sense(ctsio, /*current_error*/ 1, 1758 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1759 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE); 1760 goto done; 1761 } 1762 seg = (struct scsi_ec_segment *)(ptr + off); 1763 if (seg->type_code != EC_SEG_B2B && 1764 seg->type_code != EC_SEG_VERIFY && 1765 seg->type_code != EC_SEG_REGISTER_KEY) { 1766 free(list, M_CTL); 1767 ctl_set_sense(ctsio, /*current_error*/ 1, 1768 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1769 /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE); 1770 goto done; 1771 } 1772 list->seg[nseg] = seg; 1773 off += sizeof(struct scsi_ec_segment) + 1774 scsi_2btoul(seg->descr_length); 1775 } 1776 list->inl = &data->data[lencscd + lenseg]; 1777 list->ncscd = lencscd / sizeof(struct scsi_ec_cscd); 1778 list->nseg = nseg; 1779 list->leninl = leninl; 1780 list->ctsio = ctsio; 1781 list->lun = lun; 1782 mtx_lock(&lun->lun_lock); 1783 if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) { 1784 tlist = tpc_find_list(lun, list->list_id, list->init_idx); 1785 if (tlist != NULL && !tlist->completed) { 1786 mtx_unlock(&lun->lun_lock); 1787 free(list, M_CTL); 1788 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 1789 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 1790 /*bit*/ 0); 1791 goto done; 1792 } 1793 if (tlist != NULL) { 1794 TAILQ_REMOVE(&lun->tpc_lists, tlist, links); 1795 free(tlist, M_CTL); 1796 } 1797 } 1798 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links); 1799 mtx_unlock(&lun->lun_lock); 1800 1801 tpc_process(list); 1802 return (CTL_RETVAL_COMPLETE); 1803 1804 done: 1805 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 1806 free(ctsio->kern_data_ptr, M_CTL); 1807 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 1808 } 1809 ctl_done((union ctl_io *)ctsio); 1810 return (CTL_RETVAL_COMPLETE); 1811 } 1812 1813 int 1814 ctl_extended_copy_lid4(struct ctl_scsiio *ctsio) 1815 { 1816 struct ctl_lun *lun = CTL_LUN(ctsio); 1817 struct scsi_extended_copy *cdb; 1818 struct scsi_extended_copy_lid4_data *data; 1819 struct scsi_ec_cscd *cscd; 1820 struct scsi_ec_segment *seg; 1821 struct tpc_list *list, *tlist; 1822 uint8_t *ptr; 1823 char *value; 1824 int len, off, lencscd, lenseg, leninl, nseg; 1825 1826 CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n")); 1827 1828 cdb = (struct scsi_extended_copy *)ctsio->cdb; 1829 len = scsi_4btoul(cdb->length); 1830 1831 if (len == 0) { 1832 ctl_set_success(ctsio); 1833 goto done; 1834 } 1835 if (len < sizeof(struct scsi_extended_copy_lid4_data) || 1836 len > sizeof(struct scsi_extended_copy_lid4_data) + 1837 TPC_MAX_LIST + TPC_MAX_INLINE) { 1838 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 1839 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0); 1840 goto done; 1841 } 1842 1843 /* 1844 * If we've got a kernel request that hasn't been malloced yet, 1845 * malloc it and tell the caller the data buffer is here. 1846 */ 1847 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 1848 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 1849 ctsio->kern_data_len = len; 1850 ctsio->kern_total_len = len; 1851 ctsio->kern_rel_offset = 0; 1852 ctsio->kern_sg_entries = 0; 1853 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 1854 ctsio->be_move_done = ctl_config_move_done; 1855 ctl_datamove((union ctl_io *)ctsio); 1856 1857 return (CTL_RETVAL_COMPLETE); 1858 } 1859 1860 data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr; 1861 lencscd = scsi_2btoul(data->cscd_list_length); 1862 lenseg = scsi_2btoul(data->segment_list_length); 1863 leninl = scsi_2btoul(data->inline_data_length); 1864 if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) { 1865 ctl_set_sense(ctsio, /*current_error*/ 1, 1866 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1867 /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE); 1868 goto done; 1869 } 1870 if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) { 1871 ctl_set_sense(ctsio, /*current_error*/ 1, 1872 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1873 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE); 1874 goto done; 1875 } 1876 if (lencscd + lenseg > TPC_MAX_LIST || 1877 leninl > TPC_MAX_INLINE || 1878 len < sizeof(struct scsi_extended_copy_lid1_data) + 1879 lencscd + lenseg + leninl) { 1880 ctl_set_param_len_error(ctsio); 1881 goto done; 1882 } 1883 1884 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO); 1885 list->service_action = cdb->service_action; 1886 value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc"); 1887 if (value != NULL && strcmp(value, "on") == 0) 1888 list->init_port = -1; 1889 else 1890 list->init_port = ctsio->io_hdr.nexus.targ_port; 1891 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus); 1892 list->list_id = scsi_4btoul(data->list_identifier); 1893 list->flags = data->flags; 1894 list->params = ctsio->kern_data_ptr; 1895 list->cscd = (struct scsi_ec_cscd *)&data->data[0]; 1896 ptr = &data->data[0]; 1897 for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) { 1898 cscd = (struct scsi_ec_cscd *)(ptr + off); 1899 if (cscd->type_code != EC_CSCD_ID) { 1900 free(list, M_CTL); 1901 ctl_set_sense(ctsio, /*current_error*/ 1, 1902 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1903 /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE); 1904 goto done; 1905 } 1906 } 1907 ptr = &data->data[lencscd]; 1908 for (nseg = 0, off = 0; off < lenseg; nseg++) { 1909 if (nseg >= TPC_MAX_SEGS) { 1910 free(list, M_CTL); 1911 ctl_set_sense(ctsio, /*current_error*/ 1, 1912 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1913 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE); 1914 goto done; 1915 } 1916 seg = (struct scsi_ec_segment *)(ptr + off); 1917 if (seg->type_code != EC_SEG_B2B && 1918 seg->type_code != EC_SEG_VERIFY && 1919 seg->type_code != EC_SEG_REGISTER_KEY) { 1920 free(list, M_CTL); 1921 ctl_set_sense(ctsio, /*current_error*/ 1, 1922 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1923 /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE); 1924 goto done; 1925 } 1926 list->seg[nseg] = seg; 1927 off += sizeof(struct scsi_ec_segment) + 1928 scsi_2btoul(seg->descr_length); 1929 } 1930 list->inl = &data->data[lencscd + lenseg]; 1931 list->ncscd = lencscd / sizeof(struct scsi_ec_cscd); 1932 list->nseg = nseg; 1933 list->leninl = leninl; 1934 list->ctsio = ctsio; 1935 list->lun = lun; 1936 mtx_lock(&lun->lun_lock); 1937 if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) { 1938 tlist = tpc_find_list(lun, list->list_id, list->init_idx); 1939 if (tlist != NULL && !tlist->completed) { 1940 mtx_unlock(&lun->lun_lock); 1941 free(list, M_CTL); 1942 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 1943 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 1944 /*bit*/ 0); 1945 goto done; 1946 } 1947 if (tlist != NULL) { 1948 TAILQ_REMOVE(&lun->tpc_lists, tlist, links); 1949 free(tlist, M_CTL); 1950 } 1951 } 1952 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links); 1953 mtx_unlock(&lun->lun_lock); 1954 1955 tpc_process(list); 1956 return (CTL_RETVAL_COMPLETE); 1957 1958 done: 1959 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 1960 free(ctsio->kern_data_ptr, M_CTL); 1961 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 1962 } 1963 ctl_done((union ctl_io *)ctsio); 1964 return (CTL_RETVAL_COMPLETE); 1965 } 1966 1967 static void 1968 tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len, 1969 struct scsi_token *token) 1970 { 1971 static int id = 0; 1972 struct scsi_vpd_id_descriptor *idd = NULL; 1973 struct scsi_ec_cscd_id *cscd; 1974 struct scsi_read_capacity_data_long *dtsd; 1975 int targid_len; 1976 1977 scsi_ulto4b(ROD_TYPE_AUR, token->type); 1978 scsi_ulto2b(0x01f8, token->length); 1979 scsi_u64to8b(atomic_fetchadd_int(&id, 1), &token->body[0]); 1980 if (lun->lun_devid) 1981 idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *) 1982 lun->lun_devid->data, lun->lun_devid->len, 1983 scsi_devid_is_lun_naa); 1984 if (idd == NULL && lun->lun_devid) 1985 idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *) 1986 lun->lun_devid->data, lun->lun_devid->len, 1987 scsi_devid_is_lun_eui64); 1988 if (idd != NULL) { 1989 cscd = (struct scsi_ec_cscd_id *)&token->body[8]; 1990 cscd->type_code = EC_CSCD_ID; 1991 cscd->luidt_pdt = T_DIRECT; 1992 memcpy(&cscd->codeset, idd, 4 + idd->length); 1993 scsi_ulto3b(lun->be_lun->blocksize, cscd->dtsp.block_length); 1994 } 1995 scsi_u64to8b(0, &token->body[40]); /* XXX: Should be 128bit value. */ 1996 scsi_u64to8b(len, &token->body[48]); 1997 1998 /* ROD token device type specific data (RC16 without first field) */ 1999 dtsd = (struct scsi_read_capacity_data_long *)&token->body[88 - 8]; 2000 scsi_ulto4b(lun->be_lun->blocksize, dtsd->length); 2001 dtsd->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 2002 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, dtsd->lalba_lbp); 2003 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 2004 dtsd->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 2005 2006 if (port->target_devid) { 2007 targid_len = port->target_devid->len; 2008 memcpy(&token->body[120], port->target_devid->data, targid_len); 2009 } else 2010 targid_len = 32; 2011 arc4rand(&token->body[120 + targid_len], 384 - targid_len, 0); 2012 }; 2013 2014 int 2015 ctl_populate_token(struct ctl_scsiio *ctsio) 2016 { 2017 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2018 struct ctl_port *port = CTL_PORT(ctsio); 2019 struct ctl_lun *lun = CTL_LUN(ctsio); 2020 struct scsi_populate_token *cdb; 2021 struct scsi_populate_token_data *data; 2022 struct tpc_list *list, *tlist; 2023 struct tpc_token *token; 2024 uint64_t lba; 2025 int len, lendata, lendesc; 2026 2027 CTL_DEBUG_PRINT(("ctl_populate_token\n")); 2028 2029 cdb = (struct scsi_populate_token *)ctsio->cdb; 2030 len = scsi_4btoul(cdb->length); 2031 2032 if (len < sizeof(struct scsi_populate_token_data) || 2033 len > sizeof(struct scsi_populate_token_data) + 2034 TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) { 2035 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 2036 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0); 2037 goto done; 2038 } 2039 2040 /* 2041 * If we've got a kernel request that hasn't been malloced yet, 2042 * malloc it and tell the caller the data buffer is here. 2043 */ 2044 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 2045 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 2046 ctsio->kern_data_len = len; 2047 ctsio->kern_total_len = len; 2048 ctsio->kern_rel_offset = 0; 2049 ctsio->kern_sg_entries = 0; 2050 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 2051 ctsio->be_move_done = ctl_config_move_done; 2052 ctl_datamove((union ctl_io *)ctsio); 2053 2054 return (CTL_RETVAL_COMPLETE); 2055 } 2056 2057 data = (struct scsi_populate_token_data *)ctsio->kern_data_ptr; 2058 lendata = scsi_2btoul(data->length); 2059 if (lendata < sizeof(struct scsi_populate_token_data) - 2 + 2060 sizeof(struct scsi_range_desc)) { 2061 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 2062 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); 2063 goto done; 2064 } 2065 lendesc = scsi_2btoul(data->range_descriptor_length); 2066 if (lendesc < sizeof(struct scsi_range_desc) || 2067 len < sizeof(struct scsi_populate_token_data) + lendesc || 2068 lendata < sizeof(struct scsi_populate_token_data) - 2 + lendesc) { 2069 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 2070 /*field*/ 14, /*bit_valid*/ 0, /*bit*/ 0); 2071 goto done; 2072 } 2073 /* 2074 printf("PT(list=%u) flags=%x to=%d rt=%x len=%x\n", 2075 scsi_4btoul(cdb->list_identifier), 2076 data->flags, scsi_4btoul(data->inactivity_timeout), 2077 scsi_4btoul(data->rod_type), 2078 scsi_2btoul(data->range_descriptor_length)); 2079 */ 2080 2081 /* Validate INACTIVITY TIMEOUT field */ 2082 if (scsi_4btoul(data->inactivity_timeout) > TPC_MAX_TOKEN_TIMEOUT) { 2083 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 2084 /*command*/ 0, /*field*/ 4, /*bit_valid*/ 0, 2085 /*bit*/ 0); 2086 goto done; 2087 } 2088 2089 /* Validate ROD TYPE field */ 2090 if ((data->flags & EC_PT_RTV) && 2091 scsi_4btoul(data->rod_type) != ROD_TYPE_AUR) { 2092 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 2093 /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0); 2094 goto done; 2095 } 2096 2097 /* Validate list of ranges */ 2098 if (tpc_check_ranges_l(&data->desc[0], 2099 scsi_2btoul(data->range_descriptor_length) / 2100 sizeof(struct scsi_range_desc), 2101 lun->be_lun->maxlba, &lba) != 0) { 2102 ctl_set_lba_out_of_range(ctsio, lba); 2103 goto done; 2104 } 2105 if (tpc_check_ranges_x(&data->desc[0], 2106 scsi_2btoul(data->range_descriptor_length) / 2107 sizeof(struct scsi_range_desc)) != 0) { 2108 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, 2109 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 2110 /*bit*/ 0); 2111 goto done; 2112 } 2113 2114 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO); 2115 list->service_action = cdb->service_action; 2116 list->init_port = ctsio->io_hdr.nexus.targ_port; 2117 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus); 2118 list->list_id = scsi_4btoul(cdb->list_identifier); 2119 list->flags = data->flags; 2120 list->ctsio = ctsio; 2121 list->lun = lun; 2122 mtx_lock(&lun->lun_lock); 2123 tlist = tpc_find_list(lun, list->list_id, list->init_idx); 2124 if (tlist != NULL && !tlist->completed) { 2125 mtx_unlock(&lun->lun_lock); 2126 free(list, M_CTL); 2127 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 2128 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 2129 /*bit*/ 0); 2130 goto done; 2131 } 2132 if (tlist != NULL) { 2133 TAILQ_REMOVE(&lun->tpc_lists, tlist, links); 2134 free(tlist, M_CTL); 2135 } 2136 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links); 2137 mtx_unlock(&lun->lun_lock); 2138 2139 token = malloc(sizeof(*token), M_CTL, M_WAITOK | M_ZERO); 2140 token->lun = lun->lun; 2141 token->blocksize = lun->be_lun->blocksize; 2142 token->params = ctsio->kern_data_ptr; 2143 token->range = &data->desc[0]; 2144 token->nrange = scsi_2btoul(data->range_descriptor_length) / 2145 sizeof(struct scsi_range_desc); 2146 list->cursectors = tpc_ranges_length(token->range, token->nrange); 2147 list->curbytes = (off_t)list->cursectors * lun->be_lun->blocksize; 2148 tpc_create_token(lun, port, list->curbytes, 2149 (struct scsi_token *)token->token); 2150 token->active = 0; 2151 token->last_active = time_uptime; 2152 token->timeout = scsi_4btoul(data->inactivity_timeout); 2153 if (token->timeout == 0) 2154 token->timeout = TPC_DFL_TOKEN_TIMEOUT; 2155 else if (token->timeout < TPC_MIN_TOKEN_TIMEOUT) 2156 token->timeout = TPC_MIN_TOKEN_TIMEOUT; 2157 memcpy(list->res_token, token->token, sizeof(list->res_token)); 2158 list->res_token_valid = 1; 2159 list->curseg = 0; 2160 list->completed = 1; 2161 list->last_active = time_uptime; 2162 mtx_lock(&softc->tpc_lock); 2163 TAILQ_INSERT_TAIL(&softc->tpc_tokens, token, links); 2164 mtx_unlock(&softc->tpc_lock); 2165 ctl_set_success(ctsio); 2166 ctl_done((union ctl_io *)ctsio); 2167 return (CTL_RETVAL_COMPLETE); 2168 2169 done: 2170 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 2171 free(ctsio->kern_data_ptr, M_CTL); 2172 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 2173 } 2174 ctl_done((union ctl_io *)ctsio); 2175 return (CTL_RETVAL_COMPLETE); 2176 } 2177 2178 int 2179 ctl_write_using_token(struct ctl_scsiio *ctsio) 2180 { 2181 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2182 struct ctl_lun *lun = CTL_LUN(ctsio); 2183 struct scsi_write_using_token *cdb; 2184 struct scsi_write_using_token_data *data; 2185 struct tpc_list *list, *tlist; 2186 struct tpc_token *token; 2187 uint64_t lba; 2188 int len, lendata, lendesc; 2189 2190 CTL_DEBUG_PRINT(("ctl_write_using_token\n")); 2191 2192 cdb = (struct scsi_write_using_token *)ctsio->cdb; 2193 len = scsi_4btoul(cdb->length); 2194 2195 if (len < sizeof(struct scsi_write_using_token_data) || 2196 len > sizeof(struct scsi_write_using_token_data) + 2197 TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) { 2198 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 2199 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0); 2200 goto done; 2201 } 2202 2203 /* 2204 * If we've got a kernel request that hasn't been malloced yet, 2205 * malloc it and tell the caller the data buffer is here. 2206 */ 2207 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 2208 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 2209 ctsio->kern_data_len = len; 2210 ctsio->kern_total_len = len; 2211 ctsio->kern_rel_offset = 0; 2212 ctsio->kern_sg_entries = 0; 2213 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 2214 ctsio->be_move_done = ctl_config_move_done; 2215 ctl_datamove((union ctl_io *)ctsio); 2216 2217 return (CTL_RETVAL_COMPLETE); 2218 } 2219 2220 data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr; 2221 lendata = scsi_2btoul(data->length); 2222 if (lendata < sizeof(struct scsi_write_using_token_data) - 2 + 2223 sizeof(struct scsi_range_desc)) { 2224 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 2225 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); 2226 goto done; 2227 } 2228 lendesc = scsi_2btoul(data->range_descriptor_length); 2229 if (lendesc < sizeof(struct scsi_range_desc) || 2230 len < sizeof(struct scsi_write_using_token_data) + lendesc || 2231 lendata < sizeof(struct scsi_write_using_token_data) - 2 + lendesc) { 2232 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 2233 /*field*/ 534, /*bit_valid*/ 0, /*bit*/ 0); 2234 goto done; 2235 } 2236 /* 2237 printf("WUT(list=%u) flags=%x off=%ju len=%x\n", 2238 scsi_4btoul(cdb->list_identifier), 2239 data->flags, scsi_8btou64(data->offset_into_rod), 2240 scsi_2btoul(data->range_descriptor_length)); 2241 */ 2242 2243 /* Validate list of ranges */ 2244 if (tpc_check_ranges_l(&data->desc[0], 2245 scsi_2btoul(data->range_descriptor_length) / 2246 sizeof(struct scsi_range_desc), 2247 lun->be_lun->maxlba, &lba) != 0) { 2248 ctl_set_lba_out_of_range(ctsio, lba); 2249 goto done; 2250 } 2251 if (tpc_check_ranges_x(&data->desc[0], 2252 scsi_2btoul(data->range_descriptor_length) / 2253 sizeof(struct scsi_range_desc)) != 0) { 2254 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, 2255 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 2256 /*bit*/ 0); 2257 goto done; 2258 } 2259 2260 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO); 2261 list->service_action = cdb->service_action; 2262 list->init_port = ctsio->io_hdr.nexus.targ_port; 2263 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus); 2264 list->list_id = scsi_4btoul(cdb->list_identifier); 2265 list->flags = data->flags; 2266 list->params = ctsio->kern_data_ptr; 2267 list->range = &data->desc[0]; 2268 list->nrange = scsi_2btoul(data->range_descriptor_length) / 2269 sizeof(struct scsi_range_desc); 2270 list->offset_into_rod = scsi_8btou64(data->offset_into_rod); 2271 list->ctsio = ctsio; 2272 list->lun = lun; 2273 mtx_lock(&lun->lun_lock); 2274 tlist = tpc_find_list(lun, list->list_id, list->init_idx); 2275 if (tlist != NULL && !tlist->completed) { 2276 mtx_unlock(&lun->lun_lock); 2277 free(list, M_CTL); 2278 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 2279 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 2280 /*bit*/ 0); 2281 goto done; 2282 } 2283 if (tlist != NULL) { 2284 TAILQ_REMOVE(&lun->tpc_lists, tlist, links); 2285 free(tlist, M_CTL); 2286 } 2287 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links); 2288 mtx_unlock(&lun->lun_lock); 2289 2290 /* Block device zero ROD token -> no token. */ 2291 if (scsi_4btoul(data->rod_token) == ROD_TYPE_BLOCK_ZERO) { 2292 tpc_process(list); 2293 return (CTL_RETVAL_COMPLETE); 2294 } 2295 2296 mtx_lock(&softc->tpc_lock); 2297 TAILQ_FOREACH(token, &softc->tpc_tokens, links) { 2298 if (memcmp(token->token, data->rod_token, 2299 sizeof(data->rod_token)) == 0) 2300 break; 2301 } 2302 if (token != NULL) { 2303 token->active++; 2304 list->token = token; 2305 if (data->flags & EC_WUT_DEL_TKN) 2306 token->timeout = 0; 2307 } 2308 mtx_unlock(&softc->tpc_lock); 2309 if (token == NULL) { 2310 mtx_lock(&lun->lun_lock); 2311 TAILQ_REMOVE(&lun->tpc_lists, list, links); 2312 mtx_unlock(&lun->lun_lock); 2313 free(list, M_CTL); 2314 ctl_set_sense(ctsio, /*current_error*/ 1, 2315 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 2316 /*asc*/ 0x23, /*ascq*/ 0x04, SSD_ELEM_NONE); 2317 goto done; 2318 } 2319 2320 tpc_process(list); 2321 return (CTL_RETVAL_COMPLETE); 2322 2323 done: 2324 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 2325 free(ctsio->kern_data_ptr, M_CTL); 2326 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 2327 } 2328 ctl_done((union ctl_io *)ctsio); 2329 return (CTL_RETVAL_COMPLETE); 2330 } 2331 2332 int 2333 ctl_receive_rod_token_information(struct ctl_scsiio *ctsio) 2334 { 2335 struct ctl_lun *lun = CTL_LUN(ctsio); 2336 struct scsi_receive_rod_token_information *cdb; 2337 struct scsi_receive_copy_status_lid4_data *data; 2338 struct tpc_list *list; 2339 struct tpc_list list_copy; 2340 uint8_t *ptr; 2341 int retval; 2342 int alloc_len, total_len, token_len; 2343 uint32_t list_id; 2344 2345 CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n")); 2346 2347 cdb = (struct scsi_receive_rod_token_information *)ctsio->cdb; 2348 retval = CTL_RETVAL_COMPLETE; 2349 2350 list_id = scsi_4btoul(cdb->list_identifier); 2351 mtx_lock(&lun->lun_lock); 2352 list = tpc_find_list(lun, list_id, 2353 ctl_get_initindex(&ctsio->io_hdr.nexus)); 2354 if (list == NULL) { 2355 mtx_unlock(&lun->lun_lock); 2356 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 2357 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, 2358 /*bit*/ 0); 2359 ctl_done((union ctl_io *)ctsio); 2360 return (retval); 2361 } 2362 list_copy = *list; 2363 if (list->completed) { 2364 TAILQ_REMOVE(&lun->tpc_lists, list, links); 2365 free(list, M_CTL); 2366 } 2367 mtx_unlock(&lun->lun_lock); 2368 2369 token_len = list_copy.res_token_valid ? 2 + sizeof(list_copy.res_token) : 0; 2370 total_len = sizeof(*data) + list_copy.sense_len + 4 + token_len; 2371 alloc_len = scsi_4btoul(cdb->length); 2372 2373 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 2374 ctsio->kern_sg_entries = 0; 2375 ctsio->kern_rel_offset = 0; 2376 ctsio->kern_data_len = min(total_len, alloc_len); 2377 ctsio->kern_total_len = ctsio->kern_data_len; 2378 2379 data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr; 2380 scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len + 2381 4 + token_len, data->available_data); 2382 data->response_to_service_action = list_copy.service_action; 2383 if (list_copy.completed) { 2384 if (list_copy.error) 2385 data->copy_command_status = RCS_CCS_ERROR; 2386 else if (list_copy.abort) 2387 data->copy_command_status = RCS_CCS_ABORTED; 2388 else 2389 data->copy_command_status = RCS_CCS_COMPLETED; 2390 } else 2391 data->copy_command_status = RCS_CCS_INPROG_FG; 2392 scsi_ulto2b(list_copy.curops, data->operation_counter); 2393 scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay); 2394 data->transfer_count_units = RCS_TC_LBAS; 2395 scsi_u64to8b(list_copy.cursectors, data->transfer_count); 2396 scsi_ulto2b(list_copy.curseg, data->segments_processed); 2397 data->length_of_the_sense_data_field = list_copy.sense_len; 2398 data->sense_data_length = list_copy.sense_len; 2399 memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len); 2400 2401 ptr = &data->sense_data[data->length_of_the_sense_data_field]; 2402 scsi_ulto4b(token_len, &ptr[0]); 2403 if (list_copy.res_token_valid) { 2404 scsi_ulto2b(0, &ptr[4]); 2405 memcpy(&ptr[6], list_copy.res_token, sizeof(list_copy.res_token)); 2406 } 2407 /* 2408 printf("RRTI(list=%u) valid=%d\n", 2409 scsi_4btoul(cdb->list_identifier), list_copy.res_token_valid); 2410 */ 2411 ctl_set_success(ctsio); 2412 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 2413 ctsio->be_move_done = ctl_config_move_done; 2414 ctl_datamove((union ctl_io *)ctsio); 2415 return (retval); 2416 } 2417 2418 int 2419 ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio) 2420 { 2421 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2422 struct scsi_report_all_rod_tokens *cdb; 2423 struct scsi_report_all_rod_tokens_data *data; 2424 struct tpc_token *token; 2425 int retval; 2426 int alloc_len, total_len, tokens, i; 2427 2428 CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n")); 2429 2430 cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb; 2431 retval = CTL_RETVAL_COMPLETE; 2432 2433 tokens = 0; 2434 mtx_lock(&softc->tpc_lock); 2435 TAILQ_FOREACH(token, &softc->tpc_tokens, links) 2436 tokens++; 2437 mtx_unlock(&softc->tpc_lock); 2438 if (tokens > 512) 2439 tokens = 512; 2440 2441 total_len = sizeof(*data) + tokens * 96; 2442 alloc_len = scsi_4btoul(cdb->length); 2443 2444 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 2445 ctsio->kern_sg_entries = 0; 2446 ctsio->kern_rel_offset = 0; 2447 ctsio->kern_data_len = min(total_len, alloc_len); 2448 ctsio->kern_total_len = ctsio->kern_data_len; 2449 2450 data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr; 2451 i = 0; 2452 mtx_lock(&softc->tpc_lock); 2453 TAILQ_FOREACH(token, &softc->tpc_tokens, links) { 2454 if (i >= tokens) 2455 break; 2456 memcpy(&data->rod_management_token_list[i * 96], 2457 token->token, 96); 2458 i++; 2459 } 2460 mtx_unlock(&softc->tpc_lock); 2461 scsi_ulto4b(sizeof(*data) - 4 + i * 96, data->available_data); 2462 /* 2463 printf("RART tokens=%d\n", i); 2464 */ 2465 ctl_set_success(ctsio); 2466 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 2467 ctsio->be_move_done = ctl_config_move_done; 2468 ctl_datamove((union ctl_io *)ctsio); 2469 return (retval); 2470 } 2471 2472