/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2014-2021 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>
#include <machine/atomic.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_tpc.h>
#include <cam/ctl/ctl_error.h>

#define	TPC_MAX_CSCDS	64
#define	TPC_MAX_SEGS	64
#define	TPC_MAX_SEG	0
#define	TPC_MAX_LIST	8192
#define	TPC_MAX_INLINE	0
#define	TPC_MAX_LISTS	255
#define	TPC_MAX_IO_SIZE	(8 * MIN(1024 * 1024, MAX(128 * 1024, maxphys)))
#define	TPC_MAX_IOCHUNK_SIZE	(TPC_MAX_IO_SIZE * 4)
#define	TPC_MIN_TOKEN_TIMEOUT	1
#define	TPC_DFL_TOKEN_TIMEOUT	60
#define	TPC_MAX_TOKEN_TIMEOUT	600

MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");

typedef enum {
	TPC_ERR_RETRY		= 0x000,
	TPC_ERR_FAIL		= 0x001,
	TPC_ERR_MASK		= 0x0ff,
	TPC_ERR_NO_DECREMENT	= 0x100
} tpc_error_action;

struct tpc_list;
TAILQ_HEAD(runl, tpc_io);
struct tpc_io {
	union ctl_io		*io;
	uint8_t			 target;
	uint32_t		 cscd;
	uint64_t		 lun;
	uint8_t			*buf;
	struct tpc_list		*list;
	struct runl		 run;
	TAILQ_ENTRY(tpc_io)	 rlinks;
	TAILQ_ENTRY(tpc_io)	 links;
};

struct tpc_token {
	uint8_t			 token[512];
	uint64_t		 lun;
	uint32_t		 blocksize;
	uint8_t			*params;
	struct scsi_range_desc	*range;
	int			 nrange;
	int			 active;
	time_t			 last_active;
	uint32_t		 timeout;
	TAILQ_ENTRY(tpc_token)	 links;
};
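
/*
 * State of a single EXTENDED COPY or WRITE USING TOKEN operation: the
 * parsed parameter list, per-segment progress counters, error details
 * forwarded from the last failing subsidiary I/O, and the final status
 * kept for RECEIVE COPY STATUS queries until the list expires.
 */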
struct tpc_list {
	uint8_t			 service_action;
	int			 init_port;
	uint32_t		 init_idx;
	uint32_t		 list_id;
	uint8_t			 flags;
	uint8_t			*params;
	struct scsi_ec_cscd	*cscd;
	struct scsi_ec_segment	*seg[TPC_MAX_SEGS];
	uint8_t			*inl;
	int			 ncscd;
	int			 nseg;
	int			 leninl;
	struct tpc_token	*token;
	struct scsi_range_desc	*range;
	int			 nrange;
	off_t			 offset_into_rod;

	int			 curseg;
	off_t			 cursectors;
	off_t			 curbytes;
	int			 curops;
	int			 stage;
	off_t			 segsectors;
	off_t			 segbytes;
	int			 tbdio;
	int			 error;
	int			 abort;
	int			 completed;
	time_t			 last_active;
	TAILQ_HEAD(, tpc_io)	 allio;
	struct scsi_sense_data	 fwd_sense_data;
	uint8_t			 fwd_sense_len;
	uint8_t			 fwd_scsi_status;
	uint8_t			 fwd_target;
	uint16_t		 fwd_cscd;
	struct scsi_sense_data	 sense_data;
	uint8_t			 sense_len;
	uint8_t			 scsi_status;
	struct ctl_scsiio	*ctsio;
	struct ctl_lun		*lun;
	int			 res_token_valid;
	uint8_t			 res_token[512];
	TAILQ_ENTRY(tpc_list)	 links;
};

static void
tpc_timeout(void *arg)
{
	struct ctl_softc *softc = arg;
	struct ctl_lun *lun;
	struct tpc_token *token, *ttoken;
	struct tpc_list *list, *tlist;

	/* Free completed lists with expired timeout. */
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
			if (!list->completed || time_uptime < list->last_active +
			    TPC_DFL_TOKEN_TIMEOUT)
				continue;
			TAILQ_REMOVE(&lun->tpc_lists, list, links);
			free(list, M_CTL);
		}
		mtx_unlock(&lun->lun_lock);
	}

	/* Free inactive ROD tokens with expired timeout. */
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
		if (token->active ||
		    time_uptime < token->last_active + token->timeout + 1)
			continue;
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
	callout_schedule_sbt(&softc->tpc_timeout, SBT_1S, SBT_1S, 0);
}

void
ctl_tpc_init(struct ctl_softc *softc)
{

	mtx_init(&softc->tpc_lock, "CTL TPC mutex", NULL, MTX_DEF);
	TAILQ_INIT(&softc->tpc_tokens);
	callout_init_mtx(&softc->tpc_timeout, &softc->ctl_lock, 0);
	callout_reset_sbt(&softc->tpc_timeout, SBT_1S, SBT_1S,
	    tpc_timeout, softc, 0);
}
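
/*
 * Stop the expiration callout and release all remaining ROD tokens on
 * CTL shutdown.
 */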
void
ctl_tpc_shutdown(struct ctl_softc *softc)
{
	struct tpc_token *token;

	callout_drain(&softc->tpc_timeout);

	/* Free ROD tokens. */
	mtx_lock(&softc->tpc_lock);
	while ((token = TAILQ_FIRST(&softc->tpc_tokens)) != NULL) {
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
	mtx_destroy(&softc->tpc_lock);
}

void
ctl_tpc_lun_init(struct ctl_lun *lun)
{

	TAILQ_INIT(&lun->tpc_lists);
}

void
ctl_tpc_lun_clear(struct ctl_lun *lun, uint32_t initidx)
{
	struct tpc_list *list, *tlist;

	TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
		if (initidx != -1 && list->init_idx != initidx)
			continue;
		if (!list->completed)
			continue;
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
}

void
ctl_tpc_lun_shutdown(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	struct tpc_list *list;
	struct tpc_token *token, *ttoken;

	/* Free lists for this LUN. */
	while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		KASSERT(list->completed,
		    ("Not completed TPC (%p) on shutdown", list));
		free(list, M_CTL);
	}

	/* Free ROD tokens for this LUN. */
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
		if (token->lun != lun->lun || token->active)
			continue;
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
}
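
/*
 * Report the Third Party Copy VPD page (SVPD_SCSI_TPC), advertising
 * the copy limits, supported commands, descriptor types and ROD types
 * defined above.
 */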
int
ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_vpd_tpc *tpc_ptr;
	struct scsi_vpd_tpc_descriptor *d_ptr;
	struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr;
	struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
	struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
	struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
	struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
	struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
	struct scsi_vpd_tpc_descriptor_rtf *rtf_ptr;
	struct scsi_vpd_tpc_descriptor_rtf_block *rtfb_ptr;
	struct scsi_vpd_tpc_descriptor_srt *srt_ptr;
	struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr;
	struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
	int data_len;

	data_len = sizeof(struct scsi_vpd_tpc) +
	    sizeof(struct scsi_vpd_tpc_descriptor_bdrl) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
	     2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 11, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_pd) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_rtf) +
	    sizeof(struct scsi_vpd_tpc_descriptor_rtf_block) +
	    sizeof(struct scsi_vpd_tpc_descriptor_srt) +
	    2*sizeof(struct scsi_vpd_tpc_descriptor_srtd) +
	    sizeof(struct scsi_vpd_tpc_descriptor_gco);

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				     lun->be_lun->lun_type;
	else
		tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	tpc_ptr->page_code = SVPD_SCSI_TPC;
	scsi_ulto2b(data_len - 4, tpc_ptr->page_length);

	/* Block Device ROD Limits */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
	bdrl_ptr = (struct scsi_vpd_tpc_descriptor_bdrl *)d_ptr;
	scsi_ulto2b(SVPD_TPC_BDRL, bdrl_ptr->desc_type);
	scsi_ulto2b(sizeof(*bdrl_ptr) - 4, bdrl_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_SEGS, bdrl_ptr->maximum_ranges);
	scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
	    bdrl_ptr->maximum_inactivity_timeout);
	scsi_ulto4b(TPC_DFL_TOKEN_TIMEOUT,
	    bdrl_ptr->default_inactivity_timeout);
	scsi_u64to8b(0, bdrl_ptr->maximum_token_transfer_size);
	scsi_u64to8b(0, bdrl_ptr->optimal_transfer_count);

	/* Supported commands */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
	sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 11;
	scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
	scd_ptr = &sc_ptr->descr[0];
	scd_ptr->opcode = EXTENDED_COPY;
	scd_ptr->sa_length = 5;
	scd_ptr->supported_service_actions[0] = EC_EC_LID1;
	scd_ptr->supported_service_actions[1] = EC_EC_LID4;
	scd_ptr->supported_service_actions[2] = EC_PT;
	scd_ptr->supported_service_actions[3] = EC_WUT;
	scd_ptr->supported_service_actions[4] = EC_COA;
	scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
	    &scd_ptr->supported_service_actions[scd_ptr->sa_length];
	scd_ptr->opcode = RECEIVE_COPY_STATUS;
	scd_ptr->sa_length = 6;
	scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
	scd_ptr->supported_service_actions[1] = RCS_RCFD;
	scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
	scd_ptr->supported_service_actions[3] = RCS_RCOP;
	scd_ptr->supported_service_actions[4] = RCS_RRTI;
	scd_ptr->supported_service_actions[5] = RCS_RART;

	/* Parameter data. */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
	scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);

	/* Supported Descriptors */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
	sd_ptr->list_length = 4;
	sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
	sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
	sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
	sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;

	/* Supported CSCD Descriptor IDs */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
	scsi_ulto2b(2, sdid_ptr->list_length);
	scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);

	/* ROD Token Features */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	rtf_ptr = (struct scsi_vpd_tpc_descriptor_rtf *)d_ptr;
	scsi_ulto2b(SVPD_TPC_RTF, rtf_ptr->desc_type);
	scsi_ulto2b(sizeof(*rtf_ptr) - 4 + sizeof(*rtfb_ptr), rtf_ptr->desc_length);
	rtf_ptr->remote_tokens = 0;
	scsi_ulto4b(TPC_MIN_TOKEN_TIMEOUT, rtf_ptr->minimum_token_lifetime);
	scsi_ulto4b(UINT32_MAX, rtf_ptr->maximum_token_lifetime);
	scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
	    rtf_ptr->maximum_token_inactivity_timeout);
	scsi_ulto2b(sizeof(*rtfb_ptr), rtf_ptr->type_specific_features_length);
	rtfb_ptr = (struct scsi_vpd_tpc_descriptor_rtf_block *)
	    &rtf_ptr->type_specific_features;
	rtfb_ptr->type_format = SVPD_TPC_RTF_BLOCK;
	scsi_ulto2b(sizeof(*rtfb_ptr) - 4, rtfb_ptr->desc_length);
	scsi_ulto2b(0, rtfb_ptr->optimal_length_granularity);
	scsi_u64to8b(0, rtfb_ptr->maximum_bytes);
	scsi_u64to8b(0, rtfb_ptr->optimal_bytes);
	scsi_u64to8b(UINT64_MAX, rtfb_ptr->optimal_bytes_to_token_per_segment);
	scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
	    rtfb_ptr->optimal_bytes_from_token_per_segment);

	/* Supported ROD Tokens */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	srt_ptr = (struct scsi_vpd_tpc_descriptor_srt *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SRT, srt_ptr->desc_type);
	scsi_ulto2b(sizeof(*srt_ptr) - 4 + 2*sizeof(*srtd_ptr), srt_ptr->desc_length);
	scsi_ulto2b(2*sizeof(*srtd_ptr), srt_ptr->rod_type_descriptors_length);
	srtd_ptr = (struct scsi_vpd_tpc_descriptor_srtd *)
	    &srt_ptr->rod_type_descriptors;
	scsi_ulto4b(ROD_TYPE_AUR, srtd_ptr->rod_type);
	srtd_ptr->flags = SVPD_TPC_SRTD_TIN | SVPD_TPC_SRTD_TOUT;
	scsi_ulto2b(0, srtd_ptr->preference_indicator);
	srtd_ptr++;
	scsi_ulto4b(ROD_TYPE_BLOCK_ZERO, srtd_ptr->rod_type);
	srtd_ptr->flags = SVPD_TPC_SRTD_TIN;
	scsi_ulto2b(0, srtd_ptr->preference_indicator);

	/* General Copy Operations */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
	scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
	scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
	scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
	gco_ptr->data_segment_granularity = 0;
	gco_ptr->inline_data_granularity = 0;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}

int
ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
{
	struct scsi_receive_copy_operating_parameters *cdb;
	struct scsi_receive_copy_operating_parameters_data *data;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_receive_copy_operating_parameters\n"));

	cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;

	retval = CTL_RETVAL_COMPLETE;

	total_len = sizeof(*data) + 4;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
	data->snlid = RCOP_SNLID;
	scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
	scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
	scsi_ulto4b(0, data->held_data_limit);
	scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
	scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
	data->maximum_concurrent_copies = TPC_MAX_LISTS;
	data->data_segment_granularity = 0;
	data->inline_data_granularity = 0;
	data->held_data_granularity = 0;
	data->implemented_descriptor_list_length = 4;
	data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
	data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
	data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
	data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

static struct tpc_list *
tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx)
{
	struct tpc_list *list;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id &&
		    list->init_idx == init_idx)
			break;
	}
	return (list);
}
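
/*
 * RECEIVE COPY STATUS (LID1): report the progress of the list matching
 * the one-byte list identifier, freeing the list once it has completed.
 */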
int
ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_receive_copy_status_lid1 *cdb;
	struct scsi_receive_copy_status_lid1_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));

	cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4, data->available_data);
	if (list_copy.completed) {
		if (list_copy.error || list_copy.abort)
			data->copy_command_status = RCS_CCS_ERROR;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG;
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	if (list_copy.curbytes <= UINT32_MAX) {
		data->transfer_count_units = RCS_TC_BYTES;
		scsi_ulto4b(list_copy.curbytes, data->transfer_count);
	} else {
		data->transfer_count_units = RCS_TC_MBYTES;
		scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
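
/*
 * RECEIVE COPY FAILURE DETAILS: return the saved SCSI status and sense
 * data of a completed list, then free it.
 */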
int
ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_receive_copy_failure_details *cdb;
	struct scsi_receive_copy_failure_details_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));

	cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL || !list->completed) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	TAILQ_REMOVE(&lun->tpc_lists, list, links);
	free(list, M_CTL);
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
	if (list_copy.completed && (list_copy.error || list_copy.abort)) {
		scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
		    data->available_data);
		data->copy_command_status = RCS_CCS_ERROR;
	} else
		scsi_ulto4b(0, data->available_data);
	scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

int
ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_receive_copy_status_lid4 *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));

	cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
	    data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	data->transfer_count_units = RCS_TC_BYTES;
	scsi_u64to8b(list_copy.curbytes, data->transfer_count);
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->length_of_the_sense_data_field = list_copy.sense_len;
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
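
/*
 * COPY OPERATION ABORT: flag the matching list as aborted so that any
 * in-flight rounds stop at the next stage transition.
 */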
int
ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_copy_operation_abort *cdb;
	struct tpc_list *list;
	int retval;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));

	cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list->abort = 1;
	mtx_unlock(&lun->lun_lock);

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (retval);
}

static uint64_t
tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss,
    uint32_t *pb, uint32_t *pbo)
{

	if (idx == 0xffff) {
		if (ss)
			*ss = list->lun->be_lun->blocksize;
		if (pb)
			*pb = list->lun->be_lun->blocksize <<
			    list->lun->be_lun->pblockexp;
		if (pbo)
			*pbo = list->lun->be_lun->blocksize *
			    list->lun->be_lun->pblockoff;
		return (list->lun->lun);
	}
	if (idx >= list->ncscd)
		return (UINT64_MAX);
	return (tpcl_resolve(list->lun->ctl_softc,
	    list->init_port, &list->cscd[idx], ss, pb, pbo));
}

static void
tpc_set_io_error_sense(struct tpc_list *list)
{
	int flen;
	uint8_t csi[4];
	uint8_t sks[3];
	uint8_t fbuf[4 + 64];

	scsi_ulto4b(list->curseg, csi);
	if (list->fwd_cscd <= 0x07ff) {
		sks[0] = SSD_SKS_SEGMENT_VALID;
		scsi_ulto2b((uint8_t *)&list->cscd[list->fwd_cscd] -
		    list->params, &sks[1]);
	} else
		sks[0] = 0;
	if (list->fwd_scsi_status) {
		fbuf[0] = 0x0c;
		fbuf[2] = list->fwd_target;
		flen = list->fwd_sense_len;
		if (flen > 64) {
			flen = 64;
			fbuf[2] |= SSD_FORWARDED_FSDT;
		}
		fbuf[1] = 2 + flen;
		fbuf[3] = list->fwd_scsi_status;
		bcopy(&list->fwd_sense_data, &fbuf[4], flen);
		flen += 4;
	} else
		flen = 0;
	ctl_set_sense(list->ctsio, /*current_error*/ 1,
	    /*sense_key*/ SSD_KEY_COPY_ABORTED,
	    /*asc*/ 0x0d, /*ascq*/ 0x01,
	    SSD_ELEM_COMMAND, sizeof(csi), csi,
	    sks[0] ? SSD_ELEM_SKS : SSD_ELEM_SKIP, sizeof(sks), sks,
	    flen ? SSD_ELEM_DESC : SSD_ELEM_SKIP, flen, fbuf,
	    SSD_ELEM_NONE);
}
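
/*
 * Process a block-to-block copy segment: resolve both CSCDs, split the
 * transfer into rounds of at most TPC_MAX_IO_SIZE bytes aligned to the
 * destination physical block geometry, and queue a read chained to a
 * write for every round.  Reentered with stage == 1 to collect results.
 */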
static int
tpc_process_b2b(struct tpc_list *list)
{
	struct scsi_ec_segment_b2b *seg;
	struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
	struct tpc_io *tior, *tiow;
	struct runl run;
	uint64_t sl, dl;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	int numlba;
	uint32_t srcblock, dstblock, pb, pbo, adj;
	uint16_t scscd, dcscd;
	uint8_t csi[4];

	scsi_ulto4b(list->curseg, csi);
	if (list->stage == 1) {
		while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tior, links);
			ctl_free_io(tior->io);
			free(tior->buf, M_CTL);
			free(tior, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			tpc_set_io_error_sense(list);
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
		return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
	scscd = scsi_2btoul(seg->src_cscd);
	dcscd = scsi_2btoul(seg->dst_cscd);
	sl = tpc_resolve(list, scscd, &srcblock, NULL, NULL);
	dl = tpc_resolve(list, dcscd, &dstblock, &pb, &pbo);
	if (sl == UINT64_MAX || dl == UINT64_MAX) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04,
		    SSD_ELEM_COMMAND, sizeof(csi), csi,
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}
	if (pbo > 0)
		pbo = pb - pbo;
	sdstp = &list->cscd[scscd].dtsp;
	if (scsi_3btoul(sdstp->block_length) != 0)
		srcblock = scsi_3btoul(sdstp->block_length);
	ddstp = &list->cscd[dcscd].dtsp;
	if (scsi_3btoul(ddstp->block_length) != 0)
		dstblock = scsi_3btoul(ddstp->block_length);
	numlba = scsi_2btoul(seg->number_of_blocks);
	if (seg->flags & EC_SEG_DC)
		numbytes = (off_t)numlba * dstblock;
	else
		numbytes = (off_t)numlba * srcblock;
	srclba = scsi_8btou64(seg->src_lba);
	dstlba = scsi_8btou64(seg->dst_lba);

//	printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
//	    (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
//	    dl, scsi_8btou64(seg->dst_lba));

	if (numbytes == 0)
		return (CTL_RETVAL_COMPLETE);

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x26, /*ascq*/ 0x0A,
		    SSD_ELEM_COMMAND, sizeof(csi), csi,
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	list->segbytes = numbytes;
	list->segsectors = numbytes / dstblock;
	donebytes = 0;
	TAILQ_INIT(&run);
	list->tbdio = 0;
	while (donebytes < numbytes) {
		roundbytes = numbytes - donebytes;
		if (roundbytes > TPC_MAX_IO_SIZE) {
			roundbytes = TPC_MAX_IO_SIZE;
			roundbytes -= roundbytes % dstblock;
			if (pb > dstblock) {
				adj = (dstlba * dstblock + roundbytes - pbo) % pb;
				if (roundbytes > adj)
					roundbytes -= adj;
			}
		}

		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->buf = malloc(roundbytes, M_CTL, M_WAITOK);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		ctl_scsi_read_write(tior->io,
		    /*data_ptr*/ tior->buf,
		    /*data_len*/ roundbytes,
		    /*read_op*/ 1,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ srclba,
		    /*num_blocks*/ roundbytes / srcblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->target = SSD_FORWARDED_SDS_EXSRC;
		tior->cscd = scscd;
		tior->lun = sl;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;
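
		/*
		 * Pair the read with a write reusing the same buffer; the
		 * write is chained onto tior->run and is queued by
		 * tpc_done() only after the read completes successfully.
		 */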
		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_read_write(tiow->io,
		    /*data_ptr*/ tior->buf,
		    /*data_len*/ roundbytes,
		    /*read_op*/ 0,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ dstlba,
		    /*num_blocks*/ roundbytes / dstblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->target = SSD_FORWARDED_SDS_EXDST;
		tiow->cscd = dcscd;
		tiow->lun = dl;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(&run, tior, rlinks);
		list->tbdio++;
		donebytes += roundbytes;
		srclba += roundbytes / srcblock;
		dstlba += roundbytes / dstblock;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}

static int
tpc_process_verify(struct tpc_list *list)
{
	struct scsi_ec_segment_verify *seg;
	struct tpc_io *tio;
	uint64_t sl;
	uint16_t cscd;
	uint8_t csi[4];

	scsi_ulto4b(list->curseg, csi);
	if (list->stage == 1) {
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			tpc_set_io_error_sense(list);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
	cscd = scsi_2btoul(seg->src_cscd);
	sl = tpc_resolve(list, cscd, NULL, NULL, NULL);
	if (sl == UINT64_MAX) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04,
		    SSD_ELEM_COMMAND, sizeof(csi), csi,
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Verify %ju\n", sl);

	if ((seg->tur & 0x01) == 0)
		return (CTL_RETVAL_COMPLETE);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->target = SSD_FORWARDED_SDS_EXSRC;
	tio->cscd = cscd;
	tio->lun = sl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}
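
/*
 * Process a REGISTER KEY segment by forwarding a PERSISTENT RESERVE
 * OUT (REGISTER) command to the destination CSCD.
 */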
static int
tpc_process_register_key(struct tpc_list *list)
{
	struct scsi_ec_segment_register_key *seg;
	struct tpc_io *tio;
	uint64_t dl;
	int datalen;
	uint16_t cscd;
	uint8_t csi[4];

	scsi_ulto4b(list->curseg, csi);
	if (list->stage == 1) {
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio->buf, M_CTL);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			tpc_set_io_error_sense(list);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
	cscd = scsi_2btoul(seg->dst_cscd);
	dl = tpc_resolve(list, cscd, NULL, NULL, NULL);
	if (dl == UINT64_MAX) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04,
		    SSD_ELEM_COMMAND, sizeof(csi), csi,
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Register Key %ju\n", dl);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	datalen = sizeof(struct scsi_per_res_out_parms);
	tio->buf = malloc(datalen, M_CTL, M_WAITOK);
	ctl_scsi_persistent_res_out(tio->io,
	    tio->buf, datalen, SPRO_REGISTER, -1,
	    scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
	    /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->target = SSD_FORWARDED_SDS_EXDST;
	tio->cscd = cscd;
	tio->lun = dl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}
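
/*
 * Helpers for scsi_range_desc arrays: total length in blocks, bounds
 * check against the LUN capacity, pairwise overlap check, and
 * translation of a linear block offset into a (range, offset) pair.
 */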
static off_t
tpc_ranges_length(struct scsi_range_desc *range, int nrange)
{
	off_t length = 0;
	int r;

	for (r = 0; r < nrange; r++)
		length += scsi_4btoul(range[r].length);
	return (length);
}

static int
tpc_check_ranges_l(struct scsi_range_desc *range, int nrange, uint64_t maxlba,
    uint64_t *lba)
{
	uint64_t b1;
	uint32_t l1;
	int i;

	for (i = 0; i < nrange; i++) {
		b1 = scsi_8btou64(range[i].lba);
		l1 = scsi_4btoul(range[i].length);
		if (b1 + l1 < b1 || b1 + l1 > maxlba + 1) {
			*lba = MAX(b1, maxlba + 1);
			return (-1);
		}
	}
	return (0);
}

static int
tpc_check_ranges_x(struct scsi_range_desc *range, int nrange)
{
	uint64_t b1, b2;
	uint32_t l1, l2;
	int i, j;

	for (i = 0; i < nrange - 1; i++) {
		b1 = scsi_8btou64(range[i].lba);
		l1 = scsi_4btoul(range[i].length);
		for (j = i + 1; j < nrange; j++) {
			b2 = scsi_8btou64(range[j].lba);
			l2 = scsi_4btoul(range[j].length);
			if (b1 + l1 > b2 && b2 + l2 > b1)
				return (-1);
		}
	}
	return (0);
}

static int
tpc_skip_ranges(struct scsi_range_desc *range, int nrange, off_t skip,
    int *srange, off_t *soffset)
{
	off_t off;
	int r;

	r = 0;
	off = 0;
	while (r < nrange) {
		if (skip - off < scsi_4btoul(range[r].length)) {
			*srange = r;
			*soffset = skip - off;
			return (0);
		}
		off += scsi_4btoul(range[r].length);
		r++;
	}
	return (-1);
}
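
/*
 * Process one chunk of a WRITE USING TOKEN operation: map the current
 * progress onto the source (token) and destination range lists, then
 * copy up to TPC_MAX_IOCHUNK_SIZE bytes as reads chained to writes,
 * returning CTL_RETVAL_QUEUED until all destination ranges are done.
 */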
static int
tpc_process_wut(struct tpc_list *list)
{
	struct tpc_io *tio, *tior, *tiow;
	struct runl run;
	int drange, srange;
	off_t doffset, soffset;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	uint32_t srcblock, dstblock, pb, pbo, adj;

	if (list->stage > 0) {
		/* Cleanup after previous rounds. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio->buf, M_CTL);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			if (list->fwd_scsi_status) {
				list->ctsio->io_hdr.status =
				    CTL_SCSI_ERROR | CTL_AUTOSENSE;
				list->ctsio->scsi_status = list->fwd_scsi_status;
				list->ctsio->sense_data = list->fwd_sense_data;
				list->ctsio->sense_len = list->fwd_sense_len;
			} else {
				ctl_set_invalid_field(list->ctsio,
				    /*sks_valid*/ 0, /*command*/ 0,
				    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
			}
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
	}

	/* Check where we are on destination ranges list. */
	if (tpc_skip_ranges(list->range, list->nrange, list->cursectors,
	    &drange, &doffset) != 0)
		return (CTL_RETVAL_COMPLETE);
	dstblock = list->lun->be_lun->blocksize;
	pb = dstblock << list->lun->be_lun->pblockexp;
	if (list->lun->be_lun->pblockoff > 0)
		pbo = pb - dstblock * list->lun->be_lun->pblockoff;
	else
		pbo = 0;

	/* Check where we are on source ranges list. */
	srcblock = list->token->blocksize;
	if (tpc_skip_ranges(list->token->range, list->token->nrange,
	    list->offset_into_rod + list->cursectors * dstblock / srcblock,
	    &srange, &soffset) != 0) {
		ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
		return (CTL_RETVAL_ERROR);
	}

	srclba = scsi_8btou64(list->token->range[srange].lba) + soffset;
	dstlba = scsi_8btou64(list->range[drange].lba) + doffset;
	numbytes = srcblock *
	    (scsi_4btoul(list->token->range[srange].length) - soffset);
	numbytes = omin(numbytes, dstblock *
	    (scsi_4btoul(list->range[drange].length) - doffset));
	if (numbytes > TPC_MAX_IOCHUNK_SIZE) {
		numbytes = TPC_MAX_IOCHUNK_SIZE;
		numbytes -= numbytes % dstblock;
		if (pb > dstblock) {
			adj = (dstlba * dstblock + numbytes - pbo) % pb;
			if (numbytes > adj)
				numbytes -= adj;
		}
	}

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
		return (CTL_RETVAL_ERROR);
	}

	list->segbytes = numbytes;
	list->segsectors = numbytes / dstblock;
//	printf("Copy chunk of %ju sectors from %ju to %ju\n", list->segsectors,
//	    srclba, dstlba);
	donebytes = 0;
	TAILQ_INIT(&run);
	list->tbdio = 0;
	TAILQ_INIT(&list->allio);
	while (donebytes < numbytes) {
		roundbytes = numbytes - donebytes;
		if (roundbytes > TPC_MAX_IO_SIZE) {
			roundbytes = TPC_MAX_IO_SIZE;
			roundbytes -= roundbytes % dstblock;
			if (pb > dstblock) {
				adj = (dstlba * dstblock + roundbytes - pbo) % pb;
				if (roundbytes > adj)
					roundbytes -= adj;
			}
		}

		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->buf = malloc(roundbytes, M_CTL, M_WAITOK);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		ctl_scsi_read_write(tior->io,
		    /*data_ptr*/ tior->buf,
		    /*data_len*/ roundbytes,
		    /*read_op*/ 1,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ srclba,
		    /*num_blocks*/ roundbytes / srcblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->lun = list->token->lun;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_read_write(tiow->io,
		    /*data_ptr*/ tior->buf,
		    /*data_len*/ roundbytes,
		    /*read_op*/ 0,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ dstlba,
		    /*num_blocks*/ roundbytes / dstblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = list->lun->lun;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(&run, tior, rlinks);
		list->tbdio++;
		donebytes += roundbytes;
		srclba += roundbytes / srcblock;
		dstlba += roundbytes / dstblock;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}
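
/*
 * WRITE USING TOKEN without an attached data token, i.e. the
 * block-zero ROD: zero every destination range with a chain of
 * WRITE SAME (NDOB) commands.
 */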
static int
tpc_process_zero_wut(struct tpc_list *list)
{
	struct tpc_io *tio, *tiow;
	struct runl run, *prun;
	int r;
	uint32_t dstblock, len;

	if (list->stage > 0) {
complete:
		/* Cleanup after previous rounds. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			if (list->fwd_scsi_status) {
				list->ctsio->io_hdr.status =
				    CTL_SCSI_ERROR | CTL_AUTOSENSE;
				list->ctsio->scsi_status = list->fwd_scsi_status;
				list->ctsio->sense_data = list->fwd_sense_data;
				list->ctsio->sense_len = list->fwd_sense_len;
			} else {
				ctl_set_invalid_field(list->ctsio,
				    /*sks_valid*/ 0, /*command*/ 0,
				    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
			}
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
		return (CTL_RETVAL_COMPLETE);
	}

	dstblock = list->lun->be_lun->blocksize;
	TAILQ_INIT(&run);
	prun = &run;
	list->tbdio = 1;
	TAILQ_INIT(&list->allio);
	list->segsectors = 0;
	for (r = 0; r < list->nrange; r++) {
		len = scsi_4btoul(list->range[r].length);
		if (len == 0)
			continue;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_write_same(tiow->io,
		    /*data_ptr*/ NULL,
		    /*data_len*/ 0,
		    /*byte2*/ SWS_NDOB,
		    /*lba*/ scsi_8btou64(list->range[r].lba),
		    /*num_blocks*/ len,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = list->lun->lun;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(prun, tiow, rlinks);
		prun = &tiow->run;
		list->segsectors += len;
	}
	list->segbytes = list->segsectors * dstblock;

	if (TAILQ_EMPTY(&run))
		goto complete;

	while ((tiow = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tiow, rlinks);
		if (tpcl_queue(tiow->io, tiow->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}
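
/*
 * Central dispatcher: run the list's segments (or WUT ranges) until
 * one of them queues I/O, fails or all complete, then record the final
 * status and finish the initiating command.
 */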
static void
tpc_process(struct tpc_list *list)
{
	struct ctl_lun *lun = list->lun;
	struct ctl_softc *softc = lun->ctl_softc;
	struct scsi_ec_segment *seg;
	struct ctl_scsiio *ctsio = list->ctsio;
	int retval = CTL_RETVAL_COMPLETE;
	uint8_t csi[4];

	if (list->service_action == EC_WUT) {
		if (list->token != NULL)
			retval = tpc_process_wut(list);
		else
			retval = tpc_process_zero_wut(list);
		if (retval == CTL_RETVAL_QUEUED)
			return;
		if (retval == CTL_RETVAL_ERROR) {
			list->error = 1;
			goto done;
		}
	} else {
//		printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
		while (list->curseg < list->nseg) {
			seg = list->seg[list->curseg];
			switch (seg->type_code) {
			case EC_SEG_B2B:
				retval = tpc_process_b2b(list);
				break;
			case EC_SEG_VERIFY:
				retval = tpc_process_verify(list);
				break;
			case EC_SEG_REGISTER_KEY:
				retval = tpc_process_register_key(list);
				break;
			default:
				scsi_ulto4b(list->curseg, csi);
				ctl_set_sense(ctsio, /*current_error*/ 1,
				    /*sense_key*/ SSD_KEY_COPY_ABORTED,
				    /*asc*/ 0x26, /*ascq*/ 0x09,
				    SSD_ELEM_COMMAND, sizeof(csi), csi,
				    SSD_ELEM_NONE);
				goto done;
			}
			if (retval == CTL_RETVAL_QUEUED)
				return;
			if (retval == CTL_RETVAL_ERROR) {
				list->error = 1;
				goto done;
			}
			list->curseg++;
			list->stage = 0;
		}
	}

	ctl_set_success(ctsio);

done:
//	printf("ZZZ done\n");
	free(list->params, M_CTL);
	list->params = NULL;
	if (list->token) {
		mtx_lock(&softc->tpc_lock);
		if (--list->token->active == 0)
			list->token->last_active = time_uptime;
		mtx_unlock(&softc->tpc_lock);
		list->token = NULL;
	}
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	} else {
		list->completed = 1;
		list->last_active = time_uptime;
		list->sense_data = ctsio->sense_data;
		list->sense_len = ctsio->sense_len;
		list->scsi_status = ctsio->scsi_status;
	}
	mtx_unlock(&lun->lun_lock);

	ctl_done((union ctl_io *)ctsio);
}

/*
 * For any sort of check condition, busy, etc., we just retry.  We do not
 * decrement the retry count for unit attention type errors.  These are
 * normal, and we want to save the retry count for "real" errors.  Otherwise,
 * we could end up with situations where a command will succeed in some
 * situations and fail in others, depending on whether a unit attention is
 * pending.  Also, some of our error recovery actions, most notably the
 * LUN reset action, will cause a unit attention.
 *
 * We can add more detail here later if necessary.
 */
static tpc_error_action
tpc_checkcond_parse(union ctl_io *io)
{
	tpc_error_action error_action;
	int error_code, sense_key, asc, ascq;

	/*
	 * Default to retrying the command.
	 */
	error_action = TPC_ERR_RETRY;

	scsi_extract_sense_len(&io->scsiio.sense_data,
	    io->scsiio.sense_len,
	    &error_code,
	    &sense_key,
	    &asc,
	    &ascq,
	    /*show_errors*/ 1);

	switch (error_code) {
	case SSD_DEFERRED_ERROR:
	case SSD_DESC_DEFERRED_ERROR:
		error_action |= TPC_ERR_NO_DECREMENT;
		break;
	case SSD_CURRENT_ERROR:
	case SSD_DESC_CURRENT_ERROR:
	default:
		switch (sense_key) {
		case SSD_KEY_UNIT_ATTENTION:
			error_action |= TPC_ERR_NO_DECREMENT;
			break;
		case SSD_KEY_HARDWARE_ERROR:
			/*
			 * This is our generic "something bad happened"
			 * error code.  It often isn't recoverable.
			 */
			if ((asc == 0x44) && (ascq == 0x00))
				error_action = TPC_ERR_FAIL;
			break;
		case SSD_KEY_NOT_READY:
			/*
			 * If the LUN is powered down, there likely isn't
			 * much point in retrying right now.
			 */
			if ((asc == 0x04) && (ascq == 0x02))
				error_action = TPC_ERR_FAIL;
			/*
			 * If the LUN is offline, there probably isn't much
			 * point in retrying, either.
			 */
			if ((asc == 0x04) && (ascq == 0x03))
				error_action = TPC_ERR_FAIL;
			break;
		}
	}
	return (error_action);
}
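
/*
 * Map an arbitrary ctl_io completion onto a tpc_error_action; only
 * SCSI CHECK CONDITION status gets the detailed parsing above, all
 * other statuses default to a plain retry.
 */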
static tpc_error_action
tpc_error_parse(union ctl_io *io)
{
	tpc_error_action error_action = TPC_ERR_RETRY;

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		switch (io->io_hdr.status & CTL_STATUS_MASK) {
		case CTL_SCSI_ERROR:
			switch (io->scsiio.scsi_status) {
			case SCSI_STATUS_CHECK_COND:
				error_action = tpc_checkcond_parse(io);
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
		break;
	case CTL_IO_TASK:
		break;
	default:
		panic("%s: invalid ctl_io type %d\n", __func__,
		    io->io_hdr.io_type);
		break;
	}
	return (error_action);
}

void
tpc_done(union ctl_io *io)
{
	struct tpc_io *tio, *tior;

	/*
	 * Very minimal retry logic.  We basically retry if we got an error
	 * back, and the retry count is greater than 0.  If we ever want
	 * more sophisticated initiator type behavior, the CAM error
	 * recovery code in ../common might be helpful.
	 */
	tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
	if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
	 && (io->io_hdr.retries > 0)) {
		ctl_io_status old_status;
		tpc_error_action error_action;

		error_action = tpc_error_parse(io);
		switch (error_action & TPC_ERR_MASK) {
		case TPC_ERR_FAIL:
			break;
		case TPC_ERR_RETRY:
		default:
			if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
				io->io_hdr.retries--;
			old_status = io->io_hdr.status;
			io->io_hdr.status = CTL_STATUS_NONE;
			io->io_hdr.flags &= ~CTL_FLAG_ABORT;
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
				printf("%s: error returned from tpcl_queue()!\n",
				    __func__);
				io->io_hdr.status = old_status;
			} else
				return;
		}
	}

	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
		tio->list->error = 1;
		if (io->io_hdr.io_type == CTL_IO_SCSI &&
		    (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR) {
			tio->list->fwd_scsi_status = io->scsiio.scsi_status;
			tio->list->fwd_sense_data = io->scsiio.sense_data;
			tio->list->fwd_sense_len = io->scsiio.sense_len;
			tio->list->fwd_target = tio->target;
			tio->list->fwd_cscd = tio->cscd;
		}
	} else
		atomic_add_int(&tio->list->curops, 1);
	if (!tio->list->error && !tio->list->abort) {
		while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
			TAILQ_REMOVE(&tio->run, tior, rlinks);
			atomic_add_int(&tio->list->tbdio, 1);
			if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
				panic("tpcl_queue() error");
		}
	}
	if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
		tpc_process(tio->list);
}
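
/*
 * EXTENDED COPY (LID1): fetch the parameter list from the initiator,
 * validate the CSCD and segment descriptors, build a tpc_list and
 * start processing it.
 */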
int
ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid1_data *data;
	struct scsi_ec_cscd *cscd;
	struct scsi_ec_segment *seg;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	const char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));

	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len == 0) {
		ctl_set_success(ctsio);
		goto done;
	}
	if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
	    len > sizeof(struct scsi_extended_copy_lid1_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr;
	lencscd = scsi_2btoul(data->cscd_list_length);
	lenseg = scsi_4btoul(data->segment_list_length);
	leninl = scsi_4btoul(data->inline_data_length);
	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
		goto done;
	}
	if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
		goto done;
	}
	if (lencscd + lenseg > TPC_MAX_LIST ||
	    leninl > TPC_MAX_INLINE ||
	    len < sizeof(struct scsi_extended_copy_lid1_data) +
	    lencscd + lenseg + leninl) {
		ctl_set_param_len_error(ctsio);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	value = dnvlist_get_string(lun->be_lun->options, "insecure_tpc", NULL);
	if (value != NULL && strcmp(value, "on") == 0)
		list->init_port = -1;
	else
		list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = data->list_identifier;
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
	ptr = &data->data[0];
	for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) {
		cscd = (struct scsi_ec_cscd *)(ptr + off);
		if (cscd->type_code != EC_CSCD_ID) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE);
			goto done;
		}
	}
	ptr = &data->data[lencscd];
	for (nseg = 0, off = 0; off < lenseg; nseg++) {
		if (nseg >= TPC_MAX_SEGS) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
			goto done;
		}
		seg = (struct scsi_ec_segment *)(ptr + off);
		if (seg->type_code != EC_SEG_B2B &&
		    seg->type_code != EC_SEG_VERIFY &&
		    seg->type_code != EC_SEG_REGISTER_KEY) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
			goto done;
		}
		list->seg[nseg] = seg;
		off += sizeof(struct scsi_ec_segment) +
		    scsi_2btoul(seg->descr_length);
	}
	list->inl = &data->data[lencscd + lenseg];
	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
	list->nseg = nseg;
	list->leninl = leninl;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
		if (tlist != NULL && !tlist->completed) {
			mtx_unlock(&lun->lun_lock);
			free(list, M_CTL);
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
			    /*bit*/ 0);
			goto done;
		}
		if (tlist != NULL) {
			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
			free(tlist, M_CTL);
		}
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
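
/*
 * EXTENDED COPY (LID4): identical to LID1 processing except for the
 * four-byte list identifier and the LID4 parameter list layout.
 */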

int
ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid4_data *data;
	struct scsi_ec_cscd *cscd;
	struct scsi_ec_segment *seg;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	const char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));

	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len == 0) {
		ctl_set_success(ctsio);
		goto done;
	}
	if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
	    len > sizeof(struct scsi_extended_copy_lid4_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr;
	lencscd = scsi_2btoul(data->cscd_list_length);
	lenseg = scsi_2btoul(data->segment_list_length);
	leninl = scsi_2btoul(data->inline_data_length);
	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
		goto done;
	}
	if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
		goto done;
	}
	if (lencscd + lenseg > TPC_MAX_LIST ||
	    leninl > TPC_MAX_INLINE ||
	    len < sizeof(struct scsi_extended_copy_lid4_data) +
	    lencscd + lenseg + leninl) {
		ctl_set_param_len_error(ctsio);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	value = dnvlist_get_string(lun->be_lun->options, "insecure_tpc", NULL);
	if (value != NULL && strcmp(value, "on") == 0)
		list->init_port = -1;
	else
		list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(data->list_identifier);
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
	ptr = &data->data[0];
	for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) {
		cscd = (struct scsi_ec_cscd *)(ptr + off);
		if (cscd->type_code != EC_CSCD_ID) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE);
			goto done;
		}
	}
	ptr = &data->data[lencscd];
	for (nseg = 0, off = 0; off < lenseg; nseg++) {
		if (nseg >= TPC_MAX_SEGS) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
			goto done;
		}
		seg = (struct scsi_ec_segment *)(ptr + off);
		if (seg->type_code != EC_SEG_B2B &&
		    seg->type_code != EC_SEG_VERIFY &&
		    seg->type_code != EC_SEG_REGISTER_KEY) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
			goto done;
		}
		list->seg[nseg] = seg;
		off += sizeof(struct scsi_ec_segment) +
		    scsi_2btoul(seg->descr_length);
	}
	list->inl = &data->data[lencscd + lenseg];
	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
	list->nseg = nseg;
	list->leninl = leninl;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
		if (tlist != NULL && !tlist->completed) {
			mtx_unlock(&lun->lun_lock);
			free(list, M_CTL);
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
			    /*bit*/ 0);
			goto done;
		}
		if (tlist != NULL) {
			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
			free(tlist, M_CTL);
		}
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
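
/*
 * Rough layout of the 512-byte AUR ROD token built below.  Offsets are
 * into token->body[], which follows the 8-byte scsi_token header (type
 * and length fields); this is a reading aid, not a normative map:
 *
 *	  0: locally unique token id
 *	  8: CSCD ID descriptor for the source LUN
 *	 48: number of bytes represented by the token
 *	 80: device type specific data (READ CAPACITY(16) payload
 *	     without its first field)
 *	120: target port device id, padded with random bytes to the end
 */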

static void
tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len,
    struct scsi_token *token)
{
	static int id = 0;
	struct scsi_vpd_id_descriptor *idd = NULL;
	struct scsi_ec_cscd_id *cscd;
	struct scsi_read_capacity_data_long *dtsd;
	int targid_len;

	scsi_ulto4b(ROD_TYPE_AUR, token->type);
	scsi_ulto2b(0x01f8, token->length);
	scsi_u64to8b(atomic_fetchadd_int(&id, 1), &token->body[0]);
	if (lun->lun_devid)
		idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
		    lun->lun_devid->data, lun->lun_devid->len,
		    scsi_devid_is_lun_naa);
	if (idd == NULL && lun->lun_devid)
		idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
		    lun->lun_devid->data, lun->lun_devid->len,
		    scsi_devid_is_lun_eui64);
	if (idd != NULL) {
		cscd = (struct scsi_ec_cscd_id *)&token->body[8];
		cscd->type_code = EC_CSCD_ID;
		cscd->luidt_pdt = T_DIRECT;
		memcpy(&cscd->codeset, idd, 4 + idd->length);
		scsi_ulto3b(lun->be_lun->blocksize, cscd->dtsp.block_length);
	}
	scsi_u64to8b(0, &token->body[40]);	/* XXX: Should be 128bit value. */
	scsi_u64to8b(len, &token->body[48]);

	/* ROD token device type specific data (RC16 without first field) */
	dtsd = (struct scsi_read_capacity_data_long *)&token->body[88 - 8];
	scsi_ulto4b(lun->be_lun->blocksize, dtsd->length);
	dtsd->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
	scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, dtsd->lalba_lbp);
	if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
		dtsd->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;

	if (port->target_devid) {
		targid_len = port->target_devid->len;
		memcpy(&token->body[120], port->target_devid->data, targid_len);
	} else
		targid_len = 32;
	arc4rand(&token->body[120 + targid_len], 384 - targid_len, 0);
}
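
/*
 * Token-based copy as driven by an initiator (a sketch of the usual
 * SPC-4 command sequence, not something this file issues itself):
 *
 *	POPULATE TOKEN			(source LUN, ranges to represent)
 *	RECEIVE ROD TOKEN INFORMATION	(source LUN, fetch the 512-byte token)
 *	WRITE USING TOKEN		(destination LUN, token plus ranges)
 *	RECEIVE ROD TOKEN INFORMATION	(destination LUN, poll copy status)
 *
 * The handlers for these service actions follow.
 */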

int
ctl_populate_token(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct ctl_port *port = CTL_PORT(ctsio);
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_populate_token *cdb;
	struct scsi_populate_token_data *data;
	struct tpc_list *list, *tlist;
	struct tpc_token *token;
	uint64_t lba;
	int len, lendata, lendesc;

	CTL_DEBUG_PRINT(("ctl_populate_token\n"));

	cdb = (struct scsi_populate_token *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_populate_token_data) ||
	    len > sizeof(struct scsi_populate_token_data) +
	    TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_populate_token_data *)ctsio->kern_data_ptr;
	lendata = scsi_2btoul(data->length);
	if (lendata < sizeof(struct scsi_populate_token_data) - 2 +
	    sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	lendesc = scsi_2btoul(data->range_descriptor_length);
	if (lendesc < sizeof(struct scsi_range_desc) ||
	    len < sizeof(struct scsi_populate_token_data) + lendesc ||
	    lendata < sizeof(struct scsi_populate_token_data) - 2 + lendesc) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 14, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
/*
	printf("PT(list=%u) flags=%x to=%d rt=%x len=%x\n",
	    scsi_4btoul(cdb->list_identifier),
	    data->flags, scsi_4btoul(data->inactivity_timeout),
	    scsi_4btoul(data->rod_type),
	    scsi_2btoul(data->range_descriptor_length));
*/

	/* Validate INACTIVITY TIMEOUT field */
	if (scsi_4btoul(data->inactivity_timeout) > TPC_MAX_TOKEN_TIMEOUT) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 4, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}

	/* Validate ROD TYPE field */
	if ((data->flags & EC_PT_RTV) &&
	    scsi_4btoul(data->rod_type) != ROD_TYPE_AUR) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/* Validate list of ranges */
	if (tpc_check_ranges_l(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc),
	    lun->be_lun->maxlba, &lba) != 0) {
		ctl_set_lba_out_of_range(ctsio, lba);
		goto done;
	}
	if (tpc_check_ranges_x(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc)) != 0) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(cdb->list_identifier);
	list->flags = data->flags;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
	if (tlist != NULL && !tlist->completed) {
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}
	if (tlist != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
		free(tlist, M_CTL);
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	token = malloc(sizeof(*token), M_CTL, M_WAITOK | M_ZERO);
	token->lun = lun->lun;
	token->blocksize = lun->be_lun->blocksize;
	token->params = ctsio->kern_data_ptr;
	token->range = &data->desc[0];
	token->nrange = scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc);
	list->cursectors = tpc_ranges_length(token->range, token->nrange);
	list->curbytes = (off_t)list->cursectors * lun->be_lun->blocksize;
	tpc_create_token(lun, port, list->curbytes,
	    (struct scsi_token *)token->token);
	token->active = 0;
	token->last_active = time_uptime;
	token->timeout = scsi_4btoul(data->inactivity_timeout);
	if (token->timeout == 0)
		token->timeout = TPC_DFL_TOKEN_TIMEOUT;
	else if (token->timeout < TPC_MIN_TOKEN_TIMEOUT)
		token->timeout = TPC_MIN_TOKEN_TIMEOUT;
	memcpy(list->res_token, token->token, sizeof(list->res_token));
	list->res_token_valid = 1;
	list->curseg = 0;
	list->completed = 1;
	list->last_active = time_uptime;
	mtx_lock(&softc->tpc_lock);
	TAILQ_INSERT_TAIL(&softc->tpc_tokens, token, links);
	mtx_unlock(&softc->tpc_lock);
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
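
/*
 * Note on token lifetime: ctl_populate_token() above rejects requested
 * inactivity timeouts above TPC_MAX_TOKEN_TIMEOUT, substitutes
 * TPC_DFL_TOKEN_TIMEOUT for zero, and raises anything below
 * TPC_MIN_TOKEN_TIMEOUT to that floor.  ctl_write_using_token() below
 * zeroes the timeout when the initiator sets EC_WUT_DEL_TKN, so the
 * token expires as soon as it goes inactive.
 */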

int
ctl_write_using_token(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_write_using_token *cdb;
	struct scsi_write_using_token_data *data;
	struct tpc_list *list, *tlist;
	struct tpc_token *token;
	uint64_t lba;
	int len, lendata, lendesc;

	CTL_DEBUG_PRINT(("ctl_write_using_token\n"));

	cdb = (struct scsi_write_using_token *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_write_using_token_data) ||
	    len > sizeof(struct scsi_write_using_token_data) +
	    TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr;
	lendata = scsi_2btoul(data->length);
	if (lendata < sizeof(struct scsi_write_using_token_data) - 2 +
	    sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	lendesc = scsi_2btoul(data->range_descriptor_length);
	if (lendesc < sizeof(struct scsi_range_desc) ||
	    len < sizeof(struct scsi_write_using_token_data) + lendesc ||
	    lendata < sizeof(struct scsi_write_using_token_data) - 2 + lendesc) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 534, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
/*
	printf("WUT(list=%u) flags=%x off=%ju len=%x\n",
	    scsi_4btoul(cdb->list_identifier),
	    data->flags, scsi_8btou64(data->offset_into_rod),
	    scsi_2btoul(data->range_descriptor_length));
*/

	/* Validate list of ranges */
	if (tpc_check_ranges_l(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc),
	    lun->be_lun->maxlba, &lba) != 0) {
		ctl_set_lba_out_of_range(ctsio, lba);
		goto done;
	}
	if (tpc_check_ranges_x(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc)) != 0) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(cdb->list_identifier);
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->range = &data->desc[0];
	list->nrange = scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc);
	list->offset_into_rod = scsi_8btou64(data->offset_into_rod);
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
	if (tlist != NULL && !tlist->completed) {
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}
	if (tlist != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
		free(tlist, M_CTL);
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	/* Block device zero ROD token -> no token. */
	if (scsi_4btoul(data->rod_token) == ROD_TYPE_BLOCK_ZERO) {
		tpc_process(list);
		return (CTL_RETVAL_COMPLETE);
	}

	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
		if (memcmp(token->token, data->rod_token,
		    sizeof(data->rod_token)) == 0)
			break;
	}
	if (token != NULL) {
		token->active++;
		list->token = token;
		if (data->flags & EC_WUT_DEL_TKN)
			token->timeout = 0;
	}
	mtx_unlock(&softc->tpc_lock);
	if (token == NULL) {
		mtx_lock(&lun->lun_lock);
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x23, /*ascq*/ 0x04, SSD_ELEM_NONE);
		goto done;
	}

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
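
/*
 * RECEIVE ROD TOKEN INFORMATION reports the status of a PT or WUT list:
 * the copy command status, any forwarded sense data, and, when a token
 * was created, the ROD token itself in a 4-byte-length-prefixed area
 * following the sense bytes (built at the end of the function below).
 */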
CTL_DEBUG_PRINT(("ctl_write_using_token\n")); 2194 2195 cdb = (struct scsi_write_using_token *)ctsio->cdb; 2196 len = scsi_4btoul(cdb->length); 2197 2198 if (len < sizeof(struct scsi_write_using_token_data) || 2199 len > sizeof(struct scsi_write_using_token_data) + 2200 TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) { 2201 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 2202 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0); 2203 goto done; 2204 } 2205 2206 /* 2207 * If we've got a kernel request that hasn't been malloced yet, 2208 * malloc it and tell the caller the data buffer is here. 2209 */ 2210 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 2211 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 2212 ctsio->kern_data_len = len; 2213 ctsio->kern_total_len = len; 2214 ctsio->kern_rel_offset = 0; 2215 ctsio->kern_sg_entries = 0; 2216 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 2217 ctsio->be_move_done = ctl_config_move_done; 2218 ctl_datamove((union ctl_io *)ctsio); 2219 2220 return (CTL_RETVAL_COMPLETE); 2221 } 2222 2223 data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr; 2224 lendata = scsi_2btoul(data->length); 2225 if (lendata < sizeof(struct scsi_write_using_token_data) - 2 + 2226 sizeof(struct scsi_range_desc)) { 2227 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 2228 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); 2229 goto done; 2230 } 2231 lendesc = scsi_2btoul(data->range_descriptor_length); 2232 if (lendesc < sizeof(struct scsi_range_desc) || 2233 len < sizeof(struct scsi_write_using_token_data) + lendesc || 2234 lendata < sizeof(struct scsi_write_using_token_data) - 2 + lendesc) { 2235 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 2236 /*field*/ 534, /*bit_valid*/ 0, /*bit*/ 0); 2237 goto done; 2238 } 2239 /* 2240 printf("WUT(list=%u) flags=%x off=%ju len=%x\n", 2241 scsi_4btoul(cdb->list_identifier), 2242 data->flags, scsi_8btou64(data->offset_into_rod), 2243 scsi_2btoul(data->range_descriptor_length)); 2244 */ 2245 2246 /* Validate list of ranges */ 2247 if (tpc_check_ranges_l(&data->desc[0], 2248 scsi_2btoul(data->range_descriptor_length) / 2249 sizeof(struct scsi_range_desc), 2250 lun->be_lun->maxlba, &lba) != 0) { 2251 ctl_set_lba_out_of_range(ctsio, lba); 2252 goto done; 2253 } 2254 if (tpc_check_ranges_x(&data->desc[0], 2255 scsi_2btoul(data->range_descriptor_length) / 2256 sizeof(struct scsi_range_desc)) != 0) { 2257 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, 2258 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 2259 /*bit*/ 0); 2260 goto done; 2261 } 2262 2263 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO); 2264 list->service_action = cdb->service_action; 2265 list->init_port = ctsio->io_hdr.nexus.targ_port; 2266 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus); 2267 list->list_id = scsi_4btoul(cdb->list_identifier); 2268 list->flags = data->flags; 2269 list->params = ctsio->kern_data_ptr; 2270 list->range = &data->desc[0]; 2271 list->nrange = scsi_2btoul(data->range_descriptor_length) / 2272 sizeof(struct scsi_range_desc); 2273 list->offset_into_rod = scsi_8btou64(data->offset_into_rod); 2274 list->ctsio = ctsio; 2275 list->lun = lun; 2276 mtx_lock(&lun->lun_lock); 2277 tlist = tpc_find_list(lun, list->list_id, list->init_idx); 2278 if (tlist != NULL && !tlist->completed) { 2279 mtx_unlock(&lun->lun_lock); 2280 free(list, M_CTL); 2281 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 2282 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 2283 /*bit*/ 0); 2284 goto done; 2285 } 2286 if (tlist != 

	token_len = list_copy.res_token_valid ?
	    2 + sizeof(list_copy.res_token) : 0;
	total_len = sizeof(*data) + list_copy.sense_len + 4 + token_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len +
	    4 + token_len, data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	data->transfer_count_units = RCS_TC_LBAS;
	scsi_u64to8b(list_copy.cursectors, data->transfer_count);
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->length_of_the_sense_data_field = list_copy.sense_len;
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ptr = &data->sense_data[data->length_of_the_sense_data_field];
	scsi_ulto4b(token_len, &ptr[0]);
	if (list_copy.res_token_valid) {
		scsi_ulto2b(0, &ptr[4]);
		memcpy(&ptr[6], list_copy.res_token, sizeof(list_copy.res_token));
	}
/*
	printf("RRTI(list=%u) valid=%d\n",
	    scsi_4btoul(cdb->list_identifier), list_copy.res_token_valid);
*/
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
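
/*
 * REPORT ALL ROD TOKENS below returns only the first 96 bytes of each
 * token (the management view held in rod_management_token_list), and
 * caps the report at 512 tokens per request.
 */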

int
ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct scsi_report_all_rod_tokens *cdb;
	struct scsi_report_all_rod_tokens_data *data;
	struct tpc_token *token;
	int retval;
	int alloc_len, total_len, tokens, i;

	CTL_DEBUG_PRINT(("ctl_report_all_rod_tokens\n"));

	cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	tokens = 0;
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links)
		tokens++;
	mtx_unlock(&softc->tpc_lock);
	if (tokens > 512)
		tokens = 512;

	total_len = sizeof(*data) + tokens * 96;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr;
	i = 0;
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
		if (i >= tokens)
			break;
		memcpy(&data->rod_management_token_list[i * 96],
		    token->token, 96);
		i++;
	}
	mtx_unlock(&softc->tpc_lock);
	scsi_ulto4b(sizeof(*data) - 4 + i * 96, data->available_data);
/*
	printf("RART tokens=%d\n", i);
*/
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}