/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2014-2021 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>
#include <machine/atomic.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_tpc.h>
#include <cam/ctl/ctl_error.h>

#define	TPC_MAX_CSCDS	64
#define	TPC_MAX_SEGS	64
#define	TPC_MAX_SEG	0
#define	TPC_MAX_LIST	8192
#define	TPC_MAX_INLINE	0
#define	TPC_MAX_LISTS	255
#define	TPC_MAX_IO_SIZE	(8 * MIN(1024 * 1024, MAX(128 * 1024, maxphys)))
#define	TPC_MAX_IOCHUNK_SIZE	(TPC_MAX_IO_SIZE * 4)
#define	TPC_MIN_TOKEN_TIMEOUT	1
#define	TPC_DFL_TOKEN_TIMEOUT	60
#define	TPC_MAX_TOKEN_TIMEOUT	600

MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");
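
/*
 * Explanatory note on the limits above (derived from the definitions):
 * TPC_MAX_IO_SIZE clamps each copy-engine I/O to eight times maxphys,
 * itself bounded to the [128KB, 1MB] range, so a single I/O is between
 * 1MB and 8MB, and a WRITE USING TOKEN chunk (TPC_MAX_IOCHUNK_SIZE)
 * covers four such I/Os.  For example, assuming the common maxphys of
 * 1MB, each I/O is 8MB and each chunk is 32MB.  These values are
 * advertised to initiators through the TPC VPD page and RECEIVE COPY
 * OPERATING PARAMETERS below.
 */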

typedef enum {
	TPC_ERR_RETRY		= 0x000,
	TPC_ERR_FAIL		= 0x001,
	TPC_ERR_MASK		= 0x0ff,
	TPC_ERR_NO_DECREMENT	= 0x100
} tpc_error_action;

struct tpc_list;
TAILQ_HEAD(runl, tpc_io);
struct tpc_io {
	union ctl_io		*io;
	uint8_t			 target;
	uint32_t		 cscd;
	uint64_t		 lun;
	uint8_t			*buf;
	struct tpc_list		*list;
	struct runl		 run;
	TAILQ_ENTRY(tpc_io)	 rlinks;
	TAILQ_ENTRY(tpc_io)	 links;
};

struct tpc_token {
	uint8_t			 token[512];
	uint64_t		 lun;
	uint32_t		 blocksize;
	uint8_t			*params;
	struct scsi_range_desc	*range;
	int			 nrange;
	int			 active;
	time_t			 last_active;
	uint32_t		 timeout;
	TAILQ_ENTRY(tpc_token)	 links;
};

struct tpc_list {
	uint8_t			 service_action;
	int			 init_port;
	uint32_t		 init_idx;
	uint32_t		 list_id;
	uint8_t			 flags;
	uint8_t			*params;
	struct scsi_ec_cscd	*cscd;
	struct scsi_ec_segment	*seg[TPC_MAX_SEGS];
	uint8_t			*inl;
	int			 ncscd;
	int			 nseg;
	int			 leninl;
	struct tpc_token	*token;
	struct scsi_range_desc	*range;
	int			 nrange;
	off_t			 offset_into_rod;

	int			 curseg;
	off_t			 cursectors;
	off_t			 curbytes;
	int			 curops;
	int			 stage;
	off_t			 segsectors;
	off_t			 segbytes;
	int			 tbdio;
	int			 error;
	int			 abort;
	int			 completed;
	time_t			 last_active;
	TAILQ_HEAD(, tpc_io)	 allio;
	struct scsi_sense_data	 fwd_sense_data;
	uint8_t			 fwd_sense_len;
	uint8_t			 fwd_scsi_status;
	uint8_t			 fwd_target;
	uint16_t		 fwd_cscd;
	struct scsi_sense_data	 sense_data;
	uint8_t			 sense_len;
	uint8_t			 scsi_status;
	struct ctl_scsiio	*ctsio;
	struct ctl_lun		*lun;
	int			 res_token_valid;
	uint8_t			 res_token[512];
	TAILQ_ENTRY(tpc_list)	 links;
};

static void
tpc_timeout(void *arg)
{
	struct ctl_softc *softc = arg;
	struct ctl_lun *lun;
	struct tpc_token *token, *ttoken;
	struct tpc_list *list, *tlist;

	/* Free completed lists with expired timeout. */
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
			if (!list->completed || time_uptime < list->last_active +
			    TPC_DFL_TOKEN_TIMEOUT)
				continue;
			TAILQ_REMOVE(&lun->tpc_lists, list, links);
			free(list, M_CTL);
		}
		mtx_unlock(&lun->lun_lock);
	}

	/* Free inactive ROD tokens with expired timeout. */
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
		if (token->active ||
		    time_uptime < token->last_active + token->timeout + 1)
			continue;
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
	callout_schedule_sbt(&softc->tpc_timeout, SBT_1S, SBT_1S, 0);
}

void
ctl_tpc_init(struct ctl_softc *softc)
{

	mtx_init(&softc->tpc_lock, "CTL TPC mutex", NULL, MTX_DEF);
	TAILQ_INIT(&softc->tpc_tokens);
	callout_init_mtx(&softc->tpc_timeout, &softc->ctl_lock, 0);
	callout_reset_sbt(&softc->tpc_timeout, SBT_1S, SBT_1S,
	    tpc_timeout, softc, 0);
}
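
/*
 * Explanatory note: the callout above fires every second.  A completed
 * list is reaped once it has been idle for TPC_DFL_TOKEN_TIMEOUT (60)
 * seconds; an inactive ROD token is reaped once time_uptime passes
 * last_active + timeout + 1, where timeout is the per-token inactivity
 * timeout (presumably set when the token is created; POPULATE TOKEN
 * handling is outside this file's excerpt, but its bounds are
 * TPC_MIN_TOKEN_TIMEOUT and TPC_MAX_TOKEN_TIMEOUT above).
 */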

void
ctl_tpc_shutdown(struct ctl_softc *softc)
{
	struct tpc_token *token;

	callout_drain(&softc->tpc_timeout);

	/* Free ROD tokens. */
	mtx_lock(&softc->tpc_lock);
	while ((token = TAILQ_FIRST(&softc->tpc_tokens)) != NULL) {
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
	mtx_destroy(&softc->tpc_lock);
}

void
ctl_tpc_lun_init(struct ctl_lun *lun)
{

	TAILQ_INIT(&lun->tpc_lists);
}

void
ctl_tpc_lun_clear(struct ctl_lun *lun, uint32_t initidx)
{
	struct tpc_list *list, *tlist;

	TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
		if (initidx != -1 && list->init_idx != initidx)
			continue;
		if (!list->completed)
			continue;
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
}

void
ctl_tpc_lun_shutdown(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	struct tpc_list *list;
	struct tpc_token *token, *ttoken;

	/* Free lists for this LUN. */
	while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		KASSERT(list->completed,
		    ("Not completed TPC (%p) on shutdown", list));
		free(list, M_CTL);
	}

	/* Free ROD tokens for this LUN. */
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
		if (token->lun != lun->lun || token->active)
			continue;
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
}

int
ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_vpd_tpc *tpc_ptr;
	struct scsi_vpd_tpc_descriptor *d_ptr;
	struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr;
	struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
	struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
	struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
	struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
	struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
	struct scsi_vpd_tpc_descriptor_rtf *rtf_ptr;
	struct scsi_vpd_tpc_descriptor_rtf_block *rtfb_ptr;
	struct scsi_vpd_tpc_descriptor_srt *srt_ptr;
	struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr;
	struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
	int data_len;

	data_len = sizeof(struct scsi_vpd_tpc) +
	    sizeof(struct scsi_vpd_tpc_descriptor_bdrl) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
	     2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 11, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_pd) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_rtf) +
	    sizeof(struct scsi_vpd_tpc_descriptor_rtf_block) +
	    sizeof(struct scsi_vpd_tpc_descriptor_srt) +
	    2*sizeof(struct scsi_vpd_tpc_descriptor_srtd) +
	    sizeof(struct scsi_vpd_tpc_descriptor_gco);

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	tpc_ptr->page_code = SVPD_SCSI_TPC;
	scsi_ulto2b(data_len - 4, tpc_ptr->page_length);
313 */ 314 if (lun != NULL) 315 tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 316 lun->be_lun->lun_type; 317 else 318 tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 319 tpc_ptr->page_code = SVPD_SCSI_TPC; 320 scsi_ulto2b(data_len - 4, tpc_ptr->page_length); 321 322 /* Block Device ROD Limits */ 323 d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0]; 324 bdrl_ptr = (struct scsi_vpd_tpc_descriptor_bdrl *)d_ptr; 325 scsi_ulto2b(SVPD_TPC_BDRL, bdrl_ptr->desc_type); 326 scsi_ulto2b(sizeof(*bdrl_ptr) - 4, bdrl_ptr->desc_length); 327 scsi_ulto2b(TPC_MAX_SEGS, bdrl_ptr->maximum_ranges); 328 scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT, 329 bdrl_ptr->maximum_inactivity_timeout); 330 scsi_ulto4b(TPC_DFL_TOKEN_TIMEOUT, 331 bdrl_ptr->default_inactivity_timeout); 332 scsi_u64to8b(0, bdrl_ptr->maximum_token_transfer_size); 333 scsi_u64to8b(0, bdrl_ptr->optimal_transfer_count); 334 335 /* Supported commands */ 336 d_ptr = (struct scsi_vpd_tpc_descriptor *) 337 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 338 sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr; 339 scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type); 340 sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 11; 341 scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length); 342 scd_ptr = &sc_ptr->descr[0]; 343 scd_ptr->opcode = EXTENDED_COPY; 344 scd_ptr->sa_length = 5; 345 scd_ptr->supported_service_actions[0] = EC_EC_LID1; 346 scd_ptr->supported_service_actions[1] = EC_EC_LID4; 347 scd_ptr->supported_service_actions[2] = EC_PT; 348 scd_ptr->supported_service_actions[3] = EC_WUT; 349 scd_ptr->supported_service_actions[4] = EC_COA; 350 scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *) 351 &scd_ptr->supported_service_actions[scd_ptr->sa_length]; 352 scd_ptr->opcode = RECEIVE_COPY_STATUS; 353 scd_ptr->sa_length = 6; 354 scd_ptr->supported_service_actions[0] = RCS_RCS_LID1; 355 scd_ptr->supported_service_actions[1] = RCS_RCFD; 356 scd_ptr->supported_service_actions[2] = RCS_RCS_LID4; 357 scd_ptr->supported_service_actions[3] = RCS_RCOP; 358 scd_ptr->supported_service_actions[4] = RCS_RRTI; 359 scd_ptr->supported_service_actions[5] = RCS_RART; 360 361 /* Parameter data. 

	/* Parameter data. */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
	scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);

	/* Supported Descriptors */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
	sd_ptr->list_length = 4;
	sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
	sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
	sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
	sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;

	/* Supported CSCD Descriptor IDs */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
	scsi_ulto2b(2, sdid_ptr->list_length);
	scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);

	/* ROD Token Features */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	rtf_ptr = (struct scsi_vpd_tpc_descriptor_rtf *)d_ptr;
	scsi_ulto2b(SVPD_TPC_RTF, rtf_ptr->desc_type);
	scsi_ulto2b(sizeof(*rtf_ptr) - 4 + sizeof(*rtfb_ptr), rtf_ptr->desc_length);
	rtf_ptr->remote_tokens = 0;
	scsi_ulto4b(TPC_MIN_TOKEN_TIMEOUT, rtf_ptr->minimum_token_lifetime);
	scsi_ulto4b(UINT32_MAX, rtf_ptr->maximum_token_lifetime);
	scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
	    rtf_ptr->maximum_token_inactivity_timeout);
	scsi_ulto2b(sizeof(*rtfb_ptr), rtf_ptr->type_specific_features_length);
	rtfb_ptr = (struct scsi_vpd_tpc_descriptor_rtf_block *)
	    &rtf_ptr->type_specific_features;
	rtfb_ptr->type_format = SVPD_TPC_RTF_BLOCK;
	scsi_ulto2b(sizeof(*rtfb_ptr) - 4, rtfb_ptr->desc_length);
	scsi_ulto2b(0, rtfb_ptr->optimal_length_granularity);
	scsi_u64to8b(0, rtfb_ptr->maximum_bytes);
	scsi_u64to8b(0, rtfb_ptr->optimal_bytes);
	scsi_u64to8b(UINT64_MAX, rtfb_ptr->optimal_bytes_to_token_per_segment);
	scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
	    rtfb_ptr->optimal_bytes_from_token_per_segment);

	/* Supported ROD Tokens */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	srt_ptr = (struct scsi_vpd_tpc_descriptor_srt *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SRT, srt_ptr->desc_type);
	scsi_ulto2b(sizeof(*srt_ptr) - 4 + 2*sizeof(*srtd_ptr), srt_ptr->desc_length);
	scsi_ulto2b(2*sizeof(*srtd_ptr), srt_ptr->rod_type_descriptors_length);
	srtd_ptr = (struct scsi_vpd_tpc_descriptor_srtd *)
	    &srt_ptr->rod_type_descriptors;
	scsi_ulto4b(ROD_TYPE_AUR, srtd_ptr->rod_type);
	srtd_ptr->flags = SVPD_TPC_SRTD_TIN | SVPD_TPC_SRTD_TOUT;
	scsi_ulto2b(0, srtd_ptr->preference_indicator);
	srtd_ptr++;
	scsi_ulto4b(ROD_TYPE_BLOCK_ZERO, srtd_ptr->rod_type);
	srtd_ptr->flags = SVPD_TPC_SRTD_TIN;
	scsi_ulto2b(0, srtd_ptr->preference_indicator);

	/* General Copy Operations */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
	scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
	scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
	scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
	gco_ptr->data_segment_granularity = 0;
	gco_ptr->inline_data_granularity = 0;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}

int
ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
{
	struct scsi_receive_copy_operating_parameters *cdb;
	struct scsi_receive_copy_operating_parameters_data *data;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_receive_copy_operating_parameters\n"));

	cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;

	retval = CTL_RETVAL_COMPLETE;

	total_len = sizeof(*data) + 4;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
	data->snlid = RCOP_SNLID;
	scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
	scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
	scsi_ulto4b(0, data->held_data_limit);
	scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
	scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
	data->maximum_concurrent_copies = TPC_MAX_LISTS;
	data->data_segment_granularity = 0;
	data->inline_data_granularity = 0;
	data->held_data_granularity = 0;
	data->implemented_descriptor_list_length = 4;
	data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
	data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
	data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
	data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

static struct tpc_list *
tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx)
{
	struct tpc_list *list;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id &&
		    list->init_idx == init_idx)
			break;
	}
	return (list);
}
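
/*
 * Explanatory note: tpc_find_list() matches on both the list ID and the
 * initiator index, so different initiators may reuse the same list ID
 * without conflict.  Lists created with EC_LIST_ID_USAGE_NONE are
 * intentionally unmatchable here: they hold no status after completion
 * and are freed in tpc_process() as soon as the copy finishes.
 */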

int
ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_receive_copy_status_lid1 *cdb;
	struct scsi_receive_copy_status_lid1_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));

	cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4, data->available_data);
	if (list_copy.completed) {
		if (list_copy.error || list_copy.abort)
			data->copy_command_status = RCS_CCS_ERROR;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG;
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	if (list_copy.curbytes <= UINT32_MAX) {
		data->transfer_count_units = RCS_TC_BYTES;
		scsi_ulto4b(list_copy.curbytes, data->transfer_count);
	} else {
		data->transfer_count_units = RCS_TC_MBYTES;
		scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

int
ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_receive_copy_failure_details *cdb;
	struct scsi_receive_copy_failure_details_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));

	cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL || !list->completed) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	TAILQ_REMOVE(&lun->tpc_lists, list, links);
	free(list, M_CTL);
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
	if (list_copy.completed && (list_copy.error || list_copy.abort)) {
		scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
		    data->available_data);
		data->copy_command_status = RCS_CCS_ERROR;
	} else
		scsi_ulto4b(0, data->available_data);
	scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

int
ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_receive_copy_status_lid4 *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));

	cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
	    data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	data->transfer_count_units = RCS_TC_BYTES;
	scsi_u64to8b(list_copy.curbytes, data->transfer_count);
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->length_of_the_sense_data_field = list_copy.sense_len;
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
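
/*
 * Explanatory note on the status commands above: the list is snapshotted
 * (list_copy = *list) under lun_lock, and a completed list is freed by the
 * first status read that observes it, so a second RECEIVE COPY STATUS for
 * the same list ID fails with ILLEGAL REQUEST.  LID1 reports progress in
 * bytes while curbytes fits in 32 bits and in megabytes beyond that;
 * LID4 always reports a 64-bit byte count.
 */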

int
ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_copy_operation_abort *cdb;
	struct tpc_list *list;
	int retval;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));

	cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list->abort = 1;
	mtx_unlock(&lun->lun_lock);

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (retval);
}

static uint64_t
tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss,
    uint32_t *pb, uint32_t *pbo)
{

	if (idx == 0xffff) {
		if (ss)
			*ss = list->lun->be_lun->blocksize;
		if (pb)
			*pb = list->lun->be_lun->blocksize <<
			    list->lun->be_lun->pblockexp;
		if (pbo)
			*pbo = list->lun->be_lun->blocksize *
			    list->lun->be_lun->pblockoff;
		return (list->lun->lun);
	}
	if (idx >= list->ncscd)
		return (UINT64_MAX);
	return (tpcl_resolve(list->lun->ctl_softc,
	    list->init_port, &list->cscd[idx], ss, pb, pbo));
}
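
/*
 * Explanatory note: CSCD index 0xffff designates the copy manager's own
 * LUN (the one that received the EXTENDED COPY), in which case the local
 * block size and physical block geometry are returned directly.  Any
 * other index is translated by tpcl_resolve() into a local LUN number;
 * UINT64_MAX means the descriptor could not be resolved and the segment
 * is failed with COPY ABORTED (asc 0x08, ascq 0x04).
 */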

static void
tpc_set_io_error_sense(struct tpc_list *list)
{
	int flen;
	uint8_t csi[4];
	uint8_t sks[3];
	uint8_t fbuf[4 + 64];

	scsi_ulto4b(list->curseg, csi);
	if (list->fwd_cscd <= 0x07ff) {
		sks[0] = SSD_SKS_SEGMENT_VALID;
		scsi_ulto2b((uint8_t *)&list->cscd[list->fwd_cscd] -
		    list->params, &sks[1]);
	} else
		sks[0] = 0;
	if (list->fwd_scsi_status) {
		fbuf[0] = 0x0c;
		fbuf[2] = list->fwd_target;
		flen = list->fwd_sense_len;
		if (flen > 64) {
			flen = 64;
			fbuf[2] |= SSD_FORWARDED_FSDT;
		}
		fbuf[1] = 2 + flen;
		fbuf[3] = list->fwd_scsi_status;
		bcopy(&list->fwd_sense_data, &fbuf[4], flen);
		flen += 4;
	} else
		flen = 0;
	ctl_set_sense(list->ctsio, /*current_error*/ 1,
	    /*sense_key*/ SSD_KEY_COPY_ABORTED,
	    /*asc*/ 0x0d, /*ascq*/ 0x01,
	    SSD_ELEM_COMMAND, sizeof(csi), csi,
	    sks[0] ? SSD_ELEM_SKS : SSD_ELEM_SKIP, sizeof(sks), sks,
	    flen ? SSD_ELEM_DESC : SSD_ELEM_SKIP, flen, fbuf,
	    SSD_ELEM_NONE);
}
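
/*
 * Explanatory sketch of the block-to-block pipeline built below: for each
 * round of up to TPC_MAX_IO_SIZE bytes a read tpc_io (tior) and a write
 * tpc_io (tiow) share one data buffer, with the write chained behind the
 * read on tior->run:
 *
 *	run:  tior -> tior -> tior -> ...	(issued immediately)
 *	       |       |       |
 *	      tiow    tiow    tiow		(issued by tpc_done() once
 *						 the matching read succeeds)
 *
 * All reads are therefore in flight concurrently, while each write only
 * starts after its source data has arrived.  tbdio counts outstanding
 * I/Os; the last completion re-enters tpc_process() at stage 1.
 */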

static int
tpc_process_b2b(struct tpc_list *list)
{
	struct scsi_ec_segment_b2b *seg;
	struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
	struct tpc_io *tior, *tiow;
	struct runl run;
	uint64_t sl, dl;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	int numlba;
	uint32_t srcblock, dstblock, pb, pbo, adj;
	uint16_t scscd, dcscd;
	uint8_t csi[4];

	scsi_ulto4b(list->curseg, csi);
	if (list->stage == 1) {
		while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tior, links);
			ctl_free_io(tior->io);
			free(tior->buf, M_CTL);
			free(tior, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			tpc_set_io_error_sense(list);
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
		return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
	scscd = scsi_2btoul(seg->src_cscd);
	dcscd = scsi_2btoul(seg->dst_cscd);
	sl = tpc_resolve(list, scscd, &srcblock, NULL, NULL);
	dl = tpc_resolve(list, dcscd, &dstblock, &pb, &pbo);
	if (sl == UINT64_MAX || dl == UINT64_MAX) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04,
		    SSD_ELEM_COMMAND, sizeof(csi), csi,
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}
	if (pbo > 0)
		pbo = pb - pbo;
	sdstp = &list->cscd[scscd].dtsp;
	if (scsi_3btoul(sdstp->block_length) != 0)
		srcblock = scsi_3btoul(sdstp->block_length);
	ddstp = &list->cscd[dcscd].dtsp;
	if (scsi_3btoul(ddstp->block_length) != 0)
		dstblock = scsi_3btoul(ddstp->block_length);
	numlba = scsi_2btoul(seg->number_of_blocks);
	if (seg->flags & EC_SEG_DC)
		numbytes = (off_t)numlba * dstblock;
	else
		numbytes = (off_t)numlba * srcblock;
	srclba = scsi_8btou64(seg->src_lba);
	dstlba = scsi_8btou64(seg->dst_lba);

//	printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
//	    (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
//	    dl, scsi_8btou64(seg->dst_lba));

	if (numbytes == 0)
		return (CTL_RETVAL_COMPLETE);

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x26, /*ascq*/ 0x0A,
		    SSD_ELEM_COMMAND, sizeof(csi), csi,
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	list->segbytes = numbytes;
	list->segsectors = numbytes / dstblock;
	donebytes = 0;
	TAILQ_INIT(&run);
	list->tbdio = 0;
	while (donebytes < numbytes) {
		roundbytes = numbytes - donebytes;
		if (roundbytes > TPC_MAX_IO_SIZE) {
			roundbytes = TPC_MAX_IO_SIZE;
			roundbytes -= roundbytes % dstblock;
			if (pb > dstblock) {
				adj = (dstlba * dstblock + roundbytes - pbo) % pb;
				if (roundbytes > adj)
					roundbytes -= adj;
			}
		}

		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->buf = malloc(roundbytes, M_CTL, M_WAITOK);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		ctl_scsi_read_write(tior->io,
		    /*data_ptr*/ tior->buf,
		    /*data_len*/ roundbytes,
		    /*read_op*/ 1,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ srclba,
		    /*num_blocks*/ roundbytes / srcblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->target = SSD_FORWARDED_SDS_EXSRC;
		tior->cscd = scscd;
		tior->lun = sl;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_read_write(tiow->io,
		    /*data_ptr*/ tior->buf,
		    /*data_len*/ roundbytes,
		    /*read_op*/ 0,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ dstlba,
		    /*num_blocks*/ roundbytes / dstblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->target = SSD_FORWARDED_SDS_EXDST;
		tiow->cscd = dcscd;
		tiow->lun = dl;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(&run, tior, rlinks);
		list->tbdio++;
		donebytes += roundbytes;
		srclba += roundbytes / srcblock;
		dstlba += roundbytes / dstblock;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}

static int
tpc_process_verify(struct tpc_list *list)
{
	struct scsi_ec_segment_verify *seg;
	struct tpc_io *tio;
	uint64_t sl;
	uint16_t cscd;
	uint8_t csi[4];

	scsi_ulto4b(list->curseg, csi);
	if (list->stage == 1) {
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			tpc_set_io_error_sense(list);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
	cscd = scsi_2btoul(seg->src_cscd);
	sl = tpc_resolve(list, cscd, NULL, NULL, NULL);
	if (sl == UINT64_MAX) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04,
		    SSD_ELEM_COMMAND, sizeof(csi), csi,
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Verify %ju\n", sl);

	if ((seg->tur & 0x01) == 0)
		return (CTL_RETVAL_COMPLETE);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->target = SSD_FORWARDED_SDS_EXSRC;
	tio->cscd = cscd;
	tio->lun = sl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}

static int
tpc_process_register_key(struct tpc_list *list)
{
	struct scsi_ec_segment_register_key *seg;
	struct tpc_io *tio;
	uint64_t dl;
	int datalen;
	uint16_t cscd;
	uint8_t csi[4];

	scsi_ulto4b(list->curseg, csi);
	if (list->stage == 1) {
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio->buf, M_CTL);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			tpc_set_io_error_sense(list);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
	cscd = scsi_2btoul(seg->dst_cscd);
	dl = tpc_resolve(list, cscd, NULL, NULL, NULL);
	if (dl == UINT64_MAX) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04,
		    SSD_ELEM_COMMAND, sizeof(csi), csi,
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Register Key %ju\n", dl);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	datalen = sizeof(struct scsi_per_res_out_parms);
	tio->buf = malloc(datalen, M_CTL, M_WAITOK);
	ctl_scsi_persistent_res_out(tio->io,
	    tio->buf, datalen, SPRO_REGISTER, -1,
	    scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
	    /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->target = SSD_FORWARDED_SDS_EXDST;
	tio->cscd = cscd;
	tio->lun = dl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}

static off_t
tpc_ranges_length(struct scsi_range_desc *range, int nrange)
{
	off_t length = 0;
	int r;

	for (r = 0; r < nrange; r++)
		length += scsi_4btoul(range[r].length);
	return (length);
}

static int
tpc_check_ranges_l(struct scsi_range_desc *range, int nrange, uint64_t maxlba,
    uint64_t *lba)
{
	uint64_t b1;
	uint32_t l1;
	int i;

	for (i = 0; i < nrange; i++) {
		b1 = scsi_8btou64(range[i].lba);
		l1 = scsi_4btoul(range[i].length);
		if (b1 + l1 < b1 || b1 + l1 > maxlba + 1) {
			*lba = MAX(b1, maxlba + 1);
			return (-1);
		}
	}
	return (0);
}

static int
tpc_check_ranges_x(struct scsi_range_desc *range, int nrange)
{
	uint64_t b1, b2;
	uint32_t l1, l2;
	int i, j;

	for (i = 0; i < nrange - 1; i++) {
		b1 = scsi_8btou64(range[i].lba);
		l1 = scsi_4btoul(range[i].length);
		for (j = i + 1; j < nrange; j++) {
			b2 = scsi_8btou64(range[j].lba);
			l2 = scsi_4btoul(range[j].length);
			if (b1 + l1 > b2 && b2 + l2 > b1)
				return (-1);
		}
	}
	return (0);
}

static int
tpc_skip_ranges(struct scsi_range_desc *range, int nrange, off_t skip,
    int *srange, off_t *soffset)
{
	off_t off;
	int r;

	r = 0;
	off = 0;
	while (r < nrange) {
		if (skip - off < scsi_4btoul(range[r].length)) {
			*srange = r;
			*soffset = skip - off;
			return (0);
		}
		off += scsi_4btoul(range[r].length);
		r++;
	}
	return (-1);
}
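
/*
 * Explanatory note on the chunking below: list->cursectors counts
 * destination sectors already written, so the matching source position
 * within the ROD is offset_into_rod + cursectors * dstblock / srcblock
 * source sectors from the token's first range.  tpc_skip_ranges() maps
 * such a linear offset to a (range index, offset-in-range) pair on both
 * the source and destination range lists; each chunk then extends to the
 * end of whichever range is shorter, capped at TPC_MAX_IOCHUNK_SIZE and
 * trimmed toward physical-block alignment.
 */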

static int
tpc_process_wut(struct tpc_list *list)
{
	struct tpc_io *tio, *tior, *tiow;
	struct runl run;
	int drange, srange;
	off_t doffset, soffset;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	uint32_t srcblock, dstblock, pb, pbo, adj;

	if (list->stage > 0) {
		/* Cleanup after previous rounds. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio->buf, M_CTL);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			if (list->fwd_scsi_status) {
				list->ctsio->io_hdr.status =
				    CTL_SCSI_ERROR | CTL_AUTOSENSE;
				list->ctsio->scsi_status = list->fwd_scsi_status;
				list->ctsio->sense_data = list->fwd_sense_data;
				list->ctsio->sense_len = list->fwd_sense_len;
			} else {
				ctl_set_invalid_field(list->ctsio,
				    /*sks_valid*/ 0, /*command*/ 0,
				    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
			}
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
	}

	/* Check where we are on destination ranges list. */
	if (tpc_skip_ranges(list->range, list->nrange, list->cursectors,
	    &drange, &doffset) != 0)
		return (CTL_RETVAL_COMPLETE);
	dstblock = list->lun->be_lun->blocksize;
	pb = dstblock << list->lun->be_lun->pblockexp;
	if (list->lun->be_lun->pblockoff > 0)
		pbo = pb - dstblock * list->lun->be_lun->pblockoff;
	else
		pbo = 0;

	/* Check where we are on source ranges list. */
	srcblock = list->token->blocksize;
	if (tpc_skip_ranges(list->token->range, list->token->nrange,
	    list->offset_into_rod + list->cursectors * dstblock / srcblock,
	    &srange, &soffset) != 0) {
		ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
		return (CTL_RETVAL_ERROR);
	}

	srclba = scsi_8btou64(list->token->range[srange].lba) + soffset;
	dstlba = scsi_8btou64(list->range[drange].lba) + doffset;
	numbytes = srcblock *
	    (scsi_4btoul(list->token->range[srange].length) - soffset);
	numbytes = omin(numbytes, dstblock *
	    (scsi_4btoul(list->range[drange].length) - doffset));
	if (numbytes > TPC_MAX_IOCHUNK_SIZE) {
		numbytes = TPC_MAX_IOCHUNK_SIZE;
		numbytes -= numbytes % dstblock;
		if (pb > dstblock) {
			adj = (dstlba * dstblock + numbytes - pbo) % pb;
			if (numbytes > adj)
				numbytes -= adj;
		}
	}

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
		return (CTL_RETVAL_ERROR);
	}

	list->segbytes = numbytes;
	list->segsectors = numbytes / dstblock;
//	printf("Copy chunk of %ju sectors from %ju to %ju\n", list->segsectors,
//	    srclba, dstlba);
	donebytes = 0;
	TAILQ_INIT(&run);
	list->tbdio = 0;
	TAILQ_INIT(&list->allio);
	while (donebytes < numbytes) {
		roundbytes = numbytes - donebytes;
		if (roundbytes > TPC_MAX_IO_SIZE) {
			roundbytes = TPC_MAX_IO_SIZE;
			roundbytes -= roundbytes % dstblock;
			if (pb > dstblock) {
				adj = (dstlba * dstblock + roundbytes - pbo) % pb;
				if (roundbytes > adj)
					roundbytes -= adj;
			}
		}

		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->buf = malloc(roundbytes, M_CTL, M_WAITOK);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		ctl_scsi_read_write(tior->io,
		    /*data_ptr*/ tior->buf,
		    /*data_len*/ roundbytes,
		    /*read_op*/ 1,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ srclba,
		    /*num_blocks*/ roundbytes / srcblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->lun = list->token->lun;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_read_write(tiow->io,
		    /*data_ptr*/ tior->buf,
		    /*data_len*/ roundbytes,
		    /*read_op*/ 0,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ dstlba,
		    /*num_blocks*/ roundbytes / dstblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = list->lun->lun;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(&run, tior, rlinks);
		list->tbdio++;
		donebytes += roundbytes;
		srclba += roundbytes / srcblock;
		dstlba += roundbytes / dstblock;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}
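
/*
 * Explanatory note: a WRITE USING TOKEN naming the BLOCK_ZERO ROD (no
 * stored token data) is handled below by issuing WRITE SAME with the
 * NDOB bit for every destination range.  The requests are chained one
 * behind another through prun, so they execute strictly in sequence,
 * and the whole operation completes (or fails) in a single round that
 * is cleaned up at stage 1.
 */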

static int
tpc_process_zero_wut(struct tpc_list *list)
{
	struct tpc_io *tio, *tiow;
	struct runl run, *prun;
	int r;
	uint32_t dstblock, len;

	if (list->stage > 0) {
complete:
		/* Cleanup after previous rounds. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			if (list->fwd_scsi_status) {
				list->ctsio->io_hdr.status =
				    CTL_SCSI_ERROR | CTL_AUTOSENSE;
				list->ctsio->scsi_status = list->fwd_scsi_status;
				list->ctsio->sense_data = list->fwd_sense_data;
				list->ctsio->sense_len = list->fwd_sense_len;
			} else {
				ctl_set_invalid_field(list->ctsio,
				    /*sks_valid*/ 0, /*command*/ 0,
				    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
			}
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
		return (CTL_RETVAL_COMPLETE);
	}

	dstblock = list->lun->be_lun->blocksize;
	TAILQ_INIT(&run);
	prun = &run;
	list->tbdio = 1;
	TAILQ_INIT(&list->allio);
	list->segsectors = 0;
	for (r = 0; r < list->nrange; r++) {
		len = scsi_4btoul(list->range[r].length);
		if (len == 0)
			continue;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_write_same(tiow->io,
		    /*data_ptr*/ NULL,
		    /*data_len*/ 0,
		    /*byte2*/ SWS_NDOB,
		    /*lba*/ scsi_8btou64(list->range[r].lba),
		    /*num_blocks*/ len,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = list->lun->lun;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(prun, tiow, rlinks);
		prun = &tiow->run;
		list->segsectors += len;
	}
	list->segbytes = list->segsectors * dstblock;

	if (TAILQ_EMPTY(&run))
		goto complete;

	while ((tiow = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tiow, rlinks);
		if (tpcl_queue(tiow->io, tiow->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}
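
/*
 * Explanatory note: tpc_process() below is the copy-engine state machine.
 * It is entered once from the submission path and then re-entered from
 * tpc_done() each time a round of I/O drains.  Segment handlers return
 * CTL_RETVAL_QUEUED after launching I/O (processing resumes later at
 * stage 1), CTL_RETVAL_COMPLETE to advance curseg, or CTL_RETVAL_ERROR
 * to finish the list with sense data already set.
 */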

static void
tpc_process(struct tpc_list *list)
{
	struct ctl_lun *lun = list->lun;
	struct ctl_softc *softc = lun->ctl_softc;
	struct scsi_ec_segment *seg;
	struct ctl_scsiio *ctsio = list->ctsio;
	int retval = CTL_RETVAL_COMPLETE;
	uint8_t csi[4];

	if (list->service_action == EC_WUT) {
		if (list->token != NULL)
			retval = tpc_process_wut(list);
		else
			retval = tpc_process_zero_wut(list);
		if (retval == CTL_RETVAL_QUEUED)
			return;
		if (retval == CTL_RETVAL_ERROR) {
			list->error = 1;
			goto done;
		}
	} else {
//		printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
		while (list->curseg < list->nseg) {
			seg = list->seg[list->curseg];
			switch (seg->type_code) {
			case EC_SEG_B2B:
				retval = tpc_process_b2b(list);
				break;
			case EC_SEG_VERIFY:
				retval = tpc_process_verify(list);
				break;
			case EC_SEG_REGISTER_KEY:
				retval = tpc_process_register_key(list);
				break;
			default:
				scsi_ulto4b(list->curseg, csi);
				ctl_set_sense(ctsio, /*current_error*/ 1,
				    /*sense_key*/ SSD_KEY_COPY_ABORTED,
				    /*asc*/ 0x26, /*ascq*/ 0x09,
				    SSD_ELEM_COMMAND, sizeof(csi), csi,
				    SSD_ELEM_NONE);
				goto done;
			}
			if (retval == CTL_RETVAL_QUEUED)
				return;
			if (retval == CTL_RETVAL_ERROR) {
				list->error = 1;
				goto done;
			}
			list->curseg++;
			list->stage = 0;
		}
	}

	ctl_set_success(ctsio);

done:
//	printf("ZZZ done\n");
	free(list->params, M_CTL);
	list->params = NULL;
	if (list->token) {
		mtx_lock(&softc->tpc_lock);
		if (--list->token->active == 0)
			list->token->last_active = time_uptime;
		mtx_unlock(&softc->tpc_lock);
		list->token = NULL;
	}
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	} else {
		list->completed = 1;
		list->last_active = time_uptime;
		list->sense_data = ctsio->sense_data;
		list->sense_len = ctsio->sense_len;
		list->scsi_status = ctsio->scsi_status;
	}
	mtx_unlock(&lun->lun_lock);

	ctl_done((union ctl_io *)ctsio);
}

/*
 * For any sort of check condition, busy, etc., we just retry.  We do not
 * decrement the retry count for unit attention type errors.  These are
 * normal, and we want to save the retry count for "real" errors.  Otherwise,
 * we could end up with situations where a command will succeed in some
 * situations and fail in others, depending on whether a unit attention is
 * pending.  Also, some of our error recovery actions, most notably the
 * LUN reset action, will cause a unit attention.
 *
 * We can add more detail here later if necessary.
 */
static tpc_error_action
tpc_checkcond_parse(union ctl_io *io)
{
	tpc_error_action error_action;
	int error_code, sense_key, asc, ascq;

	/*
	 * Default to retrying the command.
	 */
	error_action = TPC_ERR_RETRY;

	scsi_extract_sense_len(&io->scsiio.sense_data,
	    io->scsiio.sense_len,
	    &error_code,
	    &sense_key,
	    &asc,
	    &ascq,
	    /*show_errors*/ 1);

	switch (error_code) {
	case SSD_DEFERRED_ERROR:
	case SSD_DESC_DEFERRED_ERROR:
		error_action |= TPC_ERR_NO_DECREMENT;
		break;
	case SSD_CURRENT_ERROR:
	case SSD_DESC_CURRENT_ERROR:
	default:
		switch (sense_key) {
		case SSD_KEY_UNIT_ATTENTION:
			error_action |= TPC_ERR_NO_DECREMENT;
			break;
		case SSD_KEY_HARDWARE_ERROR:
			/*
			 * This is our generic "something bad happened"
			 * error code.  It often isn't recoverable.
			 */
			if ((asc == 0x44) && (ascq == 0x00))
				error_action = TPC_ERR_FAIL;
			break;
		case SSD_KEY_NOT_READY:
			/*
			 * If the LUN is powered down, there likely isn't
			 * much point in retrying right now.
			 */
			if ((asc == 0x04) && (ascq == 0x02))
				error_action = TPC_ERR_FAIL;
			/*
			 * If the LUN is offline, there probably isn't much
			 * point in retrying, either.
			 */
			if ((asc == 0x04) && (ascq == 0x03))
				error_action = TPC_ERR_FAIL;
			break;
		}
	}
	return (error_action);
}

static tpc_error_action
tpc_error_parse(union ctl_io *io)
{
	tpc_error_action error_action = TPC_ERR_RETRY;

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		switch (io->io_hdr.status & CTL_STATUS_MASK) {
		case CTL_SCSI_ERROR:
			switch (io->scsiio.scsi_status) {
			case SCSI_STATUS_CHECK_COND:
				error_action = tpc_checkcond_parse(io);
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
		break;
	case CTL_IO_TASK:
		break;
	default:
		panic("%s: invalid ctl_io type %d\n", __func__,
		    io->io_hdr.io_type);
		break;
	}
	return (error_action);
}
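
/*
 * Explanatory note on completion accounting in tpc_done() below: tbdio
 * holds the number of tpc_io's still outstanding for the current round.
 * Successor I/Os chained on tio->run are launched (and counted) before
 * the finished I/O itself is decremented, so tbdio can only reach zero
 * once the whole dependency chain has drained; the thread that performs
 * the 1 -> 0 transition (atomic_fetchadd_int() returning 1) re-enters
 * tpc_process() for the next stage.
 */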

void
tpc_done(union ctl_io *io)
{
	struct tpc_io *tio, *tior;

	/*
	 * Very minimal retry logic.  We basically retry if we got an error
	 * back, and the retry count is greater than 0.  If we ever want
	 * more sophisticated initiator type behavior, the CAM error
	 * recovery code in ../common might be helpful.
	 */
	tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
	if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
	 && (io->io_hdr.retries > 0)) {
		ctl_io_status old_status;
		tpc_error_action error_action;

		error_action = tpc_error_parse(io);
		switch (error_action & TPC_ERR_MASK) {
		case TPC_ERR_FAIL:
			break;
		case TPC_ERR_RETRY:
		default:
			if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
				io->io_hdr.retries--;
			old_status = io->io_hdr.status;
			io->io_hdr.status = CTL_STATUS_NONE;
			io->io_hdr.flags &= ~CTL_FLAG_ABORT;
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
				printf("%s: error returned from tpcl_queue()!\n",
				    __func__);
				io->io_hdr.status = old_status;
			} else
				return;
		}
	}

	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
		tio->list->error = 1;
		if (io->io_hdr.io_type == CTL_IO_SCSI &&
		    (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR) {
			tio->list->fwd_scsi_status = io->scsiio.scsi_status;
			tio->list->fwd_sense_data = io->scsiio.sense_data;
			tio->list->fwd_sense_len = io->scsiio.sense_len;
			tio->list->fwd_target = tio->target;
			tio->list->fwd_cscd = tio->cscd;
		}
	} else
		atomic_add_int(&tio->list->curops, 1);
	if (!tio->list->error && !tio->list->abort) {
		while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
			TAILQ_REMOVE(&tio->run, tior, rlinks);
			atomic_add_int(&tio->list->tbdio, 1);
			if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
				panic("tpcl_queue() error");
		}
	}
	if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
		tpc_process(tio->list);
}

int
ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid1_data *data;
	struct scsi_ec_cscd *cscd;
	struct scsi_ec_segment *seg;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	const char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));

	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len == 0) {
		ctl_set_success(ctsio);
		goto done;
	}
	if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
	    len > sizeof(struct scsi_extended_copy_lid1_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}
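
	/*
	 * Explanatory note: this is CTL's usual two-pass pattern for
	 * commands carrying parameter data.  The first pass ends in the
	 * ctl_datamove() above; once the data has been shuttled in, the
	 * command is resubmitted with CTL_FLAG_ALLOCATED set and falls
	 * through to the parsing below.
	 */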
scsi_2btoul(seg->descr_length); 1780 } 1781 list->inl = &data->data[lencscd + lenseg]; 1782 list->ncscd = lencscd / sizeof(struct scsi_ec_cscd); 1783 list->nseg = nseg; 1784 list->leninl = leninl; 1785 list->ctsio = ctsio; 1786 list->lun = lun; 1787 mtx_lock(&lun->lun_lock); 1788 if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) { 1789 tlist = tpc_find_list(lun, list->list_id, list->init_idx); 1790 if (tlist != NULL && !tlist->completed) { 1791 mtx_unlock(&lun->lun_lock); 1792 free(list, M_CTL); 1793 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 1794 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 1795 /*bit*/ 0); 1796 goto done; 1797 } 1798 if (tlist != NULL) { 1799 TAILQ_REMOVE(&lun->tpc_lists, tlist, links); 1800 free(tlist, M_CTL); 1801 } 1802 } 1803 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links); 1804 mtx_unlock(&lun->lun_lock); 1805 1806 tpc_process(list); 1807 return (CTL_RETVAL_COMPLETE); 1808 1809 done: 1810 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 1811 free(ctsio->kern_data_ptr, M_CTL); 1812 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 1813 } 1814 ctl_done((union ctl_io *)ctsio); 1815 return (CTL_RETVAL_COMPLETE); 1816 } 1817 1818 int 1819 ctl_extended_copy_lid4(struct ctl_scsiio *ctsio) 1820 { 1821 struct ctl_lun *lun = CTL_LUN(ctsio); 1822 struct scsi_extended_copy *cdb; 1823 struct scsi_extended_copy_lid4_data *data; 1824 struct scsi_ec_cscd *cscd; 1825 struct scsi_ec_segment *seg; 1826 struct tpc_list *list, *tlist; 1827 uint8_t *ptr; 1828 const char *value; 1829 int len, off, lencscd, lenseg, leninl, nseg; 1830 1831 CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n")); 1832 1833 cdb = (struct scsi_extended_copy *)ctsio->cdb; 1834 len = scsi_4btoul(cdb->length); 1835 1836 if (len == 0) { 1837 ctl_set_success(ctsio); 1838 goto done; 1839 } 1840 if (len < sizeof(struct scsi_extended_copy_lid4_data) || 1841 len > sizeof(struct scsi_extended_copy_lid4_data) + 1842 TPC_MAX_LIST + TPC_MAX_INLINE) { 1843 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 1844 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0); 1845 goto done; 1846 } 1847 1848 /* 1849 * If we've got a kernel request that hasn't been malloced yet, 1850 * malloc it and tell the caller the data buffer is here. 
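 * Once ctl_datamove() has transferred the parameter list from the
 * initiator, the command is re-dispatched and this handler runs
 * again with CTL_FLAG_ALLOCATED set to do the actual parsing.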
1851 */ 1852 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 1853 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 1854 ctsio->kern_data_len = len; 1855 ctsio->kern_total_len = len; 1856 ctsio->kern_rel_offset = 0; 1857 ctsio->kern_sg_entries = 0; 1858 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 1859 ctsio->be_move_done = ctl_config_move_done; 1860 ctl_datamove((union ctl_io *)ctsio); 1861 1862 return (CTL_RETVAL_COMPLETE); 1863 } 1864 1865 data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr; 1866 lencscd = scsi_2btoul(data->cscd_list_length); 1867 lenseg = scsi_2btoul(data->segment_list_length); 1868 leninl = scsi_2btoul(data->inline_data_length); 1869 if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) { 1870 ctl_set_sense(ctsio, /*current_error*/ 1, 1871 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1872 /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE); 1873 goto done; 1874 } 1875 if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) { 1876 ctl_set_sense(ctsio, /*current_error*/ 1, 1877 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1878 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE); 1879 goto done; 1880 } 1881 if (lencscd + lenseg > TPC_MAX_LIST || 1882 leninl > TPC_MAX_INLINE || 1883 len < sizeof(struct scsi_extended_copy_lid4_data) + 1884 lencscd + lenseg + leninl) { 1885 ctl_set_param_len_error(ctsio); 1886 goto done; 1887 } 1888 1889 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO); 1890 list->service_action = cdb->service_action; 1891 value = dnvlist_get_string(lun->be_lun->options, "insecure_tpc", NULL); 1892 if (value != NULL && strcmp(value, "on") == 0) 1893 list->init_port = -1; 1894 else 1895 list->init_port = ctsio->io_hdr.nexus.targ_port; 1896 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus); 1897 list->list_id = scsi_4btoul(data->list_identifier); 1898 list->flags = data->flags; 1899 list->params = ctsio->kern_data_ptr; 1900 list->cscd = (struct scsi_ec_cscd *)&data->data[0]; 1901 ptr = &data->data[0]; 1902 for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) { 1903 cscd = (struct scsi_ec_cscd *)(ptr + off); 1904 if (cscd->type_code != EC_CSCD_ID) { 1905 free(list, M_CTL); 1906 ctl_set_sense(ctsio, /*current_error*/ 1, 1907 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1908 /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE); 1909 goto done; 1910 } 1911 } 1912 ptr = &data->data[lencscd]; 1913 for (nseg = 0, off = 0; off < lenseg; nseg++) { 1914 if (nseg >= TPC_MAX_SEGS) { 1915 free(list, M_CTL); 1916 ctl_set_sense(ctsio, /*current_error*/ 1, 1917 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1918 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE); 1919 goto done; 1920 } 1921 seg = (struct scsi_ec_segment *)(ptr + off); 1922 if (seg->type_code != EC_SEG_B2B && 1923 seg->type_code != EC_SEG_VERIFY && 1924 seg->type_code != EC_SEG_REGISTER_KEY) { 1925 free(list, M_CTL); 1926 ctl_set_sense(ctsio, /*current_error*/ 1, 1927 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1928 /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE); 1929 goto done; 1930 } 1931 list->seg[nseg] = seg; 1932 off += sizeof(struct scsi_ec_segment) + 1933 scsi_2btoul(seg->descr_length); 1934 } 1935 list->inl = &data->data[lencscd + lenseg]; 1936 list->ncscd = lencscd / sizeof(struct scsi_ec_cscd); 1937 list->nseg = nseg; 1938 list->leninl = leninl; 1939 list->ctsio = ctsio; 1940 list->lun = lun; 1941 mtx_lock(&lun->lun_lock); 1942 if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) { 1943 tlist = tpc_find_list(lun, list->list_id, list->init_idx); 1944 if (tlist != NULL && 
!tlist->completed) { 1945 mtx_unlock(&lun->lun_lock); 1946 free(list, M_CTL); 1947 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 1948 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 1949 /*bit*/ 0); 1950 goto done; 1951 } 1952 if (tlist != NULL) { 1953 TAILQ_REMOVE(&lun->tpc_lists, tlist, links); 1954 free(tlist, M_CTL); 1955 } 1956 } 1957 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links); 1958 mtx_unlock(&lun->lun_lock); 1959 1960 tpc_process(list); 1961 return (CTL_RETVAL_COMPLETE); 1962 1963 done: 1964 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 1965 free(ctsio->kern_data_ptr, M_CTL); 1966 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 1967 } 1968 ctl_done((union ctl_io *)ctsio); 1969 return (CTL_RETVAL_COMPLETE); 1970 } 1971 1972 static void 1973 tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len, 1974 struct scsi_token *token) 1975 { 1976 static int id = 0; 1977 struct scsi_vpd_id_descriptor *idd = NULL; 1978 struct scsi_ec_cscd_id *cscd; 1979 struct scsi_read_capacity_data_long *dtsd; 1980 int targid_len; 1981 1982 scsi_ulto4b(ROD_TYPE_AUR, token->type); 1983 scsi_ulto2b(0x01f8, token->length); 1984 scsi_u64to8b(atomic_fetchadd_int(&id, 1), &token->body[0]); 1985 if (lun->lun_devid) 1986 idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *) 1987 lun->lun_devid->data, lun->lun_devid->len, 1988 scsi_devid_is_lun_naa); 1989 if (idd == NULL && lun->lun_devid) 1990 idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *) 1991 lun->lun_devid->data, lun->lun_devid->len, 1992 scsi_devid_is_lun_eui64); 1993 if (idd != NULL) { 1994 cscd = (struct scsi_ec_cscd_id *)&token->body[8]; 1995 cscd->type_code = EC_CSCD_ID; 1996 cscd->luidt_pdt = T_DIRECT; 1997 memcpy(&cscd->codeset, idd, 4 + idd->length); 1998 scsi_ulto3b(lun->be_lun->blocksize, cscd->dtsp.block_length); 1999 } 2000 scsi_u64to8b(0, &token->body[40]); /* XXX: Should be 128bit value. 
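 * Bytes 40-47 are the high half of the count and are left zero
 * here; the low 64 bits are stored at bytes 48-55 just below.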
*/ 2001 scsi_u64to8b(len, &token->body[48]); 2002 2003 /* ROD token device type specific data (RC16 without first field) */ 2004 dtsd = (struct scsi_read_capacity_data_long *)&token->body[88 - 8]; 2005 scsi_ulto4b(lun->be_lun->blocksize, dtsd->length); 2006 dtsd->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 2007 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, dtsd->lalba_lbp); 2008 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 2009 dtsd->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 2010 2011 if (port->target_devid) { 2012 targid_len = port->target_devid->len; 2013 memcpy(&token->body[120], port->target_devid->data, targid_len); 2014 } else 2015 targid_len = 32; 2016 arc4rand(&token->body[120 + targid_len], 384 - targid_len, 0); 2017 }; 2018 2019 int 2020 ctl_populate_token(struct ctl_scsiio *ctsio) 2021 { 2022 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2023 struct ctl_port *port = CTL_PORT(ctsio); 2024 struct ctl_lun *lun = CTL_LUN(ctsio); 2025 struct scsi_populate_token *cdb; 2026 struct scsi_populate_token_data *data; 2027 struct tpc_list *list, *tlist; 2028 struct tpc_token *token; 2029 uint64_t lba; 2030 int len, lendata, lendesc; 2031 2032 CTL_DEBUG_PRINT(("ctl_populate_token\n")); 2033 2034 cdb = (struct scsi_populate_token *)ctsio->cdb; 2035 len = scsi_4btoul(cdb->length); 2036 2037 if (len < sizeof(struct scsi_populate_token_data) || 2038 len > sizeof(struct scsi_populate_token_data) + 2039 TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) { 2040 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 2041 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0); 2042 goto done; 2043 } 2044 2045 /* 2046 * If we've got a kernel request that hasn't been malloced yet, 2047 * malloc it and tell the caller the data buffer is here. 2048 */ 2049 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 2050 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 2051 ctsio->kern_data_len = len; 2052 ctsio->kern_total_len = len; 2053 ctsio->kern_rel_offset = 0; 2054 ctsio->kern_sg_entries = 0; 2055 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 2056 ctsio->be_move_done = ctl_config_move_done; 2057 ctl_datamove((union ctl_io *)ctsio); 2058 2059 return (CTL_RETVAL_COMPLETE); 2060 } 2061 2062 data = (struct scsi_populate_token_data *)ctsio->kern_data_ptr; 2063 lendata = scsi_2btoul(data->length); 2064 if (lendata < sizeof(struct scsi_populate_token_data) - 2 + 2065 sizeof(struct scsi_range_desc)) { 2066 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 2067 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); 2068 goto done; 2069 } 2070 lendesc = scsi_2btoul(data->range_descriptor_length); 2071 if (lendesc < sizeof(struct scsi_range_desc) || 2072 len < sizeof(struct scsi_populate_token_data) + lendesc || 2073 lendata < sizeof(struct scsi_populate_token_data) - 2 + lendesc) { 2074 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 2075 /*field*/ 14, /*bit_valid*/ 0, /*bit*/ 0); 2076 goto done; 2077 } 2078 /* 2079 printf("PT(list=%u) flags=%x to=%d rt=%x len=%x\n", 2080 scsi_4btoul(cdb->list_identifier), 2081 data->flags, scsi_4btoul(data->inactivity_timeout), 2082 scsi_4btoul(data->rod_type), 2083 scsi_2btoul(data->range_descriptor_length)); 2084 */ 2085 2086 /* Validate INACTIVITY TIMEOUT field */ 2087 if (scsi_4btoul(data->inactivity_timeout) > TPC_MAX_TOKEN_TIMEOUT) { 2088 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 2089 /*command*/ 0, /*field*/ 4, /*bit_valid*/ 0, 2090 /*bit*/ 0); 2091 goto done; 2092 } 2093 2094 /* Validate ROD TYPE field */ 2095 if ((data->flags & EC_PT_RTV) && 2096 
scsi_4btoul(data->rod_type) != ROD_TYPE_AUR) { 2097 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 2098 /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0); 2099 goto done; 2100 } 2101 2102 /* Validate list of ranges */ 2103 if (tpc_check_ranges_l(&data->desc[0], 2104 scsi_2btoul(data->range_descriptor_length) / 2105 sizeof(struct scsi_range_desc), 2106 lun->be_lun->maxlba, &lba) != 0) { 2107 ctl_set_lba_out_of_range(ctsio, lba); 2108 goto done; 2109 } 2110 if (tpc_check_ranges_x(&data->desc[0], 2111 scsi_2btoul(data->range_descriptor_length) / 2112 sizeof(struct scsi_range_desc)) != 0) { 2113 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, 2114 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 2115 /*bit*/ 0); 2116 goto done; 2117 } 2118 2119 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO); 2120 list->service_action = cdb->service_action; 2121 list->init_port = ctsio->io_hdr.nexus.targ_port; 2122 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus); 2123 list->list_id = scsi_4btoul(cdb->list_identifier); 2124 list->flags = data->flags; 2125 list->ctsio = ctsio; 2126 list->lun = lun; 2127 mtx_lock(&lun->lun_lock); 2128 tlist = tpc_find_list(lun, list->list_id, list->init_idx); 2129 if (tlist != NULL && !tlist->completed) { 2130 mtx_unlock(&lun->lun_lock); 2131 free(list, M_CTL); 2132 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 2133 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 2134 /*bit*/ 0); 2135 goto done; 2136 } 2137 if (tlist != NULL) { 2138 TAILQ_REMOVE(&lun->tpc_lists, tlist, links); 2139 free(tlist, M_CTL); 2140 } 2141 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links); 2142 mtx_unlock(&lun->lun_lock); 2143 2144 token = malloc(sizeof(*token), M_CTL, M_WAITOK | M_ZERO); 2145 token->lun = lun->lun; 2146 token->blocksize = lun->be_lun->blocksize; 2147 token->params = ctsio->kern_data_ptr; 2148 token->range = &data->desc[0]; 2149 token->nrange = scsi_2btoul(data->range_descriptor_length) / 2150 sizeof(struct scsi_range_desc); 2151 list->cursectors = tpc_ranges_length(token->range, token->nrange); 2152 list->curbytes = (off_t)list->cursectors * lun->be_lun->blocksize; 2153 tpc_create_token(lun, port, list->curbytes, 2154 (struct scsi_token *)token->token); 2155 token->active = 0; 2156 token->last_active = time_uptime; 2157 token->timeout = scsi_4btoul(data->inactivity_timeout); 2158 if (token->timeout == 0) 2159 token->timeout = TPC_DFL_TOKEN_TIMEOUT; 2160 else if (token->timeout < TPC_MIN_TOKEN_TIMEOUT) 2161 token->timeout = TPC_MIN_TOKEN_TIMEOUT; 2162 memcpy(list->res_token, token->token, sizeof(list->res_token)); 2163 list->res_token_valid = 1; 2164 list->curseg = 0; 2165 list->completed = 1; 2166 list->last_active = time_uptime; 2167 mtx_lock(&softc->tpc_lock); 2168 TAILQ_INSERT_TAIL(&softc->tpc_tokens, token, links); 2169 mtx_unlock(&softc->tpc_lock); 2170 ctl_set_success(ctsio); 2171 ctl_done((union ctl_io *)ctsio); 2172 return (CTL_RETVAL_COMPLETE); 2173 2174 done: 2175 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 2176 free(ctsio->kern_data_ptr, M_CTL); 2177 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 2178 } 2179 ctl_done((union ctl_io *)ctsio); 2180 return (CTL_RETVAL_COMPLETE); 2181 } 2182 2183 int 2184 ctl_write_using_token(struct ctl_scsiio *ctsio) 2185 { 2186 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2187 struct ctl_lun *lun = CTL_LUN(ctsio); 2188 struct scsi_write_using_token *cdb; 2189 struct scsi_write_using_token_data *data; 2190 struct tpc_list *list, *tlist; 2191 struct tpc_token *token; 2192 uint64_t lba; 2193 int len, lendata, lendesc; 2194 2195 
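	/*
	 * WRITE USING TOKEN: write the data represented by a ROD token
	 * (normally created earlier by POPULATE TOKEN, possibly on another
	 * LUN) into the ranges given in the parameter list.
	 */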
CTL_DEBUG_PRINT(("ctl_write_using_token\n")); 2196 2197 cdb = (struct scsi_write_using_token *)ctsio->cdb; 2198 len = scsi_4btoul(cdb->length); 2199 2200 if (len < sizeof(struct scsi_write_using_token_data) || 2201 len > sizeof(struct scsi_write_using_token_data) + 2202 TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) { 2203 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 2204 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0); 2205 goto done; 2206 } 2207 2208 /* 2209 * If we've got a kernel request that hasn't been malloced yet, 2210 * malloc it and tell the caller the data buffer is here. 2211 */ 2212 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 2213 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 2214 ctsio->kern_data_len = len; 2215 ctsio->kern_total_len = len; 2216 ctsio->kern_rel_offset = 0; 2217 ctsio->kern_sg_entries = 0; 2218 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 2219 ctsio->be_move_done = ctl_config_move_done; 2220 ctl_datamove((union ctl_io *)ctsio); 2221 2222 return (CTL_RETVAL_COMPLETE); 2223 } 2224 2225 data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr; 2226 lendata = scsi_2btoul(data->length); 2227 if (lendata < sizeof(struct scsi_write_using_token_data) - 2 + 2228 sizeof(struct scsi_range_desc)) { 2229 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 2230 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); 2231 goto done; 2232 } 2233 lendesc = scsi_2btoul(data->range_descriptor_length); 2234 if (lendesc < sizeof(struct scsi_range_desc) || 2235 len < sizeof(struct scsi_write_using_token_data) + lendesc || 2236 lendata < sizeof(struct scsi_write_using_token_data) - 2 + lendesc) { 2237 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 2238 /*field*/ 534, /*bit_valid*/ 0, /*bit*/ 0); 2239 goto done; 2240 } 2241 /* 2242 printf("WUT(list=%u) flags=%x off=%ju len=%x\n", 2243 scsi_4btoul(cdb->list_identifier), 2244 data->flags, scsi_8btou64(data->offset_into_rod), 2245 scsi_2btoul(data->range_descriptor_length)); 2246 */ 2247 2248 /* Validate list of ranges */ 2249 if (tpc_check_ranges_l(&data->desc[0], 2250 scsi_2btoul(data->range_descriptor_length) / 2251 sizeof(struct scsi_range_desc), 2252 lun->be_lun->maxlba, &lba) != 0) { 2253 ctl_set_lba_out_of_range(ctsio, lba); 2254 goto done; 2255 } 2256 if (tpc_check_ranges_x(&data->desc[0], 2257 scsi_2btoul(data->range_descriptor_length) / 2258 sizeof(struct scsi_range_desc)) != 0) { 2259 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, 2260 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 2261 /*bit*/ 0); 2262 goto done; 2263 } 2264 2265 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO); 2266 list->service_action = cdb->service_action; 2267 list->init_port = ctsio->io_hdr.nexus.targ_port; 2268 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus); 2269 list->list_id = scsi_4btoul(cdb->list_identifier); 2270 list->flags = data->flags; 2271 list->params = ctsio->kern_data_ptr; 2272 list->range = &data->desc[0]; 2273 list->nrange = scsi_2btoul(data->range_descriptor_length) / 2274 sizeof(struct scsi_range_desc); 2275 list->offset_into_rod = scsi_8btou64(data->offset_into_rod); 2276 list->ctsio = ctsio; 2277 list->lun = lun; 2278 mtx_lock(&lun->lun_lock); 2279 tlist = tpc_find_list(lun, list->list_id, list->init_idx); 2280 if (tlist != NULL && !tlist->completed) { 2281 mtx_unlock(&lun->lun_lock); 2282 free(list, M_CTL); 2283 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 2284 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 2285 /*bit*/ 0); 2286 goto done; 2287 } 2288 if (tlist != 
NULL) { 2289 TAILQ_REMOVE(&lun->tpc_lists, tlist, links); 2290 free(tlist, M_CTL); 2291 } 2292 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links); 2293 mtx_unlock(&lun->lun_lock); 2294 2295 /* Block device zero ROD token -> no token. */ 2296 if (scsi_4btoul(data->rod_token) == ROD_TYPE_BLOCK_ZERO) { 2297 tpc_process(list); 2298 return (CTL_RETVAL_COMPLETE); 2299 } 2300 2301 mtx_lock(&softc->tpc_lock); 2302 TAILQ_FOREACH(token, &softc->tpc_tokens, links) { 2303 if (memcmp(token->token, data->rod_token, 2304 sizeof(data->rod_token)) == 0) 2305 break; 2306 } 2307 if (token != NULL) { 2308 token->active++; 2309 list->token = token; 2310 if (data->flags & EC_WUT_DEL_TKN) 2311 token->timeout = 0; 2312 } 2313 mtx_unlock(&softc->tpc_lock); 2314 if (token == NULL) { 2315 mtx_lock(&lun->lun_lock); 2316 TAILQ_REMOVE(&lun->tpc_lists, list, links); 2317 mtx_unlock(&lun->lun_lock); 2318 free(list, M_CTL); 2319 ctl_set_sense(ctsio, /*current_error*/ 1, 2320 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 2321 /*asc*/ 0x23, /*ascq*/ 0x04, SSD_ELEM_NONE); 2322 goto done; 2323 } 2324 2325 tpc_process(list); 2326 return (CTL_RETVAL_COMPLETE); 2327 2328 done: 2329 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 2330 free(ctsio->kern_data_ptr, M_CTL); 2331 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 2332 } 2333 ctl_done((union ctl_io *)ctsio); 2334 return (CTL_RETVAL_COMPLETE); 2335 } 2336 2337 int 2338 ctl_receive_rod_token_information(struct ctl_scsiio *ctsio) 2339 { 2340 struct ctl_lun *lun = CTL_LUN(ctsio); 2341 struct scsi_receive_rod_token_information *cdb; 2342 struct scsi_receive_copy_status_lid4_data *data; 2343 struct tpc_list *list; 2344 struct tpc_list list_copy; 2345 uint8_t *ptr; 2346 int retval; 2347 int alloc_len, total_len, token_len; 2348 uint32_t list_id; 2349 2350 CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n")); 2351 2352 cdb = (struct scsi_receive_rod_token_information *)ctsio->cdb; 2353 retval = CTL_RETVAL_COMPLETE; 2354 2355 list_id = scsi_4btoul(cdb->list_identifier); 2356 mtx_lock(&lun->lun_lock); 2357 list = tpc_find_list(lun, list_id, 2358 ctl_get_initindex(&ctsio->io_hdr.nexus)); 2359 if (list == NULL) { 2360 mtx_unlock(&lun->lun_lock); 2361 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 2362 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, 2363 /*bit*/ 0); 2364 ctl_done((union ctl_io *)ctsio); 2365 return (retval); 2366 } 2367 list_copy = *list; 2368 if (list->completed) { 2369 TAILQ_REMOVE(&lun->tpc_lists, list, links); 2370 free(list, M_CTL); 2371 } 2372 mtx_unlock(&lun->lun_lock); 2373 2374 token_len = list_copy.res_token_valid ? 
2 + sizeof(list_copy.res_token) : 0; 2375 total_len = sizeof(*data) + list_copy.sense_len + 4 + token_len; 2376 alloc_len = scsi_4btoul(cdb->length); 2377 2378 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 2379 ctsio->kern_sg_entries = 0; 2380 ctsio->kern_rel_offset = 0; 2381 ctsio->kern_data_len = min(total_len, alloc_len); 2382 ctsio->kern_total_len = ctsio->kern_data_len; 2383 2384 data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr; 2385 scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len + 2386 4 + token_len, data->available_data); 2387 data->response_to_service_action = list_copy.service_action; 2388 if (list_copy.completed) { 2389 if (list_copy.error) 2390 data->copy_command_status = RCS_CCS_ERROR; 2391 else if (list_copy.abort) 2392 data->copy_command_status = RCS_CCS_ABORTED; 2393 else 2394 data->copy_command_status = RCS_CCS_COMPLETED; 2395 } else 2396 data->copy_command_status = RCS_CCS_INPROG_FG; 2397 scsi_ulto2b(list_copy.curops, data->operation_counter); 2398 scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay); 2399 data->transfer_count_units = RCS_TC_LBAS; 2400 scsi_u64to8b(list_copy.cursectors, data->transfer_count); 2401 scsi_ulto2b(list_copy.curseg, data->segments_processed); 2402 data->length_of_the_sense_data_field = list_copy.sense_len; 2403 data->sense_data_length = list_copy.sense_len; 2404 memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len); 2405 2406 ptr = &data->sense_data[data->length_of_the_sense_data_field]; 2407 scsi_ulto4b(token_len, &ptr[0]); 2408 if (list_copy.res_token_valid) { 2409 scsi_ulto2b(0, &ptr[4]); 2410 memcpy(&ptr[6], list_copy.res_token, sizeof(list_copy.res_token)); 2411 } 2412 /* 2413 printf("RRTI(list=%u) valid=%d\n", 2414 scsi_4btoul(cdb->list_identifier), list_copy.res_token_valid); 2415 */ 2416 ctl_set_success(ctsio); 2417 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 2418 ctsio->be_move_done = ctl_config_move_done; 2419 ctl_datamove((union ctl_io *)ctsio); 2420 return (retval); 2421 } 2422 2423 int 2424 ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio) 2425 { 2426 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2427 struct scsi_report_all_rod_tokens *cdb; 2428 struct scsi_report_all_rod_tokens_data *data; 2429 struct tpc_token *token; 2430 int retval; 2431 int alloc_len, total_len, tokens, i; 2432 2433 CTL_DEBUG_PRINT(("ctl_report_all_rod_tokens\n")); 2434 2435 cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb; 2436 retval = CTL_RETVAL_COMPLETE; 2437 2438 tokens = 0; 2439 mtx_lock(&softc->tpc_lock); 2440 TAILQ_FOREACH(token, &softc->tpc_tokens, links) 2441 tokens++; 2442 mtx_unlock(&softc->tpc_lock); 2443 if (tokens > 512) 2444 tokens = 512; 2445 2446 total_len = sizeof(*data) + tokens * 96; 2447 alloc_len = scsi_4btoul(cdb->length); 2448 2449 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 2450 ctsio->kern_sg_entries = 0; 2451 ctsio->kern_rel_offset = 0; 2452 ctsio->kern_data_len = min(total_len, alloc_len); 2453 ctsio->kern_total_len = ctsio->kern_data_len; 2454 2455 data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr; 2456 i = 0; 2457 mtx_lock(&softc->tpc_lock); 2458 TAILQ_FOREACH(token, &softc->tpc_tokens, links) { 2459 if (i >= tokens) 2460 break; 2461 memcpy(&data->rod_management_token_list[i * 96], 2462 token->token, 96); 2463 i++; 2464 } 2465 mtx_unlock(&softc->tpc_lock); 2466 scsi_ulto4b(sizeof(*data) - 4 + i * 96, data->available_data); 2467 /* 2468 printf("RART tokens=%d\n", i); 2469 */ 2470 ctl_set_success(ctsio); 
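	/*
	 * Return the collected tokens; kern_data_len was already clamped
	 * to the initiator's allocation length above.
	 */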
2471 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 2472 ctsio->be_move_done = ctl_config_move_done; 2473 ctl_datamove((union ctl_io *)ctsio); 2474 return (retval); 2475 } 2476