/*-
 * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <machine/atomic.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_tpc.h>
#include <cam/ctl/ctl_error.h>

#define	TPC_MAX_CSCDS	64
#define	TPC_MAX_SEGS	64
#define	TPC_MAX_SEG	0
#define	TPC_MAX_LIST	8192
#define	TPC_MAX_INLINE	0
#define	TPC_MAX_LISTS	255
#define	TPC_MAX_IO_SIZE	(1024 * 1024)

MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");

typedef enum {
	TPC_ERR_RETRY		= 0x000,
	TPC_ERR_FAIL		= 0x001,
	TPC_ERR_MASK		= 0x0ff,
	TPC_ERR_NO_DECREMENT	= 0x100
} tpc_error_action;

struct tpc_list;
TAILQ_HEAD(runl, tpc_io);
struct tpc_io {
	union ctl_io		*io;
	uint64_t		 lun;
	struct tpc_list		*list;
	struct runl		 run;
	TAILQ_ENTRY(tpc_io)	 rlinks;
	TAILQ_ENTRY(tpc_io)	 links;
};

struct tpc_list {
	uint8_t			 service_action;
	int			 init_port;
	uint32_t		 init_idx;
	uint32_t		 list_id;
	uint8_t			 flags;
	uint8_t			*params;
	struct scsi_ec_cscd	*cscd;
	struct scsi_ec_segment	*seg[TPC_MAX_SEGS];
	uint8_t			*inl;
	int			 ncscd;
	int			 nseg;
	int			 leninl;
	int			 curseg;
	off_t			 curbytes;
	int			 curops;
	int			 stage;
	uint8_t			*buf;
	int			 segbytes;
	int			 tbdio;
	int			 error;
	int			 abort;
	int			 completed;
	TAILQ_HEAD(, tpc_io)	 allio;
	struct scsi_sense_data	 sense_data;
	uint8_t			 sense_len;
	uint8_t			 scsi_status;
	struct ctl_scsiio	*ctsio;
	struct ctl_lun		*lun;
	TAILQ_ENTRY(tpc_list)	 links;
};
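
/*
 * Per-LUN setup and teardown of third-party copy state.  Every copy
 * list must have completed by the time ctl_tpc_shutdown() runs; the
 * KASSERT below enforces that.
 */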
void
ctl_tpc_init(struct ctl_lun *lun)
{

	TAILQ_INIT(&lun->tpc_lists);
}

void
ctl_tpc_shutdown(struct ctl_lun *lun)
{
	struct tpc_list *list;

	while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		KASSERT(list->completed,
		    ("Not completed TPC (%p) on shutdown", list));
		free(list, M_CTL);
	}
}

int
ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_tpc *tpc_ptr;
	struct scsi_vpd_tpc_descriptor *d_ptr;
	struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
	struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
	struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
	struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
	struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
	struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
	struct ctl_lun *lun;
	int data_len;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	data_len = sizeof(struct scsi_vpd_tpc) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
	     2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 7, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_pd) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_gco);

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	tpc_ptr->page_code = SVPD_SCSI_TPC;
	scsi_ulto2b(data_len - 4, tpc_ptr->page_length);

	/* Supported commands */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
	sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
	sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 7;
	scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
	scd_ptr = &sc_ptr->descr[0];
	scd_ptr->opcode = EXTENDED_COPY;
	scd_ptr->sa_length = 3;
	scd_ptr->supported_service_actions[0] = EC_EC_LID1;
	scd_ptr->supported_service_actions[1] = EC_EC_LID4;
	scd_ptr->supported_service_actions[2] = EC_COA;
	scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
	    &scd_ptr->supported_service_actions[scd_ptr->sa_length];
	scd_ptr->opcode = RECEIVE_COPY_STATUS;
	scd_ptr->sa_length = 4;
	scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
	scd_ptr->supported_service_actions[1] = RCS_RCFD;
	scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
	scd_ptr->supported_service_actions[3] = RCS_RCOP;

	/* Parameter data. */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
	scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);

	/* Supported Descriptors */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
	sd_ptr->list_length = 4;
	sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
	sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
	sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
	sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;

	/* Supported CSCD Descriptor IDs */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
	scsi_ulto2b(2, sdid_ptr->list_length);
	scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);

	/* General Copy Operations */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
	scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
	scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
	scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
	gco_ptr->data_segment_granularity = 0;
	gco_ptr->inline_data_granularity = 0;

	ctsio->scsi_status = SCSI_STATUS_OK;
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}
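
/*
 * RECEIVE COPY OPERATING PARAMETERS: report the same implementation
 * limits (CSCD and segment descriptor counts, descriptor list length,
 * supported segment descriptor type codes) that the TPC VPD page above
 * advertises.
 */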
int
ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_operating_parameters *cdb;
	struct scsi_receive_copy_operating_parameters_data *data;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_receive_copy_operating_parameters\n"));

	cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	total_len = sizeof(*data) + 4;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
	data->snlid = RCOP_SNLID;
	scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
	scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
	scsi_ulto4b(0, data->held_data_limit);
	scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
	scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
	data->maximum_concurrent_copies = TPC_MAX_LISTS;
	data->data_segment_granularity = 0;
	data->inline_data_granularity = 0;
	data->held_data_granularity = 0;
	data->implemented_descriptor_list_length = 4;
	data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
	data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
	data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
	data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
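
/*
 * Look up a previously submitted copy operation for the given initiator.
 * Lists created with LIST ID USAGE "none" are deliberately skipped:
 * they cannot be queried or aborted by list ID.
 */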
static struct tpc_list *
tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx)
{
	struct tpc_list *list;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id &&
		    list->init_idx == init_idx)
			break;
	}
	return (list);
}

int
ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_status_lid1 *cdb;
	struct scsi_receive_copy_status_lid1_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));

	cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4, data->available_data);
	if (list_copy.completed) {
		if (list_copy.error || list_copy.abort)
			data->copy_command_status = RCS_CCS_ERROR;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG;
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	if (list_copy.curbytes <= UINT32_MAX) {
		data->transfer_count_units = RCS_TC_BYTES;
		scsi_ulto4b(list_copy.curbytes, data->transfer_count);
	} else {
		data->transfer_count_units = RCS_TC_MBYTES;
		scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
	}

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
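
/*
 * RECEIVE COPY FAILURE DETAILS returns the sense data saved for a
 * completed copy and retires the list as a side effect: fetching the
 * failure details acknowledges the result.
 */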
int
ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_failure_details *cdb;
	struct scsi_receive_copy_failure_details_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));

	cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL || !list->completed) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	TAILQ_REMOVE(&lun->tpc_lists, list, links);
	free(list, M_CTL);
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
	if (list_copy.completed && (list_copy.error || list_copy.abort)) {
		scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
		    data->available_data);
		data->copy_command_status = RCS_CCS_ERROR;
	} else
		scsi_ulto4b(0, data->available_data);
	scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
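
/*
 * The LID4 variant of RECEIVE COPY STATUS additionally reports the
 * service action the list was created with, distinguishes aborted from
 * failed copies, and appends the saved sense data.
 */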
int
ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_status_lid4 *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));

	cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
	    data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	data->transfer_count_units = RCS_TC_BYTES;
	scsi_u64to8b(list_copy.curbytes, data->transfer_count);
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->length_of_the_sense_data_field = list_copy.sense_len;
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

int
ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_copy_operation_abort *cdb;
	struct tpc_list *list;
	int retval;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));

	cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list->abort = 1;
	mtx_unlock(&lun->lun_lock);

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (retval);
}
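
/*
 * Resolve a CSCD descriptor index to a CTL LUN number, optionally
 * returning the block size via *ss.  Index 0xffff is shorthand for the
 * LUN the EXTENDED COPY command itself was addressed to; other indexes
 * are looked up in the submitted CSCD descriptor list.
 */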
static uint64_t
tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss)
{

	if (idx == 0xffff) {
		if (ss && list->lun->be_lun)
			*ss = list->lun->be_lun->blocksize;
		return (list->lun->lun);
	}
	if (idx >= list->ncscd)
		return (UINT64_MAX);
	return (tpcl_resolve(list->init_port, &list->cscd[idx], ss));
}
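
/*
 * Process one block-to-block segment descriptor.  The segment is split
 * into chunks of at most TPC_MAX_IO_SIZE bytes; each chunk gets a read
 * ctl_io and a write ctl_io sharing one region of list->buf.  The I/Os
 * are chained through per-tpc_io "run" lists: only the first read is
 * queued here, and the completion of read N (in tpc_done()) kicks off
 * both write N and read N+1, so a write never starts before its data
 * has been read in:
 *
 *	run -> tior[0] -> tiow[0]
 *	       tior[1] -> tiow[1]	(run from tior[0]'s completion)
 *	       ...
 *
 * Once all I/O has completed, tpc_process() re-enters this function
 * with list->stage == 1 to collect status and free resources.
 */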
static int
tpc_process_b2b(struct tpc_list *list)
{
	struct scsi_ec_segment_b2b *seg;
	struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
	struct tpc_io *tior, *tiow;
	struct runl run, *prun;
	uint64_t sl, dl;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	int numlba;
	uint32_t srcblock, dstblock;

	if (list->stage == 1) {
complete:
		while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tior, links);
			ctl_free_io(tior->io);
			free(tior, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else {
			list->curbytes += list->segbytes;
			return (CTL_RETVAL_COMPLETE);
		}
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
	sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), &srcblock);
	dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), &dstblock);
	if (sl >= CTL_MAX_LUNS || dl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}
	sdstp = &list->cscd[scsi_2btoul(seg->src_cscd)].dtsp;
	if (scsi_3btoul(sdstp->block_length) != 0)
		srcblock = scsi_3btoul(sdstp->block_length);
	ddstp = &list->cscd[scsi_2btoul(seg->dst_cscd)].dtsp;
	if (scsi_3btoul(ddstp->block_length) != 0)
		dstblock = scsi_3btoul(ddstp->block_length);
	numlba = scsi_2btoul(seg->number_of_blocks);
	if (seg->flags & EC_SEG_DC)
		numbytes = (off_t)numlba * dstblock;
	else
		numbytes = (off_t)numlba * srcblock;
	srclba = scsi_8btou64(seg->src_lba);
	dstlba = scsi_8btou64(seg->dst_lba);

//	printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
//	    (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
//	    dl, scsi_8btou64(seg->dst_lba));

	if (numbytes == 0)
		return (CTL_RETVAL_COMPLETE);

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x26, /*ascq*/ 0x0A, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	list->buf = malloc(numbytes, M_CTL, M_WAITOK);
	list->segbytes = numbytes;
	donebytes = 0;
	TAILQ_INIT(&run);
	prun = &run;
	list->tbdio = 1;
	while (donebytes < numbytes) {
		roundbytes = MIN(numbytes - donebytes, TPC_MAX_IO_SIZE);

		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		if (tior->io == NULL) {
			list->error = 1;
			goto complete;
		}
		ctl_scsi_read_write(tior->io,
		    /*data_ptr*/ &list->buf[donebytes],
		    /*data_len*/ roundbytes,
		    /*read_op*/ 1,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ srclba + donebytes / srcblock,
		    /*num_blocks*/ roundbytes / srcblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->lun = sl;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		if (tiow->io == NULL) {
			list->error = 1;
			goto complete;
		}
		ctl_scsi_read_write(tiow->io,
		    /*data_ptr*/ &list->buf[donebytes],
		    /*data_len*/ roundbytes,
		    /*read_op*/ 0,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ dstlba + donebytes / dstblock,
		    /*num_blocks*/ roundbytes / dstblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = dl;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(prun, tior, rlinks);
		prun = &tior->run;
		donebytes += roundbytes;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}
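
/*
 * Process a verify segment descriptor.  Only the TUR bit is honored:
 * when set, a TEST UNIT READY is sent to the source device; when clear,
 * the segment is a no-op.
 */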
static int
tpc_process_verify(struct tpc_list *list)
{
	struct scsi_ec_segment_verify *seg;
	struct tpc_io *tio;
	uint64_t sl;

	if (list->stage == 1) {
complete:
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
	sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), NULL);
	if (sl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Verify %ju\n", sl);

	if ((seg->tur & 0x01) == 0)
		return (CTL_RETVAL_COMPLETE);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	if (tio->io == NULL) {
		list->error = 1;
		goto complete;
	}
	ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = sl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}
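
/*
 * Process a register key segment descriptor by sending PERSISTENT
 * RESERVE OUT (REGISTER) to the destination device, with the
 * reservation keys taken from the descriptor.
 */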
static int
tpc_process_register_key(struct tpc_list *list)
{
	struct scsi_ec_segment_register_key *seg;
	struct tpc_io *tio;
	uint64_t dl;
	int datalen;

	if (list->stage == 1) {
complete:
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
	dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), NULL);
	if (dl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Register Key %ju\n", dl);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	if (tio->io == NULL) {
		list->error = 1;
		goto complete;
	}
	datalen = sizeof(struct scsi_per_res_out_parms);
	list->buf = malloc(datalen, M_CTL, M_WAITOK);
	ctl_scsi_persistent_res_out(tio->io,
	    list->buf, datalen, SPRO_REGISTER, -1,
	    scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
	    /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = dl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}
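
/*
 * Main state machine: walk the segment descriptor list, dispatching
 * each descriptor to its handler.  A handler may return
 * CTL_RETVAL_QUEUED, in which case we return and get re-invoked from
 * tpc_done() when the outstanding I/O completes.  On exit the list is
 * either freed immediately (LIST ID USAGE "none") or kept around,
 * carrying the final status, for RECEIVE COPY RESULTS to pick up.
 */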
static void
tpc_process(struct tpc_list *list)
{
	struct ctl_lun *lun = list->lun;
	struct scsi_ec_segment *seg;
	struct ctl_scsiio *ctsio = list->ctsio;
	int retval = CTL_RETVAL_COMPLETE;

//printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
	while (list->curseg < list->nseg) {
		seg = list->seg[list->curseg];
		switch (seg->type_code) {
		case EC_SEG_B2B:
			retval = tpc_process_b2b(list);
			break;
		case EC_SEG_VERIFY:
			retval = tpc_process_verify(list);
			break;
		case EC_SEG_REGISTER_KEY:
			retval = tpc_process_register_key(list);
			break;
		default:
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
			goto done;
		}
		if (retval == CTL_RETVAL_QUEUED)
			return;
		if (retval == CTL_RETVAL_ERROR) {
			list->error = 1;
			goto done;
		}
		list->curseg++;
		list->stage = 0;
	}

	ctl_set_success(ctsio);

done:
//printf("ZZZ done\n");
	free(list->params, M_CTL);
	list->params = NULL;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	} else {
		list->completed = 1;
		list->sense_data = ctsio->sense_data;
		list->sense_len = ctsio->sense_len;
		list->scsi_status = ctsio->scsi_status;
	}
	mtx_unlock(&lun->lun_lock);

	ctl_done((union ctl_io *)ctsio);
}

/*
 * For any sort of check condition, busy, etc., we just retry.  We do not
 * decrement the retry count for unit attention type errors.  These are
 * normal, and we want to save the retry count for "real" errors.  Otherwise,
 * we could end up with situations where a command will succeed in some
 * situations and fail in others, depending on whether a unit attention is
 * pending.  Also, some of our error recovery actions, most notably the
 * LUN reset action, will cause a unit attention.
 *
 * We can add more detail here later if necessary.
 */
static tpc_error_action
tpc_checkcond_parse(union ctl_io *io)
{
	tpc_error_action error_action;
	int error_code, sense_key, asc, ascq;

	/*
	 * Default to retrying the command.
	 */
	error_action = TPC_ERR_RETRY;

	scsi_extract_sense_len(&io->scsiio.sense_data,
	    io->scsiio.sense_len,
	    &error_code,
	    &sense_key,
	    &asc,
	    &ascq,
	    /*show_errors*/ 1);

	switch (error_code) {
	case SSD_DEFERRED_ERROR:
	case SSD_DESC_DEFERRED_ERROR:
		error_action |= TPC_ERR_NO_DECREMENT;
		break;
	case SSD_CURRENT_ERROR:
	case SSD_DESC_CURRENT_ERROR:
	default:
		switch (sense_key) {
		case SSD_KEY_UNIT_ATTENTION:
			error_action |= TPC_ERR_NO_DECREMENT;
			break;
		case SSD_KEY_HARDWARE_ERROR:
			/*
			 * This is our generic "something bad happened"
			 * error code.  It often isn't recoverable.
			 */
			if ((asc == 0x44) && (ascq == 0x00))
				error_action = TPC_ERR_FAIL;
			break;
		case SSD_KEY_NOT_READY:
			/*
			 * If the LUN is powered down, there likely isn't
			 * much point in retrying right now.
			 */
			if ((asc == 0x04) && (ascq == 0x02))
				error_action = TPC_ERR_FAIL;
			/*
			 * If the LUN is offline, there probably isn't much
			 * point in retrying, either.
			 */
			if ((asc == 0x04) && (ascq == 0x03))
				error_action = TPC_ERR_FAIL;
			break;
		}
	}
	return (error_action);
}
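
/*
 * Classify a completed I/O into a tpc_error_action.  Only SCSI check
 * conditions get the detailed treatment above; anything else defaults
 * to a plain retry.
 */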
static tpc_error_action
tpc_error_parse(union ctl_io *io)
{
	tpc_error_action error_action = TPC_ERR_RETRY;

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		switch (io->io_hdr.status & CTL_STATUS_MASK) {
		case CTL_SCSI_ERROR:
			switch (io->scsiio.scsi_status) {
			case SCSI_STATUS_CHECK_COND:
				error_action = tpc_checkcond_parse(io);
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
		break;
	case CTL_IO_TASK:
		break;
	default:
		panic("%s: invalid ctl_io type %d\n", __func__,
		    io->io_hdr.io_type);
		break;
	}
	return (error_action);
}

void
tpc_done(union ctl_io *io)
{
	struct tpc_io *tio, *tior;

	/*
	 * Very minimal retry logic.  We basically retry if we got an error
	 * back, and the retry count is greater than 0.  If we ever want
	 * more sophisticated initiator type behavior, the CAM error
	 * recovery code in ../common might be helpful.
	 */
//	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
//		ctl_io_error_print(io, NULL);
	tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
	if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
	 && (io->io_hdr.retries > 0)) {
		ctl_io_status old_status;
		tpc_error_action error_action;

		error_action = tpc_error_parse(io);
		switch (error_action & TPC_ERR_MASK) {
		case TPC_ERR_FAIL:
			break;
		case TPC_ERR_RETRY:
		default:
			if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
				io->io_hdr.retries--;
			old_status = io->io_hdr.status;
			io->io_hdr.status = CTL_STATUS_NONE;
			io->io_hdr.flags &= ~CTL_FLAG_ABORT;
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
				printf("%s: error returned from ctl_queue()!\n",
				    __func__);
				io->io_hdr.status = old_status;
			} else
				return;
		}
	}

	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
		tio->list->error = 1;
	else
		atomic_add_int(&tio->list->curops, 1);
	if (!tio->list->error && !tio->list->abort) {
		while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
			TAILQ_REMOVE(&tio->run, tior, rlinks);
			atomic_add_int(&tio->list->tbdio, 1);
			if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
				panic("tpcl_queue() error");
		}
	}
	if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
		tpc_process(tio->list);
}
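
/*
 * EXTENDED COPY (LID1).  The parameter list arrives via the usual
 * two-pass config-move pattern: on the first pass the buffer is
 * allocated and ctl_datamove() fetches the data; on re-entry with
 * CTL_FLAG_ALLOCATED set we parse the CSCD and segment descriptors in
 * place, register the tpc_list on the LUN and start processing.
 */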
int
ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
{
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid1_data *data;
	struct ctl_lun *lun;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
	    len > sizeof(struct scsi_extended_copy_lid1_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr;
	lencscd = scsi_2btoul(data->cscd_list_length);
	lenseg = scsi_4btoul(data->segment_list_length);
	leninl = scsi_4btoul(data->inline_data_length);
	if (len < sizeof(struct scsi_extended_copy_lid1_data) +
	    lencscd + lenseg + leninl ||
	    leninl > TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
		goto done;
	}
	if (lencscd + lenseg > TPC_MAX_LIST) {
		ctl_set_param_len_error(ctsio);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
	if (value != NULL && strcmp(value, "on") == 0)
		list->init_port = -1;
	else
		list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	list->list_id = data->list_identifier;
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
	ptr = &data->data[lencscd];
	for (nseg = 0, off = 0; off < lenseg; nseg++) {
		if (nseg >= TPC_MAX_SEGS) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
			goto done;
		}
		list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
		off += sizeof(struct scsi_ec_segment) +
		    scsi_2btoul(list->seg[nseg]->descr_length);
	}
	list->inl = &data->data[lencscd + lenseg];
	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
	list->nseg = nseg;
	list->leninl = leninl;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
		if (tlist != NULL && !tlist->completed) {
			mtx_unlock(&lun->lun_lock);
			free(list, M_CTL);
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
			    /*bit*/ 0);
			goto done;
		}
		if (tlist != NULL) {
			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
			free(tlist, M_CTL);
		}
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
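
/*
 * EXTENDED COPY (LID4) differs from LID1 mainly in the parameter list
 * header: a 4-byte list identifier and 2-byte segment and inline data
 * list lengths.  The descriptor parsing and list bookkeeping below
 * mirror the LID1 path.
 */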
int
ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
{
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid4_data *data;
	struct ctl_lun *lun;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
	    len > sizeof(struct scsi_extended_copy_lid4_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr;
	lencscd = scsi_2btoul(data->cscd_list_length);
	lenseg = scsi_2btoul(data->segment_list_length);
	leninl = scsi_2btoul(data->inline_data_length);
	if (len < sizeof(struct scsi_extended_copy_lid4_data) +
	    lencscd + lenseg + leninl ||
	    leninl > TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
		goto done;
	}
	if (lencscd + lenseg > TPC_MAX_LIST) {
		ctl_set_param_len_error(ctsio);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
	if (value != NULL && strcmp(value, "on") == 0)
		list->init_port = -1;
	else
		list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(data->list_identifier);
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
	ptr = &data->data[lencscd];
	for (nseg = 0, off = 0; off < lenseg; nseg++) {
		if (nseg >= TPC_MAX_SEGS) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
			goto done;
		}
		list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
		off += sizeof(struct scsi_ec_segment) +
		    scsi_2btoul(list->seg[nseg]->descr_length);
	}
	list->inl = &data->data[lencscd + lenseg];
	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
	list->nseg = nseg;
	list->leninl = leninl;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
		if (tlist != NULL && !tlist->completed) {
			mtx_unlock(&lun->lun_lock);
			free(list, M_CTL);
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
			    /*bit*/ 0);
			goto done;
		}
		if (tlist != NULL) {
			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
			free(tlist, M_CTL);
		}
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}