1 /*- 2 * Copyright (c) 2003-2009 Silicon Graphics International Corp. 3 * Copyright (c) 2012 The FreeBSD Foundation 4 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 17 * substantially similar to the "NO WARRANTY" disclaimer below 18 * ("Disclaimer") and any redistribution must be conditioned upon 19 * including a substantially similar Disclaimer requirement for further 20 * binary redistribution. 21 * 22 * NO WARRANTY 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGES. 34 * 35 * $Id$ 36 */ 37 /* 38 * CAM Target Layer, a SCSI device emulation subsystem. 39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/ctype.h> 49 #include <sys/kernel.h> 50 #include <sys/types.h> 51 #include <sys/kthread.h> 52 #include <sys/bio.h> 53 #include <sys/fcntl.h> 54 #include <sys/lock.h> 55 #include <sys/module.h> 56 #include <sys/mutex.h> 57 #include <sys/condvar.h> 58 #include <sys/malloc.h> 59 #include <sys/conf.h> 60 #include <sys/ioccom.h> 61 #include <sys/queue.h> 62 #include <sys/sbuf.h> 63 #include <sys/smp.h> 64 #include <sys/endian.h> 65 #include <sys/sysctl.h> 66 #include <vm/uma.h> 67 68 #include <cam/cam.h> 69 #include <cam/scsi/scsi_all.h> 70 #include <cam/scsi/scsi_cd.h> 71 #include <cam/scsi/scsi_da.h> 72 #include <cam/ctl/ctl_io.h> 73 #include <cam/ctl/ctl.h> 74 #include <cam/ctl/ctl_frontend.h> 75 #include <cam/ctl/ctl_util.h> 76 #include <cam/ctl/ctl_backend.h> 77 #include <cam/ctl/ctl_ioctl.h> 78 #include <cam/ctl/ctl_ha.h> 79 #include <cam/ctl/ctl_private.h> 80 #include <cam/ctl/ctl_debug.h> 81 #include <cam/ctl/ctl_scsi_all.h> 82 #include <cam/ctl/ctl_error.h> 83 84 struct ctl_softc *control_softc = NULL; 85 86 /* 87 * Template mode pages. 88 */ 89 90 /* 91 * Note that these are default values only. The actual values will be 92 * filled in when the user does a mode sense. 93 */ 94 const static struct scsi_da_rw_recovery_page rw_er_page_default = { 95 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 96 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 97 /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, 98 /*read_retry_count*/0, 99 /*correction_span*/0, 100 /*head_offset_count*/0, 101 /*data_strobe_offset_cnt*/0, 102 /*byte8*/SMS_RWER_LBPERE, 103 /*write_retry_count*/0, 104 /*reserved2*/0, 105 /*recovery_time_limit*/{0, 0}, 106 }; 107 108 const static struct scsi_da_rw_recovery_page rw_er_page_changeable = { 109 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 110 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 111 /*byte3*/SMS_RWER_PER, 112 /*read_retry_count*/0, 113 /*correction_span*/0, 114 /*head_offset_count*/0, 115 /*data_strobe_offset_cnt*/0, 116 /*byte8*/SMS_RWER_LBPERE, 117 /*write_retry_count*/0, 118 /*reserved2*/0, 119 /*recovery_time_limit*/{0, 0}, 120 }; 121 122 const static struct scsi_format_page format_page_default = { 123 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 124 /*page_length*/sizeof(struct scsi_format_page) - 2, 125 /*tracks_per_zone*/ {0, 0}, 126 /*alt_sectors_per_zone*/ {0, 0}, 127 /*alt_tracks_per_zone*/ {0, 0}, 128 /*alt_tracks_per_lun*/ {0, 0}, 129 /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, 130 CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, 131 /*bytes_per_sector*/ {0, 0}, 132 /*interleave*/ {0, 0}, 133 /*track_skew*/ {0, 0}, 134 /*cylinder_skew*/ {0, 0}, 135 /*flags*/ SFP_HSEC, 136 /*reserved*/ {0, 0, 0} 137 }; 138 139 const static struct scsi_format_page format_page_changeable = { 140 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 141 /*page_length*/sizeof(struct scsi_format_page) - 2, 142 /*tracks_per_zone*/ {0, 0}, 143 /*alt_sectors_per_zone*/ {0, 0}, 144 /*alt_tracks_per_zone*/ {0, 0}, 145 /*alt_tracks_per_lun*/ {0, 0}, 146 /*sectors_per_track*/ {0, 0}, 147 /*bytes_per_sector*/ {0, 0}, 148 /*interleave*/ {0, 0}, 149 /*track_skew*/ {0, 0}, 150 /*cylinder_skew*/ {0, 0}, 151 /*flags*/ 0, 152 /*reserved*/ {0, 0, 0} 153 }; 154 155 const static struct scsi_rigid_disk_page rigid_disk_page_default = { 156 /*page_code*/SMS_RIGID_DISK_PAGE, 157 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 158 /*cylinders*/ {0, 0, 0}, 159 /*heads*/ CTL_DEFAULT_HEADS, 160 /*start_write_precomp*/ {0, 0, 0}, 161 /*start_reduced_current*/ {0, 0, 0}, 162 /*step_rate*/ {0, 0}, 163 /*landing_zone_cylinder*/ {0, 0, 0}, 164 /*rpl*/ SRDP_RPL_DISABLED, 165 /*rotational_offset*/ 0, 166 /*reserved1*/ 0, 167 /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, 168 CTL_DEFAULT_ROTATION_RATE & 0xff}, 169 /*reserved2*/ {0, 0} 170 }; 171 172 const static struct scsi_rigid_disk_page rigid_disk_page_changeable = { 173 /*page_code*/SMS_RIGID_DISK_PAGE, 174 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 175 /*cylinders*/ {0, 0, 0}, 176 /*heads*/ 0, 177 /*start_write_precomp*/ {0, 0, 0}, 178 /*start_reduced_current*/ {0, 0, 0}, 179 /*step_rate*/ {0, 0}, 180 /*landing_zone_cylinder*/ {0, 0, 0}, 181 /*rpl*/ 0, 182 /*rotational_offset*/ 0, 183 /*reserved1*/ 0, 184 /*rotation_rate*/ {0, 0}, 185 /*reserved2*/ {0, 0} 186 }; 187 188 const static struct scsi_da_verify_recovery_page verify_er_page_default = { 189 /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, 190 /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, 191 /*byte3*/0, 192 /*read_retry_count*/0, 193 /*reserved*/{ 0, 0, 0, 0, 0, 0 }, 194 /*recovery_time_limit*/{0, 0}, 195 }; 196 197 const static struct scsi_da_verify_recovery_page verify_er_page_changeable = { 198 /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, 199 /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, 200 /*byte3*/SMS_VER_PER, 201 /*read_retry_count*/0, 202 /*reserved*/{ 0, 0, 0, 0, 0, 0 }, 203 /*recovery_time_limit*/{0, 0}, 204 }; 205 206 const static struct scsi_caching_page caching_page_default = { 207 /*page_code*/SMS_CACHING_PAGE, 208 /*page_length*/sizeof(struct scsi_caching_page) - 2, 209 /*flags1*/ SCP_DISC | SCP_WCE, 210 /*ret_priority*/ 0, 211 /*disable_pf_transfer_len*/ {0xff, 0xff}, 212 /*min_prefetch*/ {0, 0}, 213 /*max_prefetch*/ {0xff, 0xff}, 214 /*max_pf_ceiling*/ {0xff, 0xff}, 215 /*flags2*/ 0, 216 /*cache_segments*/ 0, 217 /*cache_seg_size*/ {0, 0}, 218 /*reserved*/ 0, 219 /*non_cache_seg_size*/ {0, 0, 0} 220 }; 221 222 const static struct scsi_caching_page caching_page_changeable = { 223 /*page_code*/SMS_CACHING_PAGE, 224 /*page_length*/sizeof(struct scsi_caching_page) - 2, 225 /*flags1*/ SCP_WCE | SCP_RCD, 226 /*ret_priority*/ 0, 227 /*disable_pf_transfer_len*/ {0, 0}, 228 /*min_prefetch*/ {0, 0}, 229 /*max_prefetch*/ {0, 0}, 230 /*max_pf_ceiling*/ {0, 0}, 231 /*flags2*/ 0, 232 /*cache_segments*/ 0, 233 /*cache_seg_size*/ {0, 0}, 234 /*reserved*/ 0, 235 /*non_cache_seg_size*/ {0, 0, 0} 236 }; 237 238 const static struct scsi_control_page control_page_default = { 239 /*page_code*/SMS_CONTROL_MODE_PAGE, 240 /*page_length*/sizeof(struct scsi_control_page) - 2, 241 /*rlec*/0, 242 /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, 243 /*eca_and_aen*/0, 244 /*flags4*/SCP_TAS, 245 /*aen_holdoff_period*/{0, 0}, 246 /*busy_timeout_period*/{0, 0}, 247 /*extended_selftest_completion_time*/{0, 0} 248 }; 249 250 const static struct scsi_control_page control_page_changeable = { 251 /*page_code*/SMS_CONTROL_MODE_PAGE, 252 /*page_length*/sizeof(struct scsi_control_page) - 2, 253 /*rlec*/SCP_DSENSE, 254 /*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR, 255 /*eca_and_aen*/SCP_SWP, 256 /*flags4*/0, 257 /*aen_holdoff_period*/{0, 0}, 258 /*busy_timeout_period*/{0, 0}, 259 /*extended_selftest_completion_time*/{0, 0} 260 }; 261 262 #define CTL_CEM_LEN (sizeof(struct scsi_control_ext_page) - 4) 263 264 const static struct scsi_control_ext_page control_ext_page_default = { 265 /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, 266 /*subpage_code*/0x01, 267 /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, 268 /*flags*/0, 269 /*prio*/0, 270 /*max_sense*/0 271 }; 272 273 const static struct scsi_control_ext_page control_ext_page_changeable = { 274 /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, 275 /*subpage_code*/0x01, 276 /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, 277 /*flags*/0, 278 /*prio*/0, 279 /*max_sense*/0xff 280 }; 281 282 const static struct scsi_info_exceptions_page ie_page_default = { 283 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 284 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 285 /*info_flags*/SIEP_FLAGS_EWASC, 286 /*mrie*/SIEP_MRIE_NO, 287 /*interval_timer*/{0, 0, 0, 0}, 288 /*report_count*/{0, 0, 0, 1} 289 }; 290 291 const static struct scsi_info_exceptions_page ie_page_changeable = { 292 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 293 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 294 /*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST | 295 SIEP_FLAGS_LOGERR, 296 /*mrie*/0x0f, 297 /*interval_timer*/{0xff, 0xff, 0xff, 0xff}, 298 /*report_count*/{0xff, 0xff, 0xff, 0xff} 299 }; 300 301 #define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4) 302 303 const static struct ctl_logical_block_provisioning_page lbp_page_default = {{ 304 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 305 /*subpage_code*/0x02, 306 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 307 /*flags*/0, 308 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 309 /*descr*/{}}, 310 {{/*flags*/0, 311 /*resource*/0x01, 312 /*reserved*/{0, 0}, 313 /*count*/{0, 0, 0, 0}}, 314 {/*flags*/0, 315 /*resource*/0x02, 316 /*reserved*/{0, 0}, 317 /*count*/{0, 0, 0, 0}}, 318 {/*flags*/0, 319 /*resource*/0xf1, 320 /*reserved*/{0, 0}, 321 /*count*/{0, 0, 0, 0}}, 322 {/*flags*/0, 323 /*resource*/0xf2, 324 /*reserved*/{0, 0}, 325 /*count*/{0, 0, 0, 0}} 326 } 327 }; 328 329 const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{ 330 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 331 /*subpage_code*/0x02, 332 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 333 /*flags*/SLBPP_SITUA, 334 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 335 /*descr*/{}}, 336 {{/*flags*/0, 337 /*resource*/0, 338 /*reserved*/{0, 0}, 339 /*count*/{0, 0, 0, 0}}, 340 {/*flags*/0, 341 /*resource*/0, 342 /*reserved*/{0, 0}, 343 /*count*/{0, 0, 0, 0}}, 344 {/*flags*/0, 345 /*resource*/0, 346 /*reserved*/{0, 0}, 347 /*count*/{0, 0, 0, 0}}, 348 {/*flags*/0, 349 /*resource*/0, 350 /*reserved*/{0, 0}, 351 /*count*/{0, 0, 0, 0}} 352 } 353 }; 354 355 const static struct scsi_cddvd_capabilities_page cddvd_page_default = { 356 /*page_code*/SMS_CDDVD_CAPS_PAGE, 357 /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, 358 /*caps1*/0x3f, 359 /*caps2*/0x00, 360 /*caps3*/0xf0, 361 /*caps4*/0x00, 362 /*caps5*/0x29, 363 /*caps6*/0x00, 364 /*obsolete*/{0, 0}, 365 /*nvol_levels*/{0, 0}, 366 /*buffer_size*/{8, 0}, 367 /*obsolete2*/{0, 0}, 368 /*reserved*/0, 369 /*digital*/0, 370 /*obsolete3*/0, 371 /*copy_management*/0, 372 /*reserved2*/0, 373 /*rotation_control*/0, 374 /*cur_write_speed*/0, 375 /*num_speed_descr*/0, 376 }; 377 378 const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = { 379 /*page_code*/SMS_CDDVD_CAPS_PAGE, 380 /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, 381 /*caps1*/0, 382 /*caps2*/0, 383 /*caps3*/0, 384 /*caps4*/0, 385 /*caps5*/0, 386 /*caps6*/0, 387 /*obsolete*/{0, 0}, 388 /*nvol_levels*/{0, 0}, 389 /*buffer_size*/{0, 0}, 390 /*obsolete2*/{0, 0}, 391 /*reserved*/0, 392 /*digital*/0, 393 /*obsolete3*/0, 394 /*copy_management*/0, 395 /*reserved2*/0, 396 /*rotation_control*/0, 397 /*cur_write_speed*/0, 398 /*num_speed_descr*/0, 399 }; 400 401 SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer"); 402 static int worker_threads = -1; 403 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, 404 &worker_threads, 1, "Number of worker threads"); 405 static int ctl_debug = CTL_DEBUG_NONE; 406 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN, 407 &ctl_debug, 0, "Enabled debug flags"); 408 static int ctl_lun_map_size = 1024; 409 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN, 410 &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)"); 411 #ifdef CTL_TIME_IO 412 static int ctl_time_io_secs = CTL_TIME_IO_DEFAULT_SECS; 413 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, time_io_secs, CTLFLAG_RWTUN, 414 &ctl_time_io_secs, 0, "Log requests taking more seconds"); 415 #endif 416 417 /* 418 * Maximum number of LUNs we support. MUST be a power of 2. 419 */ 420 #define CTL_DEFAULT_MAX_LUNS 1024 421 static int ctl_max_luns = CTL_DEFAULT_MAX_LUNS; 422 TUNABLE_INT("kern.cam.ctl.max_luns", &ctl_max_luns); 423 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_luns, CTLFLAG_RDTUN, 424 &ctl_max_luns, CTL_DEFAULT_MAX_LUNS, "Maximum number of LUNs"); 425 426 /* 427 * Maximum number of ports registered at one time. 428 */ 429 #define CTL_DEFAULT_MAX_PORTS 256 430 static int ctl_max_ports = CTL_DEFAULT_MAX_PORTS; 431 TUNABLE_INT("kern.cam.ctl.max_ports", &ctl_max_ports); 432 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_ports, CTLFLAG_RDTUN, 433 &ctl_max_ports, CTL_DEFAULT_MAX_LUNS, "Maximum number of ports"); 434 435 /* 436 * Maximum number of initiators we support. 437 */ 438 #define CTL_MAX_INITIATORS (CTL_MAX_INIT_PER_PORT * ctl_max_ports) 439 440 /* 441 * Supported pages (0x00), Serial number (0x80), Device ID (0x83), 442 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87), 443 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0), 444 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2) 445 */ 446 #define SCSI_EVPD_NUM_SUPPORTED_PAGES 10 447 448 static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event, 449 int param); 450 static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); 451 static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest); 452 static int ctl_init(void); 453 static int ctl_shutdown(void); 454 static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); 455 static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); 456 static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio); 457 static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 458 struct ctl_ooa *ooa_hdr, 459 struct ctl_ooa_entry *kern_entries); 460 static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 461 struct thread *td); 462 static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 463 struct ctl_be_lun *be_lun); 464 static int ctl_free_lun(struct ctl_lun *lun); 465 static void ctl_create_lun(struct ctl_be_lun *be_lun); 466 467 static int ctl_do_mode_select(union ctl_io *io); 468 static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, 469 uint64_t res_key, uint64_t sa_res_key, 470 uint8_t type, uint32_t residx, 471 struct ctl_scsiio *ctsio, 472 struct scsi_per_res_out *cdb, 473 struct scsi_per_res_out_parms* param); 474 static void ctl_pro_preempt_other(struct ctl_lun *lun, 475 union ctl_ha_msg *msg); 476 static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io); 477 static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len); 478 static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len); 479 static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len); 480 static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len); 481 static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len); 482 static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, 483 int alloc_len); 484 static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, 485 int alloc_len); 486 static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len); 487 static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len); 488 static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio); 489 static int ctl_inquiry_std(struct ctl_scsiio *ctsio); 490 static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len); 491 static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, 492 bool seq); 493 static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2); 494 static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, 495 union ctl_io *pending_io, union ctl_io *ooa_io); 496 static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 497 union ctl_io *starting_io); 498 static int ctl_check_blocked(struct ctl_lun *lun); 499 static int ctl_scsiio_lun_check(struct ctl_lun *lun, 500 const struct ctl_cmd_entry *entry, 501 struct ctl_scsiio *ctsio); 502 static void ctl_failover_lun(union ctl_io *io); 503 static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, 504 struct ctl_scsiio *ctsio); 505 static int ctl_scsiio(struct ctl_scsiio *ctsio); 506 507 static int ctl_target_reset(union ctl_io *io); 508 static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, 509 ctl_ua_type ua_type); 510 static int ctl_lun_reset(union ctl_io *io); 511 static int ctl_abort_task(union ctl_io *io); 512 static int ctl_abort_task_set(union ctl_io *io); 513 static int ctl_query_task(union ctl_io *io, int task_set); 514 static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, 515 ctl_ua_type ua_type); 516 static int ctl_i_t_nexus_reset(union ctl_io *io); 517 static int ctl_query_async_event(union ctl_io *io); 518 static void ctl_run_task(union ctl_io *io); 519 #ifdef CTL_IO_DELAY 520 static void ctl_datamove_timer_wakeup(void *arg); 521 static void ctl_done_timer_wakeup(void *arg); 522 #endif /* CTL_IO_DELAY */ 523 524 static void ctl_send_datamove_done(union ctl_io *io, int have_lock); 525 static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq); 526 static int ctl_datamove_remote_dm_write_cb(union ctl_io *io); 527 static void ctl_datamove_remote_write(union ctl_io *io); 528 static int ctl_datamove_remote_dm_read_cb(union ctl_io *io); 529 static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq); 530 static int ctl_datamove_remote_sgl_setup(union ctl_io *io); 531 static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 532 ctl_ha_dt_cb callback); 533 static void ctl_datamove_remote_read(union ctl_io *io); 534 static void ctl_datamove_remote(union ctl_io *io); 535 static void ctl_process_done(union ctl_io *io); 536 static void ctl_lun_thread(void *arg); 537 static void ctl_thresh_thread(void *arg); 538 static void ctl_work_thread(void *arg); 539 static void ctl_enqueue_incoming(union ctl_io *io); 540 static void ctl_enqueue_rtr(union ctl_io *io); 541 static void ctl_enqueue_done(union ctl_io *io); 542 static void ctl_enqueue_isc(union ctl_io *io); 543 static const struct ctl_cmd_entry * 544 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); 545 static const struct ctl_cmd_entry * 546 ctl_validate_command(struct ctl_scsiio *ctsio); 547 static int ctl_cmd_applicable(uint8_t lun_type, 548 const struct ctl_cmd_entry *entry); 549 static int ctl_ha_init(void); 550 static int ctl_ha_shutdown(void); 551 552 static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx); 553 static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx); 554 static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx); 555 static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key); 556 557 /* 558 * Load the serialization table. This isn't very pretty, but is probably 559 * the easiest way to do it. 560 */ 561 #include "ctl_ser_table.c" 562 563 /* 564 * We only need to define open, close and ioctl routines for this driver. 565 */ 566 static struct cdevsw ctl_cdevsw = { 567 .d_version = D_VERSION, 568 .d_flags = 0, 569 .d_open = ctl_open, 570 .d_close = ctl_close, 571 .d_ioctl = ctl_ioctl, 572 .d_name = "ctl", 573 }; 574 575 576 MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL"); 577 578 static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *); 579 580 static moduledata_t ctl_moduledata = { 581 "ctl", 582 ctl_module_event_handler, 583 NULL 584 }; 585 586 DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); 587 MODULE_VERSION(ctl, 1); 588 589 static struct ctl_frontend ha_frontend = 590 { 591 .name = "ha", 592 .init = ctl_ha_init, 593 .shutdown = ctl_ha_shutdown, 594 }; 595 596 static int 597 ctl_ha_init(void) 598 { 599 struct ctl_softc *softc = control_softc; 600 601 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, 602 &softc->othersc_pool) != 0) 603 return (ENOMEM); 604 if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) { 605 ctl_pool_free(softc->othersc_pool); 606 return (EIO); 607 } 608 if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler) 609 != CTL_HA_STATUS_SUCCESS) { 610 ctl_ha_msg_destroy(softc); 611 ctl_pool_free(softc->othersc_pool); 612 return (EIO); 613 } 614 return (0); 615 }; 616 617 static int 618 ctl_ha_shutdown(void) 619 { 620 struct ctl_softc *softc = control_softc; 621 struct ctl_port *port; 622 623 ctl_ha_msg_shutdown(softc); 624 if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS) 625 return (EIO); 626 if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS) 627 return (EIO); 628 ctl_pool_free(softc->othersc_pool); 629 while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) { 630 ctl_port_deregister(port); 631 free(port->port_name, M_CTL); 632 free(port, M_CTL); 633 } 634 return (0); 635 }; 636 637 static void 638 ctl_ha_datamove(union ctl_io *io) 639 { 640 struct ctl_lun *lun = CTL_LUN(io); 641 struct ctl_sg_entry *sgl; 642 union ctl_ha_msg msg; 643 uint32_t sg_entries_sent; 644 int do_sg_copy, i, j; 645 646 memset(&msg.dt, 0, sizeof(msg.dt)); 647 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 648 msg.hdr.original_sc = io->io_hdr.original_sc; 649 msg.hdr.serializing_sc = io; 650 msg.hdr.nexus = io->io_hdr.nexus; 651 msg.hdr.status = io->io_hdr.status; 652 msg.dt.flags = io->io_hdr.flags; 653 654 /* 655 * We convert everything into a S/G list here. We can't 656 * pass by reference, only by value between controllers. 657 * So we can't pass a pointer to the S/G list, only as many 658 * S/G entries as we can fit in here. If it's possible for 659 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 660 * then we need to break this up into multiple transfers. 661 */ 662 if (io->scsiio.kern_sg_entries == 0) { 663 msg.dt.kern_sg_entries = 1; 664 #if 0 665 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 666 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; 667 } else { 668 /* XXX KDM use busdma here! */ 669 msg.dt.sg_list[0].addr = 670 (void *)vtophys(io->scsiio.kern_data_ptr); 671 } 672 #else 673 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 674 ("HA does not support BUS_ADDR")); 675 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; 676 #endif 677 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 678 do_sg_copy = 0; 679 } else { 680 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 681 do_sg_copy = 1; 682 } 683 684 msg.dt.kern_data_len = io->scsiio.kern_data_len; 685 msg.dt.kern_total_len = io->scsiio.kern_total_len; 686 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 687 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 688 msg.dt.sg_sequence = 0; 689 690 /* 691 * Loop until we've sent all of the S/G entries. On the 692 * other end, we'll recompose these S/G entries into one 693 * contiguous list before processing. 694 */ 695 for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries; 696 msg.dt.sg_sequence++) { 697 msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) / 698 sizeof(msg.dt.sg_list[0])), 699 msg.dt.kern_sg_entries - sg_entries_sent); 700 if (do_sg_copy != 0) { 701 sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 702 for (i = sg_entries_sent, j = 0; 703 i < msg.dt.cur_sg_entries; i++, j++) { 704 #if 0 705 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 706 msg.dt.sg_list[j].addr = sgl[i].addr; 707 } else { 708 /* XXX KDM use busdma here! */ 709 msg.dt.sg_list[j].addr = 710 (void *)vtophys(sgl[i].addr); 711 } 712 #else 713 KASSERT((io->io_hdr.flags & 714 CTL_FLAG_BUS_ADDR) == 0, 715 ("HA does not support BUS_ADDR")); 716 msg.dt.sg_list[j].addr = sgl[i].addr; 717 #endif 718 msg.dt.sg_list[j].len = sgl[i].len; 719 } 720 } 721 722 sg_entries_sent += msg.dt.cur_sg_entries; 723 msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries); 724 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 725 sizeof(msg.dt) - sizeof(msg.dt.sg_list) + 726 sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries, 727 M_WAITOK) > CTL_HA_STATUS_SUCCESS) { 728 io->io_hdr.port_status = 31341; 729 io->scsiio.be_move_done(io); 730 return; 731 } 732 msg.dt.sent_sg_entries = sg_entries_sent; 733 } 734 735 /* 736 * Officially handover the request from us to peer. 737 * If failover has just happened, then we must return error. 738 * If failover happen just after, then it is not our problem. 739 */ 740 if (lun) 741 mtx_lock(&lun->lun_lock); 742 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 743 if (lun) 744 mtx_unlock(&lun->lun_lock); 745 io->io_hdr.port_status = 31342; 746 io->scsiio.be_move_done(io); 747 return; 748 } 749 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 750 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 751 if (lun) 752 mtx_unlock(&lun->lun_lock); 753 } 754 755 static void 756 ctl_ha_done(union ctl_io *io) 757 { 758 union ctl_ha_msg msg; 759 760 if (io->io_hdr.io_type == CTL_IO_SCSI) { 761 memset(&msg, 0, sizeof(msg)); 762 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 763 msg.hdr.original_sc = io->io_hdr.original_sc; 764 msg.hdr.nexus = io->io_hdr.nexus; 765 msg.hdr.status = io->io_hdr.status; 766 msg.scsi.scsi_status = io->scsiio.scsi_status; 767 msg.scsi.tag_num = io->scsiio.tag_num; 768 msg.scsi.tag_type = io->scsiio.tag_type; 769 msg.scsi.sense_len = io->scsiio.sense_len; 770 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 771 io->scsiio.sense_len); 772 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 773 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 774 msg.scsi.sense_len, M_WAITOK); 775 } 776 ctl_free_io(io); 777 } 778 779 static void 780 ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, 781 union ctl_ha_msg *msg_info) 782 { 783 struct ctl_scsiio *ctsio; 784 785 if (msg_info->hdr.original_sc == NULL) { 786 printf("%s: original_sc == NULL!\n", __func__); 787 /* XXX KDM now what? */ 788 return; 789 } 790 791 ctsio = &msg_info->hdr.original_sc->scsiio; 792 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 793 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 794 ctsio->io_hdr.status = msg_info->hdr.status; 795 ctsio->scsi_status = msg_info->scsi.scsi_status; 796 ctsio->sense_len = msg_info->scsi.sense_len; 797 memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, 798 msg_info->scsi.sense_len); 799 ctl_enqueue_isc((union ctl_io *)ctsio); 800 } 801 802 static void 803 ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, 804 union ctl_ha_msg *msg_info) 805 { 806 struct ctl_scsiio *ctsio; 807 808 if (msg_info->hdr.serializing_sc == NULL) { 809 printf("%s: serializing_sc == NULL!\n", __func__); 810 /* XXX KDM now what? */ 811 return; 812 } 813 814 ctsio = &msg_info->hdr.serializing_sc->scsiio; 815 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 816 ctl_enqueue_isc((union ctl_io *)ctsio); 817 } 818 819 void 820 ctl_isc_announce_lun(struct ctl_lun *lun) 821 { 822 struct ctl_softc *softc = lun->ctl_softc; 823 union ctl_ha_msg *msg; 824 struct ctl_ha_msg_lun_pr_key pr_key; 825 int i, k; 826 827 if (softc->ha_link != CTL_HA_LINK_ONLINE) 828 return; 829 mtx_lock(&lun->lun_lock); 830 i = sizeof(msg->lun); 831 if (lun->lun_devid) 832 i += lun->lun_devid->len; 833 i += sizeof(pr_key) * lun->pr_key_count; 834 alloc: 835 mtx_unlock(&lun->lun_lock); 836 msg = malloc(i, M_CTL, M_WAITOK); 837 mtx_lock(&lun->lun_lock); 838 k = sizeof(msg->lun); 839 if (lun->lun_devid) 840 k += lun->lun_devid->len; 841 k += sizeof(pr_key) * lun->pr_key_count; 842 if (i < k) { 843 free(msg, M_CTL); 844 i = k; 845 goto alloc; 846 } 847 bzero(&msg->lun, sizeof(msg->lun)); 848 msg->hdr.msg_type = CTL_MSG_LUN_SYNC; 849 msg->hdr.nexus.targ_lun = lun->lun; 850 msg->hdr.nexus.targ_mapped_lun = lun->lun; 851 msg->lun.flags = lun->flags; 852 msg->lun.pr_generation = lun->pr_generation; 853 msg->lun.pr_res_idx = lun->pr_res_idx; 854 msg->lun.pr_res_type = lun->pr_res_type; 855 msg->lun.pr_key_count = lun->pr_key_count; 856 i = 0; 857 if (lun->lun_devid) { 858 msg->lun.lun_devid_len = lun->lun_devid->len; 859 memcpy(&msg->lun.data[i], lun->lun_devid->data, 860 msg->lun.lun_devid_len); 861 i += msg->lun.lun_devid_len; 862 } 863 for (k = 0; k < CTL_MAX_INITIATORS; k++) { 864 if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0) 865 continue; 866 pr_key.pr_iid = k; 867 memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key)); 868 i += sizeof(pr_key); 869 } 870 mtx_unlock(&lun->lun_lock); 871 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 872 M_WAITOK); 873 free(msg, M_CTL); 874 875 if (lun->flags & CTL_LUN_PRIMARY_SC) { 876 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 877 ctl_isc_announce_mode(lun, -1, 878 lun->mode_pages.index[i].page_code & SMPH_PC_MASK, 879 lun->mode_pages.index[i].subpage); 880 } 881 } 882 } 883 884 void 885 ctl_isc_announce_port(struct ctl_port *port) 886 { 887 struct ctl_softc *softc = port->ctl_softc; 888 union ctl_ha_msg *msg; 889 int i; 890 891 if (port->targ_port < softc->port_min || 892 port->targ_port >= softc->port_max || 893 softc->ha_link != CTL_HA_LINK_ONLINE) 894 return; 895 i = sizeof(msg->port) + strlen(port->port_name) + 1; 896 if (port->lun_map) 897 i += port->lun_map_size * sizeof(uint32_t); 898 if (port->port_devid) 899 i += port->port_devid->len; 900 if (port->target_devid) 901 i += port->target_devid->len; 902 if (port->init_devid) 903 i += port->init_devid->len; 904 msg = malloc(i, M_CTL, M_WAITOK); 905 bzero(&msg->port, sizeof(msg->port)); 906 msg->hdr.msg_type = CTL_MSG_PORT_SYNC; 907 msg->hdr.nexus.targ_port = port->targ_port; 908 msg->port.port_type = port->port_type; 909 msg->port.physical_port = port->physical_port; 910 msg->port.virtual_port = port->virtual_port; 911 msg->port.status = port->status; 912 i = 0; 913 msg->port.name_len = sprintf(&msg->port.data[i], 914 "%d:%s", softc->ha_id, port->port_name) + 1; 915 i += msg->port.name_len; 916 if (port->lun_map) { 917 msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t); 918 memcpy(&msg->port.data[i], port->lun_map, 919 msg->port.lun_map_len); 920 i += msg->port.lun_map_len; 921 } 922 if (port->port_devid) { 923 msg->port.port_devid_len = port->port_devid->len; 924 memcpy(&msg->port.data[i], port->port_devid->data, 925 msg->port.port_devid_len); 926 i += msg->port.port_devid_len; 927 } 928 if (port->target_devid) { 929 msg->port.target_devid_len = port->target_devid->len; 930 memcpy(&msg->port.data[i], port->target_devid->data, 931 msg->port.target_devid_len); 932 i += msg->port.target_devid_len; 933 } 934 if (port->init_devid) { 935 msg->port.init_devid_len = port->init_devid->len; 936 memcpy(&msg->port.data[i], port->init_devid->data, 937 msg->port.init_devid_len); 938 i += msg->port.init_devid_len; 939 } 940 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 941 M_WAITOK); 942 free(msg, M_CTL); 943 } 944 945 void 946 ctl_isc_announce_iid(struct ctl_port *port, int iid) 947 { 948 struct ctl_softc *softc = port->ctl_softc; 949 union ctl_ha_msg *msg; 950 int i, l; 951 952 if (port->targ_port < softc->port_min || 953 port->targ_port >= softc->port_max || 954 softc->ha_link != CTL_HA_LINK_ONLINE) 955 return; 956 mtx_lock(&softc->ctl_lock); 957 i = sizeof(msg->iid); 958 l = 0; 959 if (port->wwpn_iid[iid].name) 960 l = strlen(port->wwpn_iid[iid].name) + 1; 961 i += l; 962 msg = malloc(i, M_CTL, M_NOWAIT); 963 if (msg == NULL) { 964 mtx_unlock(&softc->ctl_lock); 965 return; 966 } 967 bzero(&msg->iid, sizeof(msg->iid)); 968 msg->hdr.msg_type = CTL_MSG_IID_SYNC; 969 msg->hdr.nexus.targ_port = port->targ_port; 970 msg->hdr.nexus.initid = iid; 971 msg->iid.in_use = port->wwpn_iid[iid].in_use; 972 msg->iid.name_len = l; 973 msg->iid.wwpn = port->wwpn_iid[iid].wwpn; 974 if (port->wwpn_iid[iid].name) 975 strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l); 976 mtx_unlock(&softc->ctl_lock); 977 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT); 978 free(msg, M_CTL); 979 } 980 981 void 982 ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx, 983 uint8_t page, uint8_t subpage) 984 { 985 struct ctl_softc *softc = lun->ctl_softc; 986 union ctl_ha_msg msg; 987 u_int i; 988 989 if (softc->ha_link != CTL_HA_LINK_ONLINE) 990 return; 991 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 992 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == 993 page && lun->mode_pages.index[i].subpage == subpage) 994 break; 995 } 996 if (i == CTL_NUM_MODE_PAGES) 997 return; 998 999 /* Don't try to replicate pages not present on this device. */ 1000 if (lun->mode_pages.index[i].page_data == NULL) 1001 return; 1002 1003 bzero(&msg.mode, sizeof(msg.mode)); 1004 msg.hdr.msg_type = CTL_MSG_MODE_SYNC; 1005 msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT; 1006 msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT; 1007 msg.hdr.nexus.targ_lun = lun->lun; 1008 msg.hdr.nexus.targ_mapped_lun = lun->lun; 1009 msg.mode.page_code = page; 1010 msg.mode.subpage = subpage; 1011 msg.mode.page_len = lun->mode_pages.index[i].page_len; 1012 memcpy(msg.mode.data, lun->mode_pages.index[i].page_data, 1013 msg.mode.page_len); 1014 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode), 1015 M_WAITOK); 1016 } 1017 1018 static void 1019 ctl_isc_ha_link_up(struct ctl_softc *softc) 1020 { 1021 struct ctl_port *port; 1022 struct ctl_lun *lun; 1023 union ctl_ha_msg msg; 1024 int i; 1025 1026 /* Announce this node parameters to peer for validation. */ 1027 msg.login.msg_type = CTL_MSG_LOGIN; 1028 msg.login.version = CTL_HA_VERSION; 1029 msg.login.ha_mode = softc->ha_mode; 1030 msg.login.ha_id = softc->ha_id; 1031 msg.login.max_luns = ctl_max_luns; 1032 msg.login.max_ports = ctl_max_ports; 1033 msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT; 1034 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login), 1035 M_WAITOK); 1036 1037 STAILQ_FOREACH(port, &softc->port_list, links) { 1038 ctl_isc_announce_port(port); 1039 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1040 if (port->wwpn_iid[i].in_use) 1041 ctl_isc_announce_iid(port, i); 1042 } 1043 } 1044 STAILQ_FOREACH(lun, &softc->lun_list, links) 1045 ctl_isc_announce_lun(lun); 1046 } 1047 1048 static void 1049 ctl_isc_ha_link_down(struct ctl_softc *softc) 1050 { 1051 struct ctl_port *port; 1052 struct ctl_lun *lun; 1053 union ctl_io *io; 1054 int i; 1055 1056 mtx_lock(&softc->ctl_lock); 1057 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1058 mtx_lock(&lun->lun_lock); 1059 if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) { 1060 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; 1061 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 1062 } 1063 mtx_unlock(&lun->lun_lock); 1064 1065 mtx_unlock(&softc->ctl_lock); 1066 io = ctl_alloc_io(softc->othersc_pool); 1067 mtx_lock(&softc->ctl_lock); 1068 ctl_zero_io(io); 1069 io->io_hdr.msg_type = CTL_MSG_FAILOVER; 1070 io->io_hdr.nexus.targ_mapped_lun = lun->lun; 1071 ctl_enqueue_isc(io); 1072 } 1073 1074 STAILQ_FOREACH(port, &softc->port_list, links) { 1075 if (port->targ_port >= softc->port_min && 1076 port->targ_port < softc->port_max) 1077 continue; 1078 port->status &= ~CTL_PORT_STATUS_ONLINE; 1079 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1080 port->wwpn_iid[i].in_use = 0; 1081 free(port->wwpn_iid[i].name, M_CTL); 1082 port->wwpn_iid[i].name = NULL; 1083 } 1084 } 1085 mtx_unlock(&softc->ctl_lock); 1086 } 1087 1088 static void 1089 ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1090 { 1091 struct ctl_lun *lun; 1092 uint32_t iid = ctl_get_initindex(&msg->hdr.nexus); 1093 1094 mtx_lock(&softc->ctl_lock); 1095 if (msg->hdr.nexus.targ_mapped_lun >= ctl_max_luns || 1096 (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) { 1097 mtx_unlock(&softc->ctl_lock); 1098 return; 1099 } 1100 mtx_lock(&lun->lun_lock); 1101 mtx_unlock(&softc->ctl_lock); 1102 if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set) 1103 memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8); 1104 if (msg->ua.ua_all) { 1105 if (msg->ua.ua_set) 1106 ctl_est_ua_all(lun, iid, msg->ua.ua_type); 1107 else 1108 ctl_clr_ua_all(lun, iid, msg->ua.ua_type); 1109 } else { 1110 if (msg->ua.ua_set) 1111 ctl_est_ua(lun, iid, msg->ua.ua_type); 1112 else 1113 ctl_clr_ua(lun, iid, msg->ua.ua_type); 1114 } 1115 mtx_unlock(&lun->lun_lock); 1116 } 1117 1118 static void 1119 ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1120 { 1121 struct ctl_lun *lun; 1122 struct ctl_ha_msg_lun_pr_key pr_key; 1123 int i, k; 1124 ctl_lun_flags oflags; 1125 uint32_t targ_lun; 1126 1127 targ_lun = msg->hdr.nexus.targ_mapped_lun; 1128 mtx_lock(&softc->ctl_lock); 1129 if (targ_lun >= ctl_max_luns || 1130 (lun = softc->ctl_luns[targ_lun]) == NULL) { 1131 mtx_unlock(&softc->ctl_lock); 1132 return; 1133 } 1134 mtx_lock(&lun->lun_lock); 1135 mtx_unlock(&softc->ctl_lock); 1136 if (lun->flags & CTL_LUN_DISABLED) { 1137 mtx_unlock(&lun->lun_lock); 1138 return; 1139 } 1140 i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0; 1141 if (msg->lun.lun_devid_len != i || (i > 0 && 1142 memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) { 1143 mtx_unlock(&lun->lun_lock); 1144 printf("%s: Received conflicting HA LUN %d\n", 1145 __func__, targ_lun); 1146 return; 1147 } else { 1148 /* Record whether peer is primary. */ 1149 oflags = lun->flags; 1150 if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) && 1151 (msg->lun.flags & CTL_LUN_DISABLED) == 0) 1152 lun->flags |= CTL_LUN_PEER_SC_PRIMARY; 1153 else 1154 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; 1155 if (oflags != lun->flags) 1156 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 1157 1158 /* If peer is primary and we are not -- use data */ 1159 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 1160 (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) { 1161 lun->pr_generation = msg->lun.pr_generation; 1162 lun->pr_res_idx = msg->lun.pr_res_idx; 1163 lun->pr_res_type = msg->lun.pr_res_type; 1164 lun->pr_key_count = msg->lun.pr_key_count; 1165 for (k = 0; k < CTL_MAX_INITIATORS; k++) 1166 ctl_clr_prkey(lun, k); 1167 for (k = 0; k < msg->lun.pr_key_count; k++) { 1168 memcpy(&pr_key, &msg->lun.data[i], 1169 sizeof(pr_key)); 1170 ctl_alloc_prkey(lun, pr_key.pr_iid); 1171 ctl_set_prkey(lun, pr_key.pr_iid, 1172 pr_key.pr_key); 1173 i += sizeof(pr_key); 1174 } 1175 } 1176 1177 mtx_unlock(&lun->lun_lock); 1178 CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n", 1179 __func__, targ_lun, 1180 (msg->lun.flags & CTL_LUN_PRIMARY_SC) ? 1181 "primary" : "secondary")); 1182 1183 /* If we are primary but peer doesn't know -- notify */ 1184 if ((lun->flags & CTL_LUN_PRIMARY_SC) && 1185 (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0) 1186 ctl_isc_announce_lun(lun); 1187 } 1188 } 1189 1190 static void 1191 ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1192 { 1193 struct ctl_port *port; 1194 struct ctl_lun *lun; 1195 int i, new; 1196 1197 port = softc->ctl_ports[msg->hdr.nexus.targ_port]; 1198 if (port == NULL) { 1199 CTL_DEBUG_PRINT(("%s: New port %d\n", __func__, 1200 msg->hdr.nexus.targ_port)); 1201 new = 1; 1202 port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO); 1203 port->frontend = &ha_frontend; 1204 port->targ_port = msg->hdr.nexus.targ_port; 1205 port->fe_datamove = ctl_ha_datamove; 1206 port->fe_done = ctl_ha_done; 1207 } else if (port->frontend == &ha_frontend) { 1208 CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__, 1209 msg->hdr.nexus.targ_port)); 1210 new = 0; 1211 } else { 1212 printf("%s: Received conflicting HA port %d\n", 1213 __func__, msg->hdr.nexus.targ_port); 1214 return; 1215 } 1216 port->port_type = msg->port.port_type; 1217 port->physical_port = msg->port.physical_port; 1218 port->virtual_port = msg->port.virtual_port; 1219 port->status = msg->port.status; 1220 i = 0; 1221 free(port->port_name, M_CTL); 1222 port->port_name = strndup(&msg->port.data[i], msg->port.name_len, 1223 M_CTL); 1224 i += msg->port.name_len; 1225 if (msg->port.lun_map_len != 0) { 1226 if (port->lun_map == NULL || 1227 port->lun_map_size * sizeof(uint32_t) < 1228 msg->port.lun_map_len) { 1229 port->lun_map_size = 0; 1230 free(port->lun_map, M_CTL); 1231 port->lun_map = malloc(msg->port.lun_map_len, 1232 M_CTL, M_WAITOK); 1233 } 1234 memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len); 1235 port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t); 1236 i += msg->port.lun_map_len; 1237 } else { 1238 port->lun_map_size = 0; 1239 free(port->lun_map, M_CTL); 1240 port->lun_map = NULL; 1241 } 1242 if (msg->port.port_devid_len != 0) { 1243 if (port->port_devid == NULL || 1244 port->port_devid->len < msg->port.port_devid_len) { 1245 free(port->port_devid, M_CTL); 1246 port->port_devid = malloc(sizeof(struct ctl_devid) + 1247 msg->port.port_devid_len, M_CTL, M_WAITOK); 1248 } 1249 memcpy(port->port_devid->data, &msg->port.data[i], 1250 msg->port.port_devid_len); 1251 port->port_devid->len = msg->port.port_devid_len; 1252 i += msg->port.port_devid_len; 1253 } else { 1254 free(port->port_devid, M_CTL); 1255 port->port_devid = NULL; 1256 } 1257 if (msg->port.target_devid_len != 0) { 1258 if (port->target_devid == NULL || 1259 port->target_devid->len < msg->port.target_devid_len) { 1260 free(port->target_devid, M_CTL); 1261 port->target_devid = malloc(sizeof(struct ctl_devid) + 1262 msg->port.target_devid_len, M_CTL, M_WAITOK); 1263 } 1264 memcpy(port->target_devid->data, &msg->port.data[i], 1265 msg->port.target_devid_len); 1266 port->target_devid->len = msg->port.target_devid_len; 1267 i += msg->port.target_devid_len; 1268 } else { 1269 free(port->target_devid, M_CTL); 1270 port->target_devid = NULL; 1271 } 1272 if (msg->port.init_devid_len != 0) { 1273 if (port->init_devid == NULL || 1274 port->init_devid->len < msg->port.init_devid_len) { 1275 free(port->init_devid, M_CTL); 1276 port->init_devid = malloc(sizeof(struct ctl_devid) + 1277 msg->port.init_devid_len, M_CTL, M_WAITOK); 1278 } 1279 memcpy(port->init_devid->data, &msg->port.data[i], 1280 msg->port.init_devid_len); 1281 port->init_devid->len = msg->port.init_devid_len; 1282 i += msg->port.init_devid_len; 1283 } else { 1284 free(port->init_devid, M_CTL); 1285 port->init_devid = NULL; 1286 } 1287 if (new) { 1288 if (ctl_port_register(port) != 0) { 1289 printf("%s: ctl_port_register() failed with error\n", 1290 __func__); 1291 } 1292 } 1293 mtx_lock(&softc->ctl_lock); 1294 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1295 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 1296 continue; 1297 mtx_lock(&lun->lun_lock); 1298 ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE); 1299 mtx_unlock(&lun->lun_lock); 1300 } 1301 mtx_unlock(&softc->ctl_lock); 1302 } 1303 1304 static void 1305 ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1306 { 1307 struct ctl_port *port; 1308 int iid; 1309 1310 port = softc->ctl_ports[msg->hdr.nexus.targ_port]; 1311 if (port == NULL) { 1312 printf("%s: Received IID for unknown port %d\n", 1313 __func__, msg->hdr.nexus.targ_port); 1314 return; 1315 } 1316 iid = msg->hdr.nexus.initid; 1317 if (port->wwpn_iid[iid].in_use != 0 && 1318 msg->iid.in_use == 0) 1319 ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); 1320 port->wwpn_iid[iid].in_use = msg->iid.in_use; 1321 port->wwpn_iid[iid].wwpn = msg->iid.wwpn; 1322 free(port->wwpn_iid[iid].name, M_CTL); 1323 if (msg->iid.name_len) { 1324 port->wwpn_iid[iid].name = strndup(&msg->iid.data[0], 1325 msg->iid.name_len, M_CTL); 1326 } else 1327 port->wwpn_iid[iid].name = NULL; 1328 } 1329 1330 static void 1331 ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1332 { 1333 1334 if (msg->login.version != CTL_HA_VERSION) { 1335 printf("CTL HA peers have different versions %d != %d\n", 1336 msg->login.version, CTL_HA_VERSION); 1337 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1338 return; 1339 } 1340 if (msg->login.ha_mode != softc->ha_mode) { 1341 printf("CTL HA peers have different ha_mode %d != %d\n", 1342 msg->login.ha_mode, softc->ha_mode); 1343 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1344 return; 1345 } 1346 if (msg->login.ha_id == softc->ha_id) { 1347 printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id); 1348 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1349 return; 1350 } 1351 if (msg->login.max_luns != ctl_max_luns || 1352 msg->login.max_ports != ctl_max_ports || 1353 msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) { 1354 printf("CTL HA peers have different limits\n"); 1355 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1356 return; 1357 } 1358 } 1359 1360 static void 1361 ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1362 { 1363 struct ctl_lun *lun; 1364 u_int i; 1365 uint32_t initidx, targ_lun; 1366 1367 targ_lun = msg->hdr.nexus.targ_mapped_lun; 1368 mtx_lock(&softc->ctl_lock); 1369 if (targ_lun >= ctl_max_luns || 1370 (lun = softc->ctl_luns[targ_lun]) == NULL) { 1371 mtx_unlock(&softc->ctl_lock); 1372 return; 1373 } 1374 mtx_lock(&lun->lun_lock); 1375 mtx_unlock(&softc->ctl_lock); 1376 if (lun->flags & CTL_LUN_DISABLED) { 1377 mtx_unlock(&lun->lun_lock); 1378 return; 1379 } 1380 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 1381 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == 1382 msg->mode.page_code && 1383 lun->mode_pages.index[i].subpage == msg->mode.subpage) 1384 break; 1385 } 1386 if (i == CTL_NUM_MODE_PAGES) { 1387 mtx_unlock(&lun->lun_lock); 1388 return; 1389 } 1390 memcpy(lun->mode_pages.index[i].page_data, msg->mode.data, 1391 lun->mode_pages.index[i].page_len); 1392 initidx = ctl_get_initindex(&msg->hdr.nexus); 1393 if (initidx != -1) 1394 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 1395 mtx_unlock(&lun->lun_lock); 1396 } 1397 1398 /* 1399 * ISC (Inter Shelf Communication) event handler. Events from the HA 1400 * subsystem come in here. 1401 */ 1402 static void 1403 ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) 1404 { 1405 struct ctl_softc *softc = control_softc; 1406 union ctl_io *io; 1407 struct ctl_prio *presio; 1408 ctl_ha_status isc_status; 1409 1410 CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event)); 1411 if (event == CTL_HA_EVT_MSG_RECV) { 1412 union ctl_ha_msg *msg, msgbuf; 1413 1414 if (param > sizeof(msgbuf)) 1415 msg = malloc(param, M_CTL, M_WAITOK); 1416 else 1417 msg = &msgbuf; 1418 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param, 1419 M_WAITOK); 1420 if (isc_status != CTL_HA_STATUS_SUCCESS) { 1421 printf("%s: Error receiving message: %d\n", 1422 __func__, isc_status); 1423 if (msg != &msgbuf) 1424 free(msg, M_CTL); 1425 return; 1426 } 1427 1428 CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type)); 1429 switch (msg->hdr.msg_type) { 1430 case CTL_MSG_SERIALIZE: 1431 io = ctl_alloc_io(softc->othersc_pool); 1432 ctl_zero_io(io); 1433 // populate ctsio from msg 1434 io->io_hdr.io_type = CTL_IO_SCSI; 1435 io->io_hdr.msg_type = CTL_MSG_SERIALIZE; 1436 io->io_hdr.original_sc = msg->hdr.original_sc; 1437 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | 1438 CTL_FLAG_IO_ACTIVE; 1439 /* 1440 * If we're in serialization-only mode, we don't 1441 * want to go through full done processing. Thus 1442 * the COPY flag. 1443 * 1444 * XXX KDM add another flag that is more specific. 1445 */ 1446 if (softc->ha_mode != CTL_HA_MODE_XFER) 1447 io->io_hdr.flags |= CTL_FLAG_INT_COPY; 1448 io->io_hdr.nexus = msg->hdr.nexus; 1449 #if 0 1450 printf("port %u, iid %u, lun %u\n", 1451 io->io_hdr.nexus.targ_port, 1452 io->io_hdr.nexus.initid, 1453 io->io_hdr.nexus.targ_lun); 1454 #endif 1455 io->scsiio.tag_num = msg->scsi.tag_num; 1456 io->scsiio.tag_type = msg->scsi.tag_type; 1457 #ifdef CTL_TIME_IO 1458 io->io_hdr.start_time = time_uptime; 1459 getbinuptime(&io->io_hdr.start_bt); 1460 #endif /* CTL_TIME_IO */ 1461 io->scsiio.cdb_len = msg->scsi.cdb_len; 1462 memcpy(io->scsiio.cdb, msg->scsi.cdb, 1463 CTL_MAX_CDBLEN); 1464 if (softc->ha_mode == CTL_HA_MODE_XFER) { 1465 const struct ctl_cmd_entry *entry; 1466 1467 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 1468 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 1469 io->io_hdr.flags |= 1470 entry->flags & CTL_FLAG_DATA_MASK; 1471 } 1472 ctl_enqueue_isc(io); 1473 break; 1474 1475 /* Performed on the Originating SC, XFER mode only */ 1476 case CTL_MSG_DATAMOVE: { 1477 struct ctl_sg_entry *sgl; 1478 int i, j; 1479 1480 io = msg->hdr.original_sc; 1481 if (io == NULL) { 1482 printf("%s: original_sc == NULL!\n", __func__); 1483 /* XXX KDM do something here */ 1484 break; 1485 } 1486 io->io_hdr.msg_type = CTL_MSG_DATAMOVE; 1487 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1488 /* 1489 * Keep track of this, we need to send it back over 1490 * when the datamove is complete. 1491 */ 1492 io->io_hdr.serializing_sc = msg->hdr.serializing_sc; 1493 if (msg->hdr.status == CTL_SUCCESS) 1494 io->io_hdr.status = msg->hdr.status; 1495 1496 if (msg->dt.sg_sequence == 0) { 1497 #ifdef CTL_TIME_IO 1498 getbinuptime(&io->io_hdr.dma_start_bt); 1499 #endif 1500 i = msg->dt.kern_sg_entries + 1501 msg->dt.kern_data_len / 1502 CTL_HA_DATAMOVE_SEGMENT + 1; 1503 sgl = malloc(sizeof(*sgl) * i, M_CTL, 1504 M_WAITOK | M_ZERO); 1505 io->io_hdr.remote_sglist = sgl; 1506 io->io_hdr.local_sglist = 1507 &sgl[msg->dt.kern_sg_entries]; 1508 1509 io->scsiio.kern_data_ptr = (uint8_t *)sgl; 1510 1511 io->scsiio.kern_sg_entries = 1512 msg->dt.kern_sg_entries; 1513 io->scsiio.rem_sg_entries = 1514 msg->dt.kern_sg_entries; 1515 io->scsiio.kern_data_len = 1516 msg->dt.kern_data_len; 1517 io->scsiio.kern_total_len = 1518 msg->dt.kern_total_len; 1519 io->scsiio.kern_data_resid = 1520 msg->dt.kern_data_resid; 1521 io->scsiio.kern_rel_offset = 1522 msg->dt.kern_rel_offset; 1523 io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR; 1524 io->io_hdr.flags |= msg->dt.flags & 1525 CTL_FLAG_BUS_ADDR; 1526 } else 1527 sgl = (struct ctl_sg_entry *) 1528 io->scsiio.kern_data_ptr; 1529 1530 for (i = msg->dt.sent_sg_entries, j = 0; 1531 i < (msg->dt.sent_sg_entries + 1532 msg->dt.cur_sg_entries); i++, j++) { 1533 sgl[i].addr = msg->dt.sg_list[j].addr; 1534 sgl[i].len = msg->dt.sg_list[j].len; 1535 1536 #if 0 1537 printf("%s: DATAMOVE: %p,%lu j=%d, i=%d\n", 1538 __func__, sgl[i].addr, sgl[i].len, j, i); 1539 #endif 1540 } 1541 1542 /* 1543 * If this is the last piece of the I/O, we've got 1544 * the full S/G list. Queue processing in the thread. 1545 * Otherwise wait for the next piece. 1546 */ 1547 if (msg->dt.sg_last != 0) 1548 ctl_enqueue_isc(io); 1549 break; 1550 } 1551 /* Performed on the Serializing (primary) SC, XFER mode only */ 1552 case CTL_MSG_DATAMOVE_DONE: { 1553 if (msg->hdr.serializing_sc == NULL) { 1554 printf("%s: serializing_sc == NULL!\n", 1555 __func__); 1556 /* XXX KDM now what? */ 1557 break; 1558 } 1559 /* 1560 * We grab the sense information here in case 1561 * there was a failure, so we can return status 1562 * back to the initiator. 1563 */ 1564 io = msg->hdr.serializing_sc; 1565 io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 1566 io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; 1567 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1568 io->io_hdr.port_status = msg->scsi.port_status; 1569 io->scsiio.kern_data_resid = msg->scsi.kern_data_resid; 1570 if (msg->hdr.status != CTL_STATUS_NONE) { 1571 io->io_hdr.status = msg->hdr.status; 1572 io->scsiio.scsi_status = msg->scsi.scsi_status; 1573 io->scsiio.sense_len = msg->scsi.sense_len; 1574 memcpy(&io->scsiio.sense_data, 1575 &msg->scsi.sense_data, 1576 msg->scsi.sense_len); 1577 if (msg->hdr.status == CTL_SUCCESS) 1578 io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; 1579 } 1580 ctl_enqueue_isc(io); 1581 break; 1582 } 1583 1584 /* Preformed on Originating SC, SER_ONLY mode */ 1585 case CTL_MSG_R2R: 1586 io = msg->hdr.original_sc; 1587 if (io == NULL) { 1588 printf("%s: original_sc == NULL!\n", 1589 __func__); 1590 break; 1591 } 1592 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1593 io->io_hdr.msg_type = CTL_MSG_R2R; 1594 io->io_hdr.serializing_sc = msg->hdr.serializing_sc; 1595 ctl_enqueue_isc(io); 1596 break; 1597 1598 /* 1599 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY 1600 * mode. 1601 * Performed on the Originating (i.e. secondary) SC in XFER 1602 * mode 1603 */ 1604 case CTL_MSG_FINISH_IO: 1605 if (softc->ha_mode == CTL_HA_MODE_XFER) 1606 ctl_isc_handler_finish_xfer(softc, msg); 1607 else 1608 ctl_isc_handler_finish_ser_only(softc, msg); 1609 break; 1610 1611 /* Preformed on Originating SC */ 1612 case CTL_MSG_BAD_JUJU: 1613 io = msg->hdr.original_sc; 1614 if (io == NULL) { 1615 printf("%s: Bad JUJU!, original_sc is NULL!\n", 1616 __func__); 1617 break; 1618 } 1619 ctl_copy_sense_data(msg, io); 1620 /* 1621 * IO should have already been cleaned up on other 1622 * SC so clear this flag so we won't send a message 1623 * back to finish the IO there. 1624 */ 1625 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 1626 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1627 1628 /* io = msg->hdr.serializing_sc; */ 1629 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 1630 ctl_enqueue_isc(io); 1631 break; 1632 1633 /* Handle resets sent from the other side */ 1634 case CTL_MSG_MANAGE_TASKS: { 1635 struct ctl_taskio *taskio; 1636 taskio = (struct ctl_taskio *)ctl_alloc_io( 1637 softc->othersc_pool); 1638 ctl_zero_io((union ctl_io *)taskio); 1639 taskio->io_hdr.io_type = CTL_IO_TASK; 1640 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1641 taskio->io_hdr.nexus = msg->hdr.nexus; 1642 taskio->task_action = msg->task.task_action; 1643 taskio->tag_num = msg->task.tag_num; 1644 taskio->tag_type = msg->task.tag_type; 1645 #ifdef CTL_TIME_IO 1646 taskio->io_hdr.start_time = time_uptime; 1647 getbinuptime(&taskio->io_hdr.start_bt); 1648 #endif /* CTL_TIME_IO */ 1649 ctl_run_task((union ctl_io *)taskio); 1650 break; 1651 } 1652 /* Persistent Reserve action which needs attention */ 1653 case CTL_MSG_PERS_ACTION: 1654 presio = (struct ctl_prio *)ctl_alloc_io( 1655 softc->othersc_pool); 1656 ctl_zero_io((union ctl_io *)presio); 1657 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; 1658 presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1659 presio->io_hdr.nexus = msg->hdr.nexus; 1660 presio->pr_msg = msg->pr; 1661 ctl_enqueue_isc((union ctl_io *)presio); 1662 break; 1663 case CTL_MSG_UA: 1664 ctl_isc_ua(softc, msg, param); 1665 break; 1666 case CTL_MSG_PORT_SYNC: 1667 ctl_isc_port_sync(softc, msg, param); 1668 break; 1669 case CTL_MSG_LUN_SYNC: 1670 ctl_isc_lun_sync(softc, msg, param); 1671 break; 1672 case CTL_MSG_IID_SYNC: 1673 ctl_isc_iid_sync(softc, msg, param); 1674 break; 1675 case CTL_MSG_LOGIN: 1676 ctl_isc_login(softc, msg, param); 1677 break; 1678 case CTL_MSG_MODE_SYNC: 1679 ctl_isc_mode_sync(softc, msg, param); 1680 break; 1681 default: 1682 printf("Received HA message of unknown type %d\n", 1683 msg->hdr.msg_type); 1684 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1685 break; 1686 } 1687 if (msg != &msgbuf) 1688 free(msg, M_CTL); 1689 } else if (event == CTL_HA_EVT_LINK_CHANGE) { 1690 printf("CTL: HA link status changed from %d to %d\n", 1691 softc->ha_link, param); 1692 if (param == softc->ha_link) 1693 return; 1694 if (softc->ha_link == CTL_HA_LINK_ONLINE) { 1695 softc->ha_link = param; 1696 ctl_isc_ha_link_down(softc); 1697 } else { 1698 softc->ha_link = param; 1699 if (softc->ha_link == CTL_HA_LINK_ONLINE) 1700 ctl_isc_ha_link_up(softc); 1701 } 1702 return; 1703 } else { 1704 printf("ctl_isc_event_handler: Unknown event %d\n", event); 1705 return; 1706 } 1707 } 1708 1709 static void 1710 ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) 1711 { 1712 1713 memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data, 1714 src->scsi.sense_len); 1715 dest->scsiio.scsi_status = src->scsi.scsi_status; 1716 dest->scsiio.sense_len = src->scsi.sense_len; 1717 dest->io_hdr.status = src->hdr.status; 1718 } 1719 1720 static void 1721 ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest) 1722 { 1723 1724 memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data, 1725 src->scsiio.sense_len); 1726 dest->scsi.scsi_status = src->scsiio.scsi_status; 1727 dest->scsi.sense_len = src->scsiio.sense_len; 1728 dest->hdr.status = src->io_hdr.status; 1729 } 1730 1731 void 1732 ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1733 { 1734 struct ctl_softc *softc = lun->ctl_softc; 1735 ctl_ua_type *pu; 1736 1737 if (initidx < softc->init_min || initidx >= softc->init_max) 1738 return; 1739 mtx_assert(&lun->lun_lock, MA_OWNED); 1740 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1741 if (pu == NULL) 1742 return; 1743 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; 1744 } 1745 1746 void 1747 ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua) 1748 { 1749 int i; 1750 1751 mtx_assert(&lun->lun_lock, MA_OWNED); 1752 if (lun->pending_ua[port] == NULL) 1753 return; 1754 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1755 if (port * CTL_MAX_INIT_PER_PORT + i == except) 1756 continue; 1757 lun->pending_ua[port][i] |= ua; 1758 } 1759 } 1760 1761 void 1762 ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1763 { 1764 struct ctl_softc *softc = lun->ctl_softc; 1765 int i; 1766 1767 mtx_assert(&lun->lun_lock, MA_OWNED); 1768 for (i = softc->port_min; i < softc->port_max; i++) 1769 ctl_est_ua_port(lun, i, except, ua); 1770 } 1771 1772 void 1773 ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1774 { 1775 struct ctl_softc *softc = lun->ctl_softc; 1776 ctl_ua_type *pu; 1777 1778 if (initidx < softc->init_min || initidx >= softc->init_max) 1779 return; 1780 mtx_assert(&lun->lun_lock, MA_OWNED); 1781 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1782 if (pu == NULL) 1783 return; 1784 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; 1785 } 1786 1787 void 1788 ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1789 { 1790 struct ctl_softc *softc = lun->ctl_softc; 1791 int i, j; 1792 1793 mtx_assert(&lun->lun_lock, MA_OWNED); 1794 for (i = softc->port_min; i < softc->port_max; i++) { 1795 if (lun->pending_ua[i] == NULL) 1796 continue; 1797 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1798 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1799 continue; 1800 lun->pending_ua[i][j] &= ~ua; 1801 } 1802 } 1803 } 1804 1805 void 1806 ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, 1807 ctl_ua_type ua_type) 1808 { 1809 struct ctl_lun *lun; 1810 1811 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); 1812 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 1813 mtx_lock(&lun->lun_lock); 1814 ctl_clr_ua(lun, initidx, ua_type); 1815 mtx_unlock(&lun->lun_lock); 1816 } 1817 } 1818 1819 static int 1820 ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS) 1821 { 1822 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1823 struct ctl_lun *lun; 1824 struct ctl_lun_req ireq; 1825 int error, value; 1826 1827 value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1; 1828 error = sysctl_handle_int(oidp, &value, 0, req); 1829 if ((error != 0) || (req->newptr == NULL)) 1830 return (error); 1831 1832 mtx_lock(&softc->ctl_lock); 1833 if (value == 0) 1834 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1835 else 1836 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1837 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1838 mtx_unlock(&softc->ctl_lock); 1839 bzero(&ireq, sizeof(ireq)); 1840 ireq.reqtype = CTL_LUNREQ_MODIFY; 1841 ireq.reqdata.modify.lun_id = lun->lun; 1842 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, 1843 curthread); 1844 if (ireq.status != CTL_LUN_OK) { 1845 printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", 1846 __func__, ireq.status, ireq.error_str); 1847 } 1848 mtx_lock(&softc->ctl_lock); 1849 } 1850 mtx_unlock(&softc->ctl_lock); 1851 return (0); 1852 } 1853 1854 static int 1855 ctl_init(void) 1856 { 1857 struct make_dev_args args; 1858 struct ctl_softc *softc; 1859 int i, error; 1860 1861 softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1862 M_WAITOK | M_ZERO); 1863 1864 make_dev_args_init(&args); 1865 args.mda_devsw = &ctl_cdevsw; 1866 args.mda_uid = UID_ROOT; 1867 args.mda_gid = GID_OPERATOR; 1868 args.mda_mode = 0600; 1869 args.mda_si_drv1 = softc; 1870 error = make_dev_s(&args, &softc->dev, "cam/ctl"); 1871 if (error != 0) { 1872 free(softc, M_DEVBUF); 1873 control_softc = NULL; 1874 return (error); 1875 } 1876 1877 sysctl_ctx_init(&softc->sysctl_ctx); 1878 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1879 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1880 CTLFLAG_RD, 0, "CAM Target Layer"); 1881 1882 if (softc->sysctl_tree == NULL) { 1883 printf("%s: unable to allocate sysctl tree\n", __func__); 1884 destroy_dev(softc->dev); 1885 free(softc, M_DEVBUF); 1886 control_softc = NULL; 1887 return (ENOMEM); 1888 } 1889 1890 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1891 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1892 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1893 softc->flags = 0; 1894 1895 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1896 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, 1897 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); 1898 1899 if (ctl_max_luns <= 0 || powerof2(ctl_max_luns) == 0) { 1900 printf("Bad value %d for kern.cam.ctl.max_luns, must be a power of two, using %d\n", 1901 ctl_max_luns, CTL_DEFAULT_MAX_LUNS); 1902 ctl_max_luns = CTL_DEFAULT_MAX_LUNS; 1903 } 1904 softc->ctl_luns = malloc(sizeof(struct ctl_lun *) * ctl_max_luns, 1905 M_DEVBUF, M_WAITOK | M_ZERO); 1906 softc->ctl_lun_mask = malloc(sizeof(uint32_t) * 1907 ((ctl_max_luns + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); 1908 if (ctl_max_ports <= 0 || powerof2(ctl_max_ports) == 0) { 1909 printf("Bad value %d for kern.cam.ctl.max_ports, must be a power of two, using %d\n", 1910 ctl_max_ports, CTL_DEFAULT_MAX_PORTS); 1911 ctl_max_ports = CTL_DEFAULT_MAX_PORTS; 1912 } 1913 softc->ctl_port_mask = malloc(sizeof(uint32_t) * 1914 ((ctl_max_ports + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); 1915 softc->ctl_ports = malloc(sizeof(struct ctl_port *) * ctl_max_ports, 1916 M_DEVBUF, M_WAITOK | M_ZERO); 1917 1918 1919 /* 1920 * In Copan's HA scheme, the "master" and "slave" roles are 1921 * figured out through the slot the controller is in. Although it 1922 * is an active/active system, someone has to be in charge. 1923 */ 1924 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1925 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1926 "HA head ID (0 - no HA)"); 1927 if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) { 1928 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1929 softc->is_single = 1; 1930 softc->port_cnt = ctl_max_ports; 1931 softc->port_min = 0; 1932 } else { 1933 softc->port_cnt = ctl_max_ports / NUM_HA_SHELVES; 1934 softc->port_min = (softc->ha_id - 1) * softc->port_cnt; 1935 } 1936 softc->port_max = softc->port_min + softc->port_cnt; 1937 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; 1938 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; 1939 1940 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1941 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, 1942 "HA link state (0 - offline, 1 - unknown, 2 - online)"); 1943 1944 STAILQ_INIT(&softc->lun_list); 1945 STAILQ_INIT(&softc->pending_lun_queue); 1946 STAILQ_INIT(&softc->fe_list); 1947 STAILQ_INIT(&softc->port_list); 1948 STAILQ_INIT(&softc->be_list); 1949 ctl_tpc_init(softc); 1950 1951 if (worker_threads <= 0) 1952 worker_threads = max(1, mp_ncpus / 4); 1953 if (worker_threads > CTL_MAX_THREADS) 1954 worker_threads = CTL_MAX_THREADS; 1955 1956 for (i = 0; i < worker_threads; i++) { 1957 struct ctl_thread *thr = &softc->threads[i]; 1958 1959 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); 1960 thr->ctl_softc = softc; 1961 STAILQ_INIT(&thr->incoming_queue); 1962 STAILQ_INIT(&thr->rtr_queue); 1963 STAILQ_INIT(&thr->done_queue); 1964 STAILQ_INIT(&thr->isc_queue); 1965 1966 error = kproc_kthread_add(ctl_work_thread, thr, 1967 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); 1968 if (error != 0) { 1969 printf("error creating CTL work thread!\n"); 1970 return (error); 1971 } 1972 } 1973 error = kproc_kthread_add(ctl_lun_thread, softc, 1974 &softc->ctl_proc, &softc->lun_thread, 0, 0, "ctl", "lun"); 1975 if (error != 0) { 1976 printf("error creating CTL lun thread!\n"); 1977 return (error); 1978 } 1979 error = kproc_kthread_add(ctl_thresh_thread, softc, 1980 &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh"); 1981 if (error != 0) { 1982 printf("error creating CTL threshold thread!\n"); 1983 return (error); 1984 } 1985 1986 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), 1987 OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN, 1988 softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); 1989 1990 if (softc->is_single == 0) { 1991 if (ctl_frontend_register(&ha_frontend) != 0) 1992 softc->is_single = 1; 1993 } 1994 return (0); 1995 } 1996 1997 static int 1998 ctl_shutdown(void) 1999 { 2000 struct ctl_softc *softc = control_softc; 2001 int i; 2002 2003 if (softc->is_single == 0) 2004 ctl_frontend_deregister(&ha_frontend); 2005 2006 destroy_dev(softc->dev); 2007 2008 /* Shutdown CTL threads. */ 2009 softc->shutdown = 1; 2010 for (i = 0; i < worker_threads; i++) { 2011 struct ctl_thread *thr = &softc->threads[i]; 2012 while (thr->thread != NULL) { 2013 wakeup(thr); 2014 if (thr->thread != NULL) 2015 pause("CTL thr shutdown", 1); 2016 } 2017 mtx_destroy(&thr->queue_lock); 2018 } 2019 while (softc->lun_thread != NULL) { 2020 wakeup(&softc->pending_lun_queue); 2021 if (softc->lun_thread != NULL) 2022 pause("CTL thr shutdown", 1); 2023 } 2024 while (softc->thresh_thread != NULL) { 2025 wakeup(softc->thresh_thread); 2026 if (softc->thresh_thread != NULL) 2027 pause("CTL thr shutdown", 1); 2028 } 2029 2030 ctl_tpc_shutdown(softc); 2031 uma_zdestroy(softc->io_zone); 2032 mtx_destroy(&softc->ctl_lock); 2033 2034 free(softc->ctl_luns, M_DEVBUF); 2035 free(softc->ctl_lun_mask, M_DEVBUF); 2036 free(softc->ctl_port_mask, M_DEVBUF); 2037 free(softc->ctl_ports, M_DEVBUF); 2038 2039 sysctl_ctx_free(&softc->sysctl_ctx); 2040 2041 free(softc, M_DEVBUF); 2042 control_softc = NULL; 2043 return (0); 2044 } 2045 2046 static int 2047 ctl_module_event_handler(module_t mod, int what, void *arg) 2048 { 2049 2050 switch (what) { 2051 case MOD_LOAD: 2052 return (ctl_init()); 2053 case MOD_UNLOAD: 2054 return (ctl_shutdown()); 2055 default: 2056 return (EOPNOTSUPP); 2057 } 2058 } 2059 2060 /* 2061 * XXX KDM should we do some access checks here? Bump a reference count to 2062 * prevent a CTL module from being unloaded while someone has it open? 2063 */ 2064 static int 2065 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 2066 { 2067 return (0); 2068 } 2069 2070 static int 2071 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 2072 { 2073 return (0); 2074 } 2075 2076 /* 2077 * Remove an initiator by port number and initiator ID. 2078 * Returns 0 for success, -1 for failure. 2079 */ 2080 int 2081 ctl_remove_initiator(struct ctl_port *port, int iid) 2082 { 2083 struct ctl_softc *softc = port->ctl_softc; 2084 int last; 2085 2086 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2087 2088 if (iid > CTL_MAX_INIT_PER_PORT) { 2089 printf("%s: initiator ID %u > maximun %u!\n", 2090 __func__, iid, CTL_MAX_INIT_PER_PORT); 2091 return (-1); 2092 } 2093 2094 mtx_lock(&softc->ctl_lock); 2095 last = (--port->wwpn_iid[iid].in_use == 0); 2096 port->wwpn_iid[iid].last_use = time_uptime; 2097 mtx_unlock(&softc->ctl_lock); 2098 if (last) 2099 ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); 2100 ctl_isc_announce_iid(port, iid); 2101 2102 return (0); 2103 } 2104 2105 /* 2106 * Add an initiator to the initiator map. 2107 * Returns iid for success, < 0 for failure. 2108 */ 2109 int 2110 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 2111 { 2112 struct ctl_softc *softc = port->ctl_softc; 2113 time_t best_time; 2114 int i, best; 2115 2116 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2117 2118 if (iid >= CTL_MAX_INIT_PER_PORT) { 2119 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 2120 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 2121 free(name, M_CTL); 2122 return (-1); 2123 } 2124 2125 mtx_lock(&softc->ctl_lock); 2126 2127 if (iid < 0 && (wwpn != 0 || name != NULL)) { 2128 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2129 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 2130 iid = i; 2131 break; 2132 } 2133 if (name != NULL && port->wwpn_iid[i].name != NULL && 2134 strcmp(name, port->wwpn_iid[i].name) == 0) { 2135 iid = i; 2136 break; 2137 } 2138 } 2139 } 2140 2141 if (iid < 0) { 2142 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2143 if (port->wwpn_iid[i].in_use == 0 && 2144 port->wwpn_iid[i].wwpn == 0 && 2145 port->wwpn_iid[i].name == NULL) { 2146 iid = i; 2147 break; 2148 } 2149 } 2150 } 2151 2152 if (iid < 0) { 2153 best = -1; 2154 best_time = INT32_MAX; 2155 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2156 if (port->wwpn_iid[i].in_use == 0) { 2157 if (port->wwpn_iid[i].last_use < best_time) { 2158 best = i; 2159 best_time = port->wwpn_iid[i].last_use; 2160 } 2161 } 2162 } 2163 iid = best; 2164 } 2165 2166 if (iid < 0) { 2167 mtx_unlock(&softc->ctl_lock); 2168 free(name, M_CTL); 2169 return (-2); 2170 } 2171 2172 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 2173 /* 2174 * This is not an error yet. 2175 */ 2176 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 2177 #if 0 2178 printf("%s: port %d iid %u WWPN %#jx arrived" 2179 " again\n", __func__, port->targ_port, 2180 iid, (uintmax_t)wwpn); 2181 #endif 2182 goto take; 2183 } 2184 if (name != NULL && port->wwpn_iid[iid].name != NULL && 2185 strcmp(name, port->wwpn_iid[iid].name) == 0) { 2186 #if 0 2187 printf("%s: port %d iid %u name '%s' arrived" 2188 " again\n", __func__, port->targ_port, 2189 iid, name); 2190 #endif 2191 goto take; 2192 } 2193 2194 /* 2195 * This is an error, but what do we do about it? The 2196 * driver is telling us we have a new WWPN for this 2197 * initiator ID, so we pretty much need to use it. 2198 */ 2199 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 2200 " but WWPN %#jx '%s' is still at that address\n", 2201 __func__, port->targ_port, iid, wwpn, name, 2202 (uintmax_t)port->wwpn_iid[iid].wwpn, 2203 port->wwpn_iid[iid].name); 2204 } 2205 take: 2206 free(port->wwpn_iid[iid].name, M_CTL); 2207 port->wwpn_iid[iid].name = name; 2208 port->wwpn_iid[iid].wwpn = wwpn; 2209 port->wwpn_iid[iid].in_use++; 2210 mtx_unlock(&softc->ctl_lock); 2211 ctl_isc_announce_iid(port, iid); 2212 2213 return (iid); 2214 } 2215 2216 static int 2217 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 2218 { 2219 int len; 2220 2221 switch (port->port_type) { 2222 case CTL_PORT_FC: 2223 { 2224 struct scsi_transportid_fcp *id = 2225 (struct scsi_transportid_fcp *)buf; 2226 if (port->wwpn_iid[iid].wwpn == 0) 2227 return (0); 2228 memset(id, 0, sizeof(*id)); 2229 id->format_protocol = SCSI_PROTO_FC; 2230 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 2231 return (sizeof(*id)); 2232 } 2233 case CTL_PORT_ISCSI: 2234 { 2235 struct scsi_transportid_iscsi_port *id = 2236 (struct scsi_transportid_iscsi_port *)buf; 2237 if (port->wwpn_iid[iid].name == NULL) 2238 return (0); 2239 memset(id, 0, 256); 2240 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 2241 SCSI_PROTO_ISCSI; 2242 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 2243 len = roundup2(min(len, 252), 4); 2244 scsi_ulto2b(len, id->additional_length); 2245 return (sizeof(*id) + len); 2246 } 2247 case CTL_PORT_SAS: 2248 { 2249 struct scsi_transportid_sas *id = 2250 (struct scsi_transportid_sas *)buf; 2251 if (port->wwpn_iid[iid].wwpn == 0) 2252 return (0); 2253 memset(id, 0, sizeof(*id)); 2254 id->format_protocol = SCSI_PROTO_SAS; 2255 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 2256 return (sizeof(*id)); 2257 } 2258 default: 2259 { 2260 struct scsi_transportid_spi *id = 2261 (struct scsi_transportid_spi *)buf; 2262 memset(id, 0, sizeof(*id)); 2263 id->format_protocol = SCSI_PROTO_SPI; 2264 scsi_ulto2b(iid, id->scsi_addr); 2265 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 2266 return (sizeof(*id)); 2267 } 2268 } 2269 } 2270 2271 /* 2272 * Serialize a command that went down the "wrong" side, and so was sent to 2273 * this controller for execution. The logic is a little different than the 2274 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 2275 * sent back to the other side, but in the success case, we execute the 2276 * command on this side (XFER mode) or tell the other side to execute it 2277 * (SER_ONLY mode). 2278 */ 2279 static void 2280 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 2281 { 2282 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2283 struct ctl_port *port = CTL_PORT(ctsio); 2284 union ctl_ha_msg msg_info; 2285 struct ctl_lun *lun; 2286 const struct ctl_cmd_entry *entry; 2287 uint32_t targ_lun; 2288 2289 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 2290 2291 /* Make sure that we know about this port. */ 2292 if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) { 2293 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2294 /*retry_count*/ 1); 2295 goto badjuju; 2296 } 2297 2298 /* Make sure that we know about this LUN. */ 2299 mtx_lock(&softc->ctl_lock); 2300 if (targ_lun >= ctl_max_luns || 2301 (lun = softc->ctl_luns[targ_lun]) == NULL) { 2302 mtx_unlock(&softc->ctl_lock); 2303 2304 /* 2305 * The other node would not send this request to us unless 2306 * received announce that we are primary node for this LUN. 2307 * If this LUN does not exist now, it is probably result of 2308 * a race, so respond to initiator in the most opaque way. 2309 */ 2310 ctl_set_busy(ctsio); 2311 goto badjuju; 2312 } 2313 mtx_lock(&lun->lun_lock); 2314 mtx_unlock(&softc->ctl_lock); 2315 2316 /* 2317 * If the LUN is invalid, pretend that it doesn't exist. 2318 * It will go away as soon as all pending I/Os completed. 2319 */ 2320 if (lun->flags & CTL_LUN_DISABLED) { 2321 mtx_unlock(&lun->lun_lock); 2322 ctl_set_busy(ctsio); 2323 goto badjuju; 2324 } 2325 2326 entry = ctl_get_cmd_entry(ctsio, NULL); 2327 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 2328 mtx_unlock(&lun->lun_lock); 2329 goto badjuju; 2330 } 2331 2332 CTL_LUN(ctsio) = lun; 2333 CTL_BACKEND_LUN(ctsio) = lun->be_lun; 2334 2335 /* 2336 * Every I/O goes into the OOA queue for a 2337 * particular LUN, and stays there until completion. 2338 */ 2339 #ifdef CTL_TIME_IO 2340 if (TAILQ_EMPTY(&lun->ooa_queue)) 2341 lun->idle_time += getsbinuptime() - lun->last_busy; 2342 #endif 2343 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2344 2345 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 2346 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, 2347 ooa_links))) { 2348 case CTL_ACTION_BLOCK: 2349 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 2350 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 2351 blocked_links); 2352 mtx_unlock(&lun->lun_lock); 2353 break; 2354 case CTL_ACTION_PASS: 2355 case CTL_ACTION_SKIP: 2356 if (softc->ha_mode == CTL_HA_MODE_XFER) { 2357 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 2358 ctl_enqueue_rtr((union ctl_io *)ctsio); 2359 mtx_unlock(&lun->lun_lock); 2360 } else { 2361 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 2362 mtx_unlock(&lun->lun_lock); 2363 2364 /* send msg back to other side */ 2365 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 2366 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 2367 msg_info.hdr.msg_type = CTL_MSG_R2R; 2368 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2369 sizeof(msg_info.hdr), M_WAITOK); 2370 } 2371 break; 2372 case CTL_ACTION_OVERLAP: 2373 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2374 mtx_unlock(&lun->lun_lock); 2375 ctl_set_overlapped_cmd(ctsio); 2376 goto badjuju; 2377 case CTL_ACTION_OVERLAP_TAG: 2378 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2379 mtx_unlock(&lun->lun_lock); 2380 ctl_set_overlapped_tag(ctsio, ctsio->tag_num); 2381 goto badjuju; 2382 case CTL_ACTION_ERROR: 2383 default: 2384 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2385 mtx_unlock(&lun->lun_lock); 2386 2387 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2388 /*retry_count*/ 0); 2389 badjuju: 2390 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 2391 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 2392 msg_info.hdr.serializing_sc = NULL; 2393 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 2394 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2395 sizeof(msg_info.scsi), M_WAITOK); 2396 ctl_free_io((union ctl_io *)ctsio); 2397 break; 2398 } 2399 } 2400 2401 /* 2402 * Returns 0 for success, errno for failure. 2403 */ 2404 static void 2405 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 2406 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 2407 { 2408 union ctl_io *io; 2409 2410 mtx_lock(&lun->lun_lock); 2411 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); 2412 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2413 ooa_links)) { 2414 struct ctl_ooa_entry *entry; 2415 2416 /* 2417 * If we've got more than we can fit, just count the 2418 * remaining entries. 2419 */ 2420 if (*cur_fill_num >= ooa_hdr->alloc_num) 2421 continue; 2422 2423 entry = &kern_entries[*cur_fill_num]; 2424 2425 entry->tag_num = io->scsiio.tag_num; 2426 entry->lun_num = lun->lun; 2427 #ifdef CTL_TIME_IO 2428 entry->start_bt = io->io_hdr.start_bt; 2429 #endif 2430 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 2431 entry->cdb_len = io->scsiio.cdb_len; 2432 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 2433 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 2434 2435 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 2436 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 2437 2438 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2439 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2440 2441 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2442 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2443 2444 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2445 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2446 } 2447 mtx_unlock(&lun->lun_lock); 2448 } 2449 2450 static void * 2451 ctl_copyin_alloc(void *user_addr, unsigned int len, char *error_str, 2452 size_t error_str_len) 2453 { 2454 void *kptr; 2455 2456 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 2457 2458 if (copyin(user_addr, kptr, len) != 0) { 2459 snprintf(error_str, error_str_len, "Error copying %d bytes " 2460 "from user address %p to kernel address %p", len, 2461 user_addr, kptr); 2462 free(kptr, M_CTL); 2463 return (NULL); 2464 } 2465 2466 return (kptr); 2467 } 2468 2469 static void 2470 ctl_free_args(int num_args, struct ctl_be_arg *args) 2471 { 2472 int i; 2473 2474 if (args == NULL) 2475 return; 2476 2477 for (i = 0; i < num_args; i++) { 2478 free(args[i].kname, M_CTL); 2479 free(args[i].kvalue, M_CTL); 2480 } 2481 2482 free(args, M_CTL); 2483 } 2484 2485 static struct ctl_be_arg * 2486 ctl_copyin_args(int num_args, struct ctl_be_arg *uargs, 2487 char *error_str, size_t error_str_len) 2488 { 2489 struct ctl_be_arg *args; 2490 int i; 2491 2492 args = ctl_copyin_alloc(uargs, num_args * sizeof(*args), 2493 error_str, error_str_len); 2494 2495 if (args == NULL) 2496 goto bailout; 2497 2498 for (i = 0; i < num_args; i++) { 2499 args[i].kname = NULL; 2500 args[i].kvalue = NULL; 2501 } 2502 2503 for (i = 0; i < num_args; i++) { 2504 uint8_t *tmpptr; 2505 2506 if (args[i].namelen == 0) { 2507 snprintf(error_str, error_str_len, "Argument %d " 2508 "name length is zero", i); 2509 goto bailout; 2510 } 2511 2512 args[i].kname = ctl_copyin_alloc(args[i].name, 2513 args[i].namelen, error_str, error_str_len); 2514 if (args[i].kname == NULL) 2515 goto bailout; 2516 2517 if (args[i].kname[args[i].namelen - 1] != '\0') { 2518 snprintf(error_str, error_str_len, "Argument %d " 2519 "name is not NUL-terminated", i); 2520 goto bailout; 2521 } 2522 2523 if (args[i].flags & CTL_BEARG_RD) { 2524 if (args[i].vallen == 0) { 2525 snprintf(error_str, error_str_len, "Argument %d " 2526 "value length is zero", i); 2527 goto bailout; 2528 } 2529 2530 tmpptr = ctl_copyin_alloc(args[i].value, 2531 args[i].vallen, error_str, error_str_len); 2532 if (tmpptr == NULL) 2533 goto bailout; 2534 2535 if ((args[i].flags & CTL_BEARG_ASCII) 2536 && (tmpptr[args[i].vallen - 1] != '\0')) { 2537 snprintf(error_str, error_str_len, "Argument " 2538 "%d value is not NUL-terminated", i); 2539 free(tmpptr, M_CTL); 2540 goto bailout; 2541 } 2542 args[i].kvalue = tmpptr; 2543 } else { 2544 args[i].kvalue = malloc(args[i].vallen, 2545 M_CTL, M_WAITOK | M_ZERO); 2546 } 2547 } 2548 2549 return (args); 2550 bailout: 2551 2552 ctl_free_args(num_args, args); 2553 2554 return (NULL); 2555 } 2556 2557 static void 2558 ctl_copyout_args(int num_args, struct ctl_be_arg *args) 2559 { 2560 int i; 2561 2562 for (i = 0; i < num_args; i++) { 2563 if (args[i].flags & CTL_BEARG_WR) 2564 copyout(args[i].kvalue, args[i].value, args[i].vallen); 2565 } 2566 } 2567 2568 /* 2569 * Escape characters that are illegal or not recommended in XML. 2570 */ 2571 int 2572 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 2573 { 2574 char *end = str + size; 2575 int retval; 2576 2577 retval = 0; 2578 2579 for (; *str && str < end; str++) { 2580 switch (*str) { 2581 case '&': 2582 retval = sbuf_printf(sb, "&"); 2583 break; 2584 case '>': 2585 retval = sbuf_printf(sb, ">"); 2586 break; 2587 case '<': 2588 retval = sbuf_printf(sb, "<"); 2589 break; 2590 default: 2591 retval = sbuf_putc(sb, *str); 2592 break; 2593 } 2594 2595 if (retval != 0) 2596 break; 2597 2598 } 2599 2600 return (retval); 2601 } 2602 2603 static void 2604 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2605 { 2606 struct scsi_vpd_id_descriptor *desc; 2607 int i; 2608 2609 if (id == NULL || id->len < 4) 2610 return; 2611 desc = (struct scsi_vpd_id_descriptor *)id->data; 2612 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2613 case SVPD_ID_TYPE_T10: 2614 sbuf_printf(sb, "t10."); 2615 break; 2616 case SVPD_ID_TYPE_EUI64: 2617 sbuf_printf(sb, "eui."); 2618 break; 2619 case SVPD_ID_TYPE_NAA: 2620 sbuf_printf(sb, "naa."); 2621 break; 2622 case SVPD_ID_TYPE_SCSI_NAME: 2623 break; 2624 } 2625 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2626 case SVPD_ID_CODESET_BINARY: 2627 for (i = 0; i < desc->length; i++) 2628 sbuf_printf(sb, "%02x", desc->identifier[i]); 2629 break; 2630 case SVPD_ID_CODESET_ASCII: 2631 sbuf_printf(sb, "%.*s", (int)desc->length, 2632 (char *)desc->identifier); 2633 break; 2634 case SVPD_ID_CODESET_UTF8: 2635 sbuf_printf(sb, "%s", (char *)desc->identifier); 2636 break; 2637 } 2638 } 2639 2640 static int 2641 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2642 struct thread *td) 2643 { 2644 struct ctl_softc *softc = dev->si_drv1; 2645 struct ctl_port *port; 2646 struct ctl_lun *lun; 2647 int retval; 2648 2649 retval = 0; 2650 2651 switch (cmd) { 2652 case CTL_IO: 2653 retval = ctl_ioctl_io(dev, cmd, addr, flag, td); 2654 break; 2655 case CTL_ENABLE_PORT: 2656 case CTL_DISABLE_PORT: 2657 case CTL_SET_PORT_WWNS: { 2658 struct ctl_port *port; 2659 struct ctl_port_entry *entry; 2660 2661 entry = (struct ctl_port_entry *)addr; 2662 2663 mtx_lock(&softc->ctl_lock); 2664 STAILQ_FOREACH(port, &softc->port_list, links) { 2665 int action, done; 2666 2667 if (port->targ_port < softc->port_min || 2668 port->targ_port >= softc->port_max) 2669 continue; 2670 2671 action = 0; 2672 done = 0; 2673 if ((entry->port_type == CTL_PORT_NONE) 2674 && (entry->targ_port == port->targ_port)) { 2675 /* 2676 * If the user only wants to enable or 2677 * disable or set WWNs on a specific port, 2678 * do the operation and we're done. 2679 */ 2680 action = 1; 2681 done = 1; 2682 } else if (entry->port_type & port->port_type) { 2683 /* 2684 * Compare the user's type mask with the 2685 * particular frontend type to see if we 2686 * have a match. 2687 */ 2688 action = 1; 2689 done = 0; 2690 2691 /* 2692 * Make sure the user isn't trying to set 2693 * WWNs on multiple ports at the same time. 2694 */ 2695 if (cmd == CTL_SET_PORT_WWNS) { 2696 printf("%s: Can't set WWNs on " 2697 "multiple ports\n", __func__); 2698 retval = EINVAL; 2699 break; 2700 } 2701 } 2702 if (action == 0) 2703 continue; 2704 2705 /* 2706 * XXX KDM we have to drop the lock here, because 2707 * the online/offline operations can potentially 2708 * block. We need to reference count the frontends 2709 * so they can't go away, 2710 */ 2711 if (cmd == CTL_ENABLE_PORT) { 2712 mtx_unlock(&softc->ctl_lock); 2713 ctl_port_online(port); 2714 mtx_lock(&softc->ctl_lock); 2715 } else if (cmd == CTL_DISABLE_PORT) { 2716 mtx_unlock(&softc->ctl_lock); 2717 ctl_port_offline(port); 2718 mtx_lock(&softc->ctl_lock); 2719 } else if (cmd == CTL_SET_PORT_WWNS) { 2720 ctl_port_set_wwns(port, 2721 (entry->flags & CTL_PORT_WWNN_VALID) ? 2722 1 : 0, entry->wwnn, 2723 (entry->flags & CTL_PORT_WWPN_VALID) ? 2724 1 : 0, entry->wwpn); 2725 } 2726 if (done != 0) 2727 break; 2728 } 2729 mtx_unlock(&softc->ctl_lock); 2730 break; 2731 } 2732 case CTL_GET_OOA: { 2733 struct ctl_ooa *ooa_hdr; 2734 struct ctl_ooa_entry *entries; 2735 uint32_t cur_fill_num; 2736 2737 ooa_hdr = (struct ctl_ooa *)addr; 2738 2739 if ((ooa_hdr->alloc_len == 0) 2740 || (ooa_hdr->alloc_num == 0)) { 2741 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2742 "must be non-zero\n", __func__, 2743 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2744 retval = EINVAL; 2745 break; 2746 } 2747 2748 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2749 sizeof(struct ctl_ooa_entry))) { 2750 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2751 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2752 __func__, ooa_hdr->alloc_len, 2753 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2754 retval = EINVAL; 2755 break; 2756 } 2757 2758 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2759 if (entries == NULL) { 2760 printf("%s: could not allocate %d bytes for OOA " 2761 "dump\n", __func__, ooa_hdr->alloc_len); 2762 retval = ENOMEM; 2763 break; 2764 } 2765 2766 mtx_lock(&softc->ctl_lock); 2767 if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 && 2768 (ooa_hdr->lun_num >= ctl_max_luns || 2769 softc->ctl_luns[ooa_hdr->lun_num] == NULL)) { 2770 mtx_unlock(&softc->ctl_lock); 2771 free(entries, M_CTL); 2772 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2773 __func__, (uintmax_t)ooa_hdr->lun_num); 2774 retval = EINVAL; 2775 break; 2776 } 2777 2778 cur_fill_num = 0; 2779 2780 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2781 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2782 ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2783 ooa_hdr, entries); 2784 } 2785 } else { 2786 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2787 ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, 2788 entries); 2789 } 2790 mtx_unlock(&softc->ctl_lock); 2791 2792 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2793 ooa_hdr->fill_len = ooa_hdr->fill_num * 2794 sizeof(struct ctl_ooa_entry); 2795 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2796 if (retval != 0) { 2797 printf("%s: error copying out %d bytes for OOA dump\n", 2798 __func__, ooa_hdr->fill_len); 2799 } 2800 2801 getbinuptime(&ooa_hdr->cur_bt); 2802 2803 if (cur_fill_num > ooa_hdr->alloc_num) { 2804 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2805 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2806 } else { 2807 ooa_hdr->dropped_num = 0; 2808 ooa_hdr->status = CTL_OOA_OK; 2809 } 2810 2811 free(entries, M_CTL); 2812 break; 2813 } 2814 case CTL_DELAY_IO: { 2815 struct ctl_io_delay_info *delay_info; 2816 2817 delay_info = (struct ctl_io_delay_info *)addr; 2818 2819 #ifdef CTL_IO_DELAY 2820 mtx_lock(&softc->ctl_lock); 2821 if (delay_info->lun_id >= ctl_max_luns || 2822 (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) { 2823 mtx_unlock(&softc->ctl_lock); 2824 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2825 break; 2826 } 2827 mtx_lock(&lun->lun_lock); 2828 mtx_unlock(&softc->ctl_lock); 2829 delay_info->status = CTL_DELAY_STATUS_OK; 2830 switch (delay_info->delay_type) { 2831 case CTL_DELAY_TYPE_CONT: 2832 case CTL_DELAY_TYPE_ONESHOT: 2833 break; 2834 default: 2835 delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE; 2836 break; 2837 } 2838 switch (delay_info->delay_loc) { 2839 case CTL_DELAY_LOC_DATAMOVE: 2840 lun->delay_info.datamove_type = delay_info->delay_type; 2841 lun->delay_info.datamove_delay = delay_info->delay_secs; 2842 break; 2843 case CTL_DELAY_LOC_DONE: 2844 lun->delay_info.done_type = delay_info->delay_type; 2845 lun->delay_info.done_delay = delay_info->delay_secs; 2846 break; 2847 default: 2848 delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; 2849 break; 2850 } 2851 mtx_unlock(&lun->lun_lock); 2852 #else 2853 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2854 #endif /* CTL_IO_DELAY */ 2855 break; 2856 } 2857 #ifdef CTL_LEGACY_STATS 2858 case CTL_GETSTATS: { 2859 struct ctl_stats *stats = (struct ctl_stats *)addr; 2860 int i; 2861 2862 /* 2863 * XXX KDM no locking here. If the LUN list changes, 2864 * things can blow up. 2865 */ 2866 i = 0; 2867 stats->status = CTL_SS_OK; 2868 stats->fill_len = 0; 2869 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2870 if (stats->fill_len + sizeof(lun->legacy_stats) > 2871 stats->alloc_len) { 2872 stats->status = CTL_SS_NEED_MORE_SPACE; 2873 break; 2874 } 2875 retval = copyout(&lun->legacy_stats, &stats->lun_stats[i++], 2876 sizeof(lun->legacy_stats)); 2877 if (retval != 0) 2878 break; 2879 stats->fill_len += sizeof(lun->legacy_stats); 2880 } 2881 stats->num_luns = softc->num_luns; 2882 stats->flags = CTL_STATS_FLAG_NONE; 2883 #ifdef CTL_TIME_IO 2884 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 2885 #endif 2886 getnanouptime(&stats->timestamp); 2887 break; 2888 } 2889 #endif /* CTL_LEGACY_STATS */ 2890 case CTL_ERROR_INJECT: { 2891 struct ctl_error_desc *err_desc, *new_err_desc; 2892 2893 err_desc = (struct ctl_error_desc *)addr; 2894 2895 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2896 M_WAITOK | M_ZERO); 2897 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2898 2899 mtx_lock(&softc->ctl_lock); 2900 if (err_desc->lun_id >= ctl_max_luns || 2901 (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) { 2902 mtx_unlock(&softc->ctl_lock); 2903 free(new_err_desc, M_CTL); 2904 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2905 __func__, (uintmax_t)err_desc->lun_id); 2906 retval = EINVAL; 2907 break; 2908 } 2909 mtx_lock(&lun->lun_lock); 2910 mtx_unlock(&softc->ctl_lock); 2911 2912 /* 2913 * We could do some checking here to verify the validity 2914 * of the request, but given the complexity of error 2915 * injection requests, the checking logic would be fairly 2916 * complex. 2917 * 2918 * For now, if the request is invalid, it just won't get 2919 * executed and might get deleted. 2920 */ 2921 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2922 2923 /* 2924 * XXX KDM check to make sure the serial number is unique, 2925 * in case we somehow manage to wrap. That shouldn't 2926 * happen for a very long time, but it's the right thing to 2927 * do. 2928 */ 2929 new_err_desc->serial = lun->error_serial; 2930 err_desc->serial = lun->error_serial; 2931 lun->error_serial++; 2932 2933 mtx_unlock(&lun->lun_lock); 2934 break; 2935 } 2936 case CTL_ERROR_INJECT_DELETE: { 2937 struct ctl_error_desc *delete_desc, *desc, *desc2; 2938 int delete_done; 2939 2940 delete_desc = (struct ctl_error_desc *)addr; 2941 delete_done = 0; 2942 2943 mtx_lock(&softc->ctl_lock); 2944 if (delete_desc->lun_id >= ctl_max_luns || 2945 (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) { 2946 mtx_unlock(&softc->ctl_lock); 2947 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2948 __func__, (uintmax_t)delete_desc->lun_id); 2949 retval = EINVAL; 2950 break; 2951 } 2952 mtx_lock(&lun->lun_lock); 2953 mtx_unlock(&softc->ctl_lock); 2954 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2955 if (desc->serial != delete_desc->serial) 2956 continue; 2957 2958 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2959 links); 2960 free(desc, M_CTL); 2961 delete_done = 1; 2962 } 2963 mtx_unlock(&lun->lun_lock); 2964 if (delete_done == 0) { 2965 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2966 "error serial %ju on LUN %u\n", __func__, 2967 delete_desc->serial, delete_desc->lun_id); 2968 retval = EINVAL; 2969 break; 2970 } 2971 break; 2972 } 2973 case CTL_DUMP_STRUCTS: { 2974 int j, k; 2975 struct ctl_port *port; 2976 struct ctl_frontend *fe; 2977 2978 mtx_lock(&softc->ctl_lock); 2979 printf("CTL Persistent Reservation information start:\n"); 2980 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2981 mtx_lock(&lun->lun_lock); 2982 if ((lun->flags & CTL_LUN_DISABLED) != 0) { 2983 mtx_unlock(&lun->lun_lock); 2984 continue; 2985 } 2986 2987 for (j = 0; j < ctl_max_ports; j++) { 2988 if (lun->pr_keys[j] == NULL) 2989 continue; 2990 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2991 if (lun->pr_keys[j][k] == 0) 2992 continue; 2993 printf(" LUN %ju port %d iid %d key " 2994 "%#jx\n", lun->lun, j, k, 2995 (uintmax_t)lun->pr_keys[j][k]); 2996 } 2997 } 2998 mtx_unlock(&lun->lun_lock); 2999 } 3000 printf("CTL Persistent Reservation information end\n"); 3001 printf("CTL Ports:\n"); 3002 STAILQ_FOREACH(port, &softc->port_list, links) { 3003 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 3004 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 3005 port->frontend->name, port->port_type, 3006 port->physical_port, port->virtual_port, 3007 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 3008 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3009 if (port->wwpn_iid[j].in_use == 0 && 3010 port->wwpn_iid[j].wwpn == 0 && 3011 port->wwpn_iid[j].name == NULL) 3012 continue; 3013 3014 printf(" iid %u use %d WWPN %#jx '%s'\n", 3015 j, port->wwpn_iid[j].in_use, 3016 (uintmax_t)port->wwpn_iid[j].wwpn, 3017 port->wwpn_iid[j].name); 3018 } 3019 } 3020 printf("CTL Port information end\n"); 3021 mtx_unlock(&softc->ctl_lock); 3022 /* 3023 * XXX KDM calling this without a lock. We'd likely want 3024 * to drop the lock before calling the frontend's dump 3025 * routine anyway. 3026 */ 3027 printf("CTL Frontends:\n"); 3028 STAILQ_FOREACH(fe, &softc->fe_list, links) { 3029 printf(" Frontend '%s'\n", fe->name); 3030 if (fe->fe_dump != NULL) 3031 fe->fe_dump(); 3032 } 3033 printf("CTL Frontend information end\n"); 3034 break; 3035 } 3036 case CTL_LUN_REQ: { 3037 struct ctl_lun_req *lun_req; 3038 struct ctl_backend_driver *backend; 3039 3040 lun_req = (struct ctl_lun_req *)addr; 3041 3042 backend = ctl_backend_find(lun_req->backend); 3043 if (backend == NULL) { 3044 lun_req->status = CTL_LUN_ERROR; 3045 snprintf(lun_req->error_str, 3046 sizeof(lun_req->error_str), 3047 "Backend \"%s\" not found.", 3048 lun_req->backend); 3049 break; 3050 } 3051 if (lun_req->num_be_args > 0) { 3052 lun_req->kern_be_args = ctl_copyin_args( 3053 lun_req->num_be_args, 3054 lun_req->be_args, 3055 lun_req->error_str, 3056 sizeof(lun_req->error_str)); 3057 if (lun_req->kern_be_args == NULL) { 3058 lun_req->status = CTL_LUN_ERROR; 3059 break; 3060 } 3061 } 3062 3063 retval = backend->ioctl(dev, cmd, addr, flag, td); 3064 3065 if (lun_req->num_be_args > 0) { 3066 ctl_copyout_args(lun_req->num_be_args, 3067 lun_req->kern_be_args); 3068 ctl_free_args(lun_req->num_be_args, 3069 lun_req->kern_be_args); 3070 } 3071 break; 3072 } 3073 case CTL_LUN_LIST: { 3074 struct sbuf *sb; 3075 struct ctl_lun_list *list; 3076 struct ctl_option *opt; 3077 3078 list = (struct ctl_lun_list *)addr; 3079 3080 /* 3081 * Allocate a fixed length sbuf here, based on the length 3082 * of the user's buffer. We could allocate an auto-extending 3083 * buffer, and then tell the user how much larger our 3084 * amount of data is than his buffer, but that presents 3085 * some problems: 3086 * 3087 * 1. The sbuf(9) routines use a blocking malloc, and so 3088 * we can't hold a lock while calling them with an 3089 * auto-extending buffer. 3090 * 3091 * 2. There is not currently a LUN reference counting 3092 * mechanism, outside of outstanding transactions on 3093 * the LUN's OOA queue. So a LUN could go away on us 3094 * while we're getting the LUN number, backend-specific 3095 * information, etc. Thus, given the way things 3096 * currently work, we need to hold the CTL lock while 3097 * grabbing LUN information. 3098 * 3099 * So, from the user's standpoint, the best thing to do is 3100 * allocate what he thinks is a reasonable buffer length, 3101 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 3102 * double the buffer length and try again. (And repeat 3103 * that until he succeeds.) 3104 */ 3105 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3106 if (sb == NULL) { 3107 list->status = CTL_LUN_LIST_ERROR; 3108 snprintf(list->error_str, sizeof(list->error_str), 3109 "Unable to allocate %d bytes for LUN list", 3110 list->alloc_len); 3111 break; 3112 } 3113 3114 sbuf_printf(sb, "<ctllunlist>\n"); 3115 3116 mtx_lock(&softc->ctl_lock); 3117 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3118 mtx_lock(&lun->lun_lock); 3119 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3120 (uintmax_t)lun->lun); 3121 3122 /* 3123 * Bail out as soon as we see that we've overfilled 3124 * the buffer. 3125 */ 3126 if (retval != 0) 3127 break; 3128 3129 retval = sbuf_printf(sb, "\t<backend_type>%s" 3130 "</backend_type>\n", 3131 (lun->backend == NULL) ? "none" : 3132 lun->backend->name); 3133 3134 if (retval != 0) 3135 break; 3136 3137 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3138 lun->be_lun->lun_type); 3139 3140 if (retval != 0) 3141 break; 3142 3143 if (lun->backend == NULL) { 3144 retval = sbuf_printf(sb, "</lun>\n"); 3145 if (retval != 0) 3146 break; 3147 continue; 3148 } 3149 3150 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3151 (lun->be_lun->maxlba > 0) ? 3152 lun->be_lun->maxlba + 1 : 0); 3153 3154 if (retval != 0) 3155 break; 3156 3157 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3158 lun->be_lun->blocksize); 3159 3160 if (retval != 0) 3161 break; 3162 3163 retval = sbuf_printf(sb, "\t<serial_number>"); 3164 3165 if (retval != 0) 3166 break; 3167 3168 retval = ctl_sbuf_printf_esc(sb, 3169 lun->be_lun->serial_num, 3170 sizeof(lun->be_lun->serial_num)); 3171 3172 if (retval != 0) 3173 break; 3174 3175 retval = sbuf_printf(sb, "</serial_number>\n"); 3176 3177 if (retval != 0) 3178 break; 3179 3180 retval = sbuf_printf(sb, "\t<device_id>"); 3181 3182 if (retval != 0) 3183 break; 3184 3185 retval = ctl_sbuf_printf_esc(sb, 3186 lun->be_lun->device_id, 3187 sizeof(lun->be_lun->device_id)); 3188 3189 if (retval != 0) 3190 break; 3191 3192 retval = sbuf_printf(sb, "</device_id>\n"); 3193 3194 if (retval != 0) 3195 break; 3196 3197 if (lun->backend->lun_info != NULL) { 3198 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 3199 if (retval != 0) 3200 break; 3201 } 3202 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 3203 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3204 opt->name, opt->value, opt->name); 3205 if (retval != 0) 3206 break; 3207 } 3208 3209 retval = sbuf_printf(sb, "</lun>\n"); 3210 3211 if (retval != 0) 3212 break; 3213 mtx_unlock(&lun->lun_lock); 3214 } 3215 if (lun != NULL) 3216 mtx_unlock(&lun->lun_lock); 3217 mtx_unlock(&softc->ctl_lock); 3218 3219 if ((retval != 0) 3220 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3221 retval = 0; 3222 sbuf_delete(sb); 3223 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3224 snprintf(list->error_str, sizeof(list->error_str), 3225 "Out of space, %d bytes is too small", 3226 list->alloc_len); 3227 break; 3228 } 3229 3230 sbuf_finish(sb); 3231 3232 retval = copyout(sbuf_data(sb), list->lun_xml, 3233 sbuf_len(sb) + 1); 3234 3235 list->fill_len = sbuf_len(sb) + 1; 3236 list->status = CTL_LUN_LIST_OK; 3237 sbuf_delete(sb); 3238 break; 3239 } 3240 case CTL_ISCSI: { 3241 struct ctl_iscsi *ci; 3242 struct ctl_frontend *fe; 3243 3244 ci = (struct ctl_iscsi *)addr; 3245 3246 fe = ctl_frontend_find("iscsi"); 3247 if (fe == NULL) { 3248 ci->status = CTL_ISCSI_ERROR; 3249 snprintf(ci->error_str, sizeof(ci->error_str), 3250 "Frontend \"iscsi\" not found."); 3251 break; 3252 } 3253 3254 retval = fe->ioctl(dev, cmd, addr, flag, td); 3255 break; 3256 } 3257 case CTL_PORT_REQ: { 3258 struct ctl_req *req; 3259 struct ctl_frontend *fe; 3260 3261 req = (struct ctl_req *)addr; 3262 3263 fe = ctl_frontend_find(req->driver); 3264 if (fe == NULL) { 3265 req->status = CTL_LUN_ERROR; 3266 snprintf(req->error_str, sizeof(req->error_str), 3267 "Frontend \"%s\" not found.", req->driver); 3268 break; 3269 } 3270 if (req->num_args > 0) { 3271 req->kern_args = ctl_copyin_args(req->num_args, 3272 req->args, req->error_str, sizeof(req->error_str)); 3273 if (req->kern_args == NULL) { 3274 req->status = CTL_LUN_ERROR; 3275 break; 3276 } 3277 } 3278 3279 if (fe->ioctl) 3280 retval = fe->ioctl(dev, cmd, addr, flag, td); 3281 else 3282 retval = ENODEV; 3283 3284 if (req->num_args > 0) { 3285 ctl_copyout_args(req->num_args, req->kern_args); 3286 ctl_free_args(req->num_args, req->kern_args); 3287 } 3288 break; 3289 } 3290 case CTL_PORT_LIST: { 3291 struct sbuf *sb; 3292 struct ctl_port *port; 3293 struct ctl_lun_list *list; 3294 struct ctl_option *opt; 3295 int j; 3296 uint32_t plun; 3297 3298 list = (struct ctl_lun_list *)addr; 3299 3300 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3301 if (sb == NULL) { 3302 list->status = CTL_LUN_LIST_ERROR; 3303 snprintf(list->error_str, sizeof(list->error_str), 3304 "Unable to allocate %d bytes for LUN list", 3305 list->alloc_len); 3306 break; 3307 } 3308 3309 sbuf_printf(sb, "<ctlportlist>\n"); 3310 3311 mtx_lock(&softc->ctl_lock); 3312 STAILQ_FOREACH(port, &softc->port_list, links) { 3313 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3314 (uintmax_t)port->targ_port); 3315 3316 /* 3317 * Bail out as soon as we see that we've overfilled 3318 * the buffer. 3319 */ 3320 if (retval != 0) 3321 break; 3322 3323 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3324 "</frontend_type>\n", port->frontend->name); 3325 if (retval != 0) 3326 break; 3327 3328 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3329 port->port_type); 3330 if (retval != 0) 3331 break; 3332 3333 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3334 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 3335 if (retval != 0) 3336 break; 3337 3338 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3339 port->port_name); 3340 if (retval != 0) 3341 break; 3342 3343 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3344 port->physical_port); 3345 if (retval != 0) 3346 break; 3347 3348 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3349 port->virtual_port); 3350 if (retval != 0) 3351 break; 3352 3353 if (port->target_devid != NULL) { 3354 sbuf_printf(sb, "\t<target>"); 3355 ctl_id_sbuf(port->target_devid, sb); 3356 sbuf_printf(sb, "</target>\n"); 3357 } 3358 3359 if (port->port_devid != NULL) { 3360 sbuf_printf(sb, "\t<port>"); 3361 ctl_id_sbuf(port->port_devid, sb); 3362 sbuf_printf(sb, "</port>\n"); 3363 } 3364 3365 if (port->port_info != NULL) { 3366 retval = port->port_info(port->onoff_arg, sb); 3367 if (retval != 0) 3368 break; 3369 } 3370 STAILQ_FOREACH(opt, &port->options, links) { 3371 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3372 opt->name, opt->value, opt->name); 3373 if (retval != 0) 3374 break; 3375 } 3376 3377 if (port->lun_map != NULL) { 3378 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 3379 for (j = 0; j < port->lun_map_size; j++) { 3380 plun = ctl_lun_map_from_port(port, j); 3381 if (plun == UINT32_MAX) 3382 continue; 3383 sbuf_printf(sb, 3384 "\t<lun id=\"%u\">%u</lun>\n", 3385 j, plun); 3386 } 3387 } 3388 3389 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3390 if (port->wwpn_iid[j].in_use == 0 || 3391 (port->wwpn_iid[j].wwpn == 0 && 3392 port->wwpn_iid[j].name == NULL)) 3393 continue; 3394 3395 if (port->wwpn_iid[j].name != NULL) 3396 retval = sbuf_printf(sb, 3397 "\t<initiator id=\"%u\">%s</initiator>\n", 3398 j, port->wwpn_iid[j].name); 3399 else 3400 retval = sbuf_printf(sb, 3401 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3402 j, port->wwpn_iid[j].wwpn); 3403 if (retval != 0) 3404 break; 3405 } 3406 if (retval != 0) 3407 break; 3408 3409 retval = sbuf_printf(sb, "</targ_port>\n"); 3410 if (retval != 0) 3411 break; 3412 } 3413 mtx_unlock(&softc->ctl_lock); 3414 3415 if ((retval != 0) 3416 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3417 retval = 0; 3418 sbuf_delete(sb); 3419 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3420 snprintf(list->error_str, sizeof(list->error_str), 3421 "Out of space, %d bytes is too small", 3422 list->alloc_len); 3423 break; 3424 } 3425 3426 sbuf_finish(sb); 3427 3428 retval = copyout(sbuf_data(sb), list->lun_xml, 3429 sbuf_len(sb) + 1); 3430 3431 list->fill_len = sbuf_len(sb) + 1; 3432 list->status = CTL_LUN_LIST_OK; 3433 sbuf_delete(sb); 3434 break; 3435 } 3436 case CTL_LUN_MAP: { 3437 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3438 struct ctl_port *port; 3439 3440 mtx_lock(&softc->ctl_lock); 3441 if (lm->port < softc->port_min || 3442 lm->port >= softc->port_max || 3443 (port = softc->ctl_ports[lm->port]) == NULL) { 3444 mtx_unlock(&softc->ctl_lock); 3445 return (ENXIO); 3446 } 3447 if (port->status & CTL_PORT_STATUS_ONLINE) { 3448 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3449 if (ctl_lun_map_to_port(port, lun->lun) == 3450 UINT32_MAX) 3451 continue; 3452 mtx_lock(&lun->lun_lock); 3453 ctl_est_ua_port(lun, lm->port, -1, 3454 CTL_UA_LUN_CHANGE); 3455 mtx_unlock(&lun->lun_lock); 3456 } 3457 } 3458 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3459 if (lm->plun != UINT32_MAX) { 3460 if (lm->lun == UINT32_MAX) 3461 retval = ctl_lun_map_unset(port, lm->plun); 3462 else if (lm->lun < ctl_max_luns && 3463 softc->ctl_luns[lm->lun] != NULL) 3464 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3465 else 3466 return (ENXIO); 3467 } else { 3468 if (lm->lun == UINT32_MAX) 3469 retval = ctl_lun_map_deinit(port); 3470 else 3471 retval = ctl_lun_map_init(port); 3472 } 3473 if (port->status & CTL_PORT_STATUS_ONLINE) 3474 ctl_isc_announce_port(port); 3475 break; 3476 } 3477 case CTL_GET_LUN_STATS: { 3478 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3479 int i; 3480 3481 /* 3482 * XXX KDM no locking here. If the LUN list changes, 3483 * things can blow up. 3484 */ 3485 i = 0; 3486 stats->status = CTL_SS_OK; 3487 stats->fill_len = 0; 3488 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3489 if (lun->lun < stats->first_item) 3490 continue; 3491 if (stats->fill_len + sizeof(lun->stats) > 3492 stats->alloc_len) { 3493 stats->status = CTL_SS_NEED_MORE_SPACE; 3494 break; 3495 } 3496 retval = copyout(&lun->stats, &stats->stats[i++], 3497 sizeof(lun->stats)); 3498 if (retval != 0) 3499 break; 3500 stats->fill_len += sizeof(lun->stats); 3501 } 3502 stats->num_items = softc->num_luns; 3503 stats->flags = CTL_STATS_FLAG_NONE; 3504 #ifdef CTL_TIME_IO 3505 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3506 #endif 3507 getnanouptime(&stats->timestamp); 3508 break; 3509 } 3510 case CTL_GET_PORT_STATS: { 3511 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3512 int i; 3513 3514 /* 3515 * XXX KDM no locking here. If the LUN list changes, 3516 * things can blow up. 3517 */ 3518 i = 0; 3519 stats->status = CTL_SS_OK; 3520 stats->fill_len = 0; 3521 STAILQ_FOREACH(port, &softc->port_list, links) { 3522 if (port->targ_port < stats->first_item) 3523 continue; 3524 if (stats->fill_len + sizeof(port->stats) > 3525 stats->alloc_len) { 3526 stats->status = CTL_SS_NEED_MORE_SPACE; 3527 break; 3528 } 3529 retval = copyout(&port->stats, &stats->stats[i++], 3530 sizeof(port->stats)); 3531 if (retval != 0) 3532 break; 3533 stats->fill_len += sizeof(port->stats); 3534 } 3535 stats->num_items = softc->num_ports; 3536 stats->flags = CTL_STATS_FLAG_NONE; 3537 #ifdef CTL_TIME_IO 3538 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3539 #endif 3540 getnanouptime(&stats->timestamp); 3541 break; 3542 } 3543 default: { 3544 /* XXX KDM should we fix this? */ 3545 #if 0 3546 struct ctl_backend_driver *backend; 3547 unsigned int type; 3548 int found; 3549 3550 found = 0; 3551 3552 /* 3553 * We encode the backend type as the ioctl type for backend 3554 * ioctls. So parse it out here, and then search for a 3555 * backend of this type. 3556 */ 3557 type = _IOC_TYPE(cmd); 3558 3559 STAILQ_FOREACH(backend, &softc->be_list, links) { 3560 if (backend->type == type) { 3561 found = 1; 3562 break; 3563 } 3564 } 3565 if (found == 0) { 3566 printf("ctl: unknown ioctl command %#lx or backend " 3567 "%d\n", cmd, type); 3568 retval = EINVAL; 3569 break; 3570 } 3571 retval = backend->ioctl(dev, cmd, addr, flag, td); 3572 #endif 3573 retval = ENOTTY; 3574 break; 3575 } 3576 } 3577 return (retval); 3578 } 3579 3580 uint32_t 3581 ctl_get_initindex(struct ctl_nexus *nexus) 3582 { 3583 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3584 } 3585 3586 int 3587 ctl_lun_map_init(struct ctl_port *port) 3588 { 3589 struct ctl_softc *softc = port->ctl_softc; 3590 struct ctl_lun *lun; 3591 int size = ctl_lun_map_size; 3592 uint32_t i; 3593 3594 if (port->lun_map == NULL || port->lun_map_size < size) { 3595 port->lun_map_size = 0; 3596 free(port->lun_map, M_CTL); 3597 port->lun_map = malloc(size * sizeof(uint32_t), 3598 M_CTL, M_NOWAIT); 3599 } 3600 if (port->lun_map == NULL) 3601 return (ENOMEM); 3602 for (i = 0; i < size; i++) 3603 port->lun_map[i] = UINT32_MAX; 3604 port->lun_map_size = size; 3605 if (port->status & CTL_PORT_STATUS_ONLINE) { 3606 if (port->lun_disable != NULL) { 3607 STAILQ_FOREACH(lun, &softc->lun_list, links) 3608 port->lun_disable(port->targ_lun_arg, lun->lun); 3609 } 3610 ctl_isc_announce_port(port); 3611 } 3612 return (0); 3613 } 3614 3615 int 3616 ctl_lun_map_deinit(struct ctl_port *port) 3617 { 3618 struct ctl_softc *softc = port->ctl_softc; 3619 struct ctl_lun *lun; 3620 3621 if (port->lun_map == NULL) 3622 return (0); 3623 port->lun_map_size = 0; 3624 free(port->lun_map, M_CTL); 3625 port->lun_map = NULL; 3626 if (port->status & CTL_PORT_STATUS_ONLINE) { 3627 if (port->lun_enable != NULL) { 3628 STAILQ_FOREACH(lun, &softc->lun_list, links) 3629 port->lun_enable(port->targ_lun_arg, lun->lun); 3630 } 3631 ctl_isc_announce_port(port); 3632 } 3633 return (0); 3634 } 3635 3636 int 3637 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3638 { 3639 int status; 3640 uint32_t old; 3641 3642 if (port->lun_map == NULL) { 3643 status = ctl_lun_map_init(port); 3644 if (status != 0) 3645 return (status); 3646 } 3647 if (plun >= port->lun_map_size) 3648 return (EINVAL); 3649 old = port->lun_map[plun]; 3650 port->lun_map[plun] = glun; 3651 if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) { 3652 if (port->lun_enable != NULL) 3653 port->lun_enable(port->targ_lun_arg, plun); 3654 ctl_isc_announce_port(port); 3655 } 3656 return (0); 3657 } 3658 3659 int 3660 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3661 { 3662 uint32_t old; 3663 3664 if (port->lun_map == NULL || plun >= port->lun_map_size) 3665 return (0); 3666 old = port->lun_map[plun]; 3667 port->lun_map[plun] = UINT32_MAX; 3668 if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) { 3669 if (port->lun_disable != NULL) 3670 port->lun_disable(port->targ_lun_arg, plun); 3671 ctl_isc_announce_port(port); 3672 } 3673 return (0); 3674 } 3675 3676 uint32_t 3677 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3678 { 3679 3680 if (port == NULL) 3681 return (UINT32_MAX); 3682 if (port->lun_map == NULL) 3683 return (lun_id); 3684 if (lun_id > port->lun_map_size) 3685 return (UINT32_MAX); 3686 return (port->lun_map[lun_id]); 3687 } 3688 3689 uint32_t 3690 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3691 { 3692 uint32_t i; 3693 3694 if (port == NULL) 3695 return (UINT32_MAX); 3696 if (port->lun_map == NULL) 3697 return (lun_id); 3698 for (i = 0; i < port->lun_map_size; i++) { 3699 if (port->lun_map[i] == lun_id) 3700 return (i); 3701 } 3702 return (UINT32_MAX); 3703 } 3704 3705 uint32_t 3706 ctl_decode_lun(uint64_t encoded) 3707 { 3708 uint8_t lun[8]; 3709 uint32_t result = 0xffffffff; 3710 3711 be64enc(lun, encoded); 3712 switch (lun[0] & RPL_LUNDATA_ATYP_MASK) { 3713 case RPL_LUNDATA_ATYP_PERIPH: 3714 if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 && 3715 lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0) 3716 result = lun[1]; 3717 break; 3718 case RPL_LUNDATA_ATYP_FLAT: 3719 if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 && 3720 lun[6] == 0 && lun[7] == 0) 3721 result = ((lun[0] & 0x3f) << 8) + lun[1]; 3722 break; 3723 case RPL_LUNDATA_ATYP_EXTLUN: 3724 switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) { 3725 case 0x02: 3726 switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) { 3727 case 0x00: 3728 result = lun[1]; 3729 break; 3730 case 0x10: 3731 result = (lun[1] << 16) + (lun[2] << 8) + 3732 lun[3]; 3733 break; 3734 case 0x20: 3735 if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0) 3736 result = (lun[2] << 24) + 3737 (lun[3] << 16) + (lun[4] << 8) + 3738 lun[5]; 3739 break; 3740 } 3741 break; 3742 case RPL_LUNDATA_EXT_EAM_NOT_SPEC: 3743 result = 0xffffffff; 3744 break; 3745 } 3746 break; 3747 } 3748 return (result); 3749 } 3750 3751 uint64_t 3752 ctl_encode_lun(uint32_t decoded) 3753 { 3754 uint64_t l = decoded; 3755 3756 if (l <= 0xff) 3757 return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48)); 3758 if (l <= 0x3fff) 3759 return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48)); 3760 if (l <= 0xffffff) 3761 return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) | 3762 (l << 32)); 3763 return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16)); 3764 } 3765 3766 int 3767 ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) 3768 { 3769 int i; 3770 3771 for (i = first; i < last; i++) { 3772 if ((mask[i / 32] & (1 << (i % 32))) == 0) 3773 return (i); 3774 } 3775 return (-1); 3776 } 3777 3778 int 3779 ctl_set_mask(uint32_t *mask, uint32_t bit) 3780 { 3781 uint32_t chunk, piece; 3782 3783 chunk = bit >> 5; 3784 piece = bit % (sizeof(uint32_t) * 8); 3785 3786 if ((mask[chunk] & (1 << piece)) != 0) 3787 return (-1); 3788 else 3789 mask[chunk] |= (1 << piece); 3790 3791 return (0); 3792 } 3793 3794 int 3795 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3796 { 3797 uint32_t chunk, piece; 3798 3799 chunk = bit >> 5; 3800 piece = bit % (sizeof(uint32_t) * 8); 3801 3802 if ((mask[chunk] & (1 << piece)) == 0) 3803 return (-1); 3804 else 3805 mask[chunk] &= ~(1 << piece); 3806 3807 return (0); 3808 } 3809 3810 int 3811 ctl_is_set(uint32_t *mask, uint32_t bit) 3812 { 3813 uint32_t chunk, piece; 3814 3815 chunk = bit >> 5; 3816 piece = bit % (sizeof(uint32_t) * 8); 3817 3818 if ((mask[chunk] & (1 << piece)) == 0) 3819 return (0); 3820 else 3821 return (1); 3822 } 3823 3824 static uint64_t 3825 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3826 { 3827 uint64_t *t; 3828 3829 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3830 if (t == NULL) 3831 return (0); 3832 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3833 } 3834 3835 static void 3836 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3837 { 3838 uint64_t *t; 3839 3840 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3841 if (t == NULL) 3842 return; 3843 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3844 } 3845 3846 static void 3847 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3848 { 3849 uint64_t *p; 3850 u_int i; 3851 3852 i = residx/CTL_MAX_INIT_PER_PORT; 3853 if (lun->pr_keys[i] != NULL) 3854 return; 3855 mtx_unlock(&lun->lun_lock); 3856 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3857 M_WAITOK | M_ZERO); 3858 mtx_lock(&lun->lun_lock); 3859 if (lun->pr_keys[i] == NULL) 3860 lun->pr_keys[i] = p; 3861 else 3862 free(p, M_CTL); 3863 } 3864 3865 static void 3866 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3867 { 3868 uint64_t *t; 3869 3870 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3871 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3872 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3873 } 3874 3875 /* 3876 * ctl_softc, pool_name, total_ctl_io are passed in. 3877 * npool is passed out. 3878 */ 3879 int 3880 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3881 uint32_t total_ctl_io, void **npool) 3882 { 3883 struct ctl_io_pool *pool; 3884 3885 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3886 M_NOWAIT | M_ZERO); 3887 if (pool == NULL) 3888 return (ENOMEM); 3889 3890 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3891 pool->ctl_softc = ctl_softc; 3892 #ifdef IO_POOLS 3893 pool->zone = uma_zsecond_create(pool->name, NULL, 3894 NULL, NULL, NULL, ctl_softc->io_zone); 3895 /* uma_prealloc(pool->zone, total_ctl_io); */ 3896 #else 3897 pool->zone = ctl_softc->io_zone; 3898 #endif 3899 3900 *npool = pool; 3901 return (0); 3902 } 3903 3904 void 3905 ctl_pool_free(struct ctl_io_pool *pool) 3906 { 3907 3908 if (pool == NULL) 3909 return; 3910 3911 #ifdef IO_POOLS 3912 uma_zdestroy(pool->zone); 3913 #endif 3914 free(pool, M_CTL); 3915 } 3916 3917 union ctl_io * 3918 ctl_alloc_io(void *pool_ref) 3919 { 3920 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3921 union ctl_io *io; 3922 3923 io = uma_zalloc(pool->zone, M_WAITOK); 3924 if (io != NULL) { 3925 io->io_hdr.pool = pool_ref; 3926 CTL_SOFTC(io) = pool->ctl_softc; 3927 } 3928 return (io); 3929 } 3930 3931 union ctl_io * 3932 ctl_alloc_io_nowait(void *pool_ref) 3933 { 3934 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3935 union ctl_io *io; 3936 3937 io = uma_zalloc(pool->zone, M_NOWAIT); 3938 if (io != NULL) { 3939 io->io_hdr.pool = pool_ref; 3940 CTL_SOFTC(io) = pool->ctl_softc; 3941 } 3942 return (io); 3943 } 3944 3945 void 3946 ctl_free_io(union ctl_io *io) 3947 { 3948 struct ctl_io_pool *pool; 3949 3950 if (io == NULL) 3951 return; 3952 3953 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3954 uma_zfree(pool->zone, io); 3955 } 3956 3957 void 3958 ctl_zero_io(union ctl_io *io) 3959 { 3960 struct ctl_io_pool *pool; 3961 3962 if (io == NULL) 3963 return; 3964 3965 /* 3966 * May need to preserve linked list pointers at some point too. 3967 */ 3968 pool = io->io_hdr.pool; 3969 memset(io, 0, sizeof(*io)); 3970 io->io_hdr.pool = pool; 3971 CTL_SOFTC(io) = pool->ctl_softc; 3972 } 3973 3974 int 3975 ctl_expand_number(const char *buf, uint64_t *num) 3976 { 3977 char *endptr; 3978 uint64_t number; 3979 unsigned shift; 3980 3981 number = strtoq(buf, &endptr, 0); 3982 3983 switch (tolower((unsigned char)*endptr)) { 3984 case 'e': 3985 shift = 60; 3986 break; 3987 case 'p': 3988 shift = 50; 3989 break; 3990 case 't': 3991 shift = 40; 3992 break; 3993 case 'g': 3994 shift = 30; 3995 break; 3996 case 'm': 3997 shift = 20; 3998 break; 3999 case 'k': 4000 shift = 10; 4001 break; 4002 case 'b': 4003 case '\0': /* No unit. */ 4004 *num = number; 4005 return (0); 4006 default: 4007 /* Unrecognized unit. */ 4008 return (-1); 4009 } 4010 4011 if ((number << shift) >> shift != number) { 4012 /* Overflow */ 4013 return (-1); 4014 } 4015 *num = number << shift; 4016 return (0); 4017 } 4018 4019 4020 /* 4021 * This routine could be used in the future to load default and/or saved 4022 * mode page parameters for a particuar lun. 4023 */ 4024 static int 4025 ctl_init_page_index(struct ctl_lun *lun) 4026 { 4027 int i, page_code; 4028 struct ctl_page_index *page_index; 4029 const char *value; 4030 uint64_t ival; 4031 4032 memcpy(&lun->mode_pages.index, page_index_template, 4033 sizeof(page_index_template)); 4034 4035 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 4036 4037 page_index = &lun->mode_pages.index[i]; 4038 if (lun->be_lun->lun_type == T_DIRECT && 4039 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 4040 continue; 4041 if (lun->be_lun->lun_type == T_PROCESSOR && 4042 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 4043 continue; 4044 if (lun->be_lun->lun_type == T_CDROM && 4045 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 4046 continue; 4047 4048 page_code = page_index->page_code & SMPH_PC_MASK; 4049 switch (page_code) { 4050 case SMS_RW_ERROR_RECOVERY_PAGE: { 4051 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4052 ("subpage %#x for page %#x is incorrect!", 4053 page_index->subpage, page_code)); 4054 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 4055 &rw_er_page_default, 4056 sizeof(rw_er_page_default)); 4057 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 4058 &rw_er_page_changeable, 4059 sizeof(rw_er_page_changeable)); 4060 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 4061 &rw_er_page_default, 4062 sizeof(rw_er_page_default)); 4063 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 4064 &rw_er_page_default, 4065 sizeof(rw_er_page_default)); 4066 page_index->page_data = 4067 (uint8_t *)lun->mode_pages.rw_er_page; 4068 break; 4069 } 4070 case SMS_FORMAT_DEVICE_PAGE: { 4071 struct scsi_format_page *format_page; 4072 4073 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4074 ("subpage %#x for page %#x is incorrect!", 4075 page_index->subpage, page_code)); 4076 4077 /* 4078 * Sectors per track are set above. Bytes per 4079 * sector need to be set here on a per-LUN basis. 4080 */ 4081 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 4082 &format_page_default, 4083 sizeof(format_page_default)); 4084 memcpy(&lun->mode_pages.format_page[ 4085 CTL_PAGE_CHANGEABLE], &format_page_changeable, 4086 sizeof(format_page_changeable)); 4087 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 4088 &format_page_default, 4089 sizeof(format_page_default)); 4090 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 4091 &format_page_default, 4092 sizeof(format_page_default)); 4093 4094 format_page = &lun->mode_pages.format_page[ 4095 CTL_PAGE_CURRENT]; 4096 scsi_ulto2b(lun->be_lun->blocksize, 4097 format_page->bytes_per_sector); 4098 4099 format_page = &lun->mode_pages.format_page[ 4100 CTL_PAGE_DEFAULT]; 4101 scsi_ulto2b(lun->be_lun->blocksize, 4102 format_page->bytes_per_sector); 4103 4104 format_page = &lun->mode_pages.format_page[ 4105 CTL_PAGE_SAVED]; 4106 scsi_ulto2b(lun->be_lun->blocksize, 4107 format_page->bytes_per_sector); 4108 4109 page_index->page_data = 4110 (uint8_t *)lun->mode_pages.format_page; 4111 break; 4112 } 4113 case SMS_RIGID_DISK_PAGE: { 4114 struct scsi_rigid_disk_page *rigid_disk_page; 4115 uint32_t sectors_per_cylinder; 4116 uint64_t cylinders; 4117 #ifndef __XSCALE__ 4118 int shift; 4119 #endif /* !__XSCALE__ */ 4120 4121 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4122 ("subpage %#x for page %#x is incorrect!", 4123 page_index->subpage, page_code)); 4124 4125 /* 4126 * Rotation rate and sectors per track are set 4127 * above. We calculate the cylinders here based on 4128 * capacity. Due to the number of heads and 4129 * sectors per track we're using, smaller arrays 4130 * may turn out to have 0 cylinders. Linux and 4131 * FreeBSD don't pay attention to these mode pages 4132 * to figure out capacity, but Solaris does. It 4133 * seems to deal with 0 cylinders just fine, and 4134 * works out a fake geometry based on the capacity. 4135 */ 4136 memcpy(&lun->mode_pages.rigid_disk_page[ 4137 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 4138 sizeof(rigid_disk_page_default)); 4139 memcpy(&lun->mode_pages.rigid_disk_page[ 4140 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 4141 sizeof(rigid_disk_page_changeable)); 4142 4143 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 4144 CTL_DEFAULT_HEADS; 4145 4146 /* 4147 * The divide method here will be more accurate, 4148 * probably, but results in floating point being 4149 * used in the kernel on i386 (__udivdi3()). On the 4150 * XScale, though, __udivdi3() is implemented in 4151 * software. 4152 * 4153 * The shift method for cylinder calculation is 4154 * accurate if sectors_per_cylinder is a power of 4155 * 2. Otherwise it might be slightly off -- you 4156 * might have a bit of a truncation problem. 4157 */ 4158 #ifdef __XSCALE__ 4159 cylinders = (lun->be_lun->maxlba + 1) / 4160 sectors_per_cylinder; 4161 #else 4162 for (shift = 31; shift > 0; shift--) { 4163 if (sectors_per_cylinder & (1 << shift)) 4164 break; 4165 } 4166 cylinders = (lun->be_lun->maxlba + 1) >> shift; 4167 #endif 4168 4169 /* 4170 * We've basically got 3 bytes, or 24 bits for the 4171 * cylinder size in the mode page. If we're over, 4172 * just round down to 2^24. 4173 */ 4174 if (cylinders > 0xffffff) 4175 cylinders = 0xffffff; 4176 4177 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4178 CTL_PAGE_DEFAULT]; 4179 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4180 4181 if ((value = ctl_get_opt(&lun->be_lun->options, 4182 "rpm")) != NULL) { 4183 scsi_ulto2b(strtol(value, NULL, 0), 4184 rigid_disk_page->rotation_rate); 4185 } 4186 4187 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 4188 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4189 sizeof(rigid_disk_page_default)); 4190 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 4191 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4192 sizeof(rigid_disk_page_default)); 4193 4194 page_index->page_data = 4195 (uint8_t *)lun->mode_pages.rigid_disk_page; 4196 break; 4197 } 4198 case SMS_VERIFY_ERROR_RECOVERY_PAGE: { 4199 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4200 ("subpage %#x for page %#x is incorrect!", 4201 page_index->subpage, page_code)); 4202 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT], 4203 &verify_er_page_default, 4204 sizeof(verify_er_page_default)); 4205 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE], 4206 &verify_er_page_changeable, 4207 sizeof(verify_er_page_changeable)); 4208 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT], 4209 &verify_er_page_default, 4210 sizeof(verify_er_page_default)); 4211 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED], 4212 &verify_er_page_default, 4213 sizeof(verify_er_page_default)); 4214 page_index->page_data = 4215 (uint8_t *)lun->mode_pages.verify_er_page; 4216 break; 4217 } 4218 case SMS_CACHING_PAGE: { 4219 struct scsi_caching_page *caching_page; 4220 4221 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4222 ("subpage %#x for page %#x is incorrect!", 4223 page_index->subpage, page_code)); 4224 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 4225 &caching_page_default, 4226 sizeof(caching_page_default)); 4227 memcpy(&lun->mode_pages.caching_page[ 4228 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 4229 sizeof(caching_page_changeable)); 4230 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4231 &caching_page_default, 4232 sizeof(caching_page_default)); 4233 caching_page = &lun->mode_pages.caching_page[ 4234 CTL_PAGE_SAVED]; 4235 value = ctl_get_opt(&lun->be_lun->options, "writecache"); 4236 if (value != NULL && strcmp(value, "off") == 0) 4237 caching_page->flags1 &= ~SCP_WCE; 4238 value = ctl_get_opt(&lun->be_lun->options, "readcache"); 4239 if (value != NULL && strcmp(value, "off") == 0) 4240 caching_page->flags1 |= SCP_RCD; 4241 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 4242 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4243 sizeof(caching_page_default)); 4244 page_index->page_data = 4245 (uint8_t *)lun->mode_pages.caching_page; 4246 break; 4247 } 4248 case SMS_CONTROL_MODE_PAGE: { 4249 switch (page_index->subpage) { 4250 case SMS_SUBPAGE_PAGE_0: { 4251 struct scsi_control_page *control_page; 4252 4253 memcpy(&lun->mode_pages.control_page[ 4254 CTL_PAGE_DEFAULT], 4255 &control_page_default, 4256 sizeof(control_page_default)); 4257 memcpy(&lun->mode_pages.control_page[ 4258 CTL_PAGE_CHANGEABLE], 4259 &control_page_changeable, 4260 sizeof(control_page_changeable)); 4261 memcpy(&lun->mode_pages.control_page[ 4262 CTL_PAGE_SAVED], 4263 &control_page_default, 4264 sizeof(control_page_default)); 4265 control_page = &lun->mode_pages.control_page[ 4266 CTL_PAGE_SAVED]; 4267 value = ctl_get_opt(&lun->be_lun->options, 4268 "reordering"); 4269 if (value != NULL && 4270 strcmp(value, "unrestricted") == 0) { 4271 control_page->queue_flags &= 4272 ~SCP_QUEUE_ALG_MASK; 4273 control_page->queue_flags |= 4274 SCP_QUEUE_ALG_UNRESTRICTED; 4275 } 4276 memcpy(&lun->mode_pages.control_page[ 4277 CTL_PAGE_CURRENT], 4278 &lun->mode_pages.control_page[ 4279 CTL_PAGE_SAVED], 4280 sizeof(control_page_default)); 4281 page_index->page_data = 4282 (uint8_t *)lun->mode_pages.control_page; 4283 break; 4284 } 4285 case 0x01: 4286 memcpy(&lun->mode_pages.control_ext_page[ 4287 CTL_PAGE_DEFAULT], 4288 &control_ext_page_default, 4289 sizeof(control_ext_page_default)); 4290 memcpy(&lun->mode_pages.control_ext_page[ 4291 CTL_PAGE_CHANGEABLE], 4292 &control_ext_page_changeable, 4293 sizeof(control_ext_page_changeable)); 4294 memcpy(&lun->mode_pages.control_ext_page[ 4295 CTL_PAGE_SAVED], 4296 &control_ext_page_default, 4297 sizeof(control_ext_page_default)); 4298 memcpy(&lun->mode_pages.control_ext_page[ 4299 CTL_PAGE_CURRENT], 4300 &lun->mode_pages.control_ext_page[ 4301 CTL_PAGE_SAVED], 4302 sizeof(control_ext_page_default)); 4303 page_index->page_data = 4304 (uint8_t *)lun->mode_pages.control_ext_page; 4305 break; 4306 default: 4307 panic("subpage %#x for page %#x is incorrect!", 4308 page_index->subpage, page_code); 4309 } 4310 break; 4311 } 4312 case SMS_INFO_EXCEPTIONS_PAGE: { 4313 switch (page_index->subpage) { 4314 case SMS_SUBPAGE_PAGE_0: 4315 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 4316 &ie_page_default, 4317 sizeof(ie_page_default)); 4318 memcpy(&lun->mode_pages.ie_page[ 4319 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 4320 sizeof(ie_page_changeable)); 4321 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 4322 &ie_page_default, 4323 sizeof(ie_page_default)); 4324 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 4325 &ie_page_default, 4326 sizeof(ie_page_default)); 4327 page_index->page_data = 4328 (uint8_t *)lun->mode_pages.ie_page; 4329 break; 4330 case 0x02: { 4331 struct ctl_logical_block_provisioning_page *page; 4332 4333 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 4334 &lbp_page_default, 4335 sizeof(lbp_page_default)); 4336 memcpy(&lun->mode_pages.lbp_page[ 4337 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 4338 sizeof(lbp_page_changeable)); 4339 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4340 &lbp_page_default, 4341 sizeof(lbp_page_default)); 4342 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 4343 value = ctl_get_opt(&lun->be_lun->options, 4344 "avail-threshold"); 4345 if (value != NULL && 4346 ctl_expand_number(value, &ival) == 0) { 4347 page->descr[0].flags |= SLBPPD_ENABLED | 4348 SLBPPD_ARMING_DEC; 4349 if (lun->be_lun->blocksize) 4350 ival /= lun->be_lun->blocksize; 4351 else 4352 ival /= 512; 4353 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4354 page->descr[0].count); 4355 } 4356 value = ctl_get_opt(&lun->be_lun->options, 4357 "used-threshold"); 4358 if (value != NULL && 4359 ctl_expand_number(value, &ival) == 0) { 4360 page->descr[1].flags |= SLBPPD_ENABLED | 4361 SLBPPD_ARMING_INC; 4362 if (lun->be_lun->blocksize) 4363 ival /= lun->be_lun->blocksize; 4364 else 4365 ival /= 512; 4366 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4367 page->descr[1].count); 4368 } 4369 value = ctl_get_opt(&lun->be_lun->options, 4370 "pool-avail-threshold"); 4371 if (value != NULL && 4372 ctl_expand_number(value, &ival) == 0) { 4373 page->descr[2].flags |= SLBPPD_ENABLED | 4374 SLBPPD_ARMING_DEC; 4375 if (lun->be_lun->blocksize) 4376 ival /= lun->be_lun->blocksize; 4377 else 4378 ival /= 512; 4379 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4380 page->descr[2].count); 4381 } 4382 value = ctl_get_opt(&lun->be_lun->options, 4383 "pool-used-threshold"); 4384 if (value != NULL && 4385 ctl_expand_number(value, &ival) == 0) { 4386 page->descr[3].flags |= SLBPPD_ENABLED | 4387 SLBPPD_ARMING_INC; 4388 if (lun->be_lun->blocksize) 4389 ival /= lun->be_lun->blocksize; 4390 else 4391 ival /= 512; 4392 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4393 page->descr[3].count); 4394 } 4395 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 4396 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4397 sizeof(lbp_page_default)); 4398 page_index->page_data = 4399 (uint8_t *)lun->mode_pages.lbp_page; 4400 break; 4401 } 4402 default: 4403 panic("subpage %#x for page %#x is incorrect!", 4404 page_index->subpage, page_code); 4405 } 4406 break; 4407 } 4408 case SMS_CDDVD_CAPS_PAGE:{ 4409 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4410 ("subpage %#x for page %#x is incorrect!", 4411 page_index->subpage, page_code)); 4412 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT], 4413 &cddvd_page_default, 4414 sizeof(cddvd_page_default)); 4415 memcpy(&lun->mode_pages.cddvd_page[ 4416 CTL_PAGE_CHANGEABLE], &cddvd_page_changeable, 4417 sizeof(cddvd_page_changeable)); 4418 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4419 &cddvd_page_default, 4420 sizeof(cddvd_page_default)); 4421 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT], 4422 &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4423 sizeof(cddvd_page_default)); 4424 page_index->page_data = 4425 (uint8_t *)lun->mode_pages.cddvd_page; 4426 break; 4427 } 4428 default: 4429 panic("invalid page code value %#x", page_code); 4430 } 4431 } 4432 4433 return (CTL_RETVAL_COMPLETE); 4434 } 4435 4436 static int 4437 ctl_init_log_page_index(struct ctl_lun *lun) 4438 { 4439 struct ctl_page_index *page_index; 4440 int i, j, k, prev; 4441 4442 memcpy(&lun->log_pages.index, log_page_index_template, 4443 sizeof(log_page_index_template)); 4444 4445 prev = -1; 4446 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 4447 4448 page_index = &lun->log_pages.index[i]; 4449 if (lun->be_lun->lun_type == T_DIRECT && 4450 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 4451 continue; 4452 if (lun->be_lun->lun_type == T_PROCESSOR && 4453 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 4454 continue; 4455 if (lun->be_lun->lun_type == T_CDROM && 4456 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 4457 continue; 4458 4459 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 4460 lun->backend->lun_attr == NULL) 4461 continue; 4462 4463 if (page_index->page_code != prev) { 4464 lun->log_pages.pages_page[j] = page_index->page_code; 4465 prev = page_index->page_code; 4466 j++; 4467 } 4468 lun->log_pages.subpages_page[k*2] = page_index->page_code; 4469 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 4470 k++; 4471 } 4472 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 4473 lun->log_pages.index[0].page_len = j; 4474 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 4475 lun->log_pages.index[1].page_len = k * 2; 4476 lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0]; 4477 lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS; 4478 lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page; 4479 lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page); 4480 lun->log_pages.index[4].page_data = (uint8_t *)&lun->log_pages.ie_page; 4481 lun->log_pages.index[4].page_len = sizeof(lun->log_pages.ie_page); 4482 4483 return (CTL_RETVAL_COMPLETE); 4484 } 4485 4486 static int 4487 hex2bin(const char *str, uint8_t *buf, int buf_size) 4488 { 4489 int i; 4490 u_char c; 4491 4492 memset(buf, 0, buf_size); 4493 while (isspace(str[0])) 4494 str++; 4495 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) 4496 str += 2; 4497 buf_size *= 2; 4498 for (i = 0; str[i] != 0 && i < buf_size; i++) { 4499 while (str[i] == '-') /* Skip dashes in UUIDs. */ 4500 str++; 4501 c = str[i]; 4502 if (isdigit(c)) 4503 c -= '0'; 4504 else if (isalpha(c)) 4505 c -= isupper(c) ? 'A' - 10 : 'a' - 10; 4506 else 4507 break; 4508 if (c >= 16) 4509 break; 4510 if ((i & 1) == 0) 4511 buf[i / 2] |= (c << 4); 4512 else 4513 buf[i / 2] |= c; 4514 } 4515 return ((i + 1) / 2); 4516 } 4517 4518 /* 4519 * LUN allocation. 4520 * 4521 * Requirements: 4522 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 4523 * wants us to allocate the LUN and he can block. 4524 * - ctl_softc is always set 4525 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 4526 * 4527 * Returns 0 for success, non-zero (errno) for failure. 4528 */ 4529 static int 4530 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4531 struct ctl_be_lun *const be_lun) 4532 { 4533 struct ctl_lun *nlun, *lun; 4534 struct scsi_vpd_id_descriptor *desc; 4535 struct scsi_vpd_id_t10 *t10id; 4536 const char *eui, *naa, *scsiname, *uuid, *vendor, *value; 4537 int lun_number, lun_malloced; 4538 int devidlen, idlen1, idlen2 = 0, len; 4539 4540 if (be_lun == NULL) 4541 return (EINVAL); 4542 4543 /* 4544 * We currently only support Direct Access or Processor LUN types. 4545 */ 4546 switch (be_lun->lun_type) { 4547 case T_DIRECT: 4548 case T_PROCESSOR: 4549 case T_CDROM: 4550 break; 4551 case T_SEQUENTIAL: 4552 case T_CHANGER: 4553 default: 4554 be_lun->lun_config_status(be_lun->be_lun, 4555 CTL_LUN_CONFIG_FAILURE); 4556 break; 4557 } 4558 if (ctl_lun == NULL) { 4559 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4560 lun_malloced = 1; 4561 } else { 4562 lun_malloced = 0; 4563 lun = ctl_lun; 4564 } 4565 4566 memset(lun, 0, sizeof(*lun)); 4567 if (lun_malloced) 4568 lun->flags = CTL_LUN_MALLOCED; 4569 4570 lun->pending_sense = malloc(sizeof(struct scsi_sense_data *) * 4571 ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); 4572 lun->pending_ua = malloc(sizeof(ctl_ua_type *) * ctl_max_ports, 4573 M_DEVBUF, M_WAITOK | M_ZERO); 4574 lun->pr_keys = malloc(sizeof(uint64_t *) * ctl_max_ports, 4575 M_DEVBUF, M_WAITOK | M_ZERO); 4576 4577 /* Generate LUN ID. */ 4578 devidlen = max(CTL_DEVID_MIN_LEN, 4579 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4580 idlen1 = sizeof(*t10id) + devidlen; 4581 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4582 scsiname = ctl_get_opt(&be_lun->options, "scsiname"); 4583 if (scsiname != NULL) { 4584 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4585 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4586 } 4587 eui = ctl_get_opt(&be_lun->options, "eui"); 4588 if (eui != NULL) { 4589 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4590 } 4591 naa = ctl_get_opt(&be_lun->options, "naa"); 4592 if (naa != NULL) { 4593 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4594 } 4595 uuid = ctl_get_opt(&be_lun->options, "uuid"); 4596 if (uuid != NULL) { 4597 len += sizeof(struct scsi_vpd_id_descriptor) + 18; 4598 } 4599 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4600 M_CTL, M_WAITOK | M_ZERO); 4601 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4602 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4603 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4604 desc->length = idlen1; 4605 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4606 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4607 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { 4608 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4609 } else { 4610 strncpy(t10id->vendor, vendor, 4611 min(sizeof(t10id->vendor), strlen(vendor))); 4612 } 4613 strncpy((char *)t10id->vendor_spec_id, 4614 (char *)be_lun->device_id, devidlen); 4615 if (scsiname != NULL) { 4616 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4617 desc->length); 4618 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4619 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4620 SVPD_ID_TYPE_SCSI_NAME; 4621 desc->length = idlen2; 4622 strlcpy(desc->identifier, scsiname, idlen2); 4623 } 4624 if (eui != NULL) { 4625 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4626 desc->length); 4627 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4628 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4629 SVPD_ID_TYPE_EUI64; 4630 desc->length = hex2bin(eui, desc->identifier, 16); 4631 desc->length = desc->length > 12 ? 16 : 4632 (desc->length > 8 ? 12 : 8); 4633 len -= 16 - desc->length; 4634 } 4635 if (naa != NULL) { 4636 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4637 desc->length); 4638 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4639 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4640 SVPD_ID_TYPE_NAA; 4641 desc->length = hex2bin(naa, desc->identifier, 16); 4642 desc->length = desc->length > 8 ? 16 : 8; 4643 len -= 16 - desc->length; 4644 } 4645 if (uuid != NULL) { 4646 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4647 desc->length); 4648 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4649 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4650 SVPD_ID_TYPE_UUID; 4651 desc->identifier[0] = 0x10; 4652 hex2bin(uuid, &desc->identifier[2], 16); 4653 desc->length = 18; 4654 } 4655 lun->lun_devid->len = len; 4656 4657 mtx_lock(&ctl_softc->ctl_lock); 4658 /* 4659 * See if the caller requested a particular LUN number. If so, see 4660 * if it is available. Otherwise, allocate the first available LUN. 4661 */ 4662 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4663 if ((be_lun->req_lun_id > (ctl_max_luns - 1)) 4664 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4665 mtx_unlock(&ctl_softc->ctl_lock); 4666 if (be_lun->req_lun_id > (ctl_max_luns - 1)) { 4667 printf("ctl: requested LUN ID %d is higher " 4668 "than ctl_max_luns - 1 (%d)\n", 4669 be_lun->req_lun_id, ctl_max_luns - 1); 4670 } else { 4671 /* 4672 * XXX KDM return an error, or just assign 4673 * another LUN ID in this case?? 4674 */ 4675 printf("ctl: requested LUN ID %d is already " 4676 "in use\n", be_lun->req_lun_id); 4677 } 4678 fail: 4679 free(lun->lun_devid, M_CTL); 4680 if (lun->flags & CTL_LUN_MALLOCED) 4681 free(lun, M_CTL); 4682 be_lun->lun_config_status(be_lun->be_lun, 4683 CTL_LUN_CONFIG_FAILURE); 4684 return (ENOSPC); 4685 } 4686 lun_number = be_lun->req_lun_id; 4687 } else { 4688 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, ctl_max_luns); 4689 if (lun_number == -1) { 4690 mtx_unlock(&ctl_softc->ctl_lock); 4691 printf("ctl: can't allocate LUN, out of LUNs\n"); 4692 goto fail; 4693 } 4694 } 4695 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4696 mtx_unlock(&ctl_softc->ctl_lock); 4697 4698 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4699 lun->lun = lun_number; 4700 lun->be_lun = be_lun; 4701 /* 4702 * The processor LUN is always enabled. Disk LUNs come on line 4703 * disabled, and must be enabled by the backend. 4704 */ 4705 lun->flags |= CTL_LUN_DISABLED; 4706 lun->backend = be_lun->be; 4707 be_lun->ctl_lun = lun; 4708 be_lun->lun_id = lun_number; 4709 atomic_add_int(&be_lun->be->num_luns, 1); 4710 if (be_lun->flags & CTL_LUN_FLAG_EJECTED) 4711 lun->flags |= CTL_LUN_EJECTED; 4712 if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) 4713 lun->flags |= CTL_LUN_NO_MEDIA; 4714 if (be_lun->flags & CTL_LUN_FLAG_STOPPED) 4715 lun->flags |= CTL_LUN_STOPPED; 4716 4717 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4718 lun->flags |= CTL_LUN_PRIMARY_SC; 4719 4720 value = ctl_get_opt(&be_lun->options, "removable"); 4721 if (value != NULL) { 4722 if (strcmp(value, "on") == 0) 4723 lun->flags |= CTL_LUN_REMOVABLE; 4724 } else if (be_lun->lun_type == T_CDROM) 4725 lun->flags |= CTL_LUN_REMOVABLE; 4726 4727 lun->ctl_softc = ctl_softc; 4728 #ifdef CTL_TIME_IO 4729 lun->last_busy = getsbinuptime(); 4730 #endif 4731 TAILQ_INIT(&lun->ooa_queue); 4732 TAILQ_INIT(&lun->blocked_queue); 4733 STAILQ_INIT(&lun->error_list); 4734 lun->ie_reported = 1; 4735 callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0); 4736 ctl_tpc_lun_init(lun); 4737 if (lun->flags & CTL_LUN_REMOVABLE) { 4738 lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4, 4739 M_CTL, M_WAITOK); 4740 } 4741 4742 /* 4743 * Initialize the mode and log page index. 4744 */ 4745 ctl_init_page_index(lun); 4746 ctl_init_log_page_index(lun); 4747 4748 /* Setup statistics gathering */ 4749 #ifdef CTL_LEGACY_STATS 4750 lun->legacy_stats.device_type = be_lun->lun_type; 4751 lun->legacy_stats.lun_number = lun_number; 4752 lun->legacy_stats.blocksize = be_lun->blocksize; 4753 if (be_lun->blocksize == 0) 4754 lun->legacy_stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4755 lun->legacy_stats.ports = malloc(sizeof(struct ctl_lun_io_port_stats) * 4756 ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); 4757 for (len = 0; len < ctl_max_ports; len++) 4758 lun->legacy_stats.ports[len].targ_port = len; 4759 #endif /* CTL_LEGACY_STATS */ 4760 lun->stats.item = lun_number; 4761 4762 /* 4763 * Now, before we insert this lun on the lun list, set the lun 4764 * inventory changed UA for all other luns. 4765 */ 4766 mtx_lock(&ctl_softc->ctl_lock); 4767 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4768 mtx_lock(&nlun->lun_lock); 4769 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4770 mtx_unlock(&nlun->lun_lock); 4771 } 4772 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4773 ctl_softc->ctl_luns[lun_number] = lun; 4774 ctl_softc->num_luns++; 4775 mtx_unlock(&ctl_softc->ctl_lock); 4776 4777 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4778 return (0); 4779 } 4780 4781 /* 4782 * Delete a LUN. 4783 * Assumptions: 4784 * - LUN has already been marked invalid and any pending I/O has been taken 4785 * care of. 4786 */ 4787 static int 4788 ctl_free_lun(struct ctl_lun *lun) 4789 { 4790 struct ctl_softc *softc = lun->ctl_softc; 4791 struct ctl_lun *nlun; 4792 int i; 4793 4794 KASSERT(TAILQ_EMPTY(&lun->ooa_queue), 4795 ("Freeing a LUN %p with outstanding I/O!\n", lun)); 4796 4797 mtx_lock(&softc->ctl_lock); 4798 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4799 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4800 softc->ctl_luns[lun->lun] = NULL; 4801 softc->num_luns--; 4802 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4803 mtx_lock(&nlun->lun_lock); 4804 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4805 mtx_unlock(&nlun->lun_lock); 4806 } 4807 mtx_unlock(&softc->ctl_lock); 4808 4809 /* 4810 * Tell the backend to free resources, if this LUN has a backend. 4811 */ 4812 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4813 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4814 4815 lun->ie_reportcnt = UINT32_MAX; 4816 callout_drain(&lun->ie_callout); 4817 ctl_tpc_lun_shutdown(lun); 4818 mtx_destroy(&lun->lun_lock); 4819 free(lun->lun_devid, M_CTL); 4820 for (i = 0; i < ctl_max_ports; i++) 4821 free(lun->pending_ua[i], M_CTL); 4822 free(lun->pending_ua, M_DEVBUF); 4823 for (i = 0; i < ctl_max_ports; i++) 4824 free(lun->pr_keys[i], M_CTL); 4825 free(lun->pr_keys, M_DEVBUF); 4826 free(lun->write_buffer, M_CTL); 4827 free(lun->prevent, M_CTL); 4828 if (lun->flags & CTL_LUN_MALLOCED) 4829 free(lun, M_CTL); 4830 4831 return (0); 4832 } 4833 4834 static void 4835 ctl_create_lun(struct ctl_be_lun *be_lun) 4836 { 4837 4838 /* 4839 * ctl_alloc_lun() should handle all potential failure cases. 4840 */ 4841 ctl_alloc_lun(control_softc, NULL, be_lun); 4842 } 4843 4844 int 4845 ctl_add_lun(struct ctl_be_lun *be_lun) 4846 { 4847 struct ctl_softc *softc = control_softc; 4848 4849 mtx_lock(&softc->ctl_lock); 4850 STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links); 4851 mtx_unlock(&softc->ctl_lock); 4852 wakeup(&softc->pending_lun_queue); 4853 4854 return (0); 4855 } 4856 4857 int 4858 ctl_enable_lun(struct ctl_be_lun *be_lun) 4859 { 4860 struct ctl_softc *softc; 4861 struct ctl_port *port, *nport; 4862 struct ctl_lun *lun; 4863 int retval; 4864 4865 lun = (struct ctl_lun *)be_lun->ctl_lun; 4866 softc = lun->ctl_softc; 4867 4868 mtx_lock(&softc->ctl_lock); 4869 mtx_lock(&lun->lun_lock); 4870 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4871 /* 4872 * eh? Why did we get called if the LUN is already 4873 * enabled? 4874 */ 4875 mtx_unlock(&lun->lun_lock); 4876 mtx_unlock(&softc->ctl_lock); 4877 return (0); 4878 } 4879 lun->flags &= ~CTL_LUN_DISABLED; 4880 mtx_unlock(&lun->lun_lock); 4881 4882 STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) { 4883 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4884 port->lun_map != NULL || port->lun_enable == NULL) 4885 continue; 4886 4887 /* 4888 * Drop the lock while we call the FETD's enable routine. 4889 * This can lead to a callback into CTL (at least in the 4890 * case of the internal initiator frontend. 4891 */ 4892 mtx_unlock(&softc->ctl_lock); 4893 retval = port->lun_enable(port->targ_lun_arg, lun->lun); 4894 mtx_lock(&softc->ctl_lock); 4895 if (retval != 0) { 4896 printf("%s: FETD %s port %d returned error " 4897 "%d for lun_enable on lun %jd\n", 4898 __func__, port->port_name, port->targ_port, 4899 retval, (intmax_t)lun->lun); 4900 } 4901 } 4902 4903 mtx_unlock(&softc->ctl_lock); 4904 ctl_isc_announce_lun(lun); 4905 4906 return (0); 4907 } 4908 4909 int 4910 ctl_disable_lun(struct ctl_be_lun *be_lun) 4911 { 4912 struct ctl_softc *softc; 4913 struct ctl_port *port; 4914 struct ctl_lun *lun; 4915 int retval; 4916 4917 lun = (struct ctl_lun *)be_lun->ctl_lun; 4918 softc = lun->ctl_softc; 4919 4920 mtx_lock(&softc->ctl_lock); 4921 mtx_lock(&lun->lun_lock); 4922 if (lun->flags & CTL_LUN_DISABLED) { 4923 mtx_unlock(&lun->lun_lock); 4924 mtx_unlock(&softc->ctl_lock); 4925 return (0); 4926 } 4927 lun->flags |= CTL_LUN_DISABLED; 4928 mtx_unlock(&lun->lun_lock); 4929 4930 STAILQ_FOREACH(port, &softc->port_list, links) { 4931 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4932 port->lun_map != NULL || port->lun_disable == NULL) 4933 continue; 4934 4935 /* 4936 * Drop the lock before we call the frontend's disable 4937 * routine, to avoid lock order reversals. 4938 * 4939 * XXX KDM what happens if the frontend list changes while 4940 * we're traversing it? It's unlikely, but should be handled. 4941 */ 4942 mtx_unlock(&softc->ctl_lock); 4943 retval = port->lun_disable(port->targ_lun_arg, lun->lun); 4944 mtx_lock(&softc->ctl_lock); 4945 if (retval != 0) { 4946 printf("%s: FETD %s port %d returned error " 4947 "%d for lun_disable on lun %jd\n", 4948 __func__, port->port_name, port->targ_port, 4949 retval, (intmax_t)lun->lun); 4950 } 4951 } 4952 4953 mtx_unlock(&softc->ctl_lock); 4954 ctl_isc_announce_lun(lun); 4955 4956 return (0); 4957 } 4958 4959 int 4960 ctl_start_lun(struct ctl_be_lun *be_lun) 4961 { 4962 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4963 4964 mtx_lock(&lun->lun_lock); 4965 lun->flags &= ~CTL_LUN_STOPPED; 4966 mtx_unlock(&lun->lun_lock); 4967 return (0); 4968 } 4969 4970 int 4971 ctl_stop_lun(struct ctl_be_lun *be_lun) 4972 { 4973 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4974 4975 mtx_lock(&lun->lun_lock); 4976 lun->flags |= CTL_LUN_STOPPED; 4977 mtx_unlock(&lun->lun_lock); 4978 return (0); 4979 } 4980 4981 int 4982 ctl_lun_no_media(struct ctl_be_lun *be_lun) 4983 { 4984 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4985 4986 mtx_lock(&lun->lun_lock); 4987 lun->flags |= CTL_LUN_NO_MEDIA; 4988 mtx_unlock(&lun->lun_lock); 4989 return (0); 4990 } 4991 4992 int 4993 ctl_lun_has_media(struct ctl_be_lun *be_lun) 4994 { 4995 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4996 union ctl_ha_msg msg; 4997 4998 mtx_lock(&lun->lun_lock); 4999 lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED); 5000 if (lun->flags & CTL_LUN_REMOVABLE) 5001 ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE); 5002 mtx_unlock(&lun->lun_lock); 5003 if ((lun->flags & CTL_LUN_REMOVABLE) && 5004 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 5005 bzero(&msg.ua, sizeof(msg.ua)); 5006 msg.hdr.msg_type = CTL_MSG_UA; 5007 msg.hdr.nexus.initid = -1; 5008 msg.hdr.nexus.targ_port = -1; 5009 msg.hdr.nexus.targ_lun = lun->lun; 5010 msg.hdr.nexus.targ_mapped_lun = lun->lun; 5011 msg.ua.ua_all = 1; 5012 msg.ua.ua_set = 1; 5013 msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE; 5014 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 5015 M_WAITOK); 5016 } 5017 return (0); 5018 } 5019 5020 int 5021 ctl_lun_ejected(struct ctl_be_lun *be_lun) 5022 { 5023 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5024 5025 mtx_lock(&lun->lun_lock); 5026 lun->flags |= CTL_LUN_EJECTED; 5027 mtx_unlock(&lun->lun_lock); 5028 return (0); 5029 } 5030 5031 int 5032 ctl_lun_primary(struct ctl_be_lun *be_lun) 5033 { 5034 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5035 5036 mtx_lock(&lun->lun_lock); 5037 lun->flags |= CTL_LUN_PRIMARY_SC; 5038 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 5039 mtx_unlock(&lun->lun_lock); 5040 ctl_isc_announce_lun(lun); 5041 return (0); 5042 } 5043 5044 int 5045 ctl_lun_secondary(struct ctl_be_lun *be_lun) 5046 { 5047 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5048 5049 mtx_lock(&lun->lun_lock); 5050 lun->flags &= ~CTL_LUN_PRIMARY_SC; 5051 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 5052 mtx_unlock(&lun->lun_lock); 5053 ctl_isc_announce_lun(lun); 5054 return (0); 5055 } 5056 5057 int 5058 ctl_invalidate_lun(struct ctl_be_lun *be_lun) 5059 { 5060 struct ctl_softc *softc; 5061 struct ctl_lun *lun; 5062 5063 lun = (struct ctl_lun *)be_lun->ctl_lun; 5064 softc = lun->ctl_softc; 5065 5066 mtx_lock(&lun->lun_lock); 5067 5068 /* 5069 * The LUN needs to be disabled before it can be marked invalid. 5070 */ 5071 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 5072 mtx_unlock(&lun->lun_lock); 5073 return (-1); 5074 } 5075 /* 5076 * Mark the LUN invalid. 5077 */ 5078 lun->flags |= CTL_LUN_INVALID; 5079 5080 /* 5081 * If there is nothing in the OOA queue, go ahead and free the LUN. 5082 * If we have something in the OOA queue, we'll free it when the 5083 * last I/O completes. 5084 */ 5085 if (TAILQ_EMPTY(&lun->ooa_queue)) { 5086 mtx_unlock(&lun->lun_lock); 5087 ctl_free_lun(lun); 5088 } else 5089 mtx_unlock(&lun->lun_lock); 5090 5091 return (0); 5092 } 5093 5094 void 5095 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 5096 { 5097 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5098 union ctl_ha_msg msg; 5099 5100 mtx_lock(&lun->lun_lock); 5101 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE); 5102 mtx_unlock(&lun->lun_lock); 5103 if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 5104 /* Send msg to other side. */ 5105 bzero(&msg.ua, sizeof(msg.ua)); 5106 msg.hdr.msg_type = CTL_MSG_UA; 5107 msg.hdr.nexus.initid = -1; 5108 msg.hdr.nexus.targ_port = -1; 5109 msg.hdr.nexus.targ_lun = lun->lun; 5110 msg.hdr.nexus.targ_mapped_lun = lun->lun; 5111 msg.ua.ua_all = 1; 5112 msg.ua.ua_set = 1; 5113 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE; 5114 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 5115 M_WAITOK); 5116 } 5117 } 5118 5119 /* 5120 * Backend "memory move is complete" callback for requests that never 5121 * make it down to say RAIDCore's configuration code. 5122 */ 5123 int 5124 ctl_config_move_done(union ctl_io *io) 5125 { 5126 int retval; 5127 5128 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 5129 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 5130 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 5131 5132 if ((io->io_hdr.port_status != 0) && 5133 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5134 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5135 ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, 5136 /*retry_count*/ io->io_hdr.port_status); 5137 } else if (io->scsiio.kern_data_resid != 0 && 5138 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && 5139 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5140 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5141 ctl_set_invalid_field_ciu(&io->scsiio); 5142 } 5143 5144 if (ctl_debug & CTL_DEBUG_CDB_DATA) 5145 ctl_data_print(io); 5146 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 5147 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5148 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 5149 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 5150 /* 5151 * XXX KDM just assuming a single pointer here, and not a 5152 * S/G list. If we start using S/G lists for config data, 5153 * we'll need to know how to clean them up here as well. 5154 */ 5155 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5156 free(io->scsiio.kern_data_ptr, M_CTL); 5157 ctl_done(io); 5158 retval = CTL_RETVAL_COMPLETE; 5159 } else { 5160 /* 5161 * XXX KDM now we need to continue data movement. Some 5162 * options: 5163 * - call ctl_scsiio() again? We don't do this for data 5164 * writes, because for those at least we know ahead of 5165 * time where the write will go and how long it is. For 5166 * config writes, though, that information is largely 5167 * contained within the write itself, thus we need to 5168 * parse out the data again. 5169 * 5170 * - Call some other function once the data is in? 5171 */ 5172 5173 /* 5174 * XXX KDM call ctl_scsiio() again for now, and check flag 5175 * bits to see whether we're allocated or not. 5176 */ 5177 retval = ctl_scsiio(&io->scsiio); 5178 } 5179 return (retval); 5180 } 5181 5182 /* 5183 * This gets called by a backend driver when it is done with a 5184 * data_submit method. 5185 */ 5186 void 5187 ctl_data_submit_done(union ctl_io *io) 5188 { 5189 /* 5190 * If the IO_CONT flag is set, we need to call the supplied 5191 * function to continue processing the I/O, instead of completing 5192 * the I/O just yet. 5193 * 5194 * If there is an error, though, we don't want to keep processing. 5195 * Instead, just send status back to the initiator. 5196 */ 5197 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5198 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5199 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5200 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5201 io->scsiio.io_cont(io); 5202 return; 5203 } 5204 ctl_done(io); 5205 } 5206 5207 /* 5208 * This gets called by a backend driver when it is done with a 5209 * configuration write. 5210 */ 5211 void 5212 ctl_config_write_done(union ctl_io *io) 5213 { 5214 uint8_t *buf; 5215 5216 /* 5217 * If the IO_CONT flag is set, we need to call the supplied 5218 * function to continue processing the I/O, instead of completing 5219 * the I/O just yet. 5220 * 5221 * If there is an error, though, we don't want to keep processing. 5222 * Instead, just send status back to the initiator. 5223 */ 5224 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5225 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5226 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5227 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5228 io->scsiio.io_cont(io); 5229 return; 5230 } 5231 /* 5232 * Since a configuration write can be done for commands that actually 5233 * have data allocated, like write buffer, and commands that have 5234 * no data, like start/stop unit, we need to check here. 5235 */ 5236 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5237 buf = io->scsiio.kern_data_ptr; 5238 else 5239 buf = NULL; 5240 ctl_done(io); 5241 if (buf) 5242 free(buf, M_CTL); 5243 } 5244 5245 void 5246 ctl_config_read_done(union ctl_io *io) 5247 { 5248 uint8_t *buf; 5249 5250 /* 5251 * If there is some error -- we are done, skip data transfer. 5252 */ 5253 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 5254 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5255 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 5256 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5257 buf = io->scsiio.kern_data_ptr; 5258 else 5259 buf = NULL; 5260 ctl_done(io); 5261 if (buf) 5262 free(buf, M_CTL); 5263 return; 5264 } 5265 5266 /* 5267 * If the IO_CONT flag is set, we need to call the supplied 5268 * function to continue processing the I/O, instead of completing 5269 * the I/O just yet. 5270 */ 5271 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 5272 io->scsiio.io_cont(io); 5273 return; 5274 } 5275 5276 ctl_datamove(io); 5277 } 5278 5279 /* 5280 * SCSI release command. 5281 */ 5282 int 5283 ctl_scsi_release(struct ctl_scsiio *ctsio) 5284 { 5285 struct ctl_lun *lun = CTL_LUN(ctsio); 5286 uint32_t residx; 5287 5288 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 5289 5290 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5291 5292 /* 5293 * XXX KDM right now, we only support LUN reservation. We don't 5294 * support 3rd party reservations, or extent reservations, which 5295 * might actually need the parameter list. If we've gotten this 5296 * far, we've got a LUN reservation. Anything else got kicked out 5297 * above. So, according to SPC, ignore the length. 5298 */ 5299 5300 mtx_lock(&lun->lun_lock); 5301 5302 /* 5303 * According to SPC, it is not an error for an intiator to attempt 5304 * to release a reservation on a LUN that isn't reserved, or that 5305 * is reserved by another initiator. The reservation can only be 5306 * released, though, by the initiator who made it or by one of 5307 * several reset type events. 5308 */ 5309 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 5310 lun->flags &= ~CTL_LUN_RESERVED; 5311 5312 mtx_unlock(&lun->lun_lock); 5313 5314 ctl_set_success(ctsio); 5315 ctl_done((union ctl_io *)ctsio); 5316 return (CTL_RETVAL_COMPLETE); 5317 } 5318 5319 int 5320 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5321 { 5322 struct ctl_lun *lun = CTL_LUN(ctsio); 5323 uint32_t residx; 5324 5325 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5326 5327 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5328 5329 /* 5330 * XXX KDM right now, we only support LUN reservation. We don't 5331 * support 3rd party reservations, or extent reservations, which 5332 * might actually need the parameter list. If we've gotten this 5333 * far, we've got a LUN reservation. Anything else got kicked out 5334 * above. So, according to SPC, ignore the length. 5335 */ 5336 5337 mtx_lock(&lun->lun_lock); 5338 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 5339 ctl_set_reservation_conflict(ctsio); 5340 goto bailout; 5341 } 5342 5343 /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. */ 5344 if (lun->flags & CTL_LUN_PR_RESERVED) { 5345 ctl_set_success(ctsio); 5346 goto bailout; 5347 } 5348 5349 lun->flags |= CTL_LUN_RESERVED; 5350 lun->res_idx = residx; 5351 ctl_set_success(ctsio); 5352 5353 bailout: 5354 mtx_unlock(&lun->lun_lock); 5355 ctl_done((union ctl_io *)ctsio); 5356 return (CTL_RETVAL_COMPLETE); 5357 } 5358 5359 int 5360 ctl_start_stop(struct ctl_scsiio *ctsio) 5361 { 5362 struct ctl_lun *lun = CTL_LUN(ctsio); 5363 struct scsi_start_stop_unit *cdb; 5364 int retval; 5365 5366 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5367 5368 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5369 5370 if ((cdb->how & SSS_PC_MASK) == 0) { 5371 if ((lun->flags & CTL_LUN_PR_RESERVED) && 5372 (cdb->how & SSS_START) == 0) { 5373 uint32_t residx; 5374 5375 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5376 if (ctl_get_prkey(lun, residx) == 0 || 5377 (lun->pr_res_idx != residx && lun->pr_res_type < 4)) { 5378 5379 ctl_set_reservation_conflict(ctsio); 5380 ctl_done((union ctl_io *)ctsio); 5381 return (CTL_RETVAL_COMPLETE); 5382 } 5383 } 5384 5385 if ((cdb->how & SSS_LOEJ) && 5386 (lun->flags & CTL_LUN_REMOVABLE) == 0) { 5387 ctl_set_invalid_field(ctsio, 5388 /*sks_valid*/ 1, 5389 /*command*/ 1, 5390 /*field*/ 4, 5391 /*bit_valid*/ 1, 5392 /*bit*/ 1); 5393 ctl_done((union ctl_io *)ctsio); 5394 return (CTL_RETVAL_COMPLETE); 5395 } 5396 5397 if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) && 5398 lun->prevent_count > 0) { 5399 /* "Medium removal prevented" */ 5400 ctl_set_sense(ctsio, /*current_error*/ 1, 5401 /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ? 5402 SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST, 5403 /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE); 5404 ctl_done((union ctl_io *)ctsio); 5405 return (CTL_RETVAL_COMPLETE); 5406 } 5407 } 5408 5409 retval = lun->backend->config_write((union ctl_io *)ctsio); 5410 return (retval); 5411 } 5412 5413 int 5414 ctl_prevent_allow(struct ctl_scsiio *ctsio) 5415 { 5416 struct ctl_lun *lun = CTL_LUN(ctsio); 5417 struct scsi_prevent *cdb; 5418 int retval; 5419 uint32_t initidx; 5420 5421 CTL_DEBUG_PRINT(("ctl_prevent_allow\n")); 5422 5423 cdb = (struct scsi_prevent *)ctsio->cdb; 5424 5425 if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) { 5426 ctl_set_invalid_opcode(ctsio); 5427 ctl_done((union ctl_io *)ctsio); 5428 return (CTL_RETVAL_COMPLETE); 5429 } 5430 5431 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5432 mtx_lock(&lun->lun_lock); 5433 if ((cdb->how & PR_PREVENT) && 5434 ctl_is_set(lun->prevent, initidx) == 0) { 5435 ctl_set_mask(lun->prevent, initidx); 5436 lun->prevent_count++; 5437 } else if ((cdb->how & PR_PREVENT) == 0 && 5438 ctl_is_set(lun->prevent, initidx)) { 5439 ctl_clear_mask(lun->prevent, initidx); 5440 lun->prevent_count--; 5441 } 5442 mtx_unlock(&lun->lun_lock); 5443 retval = lun->backend->config_write((union ctl_io *)ctsio); 5444 return (retval); 5445 } 5446 5447 /* 5448 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5449 * we don't really do anything with the LBA and length fields if the user 5450 * passes them in. Instead we'll just flush out the cache for the entire 5451 * LUN. 5452 */ 5453 int 5454 ctl_sync_cache(struct ctl_scsiio *ctsio) 5455 { 5456 struct ctl_lun *lun = CTL_LUN(ctsio); 5457 struct ctl_lba_len_flags *lbalen; 5458 uint64_t starting_lba; 5459 uint32_t block_count; 5460 int retval; 5461 uint8_t byte2; 5462 5463 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5464 5465 retval = 0; 5466 5467 switch (ctsio->cdb[0]) { 5468 case SYNCHRONIZE_CACHE: { 5469 struct scsi_sync_cache *cdb; 5470 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5471 5472 starting_lba = scsi_4btoul(cdb->begin_lba); 5473 block_count = scsi_2btoul(cdb->lb_count); 5474 byte2 = cdb->byte2; 5475 break; 5476 } 5477 case SYNCHRONIZE_CACHE_16: { 5478 struct scsi_sync_cache_16 *cdb; 5479 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5480 5481 starting_lba = scsi_8btou64(cdb->begin_lba); 5482 block_count = scsi_4btoul(cdb->lb_count); 5483 byte2 = cdb->byte2; 5484 break; 5485 } 5486 default: 5487 ctl_set_invalid_opcode(ctsio); 5488 ctl_done((union ctl_io *)ctsio); 5489 goto bailout; 5490 break; /* NOTREACHED */ 5491 } 5492 5493 /* 5494 * We check the LBA and length, but don't do anything with them. 5495 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5496 * get flushed. This check will just help satisfy anyone who wants 5497 * to see an error for an out of range LBA. 5498 */ 5499 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5500 ctl_set_lba_out_of_range(ctsio, 5501 MAX(starting_lba, lun->be_lun->maxlba + 1)); 5502 ctl_done((union ctl_io *)ctsio); 5503 goto bailout; 5504 } 5505 5506 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5507 lbalen->lba = starting_lba; 5508 lbalen->len = block_count; 5509 lbalen->flags = byte2; 5510 retval = lun->backend->config_write((union ctl_io *)ctsio); 5511 5512 bailout: 5513 return (retval); 5514 } 5515 5516 int 5517 ctl_format(struct ctl_scsiio *ctsio) 5518 { 5519 struct scsi_format *cdb; 5520 int length, defect_list_len; 5521 5522 CTL_DEBUG_PRINT(("ctl_format\n")); 5523 5524 cdb = (struct scsi_format *)ctsio->cdb; 5525 5526 length = 0; 5527 if (cdb->byte2 & SF_FMTDATA) { 5528 if (cdb->byte2 & SF_LONGLIST) 5529 length = sizeof(struct scsi_format_header_long); 5530 else 5531 length = sizeof(struct scsi_format_header_short); 5532 } 5533 5534 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5535 && (length > 0)) { 5536 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5537 ctsio->kern_data_len = length; 5538 ctsio->kern_total_len = length; 5539 ctsio->kern_rel_offset = 0; 5540 ctsio->kern_sg_entries = 0; 5541 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5542 ctsio->be_move_done = ctl_config_move_done; 5543 ctl_datamove((union ctl_io *)ctsio); 5544 5545 return (CTL_RETVAL_COMPLETE); 5546 } 5547 5548 defect_list_len = 0; 5549 5550 if (cdb->byte2 & SF_FMTDATA) { 5551 if (cdb->byte2 & SF_LONGLIST) { 5552 struct scsi_format_header_long *header; 5553 5554 header = (struct scsi_format_header_long *) 5555 ctsio->kern_data_ptr; 5556 5557 defect_list_len = scsi_4btoul(header->defect_list_len); 5558 if (defect_list_len != 0) { 5559 ctl_set_invalid_field(ctsio, 5560 /*sks_valid*/ 1, 5561 /*command*/ 0, 5562 /*field*/ 2, 5563 /*bit_valid*/ 0, 5564 /*bit*/ 0); 5565 goto bailout; 5566 } 5567 } else { 5568 struct scsi_format_header_short *header; 5569 5570 header = (struct scsi_format_header_short *) 5571 ctsio->kern_data_ptr; 5572 5573 defect_list_len = scsi_2btoul(header->defect_list_len); 5574 if (defect_list_len != 0) { 5575 ctl_set_invalid_field(ctsio, 5576 /*sks_valid*/ 1, 5577 /*command*/ 0, 5578 /*field*/ 2, 5579 /*bit_valid*/ 0, 5580 /*bit*/ 0); 5581 goto bailout; 5582 } 5583 } 5584 } 5585 5586 ctl_set_success(ctsio); 5587 bailout: 5588 5589 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5590 free(ctsio->kern_data_ptr, M_CTL); 5591 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5592 } 5593 5594 ctl_done((union ctl_io *)ctsio); 5595 return (CTL_RETVAL_COMPLETE); 5596 } 5597 5598 int 5599 ctl_read_buffer(struct ctl_scsiio *ctsio) 5600 { 5601 struct ctl_lun *lun = CTL_LUN(ctsio); 5602 uint64_t buffer_offset; 5603 uint32_t len; 5604 uint8_t byte2; 5605 static uint8_t descr[4]; 5606 static uint8_t echo_descr[4] = { 0 }; 5607 5608 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5609 5610 switch (ctsio->cdb[0]) { 5611 case READ_BUFFER: { 5612 struct scsi_read_buffer *cdb; 5613 5614 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5615 buffer_offset = scsi_3btoul(cdb->offset); 5616 len = scsi_3btoul(cdb->length); 5617 byte2 = cdb->byte2; 5618 break; 5619 } 5620 case READ_BUFFER_16: { 5621 struct scsi_read_buffer_16 *cdb; 5622 5623 cdb = (struct scsi_read_buffer_16 *)ctsio->cdb; 5624 buffer_offset = scsi_8btou64(cdb->offset); 5625 len = scsi_4btoul(cdb->length); 5626 byte2 = cdb->byte2; 5627 break; 5628 } 5629 default: /* This shouldn't happen. */ 5630 ctl_set_invalid_opcode(ctsio); 5631 ctl_done((union ctl_io *)ctsio); 5632 return (CTL_RETVAL_COMPLETE); 5633 } 5634 5635 if (buffer_offset > CTL_WRITE_BUFFER_SIZE || 5636 buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5637 ctl_set_invalid_field(ctsio, 5638 /*sks_valid*/ 1, 5639 /*command*/ 1, 5640 /*field*/ 6, 5641 /*bit_valid*/ 0, 5642 /*bit*/ 0); 5643 ctl_done((union ctl_io *)ctsio); 5644 return (CTL_RETVAL_COMPLETE); 5645 } 5646 5647 if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5648 descr[0] = 0; 5649 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5650 ctsio->kern_data_ptr = descr; 5651 len = min(len, sizeof(descr)); 5652 } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5653 ctsio->kern_data_ptr = echo_descr; 5654 len = min(len, sizeof(echo_descr)); 5655 } else { 5656 if (lun->write_buffer == NULL) { 5657 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5658 M_CTL, M_WAITOK); 5659 } 5660 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5661 } 5662 ctsio->kern_data_len = len; 5663 ctsio->kern_total_len = len; 5664 ctsio->kern_rel_offset = 0; 5665 ctsio->kern_sg_entries = 0; 5666 ctl_set_success(ctsio); 5667 ctsio->be_move_done = ctl_config_move_done; 5668 ctl_datamove((union ctl_io *)ctsio); 5669 return (CTL_RETVAL_COMPLETE); 5670 } 5671 5672 int 5673 ctl_write_buffer(struct ctl_scsiio *ctsio) 5674 { 5675 struct ctl_lun *lun = CTL_LUN(ctsio); 5676 struct scsi_write_buffer *cdb; 5677 int buffer_offset, len; 5678 5679 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5680 5681 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5682 5683 len = scsi_3btoul(cdb->length); 5684 buffer_offset = scsi_3btoul(cdb->offset); 5685 5686 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5687 ctl_set_invalid_field(ctsio, 5688 /*sks_valid*/ 1, 5689 /*command*/ 1, 5690 /*field*/ 6, 5691 /*bit_valid*/ 0, 5692 /*bit*/ 0); 5693 ctl_done((union ctl_io *)ctsio); 5694 return (CTL_RETVAL_COMPLETE); 5695 } 5696 5697 /* 5698 * If we've got a kernel request that hasn't been malloced yet, 5699 * malloc it and tell the caller the data buffer is here. 5700 */ 5701 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5702 if (lun->write_buffer == NULL) { 5703 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5704 M_CTL, M_WAITOK); 5705 } 5706 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5707 ctsio->kern_data_len = len; 5708 ctsio->kern_total_len = len; 5709 ctsio->kern_rel_offset = 0; 5710 ctsio->kern_sg_entries = 0; 5711 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5712 ctsio->be_move_done = ctl_config_move_done; 5713 ctl_datamove((union ctl_io *)ctsio); 5714 5715 return (CTL_RETVAL_COMPLETE); 5716 } 5717 5718 ctl_set_success(ctsio); 5719 ctl_done((union ctl_io *)ctsio); 5720 return (CTL_RETVAL_COMPLETE); 5721 } 5722 5723 int 5724 ctl_write_same(struct ctl_scsiio *ctsio) 5725 { 5726 struct ctl_lun *lun = CTL_LUN(ctsio); 5727 struct ctl_lba_len_flags *lbalen; 5728 uint64_t lba; 5729 uint32_t num_blocks; 5730 int len, retval; 5731 uint8_t byte2; 5732 5733 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5734 5735 switch (ctsio->cdb[0]) { 5736 case WRITE_SAME_10: { 5737 struct scsi_write_same_10 *cdb; 5738 5739 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5740 5741 lba = scsi_4btoul(cdb->addr); 5742 num_blocks = scsi_2btoul(cdb->length); 5743 byte2 = cdb->byte2; 5744 break; 5745 } 5746 case WRITE_SAME_16: { 5747 struct scsi_write_same_16 *cdb; 5748 5749 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5750 5751 lba = scsi_8btou64(cdb->addr); 5752 num_blocks = scsi_4btoul(cdb->length); 5753 byte2 = cdb->byte2; 5754 break; 5755 } 5756 default: 5757 /* 5758 * We got a command we don't support. This shouldn't 5759 * happen, commands should be filtered out above us. 5760 */ 5761 ctl_set_invalid_opcode(ctsio); 5762 ctl_done((union ctl_io *)ctsio); 5763 5764 return (CTL_RETVAL_COMPLETE); 5765 break; /* NOTREACHED */ 5766 } 5767 5768 /* ANCHOR flag can be used only together with UNMAP */ 5769 if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) { 5770 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5771 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5772 ctl_done((union ctl_io *)ctsio); 5773 return (CTL_RETVAL_COMPLETE); 5774 } 5775 5776 /* 5777 * The first check is to make sure we're in bounds, the second 5778 * check is to catch wrap-around problems. If the lba + num blocks 5779 * is less than the lba, then we've wrapped around and the block 5780 * range is invalid anyway. 5781 */ 5782 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5783 || ((lba + num_blocks) < lba)) { 5784 ctl_set_lba_out_of_range(ctsio, 5785 MAX(lba, lun->be_lun->maxlba + 1)); 5786 ctl_done((union ctl_io *)ctsio); 5787 return (CTL_RETVAL_COMPLETE); 5788 } 5789 5790 /* Zero number of blocks means "to the last logical block" */ 5791 if (num_blocks == 0) { 5792 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5793 ctl_set_invalid_field(ctsio, 5794 /*sks_valid*/ 0, 5795 /*command*/ 1, 5796 /*field*/ 0, 5797 /*bit_valid*/ 0, 5798 /*bit*/ 0); 5799 ctl_done((union ctl_io *)ctsio); 5800 return (CTL_RETVAL_COMPLETE); 5801 } 5802 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5803 } 5804 5805 len = lun->be_lun->blocksize; 5806 5807 /* 5808 * If we've got a kernel request that hasn't been malloced yet, 5809 * malloc it and tell the caller the data buffer is here. 5810 */ 5811 if ((byte2 & SWS_NDOB) == 0 && 5812 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5813 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5814 ctsio->kern_data_len = len; 5815 ctsio->kern_total_len = len; 5816 ctsio->kern_rel_offset = 0; 5817 ctsio->kern_sg_entries = 0; 5818 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5819 ctsio->be_move_done = ctl_config_move_done; 5820 ctl_datamove((union ctl_io *)ctsio); 5821 5822 return (CTL_RETVAL_COMPLETE); 5823 } 5824 5825 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5826 lbalen->lba = lba; 5827 lbalen->len = num_blocks; 5828 lbalen->flags = byte2; 5829 retval = lun->backend->config_write((union ctl_io *)ctsio); 5830 5831 return (retval); 5832 } 5833 5834 int 5835 ctl_unmap(struct ctl_scsiio *ctsio) 5836 { 5837 struct ctl_lun *lun = CTL_LUN(ctsio); 5838 struct scsi_unmap *cdb; 5839 struct ctl_ptr_len_flags *ptrlen; 5840 struct scsi_unmap_header *hdr; 5841 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5842 uint64_t lba; 5843 uint32_t num_blocks; 5844 int len, retval; 5845 uint8_t byte2; 5846 5847 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5848 5849 cdb = (struct scsi_unmap *)ctsio->cdb; 5850 len = scsi_2btoul(cdb->length); 5851 byte2 = cdb->byte2; 5852 5853 /* 5854 * If we've got a kernel request that hasn't been malloced yet, 5855 * malloc it and tell the caller the data buffer is here. 5856 */ 5857 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5858 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5859 ctsio->kern_data_len = len; 5860 ctsio->kern_total_len = len; 5861 ctsio->kern_rel_offset = 0; 5862 ctsio->kern_sg_entries = 0; 5863 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5864 ctsio->be_move_done = ctl_config_move_done; 5865 ctl_datamove((union ctl_io *)ctsio); 5866 5867 return (CTL_RETVAL_COMPLETE); 5868 } 5869 5870 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5871 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5872 if (len < sizeof (*hdr) || 5873 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5874 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5875 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5876 ctl_set_invalid_field(ctsio, 5877 /*sks_valid*/ 0, 5878 /*command*/ 0, 5879 /*field*/ 0, 5880 /*bit_valid*/ 0, 5881 /*bit*/ 0); 5882 goto done; 5883 } 5884 len = scsi_2btoul(hdr->desc_length); 5885 buf = (struct scsi_unmap_desc *)(hdr + 1); 5886 end = buf + len / sizeof(*buf); 5887 5888 endnz = buf; 5889 for (range = buf; range < end; range++) { 5890 lba = scsi_8btou64(range->lba); 5891 num_blocks = scsi_4btoul(range->length); 5892 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5893 || ((lba + num_blocks) < lba)) { 5894 ctl_set_lba_out_of_range(ctsio, 5895 MAX(lba, lun->be_lun->maxlba + 1)); 5896 ctl_done((union ctl_io *)ctsio); 5897 return (CTL_RETVAL_COMPLETE); 5898 } 5899 if (num_blocks != 0) 5900 endnz = range + 1; 5901 } 5902 5903 /* 5904 * Block backend can not handle zero last range. 5905 * Filter it out and return if there is nothing left. 5906 */ 5907 len = (uint8_t *)endnz - (uint8_t *)buf; 5908 if (len == 0) { 5909 ctl_set_success(ctsio); 5910 goto done; 5911 } 5912 5913 mtx_lock(&lun->lun_lock); 5914 ptrlen = (struct ctl_ptr_len_flags *) 5915 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5916 ptrlen->ptr = (void *)buf; 5917 ptrlen->len = len; 5918 ptrlen->flags = byte2; 5919 ctl_check_blocked(lun); 5920 mtx_unlock(&lun->lun_lock); 5921 5922 retval = lun->backend->config_write((union ctl_io *)ctsio); 5923 return (retval); 5924 5925 done: 5926 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5927 free(ctsio->kern_data_ptr, M_CTL); 5928 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5929 } 5930 ctl_done((union ctl_io *)ctsio); 5931 return (CTL_RETVAL_COMPLETE); 5932 } 5933 5934 int 5935 ctl_default_page_handler(struct ctl_scsiio *ctsio, 5936 struct ctl_page_index *page_index, uint8_t *page_ptr) 5937 { 5938 struct ctl_lun *lun = CTL_LUN(ctsio); 5939 uint8_t *current_cp; 5940 int set_ua; 5941 uint32_t initidx; 5942 5943 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5944 set_ua = 0; 5945 5946 current_cp = (page_index->page_data + (page_index->page_len * 5947 CTL_PAGE_CURRENT)); 5948 5949 mtx_lock(&lun->lun_lock); 5950 if (memcmp(current_cp, page_ptr, page_index->page_len)) { 5951 memcpy(current_cp, page_ptr, page_index->page_len); 5952 set_ua = 1; 5953 } 5954 if (set_ua != 0) 5955 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5956 mtx_unlock(&lun->lun_lock); 5957 if (set_ua) { 5958 ctl_isc_announce_mode(lun, 5959 ctl_get_initindex(&ctsio->io_hdr.nexus), 5960 page_index->page_code, page_index->subpage); 5961 } 5962 return (CTL_RETVAL_COMPLETE); 5963 } 5964 5965 static void 5966 ctl_ie_timer(void *arg) 5967 { 5968 struct ctl_lun *lun = arg; 5969 uint64_t t; 5970 5971 if (lun->ie_asc == 0) 5972 return; 5973 5974 if (lun->MODE_IE.mrie == SIEP_MRIE_UA) 5975 ctl_est_ua_all(lun, -1, CTL_UA_IE); 5976 else 5977 lun->ie_reported = 0; 5978 5979 if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) { 5980 lun->ie_reportcnt++; 5981 t = scsi_4btoul(lun->MODE_IE.interval_timer); 5982 if (t == 0 || t == UINT32_MAX) 5983 t = 3000; /* 5 min */ 5984 callout_schedule(&lun->ie_callout, t * hz / 10); 5985 } 5986 } 5987 5988 int 5989 ctl_ie_page_handler(struct ctl_scsiio *ctsio, 5990 struct ctl_page_index *page_index, uint8_t *page_ptr) 5991 { 5992 struct ctl_lun *lun = CTL_LUN(ctsio); 5993 struct scsi_info_exceptions_page *pg; 5994 uint64_t t; 5995 5996 (void)ctl_default_page_handler(ctsio, page_index, page_ptr); 5997 5998 pg = (struct scsi_info_exceptions_page *)page_ptr; 5999 mtx_lock(&lun->lun_lock); 6000 if (pg->info_flags & SIEP_FLAGS_TEST) { 6001 lun->ie_asc = 0x5d; 6002 lun->ie_ascq = 0xff; 6003 if (pg->mrie == SIEP_MRIE_UA) { 6004 ctl_est_ua_all(lun, -1, CTL_UA_IE); 6005 lun->ie_reported = 1; 6006 } else { 6007 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 6008 lun->ie_reported = -1; 6009 } 6010 lun->ie_reportcnt = 1; 6011 if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) { 6012 lun->ie_reportcnt++; 6013 t = scsi_4btoul(pg->interval_timer); 6014 if (t == 0 || t == UINT32_MAX) 6015 t = 3000; /* 5 min */ 6016 callout_reset(&lun->ie_callout, t * hz / 10, 6017 ctl_ie_timer, lun); 6018 } 6019 } else { 6020 lun->ie_asc = 0; 6021 lun->ie_ascq = 0; 6022 lun->ie_reported = 1; 6023 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 6024 lun->ie_reportcnt = UINT32_MAX; 6025 callout_stop(&lun->ie_callout); 6026 } 6027 mtx_unlock(&lun->lun_lock); 6028 return (CTL_RETVAL_COMPLETE); 6029 } 6030 6031 static int 6032 ctl_do_mode_select(union ctl_io *io) 6033 { 6034 struct ctl_lun *lun = CTL_LUN(io); 6035 struct scsi_mode_page_header *page_header; 6036 struct ctl_page_index *page_index; 6037 struct ctl_scsiio *ctsio; 6038 int page_len, page_len_offset, page_len_size; 6039 union ctl_modepage_info *modepage_info; 6040 uint16_t *len_left, *len_used; 6041 int retval, i; 6042 6043 ctsio = &io->scsiio; 6044 page_index = NULL; 6045 page_len = 0; 6046 6047 modepage_info = (union ctl_modepage_info *) 6048 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6049 len_left = &modepage_info->header.len_left; 6050 len_used = &modepage_info->header.len_used; 6051 6052 do_next_page: 6053 6054 page_header = (struct scsi_mode_page_header *) 6055 (ctsio->kern_data_ptr + *len_used); 6056 6057 if (*len_left == 0) { 6058 free(ctsio->kern_data_ptr, M_CTL); 6059 ctl_set_success(ctsio); 6060 ctl_done((union ctl_io *)ctsio); 6061 return (CTL_RETVAL_COMPLETE); 6062 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 6063 6064 free(ctsio->kern_data_ptr, M_CTL); 6065 ctl_set_param_len_error(ctsio); 6066 ctl_done((union ctl_io *)ctsio); 6067 return (CTL_RETVAL_COMPLETE); 6068 6069 } else if ((page_header->page_code & SMPH_SPF) 6070 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6071 6072 free(ctsio->kern_data_ptr, M_CTL); 6073 ctl_set_param_len_error(ctsio); 6074 ctl_done((union ctl_io *)ctsio); 6075 return (CTL_RETVAL_COMPLETE); 6076 } 6077 6078 6079 /* 6080 * XXX KDM should we do something with the block descriptor? 6081 */ 6082 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6083 page_index = &lun->mode_pages.index[i]; 6084 if (lun->be_lun->lun_type == T_DIRECT && 6085 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6086 continue; 6087 if (lun->be_lun->lun_type == T_PROCESSOR && 6088 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6089 continue; 6090 if (lun->be_lun->lun_type == T_CDROM && 6091 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6092 continue; 6093 6094 if ((page_index->page_code & SMPH_PC_MASK) != 6095 (page_header->page_code & SMPH_PC_MASK)) 6096 continue; 6097 6098 /* 6099 * If neither page has a subpage code, then we've got a 6100 * match. 6101 */ 6102 if (((page_index->page_code & SMPH_SPF) == 0) 6103 && ((page_header->page_code & SMPH_SPF) == 0)) { 6104 page_len = page_header->page_length; 6105 break; 6106 } 6107 6108 /* 6109 * If both pages have subpages, then the subpage numbers 6110 * have to match. 6111 */ 6112 if ((page_index->page_code & SMPH_SPF) 6113 && (page_header->page_code & SMPH_SPF)) { 6114 struct scsi_mode_page_header_sp *sph; 6115 6116 sph = (struct scsi_mode_page_header_sp *)page_header; 6117 if (page_index->subpage == sph->subpage) { 6118 page_len = scsi_2btoul(sph->page_length); 6119 break; 6120 } 6121 } 6122 } 6123 6124 /* 6125 * If we couldn't find the page, or if we don't have a mode select 6126 * handler for it, send back an error to the user. 6127 */ 6128 if ((i >= CTL_NUM_MODE_PAGES) 6129 || (page_index->select_handler == NULL)) { 6130 ctl_set_invalid_field(ctsio, 6131 /*sks_valid*/ 1, 6132 /*command*/ 0, 6133 /*field*/ *len_used, 6134 /*bit_valid*/ 0, 6135 /*bit*/ 0); 6136 free(ctsio->kern_data_ptr, M_CTL); 6137 ctl_done((union ctl_io *)ctsio); 6138 return (CTL_RETVAL_COMPLETE); 6139 } 6140 6141 if (page_index->page_code & SMPH_SPF) { 6142 page_len_offset = 2; 6143 page_len_size = 2; 6144 } else { 6145 page_len_size = 1; 6146 page_len_offset = 1; 6147 } 6148 6149 /* 6150 * If the length the initiator gives us isn't the one we specify in 6151 * the mode page header, or if they didn't specify enough data in 6152 * the CDB to avoid truncating this page, kick out the request. 6153 */ 6154 if (page_len != page_index->page_len - page_len_offset - page_len_size) { 6155 ctl_set_invalid_field(ctsio, 6156 /*sks_valid*/ 1, 6157 /*command*/ 0, 6158 /*field*/ *len_used + page_len_offset, 6159 /*bit_valid*/ 0, 6160 /*bit*/ 0); 6161 free(ctsio->kern_data_ptr, M_CTL); 6162 ctl_done((union ctl_io *)ctsio); 6163 return (CTL_RETVAL_COMPLETE); 6164 } 6165 if (*len_left < page_index->page_len) { 6166 free(ctsio->kern_data_ptr, M_CTL); 6167 ctl_set_param_len_error(ctsio); 6168 ctl_done((union ctl_io *)ctsio); 6169 return (CTL_RETVAL_COMPLETE); 6170 } 6171 6172 /* 6173 * Run through the mode page, checking to make sure that the bits 6174 * the user changed are actually legal for him to change. 6175 */ 6176 for (i = 0; i < page_index->page_len; i++) { 6177 uint8_t *user_byte, *change_mask, *current_byte; 6178 int bad_bit; 6179 int j; 6180 6181 user_byte = (uint8_t *)page_header + i; 6182 change_mask = page_index->page_data + 6183 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6184 current_byte = page_index->page_data + 6185 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6186 6187 /* 6188 * Check to see whether the user set any bits in this byte 6189 * that he is not allowed to set. 6190 */ 6191 if ((*user_byte & ~(*change_mask)) == 6192 (*current_byte & ~(*change_mask))) 6193 continue; 6194 6195 /* 6196 * Go through bit by bit to determine which one is illegal. 6197 */ 6198 bad_bit = 0; 6199 for (j = 7; j >= 0; j--) { 6200 if ((((1 << i) & ~(*change_mask)) & *user_byte) != 6201 (((1 << i) & ~(*change_mask)) & *current_byte)) { 6202 bad_bit = i; 6203 break; 6204 } 6205 } 6206 ctl_set_invalid_field(ctsio, 6207 /*sks_valid*/ 1, 6208 /*command*/ 0, 6209 /*field*/ *len_used + i, 6210 /*bit_valid*/ 1, 6211 /*bit*/ bad_bit); 6212 free(ctsio->kern_data_ptr, M_CTL); 6213 ctl_done((union ctl_io *)ctsio); 6214 return (CTL_RETVAL_COMPLETE); 6215 } 6216 6217 /* 6218 * Decrement these before we call the page handler, since we may 6219 * end up getting called back one way or another before the handler 6220 * returns to this context. 6221 */ 6222 *len_left -= page_index->page_len; 6223 *len_used += page_index->page_len; 6224 6225 retval = page_index->select_handler(ctsio, page_index, 6226 (uint8_t *)page_header); 6227 6228 /* 6229 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 6230 * wait until this queued command completes to finish processing 6231 * the mode page. If it returns anything other than 6232 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 6233 * already set the sense information, freed the data pointer, and 6234 * completed the io for us. 6235 */ 6236 if (retval != CTL_RETVAL_COMPLETE) 6237 goto bailout_no_done; 6238 6239 /* 6240 * If the initiator sent us more than one page, parse the next one. 6241 */ 6242 if (*len_left > 0) 6243 goto do_next_page; 6244 6245 ctl_set_success(ctsio); 6246 free(ctsio->kern_data_ptr, M_CTL); 6247 ctl_done((union ctl_io *)ctsio); 6248 6249 bailout_no_done: 6250 6251 return (CTL_RETVAL_COMPLETE); 6252 6253 } 6254 6255 int 6256 ctl_mode_select(struct ctl_scsiio *ctsio) 6257 { 6258 struct ctl_lun *lun = CTL_LUN(ctsio); 6259 union ctl_modepage_info *modepage_info; 6260 int bd_len, i, header_size, param_len, pf, rtd, sp; 6261 uint32_t initidx; 6262 6263 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 6264 switch (ctsio->cdb[0]) { 6265 case MODE_SELECT_6: { 6266 struct scsi_mode_select_6 *cdb; 6267 6268 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6269 6270 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6271 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6272 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6273 param_len = cdb->length; 6274 header_size = sizeof(struct scsi_mode_header_6); 6275 break; 6276 } 6277 case MODE_SELECT_10: { 6278 struct scsi_mode_select_10 *cdb; 6279 6280 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6281 6282 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6283 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6284 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6285 param_len = scsi_2btoul(cdb->length); 6286 header_size = sizeof(struct scsi_mode_header_10); 6287 break; 6288 } 6289 default: 6290 ctl_set_invalid_opcode(ctsio); 6291 ctl_done((union ctl_io *)ctsio); 6292 return (CTL_RETVAL_COMPLETE); 6293 } 6294 6295 if (rtd) { 6296 if (param_len != 0) { 6297 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, 6298 /*command*/ 1, /*field*/ 0, 6299 /*bit_valid*/ 0, /*bit*/ 0); 6300 ctl_done((union ctl_io *)ctsio); 6301 return (CTL_RETVAL_COMPLETE); 6302 } 6303 6304 /* Revert to defaults. */ 6305 ctl_init_page_index(lun); 6306 mtx_lock(&lun->lun_lock); 6307 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 6308 mtx_unlock(&lun->lun_lock); 6309 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6310 ctl_isc_announce_mode(lun, -1, 6311 lun->mode_pages.index[i].page_code & SMPH_PC_MASK, 6312 lun->mode_pages.index[i].subpage); 6313 } 6314 ctl_set_success(ctsio); 6315 ctl_done((union ctl_io *)ctsio); 6316 return (CTL_RETVAL_COMPLETE); 6317 } 6318 6319 /* 6320 * From SPC-3: 6321 * "A parameter list length of zero indicates that the Data-Out Buffer 6322 * shall be empty. This condition shall not be considered as an error." 6323 */ 6324 if (param_len == 0) { 6325 ctl_set_success(ctsio); 6326 ctl_done((union ctl_io *)ctsio); 6327 return (CTL_RETVAL_COMPLETE); 6328 } 6329 6330 /* 6331 * Since we'll hit this the first time through, prior to 6332 * allocation, we don't need to free a data buffer here. 6333 */ 6334 if (param_len < header_size) { 6335 ctl_set_param_len_error(ctsio); 6336 ctl_done((union ctl_io *)ctsio); 6337 return (CTL_RETVAL_COMPLETE); 6338 } 6339 6340 /* 6341 * Allocate the data buffer and grab the user's data. In theory, 6342 * we shouldn't have to sanity check the parameter list length here 6343 * because the maximum size is 64K. We should be able to malloc 6344 * that much without too many problems. 6345 */ 6346 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6347 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6348 ctsio->kern_data_len = param_len; 6349 ctsio->kern_total_len = param_len; 6350 ctsio->kern_rel_offset = 0; 6351 ctsio->kern_sg_entries = 0; 6352 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6353 ctsio->be_move_done = ctl_config_move_done; 6354 ctl_datamove((union ctl_io *)ctsio); 6355 6356 return (CTL_RETVAL_COMPLETE); 6357 } 6358 6359 switch (ctsio->cdb[0]) { 6360 case MODE_SELECT_6: { 6361 struct scsi_mode_header_6 *mh6; 6362 6363 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6364 bd_len = mh6->blk_desc_len; 6365 break; 6366 } 6367 case MODE_SELECT_10: { 6368 struct scsi_mode_header_10 *mh10; 6369 6370 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6371 bd_len = scsi_2btoul(mh10->blk_desc_len); 6372 break; 6373 } 6374 default: 6375 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6376 } 6377 6378 if (param_len < (header_size + bd_len)) { 6379 free(ctsio->kern_data_ptr, M_CTL); 6380 ctl_set_param_len_error(ctsio); 6381 ctl_done((union ctl_io *)ctsio); 6382 return (CTL_RETVAL_COMPLETE); 6383 } 6384 6385 /* 6386 * Set the IO_CONT flag, so that if this I/O gets passed to 6387 * ctl_config_write_done(), it'll get passed back to 6388 * ctl_do_mode_select() for further processing, or completion if 6389 * we're all done. 6390 */ 6391 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6392 ctsio->io_cont = ctl_do_mode_select; 6393 6394 modepage_info = (union ctl_modepage_info *) 6395 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6396 memset(modepage_info, 0, sizeof(*modepage_info)); 6397 modepage_info->header.len_left = param_len - header_size - bd_len; 6398 modepage_info->header.len_used = header_size + bd_len; 6399 6400 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6401 } 6402 6403 int 6404 ctl_mode_sense(struct ctl_scsiio *ctsio) 6405 { 6406 struct ctl_lun *lun = CTL_LUN(ctsio); 6407 int pc, page_code, dbd, llba, subpage; 6408 int alloc_len, page_len, header_len, total_len; 6409 struct scsi_mode_block_descr *block_desc; 6410 struct ctl_page_index *page_index; 6411 6412 dbd = 0; 6413 llba = 0; 6414 block_desc = NULL; 6415 6416 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6417 6418 switch (ctsio->cdb[0]) { 6419 case MODE_SENSE_6: { 6420 struct scsi_mode_sense_6 *cdb; 6421 6422 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6423 6424 header_len = sizeof(struct scsi_mode_hdr_6); 6425 if (cdb->byte2 & SMS_DBD) 6426 dbd = 1; 6427 else 6428 header_len += sizeof(struct scsi_mode_block_descr); 6429 6430 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6431 page_code = cdb->page & SMS_PAGE_CODE; 6432 subpage = cdb->subpage; 6433 alloc_len = cdb->length; 6434 break; 6435 } 6436 case MODE_SENSE_10: { 6437 struct scsi_mode_sense_10 *cdb; 6438 6439 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6440 6441 header_len = sizeof(struct scsi_mode_hdr_10); 6442 6443 if (cdb->byte2 & SMS_DBD) 6444 dbd = 1; 6445 else 6446 header_len += sizeof(struct scsi_mode_block_descr); 6447 if (cdb->byte2 & SMS10_LLBAA) 6448 llba = 1; 6449 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6450 page_code = cdb->page & SMS_PAGE_CODE; 6451 subpage = cdb->subpage; 6452 alloc_len = scsi_2btoul(cdb->length); 6453 break; 6454 } 6455 default: 6456 ctl_set_invalid_opcode(ctsio); 6457 ctl_done((union ctl_io *)ctsio); 6458 return (CTL_RETVAL_COMPLETE); 6459 break; /* NOTREACHED */ 6460 } 6461 6462 /* 6463 * We have to make a first pass through to calculate the size of 6464 * the pages that match the user's query. Then we allocate enough 6465 * memory to hold it, and actually copy the data into the buffer. 6466 */ 6467 switch (page_code) { 6468 case SMS_ALL_PAGES_PAGE: { 6469 u_int i; 6470 6471 page_len = 0; 6472 6473 /* 6474 * At the moment, values other than 0 and 0xff here are 6475 * reserved according to SPC-3. 6476 */ 6477 if ((subpage != SMS_SUBPAGE_PAGE_0) 6478 && (subpage != SMS_SUBPAGE_ALL)) { 6479 ctl_set_invalid_field(ctsio, 6480 /*sks_valid*/ 1, 6481 /*command*/ 1, 6482 /*field*/ 3, 6483 /*bit_valid*/ 0, 6484 /*bit*/ 0); 6485 ctl_done((union ctl_io *)ctsio); 6486 return (CTL_RETVAL_COMPLETE); 6487 } 6488 6489 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6490 page_index = &lun->mode_pages.index[i]; 6491 6492 /* Make sure the page is supported for this dev type */ 6493 if (lun->be_lun->lun_type == T_DIRECT && 6494 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6495 continue; 6496 if (lun->be_lun->lun_type == T_PROCESSOR && 6497 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6498 continue; 6499 if (lun->be_lun->lun_type == T_CDROM && 6500 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6501 continue; 6502 6503 /* 6504 * We don't use this subpage if the user didn't 6505 * request all subpages. 6506 */ 6507 if ((page_index->subpage != 0) 6508 && (subpage == SMS_SUBPAGE_PAGE_0)) 6509 continue; 6510 6511 #if 0 6512 printf("found page %#x len %d\n", 6513 page_index->page_code & SMPH_PC_MASK, 6514 page_index->page_len); 6515 #endif 6516 page_len += page_index->page_len; 6517 } 6518 break; 6519 } 6520 default: { 6521 u_int i; 6522 6523 page_len = 0; 6524 6525 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6526 page_index = &lun->mode_pages.index[i]; 6527 6528 /* Make sure the page is supported for this dev type */ 6529 if (lun->be_lun->lun_type == T_DIRECT && 6530 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6531 continue; 6532 if (lun->be_lun->lun_type == T_PROCESSOR && 6533 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6534 continue; 6535 if (lun->be_lun->lun_type == T_CDROM && 6536 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6537 continue; 6538 6539 /* Look for the right page code */ 6540 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6541 continue; 6542 6543 /* Look for the right subpage or the subpage wildcard*/ 6544 if ((page_index->subpage != subpage) 6545 && (subpage != SMS_SUBPAGE_ALL)) 6546 continue; 6547 6548 #if 0 6549 printf("found page %#x len %d\n", 6550 page_index->page_code & SMPH_PC_MASK, 6551 page_index->page_len); 6552 #endif 6553 6554 page_len += page_index->page_len; 6555 } 6556 6557 if (page_len == 0) { 6558 ctl_set_invalid_field(ctsio, 6559 /*sks_valid*/ 1, 6560 /*command*/ 1, 6561 /*field*/ 2, 6562 /*bit_valid*/ 1, 6563 /*bit*/ 5); 6564 ctl_done((union ctl_io *)ctsio); 6565 return (CTL_RETVAL_COMPLETE); 6566 } 6567 break; 6568 } 6569 } 6570 6571 total_len = header_len + page_len; 6572 #if 0 6573 printf("header_len = %d, page_len = %d, total_len = %d\n", 6574 header_len, page_len, total_len); 6575 #endif 6576 6577 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6578 ctsio->kern_sg_entries = 0; 6579 ctsio->kern_rel_offset = 0; 6580 ctsio->kern_data_len = min(total_len, alloc_len); 6581 ctsio->kern_total_len = ctsio->kern_data_len; 6582 6583 switch (ctsio->cdb[0]) { 6584 case MODE_SENSE_6: { 6585 struct scsi_mode_hdr_6 *header; 6586 6587 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6588 6589 header->datalen = MIN(total_len - 1, 254); 6590 if (lun->be_lun->lun_type == T_DIRECT) { 6591 header->dev_specific = 0x10; /* DPOFUA */ 6592 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6593 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6594 header->dev_specific |= 0x80; /* WP */ 6595 } 6596 if (dbd) 6597 header->block_descr_len = 0; 6598 else 6599 header->block_descr_len = 6600 sizeof(struct scsi_mode_block_descr); 6601 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6602 break; 6603 } 6604 case MODE_SENSE_10: { 6605 struct scsi_mode_hdr_10 *header; 6606 int datalen; 6607 6608 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6609 6610 datalen = MIN(total_len - 2, 65533); 6611 scsi_ulto2b(datalen, header->datalen); 6612 if (lun->be_lun->lun_type == T_DIRECT) { 6613 header->dev_specific = 0x10; /* DPOFUA */ 6614 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6615 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6616 header->dev_specific |= 0x80; /* WP */ 6617 } 6618 if (dbd) 6619 scsi_ulto2b(0, header->block_descr_len); 6620 else 6621 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6622 header->block_descr_len); 6623 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6624 break; 6625 } 6626 default: 6627 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6628 } 6629 6630 /* 6631 * If we've got a disk, use its blocksize in the block 6632 * descriptor. Otherwise, just set it to 0. 6633 */ 6634 if (dbd == 0) { 6635 if (lun->be_lun->lun_type == T_DIRECT) 6636 scsi_ulto3b(lun->be_lun->blocksize, 6637 block_desc->block_len); 6638 else 6639 scsi_ulto3b(0, block_desc->block_len); 6640 } 6641 6642 switch (page_code) { 6643 case SMS_ALL_PAGES_PAGE: { 6644 int i, data_used; 6645 6646 data_used = header_len; 6647 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6648 struct ctl_page_index *page_index; 6649 6650 page_index = &lun->mode_pages.index[i]; 6651 if (lun->be_lun->lun_type == T_DIRECT && 6652 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6653 continue; 6654 if (lun->be_lun->lun_type == T_PROCESSOR && 6655 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6656 continue; 6657 if (lun->be_lun->lun_type == T_CDROM && 6658 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6659 continue; 6660 6661 /* 6662 * We don't use this subpage if the user didn't 6663 * request all subpages. We already checked (above) 6664 * to make sure the user only specified a subpage 6665 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6666 */ 6667 if ((page_index->subpage != 0) 6668 && (subpage == SMS_SUBPAGE_PAGE_0)) 6669 continue; 6670 6671 /* 6672 * Call the handler, if it exists, to update the 6673 * page to the latest values. 6674 */ 6675 if (page_index->sense_handler != NULL) 6676 page_index->sense_handler(ctsio, page_index,pc); 6677 6678 memcpy(ctsio->kern_data_ptr + data_used, 6679 page_index->page_data + 6680 (page_index->page_len * pc), 6681 page_index->page_len); 6682 data_used += page_index->page_len; 6683 } 6684 break; 6685 } 6686 default: { 6687 int i, data_used; 6688 6689 data_used = header_len; 6690 6691 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6692 struct ctl_page_index *page_index; 6693 6694 page_index = &lun->mode_pages.index[i]; 6695 6696 /* Look for the right page code */ 6697 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6698 continue; 6699 6700 /* Look for the right subpage or the subpage wildcard*/ 6701 if ((page_index->subpage != subpage) 6702 && (subpage != SMS_SUBPAGE_ALL)) 6703 continue; 6704 6705 /* Make sure the page is supported for this dev type */ 6706 if (lun->be_lun->lun_type == T_DIRECT && 6707 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6708 continue; 6709 if (lun->be_lun->lun_type == T_PROCESSOR && 6710 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6711 continue; 6712 if (lun->be_lun->lun_type == T_CDROM && 6713 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6714 continue; 6715 6716 /* 6717 * Call the handler, if it exists, to update the 6718 * page to the latest values. 6719 */ 6720 if (page_index->sense_handler != NULL) 6721 page_index->sense_handler(ctsio, page_index,pc); 6722 6723 memcpy(ctsio->kern_data_ptr + data_used, 6724 page_index->page_data + 6725 (page_index->page_len * pc), 6726 page_index->page_len); 6727 data_used += page_index->page_len; 6728 } 6729 break; 6730 } 6731 } 6732 6733 ctl_set_success(ctsio); 6734 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6735 ctsio->be_move_done = ctl_config_move_done; 6736 ctl_datamove((union ctl_io *)ctsio); 6737 return (CTL_RETVAL_COMPLETE); 6738 } 6739 6740 int 6741 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6742 struct ctl_page_index *page_index, 6743 int pc) 6744 { 6745 struct ctl_lun *lun = CTL_LUN(ctsio); 6746 struct scsi_log_param_header *phdr; 6747 uint8_t *data; 6748 uint64_t val; 6749 6750 data = page_index->page_data; 6751 6752 if (lun->backend->lun_attr != NULL && 6753 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) 6754 != UINT64_MAX) { 6755 phdr = (struct scsi_log_param_header *)data; 6756 scsi_ulto2b(0x0001, phdr->param_code); 6757 phdr->param_control = SLP_LBIN | SLP_LP; 6758 phdr->param_len = 8; 6759 data = (uint8_t *)(phdr + 1); 6760 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6761 data[4] = 0x02; /* per-pool */ 6762 data += phdr->param_len; 6763 } 6764 6765 if (lun->backend->lun_attr != NULL && 6766 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) 6767 != UINT64_MAX) { 6768 phdr = (struct scsi_log_param_header *)data; 6769 scsi_ulto2b(0x0002, phdr->param_code); 6770 phdr->param_control = SLP_LBIN | SLP_LP; 6771 phdr->param_len = 8; 6772 data = (uint8_t *)(phdr + 1); 6773 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6774 data[4] = 0x01; /* per-LUN */ 6775 data += phdr->param_len; 6776 } 6777 6778 if (lun->backend->lun_attr != NULL && 6779 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail")) 6780 != UINT64_MAX) { 6781 phdr = (struct scsi_log_param_header *)data; 6782 scsi_ulto2b(0x00f1, phdr->param_code); 6783 phdr->param_control = SLP_LBIN | SLP_LP; 6784 phdr->param_len = 8; 6785 data = (uint8_t *)(phdr + 1); 6786 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6787 data[4] = 0x02; /* per-pool */ 6788 data += phdr->param_len; 6789 } 6790 6791 if (lun->backend->lun_attr != NULL && 6792 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) 6793 != UINT64_MAX) { 6794 phdr = (struct scsi_log_param_header *)data; 6795 scsi_ulto2b(0x00f2, phdr->param_code); 6796 phdr->param_control = SLP_LBIN | SLP_LP; 6797 phdr->param_len = 8; 6798 data = (uint8_t *)(phdr + 1); 6799 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6800 data[4] = 0x02; /* per-pool */ 6801 data += phdr->param_len; 6802 } 6803 6804 page_index->page_len = data - page_index->page_data; 6805 return (0); 6806 } 6807 6808 int 6809 ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, 6810 struct ctl_page_index *page_index, 6811 int pc) 6812 { 6813 struct ctl_lun *lun = CTL_LUN(ctsio); 6814 struct stat_page *data; 6815 struct bintime *t; 6816 6817 data = (struct stat_page *)page_index->page_data; 6818 6819 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); 6820 data->sap.hdr.param_control = SLP_LBIN; 6821 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - 6822 sizeof(struct scsi_log_param_header); 6823 scsi_u64to8b(lun->stats.operations[CTL_STATS_READ], 6824 data->sap.read_num); 6825 scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE], 6826 data->sap.write_num); 6827 if (lun->be_lun->blocksize > 0) { 6828 scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] / 6829 lun->be_lun->blocksize, data->sap.recvieved_lba); 6830 scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] / 6831 lun->be_lun->blocksize, data->sap.transmitted_lba); 6832 } 6833 t = &lun->stats.time[CTL_STATS_READ]; 6834 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), 6835 data->sap.read_int); 6836 t = &lun->stats.time[CTL_STATS_WRITE]; 6837 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), 6838 data->sap.write_int); 6839 scsi_u64to8b(0, data->sap.weighted_num); 6840 scsi_u64to8b(0, data->sap.weighted_int); 6841 scsi_ulto2b(SLP_IT, data->it.hdr.param_code); 6842 data->it.hdr.param_control = SLP_LBIN; 6843 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - 6844 sizeof(struct scsi_log_param_header); 6845 #ifdef CTL_TIME_IO 6846 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); 6847 #endif 6848 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); 6849 data->it.hdr.param_control = SLP_LBIN; 6850 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - 6851 sizeof(struct scsi_log_param_header); 6852 scsi_ulto4b(3, data->ti.exponent); 6853 scsi_ulto4b(1, data->ti.integer); 6854 return (0); 6855 } 6856 6857 int 6858 ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio, 6859 struct ctl_page_index *page_index, 6860 int pc) 6861 { 6862 struct ctl_lun *lun = CTL_LUN(ctsio); 6863 struct scsi_log_informational_exceptions *data; 6864 6865 data = (struct scsi_log_informational_exceptions *)page_index->page_data; 6866 6867 scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code); 6868 data->hdr.param_control = SLP_LBIN; 6869 data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) - 6870 sizeof(struct scsi_log_param_header); 6871 data->ie_asc = lun->ie_asc; 6872 data->ie_ascq = lun->ie_ascq; 6873 data->temperature = 0xff; 6874 return (0); 6875 } 6876 6877 int 6878 ctl_log_sense(struct ctl_scsiio *ctsio) 6879 { 6880 struct ctl_lun *lun = CTL_LUN(ctsio); 6881 int i, pc, page_code, subpage; 6882 int alloc_len, total_len; 6883 struct ctl_page_index *page_index; 6884 struct scsi_log_sense *cdb; 6885 struct scsi_log_header *header; 6886 6887 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 6888 6889 cdb = (struct scsi_log_sense *)ctsio->cdb; 6890 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 6891 page_code = cdb->page & SLS_PAGE_CODE; 6892 subpage = cdb->subpage; 6893 alloc_len = scsi_2btoul(cdb->length); 6894 6895 page_index = NULL; 6896 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 6897 page_index = &lun->log_pages.index[i]; 6898 6899 /* Look for the right page code */ 6900 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 6901 continue; 6902 6903 /* Look for the right subpage or the subpage wildcard*/ 6904 if (page_index->subpage != subpage) 6905 continue; 6906 6907 break; 6908 } 6909 if (i >= CTL_NUM_LOG_PAGES) { 6910 ctl_set_invalid_field(ctsio, 6911 /*sks_valid*/ 1, 6912 /*command*/ 1, 6913 /*field*/ 2, 6914 /*bit_valid*/ 0, 6915 /*bit*/ 0); 6916 ctl_done((union ctl_io *)ctsio); 6917 return (CTL_RETVAL_COMPLETE); 6918 } 6919 6920 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 6921 6922 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6923 ctsio->kern_sg_entries = 0; 6924 ctsio->kern_rel_offset = 0; 6925 ctsio->kern_data_len = min(total_len, alloc_len); 6926 ctsio->kern_total_len = ctsio->kern_data_len; 6927 6928 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 6929 header->page = page_index->page_code; 6930 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING) 6931 header->page |= SL_DS; 6932 if (page_index->subpage) { 6933 header->page |= SL_SPF; 6934 header->subpage = page_index->subpage; 6935 } 6936 scsi_ulto2b(page_index->page_len, header->datalen); 6937 6938 /* 6939 * Call the handler, if it exists, to update the 6940 * page to the latest values. 6941 */ 6942 if (page_index->sense_handler != NULL) 6943 page_index->sense_handler(ctsio, page_index, pc); 6944 6945 memcpy(header + 1, page_index->page_data, page_index->page_len); 6946 6947 ctl_set_success(ctsio); 6948 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6949 ctsio->be_move_done = ctl_config_move_done; 6950 ctl_datamove((union ctl_io *)ctsio); 6951 return (CTL_RETVAL_COMPLETE); 6952 } 6953 6954 int 6955 ctl_read_capacity(struct ctl_scsiio *ctsio) 6956 { 6957 struct ctl_lun *lun = CTL_LUN(ctsio); 6958 struct scsi_read_capacity *cdb; 6959 struct scsi_read_capacity_data *data; 6960 uint32_t lba; 6961 6962 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 6963 6964 cdb = (struct scsi_read_capacity *)ctsio->cdb; 6965 6966 lba = scsi_4btoul(cdb->addr); 6967 if (((cdb->pmi & SRC_PMI) == 0) 6968 && (lba != 0)) { 6969 ctl_set_invalid_field(/*ctsio*/ ctsio, 6970 /*sks_valid*/ 1, 6971 /*command*/ 1, 6972 /*field*/ 2, 6973 /*bit_valid*/ 0, 6974 /*bit*/ 0); 6975 ctl_done((union ctl_io *)ctsio); 6976 return (CTL_RETVAL_COMPLETE); 6977 } 6978 6979 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6980 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 6981 ctsio->kern_data_len = sizeof(*data); 6982 ctsio->kern_total_len = sizeof(*data); 6983 ctsio->kern_rel_offset = 0; 6984 ctsio->kern_sg_entries = 0; 6985 6986 /* 6987 * If the maximum LBA is greater than 0xfffffffe, the user must 6988 * issue a SERVICE ACTION IN (16) command, with the read capacity 6989 * serivce action set. 6990 */ 6991 if (lun->be_lun->maxlba > 0xfffffffe) 6992 scsi_ulto4b(0xffffffff, data->addr); 6993 else 6994 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 6995 6996 /* 6997 * XXX KDM this may not be 512 bytes... 6998 */ 6999 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7000 7001 ctl_set_success(ctsio); 7002 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7003 ctsio->be_move_done = ctl_config_move_done; 7004 ctl_datamove((union ctl_io *)ctsio); 7005 return (CTL_RETVAL_COMPLETE); 7006 } 7007 7008 int 7009 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 7010 { 7011 struct ctl_lun *lun = CTL_LUN(ctsio); 7012 struct scsi_read_capacity_16 *cdb; 7013 struct scsi_read_capacity_data_long *data; 7014 uint64_t lba; 7015 uint32_t alloc_len; 7016 7017 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 7018 7019 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 7020 7021 alloc_len = scsi_4btoul(cdb->alloc_len); 7022 lba = scsi_8btou64(cdb->addr); 7023 7024 if ((cdb->reladr & SRC16_PMI) 7025 && (lba != 0)) { 7026 ctl_set_invalid_field(/*ctsio*/ ctsio, 7027 /*sks_valid*/ 1, 7028 /*command*/ 1, 7029 /*field*/ 2, 7030 /*bit_valid*/ 0, 7031 /*bit*/ 0); 7032 ctl_done((union ctl_io *)ctsio); 7033 return (CTL_RETVAL_COMPLETE); 7034 } 7035 7036 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7037 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 7038 ctsio->kern_rel_offset = 0; 7039 ctsio->kern_sg_entries = 0; 7040 ctsio->kern_data_len = min(sizeof(*data), alloc_len); 7041 ctsio->kern_total_len = ctsio->kern_data_len; 7042 7043 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 7044 /* XXX KDM this may not be 512 bytes... */ 7045 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7046 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 7047 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 7048 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 7049 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 7050 7051 ctl_set_success(ctsio); 7052 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7053 ctsio->be_move_done = ctl_config_move_done; 7054 ctl_datamove((union ctl_io *)ctsio); 7055 return (CTL_RETVAL_COMPLETE); 7056 } 7057 7058 int 7059 ctl_get_lba_status(struct ctl_scsiio *ctsio) 7060 { 7061 struct ctl_lun *lun = CTL_LUN(ctsio); 7062 struct scsi_get_lba_status *cdb; 7063 struct scsi_get_lba_status_data *data; 7064 struct ctl_lba_len_flags *lbalen; 7065 uint64_t lba; 7066 uint32_t alloc_len, total_len; 7067 int retval; 7068 7069 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 7070 7071 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 7072 lba = scsi_8btou64(cdb->addr); 7073 alloc_len = scsi_4btoul(cdb->alloc_len); 7074 7075 if (lba > lun->be_lun->maxlba) { 7076 ctl_set_lba_out_of_range(ctsio, lba); 7077 ctl_done((union ctl_io *)ctsio); 7078 return (CTL_RETVAL_COMPLETE); 7079 } 7080 7081 total_len = sizeof(*data) + sizeof(data->descr[0]); 7082 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7083 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 7084 ctsio->kern_rel_offset = 0; 7085 ctsio->kern_sg_entries = 0; 7086 ctsio->kern_data_len = min(total_len, alloc_len); 7087 ctsio->kern_total_len = ctsio->kern_data_len; 7088 7089 /* Fill dummy data in case backend can't tell anything. */ 7090 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 7091 scsi_u64to8b(lba, data->descr[0].addr); 7092 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 7093 data->descr[0].length); 7094 data->descr[0].status = 0; /* Mapped or unknown. */ 7095 7096 ctl_set_success(ctsio); 7097 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7098 ctsio->be_move_done = ctl_config_move_done; 7099 7100 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 7101 lbalen->lba = lba; 7102 lbalen->len = total_len; 7103 lbalen->flags = 0; 7104 retval = lun->backend->config_read((union ctl_io *)ctsio); 7105 return (retval); 7106 } 7107 7108 int 7109 ctl_read_defect(struct ctl_scsiio *ctsio) 7110 { 7111 struct scsi_read_defect_data_10 *ccb10; 7112 struct scsi_read_defect_data_12 *ccb12; 7113 struct scsi_read_defect_data_hdr_10 *data10; 7114 struct scsi_read_defect_data_hdr_12 *data12; 7115 uint32_t alloc_len, data_len; 7116 uint8_t format; 7117 7118 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7119 7120 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7121 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7122 format = ccb10->format; 7123 alloc_len = scsi_2btoul(ccb10->alloc_length); 7124 data_len = sizeof(*data10); 7125 } else { 7126 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7127 format = ccb12->format; 7128 alloc_len = scsi_4btoul(ccb12->alloc_length); 7129 data_len = sizeof(*data12); 7130 } 7131 if (alloc_len == 0) { 7132 ctl_set_success(ctsio); 7133 ctl_done((union ctl_io *)ctsio); 7134 return (CTL_RETVAL_COMPLETE); 7135 } 7136 7137 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7138 ctsio->kern_rel_offset = 0; 7139 ctsio->kern_sg_entries = 0; 7140 ctsio->kern_data_len = min(data_len, alloc_len); 7141 ctsio->kern_total_len = ctsio->kern_data_len; 7142 7143 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7144 data10 = (struct scsi_read_defect_data_hdr_10 *) 7145 ctsio->kern_data_ptr; 7146 data10->format = format; 7147 scsi_ulto2b(0, data10->length); 7148 } else { 7149 data12 = (struct scsi_read_defect_data_hdr_12 *) 7150 ctsio->kern_data_ptr; 7151 data12->format = format; 7152 scsi_ulto2b(0, data12->generation); 7153 scsi_ulto4b(0, data12->length); 7154 } 7155 7156 ctl_set_success(ctsio); 7157 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7158 ctsio->be_move_done = ctl_config_move_done; 7159 ctl_datamove((union ctl_io *)ctsio); 7160 return (CTL_RETVAL_COMPLETE); 7161 } 7162 7163 int 7164 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7165 { 7166 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7167 struct ctl_lun *lun = CTL_LUN(ctsio); 7168 struct scsi_maintenance_in *cdb; 7169 int retval; 7170 int alloc_len, ext, total_len = 0, g, pc, pg, ts, os; 7171 int num_ha_groups, num_target_ports, shared_group; 7172 struct ctl_port *port; 7173 struct scsi_target_group_data *rtg_ptr; 7174 struct scsi_target_group_data_extended *rtg_ext_ptr; 7175 struct scsi_target_port_group_descriptor *tpg_desc; 7176 7177 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7178 7179 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7180 retval = CTL_RETVAL_COMPLETE; 7181 7182 switch (cdb->byte2 & STG_PDF_MASK) { 7183 case STG_PDF_LENGTH: 7184 ext = 0; 7185 break; 7186 case STG_PDF_EXTENDED: 7187 ext = 1; 7188 break; 7189 default: 7190 ctl_set_invalid_field(/*ctsio*/ ctsio, 7191 /*sks_valid*/ 1, 7192 /*command*/ 1, 7193 /*field*/ 2, 7194 /*bit_valid*/ 1, 7195 /*bit*/ 5); 7196 ctl_done((union ctl_io *)ctsio); 7197 return(retval); 7198 } 7199 7200 num_target_ports = 0; 7201 shared_group = (softc->is_single != 0); 7202 mtx_lock(&softc->ctl_lock); 7203 STAILQ_FOREACH(port, &softc->port_list, links) { 7204 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7205 continue; 7206 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7207 continue; 7208 num_target_ports++; 7209 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7210 shared_group = 1; 7211 } 7212 mtx_unlock(&softc->ctl_lock); 7213 num_ha_groups = (softc->is_single) ? 0 : NUM_HA_SHELVES; 7214 7215 if (ext) 7216 total_len = sizeof(struct scsi_target_group_data_extended); 7217 else 7218 total_len = sizeof(struct scsi_target_group_data); 7219 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7220 (shared_group + num_ha_groups) + 7221 sizeof(struct scsi_target_port_descriptor) * num_target_ports; 7222 7223 alloc_len = scsi_4btoul(cdb->length); 7224 7225 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7226 ctsio->kern_sg_entries = 0; 7227 ctsio->kern_rel_offset = 0; 7228 ctsio->kern_data_len = min(total_len, alloc_len); 7229 ctsio->kern_total_len = ctsio->kern_data_len; 7230 7231 if (ext) { 7232 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7233 ctsio->kern_data_ptr; 7234 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7235 rtg_ext_ptr->format_type = 0x10; 7236 rtg_ext_ptr->implicit_transition_time = 0; 7237 tpg_desc = &rtg_ext_ptr->groups[0]; 7238 } else { 7239 rtg_ptr = (struct scsi_target_group_data *) 7240 ctsio->kern_data_ptr; 7241 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7242 tpg_desc = &rtg_ptr->groups[0]; 7243 } 7244 7245 mtx_lock(&softc->ctl_lock); 7246 pg = softc->port_min / softc->port_cnt; 7247 if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { 7248 /* Some shelf is known to be primary. */ 7249 if (softc->ha_link == CTL_HA_LINK_OFFLINE) 7250 os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7251 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) 7252 os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7253 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) 7254 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 7255 else 7256 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7257 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7258 ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7259 } else { 7260 ts = os; 7261 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7262 } 7263 } else { 7264 /* No known primary shelf. */ 7265 if (softc->ha_link == CTL_HA_LINK_OFFLINE) { 7266 ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7267 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7268 } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { 7269 ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7270 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7271 } else { 7272 ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7273 } 7274 } 7275 if (shared_group) { 7276 tpg_desc->pref_state = ts; 7277 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7278 TPG_U_SUP | TPG_T_SUP; 7279 scsi_ulto2b(1, tpg_desc->target_port_group); 7280 tpg_desc->status = TPG_IMPLICIT; 7281 pc = 0; 7282 STAILQ_FOREACH(port, &softc->port_list, links) { 7283 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7284 continue; 7285 if (!softc->is_single && 7286 (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) 7287 continue; 7288 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7289 continue; 7290 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7291 relative_target_port_identifier); 7292 pc++; 7293 } 7294 tpg_desc->target_port_count = pc; 7295 tpg_desc = (struct scsi_target_port_group_descriptor *) 7296 &tpg_desc->descriptors[pc]; 7297 } 7298 for (g = 0; g < num_ha_groups; g++) { 7299 tpg_desc->pref_state = (g == pg) ? ts : os; 7300 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7301 TPG_U_SUP | TPG_T_SUP; 7302 scsi_ulto2b(2 + g, tpg_desc->target_port_group); 7303 tpg_desc->status = TPG_IMPLICIT; 7304 pc = 0; 7305 STAILQ_FOREACH(port, &softc->port_list, links) { 7306 if (port->targ_port < g * softc->port_cnt || 7307 port->targ_port >= (g + 1) * softc->port_cnt) 7308 continue; 7309 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7310 continue; 7311 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7312 continue; 7313 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7314 continue; 7315 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7316 relative_target_port_identifier); 7317 pc++; 7318 } 7319 tpg_desc->target_port_count = pc; 7320 tpg_desc = (struct scsi_target_port_group_descriptor *) 7321 &tpg_desc->descriptors[pc]; 7322 } 7323 mtx_unlock(&softc->ctl_lock); 7324 7325 ctl_set_success(ctsio); 7326 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7327 ctsio->be_move_done = ctl_config_move_done; 7328 ctl_datamove((union ctl_io *)ctsio); 7329 return(retval); 7330 } 7331 7332 int 7333 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7334 { 7335 struct ctl_lun *lun = CTL_LUN(ctsio); 7336 struct scsi_report_supported_opcodes *cdb; 7337 const struct ctl_cmd_entry *entry, *sentry; 7338 struct scsi_report_supported_opcodes_all *all; 7339 struct scsi_report_supported_opcodes_descr *descr; 7340 struct scsi_report_supported_opcodes_one *one; 7341 int retval; 7342 int alloc_len, total_len; 7343 int opcode, service_action, i, j, num; 7344 7345 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7346 7347 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7348 retval = CTL_RETVAL_COMPLETE; 7349 7350 opcode = cdb->requested_opcode; 7351 service_action = scsi_2btoul(cdb->requested_service_action); 7352 switch (cdb->options & RSO_OPTIONS_MASK) { 7353 case RSO_OPTIONS_ALL: 7354 num = 0; 7355 for (i = 0; i < 256; i++) { 7356 entry = &ctl_cmd_table[i]; 7357 if (entry->flags & CTL_CMD_FLAG_SA5) { 7358 for (j = 0; j < 32; j++) { 7359 sentry = &((const struct ctl_cmd_entry *) 7360 entry->execute)[j]; 7361 if (ctl_cmd_applicable( 7362 lun->be_lun->lun_type, sentry)) 7363 num++; 7364 } 7365 } else { 7366 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7367 entry)) 7368 num++; 7369 } 7370 } 7371 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7372 num * sizeof(struct scsi_report_supported_opcodes_descr); 7373 break; 7374 case RSO_OPTIONS_OC: 7375 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7376 ctl_set_invalid_field(/*ctsio*/ ctsio, 7377 /*sks_valid*/ 1, 7378 /*command*/ 1, 7379 /*field*/ 2, 7380 /*bit_valid*/ 1, 7381 /*bit*/ 2); 7382 ctl_done((union ctl_io *)ctsio); 7383 return (CTL_RETVAL_COMPLETE); 7384 } 7385 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7386 break; 7387 case RSO_OPTIONS_OC_SA: 7388 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7389 service_action >= 32) { 7390 ctl_set_invalid_field(/*ctsio*/ ctsio, 7391 /*sks_valid*/ 1, 7392 /*command*/ 1, 7393 /*field*/ 2, 7394 /*bit_valid*/ 1, 7395 /*bit*/ 2); 7396 ctl_done((union ctl_io *)ctsio); 7397 return (CTL_RETVAL_COMPLETE); 7398 } 7399 /* FALLTHROUGH */ 7400 case RSO_OPTIONS_OC_ASA: 7401 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7402 break; 7403 default: 7404 ctl_set_invalid_field(/*ctsio*/ ctsio, 7405 /*sks_valid*/ 1, 7406 /*command*/ 1, 7407 /*field*/ 2, 7408 /*bit_valid*/ 1, 7409 /*bit*/ 2); 7410 ctl_done((union ctl_io *)ctsio); 7411 return (CTL_RETVAL_COMPLETE); 7412 } 7413 7414 alloc_len = scsi_4btoul(cdb->length); 7415 7416 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7417 ctsio->kern_sg_entries = 0; 7418 ctsio->kern_rel_offset = 0; 7419 ctsio->kern_data_len = min(total_len, alloc_len); 7420 ctsio->kern_total_len = ctsio->kern_data_len; 7421 7422 switch (cdb->options & RSO_OPTIONS_MASK) { 7423 case RSO_OPTIONS_ALL: 7424 all = (struct scsi_report_supported_opcodes_all *) 7425 ctsio->kern_data_ptr; 7426 num = 0; 7427 for (i = 0; i < 256; i++) { 7428 entry = &ctl_cmd_table[i]; 7429 if (entry->flags & CTL_CMD_FLAG_SA5) { 7430 for (j = 0; j < 32; j++) { 7431 sentry = &((const struct ctl_cmd_entry *) 7432 entry->execute)[j]; 7433 if (!ctl_cmd_applicable( 7434 lun->be_lun->lun_type, sentry)) 7435 continue; 7436 descr = &all->descr[num++]; 7437 descr->opcode = i; 7438 scsi_ulto2b(j, descr->service_action); 7439 descr->flags = RSO_SERVACTV; 7440 scsi_ulto2b(sentry->length, 7441 descr->cdb_length); 7442 } 7443 } else { 7444 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7445 entry)) 7446 continue; 7447 descr = &all->descr[num++]; 7448 descr->opcode = i; 7449 scsi_ulto2b(0, descr->service_action); 7450 descr->flags = 0; 7451 scsi_ulto2b(entry->length, descr->cdb_length); 7452 } 7453 } 7454 scsi_ulto4b( 7455 num * sizeof(struct scsi_report_supported_opcodes_descr), 7456 all->length); 7457 break; 7458 case RSO_OPTIONS_OC: 7459 one = (struct scsi_report_supported_opcodes_one *) 7460 ctsio->kern_data_ptr; 7461 entry = &ctl_cmd_table[opcode]; 7462 goto fill_one; 7463 case RSO_OPTIONS_OC_SA: 7464 one = (struct scsi_report_supported_opcodes_one *) 7465 ctsio->kern_data_ptr; 7466 entry = &ctl_cmd_table[opcode]; 7467 entry = &((const struct ctl_cmd_entry *) 7468 entry->execute)[service_action]; 7469 fill_one: 7470 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7471 one->support = 3; 7472 scsi_ulto2b(entry->length, one->cdb_length); 7473 one->cdb_usage[0] = opcode; 7474 memcpy(&one->cdb_usage[1], entry->usage, 7475 entry->length - 1); 7476 } else 7477 one->support = 1; 7478 break; 7479 case RSO_OPTIONS_OC_ASA: 7480 one = (struct scsi_report_supported_opcodes_one *) 7481 ctsio->kern_data_ptr; 7482 entry = &ctl_cmd_table[opcode]; 7483 if (entry->flags & CTL_CMD_FLAG_SA5) { 7484 entry = &((const struct ctl_cmd_entry *) 7485 entry->execute)[service_action]; 7486 } else if (service_action != 0) { 7487 one->support = 1; 7488 break; 7489 } 7490 goto fill_one; 7491 } 7492 7493 ctl_set_success(ctsio); 7494 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7495 ctsio->be_move_done = ctl_config_move_done; 7496 ctl_datamove((union ctl_io *)ctsio); 7497 return(retval); 7498 } 7499 7500 int 7501 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7502 { 7503 struct scsi_report_supported_tmf *cdb; 7504 struct scsi_report_supported_tmf_ext_data *data; 7505 int retval; 7506 int alloc_len, total_len; 7507 7508 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7509 7510 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7511 7512 retval = CTL_RETVAL_COMPLETE; 7513 7514 if (cdb->options & RST_REPD) 7515 total_len = sizeof(struct scsi_report_supported_tmf_ext_data); 7516 else 7517 total_len = sizeof(struct scsi_report_supported_tmf_data); 7518 alloc_len = scsi_4btoul(cdb->length); 7519 7520 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7521 ctsio->kern_sg_entries = 0; 7522 ctsio->kern_rel_offset = 0; 7523 ctsio->kern_data_len = min(total_len, alloc_len); 7524 ctsio->kern_total_len = ctsio->kern_data_len; 7525 7526 data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr; 7527 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | 7528 RST_TRS; 7529 data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; 7530 data->length = total_len - 4; 7531 7532 ctl_set_success(ctsio); 7533 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7534 ctsio->be_move_done = ctl_config_move_done; 7535 ctl_datamove((union ctl_io *)ctsio); 7536 return (retval); 7537 } 7538 7539 int 7540 ctl_report_timestamp(struct ctl_scsiio *ctsio) 7541 { 7542 struct scsi_report_timestamp *cdb; 7543 struct scsi_report_timestamp_data *data; 7544 struct timeval tv; 7545 int64_t timestamp; 7546 int retval; 7547 int alloc_len, total_len; 7548 7549 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7550 7551 cdb = (struct scsi_report_timestamp *)ctsio->cdb; 7552 7553 retval = CTL_RETVAL_COMPLETE; 7554 7555 total_len = sizeof(struct scsi_report_timestamp_data); 7556 alloc_len = scsi_4btoul(cdb->length); 7557 7558 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7559 ctsio->kern_sg_entries = 0; 7560 ctsio->kern_rel_offset = 0; 7561 ctsio->kern_data_len = min(total_len, alloc_len); 7562 ctsio->kern_total_len = ctsio->kern_data_len; 7563 7564 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7565 scsi_ulto2b(sizeof(*data) - 2, data->length); 7566 data->origin = RTS_ORIG_OUTSIDE; 7567 getmicrotime(&tv); 7568 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7569 scsi_ulto4b(timestamp >> 16, data->timestamp); 7570 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7571 7572 ctl_set_success(ctsio); 7573 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7574 ctsio->be_move_done = ctl_config_move_done; 7575 ctl_datamove((union ctl_io *)ctsio); 7576 return (retval); 7577 } 7578 7579 int 7580 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7581 { 7582 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7583 struct ctl_lun *lun = CTL_LUN(ctsio); 7584 struct scsi_per_res_in *cdb; 7585 int alloc_len, total_len = 0; 7586 /* struct scsi_per_res_in_rsrv in_data; */ 7587 uint64_t key; 7588 7589 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7590 7591 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7592 7593 alloc_len = scsi_2btoul(cdb->length); 7594 7595 retry: 7596 mtx_lock(&lun->lun_lock); 7597 switch (cdb->action) { 7598 case SPRI_RK: /* read keys */ 7599 total_len = sizeof(struct scsi_per_res_in_keys) + 7600 lun->pr_key_count * 7601 sizeof(struct scsi_per_res_key); 7602 break; 7603 case SPRI_RR: /* read reservation */ 7604 if (lun->flags & CTL_LUN_PR_RESERVED) 7605 total_len = sizeof(struct scsi_per_res_in_rsrv); 7606 else 7607 total_len = sizeof(struct scsi_per_res_in_header); 7608 break; 7609 case SPRI_RC: /* report capabilities */ 7610 total_len = sizeof(struct scsi_per_res_cap); 7611 break; 7612 case SPRI_RS: /* read full status */ 7613 total_len = sizeof(struct scsi_per_res_in_header) + 7614 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7615 lun->pr_key_count; 7616 break; 7617 default: 7618 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7619 } 7620 mtx_unlock(&lun->lun_lock); 7621 7622 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7623 ctsio->kern_rel_offset = 0; 7624 ctsio->kern_sg_entries = 0; 7625 ctsio->kern_data_len = min(total_len, alloc_len); 7626 ctsio->kern_total_len = ctsio->kern_data_len; 7627 7628 mtx_lock(&lun->lun_lock); 7629 switch (cdb->action) { 7630 case SPRI_RK: { // read keys 7631 struct scsi_per_res_in_keys *res_keys; 7632 int i, key_count; 7633 7634 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7635 7636 /* 7637 * We had to drop the lock to allocate our buffer, which 7638 * leaves time for someone to come in with another 7639 * persistent reservation. (That is unlikely, though, 7640 * since this should be the only persistent reservation 7641 * command active right now.) 7642 */ 7643 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7644 (lun->pr_key_count * 7645 sizeof(struct scsi_per_res_key)))){ 7646 mtx_unlock(&lun->lun_lock); 7647 free(ctsio->kern_data_ptr, M_CTL); 7648 printf("%s: reservation length changed, retrying\n", 7649 __func__); 7650 goto retry; 7651 } 7652 7653 scsi_ulto4b(lun->pr_generation, res_keys->header.generation); 7654 7655 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7656 lun->pr_key_count, res_keys->header.length); 7657 7658 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { 7659 if ((key = ctl_get_prkey(lun, i)) == 0) 7660 continue; 7661 7662 /* 7663 * We used lun->pr_key_count to calculate the 7664 * size to allocate. If it turns out the number of 7665 * initiators with the registered flag set is 7666 * larger than that (i.e. they haven't been kept in 7667 * sync), we've got a problem. 7668 */ 7669 if (key_count >= lun->pr_key_count) { 7670 key_count++; 7671 continue; 7672 } 7673 scsi_u64to8b(key, res_keys->keys[key_count].key); 7674 key_count++; 7675 } 7676 break; 7677 } 7678 case SPRI_RR: { // read reservation 7679 struct scsi_per_res_in_rsrv *res; 7680 int tmp_len, header_only; 7681 7682 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7683 7684 scsi_ulto4b(lun->pr_generation, res->header.generation); 7685 7686 if (lun->flags & CTL_LUN_PR_RESERVED) 7687 { 7688 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7689 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7690 res->header.length); 7691 header_only = 0; 7692 } else { 7693 tmp_len = sizeof(struct scsi_per_res_in_header); 7694 scsi_ulto4b(0, res->header.length); 7695 header_only = 1; 7696 } 7697 7698 /* 7699 * We had to drop the lock to allocate our buffer, which 7700 * leaves time for someone to come in with another 7701 * persistent reservation. (That is unlikely, though, 7702 * since this should be the only persistent reservation 7703 * command active right now.) 7704 */ 7705 if (tmp_len != total_len) { 7706 mtx_unlock(&lun->lun_lock); 7707 free(ctsio->kern_data_ptr, M_CTL); 7708 printf("%s: reservation status changed, retrying\n", 7709 __func__); 7710 goto retry; 7711 } 7712 7713 /* 7714 * No reservation held, so we're done. 7715 */ 7716 if (header_only != 0) 7717 break; 7718 7719 /* 7720 * If the registration is an All Registrants type, the key 7721 * is 0, since it doesn't really matter. 7722 */ 7723 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7724 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7725 res->data.reservation); 7726 } 7727 res->data.scopetype = lun->pr_res_type; 7728 break; 7729 } 7730 case SPRI_RC: //report capabilities 7731 { 7732 struct scsi_per_res_cap *res_cap; 7733 uint16_t type_mask; 7734 7735 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7736 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7737 res_cap->flags1 = SPRI_CRH; 7738 res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5; 7739 type_mask = SPRI_TM_WR_EX_AR | 7740 SPRI_TM_EX_AC_RO | 7741 SPRI_TM_WR_EX_RO | 7742 SPRI_TM_EX_AC | 7743 SPRI_TM_WR_EX | 7744 SPRI_TM_EX_AC_AR; 7745 scsi_ulto2b(type_mask, res_cap->type_mask); 7746 break; 7747 } 7748 case SPRI_RS: { // read full status 7749 struct scsi_per_res_in_full *res_status; 7750 struct scsi_per_res_in_full_desc *res_desc; 7751 struct ctl_port *port; 7752 int i, len; 7753 7754 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7755 7756 /* 7757 * We had to drop the lock to allocate our buffer, which 7758 * leaves time for someone to come in with another 7759 * persistent reservation. (That is unlikely, though, 7760 * since this should be the only persistent reservation 7761 * command active right now.) 7762 */ 7763 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7764 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7765 lun->pr_key_count)){ 7766 mtx_unlock(&lun->lun_lock); 7767 free(ctsio->kern_data_ptr, M_CTL); 7768 printf("%s: reservation length changed, retrying\n", 7769 __func__); 7770 goto retry; 7771 } 7772 7773 scsi_ulto4b(lun->pr_generation, res_status->header.generation); 7774 7775 res_desc = &res_status->desc[0]; 7776 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7777 if ((key = ctl_get_prkey(lun, i)) == 0) 7778 continue; 7779 7780 scsi_u64to8b(key, res_desc->res_key.key); 7781 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7782 (lun->pr_res_idx == i || 7783 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7784 res_desc->flags = SPRI_FULL_R_HOLDER; 7785 res_desc->scopetype = lun->pr_res_type; 7786 } 7787 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7788 res_desc->rel_trgt_port_id); 7789 len = 0; 7790 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; 7791 if (port != NULL) 7792 len = ctl_create_iid(port, 7793 i % CTL_MAX_INIT_PER_PORT, 7794 res_desc->transport_id); 7795 scsi_ulto4b(len, res_desc->additional_length); 7796 res_desc = (struct scsi_per_res_in_full_desc *) 7797 &res_desc->transport_id[len]; 7798 } 7799 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7800 res_status->header.length); 7801 break; 7802 } 7803 default: 7804 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7805 } 7806 mtx_unlock(&lun->lun_lock); 7807 7808 ctl_set_success(ctsio); 7809 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7810 ctsio->be_move_done = ctl_config_move_done; 7811 ctl_datamove((union ctl_io *)ctsio); 7812 return (CTL_RETVAL_COMPLETE); 7813 } 7814 7815 /* 7816 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7817 * it should return. 7818 */ 7819 static int 7820 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7821 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7822 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7823 struct scsi_per_res_out_parms* param) 7824 { 7825 union ctl_ha_msg persis_io; 7826 int i; 7827 7828 mtx_lock(&lun->lun_lock); 7829 if (sa_res_key == 0) { 7830 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7831 /* validate scope and type */ 7832 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7833 SPR_LU_SCOPE) { 7834 mtx_unlock(&lun->lun_lock); 7835 ctl_set_invalid_field(/*ctsio*/ ctsio, 7836 /*sks_valid*/ 1, 7837 /*command*/ 1, 7838 /*field*/ 2, 7839 /*bit_valid*/ 1, 7840 /*bit*/ 4); 7841 ctl_done((union ctl_io *)ctsio); 7842 return (1); 7843 } 7844 7845 if (type>8 || type==2 || type==4 || type==0) { 7846 mtx_unlock(&lun->lun_lock); 7847 ctl_set_invalid_field(/*ctsio*/ ctsio, 7848 /*sks_valid*/ 1, 7849 /*command*/ 1, 7850 /*field*/ 2, 7851 /*bit_valid*/ 1, 7852 /*bit*/ 0); 7853 ctl_done((union ctl_io *)ctsio); 7854 return (1); 7855 } 7856 7857 /* 7858 * Unregister everybody else and build UA for 7859 * them 7860 */ 7861 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7862 if (i == residx || ctl_get_prkey(lun, i) == 0) 7863 continue; 7864 7865 ctl_clr_prkey(lun, i); 7866 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7867 } 7868 lun->pr_key_count = 1; 7869 lun->pr_res_type = type; 7870 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 7871 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 7872 lun->pr_res_idx = residx; 7873 lun->pr_generation++; 7874 mtx_unlock(&lun->lun_lock); 7875 7876 /* send msg to other side */ 7877 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7878 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7879 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7880 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7881 persis_io.pr.pr_info.res_type = type; 7882 memcpy(persis_io.pr.pr_info.sa_res_key, 7883 param->serv_act_res_key, 7884 sizeof(param->serv_act_res_key)); 7885 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7886 sizeof(persis_io.pr), M_WAITOK); 7887 } else { 7888 /* not all registrants */ 7889 mtx_unlock(&lun->lun_lock); 7890 free(ctsio->kern_data_ptr, M_CTL); 7891 ctl_set_invalid_field(ctsio, 7892 /*sks_valid*/ 1, 7893 /*command*/ 0, 7894 /*field*/ 8, 7895 /*bit_valid*/ 0, 7896 /*bit*/ 0); 7897 ctl_done((union ctl_io *)ctsio); 7898 return (1); 7899 } 7900 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7901 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7902 int found = 0; 7903 7904 if (res_key == sa_res_key) { 7905 /* special case */ 7906 /* 7907 * The spec implies this is not good but doesn't 7908 * say what to do. There are two choices either 7909 * generate a res conflict or check condition 7910 * with illegal field in parameter data. Since 7911 * that is what is done when the sa_res_key is 7912 * zero I'll take that approach since this has 7913 * to do with the sa_res_key. 7914 */ 7915 mtx_unlock(&lun->lun_lock); 7916 free(ctsio->kern_data_ptr, M_CTL); 7917 ctl_set_invalid_field(ctsio, 7918 /*sks_valid*/ 1, 7919 /*command*/ 0, 7920 /*field*/ 8, 7921 /*bit_valid*/ 0, 7922 /*bit*/ 0); 7923 ctl_done((union ctl_io *)ctsio); 7924 return (1); 7925 } 7926 7927 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7928 if (ctl_get_prkey(lun, i) != sa_res_key) 7929 continue; 7930 7931 found = 1; 7932 ctl_clr_prkey(lun, i); 7933 lun->pr_key_count--; 7934 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7935 } 7936 if (!found) { 7937 mtx_unlock(&lun->lun_lock); 7938 free(ctsio->kern_data_ptr, M_CTL); 7939 ctl_set_reservation_conflict(ctsio); 7940 ctl_done((union ctl_io *)ctsio); 7941 return (CTL_RETVAL_COMPLETE); 7942 } 7943 lun->pr_generation++; 7944 mtx_unlock(&lun->lun_lock); 7945 7946 /* send msg to other side */ 7947 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7948 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7949 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7950 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7951 persis_io.pr.pr_info.res_type = type; 7952 memcpy(persis_io.pr.pr_info.sa_res_key, 7953 param->serv_act_res_key, 7954 sizeof(param->serv_act_res_key)); 7955 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7956 sizeof(persis_io.pr), M_WAITOK); 7957 } else { 7958 /* Reserved but not all registrants */ 7959 /* sa_res_key is res holder */ 7960 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7961 /* validate scope and type */ 7962 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7963 SPR_LU_SCOPE) { 7964 mtx_unlock(&lun->lun_lock); 7965 ctl_set_invalid_field(/*ctsio*/ ctsio, 7966 /*sks_valid*/ 1, 7967 /*command*/ 1, 7968 /*field*/ 2, 7969 /*bit_valid*/ 1, 7970 /*bit*/ 4); 7971 ctl_done((union ctl_io *)ctsio); 7972 return (1); 7973 } 7974 7975 if (type>8 || type==2 || type==4 || type==0) { 7976 mtx_unlock(&lun->lun_lock); 7977 ctl_set_invalid_field(/*ctsio*/ ctsio, 7978 /*sks_valid*/ 1, 7979 /*command*/ 1, 7980 /*field*/ 2, 7981 /*bit_valid*/ 1, 7982 /*bit*/ 0); 7983 ctl_done((union ctl_io *)ctsio); 7984 return (1); 7985 } 7986 7987 /* 7988 * Do the following: 7989 * if sa_res_key != res_key remove all 7990 * registrants w/sa_res_key and generate UA 7991 * for these registrants(Registrations 7992 * Preempted) if it wasn't an exclusive 7993 * reservation generate UA(Reservations 7994 * Preempted) for all other registered nexuses 7995 * if the type has changed. Establish the new 7996 * reservation and holder. If res_key and 7997 * sa_res_key are the same do the above 7998 * except don't unregister the res holder. 7999 */ 8000 8001 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8002 if (i == residx || ctl_get_prkey(lun, i) == 0) 8003 continue; 8004 8005 if (sa_res_key == ctl_get_prkey(lun, i)) { 8006 ctl_clr_prkey(lun, i); 8007 lun->pr_key_count--; 8008 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8009 } else if (type != lun->pr_res_type && 8010 (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8011 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8012 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8013 } 8014 } 8015 lun->pr_res_type = type; 8016 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8017 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8018 lun->pr_res_idx = residx; 8019 else 8020 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8021 lun->pr_generation++; 8022 mtx_unlock(&lun->lun_lock); 8023 8024 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8025 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8026 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8027 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8028 persis_io.pr.pr_info.res_type = type; 8029 memcpy(persis_io.pr.pr_info.sa_res_key, 8030 param->serv_act_res_key, 8031 sizeof(param->serv_act_res_key)); 8032 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8033 sizeof(persis_io.pr), M_WAITOK); 8034 } else { 8035 /* 8036 * sa_res_key is not the res holder just 8037 * remove registrants 8038 */ 8039 int found=0; 8040 8041 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8042 if (sa_res_key != ctl_get_prkey(lun, i)) 8043 continue; 8044 8045 found = 1; 8046 ctl_clr_prkey(lun, i); 8047 lun->pr_key_count--; 8048 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8049 } 8050 8051 if (!found) { 8052 mtx_unlock(&lun->lun_lock); 8053 free(ctsio->kern_data_ptr, M_CTL); 8054 ctl_set_reservation_conflict(ctsio); 8055 ctl_done((union ctl_io *)ctsio); 8056 return (1); 8057 } 8058 lun->pr_generation++; 8059 mtx_unlock(&lun->lun_lock); 8060 8061 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8062 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8063 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8064 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8065 persis_io.pr.pr_info.res_type = type; 8066 memcpy(persis_io.pr.pr_info.sa_res_key, 8067 param->serv_act_res_key, 8068 sizeof(param->serv_act_res_key)); 8069 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8070 sizeof(persis_io.pr), M_WAITOK); 8071 } 8072 } 8073 return (0); 8074 } 8075 8076 static void 8077 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8078 { 8079 uint64_t sa_res_key; 8080 int i; 8081 8082 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8083 8084 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8085 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8086 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8087 if (sa_res_key == 0) { 8088 /* 8089 * Unregister everybody else and build UA for 8090 * them 8091 */ 8092 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8093 if (i == msg->pr.pr_info.residx || 8094 ctl_get_prkey(lun, i) == 0) 8095 continue; 8096 8097 ctl_clr_prkey(lun, i); 8098 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8099 } 8100 8101 lun->pr_key_count = 1; 8102 lun->pr_res_type = msg->pr.pr_info.res_type; 8103 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8104 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8105 lun->pr_res_idx = msg->pr.pr_info.residx; 8106 } else { 8107 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8108 if (sa_res_key == ctl_get_prkey(lun, i)) 8109 continue; 8110 8111 ctl_clr_prkey(lun, i); 8112 lun->pr_key_count--; 8113 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8114 } 8115 } 8116 } else { 8117 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8118 if (i == msg->pr.pr_info.residx || 8119 ctl_get_prkey(lun, i) == 0) 8120 continue; 8121 8122 if (sa_res_key == ctl_get_prkey(lun, i)) { 8123 ctl_clr_prkey(lun, i); 8124 lun->pr_key_count--; 8125 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8126 } else if (msg->pr.pr_info.res_type != lun->pr_res_type 8127 && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8128 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8129 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8130 } 8131 } 8132 lun->pr_res_type = msg->pr.pr_info.res_type; 8133 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8134 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8135 lun->pr_res_idx = msg->pr.pr_info.residx; 8136 else 8137 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8138 } 8139 lun->pr_generation++; 8140 8141 } 8142 8143 8144 int 8145 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8146 { 8147 struct ctl_softc *softc = CTL_SOFTC(ctsio); 8148 struct ctl_lun *lun = CTL_LUN(ctsio); 8149 int retval; 8150 u_int32_t param_len; 8151 struct scsi_per_res_out *cdb; 8152 struct scsi_per_res_out_parms* param; 8153 uint32_t residx; 8154 uint64_t res_key, sa_res_key, key; 8155 uint8_t type; 8156 union ctl_ha_msg persis_io; 8157 int i; 8158 8159 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8160 8161 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8162 retval = CTL_RETVAL_COMPLETE; 8163 8164 /* 8165 * We only support whole-LUN scope. The scope & type are ignored for 8166 * register, register and ignore existing key and clear. 8167 * We sometimes ignore scope and type on preempts too!! 8168 * Verify reservation type here as well. 8169 */ 8170 type = cdb->scope_type & SPR_TYPE_MASK; 8171 if ((cdb->action == SPRO_RESERVE) 8172 || (cdb->action == SPRO_RELEASE)) { 8173 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8174 ctl_set_invalid_field(/*ctsio*/ ctsio, 8175 /*sks_valid*/ 1, 8176 /*command*/ 1, 8177 /*field*/ 2, 8178 /*bit_valid*/ 1, 8179 /*bit*/ 4); 8180 ctl_done((union ctl_io *)ctsio); 8181 return (CTL_RETVAL_COMPLETE); 8182 } 8183 8184 if (type>8 || type==2 || type==4 || type==0) { 8185 ctl_set_invalid_field(/*ctsio*/ ctsio, 8186 /*sks_valid*/ 1, 8187 /*command*/ 1, 8188 /*field*/ 2, 8189 /*bit_valid*/ 1, 8190 /*bit*/ 0); 8191 ctl_done((union ctl_io *)ctsio); 8192 return (CTL_RETVAL_COMPLETE); 8193 } 8194 } 8195 8196 param_len = scsi_4btoul(cdb->length); 8197 8198 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8199 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8200 ctsio->kern_data_len = param_len; 8201 ctsio->kern_total_len = param_len; 8202 ctsio->kern_rel_offset = 0; 8203 ctsio->kern_sg_entries = 0; 8204 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8205 ctsio->be_move_done = ctl_config_move_done; 8206 ctl_datamove((union ctl_io *)ctsio); 8207 8208 return (CTL_RETVAL_COMPLETE); 8209 } 8210 8211 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8212 8213 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8214 res_key = scsi_8btou64(param->res_key.key); 8215 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8216 8217 /* 8218 * Validate the reservation key here except for SPRO_REG_IGNO 8219 * This must be done for all other service actions 8220 */ 8221 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8222 mtx_lock(&lun->lun_lock); 8223 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8224 if (res_key != key) { 8225 /* 8226 * The current key passed in doesn't match 8227 * the one the initiator previously 8228 * registered. 8229 */ 8230 mtx_unlock(&lun->lun_lock); 8231 free(ctsio->kern_data_ptr, M_CTL); 8232 ctl_set_reservation_conflict(ctsio); 8233 ctl_done((union ctl_io *)ctsio); 8234 return (CTL_RETVAL_COMPLETE); 8235 } 8236 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8237 /* 8238 * We are not registered 8239 */ 8240 mtx_unlock(&lun->lun_lock); 8241 free(ctsio->kern_data_ptr, M_CTL); 8242 ctl_set_reservation_conflict(ctsio); 8243 ctl_done((union ctl_io *)ctsio); 8244 return (CTL_RETVAL_COMPLETE); 8245 } else if (res_key != 0) { 8246 /* 8247 * We are not registered and trying to register but 8248 * the register key isn't zero. 8249 */ 8250 mtx_unlock(&lun->lun_lock); 8251 free(ctsio->kern_data_ptr, M_CTL); 8252 ctl_set_reservation_conflict(ctsio); 8253 ctl_done((union ctl_io *)ctsio); 8254 return (CTL_RETVAL_COMPLETE); 8255 } 8256 mtx_unlock(&lun->lun_lock); 8257 } 8258 8259 switch (cdb->action & SPRO_ACTION_MASK) { 8260 case SPRO_REGISTER: 8261 case SPRO_REG_IGNO: { 8262 8263 #if 0 8264 printf("Registration received\n"); 8265 #endif 8266 8267 /* 8268 * We don't support any of these options, as we report in 8269 * the read capabilities request (see 8270 * ctl_persistent_reserve_in(), above). 8271 */ 8272 if ((param->flags & SPR_SPEC_I_PT) 8273 || (param->flags & SPR_ALL_TG_PT) 8274 || (param->flags & SPR_APTPL)) { 8275 int bit_ptr; 8276 8277 if (param->flags & SPR_APTPL) 8278 bit_ptr = 0; 8279 else if (param->flags & SPR_ALL_TG_PT) 8280 bit_ptr = 2; 8281 else /* SPR_SPEC_I_PT */ 8282 bit_ptr = 3; 8283 8284 free(ctsio->kern_data_ptr, M_CTL); 8285 ctl_set_invalid_field(ctsio, 8286 /*sks_valid*/ 1, 8287 /*command*/ 0, 8288 /*field*/ 20, 8289 /*bit_valid*/ 1, 8290 /*bit*/ bit_ptr); 8291 ctl_done((union ctl_io *)ctsio); 8292 return (CTL_RETVAL_COMPLETE); 8293 } 8294 8295 mtx_lock(&lun->lun_lock); 8296 8297 /* 8298 * The initiator wants to clear the 8299 * key/unregister. 8300 */ 8301 if (sa_res_key == 0) { 8302 if ((res_key == 0 8303 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8304 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8305 && ctl_get_prkey(lun, residx) == 0)) { 8306 mtx_unlock(&lun->lun_lock); 8307 goto done; 8308 } 8309 8310 ctl_clr_prkey(lun, residx); 8311 lun->pr_key_count--; 8312 8313 if (residx == lun->pr_res_idx) { 8314 lun->flags &= ~CTL_LUN_PR_RESERVED; 8315 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8316 8317 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8318 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8319 lun->pr_key_count) { 8320 /* 8321 * If the reservation is a registrants 8322 * only type we need to generate a UA 8323 * for other registered inits. The 8324 * sense code should be RESERVATIONS 8325 * RELEASED 8326 */ 8327 8328 for (i = softc->init_min; i < softc->init_max; i++){ 8329 if (ctl_get_prkey(lun, i) == 0) 8330 continue; 8331 ctl_est_ua(lun, i, 8332 CTL_UA_RES_RELEASE); 8333 } 8334 } 8335 lun->pr_res_type = 0; 8336 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8337 if (lun->pr_key_count==0) { 8338 lun->flags &= ~CTL_LUN_PR_RESERVED; 8339 lun->pr_res_type = 0; 8340 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8341 } 8342 } 8343 lun->pr_generation++; 8344 mtx_unlock(&lun->lun_lock); 8345 8346 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8347 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8348 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8349 persis_io.pr.pr_info.residx = residx; 8350 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8351 sizeof(persis_io.pr), M_WAITOK); 8352 } else /* sa_res_key != 0 */ { 8353 8354 /* 8355 * If we aren't registered currently then increment 8356 * the key count and set the registered flag. 8357 */ 8358 ctl_alloc_prkey(lun, residx); 8359 if (ctl_get_prkey(lun, residx) == 0) 8360 lun->pr_key_count++; 8361 ctl_set_prkey(lun, residx, sa_res_key); 8362 lun->pr_generation++; 8363 mtx_unlock(&lun->lun_lock); 8364 8365 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8366 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8367 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8368 persis_io.pr.pr_info.residx = residx; 8369 memcpy(persis_io.pr.pr_info.sa_res_key, 8370 param->serv_act_res_key, 8371 sizeof(param->serv_act_res_key)); 8372 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8373 sizeof(persis_io.pr), M_WAITOK); 8374 } 8375 8376 break; 8377 } 8378 case SPRO_RESERVE: 8379 #if 0 8380 printf("Reserve executed type %d\n", type); 8381 #endif 8382 mtx_lock(&lun->lun_lock); 8383 if (lun->flags & CTL_LUN_PR_RESERVED) { 8384 /* 8385 * if this isn't the reservation holder and it's 8386 * not a "all registrants" type or if the type is 8387 * different then we have a conflict 8388 */ 8389 if ((lun->pr_res_idx != residx 8390 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8391 || lun->pr_res_type != type) { 8392 mtx_unlock(&lun->lun_lock); 8393 free(ctsio->kern_data_ptr, M_CTL); 8394 ctl_set_reservation_conflict(ctsio); 8395 ctl_done((union ctl_io *)ctsio); 8396 return (CTL_RETVAL_COMPLETE); 8397 } 8398 mtx_unlock(&lun->lun_lock); 8399 } else /* create a reservation */ { 8400 /* 8401 * If it's not an "all registrants" type record 8402 * reservation holder 8403 */ 8404 if (type != SPR_TYPE_WR_EX_AR 8405 && type != SPR_TYPE_EX_AC_AR) 8406 lun->pr_res_idx = residx; /* Res holder */ 8407 else 8408 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8409 8410 lun->flags |= CTL_LUN_PR_RESERVED; 8411 lun->pr_res_type = type; 8412 8413 mtx_unlock(&lun->lun_lock); 8414 8415 /* send msg to other side */ 8416 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8417 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8418 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8419 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8420 persis_io.pr.pr_info.res_type = type; 8421 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8422 sizeof(persis_io.pr), M_WAITOK); 8423 } 8424 break; 8425 8426 case SPRO_RELEASE: 8427 mtx_lock(&lun->lun_lock); 8428 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8429 /* No reservation exists return good status */ 8430 mtx_unlock(&lun->lun_lock); 8431 goto done; 8432 } 8433 /* 8434 * Is this nexus a reservation holder? 8435 */ 8436 if (lun->pr_res_idx != residx 8437 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8438 /* 8439 * not a res holder return good status but 8440 * do nothing 8441 */ 8442 mtx_unlock(&lun->lun_lock); 8443 goto done; 8444 } 8445 8446 if (lun->pr_res_type != type) { 8447 mtx_unlock(&lun->lun_lock); 8448 free(ctsio->kern_data_ptr, M_CTL); 8449 ctl_set_illegal_pr_release(ctsio); 8450 ctl_done((union ctl_io *)ctsio); 8451 return (CTL_RETVAL_COMPLETE); 8452 } 8453 8454 /* okay to release */ 8455 lun->flags &= ~CTL_LUN_PR_RESERVED; 8456 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8457 lun->pr_res_type = 0; 8458 8459 /* 8460 * If this isn't an exclusive access reservation and NUAR 8461 * is not set, generate UA for all other registrants. 8462 */ 8463 if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX && 8464 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8465 for (i = softc->init_min; i < softc->init_max; i++) { 8466 if (i == residx || ctl_get_prkey(lun, i) == 0) 8467 continue; 8468 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8469 } 8470 } 8471 mtx_unlock(&lun->lun_lock); 8472 8473 /* Send msg to other side */ 8474 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8475 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8476 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8477 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8478 sizeof(persis_io.pr), M_WAITOK); 8479 break; 8480 8481 case SPRO_CLEAR: 8482 /* send msg to other side */ 8483 8484 mtx_lock(&lun->lun_lock); 8485 lun->flags &= ~CTL_LUN_PR_RESERVED; 8486 lun->pr_res_type = 0; 8487 lun->pr_key_count = 0; 8488 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8489 8490 ctl_clr_prkey(lun, residx); 8491 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8492 if (ctl_get_prkey(lun, i) != 0) { 8493 ctl_clr_prkey(lun, i); 8494 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8495 } 8496 lun->pr_generation++; 8497 mtx_unlock(&lun->lun_lock); 8498 8499 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8500 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8501 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8502 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8503 sizeof(persis_io.pr), M_WAITOK); 8504 break; 8505 8506 case SPRO_PREEMPT: 8507 case SPRO_PRE_ABO: { 8508 int nretval; 8509 8510 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8511 residx, ctsio, cdb, param); 8512 if (nretval != 0) 8513 return (CTL_RETVAL_COMPLETE); 8514 break; 8515 } 8516 default: 8517 panic("%s: Invalid PR type %#x", __func__, cdb->action); 8518 } 8519 8520 done: 8521 free(ctsio->kern_data_ptr, M_CTL); 8522 ctl_set_success(ctsio); 8523 ctl_done((union ctl_io *)ctsio); 8524 8525 return (retval); 8526 } 8527 8528 /* 8529 * This routine is for handling a message from the other SC pertaining to 8530 * persistent reserve out. All the error checking will have been done 8531 * so only perorming the action need be done here to keep the two 8532 * in sync. 8533 */ 8534 static void 8535 ctl_hndl_per_res_out_on_other_sc(union ctl_io *io) 8536 { 8537 struct ctl_softc *softc = CTL_SOFTC(io); 8538 union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg; 8539 struct ctl_lun *lun; 8540 int i; 8541 uint32_t residx, targ_lun; 8542 8543 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8544 mtx_lock(&softc->ctl_lock); 8545 if (targ_lun >= ctl_max_luns || 8546 (lun = softc->ctl_luns[targ_lun]) == NULL) { 8547 mtx_unlock(&softc->ctl_lock); 8548 return; 8549 } 8550 mtx_lock(&lun->lun_lock); 8551 mtx_unlock(&softc->ctl_lock); 8552 if (lun->flags & CTL_LUN_DISABLED) { 8553 mtx_unlock(&lun->lun_lock); 8554 return; 8555 } 8556 residx = ctl_get_initindex(&msg->hdr.nexus); 8557 switch(msg->pr.pr_info.action) { 8558 case CTL_PR_REG_KEY: 8559 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8560 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8561 lun->pr_key_count++; 8562 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8563 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8564 lun->pr_generation++; 8565 break; 8566 8567 case CTL_PR_UNREG_KEY: 8568 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8569 lun->pr_key_count--; 8570 8571 /* XXX Need to see if the reservation has been released */ 8572 /* if so do we need to generate UA? */ 8573 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8574 lun->flags &= ~CTL_LUN_PR_RESERVED; 8575 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8576 8577 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8578 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8579 lun->pr_key_count) { 8580 /* 8581 * If the reservation is a registrants 8582 * only type we need to generate a UA 8583 * for other registered inits. The 8584 * sense code should be RESERVATIONS 8585 * RELEASED 8586 */ 8587 8588 for (i = softc->init_min; i < softc->init_max; i++) { 8589 if (ctl_get_prkey(lun, i) == 0) 8590 continue; 8591 8592 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8593 } 8594 } 8595 lun->pr_res_type = 0; 8596 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8597 if (lun->pr_key_count==0) { 8598 lun->flags &= ~CTL_LUN_PR_RESERVED; 8599 lun->pr_res_type = 0; 8600 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8601 } 8602 } 8603 lun->pr_generation++; 8604 break; 8605 8606 case CTL_PR_RESERVE: 8607 lun->flags |= CTL_LUN_PR_RESERVED; 8608 lun->pr_res_type = msg->pr.pr_info.res_type; 8609 lun->pr_res_idx = msg->pr.pr_info.residx; 8610 8611 break; 8612 8613 case CTL_PR_RELEASE: 8614 /* 8615 * If this isn't an exclusive access reservation and NUAR 8616 * is not set, generate UA for all other registrants. 8617 */ 8618 if (lun->pr_res_type != SPR_TYPE_EX_AC && 8619 lun->pr_res_type != SPR_TYPE_WR_EX && 8620 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8621 for (i = softc->init_min; i < softc->init_max; i++) 8622 if (i == residx || ctl_get_prkey(lun, i) == 0) 8623 continue; 8624 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8625 } 8626 8627 lun->flags &= ~CTL_LUN_PR_RESERVED; 8628 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8629 lun->pr_res_type = 0; 8630 break; 8631 8632 case CTL_PR_PREEMPT: 8633 ctl_pro_preempt_other(lun, msg); 8634 break; 8635 case CTL_PR_CLEAR: 8636 lun->flags &= ~CTL_LUN_PR_RESERVED; 8637 lun->pr_res_type = 0; 8638 lun->pr_key_count = 0; 8639 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8640 8641 for (i=0; i < CTL_MAX_INITIATORS; i++) { 8642 if (ctl_get_prkey(lun, i) == 0) 8643 continue; 8644 ctl_clr_prkey(lun, i); 8645 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8646 } 8647 lun->pr_generation++; 8648 break; 8649 } 8650 8651 mtx_unlock(&lun->lun_lock); 8652 } 8653 8654 int 8655 ctl_read_write(struct ctl_scsiio *ctsio) 8656 { 8657 struct ctl_lun *lun = CTL_LUN(ctsio); 8658 struct ctl_lba_len_flags *lbalen; 8659 uint64_t lba; 8660 uint32_t num_blocks; 8661 int flags, retval; 8662 int isread; 8663 8664 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8665 8666 flags = 0; 8667 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8668 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8669 switch (ctsio->cdb[0]) { 8670 case READ_6: 8671 case WRITE_6: { 8672 struct scsi_rw_6 *cdb; 8673 8674 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8675 8676 lba = scsi_3btoul(cdb->addr); 8677 /* only 5 bits are valid in the most significant address byte */ 8678 lba &= 0x1fffff; 8679 num_blocks = cdb->length; 8680 /* 8681 * This is correct according to SBC-2. 8682 */ 8683 if (num_blocks == 0) 8684 num_blocks = 256; 8685 break; 8686 } 8687 case READ_10: 8688 case WRITE_10: { 8689 struct scsi_rw_10 *cdb; 8690 8691 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8692 if (cdb->byte2 & SRW10_FUA) 8693 flags |= CTL_LLF_FUA; 8694 if (cdb->byte2 & SRW10_DPO) 8695 flags |= CTL_LLF_DPO; 8696 lba = scsi_4btoul(cdb->addr); 8697 num_blocks = scsi_2btoul(cdb->length); 8698 break; 8699 } 8700 case WRITE_VERIFY_10: { 8701 struct scsi_write_verify_10 *cdb; 8702 8703 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8704 flags |= CTL_LLF_FUA; 8705 if (cdb->byte2 & SWV_DPO) 8706 flags |= CTL_LLF_DPO; 8707 lba = scsi_4btoul(cdb->addr); 8708 num_blocks = scsi_2btoul(cdb->length); 8709 break; 8710 } 8711 case READ_12: 8712 case WRITE_12: { 8713 struct scsi_rw_12 *cdb; 8714 8715 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8716 if (cdb->byte2 & SRW12_FUA) 8717 flags |= CTL_LLF_FUA; 8718 if (cdb->byte2 & SRW12_DPO) 8719 flags |= CTL_LLF_DPO; 8720 lba = scsi_4btoul(cdb->addr); 8721 num_blocks = scsi_4btoul(cdb->length); 8722 break; 8723 } 8724 case WRITE_VERIFY_12: { 8725 struct scsi_write_verify_12 *cdb; 8726 8727 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8728 flags |= CTL_LLF_FUA; 8729 if (cdb->byte2 & SWV_DPO) 8730 flags |= CTL_LLF_DPO; 8731 lba = scsi_4btoul(cdb->addr); 8732 num_blocks = scsi_4btoul(cdb->length); 8733 break; 8734 } 8735 case READ_16: 8736 case WRITE_16: { 8737 struct scsi_rw_16 *cdb; 8738 8739 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8740 if (cdb->byte2 & SRW12_FUA) 8741 flags |= CTL_LLF_FUA; 8742 if (cdb->byte2 & SRW12_DPO) 8743 flags |= CTL_LLF_DPO; 8744 lba = scsi_8btou64(cdb->addr); 8745 num_blocks = scsi_4btoul(cdb->length); 8746 break; 8747 } 8748 case WRITE_ATOMIC_16: { 8749 struct scsi_write_atomic_16 *cdb; 8750 8751 if (lun->be_lun->atomicblock == 0) { 8752 ctl_set_invalid_opcode(ctsio); 8753 ctl_done((union ctl_io *)ctsio); 8754 return (CTL_RETVAL_COMPLETE); 8755 } 8756 8757 cdb = (struct scsi_write_atomic_16 *)ctsio->cdb; 8758 if (cdb->byte2 & SRW12_FUA) 8759 flags |= CTL_LLF_FUA; 8760 if (cdb->byte2 & SRW12_DPO) 8761 flags |= CTL_LLF_DPO; 8762 lba = scsi_8btou64(cdb->addr); 8763 num_blocks = scsi_2btoul(cdb->length); 8764 if (num_blocks > lun->be_lun->atomicblock) { 8765 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8766 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8767 /*bit*/ 0); 8768 ctl_done((union ctl_io *)ctsio); 8769 return (CTL_RETVAL_COMPLETE); 8770 } 8771 break; 8772 } 8773 case WRITE_VERIFY_16: { 8774 struct scsi_write_verify_16 *cdb; 8775 8776 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8777 flags |= CTL_LLF_FUA; 8778 if (cdb->byte2 & SWV_DPO) 8779 flags |= CTL_LLF_DPO; 8780 lba = scsi_8btou64(cdb->addr); 8781 num_blocks = scsi_4btoul(cdb->length); 8782 break; 8783 } 8784 default: 8785 /* 8786 * We got a command we don't support. This shouldn't 8787 * happen, commands should be filtered out above us. 8788 */ 8789 ctl_set_invalid_opcode(ctsio); 8790 ctl_done((union ctl_io *)ctsio); 8791 8792 return (CTL_RETVAL_COMPLETE); 8793 break; /* NOTREACHED */ 8794 } 8795 8796 /* 8797 * The first check is to make sure we're in bounds, the second 8798 * check is to catch wrap-around problems. If the lba + num blocks 8799 * is less than the lba, then we've wrapped around and the block 8800 * range is invalid anyway. 8801 */ 8802 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8803 || ((lba + num_blocks) < lba)) { 8804 ctl_set_lba_out_of_range(ctsio, 8805 MAX(lba, lun->be_lun->maxlba + 1)); 8806 ctl_done((union ctl_io *)ctsio); 8807 return (CTL_RETVAL_COMPLETE); 8808 } 8809 8810 /* 8811 * According to SBC-3, a transfer length of 0 is not an error. 8812 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8813 * translates to 256 blocks for those commands. 8814 */ 8815 if (num_blocks == 0) { 8816 ctl_set_success(ctsio); 8817 ctl_done((union ctl_io *)ctsio); 8818 return (CTL_RETVAL_COMPLETE); 8819 } 8820 8821 /* Set FUA and/or DPO if caches are disabled. */ 8822 if (isread) { 8823 if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0) 8824 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8825 } else { 8826 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8827 flags |= CTL_LLF_FUA; 8828 } 8829 8830 lbalen = (struct ctl_lba_len_flags *) 8831 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8832 lbalen->lba = lba; 8833 lbalen->len = num_blocks; 8834 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8835 8836 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8837 ctsio->kern_rel_offset = 0; 8838 8839 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8840 8841 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8842 return (retval); 8843 } 8844 8845 static int 8846 ctl_cnw_cont(union ctl_io *io) 8847 { 8848 struct ctl_lun *lun = CTL_LUN(io); 8849 struct ctl_scsiio *ctsio; 8850 struct ctl_lba_len_flags *lbalen; 8851 int retval; 8852 8853 ctsio = &io->scsiio; 8854 ctsio->io_hdr.status = CTL_STATUS_NONE; 8855 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8856 lbalen = (struct ctl_lba_len_flags *) 8857 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8858 lbalen->flags &= ~CTL_LLF_COMPARE; 8859 lbalen->flags |= CTL_LLF_WRITE; 8860 8861 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8862 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8863 return (retval); 8864 } 8865 8866 int 8867 ctl_cnw(struct ctl_scsiio *ctsio) 8868 { 8869 struct ctl_lun *lun = CTL_LUN(ctsio); 8870 struct ctl_lba_len_flags *lbalen; 8871 uint64_t lba; 8872 uint32_t num_blocks; 8873 int flags, retval; 8874 8875 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8876 8877 flags = 0; 8878 switch (ctsio->cdb[0]) { 8879 case COMPARE_AND_WRITE: { 8880 struct scsi_compare_and_write *cdb; 8881 8882 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8883 if (cdb->byte2 & SRW10_FUA) 8884 flags |= CTL_LLF_FUA; 8885 if (cdb->byte2 & SRW10_DPO) 8886 flags |= CTL_LLF_DPO; 8887 lba = scsi_8btou64(cdb->addr); 8888 num_blocks = cdb->length; 8889 break; 8890 } 8891 default: 8892 /* 8893 * We got a command we don't support. This shouldn't 8894 * happen, commands should be filtered out above us. 8895 */ 8896 ctl_set_invalid_opcode(ctsio); 8897 ctl_done((union ctl_io *)ctsio); 8898 8899 return (CTL_RETVAL_COMPLETE); 8900 break; /* NOTREACHED */ 8901 } 8902 8903 /* 8904 * The first check is to make sure we're in bounds, the second 8905 * check is to catch wrap-around problems. If the lba + num blocks 8906 * is less than the lba, then we've wrapped around and the block 8907 * range is invalid anyway. 8908 */ 8909 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8910 || ((lba + num_blocks) < lba)) { 8911 ctl_set_lba_out_of_range(ctsio, 8912 MAX(lba, lun->be_lun->maxlba + 1)); 8913 ctl_done((union ctl_io *)ctsio); 8914 return (CTL_RETVAL_COMPLETE); 8915 } 8916 8917 /* 8918 * According to SBC-3, a transfer length of 0 is not an error. 8919 */ 8920 if (num_blocks == 0) { 8921 ctl_set_success(ctsio); 8922 ctl_done((union ctl_io *)ctsio); 8923 return (CTL_RETVAL_COMPLETE); 8924 } 8925 8926 /* Set FUA if write cache is disabled. */ 8927 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8928 flags |= CTL_LLF_FUA; 8929 8930 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 8931 ctsio->kern_rel_offset = 0; 8932 8933 /* 8934 * Set the IO_CONT flag, so that if this I/O gets passed to 8935 * ctl_data_submit_done(), it'll get passed back to 8936 * ctl_ctl_cnw_cont() for further processing. 8937 */ 8938 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 8939 ctsio->io_cont = ctl_cnw_cont; 8940 8941 lbalen = (struct ctl_lba_len_flags *) 8942 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8943 lbalen->lba = lba; 8944 lbalen->len = num_blocks; 8945 lbalen->flags = CTL_LLF_COMPARE | flags; 8946 8947 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 8948 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8949 return (retval); 8950 } 8951 8952 int 8953 ctl_verify(struct ctl_scsiio *ctsio) 8954 { 8955 struct ctl_lun *lun = CTL_LUN(ctsio); 8956 struct ctl_lba_len_flags *lbalen; 8957 uint64_t lba; 8958 uint32_t num_blocks; 8959 int bytchk, flags; 8960 int retval; 8961 8962 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 8963 8964 bytchk = 0; 8965 flags = CTL_LLF_FUA; 8966 switch (ctsio->cdb[0]) { 8967 case VERIFY_10: { 8968 struct scsi_verify_10 *cdb; 8969 8970 cdb = (struct scsi_verify_10 *)ctsio->cdb; 8971 if (cdb->byte2 & SVFY_BYTCHK) 8972 bytchk = 1; 8973 if (cdb->byte2 & SVFY_DPO) 8974 flags |= CTL_LLF_DPO; 8975 lba = scsi_4btoul(cdb->addr); 8976 num_blocks = scsi_2btoul(cdb->length); 8977 break; 8978 } 8979 case VERIFY_12: { 8980 struct scsi_verify_12 *cdb; 8981 8982 cdb = (struct scsi_verify_12 *)ctsio->cdb; 8983 if (cdb->byte2 & SVFY_BYTCHK) 8984 bytchk = 1; 8985 if (cdb->byte2 & SVFY_DPO) 8986 flags |= CTL_LLF_DPO; 8987 lba = scsi_4btoul(cdb->addr); 8988 num_blocks = scsi_4btoul(cdb->length); 8989 break; 8990 } 8991 case VERIFY_16: { 8992 struct scsi_rw_16 *cdb; 8993 8994 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8995 if (cdb->byte2 & SVFY_BYTCHK) 8996 bytchk = 1; 8997 if (cdb->byte2 & SVFY_DPO) 8998 flags |= CTL_LLF_DPO; 8999 lba = scsi_8btou64(cdb->addr); 9000 num_blocks = scsi_4btoul(cdb->length); 9001 break; 9002 } 9003 default: 9004 /* 9005 * We got a command we don't support. This shouldn't 9006 * happen, commands should be filtered out above us. 9007 */ 9008 ctl_set_invalid_opcode(ctsio); 9009 ctl_done((union ctl_io *)ctsio); 9010 return (CTL_RETVAL_COMPLETE); 9011 } 9012 9013 /* 9014 * The first check is to make sure we're in bounds, the second 9015 * check is to catch wrap-around problems. If the lba + num blocks 9016 * is less than the lba, then we've wrapped around and the block 9017 * range is invalid anyway. 9018 */ 9019 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9020 || ((lba + num_blocks) < lba)) { 9021 ctl_set_lba_out_of_range(ctsio, 9022 MAX(lba, lun->be_lun->maxlba + 1)); 9023 ctl_done((union ctl_io *)ctsio); 9024 return (CTL_RETVAL_COMPLETE); 9025 } 9026 9027 /* 9028 * According to SBC-3, a transfer length of 0 is not an error. 9029 */ 9030 if (num_blocks == 0) { 9031 ctl_set_success(ctsio); 9032 ctl_done((union ctl_io *)ctsio); 9033 return (CTL_RETVAL_COMPLETE); 9034 } 9035 9036 lbalen = (struct ctl_lba_len_flags *) 9037 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9038 lbalen->lba = lba; 9039 lbalen->len = num_blocks; 9040 if (bytchk) { 9041 lbalen->flags = CTL_LLF_COMPARE | flags; 9042 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9043 } else { 9044 lbalen->flags = CTL_LLF_VERIFY | flags; 9045 ctsio->kern_total_len = 0; 9046 } 9047 ctsio->kern_rel_offset = 0; 9048 9049 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 9050 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9051 return (retval); 9052 } 9053 9054 int 9055 ctl_report_luns(struct ctl_scsiio *ctsio) 9056 { 9057 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9058 struct ctl_port *port = CTL_PORT(ctsio); 9059 struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio); 9060 struct scsi_report_luns *cdb; 9061 struct scsi_report_luns_data *lun_data; 9062 int num_filled, num_luns, num_port_luns, retval; 9063 uint32_t alloc_len, lun_datalen; 9064 uint32_t initidx, targ_lun_id, lun_id; 9065 9066 retval = CTL_RETVAL_COMPLETE; 9067 cdb = (struct scsi_report_luns *)ctsio->cdb; 9068 9069 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9070 9071 num_luns = 0; 9072 num_port_luns = port->lun_map ? port->lun_map_size : ctl_max_luns; 9073 mtx_lock(&softc->ctl_lock); 9074 for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) { 9075 if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX) 9076 num_luns++; 9077 } 9078 mtx_unlock(&softc->ctl_lock); 9079 9080 switch (cdb->select_report) { 9081 case RPL_REPORT_DEFAULT: 9082 case RPL_REPORT_ALL: 9083 case RPL_REPORT_NONSUBSID: 9084 break; 9085 case RPL_REPORT_WELLKNOWN: 9086 case RPL_REPORT_ADMIN: 9087 case RPL_REPORT_CONGLOM: 9088 num_luns = 0; 9089 break; 9090 default: 9091 ctl_set_invalid_field(ctsio, 9092 /*sks_valid*/ 1, 9093 /*command*/ 1, 9094 /*field*/ 2, 9095 /*bit_valid*/ 0, 9096 /*bit*/ 0); 9097 ctl_done((union ctl_io *)ctsio); 9098 return (retval); 9099 break; /* NOTREACHED */ 9100 } 9101 9102 alloc_len = scsi_4btoul(cdb->length); 9103 /* 9104 * The initiator has to allocate at least 16 bytes for this request, 9105 * so he can at least get the header and the first LUN. Otherwise 9106 * we reject the request (per SPC-3 rev 14, section 6.21). 9107 */ 9108 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9109 sizeof(struct scsi_report_luns_lundata))) { 9110 ctl_set_invalid_field(ctsio, 9111 /*sks_valid*/ 1, 9112 /*command*/ 1, 9113 /*field*/ 6, 9114 /*bit_valid*/ 0, 9115 /*bit*/ 0); 9116 ctl_done((union ctl_io *)ctsio); 9117 return (retval); 9118 } 9119 9120 lun_datalen = sizeof(*lun_data) + 9121 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9122 9123 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9124 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9125 ctsio->kern_sg_entries = 0; 9126 9127 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9128 9129 mtx_lock(&softc->ctl_lock); 9130 for (targ_lun_id = 0, num_filled = 0; 9131 targ_lun_id < num_port_luns && num_filled < num_luns; 9132 targ_lun_id++) { 9133 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 9134 if (lun_id == UINT32_MAX) 9135 continue; 9136 lun = softc->ctl_luns[lun_id]; 9137 if (lun == NULL) 9138 continue; 9139 9140 be64enc(lun_data->luns[num_filled++].lundata, 9141 ctl_encode_lun(targ_lun_id)); 9142 9143 /* 9144 * According to SPC-3, rev 14 section 6.21: 9145 * 9146 * "The execution of a REPORT LUNS command to any valid and 9147 * installed logical unit shall clear the REPORTED LUNS DATA 9148 * HAS CHANGED unit attention condition for all logical 9149 * units of that target with respect to the requesting 9150 * initiator. A valid and installed logical unit is one 9151 * having a PERIPHERAL QUALIFIER of 000b in the standard 9152 * INQUIRY data (see 6.4.2)." 9153 * 9154 * If request_lun is NULL, the LUN this report luns command 9155 * was issued to is either disabled or doesn't exist. In that 9156 * case, we shouldn't clear any pending lun change unit 9157 * attention. 9158 */ 9159 if (request_lun != NULL) { 9160 mtx_lock(&lun->lun_lock); 9161 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 9162 mtx_unlock(&lun->lun_lock); 9163 } 9164 } 9165 mtx_unlock(&softc->ctl_lock); 9166 9167 /* 9168 * It's quite possible that we've returned fewer LUNs than we allocated 9169 * space for. Trim it. 9170 */ 9171 lun_datalen = sizeof(*lun_data) + 9172 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9173 ctsio->kern_rel_offset = 0; 9174 ctsio->kern_sg_entries = 0; 9175 ctsio->kern_data_len = min(lun_datalen, alloc_len); 9176 ctsio->kern_total_len = ctsio->kern_data_len; 9177 9178 /* 9179 * We set this to the actual data length, regardless of how much 9180 * space we actually have to return results. If the user looks at 9181 * this value, he'll know whether or not he allocated enough space 9182 * and reissue the command if necessary. We don't support well 9183 * known logical units, so if the user asks for that, return none. 9184 */ 9185 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9186 9187 /* 9188 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9189 * this request. 9190 */ 9191 ctl_set_success(ctsio); 9192 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9193 ctsio->be_move_done = ctl_config_move_done; 9194 ctl_datamove((union ctl_io *)ctsio); 9195 return (retval); 9196 } 9197 9198 int 9199 ctl_request_sense(struct ctl_scsiio *ctsio) 9200 { 9201 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9202 struct ctl_lun *lun = CTL_LUN(ctsio); 9203 struct scsi_request_sense *cdb; 9204 struct scsi_sense_data *sense_ptr, *ps; 9205 uint32_t initidx; 9206 int have_error; 9207 u_int sense_len = SSD_FULL_SIZE; 9208 scsi_sense_data_type sense_format; 9209 ctl_ua_type ua_type; 9210 uint8_t asc = 0, ascq = 0; 9211 9212 cdb = (struct scsi_request_sense *)ctsio->cdb; 9213 9214 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9215 9216 /* 9217 * Determine which sense format the user wants. 9218 */ 9219 if (cdb->byte2 & SRS_DESC) 9220 sense_format = SSD_TYPE_DESC; 9221 else 9222 sense_format = SSD_TYPE_FIXED; 9223 9224 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9225 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9226 ctsio->kern_sg_entries = 0; 9227 ctsio->kern_rel_offset = 0; 9228 9229 /* 9230 * struct scsi_sense_data, which is currently set to 256 bytes, is 9231 * larger than the largest allowed value for the length field in the 9232 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9233 */ 9234 ctsio->kern_data_len = cdb->length; 9235 ctsio->kern_total_len = cdb->length; 9236 9237 /* 9238 * If we don't have a LUN, we don't have any pending sense. 9239 */ 9240 if (lun == NULL || 9241 ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 9242 softc->ha_link < CTL_HA_LINK_UNKNOWN)) { 9243 /* "Logical unit not supported" */ 9244 ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format, 9245 /*current_error*/ 1, 9246 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 9247 /*asc*/ 0x25, 9248 /*ascq*/ 0x00, 9249 SSD_ELEM_NONE); 9250 goto send; 9251 } 9252 9253 have_error = 0; 9254 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9255 /* 9256 * Check for pending sense, and then for pending unit attentions. 9257 * Pending sense gets returned first, then pending unit attentions. 9258 */ 9259 mtx_lock(&lun->lun_lock); 9260 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; 9261 if (ps != NULL) 9262 ps += initidx % CTL_MAX_INIT_PER_PORT; 9263 if (ps != NULL && ps->error_code != 0) { 9264 scsi_sense_data_type stored_format; 9265 9266 /* 9267 * Check to see which sense format was used for the stored 9268 * sense data. 9269 */ 9270 stored_format = scsi_sense_type(ps); 9271 9272 /* 9273 * If the user requested a different sense format than the 9274 * one we stored, then we need to convert it to the other 9275 * format. If we're going from descriptor to fixed format 9276 * sense data, we may lose things in translation, depending 9277 * on what options were used. 9278 * 9279 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9280 * for some reason we'll just copy it out as-is. 9281 */ 9282 if ((stored_format == SSD_TYPE_FIXED) 9283 && (sense_format == SSD_TYPE_DESC)) 9284 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9285 ps, (struct scsi_sense_data_desc *)sense_ptr); 9286 else if ((stored_format == SSD_TYPE_DESC) 9287 && (sense_format == SSD_TYPE_FIXED)) 9288 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9289 ps, (struct scsi_sense_data_fixed *)sense_ptr); 9290 else 9291 memcpy(sense_ptr, ps, sizeof(*sense_ptr)); 9292 9293 ps->error_code = 0; 9294 have_error = 1; 9295 } else { 9296 ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len, 9297 sense_format); 9298 if (ua_type != CTL_UA_NONE) 9299 have_error = 1; 9300 } 9301 if (have_error == 0) { 9302 /* 9303 * Report informational exception if have one and allowed. 9304 */ 9305 if (lun->MODE_IE.mrie != SIEP_MRIE_NO) { 9306 asc = lun->ie_asc; 9307 ascq = lun->ie_ascq; 9308 } 9309 ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format, 9310 /*current_error*/ 1, 9311 /*sense_key*/ SSD_KEY_NO_SENSE, 9312 /*asc*/ asc, 9313 /*ascq*/ ascq, 9314 SSD_ELEM_NONE); 9315 } 9316 mtx_unlock(&lun->lun_lock); 9317 9318 send: 9319 /* 9320 * We report the SCSI status as OK, since the status of the command 9321 * itself is OK. We're reporting sense as parameter data. 9322 */ 9323 ctl_set_success(ctsio); 9324 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9325 ctsio->be_move_done = ctl_config_move_done; 9326 ctl_datamove((union ctl_io *)ctsio); 9327 return (CTL_RETVAL_COMPLETE); 9328 } 9329 9330 int 9331 ctl_tur(struct ctl_scsiio *ctsio) 9332 { 9333 9334 CTL_DEBUG_PRINT(("ctl_tur\n")); 9335 9336 ctl_set_success(ctsio); 9337 ctl_done((union ctl_io *)ctsio); 9338 9339 return (CTL_RETVAL_COMPLETE); 9340 } 9341 9342 /* 9343 * SCSI VPD page 0x00, the Supported VPD Pages page. 9344 */ 9345 static int 9346 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9347 { 9348 struct ctl_lun *lun = CTL_LUN(ctsio); 9349 struct scsi_vpd_supported_pages *pages; 9350 int sup_page_size; 9351 int p; 9352 9353 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9354 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9355 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9356 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9357 ctsio->kern_rel_offset = 0; 9358 ctsio->kern_sg_entries = 0; 9359 ctsio->kern_data_len = min(sup_page_size, alloc_len); 9360 ctsio->kern_total_len = ctsio->kern_data_len; 9361 9362 /* 9363 * The control device is always connected. The disk device, on the 9364 * other hand, may not be online all the time. Need to change this 9365 * to figure out whether the disk device is actually online or not. 9366 */ 9367 if (lun != NULL) 9368 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9369 lun->be_lun->lun_type; 9370 else 9371 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9372 9373 p = 0; 9374 /* Supported VPD pages */ 9375 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9376 /* Serial Number */ 9377 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9378 /* Device Identification */ 9379 pages->page_list[p++] = SVPD_DEVICE_ID; 9380 /* Extended INQUIRY Data */ 9381 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9382 /* Mode Page Policy */ 9383 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9384 /* SCSI Ports */ 9385 pages->page_list[p++] = SVPD_SCSI_PORTS; 9386 /* Third-party Copy */ 9387 pages->page_list[p++] = SVPD_SCSI_TPC; 9388 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9389 /* Block limits */ 9390 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9391 /* Block Device Characteristics */ 9392 pages->page_list[p++] = SVPD_BDC; 9393 /* Logical Block Provisioning */ 9394 pages->page_list[p++] = SVPD_LBP; 9395 } 9396 pages->length = p; 9397 9398 ctl_set_success(ctsio); 9399 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9400 ctsio->be_move_done = ctl_config_move_done; 9401 ctl_datamove((union ctl_io *)ctsio); 9402 return (CTL_RETVAL_COMPLETE); 9403 } 9404 9405 /* 9406 * SCSI VPD page 0x80, the Unit Serial Number page. 9407 */ 9408 static int 9409 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9410 { 9411 struct ctl_lun *lun = CTL_LUN(ctsio); 9412 struct scsi_vpd_unit_serial_number *sn_ptr; 9413 int data_len; 9414 9415 data_len = 4 + CTL_SN_LEN; 9416 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9417 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9418 ctsio->kern_rel_offset = 0; 9419 ctsio->kern_sg_entries = 0; 9420 ctsio->kern_data_len = min(data_len, alloc_len); 9421 ctsio->kern_total_len = ctsio->kern_data_len; 9422 9423 /* 9424 * The control device is always connected. The disk device, on the 9425 * other hand, may not be online all the time. Need to change this 9426 * to figure out whether the disk device is actually online or not. 9427 */ 9428 if (lun != NULL) 9429 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9430 lun->be_lun->lun_type; 9431 else 9432 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9433 9434 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9435 sn_ptr->length = CTL_SN_LEN; 9436 /* 9437 * If we don't have a LUN, we just leave the serial number as 9438 * all spaces. 9439 */ 9440 if (lun != NULL) { 9441 strncpy((char *)sn_ptr->serial_num, 9442 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9443 } else 9444 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9445 9446 ctl_set_success(ctsio); 9447 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9448 ctsio->be_move_done = ctl_config_move_done; 9449 ctl_datamove((union ctl_io *)ctsio); 9450 return (CTL_RETVAL_COMPLETE); 9451 } 9452 9453 9454 /* 9455 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 9456 */ 9457 static int 9458 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9459 { 9460 struct ctl_lun *lun = CTL_LUN(ctsio); 9461 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9462 int data_len; 9463 9464 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9465 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9466 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9467 ctsio->kern_sg_entries = 0; 9468 ctsio->kern_rel_offset = 0; 9469 ctsio->kern_data_len = min(data_len, alloc_len); 9470 ctsio->kern_total_len = ctsio->kern_data_len; 9471 9472 /* 9473 * The control device is always connected. The disk device, on the 9474 * other hand, may not be online all the time. 9475 */ 9476 if (lun != NULL) 9477 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9478 lun->be_lun->lun_type; 9479 else 9480 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9481 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9482 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9483 /* 9484 * We support head of queue, ordered and simple tags. 9485 */ 9486 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9487 /* 9488 * Volatile cache supported. 9489 */ 9490 eid_ptr->flags3 = SVPD_EID_V_SUP; 9491 9492 /* 9493 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9494 * attention for a particular IT nexus on all LUNs once we report 9495 * it to that nexus once. This bit is required as of SPC-4. 9496 */ 9497 eid_ptr->flags4 = SVPD_EID_LUICLR; 9498 9499 /* 9500 * We support revert to defaults (RTD) bit in MODE SELECT. 9501 */ 9502 eid_ptr->flags5 = SVPD_EID_RTD_SUP; 9503 9504 /* 9505 * XXX KDM in order to correctly answer this, we would need 9506 * information from the SIM to determine how much sense data it 9507 * can send. So this would really be a path inquiry field, most 9508 * likely. This can be set to a maximum of 252 according to SPC-4, 9509 * but the hardware may or may not be able to support that much. 9510 * 0 just means that the maximum sense data length is not reported. 9511 */ 9512 eid_ptr->max_sense_length = 0; 9513 9514 ctl_set_success(ctsio); 9515 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9516 ctsio->be_move_done = ctl_config_move_done; 9517 ctl_datamove((union ctl_io *)ctsio); 9518 return (CTL_RETVAL_COMPLETE); 9519 } 9520 9521 static int 9522 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9523 { 9524 struct ctl_lun *lun = CTL_LUN(ctsio); 9525 struct scsi_vpd_mode_page_policy *mpp_ptr; 9526 int data_len; 9527 9528 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9529 sizeof(struct scsi_vpd_mode_page_policy_descr); 9530 9531 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9532 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9533 ctsio->kern_rel_offset = 0; 9534 ctsio->kern_sg_entries = 0; 9535 ctsio->kern_data_len = min(data_len, alloc_len); 9536 ctsio->kern_total_len = ctsio->kern_data_len; 9537 9538 /* 9539 * The control device is always connected. The disk device, on the 9540 * other hand, may not be online all the time. 9541 */ 9542 if (lun != NULL) 9543 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9544 lun->be_lun->lun_type; 9545 else 9546 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9547 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9548 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9549 mpp_ptr->descr[0].page_code = 0x3f; 9550 mpp_ptr->descr[0].subpage_code = 0xff; 9551 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9552 9553 ctl_set_success(ctsio); 9554 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9555 ctsio->be_move_done = ctl_config_move_done; 9556 ctl_datamove((union ctl_io *)ctsio); 9557 return (CTL_RETVAL_COMPLETE); 9558 } 9559 9560 /* 9561 * SCSI VPD page 0x83, the Device Identification page. 9562 */ 9563 static int 9564 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9565 { 9566 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9567 struct ctl_port *port = CTL_PORT(ctsio); 9568 struct ctl_lun *lun = CTL_LUN(ctsio); 9569 struct scsi_vpd_device_id *devid_ptr; 9570 struct scsi_vpd_id_descriptor *desc; 9571 int data_len, g; 9572 uint8_t proto; 9573 9574 data_len = sizeof(struct scsi_vpd_device_id) + 9575 sizeof(struct scsi_vpd_id_descriptor) + 9576 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9577 sizeof(struct scsi_vpd_id_descriptor) + 9578 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9579 if (lun && lun->lun_devid) 9580 data_len += lun->lun_devid->len; 9581 if (port && port->port_devid) 9582 data_len += port->port_devid->len; 9583 if (port && port->target_devid) 9584 data_len += port->target_devid->len; 9585 9586 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9587 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9588 ctsio->kern_sg_entries = 0; 9589 ctsio->kern_rel_offset = 0; 9590 ctsio->kern_sg_entries = 0; 9591 ctsio->kern_data_len = min(data_len, alloc_len); 9592 ctsio->kern_total_len = ctsio->kern_data_len; 9593 9594 /* 9595 * The control device is always connected. The disk device, on the 9596 * other hand, may not be online all the time. 9597 */ 9598 if (lun != NULL) 9599 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9600 lun->be_lun->lun_type; 9601 else 9602 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9603 devid_ptr->page_code = SVPD_DEVICE_ID; 9604 scsi_ulto2b(data_len - 4, devid_ptr->length); 9605 9606 if (port && port->port_type == CTL_PORT_FC) 9607 proto = SCSI_PROTO_FC << 4; 9608 else if (port && port->port_type == CTL_PORT_SAS) 9609 proto = SCSI_PROTO_SAS << 4; 9610 else if (port && port->port_type == CTL_PORT_ISCSI) 9611 proto = SCSI_PROTO_ISCSI << 4; 9612 else 9613 proto = SCSI_PROTO_SPI << 4; 9614 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9615 9616 /* 9617 * We're using a LUN association here. i.e., this device ID is a 9618 * per-LUN identifier. 9619 */ 9620 if (lun && lun->lun_devid) { 9621 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9622 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9623 lun->lun_devid->len); 9624 } 9625 9626 /* 9627 * This is for the WWPN which is a port association. 9628 */ 9629 if (port && port->port_devid) { 9630 memcpy(desc, port->port_devid->data, port->port_devid->len); 9631 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9632 port->port_devid->len); 9633 } 9634 9635 /* 9636 * This is for the Relative Target Port(type 4h) identifier 9637 */ 9638 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9639 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9640 SVPD_ID_TYPE_RELTARG; 9641 desc->length = 4; 9642 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9643 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9644 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9645 9646 /* 9647 * This is for the Target Port Group(type 5h) identifier 9648 */ 9649 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9650 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9651 SVPD_ID_TYPE_TPORTGRP; 9652 desc->length = 4; 9653 if (softc->is_single || 9654 (port && port->status & CTL_PORT_STATUS_HA_SHARED)) 9655 g = 1; 9656 else 9657 g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt; 9658 scsi_ulto2b(g, &desc->identifier[2]); 9659 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9660 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9661 9662 /* 9663 * This is for the Target identifier 9664 */ 9665 if (port && port->target_devid) { 9666 memcpy(desc, port->target_devid->data, port->target_devid->len); 9667 } 9668 9669 ctl_set_success(ctsio); 9670 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9671 ctsio->be_move_done = ctl_config_move_done; 9672 ctl_datamove((union ctl_io *)ctsio); 9673 return (CTL_RETVAL_COMPLETE); 9674 } 9675 9676 static int 9677 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9678 { 9679 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9680 struct ctl_lun *lun = CTL_LUN(ctsio); 9681 struct scsi_vpd_scsi_ports *sp; 9682 struct scsi_vpd_port_designation *pd; 9683 struct scsi_vpd_port_designation_cont *pdc; 9684 struct ctl_port *port; 9685 int data_len, num_target_ports, iid_len, id_len; 9686 9687 num_target_ports = 0; 9688 iid_len = 0; 9689 id_len = 0; 9690 mtx_lock(&softc->ctl_lock); 9691 STAILQ_FOREACH(port, &softc->port_list, links) { 9692 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9693 continue; 9694 if (lun != NULL && 9695 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9696 continue; 9697 num_target_ports++; 9698 if (port->init_devid) 9699 iid_len += port->init_devid->len; 9700 if (port->port_devid) 9701 id_len += port->port_devid->len; 9702 } 9703 mtx_unlock(&softc->ctl_lock); 9704 9705 data_len = sizeof(struct scsi_vpd_scsi_ports) + 9706 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9707 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9708 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9709 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9710 ctsio->kern_sg_entries = 0; 9711 ctsio->kern_rel_offset = 0; 9712 ctsio->kern_sg_entries = 0; 9713 ctsio->kern_data_len = min(data_len, alloc_len); 9714 ctsio->kern_total_len = ctsio->kern_data_len; 9715 9716 /* 9717 * The control device is always connected. The disk device, on the 9718 * other hand, may not be online all the time. Need to change this 9719 * to figure out whether the disk device is actually online or not. 9720 */ 9721 if (lun != NULL) 9722 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 9723 lun->be_lun->lun_type; 9724 else 9725 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9726 9727 sp->page_code = SVPD_SCSI_PORTS; 9728 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9729 sp->page_length); 9730 pd = &sp->design[0]; 9731 9732 mtx_lock(&softc->ctl_lock); 9733 STAILQ_FOREACH(port, &softc->port_list, links) { 9734 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9735 continue; 9736 if (lun != NULL && 9737 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9738 continue; 9739 scsi_ulto2b(port->targ_port, pd->relative_port_id); 9740 if (port->init_devid) { 9741 iid_len = port->init_devid->len; 9742 memcpy(pd->initiator_transportid, 9743 port->init_devid->data, port->init_devid->len); 9744 } else 9745 iid_len = 0; 9746 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9747 pdc = (struct scsi_vpd_port_designation_cont *) 9748 (&pd->initiator_transportid[iid_len]); 9749 if (port->port_devid) { 9750 id_len = port->port_devid->len; 9751 memcpy(pdc->target_port_descriptors, 9752 port->port_devid->data, port->port_devid->len); 9753 } else 9754 id_len = 0; 9755 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9756 pd = (struct scsi_vpd_port_designation *) 9757 ((uint8_t *)pdc->target_port_descriptors + id_len); 9758 } 9759 mtx_unlock(&softc->ctl_lock); 9760 9761 ctl_set_success(ctsio); 9762 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9763 ctsio->be_move_done = ctl_config_move_done; 9764 ctl_datamove((union ctl_io *)ctsio); 9765 return (CTL_RETVAL_COMPLETE); 9766 } 9767 9768 static int 9769 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9770 { 9771 struct ctl_lun *lun = CTL_LUN(ctsio); 9772 struct scsi_vpd_block_limits *bl_ptr; 9773 uint64_t ival; 9774 9775 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9776 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 9777 ctsio->kern_sg_entries = 0; 9778 ctsio->kern_rel_offset = 0; 9779 ctsio->kern_sg_entries = 0; 9780 ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len); 9781 ctsio->kern_total_len = ctsio->kern_data_len; 9782 9783 /* 9784 * The control device is always connected. The disk device, on the 9785 * other hand, may not be online all the time. Need to change this 9786 * to figure out whether the disk device is actually online or not. 9787 */ 9788 if (lun != NULL) 9789 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9790 lun->be_lun->lun_type; 9791 else 9792 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9793 9794 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9795 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 9796 bl_ptr->max_cmp_write_len = 0xff; 9797 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9798 if (lun != NULL) { 9799 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 9800 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9801 ival = 0xffffffff; 9802 ctl_get_opt_number(&lun->be_lun->options, 9803 "unmap_max_lba", &ival); 9804 scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt); 9805 ival = 0xffffffff; 9806 ctl_get_opt_number(&lun->be_lun->options, 9807 "unmap_max_descr", &ival); 9808 scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt); 9809 if (lun->be_lun->ublockexp != 0) { 9810 scsi_ulto4b((1 << lun->be_lun->ublockexp), 9811 bl_ptr->opt_unmap_grain); 9812 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 9813 bl_ptr->unmap_grain_align); 9814 } 9815 } 9816 scsi_ulto4b(lun->be_lun->atomicblock, 9817 bl_ptr->max_atomic_transfer_length); 9818 scsi_ulto4b(0, bl_ptr->atomic_alignment); 9819 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 9820 scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary); 9821 scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size); 9822 ival = UINT64_MAX; 9823 ctl_get_opt_number(&lun->be_lun->options, "write_same_max_lba", &ival); 9824 scsi_u64to8b(ival, bl_ptr->max_write_same_length); 9825 } 9826 9827 ctl_set_success(ctsio); 9828 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9829 ctsio->be_move_done = ctl_config_move_done; 9830 ctl_datamove((union ctl_io *)ctsio); 9831 return (CTL_RETVAL_COMPLETE); 9832 } 9833 9834 static int 9835 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 9836 { 9837 struct ctl_lun *lun = CTL_LUN(ctsio); 9838 struct scsi_vpd_block_device_characteristics *bdc_ptr; 9839 const char *value; 9840 u_int i; 9841 9842 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 9843 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 9844 ctsio->kern_sg_entries = 0; 9845 ctsio->kern_rel_offset = 0; 9846 ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len); 9847 ctsio->kern_total_len = ctsio->kern_data_len; 9848 9849 /* 9850 * The control device is always connected. The disk device, on the 9851 * other hand, may not be online all the time. Need to change this 9852 * to figure out whether the disk device is actually online or not. 9853 */ 9854 if (lun != NULL) 9855 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9856 lun->be_lun->lun_type; 9857 else 9858 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9859 bdc_ptr->page_code = SVPD_BDC; 9860 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 9861 if (lun != NULL && 9862 (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) 9863 i = strtol(value, NULL, 0); 9864 else 9865 i = CTL_DEFAULT_ROTATION_RATE; 9866 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 9867 if (lun != NULL && 9868 (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL) 9869 i = strtol(value, NULL, 0); 9870 else 9871 i = 0; 9872 bdc_ptr->wab_wac_ff = (i & 0x0f); 9873 bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; 9874 9875 ctl_set_success(ctsio); 9876 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9877 ctsio->be_move_done = ctl_config_move_done; 9878 ctl_datamove((union ctl_io *)ctsio); 9879 return (CTL_RETVAL_COMPLETE); 9880 } 9881 9882 static int 9883 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 9884 { 9885 struct ctl_lun *lun = CTL_LUN(ctsio); 9886 struct scsi_vpd_logical_block_prov *lbp_ptr; 9887 const char *value; 9888 9889 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 9890 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 9891 ctsio->kern_sg_entries = 0; 9892 ctsio->kern_rel_offset = 0; 9893 ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len); 9894 ctsio->kern_total_len = ctsio->kern_data_len; 9895 9896 /* 9897 * The control device is always connected. The disk device, on the 9898 * other hand, may not be online all the time. Need to change this 9899 * to figure out whether the disk device is actually online or not. 9900 */ 9901 if (lun != NULL) 9902 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9903 lun->be_lun->lun_type; 9904 else 9905 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9906 9907 lbp_ptr->page_code = SVPD_LBP; 9908 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 9909 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 9910 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9911 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 9912 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 9913 value = ctl_get_opt(&lun->be_lun->options, "provisioning_type"); 9914 if (value != NULL) { 9915 if (strcmp(value, "resource") == 0) 9916 lbp_ptr->prov_type = SVPD_LBP_RESOURCE; 9917 else if (strcmp(value, "thin") == 0) 9918 lbp_ptr->prov_type = SVPD_LBP_THIN; 9919 } else 9920 lbp_ptr->prov_type = SVPD_LBP_THIN; 9921 } 9922 9923 ctl_set_success(ctsio); 9924 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9925 ctsio->be_move_done = ctl_config_move_done; 9926 ctl_datamove((union ctl_io *)ctsio); 9927 return (CTL_RETVAL_COMPLETE); 9928 } 9929 9930 /* 9931 * INQUIRY with the EVPD bit set. 9932 */ 9933 static int 9934 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 9935 { 9936 struct ctl_lun *lun = CTL_LUN(ctsio); 9937 struct scsi_inquiry *cdb; 9938 int alloc_len, retval; 9939 9940 cdb = (struct scsi_inquiry *)ctsio->cdb; 9941 alloc_len = scsi_2btoul(cdb->length); 9942 9943 switch (cdb->page_code) { 9944 case SVPD_SUPPORTED_PAGES: 9945 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 9946 break; 9947 case SVPD_UNIT_SERIAL_NUMBER: 9948 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 9949 break; 9950 case SVPD_DEVICE_ID: 9951 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 9952 break; 9953 case SVPD_EXTENDED_INQUIRY_DATA: 9954 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 9955 break; 9956 case SVPD_MODE_PAGE_POLICY: 9957 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 9958 break; 9959 case SVPD_SCSI_PORTS: 9960 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 9961 break; 9962 case SVPD_SCSI_TPC: 9963 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 9964 break; 9965 case SVPD_BLOCK_LIMITS: 9966 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9967 goto err; 9968 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 9969 break; 9970 case SVPD_BDC: 9971 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9972 goto err; 9973 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 9974 break; 9975 case SVPD_LBP: 9976 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9977 goto err; 9978 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 9979 break; 9980 default: 9981 err: 9982 ctl_set_invalid_field(ctsio, 9983 /*sks_valid*/ 1, 9984 /*command*/ 1, 9985 /*field*/ 2, 9986 /*bit_valid*/ 0, 9987 /*bit*/ 0); 9988 ctl_done((union ctl_io *)ctsio); 9989 retval = CTL_RETVAL_COMPLETE; 9990 break; 9991 } 9992 9993 return (retval); 9994 } 9995 9996 /* 9997 * Standard INQUIRY data. 9998 */ 9999 static int 10000 ctl_inquiry_std(struct ctl_scsiio *ctsio) 10001 { 10002 struct ctl_softc *softc = CTL_SOFTC(ctsio); 10003 struct ctl_port *port = CTL_PORT(ctsio); 10004 struct ctl_lun *lun = CTL_LUN(ctsio); 10005 struct scsi_inquiry_data *inq_ptr; 10006 struct scsi_inquiry *cdb; 10007 char *val; 10008 uint32_t alloc_len, data_len; 10009 ctl_port_type port_type; 10010 10011 port_type = port->port_type; 10012 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 10013 port_type = CTL_PORT_SCSI; 10014 10015 cdb = (struct scsi_inquiry *)ctsio->cdb; 10016 alloc_len = scsi_2btoul(cdb->length); 10017 10018 /* 10019 * We malloc the full inquiry data size here and fill it 10020 * in. If the user only asks for less, we'll give him 10021 * that much. 10022 */ 10023 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); 10024 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10025 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 10026 ctsio->kern_sg_entries = 0; 10027 ctsio->kern_rel_offset = 0; 10028 ctsio->kern_data_len = min(data_len, alloc_len); 10029 ctsio->kern_total_len = ctsio->kern_data_len; 10030 10031 if (lun != NULL) { 10032 if ((lun->flags & CTL_LUN_PRIMARY_SC) || 10033 softc->ha_link >= CTL_HA_LINK_UNKNOWN) { 10034 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10035 lun->be_lun->lun_type; 10036 } else { 10037 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | 10038 lun->be_lun->lun_type; 10039 } 10040 if (lun->flags & CTL_LUN_REMOVABLE) 10041 inq_ptr->dev_qual2 |= SID_RMB; 10042 } else 10043 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 10044 10045 /* RMB in byte 2 is 0 */ 10046 inq_ptr->version = SCSI_REV_SPC5; 10047 10048 /* 10049 * According to SAM-3, even if a device only supports a single 10050 * level of LUN addressing, it should still set the HISUP bit: 10051 * 10052 * 4.9.1 Logical unit numbers overview 10053 * 10054 * All logical unit number formats described in this standard are 10055 * hierarchical in structure even when only a single level in that 10056 * hierarchy is used. The HISUP bit shall be set to one in the 10057 * standard INQUIRY data (see SPC-2) when any logical unit number 10058 * format described in this standard is used. Non-hierarchical 10059 * formats are outside the scope of this standard. 10060 * 10061 * Therefore we set the HiSup bit here. 10062 * 10063 * The response format is 2, per SPC-3. 10064 */ 10065 inq_ptr->response_format = SID_HiSup | 2; 10066 10067 inq_ptr->additional_length = data_len - 10068 (offsetof(struct scsi_inquiry_data, additional_length) + 1); 10069 CTL_DEBUG_PRINT(("additional_length = %d\n", 10070 inq_ptr->additional_length)); 10071 10072 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; 10073 if (port_type == CTL_PORT_SCSI) 10074 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10075 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10076 inq_ptr->flags = SID_CmdQue; 10077 if (port_type == CTL_PORT_SCSI) 10078 inq_ptr->flags |= SID_WBus16 | SID_Sync; 10079 10080 /* 10081 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10082 * We have 8 bytes for the vendor name, and 16 bytes for the device 10083 * name and 4 bytes for the revision. 10084 */ 10085 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10086 "vendor")) == NULL) { 10087 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10088 } else { 10089 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10090 strncpy(inq_ptr->vendor, val, 10091 min(sizeof(inq_ptr->vendor), strlen(val))); 10092 } 10093 if (lun == NULL) { 10094 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10095 sizeof(inq_ptr->product)); 10096 } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) { 10097 switch (lun->be_lun->lun_type) { 10098 case T_DIRECT: 10099 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10100 sizeof(inq_ptr->product)); 10101 break; 10102 case T_PROCESSOR: 10103 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10104 sizeof(inq_ptr->product)); 10105 break; 10106 case T_CDROM: 10107 strncpy(inq_ptr->product, CTL_CDROM_PRODUCT, 10108 sizeof(inq_ptr->product)); 10109 break; 10110 default: 10111 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 10112 sizeof(inq_ptr->product)); 10113 break; 10114 } 10115 } else { 10116 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10117 strncpy(inq_ptr->product, val, 10118 min(sizeof(inq_ptr->product), strlen(val))); 10119 } 10120 10121 /* 10122 * XXX make this a macro somewhere so it automatically gets 10123 * incremented when we make changes. 10124 */ 10125 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10126 "revision")) == NULL) { 10127 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10128 } else { 10129 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10130 strncpy(inq_ptr->revision, val, 10131 min(sizeof(inq_ptr->revision), strlen(val))); 10132 } 10133 10134 /* 10135 * For parallel SCSI, we support double transition and single 10136 * transition clocking. We also support QAS (Quick Arbitration 10137 * and Selection) and Information Unit transfers on both the 10138 * control and array devices. 10139 */ 10140 if (port_type == CTL_PORT_SCSI) 10141 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10142 SID_SPI_IUS; 10143 10144 /* SAM-6 (no version claimed) */ 10145 scsi_ulto2b(0x00C0, inq_ptr->version1); 10146 /* SPC-5 (no version claimed) */ 10147 scsi_ulto2b(0x05C0, inq_ptr->version2); 10148 if (port_type == CTL_PORT_FC) { 10149 /* FCP-2 ANSI INCITS.350:2003 */ 10150 scsi_ulto2b(0x0917, inq_ptr->version3); 10151 } else if (port_type == CTL_PORT_SCSI) { 10152 /* SPI-4 ANSI INCITS.362:200x */ 10153 scsi_ulto2b(0x0B56, inq_ptr->version3); 10154 } else if (port_type == CTL_PORT_ISCSI) { 10155 /* iSCSI (no version claimed) */ 10156 scsi_ulto2b(0x0960, inq_ptr->version3); 10157 } else if (port_type == CTL_PORT_SAS) { 10158 /* SAS (no version claimed) */ 10159 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10160 } else if (port_type == CTL_PORT_UMASS) { 10161 /* USB Mass Storage Class Bulk-Only Transport, Revision 1.0 */ 10162 scsi_ulto2b(0x1730, inq_ptr->version3); 10163 } 10164 10165 if (lun == NULL) { 10166 /* SBC-4 (no version claimed) */ 10167 scsi_ulto2b(0x0600, inq_ptr->version4); 10168 } else { 10169 switch (lun->be_lun->lun_type) { 10170 case T_DIRECT: 10171 /* SBC-4 (no version claimed) */ 10172 scsi_ulto2b(0x0600, inq_ptr->version4); 10173 break; 10174 case T_PROCESSOR: 10175 break; 10176 case T_CDROM: 10177 /* MMC-6 (no version claimed) */ 10178 scsi_ulto2b(0x04E0, inq_ptr->version4); 10179 break; 10180 default: 10181 break; 10182 } 10183 } 10184 10185 ctl_set_success(ctsio); 10186 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10187 ctsio->be_move_done = ctl_config_move_done; 10188 ctl_datamove((union ctl_io *)ctsio); 10189 return (CTL_RETVAL_COMPLETE); 10190 } 10191 10192 int 10193 ctl_inquiry(struct ctl_scsiio *ctsio) 10194 { 10195 struct scsi_inquiry *cdb; 10196 int retval; 10197 10198 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10199 10200 cdb = (struct scsi_inquiry *)ctsio->cdb; 10201 if (cdb->byte2 & SI_EVPD) 10202 retval = ctl_inquiry_evpd(ctsio); 10203 else if (cdb->page_code == 0) 10204 retval = ctl_inquiry_std(ctsio); 10205 else { 10206 ctl_set_invalid_field(ctsio, 10207 /*sks_valid*/ 1, 10208 /*command*/ 1, 10209 /*field*/ 2, 10210 /*bit_valid*/ 0, 10211 /*bit*/ 0); 10212 ctl_done((union ctl_io *)ctsio); 10213 return (CTL_RETVAL_COMPLETE); 10214 } 10215 10216 return (retval); 10217 } 10218 10219 int 10220 ctl_get_config(struct ctl_scsiio *ctsio) 10221 { 10222 struct ctl_lun *lun = CTL_LUN(ctsio); 10223 struct scsi_get_config_header *hdr; 10224 struct scsi_get_config_feature *feature; 10225 struct scsi_get_config *cdb; 10226 uint32_t alloc_len, data_len; 10227 int rt, starting; 10228 10229 cdb = (struct scsi_get_config *)ctsio->cdb; 10230 rt = (cdb->rt & SGC_RT_MASK); 10231 starting = scsi_2btoul(cdb->starting_feature); 10232 alloc_len = scsi_2btoul(cdb->length); 10233 10234 data_len = sizeof(struct scsi_get_config_header) + 10235 sizeof(struct scsi_get_config_feature) + 8 + 10236 sizeof(struct scsi_get_config_feature) + 8 + 10237 sizeof(struct scsi_get_config_feature) + 4 + 10238 sizeof(struct scsi_get_config_feature) + 4 + 10239 sizeof(struct scsi_get_config_feature) + 8 + 10240 sizeof(struct scsi_get_config_feature) + 10241 sizeof(struct scsi_get_config_feature) + 4 + 10242 sizeof(struct scsi_get_config_feature) + 4 + 10243 sizeof(struct scsi_get_config_feature) + 4 + 10244 sizeof(struct scsi_get_config_feature) + 4 + 10245 sizeof(struct scsi_get_config_feature) + 4 + 10246 sizeof(struct scsi_get_config_feature) + 4; 10247 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10248 ctsio->kern_sg_entries = 0; 10249 ctsio->kern_rel_offset = 0; 10250 10251 hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr; 10252 if (lun->flags & CTL_LUN_NO_MEDIA) 10253 scsi_ulto2b(0x0000, hdr->current_profile); 10254 else 10255 scsi_ulto2b(0x0010, hdr->current_profile); 10256 feature = (struct scsi_get_config_feature *)(hdr + 1); 10257 10258 if (starting > 0x003b) 10259 goto done; 10260 if (starting > 0x003a) 10261 goto f3b; 10262 if (starting > 0x002b) 10263 goto f3a; 10264 if (starting > 0x002a) 10265 goto f2b; 10266 if (starting > 0x001f) 10267 goto f2a; 10268 if (starting > 0x001e) 10269 goto f1f; 10270 if (starting > 0x001d) 10271 goto f1e; 10272 if (starting > 0x0010) 10273 goto f1d; 10274 if (starting > 0x0003) 10275 goto f10; 10276 if (starting > 0x0002) 10277 goto f3; 10278 if (starting > 0x0001) 10279 goto f2; 10280 if (starting > 0x0000) 10281 goto f1; 10282 10283 /* Profile List */ 10284 scsi_ulto2b(0x0000, feature->feature_code); 10285 feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT; 10286 feature->add_length = 8; 10287 scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */ 10288 feature->feature_data[2] = 0x00; 10289 scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */ 10290 feature->feature_data[6] = 0x01; 10291 feature = (struct scsi_get_config_feature *) 10292 &feature->feature_data[feature->add_length]; 10293 10294 f1: /* Core */ 10295 scsi_ulto2b(0x0001, feature->feature_code); 10296 feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10297 feature->add_length = 8; 10298 scsi_ulto4b(0x00000000, &feature->feature_data[0]); 10299 feature->feature_data[4] = 0x03; 10300 feature = (struct scsi_get_config_feature *) 10301 &feature->feature_data[feature->add_length]; 10302 10303 f2: /* Morphing */ 10304 scsi_ulto2b(0x0002, feature->feature_code); 10305 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10306 feature->add_length = 4; 10307 feature->feature_data[0] = 0x02; 10308 feature = (struct scsi_get_config_feature *) 10309 &feature->feature_data[feature->add_length]; 10310 10311 f3: /* Removable Medium */ 10312 scsi_ulto2b(0x0003, feature->feature_code); 10313 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10314 feature->add_length = 4; 10315 feature->feature_data[0] = 0x39; 10316 feature = (struct scsi_get_config_feature *) 10317 &feature->feature_data[feature->add_length]; 10318 10319 if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA)) 10320 goto done; 10321 10322 f10: /* Random Read */ 10323 scsi_ulto2b(0x0010, feature->feature_code); 10324 feature->flags = 0x00; 10325 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10326 feature->flags |= SGC_F_CURRENT; 10327 feature->add_length = 8; 10328 scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]); 10329 scsi_ulto2b(1, &feature->feature_data[4]); 10330 feature->feature_data[6] = 0x00; 10331 feature = (struct scsi_get_config_feature *) 10332 &feature->feature_data[feature->add_length]; 10333 10334 f1d: /* Multi-Read */ 10335 scsi_ulto2b(0x001D, feature->feature_code); 10336 feature->flags = 0x00; 10337 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10338 feature->flags |= SGC_F_CURRENT; 10339 feature->add_length = 0; 10340 feature = (struct scsi_get_config_feature *) 10341 &feature->feature_data[feature->add_length]; 10342 10343 f1e: /* CD Read */ 10344 scsi_ulto2b(0x001E, feature->feature_code); 10345 feature->flags = 0x00; 10346 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10347 feature->flags |= SGC_F_CURRENT; 10348 feature->add_length = 4; 10349 feature->feature_data[0] = 0x00; 10350 feature = (struct scsi_get_config_feature *) 10351 &feature->feature_data[feature->add_length]; 10352 10353 f1f: /* DVD Read */ 10354 scsi_ulto2b(0x001F, feature->feature_code); 10355 feature->flags = 0x08; 10356 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10357 feature->flags |= SGC_F_CURRENT; 10358 feature->add_length = 4; 10359 feature->feature_data[0] = 0x01; 10360 feature->feature_data[2] = 0x03; 10361 feature = (struct scsi_get_config_feature *) 10362 &feature->feature_data[feature->add_length]; 10363 10364 f2a: /* DVD+RW */ 10365 scsi_ulto2b(0x002A, feature->feature_code); 10366 feature->flags = 0x04; 10367 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10368 feature->flags |= SGC_F_CURRENT; 10369 feature->add_length = 4; 10370 feature->feature_data[0] = 0x00; 10371 feature->feature_data[1] = 0x00; 10372 feature = (struct scsi_get_config_feature *) 10373 &feature->feature_data[feature->add_length]; 10374 10375 f2b: /* DVD+R */ 10376 scsi_ulto2b(0x002B, feature->feature_code); 10377 feature->flags = 0x00; 10378 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10379 feature->flags |= SGC_F_CURRENT; 10380 feature->add_length = 4; 10381 feature->feature_data[0] = 0x00; 10382 feature = (struct scsi_get_config_feature *) 10383 &feature->feature_data[feature->add_length]; 10384 10385 f3a: /* DVD+RW Dual Layer */ 10386 scsi_ulto2b(0x003A, feature->feature_code); 10387 feature->flags = 0x00; 10388 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10389 feature->flags |= SGC_F_CURRENT; 10390 feature->add_length = 4; 10391 feature->feature_data[0] = 0x00; 10392 feature->feature_data[1] = 0x00; 10393 feature = (struct scsi_get_config_feature *) 10394 &feature->feature_data[feature->add_length]; 10395 10396 f3b: /* DVD+R Dual Layer */ 10397 scsi_ulto2b(0x003B, feature->feature_code); 10398 feature->flags = 0x00; 10399 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10400 feature->flags |= SGC_F_CURRENT; 10401 feature->add_length = 4; 10402 feature->feature_data[0] = 0x00; 10403 feature = (struct scsi_get_config_feature *) 10404 &feature->feature_data[feature->add_length]; 10405 10406 done: 10407 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10408 if (rt == SGC_RT_SPECIFIC && data_len > 4) { 10409 feature = (struct scsi_get_config_feature *)(hdr + 1); 10410 if (scsi_2btoul(feature->feature_code) == starting) 10411 feature = (struct scsi_get_config_feature *) 10412 &feature->feature_data[feature->add_length]; 10413 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10414 } 10415 scsi_ulto4b(data_len - 4, hdr->data_length); 10416 ctsio->kern_data_len = min(data_len, alloc_len); 10417 ctsio->kern_total_len = ctsio->kern_data_len; 10418 10419 ctl_set_success(ctsio); 10420 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10421 ctsio->be_move_done = ctl_config_move_done; 10422 ctl_datamove((union ctl_io *)ctsio); 10423 return (CTL_RETVAL_COMPLETE); 10424 } 10425 10426 int 10427 ctl_get_event_status(struct ctl_scsiio *ctsio) 10428 { 10429 struct scsi_get_event_status_header *hdr; 10430 struct scsi_get_event_status *cdb; 10431 uint32_t alloc_len, data_len; 10432 int notif_class; 10433 10434 cdb = (struct scsi_get_event_status *)ctsio->cdb; 10435 if ((cdb->byte2 & SGESN_POLLED) == 0) { 10436 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 10437 /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 10438 ctl_done((union ctl_io *)ctsio); 10439 return (CTL_RETVAL_COMPLETE); 10440 } 10441 notif_class = cdb->notif_class; 10442 alloc_len = scsi_2btoul(cdb->length); 10443 10444 data_len = sizeof(struct scsi_get_event_status_header); 10445 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10446 ctsio->kern_sg_entries = 0; 10447 ctsio->kern_rel_offset = 0; 10448 ctsio->kern_data_len = min(data_len, alloc_len); 10449 ctsio->kern_total_len = ctsio->kern_data_len; 10450 10451 hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr; 10452 scsi_ulto2b(0, hdr->descr_length); 10453 hdr->nea_class = SGESN_NEA; 10454 hdr->supported_class = 0; 10455 10456 ctl_set_success(ctsio); 10457 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10458 ctsio->be_move_done = ctl_config_move_done; 10459 ctl_datamove((union ctl_io *)ctsio); 10460 return (CTL_RETVAL_COMPLETE); 10461 } 10462 10463 int 10464 ctl_mechanism_status(struct ctl_scsiio *ctsio) 10465 { 10466 struct scsi_mechanism_status_header *hdr; 10467 struct scsi_mechanism_status *cdb; 10468 uint32_t alloc_len, data_len; 10469 10470 cdb = (struct scsi_mechanism_status *)ctsio->cdb; 10471 alloc_len = scsi_2btoul(cdb->length); 10472 10473 data_len = sizeof(struct scsi_mechanism_status_header); 10474 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10475 ctsio->kern_sg_entries = 0; 10476 ctsio->kern_rel_offset = 0; 10477 ctsio->kern_data_len = min(data_len, alloc_len); 10478 ctsio->kern_total_len = ctsio->kern_data_len; 10479 10480 hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr; 10481 hdr->state1 = 0x00; 10482 hdr->state2 = 0xe0; 10483 scsi_ulto3b(0, hdr->lba); 10484 hdr->slots_num = 0; 10485 scsi_ulto2b(0, hdr->slots_length); 10486 10487 ctl_set_success(ctsio); 10488 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10489 ctsio->be_move_done = ctl_config_move_done; 10490 ctl_datamove((union ctl_io *)ctsio); 10491 return (CTL_RETVAL_COMPLETE); 10492 } 10493 10494 static void 10495 ctl_ultomsf(uint32_t lba, uint8_t *buf) 10496 { 10497 10498 lba += 150; 10499 buf[0] = 0; 10500 buf[1] = bin2bcd((lba / 75) / 60); 10501 buf[2] = bin2bcd((lba / 75) % 60); 10502 buf[3] = bin2bcd(lba % 75); 10503 } 10504 10505 int 10506 ctl_read_toc(struct ctl_scsiio *ctsio) 10507 { 10508 struct ctl_lun *lun = CTL_LUN(ctsio); 10509 struct scsi_read_toc_hdr *hdr; 10510 struct scsi_read_toc_type01_descr *descr; 10511 struct scsi_read_toc *cdb; 10512 uint32_t alloc_len, data_len; 10513 int format, msf; 10514 10515 cdb = (struct scsi_read_toc *)ctsio->cdb; 10516 msf = (cdb->byte2 & CD_MSF) != 0; 10517 format = cdb->format; 10518 alloc_len = scsi_2btoul(cdb->data_len); 10519 10520 data_len = sizeof(struct scsi_read_toc_hdr); 10521 if (format == 0) 10522 data_len += 2 * sizeof(struct scsi_read_toc_type01_descr); 10523 else 10524 data_len += sizeof(struct scsi_read_toc_type01_descr); 10525 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10526 ctsio->kern_sg_entries = 0; 10527 ctsio->kern_rel_offset = 0; 10528 ctsio->kern_data_len = min(data_len, alloc_len); 10529 ctsio->kern_total_len = ctsio->kern_data_len; 10530 10531 hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr; 10532 if (format == 0) { 10533 scsi_ulto2b(0x12, hdr->data_length); 10534 hdr->first = 1; 10535 hdr->last = 1; 10536 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10537 descr->addr_ctl = 0x14; 10538 descr->track_number = 1; 10539 if (msf) 10540 ctl_ultomsf(0, descr->track_start); 10541 else 10542 scsi_ulto4b(0, descr->track_start); 10543 descr++; 10544 descr->addr_ctl = 0x14; 10545 descr->track_number = 0xaa; 10546 if (msf) 10547 ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start); 10548 else 10549 scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start); 10550 } else { 10551 scsi_ulto2b(0x0a, hdr->data_length); 10552 hdr->first = 1; 10553 hdr->last = 1; 10554 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10555 descr->addr_ctl = 0x14; 10556 descr->track_number = 1; 10557 if (msf) 10558 ctl_ultomsf(0, descr->track_start); 10559 else 10560 scsi_ulto4b(0, descr->track_start); 10561 } 10562 10563 ctl_set_success(ctsio); 10564 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10565 ctsio->be_move_done = ctl_config_move_done; 10566 ctl_datamove((union ctl_io *)ctsio); 10567 return (CTL_RETVAL_COMPLETE); 10568 } 10569 10570 /* 10571 * For known CDB types, parse the LBA and length. 10572 */ 10573 static int 10574 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10575 { 10576 if (io->io_hdr.io_type != CTL_IO_SCSI) 10577 return (1); 10578 10579 switch (io->scsiio.cdb[0]) { 10580 case COMPARE_AND_WRITE: { 10581 struct scsi_compare_and_write *cdb; 10582 10583 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10584 10585 *lba = scsi_8btou64(cdb->addr); 10586 *len = cdb->length; 10587 break; 10588 } 10589 case READ_6: 10590 case WRITE_6: { 10591 struct scsi_rw_6 *cdb; 10592 10593 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10594 10595 *lba = scsi_3btoul(cdb->addr); 10596 /* only 5 bits are valid in the most significant address byte */ 10597 *lba &= 0x1fffff; 10598 *len = cdb->length; 10599 break; 10600 } 10601 case READ_10: 10602 case WRITE_10: { 10603 struct scsi_rw_10 *cdb; 10604 10605 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10606 10607 *lba = scsi_4btoul(cdb->addr); 10608 *len = scsi_2btoul(cdb->length); 10609 break; 10610 } 10611 case WRITE_VERIFY_10: { 10612 struct scsi_write_verify_10 *cdb; 10613 10614 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10615 10616 *lba = scsi_4btoul(cdb->addr); 10617 *len = scsi_2btoul(cdb->length); 10618 break; 10619 } 10620 case READ_12: 10621 case WRITE_12: { 10622 struct scsi_rw_12 *cdb; 10623 10624 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10625 10626 *lba = scsi_4btoul(cdb->addr); 10627 *len = scsi_4btoul(cdb->length); 10628 break; 10629 } 10630 case WRITE_VERIFY_12: { 10631 struct scsi_write_verify_12 *cdb; 10632 10633 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10634 10635 *lba = scsi_4btoul(cdb->addr); 10636 *len = scsi_4btoul(cdb->length); 10637 break; 10638 } 10639 case READ_16: 10640 case WRITE_16: { 10641 struct scsi_rw_16 *cdb; 10642 10643 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10644 10645 *lba = scsi_8btou64(cdb->addr); 10646 *len = scsi_4btoul(cdb->length); 10647 break; 10648 } 10649 case WRITE_ATOMIC_16: { 10650 struct scsi_write_atomic_16 *cdb; 10651 10652 cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb; 10653 10654 *lba = scsi_8btou64(cdb->addr); 10655 *len = scsi_2btoul(cdb->length); 10656 break; 10657 } 10658 case WRITE_VERIFY_16: { 10659 struct scsi_write_verify_16 *cdb; 10660 10661 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10662 10663 *lba = scsi_8btou64(cdb->addr); 10664 *len = scsi_4btoul(cdb->length); 10665 break; 10666 } 10667 case WRITE_SAME_10: { 10668 struct scsi_write_same_10 *cdb; 10669 10670 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10671 10672 *lba = scsi_4btoul(cdb->addr); 10673 *len = scsi_2btoul(cdb->length); 10674 break; 10675 } 10676 case WRITE_SAME_16: { 10677 struct scsi_write_same_16 *cdb; 10678 10679 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10680 10681 *lba = scsi_8btou64(cdb->addr); 10682 *len = scsi_4btoul(cdb->length); 10683 break; 10684 } 10685 case VERIFY_10: { 10686 struct scsi_verify_10 *cdb; 10687 10688 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10689 10690 *lba = scsi_4btoul(cdb->addr); 10691 *len = scsi_2btoul(cdb->length); 10692 break; 10693 } 10694 case VERIFY_12: { 10695 struct scsi_verify_12 *cdb; 10696 10697 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10698 10699 *lba = scsi_4btoul(cdb->addr); 10700 *len = scsi_4btoul(cdb->length); 10701 break; 10702 } 10703 case VERIFY_16: { 10704 struct scsi_verify_16 *cdb; 10705 10706 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10707 10708 *lba = scsi_8btou64(cdb->addr); 10709 *len = scsi_4btoul(cdb->length); 10710 break; 10711 } 10712 case UNMAP: { 10713 *lba = 0; 10714 *len = UINT64_MAX; 10715 break; 10716 } 10717 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10718 struct scsi_get_lba_status *cdb; 10719 10720 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 10721 *lba = scsi_8btou64(cdb->addr); 10722 *len = UINT32_MAX; 10723 break; 10724 } 10725 default: 10726 return (1); 10727 break; /* NOTREACHED */ 10728 } 10729 10730 return (0); 10731 } 10732 10733 static ctl_action 10734 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, 10735 bool seq) 10736 { 10737 uint64_t endlba1, endlba2; 10738 10739 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10740 endlba2 = lba2 + len2 - 1; 10741 10742 if ((endlba1 < lba2) || (endlba2 < lba1)) 10743 return (CTL_ACTION_PASS); 10744 else 10745 return (CTL_ACTION_BLOCK); 10746 } 10747 10748 static int 10749 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10750 { 10751 struct ctl_ptr_len_flags *ptrlen; 10752 struct scsi_unmap_desc *buf, *end, *range; 10753 uint64_t lba; 10754 uint32_t len; 10755 10756 /* If not UNMAP -- go other way. */ 10757 if (io->io_hdr.io_type != CTL_IO_SCSI || 10758 io->scsiio.cdb[0] != UNMAP) 10759 return (CTL_ACTION_ERROR); 10760 10761 /* If UNMAP without data -- block and wait for data. */ 10762 ptrlen = (struct ctl_ptr_len_flags *) 10763 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10764 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10765 ptrlen->ptr == NULL) 10766 return (CTL_ACTION_BLOCK); 10767 10768 /* UNMAP with data -- check for collision. */ 10769 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10770 end = buf + ptrlen->len / sizeof(*buf); 10771 for (range = buf; range < end; range++) { 10772 lba = scsi_8btou64(range->lba); 10773 len = scsi_4btoul(range->length); 10774 if ((lba < lba2 + len2) && (lba + len > lba2)) 10775 return (CTL_ACTION_BLOCK); 10776 } 10777 return (CTL_ACTION_PASS); 10778 } 10779 10780 static ctl_action 10781 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10782 { 10783 uint64_t lba1, lba2; 10784 uint64_t len1, len2; 10785 int retval; 10786 10787 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10788 return (CTL_ACTION_ERROR); 10789 10790 retval = ctl_extent_check_unmap(io1, lba2, len2); 10791 if (retval != CTL_ACTION_ERROR) 10792 return (retval); 10793 10794 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10795 return (CTL_ACTION_ERROR); 10796 10797 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10798 seq = FALSE; 10799 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 10800 } 10801 10802 static ctl_action 10803 ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) 10804 { 10805 uint64_t lba1, lba2; 10806 uint64_t len1, len2; 10807 10808 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10809 return (CTL_ACTION_PASS); 10810 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10811 return (CTL_ACTION_ERROR); 10812 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10813 return (CTL_ACTION_ERROR); 10814 10815 if (lba1 + len1 == lba2) 10816 return (CTL_ACTION_BLOCK); 10817 return (CTL_ACTION_PASS); 10818 } 10819 10820 static ctl_action 10821 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 10822 union ctl_io *ooa_io) 10823 { 10824 const struct ctl_cmd_entry *pending_entry, *ooa_entry; 10825 const ctl_serialize_action *serialize_row; 10826 10827 /* 10828 * The initiator attempted multiple untagged commands at the same 10829 * time. Can't do that. 10830 */ 10831 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10832 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10833 && ((pending_io->io_hdr.nexus.targ_port == 10834 ooa_io->io_hdr.nexus.targ_port) 10835 && (pending_io->io_hdr.nexus.initid == 10836 ooa_io->io_hdr.nexus.initid)) 10837 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10838 CTL_FLAG_STATUS_SENT)) == 0)) 10839 return (CTL_ACTION_OVERLAP); 10840 10841 /* 10842 * The initiator attempted to send multiple tagged commands with 10843 * the same ID. (It's fine if different initiators have the same 10844 * tag ID.) 10845 * 10846 * Even if all of those conditions are true, we don't kill the I/O 10847 * if the command ahead of us has been aborted. We won't end up 10848 * sending it to the FETD, and it's perfectly legal to resend a 10849 * command with the same tag number as long as the previous 10850 * instance of this tag number has been aborted somehow. 10851 */ 10852 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10853 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10854 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 10855 && ((pending_io->io_hdr.nexus.targ_port == 10856 ooa_io->io_hdr.nexus.targ_port) 10857 && (pending_io->io_hdr.nexus.initid == 10858 ooa_io->io_hdr.nexus.initid)) 10859 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10860 CTL_FLAG_STATUS_SENT)) == 0)) 10861 return (CTL_ACTION_OVERLAP_TAG); 10862 10863 /* 10864 * If we get a head of queue tag, SAM-3 says that we should 10865 * immediately execute it. 10866 * 10867 * What happens if this command would normally block for some other 10868 * reason? e.g. a request sense with a head of queue tag 10869 * immediately after a write. Normally that would block, but this 10870 * will result in its getting executed immediately... 10871 * 10872 * We currently return "pass" instead of "skip", so we'll end up 10873 * going through the rest of the queue to check for overlapped tags. 10874 * 10875 * XXX KDM check for other types of blockage first?? 10876 */ 10877 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10878 return (CTL_ACTION_PASS); 10879 10880 /* 10881 * Ordered tags have to block until all items ahead of them 10882 * have completed. If we get called with an ordered tag, we always 10883 * block, if something else is ahead of us in the queue. 10884 */ 10885 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 10886 return (CTL_ACTION_BLOCK); 10887 10888 /* 10889 * Simple tags get blocked until all head of queue and ordered tags 10890 * ahead of them have completed. I'm lumping untagged commands in 10891 * with simple tags here. XXX KDM is that the right thing to do? 10892 */ 10893 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10894 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 10895 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10896 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 10897 return (CTL_ACTION_BLOCK); 10898 10899 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); 10900 KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT, 10901 ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p", 10902 __func__, pending_entry->seridx, pending_io->scsiio.cdb[0], 10903 pending_io->scsiio.cdb[1], pending_io)); 10904 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); 10905 if (ooa_entry->seridx == CTL_SERIDX_INVLD) 10906 return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */ 10907 KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT, 10908 ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p", 10909 __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0], 10910 ooa_io->scsiio.cdb[1], ooa_io)); 10911 10912 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 10913 10914 switch (serialize_row[pending_entry->seridx]) { 10915 case CTL_SER_BLOCK: 10916 return (CTL_ACTION_BLOCK); 10917 case CTL_SER_EXTENT: 10918 return (ctl_extent_check(ooa_io, pending_io, 10919 (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10920 case CTL_SER_EXTENTOPT: 10921 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != 10922 SCP_QUEUE_ALG_UNRESTRICTED) 10923 return (ctl_extent_check(ooa_io, pending_io, 10924 (lun->be_lun && 10925 lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10926 return (CTL_ACTION_PASS); 10927 case CTL_SER_EXTENTSEQ: 10928 if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) 10929 return (ctl_extent_check_seq(ooa_io, pending_io)); 10930 return (CTL_ACTION_PASS); 10931 case CTL_SER_PASS: 10932 return (CTL_ACTION_PASS); 10933 case CTL_SER_BLOCKOPT: 10934 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != 10935 SCP_QUEUE_ALG_UNRESTRICTED) 10936 return (CTL_ACTION_BLOCK); 10937 return (CTL_ACTION_PASS); 10938 case CTL_SER_SKIP: 10939 return (CTL_ACTION_SKIP); 10940 default: 10941 panic("%s: Invalid serialization value %d for %d => %d", 10942 __func__, serialize_row[pending_entry->seridx], 10943 pending_entry->seridx, ooa_entry->seridx); 10944 } 10945 10946 return (CTL_ACTION_ERROR); 10947 } 10948 10949 /* 10950 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 10951 * Assumptions: 10952 * - pending_io is generally either incoming, or on the blocked queue 10953 * - starting I/O is the I/O we want to start the check with. 10954 */ 10955 static ctl_action 10956 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 10957 union ctl_io *starting_io) 10958 { 10959 union ctl_io *ooa_io; 10960 ctl_action action; 10961 10962 mtx_assert(&lun->lun_lock, MA_OWNED); 10963 10964 /* 10965 * Run back along the OOA queue, starting with the current 10966 * blocked I/O and going through every I/O before it on the 10967 * queue. If starting_io is NULL, we'll just end up returning 10968 * CTL_ACTION_PASS. 10969 */ 10970 for (ooa_io = starting_io; ooa_io != NULL; 10971 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 10972 ooa_links)){ 10973 10974 /* 10975 * This routine just checks to see whether 10976 * cur_blocked is blocked by ooa_io, which is ahead 10977 * of it in the queue. It doesn't queue/dequeue 10978 * cur_blocked. 10979 */ 10980 action = ctl_check_for_blockage(lun, pending_io, ooa_io); 10981 switch (action) { 10982 case CTL_ACTION_BLOCK: 10983 case CTL_ACTION_OVERLAP: 10984 case CTL_ACTION_OVERLAP_TAG: 10985 case CTL_ACTION_SKIP: 10986 case CTL_ACTION_ERROR: 10987 return (action); 10988 break; /* NOTREACHED */ 10989 case CTL_ACTION_PASS: 10990 break; 10991 default: 10992 panic("%s: Invalid action %d\n", __func__, action); 10993 } 10994 } 10995 10996 return (CTL_ACTION_PASS); 10997 } 10998 10999 /* 11000 * Assumptions: 11001 * - An I/O has just completed, and has been removed from the per-LUN OOA 11002 * queue, so some items on the blocked queue may now be unblocked. 11003 */ 11004 static int 11005 ctl_check_blocked(struct ctl_lun *lun) 11006 { 11007 struct ctl_softc *softc = lun->ctl_softc; 11008 union ctl_io *cur_blocked, *next_blocked; 11009 11010 mtx_assert(&lun->lun_lock, MA_OWNED); 11011 11012 /* 11013 * Run forward from the head of the blocked queue, checking each 11014 * entry against the I/Os prior to it on the OOA queue to see if 11015 * there is still any blockage. 11016 * 11017 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 11018 * with our removing a variable on it while it is traversing the 11019 * list. 11020 */ 11021 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 11022 cur_blocked != NULL; cur_blocked = next_blocked) { 11023 union ctl_io *prev_ooa; 11024 ctl_action action; 11025 11026 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 11027 blocked_links); 11028 11029 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 11030 ctl_ooaq, ooa_links); 11031 11032 /* 11033 * If cur_blocked happens to be the first item in the OOA 11034 * queue now, prev_ooa will be NULL, and the action 11035 * returned will just be CTL_ACTION_PASS. 11036 */ 11037 action = ctl_check_ooa(lun, cur_blocked, prev_ooa); 11038 11039 switch (action) { 11040 case CTL_ACTION_BLOCK: 11041 /* Nothing to do here, still blocked */ 11042 break; 11043 case CTL_ACTION_OVERLAP: 11044 case CTL_ACTION_OVERLAP_TAG: 11045 /* 11046 * This shouldn't happen! In theory we've already 11047 * checked this command for overlap... 11048 */ 11049 break; 11050 case CTL_ACTION_PASS: 11051 case CTL_ACTION_SKIP: { 11052 const struct ctl_cmd_entry *entry; 11053 11054 /* 11055 * The skip case shouldn't happen, this transaction 11056 * should have never made it onto the blocked queue. 11057 */ 11058 /* 11059 * This I/O is no longer blocked, we can remove it 11060 * from the blocked queue. Since this is a TAILQ 11061 * (doubly linked list), we can do O(1) removals 11062 * from any place on the list. 11063 */ 11064 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 11065 blocked_links); 11066 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 11067 11068 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 11069 (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){ 11070 /* 11071 * Need to send IO back to original side to 11072 * run 11073 */ 11074 union ctl_ha_msg msg_info; 11075 11076 cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11077 msg_info.hdr.original_sc = 11078 cur_blocked->io_hdr.original_sc; 11079 msg_info.hdr.serializing_sc = cur_blocked; 11080 msg_info.hdr.msg_type = CTL_MSG_R2R; 11081 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11082 sizeof(msg_info.hdr), M_NOWAIT); 11083 break; 11084 } 11085 entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL); 11086 11087 /* 11088 * Check this I/O for LUN state changes that may 11089 * have happened while this command was blocked. 11090 * The LUN state may have been changed by a command 11091 * ahead of us in the queue, so we need to re-check 11092 * for any states that can be caused by SCSI 11093 * commands. 11094 */ 11095 if (ctl_scsiio_lun_check(lun, entry, 11096 &cur_blocked->scsiio) == 0) { 11097 cur_blocked->io_hdr.flags |= 11098 CTL_FLAG_IS_WAS_ON_RTR; 11099 ctl_enqueue_rtr(cur_blocked); 11100 } else 11101 ctl_done(cur_blocked); 11102 break; 11103 } 11104 default: 11105 /* 11106 * This probably shouldn't happen -- we shouldn't 11107 * get CTL_ACTION_ERROR, or anything else. 11108 */ 11109 break; 11110 } 11111 } 11112 11113 return (CTL_RETVAL_COMPLETE); 11114 } 11115 11116 /* 11117 * This routine (with one exception) checks LUN flags that can be set by 11118 * commands ahead of us in the OOA queue. These flags have to be checked 11119 * when a command initially comes in, and when we pull a command off the 11120 * blocked queue and are preparing to execute it. The reason we have to 11121 * check these flags for commands on the blocked queue is that the LUN 11122 * state may have been changed by a command ahead of us while we're on the 11123 * blocked queue. 11124 * 11125 * Ordering is somewhat important with these checks, so please pay 11126 * careful attention to the placement of any new checks. 11127 */ 11128 static int 11129 ctl_scsiio_lun_check(struct ctl_lun *lun, 11130 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 11131 { 11132 struct ctl_softc *softc = lun->ctl_softc; 11133 int retval; 11134 uint32_t residx; 11135 11136 retval = 0; 11137 11138 mtx_assert(&lun->lun_lock, MA_OWNED); 11139 11140 /* 11141 * If this shelf is a secondary shelf controller, we may have to 11142 * reject some commands disallowed by HA mode and link state. 11143 */ 11144 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11145 if (softc->ha_link == CTL_HA_LINK_OFFLINE && 11146 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11147 ctl_set_lun_unavail(ctsio); 11148 retval = 1; 11149 goto bailout; 11150 } 11151 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && 11152 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11153 ctl_set_lun_transit(ctsio); 11154 retval = 1; 11155 goto bailout; 11156 } 11157 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && 11158 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { 11159 ctl_set_lun_standby(ctsio); 11160 retval = 1; 11161 goto bailout; 11162 } 11163 11164 /* The rest of checks are only done on executing side */ 11165 if (softc->ha_mode == CTL_HA_MODE_XFER) 11166 goto bailout; 11167 } 11168 11169 if (entry->pattern & CTL_LUN_PAT_WRITE) { 11170 if (lun->be_lun && 11171 lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { 11172 ctl_set_hw_write_protected(ctsio); 11173 retval = 1; 11174 goto bailout; 11175 } 11176 if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) { 11177 ctl_set_sense(ctsio, /*current_error*/ 1, 11178 /*sense_key*/ SSD_KEY_DATA_PROTECT, 11179 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 11180 retval = 1; 11181 goto bailout; 11182 } 11183 } 11184 11185 /* 11186 * Check for a reservation conflict. If this command isn't allowed 11187 * even on reserved LUNs, and if this initiator isn't the one who 11188 * reserved us, reject the command with a reservation conflict. 11189 */ 11190 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11191 if ((lun->flags & CTL_LUN_RESERVED) 11192 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 11193 if (lun->res_idx != residx) { 11194 ctl_set_reservation_conflict(ctsio); 11195 retval = 1; 11196 goto bailout; 11197 } 11198 } 11199 11200 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 11201 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 11202 /* No reservation or command is allowed. */; 11203 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 11204 (lun->pr_res_type == SPR_TYPE_WR_EX || 11205 lun->pr_res_type == SPR_TYPE_WR_EX_RO || 11206 lun->pr_res_type == SPR_TYPE_WR_EX_AR)) { 11207 /* The command is allowed for Write Exclusive resv. */; 11208 } else { 11209 /* 11210 * if we aren't registered or it's a res holder type 11211 * reservation and this isn't the res holder then set a 11212 * conflict. 11213 */ 11214 if (ctl_get_prkey(lun, residx) == 0 || 11215 (residx != lun->pr_res_idx && lun->pr_res_type < 4)) { 11216 ctl_set_reservation_conflict(ctsio); 11217 retval = 1; 11218 goto bailout; 11219 } 11220 } 11221 11222 if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) { 11223 if (lun->flags & CTL_LUN_EJECTED) 11224 ctl_set_lun_ejected(ctsio); 11225 else if (lun->flags & CTL_LUN_NO_MEDIA) { 11226 if (lun->flags & CTL_LUN_REMOVABLE) 11227 ctl_set_lun_no_media(ctsio); 11228 else 11229 ctl_set_lun_int_reqd(ctsio); 11230 } else if (lun->flags & CTL_LUN_STOPPED) 11231 ctl_set_lun_stopped(ctsio); 11232 else 11233 goto bailout; 11234 retval = 1; 11235 goto bailout; 11236 } 11237 11238 bailout: 11239 return (retval); 11240 } 11241 11242 static void 11243 ctl_failover_io(union ctl_io *io, int have_lock) 11244 { 11245 ctl_set_busy(&io->scsiio); 11246 ctl_done(io); 11247 } 11248 11249 static void 11250 ctl_failover_lun(union ctl_io *rio) 11251 { 11252 struct ctl_softc *softc = CTL_SOFTC(rio); 11253 struct ctl_lun *lun; 11254 struct ctl_io_hdr *io, *next_io; 11255 uint32_t targ_lun; 11256 11257 targ_lun = rio->io_hdr.nexus.targ_mapped_lun; 11258 CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", targ_lun)); 11259 11260 /* Find and lock the LUN. */ 11261 mtx_lock(&softc->ctl_lock); 11262 if (targ_lun > ctl_max_luns || 11263 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11264 mtx_unlock(&softc->ctl_lock); 11265 return; 11266 } 11267 mtx_lock(&lun->lun_lock); 11268 mtx_unlock(&softc->ctl_lock); 11269 if (lun->flags & CTL_LUN_DISABLED) { 11270 mtx_unlock(&lun->lun_lock); 11271 return; 11272 } 11273 11274 if (softc->ha_mode == CTL_HA_MODE_XFER) { 11275 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11276 /* We are master */ 11277 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11278 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11279 io->flags |= CTL_FLAG_ABORT; 11280 io->flags |= CTL_FLAG_FAILOVER; 11281 } else { /* This can be only due to DATAMOVE */ 11282 io->msg_type = CTL_MSG_DATAMOVE_DONE; 11283 io->flags &= ~CTL_FLAG_DMA_INPROG; 11284 io->flags |= CTL_FLAG_IO_ACTIVE; 11285 io->port_status = 31340; 11286 ctl_enqueue_isc((union ctl_io *)io); 11287 } 11288 } 11289 /* We are slave */ 11290 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11291 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11292 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11293 io->flags |= CTL_FLAG_FAILOVER; 11294 } else { 11295 ctl_set_busy(&((union ctl_io *)io)-> 11296 scsiio); 11297 ctl_done((union ctl_io *)io); 11298 } 11299 } 11300 } 11301 } else { /* SERIALIZE modes */ 11302 TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links, 11303 next_io) { 11304 /* We are master */ 11305 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11306 TAILQ_REMOVE(&lun->blocked_queue, io, 11307 blocked_links); 11308 io->flags &= ~CTL_FLAG_BLOCKED; 11309 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11310 ctl_free_io((union ctl_io *)io); 11311 } 11312 } 11313 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11314 /* We are master */ 11315 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11316 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11317 ctl_free_io((union ctl_io *)io); 11318 } 11319 /* We are slave */ 11320 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11321 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11322 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { 11323 ctl_set_busy(&((union ctl_io *)io)-> 11324 scsiio); 11325 ctl_done((union ctl_io *)io); 11326 } 11327 } 11328 } 11329 ctl_check_blocked(lun); 11330 } 11331 mtx_unlock(&lun->lun_lock); 11332 } 11333 11334 static int 11335 ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) 11336 { 11337 struct ctl_lun *lun; 11338 const struct ctl_cmd_entry *entry; 11339 uint32_t initidx, targ_lun; 11340 int retval = 0; 11341 11342 lun = NULL; 11343 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 11344 if (targ_lun < ctl_max_luns) 11345 lun = softc->ctl_luns[targ_lun]; 11346 if (lun) { 11347 /* 11348 * If the LUN is invalid, pretend that it doesn't exist. 11349 * It will go away as soon as all pending I/O has been 11350 * completed. 11351 */ 11352 mtx_lock(&lun->lun_lock); 11353 if (lun->flags & CTL_LUN_DISABLED) { 11354 mtx_unlock(&lun->lun_lock); 11355 lun = NULL; 11356 } 11357 } 11358 CTL_LUN(ctsio) = lun; 11359 if (lun) { 11360 CTL_BACKEND_LUN(ctsio) = lun->be_lun; 11361 11362 /* 11363 * Every I/O goes into the OOA queue for a particular LUN, 11364 * and stays there until completion. 11365 */ 11366 #ifdef CTL_TIME_IO 11367 if (TAILQ_EMPTY(&lun->ooa_queue)) 11368 lun->idle_time += getsbinuptime() - lun->last_busy; 11369 #endif 11370 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 11371 } 11372 11373 /* Get command entry and return error if it is unsuppotyed. */ 11374 entry = ctl_validate_command(ctsio); 11375 if (entry == NULL) { 11376 if (lun) 11377 mtx_unlock(&lun->lun_lock); 11378 return (retval); 11379 } 11380 11381 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 11382 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 11383 11384 /* 11385 * Check to see whether we can send this command to LUNs that don't 11386 * exist. This should pretty much only be the case for inquiry 11387 * and request sense. Further checks, below, really require having 11388 * a LUN, so we can't really check the command anymore. Just put 11389 * it on the rtr queue. 11390 */ 11391 if (lun == NULL) { 11392 if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) { 11393 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11394 ctl_enqueue_rtr((union ctl_io *)ctsio); 11395 return (retval); 11396 } 11397 11398 ctl_set_unsupported_lun(ctsio); 11399 ctl_done((union ctl_io *)ctsio); 11400 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); 11401 return (retval); 11402 } else { 11403 /* 11404 * Make sure we support this particular command on this LUN. 11405 * e.g., we don't support writes to the control LUN. 11406 */ 11407 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 11408 mtx_unlock(&lun->lun_lock); 11409 ctl_set_invalid_opcode(ctsio); 11410 ctl_done((union ctl_io *)ctsio); 11411 return (retval); 11412 } 11413 } 11414 11415 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11416 11417 /* 11418 * If we've got a request sense, it'll clear the contingent 11419 * allegiance condition. Otherwise, if we have a CA condition for 11420 * this initiator, clear it, because it sent down a command other 11421 * than request sense. 11422 */ 11423 if (ctsio->cdb[0] != REQUEST_SENSE) { 11424 struct scsi_sense_data *ps; 11425 11426 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; 11427 if (ps != NULL) 11428 ps[initidx % CTL_MAX_INIT_PER_PORT].error_code = 0; 11429 } 11430 11431 /* 11432 * If the command has this flag set, it handles its own unit 11433 * attention reporting, we shouldn't do anything. Otherwise we 11434 * check for any pending unit attentions, and send them back to the 11435 * initiator. We only do this when a command initially comes in, 11436 * not when we pull it off the blocked queue. 11437 * 11438 * According to SAM-3, section 5.3.2, the order that things get 11439 * presented back to the host is basically unit attentions caused 11440 * by some sort of reset event, busy status, reservation conflicts 11441 * or task set full, and finally any other status. 11442 * 11443 * One issue here is that some of the unit attentions we report 11444 * don't fall into the "reset" category (e.g. "reported luns data 11445 * has changed"). So reporting it here, before the reservation 11446 * check, may be technically wrong. I guess the only thing to do 11447 * would be to check for and report the reset events here, and then 11448 * check for the other unit attention types after we check for a 11449 * reservation conflict. 11450 * 11451 * XXX KDM need to fix this 11452 */ 11453 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11454 ctl_ua_type ua_type; 11455 u_int sense_len = 0; 11456 11457 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, 11458 &sense_len, SSD_TYPE_NONE); 11459 if (ua_type != CTL_UA_NONE) { 11460 mtx_unlock(&lun->lun_lock); 11461 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11462 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11463 ctsio->sense_len = sense_len; 11464 ctl_done((union ctl_io *)ctsio); 11465 return (retval); 11466 } 11467 } 11468 11469 11470 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 11471 mtx_unlock(&lun->lun_lock); 11472 ctl_done((union ctl_io *)ctsio); 11473 return (retval); 11474 } 11475 11476 /* 11477 * XXX CHD this is where we want to send IO to other side if 11478 * this LUN is secondary on this SC. We will need to make a copy 11479 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 11480 * the copy we send as FROM_OTHER. 11481 * We also need to stuff the address of the original IO so we can 11482 * find it easily. Something similar will need be done on the other 11483 * side so when we are done we can find the copy. 11484 */ 11485 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 11486 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && 11487 (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { 11488 union ctl_ha_msg msg_info; 11489 int isc_retval; 11490 11491 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11492 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11493 mtx_unlock(&lun->lun_lock); 11494 11495 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11496 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11497 msg_info.hdr.serializing_sc = NULL; 11498 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11499 msg_info.scsi.tag_num = ctsio->tag_num; 11500 msg_info.scsi.tag_type = ctsio->tag_type; 11501 msg_info.scsi.cdb_len = ctsio->cdb_len; 11502 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11503 11504 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11505 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), 11506 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { 11507 ctl_set_busy(ctsio); 11508 ctl_done((union ctl_io *)ctsio); 11509 return (retval); 11510 } 11511 return (retval); 11512 } 11513 11514 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11515 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11516 ctl_ooaq, ooa_links))) { 11517 case CTL_ACTION_BLOCK: 11518 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 11519 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 11520 blocked_links); 11521 mtx_unlock(&lun->lun_lock); 11522 return (retval); 11523 case CTL_ACTION_PASS: 11524 case CTL_ACTION_SKIP: 11525 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11526 mtx_unlock(&lun->lun_lock); 11527 ctl_enqueue_rtr((union ctl_io *)ctsio); 11528 break; 11529 case CTL_ACTION_OVERLAP: 11530 mtx_unlock(&lun->lun_lock); 11531 ctl_set_overlapped_cmd(ctsio); 11532 ctl_done((union ctl_io *)ctsio); 11533 break; 11534 case CTL_ACTION_OVERLAP_TAG: 11535 mtx_unlock(&lun->lun_lock); 11536 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11537 ctl_done((union ctl_io *)ctsio); 11538 break; 11539 case CTL_ACTION_ERROR: 11540 default: 11541 mtx_unlock(&lun->lun_lock); 11542 ctl_set_internal_failure(ctsio, 11543 /*sks_valid*/ 0, 11544 /*retry_count*/ 0); 11545 ctl_done((union ctl_io *)ctsio); 11546 break; 11547 } 11548 return (retval); 11549 } 11550 11551 const struct ctl_cmd_entry * 11552 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11553 { 11554 const struct ctl_cmd_entry *entry; 11555 int service_action; 11556 11557 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11558 if (sa) 11559 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11560 if (entry->flags & CTL_CMD_FLAG_SA5) { 11561 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11562 entry = &((const struct ctl_cmd_entry *) 11563 entry->execute)[service_action]; 11564 } 11565 return (entry); 11566 } 11567 11568 const struct ctl_cmd_entry * 11569 ctl_validate_command(struct ctl_scsiio *ctsio) 11570 { 11571 const struct ctl_cmd_entry *entry; 11572 int i, sa; 11573 uint8_t diff; 11574 11575 entry = ctl_get_cmd_entry(ctsio, &sa); 11576 if (entry->execute == NULL) { 11577 if (sa) 11578 ctl_set_invalid_field(ctsio, 11579 /*sks_valid*/ 1, 11580 /*command*/ 1, 11581 /*field*/ 1, 11582 /*bit_valid*/ 1, 11583 /*bit*/ 4); 11584 else 11585 ctl_set_invalid_opcode(ctsio); 11586 ctl_done((union ctl_io *)ctsio); 11587 return (NULL); 11588 } 11589 KASSERT(entry->length > 0, 11590 ("Not defined length for command 0x%02x/0x%02x", 11591 ctsio->cdb[0], ctsio->cdb[1])); 11592 for (i = 1; i < entry->length; i++) { 11593 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11594 if (diff == 0) 11595 continue; 11596 ctl_set_invalid_field(ctsio, 11597 /*sks_valid*/ 1, 11598 /*command*/ 1, 11599 /*field*/ i, 11600 /*bit_valid*/ 1, 11601 /*bit*/ fls(diff) - 1); 11602 ctl_done((union ctl_io *)ctsio); 11603 return (NULL); 11604 } 11605 return (entry); 11606 } 11607 11608 static int 11609 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11610 { 11611 11612 switch (lun_type) { 11613 case T_DIRECT: 11614 if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0) 11615 return (0); 11616 break; 11617 case T_PROCESSOR: 11618 if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) 11619 return (0); 11620 break; 11621 case T_CDROM: 11622 if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0) 11623 return (0); 11624 break; 11625 default: 11626 return (0); 11627 } 11628 return (1); 11629 } 11630 11631 static int 11632 ctl_scsiio(struct ctl_scsiio *ctsio) 11633 { 11634 int retval; 11635 const struct ctl_cmd_entry *entry; 11636 11637 retval = CTL_RETVAL_COMPLETE; 11638 11639 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11640 11641 entry = ctl_get_cmd_entry(ctsio, NULL); 11642 11643 /* 11644 * If this I/O has been aborted, just send it straight to 11645 * ctl_done() without executing it. 11646 */ 11647 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11648 ctl_done((union ctl_io *)ctsio); 11649 goto bailout; 11650 } 11651 11652 /* 11653 * All the checks should have been handled by ctl_scsiio_precheck(). 11654 * We should be clear now to just execute the I/O. 11655 */ 11656 retval = entry->execute(ctsio); 11657 11658 bailout: 11659 return (retval); 11660 } 11661 11662 static int 11663 ctl_target_reset(union ctl_io *io) 11664 { 11665 struct ctl_softc *softc = CTL_SOFTC(io); 11666 struct ctl_port *port = CTL_PORT(io); 11667 struct ctl_lun *lun; 11668 uint32_t initidx; 11669 ctl_ua_type ua_type; 11670 11671 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11672 union ctl_ha_msg msg_info; 11673 11674 msg_info.hdr.nexus = io->io_hdr.nexus; 11675 msg_info.task.task_action = io->taskio.task_action; 11676 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11677 msg_info.hdr.original_sc = NULL; 11678 msg_info.hdr.serializing_sc = NULL; 11679 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11680 sizeof(msg_info.task), M_WAITOK); 11681 } 11682 11683 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11684 if (io->taskio.task_action == CTL_TASK_TARGET_RESET) 11685 ua_type = CTL_UA_TARG_RESET; 11686 else 11687 ua_type = CTL_UA_BUS_RESET; 11688 mtx_lock(&softc->ctl_lock); 11689 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11690 if (port != NULL && 11691 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 11692 continue; 11693 ctl_do_lun_reset(lun, initidx, ua_type); 11694 } 11695 mtx_unlock(&softc->ctl_lock); 11696 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11697 return (0); 11698 } 11699 11700 /* 11701 * The LUN should always be set. The I/O is optional, and is used to 11702 * distinguish between I/Os sent by this initiator, and by other 11703 * initiators. We set unit attention for initiators other than this one. 11704 * SAM-3 is vague on this point. It does say that a unit attention should 11705 * be established for other initiators when a LUN is reset (see section 11706 * 5.7.3), but it doesn't specifically say that the unit attention should 11707 * be established for this particular initiator when a LUN is reset. Here 11708 * is the relevant text, from SAM-3 rev 8: 11709 * 11710 * 5.7.2 When a SCSI initiator port aborts its own tasks 11711 * 11712 * When a SCSI initiator port causes its own task(s) to be aborted, no 11713 * notification that the task(s) have been aborted shall be returned to 11714 * the SCSI initiator port other than the completion response for the 11715 * command or task management function action that caused the task(s) to 11716 * be aborted and notification(s) associated with related effects of the 11717 * action (e.g., a reset unit attention condition). 11718 * 11719 * XXX KDM for now, we're setting unit attention for all initiators. 11720 */ 11721 static void 11722 ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type) 11723 { 11724 union ctl_io *xio; 11725 int i; 11726 11727 mtx_lock(&lun->lun_lock); 11728 /* Abort tasks. */ 11729 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11730 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11731 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; 11732 } 11733 /* Clear CA. */ 11734 for (i = 0; i < ctl_max_ports; i++) { 11735 free(lun->pending_sense[i], M_CTL); 11736 lun->pending_sense[i] = NULL; 11737 } 11738 /* Clear reservation. */ 11739 lun->flags &= ~CTL_LUN_RESERVED; 11740 /* Clear prevent media removal. */ 11741 if (lun->prevent) { 11742 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11743 ctl_clear_mask(lun->prevent, i); 11744 lun->prevent_count = 0; 11745 } 11746 /* Clear TPC status */ 11747 ctl_tpc_lun_clear(lun, -1); 11748 /* Establish UA. */ 11749 #if 0 11750 ctl_est_ua_all(lun, initidx, ua_type); 11751 #else 11752 ctl_est_ua_all(lun, -1, ua_type); 11753 #endif 11754 mtx_unlock(&lun->lun_lock); 11755 } 11756 11757 static int 11758 ctl_lun_reset(union ctl_io *io) 11759 { 11760 struct ctl_softc *softc = CTL_SOFTC(io); 11761 struct ctl_lun *lun; 11762 uint32_t targ_lun, initidx; 11763 11764 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11765 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11766 mtx_lock(&softc->ctl_lock); 11767 if (targ_lun >= ctl_max_luns || 11768 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11769 mtx_unlock(&softc->ctl_lock); 11770 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11771 return (1); 11772 } 11773 ctl_do_lun_reset(lun, initidx, CTL_UA_LUN_RESET); 11774 mtx_unlock(&softc->ctl_lock); 11775 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11776 11777 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { 11778 union ctl_ha_msg msg_info; 11779 11780 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11781 msg_info.hdr.nexus = io->io_hdr.nexus; 11782 msg_info.task.task_action = CTL_TASK_LUN_RESET; 11783 msg_info.hdr.original_sc = NULL; 11784 msg_info.hdr.serializing_sc = NULL; 11785 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11786 sizeof(msg_info.task), M_WAITOK); 11787 } 11788 return (0); 11789 } 11790 11791 static void 11792 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 11793 int other_sc) 11794 { 11795 union ctl_io *xio; 11796 11797 mtx_assert(&lun->lun_lock, MA_OWNED); 11798 11799 /* 11800 * Run through the OOA queue and attempt to find the given I/O. 11801 * The target port, initiator ID, tag type and tag number have to 11802 * match the values that we got from the initiator. If we have an 11803 * untagged command to abort, simply abort the first untagged command 11804 * we come to. We only allow one untagged command at a time of course. 11805 */ 11806 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11807 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11808 11809 if ((targ_port == UINT32_MAX || 11810 targ_port == xio->io_hdr.nexus.targ_port) && 11811 (init_id == UINT32_MAX || 11812 init_id == xio->io_hdr.nexus.initid)) { 11813 if (targ_port != xio->io_hdr.nexus.targ_port || 11814 init_id != xio->io_hdr.nexus.initid) 11815 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; 11816 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11817 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11818 union ctl_ha_msg msg_info; 11819 11820 msg_info.hdr.nexus = xio->io_hdr.nexus; 11821 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11822 msg_info.task.tag_num = xio->scsiio.tag_num; 11823 msg_info.task.tag_type = xio->scsiio.tag_type; 11824 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11825 msg_info.hdr.original_sc = NULL; 11826 msg_info.hdr.serializing_sc = NULL; 11827 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11828 sizeof(msg_info.task), M_NOWAIT); 11829 } 11830 } 11831 } 11832 } 11833 11834 static int 11835 ctl_abort_task_set(union ctl_io *io) 11836 { 11837 struct ctl_softc *softc = CTL_SOFTC(io); 11838 struct ctl_lun *lun; 11839 uint32_t targ_lun; 11840 11841 /* 11842 * Look up the LUN. 11843 */ 11844 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11845 mtx_lock(&softc->ctl_lock); 11846 if (targ_lun >= ctl_max_luns || 11847 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11848 mtx_unlock(&softc->ctl_lock); 11849 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11850 return (1); 11851 } 11852 11853 mtx_lock(&lun->lun_lock); 11854 mtx_unlock(&softc->ctl_lock); 11855 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 11856 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11857 io->io_hdr.nexus.initid, 11858 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11859 } else { /* CTL_TASK_CLEAR_TASK_SET */ 11860 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 11861 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11862 } 11863 mtx_unlock(&lun->lun_lock); 11864 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11865 return (0); 11866 } 11867 11868 static void 11869 ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, 11870 ctl_ua_type ua_type) 11871 { 11872 struct ctl_lun *lun; 11873 struct scsi_sense_data *ps; 11874 uint32_t p, i; 11875 11876 p = initidx / CTL_MAX_INIT_PER_PORT; 11877 i = initidx % CTL_MAX_INIT_PER_PORT; 11878 mtx_lock(&softc->ctl_lock); 11879 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11880 mtx_lock(&lun->lun_lock); 11881 /* Abort tasks. */ 11882 ctl_abort_tasks_lun(lun, p, i, 1); 11883 /* Clear CA. */ 11884 ps = lun->pending_sense[p]; 11885 if (ps != NULL) 11886 ps[i].error_code = 0; 11887 /* Clear reservation. */ 11888 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) 11889 lun->flags &= ~CTL_LUN_RESERVED; 11890 /* Clear prevent media removal. */ 11891 if (lun->prevent && ctl_is_set(lun->prevent, initidx)) { 11892 ctl_clear_mask(lun->prevent, initidx); 11893 lun->prevent_count--; 11894 } 11895 /* Clear TPC status */ 11896 ctl_tpc_lun_clear(lun, initidx); 11897 /* Establish UA. */ 11898 ctl_est_ua(lun, initidx, ua_type); 11899 mtx_unlock(&lun->lun_lock); 11900 } 11901 mtx_unlock(&softc->ctl_lock); 11902 } 11903 11904 static int 11905 ctl_i_t_nexus_reset(union ctl_io *io) 11906 { 11907 struct ctl_softc *softc = CTL_SOFTC(io); 11908 uint32_t initidx; 11909 11910 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11911 union ctl_ha_msg msg_info; 11912 11913 msg_info.hdr.nexus = io->io_hdr.nexus; 11914 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; 11915 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11916 msg_info.hdr.original_sc = NULL; 11917 msg_info.hdr.serializing_sc = NULL; 11918 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11919 sizeof(msg_info.task), M_WAITOK); 11920 } 11921 11922 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11923 ctl_i_t_nexus_loss(softc, initidx, CTL_UA_I_T_NEXUS_LOSS); 11924 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11925 return (0); 11926 } 11927 11928 static int 11929 ctl_abort_task(union ctl_io *io) 11930 { 11931 struct ctl_softc *softc = CTL_SOFTC(io); 11932 union ctl_io *xio; 11933 struct ctl_lun *lun; 11934 #if 0 11935 struct sbuf sb; 11936 char printbuf[128]; 11937 #endif 11938 int found; 11939 uint32_t targ_lun; 11940 11941 found = 0; 11942 11943 /* 11944 * Look up the LUN. 11945 */ 11946 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11947 mtx_lock(&softc->ctl_lock); 11948 if (targ_lun >= ctl_max_luns || 11949 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11950 mtx_unlock(&softc->ctl_lock); 11951 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11952 return (1); 11953 } 11954 11955 #if 0 11956 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 11957 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 11958 #endif 11959 11960 mtx_lock(&lun->lun_lock); 11961 mtx_unlock(&softc->ctl_lock); 11962 /* 11963 * Run through the OOA queue and attempt to find the given I/O. 11964 * The target port, initiator ID, tag type and tag number have to 11965 * match the values that we got from the initiator. If we have an 11966 * untagged command to abort, simply abort the first untagged command 11967 * we come to. We only allow one untagged command at a time of course. 11968 */ 11969 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11970 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11971 #if 0 11972 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 11973 11974 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 11975 lun->lun, xio->scsiio.tag_num, 11976 xio->scsiio.tag_type, 11977 (xio->io_hdr.blocked_links.tqe_prev 11978 == NULL) ? "" : " BLOCKED", 11979 (xio->io_hdr.flags & 11980 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 11981 (xio->io_hdr.flags & 11982 CTL_FLAG_ABORT) ? " ABORT" : "", 11983 (xio->io_hdr.flags & 11984 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); 11985 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 11986 sbuf_finish(&sb); 11987 printf("%s\n", sbuf_data(&sb)); 11988 #endif 11989 11990 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 11991 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 11992 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 11993 continue; 11994 11995 /* 11996 * If the abort says that the task is untagged, the 11997 * task in the queue must be untagged. Otherwise, 11998 * we just check to see whether the tag numbers 11999 * match. This is because the QLogic firmware 12000 * doesn't pass back the tag type in an abort 12001 * request. 12002 */ 12003 #if 0 12004 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 12005 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 12006 || (xio->scsiio.tag_num == io->taskio.tag_num)) 12007 #endif 12008 /* 12009 * XXX KDM we've got problems with FC, because it 12010 * doesn't send down a tag type with aborts. So we 12011 * can only really go by the tag number... 12012 * This may cause problems with parallel SCSI. 12013 * Need to figure that out!! 12014 */ 12015 if (xio->scsiio.tag_num == io->taskio.tag_num) { 12016 xio->io_hdr.flags |= CTL_FLAG_ABORT; 12017 found = 1; 12018 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 12019 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 12020 union ctl_ha_msg msg_info; 12021 12022 msg_info.hdr.nexus = io->io_hdr.nexus; 12023 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 12024 msg_info.task.tag_num = io->taskio.tag_num; 12025 msg_info.task.tag_type = io->taskio.tag_type; 12026 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12027 msg_info.hdr.original_sc = NULL; 12028 msg_info.hdr.serializing_sc = NULL; 12029 #if 0 12030 printf("Sent Abort to other side\n"); 12031 #endif 12032 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 12033 sizeof(msg_info.task), M_NOWAIT); 12034 } 12035 #if 0 12036 printf("ctl_abort_task: found I/O to abort\n"); 12037 #endif 12038 } 12039 } 12040 mtx_unlock(&lun->lun_lock); 12041 12042 if (found == 0) { 12043 /* 12044 * This isn't really an error. It's entirely possible for 12045 * the abort and command completion to cross on the wire. 12046 * This is more of an informative/diagnostic error. 12047 */ 12048 #if 0 12049 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " 12050 "%u:%u:%u tag %d type %d\n", 12051 io->io_hdr.nexus.initid, 12052 io->io_hdr.nexus.targ_port, 12053 io->io_hdr.nexus.targ_lun, io->taskio.tag_num, 12054 io->taskio.tag_type); 12055 #endif 12056 } 12057 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12058 return (0); 12059 } 12060 12061 static int 12062 ctl_query_task(union ctl_io *io, int task_set) 12063 { 12064 struct ctl_softc *softc = CTL_SOFTC(io); 12065 union ctl_io *xio; 12066 struct ctl_lun *lun; 12067 int found = 0; 12068 uint32_t targ_lun; 12069 12070 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12071 mtx_lock(&softc->ctl_lock); 12072 if (targ_lun >= ctl_max_luns || 12073 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12074 mtx_unlock(&softc->ctl_lock); 12075 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12076 return (1); 12077 } 12078 mtx_lock(&lun->lun_lock); 12079 mtx_unlock(&softc->ctl_lock); 12080 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12081 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12082 12083 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 12084 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 12085 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 12086 continue; 12087 12088 if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) { 12089 found = 1; 12090 break; 12091 } 12092 } 12093 mtx_unlock(&lun->lun_lock); 12094 if (found) 12095 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12096 else 12097 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12098 return (0); 12099 } 12100 12101 static int 12102 ctl_query_async_event(union ctl_io *io) 12103 { 12104 struct ctl_softc *softc = CTL_SOFTC(io); 12105 struct ctl_lun *lun; 12106 ctl_ua_type ua; 12107 uint32_t targ_lun, initidx; 12108 12109 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12110 mtx_lock(&softc->ctl_lock); 12111 if (targ_lun >= ctl_max_luns || 12112 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12113 mtx_unlock(&softc->ctl_lock); 12114 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12115 return (1); 12116 } 12117 mtx_lock(&lun->lun_lock); 12118 mtx_unlock(&softc->ctl_lock); 12119 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12120 ua = ctl_build_qae(lun, initidx, io->taskio.task_resp); 12121 mtx_unlock(&lun->lun_lock); 12122 if (ua != CTL_UA_NONE) 12123 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12124 else 12125 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12126 return (0); 12127 } 12128 12129 static void 12130 ctl_run_task(union ctl_io *io) 12131 { 12132 int retval = 1; 12133 12134 CTL_DEBUG_PRINT(("ctl_run_task\n")); 12135 KASSERT(io->io_hdr.io_type == CTL_IO_TASK, 12136 ("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type)); 12137 io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED; 12138 bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp)); 12139 switch (io->taskio.task_action) { 12140 case CTL_TASK_ABORT_TASK: 12141 retval = ctl_abort_task(io); 12142 break; 12143 case CTL_TASK_ABORT_TASK_SET: 12144 case CTL_TASK_CLEAR_TASK_SET: 12145 retval = ctl_abort_task_set(io); 12146 break; 12147 case CTL_TASK_CLEAR_ACA: 12148 break; 12149 case CTL_TASK_I_T_NEXUS_RESET: 12150 retval = ctl_i_t_nexus_reset(io); 12151 break; 12152 case CTL_TASK_LUN_RESET: 12153 retval = ctl_lun_reset(io); 12154 break; 12155 case CTL_TASK_TARGET_RESET: 12156 case CTL_TASK_BUS_RESET: 12157 retval = ctl_target_reset(io); 12158 break; 12159 case CTL_TASK_PORT_LOGIN: 12160 break; 12161 case CTL_TASK_PORT_LOGOUT: 12162 break; 12163 case CTL_TASK_QUERY_TASK: 12164 retval = ctl_query_task(io, 0); 12165 break; 12166 case CTL_TASK_QUERY_TASK_SET: 12167 retval = ctl_query_task(io, 1); 12168 break; 12169 case CTL_TASK_QUERY_ASYNC_EVENT: 12170 retval = ctl_query_async_event(io); 12171 break; 12172 default: 12173 printf("%s: got unknown task management event %d\n", 12174 __func__, io->taskio.task_action); 12175 break; 12176 } 12177 if (retval == 0) 12178 io->io_hdr.status = CTL_SUCCESS; 12179 else 12180 io->io_hdr.status = CTL_ERROR; 12181 ctl_done(io); 12182 } 12183 12184 /* 12185 * For HA operation. Handle commands that come in from the other 12186 * controller. 12187 */ 12188 static void 12189 ctl_handle_isc(union ctl_io *io) 12190 { 12191 struct ctl_softc *softc = CTL_SOFTC(io); 12192 struct ctl_lun *lun; 12193 const struct ctl_cmd_entry *entry; 12194 uint32_t targ_lun; 12195 12196 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12197 switch (io->io_hdr.msg_type) { 12198 case CTL_MSG_SERIALIZE: 12199 ctl_serialize_other_sc_cmd(&io->scsiio); 12200 break; 12201 case CTL_MSG_R2R: /* Only used in SER_ONLY mode. */ 12202 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 12203 if (targ_lun >= ctl_max_luns || 12204 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12205 ctl_done(io); 12206 break; 12207 } 12208 mtx_lock(&lun->lun_lock); 12209 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { 12210 mtx_unlock(&lun->lun_lock); 12211 ctl_done(io); 12212 break; 12213 } 12214 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12215 mtx_unlock(&lun->lun_lock); 12216 ctl_enqueue_rtr(io); 12217 break; 12218 case CTL_MSG_FINISH_IO: 12219 if (softc->ha_mode == CTL_HA_MODE_XFER) { 12220 ctl_done(io); 12221 break; 12222 } 12223 if (targ_lun >= ctl_max_luns || 12224 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12225 ctl_free_io(io); 12226 break; 12227 } 12228 mtx_lock(&lun->lun_lock); 12229 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 12230 ctl_check_blocked(lun); 12231 mtx_unlock(&lun->lun_lock); 12232 ctl_free_io(io); 12233 break; 12234 case CTL_MSG_PERS_ACTION: 12235 ctl_hndl_per_res_out_on_other_sc(io); 12236 ctl_free_io(io); 12237 break; 12238 case CTL_MSG_BAD_JUJU: 12239 ctl_done(io); 12240 break; 12241 case CTL_MSG_DATAMOVE: /* Only used in XFER mode */ 12242 ctl_datamove_remote(io); 12243 break; 12244 case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */ 12245 io->scsiio.be_move_done(io); 12246 break; 12247 case CTL_MSG_FAILOVER: 12248 ctl_failover_lun(io); 12249 ctl_free_io(io); 12250 break; 12251 default: 12252 printf("%s: Invalid message type %d\n", 12253 __func__, io->io_hdr.msg_type); 12254 ctl_free_io(io); 12255 break; 12256 } 12257 12258 } 12259 12260 12261 /* 12262 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12263 * there is no match. 12264 */ 12265 static ctl_lun_error_pattern 12266 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12267 { 12268 const struct ctl_cmd_entry *entry; 12269 ctl_lun_error_pattern filtered_pattern, pattern; 12270 12271 pattern = desc->error_pattern; 12272 12273 /* 12274 * XXX KDM we need more data passed into this function to match a 12275 * custom pattern, and we actually need to implement custom pattern 12276 * matching. 12277 */ 12278 if (pattern & CTL_LUN_PAT_CMD) 12279 return (CTL_LUN_PAT_CMD); 12280 12281 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12282 return (CTL_LUN_PAT_ANY); 12283 12284 entry = ctl_get_cmd_entry(ctsio, NULL); 12285 12286 filtered_pattern = entry->pattern & pattern; 12287 12288 /* 12289 * If the user requested specific flags in the pattern (e.g. 12290 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12291 * flags. 12292 * 12293 * If the user did not specify any flags, it doesn't matter whether 12294 * or not the command supports the flags. 12295 */ 12296 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12297 (pattern & ~CTL_LUN_PAT_MASK)) 12298 return (CTL_LUN_PAT_NONE); 12299 12300 /* 12301 * If the user asked for a range check, see if the requested LBA 12302 * range overlaps with this command's LBA range. 12303 */ 12304 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12305 uint64_t lba1; 12306 uint64_t len1; 12307 ctl_action action; 12308 int retval; 12309 12310 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12311 if (retval != 0) 12312 return (CTL_LUN_PAT_NONE); 12313 12314 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12315 desc->lba_range.len, FALSE); 12316 /* 12317 * A "pass" means that the LBA ranges don't overlap, so 12318 * this doesn't match the user's range criteria. 12319 */ 12320 if (action == CTL_ACTION_PASS) 12321 return (CTL_LUN_PAT_NONE); 12322 } 12323 12324 return (filtered_pattern); 12325 } 12326 12327 static void 12328 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12329 { 12330 struct ctl_error_desc *desc, *desc2; 12331 12332 mtx_assert(&lun->lun_lock, MA_OWNED); 12333 12334 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12335 ctl_lun_error_pattern pattern; 12336 /* 12337 * Check to see whether this particular command matches 12338 * the pattern in the descriptor. 12339 */ 12340 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12341 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12342 continue; 12343 12344 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12345 case CTL_LUN_INJ_ABORTED: 12346 ctl_set_aborted(&io->scsiio); 12347 break; 12348 case CTL_LUN_INJ_MEDIUM_ERR: 12349 ctl_set_medium_error(&io->scsiio, 12350 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != 12351 CTL_FLAG_DATA_OUT); 12352 break; 12353 case CTL_LUN_INJ_UA: 12354 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12355 * OCCURRED */ 12356 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12357 break; 12358 case CTL_LUN_INJ_CUSTOM: 12359 /* 12360 * We're assuming the user knows what he is doing. 12361 * Just copy the sense information without doing 12362 * checks. 12363 */ 12364 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12365 MIN(sizeof(desc->custom_sense), 12366 sizeof(io->scsiio.sense_data))); 12367 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12368 io->scsiio.sense_len = SSD_FULL_SIZE; 12369 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12370 break; 12371 case CTL_LUN_INJ_NONE: 12372 default: 12373 /* 12374 * If this is an error injection type we don't know 12375 * about, clear the continuous flag (if it is set) 12376 * so it will get deleted below. 12377 */ 12378 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12379 break; 12380 } 12381 /* 12382 * By default, each error injection action is a one-shot 12383 */ 12384 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12385 continue; 12386 12387 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12388 12389 free(desc, M_CTL); 12390 } 12391 } 12392 12393 #ifdef CTL_IO_DELAY 12394 static void 12395 ctl_datamove_timer_wakeup(void *arg) 12396 { 12397 union ctl_io *io; 12398 12399 io = (union ctl_io *)arg; 12400 12401 ctl_datamove(io); 12402 } 12403 #endif /* CTL_IO_DELAY */ 12404 12405 void 12406 ctl_datamove(union ctl_io *io) 12407 { 12408 void (*fe_datamove)(union ctl_io *io); 12409 12410 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12411 12412 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12413 12414 /* No data transferred yet. Frontend must update this when done. */ 12415 io->scsiio.kern_data_resid = io->scsiio.kern_data_len; 12416 12417 #ifdef CTL_TIME_IO 12418 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12419 char str[256]; 12420 char path_str[64]; 12421 struct sbuf sb; 12422 12423 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12424 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12425 12426 sbuf_cat(&sb, path_str); 12427 switch (io->io_hdr.io_type) { 12428 case CTL_IO_SCSI: 12429 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12430 sbuf_printf(&sb, "\n"); 12431 sbuf_cat(&sb, path_str); 12432 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12433 io->scsiio.tag_num, io->scsiio.tag_type); 12434 break; 12435 case CTL_IO_TASK: 12436 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12437 "Tag Type: %d\n", io->taskio.task_action, 12438 io->taskio.tag_num, io->taskio.tag_type); 12439 break; 12440 default: 12441 panic("%s: Invalid CTL I/O type %d\n", 12442 __func__, io->io_hdr.io_type); 12443 } 12444 sbuf_cat(&sb, path_str); 12445 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12446 (intmax_t)time_uptime - io->io_hdr.start_time); 12447 sbuf_finish(&sb); 12448 printf("%s", sbuf_data(&sb)); 12449 } 12450 #endif /* CTL_TIME_IO */ 12451 12452 #ifdef CTL_IO_DELAY 12453 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12454 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12455 } else { 12456 struct ctl_lun *lun; 12457 12458 lun = CTL_LUN(io); 12459 if ((lun != NULL) 12460 && (lun->delay_info.datamove_delay > 0)) { 12461 12462 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 12463 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12464 callout_reset(&io->io_hdr.delay_callout, 12465 lun->delay_info.datamove_delay * hz, 12466 ctl_datamove_timer_wakeup, io); 12467 if (lun->delay_info.datamove_type == 12468 CTL_DELAY_TYPE_ONESHOT) 12469 lun->delay_info.datamove_delay = 0; 12470 return; 12471 } 12472 } 12473 #endif 12474 12475 /* 12476 * This command has been aborted. Set the port status, so we fail 12477 * the data move. 12478 */ 12479 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12480 printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", 12481 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12482 io->io_hdr.nexus.targ_port, 12483 io->io_hdr.nexus.targ_lun); 12484 io->io_hdr.port_status = 31337; 12485 /* 12486 * Note that the backend, in this case, will get the 12487 * callback in its context. In other cases it may get 12488 * called in the frontend's interrupt thread context. 12489 */ 12490 io->scsiio.be_move_done(io); 12491 return; 12492 } 12493 12494 /* Don't confuse frontend with zero length data move. */ 12495 if (io->scsiio.kern_data_len == 0) { 12496 io->scsiio.be_move_done(io); 12497 return; 12498 } 12499 12500 fe_datamove = CTL_PORT(io)->fe_datamove; 12501 fe_datamove(io); 12502 } 12503 12504 static void 12505 ctl_send_datamove_done(union ctl_io *io, int have_lock) 12506 { 12507 union ctl_ha_msg msg; 12508 #ifdef CTL_TIME_IO 12509 struct bintime cur_bt; 12510 #endif 12511 12512 memset(&msg, 0, sizeof(msg)); 12513 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12514 msg.hdr.original_sc = io; 12515 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12516 msg.hdr.nexus = io->io_hdr.nexus; 12517 msg.hdr.status = io->io_hdr.status; 12518 msg.scsi.kern_data_resid = io->scsiio.kern_data_resid; 12519 msg.scsi.tag_num = io->scsiio.tag_num; 12520 msg.scsi.tag_type = io->scsiio.tag_type; 12521 msg.scsi.scsi_status = io->scsiio.scsi_status; 12522 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12523 io->scsiio.sense_len); 12524 msg.scsi.sense_len = io->scsiio.sense_len; 12525 msg.scsi.port_status = io->io_hdr.port_status; 12526 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12527 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12528 ctl_failover_io(io, /*have_lock*/ have_lock); 12529 return; 12530 } 12531 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12532 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12533 msg.scsi.sense_len, M_WAITOK); 12534 12535 #ifdef CTL_TIME_IO 12536 getbinuptime(&cur_bt); 12537 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 12538 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 12539 #endif 12540 io->io_hdr.num_dmas++; 12541 } 12542 12543 /* 12544 * The DMA to the remote side is done, now we need to tell the other side 12545 * we're done so it can continue with its data movement. 12546 */ 12547 static void 12548 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12549 { 12550 union ctl_io *io; 12551 uint32_t i; 12552 12553 io = rq->context; 12554 12555 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12556 printf("%s: ISC DMA write failed with error %d", __func__, 12557 rq->ret); 12558 ctl_set_internal_failure(&io->scsiio, 12559 /*sks_valid*/ 1, 12560 /*retry_count*/ rq->ret); 12561 } 12562 12563 ctl_dt_req_free(rq); 12564 12565 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12566 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12567 free(io->io_hdr.remote_sglist, M_CTL); 12568 io->io_hdr.remote_sglist = NULL; 12569 io->io_hdr.local_sglist = NULL; 12570 12571 /* 12572 * The data is in local and remote memory, so now we need to send 12573 * status (good or back) back to the other side. 12574 */ 12575 ctl_send_datamove_done(io, /*have_lock*/ 0); 12576 } 12577 12578 /* 12579 * We've moved the data from the host/controller into local memory. Now we 12580 * need to push it over to the remote controller's memory. 12581 */ 12582 static int 12583 ctl_datamove_remote_dm_write_cb(union ctl_io *io) 12584 { 12585 int retval; 12586 12587 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 12588 ctl_datamove_remote_write_cb); 12589 return (retval); 12590 } 12591 12592 static void 12593 ctl_datamove_remote_write(union ctl_io *io) 12594 { 12595 int retval; 12596 void (*fe_datamove)(union ctl_io *io); 12597 12598 /* 12599 * - Get the data from the host/HBA into local memory. 12600 * - DMA memory from the local controller to the remote controller. 12601 * - Send status back to the remote controller. 12602 */ 12603 12604 retval = ctl_datamove_remote_sgl_setup(io); 12605 if (retval != 0) 12606 return; 12607 12608 /* Switch the pointer over so the FETD knows what to do */ 12609 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12610 12611 /* 12612 * Use a custom move done callback, since we need to send completion 12613 * back to the other controller, not to the backend on this side. 12614 */ 12615 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12616 12617 fe_datamove = CTL_PORT(io)->fe_datamove; 12618 fe_datamove(io); 12619 } 12620 12621 static int 12622 ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12623 { 12624 #if 0 12625 char str[256]; 12626 char path_str[64]; 12627 struct sbuf sb; 12628 #endif 12629 uint32_t i; 12630 12631 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12632 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12633 free(io->io_hdr.remote_sglist, M_CTL); 12634 io->io_hdr.remote_sglist = NULL; 12635 io->io_hdr.local_sglist = NULL; 12636 12637 #if 0 12638 scsi_path_string(io, path_str, sizeof(path_str)); 12639 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12640 sbuf_cat(&sb, path_str); 12641 scsi_command_string(&io->scsiio, NULL, &sb); 12642 sbuf_printf(&sb, "\n"); 12643 sbuf_cat(&sb, path_str); 12644 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12645 io->scsiio.tag_num, io->scsiio.tag_type); 12646 sbuf_cat(&sb, path_str); 12647 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 12648 io->io_hdr.flags, io->io_hdr.status); 12649 sbuf_finish(&sb); 12650 printk("%s", sbuf_data(&sb)); 12651 #endif 12652 12653 12654 /* 12655 * The read is done, now we need to send status (good or bad) back 12656 * to the other side. 12657 */ 12658 ctl_send_datamove_done(io, /*have_lock*/ 0); 12659 12660 return (0); 12661 } 12662 12663 static void 12664 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12665 { 12666 union ctl_io *io; 12667 void (*fe_datamove)(union ctl_io *io); 12668 12669 io = rq->context; 12670 12671 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12672 printf("%s: ISC DMA read failed with error %d\n", __func__, 12673 rq->ret); 12674 ctl_set_internal_failure(&io->scsiio, 12675 /*sks_valid*/ 1, 12676 /*retry_count*/ rq->ret); 12677 } 12678 12679 ctl_dt_req_free(rq); 12680 12681 /* Switch the pointer over so the FETD knows what to do */ 12682 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12683 12684 /* 12685 * Use a custom move done callback, since we need to send completion 12686 * back to the other controller, not to the backend on this side. 12687 */ 12688 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12689 12690 /* XXX KDM add checks like the ones in ctl_datamove? */ 12691 12692 fe_datamove = CTL_PORT(io)->fe_datamove; 12693 fe_datamove(io); 12694 } 12695 12696 static int 12697 ctl_datamove_remote_sgl_setup(union ctl_io *io) 12698 { 12699 struct ctl_sg_entry *local_sglist; 12700 uint32_t len_to_go; 12701 int retval; 12702 int i; 12703 12704 retval = 0; 12705 local_sglist = io->io_hdr.local_sglist; 12706 len_to_go = io->scsiio.kern_data_len; 12707 12708 /* 12709 * The difficult thing here is that the size of the various 12710 * S/G segments may be different than the size from the 12711 * remote controller. That'll make it harder when DMAing 12712 * the data back to the other side. 12713 */ 12714 for (i = 0; len_to_go > 0; i++) { 12715 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12716 local_sglist[i].addr = 12717 malloc(local_sglist[i].len, M_CTL, M_WAITOK); 12718 12719 len_to_go -= local_sglist[i].len; 12720 } 12721 /* 12722 * Reset the number of S/G entries accordingly. The original 12723 * number of S/G entries is available in rem_sg_entries. 12724 */ 12725 io->scsiio.kern_sg_entries = i; 12726 12727 #if 0 12728 printf("%s: kern_sg_entries = %d\n", __func__, 12729 io->scsiio.kern_sg_entries); 12730 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12731 printf("%s: sg[%d] = %p, %lu\n", __func__, i, 12732 local_sglist[i].addr, local_sglist[i].len); 12733 #endif 12734 12735 return (retval); 12736 } 12737 12738 static int 12739 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12740 ctl_ha_dt_cb callback) 12741 { 12742 struct ctl_ha_dt_req *rq; 12743 struct ctl_sg_entry *remote_sglist, *local_sglist; 12744 uint32_t local_used, remote_used, total_used; 12745 int i, j, isc_ret; 12746 12747 rq = ctl_dt_req_alloc(); 12748 12749 /* 12750 * If we failed to allocate the request, and if the DMA didn't fail 12751 * anyway, set busy status. This is just a resource allocation 12752 * failure. 12753 */ 12754 if ((rq == NULL) 12755 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12756 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) 12757 ctl_set_busy(&io->scsiio); 12758 12759 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12760 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { 12761 12762 if (rq != NULL) 12763 ctl_dt_req_free(rq); 12764 12765 /* 12766 * The data move failed. We need to return status back 12767 * to the other controller. No point in trying to DMA 12768 * data to the remote controller. 12769 */ 12770 12771 ctl_send_datamove_done(io, /*have_lock*/ 0); 12772 12773 return (1); 12774 } 12775 12776 local_sglist = io->io_hdr.local_sglist; 12777 remote_sglist = io->io_hdr.remote_sglist; 12778 local_used = 0; 12779 remote_used = 0; 12780 total_used = 0; 12781 12782 /* 12783 * Pull/push the data over the wire from/to the other controller. 12784 * This takes into account the possibility that the local and 12785 * remote sglists may not be identical in terms of the size of 12786 * the elements and the number of elements. 12787 * 12788 * One fundamental assumption here is that the length allocated for 12789 * both the local and remote sglists is identical. Otherwise, we've 12790 * essentially got a coding error of some sort. 12791 */ 12792 isc_ret = CTL_HA_STATUS_SUCCESS; 12793 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12794 uint32_t cur_len; 12795 uint8_t *tmp_ptr; 12796 12797 rq->command = command; 12798 rq->context = io; 12799 12800 /* 12801 * Both pointers should be aligned. But it is possible 12802 * that the allocation length is not. They should both 12803 * also have enough slack left over at the end, though, 12804 * to round up to the next 8 byte boundary. 12805 */ 12806 cur_len = MIN(local_sglist[i].len - local_used, 12807 remote_sglist[j].len - remote_used); 12808 rq->size = cur_len; 12809 12810 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12811 tmp_ptr += local_used; 12812 12813 #if 0 12814 /* Use physical addresses when talking to ISC hardware */ 12815 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12816 /* XXX KDM use busdma */ 12817 rq->local = vtophys(tmp_ptr); 12818 } else 12819 rq->local = tmp_ptr; 12820 #else 12821 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12822 ("HA does not support BUS_ADDR")); 12823 rq->local = tmp_ptr; 12824 #endif 12825 12826 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12827 tmp_ptr += remote_used; 12828 rq->remote = tmp_ptr; 12829 12830 rq->callback = NULL; 12831 12832 local_used += cur_len; 12833 if (local_used >= local_sglist[i].len) { 12834 i++; 12835 local_used = 0; 12836 } 12837 12838 remote_used += cur_len; 12839 if (remote_used >= remote_sglist[j].len) { 12840 j++; 12841 remote_used = 0; 12842 } 12843 total_used += cur_len; 12844 12845 if (total_used >= io->scsiio.kern_data_len) 12846 rq->callback = callback; 12847 12848 #if 0 12849 printf("%s: %s: local %p remote %p size %d\n", __func__, 12850 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 12851 rq->local, rq->remote, rq->size); 12852 #endif 12853 12854 isc_ret = ctl_dt_single(rq); 12855 if (isc_ret > CTL_HA_STATUS_SUCCESS) 12856 break; 12857 } 12858 if (isc_ret != CTL_HA_STATUS_WAIT) { 12859 rq->ret = isc_ret; 12860 callback(rq); 12861 } 12862 12863 return (0); 12864 } 12865 12866 static void 12867 ctl_datamove_remote_read(union ctl_io *io) 12868 { 12869 int retval; 12870 uint32_t i; 12871 12872 /* 12873 * This will send an error to the other controller in the case of a 12874 * failure. 12875 */ 12876 retval = ctl_datamove_remote_sgl_setup(io); 12877 if (retval != 0) 12878 return; 12879 12880 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12881 ctl_datamove_remote_read_cb); 12882 if (retval != 0) { 12883 /* 12884 * Make sure we free memory if there was an error.. The 12885 * ctl_datamove_remote_xfer() function will send the 12886 * datamove done message, or call the callback with an 12887 * error if there is a problem. 12888 */ 12889 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12890 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12891 free(io->io_hdr.remote_sglist, M_CTL); 12892 io->io_hdr.remote_sglist = NULL; 12893 io->io_hdr.local_sglist = NULL; 12894 } 12895 } 12896 12897 /* 12898 * Process a datamove request from the other controller. This is used for 12899 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12900 * first. Once that is complete, the data gets DMAed into the remote 12901 * controller's memory. For reads, we DMA from the remote controller's 12902 * memory into our memory first, and then move it out to the FETD. 12903 */ 12904 static void 12905 ctl_datamove_remote(union ctl_io *io) 12906 { 12907 12908 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12909 12910 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12911 ctl_failover_io(io, /*have_lock*/ 0); 12912 return; 12913 } 12914 12915 /* 12916 * Note that we look for an aborted I/O here, but don't do some of 12917 * the other checks that ctl_datamove() normally does. 12918 * We don't need to run the datamove delay code, since that should 12919 * have been done if need be on the other controller. 12920 */ 12921 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12922 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, 12923 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12924 io->io_hdr.nexus.targ_port, 12925 io->io_hdr.nexus.targ_lun); 12926 io->io_hdr.port_status = 31338; 12927 ctl_send_datamove_done(io, /*have_lock*/ 0); 12928 return; 12929 } 12930 12931 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) 12932 ctl_datamove_remote_write(io); 12933 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 12934 ctl_datamove_remote_read(io); 12935 else { 12936 io->io_hdr.port_status = 31339; 12937 ctl_send_datamove_done(io, /*have_lock*/ 0); 12938 } 12939 } 12940 12941 static void 12942 ctl_process_done(union ctl_io *io) 12943 { 12944 struct ctl_softc *softc = CTL_SOFTC(io); 12945 struct ctl_port *port = CTL_PORT(io); 12946 struct ctl_lun *lun = CTL_LUN(io); 12947 void (*fe_done)(union ctl_io *io); 12948 union ctl_ha_msg msg; 12949 12950 CTL_DEBUG_PRINT(("ctl_process_done\n")); 12951 fe_done = port->fe_done; 12952 12953 #ifdef CTL_TIME_IO 12954 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12955 char str[256]; 12956 char path_str[64]; 12957 struct sbuf sb; 12958 12959 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12960 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12961 12962 sbuf_cat(&sb, path_str); 12963 switch (io->io_hdr.io_type) { 12964 case CTL_IO_SCSI: 12965 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12966 sbuf_printf(&sb, "\n"); 12967 sbuf_cat(&sb, path_str); 12968 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12969 io->scsiio.tag_num, io->scsiio.tag_type); 12970 break; 12971 case CTL_IO_TASK: 12972 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12973 "Tag Type: %d\n", io->taskio.task_action, 12974 io->taskio.tag_num, io->taskio.tag_type); 12975 break; 12976 default: 12977 panic("%s: Invalid CTL I/O type %d\n", 12978 __func__, io->io_hdr.io_type); 12979 } 12980 sbuf_cat(&sb, path_str); 12981 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 12982 (intmax_t)time_uptime - io->io_hdr.start_time); 12983 sbuf_finish(&sb); 12984 printf("%s", sbuf_data(&sb)); 12985 } 12986 #endif /* CTL_TIME_IO */ 12987 12988 switch (io->io_hdr.io_type) { 12989 case CTL_IO_SCSI: 12990 break; 12991 case CTL_IO_TASK: 12992 if (ctl_debug & CTL_DEBUG_INFO) 12993 ctl_io_error_print(io, NULL); 12994 fe_done(io); 12995 return; 12996 default: 12997 panic("%s: Invalid CTL I/O type %d\n", 12998 __func__, io->io_hdr.io_type); 12999 } 13000 13001 if (lun == NULL) { 13002 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 13003 io->io_hdr.nexus.targ_mapped_lun)); 13004 goto bailout; 13005 } 13006 13007 mtx_lock(&lun->lun_lock); 13008 13009 /* 13010 * Check to see if we have any informational exception and status 13011 * of this command can be modified to report it in form of either 13012 * RECOVERED ERROR or NO SENSE, depending on MRIE mode page field. 13013 */ 13014 if (lun->ie_reported == 0 && lun->ie_asc != 0 && 13015 io->io_hdr.status == CTL_SUCCESS && 13016 (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) { 13017 uint8_t mrie = lun->MODE_IE.mrie; 13018 uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) || 13019 (lun->MODE_VER.byte3 & SMS_VER_PER)); 13020 if (((mrie == SIEP_MRIE_REC_COND && per) || 13021 mrie == SIEP_MRIE_REC_UNCOND || 13022 mrie == SIEP_MRIE_NO_SENSE) && 13023 (ctl_get_cmd_entry(&io->scsiio, NULL)->flags & 13024 CTL_CMD_FLAG_NO_SENSE) == 0) { 13025 ctl_set_sense(&io->scsiio, 13026 /*current_error*/ 1, 13027 /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ? 13028 SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR, 13029 /*asc*/ lun->ie_asc, 13030 /*ascq*/ lun->ie_ascq, 13031 SSD_ELEM_NONE); 13032 lun->ie_reported = 1; 13033 } 13034 } else if (lun->ie_reported < 0) 13035 lun->ie_reported = 0; 13036 13037 /* 13038 * Check to see if we have any errors to inject here. We only 13039 * inject errors for commands that don't already have errors set. 13040 */ 13041 if (!STAILQ_EMPTY(&lun->error_list) && 13042 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && 13043 ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) 13044 ctl_inject_error(lun, io); 13045 13046 /* 13047 * XXX KDM how do we treat commands that aren't completed 13048 * successfully? 13049 * 13050 * XXX KDM should we also track I/O latency? 13051 */ 13052 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && 13053 io->io_hdr.io_type == CTL_IO_SCSI) { 13054 int type; 13055 #ifdef CTL_TIME_IO 13056 struct bintime bt; 13057 13058 getbinuptime(&bt); 13059 bintime_sub(&bt, &io->io_hdr.start_bt); 13060 #endif 13061 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13062 CTL_FLAG_DATA_IN) 13063 type = CTL_STATS_READ; 13064 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13065 CTL_FLAG_DATA_OUT) 13066 type = CTL_STATS_WRITE; 13067 else 13068 type = CTL_STATS_NO_IO; 13069 13070 #ifdef CTL_LEGACY_STATS 13071 uint32_t targ_port = port->targ_port; 13072 lun->legacy_stats.ports[targ_port].bytes[type] += 13073 io->scsiio.kern_total_len; 13074 lun->legacy_stats.ports[targ_port].operations[type] ++; 13075 lun->legacy_stats.ports[targ_port].num_dmas[type] += 13076 io->io_hdr.num_dmas; 13077 #ifdef CTL_TIME_IO 13078 bintime_add(&lun->legacy_stats.ports[targ_port].dma_time[type], 13079 &io->io_hdr.dma_bt); 13080 bintime_add(&lun->legacy_stats.ports[targ_port].time[type], 13081 &bt); 13082 #endif 13083 #endif /* CTL_LEGACY_STATS */ 13084 13085 lun->stats.bytes[type] += io->scsiio.kern_total_len; 13086 lun->stats.operations[type] ++; 13087 lun->stats.dmas[type] += io->io_hdr.num_dmas; 13088 #ifdef CTL_TIME_IO 13089 bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt); 13090 bintime_add(&lun->stats.time[type], &bt); 13091 #endif 13092 13093 mtx_lock(&port->port_lock); 13094 port->stats.bytes[type] += io->scsiio.kern_total_len; 13095 port->stats.operations[type] ++; 13096 port->stats.dmas[type] += io->io_hdr.num_dmas; 13097 #ifdef CTL_TIME_IO 13098 bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt); 13099 bintime_add(&port->stats.time[type], &bt); 13100 #endif 13101 mtx_unlock(&port->port_lock); 13102 } 13103 13104 /* 13105 * Remove this from the OOA queue. 13106 */ 13107 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 13108 #ifdef CTL_TIME_IO 13109 if (TAILQ_EMPTY(&lun->ooa_queue)) 13110 lun->last_busy = getsbinuptime(); 13111 #endif 13112 13113 /* 13114 * Run through the blocked queue on this LUN and see if anything 13115 * has become unblocked, now that this transaction is done. 13116 */ 13117 ctl_check_blocked(lun); 13118 13119 /* 13120 * If the LUN has been invalidated, free it if there is nothing 13121 * left on its OOA queue. 13122 */ 13123 if ((lun->flags & CTL_LUN_INVALID) 13124 && TAILQ_EMPTY(&lun->ooa_queue)) { 13125 mtx_unlock(&lun->lun_lock); 13126 ctl_free_lun(lun); 13127 } else 13128 mtx_unlock(&lun->lun_lock); 13129 13130 bailout: 13131 13132 /* 13133 * If this command has been aborted, make sure we set the status 13134 * properly. The FETD is responsible for freeing the I/O and doing 13135 * whatever it needs to do to clean up its state. 13136 */ 13137 if (io->io_hdr.flags & CTL_FLAG_ABORT) 13138 ctl_set_task_aborted(&io->scsiio); 13139 13140 /* 13141 * If enabled, print command error status. 13142 */ 13143 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS && 13144 (ctl_debug & CTL_DEBUG_INFO) != 0) 13145 ctl_io_error_print(io, NULL); 13146 13147 /* 13148 * Tell the FETD or the other shelf controller we're done with this 13149 * command. Note that only SCSI commands get to this point. Task 13150 * management commands are completed above. 13151 */ 13152 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 13153 (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) { 13154 memset(&msg, 0, sizeof(msg)); 13155 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 13156 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 13157 msg.hdr.nexus = io->io_hdr.nexus; 13158 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13159 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data), 13160 M_WAITOK); 13161 } 13162 13163 fe_done(io); 13164 } 13165 13166 /* 13167 * Front end should call this if it doesn't do autosense. When the request 13168 * sense comes back in from the initiator, we'll dequeue this and send it. 13169 */ 13170 int 13171 ctl_queue_sense(union ctl_io *io) 13172 { 13173 struct ctl_softc *softc = CTL_SOFTC(io); 13174 struct ctl_port *port = CTL_PORT(io); 13175 struct ctl_lun *lun; 13176 struct scsi_sense_data *ps; 13177 uint32_t initidx, p, targ_lun; 13178 13179 CTL_DEBUG_PRINT(("ctl_queue_sense\n")); 13180 13181 targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13182 13183 /* 13184 * LUN lookup will likely move to the ctl_work_thread() once we 13185 * have our new queueing infrastructure (that doesn't put things on 13186 * a per-LUN queue initially). That is so that we can handle 13187 * things like an INQUIRY to a LUN that we don't have enabled. We 13188 * can't deal with that right now. 13189 * If we don't have a LUN for this, just toss the sense information. 13190 */ 13191 mtx_lock(&softc->ctl_lock); 13192 if (targ_lun >= ctl_max_luns || 13193 (lun = softc->ctl_luns[targ_lun]) == NULL) { 13194 mtx_unlock(&softc->ctl_lock); 13195 goto bailout; 13196 } 13197 mtx_lock(&lun->lun_lock); 13198 mtx_unlock(&softc->ctl_lock); 13199 13200 initidx = ctl_get_initindex(&io->io_hdr.nexus); 13201 p = initidx / CTL_MAX_INIT_PER_PORT; 13202 if (lun->pending_sense[p] == NULL) { 13203 lun->pending_sense[p] = malloc(sizeof(*ps) * CTL_MAX_INIT_PER_PORT, 13204 M_CTL, M_NOWAIT | M_ZERO); 13205 } 13206 if ((ps = lun->pending_sense[p]) != NULL) { 13207 ps += initidx % CTL_MAX_INIT_PER_PORT; 13208 memset(ps, 0, sizeof(*ps)); 13209 memcpy(ps, &io->scsiio.sense_data, io->scsiio.sense_len); 13210 } 13211 mtx_unlock(&lun->lun_lock); 13212 13213 bailout: 13214 ctl_free_io(io); 13215 return (CTL_RETVAL_COMPLETE); 13216 } 13217 13218 /* 13219 * Primary command inlet from frontend ports. All SCSI and task I/O 13220 * requests must go through this function. 13221 */ 13222 int 13223 ctl_queue(union ctl_io *io) 13224 { 13225 struct ctl_port *port = CTL_PORT(io); 13226 13227 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 13228 13229 #ifdef CTL_TIME_IO 13230 io->io_hdr.start_time = time_uptime; 13231 getbinuptime(&io->io_hdr.start_bt); 13232 #endif /* CTL_TIME_IO */ 13233 13234 /* Map FE-specific LUN ID into global one. */ 13235 io->io_hdr.nexus.targ_mapped_lun = 13236 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13237 13238 switch (io->io_hdr.io_type) { 13239 case CTL_IO_SCSI: 13240 case CTL_IO_TASK: 13241 if (ctl_debug & CTL_DEBUG_CDB) 13242 ctl_io_print(io); 13243 ctl_enqueue_incoming(io); 13244 break; 13245 default: 13246 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 13247 return (EINVAL); 13248 } 13249 13250 return (CTL_RETVAL_COMPLETE); 13251 } 13252 13253 #ifdef CTL_IO_DELAY 13254 static void 13255 ctl_done_timer_wakeup(void *arg) 13256 { 13257 union ctl_io *io; 13258 13259 io = (union ctl_io *)arg; 13260 ctl_done(io); 13261 } 13262 #endif /* CTL_IO_DELAY */ 13263 13264 void 13265 ctl_serseq_done(union ctl_io *io) 13266 { 13267 struct ctl_lun *lun = CTL_LUN(io);; 13268 13269 if (lun->be_lun == NULL || 13270 lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF) 13271 return; 13272 mtx_lock(&lun->lun_lock); 13273 io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE; 13274 ctl_check_blocked(lun); 13275 mtx_unlock(&lun->lun_lock); 13276 } 13277 13278 void 13279 ctl_done(union ctl_io *io) 13280 { 13281 13282 /* 13283 * Enable this to catch duplicate completion issues. 13284 */ 13285 #if 0 13286 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { 13287 printf("%s: type %d msg %d cdb %x iptl: " 13288 "%u:%u:%u tag 0x%04x " 13289 "flag %#x status %x\n", 13290 __func__, 13291 io->io_hdr.io_type, 13292 io->io_hdr.msg_type, 13293 io->scsiio.cdb[0], 13294 io->io_hdr.nexus.initid, 13295 io->io_hdr.nexus.targ_port, 13296 io->io_hdr.nexus.targ_lun, 13297 (io->io_hdr.io_type == 13298 CTL_IO_TASK) ? 13299 io->taskio.tag_num : 13300 io->scsiio.tag_num, 13301 io->io_hdr.flags, 13302 io->io_hdr.status); 13303 } else 13304 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; 13305 #endif 13306 13307 /* 13308 * This is an internal copy of an I/O, and should not go through 13309 * the normal done processing logic. 13310 */ 13311 if (io->io_hdr.flags & CTL_FLAG_INT_COPY) 13312 return; 13313 13314 #ifdef CTL_IO_DELAY 13315 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 13316 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 13317 } else { 13318 struct ctl_lun *lun = CTL_LUN(io); 13319 13320 if ((lun != NULL) 13321 && (lun->delay_info.done_delay > 0)) { 13322 13323 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 13324 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 13325 callout_reset(&io->io_hdr.delay_callout, 13326 lun->delay_info.done_delay * hz, 13327 ctl_done_timer_wakeup, io); 13328 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) 13329 lun->delay_info.done_delay = 0; 13330 return; 13331 } 13332 } 13333 #endif /* CTL_IO_DELAY */ 13334 13335 ctl_enqueue_done(io); 13336 } 13337 13338 static void 13339 ctl_work_thread(void *arg) 13340 { 13341 struct ctl_thread *thr = (struct ctl_thread *)arg; 13342 struct ctl_softc *softc = thr->ctl_softc; 13343 union ctl_io *io; 13344 int retval; 13345 13346 CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); 13347 13348 while (!softc->shutdown) { 13349 /* 13350 * We handle the queues in this order: 13351 * - ISC 13352 * - done queue (to free up resources, unblock other commands) 13353 * - RtR queue 13354 * - incoming queue 13355 * 13356 * If those queues are empty, we break out of the loop and 13357 * go to sleep. 13358 */ 13359 mtx_lock(&thr->queue_lock); 13360 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue); 13361 if (io != NULL) { 13362 STAILQ_REMOVE_HEAD(&thr->isc_queue, links); 13363 mtx_unlock(&thr->queue_lock); 13364 ctl_handle_isc(io); 13365 continue; 13366 } 13367 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue); 13368 if (io != NULL) { 13369 STAILQ_REMOVE_HEAD(&thr->done_queue, links); 13370 /* clear any blocked commands, call fe_done */ 13371 mtx_unlock(&thr->queue_lock); 13372 ctl_process_done(io); 13373 continue; 13374 } 13375 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue); 13376 if (io != NULL) { 13377 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); 13378 mtx_unlock(&thr->queue_lock); 13379 if (io->io_hdr.io_type == CTL_IO_TASK) 13380 ctl_run_task(io); 13381 else 13382 ctl_scsiio_precheck(softc, &io->scsiio); 13383 continue; 13384 } 13385 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); 13386 if (io != NULL) { 13387 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); 13388 mtx_unlock(&thr->queue_lock); 13389 retval = ctl_scsiio(&io->scsiio); 13390 if (retval != CTL_RETVAL_COMPLETE) 13391 CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); 13392 continue; 13393 } 13394 13395 /* Sleep until we have something to do. */ 13396 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0); 13397 } 13398 thr->thread = NULL; 13399 kthread_exit(); 13400 } 13401 13402 static void 13403 ctl_lun_thread(void *arg) 13404 { 13405 struct ctl_softc *softc = (struct ctl_softc *)arg; 13406 struct ctl_be_lun *be_lun; 13407 13408 CTL_DEBUG_PRINT(("ctl_lun_thread starting\n")); 13409 13410 while (!softc->shutdown) { 13411 mtx_lock(&softc->ctl_lock); 13412 be_lun = STAILQ_FIRST(&softc->pending_lun_queue); 13413 if (be_lun != NULL) { 13414 STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links); 13415 mtx_unlock(&softc->ctl_lock); 13416 ctl_create_lun(be_lun); 13417 continue; 13418 } 13419 13420 /* Sleep until we have something to do. */ 13421 mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock, 13422 PDROP | PRIBIO, "-", 0); 13423 } 13424 softc->lun_thread = NULL; 13425 kthread_exit(); 13426 } 13427 13428 static void 13429 ctl_thresh_thread(void *arg) 13430 { 13431 struct ctl_softc *softc = (struct ctl_softc *)arg; 13432 struct ctl_lun *lun; 13433 struct ctl_logical_block_provisioning_page *page; 13434 const char *attr; 13435 union ctl_ha_msg msg; 13436 uint64_t thres, val; 13437 int i, e, set; 13438 13439 CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n")); 13440 13441 while (!softc->shutdown) { 13442 mtx_lock(&softc->ctl_lock); 13443 STAILQ_FOREACH(lun, &softc->lun_list, links) { 13444 if ((lun->flags & CTL_LUN_DISABLED) || 13445 (lun->flags & CTL_LUN_NO_MEDIA) || 13446 lun->backend->lun_attr == NULL) 13447 continue; 13448 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 13449 softc->ha_mode == CTL_HA_MODE_XFER) 13450 continue; 13451 if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0) 13452 continue; 13453 e = 0; 13454 page = &lun->MODE_LBP; 13455 for (i = 0; i < CTL_NUM_LBP_THRESH; i++) { 13456 if ((page->descr[i].flags & SLBPPD_ENABLED) == 0) 13457 continue; 13458 thres = scsi_4btoul(page->descr[i].count); 13459 thres <<= CTL_LBP_EXPONENT; 13460 switch (page->descr[i].resource) { 13461 case 0x01: 13462 attr = "blocksavail"; 13463 break; 13464 case 0x02: 13465 attr = "blocksused"; 13466 break; 13467 case 0xf1: 13468 attr = "poolblocksavail"; 13469 break; 13470 case 0xf2: 13471 attr = "poolblocksused"; 13472 break; 13473 default: 13474 continue; 13475 } 13476 mtx_unlock(&softc->ctl_lock); // XXX 13477 val = lun->backend->lun_attr( 13478 lun->be_lun->be_lun, attr); 13479 mtx_lock(&softc->ctl_lock); 13480 if (val == UINT64_MAX) 13481 continue; 13482 if ((page->descr[i].flags & SLBPPD_ARMING_MASK) 13483 == SLBPPD_ARMING_INC) 13484 e = (val >= thres); 13485 else 13486 e = (val <= thres); 13487 if (e) 13488 break; 13489 } 13490 mtx_lock(&lun->lun_lock); 13491 if (e) { 13492 scsi_u64to8b((uint8_t *)&page->descr[i] - 13493 (uint8_t *)page, lun->ua_tpt_info); 13494 if (lun->lasttpt == 0 || 13495 time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) { 13496 lun->lasttpt = time_uptime; 13497 ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); 13498 set = 1; 13499 } else 13500 set = 0; 13501 } else { 13502 lun->lasttpt = 0; 13503 ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); 13504 set = -1; 13505 } 13506 mtx_unlock(&lun->lun_lock); 13507 if (set != 0 && 13508 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 13509 /* Send msg to other side. */ 13510 bzero(&msg.ua, sizeof(msg.ua)); 13511 msg.hdr.msg_type = CTL_MSG_UA; 13512 msg.hdr.nexus.initid = -1; 13513 msg.hdr.nexus.targ_port = -1; 13514 msg.hdr.nexus.targ_lun = lun->lun; 13515 msg.hdr.nexus.targ_mapped_lun = lun->lun; 13516 msg.ua.ua_all = 1; 13517 msg.ua.ua_set = (set > 0); 13518 msg.ua.ua_type = CTL_UA_THIN_PROV_THRES; 13519 memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8); 13520 mtx_unlock(&softc->ctl_lock); // XXX 13521 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13522 sizeof(msg.ua), M_WAITOK); 13523 mtx_lock(&softc->ctl_lock); 13524 } 13525 } 13526 mtx_sleep(&softc->thresh_thread, &softc->ctl_lock, 13527 PDROP | PRIBIO, "-", CTL_LBP_PERIOD * hz); 13528 } 13529 softc->thresh_thread = NULL; 13530 kthread_exit(); 13531 } 13532 13533 static void 13534 ctl_enqueue_incoming(union ctl_io *io) 13535 { 13536 struct ctl_softc *softc = CTL_SOFTC(io); 13537 struct ctl_thread *thr; 13538 u_int idx; 13539 13540 idx = (io->io_hdr.nexus.targ_port * 127 + 13541 io->io_hdr.nexus.initid) % worker_threads; 13542 thr = &softc->threads[idx]; 13543 mtx_lock(&thr->queue_lock); 13544 STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links); 13545 mtx_unlock(&thr->queue_lock); 13546 wakeup(thr); 13547 } 13548 13549 static void 13550 ctl_enqueue_rtr(union ctl_io *io) 13551 { 13552 struct ctl_softc *softc = CTL_SOFTC(io); 13553 struct ctl_thread *thr; 13554 13555 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13556 mtx_lock(&thr->queue_lock); 13557 STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links); 13558 mtx_unlock(&thr->queue_lock); 13559 wakeup(thr); 13560 } 13561 13562 static void 13563 ctl_enqueue_done(union ctl_io *io) 13564 { 13565 struct ctl_softc *softc = CTL_SOFTC(io); 13566 struct ctl_thread *thr; 13567 13568 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13569 mtx_lock(&thr->queue_lock); 13570 STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links); 13571 mtx_unlock(&thr->queue_lock); 13572 wakeup(thr); 13573 } 13574 13575 static void 13576 ctl_enqueue_isc(union ctl_io *io) 13577 { 13578 struct ctl_softc *softc = CTL_SOFTC(io); 13579 struct ctl_thread *thr; 13580 13581 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13582 mtx_lock(&thr->queue_lock); 13583 STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links); 13584 mtx_unlock(&thr->queue_lock); 13585 wakeup(thr); 13586 } 13587 13588 /* 13589 * vim: ts=8 13590 */ 13591