/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id$
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#define _CTL_C

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ctype.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_cd.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
 */
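/*
 * For illustration only (a sketch, not part of the original driver): each
 * template's page_length is the structure size minus the 2-byte page header,
 * and the SPF subpage lengths further below subtract the 4-byte subpage
 * header instead.  The *_changeable copies act as masks of which fields a
 * MODE SELECT may alter, while the *_default copies seed the current and
 * saved values.
 */
#if 0
	/* Sketch of the size convention, using only types from scsi_da.h. */
	size_t rw_er_wire_len = sizeof(struct scsi_da_rw_recovery_page);
	/* rw_er_wire_len == rw_er_page_default.page_length + 2 */
#endif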
const static struct scsi_da_rw_recovery_page rw_er_page_default = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/SMS_RWER_LBPERE,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_rw_recovery_page rw_er_page_changeable = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/SMS_RWER_PER,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/SMS_RWER_LBPERE,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_format_page format_page_default = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
			       CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ SFP_HSEC,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_format_page format_page_changeable = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {0, 0},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ 0,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_default = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ CTL_DEFAULT_HEADS,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ SRDP_RPL_DISABLED,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
			   CTL_DEFAULT_ROTATION_RATE & 0xff},
	/*reserved2*/ {0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ 0,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ 0,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {0, 0},
	/*reserved2*/ {0, 0}
};

const static struct scsi_da_verify_recovery_page verify_er_page_default = {
	/*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
	/*byte3*/0,
	/*read_retry_count*/0,
	/*reserved*/{ 0, 0, 0, 0, 0, 0 },
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_verify_recovery_page verify_er_page_changeable = {
	/*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
	/*byte3*/SMS_VER_PER,
	/*read_retry_count*/0,
	/*reserved*/{ 0, 0, 0, 0, 0, 0 },
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_caching_page caching_page_default = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_DISC | SCP_WCE,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0xff, 0xff},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0xff, 0xff},
	/*max_pf_ceiling*/ {0xff, 0xff},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_caching_page caching_page_changeable = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_WCE | SCP_RCD,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0, 0},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0, 0},
	/*max_pf_ceiling*/ {0, 0},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_control_page control_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/0,
	/*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
	/*eca_and_aen*/0,
	/*flags4*/SCP_TAS,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

const static struct scsi_control_page control_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/SCP_DSENSE,
	/*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR,
	/*eca_and_aen*/SCP_SWP,
	/*flags4*/0,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

#define CTL_CEM_LEN	(sizeof(struct scsi_control_ext_page) - 4)

const static struct scsi_control_ext_page control_ext_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
	/*subpage_code*/0x01,
	/*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
	/*flags*/0,
	/*prio*/0,
	/*max_sense*/0
};

const static struct scsi_control_ext_page control_ext_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
	/*subpage_code*/0x01,
	/*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
	/*flags*/0,
	/*prio*/0,
	/*max_sense*/0xff
};

const static struct scsi_info_exceptions_page ie_page_default = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/SIEP_FLAGS_EWASC,
	/*mrie*/SIEP_MRIE_NO,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 1}
};

const static struct scsi_info_exceptions_page ie_page_changeable = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST |
	    SIEP_FLAGS_LOGERR,
	/*mrie*/0x0f,
	/*interval_timer*/{0xff, 0xff, 0xff, 0xff},
	/*report_count*/{0xff, 0xff, 0xff, 0xff}
};

#define CTL_LBPM_LEN	(sizeof(struct ctl_logical_block_provisioning_page) - 4)

const static struct ctl_logical_block_provisioning_page lbp_page_default =
{{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0x01,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0x02,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf1,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf2,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/SLBPP_SITUA,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

const static struct scsi_cddvd_capabilities_page cddvd_page_default = {
	/*page_code*/SMS_CDDVD_CAPS_PAGE,
	/*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
	/*caps1*/0x3f,
	/*caps2*/0x00,
	/*caps3*/0xf0,
	/*caps4*/0x00,
	/*caps5*/0x29,
	/*caps6*/0x00,
	/*obsolete*/{0, 0},
	/*nvol_levels*/{0, 0},
	/*buffer_size*/{8, 0},
	/*obsolete2*/{0, 0},
	/*reserved*/0,
	/*digital*/0,
	/*obsolete3*/0,
	/*copy_management*/0,
	/*reserved2*/0,
	/*rotation_control*/0,
	/*cur_write_speed*/0,
	/*num_speed_descr*/0,
};

const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = {
	/*page_code*/SMS_CDDVD_CAPS_PAGE,
	/*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
	/*caps1*/0,
	/*caps2*/0,
	/*caps3*/0,
	/*caps4*/0,
	/*caps5*/0,
	/*caps6*/0,
	/*obsolete*/{0, 0},
	/*nvol_levels*/{0, 0},
	/*buffer_size*/{0, 0},
	/*obsolete2*/{0, 0},
	/*reserved*/0,
	/*digital*/0,
	/*obsolete3*/0,
	/*copy_management*/0,
	/*reserved2*/0,
	/*rotation_control*/0,
	/*cur_write_speed*/0,
	/*num_speed_descr*/0,
};

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int ctl_debug = CTL_DEBUG_NONE;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
    &ctl_debug, 0, "Enabled debug flags");
static int ctl_lun_map_size = 1024;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN,
    &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)");

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0),
 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES	10

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			       struct ctl_ooa *ooa_hdr,
			       struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
			 struct ctl_be_lun *be_lun);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr);

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
				  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
				       int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
					 int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
				   bool seq);
static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
    union ctl_io *pending_io, union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io *starting_io);
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
				const struct ctl_cmd_entry *entry,
				struct ctl_scsiio *ctsio);
static void ctl_failover_lun(union ctl_io *io);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
			       struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
			    ctl_ua_type ua_type);
static int ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io,
			    ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_query_task(union ctl_io *io, int task_set);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static int ctl_query_async_event(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
				    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static void ctl_process_done(union ctl_io *io);
static void ctl_lun_thread(void *arg);
static void ctl_thresh_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);
static const struct ctl_cmd_entry *
    ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
    ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
    const struct ctl_cmd_entry *entry);

static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
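/*
 * For illustration only (a userland sketch, not part of the driver): the
 * character device created below as /dev/cam/ctl is driven entirely through
 * ioctl(2).  A caller might exercise the CTL_LUN_REQ path roughly as shown;
 * the request structures come from <cam/ctl/ctl_ioctl.h>, and error handling
 * and headers are omitted here.
 */
#if 0
	int fd = open("/dev/cam/ctl", O_RDWR);
	struct ctl_lun_req req;

	memset(&req, 0, sizeof(req));
	req.reqtype = CTL_LUNREQ_MODIFY;	/* same request the ha_role sysctl issues */
	req.reqdata.modify.lun_id = 0;		/* hypothetical LUN id */
	if (ioctl(fd, CTL_LUN_REQ, &req) == 0 && req.status == CTL_LUN_OK)
		printf("LUN modified\n");
#endif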
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};


MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

static struct ctl_frontend ha_frontend =
{
	.name = "ha",
};

static void
ctl_ha_datamove(union ctl_io *io)
{
	struct ctl_lun *lun;
	struct ctl_sg_entry *sgl;
	union ctl_ha_msg msg;
	uint32_t sg_entries_sent;
	int do_sg_copy, i, j;

	lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	memset(&msg.dt, 0, sizeof(msg.dt));
	msg.hdr.msg_type = CTL_MSG_DATAMOVE;
	msg.hdr.original_sc = io->io_hdr.original_sc;
	msg.hdr.serializing_sc = io;
	msg.hdr.nexus = io->io_hdr.nexus;
	msg.hdr.status = io->io_hdr.status;
	msg.dt.flags = io->io_hdr.flags;

	/*
	 * We convert everything into a S/G list here.  We can't
	 * pass by reference, only by value between controllers.
	 * So we can't pass a pointer to the S/G list, only as many
	 * S/G entries as we can fit in here.  If it's possible for
	 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
	 * then we need to break this up into multiple transfers.
	 */
	if (io->scsiio.kern_sg_entries == 0) {
		msg.dt.kern_sg_entries = 1;
#if 0
		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
		} else {
			/* XXX KDM use busdma here! */
			msg.dt.sg_list[0].addr =
			    (void *)vtophys(io->scsiio.kern_data_ptr);
		}
#else
		KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
		    ("HA does not support BUS_ADDR"));
		msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
#endif
		msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
		do_sg_copy = 0;
	} else {
		msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
		do_sg_copy = 1;
	}

	msg.dt.kern_data_len = io->scsiio.kern_data_len;
	msg.dt.kern_total_len = io->scsiio.kern_total_len;
	msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
	msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
	msg.dt.sg_sequence = 0;

	/*
	 * Loop until we've sent all of the S/G entries.  On the
	 * other end, we'll recompose these S/G entries into one
	 * contiguous list before processing.
	 */
	for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries;
	    msg.dt.sg_sequence++) {
		msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) /
		    sizeof(msg.dt.sg_list[0])),
		    msg.dt.kern_sg_entries - sg_entries_sent);
		if (do_sg_copy != 0) {
			sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
			for (i = sg_entries_sent, j = 0;
			     i < msg.dt.cur_sg_entries; i++, j++) {
#if 0
				if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
					msg.dt.sg_list[j].addr = sgl[i].addr;
				} else {
					/* XXX KDM use busdma here! */
					msg.dt.sg_list[j].addr =
					    (void *)vtophys(sgl[i].addr);
				}
#else
				KASSERT((io->io_hdr.flags &
				    CTL_FLAG_BUS_ADDR) == 0,
				    ("HA does not support BUS_ADDR"));
				msg.dt.sg_list[j].addr = sgl[i].addr;
#endif
				msg.dt.sg_list[j].len = sgl[i].len;
			}
		}

		sg_entries_sent += msg.dt.cur_sg_entries;
		msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries);
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.dt) - sizeof(msg.dt.sg_list) +
		    sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries,
		    M_WAITOK) > CTL_HA_STATUS_SUCCESS) {
			io->io_hdr.port_status = 31341;
			io->scsiio.be_move_done(io);
			return;
		}
		msg.dt.sent_sg_entries = sg_entries_sent;
	}

	/*
	 * Officially hand the request over from us to the peer.
	 * If failover has just happened, then we must return an error.
	 * If failover happens just after this, then it is not our problem.
	 */
	if (lun)
		mtx_lock(&lun->lun_lock);
	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
		if (lun)
			mtx_unlock(&lun->lun_lock);
		io->io_hdr.port_status = 31342;
		io->scsiio.be_move_done(io);
		return;
	}
	io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
	io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
	if (lun)
		mtx_unlock(&lun->lun_lock);
}

static void
ctl_ha_done(union ctl_io *io)
{
	union ctl_ha_msg msg;

	if (io->io_hdr.io_type == CTL_IO_SCSI) {
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.original_sc = io->io_hdr.original_sc;
		msg.hdr.nexus = io->io_hdr.nexus;
		msg.hdr.status = io->io_hdr.status;
		msg.scsi.scsi_status = io->scsiio.scsi_status;
		msg.scsi.tag_num = io->scsiio.tag_num;
		msg.scsi.tag_type = io->scsiio.tag_type;
		msg.scsi.sense_len = io->scsiio.sense_len;
		msg.scsi.sense_residual = io->scsiio.sense_residual;
		msg.scsi.residual = io->scsiio.residual;
		memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
		    io->scsiio.sense_len);
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
		    msg.scsi.sense_len, M_WAITOK);
	}
	ctl_free_io(io);
}

static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
			    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	ctsio->sense_residual = msg_info->scsi.sense_residual;
	ctsio->residual = msg_info->scsi.residual;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	    msg_info->scsi.sense_len);
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
				union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

void
ctl_isc_announce_lun(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_ha_msg *msg;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;

	if (softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	mtx_lock(&lun->lun_lock);
	i = sizeof(msg->lun);
	if (lun->lun_devid)
		i += lun->lun_devid->len;
	i += sizeof(pr_key) * lun->pr_key_count;
alloc:
	mtx_unlock(&lun->lun_lock);
	msg = malloc(i, M_CTL, M_WAITOK);
	mtx_lock(&lun->lun_lock);
	k = sizeof(msg->lun);
	if (lun->lun_devid)
		k += lun->lun_devid->len;
	k += sizeof(pr_key) * lun->pr_key_count;
	if (i < k) {
		free(msg, M_CTL);
		i = k;
		goto alloc;
	}
	bzero(&msg->lun, sizeof(msg->lun));
	msg->hdr.msg_type = CTL_MSG_LUN_SYNC;
	msg->hdr.nexus.targ_lun = lun->lun;
	msg->hdr.nexus.targ_mapped_lun = lun->lun;
	msg->lun.flags = lun->flags;
	msg->lun.pr_generation = lun->pr_generation;
	msg->lun.pr_res_idx = lun->pr_res_idx;
	msg->lun.pr_res_type = lun->pr_res_type;
	msg->lun.pr_key_count = lun->pr_key_count;
	i = 0;
	if (lun->lun_devid) {
		msg->lun.lun_devid_len = lun->lun_devid->len;
		memcpy(&msg->lun.data[i], lun->lun_devid->data,
		    msg->lun.lun_devid_len);
		i += msg->lun.lun_devid_len;
	}
	for (k = 0; k < CTL_MAX_INITIATORS; k++) {
		if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0)
			continue;
		pr_key.pr_iid = k;
		memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key));
		i += sizeof(pr_key);
	}
	mtx_unlock(&lun->lun_lock);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
	    M_WAITOK);
	free(msg, M_CTL);

	if (lun->flags & CTL_LUN_PRIMARY_SC) {
		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
			ctl_isc_announce_mode(lun, -1,
			    lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
			    lun->mode_pages.index[i].subpage);
		}
	}
}

void
ctl_isc_announce_port(struct ctl_port *port)
{
	struct ctl_softc *softc = port->ctl_softc;
	union ctl_ha_msg *msg;
	int i;

	if (port->targ_port < softc->port_min ||
	    port->targ_port >= softc->port_max ||
	    softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	i = sizeof(msg->port) + strlen(port->port_name) + 1;
	if (port->lun_map)
		i += port->lun_map_size * sizeof(uint32_t);
	if (port->port_devid)
		i += port->port_devid->len;
	if (port->target_devid)
		i += port->target_devid->len;
	if (port->init_devid)
		i += port->init_devid->len;
	msg = malloc(i, M_CTL, M_WAITOK);
	bzero(&msg->port, sizeof(msg->port));
	msg->hdr.msg_type = CTL_MSG_PORT_SYNC;
	msg->hdr.nexus.targ_port = port->targ_port;
	msg->port.port_type = port->port_type;
	msg->port.physical_port = port->physical_port;
	msg->port.virtual_port = port->virtual_port;
	msg->port.status = port->status;
	i = 0;
	msg->port.name_len = sprintf(&msg->port.data[i],
	    "%d:%s", softc->ha_id, port->port_name) + 1;
	i += msg->port.name_len;
	if (port->lun_map) {
		msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t);
		memcpy(&msg->port.data[i], port->lun_map,
		    msg->port.lun_map_len);
		i += msg->port.lun_map_len;
	}
	if (port->port_devid) {
		msg->port.port_devid_len = port->port_devid->len;
		memcpy(&msg->port.data[i], port->port_devid->data,
		    msg->port.port_devid_len);
		i += msg->port.port_devid_len;
	}
	if (port->target_devid) {
		msg->port.target_devid_len = port->target_devid->len;
		memcpy(&msg->port.data[i], port->target_devid->data,
		    msg->port.target_devid_len);
		i += msg->port.target_devid_len;
	}
	if (port->init_devid) {
		msg->port.init_devid_len = port->init_devid->len;
		memcpy(&msg->port.data[i], port->init_devid->data,
		    msg->port.init_devid_len);
		i += msg->port.init_devid_len;
	}
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
	    M_WAITOK);
	free(msg, M_CTL);
}

void
ctl_isc_announce_iid(struct ctl_port *port, int iid)
{
	struct ctl_softc *softc = port->ctl_softc;
	union ctl_ha_msg *msg;
	int i, l;

	if (port->targ_port < softc->port_min ||
	    port->targ_port >= softc->port_max ||
	    softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	mtx_lock(&softc->ctl_lock);
	i = sizeof(msg->iid);
	l = 0;
	if (port->wwpn_iid[iid].name)
		l = strlen(port->wwpn_iid[iid].name) + 1;
	i += l;
	msg = malloc(i, M_CTL, M_NOWAIT);
	if (msg == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	bzero(&msg->iid, sizeof(msg->iid));
	msg->hdr.msg_type = CTL_MSG_IID_SYNC;
	msg->hdr.nexus.targ_port = port->targ_port;
	msg->hdr.nexus.initid = iid;
	msg->iid.in_use = port->wwpn_iid[iid].in_use;
	msg->iid.name_len = l;
	msg->iid.wwpn = port->wwpn_iid[iid].wwpn;
	if (port->wwpn_iid[iid].name)
		strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l);
	mtx_unlock(&softc->ctl_lock);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT);
	free(msg, M_CTL);
}

void
ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx,
    uint8_t page, uint8_t subpage)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_ha_msg msg;
	u_int i;

	if (softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
		    page && lun->mode_pages.index[i].subpage == subpage)
			break;
	}
	if (i == CTL_NUM_MODE_PAGES)
		return;

	/* Don't try to replicate pages not present on this device. */
	if (lun->mode_pages.index[i].page_data == NULL)
		return;

	bzero(&msg.mode, sizeof(msg.mode));
	msg.hdr.msg_type = CTL_MSG_MODE_SYNC;
	msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT;
	msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT;
	msg.hdr.nexus.targ_lun = lun->lun;
	msg.hdr.nexus.targ_mapped_lun = lun->lun;
	msg.mode.page_code = page;
	msg.mode.subpage = subpage;
	msg.mode.page_len = lun->mode_pages.index[i].page_len;
	memcpy(msg.mode.data, lun->mode_pages.index[i].page_data,
	    msg.mode.page_len);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode),
	    M_WAITOK);
}

static void
ctl_isc_ha_link_up(struct ctl_softc *softc)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	union ctl_ha_msg msg;
	int i;

	/* Announce this node's parameters to the peer for validation. */
	msg.login.msg_type = CTL_MSG_LOGIN;
	msg.login.version = CTL_HA_VERSION;
	msg.login.ha_mode = softc->ha_mode;
	msg.login.ha_id = softc->ha_id;
	msg.login.max_luns = CTL_MAX_LUNS;
	msg.login.max_ports = CTL_MAX_PORTS;
	msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT;
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login),
	    M_WAITOK);

	STAILQ_FOREACH(port, &softc->port_list, links) {
		ctl_isc_announce_port(port);
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use)
				ctl_isc_announce_iid(port, i);
		}
	}
	STAILQ_FOREACH(lun, &softc->lun_list, links)
		ctl_isc_announce_lun(lun);
}

static void
ctl_isc_ha_link_down(struct ctl_softc *softc)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	union ctl_io *io;
	int i;

	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) {
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
		}
		mtx_unlock(&lun->lun_lock);

		mtx_unlock(&softc->ctl_lock);
		io = ctl_alloc_io(softc->othersc_pool);
		mtx_lock(&softc->ctl_lock);
		ctl_zero_io(io);
		io->io_hdr.msg_type = CTL_MSG_FAILOVER;
		io->io_hdr.nexus.targ_mapped_lun = lun->lun;
		ctl_enqueue_isc(io);
	}

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port->targ_port >= softc->port_min &&
		    port->targ_port < softc->port_max)
			continue;
		port->status &= ~CTL_PORT_STATUS_ONLINE;
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			port->wwpn_iid[i].in_use = 0;
			free(port->wwpn_iid[i].name, M_CTL);
			port->wwpn_iid[i].name = NULL;
		}
	}
	mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);

	mtx_lock(&softc->ctl_lock);
	if (msg->hdr.nexus.targ_mapped_lun >= CTL_MAX_LUNS ||
	    (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set)
		memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8);
	if (msg->ua.ua_all) {
		if (msg->ua.ua_set)
			ctl_est_ua_all(lun, iid, msg->ua.ua_type);
		else
			ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
	} else {
		if (msg->ua.ua_set)
			ctl_est_ua(lun, iid, msg->ua.ua_type);
		else
			ctl_clr_ua(lun, iid, msg->ua.ua_type);
	}
	mtx_unlock(&lun->lun_lock);
}

static void
ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;
	ctl_lun_flags oflags;
	uint32_t targ_lun;

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= CTL_MAX_LUNS ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
	if (msg->lun.lun_devid_len != i || (i > 0 &&
	    memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
		mtx_unlock(&lun->lun_lock);
		printf("%s: Received conflicting HA LUN %d\n",
		    __func__, targ_lun);
		return;
	} else {
		/* Record whether peer is primary. */
		oflags = lun->flags;
		if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_DISABLED) == 0)
			lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
		else
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
		if (oflags != lun->flags)
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);

		/* If peer is primary and we are not -- use data */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
		    (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
			lun->pr_generation = msg->lun.pr_generation;
			lun->pr_res_idx = msg->lun.pr_res_idx;
			lun->pr_res_type = msg->lun.pr_res_type;
			lun->pr_key_count = msg->lun.pr_key_count;
			for (k = 0; k < CTL_MAX_INITIATORS; k++)
				ctl_clr_prkey(lun, k);
			for (k = 0; k < msg->lun.pr_key_count; k++) {
				memcpy(&pr_key, &msg->lun.data[i],
				    sizeof(pr_key));
				ctl_alloc_prkey(lun, pr_key.pr_iid);
				ctl_set_prkey(lun, pr_key.pr_iid,
				    pr_key.pr_key);
				i += sizeof(pr_key);
			}
		}

		mtx_unlock(&lun->lun_lock);
		CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
		    __func__, targ_lun,
		    (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
		    "primary" : "secondary"));

		/* If we are primary but peer doesn't know -- notify */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
			ctl_isc_announce_lun(lun);
	}
}

static void
ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	int i, new;

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		CTL_DEBUG_PRINT(("%s: New port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 1;
		port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO);
		port->frontend = &ha_frontend;
		port->targ_port = msg->hdr.nexus.targ_port;
		port->fe_datamove = ctl_ha_datamove;
		port->fe_done = ctl_ha_done;
	} else if (port->frontend == &ha_frontend) {
		CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 0;
	} else {
		printf("%s: Received conflicting HA port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	port->port_type = msg->port.port_type;
	port->physical_port = msg->port.physical_port;
	port->virtual_port = msg->port.virtual_port;
	port->status = msg->port.status;
	i = 0;
	free(port->port_name, M_CTL);
	port->port_name = strndup(&msg->port.data[i], msg->port.name_len,
	    M_CTL);
	i += msg->port.name_len;
	if (msg->port.lun_map_len != 0) {
		if (port->lun_map == NULL ||
		    port->lun_map_size * sizeof(uint32_t) <
		    msg->port.lun_map_len) {
			port->lun_map_size = 0;
			free(port->lun_map, M_CTL);
			port->lun_map = malloc(msg->port.lun_map_len,
			    M_CTL, M_WAITOK);
		}
		memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len);
		port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t);
		i += msg->port.lun_map_len;
	} else {
		port->lun_map_size = 0;
		free(port->lun_map, M_CTL);
		port->lun_map = NULL;
	}
	if (msg->port.port_devid_len != 0) {
		if (port->port_devid == NULL ||
		    port->port_devid->len < msg->port.port_devid_len) {
			free(port->port_devid, M_CTL);
			port->port_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.port_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->port_devid->data, &msg->port.data[i],
		    msg->port.port_devid_len);
		port->port_devid->len = msg->port.port_devid_len;
		i += msg->port.port_devid_len;
	} else {
		free(port->port_devid, M_CTL);
		port->port_devid = NULL;
	}
	if (msg->port.target_devid_len != 0) {
		if (port->target_devid == NULL ||
		    port->target_devid->len < msg->port.target_devid_len) {
			free(port->target_devid, M_CTL);
			port->target_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.target_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->target_devid->data, &msg->port.data[i],
		    msg->port.target_devid_len);
		port->target_devid->len = msg->port.target_devid_len;
		i += msg->port.target_devid_len;
	} else {
		free(port->target_devid, M_CTL);
		port->target_devid = NULL;
	}
	if (msg->port.init_devid_len != 0) {
		if (port->init_devid == NULL ||
		    port->init_devid->len < msg->port.init_devid_len) {
			free(port->init_devid, M_CTL);
			port->init_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.init_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->init_devid->data, &msg->port.data[i],
		    msg->port.init_devid_len);
		port->init_devid->len = msg->port.init_devid_len;
		i += msg->port.init_devid_len;
	} else {
		free(port->init_devid, M_CTL);
		port->init_devid = NULL;
	}
	if (new) {
		if (ctl_port_register(port) != 0) {
			printf("%s: ctl_port_register() failed with error\n",
			    __func__);
		}
	}
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
			continue;
		mtx_lock(&lun->lun_lock);
		ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
		mtx_unlock(&lun->lun_lock);
	}
	mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	int iid;

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		printf("%s: Received IID for unknown port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	iid = msg->hdr.nexus.initid;
	port->wwpn_iid[iid].in_use = msg->iid.in_use;
	port->wwpn_iid[iid].wwpn = msg->iid.wwpn;
	free(port->wwpn_iid[iid].name, M_CTL);
	if (msg->iid.name_len) {
		port->wwpn_iid[iid].name = strndup(&msg->iid.data[0],
		    msg->iid.name_len, M_CTL);
	} else
		port->wwpn_iid[iid].name = NULL;
}

static void
ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{

	if (msg->login.version != CTL_HA_VERSION) {
		printf("CTL HA peers have different versions %d != %d\n",
		    msg->login.version, CTL_HA_VERSION);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.ha_mode != softc->ha_mode) {
		printf("CTL HA peers have different ha_mode %d != %d\n",
		    msg->login.ha_mode, softc->ha_mode);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.ha_id == softc->ha_id) {
		printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.max_luns != CTL_MAX_LUNS ||
	    msg->login.max_ports != CTL_MAX_PORTS ||
	    msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) {
		printf("CTL HA peers have different limits\n");
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
}

static void
ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	u_int i;
	uint32_t initidx, targ_lun;

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= CTL_MAX_LUNS ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
		    msg->mode.page_code &&
		    lun->mode_pages.index[i].subpage == msg->mode.subpage)
			break;
	}
	if (i == CTL_NUM_MODE_PAGES) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	memcpy(lun->mode_pages.index[i].page_data, msg->mode.data,
	    lun->mode_pages.index[i].page_len);
	initidx = ctl_get_initindex(&msg->hdr.nexus);
	if (initidx != -1)
		ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
	mtx_unlock(&lun->lun_lock);
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
	struct ctl_softc *softc = control_softc;
	union ctl_io *io;
	struct ctl_prio *presio;
	ctl_ha_status isc_status;

	CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event));
	if (event == CTL_HA_EVT_MSG_RECV) {
		union ctl_ha_msg *msg, msgbuf;

		if (param > sizeof(msgbuf))
			msg = malloc(param, M_CTL, M_WAITOK);
		else
			msg = &msgbuf;
		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param,
		    M_WAITOK);
		if (isc_status != CTL_HA_STATUS_SUCCESS) {
			printf("%s: Error receiving message: %d\n",
			    __func__, isc_status);
			if (msg != &msgbuf)
				free(msg, M_CTL);
			return;
		}

		CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type));
		switch (msg->hdr.msg_type) {
		case CTL_MSG_SERIALIZE:
			io = ctl_alloc_io(softc->othersc_pool);
			ctl_zero_io(io);
			// populate ctsio from msg
			io->io_hdr.io_type = CTL_IO_SCSI;
			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
			io->io_hdr.original_sc = msg->hdr.original_sc;
			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
					    CTL_FLAG_IO_ACTIVE;
			/*
			 * If we're in serialization-only mode, we don't
			 * want to go through full done processing.  Thus
			 * the COPY flag.
			 *
			 * XXX KDM add another flag that is more specific.
1377 */ 1378 if (softc->ha_mode != CTL_HA_MODE_XFER) 1379 io->io_hdr.flags |= CTL_FLAG_INT_COPY; 1380 io->io_hdr.nexus = msg->hdr.nexus; 1381 #if 0 1382 printf("port %u, iid %u, lun %u\n", 1383 io->io_hdr.nexus.targ_port, 1384 io->io_hdr.nexus.initid, 1385 io->io_hdr.nexus.targ_lun); 1386 #endif 1387 io->scsiio.tag_num = msg->scsi.tag_num; 1388 io->scsiio.tag_type = msg->scsi.tag_type; 1389 #ifdef CTL_TIME_IO 1390 io->io_hdr.start_time = time_uptime; 1391 getbinuptime(&io->io_hdr.start_bt); 1392 #endif /* CTL_TIME_IO */ 1393 io->scsiio.cdb_len = msg->scsi.cdb_len; 1394 memcpy(io->scsiio.cdb, msg->scsi.cdb, 1395 CTL_MAX_CDBLEN); 1396 if (softc->ha_mode == CTL_HA_MODE_XFER) { 1397 const struct ctl_cmd_entry *entry; 1398 1399 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 1400 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 1401 io->io_hdr.flags |= 1402 entry->flags & CTL_FLAG_DATA_MASK; 1403 } 1404 ctl_enqueue_isc(io); 1405 break; 1406 1407 /* Performed on the Originating SC, XFER mode only */ 1408 case CTL_MSG_DATAMOVE: { 1409 struct ctl_sg_entry *sgl; 1410 int i, j; 1411 1412 io = msg->hdr.original_sc; 1413 if (io == NULL) { 1414 printf("%s: original_sc == NULL!\n", __func__); 1415 /* XXX KDM do something here */ 1416 break; 1417 } 1418 io->io_hdr.msg_type = CTL_MSG_DATAMOVE; 1419 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1420 /* 1421 * Keep track of this, we need to send it back over 1422 * when the datamove is complete. 1423 */ 1424 io->io_hdr.serializing_sc = msg->hdr.serializing_sc; 1425 if (msg->hdr.status == CTL_SUCCESS) 1426 io->io_hdr.status = msg->hdr.status; 1427 1428 if (msg->dt.sg_sequence == 0) { 1429 #ifdef CTL_TIME_IO 1430 getbinuptime(&io->io_hdr.dma_start_bt); 1431 #endif 1432 i = msg->dt.kern_sg_entries + 1433 msg->dt.kern_data_len / 1434 CTL_HA_DATAMOVE_SEGMENT + 1; 1435 sgl = malloc(sizeof(*sgl) * i, M_CTL, 1436 M_WAITOK | M_ZERO); 1437 io->io_hdr.remote_sglist = sgl; 1438 io->io_hdr.local_sglist = 1439 &sgl[msg->dt.kern_sg_entries]; 1440 1441 io->scsiio.kern_data_ptr = (uint8_t *)sgl; 1442 1443 io->scsiio.kern_sg_entries = 1444 msg->dt.kern_sg_entries; 1445 io->scsiio.rem_sg_entries = 1446 msg->dt.kern_sg_entries; 1447 io->scsiio.kern_data_len = 1448 msg->dt.kern_data_len; 1449 io->scsiio.kern_total_len = 1450 msg->dt.kern_total_len; 1451 io->scsiio.kern_data_resid = 1452 msg->dt.kern_data_resid; 1453 io->scsiio.kern_rel_offset = 1454 msg->dt.kern_rel_offset; 1455 io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR; 1456 io->io_hdr.flags |= msg->dt.flags & 1457 CTL_FLAG_BUS_ADDR; 1458 } else 1459 sgl = (struct ctl_sg_entry *) 1460 io->scsiio.kern_data_ptr; 1461 1462 for (i = msg->dt.sent_sg_entries, j = 0; 1463 i < (msg->dt.sent_sg_entries + 1464 msg->dt.cur_sg_entries); i++, j++) { 1465 sgl[i].addr = msg->dt.sg_list[j].addr; 1466 sgl[i].len = msg->dt.sg_list[j].len; 1467 1468 #if 0 1469 printf("%s: DATAMOVE: %p,%lu j=%d, i=%d\n", 1470 __func__, sgl[i].addr, sgl[i].len, j, i); 1471 #endif 1472 } 1473 1474 /* 1475 * If this is the last piece of the I/O, we've got 1476 * the full S/G list. Queue processing in the thread. 1477 * Otherwise wait for the next piece. 1478 */ 1479 if (msg->dt.sg_last != 0) 1480 ctl_enqueue_isc(io); 1481 break; 1482 } 1483 /* Performed on the Serializing (primary) SC, XFER mode only */ 1484 case CTL_MSG_DATAMOVE_DONE: { 1485 if (msg->hdr.serializing_sc == NULL) { 1486 printf("%s: serializing_sc == NULL!\n", 1487 __func__); 1488 /* XXX KDM now what? 
				/* XXX KDM now what? */
				break;
			}
			/*
			 * We grab the sense information here in case
			 * there was a failure, so we can return status
			 * back to the initiator.
			 */
			io = msg->hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.port_status = msg->scsi.fetd_status;
			io->scsiio.residual = msg->scsi.residual;
			if (msg->hdr.status != CTL_STATUS_NONE) {
				io->io_hdr.status = msg->hdr.status;
				io->scsiio.scsi_status = msg->scsi.scsi_status;
				io->scsiio.sense_len = msg->scsi.sense_len;
				io->scsiio.sense_residual = msg->scsi.sense_residual;
				memcpy(&io->scsiio.sense_data,
				    &msg->scsi.sense_data,
				    msg->scsi.sense_len);
				if (msg->hdr.status == CTL_SUCCESS)
					io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
			}
			ctl_enqueue_isc(io);
			break;
		}

		/* Performed on the Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n",
				    __func__);
				break;
			}
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
			ctl_enqueue_isc(io);
			break;

		/*
		 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode.
		 */
		case CTL_MSG_FINISH_IO:
			if (softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(softc, msg);
			else
				ctl_isc_handler_finish_ser_only(softc, msg);
			break;

		/* Performed on the Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				    __func__);
				break;
			}
			ctl_copy_sense_data(msg, io);
			/*
			 * IO should have already been cleaned up on the other
			 * SC, so clear this flag so we won't send a message
			 * back to finish the IO there.
1557 */ 1558 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 1559 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1560 1561 /* io = msg->hdr.serializing_sc; */ 1562 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 1563 ctl_enqueue_isc(io); 1564 break; 1565 1566 /* Handle resets sent from the other side */ 1567 case CTL_MSG_MANAGE_TASKS: { 1568 struct ctl_taskio *taskio; 1569 taskio = (struct ctl_taskio *)ctl_alloc_io( 1570 softc->othersc_pool); 1571 ctl_zero_io((union ctl_io *)taskio); 1572 taskio->io_hdr.io_type = CTL_IO_TASK; 1573 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1574 taskio->io_hdr.nexus = msg->hdr.nexus; 1575 taskio->task_action = msg->task.task_action; 1576 taskio->tag_num = msg->task.tag_num; 1577 taskio->tag_type = msg->task.tag_type; 1578 #ifdef CTL_TIME_IO 1579 taskio->io_hdr.start_time = time_uptime; 1580 getbinuptime(&taskio->io_hdr.start_bt); 1581 #endif /* CTL_TIME_IO */ 1582 ctl_run_task((union ctl_io *)taskio); 1583 break; 1584 } 1585 /* Persistent Reserve action which needs attention */ 1586 case CTL_MSG_PERS_ACTION: 1587 presio = (struct ctl_prio *)ctl_alloc_io( 1588 softc->othersc_pool); 1589 ctl_zero_io((union ctl_io *)presio); 1590 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; 1591 presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1592 presio->io_hdr.nexus = msg->hdr.nexus; 1593 presio->pr_msg = msg->pr; 1594 ctl_enqueue_isc((union ctl_io *)presio); 1595 break; 1596 case CTL_MSG_UA: 1597 ctl_isc_ua(softc, msg, param); 1598 break; 1599 case CTL_MSG_PORT_SYNC: 1600 ctl_isc_port_sync(softc, msg, param); 1601 break; 1602 case CTL_MSG_LUN_SYNC: 1603 ctl_isc_lun_sync(softc, msg, param); 1604 break; 1605 case CTL_MSG_IID_SYNC: 1606 ctl_isc_iid_sync(softc, msg, param); 1607 break; 1608 case CTL_MSG_LOGIN: 1609 ctl_isc_login(softc, msg, param); 1610 break; 1611 case CTL_MSG_MODE_SYNC: 1612 ctl_isc_mode_sync(softc, msg, param); 1613 break; 1614 default: 1615 printf("Received HA message of unknown type %d\n", 1616 msg->hdr.msg_type); 1617 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1618 break; 1619 } 1620 if (msg != &msgbuf) 1621 free(msg, M_CTL); 1622 } else if (event == CTL_HA_EVT_LINK_CHANGE) { 1623 printf("CTL: HA link status changed from %d to %d\n", 1624 softc->ha_link, param); 1625 if (param == softc->ha_link) 1626 return; 1627 if (softc->ha_link == CTL_HA_LINK_ONLINE) { 1628 softc->ha_link = param; 1629 ctl_isc_ha_link_down(softc); 1630 } else { 1631 softc->ha_link = param; 1632 if (softc->ha_link == CTL_HA_LINK_ONLINE) 1633 ctl_isc_ha_link_up(softc); 1634 } 1635 return; 1636 } else { 1637 printf("ctl_isc_event_handler: Unknown event %d\n", event); 1638 return; 1639 } 1640 } 1641 1642 static void 1643 ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) 1644 { 1645 1646 memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data, 1647 src->scsi.sense_len); 1648 dest->scsiio.scsi_status = src->scsi.scsi_status; 1649 dest->scsiio.sense_len = src->scsi.sense_len; 1650 dest->io_hdr.status = src->hdr.status; 1651 } 1652 1653 static void 1654 ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest) 1655 { 1656 1657 memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data, 1658 src->scsiio.sense_len); 1659 dest->scsi.scsi_status = src->scsiio.scsi_status; 1660 dest->scsi.sense_len = src->scsiio.sense_len; 1661 dest->hdr.status = src->io_hdr.status; 1662 } 1663 1664 void 1665 ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1666 { 1667 struct ctl_softc *softc = lun->ctl_softc; 1668 ctl_ua_type *pu; 1669 1670 if (initidx < softc->init_min || initidx >= 
	if (initidx < softc->init_min || initidx >= softc->init_max)
		return;
	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
}

void
ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua)
{
	int i;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	if (lun->pending_ua[port] == NULL)
		return;
	for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
		if (port * CTL_MAX_INIT_PER_PORT + i == except)
			continue;
		lun->pending_ua[port][i] |= ua;
	}
}

void
ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	int i;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = softc->port_min; i < softc->port_max; i++)
		ctl_est_ua_port(lun, i, except, ua);
}

void
ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	ctl_ua_type *pu;

	if (initidx < softc->init_min || initidx >= softc->init_max)
		return;
	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua;
}

void
ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	int i, j;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = softc->port_min; i < softc->port_max; i++) {
		if (lun->pending_ua[i] == NULL)
			continue;
		for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
			if (i * CTL_MAX_INIT_PER_PORT + j == except)
				continue;
			lun->pending_ua[i][j] &= ~ua;
		}
	}
}

void
ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
    ctl_ua_type ua_type)
{
	struct ctl_lun *lun;

	mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
	STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		ctl_clr_ua(lun, initidx, ua_type);
		mtx_unlock(&lun->lun_lock);
	}
}

static int
ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg1;
	struct ctl_lun *lun;
	struct ctl_lun_req ireq;
	int error, value;

0 : 1; 1761 error = sysctl_handle_int(oidp, &value, 0, req); 1762 if ((error != 0) || (req->newptr == NULL)) 1763 return (error); 1764 1765 mtx_lock(&softc->ctl_lock); 1766 if (value == 0) 1767 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1768 else 1769 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1770 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1771 mtx_unlock(&softc->ctl_lock); 1772 bzero(&ireq, sizeof(ireq)); 1773 ireq.reqtype = CTL_LUNREQ_MODIFY; 1774 ireq.reqdata.modify.lun_id = lun->lun; 1775 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, 1776 curthread); 1777 if (ireq.status != CTL_LUN_OK) { 1778 printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", 1779 __func__, ireq.status, ireq.error_str); 1780 } 1781 mtx_lock(&softc->ctl_lock); 1782 } 1783 mtx_unlock(&softc->ctl_lock); 1784 return (0); 1785 } 1786 1787 static int 1788 ctl_init(void) 1789 { 1790 struct make_dev_args args; 1791 struct ctl_softc *softc; 1792 void *other_pool; 1793 int i, error; 1794 1795 softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1796 M_WAITOK | M_ZERO); 1797 1798 make_dev_args_init(&args); 1799 args.mda_devsw = &ctl_cdevsw; 1800 args.mda_uid = UID_ROOT; 1801 args.mda_gid = GID_OPERATOR; 1802 args.mda_mode = 0600; 1803 args.mda_si_drv1 = softc; 1804 error = make_dev_s(&args, &softc->dev, "cam/ctl"); 1805 if (error != 0) { 1806 free(control_softc, M_DEVBUF); 1807 return (error); 1808 } 1809 1810 sysctl_ctx_init(&softc->sysctl_ctx); 1811 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1812 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1813 CTLFLAG_RD, 0, "CAM Target Layer"); 1814 1815 if (softc->sysctl_tree == NULL) { 1816 printf("%s: unable to allocate sysctl tree\n", __func__); 1817 destroy_dev(softc->dev); 1818 free(control_softc, M_DEVBUF); 1819 control_softc = NULL; 1820 return (ENOMEM); 1821 } 1822 1823 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1824 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1825 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1826 softc->flags = 0; 1827 1828 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1829 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, 1830 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); 1831 1832 /* 1833 * In Copan's HA scheme, the "master" and "slave" roles are 1834 * figured out through the slot the controller is in. Although it 1835 * is an active/active system, someone has to be in charge. 
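 *
 * The head ID below is a boot-time tunable (CTLFLAG_RDTUN), so a minimal
 * two-head setup would, for example, set kern.cam.ctl.ha_id=1 in
 * loader.conf on one node and kern.cam.ctl.ha_id=2 on the other; leaving
 * it at 0 keeps the node in single (non-HA) mode, as the check below shows.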
1836 */ 1837 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1838 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1839 "HA head ID (0 - no HA)"); 1840 if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) { 1841 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1842 softc->is_single = 1; 1843 softc->port_cnt = CTL_MAX_PORTS; 1844 softc->port_min = 0; 1845 } else { 1846 softc->port_cnt = CTL_MAX_PORTS / NUM_HA_SHELVES; 1847 softc->port_min = (softc->ha_id - 1) * softc->port_cnt; 1848 } 1849 softc->port_max = softc->port_min + softc->port_cnt; 1850 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; 1851 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; 1852 1853 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1854 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, 1855 "HA link state (0 - offline, 1 - unknown, 2 - online)"); 1856 1857 STAILQ_INIT(&softc->lun_list); 1858 STAILQ_INIT(&softc->pending_lun_queue); 1859 STAILQ_INIT(&softc->fe_list); 1860 STAILQ_INIT(&softc->port_list); 1861 STAILQ_INIT(&softc->be_list); 1862 ctl_tpc_init(softc); 1863 1864 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, 1865 &other_pool) != 0) 1866 { 1867 printf("ctl: can't allocate %d entry other SC pool, " 1868 "exiting\n", CTL_POOL_ENTRIES_OTHER_SC); 1869 return (ENOMEM); 1870 } 1871 softc->othersc_pool = other_pool; 1872 1873 if (worker_threads <= 0) 1874 worker_threads = max(1, mp_ncpus / 4); 1875 if (worker_threads > CTL_MAX_THREADS) 1876 worker_threads = CTL_MAX_THREADS; 1877 1878 for (i = 0; i < worker_threads; i++) { 1879 struct ctl_thread *thr = &softc->threads[i]; 1880 1881 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); 1882 thr->ctl_softc = softc; 1883 STAILQ_INIT(&thr->incoming_queue); 1884 STAILQ_INIT(&thr->rtr_queue); 1885 STAILQ_INIT(&thr->done_queue); 1886 STAILQ_INIT(&thr->isc_queue); 1887 1888 error = kproc_kthread_add(ctl_work_thread, thr, 1889 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); 1890 if (error != 0) { 1891 printf("error creating CTL work thread!\n"); 1892 ctl_pool_free(other_pool); 1893 return (error); 1894 } 1895 } 1896 error = kproc_kthread_add(ctl_lun_thread, softc, 1897 &softc->ctl_proc, NULL, 0, 0, "ctl", "lun"); 1898 if (error != 0) { 1899 printf("error creating CTL lun thread!\n"); 1900 ctl_pool_free(other_pool); 1901 return (error); 1902 } 1903 error = kproc_kthread_add(ctl_thresh_thread, softc, 1904 &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh"); 1905 if (error != 0) { 1906 printf("error creating CTL threshold thread!\n"); 1907 ctl_pool_free(other_pool); 1908 return (error); 1909 } 1910 1911 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), 1912 OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN, 1913 softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); 1914 1915 if (softc->is_single == 0) { 1916 ctl_frontend_register(&ha_frontend); 1917 if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) { 1918 printf("ctl_init: ctl_ha_msg_init failed.\n"); 1919 softc->is_single = 1; 1920 } else 1921 if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler) 1922 != CTL_HA_STATUS_SUCCESS) { 1923 printf("ctl_init: ctl_ha_msg_register failed.\n"); 1924 softc->is_single = 1; 1925 } 1926 } 1927 return (0); 1928 } 1929 1930 void 1931 ctl_shutdown(void) 1932 { 1933 struct ctl_softc *softc = control_softc; 1934 struct ctl_lun *lun, *next_lun; 1935 1936 if (softc->is_single == 0) { 1937 ctl_ha_msg_shutdown(softc); 1938 if 
(ctl_ha_msg_deregister(CTL_HA_CHAN_CTL)
1939 != CTL_HA_STATUS_SUCCESS)
1940 printf("%s: ctl_ha_msg_deregister failed.\n", __func__);
1941 if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS)
1942 printf("%s: ctl_ha_msg_destroy failed.\n", __func__);
1943 ctl_frontend_deregister(&ha_frontend);
1944 }
1945
1946 mtx_lock(&softc->ctl_lock);
1947
1948 STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun)
1949 ctl_free_lun(lun);
1950
1951 mtx_unlock(&softc->ctl_lock);
1952
1953 #if 0
1954 ctl_shutdown_thread(softc->work_thread);
1955 mtx_destroy(&softc->queue_lock);
1956 #endif
1957
1958 ctl_tpc_shutdown(softc);
1959 uma_zdestroy(softc->io_zone);
1960 mtx_destroy(&softc->ctl_lock);
1961
1962 destroy_dev(softc->dev);
1963
1964 sysctl_ctx_free(&softc->sysctl_ctx);
1965
1966 free(control_softc, M_DEVBUF);
1967 control_softc = NULL;
1968 }
1969
1970 static int
1971 ctl_module_event_handler(module_t mod, int what, void *arg)
1972 {
1973
1974 switch (what) {
1975 case MOD_LOAD:
1976 return (ctl_init());
1977 case MOD_UNLOAD:
1978 return (EBUSY);
1979 default:
1980 return (EOPNOTSUPP);
1981 }
1982 }
1983
1984 /*
1985 * XXX KDM should we do some access checks here? Bump a reference count to
1986 * prevent a CTL module from being unloaded while someone has it open?
1987 */
1988 static int
1989 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
1990 {
1991 return (0);
1992 }
1993
1994 static int
1995 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
1996 {
1997 return (0);
1998 }
1999
2000 /*
2001 * Remove an initiator by port number and initiator ID.
2002 * Returns 0 for success, -1 for failure.
2003 */
2004 int
2005 ctl_remove_initiator(struct ctl_port *port, int iid)
2006 {
2007 struct ctl_softc *softc = port->ctl_softc;
2008
2009 mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
2010
2011 if (iid >= CTL_MAX_INIT_PER_PORT) {
2012 printf("%s: initiator ID %u > maximum %u!\n",
2013 __func__, iid, CTL_MAX_INIT_PER_PORT);
2014 return (-1);
2015 }
2016
2017 mtx_lock(&softc->ctl_lock);
2018 port->wwpn_iid[iid].in_use--;
2019 port->wwpn_iid[iid].last_use = time_uptime;
2020 mtx_unlock(&softc->ctl_lock);
2021 ctl_isc_announce_iid(port, iid);
2022
2023 return (0);
2024 }
2025
2026 /*
2027 * Add an initiator to the initiator map.
2028 * Returns iid for success, < 0 for failure.
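 *
 * A minimal usage sketch (illustrative only): a frontend that only knows
 * the initiator's WWPN can pass iid = -1 and let this routine pick or
 * reuse a slot.  Note that 'name', if not NULL, must be allocated from
 * M_CTL, since this routine takes ownership of it and may free it:
 *
 *	iid = ctl_add_initiator(port, -1, wwpn, NULL);
 *	if (iid < 0)
 *		(handle the error)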
2029 */ 2030 int 2031 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 2032 { 2033 struct ctl_softc *softc = port->ctl_softc; 2034 time_t best_time; 2035 int i, best; 2036 2037 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2038 2039 if (iid >= CTL_MAX_INIT_PER_PORT) { 2040 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 2041 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 2042 free(name, M_CTL); 2043 return (-1); 2044 } 2045 2046 mtx_lock(&softc->ctl_lock); 2047 2048 if (iid < 0 && (wwpn != 0 || name != NULL)) { 2049 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2050 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 2051 iid = i; 2052 break; 2053 } 2054 if (name != NULL && port->wwpn_iid[i].name != NULL && 2055 strcmp(name, port->wwpn_iid[i].name) == 0) { 2056 iid = i; 2057 break; 2058 } 2059 } 2060 } 2061 2062 if (iid < 0) { 2063 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2064 if (port->wwpn_iid[i].in_use == 0 && 2065 port->wwpn_iid[i].wwpn == 0 && 2066 port->wwpn_iid[i].name == NULL) { 2067 iid = i; 2068 break; 2069 } 2070 } 2071 } 2072 2073 if (iid < 0) { 2074 best = -1; 2075 best_time = INT32_MAX; 2076 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2077 if (port->wwpn_iid[i].in_use == 0) { 2078 if (port->wwpn_iid[i].last_use < best_time) { 2079 best = i; 2080 best_time = port->wwpn_iid[i].last_use; 2081 } 2082 } 2083 } 2084 iid = best; 2085 } 2086 2087 if (iid < 0) { 2088 mtx_unlock(&softc->ctl_lock); 2089 free(name, M_CTL); 2090 return (-2); 2091 } 2092 2093 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 2094 /* 2095 * This is not an error yet. 2096 */ 2097 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 2098 #if 0 2099 printf("%s: port %d iid %u WWPN %#jx arrived" 2100 " again\n", __func__, port->targ_port, 2101 iid, (uintmax_t)wwpn); 2102 #endif 2103 goto take; 2104 } 2105 if (name != NULL && port->wwpn_iid[iid].name != NULL && 2106 strcmp(name, port->wwpn_iid[iid].name) == 0) { 2107 #if 0 2108 printf("%s: port %d iid %u name '%s' arrived" 2109 " again\n", __func__, port->targ_port, 2110 iid, name); 2111 #endif 2112 goto take; 2113 } 2114 2115 /* 2116 * This is an error, but what do we do about it? The 2117 * driver is telling us we have a new WWPN for this 2118 * initiator ID, so we pretty much need to use it. 2119 */ 2120 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 2121 " but WWPN %#jx '%s' is still at that address\n", 2122 __func__, port->targ_port, iid, wwpn, name, 2123 (uintmax_t)port->wwpn_iid[iid].wwpn, 2124 port->wwpn_iid[iid].name); 2125 2126 /* 2127 * XXX KDM clear have_ca and ua_pending on each LUN for 2128 * this initiator. 
2129 */
2130 }
2131 take:
2132 free(port->wwpn_iid[iid].name, M_CTL);
2133 port->wwpn_iid[iid].name = name;
2134 port->wwpn_iid[iid].wwpn = wwpn;
2135 port->wwpn_iid[iid].in_use++;
2136 mtx_unlock(&softc->ctl_lock);
2137 ctl_isc_announce_iid(port, iid);
2138
2139 return (iid);
2140 }
2141
2142 static int
2143 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
2144 {
2145 int len;
2146
2147 switch (port->port_type) {
2148 case CTL_PORT_FC:
2149 {
2150 struct scsi_transportid_fcp *id =
2151 (struct scsi_transportid_fcp *)buf;
2152 if (port->wwpn_iid[iid].wwpn == 0)
2153 return (0);
2154 memset(id, 0, sizeof(*id));
2155 id->format_protocol = SCSI_PROTO_FC;
2156 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name);
2157 return (sizeof(*id));
2158 }
2159 case CTL_PORT_ISCSI:
2160 {
2161 struct scsi_transportid_iscsi_port *id =
2162 (struct scsi_transportid_iscsi_port *)buf;
2163 if (port->wwpn_iid[iid].name == NULL)
2164 return (0);
2165 memset(id, 0, 256);
2166 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT |
2167 SCSI_PROTO_ISCSI;
2168 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1;
2169 len = roundup2(min(len, 252), 4);
2170 scsi_ulto2b(len, id->additional_length);
2171 return (sizeof(*id) + len);
2172 }
2173 case CTL_PORT_SAS:
2174 {
2175 struct scsi_transportid_sas *id =
2176 (struct scsi_transportid_sas *)buf;
2177 if (port->wwpn_iid[iid].wwpn == 0)
2178 return (0);
2179 memset(id, 0, sizeof(*id));
2180 id->format_protocol = SCSI_PROTO_SAS;
2181 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address);
2182 return (sizeof(*id));
2183 }
2184 default:
2185 {
2186 struct scsi_transportid_spi *id =
2187 (struct scsi_transportid_spi *)buf;
2188 memset(id, 0, sizeof(*id));
2189 id->format_protocol = SCSI_PROTO_SPI;
2190 scsi_ulto2b(iid, id->scsi_addr);
2191 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id);
2192 return (sizeof(*id));
2193 }
2194 }
2195 }
2196
2197 /*
2198 * Serialize a command that went down the "wrong" side, and so was sent to
2199 * this controller for execution. The logic is a little different than the
2200 * standard case in ctl_scsiio_precheck(). Errors in this case need to get
2201 * sent back to the other side, but in the success case, we execute the
2202 * command on this side (XFER mode) or tell the other side to execute it
2203 * (SER_ONLY mode).
2204 */
2205 static void
2206 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
2207 {
2208 struct ctl_softc *softc = control_softc;
2209 union ctl_ha_msg msg_info;
2210 struct ctl_port *port;
2211 struct ctl_lun *lun;
2212 const struct ctl_cmd_entry *entry;
2213 uint32_t targ_lun;
2214
2215 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
2216 mtx_lock(&softc->ctl_lock);
2217
2218 /* Make sure that we know about this port. */
2219 port = ctl_io_port(&ctsio->io_hdr);
2220 if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) {
2221 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
2222 /*retry_count*/ 1);
2223 goto badjuju;
2224 }
2225
2226 /* Make sure that we know about this LUN. */
2227 if (targ_lun >= CTL_MAX_LUNS ||
2228 (lun = softc->ctl_luns[targ_lun]) == NULL) {
2229 mtx_unlock(&softc->ctl_lock);
2230
2231 /*
2232 * The other node would not send this request to us unless it had
2233 * received an announcement that we are the primary node for this
2234 * LUN. If this LUN does not exist now, it is probably the result
2235 * of a race, so respond to the initiator in the most opaque way.
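 * (The BUSY status below merely asks the initiator to retry later; it
 * does not claim that the LUN is gone, which may be true only momentarily.)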
2236 */
2237 ctl_set_busy(ctsio);
2238 goto badjuju;
2239 }
2240 mtx_lock(&lun->lun_lock);
2241 mtx_unlock(&softc->ctl_lock);
2242
2243 /*
2244 * If the LUN is invalid, pretend that it doesn't exist.
2245 * It will go away as soon as all pending I/Os have completed.
2246 */
2247 if (lun->flags & CTL_LUN_DISABLED) {
2248 mtx_unlock(&lun->lun_lock);
2249 ctl_set_busy(ctsio);
2250 goto badjuju;
2251 }
2252
2253 entry = ctl_get_cmd_entry(ctsio, NULL);
2254 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
2255 mtx_unlock(&lun->lun_lock);
2256 goto badjuju;
2257 }
2258
2259 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
2260 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = lun->be_lun;
2261
2262 /*
2263 * Every I/O goes into the OOA queue for a
2264 * particular LUN, and stays there until completion.
2265 */
2266 #ifdef CTL_TIME_IO
2267 if (TAILQ_EMPTY(&lun->ooa_queue))
2268 lun->idle_time += getsbinuptime() - lun->last_busy;
2269 #endif
2270 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
2271
2272 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
2273 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
2274 ooa_links))) {
2275 case CTL_ACTION_BLOCK:
2276 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
2277 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
2278 blocked_links);
2279 mtx_unlock(&lun->lun_lock);
2280 break;
2281 case CTL_ACTION_PASS:
2282 case CTL_ACTION_SKIP:
2283 if (softc->ha_mode == CTL_HA_MODE_XFER) {
2284 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
2285 ctl_enqueue_rtr((union ctl_io *)ctsio);
2286 mtx_unlock(&lun->lun_lock);
2287 } else {
2288 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
2289 mtx_unlock(&lun->lun_lock);
2290
2291 /* send msg back to other side */
2292 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
2293 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
2294 msg_info.hdr.msg_type = CTL_MSG_R2R;
2295 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
2296 sizeof(msg_info.hdr), M_WAITOK);
2297 }
2298 break;
2299 case CTL_ACTION_OVERLAP:
2300 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
2301 mtx_unlock(&lun->lun_lock);
2302 ctl_set_overlapped_cmd(ctsio);
2303 goto badjuju;
2304 case CTL_ACTION_OVERLAP_TAG:
2305 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
2306 mtx_unlock(&lun->lun_lock);
2307 ctl_set_overlapped_tag(ctsio, ctsio->tag_num);
2308 goto badjuju;
2309 case CTL_ACTION_ERROR:
2310 default:
2311 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
2312 mtx_unlock(&lun->lun_lock);
2313
2314 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
2315 /*retry_count*/ 0);
2316 badjuju:
2317 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
2318 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
2319 msg_info.hdr.serializing_sc = NULL;
2320 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
2321 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
2322 sizeof(msg_info.scsi), M_WAITOK);
2323 ctl_free_io((union ctl_io *)ctsio);
2324 break;
2325 }
2326 }
2327
2328 /*
2329 * Fill in the OOA (Order Of Arrival) queue entries for one LUN.
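 * Entries past ooa_hdr->alloc_num are only counted, not copied out, so the
 * caller can report how much space it was short.  Called from the
 * CTL_GET_OOA ioctl below with the CTL lock held, for one LUN or for all
 * of them.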
2330 */ 2331 static void 2332 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 2333 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 2334 { 2335 union ctl_io *io; 2336 2337 mtx_lock(&lun->lun_lock); 2338 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); 2339 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2340 ooa_links)) { 2341 struct ctl_ooa_entry *entry; 2342 2343 /* 2344 * If we've got more than we can fit, just count the 2345 * remaining entries. 2346 */ 2347 if (*cur_fill_num >= ooa_hdr->alloc_num) 2348 continue; 2349 2350 entry = &kern_entries[*cur_fill_num]; 2351 2352 entry->tag_num = io->scsiio.tag_num; 2353 entry->lun_num = lun->lun; 2354 #ifdef CTL_TIME_IO 2355 entry->start_bt = io->io_hdr.start_bt; 2356 #endif 2357 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 2358 entry->cdb_len = io->scsiio.cdb_len; 2359 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 2360 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 2361 2362 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 2363 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 2364 2365 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2366 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2367 2368 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2369 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2370 2371 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2372 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2373 } 2374 mtx_unlock(&lun->lun_lock); 2375 } 2376 2377 static void * 2378 ctl_copyin_alloc(void *user_addr, unsigned int len, char *error_str, 2379 size_t error_str_len) 2380 { 2381 void *kptr; 2382 2383 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 2384 2385 if (copyin(user_addr, kptr, len) != 0) { 2386 snprintf(error_str, error_str_len, "Error copying %d bytes " 2387 "from user address %p to kernel address %p", len, 2388 user_addr, kptr); 2389 free(kptr, M_CTL); 2390 return (NULL); 2391 } 2392 2393 return (kptr); 2394 } 2395 2396 static void 2397 ctl_free_args(int num_args, struct ctl_be_arg *args) 2398 { 2399 int i; 2400 2401 if (args == NULL) 2402 return; 2403 2404 for (i = 0; i < num_args; i++) { 2405 free(args[i].kname, M_CTL); 2406 free(args[i].kvalue, M_CTL); 2407 } 2408 2409 free(args, M_CTL); 2410 } 2411 2412 static struct ctl_be_arg * 2413 ctl_copyin_args(int num_args, struct ctl_be_arg *uargs, 2414 char *error_str, size_t error_str_len) 2415 { 2416 struct ctl_be_arg *args; 2417 int i; 2418 2419 args = ctl_copyin_alloc(uargs, num_args * sizeof(*args), 2420 error_str, error_str_len); 2421 2422 if (args == NULL) 2423 goto bailout; 2424 2425 for (i = 0; i < num_args; i++) { 2426 args[i].kname = NULL; 2427 args[i].kvalue = NULL; 2428 } 2429 2430 for (i = 0; i < num_args; i++) { 2431 uint8_t *tmpptr; 2432 2433 if (args[i].namelen == 0) { 2434 snprintf(error_str, error_str_len, "Argument %d " 2435 "name length is zero", i); 2436 goto bailout; 2437 } 2438 2439 args[i].kname = ctl_copyin_alloc(args[i].name, 2440 args[i].namelen, error_str, error_str_len); 2441 if (args[i].kname == NULL) 2442 goto bailout; 2443 2444 if (args[i].kname[args[i].namelen - 1] != '\0') { 2445 snprintf(error_str, error_str_len, "Argument %d " 2446 "name is not NUL-terminated", i); 2447 goto bailout; 2448 } 2449 2450 if (args[i].flags & CTL_BEARG_RD) { 2451 if (args[i].vallen == 0) { 2452 snprintf(error_str, error_str_len, "Argument %d " 2453 "value length is zero", i); 2454 goto bailout; 2455 } 2456 2457 tmpptr = ctl_copyin_alloc(args[i].value, 2458 args[i].vallen, error_str, error_str_len); 2459 if (tmpptr == NULL) 2460 goto bailout; 
2461
2462 if ((args[i].flags & CTL_BEARG_ASCII)
2463 && (tmpptr[args[i].vallen - 1] != '\0')) {
2464 snprintf(error_str, error_str_len, "Argument "
2465 "%d value is not NUL-terminated", i);
2466 free(tmpptr, M_CTL);
2467 goto bailout;
2468 }
2469 args[i].kvalue = tmpptr;
2470 } else {
2471 args[i].kvalue = malloc(args[i].vallen,
2472 M_CTL, M_WAITOK | M_ZERO);
2473 }
2474 }
2475
2476 return (args);
2477 bailout:
2478
2479 ctl_free_args(num_args, args);
2480
2481 return (NULL);
2482 }
2483
2484 static void
2485 ctl_copyout_args(int num_args, struct ctl_be_arg *args)
2486 {
2487 int i;
2488
2489 for (i = 0; i < num_args; i++) {
2490 if (args[i].flags & CTL_BEARG_WR)
2491 copyout(args[i].kvalue, args[i].value, args[i].vallen);
2492 }
2493 }
2494
2495 /*
2496 * Escape characters that are illegal or not recommended in XML.
2497 */
2498 int
2499 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size)
2500 {
2501 char *end = str + size;
2502 int retval;
2503
2504 retval = 0;
2505
2506 for (; *str && str < end; str++) {
2507 switch (*str) {
2508 case '&':
2509 retval = sbuf_printf(sb, "&amp;");
2510 break;
2511 case '>':
2512 retval = sbuf_printf(sb, "&gt;");
2513 break;
2514 case '<':
2515 retval = sbuf_printf(sb, "&lt;");
2516 break;
2517 default:
2518 retval = sbuf_putc(sb, *str);
2519 break;
2520 }
2521
2522 if (retval != 0)
2523 break;
2524
2525 }
2526
2527 return (retval);
2528 }
2529
2530 static void
2531 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb)
2532 {
2533 struct scsi_vpd_id_descriptor *desc;
2534 int i;
2535
2536 if (id == NULL || id->len < 4)
2537 return;
2538 desc = (struct scsi_vpd_id_descriptor *)id->data;
2539 switch (desc->id_type & SVPD_ID_TYPE_MASK) {
2540 case SVPD_ID_TYPE_T10:
2541 sbuf_printf(sb, "t10.");
2542 break;
2543 case SVPD_ID_TYPE_EUI64:
2544 sbuf_printf(sb, "eui.");
2545 break;
2546 case SVPD_ID_TYPE_NAA:
2547 sbuf_printf(sb, "naa.");
2548 break;
2549 case SVPD_ID_TYPE_SCSI_NAME:
2550 break;
2551 }
2552 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) {
2553 case SVPD_ID_CODESET_BINARY:
2554 for (i = 0; i < desc->length; i++)
2555 sbuf_printf(sb, "%02x", desc->identifier[i]);
2556 break;
2557 case SVPD_ID_CODESET_ASCII:
2558 sbuf_printf(sb, "%.*s", (int)desc->length,
2559 (char *)desc->identifier);
2560 break;
2561 case SVPD_ID_CODESET_UTF8:
2562 sbuf_printf(sb, "%s", (char *)desc->identifier);
2563 break;
2564 }
2565 }
2566
2567 static int
2568 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
2569 struct thread *td)
2570 {
2571 struct ctl_softc *softc = dev->si_drv1;
2572 struct ctl_lun *lun;
2573 int retval;
2574
2575 retval = 0;
2576
2577 switch (cmd) {
2578 case CTL_IO:
2579 retval = ctl_ioctl_io(dev, cmd, addr, flag, td);
2580 break;
2581 case CTL_ENABLE_PORT:
2582 case CTL_DISABLE_PORT:
2583 case CTL_SET_PORT_WWNS: {
2584 struct ctl_port *port;
2585 struct ctl_port_entry *entry;
2586
2587 entry = (struct ctl_port_entry *)addr;
2588
2589 mtx_lock(&softc->ctl_lock);
2590 STAILQ_FOREACH(port, &softc->port_list, links) {
2591 int action, done;
2592
2593 if (port->targ_port < softc->port_min ||
2594 port->targ_port >= softc->port_max)
2595 continue;
2596
2597 action = 0;
2598 done = 0;
2599 if ((entry->port_type == CTL_PORT_NONE)
2600 && (entry->targ_port == port->targ_port)) {
2601 /*
2602 * If the user only wants to enable or
2603 * disable or set WWNs on a specific port,
2604 * do the operation and we're done.
2605 */ 2606 action = 1; 2607 done = 1; 2608 } else if (entry->port_type & port->port_type) { 2609 /* 2610 * Compare the user's type mask with the 2611 * particular frontend type to see if we 2612 * have a match. 2613 */ 2614 action = 1; 2615 done = 0; 2616 2617 /* 2618 * Make sure the user isn't trying to set 2619 * WWNs on multiple ports at the same time. 2620 */ 2621 if (cmd == CTL_SET_PORT_WWNS) { 2622 printf("%s: Can't set WWNs on " 2623 "multiple ports\n", __func__); 2624 retval = EINVAL; 2625 break; 2626 } 2627 } 2628 if (action == 0) 2629 continue; 2630 2631 /* 2632 * XXX KDM we have to drop the lock here, because 2633 * the online/offline operations can potentially 2634 * block. We need to reference count the frontends 2635 * so they can't go away, 2636 */ 2637 if (cmd == CTL_ENABLE_PORT) { 2638 mtx_unlock(&softc->ctl_lock); 2639 ctl_port_online(port); 2640 mtx_lock(&softc->ctl_lock); 2641 } else if (cmd == CTL_DISABLE_PORT) { 2642 mtx_unlock(&softc->ctl_lock); 2643 ctl_port_offline(port); 2644 mtx_lock(&softc->ctl_lock); 2645 } else if (cmd == CTL_SET_PORT_WWNS) { 2646 ctl_port_set_wwns(port, 2647 (entry->flags & CTL_PORT_WWNN_VALID) ? 2648 1 : 0, entry->wwnn, 2649 (entry->flags & CTL_PORT_WWPN_VALID) ? 2650 1 : 0, entry->wwpn); 2651 } 2652 if (done != 0) 2653 break; 2654 } 2655 mtx_unlock(&softc->ctl_lock); 2656 break; 2657 } 2658 case CTL_GET_OOA: { 2659 struct ctl_ooa *ooa_hdr; 2660 struct ctl_ooa_entry *entries; 2661 uint32_t cur_fill_num; 2662 2663 ooa_hdr = (struct ctl_ooa *)addr; 2664 2665 if ((ooa_hdr->alloc_len == 0) 2666 || (ooa_hdr->alloc_num == 0)) { 2667 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2668 "must be non-zero\n", __func__, 2669 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2670 retval = EINVAL; 2671 break; 2672 } 2673 2674 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2675 sizeof(struct ctl_ooa_entry))) { 2676 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2677 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2678 __func__, ooa_hdr->alloc_len, 2679 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2680 retval = EINVAL; 2681 break; 2682 } 2683 2684 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2685 if (entries == NULL) { 2686 printf("%s: could not allocate %d bytes for OOA " 2687 "dump\n", __func__, ooa_hdr->alloc_len); 2688 retval = ENOMEM; 2689 break; 2690 } 2691 2692 mtx_lock(&softc->ctl_lock); 2693 if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 && 2694 (ooa_hdr->lun_num >= CTL_MAX_LUNS || 2695 softc->ctl_luns[ooa_hdr->lun_num] == NULL)) { 2696 mtx_unlock(&softc->ctl_lock); 2697 free(entries, M_CTL); 2698 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2699 __func__, (uintmax_t)ooa_hdr->lun_num); 2700 retval = EINVAL; 2701 break; 2702 } 2703 2704 cur_fill_num = 0; 2705 2706 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2707 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2708 ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2709 ooa_hdr, entries); 2710 } 2711 } else { 2712 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2713 ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, 2714 entries); 2715 } 2716 mtx_unlock(&softc->ctl_lock); 2717 2718 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2719 ooa_hdr->fill_len = ooa_hdr->fill_num * 2720 sizeof(struct ctl_ooa_entry); 2721 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2722 if (retval != 0) { 2723 printf("%s: error copying out %d bytes for OOA dump\n", 2724 __func__, ooa_hdr->fill_len); 2725 } 2726 2727 getbinuptime(&ooa_hdr->cur_bt); 2728 2729 if (cur_fill_num > 
ooa_hdr->alloc_num) { 2730 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2731 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2732 } else { 2733 ooa_hdr->dropped_num = 0; 2734 ooa_hdr->status = CTL_OOA_OK; 2735 } 2736 2737 free(entries, M_CTL); 2738 break; 2739 } 2740 case CTL_DELAY_IO: { 2741 struct ctl_io_delay_info *delay_info; 2742 2743 delay_info = (struct ctl_io_delay_info *)addr; 2744 2745 #ifdef CTL_IO_DELAY 2746 mtx_lock(&softc->ctl_lock); 2747 if (delay_info->lun_id >= CTL_MAX_LUNS || 2748 (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) { 2749 mtx_unlock(&softc->ctl_lock); 2750 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2751 break; 2752 } 2753 mtx_lock(&lun->lun_lock); 2754 mtx_unlock(&softc->ctl_lock); 2755 delay_info->status = CTL_DELAY_STATUS_OK; 2756 switch (delay_info->delay_type) { 2757 case CTL_DELAY_TYPE_CONT: 2758 case CTL_DELAY_TYPE_ONESHOT: 2759 break; 2760 default: 2761 delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE; 2762 break; 2763 } 2764 switch (delay_info->delay_loc) { 2765 case CTL_DELAY_LOC_DATAMOVE: 2766 lun->delay_info.datamove_type = delay_info->delay_type; 2767 lun->delay_info.datamove_delay = delay_info->delay_secs; 2768 break; 2769 case CTL_DELAY_LOC_DONE: 2770 lun->delay_info.done_type = delay_info->delay_type; 2771 lun->delay_info.done_delay = delay_info->delay_secs; 2772 break; 2773 default: 2774 delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; 2775 break; 2776 } 2777 mtx_unlock(&lun->lun_lock); 2778 #else 2779 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2780 #endif /* CTL_IO_DELAY */ 2781 break; 2782 } 2783 case CTL_GETSTATS: { 2784 struct ctl_stats *stats; 2785 int i; 2786 2787 stats = (struct ctl_stats *)addr; 2788 2789 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) > 2790 stats->alloc_len) { 2791 stats->status = CTL_SS_NEED_MORE_SPACE; 2792 stats->num_luns = softc->num_luns; 2793 break; 2794 } 2795 /* 2796 * XXX KDM no locking here. If the LUN list changes, 2797 * things can blow up. 2798 */ 2799 i = 0; 2800 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2801 retval = copyout(&lun->stats, &stats->lun_stats[i++], 2802 sizeof(lun->stats)); 2803 if (retval != 0) 2804 break; 2805 } 2806 stats->num_luns = softc->num_luns; 2807 stats->fill_len = sizeof(struct ctl_lun_io_stats) * 2808 softc->num_luns; 2809 stats->status = CTL_SS_OK; 2810 #ifdef CTL_TIME_IO 2811 stats->flags = CTL_STATS_FLAG_TIME_VALID; 2812 #else 2813 stats->flags = CTL_STATS_FLAG_NONE; 2814 #endif 2815 getnanouptime(&stats->timestamp); 2816 break; 2817 } 2818 case CTL_ERROR_INJECT: { 2819 struct ctl_error_desc *err_desc, *new_err_desc; 2820 2821 err_desc = (struct ctl_error_desc *)addr; 2822 2823 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2824 M_WAITOK | M_ZERO); 2825 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2826 2827 mtx_lock(&softc->ctl_lock); 2828 if (err_desc->lun_id >= CTL_MAX_LUNS || 2829 (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) { 2830 mtx_unlock(&softc->ctl_lock); 2831 free(new_err_desc, M_CTL); 2832 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2833 __func__, (uintmax_t)err_desc->lun_id); 2834 retval = EINVAL; 2835 break; 2836 } 2837 mtx_lock(&lun->lun_lock); 2838 mtx_unlock(&softc->ctl_lock); 2839 2840 /* 2841 * We could do some checking here to verify the validity 2842 * of the request, but given the complexity of error 2843 * injection requests, the checking logic would be fairly 2844 * complex. 
2845 * 2846 * For now, if the request is invalid, it just won't get 2847 * executed and might get deleted. 2848 */ 2849 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2850 2851 /* 2852 * XXX KDM check to make sure the serial number is unique, 2853 * in case we somehow manage to wrap. That shouldn't 2854 * happen for a very long time, but it's the right thing to 2855 * do. 2856 */ 2857 new_err_desc->serial = lun->error_serial; 2858 err_desc->serial = lun->error_serial; 2859 lun->error_serial++; 2860 2861 mtx_unlock(&lun->lun_lock); 2862 break; 2863 } 2864 case CTL_ERROR_INJECT_DELETE: { 2865 struct ctl_error_desc *delete_desc, *desc, *desc2; 2866 int delete_done; 2867 2868 delete_desc = (struct ctl_error_desc *)addr; 2869 delete_done = 0; 2870 2871 mtx_lock(&softc->ctl_lock); 2872 if (delete_desc->lun_id >= CTL_MAX_LUNS || 2873 (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) { 2874 mtx_unlock(&softc->ctl_lock); 2875 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2876 __func__, (uintmax_t)delete_desc->lun_id); 2877 retval = EINVAL; 2878 break; 2879 } 2880 mtx_lock(&lun->lun_lock); 2881 mtx_unlock(&softc->ctl_lock); 2882 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2883 if (desc->serial != delete_desc->serial) 2884 continue; 2885 2886 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2887 links); 2888 free(desc, M_CTL); 2889 delete_done = 1; 2890 } 2891 mtx_unlock(&lun->lun_lock); 2892 if (delete_done == 0) { 2893 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2894 "error serial %ju on LUN %u\n", __func__, 2895 delete_desc->serial, delete_desc->lun_id); 2896 retval = EINVAL; 2897 break; 2898 } 2899 break; 2900 } 2901 case CTL_DUMP_STRUCTS: { 2902 int j, k; 2903 struct ctl_port *port; 2904 struct ctl_frontend *fe; 2905 2906 mtx_lock(&softc->ctl_lock); 2907 printf("CTL Persistent Reservation information start:\n"); 2908 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2909 mtx_lock(&lun->lun_lock); 2910 if ((lun->flags & CTL_LUN_DISABLED) != 0) { 2911 mtx_unlock(&lun->lun_lock); 2912 continue; 2913 } 2914 2915 for (j = 0; j < CTL_MAX_PORTS; j++) { 2916 if (lun->pr_keys[j] == NULL) 2917 continue; 2918 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2919 if (lun->pr_keys[j][k] == 0) 2920 continue; 2921 printf(" LUN %ju port %d iid %d key " 2922 "%#jx\n", lun->lun, j, k, 2923 (uintmax_t)lun->pr_keys[j][k]); 2924 } 2925 } 2926 mtx_unlock(&lun->lun_lock); 2927 } 2928 printf("CTL Persistent Reservation information end\n"); 2929 printf("CTL Ports:\n"); 2930 STAILQ_FOREACH(port, &softc->port_list, links) { 2931 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2932 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2933 port->frontend->name, port->port_type, 2934 port->physical_port, port->virtual_port, 2935 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2936 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2937 if (port->wwpn_iid[j].in_use == 0 && 2938 port->wwpn_iid[j].wwpn == 0 && 2939 port->wwpn_iid[j].name == NULL) 2940 continue; 2941 2942 printf(" iid %u use %d WWPN %#jx '%s'\n", 2943 j, port->wwpn_iid[j].in_use, 2944 (uintmax_t)port->wwpn_iid[j].wwpn, 2945 port->wwpn_iid[j].name); 2946 } 2947 } 2948 printf("CTL Port information end\n"); 2949 mtx_unlock(&softc->ctl_lock); 2950 /* 2951 * XXX KDM calling this without a lock. We'd likely want 2952 * to drop the lock before calling the frontend's dump 2953 * routine anyway. 
2954 */ 2955 printf("CTL Frontends:\n"); 2956 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2957 printf(" Frontend '%s'\n", fe->name); 2958 if (fe->fe_dump != NULL) 2959 fe->fe_dump(); 2960 } 2961 printf("CTL Frontend information end\n"); 2962 break; 2963 } 2964 case CTL_LUN_REQ: { 2965 struct ctl_lun_req *lun_req; 2966 struct ctl_backend_driver *backend; 2967 2968 lun_req = (struct ctl_lun_req *)addr; 2969 2970 backend = ctl_backend_find(lun_req->backend); 2971 if (backend == NULL) { 2972 lun_req->status = CTL_LUN_ERROR; 2973 snprintf(lun_req->error_str, 2974 sizeof(lun_req->error_str), 2975 "Backend \"%s\" not found.", 2976 lun_req->backend); 2977 break; 2978 } 2979 if (lun_req->num_be_args > 0) { 2980 lun_req->kern_be_args = ctl_copyin_args( 2981 lun_req->num_be_args, 2982 lun_req->be_args, 2983 lun_req->error_str, 2984 sizeof(lun_req->error_str)); 2985 if (lun_req->kern_be_args == NULL) { 2986 lun_req->status = CTL_LUN_ERROR; 2987 break; 2988 } 2989 } 2990 2991 retval = backend->ioctl(dev, cmd, addr, flag, td); 2992 2993 if (lun_req->num_be_args > 0) { 2994 ctl_copyout_args(lun_req->num_be_args, 2995 lun_req->kern_be_args); 2996 ctl_free_args(lun_req->num_be_args, 2997 lun_req->kern_be_args); 2998 } 2999 break; 3000 } 3001 case CTL_LUN_LIST: { 3002 struct sbuf *sb; 3003 struct ctl_lun_list *list; 3004 struct ctl_option *opt; 3005 3006 list = (struct ctl_lun_list *)addr; 3007 3008 /* 3009 * Allocate a fixed length sbuf here, based on the length 3010 * of the user's buffer. We could allocate an auto-extending 3011 * buffer, and then tell the user how much larger our 3012 * amount of data is than his buffer, but that presents 3013 * some problems: 3014 * 3015 * 1. The sbuf(9) routines use a blocking malloc, and so 3016 * we can't hold a lock while calling them with an 3017 * auto-extending buffer. 3018 * 3019 * 2. There is not currently a LUN reference counting 3020 * mechanism, outside of outstanding transactions on 3021 * the LUN's OOA queue. So a LUN could go away on us 3022 * while we're getting the LUN number, backend-specific 3023 * information, etc. Thus, given the way things 3024 * currently work, we need to hold the CTL lock while 3025 * grabbing LUN information. 3026 * 3027 * So, from the user's standpoint, the best thing to do is 3028 * allocate what he thinks is a reasonable buffer length, 3029 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 3030 * double the buffer length and try again. (And repeat 3031 * that until he succeeds.) 3032 */ 3033 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3034 if (sb == NULL) { 3035 list->status = CTL_LUN_LIST_ERROR; 3036 snprintf(list->error_str, sizeof(list->error_str), 3037 "Unable to allocate %d bytes for LUN list", 3038 list->alloc_len); 3039 break; 3040 } 3041 3042 sbuf_printf(sb, "<ctllunlist>\n"); 3043 3044 mtx_lock(&softc->ctl_lock); 3045 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3046 mtx_lock(&lun->lun_lock); 3047 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3048 (uintmax_t)lun->lun); 3049 3050 /* 3051 * Bail out as soon as we see that we've overfilled 3052 * the buffer. 3053 */ 3054 if (retval != 0) 3055 break; 3056 3057 retval = sbuf_printf(sb, "\t<backend_type>%s" 3058 "</backend_type>\n", 3059 (lun->backend == NULL) ? 
"none" : 3060 lun->backend->name); 3061 3062 if (retval != 0) 3063 break; 3064 3065 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3066 lun->be_lun->lun_type); 3067 3068 if (retval != 0) 3069 break; 3070 3071 if (lun->backend == NULL) { 3072 retval = sbuf_printf(sb, "</lun>\n"); 3073 if (retval != 0) 3074 break; 3075 continue; 3076 } 3077 3078 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3079 (lun->be_lun->maxlba > 0) ? 3080 lun->be_lun->maxlba + 1 : 0); 3081 3082 if (retval != 0) 3083 break; 3084 3085 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3086 lun->be_lun->blocksize); 3087 3088 if (retval != 0) 3089 break; 3090 3091 retval = sbuf_printf(sb, "\t<serial_number>"); 3092 3093 if (retval != 0) 3094 break; 3095 3096 retval = ctl_sbuf_printf_esc(sb, 3097 lun->be_lun->serial_num, 3098 sizeof(lun->be_lun->serial_num)); 3099 3100 if (retval != 0) 3101 break; 3102 3103 retval = sbuf_printf(sb, "</serial_number>\n"); 3104 3105 if (retval != 0) 3106 break; 3107 3108 retval = sbuf_printf(sb, "\t<device_id>"); 3109 3110 if (retval != 0) 3111 break; 3112 3113 retval = ctl_sbuf_printf_esc(sb, 3114 lun->be_lun->device_id, 3115 sizeof(lun->be_lun->device_id)); 3116 3117 if (retval != 0) 3118 break; 3119 3120 retval = sbuf_printf(sb, "</device_id>\n"); 3121 3122 if (retval != 0) 3123 break; 3124 3125 if (lun->backend->lun_info != NULL) { 3126 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 3127 if (retval != 0) 3128 break; 3129 } 3130 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 3131 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3132 opt->name, opt->value, opt->name); 3133 if (retval != 0) 3134 break; 3135 } 3136 3137 retval = sbuf_printf(sb, "</lun>\n"); 3138 3139 if (retval != 0) 3140 break; 3141 mtx_unlock(&lun->lun_lock); 3142 } 3143 if (lun != NULL) 3144 mtx_unlock(&lun->lun_lock); 3145 mtx_unlock(&softc->ctl_lock); 3146 3147 if ((retval != 0) 3148 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3149 retval = 0; 3150 sbuf_delete(sb); 3151 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3152 snprintf(list->error_str, sizeof(list->error_str), 3153 "Out of space, %d bytes is too small", 3154 list->alloc_len); 3155 break; 3156 } 3157 3158 sbuf_finish(sb); 3159 3160 retval = copyout(sbuf_data(sb), list->lun_xml, 3161 sbuf_len(sb) + 1); 3162 3163 list->fill_len = sbuf_len(sb) + 1; 3164 list->status = CTL_LUN_LIST_OK; 3165 sbuf_delete(sb); 3166 break; 3167 } 3168 case CTL_ISCSI: { 3169 struct ctl_iscsi *ci; 3170 struct ctl_frontend *fe; 3171 3172 ci = (struct ctl_iscsi *)addr; 3173 3174 fe = ctl_frontend_find("iscsi"); 3175 if (fe == NULL) { 3176 ci->status = CTL_ISCSI_ERROR; 3177 snprintf(ci->error_str, sizeof(ci->error_str), 3178 "Frontend \"iscsi\" not found."); 3179 break; 3180 } 3181 3182 retval = fe->ioctl(dev, cmd, addr, flag, td); 3183 break; 3184 } 3185 case CTL_PORT_REQ: { 3186 struct ctl_req *req; 3187 struct ctl_frontend *fe; 3188 3189 req = (struct ctl_req *)addr; 3190 3191 fe = ctl_frontend_find(req->driver); 3192 if (fe == NULL) { 3193 req->status = CTL_LUN_ERROR; 3194 snprintf(req->error_str, sizeof(req->error_str), 3195 "Frontend \"%s\" not found.", req->driver); 3196 break; 3197 } 3198 if (req->num_args > 0) { 3199 req->kern_args = ctl_copyin_args(req->num_args, 3200 req->args, req->error_str, sizeof(req->error_str)); 3201 if (req->kern_args == NULL) { 3202 req->status = CTL_LUN_ERROR; 3203 break; 3204 } 3205 } 3206 3207 if (fe->ioctl) 3208 retval = fe->ioctl(dev, cmd, addr, flag, td); 3209 else 3210 retval = ENODEV; 3211 3212 if 
(req->num_args > 0) { 3213 ctl_copyout_args(req->num_args, req->kern_args); 3214 ctl_free_args(req->num_args, req->kern_args); 3215 } 3216 break; 3217 } 3218 case CTL_PORT_LIST: { 3219 struct sbuf *sb; 3220 struct ctl_port *port; 3221 struct ctl_lun_list *list; 3222 struct ctl_option *opt; 3223 int j; 3224 uint32_t plun; 3225 3226 list = (struct ctl_lun_list *)addr; 3227 3228 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3229 if (sb == NULL) { 3230 list->status = CTL_LUN_LIST_ERROR; 3231 snprintf(list->error_str, sizeof(list->error_str), 3232 "Unable to allocate %d bytes for LUN list", 3233 list->alloc_len); 3234 break; 3235 } 3236 3237 sbuf_printf(sb, "<ctlportlist>\n"); 3238 3239 mtx_lock(&softc->ctl_lock); 3240 STAILQ_FOREACH(port, &softc->port_list, links) { 3241 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3242 (uintmax_t)port->targ_port); 3243 3244 /* 3245 * Bail out as soon as we see that we've overfilled 3246 * the buffer. 3247 */ 3248 if (retval != 0) 3249 break; 3250 3251 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3252 "</frontend_type>\n", port->frontend->name); 3253 if (retval != 0) 3254 break; 3255 3256 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3257 port->port_type); 3258 if (retval != 0) 3259 break; 3260 3261 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3262 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 3263 if (retval != 0) 3264 break; 3265 3266 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3267 port->port_name); 3268 if (retval != 0) 3269 break; 3270 3271 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3272 port->physical_port); 3273 if (retval != 0) 3274 break; 3275 3276 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3277 port->virtual_port); 3278 if (retval != 0) 3279 break; 3280 3281 if (port->target_devid != NULL) { 3282 sbuf_printf(sb, "\t<target>"); 3283 ctl_id_sbuf(port->target_devid, sb); 3284 sbuf_printf(sb, "</target>\n"); 3285 } 3286 3287 if (port->port_devid != NULL) { 3288 sbuf_printf(sb, "\t<port>"); 3289 ctl_id_sbuf(port->port_devid, sb); 3290 sbuf_printf(sb, "</port>\n"); 3291 } 3292 3293 if (port->port_info != NULL) { 3294 retval = port->port_info(port->onoff_arg, sb); 3295 if (retval != 0) 3296 break; 3297 } 3298 STAILQ_FOREACH(opt, &port->options, links) { 3299 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3300 opt->name, opt->value, opt->name); 3301 if (retval != 0) 3302 break; 3303 } 3304 3305 if (port->lun_map != NULL) { 3306 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 3307 for (j = 0; j < port->lun_map_size; j++) { 3308 plun = ctl_lun_map_from_port(port, j); 3309 if (plun == UINT32_MAX) 3310 continue; 3311 sbuf_printf(sb, 3312 "\t<lun id=\"%u\">%u</lun>\n", 3313 j, plun); 3314 } 3315 } 3316 3317 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3318 if (port->wwpn_iid[j].in_use == 0 || 3319 (port->wwpn_iid[j].wwpn == 0 && 3320 port->wwpn_iid[j].name == NULL)) 3321 continue; 3322 3323 if (port->wwpn_iid[j].name != NULL) 3324 retval = sbuf_printf(sb, 3325 "\t<initiator id=\"%u\">%s</initiator>\n", 3326 j, port->wwpn_iid[j].name); 3327 else 3328 retval = sbuf_printf(sb, 3329 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3330 j, port->wwpn_iid[j].wwpn); 3331 if (retval != 0) 3332 break; 3333 } 3334 if (retval != 0) 3335 break; 3336 3337 retval = sbuf_printf(sb, "</targ_port>\n"); 3338 if (retval != 0) 3339 break; 3340 } 3341 mtx_unlock(&softc->ctl_lock); 3342 3343 if ((retval != 0) 3344 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3345 
retval = 0; 3346 sbuf_delete(sb); 3347 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3348 snprintf(list->error_str, sizeof(list->error_str), 3349 "Out of space, %d bytes is too small", 3350 list->alloc_len); 3351 break; 3352 } 3353 3354 sbuf_finish(sb); 3355 3356 retval = copyout(sbuf_data(sb), list->lun_xml, 3357 sbuf_len(sb) + 1); 3358 3359 list->fill_len = sbuf_len(sb) + 1; 3360 list->status = CTL_LUN_LIST_OK; 3361 sbuf_delete(sb); 3362 break; 3363 } 3364 case CTL_LUN_MAP: { 3365 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3366 struct ctl_port *port; 3367 3368 mtx_lock(&softc->ctl_lock); 3369 if (lm->port < softc->port_min || 3370 lm->port >= softc->port_max || 3371 (port = softc->ctl_ports[lm->port]) == NULL) { 3372 mtx_unlock(&softc->ctl_lock); 3373 return (ENXIO); 3374 } 3375 if (port->status & CTL_PORT_STATUS_ONLINE) { 3376 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3377 if (ctl_lun_map_to_port(port, lun->lun) == 3378 UINT32_MAX) 3379 continue; 3380 mtx_lock(&lun->lun_lock); 3381 ctl_est_ua_port(lun, lm->port, -1, 3382 CTL_UA_LUN_CHANGE); 3383 mtx_unlock(&lun->lun_lock); 3384 } 3385 } 3386 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3387 if (lm->plun != UINT32_MAX) { 3388 if (lm->lun == UINT32_MAX) 3389 retval = ctl_lun_map_unset(port, lm->plun); 3390 else if (lm->lun < CTL_MAX_LUNS && 3391 softc->ctl_luns[lm->lun] != NULL) 3392 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3393 else 3394 return (ENXIO); 3395 } else { 3396 if (lm->lun == UINT32_MAX) 3397 retval = ctl_lun_map_deinit(port); 3398 else 3399 retval = ctl_lun_map_init(port); 3400 } 3401 if (port->status & CTL_PORT_STATUS_ONLINE) 3402 ctl_isc_announce_port(port); 3403 break; 3404 } 3405 default: { 3406 /* XXX KDM should we fix this? */ 3407 #if 0 3408 struct ctl_backend_driver *backend; 3409 unsigned int type; 3410 int found; 3411 3412 found = 0; 3413 3414 /* 3415 * We encode the backend type as the ioctl type for backend 3416 * ioctls. So parse it out here, and then search for a 3417 * backend of this type. 
3418 */ 3419 type = _IOC_TYPE(cmd); 3420 3421 STAILQ_FOREACH(backend, &softc->be_list, links) { 3422 if (backend->type == type) { 3423 found = 1; 3424 break; 3425 } 3426 } 3427 if (found == 0) { 3428 printf("ctl: unknown ioctl command %#lx or backend " 3429 "%d\n", cmd, type); 3430 retval = EINVAL; 3431 break; 3432 } 3433 retval = backend->ioctl(dev, cmd, addr, flag, td); 3434 #endif 3435 retval = ENOTTY; 3436 break; 3437 } 3438 } 3439 return (retval); 3440 } 3441 3442 uint32_t 3443 ctl_get_initindex(struct ctl_nexus *nexus) 3444 { 3445 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3446 } 3447 3448 int 3449 ctl_lun_map_init(struct ctl_port *port) 3450 { 3451 struct ctl_softc *softc = port->ctl_softc; 3452 struct ctl_lun *lun; 3453 int size = ctl_lun_map_size; 3454 uint32_t i; 3455 3456 if (port->lun_map == NULL || port->lun_map_size < size) { 3457 port->lun_map_size = 0; 3458 free(port->lun_map, M_CTL); 3459 port->lun_map = malloc(size * sizeof(uint32_t), 3460 M_CTL, M_NOWAIT); 3461 } 3462 if (port->lun_map == NULL) 3463 return (ENOMEM); 3464 for (i = 0; i < size; i++) 3465 port->lun_map[i] = UINT32_MAX; 3466 port->lun_map_size = size; 3467 if (port->status & CTL_PORT_STATUS_ONLINE) { 3468 if (port->lun_disable != NULL) { 3469 STAILQ_FOREACH(lun, &softc->lun_list, links) 3470 port->lun_disable(port->targ_lun_arg, lun->lun); 3471 } 3472 ctl_isc_announce_port(port); 3473 } 3474 return (0); 3475 } 3476 3477 int 3478 ctl_lun_map_deinit(struct ctl_port *port) 3479 { 3480 struct ctl_softc *softc = port->ctl_softc; 3481 struct ctl_lun *lun; 3482 3483 if (port->lun_map == NULL) 3484 return (0); 3485 port->lun_map_size = 0; 3486 free(port->lun_map, M_CTL); 3487 port->lun_map = NULL; 3488 if (port->status & CTL_PORT_STATUS_ONLINE) { 3489 if (port->lun_enable != NULL) { 3490 STAILQ_FOREACH(lun, &softc->lun_list, links) 3491 port->lun_enable(port->targ_lun_arg, lun->lun); 3492 } 3493 ctl_isc_announce_port(port); 3494 } 3495 return (0); 3496 } 3497 3498 int 3499 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3500 { 3501 int status; 3502 uint32_t old; 3503 3504 if (port->lun_map == NULL) { 3505 status = ctl_lun_map_init(port); 3506 if (status != 0) 3507 return (status); 3508 } 3509 if (plun >= port->lun_map_size) 3510 return (EINVAL); 3511 old = port->lun_map[plun]; 3512 port->lun_map[plun] = glun; 3513 if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) { 3514 if (port->lun_enable != NULL) 3515 port->lun_enable(port->targ_lun_arg, plun); 3516 ctl_isc_announce_port(port); 3517 } 3518 return (0); 3519 } 3520 3521 int 3522 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3523 { 3524 uint32_t old; 3525 3526 if (port->lun_map == NULL || plun >= port->lun_map_size) 3527 return (0); 3528 old = port->lun_map[plun]; 3529 port->lun_map[plun] = UINT32_MAX; 3530 if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) { 3531 if (port->lun_disable != NULL) 3532 port->lun_disable(port->targ_lun_arg, plun); 3533 ctl_isc_announce_port(port); 3534 } 3535 return (0); 3536 } 3537 3538 uint32_t 3539 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3540 { 3541 3542 if (port == NULL) 3543 return (UINT32_MAX); 3544 if (port->lun_map == NULL) 3545 return (lun_id); 3546 if (lun_id > port->lun_map_size) 3547 return (UINT32_MAX); 3548 return (port->lun_map[lun_id]); 3549 } 3550 3551 uint32_t 3552 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3553 { 3554 uint32_t i; 3555 3556 if (port == NULL) 3557 return (UINT32_MAX); 3558 if 
(port->lun_map == NULL) 3559 return (lun_id); 3560 for (i = 0; i < port->lun_map_size; i++) { 3561 if (port->lun_map[i] == lun_id) 3562 return (i); 3563 } 3564 return (UINT32_MAX); 3565 } 3566 3567 uint32_t 3568 ctl_decode_lun(uint64_t encoded) 3569 { 3570 uint8_t lun[8]; 3571 uint32_t result = 0xffffffff; 3572 3573 be64enc(lun, encoded); 3574 switch (lun[0] & RPL_LUNDATA_ATYP_MASK) { 3575 case RPL_LUNDATA_ATYP_PERIPH: 3576 if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 && 3577 lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0) 3578 result = lun[1]; 3579 break; 3580 case RPL_LUNDATA_ATYP_FLAT: 3581 if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 && 3582 lun[6] == 0 && lun[7] == 0) 3583 result = ((lun[0] & 0x3f) << 8) + lun[1]; 3584 break; 3585 case RPL_LUNDATA_ATYP_EXTLUN: 3586 switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) { 3587 case 0x02: 3588 switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) { 3589 case 0x00: 3590 result = lun[1]; 3591 break; 3592 case 0x10: 3593 result = (lun[1] << 16) + (lun[2] << 8) + 3594 lun[3]; 3595 break; 3596 case 0x20: 3597 if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0) 3598 result = (lun[2] << 24) + 3599 (lun[3] << 16) + (lun[4] << 8) + 3600 lun[5]; 3601 break; 3602 } 3603 break; 3604 case RPL_LUNDATA_EXT_EAM_NOT_SPEC: 3605 result = 0xffffffff; 3606 break; 3607 } 3608 break; 3609 } 3610 return (result); 3611 } 3612 3613 uint64_t 3614 ctl_encode_lun(uint32_t decoded) 3615 { 3616 uint64_t l = decoded; 3617 3618 if (l <= 0xff) 3619 return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48)); 3620 if (l <= 0x3fff) 3621 return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48)); 3622 if (l <= 0xffffff) 3623 return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) | 3624 (l << 32)); 3625 return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16)); 3626 } 3627 3628 static struct ctl_port * 3629 ctl_io_port(struct ctl_io_hdr *io_hdr) 3630 { 3631 3632 return (control_softc->ctl_ports[io_hdr->nexus.targ_port]); 3633 } 3634 3635 int 3636 ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) 3637 { 3638 int i; 3639 3640 for (i = first; i < last; i++) { 3641 if ((mask[i / 32] & (1 << (i % 32))) == 0) 3642 return (i); 3643 } 3644 return (-1); 3645 } 3646 3647 int 3648 ctl_set_mask(uint32_t *mask, uint32_t bit) 3649 { 3650 uint32_t chunk, piece; 3651 3652 chunk = bit >> 5; 3653 piece = bit % (sizeof(uint32_t) * 8); 3654 3655 if ((mask[chunk] & (1 << piece)) != 0) 3656 return (-1); 3657 else 3658 mask[chunk] |= (1 << piece); 3659 3660 return (0); 3661 } 3662 3663 int 3664 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3665 { 3666 uint32_t chunk, piece; 3667 3668 chunk = bit >> 5; 3669 piece = bit % (sizeof(uint32_t) * 8); 3670 3671 if ((mask[chunk] & (1 << piece)) == 0) 3672 return (-1); 3673 else 3674 mask[chunk] &= ~(1 << piece); 3675 3676 return (0); 3677 } 3678 3679 int 3680 ctl_is_set(uint32_t *mask, uint32_t bit) 3681 { 3682 uint32_t chunk, piece; 3683 3684 chunk = bit >> 5; 3685 piece = bit % (sizeof(uint32_t) * 8); 3686 3687 if ((mask[chunk] & (1 << piece)) == 0) 3688 return (0); 3689 else 3690 return (1); 3691 } 3692 3693 static uint64_t 3694 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3695 { 3696 uint64_t *t; 3697 3698 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3699 if (t == NULL) 3700 return (0); 3701 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3702 } 3703 3704 static void 3705 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3706 { 3707 uint64_t *t; 3708 3709 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3710 
if (t == NULL) 3711 return; 3712 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3713 } 3714 3715 static void 3716 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3717 { 3718 uint64_t *p; 3719 u_int i; 3720 3721 i = residx/CTL_MAX_INIT_PER_PORT; 3722 if (lun->pr_keys[i] != NULL) 3723 return; 3724 mtx_unlock(&lun->lun_lock); 3725 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3726 M_WAITOK | M_ZERO); 3727 mtx_lock(&lun->lun_lock); 3728 if (lun->pr_keys[i] == NULL) 3729 lun->pr_keys[i] = p; 3730 else 3731 free(p, M_CTL); 3732 } 3733 3734 static void 3735 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3736 { 3737 uint64_t *t; 3738 3739 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3740 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3741 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3742 } 3743 3744 /* 3745 * ctl_softc, pool_name, total_ctl_io are passed in. 3746 * npool is passed out. 3747 */ 3748 int 3749 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3750 uint32_t total_ctl_io, void **npool) 3751 { 3752 #ifdef IO_POOLS 3753 struct ctl_io_pool *pool; 3754 3755 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3756 M_NOWAIT | M_ZERO); 3757 if (pool == NULL) 3758 return (ENOMEM); 3759 3760 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3761 pool->ctl_softc = ctl_softc; 3762 pool->zone = uma_zsecond_create(pool->name, NULL, 3763 NULL, NULL, NULL, ctl_softc->io_zone); 3764 /* uma_prealloc(pool->zone, total_ctl_io); */ 3765 3766 *npool = pool; 3767 #else 3768 *npool = ctl_softc->io_zone; 3769 #endif 3770 return (0); 3771 } 3772 3773 void 3774 ctl_pool_free(struct ctl_io_pool *pool) 3775 { 3776 3777 if (pool == NULL) 3778 return; 3779 3780 #ifdef IO_POOLS 3781 uma_zdestroy(pool->zone); 3782 free(pool, M_CTL); 3783 #endif 3784 } 3785 3786 union ctl_io * 3787 ctl_alloc_io(void *pool_ref) 3788 { 3789 union ctl_io *io; 3790 #ifdef IO_POOLS 3791 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3792 3793 io = uma_zalloc(pool->zone, M_WAITOK); 3794 #else 3795 io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK); 3796 #endif 3797 if (io != NULL) 3798 io->io_hdr.pool = pool_ref; 3799 return (io); 3800 } 3801 3802 union ctl_io * 3803 ctl_alloc_io_nowait(void *pool_ref) 3804 { 3805 union ctl_io *io; 3806 #ifdef IO_POOLS 3807 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3808 3809 io = uma_zalloc(pool->zone, M_NOWAIT); 3810 #else 3811 io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT); 3812 #endif 3813 if (io != NULL) 3814 io->io_hdr.pool = pool_ref; 3815 return (io); 3816 } 3817 3818 void 3819 ctl_free_io(union ctl_io *io) 3820 { 3821 #ifdef IO_POOLS 3822 struct ctl_io_pool *pool; 3823 #endif 3824 3825 if (io == NULL) 3826 return; 3827 3828 #ifdef IO_POOLS 3829 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3830 uma_zfree(pool->zone, io); 3831 #else 3832 uma_zfree((uma_zone_t)io->io_hdr.pool, io); 3833 #endif 3834 } 3835 3836 void 3837 ctl_zero_io(union ctl_io *io) 3838 { 3839 void *pool_ref; 3840 3841 if (io == NULL) 3842 return; 3843 3844 /* 3845 * May need to preserve linked list pointers at some point too. 
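 * For now only the pool pointer is saved and restored around the memset()
 * below; everything else in the union is cleared.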
3846 */ 3847 pool_ref = io->io_hdr.pool; 3848 memset(io, 0, sizeof(*io)); 3849 io->io_hdr.pool = pool_ref; 3850 } 3851 3852 int 3853 ctl_expand_number(const char *buf, uint64_t *num) 3854 { 3855 char *endptr; 3856 uint64_t number; 3857 unsigned shift; 3858 3859 number = strtoq(buf, &endptr, 0); 3860 3861 switch (tolower((unsigned char)*endptr)) { 3862 case 'e': 3863 shift = 60; 3864 break; 3865 case 'p': 3866 shift = 50; 3867 break; 3868 case 't': 3869 shift = 40; 3870 break; 3871 case 'g': 3872 shift = 30; 3873 break; 3874 case 'm': 3875 shift = 20; 3876 break; 3877 case 'k': 3878 shift = 10; 3879 break; 3880 case 'b': 3881 case '\0': /* No unit. */ 3882 *num = number; 3883 return (0); 3884 default: 3885 /* Unrecognized unit. */ 3886 return (-1); 3887 } 3888 3889 if ((number << shift) >> shift != number) { 3890 /* Overflow */ 3891 return (-1); 3892 } 3893 *num = number << shift; 3894 return (0); 3895 } 3896 3897 3898 /* 3899 * This routine could be used in the future to load default and/or saved 3900 * mode page parameters for a particuar lun. 3901 */ 3902 static int 3903 ctl_init_page_index(struct ctl_lun *lun) 3904 { 3905 int i, page_code; 3906 struct ctl_page_index *page_index; 3907 const char *value; 3908 uint64_t ival; 3909 3910 memcpy(&lun->mode_pages.index, page_index_template, 3911 sizeof(page_index_template)); 3912 3913 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 3914 3915 page_index = &lun->mode_pages.index[i]; 3916 if (lun->be_lun->lun_type == T_DIRECT && 3917 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 3918 continue; 3919 if (lun->be_lun->lun_type == T_PROCESSOR && 3920 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 3921 continue; 3922 if (lun->be_lun->lun_type == T_CDROM && 3923 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 3924 continue; 3925 3926 page_code = page_index->page_code & SMPH_PC_MASK; 3927 switch (page_code) { 3928 case SMS_RW_ERROR_RECOVERY_PAGE: { 3929 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 3930 ("subpage %#x for page %#x is incorrect!", 3931 page_index->subpage, page_code)); 3932 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 3933 &rw_er_page_default, 3934 sizeof(rw_er_page_default)); 3935 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 3936 &rw_er_page_changeable, 3937 sizeof(rw_er_page_changeable)); 3938 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 3939 &rw_er_page_default, 3940 sizeof(rw_er_page_default)); 3941 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 3942 &rw_er_page_default, 3943 sizeof(rw_er_page_default)); 3944 page_index->page_data = 3945 (uint8_t *)lun->mode_pages.rw_er_page; 3946 break; 3947 } 3948 case SMS_FORMAT_DEVICE_PAGE: { 3949 struct scsi_format_page *format_page; 3950 3951 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 3952 ("subpage %#x for page %#x is incorrect!", 3953 page_index->subpage, page_code)); 3954 3955 /* 3956 * Sectors per track are set above. Bytes per 3957 * sector need to be set here on a per-LUN basis. 
3958 */ 3959 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 3960 &format_page_default, 3961 sizeof(format_page_default)); 3962 memcpy(&lun->mode_pages.format_page[ 3963 CTL_PAGE_CHANGEABLE], &format_page_changeable, 3964 sizeof(format_page_changeable)); 3965 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 3966 &format_page_default, 3967 sizeof(format_page_default)); 3968 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 3969 &format_page_default, 3970 sizeof(format_page_default)); 3971 3972 format_page = &lun->mode_pages.format_page[ 3973 CTL_PAGE_CURRENT]; 3974 scsi_ulto2b(lun->be_lun->blocksize, 3975 format_page->bytes_per_sector); 3976 3977 format_page = &lun->mode_pages.format_page[ 3978 CTL_PAGE_DEFAULT]; 3979 scsi_ulto2b(lun->be_lun->blocksize, 3980 format_page->bytes_per_sector); 3981 3982 format_page = &lun->mode_pages.format_page[ 3983 CTL_PAGE_SAVED]; 3984 scsi_ulto2b(lun->be_lun->blocksize, 3985 format_page->bytes_per_sector); 3986 3987 page_index->page_data = 3988 (uint8_t *)lun->mode_pages.format_page; 3989 break; 3990 } 3991 case SMS_RIGID_DISK_PAGE: { 3992 struct scsi_rigid_disk_page *rigid_disk_page; 3993 uint32_t sectors_per_cylinder; 3994 uint64_t cylinders; 3995 #ifndef __XSCALE__ 3996 int shift; 3997 #endif /* !__XSCALE__ */ 3998 3999 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4000 ("subpage %#x for page %#x is incorrect!", 4001 page_index->subpage, page_code)); 4002 4003 /* 4004 * Rotation rate and sectors per track are set 4005 * above. We calculate the cylinders here based on 4006 * capacity. Due to the number of heads and 4007 * sectors per track we're using, smaller arrays 4008 * may turn out to have 0 cylinders. Linux and 4009 * FreeBSD don't pay attention to these mode pages 4010 * to figure out capacity, but Solaris does. It 4011 * seems to deal with 0 cylinders just fine, and 4012 * works out a fake geometry based on the capacity. 4013 */ 4014 memcpy(&lun->mode_pages.rigid_disk_page[ 4015 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 4016 sizeof(rigid_disk_page_default)); 4017 memcpy(&lun->mode_pages.rigid_disk_page[ 4018 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 4019 sizeof(rigid_disk_page_changeable)); 4020 4021 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 4022 CTL_DEFAULT_HEADS; 4023 4024 /* 4025 * The divide method here will be more accurate, 4026 * probably, but results in floating point being 4027 * used in the kernel on i386 (__udivdi3()). On the 4028 * XScale, though, __udivdi3() is implemented in 4029 * software. 4030 * 4031 * The shift method for cylinder calculation is 4032 * accurate if sectors_per_cylinder is a power of 4033 * 2. Otherwise it might be slightly off -- you 4034 * might have a bit of a truncation problem. 4035 */ 4036 #ifdef __XSCALE__ 4037 cylinders = (lun->be_lun->maxlba + 1) / 4038 sectors_per_cylinder; 4039 #else 4040 for (shift = 31; shift > 0; shift--) { 4041 if (sectors_per_cylinder & (1 << shift)) 4042 break; 4043 } 4044 cylinders = (lun->be_lun->maxlba + 1) >> shift; 4045 #endif 4046 4047 /* 4048 * We've basically got 3 bytes, or 24 bits for the 4049 * cylinder size in the mode page. If we're over, 4050 * just round down to 2^24. 
4051 */ 4052 if (cylinders > 0xffffff) 4053 cylinders = 0xffffff; 4054 4055 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4056 CTL_PAGE_DEFAULT]; 4057 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4058 4059 if ((value = ctl_get_opt(&lun->be_lun->options, 4060 "rpm")) != NULL) { 4061 scsi_ulto2b(strtol(value, NULL, 0), 4062 rigid_disk_page->rotation_rate); 4063 } 4064 4065 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 4066 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4067 sizeof(rigid_disk_page_default)); 4068 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 4069 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4070 sizeof(rigid_disk_page_default)); 4071 4072 page_index->page_data = 4073 (uint8_t *)lun->mode_pages.rigid_disk_page; 4074 break; 4075 } 4076 case SMS_VERIFY_ERROR_RECOVERY_PAGE: { 4077 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4078 ("subpage %#x for page %#x is incorrect!", 4079 page_index->subpage, page_code)); 4080 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT], 4081 &verify_er_page_default, 4082 sizeof(verify_er_page_default)); 4083 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE], 4084 &verify_er_page_changeable, 4085 sizeof(verify_er_page_changeable)); 4086 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT], 4087 &verify_er_page_default, 4088 sizeof(verify_er_page_default)); 4089 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED], 4090 &verify_er_page_default, 4091 sizeof(verify_er_page_default)); 4092 page_index->page_data = 4093 (uint8_t *)lun->mode_pages.verify_er_page; 4094 break; 4095 } 4096 case SMS_CACHING_PAGE: { 4097 struct scsi_caching_page *caching_page; 4098 4099 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4100 ("subpage %#x for page %#x is incorrect!", 4101 page_index->subpage, page_code)); 4102 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 4103 &caching_page_default, 4104 sizeof(caching_page_default)); 4105 memcpy(&lun->mode_pages.caching_page[ 4106 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 4107 sizeof(caching_page_changeable)); 4108 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4109 &caching_page_default, 4110 sizeof(caching_page_default)); 4111 caching_page = &lun->mode_pages.caching_page[ 4112 CTL_PAGE_SAVED]; 4113 value = ctl_get_opt(&lun->be_lun->options, "writecache"); 4114 if (value != NULL && strcmp(value, "off") == 0) 4115 caching_page->flags1 &= ~SCP_WCE; 4116 value = ctl_get_opt(&lun->be_lun->options, "readcache"); 4117 if (value != NULL && strcmp(value, "off") == 0) 4118 caching_page->flags1 |= SCP_RCD; 4119 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 4120 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4121 sizeof(caching_page_default)); 4122 page_index->page_data = 4123 (uint8_t *)lun->mode_pages.caching_page; 4124 break; 4125 } 4126 case SMS_CONTROL_MODE_PAGE: { 4127 switch (page_index->subpage) { 4128 case SMS_SUBPAGE_PAGE_0: { 4129 struct scsi_control_page *control_page; 4130 4131 memcpy(&lun->mode_pages.control_page[ 4132 CTL_PAGE_DEFAULT], 4133 &control_page_default, 4134 sizeof(control_page_default)); 4135 memcpy(&lun->mode_pages.control_page[ 4136 CTL_PAGE_CHANGEABLE], 4137 &control_page_changeable, 4138 sizeof(control_page_changeable)); 4139 memcpy(&lun->mode_pages.control_page[ 4140 CTL_PAGE_SAVED], 4141 &control_page_default, 4142 sizeof(control_page_default)); 4143 control_page = &lun->mode_pages.control_page[ 4144 CTL_PAGE_SAVED]; 4145 value = ctl_get_opt(&lun->be_lun->options, 4146 "reordering"); 4147 if (value 
!= NULL && 4148 strcmp(value, "unrestricted") == 0) { 4149 control_page->queue_flags &= 4150 ~SCP_QUEUE_ALG_MASK; 4151 control_page->queue_flags |= 4152 SCP_QUEUE_ALG_UNRESTRICTED; 4153 } 4154 memcpy(&lun->mode_pages.control_page[ 4155 CTL_PAGE_CURRENT], 4156 &lun->mode_pages.control_page[ 4157 CTL_PAGE_SAVED], 4158 sizeof(control_page_default)); 4159 page_index->page_data = 4160 (uint8_t *)lun->mode_pages.control_page; 4161 break; 4162 } 4163 case 0x01: 4164 memcpy(&lun->mode_pages.control_ext_page[ 4165 CTL_PAGE_DEFAULT], 4166 &control_ext_page_default, 4167 sizeof(control_ext_page_default)); 4168 memcpy(&lun->mode_pages.control_ext_page[ 4169 CTL_PAGE_CHANGEABLE], 4170 &control_ext_page_changeable, 4171 sizeof(control_ext_page_changeable)); 4172 memcpy(&lun->mode_pages.control_ext_page[ 4173 CTL_PAGE_SAVED], 4174 &control_ext_page_default, 4175 sizeof(control_ext_page_default)); 4176 memcpy(&lun->mode_pages.control_ext_page[ 4177 CTL_PAGE_CURRENT], 4178 &lun->mode_pages.control_ext_page[ 4179 CTL_PAGE_SAVED], 4180 sizeof(control_ext_page_default)); 4181 page_index->page_data = 4182 (uint8_t *)lun->mode_pages.control_ext_page; 4183 break; 4184 default: 4185 panic("subpage %#x for page %#x is incorrect!", 4186 page_index->subpage, page_code); 4187 } 4188 break; 4189 } 4190 case SMS_INFO_EXCEPTIONS_PAGE: { 4191 switch (page_index->subpage) { 4192 case SMS_SUBPAGE_PAGE_0: 4193 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 4194 &ie_page_default, 4195 sizeof(ie_page_default)); 4196 memcpy(&lun->mode_pages.ie_page[ 4197 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 4198 sizeof(ie_page_changeable)); 4199 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 4200 &ie_page_default, 4201 sizeof(ie_page_default)); 4202 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 4203 &ie_page_default, 4204 sizeof(ie_page_default)); 4205 page_index->page_data = 4206 (uint8_t *)lun->mode_pages.ie_page; 4207 break; 4208 case 0x02: { 4209 struct ctl_logical_block_provisioning_page *page; 4210 4211 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 4212 &lbp_page_default, 4213 sizeof(lbp_page_default)); 4214 memcpy(&lun->mode_pages.lbp_page[ 4215 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 4216 sizeof(lbp_page_changeable)); 4217 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4218 &lbp_page_default, 4219 sizeof(lbp_page_default)); 4220 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 4221 value = ctl_get_opt(&lun->be_lun->options, 4222 "avail-threshold"); 4223 if (value != NULL && 4224 ctl_expand_number(value, &ival) == 0) { 4225 page->descr[0].flags |= SLBPPD_ENABLED | 4226 SLBPPD_ARMING_DEC; 4227 if (lun->be_lun->blocksize) 4228 ival /= lun->be_lun->blocksize; 4229 else 4230 ival /= 512; 4231 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4232 page->descr[0].count); 4233 } 4234 value = ctl_get_opt(&lun->be_lun->options, 4235 "used-threshold"); 4236 if (value != NULL && 4237 ctl_expand_number(value, &ival) == 0) { 4238 page->descr[1].flags |= SLBPPD_ENABLED | 4239 SLBPPD_ARMING_INC; 4240 if (lun->be_lun->blocksize) 4241 ival /= lun->be_lun->blocksize; 4242 else 4243 ival /= 512; 4244 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4245 page->descr[1].count); 4246 } 4247 value = ctl_get_opt(&lun->be_lun->options, 4248 "pool-avail-threshold"); 4249 if (value != NULL && 4250 ctl_expand_number(value, &ival) == 0) { 4251 page->descr[2].flags |= SLBPPD_ENABLED | 4252 SLBPPD_ARMING_DEC; 4253 if (lun->be_lun->blocksize) 4254 ival /= lun->be_lun->blocksize; 4255 else 4256 ival /= 512; 4257 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4258 
page->descr[2].count); 4259 } 4260 value = ctl_get_opt(&lun->be_lun->options, 4261 "pool-used-threshold"); 4262 if (value != NULL && 4263 ctl_expand_number(value, &ival) == 0) { 4264 page->descr[3].flags |= SLBPPD_ENABLED | 4265 SLBPPD_ARMING_INC; 4266 if (lun->be_lun->blocksize) 4267 ival /= lun->be_lun->blocksize; 4268 else 4269 ival /= 512; 4270 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4271 page->descr[3].count); 4272 } 4273 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 4274 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4275 sizeof(lbp_page_default)); 4276 page_index->page_data = 4277 (uint8_t *)lun->mode_pages.lbp_page; 4278 break; 4279 } 4280 default: 4281 panic("subpage %#x for page %#x is incorrect!", 4282 page_index->subpage, page_code); 4283 } 4284 break; 4285 } 4286 case SMS_CDDVD_CAPS_PAGE:{ 4287 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4288 ("subpage %#x for page %#x is incorrect!", 4289 page_index->subpage, page_code)); 4290 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT], 4291 &cddvd_page_default, 4292 sizeof(cddvd_page_default)); 4293 memcpy(&lun->mode_pages.cddvd_page[ 4294 CTL_PAGE_CHANGEABLE], &cddvd_page_changeable, 4295 sizeof(cddvd_page_changeable)); 4296 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4297 &cddvd_page_default, 4298 sizeof(cddvd_page_default)); 4299 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT], 4300 &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4301 sizeof(cddvd_page_default)); 4302 page_index->page_data = 4303 (uint8_t *)lun->mode_pages.cddvd_page; 4304 break; 4305 } 4306 default: 4307 panic("invalid page code value %#x", page_code); 4308 } 4309 } 4310 4311 return (CTL_RETVAL_COMPLETE); 4312 } 4313 4314 static int 4315 ctl_init_log_page_index(struct ctl_lun *lun) 4316 { 4317 struct ctl_page_index *page_index; 4318 int i, j, k, prev; 4319 4320 memcpy(&lun->log_pages.index, log_page_index_template, 4321 sizeof(log_page_index_template)); 4322 4323 prev = -1; 4324 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 4325 4326 page_index = &lun->log_pages.index[i]; 4327 if (lun->be_lun->lun_type == T_DIRECT && 4328 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 4329 continue; 4330 if (lun->be_lun->lun_type == T_PROCESSOR && 4331 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 4332 continue; 4333 if (lun->be_lun->lun_type == T_CDROM && 4334 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 4335 continue; 4336 4337 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 4338 lun->backend->lun_attr == NULL) 4339 continue; 4340 4341 if (page_index->page_code != prev) { 4342 lun->log_pages.pages_page[j] = page_index->page_code; 4343 prev = page_index->page_code; 4344 j++; 4345 } 4346 lun->log_pages.subpages_page[k*2] = page_index->page_code; 4347 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 4348 k++; 4349 } 4350 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 4351 lun->log_pages.index[0].page_len = j; 4352 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 4353 lun->log_pages.index[1].page_len = k * 2; 4354 lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0]; 4355 lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS; 4356 lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page; 4357 lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page); 4358 lun->log_pages.index[4].page_data = (uint8_t *)&lun->log_pages.ie_page; 4359 lun->log_pages.index[4].page_len = sizeof(lun->log_pages.ie_page); 4360 4361 return 
(CTL_RETVAL_COMPLETE);
}

static int
hex2bin(const char *str, uint8_t *buf, int buf_size)
{
	int i;
	u_char c;

	memset(buf, 0, buf_size);
	while (isspace(str[0]))
		str++;
	if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
		str += 2;
	buf_size *= 2;
	for (i = 0; str[i] != 0 && i < buf_size; i++) {
		while (str[i] == '-')	/* Skip dashes in UUIDs. */
			str++;
		c = str[i];
		if (isdigit(c))
			c -= '0';
		else if (isalpha(c))
			c -= isupper(c) ? 'A' - 10 : 'a' - 10;
		else
			break;
		if (c >= 16)
			break;
		if ((i & 1) == 0)
			buf[i / 2] |= (c << 4);
		else
			buf[i / 2] |= c;
	}
	return ((i + 1) / 2);
}

/*
 * LUN allocation.
 *
 * Requirements:
 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if the
 *   caller wants us to allocate the LUN and can block.
 * - ctl_softc is always set
 * - be_lun is set if the LUN has a backend (needed for disk LUNs)
 *
 * Returns 0 for success, non-zero (errno) for failure.
 */
static int
ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
	      struct ctl_be_lun *const be_lun)
{
	struct ctl_lun *nlun, *lun;
	struct scsi_vpd_id_descriptor *desc;
	struct scsi_vpd_id_t10 *t10id;
	const char *eui, *naa, *scsiname, *uuid, *vendor, *value;
	int lun_number, i, lun_malloced;
	int devidlen, idlen1, idlen2 = 0, len;

	if (be_lun == NULL)
		return (EINVAL);

	/*
	 * We currently only support Direct Access, Processor and CD-ROM
	 * LUN types.
	 */
	switch (be_lun->lun_type) {
	case T_DIRECT:
	case T_PROCESSOR:
	case T_CDROM:
		break;
	case T_SEQUENTIAL:
	case T_CHANGER:
	default:
		be_lun->lun_config_status(be_lun->be_lun,
					  CTL_LUN_CONFIG_FAILURE);
		break;
	}
	if (ctl_lun == NULL) {
		lun = malloc(sizeof(*lun), M_CTL, M_WAITOK);
		lun_malloced = 1;
	} else {
		lun_malloced = 0;
		lun = ctl_lun;
	}

	memset(lun, 0, sizeof(*lun));
	if (lun_malloced)
		lun->flags = CTL_LUN_MALLOCED;

	/* Generate LUN ID.
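	 * The designator list below is built from the backend options: a T10
	 * vendor ID descriptor is always present, plus optional SCSI name,
	 * EUI-64, NAA and UUID descriptors.  As a hypothetical example, an
	 * "naa" option of "0x600a098038303053" is parsed by hex2bin() above
	 * into eight binary bytes and emitted as an 8-byte NAA designator.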
*/ 4449 devidlen = max(CTL_DEVID_MIN_LEN, 4450 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4451 idlen1 = sizeof(*t10id) + devidlen; 4452 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4453 scsiname = ctl_get_opt(&be_lun->options, "scsiname"); 4454 if (scsiname != NULL) { 4455 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4456 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4457 } 4458 eui = ctl_get_opt(&be_lun->options, "eui"); 4459 if (eui != NULL) { 4460 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4461 } 4462 naa = ctl_get_opt(&be_lun->options, "naa"); 4463 if (naa != NULL) { 4464 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4465 } 4466 uuid = ctl_get_opt(&be_lun->options, "uuid"); 4467 if (uuid != NULL) { 4468 len += sizeof(struct scsi_vpd_id_descriptor) + 18; 4469 } 4470 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4471 M_CTL, M_WAITOK | M_ZERO); 4472 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4473 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4474 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4475 desc->length = idlen1; 4476 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4477 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4478 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { 4479 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4480 } else { 4481 strncpy(t10id->vendor, vendor, 4482 min(sizeof(t10id->vendor), strlen(vendor))); 4483 } 4484 strncpy((char *)t10id->vendor_spec_id, 4485 (char *)be_lun->device_id, devidlen); 4486 if (scsiname != NULL) { 4487 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4488 desc->length); 4489 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4490 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4491 SVPD_ID_TYPE_SCSI_NAME; 4492 desc->length = idlen2; 4493 strlcpy(desc->identifier, scsiname, idlen2); 4494 } 4495 if (eui != NULL) { 4496 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4497 desc->length); 4498 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4499 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4500 SVPD_ID_TYPE_EUI64; 4501 desc->length = hex2bin(eui, desc->identifier, 16); 4502 desc->length = desc->length > 12 ? 16 : 4503 (desc->length > 8 ? 12 : 8); 4504 len -= 16 - desc->length; 4505 } 4506 if (naa != NULL) { 4507 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4508 desc->length); 4509 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4510 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4511 SVPD_ID_TYPE_NAA; 4512 desc->length = hex2bin(naa, desc->identifier, 16); 4513 desc->length = desc->length > 8 ? 16 : 8; 4514 len -= 16 - desc->length; 4515 } 4516 if (uuid != NULL) { 4517 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4518 desc->length); 4519 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4520 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4521 SVPD_ID_TYPE_UUID; 4522 desc->identifier[0] = 0x10; 4523 hex2bin(uuid, &desc->identifier[2], 16); 4524 desc->length = 18; 4525 } 4526 lun->lun_devid->len = len; 4527 4528 mtx_lock(&ctl_softc->ctl_lock); 4529 /* 4530 * See if the caller requested a particular LUN number. If so, see 4531 * if it is available. Otherwise, allocate the first available LUN. 
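	 * (ctl_ffz() scans ctl_lun_mask for the first clear bit, so if LUNs
	 * 0 and 1 are already set in the mask, for instance, the next
	 * request without an explicit ID gets LUN 2.)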
4532 */ 4533 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4534 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4535 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4536 mtx_unlock(&ctl_softc->ctl_lock); 4537 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4538 printf("ctl: requested LUN ID %d is higher " 4539 "than CTL_MAX_LUNS - 1 (%d)\n", 4540 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4541 } else { 4542 /* 4543 * XXX KDM return an error, or just assign 4544 * another LUN ID in this case?? 4545 */ 4546 printf("ctl: requested LUN ID %d is already " 4547 "in use\n", be_lun->req_lun_id); 4548 } 4549 if (lun->flags & CTL_LUN_MALLOCED) 4550 free(lun, M_CTL); 4551 be_lun->lun_config_status(be_lun->be_lun, 4552 CTL_LUN_CONFIG_FAILURE); 4553 return (ENOSPC); 4554 } 4555 lun_number = be_lun->req_lun_id; 4556 } else { 4557 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS); 4558 if (lun_number == -1) { 4559 mtx_unlock(&ctl_softc->ctl_lock); 4560 printf("ctl: can't allocate LUN, out of LUNs\n"); 4561 if (lun->flags & CTL_LUN_MALLOCED) 4562 free(lun, M_CTL); 4563 be_lun->lun_config_status(be_lun->be_lun, 4564 CTL_LUN_CONFIG_FAILURE); 4565 return (ENOSPC); 4566 } 4567 } 4568 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4569 4570 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4571 lun->lun = lun_number; 4572 lun->be_lun = be_lun; 4573 /* 4574 * The processor LUN is always enabled. Disk LUNs come on line 4575 * disabled, and must be enabled by the backend. 4576 */ 4577 lun->flags |= CTL_LUN_DISABLED; 4578 lun->backend = be_lun->be; 4579 be_lun->ctl_lun = lun; 4580 be_lun->lun_id = lun_number; 4581 atomic_add_int(&be_lun->be->num_luns, 1); 4582 if (be_lun->flags & CTL_LUN_FLAG_EJECTED) 4583 lun->flags |= CTL_LUN_EJECTED; 4584 if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) 4585 lun->flags |= CTL_LUN_NO_MEDIA; 4586 if (be_lun->flags & CTL_LUN_FLAG_STOPPED) 4587 lun->flags |= CTL_LUN_STOPPED; 4588 4589 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4590 lun->flags |= CTL_LUN_PRIMARY_SC; 4591 4592 value = ctl_get_opt(&be_lun->options, "removable"); 4593 if (value != NULL) { 4594 if (strcmp(value, "on") == 0) 4595 lun->flags |= CTL_LUN_REMOVABLE; 4596 } else if (be_lun->lun_type == T_CDROM) 4597 lun->flags |= CTL_LUN_REMOVABLE; 4598 4599 lun->ctl_softc = ctl_softc; 4600 #ifdef CTL_TIME_IO 4601 lun->last_busy = getsbinuptime(); 4602 #endif 4603 TAILQ_INIT(&lun->ooa_queue); 4604 TAILQ_INIT(&lun->blocked_queue); 4605 STAILQ_INIT(&lun->error_list); 4606 lun->ie_reported = 1; 4607 callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0); 4608 ctl_tpc_lun_init(lun); 4609 4610 /* 4611 * Initialize the mode and log page index. 4612 */ 4613 ctl_init_page_index(lun); 4614 ctl_init_log_page_index(lun); 4615 4616 /* 4617 * Now, before we insert this lun on the lun list, set the lun 4618 * inventory changed UA for all other luns. 
4619 */ 4620 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4621 mtx_lock(&nlun->lun_lock); 4622 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4623 mtx_unlock(&nlun->lun_lock); 4624 } 4625 4626 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4627 4628 ctl_softc->ctl_luns[lun_number] = lun; 4629 4630 ctl_softc->num_luns++; 4631 4632 /* Setup statistics gathering */ 4633 lun->stats.device_type = be_lun->lun_type; 4634 lun->stats.lun_number = lun_number; 4635 lun->stats.blocksize = be_lun->blocksize; 4636 if (be_lun->blocksize == 0) 4637 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4638 for (i = 0;i < CTL_MAX_PORTS;i++) 4639 lun->stats.ports[i].targ_port = i; 4640 4641 mtx_unlock(&ctl_softc->ctl_lock); 4642 4643 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4644 return (0); 4645 } 4646 4647 /* 4648 * Delete a LUN. 4649 * Assumptions: 4650 * - LUN has already been marked invalid and any pending I/O has been taken 4651 * care of. 4652 */ 4653 static int 4654 ctl_free_lun(struct ctl_lun *lun) 4655 { 4656 struct ctl_softc *softc; 4657 struct ctl_lun *nlun; 4658 int i; 4659 4660 softc = lun->ctl_softc; 4661 4662 mtx_assert(&softc->ctl_lock, MA_OWNED); 4663 4664 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4665 4666 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4667 4668 softc->ctl_luns[lun->lun] = NULL; 4669 4670 if (!TAILQ_EMPTY(&lun->ooa_queue)) 4671 panic("Freeing a LUN %p with outstanding I/O!!\n", lun); 4672 4673 softc->num_luns--; 4674 4675 /* 4676 * Tell the backend to free resources, if this LUN has a backend. 4677 */ 4678 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4679 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4680 4681 lun->ie_reportcnt = UINT32_MAX; 4682 callout_drain(&lun->ie_callout); 4683 4684 ctl_tpc_lun_shutdown(lun); 4685 mtx_destroy(&lun->lun_lock); 4686 free(lun->lun_devid, M_CTL); 4687 for (i = 0; i < CTL_MAX_PORTS; i++) 4688 free(lun->pending_ua[i], M_CTL); 4689 for (i = 0; i < CTL_MAX_PORTS; i++) 4690 free(lun->pr_keys[i], M_CTL); 4691 free(lun->write_buffer, M_CTL); 4692 if (lun->flags & CTL_LUN_MALLOCED) 4693 free(lun, M_CTL); 4694 4695 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4696 mtx_lock(&nlun->lun_lock); 4697 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4698 mtx_unlock(&nlun->lun_lock); 4699 } 4700 4701 return (0); 4702 } 4703 4704 static void 4705 ctl_create_lun(struct ctl_be_lun *be_lun) 4706 { 4707 4708 /* 4709 * ctl_alloc_lun() should handle all potential failure cases. 4710 */ 4711 ctl_alloc_lun(control_softc, NULL, be_lun); 4712 } 4713 4714 int 4715 ctl_add_lun(struct ctl_be_lun *be_lun) 4716 { 4717 struct ctl_softc *softc = control_softc; 4718 4719 mtx_lock(&softc->ctl_lock); 4720 STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links); 4721 mtx_unlock(&softc->ctl_lock); 4722 wakeup(&softc->pending_lun_queue); 4723 4724 return (0); 4725 } 4726 4727 int 4728 ctl_enable_lun(struct ctl_be_lun *be_lun) 4729 { 4730 struct ctl_softc *softc; 4731 struct ctl_port *port, *nport; 4732 struct ctl_lun *lun; 4733 int retval; 4734 4735 lun = (struct ctl_lun *)be_lun->ctl_lun; 4736 softc = lun->ctl_softc; 4737 4738 mtx_lock(&softc->ctl_lock); 4739 mtx_lock(&lun->lun_lock); 4740 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4741 /* 4742 * eh? Why did we get called if the LUN is already 4743 * enabled? 
4744 */ 4745 mtx_unlock(&lun->lun_lock); 4746 mtx_unlock(&softc->ctl_lock); 4747 return (0); 4748 } 4749 lun->flags &= ~CTL_LUN_DISABLED; 4750 mtx_unlock(&lun->lun_lock); 4751 4752 STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) { 4753 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4754 port->lun_map != NULL || port->lun_enable == NULL) 4755 continue; 4756 4757 /* 4758 * Drop the lock while we call the FETD's enable routine. 4759 * This can lead to a callback into CTL (at least in the 4760 * case of the internal initiator frontend. 4761 */ 4762 mtx_unlock(&softc->ctl_lock); 4763 retval = port->lun_enable(port->targ_lun_arg, lun->lun); 4764 mtx_lock(&softc->ctl_lock); 4765 if (retval != 0) { 4766 printf("%s: FETD %s port %d returned error " 4767 "%d for lun_enable on lun %jd\n", 4768 __func__, port->port_name, port->targ_port, 4769 retval, (intmax_t)lun->lun); 4770 } 4771 } 4772 4773 mtx_unlock(&softc->ctl_lock); 4774 ctl_isc_announce_lun(lun); 4775 4776 return (0); 4777 } 4778 4779 int 4780 ctl_disable_lun(struct ctl_be_lun *be_lun) 4781 { 4782 struct ctl_softc *softc; 4783 struct ctl_port *port; 4784 struct ctl_lun *lun; 4785 int retval; 4786 4787 lun = (struct ctl_lun *)be_lun->ctl_lun; 4788 softc = lun->ctl_softc; 4789 4790 mtx_lock(&softc->ctl_lock); 4791 mtx_lock(&lun->lun_lock); 4792 if (lun->flags & CTL_LUN_DISABLED) { 4793 mtx_unlock(&lun->lun_lock); 4794 mtx_unlock(&softc->ctl_lock); 4795 return (0); 4796 } 4797 lun->flags |= CTL_LUN_DISABLED; 4798 mtx_unlock(&lun->lun_lock); 4799 4800 STAILQ_FOREACH(port, &softc->port_list, links) { 4801 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4802 port->lun_map != NULL || port->lun_disable == NULL) 4803 continue; 4804 4805 /* 4806 * Drop the lock before we call the frontend's disable 4807 * routine, to avoid lock order reversals. 4808 * 4809 * XXX KDM what happens if the frontend list changes while 4810 * we're traversing it? It's unlikely, but should be handled. 
4811 */ 4812 mtx_unlock(&softc->ctl_lock); 4813 retval = port->lun_disable(port->targ_lun_arg, lun->lun); 4814 mtx_lock(&softc->ctl_lock); 4815 if (retval != 0) { 4816 printf("%s: FETD %s port %d returned error " 4817 "%d for lun_disable on lun %jd\n", 4818 __func__, port->port_name, port->targ_port, 4819 retval, (intmax_t)lun->lun); 4820 } 4821 } 4822 4823 mtx_unlock(&softc->ctl_lock); 4824 ctl_isc_announce_lun(lun); 4825 4826 return (0); 4827 } 4828 4829 int 4830 ctl_start_lun(struct ctl_be_lun *be_lun) 4831 { 4832 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4833 4834 mtx_lock(&lun->lun_lock); 4835 lun->flags &= ~CTL_LUN_STOPPED; 4836 mtx_unlock(&lun->lun_lock); 4837 return (0); 4838 } 4839 4840 int 4841 ctl_stop_lun(struct ctl_be_lun *be_lun) 4842 { 4843 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4844 4845 mtx_lock(&lun->lun_lock); 4846 lun->flags |= CTL_LUN_STOPPED; 4847 mtx_unlock(&lun->lun_lock); 4848 return (0); 4849 } 4850 4851 int 4852 ctl_lun_no_media(struct ctl_be_lun *be_lun) 4853 { 4854 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4855 4856 mtx_lock(&lun->lun_lock); 4857 lun->flags |= CTL_LUN_NO_MEDIA; 4858 mtx_unlock(&lun->lun_lock); 4859 return (0); 4860 } 4861 4862 int 4863 ctl_lun_has_media(struct ctl_be_lun *be_lun) 4864 { 4865 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4866 union ctl_ha_msg msg; 4867 4868 mtx_lock(&lun->lun_lock); 4869 lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED); 4870 if (lun->flags & CTL_LUN_REMOVABLE) 4871 ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE); 4872 mtx_unlock(&lun->lun_lock); 4873 if ((lun->flags & CTL_LUN_REMOVABLE) && 4874 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 4875 bzero(&msg.ua, sizeof(msg.ua)); 4876 msg.hdr.msg_type = CTL_MSG_UA; 4877 msg.hdr.nexus.initid = -1; 4878 msg.hdr.nexus.targ_port = -1; 4879 msg.hdr.nexus.targ_lun = lun->lun; 4880 msg.hdr.nexus.targ_mapped_lun = lun->lun; 4881 msg.ua.ua_all = 1; 4882 msg.ua.ua_set = 1; 4883 msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE; 4884 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 4885 M_WAITOK); 4886 } 4887 return (0); 4888 } 4889 4890 int 4891 ctl_lun_ejected(struct ctl_be_lun *be_lun) 4892 { 4893 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4894 4895 mtx_lock(&lun->lun_lock); 4896 lun->flags |= CTL_LUN_EJECTED; 4897 mtx_unlock(&lun->lun_lock); 4898 return (0); 4899 } 4900 4901 int 4902 ctl_lun_primary(struct ctl_be_lun *be_lun) 4903 { 4904 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4905 4906 mtx_lock(&lun->lun_lock); 4907 lun->flags |= CTL_LUN_PRIMARY_SC; 4908 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4909 mtx_unlock(&lun->lun_lock); 4910 ctl_isc_announce_lun(lun); 4911 return (0); 4912 } 4913 4914 int 4915 ctl_lun_secondary(struct ctl_be_lun *be_lun) 4916 { 4917 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4918 4919 mtx_lock(&lun->lun_lock); 4920 lun->flags &= ~CTL_LUN_PRIMARY_SC; 4921 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4922 mtx_unlock(&lun->lun_lock); 4923 ctl_isc_announce_lun(lun); 4924 return (0); 4925 } 4926 4927 int 4928 ctl_invalidate_lun(struct ctl_be_lun *be_lun) 4929 { 4930 struct ctl_softc *softc; 4931 struct ctl_lun *lun; 4932 4933 lun = (struct ctl_lun *)be_lun->ctl_lun; 4934 softc = lun->ctl_softc; 4935 4936 mtx_lock(&lun->lun_lock); 4937 4938 /* 4939 * The LUN needs to be disabled before it can be marked invalid. 
4940 */ 4941 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4942 mtx_unlock(&lun->lun_lock); 4943 return (-1); 4944 } 4945 /* 4946 * Mark the LUN invalid. 4947 */ 4948 lun->flags |= CTL_LUN_INVALID; 4949 4950 /* 4951 * If there is nothing in the OOA queue, go ahead and free the LUN. 4952 * If we have something in the OOA queue, we'll free it when the 4953 * last I/O completes. 4954 */ 4955 if (TAILQ_EMPTY(&lun->ooa_queue)) { 4956 mtx_unlock(&lun->lun_lock); 4957 mtx_lock(&softc->ctl_lock); 4958 ctl_free_lun(lun); 4959 mtx_unlock(&softc->ctl_lock); 4960 } else 4961 mtx_unlock(&lun->lun_lock); 4962 4963 return (0); 4964 } 4965 4966 void 4967 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 4968 { 4969 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4970 union ctl_ha_msg msg; 4971 4972 mtx_lock(&lun->lun_lock); 4973 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE); 4974 mtx_unlock(&lun->lun_lock); 4975 if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 4976 /* Send msg to other side. */ 4977 bzero(&msg.ua, sizeof(msg.ua)); 4978 msg.hdr.msg_type = CTL_MSG_UA; 4979 msg.hdr.nexus.initid = -1; 4980 msg.hdr.nexus.targ_port = -1; 4981 msg.hdr.nexus.targ_lun = lun->lun; 4982 msg.hdr.nexus.targ_mapped_lun = lun->lun; 4983 msg.ua.ua_all = 1; 4984 msg.ua.ua_set = 1; 4985 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE; 4986 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 4987 M_WAITOK); 4988 } 4989 } 4990 4991 /* 4992 * Backend "memory move is complete" callback for requests that never 4993 * make it down to say RAIDCore's configuration code. 4994 */ 4995 int 4996 ctl_config_move_done(union ctl_io *io) 4997 { 4998 int retval; 4999 5000 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 5001 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 5002 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 5003 5004 if ((io->io_hdr.port_status != 0) && 5005 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5006 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5007 /* 5008 * For hardware error sense keys, the sense key 5009 * specific value is defined to be a retry count, 5010 * but we use it to pass back an internal FETD 5011 * error code. XXX KDM Hopefully the FETD is only 5012 * using 16 bits for an error code, since that's 5013 * all the space we have in the sks field. 5014 */ 5015 ctl_set_internal_failure(&io->scsiio, 5016 /*sks_valid*/ 1, 5017 /*retry_count*/ 5018 io->io_hdr.port_status); 5019 } 5020 5021 if (ctl_debug & CTL_DEBUG_CDB_DATA) 5022 ctl_data_print(io); 5023 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 5024 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5025 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 5026 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 5027 /* 5028 * XXX KDM just assuming a single pointer here, and not a 5029 * S/G list. If we start using S/G lists for config data, 5030 * we'll need to know how to clean them up here as well. 5031 */ 5032 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5033 free(io->scsiio.kern_data_ptr, M_CTL); 5034 ctl_done(io); 5035 retval = CTL_RETVAL_COMPLETE; 5036 } else { 5037 /* 5038 * XXX KDM now we need to continue data movement. Some 5039 * options: 5040 * - call ctl_scsiio() again? We don't do this for data 5041 * writes, because for those at least we know ahead of 5042 * time where the write will go and how long it is. For 5043 * config writes, though, that information is largely 5044 * contained within the write itself, thus we need to 5045 * parse out the data again. 
5046 * 5047 * - Call some other function once the data is in? 5048 */ 5049 5050 /* 5051 * XXX KDM call ctl_scsiio() again for now, and check flag 5052 * bits to see whether we're allocated or not. 5053 */ 5054 retval = ctl_scsiio(&io->scsiio); 5055 } 5056 return (retval); 5057 } 5058 5059 /* 5060 * This gets called by a backend driver when it is done with a 5061 * data_submit method. 5062 */ 5063 void 5064 ctl_data_submit_done(union ctl_io *io) 5065 { 5066 /* 5067 * If the IO_CONT flag is set, we need to call the supplied 5068 * function to continue processing the I/O, instead of completing 5069 * the I/O just yet. 5070 * 5071 * If there is an error, though, we don't want to keep processing. 5072 * Instead, just send status back to the initiator. 5073 */ 5074 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5075 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5076 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5077 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5078 io->scsiio.io_cont(io); 5079 return; 5080 } 5081 ctl_done(io); 5082 } 5083 5084 /* 5085 * This gets called by a backend driver when it is done with a 5086 * configuration write. 5087 */ 5088 void 5089 ctl_config_write_done(union ctl_io *io) 5090 { 5091 uint8_t *buf; 5092 5093 /* 5094 * If the IO_CONT flag is set, we need to call the supplied 5095 * function to continue processing the I/O, instead of completing 5096 * the I/O just yet. 5097 * 5098 * If there is an error, though, we don't want to keep processing. 5099 * Instead, just send status back to the initiator. 5100 */ 5101 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5102 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5103 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5104 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5105 io->scsiio.io_cont(io); 5106 return; 5107 } 5108 /* 5109 * Since a configuration write can be done for commands that actually 5110 * have data allocated, like write buffer, and commands that have 5111 * no data, like start/stop unit, we need to check here. 5112 */ 5113 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5114 buf = io->scsiio.kern_data_ptr; 5115 else 5116 buf = NULL; 5117 ctl_done(io); 5118 if (buf) 5119 free(buf, M_CTL); 5120 } 5121 5122 void 5123 ctl_config_read_done(union ctl_io *io) 5124 { 5125 uint8_t *buf; 5126 5127 /* 5128 * If there is some error -- we are done, skip data transfer. 5129 */ 5130 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 5131 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5132 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 5133 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5134 buf = io->scsiio.kern_data_ptr; 5135 else 5136 buf = NULL; 5137 ctl_done(io); 5138 if (buf) 5139 free(buf, M_CTL); 5140 return; 5141 } 5142 5143 /* 5144 * If the IO_CONT flag is set, we need to call the supplied 5145 * function to continue processing the I/O, instead of completing 5146 * the I/O just yet. 5147 */ 5148 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 5149 io->scsiio.io_cont(io); 5150 return; 5151 } 5152 5153 ctl_datamove(io); 5154 } 5155 5156 /* 5157 * SCSI release command. 5158 */ 5159 int 5160 ctl_scsi_release(struct ctl_scsiio *ctsio) 5161 { 5162 struct ctl_lun *lun; 5163 uint32_t residx; 5164 5165 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 5166 5167 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5168 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5169 5170 /* 5171 * XXX KDM right now, we only support LUN reservation. 
We don't
	 * support 3rd party reservations, or extent reservations, which
	 * might actually need the parameter list.  If we've gotten this
	 * far, we've got a LUN reservation.  Anything else got kicked out
	 * above.  So, according to SPC, ignore the length.
	 */

	mtx_lock(&lun->lun_lock);

	/*
	 * According to SPC, it is not an error for an initiator to attempt
	 * to release a reservation on a LUN that isn't reserved, or that
	 * is reserved by another initiator.  The reservation can only be
	 * released, though, by the initiator who made it or by one of
	 * several reset type events.
	 */
	if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
		lun->flags &= ~CTL_LUN_RESERVED;

	mtx_unlock(&lun->lun_lock);

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

int
ctl_scsi_reserve(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	uint32_t residx;

	CTL_DEBUG_PRINT(("ctl_reserve\n"));

	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	/*
	 * XXX KDM right now, we only support LUN reservation.  We don't
	 * support 3rd party reservations, or extent reservations, which
	 * might actually need the parameter list.  If we've gotten this
	 * far, we've got a LUN reservation.  Anything else got kicked out
	 * above.  So, according to SPC, ignore the length.
	 */

	mtx_lock(&lun->lun_lock);
	if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) {
		ctl_set_reservation_conflict(ctsio);
		goto bailout;
	}

	/* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. */
	if (lun->flags & CTL_LUN_PR_RESERVED) {
		ctl_set_success(ctsio);
		goto bailout;
	}

	lun->flags |= CTL_LUN_RESERVED;
	lun->res_idx = residx;
	ctl_set_success(ctsio);

bailout:
	mtx_unlock(&lun->lun_lock);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

int
ctl_start_stop(struct ctl_scsiio *ctsio)
{
	struct scsi_start_stop_unit *cdb;
	struct ctl_lun *lun;
	int retval;

	CTL_DEBUG_PRINT(("ctl_start_stop\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_start_stop_unit *)ctsio->cdb;

	if ((cdb->how & SSS_PC_MASK) == 0) {
		if ((lun->flags & CTL_LUN_PR_RESERVED) &&
		    (cdb->how & SSS_START) == 0) {
			uint32_t residx;

			residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
			if (ctl_get_prkey(lun, residx) == 0 ||
			    (lun->pr_res_idx != residx && lun->pr_res_type < 4)) {

				ctl_set_reservation_conflict(ctsio);
				ctl_done((union ctl_io *)ctsio);
				return (CTL_RETVAL_COMPLETE);
			}
		}

		if ((cdb->how & SSS_LOEJ) &&
		    (lun->flags & CTL_LUN_REMOVABLE) == 0) {
			ctl_set_invalid_field(ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 4,
					      /*bit_valid*/ 1,
					      /*bit*/ 1);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) &&
		    lun->prevent_count > 0) {
			/* "Medium removal prevented" */
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ?
5282 SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST, 5283 /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE); 5284 ctl_done((union ctl_io *)ctsio); 5285 return (CTL_RETVAL_COMPLETE); 5286 } 5287 } 5288 5289 retval = lun->backend->config_write((union ctl_io *)ctsio); 5290 return (retval); 5291 } 5292 5293 int 5294 ctl_prevent_allow(struct ctl_scsiio *ctsio) 5295 { 5296 struct ctl_lun *lun; 5297 struct scsi_prevent *cdb; 5298 int retval; 5299 uint32_t initidx; 5300 5301 CTL_DEBUG_PRINT(("ctl_prevent_allow\n")); 5302 5303 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5304 cdb = (struct scsi_prevent *)ctsio->cdb; 5305 5306 if ((lun->flags & CTL_LUN_REMOVABLE) == 0) { 5307 ctl_set_invalid_opcode(ctsio); 5308 ctl_done((union ctl_io *)ctsio); 5309 return (CTL_RETVAL_COMPLETE); 5310 } 5311 5312 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5313 mtx_lock(&lun->lun_lock); 5314 if ((cdb->how & PR_PREVENT) && 5315 ctl_is_set(lun->prevent, initidx) == 0) { 5316 ctl_set_mask(lun->prevent, initidx); 5317 lun->prevent_count++; 5318 } else if ((cdb->how & PR_PREVENT) == 0 && 5319 ctl_is_set(lun->prevent, initidx)) { 5320 ctl_clear_mask(lun->prevent, initidx); 5321 lun->prevent_count--; 5322 } 5323 mtx_unlock(&lun->lun_lock); 5324 retval = lun->backend->config_write((union ctl_io *)ctsio); 5325 return (retval); 5326 } 5327 5328 /* 5329 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5330 * we don't really do anything with the LBA and length fields if the user 5331 * passes them in. Instead we'll just flush out the cache for the entire 5332 * LUN. 5333 */ 5334 int 5335 ctl_sync_cache(struct ctl_scsiio *ctsio) 5336 { 5337 struct ctl_lun *lun; 5338 struct ctl_softc *softc; 5339 struct ctl_lba_len_flags *lbalen; 5340 uint64_t starting_lba; 5341 uint32_t block_count; 5342 int retval; 5343 uint8_t byte2; 5344 5345 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5346 5347 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5348 softc = lun->ctl_softc; 5349 retval = 0; 5350 5351 switch (ctsio->cdb[0]) { 5352 case SYNCHRONIZE_CACHE: { 5353 struct scsi_sync_cache *cdb; 5354 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5355 5356 starting_lba = scsi_4btoul(cdb->begin_lba); 5357 block_count = scsi_2btoul(cdb->lb_count); 5358 byte2 = cdb->byte2; 5359 break; 5360 } 5361 case SYNCHRONIZE_CACHE_16: { 5362 struct scsi_sync_cache_16 *cdb; 5363 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5364 5365 starting_lba = scsi_8btou64(cdb->begin_lba); 5366 block_count = scsi_4btoul(cdb->lb_count); 5367 byte2 = cdb->byte2; 5368 break; 5369 } 5370 default: 5371 ctl_set_invalid_opcode(ctsio); 5372 ctl_done((union ctl_io *)ctsio); 5373 goto bailout; 5374 break; /* NOTREACHED */ 5375 } 5376 5377 /* 5378 * We check the LBA and length, but don't do anything with them. 5379 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5380 * get flushed. This check will just help satisfy anyone who wants 5381 * to see an error for an out of range LBA. 
5382 */ 5383 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5384 ctl_set_lba_out_of_range(ctsio, 5385 MAX(starting_lba, lun->be_lun->maxlba + 1)); 5386 ctl_done((union ctl_io *)ctsio); 5387 goto bailout; 5388 } 5389 5390 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5391 lbalen->lba = starting_lba; 5392 lbalen->len = block_count; 5393 lbalen->flags = byte2; 5394 retval = lun->backend->config_write((union ctl_io *)ctsio); 5395 5396 bailout: 5397 return (retval); 5398 } 5399 5400 int 5401 ctl_format(struct ctl_scsiio *ctsio) 5402 { 5403 struct scsi_format *cdb; 5404 struct ctl_lun *lun; 5405 int length, defect_list_len; 5406 5407 CTL_DEBUG_PRINT(("ctl_format\n")); 5408 5409 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5410 5411 cdb = (struct scsi_format *)ctsio->cdb; 5412 5413 length = 0; 5414 if (cdb->byte2 & SF_FMTDATA) { 5415 if (cdb->byte2 & SF_LONGLIST) 5416 length = sizeof(struct scsi_format_header_long); 5417 else 5418 length = sizeof(struct scsi_format_header_short); 5419 } 5420 5421 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5422 && (length > 0)) { 5423 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5424 ctsio->kern_data_len = length; 5425 ctsio->kern_total_len = length; 5426 ctsio->kern_data_resid = 0; 5427 ctsio->kern_rel_offset = 0; 5428 ctsio->kern_sg_entries = 0; 5429 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5430 ctsio->be_move_done = ctl_config_move_done; 5431 ctl_datamove((union ctl_io *)ctsio); 5432 5433 return (CTL_RETVAL_COMPLETE); 5434 } 5435 5436 defect_list_len = 0; 5437 5438 if (cdb->byte2 & SF_FMTDATA) { 5439 if (cdb->byte2 & SF_LONGLIST) { 5440 struct scsi_format_header_long *header; 5441 5442 header = (struct scsi_format_header_long *) 5443 ctsio->kern_data_ptr; 5444 5445 defect_list_len = scsi_4btoul(header->defect_list_len); 5446 if (defect_list_len != 0) { 5447 ctl_set_invalid_field(ctsio, 5448 /*sks_valid*/ 1, 5449 /*command*/ 0, 5450 /*field*/ 2, 5451 /*bit_valid*/ 0, 5452 /*bit*/ 0); 5453 goto bailout; 5454 } 5455 } else { 5456 struct scsi_format_header_short *header; 5457 5458 header = (struct scsi_format_header_short *) 5459 ctsio->kern_data_ptr; 5460 5461 defect_list_len = scsi_2btoul(header->defect_list_len); 5462 if (defect_list_len != 0) { 5463 ctl_set_invalid_field(ctsio, 5464 /*sks_valid*/ 1, 5465 /*command*/ 0, 5466 /*field*/ 2, 5467 /*bit_valid*/ 0, 5468 /*bit*/ 0); 5469 goto bailout; 5470 } 5471 } 5472 } 5473 5474 ctl_set_success(ctsio); 5475 bailout: 5476 5477 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5478 free(ctsio->kern_data_ptr, M_CTL); 5479 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5480 } 5481 5482 ctl_done((union ctl_io *)ctsio); 5483 return (CTL_RETVAL_COMPLETE); 5484 } 5485 5486 int 5487 ctl_read_buffer(struct ctl_scsiio *ctsio) 5488 { 5489 struct ctl_lun *lun; 5490 uint64_t buffer_offset; 5491 uint32_t len; 5492 uint8_t byte2; 5493 static uint8_t descr[4]; 5494 static uint8_t echo_descr[4] = { 0 }; 5495 5496 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5497 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5498 switch (ctsio->cdb[0]) { 5499 case READ_BUFFER: { 5500 struct scsi_read_buffer *cdb; 5501 5502 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5503 buffer_offset = scsi_3btoul(cdb->offset); 5504 len = scsi_3btoul(cdb->length); 5505 byte2 = cdb->byte2; 5506 break; 5507 } 5508 case READ_BUFFER_16: { 5509 struct scsi_read_buffer_16 *cdb; 5510 5511 cdb = (struct scsi_read_buffer_16 *)ctsio->cdb; 5512 buffer_offset = 
scsi_8btou64(cdb->offset); 5513 len = scsi_4btoul(cdb->length); 5514 byte2 = cdb->byte2; 5515 break; 5516 } 5517 default: /* This shouldn't happen. */ 5518 ctl_set_invalid_opcode(ctsio); 5519 ctl_done((union ctl_io *)ctsio); 5520 return (CTL_RETVAL_COMPLETE); 5521 } 5522 5523 if (buffer_offset > CTL_WRITE_BUFFER_SIZE || 5524 buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5525 ctl_set_invalid_field(ctsio, 5526 /*sks_valid*/ 1, 5527 /*command*/ 1, 5528 /*field*/ 6, 5529 /*bit_valid*/ 0, 5530 /*bit*/ 0); 5531 ctl_done((union ctl_io *)ctsio); 5532 return (CTL_RETVAL_COMPLETE); 5533 } 5534 5535 if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5536 descr[0] = 0; 5537 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5538 ctsio->kern_data_ptr = descr; 5539 len = min(len, sizeof(descr)); 5540 } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5541 ctsio->kern_data_ptr = echo_descr; 5542 len = min(len, sizeof(echo_descr)); 5543 } else { 5544 if (lun->write_buffer == NULL) { 5545 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5546 M_CTL, M_WAITOK); 5547 } 5548 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5549 } 5550 ctsio->kern_data_len = len; 5551 ctsio->kern_total_len = len; 5552 ctsio->kern_data_resid = 0; 5553 ctsio->kern_rel_offset = 0; 5554 ctsio->kern_sg_entries = 0; 5555 ctl_set_success(ctsio); 5556 ctsio->be_move_done = ctl_config_move_done; 5557 ctl_datamove((union ctl_io *)ctsio); 5558 return (CTL_RETVAL_COMPLETE); 5559 } 5560 5561 int 5562 ctl_write_buffer(struct ctl_scsiio *ctsio) 5563 { 5564 struct scsi_write_buffer *cdb; 5565 struct ctl_lun *lun; 5566 int buffer_offset, len; 5567 5568 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5569 5570 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5571 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5572 5573 len = scsi_3btoul(cdb->length); 5574 buffer_offset = scsi_3btoul(cdb->offset); 5575 5576 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5577 ctl_set_invalid_field(ctsio, 5578 /*sks_valid*/ 1, 5579 /*command*/ 1, 5580 /*field*/ 6, 5581 /*bit_valid*/ 0, 5582 /*bit*/ 0); 5583 ctl_done((union ctl_io *)ctsio); 5584 return (CTL_RETVAL_COMPLETE); 5585 } 5586 5587 /* 5588 * If we've got a kernel request that hasn't been malloced yet, 5589 * malloc it and tell the caller the data buffer is here. 
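	 * (A WRITE BUFFER with data therefore passes through here twice:
	 * the first pass allocates lun->write_buffer if needed, points the
	 * I/O at it and starts ctl_datamove(); once the data has arrived,
	 * ctl_config_move_done() re-dispatches the command and the second
	 * pass below simply completes it.)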
5590 */ 5591 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5592 if (lun->write_buffer == NULL) { 5593 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5594 M_CTL, M_WAITOK); 5595 } 5596 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5597 ctsio->kern_data_len = len; 5598 ctsio->kern_total_len = len; 5599 ctsio->kern_data_resid = 0; 5600 ctsio->kern_rel_offset = 0; 5601 ctsio->kern_sg_entries = 0; 5602 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5603 ctsio->be_move_done = ctl_config_move_done; 5604 ctl_datamove((union ctl_io *)ctsio); 5605 5606 return (CTL_RETVAL_COMPLETE); 5607 } 5608 5609 ctl_set_success(ctsio); 5610 ctl_done((union ctl_io *)ctsio); 5611 return (CTL_RETVAL_COMPLETE); 5612 } 5613 5614 int 5615 ctl_write_same(struct ctl_scsiio *ctsio) 5616 { 5617 struct ctl_lun *lun; 5618 struct ctl_lba_len_flags *lbalen; 5619 uint64_t lba; 5620 uint32_t num_blocks; 5621 int len, retval; 5622 uint8_t byte2; 5623 5624 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5625 5626 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5627 5628 switch (ctsio->cdb[0]) { 5629 case WRITE_SAME_10: { 5630 struct scsi_write_same_10 *cdb; 5631 5632 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5633 5634 lba = scsi_4btoul(cdb->addr); 5635 num_blocks = scsi_2btoul(cdb->length); 5636 byte2 = cdb->byte2; 5637 break; 5638 } 5639 case WRITE_SAME_16: { 5640 struct scsi_write_same_16 *cdb; 5641 5642 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5643 5644 lba = scsi_8btou64(cdb->addr); 5645 num_blocks = scsi_4btoul(cdb->length); 5646 byte2 = cdb->byte2; 5647 break; 5648 } 5649 default: 5650 /* 5651 * We got a command we don't support. This shouldn't 5652 * happen, commands should be filtered out above us. 5653 */ 5654 ctl_set_invalid_opcode(ctsio); 5655 ctl_done((union ctl_io *)ctsio); 5656 5657 return (CTL_RETVAL_COMPLETE); 5658 break; /* NOTREACHED */ 5659 } 5660 5661 /* ANCHOR flag can be used only together with UNMAP */ 5662 if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) { 5663 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5664 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5665 ctl_done((union ctl_io *)ctsio); 5666 return (CTL_RETVAL_COMPLETE); 5667 } 5668 5669 /* 5670 * The first check is to make sure we're in bounds, the second 5671 * check is to catch wrap-around problems. If the lba + num blocks 5672 * is less than the lba, then we've wrapped around and the block 5673 * range is invalid anyway. 5674 */ 5675 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5676 || ((lba + num_blocks) < lba)) { 5677 ctl_set_lba_out_of_range(ctsio, 5678 MAX(lba, lun->be_lun->maxlba + 1)); 5679 ctl_done((union ctl_io *)ctsio); 5680 return (CTL_RETVAL_COMPLETE); 5681 } 5682 5683 /* Zero number of blocks means "to the last logical block" */ 5684 if (num_blocks == 0) { 5685 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5686 ctl_set_invalid_field(ctsio, 5687 /*sks_valid*/ 0, 5688 /*command*/ 1, 5689 /*field*/ 0, 5690 /*bit_valid*/ 0, 5691 /*bit*/ 0); 5692 ctl_done((union ctl_io *)ctsio); 5693 return (CTL_RETVAL_COMPLETE); 5694 } 5695 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5696 } 5697 5698 len = lun->be_lun->blocksize; 5699 5700 /* 5701 * If we've got a kernel request that hasn't been malloced yet, 5702 * malloc it and tell the caller the data buffer is here. 
5703 */ 5704 if ((byte2 & SWS_NDOB) == 0 && 5705 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5706 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5707 ctsio->kern_data_len = len; 5708 ctsio->kern_total_len = len; 5709 ctsio->kern_data_resid = 0; 5710 ctsio->kern_rel_offset = 0; 5711 ctsio->kern_sg_entries = 0; 5712 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5713 ctsio->be_move_done = ctl_config_move_done; 5714 ctl_datamove((union ctl_io *)ctsio); 5715 5716 return (CTL_RETVAL_COMPLETE); 5717 } 5718 5719 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5720 lbalen->lba = lba; 5721 lbalen->len = num_blocks; 5722 lbalen->flags = byte2; 5723 retval = lun->backend->config_write((union ctl_io *)ctsio); 5724 5725 return (retval); 5726 } 5727 5728 int 5729 ctl_unmap(struct ctl_scsiio *ctsio) 5730 { 5731 struct ctl_lun *lun; 5732 struct scsi_unmap *cdb; 5733 struct ctl_ptr_len_flags *ptrlen; 5734 struct scsi_unmap_header *hdr; 5735 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5736 uint64_t lba; 5737 uint32_t num_blocks; 5738 int len, retval; 5739 uint8_t byte2; 5740 5741 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5742 5743 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5744 cdb = (struct scsi_unmap *)ctsio->cdb; 5745 5746 len = scsi_2btoul(cdb->length); 5747 byte2 = cdb->byte2; 5748 5749 /* 5750 * If we've got a kernel request that hasn't been malloced yet, 5751 * malloc it and tell the caller the data buffer is here. 5752 */ 5753 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5754 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5755 ctsio->kern_data_len = len; 5756 ctsio->kern_total_len = len; 5757 ctsio->kern_data_resid = 0; 5758 ctsio->kern_rel_offset = 0; 5759 ctsio->kern_sg_entries = 0; 5760 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5761 ctsio->be_move_done = ctl_config_move_done; 5762 ctl_datamove((union ctl_io *)ctsio); 5763 5764 return (CTL_RETVAL_COMPLETE); 5765 } 5766 5767 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5768 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5769 if (len < sizeof (*hdr) || 5770 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5771 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5772 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5773 ctl_set_invalid_field(ctsio, 5774 /*sks_valid*/ 0, 5775 /*command*/ 0, 5776 /*field*/ 0, 5777 /*bit_valid*/ 0, 5778 /*bit*/ 0); 5779 goto done; 5780 } 5781 len = scsi_2btoul(hdr->desc_length); 5782 buf = (struct scsi_unmap_desc *)(hdr + 1); 5783 end = buf + len / sizeof(*buf); 5784 5785 endnz = buf; 5786 for (range = buf; range < end; range++) { 5787 lba = scsi_8btou64(range->lba); 5788 num_blocks = scsi_4btoul(range->length); 5789 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5790 || ((lba + num_blocks) < lba)) { 5791 ctl_set_lba_out_of_range(ctsio, 5792 MAX(lba, lun->be_lun->maxlba + 1)); 5793 ctl_done((union ctl_io *)ctsio); 5794 return (CTL_RETVAL_COMPLETE); 5795 } 5796 if (num_blocks != 0) 5797 endnz = range + 1; 5798 } 5799 5800 /* 5801 * Block backend can not handle zero last range. 5802 * Filter it out and return if there is nothing left. 
5803 */ 5804 len = (uint8_t *)endnz - (uint8_t *)buf; 5805 if (len == 0) { 5806 ctl_set_success(ctsio); 5807 goto done; 5808 } 5809 5810 mtx_lock(&lun->lun_lock); 5811 ptrlen = (struct ctl_ptr_len_flags *) 5812 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5813 ptrlen->ptr = (void *)buf; 5814 ptrlen->len = len; 5815 ptrlen->flags = byte2; 5816 ctl_check_blocked(lun); 5817 mtx_unlock(&lun->lun_lock); 5818 5819 retval = lun->backend->config_write((union ctl_io *)ctsio); 5820 return (retval); 5821 5822 done: 5823 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5824 free(ctsio->kern_data_ptr, M_CTL); 5825 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5826 } 5827 ctl_done((union ctl_io *)ctsio); 5828 return (CTL_RETVAL_COMPLETE); 5829 } 5830 5831 int 5832 ctl_default_page_handler(struct ctl_scsiio *ctsio, 5833 struct ctl_page_index *page_index, uint8_t *page_ptr) 5834 { 5835 struct ctl_lun *lun; 5836 uint8_t *current_cp, *saved_cp; 5837 int set_ua; 5838 uint32_t initidx; 5839 5840 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5841 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5842 set_ua = 0; 5843 5844 current_cp = (page_index->page_data + (page_index->page_len * 5845 CTL_PAGE_CURRENT)); 5846 saved_cp = (page_index->page_data + (page_index->page_len * 5847 CTL_PAGE_SAVED)); 5848 5849 mtx_lock(&lun->lun_lock); 5850 if (memcmp(current_cp, page_ptr, page_index->page_len)) { 5851 memcpy(current_cp, page_ptr, page_index->page_len); 5852 memcpy(saved_cp, page_ptr, page_index->page_len); 5853 set_ua = 1; 5854 } 5855 if (set_ua != 0) 5856 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5857 mtx_unlock(&lun->lun_lock); 5858 if (set_ua) { 5859 ctl_isc_announce_mode(lun, 5860 ctl_get_initindex(&ctsio->io_hdr.nexus), 5861 page_index->page_code, page_index->subpage); 5862 } 5863 return (CTL_RETVAL_COMPLETE); 5864 } 5865 5866 static void 5867 ctl_ie_timer(void *arg) 5868 { 5869 struct ctl_lun *lun = arg; 5870 uint64_t t; 5871 5872 if (lun->ie_asc == 0) 5873 return; 5874 5875 if (lun->MODE_IE.mrie == SIEP_MRIE_UA) 5876 ctl_est_ua_all(lun, -1, CTL_UA_IE); 5877 else 5878 lun->ie_reported = 0; 5879 5880 if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) { 5881 lun->ie_reportcnt++; 5882 t = scsi_4btoul(lun->MODE_IE.interval_timer); 5883 if (t == 0 || t == UINT32_MAX) 5884 t = 3000; /* 5 min */ 5885 callout_schedule(&lun->ie_callout, t * hz / 10); 5886 } 5887 } 5888 5889 int 5890 ctl_ie_page_handler(struct ctl_scsiio *ctsio, 5891 struct ctl_page_index *page_index, uint8_t *page_ptr) 5892 { 5893 struct scsi_info_exceptions_page *pg; 5894 struct ctl_lun *lun; 5895 uint64_t t; 5896 5897 (void)ctl_default_page_handler(ctsio, page_index, page_ptr); 5898 5899 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5900 pg = (struct scsi_info_exceptions_page *)page_ptr; 5901 mtx_lock(&lun->lun_lock); 5902 if (pg->info_flags & SIEP_FLAGS_TEST) { 5903 lun->ie_asc = 0x5d; 5904 lun->ie_ascq = 0xff; 5905 if (pg->mrie == SIEP_MRIE_UA) { 5906 ctl_est_ua_all(lun, -1, CTL_UA_IE); 5907 lun->ie_reported = 1; 5908 } else { 5909 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 5910 lun->ie_reported = -1; 5911 } 5912 lun->ie_reportcnt = 1; 5913 if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) { 5914 lun->ie_reportcnt++; 5915 t = scsi_4btoul(pg->interval_timer); 5916 if (t == 0 || t == UINT32_MAX) 5917 t = 3000; /* 5 min */ 5918 callout_reset(&lun->ie_callout, t * hz / 10, 5919 ctl_ie_timer, lun); 5920 } 5921 } else { 5922 lun->ie_asc = 0; 5923 lun->ie_ascq = 0; 5924 lun->ie_reported = 1; 
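/* The TEST bit is not set: clear any pending IE unit attentions and stop the periodic reporting timer. */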
5925 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 5926 lun->ie_reportcnt = UINT32_MAX; 5927 callout_stop(&lun->ie_callout); 5928 } 5929 mtx_unlock(&lun->lun_lock); 5930 return (CTL_RETVAL_COMPLETE); 5931 } 5932 5933 static int 5934 ctl_do_mode_select(union ctl_io *io) 5935 { 5936 struct scsi_mode_page_header *page_header; 5937 struct ctl_page_index *page_index; 5938 struct ctl_scsiio *ctsio; 5939 int page_len, page_len_offset, page_len_size; 5940 union ctl_modepage_info *modepage_info; 5941 struct ctl_lun *lun; 5942 uint16_t *len_left, *len_used; 5943 int retval, i; 5944 5945 ctsio = &io->scsiio; 5946 page_index = NULL; 5947 page_len = 0; 5948 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5949 5950 modepage_info = (union ctl_modepage_info *) 5951 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 5952 len_left = &modepage_info->header.len_left; 5953 len_used = &modepage_info->header.len_used; 5954 5955 do_next_page: 5956 5957 page_header = (struct scsi_mode_page_header *) 5958 (ctsio->kern_data_ptr + *len_used); 5959 5960 if (*len_left == 0) { 5961 free(ctsio->kern_data_ptr, M_CTL); 5962 ctl_set_success(ctsio); 5963 ctl_done((union ctl_io *)ctsio); 5964 return (CTL_RETVAL_COMPLETE); 5965 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 5966 5967 free(ctsio->kern_data_ptr, M_CTL); 5968 ctl_set_param_len_error(ctsio); 5969 ctl_done((union ctl_io *)ctsio); 5970 return (CTL_RETVAL_COMPLETE); 5971 5972 } else if ((page_header->page_code & SMPH_SPF) 5973 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 5974 5975 free(ctsio->kern_data_ptr, M_CTL); 5976 ctl_set_param_len_error(ctsio); 5977 ctl_done((union ctl_io *)ctsio); 5978 return (CTL_RETVAL_COMPLETE); 5979 } 5980 5981 5982 /* 5983 * XXX KDM should we do something with the block descriptor? 5984 */ 5985 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 5986 page_index = &lun->mode_pages.index[i]; 5987 if (lun->be_lun->lun_type == T_DIRECT && 5988 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 5989 continue; 5990 if (lun->be_lun->lun_type == T_PROCESSOR && 5991 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 5992 continue; 5993 if (lun->be_lun->lun_type == T_CDROM && 5994 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 5995 continue; 5996 5997 if ((page_index->page_code & SMPH_PC_MASK) != 5998 (page_header->page_code & SMPH_PC_MASK)) 5999 continue; 6000 6001 /* 6002 * If neither page has a subpage code, then we've got a 6003 * match. 6004 */ 6005 if (((page_index->page_code & SMPH_SPF) == 0) 6006 && ((page_header->page_code & SMPH_SPF) == 0)) { 6007 page_len = page_header->page_length; 6008 break; 6009 } 6010 6011 /* 6012 * If both pages have subpages, then the subpage numbers 6013 * have to match. 6014 */ 6015 if ((page_index->page_code & SMPH_SPF) 6016 && (page_header->page_code & SMPH_SPF)) { 6017 struct scsi_mode_page_header_sp *sph; 6018 6019 sph = (struct scsi_mode_page_header_sp *)page_header; 6020 if (page_index->subpage == sph->subpage) { 6021 page_len = scsi_2btoul(sph->page_length); 6022 break; 6023 } 6024 } 6025 } 6026 6027 /* 6028 * If we couldn't find the page, or if we don't have a mode select 6029 * handler for it, send back an error to the user. 
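 * The field offset reported in the sense-key-specific data is the offset of the offending page header within the parameter list.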
6030 */ 6031 if ((i >= CTL_NUM_MODE_PAGES) 6032 || (page_index->select_handler == NULL)) { 6033 ctl_set_invalid_field(ctsio, 6034 /*sks_valid*/ 1, 6035 /*command*/ 0, 6036 /*field*/ *len_used, 6037 /*bit_valid*/ 0, 6038 /*bit*/ 0); 6039 free(ctsio->kern_data_ptr, M_CTL); 6040 ctl_done((union ctl_io *)ctsio); 6041 return (CTL_RETVAL_COMPLETE); 6042 } 6043 6044 if (page_index->page_code & SMPH_SPF) { 6045 page_len_offset = 2; 6046 page_len_size = 2; 6047 } else { 6048 page_len_size = 1; 6049 page_len_offset = 1; 6050 } 6051 6052 /* 6053 * If the length the initiator gives us isn't the one we specify in 6054 * the mode page header, or if they didn't specify enough data in 6055 * the CDB to avoid truncating this page, kick out the request. 6056 */ 6057 if (page_len != page_index->page_len - page_len_offset - page_len_size) { 6058 ctl_set_invalid_field(ctsio, 6059 /*sks_valid*/ 1, 6060 /*command*/ 0, 6061 /*field*/ *len_used + page_len_offset, 6062 /*bit_valid*/ 0, 6063 /*bit*/ 0); 6064 free(ctsio->kern_data_ptr, M_CTL); 6065 ctl_done((union ctl_io *)ctsio); 6066 return (CTL_RETVAL_COMPLETE); 6067 } 6068 if (*len_left < page_index->page_len) { 6069 free(ctsio->kern_data_ptr, M_CTL); 6070 ctl_set_param_len_error(ctsio); 6071 ctl_done((union ctl_io *)ctsio); 6072 return (CTL_RETVAL_COMPLETE); 6073 } 6074 6075 /* 6076 * Run through the mode page, checking to make sure that the bits 6077 * the user changed are actually legal for him to change. 6078 */ 6079 for (i = 0; i < page_index->page_len; i++) { 6080 uint8_t *user_byte, *change_mask, *current_byte; 6081 int bad_bit; 6082 int j; 6083 6084 user_byte = (uint8_t *)page_header + i; 6085 change_mask = page_index->page_data + 6086 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6087 current_byte = page_index->page_data + 6088 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6089 6090 /* 6091 * Check to see whether the user set any bits in this byte 6092 * that he is not allowed to set. 6093 */ 6094 if ((*user_byte & ~(*change_mask)) == 6095 (*current_byte & ~(*change_mask))) 6096 continue; 6097 6098 /* 6099 * Go through bit by bit to determine which one is illegal. 6100 */ 6101 bad_bit = 0; 6102 for (j = 7; j >= 0; j--) { 6103 if ((((1 << j) & ~(*change_mask)) & *user_byte) != 6104 (((1 << j) & ~(*change_mask)) & *current_byte)) { 6105 bad_bit = j; 6106 break; 6107 } 6108 } 6109 ctl_set_invalid_field(ctsio, 6110 /*sks_valid*/ 1, 6111 /*command*/ 0, 6112 /*field*/ *len_used + i, 6113 /*bit_valid*/ 1, 6114 /*bit*/ bad_bit); 6115 free(ctsio->kern_data_ptr, M_CTL); 6116 ctl_done((union ctl_io *)ctsio); 6117 return (CTL_RETVAL_COMPLETE); 6118 } 6119 6120 /* 6121 * Decrement these before we call the page handler, since we may 6122 * end up getting called back one way or another before the handler 6123 * returns to this context. 6124 */ 6125 *len_left -= page_index->page_len; 6126 *len_used += page_index->page_len; 6127 6128 retval = page_index->select_handler(ctsio, page_index, 6129 (uint8_t *)page_header); 6130 6131 /* 6132 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 6133 * wait until this queued command completes to finish processing 6134 * the mode page. If it returns anything other than 6135 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 6136 * already set the sense information, freed the data pointer, and 6137 * completed the io for us. 6138 */ 6139 if (retval != CTL_RETVAL_COMPLETE) 6140 goto bailout_no_done; 6141 6142 /* 6143 * If the initiator sent us more than one page, parse the next one.
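 * (len_left and len_used were already advanced past the page just handled, so jumping back to the do_next_page label picks up at the next page header.)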
6144 */ 6145 if (*len_left > 0) 6146 goto do_next_page; 6147 6148 ctl_set_success(ctsio); 6149 free(ctsio->kern_data_ptr, M_CTL); 6150 ctl_done((union ctl_io *)ctsio); 6151 6152 bailout_no_done: 6153 6154 return (CTL_RETVAL_COMPLETE); 6155 6156 } 6157 6158 int 6159 ctl_mode_select(struct ctl_scsiio *ctsio) 6160 { 6161 struct ctl_lun *lun; 6162 union ctl_modepage_info *modepage_info; 6163 int bd_len, i, header_size, param_len, pf, rtd, sp; 6164 uint32_t initidx; 6165 6166 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6167 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 6168 switch (ctsio->cdb[0]) { 6169 case MODE_SELECT_6: { 6170 struct scsi_mode_select_6 *cdb; 6171 6172 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6173 6174 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6175 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6176 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6177 param_len = cdb->length; 6178 header_size = sizeof(struct scsi_mode_header_6); 6179 break; 6180 } 6181 case MODE_SELECT_10: { 6182 struct scsi_mode_select_10 *cdb; 6183 6184 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6185 6186 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6187 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6188 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6189 param_len = scsi_2btoul(cdb->length); 6190 header_size = sizeof(struct scsi_mode_header_10); 6191 break; 6192 } 6193 default: 6194 ctl_set_invalid_opcode(ctsio); 6195 ctl_done((union ctl_io *)ctsio); 6196 return (CTL_RETVAL_COMPLETE); 6197 } 6198 6199 if (rtd) { 6200 if (param_len != 0) { 6201 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, 6202 /*command*/ 1, /*field*/ 0, 6203 /*bit_valid*/ 0, /*bit*/ 0); 6204 ctl_done((union ctl_io *)ctsio); 6205 return (CTL_RETVAL_COMPLETE); 6206 } 6207 6208 /* Revert to defaults. */ 6209 ctl_init_page_index(lun); 6210 mtx_lock(&lun->lun_lock); 6211 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 6212 mtx_unlock(&lun->lun_lock); 6213 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6214 ctl_isc_announce_mode(lun, -1, 6215 lun->mode_pages.index[i].page_code & SMPH_PC_MASK, 6216 lun->mode_pages.index[i].subpage); 6217 } 6218 ctl_set_success(ctsio); 6219 ctl_done((union ctl_io *)ctsio); 6220 return (CTL_RETVAL_COMPLETE); 6221 } 6222 6223 /* 6224 * From SPC-3: 6225 * "A parameter list length of zero indicates that the Data-Out Buffer 6226 * shall be empty. This condition shall not be considered as an error." 6227 */ 6228 if (param_len == 0) { 6229 ctl_set_success(ctsio); 6230 ctl_done((union ctl_io *)ctsio); 6231 return (CTL_RETVAL_COMPLETE); 6232 } 6233 6234 /* 6235 * Since we'll hit this the first time through, prior to 6236 * allocation, we don't need to free a data buffer here. 6237 */ 6238 if (param_len < header_size) { 6239 ctl_set_param_len_error(ctsio); 6240 ctl_done((union ctl_io *)ctsio); 6241 return (CTL_RETVAL_COMPLETE); 6242 } 6243 6244 /* 6245 * Allocate the data buffer and grab the user's data. In theory, 6246 * we shouldn't have to sanity check the parameter list length here 6247 * because the maximum size is 64K. We should be able to malloc 6248 * that much without too many problems. 
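 * (MODE SELECT(6) has a one-byte parameter list length and MODE SELECT(10) a two-byte one, so param_len can never exceed 65535.)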
6249 */ 6250 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6251 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6252 ctsio->kern_data_len = param_len; 6253 ctsio->kern_total_len = param_len; 6254 ctsio->kern_data_resid = 0; 6255 ctsio->kern_rel_offset = 0; 6256 ctsio->kern_sg_entries = 0; 6257 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6258 ctsio->be_move_done = ctl_config_move_done; 6259 ctl_datamove((union ctl_io *)ctsio); 6260 6261 return (CTL_RETVAL_COMPLETE); 6262 } 6263 6264 switch (ctsio->cdb[0]) { 6265 case MODE_SELECT_6: { 6266 struct scsi_mode_header_6 *mh6; 6267 6268 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6269 bd_len = mh6->blk_desc_len; 6270 break; 6271 } 6272 case MODE_SELECT_10: { 6273 struct scsi_mode_header_10 *mh10; 6274 6275 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6276 bd_len = scsi_2btoul(mh10->blk_desc_len); 6277 break; 6278 } 6279 default: 6280 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6281 } 6282 6283 if (param_len < (header_size + bd_len)) { 6284 free(ctsio->kern_data_ptr, M_CTL); 6285 ctl_set_param_len_error(ctsio); 6286 ctl_done((union ctl_io *)ctsio); 6287 return (CTL_RETVAL_COMPLETE); 6288 } 6289 6290 /* 6291 * Set the IO_CONT flag, so that if this I/O gets passed to 6292 * ctl_config_write_done(), it'll get passed back to 6293 * ctl_do_mode_select() for further processing, or completion if 6294 * we're all done. 6295 */ 6296 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6297 ctsio->io_cont = ctl_do_mode_select; 6298 6299 modepage_info = (union ctl_modepage_info *) 6300 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6301 memset(modepage_info, 0, sizeof(*modepage_info)); 6302 modepage_info->header.len_left = param_len - header_size - bd_len; 6303 modepage_info->header.len_used = header_size + bd_len; 6304 6305 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6306 } 6307 6308 int 6309 ctl_mode_sense(struct ctl_scsiio *ctsio) 6310 { 6311 struct ctl_lun *lun; 6312 int pc, page_code, dbd, llba, subpage; 6313 int alloc_len, page_len, header_len, total_len; 6314 struct scsi_mode_block_descr *block_desc; 6315 struct ctl_page_index *page_index; 6316 6317 dbd = 0; 6318 llba = 0; 6319 block_desc = NULL; 6320 6321 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6322 6323 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6324 switch (ctsio->cdb[0]) { 6325 case MODE_SENSE_6: { 6326 struct scsi_mode_sense_6 *cdb; 6327 6328 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6329 6330 header_len = sizeof(struct scsi_mode_hdr_6); 6331 if (cdb->byte2 & SMS_DBD) 6332 dbd = 1; 6333 else 6334 header_len += sizeof(struct scsi_mode_block_descr); 6335 6336 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6337 page_code = cdb->page & SMS_PAGE_CODE; 6338 subpage = cdb->subpage; 6339 alloc_len = cdb->length; 6340 break; 6341 } 6342 case MODE_SENSE_10: { 6343 struct scsi_mode_sense_10 *cdb; 6344 6345 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6346 6347 header_len = sizeof(struct scsi_mode_hdr_10); 6348 6349 if (cdb->byte2 & SMS_DBD) 6350 dbd = 1; 6351 else 6352 header_len += sizeof(struct scsi_mode_block_descr); 6353 if (cdb->byte2 & SMS10_LLBAA) 6354 llba = 1; 6355 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6356 page_code = cdb->page & SMS_PAGE_CODE; 6357 subpage = cdb->subpage; 6358 alloc_len = scsi_2btoul(cdb->length); 6359 break; 6360 } 6361 default: 6362 ctl_set_invalid_opcode(ctsio); 6363 ctl_done((union ctl_io *)ctsio); 6364 return (CTL_RETVAL_COMPLETE); 6365 break; /* NOTREACHED */ 6366 } 6367 6368 /* 
6369 * We have to make a first pass through to calculate the size of 6370 * the pages that match the user's query. Then we allocate enough 6371 * memory to hold it, and actually copy the data into the buffer. 6372 */ 6373 switch (page_code) { 6374 case SMS_ALL_PAGES_PAGE: { 6375 u_int i; 6376 6377 page_len = 0; 6378 6379 /* 6380 * At the moment, values other than 0 and 0xff here are 6381 * reserved according to SPC-3. 6382 */ 6383 if ((subpage != SMS_SUBPAGE_PAGE_0) 6384 && (subpage != SMS_SUBPAGE_ALL)) { 6385 ctl_set_invalid_field(ctsio, 6386 /*sks_valid*/ 1, 6387 /*command*/ 1, 6388 /*field*/ 3, 6389 /*bit_valid*/ 0, 6390 /*bit*/ 0); 6391 ctl_done((union ctl_io *)ctsio); 6392 return (CTL_RETVAL_COMPLETE); 6393 } 6394 6395 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6396 page_index = &lun->mode_pages.index[i]; 6397 6398 /* Make sure the page is supported for this dev type */ 6399 if (lun->be_lun->lun_type == T_DIRECT && 6400 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6401 continue; 6402 if (lun->be_lun->lun_type == T_PROCESSOR && 6403 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6404 continue; 6405 if (lun->be_lun->lun_type == T_CDROM && 6406 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6407 continue; 6408 6409 /* 6410 * We don't use this subpage if the user didn't 6411 * request all subpages. 6412 */ 6413 if ((page_index->subpage != 0) 6414 && (subpage == SMS_SUBPAGE_PAGE_0)) 6415 continue; 6416 6417 #if 0 6418 printf("found page %#x len %d\n", 6419 page_index->page_code & SMPH_PC_MASK, 6420 page_index->page_len); 6421 #endif 6422 page_len += page_index->page_len; 6423 } 6424 break; 6425 } 6426 default: { 6427 u_int i; 6428 6429 page_len = 0; 6430 6431 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6432 page_index = &lun->mode_pages.index[i]; 6433 6434 /* Make sure the page is supported for this dev type */ 6435 if (lun->be_lun->lun_type == T_DIRECT && 6436 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6437 continue; 6438 if (lun->be_lun->lun_type == T_PROCESSOR && 6439 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6440 continue; 6441 if (lun->be_lun->lun_type == T_CDROM && 6442 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6443 continue; 6444 6445 /* Look for the right page code */ 6446 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6447 continue; 6448 6449 /* Look for the right subpage or the subpage wildcard*/ 6450 if ((page_index->subpage != subpage) 6451 && (subpage != SMS_SUBPAGE_ALL)) 6452 continue; 6453 6454 #if 0 6455 printf("found page %#x len %d\n", 6456 page_index->page_code & SMPH_PC_MASK, 6457 page_index->page_len); 6458 #endif 6459 6460 page_len += page_index->page_len; 6461 } 6462 6463 if (page_len == 0) { 6464 ctl_set_invalid_field(ctsio, 6465 /*sks_valid*/ 1, 6466 /*command*/ 1, 6467 /*field*/ 2, 6468 /*bit_valid*/ 1, 6469 /*bit*/ 5); 6470 ctl_done((union ctl_io *)ctsio); 6471 return (CTL_RETVAL_COMPLETE); 6472 } 6473 break; 6474 } 6475 } 6476 6477 total_len = header_len + page_len; 6478 #if 0 6479 printf("header_len = %d, page_len = %d, total_len = %d\n", 6480 header_len, page_len, total_len); 6481 #endif 6482 6483 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6484 ctsio->kern_sg_entries = 0; 6485 ctsio->kern_data_resid = 0; 6486 ctsio->kern_rel_offset = 0; 6487 if (total_len < alloc_len) { 6488 ctsio->residual = alloc_len - total_len; 6489 ctsio->kern_data_len = total_len; 6490 ctsio->kern_total_len = total_len; 6491 } else { 6492 ctsio->residual = 0; 6493 ctsio->kern_data_len = alloc_len; 6494 
ctsio->kern_total_len = alloc_len; 6495 } 6496 6497 switch (ctsio->cdb[0]) { 6498 case MODE_SENSE_6: { 6499 struct scsi_mode_hdr_6 *header; 6500 6501 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6502 6503 header->datalen = MIN(total_len - 1, 254); 6504 if (lun->be_lun->lun_type == T_DIRECT) { 6505 header->dev_specific = 0x10; /* DPOFUA */ 6506 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6507 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6508 header->dev_specific |= 0x80; /* WP */ 6509 } 6510 if (dbd) 6511 header->block_descr_len = 0; 6512 else 6513 header->block_descr_len = 6514 sizeof(struct scsi_mode_block_descr); 6515 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6516 break; 6517 } 6518 case MODE_SENSE_10: { 6519 struct scsi_mode_hdr_10 *header; 6520 int datalen; 6521 6522 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6523 6524 datalen = MIN(total_len - 2, 65533); 6525 scsi_ulto2b(datalen, header->datalen); 6526 if (lun->be_lun->lun_type == T_DIRECT) { 6527 header->dev_specific = 0x10; /* DPOFUA */ 6528 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6529 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6530 header->dev_specific |= 0x80; /* WP */ 6531 } 6532 if (dbd) 6533 scsi_ulto2b(0, header->block_descr_len); 6534 else 6535 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6536 header->block_descr_len); 6537 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6538 break; 6539 } 6540 default: 6541 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6542 } 6543 6544 /* 6545 * If we've got a disk, use its blocksize in the block 6546 * descriptor. Otherwise, just set it to 0. 6547 */ 6548 if (dbd == 0) { 6549 if (lun->be_lun->lun_type == T_DIRECT) 6550 scsi_ulto3b(lun->be_lun->blocksize, 6551 block_desc->block_len); 6552 else 6553 scsi_ulto3b(0, block_desc->block_len); 6554 } 6555 6556 switch (page_code) { 6557 case SMS_ALL_PAGES_PAGE: { 6558 int i, data_used; 6559 6560 data_used = header_len; 6561 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6562 struct ctl_page_index *page_index; 6563 6564 page_index = &lun->mode_pages.index[i]; 6565 if (lun->be_lun->lun_type == T_DIRECT && 6566 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6567 continue; 6568 if (lun->be_lun->lun_type == T_PROCESSOR && 6569 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6570 continue; 6571 if (lun->be_lun->lun_type == T_CDROM && 6572 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6573 continue; 6574 6575 /* 6576 * We don't use this subpage if the user didn't 6577 * request all subpages. We already checked (above) 6578 * to make sure the user only specified a subpage 6579 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6580 */ 6581 if ((page_index->subpage != 0) 6582 && (subpage == SMS_SUBPAGE_PAGE_0)) 6583 continue; 6584 6585 /* 6586 * Call the handler, if it exists, to update the 6587 * page to the latest values. 
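 * page_data holds one copy of each page per page control value (current, changeable, default, saved), so the copy below indexes directly by pc.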
6588 */ 6589 if (page_index->sense_handler != NULL) 6590 page_index->sense_handler(ctsio, page_index,pc); 6591 6592 memcpy(ctsio->kern_data_ptr + data_used, 6593 page_index->page_data + 6594 (page_index->page_len * pc), 6595 page_index->page_len); 6596 data_used += page_index->page_len; 6597 } 6598 break; 6599 } 6600 default: { 6601 int i, data_used; 6602 6603 data_used = header_len; 6604 6605 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6606 struct ctl_page_index *page_index; 6607 6608 page_index = &lun->mode_pages.index[i]; 6609 6610 /* Look for the right page code */ 6611 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6612 continue; 6613 6614 /* Look for the right subpage or the subpage wildcard*/ 6615 if ((page_index->subpage != subpage) 6616 && (subpage != SMS_SUBPAGE_ALL)) 6617 continue; 6618 6619 /* Make sure the page is supported for this dev type */ 6620 if (lun->be_lun->lun_type == T_DIRECT && 6621 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6622 continue; 6623 if (lun->be_lun->lun_type == T_PROCESSOR && 6624 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6625 continue; 6626 if (lun->be_lun->lun_type == T_CDROM && 6627 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6628 continue; 6629 6630 /* 6631 * Call the handler, if it exists, to update the 6632 * page to the latest values. 6633 */ 6634 if (page_index->sense_handler != NULL) 6635 page_index->sense_handler(ctsio, page_index,pc); 6636 6637 memcpy(ctsio->kern_data_ptr + data_used, 6638 page_index->page_data + 6639 (page_index->page_len * pc), 6640 page_index->page_len); 6641 data_used += page_index->page_len; 6642 } 6643 break; 6644 } 6645 } 6646 6647 ctl_set_success(ctsio); 6648 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6649 ctsio->be_move_done = ctl_config_move_done; 6650 ctl_datamove((union ctl_io *)ctsio); 6651 return (CTL_RETVAL_COMPLETE); 6652 } 6653 6654 int 6655 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6656 struct ctl_page_index *page_index, 6657 int pc) 6658 { 6659 struct ctl_lun *lun; 6660 struct scsi_log_param_header *phdr; 6661 uint8_t *data; 6662 uint64_t val; 6663 6664 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6665 data = page_index->page_data; 6666 6667 if (lun->backend->lun_attr != NULL && 6668 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) 6669 != UINT64_MAX) { 6670 phdr = (struct scsi_log_param_header *)data; 6671 scsi_ulto2b(0x0001, phdr->param_code); 6672 phdr->param_control = SLP_LBIN | SLP_LP; 6673 phdr->param_len = 8; 6674 data = (uint8_t *)(phdr + 1); 6675 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6676 data[4] = 0x02; /* per-pool */ 6677 data += phdr->param_len; 6678 } 6679 6680 if (lun->backend->lun_attr != NULL && 6681 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) 6682 != UINT64_MAX) { 6683 phdr = (struct scsi_log_param_header *)data; 6684 scsi_ulto2b(0x0002, phdr->param_code); 6685 phdr->param_control = SLP_LBIN | SLP_LP; 6686 phdr->param_len = 8; 6687 data = (uint8_t *)(phdr + 1); 6688 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6689 data[4] = 0x01; /* per-LUN */ 6690 data += phdr->param_len; 6691 } 6692 6693 if (lun->backend->lun_attr != NULL && 6694 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail")) 6695 != UINT64_MAX) { 6696 phdr = (struct scsi_log_param_header *)data; 6697 scsi_ulto2b(0x00f1, phdr->param_code); 6698 phdr->param_control = SLP_LBIN | SLP_LP; 6699 phdr->param_len = 8; 6700 data = (uint8_t *)(phdr + 1); 6701 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6702 
data[4] = 0x02; /* per-pool */ 6703 data += phdr->param_len; 6704 } 6705 6706 if (lun->backend->lun_attr != NULL && 6707 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) 6708 != UINT64_MAX) { 6709 phdr = (struct scsi_log_param_header *)data; 6710 scsi_ulto2b(0x00f2, phdr->param_code); 6711 phdr->param_control = SLP_LBIN | SLP_LP; 6712 phdr->param_len = 8; 6713 data = (uint8_t *)(phdr + 1); 6714 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6715 data[4] = 0x02; /* per-pool */ 6716 data += phdr->param_len; 6717 } 6718 6719 page_index->page_len = data - page_index->page_data; 6720 return (0); 6721 } 6722 6723 int 6724 ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, 6725 struct ctl_page_index *page_index, 6726 int pc) 6727 { 6728 struct ctl_lun *lun; 6729 struct stat_page *data; 6730 uint64_t rn, wn, rb, wb; 6731 struct bintime rt, wt; 6732 int i; 6733 6734 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6735 data = (struct stat_page *)page_index->page_data; 6736 6737 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); 6738 data->sap.hdr.param_control = SLP_LBIN; 6739 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - 6740 sizeof(struct scsi_log_param_header); 6741 rn = wn = rb = wb = 0; 6742 bintime_clear(&rt); 6743 bintime_clear(&wt); 6744 for (i = 0; i < CTL_MAX_PORTS; i++) { 6745 rn += lun->stats.ports[i].operations[CTL_STATS_READ]; 6746 wn += lun->stats.ports[i].operations[CTL_STATS_WRITE]; 6747 rb += lun->stats.ports[i].bytes[CTL_STATS_READ]; 6748 wb += lun->stats.ports[i].bytes[CTL_STATS_WRITE]; 6749 bintime_add(&rt, &lun->stats.ports[i].time[CTL_STATS_READ]); 6750 bintime_add(&wt, &lun->stats.ports[i].time[CTL_STATS_WRITE]); 6751 } 6752 scsi_u64to8b(rn, data->sap.read_num); 6753 scsi_u64to8b(wn, data->sap.write_num); 6754 if (lun->stats.blocksize > 0) { 6755 scsi_u64to8b(wb / lun->stats.blocksize, 6756 data->sap.recvieved_lba); 6757 scsi_u64to8b(rb / lun->stats.blocksize, 6758 data->sap.transmitted_lba); 6759 } 6760 scsi_u64to8b((uint64_t)rt.sec * 1000 + rt.frac / (UINT64_MAX / 1000), 6761 data->sap.read_int); 6762 scsi_u64to8b((uint64_t)wt.sec * 1000 + wt.frac / (UINT64_MAX / 1000), 6763 data->sap.write_int); 6764 scsi_u64to8b(0, data->sap.weighted_num); 6765 scsi_u64to8b(0, data->sap.weighted_int); 6766 scsi_ulto2b(SLP_IT, data->it.hdr.param_code); 6767 data->it.hdr.param_control = SLP_LBIN; 6768 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - 6769 sizeof(struct scsi_log_param_header); 6770 #ifdef CTL_TIME_IO 6771 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); 6772 #endif 6773 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); 6774 data->ti.hdr.param_control = SLP_LBIN; 6775 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - 6776 sizeof(struct scsi_log_param_header); 6777 scsi_ulto4b(3, data->ti.exponent); 6778 scsi_ulto4b(1, data->ti.integer); 6779 return (0); 6780 } 6781 6782 int 6783 ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio, 6784 struct ctl_page_index *page_index, 6785 int pc) 6786 { 6787 struct ctl_lun *lun; 6788 struct scsi_log_informational_exceptions *data; 6789 6790 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6791 data = (struct scsi_log_informational_exceptions *)page_index->page_data; 6792 6793 scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code); 6794 data->hdr.param_control = SLP_LBIN; 6795 data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) - 6796 sizeof(struct scsi_log_param_header); 6797 data->ie_asc = lun->ie_asc; 6798 data->ie_ascq =
lun->ie_ascq; 6799 data->temperature = 0xff; 6800 return (0); 6801 } 6802 6803 int 6804 ctl_log_sense(struct ctl_scsiio *ctsio) 6805 { 6806 struct ctl_lun *lun; 6807 int i, pc, page_code, subpage; 6808 int alloc_len, total_len; 6809 struct ctl_page_index *page_index; 6810 struct scsi_log_sense *cdb; 6811 struct scsi_log_header *header; 6812 6813 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 6814 6815 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6816 cdb = (struct scsi_log_sense *)ctsio->cdb; 6817 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 6818 page_code = cdb->page & SLS_PAGE_CODE; 6819 subpage = cdb->subpage; 6820 alloc_len = scsi_2btoul(cdb->length); 6821 6822 page_index = NULL; 6823 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 6824 page_index = &lun->log_pages.index[i]; 6825 6826 /* Look for the right page code */ 6827 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 6828 continue; 6829 6830 /* Look for the right subpage or the subpage wildcard*/ 6831 if (page_index->subpage != subpage) 6832 continue; 6833 6834 break; 6835 } 6836 if (i >= CTL_NUM_LOG_PAGES) { 6837 ctl_set_invalid_field(ctsio, 6838 /*sks_valid*/ 1, 6839 /*command*/ 1, 6840 /*field*/ 2, 6841 /*bit_valid*/ 0, 6842 /*bit*/ 0); 6843 ctl_done((union ctl_io *)ctsio); 6844 return (CTL_RETVAL_COMPLETE); 6845 } 6846 6847 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 6848 6849 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6850 ctsio->kern_sg_entries = 0; 6851 ctsio->kern_data_resid = 0; 6852 ctsio->kern_rel_offset = 0; 6853 if (total_len < alloc_len) { 6854 ctsio->residual = alloc_len - total_len; 6855 ctsio->kern_data_len = total_len; 6856 ctsio->kern_total_len = total_len; 6857 } else { 6858 ctsio->residual = 0; 6859 ctsio->kern_data_len = alloc_len; 6860 ctsio->kern_total_len = alloc_len; 6861 } 6862 6863 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 6864 header->page = page_index->page_code; 6865 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING) 6866 header->page |= SL_DS; 6867 if (page_index->subpage) { 6868 header->page |= SL_SPF; 6869 header->subpage = page_index->subpage; 6870 } 6871 scsi_ulto2b(page_index->page_len, header->datalen); 6872 6873 /* 6874 * Call the handler, if it exists, to update the 6875 * page to the latest values. 
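 * (For example, the statistics and LBP handlers above recompute their parameter data from the LUN's counters and the backend each time the page is requested.)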
6876 */ 6877 if (page_index->sense_handler != NULL) 6878 page_index->sense_handler(ctsio, page_index, pc); 6879 6880 memcpy(header + 1, page_index->page_data, page_index->page_len); 6881 6882 ctl_set_success(ctsio); 6883 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6884 ctsio->be_move_done = ctl_config_move_done; 6885 ctl_datamove((union ctl_io *)ctsio); 6886 return (CTL_RETVAL_COMPLETE); 6887 } 6888 6889 int 6890 ctl_read_capacity(struct ctl_scsiio *ctsio) 6891 { 6892 struct scsi_read_capacity *cdb; 6893 struct scsi_read_capacity_data *data; 6894 struct ctl_lun *lun; 6895 uint32_t lba; 6896 6897 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 6898 6899 cdb = (struct scsi_read_capacity *)ctsio->cdb; 6900 6901 lba = scsi_4btoul(cdb->addr); 6902 if (((cdb->pmi & SRC_PMI) == 0) 6903 && (lba != 0)) { 6904 ctl_set_invalid_field(/*ctsio*/ ctsio, 6905 /*sks_valid*/ 1, 6906 /*command*/ 1, 6907 /*field*/ 2, 6908 /*bit_valid*/ 0, 6909 /*bit*/ 0); 6910 ctl_done((union ctl_io *)ctsio); 6911 return (CTL_RETVAL_COMPLETE); 6912 } 6913 6914 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6915 6916 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6917 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 6918 ctsio->residual = 0; 6919 ctsio->kern_data_len = sizeof(*data); 6920 ctsio->kern_total_len = sizeof(*data); 6921 ctsio->kern_data_resid = 0; 6922 ctsio->kern_rel_offset = 0; 6923 ctsio->kern_sg_entries = 0; 6924 6925 /* 6926 * If the maximum LBA is greater than 0xfffffffe, the user must 6927 * issue a SERVICE ACTION IN (16) command, with the read capacity 6928 * service action set. 6929 */ 6930 if (lun->be_lun->maxlba > 0xfffffffe) 6931 scsi_ulto4b(0xffffffff, data->addr); 6932 else 6933 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 6934 6935 /* 6936 * XXX KDM this may not be 512 bytes...
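 * (whatever block size the backend LUN was configured with is what gets reported here.)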
6937 */ 6938 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6939 6940 ctl_set_success(ctsio); 6941 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6942 ctsio->be_move_done = ctl_config_move_done; 6943 ctl_datamove((union ctl_io *)ctsio); 6944 return (CTL_RETVAL_COMPLETE); 6945 } 6946 6947 int 6948 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 6949 { 6950 struct scsi_read_capacity_16 *cdb; 6951 struct scsi_read_capacity_data_long *data; 6952 struct ctl_lun *lun; 6953 uint64_t lba; 6954 uint32_t alloc_len; 6955 6956 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 6957 6958 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 6959 6960 alloc_len = scsi_4btoul(cdb->alloc_len); 6961 lba = scsi_8btou64(cdb->addr); 6962 6963 if ((cdb->reladr & SRC16_PMI) 6964 && (lba != 0)) { 6965 ctl_set_invalid_field(/*ctsio*/ ctsio, 6966 /*sks_valid*/ 1, 6967 /*command*/ 1, 6968 /*field*/ 2, 6969 /*bit_valid*/ 0, 6970 /*bit*/ 0); 6971 ctl_done((union ctl_io *)ctsio); 6972 return (CTL_RETVAL_COMPLETE); 6973 } 6974 6975 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6976 6977 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6978 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 6979 6980 if (sizeof(*data) < alloc_len) { 6981 ctsio->residual = alloc_len - sizeof(*data); 6982 ctsio->kern_data_len = sizeof(*data); 6983 ctsio->kern_total_len = sizeof(*data); 6984 } else { 6985 ctsio->residual = 0; 6986 ctsio->kern_data_len = alloc_len; 6987 ctsio->kern_total_len = alloc_len; 6988 } 6989 ctsio->kern_data_resid = 0; 6990 ctsio->kern_rel_offset = 0; 6991 ctsio->kern_sg_entries = 0; 6992 6993 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 6994 /* XXX KDM this may not be 512 bytes... */ 6995 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6996 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 6997 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 6998 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 6999 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 7000 7001 ctl_set_success(ctsio); 7002 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7003 ctsio->be_move_done = ctl_config_move_done; 7004 ctl_datamove((union ctl_io *)ctsio); 7005 return (CTL_RETVAL_COMPLETE); 7006 } 7007 7008 int 7009 ctl_get_lba_status(struct ctl_scsiio *ctsio) 7010 { 7011 struct scsi_get_lba_status *cdb; 7012 struct scsi_get_lba_status_data *data; 7013 struct ctl_lun *lun; 7014 struct ctl_lba_len_flags *lbalen; 7015 uint64_t lba; 7016 uint32_t alloc_len, total_len; 7017 int retval; 7018 7019 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 7020 7021 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7022 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 7023 lba = scsi_8btou64(cdb->addr); 7024 alloc_len = scsi_4btoul(cdb->alloc_len); 7025 7026 if (lba > lun->be_lun->maxlba) { 7027 ctl_set_lba_out_of_range(ctsio, lba); 7028 ctl_done((union ctl_io *)ctsio); 7029 return (CTL_RETVAL_COMPLETE); 7030 } 7031 7032 total_len = sizeof(*data) + sizeof(data->descr[0]); 7033 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7034 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 7035 7036 if (total_len < alloc_len) { 7037 ctsio->residual = alloc_len - total_len; 7038 ctsio->kern_data_len = total_len; 7039 ctsio->kern_total_len = total_len; 7040 } else { 7041 ctsio->residual = 0; 7042 ctsio->kern_data_len = alloc_len; 7043 ctsio->kern_total_len = alloc_len; 7044 } 7045 ctsio->kern_data_resid = 0; 7046 ctsio->kern_rel_offset = 0; 7047 
ctsio->kern_sg_entries = 0; 7048 7049 /* Fill dummy data in case backend can't tell anything. */ 7050 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 7051 scsi_u64to8b(lba, data->descr[0].addr); 7052 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 7053 data->descr[0].length); 7054 data->descr[0].status = 0; /* Mapped or unknown. */ 7055 7056 ctl_set_success(ctsio); 7057 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7058 ctsio->be_move_done = ctl_config_move_done; 7059 7060 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 7061 lbalen->lba = lba; 7062 lbalen->len = total_len; 7063 lbalen->flags = 0; 7064 retval = lun->backend->config_read((union ctl_io *)ctsio); 7065 return (CTL_RETVAL_COMPLETE); 7066 } 7067 7068 int 7069 ctl_read_defect(struct ctl_scsiio *ctsio) 7070 { 7071 struct scsi_read_defect_data_10 *ccb10; 7072 struct scsi_read_defect_data_12 *ccb12; 7073 struct scsi_read_defect_data_hdr_10 *data10; 7074 struct scsi_read_defect_data_hdr_12 *data12; 7075 uint32_t alloc_len, data_len; 7076 uint8_t format; 7077 7078 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7079 7080 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7081 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7082 format = ccb10->format; 7083 alloc_len = scsi_2btoul(ccb10->alloc_length); 7084 data_len = sizeof(*data10); 7085 } else { 7086 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7087 format = ccb12->format; 7088 alloc_len = scsi_4btoul(ccb12->alloc_length); 7089 data_len = sizeof(*data12); 7090 } 7091 if (alloc_len == 0) { 7092 ctl_set_success(ctsio); 7093 ctl_done((union ctl_io *)ctsio); 7094 return (CTL_RETVAL_COMPLETE); 7095 } 7096 7097 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7098 if (data_len < alloc_len) { 7099 ctsio->residual = alloc_len - data_len; 7100 ctsio->kern_data_len = data_len; 7101 ctsio->kern_total_len = data_len; 7102 } else { 7103 ctsio->residual = 0; 7104 ctsio->kern_data_len = alloc_len; 7105 ctsio->kern_total_len = alloc_len; 7106 } 7107 ctsio->kern_data_resid = 0; 7108 ctsio->kern_rel_offset = 0; 7109 ctsio->kern_sg_entries = 0; 7110 7111 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7112 data10 = (struct scsi_read_defect_data_hdr_10 *) 7113 ctsio->kern_data_ptr; 7114 data10->format = format; 7115 scsi_ulto2b(0, data10->length); 7116 } else { 7117 data12 = (struct scsi_read_defect_data_hdr_12 *) 7118 ctsio->kern_data_ptr; 7119 data12->format = format; 7120 scsi_ulto2b(0, data12->generation); 7121 scsi_ulto4b(0, data12->length); 7122 } 7123 7124 ctl_set_success(ctsio); 7125 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7126 ctsio->be_move_done = ctl_config_move_done; 7127 ctl_datamove((union ctl_io *)ctsio); 7128 return (CTL_RETVAL_COMPLETE); 7129 } 7130 7131 int 7132 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7133 { 7134 struct scsi_maintenance_in *cdb; 7135 int retval; 7136 int alloc_len, ext, total_len = 0, g, pc, pg, ts, os; 7137 int num_ha_groups, num_target_ports, shared_group; 7138 struct ctl_lun *lun; 7139 struct ctl_softc *softc; 7140 struct ctl_port *port; 7141 struct scsi_target_group_data *rtg_ptr; 7142 struct scsi_target_group_data_extended *rtg_ext_ptr; 7143 struct scsi_target_port_group_descriptor *tpg_desc; 7144 7145 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7146 7147 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7148 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7149 softc = lun->ctl_softc; 7150 7151 retval = CTL_RETVAL_COMPLETE; 7152 7153 switch 
(cdb->byte2 & STG_PDF_MASK) { 7154 case STG_PDF_LENGTH: 7155 ext = 0; 7156 break; 7157 case STG_PDF_EXTENDED: 7158 ext = 1; 7159 break; 7160 default: 7161 ctl_set_invalid_field(/*ctsio*/ ctsio, 7162 /*sks_valid*/ 1, 7163 /*command*/ 1, 7164 /*field*/ 2, 7165 /*bit_valid*/ 1, 7166 /*bit*/ 5); 7167 ctl_done((union ctl_io *)ctsio); 7168 return(retval); 7169 } 7170 7171 num_target_ports = 0; 7172 shared_group = (softc->is_single != 0); 7173 mtx_lock(&softc->ctl_lock); 7174 STAILQ_FOREACH(port, &softc->port_list, links) { 7175 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7176 continue; 7177 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7178 continue; 7179 num_target_ports++; 7180 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7181 shared_group = 1; 7182 } 7183 mtx_unlock(&softc->ctl_lock); 7184 num_ha_groups = (softc->is_single) ? 0 : NUM_HA_SHELVES; 7185 7186 if (ext) 7187 total_len = sizeof(struct scsi_target_group_data_extended); 7188 else 7189 total_len = sizeof(struct scsi_target_group_data); 7190 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7191 (shared_group + num_ha_groups) + 7192 sizeof(struct scsi_target_port_descriptor) * num_target_ports; 7193 7194 alloc_len = scsi_4btoul(cdb->length); 7195 7196 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7197 7198 ctsio->kern_sg_entries = 0; 7199 7200 if (total_len < alloc_len) { 7201 ctsio->residual = alloc_len - total_len; 7202 ctsio->kern_data_len = total_len; 7203 ctsio->kern_total_len = total_len; 7204 } else { 7205 ctsio->residual = 0; 7206 ctsio->kern_data_len = alloc_len; 7207 ctsio->kern_total_len = alloc_len; 7208 } 7209 ctsio->kern_data_resid = 0; 7210 ctsio->kern_rel_offset = 0; 7211 7212 if (ext) { 7213 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7214 ctsio->kern_data_ptr; 7215 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7216 rtg_ext_ptr->format_type = 0x10; 7217 rtg_ext_ptr->implicit_transition_time = 0; 7218 tpg_desc = &rtg_ext_ptr->groups[0]; 7219 } else { 7220 rtg_ptr = (struct scsi_target_group_data *) 7221 ctsio->kern_data_ptr; 7222 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7223 tpg_desc = &rtg_ptr->groups[0]; 7224 } 7225 7226 mtx_lock(&softc->ctl_lock); 7227 pg = softc->port_min / softc->port_cnt; 7228 if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { 7229 /* Some shelf is known to be primary. */ 7230 if (softc->ha_link == CTL_HA_LINK_OFFLINE) 7231 os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7232 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) 7233 os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7234 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) 7235 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 7236 else 7237 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7238 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7239 ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7240 } else { 7241 ts = os; 7242 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7243 } 7244 } else { 7245 /* No known primary shelf. 
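 * If the HA link is offline report this side unavailable and assume the peer is optimized; if the link state is unknown report this side transitioning and the peer optimized; otherwise report both sides as transitioning.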
*/ 7246 if (softc->ha_link == CTL_HA_LINK_OFFLINE) { 7247 ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7248 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7249 } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { 7250 ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7251 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7252 } else { 7253 ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7254 } 7255 } 7256 if (shared_group) { 7257 tpg_desc->pref_state = ts; 7258 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7259 TPG_U_SUP | TPG_T_SUP; 7260 scsi_ulto2b(1, tpg_desc->target_port_group); 7261 tpg_desc->status = TPG_IMPLICIT; 7262 pc = 0; 7263 STAILQ_FOREACH(port, &softc->port_list, links) { 7264 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7265 continue; 7266 if (!softc->is_single && 7267 (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) 7268 continue; 7269 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7270 continue; 7271 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7272 relative_target_port_identifier); 7273 pc++; 7274 } 7275 tpg_desc->target_port_count = pc; 7276 tpg_desc = (struct scsi_target_port_group_descriptor *) 7277 &tpg_desc->descriptors[pc]; 7278 } 7279 for (g = 0; g < num_ha_groups; g++) { 7280 tpg_desc->pref_state = (g == pg) ? ts : os; 7281 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7282 TPG_U_SUP | TPG_T_SUP; 7283 scsi_ulto2b(2 + g, tpg_desc->target_port_group); 7284 tpg_desc->status = TPG_IMPLICIT; 7285 pc = 0; 7286 STAILQ_FOREACH(port, &softc->port_list, links) { 7287 if (port->targ_port < g * softc->port_cnt || 7288 port->targ_port >= (g + 1) * softc->port_cnt) 7289 continue; 7290 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7291 continue; 7292 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7293 continue; 7294 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7295 continue; 7296 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 
7297 relative_target_port_identifier); 7298 pc++; 7299 } 7300 tpg_desc->target_port_count = pc; 7301 tpg_desc = (struct scsi_target_port_group_descriptor *) 7302 &tpg_desc->descriptors[pc]; 7303 } 7304 mtx_unlock(&softc->ctl_lock); 7305 7306 ctl_set_success(ctsio); 7307 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7308 ctsio->be_move_done = ctl_config_move_done; 7309 ctl_datamove((union ctl_io *)ctsio); 7310 return(retval); 7311 } 7312 7313 int 7314 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7315 { 7316 struct ctl_lun *lun; 7317 struct scsi_report_supported_opcodes *cdb; 7318 const struct ctl_cmd_entry *entry, *sentry; 7319 struct scsi_report_supported_opcodes_all *all; 7320 struct scsi_report_supported_opcodes_descr *descr; 7321 struct scsi_report_supported_opcodes_one *one; 7322 int retval; 7323 int alloc_len, total_len; 7324 int opcode, service_action, i, j, num; 7325 7326 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7327 7328 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7329 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7330 7331 retval = CTL_RETVAL_COMPLETE; 7332 7333 opcode = cdb->requested_opcode; 7334 service_action = scsi_2btoul(cdb->requested_service_action); 7335 switch (cdb->options & RSO_OPTIONS_MASK) { 7336 case RSO_OPTIONS_ALL: 7337 num = 0; 7338 for (i = 0; i < 256; i++) { 7339 entry = &ctl_cmd_table[i]; 7340 if (entry->flags & CTL_CMD_FLAG_SA5) { 7341 for (j = 0; j < 32; j++) { 7342 sentry = &((const struct ctl_cmd_entry *) 7343 entry->execute)[j]; 7344 if (ctl_cmd_applicable( 7345 lun->be_lun->lun_type, sentry)) 7346 num++; 7347 } 7348 } else { 7349 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7350 entry)) 7351 num++; 7352 } 7353 } 7354 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7355 num * sizeof(struct scsi_report_supported_opcodes_descr); 7356 break; 7357 case RSO_OPTIONS_OC: 7358 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7359 ctl_set_invalid_field(/*ctsio*/ ctsio, 7360 /*sks_valid*/ 1, 7361 /*command*/ 1, 7362 /*field*/ 2, 7363 /*bit_valid*/ 1, 7364 /*bit*/ 2); 7365 ctl_done((union ctl_io *)ctsio); 7366 return (CTL_RETVAL_COMPLETE); 7367 } 7368 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7369 break; 7370 case RSO_OPTIONS_OC_SA: 7371 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7372 service_action >= 32) { 7373 ctl_set_invalid_field(/*ctsio*/ ctsio, 7374 /*sks_valid*/ 1, 7375 /*command*/ 1, 7376 /*field*/ 2, 7377 /*bit_valid*/ 1, 7378 /*bit*/ 2); 7379 ctl_done((union ctl_io *)ctsio); 7380 return (CTL_RETVAL_COMPLETE); 7381 } 7382 /* FALLTHROUGH */ 7383 case RSO_OPTIONS_OC_ASA: 7384 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7385 break; 7386 default: 7387 ctl_set_invalid_field(/*ctsio*/ ctsio, 7388 /*sks_valid*/ 1, 7389 /*command*/ 1, 7390 /*field*/ 2, 7391 /*bit_valid*/ 1, 7392 /*bit*/ 2); 7393 ctl_done((union ctl_io *)ctsio); 7394 return (CTL_RETVAL_COMPLETE); 7395 } 7396 7397 alloc_len = scsi_4btoul(cdb->length); 7398 7399 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7400 7401 ctsio->kern_sg_entries = 0; 7402 7403 if (total_len < alloc_len) { 7404 ctsio->residual = alloc_len - total_len; 7405 ctsio->kern_data_len = total_len; 7406 ctsio->kern_total_len = total_len; 7407 } else { 7408 ctsio->residual = 0; 7409 ctsio->kern_data_len = alloc_len; 7410 ctsio->kern_total_len = alloc_len; 7411 } 7412 ctsio->kern_data_resid = 0; 7413 ctsio->kern_rel_offset = 0; 7414 7415 switch (cdb->options & 
RSO_OPTIONS_MASK) { 7416 case RSO_OPTIONS_ALL: 7417 all = (struct scsi_report_supported_opcodes_all *) 7418 ctsio->kern_data_ptr; 7419 num = 0; 7420 for (i = 0; i < 256; i++) { 7421 entry = &ctl_cmd_table[i]; 7422 if (entry->flags & CTL_CMD_FLAG_SA5) { 7423 for (j = 0; j < 32; j++) { 7424 sentry = &((const struct ctl_cmd_entry *) 7425 entry->execute)[j]; 7426 if (!ctl_cmd_applicable( 7427 lun->be_lun->lun_type, sentry)) 7428 continue; 7429 descr = &all->descr[num++]; 7430 descr->opcode = i; 7431 scsi_ulto2b(j, descr->service_action); 7432 descr->flags = RSO_SERVACTV; 7433 scsi_ulto2b(sentry->length, 7434 descr->cdb_length); 7435 } 7436 } else { 7437 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7438 entry)) 7439 continue; 7440 descr = &all->descr[num++]; 7441 descr->opcode = i; 7442 scsi_ulto2b(0, descr->service_action); 7443 descr->flags = 0; 7444 scsi_ulto2b(entry->length, descr->cdb_length); 7445 } 7446 } 7447 scsi_ulto4b( 7448 num * sizeof(struct scsi_report_supported_opcodes_descr), 7449 all->length); 7450 break; 7451 case RSO_OPTIONS_OC: 7452 one = (struct scsi_report_supported_opcodes_one *) 7453 ctsio->kern_data_ptr; 7454 entry = &ctl_cmd_table[opcode]; 7455 goto fill_one; 7456 case RSO_OPTIONS_OC_SA: 7457 one = (struct scsi_report_supported_opcodes_one *) 7458 ctsio->kern_data_ptr; 7459 entry = &ctl_cmd_table[opcode]; 7460 entry = &((const struct ctl_cmd_entry *) 7461 entry->execute)[service_action]; 7462 fill_one: 7463 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7464 one->support = 3; 7465 scsi_ulto2b(entry->length, one->cdb_length); 7466 one->cdb_usage[0] = opcode; 7467 memcpy(&one->cdb_usage[1], entry->usage, 7468 entry->length - 1); 7469 } else 7470 one->support = 1; 7471 break; 7472 case RSO_OPTIONS_OC_ASA: 7473 one = (struct scsi_report_supported_opcodes_one *) 7474 ctsio->kern_data_ptr; 7475 entry = &ctl_cmd_table[opcode]; 7476 if (entry->flags & CTL_CMD_FLAG_SA5) { 7477 entry = &((const struct ctl_cmd_entry *) 7478 entry->execute)[service_action]; 7479 } else if (service_action != 0) { 7480 one->support = 1; 7481 break; 7482 } 7483 goto fill_one; 7484 } 7485 7486 ctl_set_success(ctsio); 7487 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7488 ctsio->be_move_done = ctl_config_move_done; 7489 ctl_datamove((union ctl_io *)ctsio); 7490 return(retval); 7491 } 7492 7493 int 7494 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7495 { 7496 struct scsi_report_supported_tmf *cdb; 7497 struct scsi_report_supported_tmf_ext_data *data; 7498 int retval; 7499 int alloc_len, total_len; 7500 7501 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7502 7503 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7504 7505 retval = CTL_RETVAL_COMPLETE; 7506 7507 if (cdb->options & RST_REPD) 7508 total_len = sizeof(struct scsi_report_supported_tmf_ext_data); 7509 else 7510 total_len = sizeof(struct scsi_report_supported_tmf_data); 7511 alloc_len = scsi_4btoul(cdb->length); 7512 7513 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7514 7515 ctsio->kern_sg_entries = 0; 7516 7517 if (total_len < alloc_len) { 7518 ctsio->residual = alloc_len - total_len; 7519 ctsio->kern_data_len = total_len; 7520 ctsio->kern_total_len = total_len; 7521 } else { 7522 ctsio->residual = 0; 7523 ctsio->kern_data_len = alloc_len; 7524 ctsio->kern_total_len = alloc_len; 7525 } 7526 ctsio->kern_data_resid = 0; 7527 ctsio->kern_rel_offset = 0; 7528 7529 data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr; 7530 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | 
RST_QTS | 7531 RST_TRS; 7532 data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; 7533 data->length = total_len - 4; 7534 7535 ctl_set_success(ctsio); 7536 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7537 ctsio->be_move_done = ctl_config_move_done; 7538 ctl_datamove((union ctl_io *)ctsio); 7539 return (retval); 7540 } 7541 7542 int 7543 ctl_report_timestamp(struct ctl_scsiio *ctsio) 7544 { 7545 struct scsi_report_timestamp *cdb; 7546 struct scsi_report_timestamp_data *data; 7547 struct timeval tv; 7548 int64_t timestamp; 7549 int retval; 7550 int alloc_len, total_len; 7551 7552 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7553 7554 cdb = (struct scsi_report_timestamp *)ctsio->cdb; 7555 7556 retval = CTL_RETVAL_COMPLETE; 7557 7558 total_len = sizeof(struct scsi_report_timestamp_data); 7559 alloc_len = scsi_4btoul(cdb->length); 7560 7561 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7562 7563 ctsio->kern_sg_entries = 0; 7564 7565 if (total_len < alloc_len) { 7566 ctsio->residual = alloc_len - total_len; 7567 ctsio->kern_data_len = total_len; 7568 ctsio->kern_total_len = total_len; 7569 } else { 7570 ctsio->residual = 0; 7571 ctsio->kern_data_len = alloc_len; 7572 ctsio->kern_total_len = alloc_len; 7573 } 7574 ctsio->kern_data_resid = 0; 7575 ctsio->kern_rel_offset = 0; 7576 7577 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7578 scsi_ulto2b(sizeof(*data) - 2, data->length); 7579 data->origin = RTS_ORIG_OUTSIDE; 7580 getmicrotime(&tv); 7581 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7582 scsi_ulto4b(timestamp >> 16, data->timestamp); 7583 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7584 7585 ctl_set_success(ctsio); 7586 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7587 ctsio->be_move_done = ctl_config_move_done; 7588 ctl_datamove((union ctl_io *)ctsio); 7589 return (retval); 7590 } 7591 7592 int 7593 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7594 { 7595 struct scsi_per_res_in *cdb; 7596 int alloc_len, total_len = 0; 7597 /* struct scsi_per_res_in_rsrv in_data; */ 7598 struct ctl_lun *lun; 7599 struct ctl_softc *softc; 7600 uint64_t key; 7601 7602 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7603 7604 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7605 7606 alloc_len = scsi_2btoul(cdb->length); 7607 7608 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7609 softc = lun->ctl_softc; 7610 7611 retry: 7612 mtx_lock(&lun->lun_lock); 7613 switch (cdb->action) { 7614 case SPRI_RK: /* read keys */ 7615 total_len = sizeof(struct scsi_per_res_in_keys) + 7616 lun->pr_key_count * 7617 sizeof(struct scsi_per_res_key); 7618 break; 7619 case SPRI_RR: /* read reservation */ 7620 if (lun->flags & CTL_LUN_PR_RESERVED) 7621 total_len = sizeof(struct scsi_per_res_in_rsrv); 7622 else 7623 total_len = sizeof(struct scsi_per_res_in_header); 7624 break; 7625 case SPRI_RC: /* report capabilities */ 7626 total_len = sizeof(struct scsi_per_res_cap); 7627 break; 7628 case SPRI_RS: /* read full status */ 7629 total_len = sizeof(struct scsi_per_res_in_header) + 7630 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7631 lun->pr_key_count; 7632 break; 7633 default: 7634 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7635 } 7636 mtx_unlock(&lun->lun_lock); 7637 7638 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7639 7640 if (total_len < alloc_len) { 7641 ctsio->residual = alloc_len - total_len; 7642 ctsio->kern_data_len = total_len; 7643 ctsio->kern_total_len = total_len; 7644 } else { 7645 
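/* Allocation length is the limit; return no more than the initiator asked for. */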
ctsio->residual = 0; 7646 ctsio->kern_data_len = alloc_len; 7647 ctsio->kern_total_len = alloc_len; 7648 } 7649 7650 ctsio->kern_data_resid = 0; 7651 ctsio->kern_rel_offset = 0; 7652 ctsio->kern_sg_entries = 0; 7653 7654 mtx_lock(&lun->lun_lock); 7655 switch (cdb->action) { 7656 case SPRI_RK: { // read keys 7657 struct scsi_per_res_in_keys *res_keys; 7658 int i, key_count; 7659 7660 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7661 7662 /* 7663 * We had to drop the lock to allocate our buffer, which 7664 * leaves time for someone to come in with another 7665 * persistent reservation. (That is unlikely, though, 7666 * since this should be the only persistent reservation 7667 * command active right now.) 7668 */ 7669 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7670 (lun->pr_key_count * 7671 sizeof(struct scsi_per_res_key)))){ 7672 mtx_unlock(&lun->lun_lock); 7673 free(ctsio->kern_data_ptr, M_CTL); 7674 printf("%s: reservation length changed, retrying\n", 7675 __func__); 7676 goto retry; 7677 } 7678 7679 scsi_ulto4b(lun->pr_generation, res_keys->header.generation); 7680 7681 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7682 lun->pr_key_count, res_keys->header.length); 7683 7684 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { 7685 if ((key = ctl_get_prkey(lun, i)) == 0) 7686 continue; 7687 7688 /* 7689 * We used lun->pr_key_count to calculate the 7690 * size to allocate. If it turns out the number of 7691 * initiators with the registered flag set is 7692 * larger than that (i.e. they haven't been kept in 7693 * sync), we've got a problem. 7694 */ 7695 if (key_count >= lun->pr_key_count) { 7696 key_count++; 7697 continue; 7698 } 7699 scsi_u64to8b(key, res_keys->keys[key_count].key); 7700 key_count++; 7701 } 7702 break; 7703 } 7704 case SPRI_RR: { // read reservation 7705 struct scsi_per_res_in_rsrv *res; 7706 int tmp_len, header_only; 7707 7708 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7709 7710 scsi_ulto4b(lun->pr_generation, res->header.generation); 7711 7712 if (lun->flags & CTL_LUN_PR_RESERVED) 7713 { 7714 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7715 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7716 res->header.length); 7717 header_only = 0; 7718 } else { 7719 tmp_len = sizeof(struct scsi_per_res_in_header); 7720 scsi_ulto4b(0, res->header.length); 7721 header_only = 1; 7722 } 7723 7724 /* 7725 * We had to drop the lock to allocate our buffer, which 7726 * leaves time for someone to come in with another 7727 * persistent reservation. (That is unlikely, though, 7728 * since this should be the only persistent reservation 7729 * command active right now.) 7730 */ 7731 if (tmp_len != total_len) { 7732 mtx_unlock(&lun->lun_lock); 7733 free(ctsio->kern_data_ptr, M_CTL); 7734 printf("%s: reservation status changed, retrying\n", 7735 __func__); 7736 goto retry; 7737 } 7738 7739 /* 7740 * No reservation held, so we're done. 7741 */ 7742 if (header_only != 0) 7743 break; 7744 7745 /* 7746 * If the registration is an All Registrants type, the key 7747 * is 0, since it doesn't really matter. 
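		 * Otherwise we report the key registered by the current
		 * reservation holder, as returned by ctl_get_prkey() below.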
7748 */ 7749 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7750 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7751 res->data.reservation); 7752 } 7753 res->data.scopetype = lun->pr_res_type; 7754 break; 7755 } 7756 case SPRI_RC: //report capabilities 7757 { 7758 struct scsi_per_res_cap *res_cap; 7759 uint16_t type_mask; 7760 7761 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7762 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7763 res_cap->flags1 = SPRI_CRH; 7764 res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5; 7765 type_mask = SPRI_TM_WR_EX_AR | 7766 SPRI_TM_EX_AC_RO | 7767 SPRI_TM_WR_EX_RO | 7768 SPRI_TM_EX_AC | 7769 SPRI_TM_WR_EX | 7770 SPRI_TM_EX_AC_AR; 7771 scsi_ulto2b(type_mask, res_cap->type_mask); 7772 break; 7773 } 7774 case SPRI_RS: { // read full status 7775 struct scsi_per_res_in_full *res_status; 7776 struct scsi_per_res_in_full_desc *res_desc; 7777 struct ctl_port *port; 7778 int i, len; 7779 7780 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7781 7782 /* 7783 * We had to drop the lock to allocate our buffer, which 7784 * leaves time for someone to come in with another 7785 * persistent reservation. (That is unlikely, though, 7786 * since this should be the only persistent reservation 7787 * command active right now.) 7788 */ 7789 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7790 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7791 lun->pr_key_count)){ 7792 mtx_unlock(&lun->lun_lock); 7793 free(ctsio->kern_data_ptr, M_CTL); 7794 printf("%s: reservation length changed, retrying\n", 7795 __func__); 7796 goto retry; 7797 } 7798 7799 scsi_ulto4b(lun->pr_generation, res_status->header.generation); 7800 7801 res_desc = &res_status->desc[0]; 7802 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7803 if ((key = ctl_get_prkey(lun, i)) == 0) 7804 continue; 7805 7806 scsi_u64to8b(key, res_desc->res_key.key); 7807 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7808 (lun->pr_res_idx == i || 7809 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7810 res_desc->flags = SPRI_FULL_R_HOLDER; 7811 res_desc->scopetype = lun->pr_res_type; 7812 } 7813 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7814 res_desc->rel_trgt_port_id); 7815 len = 0; 7816 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; 7817 if (port != NULL) 7818 len = ctl_create_iid(port, 7819 i % CTL_MAX_INIT_PER_PORT, 7820 res_desc->transport_id); 7821 scsi_ulto4b(len, res_desc->additional_length); 7822 res_desc = (struct scsi_per_res_in_full_desc *) 7823 &res_desc->transport_id[len]; 7824 } 7825 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7826 res_status->header.length); 7827 break; 7828 } 7829 default: 7830 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7831 } 7832 mtx_unlock(&lun->lun_lock); 7833 7834 ctl_set_success(ctsio); 7835 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7836 ctsio->be_move_done = ctl_config_move_done; 7837 ctl_datamove((union ctl_io *)ctsio); 7838 return (CTL_RETVAL_COMPLETE); 7839 } 7840 7841 /* 7842 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7843 * it should return. 
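 * This implements the PREEMPT and PREEMPT AND ABORT service actions on
 * behalf of ctl_persistent_reserve_out().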
7844 */ 7845 static int 7846 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7847 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7848 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7849 struct scsi_per_res_out_parms* param) 7850 { 7851 union ctl_ha_msg persis_io; 7852 int i; 7853 7854 mtx_lock(&lun->lun_lock); 7855 if (sa_res_key == 0) { 7856 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7857 /* validate scope and type */ 7858 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7859 SPR_LU_SCOPE) { 7860 mtx_unlock(&lun->lun_lock); 7861 ctl_set_invalid_field(/*ctsio*/ ctsio, 7862 /*sks_valid*/ 1, 7863 /*command*/ 1, 7864 /*field*/ 2, 7865 /*bit_valid*/ 1, 7866 /*bit*/ 4); 7867 ctl_done((union ctl_io *)ctsio); 7868 return (1); 7869 } 7870 7871 if (type>8 || type==2 || type==4 || type==0) { 7872 mtx_unlock(&lun->lun_lock); 7873 ctl_set_invalid_field(/*ctsio*/ ctsio, 7874 /*sks_valid*/ 1, 7875 /*command*/ 1, 7876 /*field*/ 2, 7877 /*bit_valid*/ 1, 7878 /*bit*/ 0); 7879 ctl_done((union ctl_io *)ctsio); 7880 return (1); 7881 } 7882 7883 /* 7884 * Unregister everybody else and build UA for 7885 * them 7886 */ 7887 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7888 if (i == residx || ctl_get_prkey(lun, i) == 0) 7889 continue; 7890 7891 ctl_clr_prkey(lun, i); 7892 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7893 } 7894 lun->pr_key_count = 1; 7895 lun->pr_res_type = type; 7896 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 7897 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 7898 lun->pr_res_idx = residx; 7899 lun->pr_generation++; 7900 mtx_unlock(&lun->lun_lock); 7901 7902 /* send msg to other side */ 7903 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7904 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7905 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7906 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7907 persis_io.pr.pr_info.res_type = type; 7908 memcpy(persis_io.pr.pr_info.sa_res_key, 7909 param->serv_act_res_key, 7910 sizeof(param->serv_act_res_key)); 7911 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7912 sizeof(persis_io.pr), M_WAITOK); 7913 } else { 7914 /* not all registrants */ 7915 mtx_unlock(&lun->lun_lock); 7916 free(ctsio->kern_data_ptr, M_CTL); 7917 ctl_set_invalid_field(ctsio, 7918 /*sks_valid*/ 1, 7919 /*command*/ 0, 7920 /*field*/ 8, 7921 /*bit_valid*/ 0, 7922 /*bit*/ 0); 7923 ctl_done((union ctl_io *)ctsio); 7924 return (1); 7925 } 7926 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7927 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7928 int found = 0; 7929 7930 if (res_key == sa_res_key) { 7931 /* special case */ 7932 /* 7933 * The spec implies this is not good but doesn't 7934 * say what to do. There are two choices either 7935 * generate a res conflict or check condition 7936 * with illegal field in parameter data. Since 7937 * that is what is done when the sa_res_key is 7938 * zero I'll take that approach since this has 7939 * to do with the sa_res_key. 
7940 */ 7941 mtx_unlock(&lun->lun_lock); 7942 free(ctsio->kern_data_ptr, M_CTL); 7943 ctl_set_invalid_field(ctsio, 7944 /*sks_valid*/ 1, 7945 /*command*/ 0, 7946 /*field*/ 8, 7947 /*bit_valid*/ 0, 7948 /*bit*/ 0); 7949 ctl_done((union ctl_io *)ctsio); 7950 return (1); 7951 } 7952 7953 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7954 if (ctl_get_prkey(lun, i) != sa_res_key) 7955 continue; 7956 7957 found = 1; 7958 ctl_clr_prkey(lun, i); 7959 lun->pr_key_count--; 7960 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7961 } 7962 if (!found) { 7963 mtx_unlock(&lun->lun_lock); 7964 free(ctsio->kern_data_ptr, M_CTL); 7965 ctl_set_reservation_conflict(ctsio); 7966 ctl_done((union ctl_io *)ctsio); 7967 return (CTL_RETVAL_COMPLETE); 7968 } 7969 lun->pr_generation++; 7970 mtx_unlock(&lun->lun_lock); 7971 7972 /* send msg to other side */ 7973 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7974 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7975 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7976 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7977 persis_io.pr.pr_info.res_type = type; 7978 memcpy(persis_io.pr.pr_info.sa_res_key, 7979 param->serv_act_res_key, 7980 sizeof(param->serv_act_res_key)); 7981 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7982 sizeof(persis_io.pr), M_WAITOK); 7983 } else { 7984 /* Reserved but not all registrants */ 7985 /* sa_res_key is res holder */ 7986 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7987 /* validate scope and type */ 7988 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7989 SPR_LU_SCOPE) { 7990 mtx_unlock(&lun->lun_lock); 7991 ctl_set_invalid_field(/*ctsio*/ ctsio, 7992 /*sks_valid*/ 1, 7993 /*command*/ 1, 7994 /*field*/ 2, 7995 /*bit_valid*/ 1, 7996 /*bit*/ 4); 7997 ctl_done((union ctl_io *)ctsio); 7998 return (1); 7999 } 8000 8001 if (type>8 || type==2 || type==4 || type==0) { 8002 mtx_unlock(&lun->lun_lock); 8003 ctl_set_invalid_field(/*ctsio*/ ctsio, 8004 /*sks_valid*/ 1, 8005 /*command*/ 1, 8006 /*field*/ 2, 8007 /*bit_valid*/ 1, 8008 /*bit*/ 0); 8009 ctl_done((union ctl_io *)ctsio); 8010 return (1); 8011 } 8012 8013 /* 8014 * Do the following: 8015 * if sa_res_key != res_key remove all 8016 * registrants w/sa_res_key and generate UA 8017 * for these registrants(Registrations 8018 * Preempted) if it wasn't an exclusive 8019 * reservation generate UA(Reservations 8020 * Preempted) for all other registered nexuses 8021 * if the type has changed. Establish the new 8022 * reservation and holder. If res_key and 8023 * sa_res_key are the same do the above 8024 * except don't unregister the res holder. 
8025 */ 8026 8027 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8028 if (i == residx || ctl_get_prkey(lun, i) == 0) 8029 continue; 8030 8031 if (sa_res_key == ctl_get_prkey(lun, i)) { 8032 ctl_clr_prkey(lun, i); 8033 lun->pr_key_count--; 8034 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8035 } else if (type != lun->pr_res_type && 8036 (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8037 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8038 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8039 } 8040 } 8041 lun->pr_res_type = type; 8042 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8043 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8044 lun->pr_res_idx = residx; 8045 else 8046 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8047 lun->pr_generation++; 8048 mtx_unlock(&lun->lun_lock); 8049 8050 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8051 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8052 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8053 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8054 persis_io.pr.pr_info.res_type = type; 8055 memcpy(persis_io.pr.pr_info.sa_res_key, 8056 param->serv_act_res_key, 8057 sizeof(param->serv_act_res_key)); 8058 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8059 sizeof(persis_io.pr), M_WAITOK); 8060 } else { 8061 /* 8062 * sa_res_key is not the res holder just 8063 * remove registrants 8064 */ 8065 int found=0; 8066 8067 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8068 if (sa_res_key != ctl_get_prkey(lun, i)) 8069 continue; 8070 8071 found = 1; 8072 ctl_clr_prkey(lun, i); 8073 lun->pr_key_count--; 8074 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8075 } 8076 8077 if (!found) { 8078 mtx_unlock(&lun->lun_lock); 8079 free(ctsio->kern_data_ptr, M_CTL); 8080 ctl_set_reservation_conflict(ctsio); 8081 ctl_done((union ctl_io *)ctsio); 8082 return (1); 8083 } 8084 lun->pr_generation++; 8085 mtx_unlock(&lun->lun_lock); 8086 8087 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8088 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8089 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8090 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8091 persis_io.pr.pr_info.res_type = type; 8092 memcpy(persis_io.pr.pr_info.sa_res_key, 8093 param->serv_act_res_key, 8094 sizeof(param->serv_act_res_key)); 8095 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8096 sizeof(persis_io.pr), M_WAITOK); 8097 } 8098 } 8099 return (0); 8100 } 8101 8102 static void 8103 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8104 { 8105 uint64_t sa_res_key; 8106 int i; 8107 8108 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8109 8110 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8111 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8112 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8113 if (sa_res_key == 0) { 8114 /* 8115 * Unregister everybody else and build UA for 8116 * them 8117 */ 8118 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8119 if (i == msg->pr.pr_info.residx || 8120 ctl_get_prkey(lun, i) == 0) 8121 continue; 8122 8123 ctl_clr_prkey(lun, i); 8124 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8125 } 8126 8127 lun->pr_key_count = 1; 8128 lun->pr_res_type = msg->pr.pr_info.res_type; 8129 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8130 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8131 lun->pr_res_idx = msg->pr.pr_info.residx; 8132 } else { 8133 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8134 if (sa_res_key == ctl_get_prkey(lun, i)) 8135 continue; 8136 8137 ctl_clr_prkey(lun, i); 8138 lun->pr_key_count--; 8139 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8140 } 8141 } 8142 } else { 8143 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8144 if (i == 
msg->pr.pr_info.residx || 8145 ctl_get_prkey(lun, i) == 0) 8146 continue; 8147 8148 if (sa_res_key == ctl_get_prkey(lun, i)) { 8149 ctl_clr_prkey(lun, i); 8150 lun->pr_key_count--; 8151 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8152 } else if (msg->pr.pr_info.res_type != lun->pr_res_type 8153 && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8154 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8155 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8156 } 8157 } 8158 lun->pr_res_type = msg->pr.pr_info.res_type; 8159 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8160 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8161 lun->pr_res_idx = msg->pr.pr_info.residx; 8162 else 8163 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8164 } 8165 lun->pr_generation++; 8166 8167 } 8168 8169 8170 int 8171 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8172 { 8173 int retval; 8174 u_int32_t param_len; 8175 struct scsi_per_res_out *cdb; 8176 struct ctl_lun *lun; 8177 struct scsi_per_res_out_parms* param; 8178 struct ctl_softc *softc; 8179 uint32_t residx; 8180 uint64_t res_key, sa_res_key, key; 8181 uint8_t type; 8182 union ctl_ha_msg persis_io; 8183 int i; 8184 8185 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8186 8187 retval = CTL_RETVAL_COMPLETE; 8188 8189 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8190 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8191 softc = lun->ctl_softc; 8192 8193 /* 8194 * We only support whole-LUN scope. The scope & type are ignored for 8195 * register, register and ignore existing key and clear. 8196 * We sometimes ignore scope and type on preempts too!! 8197 * Verify reservation type here as well. 8198 */ 8199 type = cdb->scope_type & SPR_TYPE_MASK; 8200 if ((cdb->action == SPRO_RESERVE) 8201 || (cdb->action == SPRO_RELEASE)) { 8202 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8203 ctl_set_invalid_field(/*ctsio*/ ctsio, 8204 /*sks_valid*/ 1, 8205 /*command*/ 1, 8206 /*field*/ 2, 8207 /*bit_valid*/ 1, 8208 /*bit*/ 4); 8209 ctl_done((union ctl_io *)ctsio); 8210 return (CTL_RETVAL_COMPLETE); 8211 } 8212 8213 if (type>8 || type==2 || type==4 || type==0) { 8214 ctl_set_invalid_field(/*ctsio*/ ctsio, 8215 /*sks_valid*/ 1, 8216 /*command*/ 1, 8217 /*field*/ 2, 8218 /*bit_valid*/ 1, 8219 /*bit*/ 0); 8220 ctl_done((union ctl_io *)ctsio); 8221 return (CTL_RETVAL_COMPLETE); 8222 } 8223 } 8224 8225 param_len = scsi_4btoul(cdb->length); 8226 8227 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8228 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8229 ctsio->kern_data_len = param_len; 8230 ctsio->kern_total_len = param_len; 8231 ctsio->kern_data_resid = 0; 8232 ctsio->kern_rel_offset = 0; 8233 ctsio->kern_sg_entries = 0; 8234 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8235 ctsio->be_move_done = ctl_config_move_done; 8236 ctl_datamove((union ctl_io *)ctsio); 8237 8238 return (CTL_RETVAL_COMPLETE); 8239 } 8240 8241 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8242 8243 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8244 res_key = scsi_8btou64(param->res_key.key); 8245 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8246 8247 /* 8248 * Validate the reservation key here except for SPRO_REG_IGNO 8249 * This must be done for all other service actions 8250 */ 8251 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8252 mtx_lock(&lun->lun_lock); 8253 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8254 if (res_key != key) { 8255 /* 8256 * The current key passed in doesn't match 8257 * the one the initiator previously 8258 * registered. 
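				 * Per SPC this is reported as a reservation
				 * conflict, which is what we do below.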
8259 */ 8260 mtx_unlock(&lun->lun_lock); 8261 free(ctsio->kern_data_ptr, M_CTL); 8262 ctl_set_reservation_conflict(ctsio); 8263 ctl_done((union ctl_io *)ctsio); 8264 return (CTL_RETVAL_COMPLETE); 8265 } 8266 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8267 /* 8268 * We are not registered 8269 */ 8270 mtx_unlock(&lun->lun_lock); 8271 free(ctsio->kern_data_ptr, M_CTL); 8272 ctl_set_reservation_conflict(ctsio); 8273 ctl_done((union ctl_io *)ctsio); 8274 return (CTL_RETVAL_COMPLETE); 8275 } else if (res_key != 0) { 8276 /* 8277 * We are not registered and trying to register but 8278 * the register key isn't zero. 8279 */ 8280 mtx_unlock(&lun->lun_lock); 8281 free(ctsio->kern_data_ptr, M_CTL); 8282 ctl_set_reservation_conflict(ctsio); 8283 ctl_done((union ctl_io *)ctsio); 8284 return (CTL_RETVAL_COMPLETE); 8285 } 8286 mtx_unlock(&lun->lun_lock); 8287 } 8288 8289 switch (cdb->action & SPRO_ACTION_MASK) { 8290 case SPRO_REGISTER: 8291 case SPRO_REG_IGNO: { 8292 8293 #if 0 8294 printf("Registration received\n"); 8295 #endif 8296 8297 /* 8298 * We don't support any of these options, as we report in 8299 * the read capabilities request (see 8300 * ctl_persistent_reserve_in(), above). 8301 */ 8302 if ((param->flags & SPR_SPEC_I_PT) 8303 || (param->flags & SPR_ALL_TG_PT) 8304 || (param->flags & SPR_APTPL)) { 8305 int bit_ptr; 8306 8307 if (param->flags & SPR_APTPL) 8308 bit_ptr = 0; 8309 else if (param->flags & SPR_ALL_TG_PT) 8310 bit_ptr = 2; 8311 else /* SPR_SPEC_I_PT */ 8312 bit_ptr = 3; 8313 8314 free(ctsio->kern_data_ptr, M_CTL); 8315 ctl_set_invalid_field(ctsio, 8316 /*sks_valid*/ 1, 8317 /*command*/ 0, 8318 /*field*/ 20, 8319 /*bit_valid*/ 1, 8320 /*bit*/ bit_ptr); 8321 ctl_done((union ctl_io *)ctsio); 8322 return (CTL_RETVAL_COMPLETE); 8323 } 8324 8325 mtx_lock(&lun->lun_lock); 8326 8327 /* 8328 * The initiator wants to clear the 8329 * key/unregister. 8330 */ 8331 if (sa_res_key == 0) { 8332 if ((res_key == 0 8333 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8334 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8335 && ctl_get_prkey(lun, residx) == 0)) { 8336 mtx_unlock(&lun->lun_lock); 8337 goto done; 8338 } 8339 8340 ctl_clr_prkey(lun, residx); 8341 lun->pr_key_count--; 8342 8343 if (residx == lun->pr_res_idx) { 8344 lun->flags &= ~CTL_LUN_PR_RESERVED; 8345 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8346 8347 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8348 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8349 lun->pr_key_count) { 8350 /* 8351 * If the reservation is a registrants 8352 * only type we need to generate a UA 8353 * for other registered inits. 
The 8354 * sense code should be RESERVATIONS 8355 * RELEASED 8356 */ 8357 8358 for (i = softc->init_min; i < softc->init_max; i++){ 8359 if (ctl_get_prkey(lun, i) == 0) 8360 continue; 8361 ctl_est_ua(lun, i, 8362 CTL_UA_RES_RELEASE); 8363 } 8364 } 8365 lun->pr_res_type = 0; 8366 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8367 if (lun->pr_key_count==0) { 8368 lun->flags &= ~CTL_LUN_PR_RESERVED; 8369 lun->pr_res_type = 0; 8370 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8371 } 8372 } 8373 lun->pr_generation++; 8374 mtx_unlock(&lun->lun_lock); 8375 8376 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8377 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8378 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8379 persis_io.pr.pr_info.residx = residx; 8380 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8381 sizeof(persis_io.pr), M_WAITOK); 8382 } else /* sa_res_key != 0 */ { 8383 8384 /* 8385 * If we aren't registered currently then increment 8386 * the key count and set the registered flag. 8387 */ 8388 ctl_alloc_prkey(lun, residx); 8389 if (ctl_get_prkey(lun, residx) == 0) 8390 lun->pr_key_count++; 8391 ctl_set_prkey(lun, residx, sa_res_key); 8392 lun->pr_generation++; 8393 mtx_unlock(&lun->lun_lock); 8394 8395 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8396 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8397 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8398 persis_io.pr.pr_info.residx = residx; 8399 memcpy(persis_io.pr.pr_info.sa_res_key, 8400 param->serv_act_res_key, 8401 sizeof(param->serv_act_res_key)); 8402 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8403 sizeof(persis_io.pr), M_WAITOK); 8404 } 8405 8406 break; 8407 } 8408 case SPRO_RESERVE: 8409 #if 0 8410 printf("Reserve executed type %d\n", type); 8411 #endif 8412 mtx_lock(&lun->lun_lock); 8413 if (lun->flags & CTL_LUN_PR_RESERVED) { 8414 /* 8415 * if this isn't the reservation holder and it's 8416 * not a "all registrants" type or if the type is 8417 * different then we have a conflict 8418 */ 8419 if ((lun->pr_res_idx != residx 8420 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8421 || lun->pr_res_type != type) { 8422 mtx_unlock(&lun->lun_lock); 8423 free(ctsio->kern_data_ptr, M_CTL); 8424 ctl_set_reservation_conflict(ctsio); 8425 ctl_done((union ctl_io *)ctsio); 8426 return (CTL_RETVAL_COMPLETE); 8427 } 8428 mtx_unlock(&lun->lun_lock); 8429 } else /* create a reservation */ { 8430 /* 8431 * If it's not an "all registrants" type record 8432 * reservation holder 8433 */ 8434 if (type != SPR_TYPE_WR_EX_AR 8435 && type != SPR_TYPE_EX_AC_AR) 8436 lun->pr_res_idx = residx; /* Res holder */ 8437 else 8438 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8439 8440 lun->flags |= CTL_LUN_PR_RESERVED; 8441 lun->pr_res_type = type; 8442 8443 mtx_unlock(&lun->lun_lock); 8444 8445 /* send msg to other side */ 8446 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8447 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8448 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8449 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8450 persis_io.pr.pr_info.res_type = type; 8451 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8452 sizeof(persis_io.pr), M_WAITOK); 8453 } 8454 break; 8455 8456 case SPRO_RELEASE: 8457 mtx_lock(&lun->lun_lock); 8458 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8459 /* No reservation exists return good status */ 8460 mtx_unlock(&lun->lun_lock); 8461 goto done; 8462 } 8463 /* 8464 * Is this nexus a reservation holder? 
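		 * If not, the release is a no-op: we return GOOD status
		 * without changing anything.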
		 */
		if (lun->pr_res_idx != residx
		    && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
			/*
			 * Not a reservation holder; return good status
			 * but do nothing.
			 */
			mtx_unlock(&lun->lun_lock);
			goto done;
		}

		if (lun->pr_res_type != type) {
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_illegal_pr_release(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		/* okay to release */
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
		lun->pr_res_type = 0;

		/*
		 * If this isn't an exclusive access reservation and NUAR
		 * is not set, generate UA for all other registrants.
		 */
		if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX &&
		    (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) {
			for (i = softc->init_min; i < softc->init_max; i++) {
				if (i == residx || ctl_get_prkey(lun, i) == 0)
					continue;
				ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
			}
		}
		mtx_unlock(&lun->lun_lock);

		/* Send msg to other side */
		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
		persis_io.pr.pr_info.action = CTL_PR_RELEASE;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
		    sizeof(persis_io.pr), M_WAITOK);
		break;

	case SPRO_CLEAR:
		/* send msg to other side */

		mtx_lock(&lun->lun_lock);
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_type = 0;
		lun->pr_key_count = 0;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;

		ctl_clr_prkey(lun, residx);
		for (i = 0; i < CTL_MAX_INITIATORS; i++)
			if (ctl_get_prkey(lun, i) != 0) {
				ctl_clr_prkey(lun, i);
				ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
			}
		lun->pr_generation++;
		mtx_unlock(&lun->lun_lock);

		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
		persis_io.pr.pr_info.action = CTL_PR_CLEAR;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
		    sizeof(persis_io.pr), M_WAITOK);
		break;

	case SPRO_PREEMPT:
	case SPRO_PRE_ABO: {
		int nretval;

		nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type,
		    residx, ctsio, cdb, param);
		if (nretval != 0)
			return (CTL_RETVAL_COMPLETE);
		break;
	}
	default:
		panic("%s: Invalid PR type %#x", __func__, cdb->action);
	}

done:
	free(ctsio->kern_data_ptr, M_CTL);
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);

	return (retval);
}

/*
 * This routine is for handling a message from the other SC pertaining to
 * persistent reserve out. All the error checking will have been done, so
 * only the action itself needs to be performed here to keep the two sides
 * in sync.
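 * The peer generates these messages in its ctl_persistent_reserve_out()
 * and ctl_pro_preempt() via ctl_ha_msg_send(CTL_HA_CHAN_CTL, ...).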
8563 */ 8564 static void 8565 ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg) 8566 { 8567 struct ctl_softc *softc = control_softc; 8568 struct ctl_lun *lun; 8569 int i; 8570 uint32_t residx, targ_lun; 8571 8572 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8573 mtx_lock(&softc->ctl_lock); 8574 if (targ_lun >= CTL_MAX_LUNS || 8575 (lun = softc->ctl_luns[targ_lun]) == NULL) { 8576 mtx_unlock(&softc->ctl_lock); 8577 return; 8578 } 8579 mtx_lock(&lun->lun_lock); 8580 mtx_unlock(&softc->ctl_lock); 8581 if (lun->flags & CTL_LUN_DISABLED) { 8582 mtx_unlock(&lun->lun_lock); 8583 return; 8584 } 8585 residx = ctl_get_initindex(&msg->hdr.nexus); 8586 switch(msg->pr.pr_info.action) { 8587 case CTL_PR_REG_KEY: 8588 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8589 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8590 lun->pr_key_count++; 8591 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8592 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8593 lun->pr_generation++; 8594 break; 8595 8596 case CTL_PR_UNREG_KEY: 8597 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8598 lun->pr_key_count--; 8599 8600 /* XXX Need to see if the reservation has been released */ 8601 /* if so do we need to generate UA? */ 8602 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8603 lun->flags &= ~CTL_LUN_PR_RESERVED; 8604 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8605 8606 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8607 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8608 lun->pr_key_count) { 8609 /* 8610 * If the reservation is a registrants 8611 * only type we need to generate a UA 8612 * for other registered inits. The 8613 * sense code should be RESERVATIONS 8614 * RELEASED 8615 */ 8616 8617 for (i = softc->init_min; i < softc->init_max; i++) { 8618 if (ctl_get_prkey(lun, i) == 0) 8619 continue; 8620 8621 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8622 } 8623 } 8624 lun->pr_res_type = 0; 8625 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8626 if (lun->pr_key_count==0) { 8627 lun->flags &= ~CTL_LUN_PR_RESERVED; 8628 lun->pr_res_type = 0; 8629 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8630 } 8631 } 8632 lun->pr_generation++; 8633 break; 8634 8635 case CTL_PR_RESERVE: 8636 lun->flags |= CTL_LUN_PR_RESERVED; 8637 lun->pr_res_type = msg->pr.pr_info.res_type; 8638 lun->pr_res_idx = msg->pr.pr_info.residx; 8639 8640 break; 8641 8642 case CTL_PR_RELEASE: 8643 /* 8644 * If this isn't an exclusive access reservation and NUAR 8645 * is not set, generate UA for all other registrants. 
		 */
		if (lun->pr_res_type != SPR_TYPE_EX_AC &&
		    lun->pr_res_type != SPR_TYPE_WR_EX &&
		    (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) {
			for (i = softc->init_min; i < softc->init_max; i++) {
				if (i == residx || ctl_get_prkey(lun, i) == 0)
					continue;
				ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
			}
		}

		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
		lun->pr_res_type = 0;
		break;

	case CTL_PR_PREEMPT:
		ctl_pro_preempt_other(lun, msg);
		break;
	case CTL_PR_CLEAR:
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_type = 0;
		lun->pr_key_count = 0;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;

		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
			if (ctl_get_prkey(lun, i) == 0)
				continue;
			ctl_clr_prkey(lun, i);
			ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
		}
		lun->pr_generation++;
		break;
	}

	mtx_unlock(&lun->lun_lock);
}

int
ctl_read_write(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int flags, retval;
	int isread;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));

	flags = 0;
	isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10
	    || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16;
	switch (ctsio->cdb[0]) {
	case READ_6:
	case WRITE_6: {
		struct scsi_rw_6 *cdb;

		cdb = (struct scsi_rw_6 *)ctsio->cdb;

		lba = scsi_3btoul(cdb->addr);
		/* Only 5 bits are valid in the most significant address byte. */
		lba &= 0x1fffff;
		num_blocks = cdb->length;
		/*
		 * This is correct according to SBC-2.
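		 * A transfer length byte of zero in a READ(6)/WRITE(6) CDB
		 * means 256 blocks, which is what the check below implements.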
8713 */ 8714 if (num_blocks == 0) 8715 num_blocks = 256; 8716 break; 8717 } 8718 case READ_10: 8719 case WRITE_10: { 8720 struct scsi_rw_10 *cdb; 8721 8722 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8723 if (cdb->byte2 & SRW10_FUA) 8724 flags |= CTL_LLF_FUA; 8725 if (cdb->byte2 & SRW10_DPO) 8726 flags |= CTL_LLF_DPO; 8727 lba = scsi_4btoul(cdb->addr); 8728 num_blocks = scsi_2btoul(cdb->length); 8729 break; 8730 } 8731 case WRITE_VERIFY_10: { 8732 struct scsi_write_verify_10 *cdb; 8733 8734 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8735 flags |= CTL_LLF_FUA; 8736 if (cdb->byte2 & SWV_DPO) 8737 flags |= CTL_LLF_DPO; 8738 lba = scsi_4btoul(cdb->addr); 8739 num_blocks = scsi_2btoul(cdb->length); 8740 break; 8741 } 8742 case READ_12: 8743 case WRITE_12: { 8744 struct scsi_rw_12 *cdb; 8745 8746 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8747 if (cdb->byte2 & SRW12_FUA) 8748 flags |= CTL_LLF_FUA; 8749 if (cdb->byte2 & SRW12_DPO) 8750 flags |= CTL_LLF_DPO; 8751 lba = scsi_4btoul(cdb->addr); 8752 num_blocks = scsi_4btoul(cdb->length); 8753 break; 8754 } 8755 case WRITE_VERIFY_12: { 8756 struct scsi_write_verify_12 *cdb; 8757 8758 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8759 flags |= CTL_LLF_FUA; 8760 if (cdb->byte2 & SWV_DPO) 8761 flags |= CTL_LLF_DPO; 8762 lba = scsi_4btoul(cdb->addr); 8763 num_blocks = scsi_4btoul(cdb->length); 8764 break; 8765 } 8766 case READ_16: 8767 case WRITE_16: { 8768 struct scsi_rw_16 *cdb; 8769 8770 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8771 if (cdb->byte2 & SRW12_FUA) 8772 flags |= CTL_LLF_FUA; 8773 if (cdb->byte2 & SRW12_DPO) 8774 flags |= CTL_LLF_DPO; 8775 lba = scsi_8btou64(cdb->addr); 8776 num_blocks = scsi_4btoul(cdb->length); 8777 break; 8778 } 8779 case WRITE_ATOMIC_16: { 8780 struct scsi_write_atomic_16 *cdb; 8781 8782 if (lun->be_lun->atomicblock == 0) { 8783 ctl_set_invalid_opcode(ctsio); 8784 ctl_done((union ctl_io *)ctsio); 8785 return (CTL_RETVAL_COMPLETE); 8786 } 8787 8788 cdb = (struct scsi_write_atomic_16 *)ctsio->cdb; 8789 if (cdb->byte2 & SRW12_FUA) 8790 flags |= CTL_LLF_FUA; 8791 if (cdb->byte2 & SRW12_DPO) 8792 flags |= CTL_LLF_DPO; 8793 lba = scsi_8btou64(cdb->addr); 8794 num_blocks = scsi_2btoul(cdb->length); 8795 if (num_blocks > lun->be_lun->atomicblock) { 8796 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8797 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8798 /*bit*/ 0); 8799 ctl_done((union ctl_io *)ctsio); 8800 return (CTL_RETVAL_COMPLETE); 8801 } 8802 break; 8803 } 8804 case WRITE_VERIFY_16: { 8805 struct scsi_write_verify_16 *cdb; 8806 8807 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8808 flags |= CTL_LLF_FUA; 8809 if (cdb->byte2 & SWV_DPO) 8810 flags |= CTL_LLF_DPO; 8811 lba = scsi_8btou64(cdb->addr); 8812 num_blocks = scsi_4btoul(cdb->length); 8813 break; 8814 } 8815 default: 8816 /* 8817 * We got a command we don't support. This shouldn't 8818 * happen, commands should be filtered out above us. 8819 */ 8820 ctl_set_invalid_opcode(ctsio); 8821 ctl_done((union ctl_io *)ctsio); 8822 8823 return (CTL_RETVAL_COMPLETE); 8824 break; /* NOTREACHED */ 8825 } 8826 8827 /* 8828 * The first check is to make sure we're in bounds, the second 8829 * check is to catch wrap-around problems. If the lba + num blocks 8830 * is less than the lba, then we've wrapped around and the block 8831 * range is invalid anyway. 
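	 * For example, an lba of 0xfffffffffffffffe with num_blocks of 4
	 * sums to 2 modulo 2^64, which the second test rejects.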
8832 */ 8833 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8834 || ((lba + num_blocks) < lba)) { 8835 ctl_set_lba_out_of_range(ctsio, 8836 MAX(lba, lun->be_lun->maxlba + 1)); 8837 ctl_done((union ctl_io *)ctsio); 8838 return (CTL_RETVAL_COMPLETE); 8839 } 8840 8841 /* 8842 * According to SBC-3, a transfer length of 0 is not an error. 8843 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8844 * translates to 256 blocks for those commands. 8845 */ 8846 if (num_blocks == 0) { 8847 ctl_set_success(ctsio); 8848 ctl_done((union ctl_io *)ctsio); 8849 return (CTL_RETVAL_COMPLETE); 8850 } 8851 8852 /* Set FUA and/or DPO if caches are disabled. */ 8853 if (isread) { 8854 if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0) 8855 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8856 } else { 8857 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8858 flags |= CTL_LLF_FUA; 8859 } 8860 8861 lbalen = (struct ctl_lba_len_flags *) 8862 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8863 lbalen->lba = lba; 8864 lbalen->len = num_blocks; 8865 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8866 8867 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8868 ctsio->kern_rel_offset = 0; 8869 8870 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8871 8872 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8873 return (retval); 8874 } 8875 8876 static int 8877 ctl_cnw_cont(union ctl_io *io) 8878 { 8879 struct ctl_scsiio *ctsio; 8880 struct ctl_lun *lun; 8881 struct ctl_lba_len_flags *lbalen; 8882 int retval; 8883 8884 ctsio = &io->scsiio; 8885 ctsio->io_hdr.status = CTL_STATUS_NONE; 8886 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8887 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8888 lbalen = (struct ctl_lba_len_flags *) 8889 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8890 lbalen->flags &= ~CTL_LLF_COMPARE; 8891 lbalen->flags |= CTL_LLF_WRITE; 8892 8893 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8894 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8895 return (retval); 8896 } 8897 8898 int 8899 ctl_cnw(struct ctl_scsiio *ctsio) 8900 { 8901 struct ctl_lun *lun; 8902 struct ctl_lba_len_flags *lbalen; 8903 uint64_t lba; 8904 uint32_t num_blocks; 8905 int flags, retval; 8906 8907 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8908 8909 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8910 8911 flags = 0; 8912 switch (ctsio->cdb[0]) { 8913 case COMPARE_AND_WRITE: { 8914 struct scsi_compare_and_write *cdb; 8915 8916 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8917 if (cdb->byte2 & SRW10_FUA) 8918 flags |= CTL_LLF_FUA; 8919 if (cdb->byte2 & SRW10_DPO) 8920 flags |= CTL_LLF_DPO; 8921 lba = scsi_8btou64(cdb->addr); 8922 num_blocks = cdb->length; 8923 break; 8924 } 8925 default: 8926 /* 8927 * We got a command we don't support. This shouldn't 8928 * happen, commands should be filtered out above us. 8929 */ 8930 ctl_set_invalid_opcode(ctsio); 8931 ctl_done((union ctl_io *)ctsio); 8932 8933 return (CTL_RETVAL_COMPLETE); 8934 break; /* NOTREACHED */ 8935 } 8936 8937 /* 8938 * The first check is to make sure we're in bounds, the second 8939 * check is to catch wrap-around problems. If the lba + num blocks 8940 * is less than the lba, then we've wrapped around and the block 8941 * range is invalid anyway. 
8942 */ 8943 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8944 || ((lba + num_blocks) < lba)) { 8945 ctl_set_lba_out_of_range(ctsio, 8946 MAX(lba, lun->be_lun->maxlba + 1)); 8947 ctl_done((union ctl_io *)ctsio); 8948 return (CTL_RETVAL_COMPLETE); 8949 } 8950 8951 /* 8952 * According to SBC-3, a transfer length of 0 is not an error. 8953 */ 8954 if (num_blocks == 0) { 8955 ctl_set_success(ctsio); 8956 ctl_done((union ctl_io *)ctsio); 8957 return (CTL_RETVAL_COMPLETE); 8958 } 8959 8960 /* Set FUA if write cache is disabled. */ 8961 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8962 flags |= CTL_LLF_FUA; 8963 8964 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 8965 ctsio->kern_rel_offset = 0; 8966 8967 /* 8968 * Set the IO_CONT flag, so that if this I/O gets passed to 8969 * ctl_data_submit_done(), it'll get passed back to 8970 * ctl_ctl_cnw_cont() for further processing. 8971 */ 8972 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 8973 ctsio->io_cont = ctl_cnw_cont; 8974 8975 lbalen = (struct ctl_lba_len_flags *) 8976 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8977 lbalen->lba = lba; 8978 lbalen->len = num_blocks; 8979 lbalen->flags = CTL_LLF_COMPARE | flags; 8980 8981 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 8982 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8983 return (retval); 8984 } 8985 8986 int 8987 ctl_verify(struct ctl_scsiio *ctsio) 8988 { 8989 struct ctl_lun *lun; 8990 struct ctl_lba_len_flags *lbalen; 8991 uint64_t lba; 8992 uint32_t num_blocks; 8993 int bytchk, flags; 8994 int retval; 8995 8996 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8997 8998 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 8999 9000 bytchk = 0; 9001 flags = CTL_LLF_FUA; 9002 switch (ctsio->cdb[0]) { 9003 case VERIFY_10: { 9004 struct scsi_verify_10 *cdb; 9005 9006 cdb = (struct scsi_verify_10 *)ctsio->cdb; 9007 if (cdb->byte2 & SVFY_BYTCHK) 9008 bytchk = 1; 9009 if (cdb->byte2 & SVFY_DPO) 9010 flags |= CTL_LLF_DPO; 9011 lba = scsi_4btoul(cdb->addr); 9012 num_blocks = scsi_2btoul(cdb->length); 9013 break; 9014 } 9015 case VERIFY_12: { 9016 struct scsi_verify_12 *cdb; 9017 9018 cdb = (struct scsi_verify_12 *)ctsio->cdb; 9019 if (cdb->byte2 & SVFY_BYTCHK) 9020 bytchk = 1; 9021 if (cdb->byte2 & SVFY_DPO) 9022 flags |= CTL_LLF_DPO; 9023 lba = scsi_4btoul(cdb->addr); 9024 num_blocks = scsi_4btoul(cdb->length); 9025 break; 9026 } 9027 case VERIFY_16: { 9028 struct scsi_rw_16 *cdb; 9029 9030 cdb = (struct scsi_rw_16 *)ctsio->cdb; 9031 if (cdb->byte2 & SVFY_BYTCHK) 9032 bytchk = 1; 9033 if (cdb->byte2 & SVFY_DPO) 9034 flags |= CTL_LLF_DPO; 9035 lba = scsi_8btou64(cdb->addr); 9036 num_blocks = scsi_4btoul(cdb->length); 9037 break; 9038 } 9039 default: 9040 /* 9041 * We got a command we don't support. This shouldn't 9042 * happen, commands should be filtered out above us. 9043 */ 9044 ctl_set_invalid_opcode(ctsio); 9045 ctl_done((union ctl_io *)ctsio); 9046 return (CTL_RETVAL_COMPLETE); 9047 } 9048 9049 /* 9050 * The first check is to make sure we're in bounds, the second 9051 * check is to catch wrap-around problems. If the lba + num blocks 9052 * is less than the lba, then we've wrapped around and the block 9053 * range is invalid anyway. 
9054 */ 9055 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9056 || ((lba + num_blocks) < lba)) { 9057 ctl_set_lba_out_of_range(ctsio, 9058 MAX(lba, lun->be_lun->maxlba + 1)); 9059 ctl_done((union ctl_io *)ctsio); 9060 return (CTL_RETVAL_COMPLETE); 9061 } 9062 9063 /* 9064 * According to SBC-3, a transfer length of 0 is not an error. 9065 */ 9066 if (num_blocks == 0) { 9067 ctl_set_success(ctsio); 9068 ctl_done((union ctl_io *)ctsio); 9069 return (CTL_RETVAL_COMPLETE); 9070 } 9071 9072 lbalen = (struct ctl_lba_len_flags *) 9073 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9074 lbalen->lba = lba; 9075 lbalen->len = num_blocks; 9076 if (bytchk) { 9077 lbalen->flags = CTL_LLF_COMPARE | flags; 9078 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9079 } else { 9080 lbalen->flags = CTL_LLF_VERIFY | flags; 9081 ctsio->kern_total_len = 0; 9082 } 9083 ctsio->kern_rel_offset = 0; 9084 9085 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 9086 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9087 return (retval); 9088 } 9089 9090 int 9091 ctl_report_luns(struct ctl_scsiio *ctsio) 9092 { 9093 struct ctl_softc *softc; 9094 struct scsi_report_luns *cdb; 9095 struct scsi_report_luns_data *lun_data; 9096 struct ctl_lun *lun, *request_lun; 9097 struct ctl_port *port; 9098 int num_filled, num_luns, num_port_luns, retval; 9099 uint32_t alloc_len, lun_datalen; 9100 uint32_t initidx, targ_lun_id, lun_id; 9101 9102 retval = CTL_RETVAL_COMPLETE; 9103 cdb = (struct scsi_report_luns *)ctsio->cdb; 9104 port = ctl_io_port(&ctsio->io_hdr); 9105 softc = port->ctl_softc; 9106 9107 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9108 9109 num_luns = 0; 9110 num_port_luns = port->lun_map ? port->lun_map_size : CTL_MAX_LUNS; 9111 mtx_lock(&softc->ctl_lock); 9112 for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) { 9113 if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX) 9114 num_luns++; 9115 } 9116 mtx_unlock(&softc->ctl_lock); 9117 9118 switch (cdb->select_report) { 9119 case RPL_REPORT_DEFAULT: 9120 case RPL_REPORT_ALL: 9121 case RPL_REPORT_NONSUBSID: 9122 break; 9123 case RPL_REPORT_WELLKNOWN: 9124 case RPL_REPORT_ADMIN: 9125 case RPL_REPORT_CONGLOM: 9126 num_luns = 0; 9127 break; 9128 default: 9129 ctl_set_invalid_field(ctsio, 9130 /*sks_valid*/ 1, 9131 /*command*/ 1, 9132 /*field*/ 2, 9133 /*bit_valid*/ 0, 9134 /*bit*/ 0); 9135 ctl_done((union ctl_io *)ctsio); 9136 return (retval); 9137 break; /* NOTREACHED */ 9138 } 9139 9140 alloc_len = scsi_4btoul(cdb->length); 9141 /* 9142 * The initiator has to allocate at least 16 bytes for this request, 9143 * so he can at least get the header and the first LUN. Otherwise 9144 * we reject the request (per SPC-3 rev 14, section 6.21). 
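	 * Those 16 bytes are the 8-byte report header plus a single 8-byte
	 * LUN entry, which is exactly the sizeof() sum tested below.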
9145 */ 9146 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9147 sizeof(struct scsi_report_luns_lundata))) { 9148 ctl_set_invalid_field(ctsio, 9149 /*sks_valid*/ 1, 9150 /*command*/ 1, 9151 /*field*/ 6, 9152 /*bit_valid*/ 0, 9153 /*bit*/ 0); 9154 ctl_done((union ctl_io *)ctsio); 9155 return (retval); 9156 } 9157 9158 request_lun = (struct ctl_lun *) 9159 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9160 9161 lun_datalen = sizeof(*lun_data) + 9162 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9163 9164 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9165 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9166 ctsio->kern_sg_entries = 0; 9167 9168 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9169 9170 mtx_lock(&softc->ctl_lock); 9171 for (targ_lun_id = 0, num_filled = 0; 9172 targ_lun_id < num_port_luns && num_filled < num_luns; 9173 targ_lun_id++) { 9174 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 9175 if (lun_id == UINT32_MAX) 9176 continue; 9177 lun = softc->ctl_luns[lun_id]; 9178 if (lun == NULL) 9179 continue; 9180 9181 be64enc(lun_data->luns[num_filled++].lundata, 9182 ctl_encode_lun(targ_lun_id)); 9183 9184 /* 9185 * According to SPC-3, rev 14 section 6.21: 9186 * 9187 * "The execution of a REPORT LUNS command to any valid and 9188 * installed logical unit shall clear the REPORTED LUNS DATA 9189 * HAS CHANGED unit attention condition for all logical 9190 * units of that target with respect to the requesting 9191 * initiator. A valid and installed logical unit is one 9192 * having a PERIPHERAL QUALIFIER of 000b in the standard 9193 * INQUIRY data (see 6.4.2)." 9194 * 9195 * If request_lun is NULL, the LUN this report luns command 9196 * was issued to is either disabled or doesn't exist. In that 9197 * case, we shouldn't clear any pending lun change unit 9198 * attention. 9199 */ 9200 if (request_lun != NULL) { 9201 mtx_lock(&lun->lun_lock); 9202 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 9203 mtx_unlock(&lun->lun_lock); 9204 } 9205 } 9206 mtx_unlock(&softc->ctl_lock); 9207 9208 /* 9209 * It's quite possible that we've returned fewer LUNs than we allocated 9210 * space for. Trim it. 9211 */ 9212 lun_datalen = sizeof(*lun_data) + 9213 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9214 9215 if (lun_datalen < alloc_len) { 9216 ctsio->residual = alloc_len - lun_datalen; 9217 ctsio->kern_data_len = lun_datalen; 9218 ctsio->kern_total_len = lun_datalen; 9219 } else { 9220 ctsio->residual = 0; 9221 ctsio->kern_data_len = alloc_len; 9222 ctsio->kern_total_len = alloc_len; 9223 } 9224 ctsio->kern_data_resid = 0; 9225 ctsio->kern_rel_offset = 0; 9226 ctsio->kern_sg_entries = 0; 9227 9228 /* 9229 * We set this to the actual data length, regardless of how much 9230 * space we actually have to return results. If the user looks at 9231 * this value, he'll know whether or not he allocated enough space 9232 * and reissue the command if necessary. We don't support well 9233 * known logical units, so if the user asks for that, return none. 9234 */ 9235 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9236 9237 /* 9238 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9239 * this request. 
9240 */ 9241 ctl_set_success(ctsio); 9242 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9243 ctsio->be_move_done = ctl_config_move_done; 9244 ctl_datamove((union ctl_io *)ctsio); 9245 return (retval); 9246 } 9247 9248 int 9249 ctl_request_sense(struct ctl_scsiio *ctsio) 9250 { 9251 struct scsi_request_sense *cdb; 9252 struct scsi_sense_data *sense_ptr; 9253 struct ctl_softc *softc; 9254 struct ctl_lun *lun; 9255 uint32_t initidx; 9256 int have_error; 9257 u_int sense_len = SSD_FULL_SIZE; 9258 scsi_sense_data_type sense_format; 9259 ctl_ua_type ua_type; 9260 uint8_t asc = 0, ascq = 0; 9261 9262 cdb = (struct scsi_request_sense *)ctsio->cdb; 9263 9264 softc = control_softc; 9265 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9266 9267 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9268 9269 /* 9270 * Determine which sense format the user wants. 9271 */ 9272 if (cdb->byte2 & SRS_DESC) 9273 sense_format = SSD_TYPE_DESC; 9274 else 9275 sense_format = SSD_TYPE_FIXED; 9276 9277 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9278 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9279 ctsio->kern_sg_entries = 0; 9280 9281 /* 9282 * struct scsi_sense_data, which is currently set to 256 bytes, is 9283 * larger than the largest allowed value for the length field in the 9284 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9285 */ 9286 ctsio->residual = 0; 9287 ctsio->kern_data_len = cdb->length; 9288 ctsio->kern_total_len = cdb->length; 9289 9290 ctsio->kern_data_resid = 0; 9291 ctsio->kern_rel_offset = 0; 9292 ctsio->kern_sg_entries = 0; 9293 9294 /* 9295 * If we don't have a LUN, we don't have any pending sense. 9296 */ 9297 if (lun == NULL || 9298 ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 9299 softc->ha_link < CTL_HA_LINK_UNKNOWN)) { 9300 /* "Logical unit not supported" */ 9301 ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format, 9302 /*current_error*/ 1, 9303 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 9304 /*asc*/ 0x25, 9305 /*ascq*/ 0x00, 9306 SSD_ELEM_NONE); 9307 goto send; 9308 } 9309 9310 have_error = 0; 9311 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9312 /* 9313 * Check for pending sense, and then for pending unit attentions. 9314 * Pending sense gets returned first, then pending unit attentions. 9315 */ 9316 mtx_lock(&lun->lun_lock); 9317 #ifdef CTL_WITH_CA 9318 if (ctl_is_set(lun->have_ca, initidx)) { 9319 scsi_sense_data_type stored_format; 9320 9321 /* 9322 * Check to see which sense format was used for the stored 9323 * sense data. 9324 */ 9325 stored_format = scsi_sense_type(&lun->pending_sense[initidx]); 9326 9327 /* 9328 * If the user requested a different sense format than the 9329 * one we stored, then we need to convert it to the other 9330 * format. If we're going from descriptor to fixed format 9331 * sense data, we may lose things in translation, depending 9332 * on what options were used. 9333 * 9334 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9335 * for some reason we'll just copy it out as-is. 
9336 */ 9337 if ((stored_format == SSD_TYPE_FIXED) 9338 && (sense_format == SSD_TYPE_DESC)) 9339 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9340 &lun->pending_sense[initidx], 9341 (struct scsi_sense_data_desc *)sense_ptr); 9342 else if ((stored_format == SSD_TYPE_DESC) 9343 && (sense_format == SSD_TYPE_FIXED)) 9344 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9345 &lun->pending_sense[initidx], 9346 (struct scsi_sense_data_fixed *)sense_ptr); 9347 else 9348 memcpy(sense_ptr, &lun->pending_sense[initidx], 9349 MIN(sizeof(*sense_ptr), 9350 sizeof(lun->pending_sense[initidx]))); 9351 9352 ctl_clear_mask(lun->have_ca, initidx); 9353 have_error = 1; 9354 } else 9355 #endif 9356 if (have_error == 0) { 9357 ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len, 9358 sense_format); 9359 if (ua_type != CTL_UA_NONE) 9360 have_error = 1; 9361 } 9362 if (have_error == 0) { 9363 /* 9364 * Report informational exception if have one and allowed. 9365 */ 9366 if (lun->MODE_IE.mrie != SIEP_MRIE_NO) { 9367 asc = lun->ie_asc; 9368 ascq = lun->ie_ascq; 9369 } 9370 ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format, 9371 /*current_error*/ 1, 9372 /*sense_key*/ SSD_KEY_NO_SENSE, 9373 /*asc*/ asc, 9374 /*ascq*/ ascq, 9375 SSD_ELEM_NONE); 9376 } 9377 mtx_unlock(&lun->lun_lock); 9378 9379 send: 9380 /* 9381 * We report the SCSI status as OK, since the status of the command 9382 * itself is OK. We're reporting sense as parameter data. 9383 */ 9384 ctl_set_success(ctsio); 9385 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9386 ctsio->be_move_done = ctl_config_move_done; 9387 ctl_datamove((union ctl_io *)ctsio); 9388 return (CTL_RETVAL_COMPLETE); 9389 } 9390 9391 int 9392 ctl_tur(struct ctl_scsiio *ctsio) 9393 { 9394 9395 CTL_DEBUG_PRINT(("ctl_tur\n")); 9396 9397 ctl_set_success(ctsio); 9398 ctl_done((union ctl_io *)ctsio); 9399 9400 return (CTL_RETVAL_COMPLETE); 9401 } 9402 9403 /* 9404 * SCSI VPD page 0x00, the Supported VPD Pages page. 9405 */ 9406 static int 9407 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9408 { 9409 struct scsi_vpd_supported_pages *pages; 9410 int sup_page_size; 9411 struct ctl_lun *lun; 9412 int p; 9413 9414 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9415 9416 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9417 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9418 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9419 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9420 ctsio->kern_sg_entries = 0; 9421 9422 if (sup_page_size < alloc_len) { 9423 ctsio->residual = alloc_len - sup_page_size; 9424 ctsio->kern_data_len = sup_page_size; 9425 ctsio->kern_total_len = sup_page_size; 9426 } else { 9427 ctsio->residual = 0; 9428 ctsio->kern_data_len = alloc_len; 9429 ctsio->kern_total_len = alloc_len; 9430 } 9431 ctsio->kern_data_resid = 0; 9432 ctsio->kern_rel_offset = 0; 9433 ctsio->kern_sg_entries = 0; 9434 9435 /* 9436 * The control device is always connected. The disk device, on the 9437 * other hand, may not be online all the time. Need to change this 9438 * to figure out whether the disk device is actually online or not. 
9439 */ 9440 if (lun != NULL) 9441 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9442 lun->be_lun->lun_type; 9443 else 9444 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9445 9446 p = 0; 9447 /* Supported VPD pages */ 9448 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9449 /* Serial Number */ 9450 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9451 /* Device Identification */ 9452 pages->page_list[p++] = SVPD_DEVICE_ID; 9453 /* Extended INQUIRY Data */ 9454 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9455 /* Mode Page Policy */ 9456 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9457 /* SCSI Ports */ 9458 pages->page_list[p++] = SVPD_SCSI_PORTS; 9459 /* Third-party Copy */ 9460 pages->page_list[p++] = SVPD_SCSI_TPC; 9461 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9462 /* Block limits */ 9463 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9464 /* Block Device Characteristics */ 9465 pages->page_list[p++] = SVPD_BDC; 9466 /* Logical Block Provisioning */ 9467 pages->page_list[p++] = SVPD_LBP; 9468 } 9469 pages->length = p; 9470 9471 ctl_set_success(ctsio); 9472 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9473 ctsio->be_move_done = ctl_config_move_done; 9474 ctl_datamove((union ctl_io *)ctsio); 9475 return (CTL_RETVAL_COMPLETE); 9476 } 9477 9478 /* 9479 * SCSI VPD page 0x80, the Unit Serial Number page. 9480 */ 9481 static int 9482 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9483 { 9484 struct scsi_vpd_unit_serial_number *sn_ptr; 9485 struct ctl_lun *lun; 9486 int data_len; 9487 9488 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9489 9490 data_len = 4 + CTL_SN_LEN; 9491 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9492 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9493 if (data_len < alloc_len) { 9494 ctsio->residual = alloc_len - data_len; 9495 ctsio->kern_data_len = data_len; 9496 ctsio->kern_total_len = data_len; 9497 } else { 9498 ctsio->residual = 0; 9499 ctsio->kern_data_len = alloc_len; 9500 ctsio->kern_total_len = alloc_len; 9501 } 9502 ctsio->kern_data_resid = 0; 9503 ctsio->kern_rel_offset = 0; 9504 ctsio->kern_sg_entries = 0; 9505 9506 /* 9507 * The control device is always connected. The disk device, on the 9508 * other hand, may not be online all the time. Need to change this 9509 * to figure out whether the disk device is actually online or not. 9510 */ 9511 if (lun != NULL) 9512 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9513 lun->be_lun->lun_type; 9514 else 9515 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9516 9517 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9518 sn_ptr->length = CTL_SN_LEN; 9519 /* 9520 * If we don't have a LUN, we just leave the serial number as 9521 * all spaces. 9522 */ 9523 if (lun != NULL) { 9524 strncpy((char *)sn_ptr->serial_num, 9525 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9526 } else 9527 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9528 9529 ctl_set_success(ctsio); 9530 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9531 ctsio->be_move_done = ctl_config_move_done; 9532 ctl_datamove((union ctl_io *)ctsio); 9533 return (CTL_RETVAL_COMPLETE); 9534 } 9535 9536 9537 /* 9538 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 
9539 */ 9540 static int 9541 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9542 { 9543 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9544 struct ctl_lun *lun; 9545 int data_len; 9546 9547 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9548 9549 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9550 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9551 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9552 ctsio->kern_sg_entries = 0; 9553 9554 if (data_len < alloc_len) { 9555 ctsio->residual = alloc_len - data_len; 9556 ctsio->kern_data_len = data_len; 9557 ctsio->kern_total_len = data_len; 9558 } else { 9559 ctsio->residual = 0; 9560 ctsio->kern_data_len = alloc_len; 9561 ctsio->kern_total_len = alloc_len; 9562 } 9563 ctsio->kern_data_resid = 0; 9564 ctsio->kern_rel_offset = 0; 9565 ctsio->kern_sg_entries = 0; 9566 9567 /* 9568 * The control device is always connected. The disk device, on the 9569 * other hand, may not be online all the time. 9570 */ 9571 if (lun != NULL) 9572 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9573 lun->be_lun->lun_type; 9574 else 9575 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9576 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9577 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9578 /* 9579 * We support head of queue, ordered and simple tags. 9580 */ 9581 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9582 /* 9583 * Volatile cache supported. 9584 */ 9585 eid_ptr->flags3 = SVPD_EID_V_SUP; 9586 9587 /* 9588 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9589 * attention for a particular IT nexus on all LUNs once we report 9590 * it to that nexus once. This bit is required as of SPC-4. 9591 */ 9592 eid_ptr->flags4 = SVPD_EID_LUICLR; 9593 9594 /* 9595 * We support revert to defaults (RTD) bit in MODE SELECT. 9596 */ 9597 eid_ptr->flags5 = SVPD_EID_RTD_SUP; 9598 9599 /* 9600 * XXX KDM in order to correctly answer this, we would need 9601 * information from the SIM to determine how much sense data it 9602 * can send. So this would really be a path inquiry field, most 9603 * likely. This can be set to a maximum of 252 according to SPC-4, 9604 * but the hardware may or may not be able to support that much. 9605 * 0 just means that the maximum sense data length is not reported. 
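 * (Purely as an illustration: a front end that could return at most, say,
 * 96 bytes of sense data would want 96 reported here.  Until that plumbing
 * exists, leave it at 0.)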
9606 */ 9607 eid_ptr->max_sense_length = 0; 9608 9609 ctl_set_success(ctsio); 9610 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9611 ctsio->be_move_done = ctl_config_move_done; 9612 ctl_datamove((union ctl_io *)ctsio); 9613 return (CTL_RETVAL_COMPLETE); 9614 } 9615 9616 static int 9617 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9618 { 9619 struct scsi_vpd_mode_page_policy *mpp_ptr; 9620 struct ctl_lun *lun; 9621 int data_len; 9622 9623 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9624 9625 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9626 sizeof(struct scsi_vpd_mode_page_policy_descr); 9627 9628 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9629 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9630 ctsio->kern_sg_entries = 0; 9631 9632 if (data_len < alloc_len) { 9633 ctsio->residual = alloc_len - data_len; 9634 ctsio->kern_data_len = data_len; 9635 ctsio->kern_total_len = data_len; 9636 } else { 9637 ctsio->residual = 0; 9638 ctsio->kern_data_len = alloc_len; 9639 ctsio->kern_total_len = alloc_len; 9640 } 9641 ctsio->kern_data_resid = 0; 9642 ctsio->kern_rel_offset = 0; 9643 ctsio->kern_sg_entries = 0; 9644 9645 /* 9646 * The control device is always connected. The disk device, on the 9647 * other hand, may not be online all the time. 9648 */ 9649 if (lun != NULL) 9650 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9651 lun->be_lun->lun_type; 9652 else 9653 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9654 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9655 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9656 mpp_ptr->descr[0].page_code = 0x3f; 9657 mpp_ptr->descr[0].subpage_code = 0xff; 9658 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9659 9660 ctl_set_success(ctsio); 9661 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9662 ctsio->be_move_done = ctl_config_move_done; 9663 ctl_datamove((union ctl_io *)ctsio); 9664 return (CTL_RETVAL_COMPLETE); 9665 } 9666 9667 /* 9668 * SCSI VPD page 0x83, the Device Identification page. 
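 * The descriptor list built below contains, in order: any LUN-association
 * designators supplied by the backend, the port designator (WWPN), a
 * Relative Target Port identifier (type 4h), a Target Port Group identifier
 * (type 5h) and, when available, the target designator.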
9669 */ 9670 static int 9671 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9672 { 9673 struct scsi_vpd_device_id *devid_ptr; 9674 struct scsi_vpd_id_descriptor *desc; 9675 struct ctl_softc *softc; 9676 struct ctl_lun *lun; 9677 struct ctl_port *port; 9678 int data_len, g; 9679 uint8_t proto; 9680 9681 softc = control_softc; 9682 9683 port = ctl_io_port(&ctsio->io_hdr); 9684 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9685 9686 data_len = sizeof(struct scsi_vpd_device_id) + 9687 sizeof(struct scsi_vpd_id_descriptor) + 9688 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9689 sizeof(struct scsi_vpd_id_descriptor) + 9690 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9691 if (lun && lun->lun_devid) 9692 data_len += lun->lun_devid->len; 9693 if (port && port->port_devid) 9694 data_len += port->port_devid->len; 9695 if (port && port->target_devid) 9696 data_len += port->target_devid->len; 9697 9698 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9699 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9700 ctsio->kern_sg_entries = 0; 9701 9702 if (data_len < alloc_len) { 9703 ctsio->residual = alloc_len - data_len; 9704 ctsio->kern_data_len = data_len; 9705 ctsio->kern_total_len = data_len; 9706 } else { 9707 ctsio->residual = 0; 9708 ctsio->kern_data_len = alloc_len; 9709 ctsio->kern_total_len = alloc_len; 9710 } 9711 ctsio->kern_data_resid = 0; 9712 ctsio->kern_rel_offset = 0; 9713 ctsio->kern_sg_entries = 0; 9714 9715 /* 9716 * The control device is always connected. The disk device, on the 9717 * other hand, may not be online all the time. 9718 */ 9719 if (lun != NULL) 9720 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9721 lun->be_lun->lun_type; 9722 else 9723 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9724 devid_ptr->page_code = SVPD_DEVICE_ID; 9725 scsi_ulto2b(data_len - 4, devid_ptr->length); 9726 9727 if (port && port->port_type == CTL_PORT_FC) 9728 proto = SCSI_PROTO_FC << 4; 9729 else if (port && port->port_type == CTL_PORT_ISCSI) 9730 proto = SCSI_PROTO_ISCSI << 4; 9731 else 9732 proto = SCSI_PROTO_SPI << 4; 9733 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9734 9735 /* 9736 * We're using a LUN association here. i.e., this device ID is a 9737 * per-LUN identifier. 9738 */ 9739 if (lun && lun->lun_devid) { 9740 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9741 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9742 lun->lun_devid->len); 9743 } 9744 9745 /* 9746 * This is for the WWPN which is a port association. 
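 * As with the LUN designator above, each descriptor is copied verbatim and
 * desc is then advanced past it, so the designators end up packed back to
 * back in the data-in buffer.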
9747 */ 9748 if (port && port->port_devid) { 9749 memcpy(desc, port->port_devid->data, port->port_devid->len); 9750 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9751 port->port_devid->len); 9752 } 9753 9754 /* 9755 * This is for the Relative Target Port(type 4h) identifier 9756 */ 9757 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9758 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9759 SVPD_ID_TYPE_RELTARG; 9760 desc->length = 4; 9761 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9762 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9763 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9764 9765 /* 9766 * This is for the Target Port Group(type 5h) identifier 9767 */ 9768 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9769 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9770 SVPD_ID_TYPE_TPORTGRP; 9771 desc->length = 4; 9772 if (softc->is_single || 9773 (port && port->status & CTL_PORT_STATUS_HA_SHARED)) 9774 g = 1; 9775 else 9776 g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt; 9777 scsi_ulto2b(g, &desc->identifier[2]); 9778 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9779 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9780 9781 /* 9782 * This is for the Target identifier 9783 */ 9784 if (port && port->target_devid) { 9785 memcpy(desc, port->target_devid->data, port->target_devid->len); 9786 } 9787 9788 ctl_set_success(ctsio); 9789 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9790 ctsio->be_move_done = ctl_config_move_done; 9791 ctl_datamove((union ctl_io *)ctsio); 9792 return (CTL_RETVAL_COMPLETE); 9793 } 9794 9795 static int 9796 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9797 { 9798 struct ctl_softc *softc = control_softc; 9799 struct scsi_vpd_scsi_ports *sp; 9800 struct scsi_vpd_port_designation *pd; 9801 struct scsi_vpd_port_designation_cont *pdc; 9802 struct ctl_lun *lun; 9803 struct ctl_port *port; 9804 int data_len, num_target_ports, iid_len, id_len; 9805 9806 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9807 9808 num_target_ports = 0; 9809 iid_len = 0; 9810 id_len = 0; 9811 mtx_lock(&softc->ctl_lock); 9812 STAILQ_FOREACH(port, &softc->port_list, links) { 9813 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9814 continue; 9815 if (lun != NULL && 9816 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9817 continue; 9818 num_target_ports++; 9819 if (port->init_devid) 9820 iid_len += port->init_devid->len; 9821 if (port->port_devid) 9822 id_len += port->port_devid->len; 9823 } 9824 mtx_unlock(&softc->ctl_lock); 9825 9826 data_len = sizeof(struct scsi_vpd_scsi_ports) + 9827 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9828 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9829 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9830 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9831 ctsio->kern_sg_entries = 0; 9832 9833 if (data_len < alloc_len) { 9834 ctsio->residual = alloc_len - data_len; 9835 ctsio->kern_data_len = data_len; 9836 ctsio->kern_total_len = data_len; 9837 } else { 9838 ctsio->residual = 0; 9839 ctsio->kern_data_len = alloc_len; 9840 ctsio->kern_total_len = alloc_len; 9841 } 9842 ctsio->kern_data_resid = 0; 9843 ctsio->kern_rel_offset = 0; 9844 ctsio->kern_sg_entries = 0; 9845 9846 /* 9847 * The control device is always connected. The disk device, on the 9848 * other hand, may not be online all the time. 
Need to change this 9849 * to figure out whether the disk device is actually online or not. 9850 */ 9851 if (lun != NULL) 9852 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 9853 lun->be_lun->lun_type; 9854 else 9855 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9856 9857 sp->page_code = SVPD_SCSI_PORTS; 9858 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9859 sp->page_length); 9860 pd = &sp->design[0]; 9861 9862 mtx_lock(&softc->ctl_lock); 9863 STAILQ_FOREACH(port, &softc->port_list, links) { 9864 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9865 continue; 9866 if (lun != NULL && 9867 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9868 continue; 9869 scsi_ulto2b(port->targ_port, pd->relative_port_id); 9870 if (port->init_devid) { 9871 iid_len = port->init_devid->len; 9872 memcpy(pd->initiator_transportid, 9873 port->init_devid->data, port->init_devid->len); 9874 } else 9875 iid_len = 0; 9876 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9877 pdc = (struct scsi_vpd_port_designation_cont *) 9878 (&pd->initiator_transportid[iid_len]); 9879 if (port->port_devid) { 9880 id_len = port->port_devid->len; 9881 memcpy(pdc->target_port_descriptors, 9882 port->port_devid->data, port->port_devid->len); 9883 } else 9884 id_len = 0; 9885 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9886 pd = (struct scsi_vpd_port_designation *) 9887 ((uint8_t *)pdc->target_port_descriptors + id_len); 9888 } 9889 mtx_unlock(&softc->ctl_lock); 9890 9891 ctl_set_success(ctsio); 9892 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9893 ctsio->be_move_done = ctl_config_move_done; 9894 ctl_datamove((union ctl_io *)ctsio); 9895 return (CTL_RETVAL_COMPLETE); 9896 } 9897 9898 static int 9899 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9900 { 9901 struct scsi_vpd_block_limits *bl_ptr; 9902 struct ctl_lun *lun; 9903 uint64_t ival; 9904 9905 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9906 9907 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9908 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 9909 ctsio->kern_sg_entries = 0; 9910 9911 if (sizeof(*bl_ptr) < alloc_len) { 9912 ctsio->residual = alloc_len - sizeof(*bl_ptr); 9913 ctsio->kern_data_len = sizeof(*bl_ptr); 9914 ctsio->kern_total_len = sizeof(*bl_ptr); 9915 } else { 9916 ctsio->residual = 0; 9917 ctsio->kern_data_len = alloc_len; 9918 ctsio->kern_total_len = alloc_len; 9919 } 9920 ctsio->kern_data_resid = 0; 9921 ctsio->kern_rel_offset = 0; 9922 ctsio->kern_sg_entries = 0; 9923 9924 /* 9925 * The control device is always connected. The disk device, on the 9926 * other hand, may not be online all the time. Need to change this 9927 * to figure out whether the disk device is actually online or not. 
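 * The limits reported below come largely from backend LUN options: the
 * unmap_max_lba and unmap_max_descr values default to 0xffffffff when the
 * options are not set, the optimal unmap granularity and alignment are
 * derived from ublockexp/ublockoff, and write_same_max_lba defaults to
 * UINT64_MAX.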
9928 */ 9929 if (lun != NULL) 9930 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9931 lun->be_lun->lun_type; 9932 else 9933 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9934 9935 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9936 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 9937 bl_ptr->max_cmp_write_len = 0xff; 9938 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9939 if (lun != NULL) { 9940 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 9941 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9942 ival = 0xffffffff; 9943 ctl_get_opt_number(&lun->be_lun->options, 9944 "unmap_max_lba", &ival); 9945 scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt); 9946 ival = 0xffffffff; 9947 ctl_get_opt_number(&lun->be_lun->options, 9948 "unmap_max_descr", &ival); 9949 scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt); 9950 if (lun->be_lun->ublockexp != 0) { 9951 scsi_ulto4b((1 << lun->be_lun->ublockexp), 9952 bl_ptr->opt_unmap_grain); 9953 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 9954 bl_ptr->unmap_grain_align); 9955 } 9956 } 9957 scsi_ulto4b(lun->be_lun->atomicblock, 9958 bl_ptr->max_atomic_transfer_length); 9959 scsi_ulto4b(0, bl_ptr->atomic_alignment); 9960 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 9961 scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary); 9962 scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size); 9963 ival = UINT64_MAX; 9964 ctl_get_opt_number(&lun->be_lun->options, "write_same_max_lba", &ival); 9965 scsi_u64to8b(ival, bl_ptr->max_write_same_length); 9966 } 9967 9968 ctl_set_success(ctsio); 9969 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9970 ctsio->be_move_done = ctl_config_move_done; 9971 ctl_datamove((union ctl_io *)ctsio); 9972 return (CTL_RETVAL_COMPLETE); 9973 } 9974 9975 static int 9976 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 9977 { 9978 struct scsi_vpd_block_device_characteristics *bdc_ptr; 9979 struct ctl_lun *lun; 9980 const char *value; 9981 u_int i; 9982 9983 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9984 9985 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 9986 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 9987 ctsio->kern_sg_entries = 0; 9988 9989 if (sizeof(*bdc_ptr) < alloc_len) { 9990 ctsio->residual = alloc_len - sizeof(*bdc_ptr); 9991 ctsio->kern_data_len = sizeof(*bdc_ptr); 9992 ctsio->kern_total_len = sizeof(*bdc_ptr); 9993 } else { 9994 ctsio->residual = 0; 9995 ctsio->kern_data_len = alloc_len; 9996 ctsio->kern_total_len = alloc_len; 9997 } 9998 ctsio->kern_data_resid = 0; 9999 ctsio->kern_rel_offset = 0; 10000 ctsio->kern_sg_entries = 0; 10001 10002 /* 10003 * The control device is always connected. The disk device, on the 10004 * other hand, may not be online all the time. Need to change this 10005 * to figure out whether the disk device is actually online or not. 
10005 * to figure out whether the disk device is actually online or not.
10006 */ 10007 if (lun != NULL) 10008 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10009 lun->be_lun->lun_type; 10010 else 10011 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10012 bdc_ptr->page_code = SVPD_BDC; 10013 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 10014 if (lun != NULL && 10015 (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) 10016 i = strtol(value, NULL, 0); 10017 else 10018 i = CTL_DEFAULT_ROTATION_RATE; 10019 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 10020 if (lun != NULL && 10021 (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL) 10022 i = strtol(value, NULL, 0); 10023 else 10024 i = 0; 10025 bdc_ptr->wab_wac_ff = (i & 0x0f); 10026 bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; 10027 10028 ctl_set_success(ctsio); 10029 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10030 ctsio->be_move_done = ctl_config_move_done; 10031 ctl_datamove((union ctl_io *)ctsio); 10032 return (CTL_RETVAL_COMPLETE); 10033 } 10034 10035 static int 10036 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 10037 { 10038 struct scsi_vpd_logical_block_prov *lbp_ptr; 10039 struct ctl_lun *lun; 10040 const char *value; 10041 10042 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10043 10044 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 10045 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 10046 ctsio->kern_sg_entries = 0; 10047 10048 if (sizeof(*lbp_ptr) < alloc_len) { 10049 ctsio->residual = alloc_len - sizeof(*lbp_ptr); 10050 ctsio->kern_data_len = sizeof(*lbp_ptr); 10051 ctsio->kern_total_len = sizeof(*lbp_ptr); 10052 } else { 10053 ctsio->residual = 0; 10054 ctsio->kern_data_len = alloc_len; 10055 ctsio->kern_total_len = alloc_len; 10056 } 10057 ctsio->kern_data_resid = 0; 10058 ctsio->kern_rel_offset = 0; 10059 ctsio->kern_sg_entries = 0; 10060 10061 /* 10062 * The control device is always connected. The disk device, on the 10063 * other hand, may not be online all the time. Need to change this 10064 * to figure out whether the disk device is actually online or not. 10065 */ 10066 if (lun != NULL) 10067 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10068 lun->be_lun->lun_type; 10069 else 10070 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10071 10072 lbp_ptr->page_code = SVPD_LBP; 10073 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 10074 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 10075 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 10076 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 10077 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 10078 value = ctl_get_opt(&lun->be_lun->options, "provisioning_type"); 10079 if (value != NULL) { 10080 if (strcmp(value, "resource") == 0) 10081 lbp_ptr->prov_type = SVPD_LBP_RESOURCE; 10082 else if (strcmp(value, "thin") == 0) 10083 lbp_ptr->prov_type = SVPD_LBP_THIN; 10084 } else 10085 lbp_ptr->prov_type = SVPD_LBP_THIN; 10086 } 10087 10088 ctl_set_success(ctsio); 10089 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10090 ctsio->be_move_done = ctl_config_move_done; 10091 ctl_datamove((union ctl_io *)ctsio); 10092 return (CTL_RETVAL_COMPLETE); 10093 } 10094 10095 /* 10096 * INQUIRY with the EVPD bit set. 
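 * The page code in byte 2 of the CDB selects one of the handlers above.
 * The Block Limits, Block Device Characteristics and Logical Block
 * Provisioning pages only make sense for direct access LUNs; for anything
 * else they are rejected with INVALID FIELD IN CDB.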
10097 */ 10098 static int 10099 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 10100 { 10101 struct ctl_lun *lun; 10102 struct scsi_inquiry *cdb; 10103 int alloc_len, retval; 10104 10105 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10106 cdb = (struct scsi_inquiry *)ctsio->cdb; 10107 alloc_len = scsi_2btoul(cdb->length); 10108 10109 switch (cdb->page_code) { 10110 case SVPD_SUPPORTED_PAGES: 10111 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 10112 break; 10113 case SVPD_UNIT_SERIAL_NUMBER: 10114 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 10115 break; 10116 case SVPD_DEVICE_ID: 10117 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 10118 break; 10119 case SVPD_EXTENDED_INQUIRY_DATA: 10120 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 10121 break; 10122 case SVPD_MODE_PAGE_POLICY: 10123 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 10124 break; 10125 case SVPD_SCSI_PORTS: 10126 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 10127 break; 10128 case SVPD_SCSI_TPC: 10129 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 10130 break; 10131 case SVPD_BLOCK_LIMITS: 10132 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10133 goto err; 10134 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 10135 break; 10136 case SVPD_BDC: 10137 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10138 goto err; 10139 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 10140 break; 10141 case SVPD_LBP: 10142 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10143 goto err; 10144 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 10145 break; 10146 default: 10147 err: 10148 ctl_set_invalid_field(ctsio, 10149 /*sks_valid*/ 1, 10150 /*command*/ 1, 10151 /*field*/ 2, 10152 /*bit_valid*/ 0, 10153 /*bit*/ 0); 10154 ctl_done((union ctl_io *)ctsio); 10155 retval = CTL_RETVAL_COMPLETE; 10156 break; 10157 } 10158 10159 return (retval); 10160 } 10161 10162 /* 10163 * Standard INQUIRY data. 10164 */ 10165 static int 10166 ctl_inquiry_std(struct ctl_scsiio *ctsio) 10167 { 10168 struct scsi_inquiry_data *inq_ptr; 10169 struct scsi_inquiry *cdb; 10170 struct ctl_softc *softc = control_softc; 10171 struct ctl_port *port; 10172 struct ctl_lun *lun; 10173 char *val; 10174 uint32_t alloc_len, data_len; 10175 ctl_port_type port_type; 10176 10177 port = ctl_io_port(&ctsio->io_hdr); 10178 port_type = port->port_type; 10179 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 10180 port_type = CTL_PORT_SCSI; 10181 10182 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10183 cdb = (struct scsi_inquiry *)ctsio->cdb; 10184 alloc_len = scsi_2btoul(cdb->length); 10185 10186 /* 10187 * We malloc the full inquiry data size here and fill it 10188 * in. If the user only asks for less, we'll give him 10189 * that much. 
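 * For example, an initiator using the common 36-byte allocation length
 * gets only the first 36 bytes of what is built here; if it asks for more
 * than we build, the difference is reported as residual.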
10190 */ 10191 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); 10192 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10193 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 10194 ctsio->kern_sg_entries = 0; 10195 ctsio->kern_data_resid = 0; 10196 ctsio->kern_rel_offset = 0; 10197 10198 if (data_len < alloc_len) { 10199 ctsio->residual = alloc_len - data_len; 10200 ctsio->kern_data_len = data_len; 10201 ctsio->kern_total_len = data_len; 10202 } else { 10203 ctsio->residual = 0; 10204 ctsio->kern_data_len = alloc_len; 10205 ctsio->kern_total_len = alloc_len; 10206 } 10207 10208 if (lun != NULL) { 10209 if ((lun->flags & CTL_LUN_PRIMARY_SC) || 10210 softc->ha_link >= CTL_HA_LINK_UNKNOWN) { 10211 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10212 lun->be_lun->lun_type; 10213 } else { 10214 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | 10215 lun->be_lun->lun_type; 10216 } 10217 if (lun->flags & CTL_LUN_REMOVABLE) 10218 inq_ptr->dev_qual2 |= SID_RMB; 10219 } else 10220 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 10221 10222 /* RMB in byte 2 is 0 */ 10223 inq_ptr->version = SCSI_REV_SPC5; 10224 10225 /* 10226 * According to SAM-3, even if a device only supports a single 10227 * level of LUN addressing, it should still set the HISUP bit: 10228 * 10229 * 4.9.1 Logical unit numbers overview 10230 * 10231 * All logical unit number formats described in this standard are 10232 * hierarchical in structure even when only a single level in that 10233 * hierarchy is used. The HISUP bit shall be set to one in the 10234 * standard INQUIRY data (see SPC-2) when any logical unit number 10235 * format described in this standard is used. Non-hierarchical 10236 * formats are outside the scope of this standard. 10237 * 10238 * Therefore we set the HiSup bit here. 10239 * 10240 * The response format is 2, per SPC-3. 10241 */ 10242 inq_ptr->response_format = SID_HiSup | 2; 10243 10244 inq_ptr->additional_length = data_len - 10245 (offsetof(struct scsi_inquiry_data, additional_length) + 1); 10246 CTL_DEBUG_PRINT(("additional_length = %d\n", 10247 inq_ptr->additional_length)); 10248 10249 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; 10250 if (port_type == CTL_PORT_SCSI) 10251 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10252 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10253 inq_ptr->flags = SID_CmdQue; 10254 if (port_type == CTL_PORT_SCSI) 10255 inq_ptr->flags |= SID_WBus16 | SID_Sync; 10256 10257 /* 10258 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10259 * We have 8 bytes for the vendor name, and 16 bytes for the device 10260 * name and 4 bytes for the revision. 
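 * For example, a three character string given via the "vendor" option is
 * copied over a field pre-filled with spaces, leaving three characters
 * followed by five spaces, since strncpy() is limited to strlen(val) bytes.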
10261 */ 10262 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10263 "vendor")) == NULL) { 10264 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10265 } else { 10266 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10267 strncpy(inq_ptr->vendor, val, 10268 min(sizeof(inq_ptr->vendor), strlen(val))); 10269 } 10270 if (lun == NULL) { 10271 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10272 sizeof(inq_ptr->product)); 10273 } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) { 10274 switch (lun->be_lun->lun_type) { 10275 case T_DIRECT: 10276 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10277 sizeof(inq_ptr->product)); 10278 break; 10279 case T_PROCESSOR: 10280 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10281 sizeof(inq_ptr->product)); 10282 break; 10283 case T_CDROM: 10284 strncpy(inq_ptr->product, CTL_CDROM_PRODUCT, 10285 sizeof(inq_ptr->product)); 10286 break; 10287 default: 10288 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 10289 sizeof(inq_ptr->product)); 10290 break; 10291 } 10292 } else { 10293 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10294 strncpy(inq_ptr->product, val, 10295 min(sizeof(inq_ptr->product), strlen(val))); 10296 } 10297 10298 /* 10299 * XXX make this a macro somewhere so it automatically gets 10300 * incremented when we make changes. 10301 */ 10302 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10303 "revision")) == NULL) { 10304 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10305 } else { 10306 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10307 strncpy(inq_ptr->revision, val, 10308 min(sizeof(inq_ptr->revision), strlen(val))); 10309 } 10310 10311 /* 10312 * For parallel SCSI, we support double transition and single 10313 * transition clocking. We also support QAS (Quick Arbitration 10314 * and Selection) and Information Unit transfers on both the 10315 * control and array devices. 
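 * The version descriptors filled in further below claim SAM-6 and SPC-5,
 * plus a transport descriptor chosen by port type (FCP-2, SPI-4, iSCSI or
 * SAS) and a device type descriptor (SBC-4 for direct access LUNs, MMC-6
 * for CD-ROM).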
10316 */ 10317 if (port_type == CTL_PORT_SCSI) 10318 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10319 SID_SPI_IUS; 10320 10321 /* SAM-6 (no version claimed) */ 10322 scsi_ulto2b(0x00C0, inq_ptr->version1); 10323 /* SPC-5 (no version claimed) */ 10324 scsi_ulto2b(0x05C0, inq_ptr->version2); 10325 if (port_type == CTL_PORT_FC) { 10326 /* FCP-2 ANSI INCITS.350:2003 */ 10327 scsi_ulto2b(0x0917, inq_ptr->version3); 10328 } else if (port_type == CTL_PORT_SCSI) { 10329 /* SPI-4 ANSI INCITS.362:200x */ 10330 scsi_ulto2b(0x0B56, inq_ptr->version3); 10331 } else if (port_type == CTL_PORT_ISCSI) { 10332 /* iSCSI (no version claimed) */ 10333 scsi_ulto2b(0x0960, inq_ptr->version3); 10334 } else if (port_type == CTL_PORT_SAS) { 10335 /* SAS (no version claimed) */ 10336 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10337 } 10338 10339 if (lun == NULL) { 10340 /* SBC-4 (no version claimed) */ 10341 scsi_ulto2b(0x0600, inq_ptr->version4); 10342 } else { 10343 switch (lun->be_lun->lun_type) { 10344 case T_DIRECT: 10345 /* SBC-4 (no version claimed) */ 10346 scsi_ulto2b(0x0600, inq_ptr->version4); 10347 break; 10348 case T_PROCESSOR: 10349 break; 10350 case T_CDROM: 10351 /* MMC-6 (no version claimed) */ 10352 scsi_ulto2b(0x04E0, inq_ptr->version4); 10353 break; 10354 default: 10355 break; 10356 } 10357 } 10358 10359 ctl_set_success(ctsio); 10360 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10361 ctsio->be_move_done = ctl_config_move_done; 10362 ctl_datamove((union ctl_io *)ctsio); 10363 return (CTL_RETVAL_COMPLETE); 10364 } 10365 10366 int 10367 ctl_inquiry(struct ctl_scsiio *ctsio) 10368 { 10369 struct scsi_inquiry *cdb; 10370 int retval; 10371 10372 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10373 10374 cdb = (struct scsi_inquiry *)ctsio->cdb; 10375 if (cdb->byte2 & SI_EVPD) 10376 retval = ctl_inquiry_evpd(ctsio); 10377 else if (cdb->page_code == 0) 10378 retval = ctl_inquiry_std(ctsio); 10379 else { 10380 ctl_set_invalid_field(ctsio, 10381 /*sks_valid*/ 1, 10382 /*command*/ 1, 10383 /*field*/ 2, 10384 /*bit_valid*/ 0, 10385 /*bit*/ 0); 10386 ctl_done((union ctl_io *)ctsio); 10387 return (CTL_RETVAL_COMPLETE); 10388 } 10389 10390 return (retval); 10391 } 10392 10393 int 10394 ctl_get_config(struct ctl_scsiio *ctsio) 10395 { 10396 struct scsi_get_config_header *hdr; 10397 struct scsi_get_config_feature *feature; 10398 struct scsi_get_config *cdb; 10399 struct ctl_lun *lun; 10400 uint32_t alloc_len, data_len; 10401 int rt, starting; 10402 10403 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10404 cdb = (struct scsi_get_config *)ctsio->cdb; 10405 rt = (cdb->rt & SGC_RT_MASK); 10406 starting = scsi_2btoul(cdb->starting_feature); 10407 alloc_len = scsi_2btoul(cdb->length); 10408 10409 data_len = sizeof(struct scsi_get_config_header) + 10410 sizeof(struct scsi_get_config_feature) + 8 + 10411 sizeof(struct scsi_get_config_feature) + 8 + 10412 sizeof(struct scsi_get_config_feature) + 4 + 10413 sizeof(struct scsi_get_config_feature) + 4 + 10414 sizeof(struct scsi_get_config_feature) + 8 + 10415 sizeof(struct scsi_get_config_feature) + 10416 sizeof(struct scsi_get_config_feature) + 4 + 10417 sizeof(struct scsi_get_config_feature) + 4 + 10418 sizeof(struct scsi_get_config_feature) + 4 + 10419 sizeof(struct scsi_get_config_feature) + 4 + 10420 sizeof(struct scsi_get_config_feature) + 4 + 10421 sizeof(struct scsi_get_config_feature) + 4; 10422 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10423 ctsio->kern_sg_entries = 0; 10424 ctsio->kern_data_resid = 0; 10425 ctsio->kern_rel_offset = 0; 
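/*
 * Fill in the header and then emit the feature descriptors in ascending
 * feature code order.  The comparison cascade below honors the starting
 * feature number from the CDB by jumping into the middle of the ladder,
 * and the RT_SPECIFIC case at "done" trims the reply down to the single
 * requested feature.
 */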
10426 10427 hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr; 10428 if (lun->flags & CTL_LUN_NO_MEDIA) 10429 scsi_ulto2b(0x0000, hdr->current_profile); 10430 else 10431 scsi_ulto2b(0x0010, hdr->current_profile); 10432 feature = (struct scsi_get_config_feature *)(hdr + 1); 10433 10434 if (starting > 0x003b) 10435 goto done; 10436 if (starting > 0x003a) 10437 goto f3b; 10438 if (starting > 0x002b) 10439 goto f3a; 10440 if (starting > 0x002a) 10441 goto f2b; 10442 if (starting > 0x001f) 10443 goto f2a; 10444 if (starting > 0x001e) 10445 goto f1f; 10446 if (starting > 0x001d) 10447 goto f1e; 10448 if (starting > 0x0010) 10449 goto f1d; 10450 if (starting > 0x0003) 10451 goto f10; 10452 if (starting > 0x0002) 10453 goto f3; 10454 if (starting > 0x0001) 10455 goto f2; 10456 if (starting > 0x0000) 10457 goto f1; 10458 10459 /* Profile List */ 10460 scsi_ulto2b(0x0000, feature->feature_code); 10461 feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT; 10462 feature->add_length = 8; 10463 scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */ 10464 feature->feature_data[2] = 0x00; 10465 scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */ 10466 feature->feature_data[6] = 0x01; 10467 feature = (struct scsi_get_config_feature *) 10468 &feature->feature_data[feature->add_length]; 10469 10470 f1: /* Core */ 10471 scsi_ulto2b(0x0001, feature->feature_code); 10472 feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10473 feature->add_length = 8; 10474 scsi_ulto4b(0x00000000, &feature->feature_data[0]); 10475 feature->feature_data[4] = 0x03; 10476 feature = (struct scsi_get_config_feature *) 10477 &feature->feature_data[feature->add_length]; 10478 10479 f2: /* Morphing */ 10480 scsi_ulto2b(0x0002, feature->feature_code); 10481 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10482 feature->add_length = 4; 10483 feature->feature_data[0] = 0x02; 10484 feature = (struct scsi_get_config_feature *) 10485 &feature->feature_data[feature->add_length]; 10486 10487 f3: /* Removable Medium */ 10488 scsi_ulto2b(0x0003, feature->feature_code); 10489 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10490 feature->add_length = 4; 10491 feature->feature_data[0] = 0x39; 10492 feature = (struct scsi_get_config_feature *) 10493 &feature->feature_data[feature->add_length]; 10494 10495 if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA)) 10496 goto done; 10497 10498 f10: /* Random Read */ 10499 scsi_ulto2b(0x0010, feature->feature_code); 10500 feature->flags = 0x00; 10501 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10502 feature->flags |= SGC_F_CURRENT; 10503 feature->add_length = 8; 10504 scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]); 10505 scsi_ulto2b(1, &feature->feature_data[4]); 10506 feature->feature_data[6] = 0x00; 10507 feature = (struct scsi_get_config_feature *) 10508 &feature->feature_data[feature->add_length]; 10509 10510 f1d: /* Multi-Read */ 10511 scsi_ulto2b(0x001D, feature->feature_code); 10512 feature->flags = 0x00; 10513 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10514 feature->flags |= SGC_F_CURRENT; 10515 feature->add_length = 0; 10516 feature = (struct scsi_get_config_feature *) 10517 &feature->feature_data[feature->add_length]; 10518 10519 f1e: /* CD Read */ 10520 scsi_ulto2b(0x001E, feature->feature_code); 10521 feature->flags = 0x00; 10522 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10523 feature->flags |= SGC_F_CURRENT; 10524 feature->add_length = 4; 10525 feature->feature_data[0] = 0x00; 10526 feature = (struct scsi_get_config_feature 
*) 10527 &feature->feature_data[feature->add_length]; 10528 10529 f1f: /* DVD Read */ 10530 scsi_ulto2b(0x001F, feature->feature_code); 10531 feature->flags = 0x08; 10532 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10533 feature->flags |= SGC_F_CURRENT; 10534 feature->add_length = 4; 10535 feature->feature_data[0] = 0x01; 10536 feature->feature_data[2] = 0x03; 10537 feature = (struct scsi_get_config_feature *) 10538 &feature->feature_data[feature->add_length]; 10539 10540 f2a: /* DVD+RW */ 10541 scsi_ulto2b(0x002A, feature->feature_code); 10542 feature->flags = 0x04; 10543 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10544 feature->flags |= SGC_F_CURRENT; 10545 feature->add_length = 4; 10546 feature->feature_data[0] = 0x00; 10547 feature->feature_data[1] = 0x00; 10548 feature = (struct scsi_get_config_feature *) 10549 &feature->feature_data[feature->add_length]; 10550 10551 f2b: /* DVD+R */ 10552 scsi_ulto2b(0x002B, feature->feature_code); 10553 feature->flags = 0x00; 10554 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10555 feature->flags |= SGC_F_CURRENT; 10556 feature->add_length = 4; 10557 feature->feature_data[0] = 0x00; 10558 feature = (struct scsi_get_config_feature *) 10559 &feature->feature_data[feature->add_length]; 10560 10561 f3a: /* DVD+RW Dual Layer */ 10562 scsi_ulto2b(0x003A, feature->feature_code); 10563 feature->flags = 0x00; 10564 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10565 feature->flags |= SGC_F_CURRENT; 10566 feature->add_length = 4; 10567 feature->feature_data[0] = 0x00; 10568 feature->feature_data[1] = 0x00; 10569 feature = (struct scsi_get_config_feature *) 10570 &feature->feature_data[feature->add_length]; 10571 10572 f3b: /* DVD+R Dual Layer */ 10573 scsi_ulto2b(0x003B, feature->feature_code); 10574 feature->flags = 0x00; 10575 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10576 feature->flags |= SGC_F_CURRENT; 10577 feature->add_length = 4; 10578 feature->feature_data[0] = 0x00; 10579 feature = (struct scsi_get_config_feature *) 10580 &feature->feature_data[feature->add_length]; 10581 10582 done: 10583 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10584 if (rt == SGC_RT_SPECIFIC && data_len > 4) { 10585 feature = (struct scsi_get_config_feature *)(hdr + 1); 10586 if (scsi_2btoul(feature->feature_code) == starting) 10587 feature = (struct scsi_get_config_feature *) 10588 &feature->feature_data[feature->add_length]; 10589 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10590 } 10591 scsi_ulto4b(data_len - 4, hdr->data_length); 10592 if (data_len < alloc_len) { 10593 ctsio->residual = alloc_len - data_len; 10594 ctsio->kern_data_len = data_len; 10595 ctsio->kern_total_len = data_len; 10596 } else { 10597 ctsio->residual = 0; 10598 ctsio->kern_data_len = alloc_len; 10599 ctsio->kern_total_len = alloc_len; 10600 } 10601 10602 ctl_set_success(ctsio); 10603 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10604 ctsio->be_move_done = ctl_config_move_done; 10605 ctl_datamove((union ctl_io *)ctsio); 10606 return (CTL_RETVAL_COMPLETE); 10607 } 10608 10609 int 10610 ctl_get_event_status(struct ctl_scsiio *ctsio) 10611 { 10612 struct scsi_get_event_status_header *hdr; 10613 struct scsi_get_event_status *cdb; 10614 struct ctl_lun *lun; 10615 uint32_t alloc_len, data_len; 10616 int notif_class; 10617 10618 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10619 cdb = (struct scsi_get_event_status *)ctsio->cdb; 10620 if ((cdb->byte2 & SGESN_POLLED) == 0) { 10621 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 10622 /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 10623 
ctl_done((union ctl_io *)ctsio); 10624 return (CTL_RETVAL_COMPLETE); 10625 } 10626 notif_class = cdb->notif_class; 10627 alloc_len = scsi_2btoul(cdb->length); 10628 10629 data_len = sizeof(struct scsi_get_event_status_header); 10630 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10631 ctsio->kern_sg_entries = 0; 10632 ctsio->kern_data_resid = 0; 10633 ctsio->kern_rel_offset = 0; 10634 10635 if (data_len < alloc_len) { 10636 ctsio->residual = alloc_len - data_len; 10637 ctsio->kern_data_len = data_len; 10638 ctsio->kern_total_len = data_len; 10639 } else { 10640 ctsio->residual = 0; 10641 ctsio->kern_data_len = alloc_len; 10642 ctsio->kern_total_len = alloc_len; 10643 } 10644 10645 hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr; 10646 scsi_ulto2b(0, hdr->descr_length); 10647 hdr->nea_class = SGESN_NEA; 10648 hdr->supported_class = 0; 10649 10650 ctl_set_success(ctsio); 10651 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10652 ctsio->be_move_done = ctl_config_move_done; 10653 ctl_datamove((union ctl_io *)ctsio); 10654 return (CTL_RETVAL_COMPLETE); 10655 } 10656 10657 int 10658 ctl_mechanism_status(struct ctl_scsiio *ctsio) 10659 { 10660 struct scsi_mechanism_status_header *hdr; 10661 struct scsi_mechanism_status *cdb; 10662 struct ctl_lun *lun; 10663 uint32_t alloc_len, data_len; 10664 10665 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10666 cdb = (struct scsi_mechanism_status *)ctsio->cdb; 10667 alloc_len = scsi_2btoul(cdb->length); 10668 10669 data_len = sizeof(struct scsi_mechanism_status_header); 10670 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10671 ctsio->kern_sg_entries = 0; 10672 ctsio->kern_data_resid = 0; 10673 ctsio->kern_rel_offset = 0; 10674 10675 if (data_len < alloc_len) { 10676 ctsio->residual = alloc_len - data_len; 10677 ctsio->kern_data_len = data_len; 10678 ctsio->kern_total_len = data_len; 10679 } else { 10680 ctsio->residual = 0; 10681 ctsio->kern_data_len = alloc_len; 10682 ctsio->kern_total_len = alloc_len; 10683 } 10684 10685 hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr; 10686 hdr->state1 = 0x00; 10687 hdr->state2 = 0xe0; 10688 scsi_ulto3b(0, hdr->lba); 10689 hdr->slots_num = 0; 10690 scsi_ulto2b(0, hdr->slots_length); 10691 10692 ctl_set_success(ctsio); 10693 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10694 ctsio->be_move_done = ctl_config_move_done; 10695 ctl_datamove((union ctl_io *)ctsio); 10696 return (CTL_RETVAL_COMPLETE); 10697 } 10698 10699 static void 10700 ctl_ultomsf(uint32_t lba, uint8_t *buf) 10701 { 10702 10703 lba += 150; 10704 buf[0] = 0; 10705 buf[1] = bin2bcd((lba / 75) / 60); 10706 buf[2] = bin2bcd((lba / 75) % 60); 10707 buf[3] = bin2bcd(lba % 75); 10708 } 10709 10710 int 10711 ctl_read_toc(struct ctl_scsiio *ctsio) 10712 { 10713 struct scsi_read_toc_hdr *hdr; 10714 struct scsi_read_toc_type01_descr *descr; 10715 struct scsi_read_toc *cdb; 10716 struct ctl_lun *lun; 10717 uint32_t alloc_len, data_len; 10718 int format, msf; 10719 10720 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10721 cdb = (struct scsi_read_toc *)ctsio->cdb; 10722 msf = (cdb->byte2 & CD_MSF) != 0; 10723 format = cdb->format; 10724 alloc_len = scsi_2btoul(cdb->data_len); 10725 10726 data_len = sizeof(struct scsi_read_toc_hdr); 10727 if (format == 0) 10728 data_len += 2 * sizeof(struct scsi_read_toc_type01_descr); 10729 else 10730 data_len += sizeof(struct scsi_read_toc_type01_descr); 10731 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10732 ctsio->kern_sg_entries = 0; 
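/*
 * Format 0 returns a two entry TOC: data track 1 starting at LBA 0 and the
 * lead-out track (0xAA) at maxlba + 1; other formats return a single
 * descriptor for track 1.  When MSF addressing is requested, ctl_ultomsf()
 * adds the standard 150 frame (2 second) offset before converting at 75
 * frames per second, so LBA 0 becomes 00:02:00.
 */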
10733 ctsio->kern_data_resid = 0; 10734 ctsio->kern_rel_offset = 0; 10735 10736 if (data_len < alloc_len) { 10737 ctsio->residual = alloc_len - data_len; 10738 ctsio->kern_data_len = data_len; 10739 ctsio->kern_total_len = data_len; 10740 } else { 10741 ctsio->residual = 0; 10742 ctsio->kern_data_len = alloc_len; 10743 ctsio->kern_total_len = alloc_len; 10744 } 10745 10746 hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr; 10747 if (format == 0) { 10748 scsi_ulto2b(0x12, hdr->data_length); 10749 hdr->first = 1; 10750 hdr->last = 1; 10751 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10752 descr->addr_ctl = 0x14; 10753 descr->track_number = 1; 10754 if (msf) 10755 ctl_ultomsf(0, descr->track_start); 10756 else 10757 scsi_ulto4b(0, descr->track_start); 10758 descr++; 10759 descr->addr_ctl = 0x14; 10760 descr->track_number = 0xaa; 10761 if (msf) 10762 ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start); 10763 else 10764 scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start); 10765 } else { 10766 scsi_ulto2b(0x0a, hdr->data_length); 10767 hdr->first = 1; 10768 hdr->last = 1; 10769 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10770 descr->addr_ctl = 0x14; 10771 descr->track_number = 1; 10772 if (msf) 10773 ctl_ultomsf(0, descr->track_start); 10774 else 10775 scsi_ulto4b(0, descr->track_start); 10776 } 10777 10778 ctl_set_success(ctsio); 10779 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10780 ctsio->be_move_done = ctl_config_move_done; 10781 ctl_datamove((union ctl_io *)ctsio); 10782 return (CTL_RETVAL_COMPLETE); 10783 } 10784 10785 /* 10786 * For known CDB types, parse the LBA and length. 10787 */ 10788 static int 10789 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10790 { 10791 if (io->io_hdr.io_type != CTL_IO_SCSI) 10792 return (1); 10793 10794 switch (io->scsiio.cdb[0]) { 10795 case COMPARE_AND_WRITE: { 10796 struct scsi_compare_and_write *cdb; 10797 10798 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10799 10800 *lba = scsi_8btou64(cdb->addr); 10801 *len = cdb->length; 10802 break; 10803 } 10804 case READ_6: 10805 case WRITE_6: { 10806 struct scsi_rw_6 *cdb; 10807 10808 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10809 10810 *lba = scsi_3btoul(cdb->addr); 10811 /* only 5 bits are valid in the most significant address byte */ 10812 *lba &= 0x1fffff; 10813 *len = cdb->length; 10814 break; 10815 } 10816 case READ_10: 10817 case WRITE_10: { 10818 struct scsi_rw_10 *cdb; 10819 10820 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10821 10822 *lba = scsi_4btoul(cdb->addr); 10823 *len = scsi_2btoul(cdb->length); 10824 break; 10825 } 10826 case WRITE_VERIFY_10: { 10827 struct scsi_write_verify_10 *cdb; 10828 10829 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10830 10831 *lba = scsi_4btoul(cdb->addr); 10832 *len = scsi_2btoul(cdb->length); 10833 break; 10834 } 10835 case READ_12: 10836 case WRITE_12: { 10837 struct scsi_rw_12 *cdb; 10838 10839 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10840 10841 *lba = scsi_4btoul(cdb->addr); 10842 *len = scsi_4btoul(cdb->length); 10843 break; 10844 } 10845 case WRITE_VERIFY_12: { 10846 struct scsi_write_verify_12 *cdb; 10847 10848 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10849 10850 *lba = scsi_4btoul(cdb->addr); 10851 *len = scsi_4btoul(cdb->length); 10852 break; 10853 } 10854 case READ_16: 10855 case WRITE_16: { 10856 struct scsi_rw_16 *cdb; 10857 10858 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10859 10860 *lba = scsi_8btou64(cdb->addr); 10861 *len = scsi_4btoul(cdb->length); 10862 break; 
10863 } 10864 case WRITE_ATOMIC_16: { 10865 struct scsi_write_atomic_16 *cdb; 10866 10867 cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb; 10868 10869 *lba = scsi_8btou64(cdb->addr); 10870 *len = scsi_2btoul(cdb->length); 10871 break; 10872 } 10873 case WRITE_VERIFY_16: { 10874 struct scsi_write_verify_16 *cdb; 10875 10876 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10877 10878 *lba = scsi_8btou64(cdb->addr); 10879 *len = scsi_4btoul(cdb->length); 10880 break; 10881 } 10882 case WRITE_SAME_10: { 10883 struct scsi_write_same_10 *cdb; 10884 10885 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10886 10887 *lba = scsi_4btoul(cdb->addr); 10888 *len = scsi_2btoul(cdb->length); 10889 break; 10890 } 10891 case WRITE_SAME_16: { 10892 struct scsi_write_same_16 *cdb; 10893 10894 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10895 10896 *lba = scsi_8btou64(cdb->addr); 10897 *len = scsi_4btoul(cdb->length); 10898 break; 10899 } 10900 case VERIFY_10: { 10901 struct scsi_verify_10 *cdb; 10902 10903 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10904 10905 *lba = scsi_4btoul(cdb->addr); 10906 *len = scsi_2btoul(cdb->length); 10907 break; 10908 } 10909 case VERIFY_12: { 10910 struct scsi_verify_12 *cdb; 10911 10912 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10913 10914 *lba = scsi_4btoul(cdb->addr); 10915 *len = scsi_4btoul(cdb->length); 10916 break; 10917 } 10918 case VERIFY_16: { 10919 struct scsi_verify_16 *cdb; 10920 10921 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10922 10923 *lba = scsi_8btou64(cdb->addr); 10924 *len = scsi_4btoul(cdb->length); 10925 break; 10926 } 10927 case UNMAP: { 10928 *lba = 0; 10929 *len = UINT64_MAX; 10930 break; 10931 } 10932 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10933 struct scsi_get_lba_status *cdb; 10934 10935 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 10936 *lba = scsi_8btou64(cdb->addr); 10937 *len = UINT32_MAX; 10938 break; 10939 } 10940 default: 10941 return (1); 10942 break; /* NOTREACHED */ 10943 } 10944 10945 return (0); 10946 } 10947 10948 static ctl_action 10949 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, 10950 bool seq) 10951 { 10952 uint64_t endlba1, endlba2; 10953 10954 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10955 endlba2 = lba2 + len2 - 1; 10956 10957 if ((endlba1 < lba2) || (endlba2 < lba1)) 10958 return (CTL_ACTION_PASS); 10959 else 10960 return (CTL_ACTION_BLOCK); 10961 } 10962 10963 static int 10964 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10965 { 10966 struct ctl_ptr_len_flags *ptrlen; 10967 struct scsi_unmap_desc *buf, *end, *range; 10968 uint64_t lba; 10969 uint32_t len; 10970 10971 /* If not UNMAP -- go other way. */ 10972 if (io->io_hdr.io_type != CTL_IO_SCSI || 10973 io->scsiio.cdb[0] != UNMAP) 10974 return (CTL_ACTION_ERROR); 10975 10976 /* If UNMAP without data -- block and wait for data. */ 10977 ptrlen = (struct ctl_ptr_len_flags *) 10978 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10979 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10980 ptrlen->ptr == NULL) 10981 return (CTL_ACTION_BLOCK); 10982 10983 /* UNMAP with data -- check for collision. 
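 * A descriptor range [lba, lba + len) overlaps the request range
 * [lba2, lba2 + len2) exactly when lba < lba2 + len2 and lba + len > lba2,
 * which is the test applied to every descriptor below.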
*/ 10984 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10985 end = buf + ptrlen->len / sizeof(*buf); 10986 for (range = buf; range < end; range++) { 10987 lba = scsi_8btou64(range->lba); 10988 len = scsi_4btoul(range->length); 10989 if ((lba < lba2 + len2) && (lba + len > lba2)) 10990 return (CTL_ACTION_BLOCK); 10991 } 10992 return (CTL_ACTION_PASS); 10993 } 10994 10995 static ctl_action 10996 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10997 { 10998 uint64_t lba1, lba2; 10999 uint64_t len1, len2; 11000 int retval; 11001 11002 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 11003 return (CTL_ACTION_ERROR); 11004 11005 retval = ctl_extent_check_unmap(io1, lba2, len2); 11006 if (retval != CTL_ACTION_ERROR) 11007 return (retval); 11008 11009 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 11010 return (CTL_ACTION_ERROR); 11011 11012 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 11013 seq = FALSE; 11014 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 11015 } 11016 11017 static ctl_action 11018 ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) 11019 { 11020 uint64_t lba1, lba2; 11021 uint64_t len1, len2; 11022 11023 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 11024 return (CTL_ACTION_PASS); 11025 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 11026 return (CTL_ACTION_ERROR); 11027 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 11028 return (CTL_ACTION_ERROR); 11029 11030 if (lba1 + len1 == lba2) 11031 return (CTL_ACTION_BLOCK); 11032 return (CTL_ACTION_PASS); 11033 } 11034 11035 static ctl_action 11036 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 11037 union ctl_io *ooa_io) 11038 { 11039 const struct ctl_cmd_entry *pending_entry, *ooa_entry; 11040 const ctl_serialize_action *serialize_row; 11041 11042 /* 11043 * The initiator attempted multiple untagged commands at the same 11044 * time. Can't do that. 11045 */ 11046 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 11047 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 11048 && ((pending_io->io_hdr.nexus.targ_port == 11049 ooa_io->io_hdr.nexus.targ_port) 11050 && (pending_io->io_hdr.nexus.initid == 11051 ooa_io->io_hdr.nexus.initid)) 11052 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 11053 CTL_FLAG_STATUS_SENT)) == 0)) 11054 return (CTL_ACTION_OVERLAP); 11055 11056 /* 11057 * The initiator attempted to send multiple tagged commands with 11058 * the same ID. (It's fine if different initiators have the same 11059 * tag ID.) 11060 * 11061 * Even if all of those conditions are true, we don't kill the I/O 11062 * if the command ahead of us has been aborted. We won't end up 11063 * sending it to the FETD, and it's perfectly legal to resend a 11064 * command with the same tag number as long as the previous 11065 * instance of this tag number has been aborted somehow. 11066 */ 11067 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 11068 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 11069 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 11070 && ((pending_io->io_hdr.nexus.targ_port == 11071 ooa_io->io_hdr.nexus.targ_port) 11072 && (pending_io->io_hdr.nexus.initid == 11073 ooa_io->io_hdr.nexus.initid)) 11074 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 11075 CTL_FLAG_STATUS_SENT)) == 0)) 11076 return (CTL_ACTION_OVERLAP_TAG); 11077 11078 /* 11079 * If we get a head of queue tag, SAM-3 says that we should 11080 * immediately execute it. 11081 * 11082 * What happens if this command would normally block for some other 11083 * reason? e.g. 
a request sense with a head of queue tag 11084 * immediately after a write. Normally that would block, but this 11085 * will result in its getting executed immediately... 11086 * 11087 * We currently return "pass" instead of "skip", so we'll end up 11088 * going through the rest of the queue to check for overlapped tags. 11089 * 11090 * XXX KDM check for other types of blockage first?? 11091 */ 11092 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 11093 return (CTL_ACTION_PASS); 11094 11095 /* 11096 * Ordered tags have to block until all items ahead of them 11097 * have completed. If we get called with an ordered tag, we always 11098 * block, if something else is ahead of us in the queue. 11099 */ 11100 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 11101 return (CTL_ACTION_BLOCK); 11102 11103 /* 11104 * Simple tags get blocked until all head of queue and ordered tags 11105 * ahead of them have completed. I'm lumping untagged commands in 11106 * with simple tags here. XXX KDM is that the right thing to do? 11107 */ 11108 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 11109 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 11110 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 11111 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 11112 return (CTL_ACTION_BLOCK); 11113 11114 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); 11115 KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT, 11116 ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p", 11117 __func__, pending_entry->seridx, pending_io->scsiio.cdb[0], 11118 pending_io->scsiio.cdb[1], pending_io)); 11119 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); 11120 if (ooa_entry->seridx == CTL_SERIDX_INVLD) 11121 return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */ 11122 KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT, 11123 ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p", 11124 __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0], 11125 ooa_io->scsiio.cdb[1], ooa_io)); 11126 11127 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 11128 11129 switch (serialize_row[pending_entry->seridx]) { 11130 case CTL_SER_BLOCK: 11131 return (CTL_ACTION_BLOCK); 11132 case CTL_SER_EXTENT: 11133 return (ctl_extent_check(ooa_io, pending_io, 11134 (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 11135 case CTL_SER_EXTENTOPT: 11136 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != 11137 SCP_QUEUE_ALG_UNRESTRICTED) 11138 return (ctl_extent_check(ooa_io, pending_io, 11139 (lun->be_lun && 11140 lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 11141 return (CTL_ACTION_PASS); 11142 case CTL_SER_EXTENTSEQ: 11143 if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) 11144 return (ctl_extent_check_seq(ooa_io, pending_io)); 11145 return (CTL_ACTION_PASS); 11146 case CTL_SER_PASS: 11147 return (CTL_ACTION_PASS); 11148 case CTL_SER_BLOCKOPT: 11149 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != 11150 SCP_QUEUE_ALG_UNRESTRICTED) 11151 return (CTL_ACTION_BLOCK); 11152 return (CTL_ACTION_PASS); 11153 case CTL_SER_SKIP: 11154 return (CTL_ACTION_SKIP); 11155 default: 11156 panic("%s: Invalid serialization value %d for %d => %d", 11157 __func__, serialize_row[pending_entry->seridx], 11158 pending_entry->seridx, ooa_entry->seridx); 11159 } 11160 11161 return (CTL_ACTION_ERROR); 11162 } 11163 11164 /* 11165 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 
11166 * Assumptions: 11167 * - pending_io is generally either incoming, or on the blocked queue 11168 * - starting I/O is the I/O we want to start the check with. 11169 */ 11170 static ctl_action 11171 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 11172 union ctl_io *starting_io) 11173 { 11174 union ctl_io *ooa_io; 11175 ctl_action action; 11176 11177 mtx_assert(&lun->lun_lock, MA_OWNED); 11178 11179 /* 11180 * Run back along the OOA queue, starting with the current 11181 * blocked I/O and going through every I/O before it on the 11182 * queue. If starting_io is NULL, we'll just end up returning 11183 * CTL_ACTION_PASS. 11184 */ 11185 for (ooa_io = starting_io; ooa_io != NULL; 11186 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 11187 ooa_links)){ 11188 11189 /* 11190 * This routine just checks to see whether 11191 * cur_blocked is blocked by ooa_io, which is ahead 11192 * of it in the queue. It doesn't queue/dequeue 11193 * cur_blocked. 11194 */ 11195 action = ctl_check_for_blockage(lun, pending_io, ooa_io); 11196 switch (action) { 11197 case CTL_ACTION_BLOCK: 11198 case CTL_ACTION_OVERLAP: 11199 case CTL_ACTION_OVERLAP_TAG: 11200 case CTL_ACTION_SKIP: 11201 case CTL_ACTION_ERROR: 11202 return (action); 11203 break; /* NOTREACHED */ 11204 case CTL_ACTION_PASS: 11205 break; 11206 default: 11207 panic("%s: Invalid action %d\n", __func__, action); 11208 } 11209 } 11210 11211 return (CTL_ACTION_PASS); 11212 } 11213 11214 /* 11215 * Assumptions: 11216 * - An I/O has just completed, and has been removed from the per-LUN OOA 11217 * queue, so some items on the blocked queue may now be unblocked. 11218 */ 11219 static int 11220 ctl_check_blocked(struct ctl_lun *lun) 11221 { 11222 struct ctl_softc *softc = lun->ctl_softc; 11223 union ctl_io *cur_blocked, *next_blocked; 11224 11225 mtx_assert(&lun->lun_lock, MA_OWNED); 11226 11227 /* 11228 * Run forward from the head of the blocked queue, checking each 11229 * entry against the I/Os prior to it on the OOA queue to see if 11230 * there is still any blockage. 11231 * 11232 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 11233 * with our removing a variable on it while it is traversing the 11234 * list. 11235 */ 11236 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 11237 cur_blocked != NULL; cur_blocked = next_blocked) { 11238 union ctl_io *prev_ooa; 11239 ctl_action action; 11240 11241 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 11242 blocked_links); 11243 11244 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 11245 ctl_ooaq, ooa_links); 11246 11247 /* 11248 * If cur_blocked happens to be the first item in the OOA 11249 * queue now, prev_ooa will be NULL, and the action 11250 * returned will just be CTL_ACTION_PASS. 11251 */ 11252 action = ctl_check_ooa(lun, cur_blocked, prev_ooa); 11253 11254 switch (action) { 11255 case CTL_ACTION_BLOCK: 11256 /* Nothing to do here, still blocked */ 11257 break; 11258 case CTL_ACTION_OVERLAP: 11259 case CTL_ACTION_OVERLAP_TAG: 11260 /* 11261 * This shouldn't happen! In theory we've already 11262 * checked this command for overlap... 11263 */ 11264 break; 11265 case CTL_ACTION_PASS: 11266 case CTL_ACTION_SKIP: { 11267 const struct ctl_cmd_entry *entry; 11268 11269 /* 11270 * The skip case shouldn't happen, this transaction 11271 * should have never made it onto the blocked queue. 11272 */ 11273 /* 11274 * This I/O is no longer blocked, we can remove it 11275 * from the blocked queue. 
Since this is a TAILQ 11276 * (doubly linked list), we can do O(1) removals 11277 * from any place on the list. 11278 */ 11279 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 11280 blocked_links); 11281 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 11282 11283 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 11284 (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){ 11285 /* 11286 * Need to send IO back to original side to 11287 * run 11288 */ 11289 union ctl_ha_msg msg_info; 11290 11291 cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11292 msg_info.hdr.original_sc = 11293 cur_blocked->io_hdr.original_sc; 11294 msg_info.hdr.serializing_sc = cur_blocked; 11295 msg_info.hdr.msg_type = CTL_MSG_R2R; 11296 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11297 sizeof(msg_info.hdr), M_NOWAIT); 11298 break; 11299 } 11300 entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL); 11301 11302 /* 11303 * Check this I/O for LUN state changes that may 11304 * have happened while this command was blocked. 11305 * The LUN state may have been changed by a command 11306 * ahead of us in the queue, so we need to re-check 11307 * for any states that can be caused by SCSI 11308 * commands. 11309 */ 11310 if (ctl_scsiio_lun_check(lun, entry, 11311 &cur_blocked->scsiio) == 0) { 11312 cur_blocked->io_hdr.flags |= 11313 CTL_FLAG_IS_WAS_ON_RTR; 11314 ctl_enqueue_rtr(cur_blocked); 11315 } else 11316 ctl_done(cur_blocked); 11317 break; 11318 } 11319 default: 11320 /* 11321 * This probably shouldn't happen -- we shouldn't 11322 * get CTL_ACTION_ERROR, or anything else. 11323 */ 11324 break; 11325 } 11326 } 11327 11328 return (CTL_RETVAL_COMPLETE); 11329 } 11330 11331 /* 11332 * This routine (with one exception) checks LUN flags that can be set by 11333 * commands ahead of us in the OOA queue. These flags have to be checked 11334 * when a command initially comes in, and when we pull a command off the 11335 * blocked queue and are preparing to execute it. The reason we have to 11336 * check these flags for commands on the blocked queue is that the LUN 11337 * state may have been changed by a command ahead of us while we're on the 11338 * blocked queue. 11339 * 11340 * Ordering is somewhat important with these checks, so please pay 11341 * careful attention to the placement of any new checks. 11342 */ 11343 static int 11344 ctl_scsiio_lun_check(struct ctl_lun *lun, 11345 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 11346 { 11347 struct ctl_softc *softc = lun->ctl_softc; 11348 int retval; 11349 uint32_t residx; 11350 11351 retval = 0; 11352 11353 mtx_assert(&lun->lun_lock, MA_OWNED); 11354 11355 /* 11356 * If this shelf is a secondary shelf controller, we may have to 11357 * reject some commands disallowed by HA mode and link state. 
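	 * (LUN not available when the HA link is down, LUN in transition when
	 * the peer is not primary for it either, and LUN standby in
	 * active/standby mode, unless the command is explicitly allowed in
	 * those states.)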
11358 */ 11359 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11360 if (softc->ha_link == CTL_HA_LINK_OFFLINE && 11361 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11362 ctl_set_lun_unavail(ctsio); 11363 retval = 1; 11364 goto bailout; 11365 } 11366 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && 11367 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11368 ctl_set_lun_transit(ctsio); 11369 retval = 1; 11370 goto bailout; 11371 } 11372 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && 11373 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { 11374 ctl_set_lun_standby(ctsio); 11375 retval = 1; 11376 goto bailout; 11377 } 11378 11379 /* The rest of checks are only done on executing side */ 11380 if (softc->ha_mode == CTL_HA_MODE_XFER) 11381 goto bailout; 11382 } 11383 11384 if (entry->pattern & CTL_LUN_PAT_WRITE) { 11385 if (lun->be_lun && 11386 lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { 11387 ctl_set_hw_write_protected(ctsio); 11388 retval = 1; 11389 goto bailout; 11390 } 11391 if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) { 11392 ctl_set_sense(ctsio, /*current_error*/ 1, 11393 /*sense_key*/ SSD_KEY_DATA_PROTECT, 11394 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 11395 retval = 1; 11396 goto bailout; 11397 } 11398 } 11399 11400 /* 11401 * Check for a reservation conflict. If this command isn't allowed 11402 * even on reserved LUNs, and if this initiator isn't the one who 11403 * reserved us, reject the command with a reservation conflict. 11404 */ 11405 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11406 if ((lun->flags & CTL_LUN_RESERVED) 11407 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 11408 if (lun->res_idx != residx) { 11409 ctl_set_reservation_conflict(ctsio); 11410 retval = 1; 11411 goto bailout; 11412 } 11413 } 11414 11415 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 11416 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 11417 /* No reservation or command is allowed. */; 11418 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 11419 (lun->pr_res_type == SPR_TYPE_WR_EX || 11420 lun->pr_res_type == SPR_TYPE_WR_EX_RO || 11421 lun->pr_res_type == SPR_TYPE_WR_EX_AR)) { 11422 /* The command is allowed for Write Exclusive resv. */; 11423 } else { 11424 /* 11425 * if we aren't registered or it's a res holder type 11426 * reservation and this isn't the res holder then set a 11427 * conflict. 
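		 * (pr_res_type values below SPR_TYPE_WR_EX_RO are the
		 * single-holder Write Exclusive and Exclusive Access types;
		 * the registrants-only and all-registrants types let any
		 * registered initiator through.)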
		 */
		if (ctl_get_prkey(lun, residx) == 0 ||
		    (residx != lun->pr_res_idx && lun->pr_res_type < 4)) {
			ctl_set_reservation_conflict(ctsio);
			retval = 1;
			goto bailout;
		}
	}

	if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) {
		if (lun->flags & CTL_LUN_EJECTED)
			ctl_set_lun_ejected(ctsio);
		else if (lun->flags & CTL_LUN_NO_MEDIA) {
			if (lun->flags & CTL_LUN_REMOVABLE)
				ctl_set_lun_no_media(ctsio);
			else
				ctl_set_lun_int_reqd(ctsio);
		} else if (lun->flags & CTL_LUN_STOPPED)
			ctl_set_lun_stopped(ctsio);
		else
			goto bailout;
		retval = 1;
		goto bailout;
	}

bailout:
	return (retval);
}

static void
ctl_failover_io(union ctl_io *io, int have_lock)
{
	ctl_set_busy(&io->scsiio);
	ctl_done(io);
}

static void
ctl_failover_lun(union ctl_io *rio)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_lun *lun;
	struct ctl_io_hdr *io, *next_io;
	uint32_t targ_lun;

	targ_lun = rio->io_hdr.nexus.targ_mapped_lun;
	CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", targ_lun));

	/* Find and lock the LUN. */
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= CTL_MAX_LUNS ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}

	if (softc->ha_mode == CTL_HA_MODE_XFER) {
		TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
			/* We are master */
			if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
				if (io->flags & CTL_FLAG_IO_ACTIVE) {
					io->flags |= CTL_FLAG_ABORT;
					io->flags |= CTL_FLAG_FAILOVER;
				} else { /* This can only be due to DATAMOVE */
					io->msg_type = CTL_MSG_DATAMOVE_DONE;
					io->flags &= ~CTL_FLAG_DMA_INPROG;
					io->flags |= CTL_FLAG_IO_ACTIVE;
					io->port_status = 31340;
					ctl_enqueue_isc((union ctl_io *)io);
				}
			}
			/* We are slave */
			if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
				io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
				if (io->flags & CTL_FLAG_IO_ACTIVE) {
					io->flags |= CTL_FLAG_FAILOVER;
				} else {
					ctl_set_busy(&((union ctl_io *)io)->scsiio);
					ctl_done((union ctl_io *)io);
				}
			}
		}
	} else { /* SERIALIZE modes */
		TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links,
		    next_io) {
			/* We are master */
			if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
				TAILQ_REMOVE(&lun->blocked_queue, io,
				    blocked_links);
				io->flags &= ~CTL_FLAG_BLOCKED;
				TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
				ctl_free_io((union ctl_io *)io);
			}
		}
		TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
			/* We are master */
			if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
				TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
				ctl_free_io((union ctl_io *)io);
			}
			/* We are slave */
			if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
				io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
				if (!(io->flags & CTL_FLAG_IO_ACTIVE)) {
					ctl_set_busy(&((union ctl_io *)io)->scsiio);
					ctl_done((union ctl_io *)io);
				}
			}
		}
		ctl_check_blocked(lun);
	}
	mtx_unlock(&lun->lun_lock);
}

static int
ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	const struct ctl_cmd_entry *entry;
	uint32_t initidx, targ_lun;
	int retval = 0;

	lun = NULL;
	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	if (targ_lun < CTL_MAX_LUNS)
		lun = softc->ctl_luns[targ_lun];
	if (lun) {
		/*
		 * If the LUN is invalid, pretend that it doesn't exist.
		 * It will go away as soon as all pending I/O has been
		 * completed.
		 */
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_DISABLED) {
			mtx_unlock(&lun->lun_lock);
			lun = NULL;
		}
	}
	ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
	if (lun) {
		ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
		    lun->be_lun;

		/*
		 * Every I/O goes into the OOA queue for a particular LUN,
		 * and stays there until completion.
		 */
#ifdef CTL_TIME_IO
		if (TAILQ_EMPTY(&lun->ooa_queue))
			lun->idle_time += getsbinuptime() - lun->last_busy;
#endif
		TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
	}

	/* Get the command entry and return an error if the command is unsupported. */
	entry = ctl_validate_command(ctsio);
	if (entry == NULL) {
		if (lun)
			mtx_unlock(&lun->lun_lock);
		return (retval);
	}

	ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
	ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;

	/*
	 * Check to see whether we can send this command to LUNs that don't
	 * exist.  This should pretty much only be the case for inquiry
	 * and request sense.  Further checks, below, really require having
	 * a LUN, so we can't really check the command anymore.  Just put
	 * it on the rtr queue.
	 */
	if (lun == NULL) {
		if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) {
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
			return (retval);
		}

		ctl_set_unsupported_lun(ctsio);
		ctl_done((union ctl_io *)ctsio);
		CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
		return (retval);
	} else {
		/*
		 * Make sure we support this particular command on this LUN.
		 * e.g., we don't support writes to the control LUN.
		 */
		if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
			mtx_unlock(&lun->lun_lock);
			ctl_set_invalid_opcode(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
	}

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);

#ifdef CTL_WITH_CA
	/*
	 * If we've got a request sense, it'll clear the contingent
	 * allegiance condition.  Otherwise, if we have a CA condition for
	 * this initiator, clear it, because it sent down a command other
	 * than request sense.
	 */
	if ((ctsio->cdb[0] != REQUEST_SENSE)
	 && (ctl_is_set(lun->have_ca, initidx)))
		ctl_clear_mask(lun->have_ca, initidx);
#endif

	/*
	 * If the command has this flag set, it handles its own unit
	 * attention reporting, so we shouldn't do anything.  Otherwise we
	 * check for any pending unit attentions, and send them back to the
	 * initiator.  We only do this when a command initially comes in,
	 * not when we pull it off the blocked queue.
	 *
	 * According to SAM-3, section 5.3.2, the order that things get
	 * presented back to the host is basically unit attentions caused
	 * by some sort of reset event, busy status, reservation conflicts
	 * or task set full, and finally any other status.
	 *
	 * One issue here is that some of the unit attentions we report
	 * don't fall into the "reset" category (e.g. "reported luns data
	 * has changed").  So reporting it here, before the reservation
	 * check, may be technically wrong.  I guess the only thing to do
	 * would be to check for and report the reset events here, and then
	 * check for the other unit attention types after we check for a
	 * reservation conflict.
	 *
	 * XXX KDM need to fix this
	 */
	if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
		ctl_ua_type ua_type;
		u_int sense_len = 0;

		ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data,
		    &sense_len, SSD_TYPE_NONE);
		if (ua_type != CTL_UA_NONE) {
			mtx_unlock(&lun->lun_lock);
			ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
			ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
			ctsio->sense_len = sense_len;
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
	}

	if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
		mtx_unlock(&lun->lun_lock);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}

	/*
	 * XXX CHD this is where we want to send IO to the other side if
	 * this LUN is secondary on this SC.  We will need to make a copy
	 * of the IO, flag the IO on this side as SENT_2OTHER and flag
	 * the copy we send as FROM_OTHER.
	 * We also need to stuff the address of the original IO so we can
	 * find it easily.  Something similar will need to be done on the
	 * other side so when we are done we can find the copy.
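	 * In practice this happens just below: the I/O is flagged
	 * CTL_FLAG_SENT_2OTHER_SC and a CTL_MSG_SERIALIZE message carrying
	 * the original ctsio pointer is sent to the peer controller.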
11698 */ 11699 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 11700 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && 11701 (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { 11702 union ctl_ha_msg msg_info; 11703 int isc_retval; 11704 11705 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11706 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11707 mtx_unlock(&lun->lun_lock); 11708 11709 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11710 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11711 msg_info.hdr.serializing_sc = NULL; 11712 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11713 msg_info.scsi.tag_num = ctsio->tag_num; 11714 msg_info.scsi.tag_type = ctsio->tag_type; 11715 msg_info.scsi.cdb_len = ctsio->cdb_len; 11716 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11717 11718 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11719 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), 11720 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { 11721 ctl_set_busy(ctsio); 11722 ctl_done((union ctl_io *)ctsio); 11723 return (retval); 11724 } 11725 return (retval); 11726 } 11727 11728 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11729 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11730 ctl_ooaq, ooa_links))) { 11731 case CTL_ACTION_BLOCK: 11732 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 11733 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 11734 blocked_links); 11735 mtx_unlock(&lun->lun_lock); 11736 return (retval); 11737 case CTL_ACTION_PASS: 11738 case CTL_ACTION_SKIP: 11739 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11740 mtx_unlock(&lun->lun_lock); 11741 ctl_enqueue_rtr((union ctl_io *)ctsio); 11742 break; 11743 case CTL_ACTION_OVERLAP: 11744 mtx_unlock(&lun->lun_lock); 11745 ctl_set_overlapped_cmd(ctsio); 11746 ctl_done((union ctl_io *)ctsio); 11747 break; 11748 case CTL_ACTION_OVERLAP_TAG: 11749 mtx_unlock(&lun->lun_lock); 11750 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11751 ctl_done((union ctl_io *)ctsio); 11752 break; 11753 case CTL_ACTION_ERROR: 11754 default: 11755 mtx_unlock(&lun->lun_lock); 11756 ctl_set_internal_failure(ctsio, 11757 /*sks_valid*/ 0, 11758 /*retry_count*/ 0); 11759 ctl_done((union ctl_io *)ctsio); 11760 break; 11761 } 11762 return (retval); 11763 } 11764 11765 const struct ctl_cmd_entry * 11766 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11767 { 11768 const struct ctl_cmd_entry *entry; 11769 int service_action; 11770 11771 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11772 if (sa) 11773 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11774 if (entry->flags & CTL_CMD_FLAG_SA5) { 11775 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11776 entry = &((const struct ctl_cmd_entry *) 11777 entry->execute)[service_action]; 11778 } 11779 return (entry); 11780 } 11781 11782 const struct ctl_cmd_entry * 11783 ctl_validate_command(struct ctl_scsiio *ctsio) 11784 { 11785 const struct ctl_cmd_entry *entry; 11786 int i, sa; 11787 uint8_t diff; 11788 11789 entry = ctl_get_cmd_entry(ctsio, &sa); 11790 if (entry->execute == NULL) { 11791 if (sa) 11792 ctl_set_invalid_field(ctsio, 11793 /*sks_valid*/ 1, 11794 /*command*/ 1, 11795 /*field*/ 1, 11796 /*bit_valid*/ 1, 11797 /*bit*/ 4); 11798 else 11799 ctl_set_invalid_opcode(ctsio); 11800 ctl_done((union ctl_io *)ctsio); 11801 return (NULL); 11802 } 11803 KASSERT(entry->length > 0, 11804 ("Not defined length for command 0x%02x/0x%02x", 11805 ctsio->cdb[0], ctsio->cdb[1])); 11806 for (i = 1; i < entry->length; i++) { 11807 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11808 if (diff == 0) 11809 continue; 11810 
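		/*
		 * Some bit not permitted by the command's usage mask is set;
		 * report the offending CDB byte and the highest such bit.
		 */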
ctl_set_invalid_field(ctsio, 11811 /*sks_valid*/ 1, 11812 /*command*/ 1, 11813 /*field*/ i, 11814 /*bit_valid*/ 1, 11815 /*bit*/ fls(diff) - 1); 11816 ctl_done((union ctl_io *)ctsio); 11817 return (NULL); 11818 } 11819 return (entry); 11820 } 11821 11822 static int 11823 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11824 { 11825 11826 switch (lun_type) { 11827 case T_DIRECT: 11828 if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0) 11829 return (0); 11830 break; 11831 case T_PROCESSOR: 11832 if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) 11833 return (0); 11834 break; 11835 case T_CDROM: 11836 if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0) 11837 return (0); 11838 break; 11839 default: 11840 return (0); 11841 } 11842 return (1); 11843 } 11844 11845 static int 11846 ctl_scsiio(struct ctl_scsiio *ctsio) 11847 { 11848 int retval; 11849 const struct ctl_cmd_entry *entry; 11850 11851 retval = CTL_RETVAL_COMPLETE; 11852 11853 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11854 11855 entry = ctl_get_cmd_entry(ctsio, NULL); 11856 11857 /* 11858 * If this I/O has been aborted, just send it straight to 11859 * ctl_done() without executing it. 11860 */ 11861 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11862 ctl_done((union ctl_io *)ctsio); 11863 goto bailout; 11864 } 11865 11866 /* 11867 * All the checks should have been handled by ctl_scsiio_precheck(). 11868 * We should be clear now to just execute the I/O. 11869 */ 11870 retval = entry->execute(ctsio); 11871 11872 bailout: 11873 return (retval); 11874 } 11875 11876 /* 11877 * Since we only implement one target right now, a bus reset simply resets 11878 * our single target. 11879 */ 11880 static int 11881 ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io) 11882 { 11883 return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET)); 11884 } 11885 11886 static int 11887 ctl_target_reset(struct ctl_softc *softc, union ctl_io *io, 11888 ctl_ua_type ua_type) 11889 { 11890 struct ctl_port *port; 11891 struct ctl_lun *lun; 11892 int retval; 11893 11894 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11895 union ctl_ha_msg msg_info; 11896 11897 msg_info.hdr.nexus = io->io_hdr.nexus; 11898 if (ua_type==CTL_UA_TARG_RESET) 11899 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 11900 else 11901 msg_info.task.task_action = CTL_TASK_BUS_RESET; 11902 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11903 msg_info.hdr.original_sc = NULL; 11904 msg_info.hdr.serializing_sc = NULL; 11905 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11906 sizeof(msg_info.task), M_WAITOK); 11907 } 11908 retval = 0; 11909 11910 mtx_lock(&softc->ctl_lock); 11911 port = ctl_io_port(&io->io_hdr); 11912 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11913 if (port != NULL && 11914 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 11915 continue; 11916 retval += ctl_do_lun_reset(lun, io, ua_type); 11917 } 11918 mtx_unlock(&softc->ctl_lock); 11919 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11920 return (retval); 11921 } 11922 11923 /* 11924 * The LUN should always be set. The I/O is optional, and is used to 11925 * distinguish between I/Os sent by this initiator, and by other 11926 * initiators. We set unit attention for initiators other than this one. 11927 * SAM-3 is vague on this point. 
 * It does say that a unit attention should
 * be established for other initiators when a LUN is reset (see section
 * 5.7.3), but it doesn't specifically say that the unit attention should
 * be established for this particular initiator when a LUN is reset.  Here
 * is the relevant text, from SAM-3 rev 8:
 *
 * 5.7.2 When a SCSI initiator port aborts its own tasks
 *
 * When a SCSI initiator port causes its own task(s) to be aborted, no
 * notification that the task(s) have been aborted shall be returned to
 * the SCSI initiator port other than the completion response for the
 * command or task management function action that caused the task(s) to
 * be aborted and notification(s) associated with related effects of the
 * action (e.g., a reset unit attention condition).
 *
 * XXX KDM for now, we're setting unit attention for all initiators.
 */
static int
ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
{
	union ctl_io *xio;
#if 0
	uint32_t initidx;
#endif
	int i;

	mtx_lock(&lun->lun_lock);
	/*
	 * Run through the OOA queue and abort each I/O.
	 */
	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
		xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
	}

	/*
	 * This version sets unit attention for every initiator, per the
	 * XXX KDM note above; the disabled variant would pass the index
	 * of the resetting initiator to ctl_est_ua_all() instead.
	 */
#if 0
	initidx = ctl_get_initindex(&io->io_hdr.nexus);
	ctl_est_ua_all(lun, initidx, ua_type);
#else
	ctl_est_ua_all(lun, -1, ua_type);
#endif

	/*
	 * A reset (any kind, really) clears reservations established with
	 * RESERVE/RELEASE.  It does not clear reservations established
	 * with PERSISTENT RESERVE OUT, but we don't support that at the
	 * moment anyway.  See SPC-2, section 5.6.  SPC-3 doesn't address
	 * reservations made with the RESERVE/RELEASE commands, because
	 * those commands are obsolete in SPC-3.
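	 * The code below also clears any PREVENT ALLOW MEDIUM REMOVAL state
	 * (prevent_count and the per-initiator prevent mask) held against
	 * the LUN.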
11979 */ 11980 lun->flags &= ~CTL_LUN_RESERVED; 11981 11982 #ifdef CTL_WITH_CA 11983 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11984 ctl_clear_mask(lun->have_ca, i); 11985 #endif 11986 lun->prevent_count = 0; 11987 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11988 ctl_clear_mask(lun->prevent, i); 11989 mtx_unlock(&lun->lun_lock); 11990 11991 return (0); 11992 } 11993 11994 static int 11995 ctl_lun_reset(struct ctl_softc *softc, union ctl_io *io) 11996 { 11997 struct ctl_lun *lun; 11998 uint32_t targ_lun; 11999 int retval; 12000 12001 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12002 mtx_lock(&softc->ctl_lock); 12003 if (targ_lun >= CTL_MAX_LUNS || 12004 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12005 mtx_unlock(&softc->ctl_lock); 12006 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12007 return (1); 12008 } 12009 retval = ctl_do_lun_reset(lun, io, CTL_UA_LUN_RESET); 12010 mtx_unlock(&softc->ctl_lock); 12011 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12012 12013 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { 12014 union ctl_ha_msg msg_info; 12015 12016 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12017 msg_info.hdr.nexus = io->io_hdr.nexus; 12018 msg_info.task.task_action = CTL_TASK_LUN_RESET; 12019 msg_info.hdr.original_sc = NULL; 12020 msg_info.hdr.serializing_sc = NULL; 12021 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 12022 sizeof(msg_info.task), M_WAITOK); 12023 } 12024 return (retval); 12025 } 12026 12027 static void 12028 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 12029 int other_sc) 12030 { 12031 union ctl_io *xio; 12032 12033 mtx_assert(&lun->lun_lock, MA_OWNED); 12034 12035 /* 12036 * Run through the OOA queue and attempt to find the given I/O. 12037 * The target port, initiator ID, tag type and tag number have to 12038 * match the values that we got from the initiator. If we have an 12039 * untagged command to abort, simply abort the first untagged command 12040 * we come to. We only allow one untagged command at a time of course. 12041 */ 12042 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12043 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12044 12045 if ((targ_port == UINT32_MAX || 12046 targ_port == xio->io_hdr.nexus.targ_port) && 12047 (init_id == UINT32_MAX || 12048 init_id == xio->io_hdr.nexus.initid)) { 12049 if (targ_port != xio->io_hdr.nexus.targ_port || 12050 init_id != xio->io_hdr.nexus.initid) 12051 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; 12052 xio->io_hdr.flags |= CTL_FLAG_ABORT; 12053 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 12054 union ctl_ha_msg msg_info; 12055 12056 msg_info.hdr.nexus = xio->io_hdr.nexus; 12057 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 12058 msg_info.task.tag_num = xio->scsiio.tag_num; 12059 msg_info.task.tag_type = xio->scsiio.tag_type; 12060 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12061 msg_info.hdr.original_sc = NULL; 12062 msg_info.hdr.serializing_sc = NULL; 12063 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 12064 sizeof(msg_info.task), M_NOWAIT); 12065 } 12066 } 12067 } 12068 } 12069 12070 static int 12071 ctl_abort_task_set(union ctl_io *io) 12072 { 12073 struct ctl_softc *softc = control_softc; 12074 struct ctl_lun *lun; 12075 uint32_t targ_lun; 12076 12077 /* 12078 * Look up the LUN. 
12079 */ 12080 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12081 mtx_lock(&softc->ctl_lock); 12082 if (targ_lun >= CTL_MAX_LUNS || 12083 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12084 mtx_unlock(&softc->ctl_lock); 12085 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12086 return (1); 12087 } 12088 12089 mtx_lock(&lun->lun_lock); 12090 mtx_unlock(&softc->ctl_lock); 12091 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 12092 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 12093 io->io_hdr.nexus.initid, 12094 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 12095 } else { /* CTL_TASK_CLEAR_TASK_SET */ 12096 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 12097 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 12098 } 12099 mtx_unlock(&lun->lun_lock); 12100 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12101 return (0); 12102 } 12103 12104 static int 12105 ctl_i_t_nexus_reset(union ctl_io *io) 12106 { 12107 struct ctl_softc *softc = control_softc; 12108 struct ctl_lun *lun; 12109 uint32_t initidx; 12110 12111 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12112 union ctl_ha_msg msg_info; 12113 12114 msg_info.hdr.nexus = io->io_hdr.nexus; 12115 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; 12116 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12117 msg_info.hdr.original_sc = NULL; 12118 msg_info.hdr.serializing_sc = NULL; 12119 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 12120 sizeof(msg_info.task), M_WAITOK); 12121 } 12122 12123 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12124 mtx_lock(&softc->ctl_lock); 12125 STAILQ_FOREACH(lun, &softc->lun_list, links) { 12126 mtx_lock(&lun->lun_lock); 12127 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 12128 io->io_hdr.nexus.initid, 1); 12129 #ifdef CTL_WITH_CA 12130 ctl_clear_mask(lun->have_ca, initidx); 12131 #endif 12132 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) 12133 lun->flags &= ~CTL_LUN_RESERVED; 12134 if (ctl_is_set(lun->prevent, initidx)) { 12135 ctl_clear_mask(lun->prevent, initidx); 12136 lun->prevent_count--; 12137 } 12138 ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS); 12139 mtx_unlock(&lun->lun_lock); 12140 } 12141 mtx_unlock(&softc->ctl_lock); 12142 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12143 return (0); 12144 } 12145 12146 static int 12147 ctl_abort_task(union ctl_io *io) 12148 { 12149 union ctl_io *xio; 12150 struct ctl_lun *lun; 12151 struct ctl_softc *softc; 12152 #if 0 12153 struct sbuf sb; 12154 char printbuf[128]; 12155 #endif 12156 int found; 12157 uint32_t targ_lun; 12158 12159 softc = control_softc; 12160 found = 0; 12161 12162 /* 12163 * Look up the LUN. 12164 */ 12165 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12166 mtx_lock(&softc->ctl_lock); 12167 if (targ_lun >= CTL_MAX_LUNS || 12168 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12169 mtx_unlock(&softc->ctl_lock); 12170 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12171 return (1); 12172 } 12173 12174 #if 0 12175 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 12176 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 12177 #endif 12178 12179 mtx_lock(&lun->lun_lock); 12180 mtx_unlock(&softc->ctl_lock); 12181 /* 12182 * Run through the OOA queue and attempt to find the given I/O. 12183 * The target port, initiator ID, tag type and tag number have to 12184 * match the values that we got from the initiator. If we have an 12185 * untagged command to abort, simply abort the first untagged command 12186 * we come to. 
We only allow one untagged command at a time of course. 12187 */ 12188 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12189 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12190 #if 0 12191 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 12192 12193 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 12194 lun->lun, xio->scsiio.tag_num, 12195 xio->scsiio.tag_type, 12196 (xio->io_hdr.blocked_links.tqe_prev 12197 == NULL) ? "" : " BLOCKED", 12198 (xio->io_hdr.flags & 12199 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 12200 (xio->io_hdr.flags & 12201 CTL_FLAG_ABORT) ? " ABORT" : "", 12202 (xio->io_hdr.flags & 12203 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); 12204 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 12205 sbuf_finish(&sb); 12206 printf("%s\n", sbuf_data(&sb)); 12207 #endif 12208 12209 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 12210 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 12211 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 12212 continue; 12213 12214 /* 12215 * If the abort says that the task is untagged, the 12216 * task in the queue must be untagged. Otherwise, 12217 * we just check to see whether the tag numbers 12218 * match. This is because the QLogic firmware 12219 * doesn't pass back the tag type in an abort 12220 * request. 12221 */ 12222 #if 0 12223 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 12224 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 12225 || (xio->scsiio.tag_num == io->taskio.tag_num)) 12226 #endif 12227 /* 12228 * XXX KDM we've got problems with FC, because it 12229 * doesn't send down a tag type with aborts. So we 12230 * can only really go by the tag number... 12231 * This may cause problems with parallel SCSI. 12232 * Need to figure that out!! 12233 */ 12234 if (xio->scsiio.tag_num == io->taskio.tag_num) { 12235 xio->io_hdr.flags |= CTL_FLAG_ABORT; 12236 found = 1; 12237 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 12238 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 12239 union ctl_ha_msg msg_info; 12240 12241 msg_info.hdr.nexus = io->io_hdr.nexus; 12242 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 12243 msg_info.task.tag_num = io->taskio.tag_num; 12244 msg_info.task.tag_type = io->taskio.tag_type; 12245 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12246 msg_info.hdr.original_sc = NULL; 12247 msg_info.hdr.serializing_sc = NULL; 12248 #if 0 12249 printf("Sent Abort to other side\n"); 12250 #endif 12251 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 12252 sizeof(msg_info.task), M_NOWAIT); 12253 } 12254 #if 0 12255 printf("ctl_abort_task: found I/O to abort\n"); 12256 #endif 12257 } 12258 } 12259 mtx_unlock(&lun->lun_lock); 12260 12261 if (found == 0) { 12262 /* 12263 * This isn't really an error. It's entirely possible for 12264 * the abort and command completion to cross on the wire. 12265 * This is more of an informative/diagnostic error. 
12266 */ 12267 #if 0 12268 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " 12269 "%u:%u:%u tag %d type %d\n", 12270 io->io_hdr.nexus.initid, 12271 io->io_hdr.nexus.targ_port, 12272 io->io_hdr.nexus.targ_lun, io->taskio.tag_num, 12273 io->taskio.tag_type); 12274 #endif 12275 } 12276 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12277 return (0); 12278 } 12279 12280 static int 12281 ctl_query_task(union ctl_io *io, int task_set) 12282 { 12283 union ctl_io *xio; 12284 struct ctl_lun *lun; 12285 struct ctl_softc *softc; 12286 int found = 0; 12287 uint32_t targ_lun; 12288 12289 softc = control_softc; 12290 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12291 mtx_lock(&softc->ctl_lock); 12292 if (targ_lun >= CTL_MAX_LUNS || 12293 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12294 mtx_unlock(&softc->ctl_lock); 12295 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12296 return (1); 12297 } 12298 mtx_lock(&lun->lun_lock); 12299 mtx_unlock(&softc->ctl_lock); 12300 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12301 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12302 12303 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 12304 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 12305 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 12306 continue; 12307 12308 if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) { 12309 found = 1; 12310 break; 12311 } 12312 } 12313 mtx_unlock(&lun->lun_lock); 12314 if (found) 12315 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12316 else 12317 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12318 return (0); 12319 } 12320 12321 static int 12322 ctl_query_async_event(union ctl_io *io) 12323 { 12324 struct ctl_lun *lun; 12325 struct ctl_softc *softc; 12326 ctl_ua_type ua; 12327 uint32_t targ_lun, initidx; 12328 12329 softc = control_softc; 12330 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12331 mtx_lock(&softc->ctl_lock); 12332 if (targ_lun >= CTL_MAX_LUNS || 12333 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12334 mtx_unlock(&softc->ctl_lock); 12335 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12336 return (1); 12337 } 12338 mtx_lock(&lun->lun_lock); 12339 mtx_unlock(&softc->ctl_lock); 12340 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12341 ua = ctl_build_qae(lun, initidx, io->taskio.task_resp); 12342 mtx_unlock(&lun->lun_lock); 12343 if (ua != CTL_UA_NONE) 12344 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12345 else 12346 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12347 return (0); 12348 } 12349 12350 static void 12351 ctl_run_task(union ctl_io *io) 12352 { 12353 struct ctl_softc *softc = control_softc; 12354 int retval = 1; 12355 12356 CTL_DEBUG_PRINT(("ctl_run_task\n")); 12357 KASSERT(io->io_hdr.io_type == CTL_IO_TASK, 12358 ("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type)); 12359 io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED; 12360 bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp)); 12361 switch (io->taskio.task_action) { 12362 case CTL_TASK_ABORT_TASK: 12363 retval = ctl_abort_task(io); 12364 break; 12365 case CTL_TASK_ABORT_TASK_SET: 12366 case CTL_TASK_CLEAR_TASK_SET: 12367 retval = ctl_abort_task_set(io); 12368 break; 12369 case CTL_TASK_CLEAR_ACA: 12370 break; 12371 case CTL_TASK_I_T_NEXUS_RESET: 12372 retval = ctl_i_t_nexus_reset(io); 12373 break; 12374 case CTL_TASK_LUN_RESET: 12375 retval = ctl_lun_reset(softc, io); 12376 break; 12377 case CTL_TASK_TARGET_RESET: 12378 retval = ctl_target_reset(softc, io, 
CTL_UA_TARG_RESET); 12379 break; 12380 case CTL_TASK_BUS_RESET: 12381 retval = ctl_bus_reset(softc, io); 12382 break; 12383 case CTL_TASK_PORT_LOGIN: 12384 break; 12385 case CTL_TASK_PORT_LOGOUT: 12386 break; 12387 case CTL_TASK_QUERY_TASK: 12388 retval = ctl_query_task(io, 0); 12389 break; 12390 case CTL_TASK_QUERY_TASK_SET: 12391 retval = ctl_query_task(io, 1); 12392 break; 12393 case CTL_TASK_QUERY_ASYNC_EVENT: 12394 retval = ctl_query_async_event(io); 12395 break; 12396 default: 12397 printf("%s: got unknown task management event %d\n", 12398 __func__, io->taskio.task_action); 12399 break; 12400 } 12401 if (retval == 0) 12402 io->io_hdr.status = CTL_SUCCESS; 12403 else 12404 io->io_hdr.status = CTL_ERROR; 12405 ctl_done(io); 12406 } 12407 12408 /* 12409 * For HA operation. Handle commands that come in from the other 12410 * controller. 12411 */ 12412 static void 12413 ctl_handle_isc(union ctl_io *io) 12414 { 12415 struct ctl_softc *softc = control_softc; 12416 struct ctl_lun *lun; 12417 const struct ctl_cmd_entry *entry; 12418 uint32_t targ_lun; 12419 12420 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12421 switch (io->io_hdr.msg_type) { 12422 case CTL_MSG_SERIALIZE: 12423 ctl_serialize_other_sc_cmd(&io->scsiio); 12424 break; 12425 case CTL_MSG_R2R: /* Only used in SER_ONLY mode. */ 12426 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 12427 if (targ_lun >= CTL_MAX_LUNS || 12428 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12429 ctl_done(io); 12430 break; 12431 } 12432 mtx_lock(&lun->lun_lock); 12433 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { 12434 mtx_unlock(&lun->lun_lock); 12435 ctl_done(io); 12436 break; 12437 } 12438 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12439 mtx_unlock(&lun->lun_lock); 12440 ctl_enqueue_rtr(io); 12441 break; 12442 case CTL_MSG_FINISH_IO: 12443 if (softc->ha_mode == CTL_HA_MODE_XFER) { 12444 ctl_done(io); 12445 break; 12446 } 12447 if (targ_lun >= CTL_MAX_LUNS || 12448 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12449 ctl_free_io(io); 12450 break; 12451 } 12452 mtx_lock(&lun->lun_lock); 12453 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 12454 ctl_check_blocked(lun); 12455 mtx_unlock(&lun->lun_lock); 12456 ctl_free_io(io); 12457 break; 12458 case CTL_MSG_PERS_ACTION: 12459 ctl_hndl_per_res_out_on_other_sc( 12460 (union ctl_ha_msg *)&io->presio.pr_msg); 12461 ctl_free_io(io); 12462 break; 12463 case CTL_MSG_BAD_JUJU: 12464 ctl_done(io); 12465 break; 12466 case CTL_MSG_DATAMOVE: /* Only used in XFER mode */ 12467 ctl_datamove_remote(io); 12468 break; 12469 case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */ 12470 io->scsiio.be_move_done(io); 12471 break; 12472 case CTL_MSG_FAILOVER: 12473 ctl_failover_lun(io); 12474 ctl_free_io(io); 12475 break; 12476 default: 12477 printf("%s: Invalid message type %d\n", 12478 __func__, io->io_hdr.msg_type); 12479 ctl_free_io(io); 12480 break; 12481 } 12482 12483 } 12484 12485 12486 /* 12487 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12488 * there is no match. 12489 */ 12490 static ctl_lun_error_pattern 12491 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12492 { 12493 const struct ctl_cmd_entry *entry; 12494 ctl_lun_error_pattern filtered_pattern, pattern; 12495 12496 pattern = desc->error_pattern; 12497 12498 /* 12499 * XXX KDM we need more data passed into this function to match a 12500 * custom pattern, and we actually need to implement custom pattern 12501 * matching. 
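	 * Until that is done, a descriptor that sets CTL_LUN_PAT_CMD matches
	 * any command unconditionally.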
12502 */ 12503 if (pattern & CTL_LUN_PAT_CMD) 12504 return (CTL_LUN_PAT_CMD); 12505 12506 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12507 return (CTL_LUN_PAT_ANY); 12508 12509 entry = ctl_get_cmd_entry(ctsio, NULL); 12510 12511 filtered_pattern = entry->pattern & pattern; 12512 12513 /* 12514 * If the user requested specific flags in the pattern (e.g. 12515 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12516 * flags. 12517 * 12518 * If the user did not specify any flags, it doesn't matter whether 12519 * or not the command supports the flags. 12520 */ 12521 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12522 (pattern & ~CTL_LUN_PAT_MASK)) 12523 return (CTL_LUN_PAT_NONE); 12524 12525 /* 12526 * If the user asked for a range check, see if the requested LBA 12527 * range overlaps with this command's LBA range. 12528 */ 12529 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12530 uint64_t lba1; 12531 uint64_t len1; 12532 ctl_action action; 12533 int retval; 12534 12535 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12536 if (retval != 0) 12537 return (CTL_LUN_PAT_NONE); 12538 12539 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12540 desc->lba_range.len, FALSE); 12541 /* 12542 * A "pass" means that the LBA ranges don't overlap, so 12543 * this doesn't match the user's range criteria. 12544 */ 12545 if (action == CTL_ACTION_PASS) 12546 return (CTL_LUN_PAT_NONE); 12547 } 12548 12549 return (filtered_pattern); 12550 } 12551 12552 static void 12553 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12554 { 12555 struct ctl_error_desc *desc, *desc2; 12556 12557 mtx_assert(&lun->lun_lock, MA_OWNED); 12558 12559 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12560 ctl_lun_error_pattern pattern; 12561 /* 12562 * Check to see whether this particular command matches 12563 * the pattern in the descriptor. 12564 */ 12565 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12566 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12567 continue; 12568 12569 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12570 case CTL_LUN_INJ_ABORTED: 12571 ctl_set_aborted(&io->scsiio); 12572 break; 12573 case CTL_LUN_INJ_MEDIUM_ERR: 12574 ctl_set_medium_error(&io->scsiio, 12575 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != 12576 CTL_FLAG_DATA_OUT); 12577 break; 12578 case CTL_LUN_INJ_UA: 12579 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12580 * OCCURRED */ 12581 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12582 break; 12583 case CTL_LUN_INJ_CUSTOM: 12584 /* 12585 * We're assuming the user knows what he is doing. 12586 * Just copy the sense information without doing 12587 * checks. 12588 */ 12589 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12590 MIN(sizeof(desc->custom_sense), 12591 sizeof(io->scsiio.sense_data))); 12592 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12593 io->scsiio.sense_len = SSD_FULL_SIZE; 12594 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12595 break; 12596 case CTL_LUN_INJ_NONE: 12597 default: 12598 /* 12599 * If this is an error injection type we don't know 12600 * about, clear the continuous flag (if it is set) 12601 * so it will get deleted below. 
12602 */ 12603 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12604 break; 12605 } 12606 /* 12607 * By default, each error injection action is a one-shot 12608 */ 12609 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12610 continue; 12611 12612 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12613 12614 free(desc, M_CTL); 12615 } 12616 } 12617 12618 #ifdef CTL_IO_DELAY 12619 static void 12620 ctl_datamove_timer_wakeup(void *arg) 12621 { 12622 union ctl_io *io; 12623 12624 io = (union ctl_io *)arg; 12625 12626 ctl_datamove(io); 12627 } 12628 #endif /* CTL_IO_DELAY */ 12629 12630 void 12631 ctl_datamove(union ctl_io *io) 12632 { 12633 struct ctl_lun *lun; 12634 void (*fe_datamove)(union ctl_io *io); 12635 12636 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 12637 12638 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12639 12640 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12641 #ifdef CTL_TIME_IO 12642 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12643 char str[256]; 12644 char path_str[64]; 12645 struct sbuf sb; 12646 12647 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12648 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12649 12650 sbuf_cat(&sb, path_str); 12651 switch (io->io_hdr.io_type) { 12652 case CTL_IO_SCSI: 12653 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12654 sbuf_printf(&sb, "\n"); 12655 sbuf_cat(&sb, path_str); 12656 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12657 io->scsiio.tag_num, io->scsiio.tag_type); 12658 break; 12659 case CTL_IO_TASK: 12660 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12661 "Tag Type: %d\n", io->taskio.task_action, 12662 io->taskio.tag_num, io->taskio.tag_type); 12663 break; 12664 default: 12665 panic("%s: Invalid CTL I/O type %d\n", 12666 __func__, io->io_hdr.io_type); 12667 } 12668 sbuf_cat(&sb, path_str); 12669 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12670 (intmax_t)time_uptime - io->io_hdr.start_time); 12671 sbuf_finish(&sb); 12672 printf("%s", sbuf_data(&sb)); 12673 } 12674 #endif /* CTL_TIME_IO */ 12675 12676 #ifdef CTL_IO_DELAY 12677 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12678 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12679 } else { 12680 if ((lun != NULL) 12681 && (lun->delay_info.datamove_delay > 0)) { 12682 12683 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 12684 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12685 callout_reset(&io->io_hdr.delay_callout, 12686 lun->delay_info.datamove_delay * hz, 12687 ctl_datamove_timer_wakeup, io); 12688 if (lun->delay_info.datamove_type == 12689 CTL_DELAY_TYPE_ONESHOT) 12690 lun->delay_info.datamove_delay = 0; 12691 return; 12692 } 12693 } 12694 #endif 12695 12696 /* 12697 * This command has been aborted. Set the port status, so we fail 12698 * the data move. 12699 */ 12700 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12701 printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", 12702 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12703 io->io_hdr.nexus.targ_port, 12704 io->io_hdr.nexus.targ_lun); 12705 io->io_hdr.port_status = 31337; 12706 /* 12707 * Note that the backend, in this case, will get the 12708 * callback in its context. In other cases it may get 12709 * called in the frontend's interrupt thread context. 12710 */ 12711 io->scsiio.be_move_done(io); 12712 return; 12713 } 12714 12715 /* Don't confuse frontend with zero length data move. 
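	 * If there is nothing to move, just complete the I/O's be_move_done
	 * callback right away.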
*/ 12716 if (io->scsiio.kern_data_len == 0) { 12717 io->scsiio.be_move_done(io); 12718 return; 12719 } 12720 12721 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; 12722 fe_datamove(io); 12723 } 12724 12725 static void 12726 ctl_send_datamove_done(union ctl_io *io, int have_lock) 12727 { 12728 union ctl_ha_msg msg; 12729 #ifdef CTL_TIME_IO 12730 struct bintime cur_bt; 12731 #endif 12732 12733 memset(&msg, 0, sizeof(msg)); 12734 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12735 msg.hdr.original_sc = io; 12736 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12737 msg.hdr.nexus = io->io_hdr.nexus; 12738 msg.hdr.status = io->io_hdr.status; 12739 msg.scsi.tag_num = io->scsiio.tag_num; 12740 msg.scsi.tag_type = io->scsiio.tag_type; 12741 msg.scsi.scsi_status = io->scsiio.scsi_status; 12742 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12743 io->scsiio.sense_len); 12744 msg.scsi.sense_len = io->scsiio.sense_len; 12745 msg.scsi.sense_residual = io->scsiio.sense_residual; 12746 msg.scsi.fetd_status = io->io_hdr.port_status; 12747 msg.scsi.residual = io->scsiio.residual; 12748 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12749 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12750 ctl_failover_io(io, /*have_lock*/ have_lock); 12751 return; 12752 } 12753 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12754 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12755 msg.scsi.sense_len, M_WAITOK); 12756 12757 #ifdef CTL_TIME_IO 12758 getbinuptime(&cur_bt); 12759 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 12760 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 12761 #endif 12762 io->io_hdr.num_dmas++; 12763 } 12764 12765 /* 12766 * The DMA to the remote side is done, now we need to tell the other side 12767 * we're done so it can continue with its data movement. 12768 */ 12769 static void 12770 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12771 { 12772 union ctl_io *io; 12773 uint32_t i; 12774 12775 io = rq->context; 12776 12777 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12778 printf("%s: ISC DMA write failed with error %d", __func__, 12779 rq->ret); 12780 ctl_set_internal_failure(&io->scsiio, 12781 /*sks_valid*/ 1, 12782 /*retry_count*/ rq->ret); 12783 } 12784 12785 ctl_dt_req_free(rq); 12786 12787 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12788 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12789 free(io->io_hdr.remote_sglist, M_CTL); 12790 io->io_hdr.remote_sglist = NULL; 12791 io->io_hdr.local_sglist = NULL; 12792 12793 /* 12794 * The data is in local and remote memory, so now we need to send 12795 * status (good or back) back to the other side. 12796 */ 12797 ctl_send_datamove_done(io, /*have_lock*/ 0); 12798 } 12799 12800 /* 12801 * We've moved the data from the host/controller into local memory. Now we 12802 * need to push it over to the remote controller's memory. 12803 */ 12804 static int 12805 ctl_datamove_remote_dm_write_cb(union ctl_io *io) 12806 { 12807 int retval; 12808 12809 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 12810 ctl_datamove_remote_write_cb); 12811 return (retval); 12812 } 12813 12814 static void 12815 ctl_datamove_remote_write(union ctl_io *io) 12816 { 12817 int retval; 12818 void (*fe_datamove)(union ctl_io *io); 12819 12820 /* 12821 * - Get the data from the host/HBA into local memory. 12822 * - DMA memory from the local controller to the remote controller. 12823 * - Send status back to the remote controller. 
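	 * Only the first step is started here; the HA transfer and the status
	 * message are driven from ctl_datamove_remote_dm_write_cb() and
	 * ctl_datamove_remote_write_cb() once the local DMA completes.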
12824 */ 12825 12826 retval = ctl_datamove_remote_sgl_setup(io); 12827 if (retval != 0) 12828 return; 12829 12830 /* Switch the pointer over so the FETD knows what to do */ 12831 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12832 12833 /* 12834 * Use a custom move done callback, since we need to send completion 12835 * back to the other controller, not to the backend on this side. 12836 */ 12837 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12838 12839 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; 12840 fe_datamove(io); 12841 } 12842 12843 static int 12844 ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12845 { 12846 #if 0 12847 char str[256]; 12848 char path_str[64]; 12849 struct sbuf sb; 12850 #endif 12851 uint32_t i; 12852 12853 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12854 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12855 free(io->io_hdr.remote_sglist, M_CTL); 12856 io->io_hdr.remote_sglist = NULL; 12857 io->io_hdr.local_sglist = NULL; 12858 12859 #if 0 12860 scsi_path_string(io, path_str, sizeof(path_str)); 12861 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12862 sbuf_cat(&sb, path_str); 12863 scsi_command_string(&io->scsiio, NULL, &sb); 12864 sbuf_printf(&sb, "\n"); 12865 sbuf_cat(&sb, path_str); 12866 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12867 io->scsiio.tag_num, io->scsiio.tag_type); 12868 sbuf_cat(&sb, path_str); 12869 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 12870 io->io_hdr.flags, io->io_hdr.status); 12871 sbuf_finish(&sb); 12872 printk("%s", sbuf_data(&sb)); 12873 #endif 12874 12875 12876 /* 12877 * The read is done, now we need to send status (good or bad) back 12878 * to the other side. 12879 */ 12880 ctl_send_datamove_done(io, /*have_lock*/ 0); 12881 12882 return (0); 12883 } 12884 12885 static void 12886 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12887 { 12888 union ctl_io *io; 12889 void (*fe_datamove)(union ctl_io *io); 12890 12891 io = rq->context; 12892 12893 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12894 printf("%s: ISC DMA read failed with error %d\n", __func__, 12895 rq->ret); 12896 ctl_set_internal_failure(&io->scsiio, 12897 /*sks_valid*/ 1, 12898 /*retry_count*/ rq->ret); 12899 } 12900 12901 ctl_dt_req_free(rq); 12902 12903 /* Switch the pointer over so the FETD knows what to do */ 12904 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12905 12906 /* 12907 * Use a custom move done callback, since we need to send completion 12908 * back to the other controller, not to the backend on this side. 12909 */ 12910 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12911 12912 /* XXX KDM add checks like the ones in ctl_datamove? */ 12913 12914 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; 12915 fe_datamove(io); 12916 } 12917 12918 static int 12919 ctl_datamove_remote_sgl_setup(union ctl_io *io) 12920 { 12921 struct ctl_sg_entry *local_sglist; 12922 uint32_t len_to_go; 12923 int retval; 12924 int i; 12925 12926 retval = 0; 12927 local_sglist = io->io_hdr.local_sglist; 12928 len_to_go = io->scsiio.kern_data_len; 12929 12930 /* 12931 * The difficult thing here is that the size of the various 12932 * S/G segments may be different than the size from the 12933 * remote controller. That'll make it harder when DMAing 12934 * the data back to the other side. 
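	 * ctl_datamove_remote_xfer() copes with that by walking the local and
	 * remote lists with independent offsets and moving MIN()-sized chunks.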
12935 */ 12936 for (i = 0; len_to_go > 0; i++) { 12937 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12938 local_sglist[i].addr = 12939 malloc(local_sglist[i].len, M_CTL, M_WAITOK); 12940 12941 len_to_go -= local_sglist[i].len; 12942 } 12943 /* 12944 * Reset the number of S/G entries accordingly. The original 12945 * number of S/G entries is available in rem_sg_entries. 12946 */ 12947 io->scsiio.kern_sg_entries = i; 12948 12949 #if 0 12950 printf("%s: kern_sg_entries = %d\n", __func__, 12951 io->scsiio.kern_sg_entries); 12952 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12953 printf("%s: sg[%d] = %p, %lu\n", __func__, i, 12954 local_sglist[i].addr, local_sglist[i].len); 12955 #endif 12956 12957 return (retval); 12958 } 12959 12960 static int 12961 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12962 ctl_ha_dt_cb callback) 12963 { 12964 struct ctl_ha_dt_req *rq; 12965 struct ctl_sg_entry *remote_sglist, *local_sglist; 12966 uint32_t local_used, remote_used, total_used; 12967 int i, j, isc_ret; 12968 12969 rq = ctl_dt_req_alloc(); 12970 12971 /* 12972 * If we failed to allocate the request, and if the DMA didn't fail 12973 * anyway, set busy status. This is just a resource allocation 12974 * failure. 12975 */ 12976 if ((rq == NULL) 12977 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12978 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) 12979 ctl_set_busy(&io->scsiio); 12980 12981 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12982 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { 12983 12984 if (rq != NULL) 12985 ctl_dt_req_free(rq); 12986 12987 /* 12988 * The data move failed. We need to return status back 12989 * to the other controller. No point in trying to DMA 12990 * data to the remote controller. 12991 */ 12992 12993 ctl_send_datamove_done(io, /*have_lock*/ 0); 12994 12995 return (1); 12996 } 12997 12998 local_sglist = io->io_hdr.local_sglist; 12999 remote_sglist = io->io_hdr.remote_sglist; 13000 local_used = 0; 13001 remote_used = 0; 13002 total_used = 0; 13003 13004 /* 13005 * Pull/push the data over the wire from/to the other controller. 13006 * This takes into account the possibility that the local and 13007 * remote sglists may not be identical in terms of the size of 13008 * the elements and the number of elements. 13009 * 13010 * One fundamental assumption here is that the length allocated for 13011 * both the local and remote sglists is identical. Otherwise, we've 13012 * essentially got a coding error of some sort. 13013 */ 13014 isc_ret = CTL_HA_STATUS_SUCCESS; 13015 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 13016 uint32_t cur_len; 13017 uint8_t *tmp_ptr; 13018 13019 rq->command = command; 13020 rq->context = io; 13021 13022 /* 13023 * Both pointers should be aligned. But it is possible 13024 * that the allocation length is not. They should both 13025 * also have enough slack left over at the end, though, 13026 * to round up to the next 8 byte boundary. 
13027 */ 13028 cur_len = MIN(local_sglist[i].len - local_used, 13029 remote_sglist[j].len - remote_used); 13030 rq->size = cur_len; 13031 13032 tmp_ptr = (uint8_t *)local_sglist[i].addr; 13033 tmp_ptr += local_used; 13034 13035 #if 0 13036 /* Use physical addresses when talking to ISC hardware */ 13037 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 13038 /* XXX KDM use busdma */ 13039 rq->local = vtophys(tmp_ptr); 13040 } else 13041 rq->local = tmp_ptr; 13042 #else 13043 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 13044 ("HA does not support BUS_ADDR")); 13045 rq->local = tmp_ptr; 13046 #endif 13047 13048 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 13049 tmp_ptr += remote_used; 13050 rq->remote = tmp_ptr; 13051 13052 rq->callback = NULL; 13053 13054 local_used += cur_len; 13055 if (local_used >= local_sglist[i].len) { 13056 i++; 13057 local_used = 0; 13058 } 13059 13060 remote_used += cur_len; 13061 if (remote_used >= remote_sglist[j].len) { 13062 j++; 13063 remote_used = 0; 13064 } 13065 total_used += cur_len; 13066 13067 if (total_used >= io->scsiio.kern_data_len) 13068 rq->callback = callback; 13069 13070 #if 0 13071 printf("%s: %s: local %p remote %p size %d\n", __func__, 13072 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 13073 rq->local, rq->remote, rq->size); 13074 #endif 13075 13076 isc_ret = ctl_dt_single(rq); 13077 if (isc_ret > CTL_HA_STATUS_SUCCESS) 13078 break; 13079 } 13080 if (isc_ret != CTL_HA_STATUS_WAIT) { 13081 rq->ret = isc_ret; 13082 callback(rq); 13083 } 13084 13085 return (0); 13086 } 13087 13088 static void 13089 ctl_datamove_remote_read(union ctl_io *io) 13090 { 13091 int retval; 13092 uint32_t i; 13093 13094 /* 13095 * This will send an error to the other controller in the case of a 13096 * failure. 13097 */ 13098 retval = ctl_datamove_remote_sgl_setup(io); 13099 if (retval != 0) 13100 return; 13101 13102 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 13103 ctl_datamove_remote_read_cb); 13104 if (retval != 0) { 13105 /* 13106 * Make sure we free memory if there was an error.. The 13107 * ctl_datamove_remote_xfer() function will send the 13108 * datamove done message, or call the callback with an 13109 * error if there is a problem. 13110 */ 13111 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 13112 free(io->io_hdr.local_sglist[i].addr, M_CTL); 13113 free(io->io_hdr.remote_sglist, M_CTL); 13114 io->io_hdr.remote_sglist = NULL; 13115 io->io_hdr.local_sglist = NULL; 13116 } 13117 } 13118 13119 /* 13120 * Process a datamove request from the other controller. This is used for 13121 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 13122 * first. Once that is complete, the data gets DMAed into the remote 13123 * controller's memory. For reads, we DMA from the remote controller's 13124 * memory into our memory first, and then move it out to the FETD. 13125 */ 13126 static void 13127 ctl_datamove_remote(union ctl_io *io) 13128 { 13129 13130 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 13131 13132 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 13133 ctl_failover_io(io, /*have_lock*/ 0); 13134 return; 13135 } 13136 13137 /* 13138 * Note that we look for an aborted I/O here, but don't do some of 13139 * the other checks that ctl_datamove() normally does. 13140 * We don't need to run the datamove delay code, since that should 13141 * have been done if need be on the other controller. 
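	 * An aborted I/O just gets a nonzero port_status and an immediate
	 * datamove-done message back to the peer, mirroring what
	 * ctl_datamove() does locally.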
	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
		printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__,
		    io->scsiio.tag_num, io->io_hdr.nexus.initid,
		    io->io_hdr.nexus.targ_port,
		    io->io_hdr.nexus.targ_lun);
		io->io_hdr.port_status = 31338;
		ctl_send_datamove_done(io, /*have_lock*/ 0);
		return;
	}

	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT)
		ctl_datamove_remote_write(io);
	else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
		ctl_datamove_remote_read(io);
	else {
		io->io_hdr.port_status = 31339;
		ctl_send_datamove_done(io, /*have_lock*/ 0);
	}
}

static void
ctl_process_done(union ctl_io *io)
{
	struct ctl_lun *lun;
	struct ctl_softc *softc = control_softc;
	void (*fe_done)(union ctl_io *io);
	union ctl_ha_msg msg;
	uint32_t targ_port = io->io_hdr.nexus.targ_port;

	CTL_DEBUG_PRINT(("ctl_process_done\n"));
	fe_done = softc->ctl_ports[targ_port]->fe_done;

#ifdef CTL_TIME_IO
	if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
		char str[256];
		char path_str[64];
		struct sbuf sb;

		ctl_scsi_path_string(io, path_str, sizeof(path_str));
		sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);

		sbuf_cat(&sb, path_str);
		switch (io->io_hdr.io_type) {
		case CTL_IO_SCSI:
			ctl_scsi_command_string(&io->scsiio, NULL, &sb);
			sbuf_printf(&sb, "\n");
			sbuf_cat(&sb, path_str);
			sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
			    io->scsiio.tag_num, io->scsiio.tag_type);
			break;
		case CTL_IO_TASK:
			sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
			    "Tag Type: %d\n", io->taskio.task_action,
			    io->taskio.tag_num, io->taskio.tag_type);
			break;
		default:
			panic("%s: Invalid CTL I/O type %d\n",
			    __func__, io->io_hdr.io_type);
		}
		sbuf_cat(&sb, path_str);
		sbuf_printf(&sb, "ctl_process_done: %jd seconds\n",
		    (intmax_t)time_uptime - io->io_hdr.start_time);
		sbuf_finish(&sb);
		printf("%s", sbuf_data(&sb));
	}
#endif /* CTL_TIME_IO */

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		break;
	case CTL_IO_TASK:
		if (ctl_debug & CTL_DEBUG_INFO)
			ctl_io_error_print(io, NULL);
		fe_done(io);
		return;
	default:
		panic("%s: Invalid CTL I/O type %d\n",
		    __func__, io->io_hdr.io_type);
	}

	lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	if (lun == NULL) {
		CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
		    io->io_hdr.nexus.targ_mapped_lun));
		goto bailout;
	}

	mtx_lock(&lun->lun_lock);

	/*
	 * Check to see if we have an informational exception to report, and
	 * whether the status of this command can be modified to report it in
	 * the form of either RECOVERED ERROR or NO SENSE, depending on the
	 * MRIE mode page field.
	 */
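	/*
	 * The exception is delivered at most once (ie_reported gates it),
	 * only on commands that completed successfully and have not yet
	 * sent status, and only when the MRIE setting allows it:
	 * conditionally (requires the PER bit in the R/W or Verify error
	 * recovery pages), unconditionally, or as NO SENSE.  Commands
	 * flagged CTL_CMD_FLAG_NO_SENSE are never used to carry it.
	 */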
	if (lun->ie_reported == 0 && lun->ie_asc != 0 &&
	    io->io_hdr.status == CTL_SUCCESS &&
	    (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) {
		uint8_t mrie = lun->MODE_IE.mrie;
		uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) ||
		    (lun->MODE_VER.byte3 & SMS_VER_PER));
		if (((mrie == SIEP_MRIE_REC_COND && per) ||
		     mrie == SIEP_MRIE_REC_UNCOND ||
		     mrie == SIEP_MRIE_NO_SENSE) &&
		    (ctl_get_cmd_entry(&io->scsiio, NULL)->flags &
		     CTL_CMD_FLAG_NO_SENSE) == 0) {
			ctl_set_sense(&io->scsiio,
			    /*current_error*/ 1,
			    /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ?
			      SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR,
			    /*asc*/ lun->ie_asc,
			    /*ascq*/ lun->ie_ascq,
			    SSD_ELEM_NONE);
			lun->ie_reported = 1;
		}
	} else if (lun->ie_reported < 0)
		lun->ie_reported = 0;

	/*
	 * Check to see if we have any errors to inject here.  We only
	 * inject errors for commands that don't already have errors set.
	 */
	if (!STAILQ_EMPTY(&lun->error_list) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) &&
	    ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0))
		ctl_inject_error(lun, io);

	/*
	 * XXX KDM how do we treat commands that aren't completed
	 * successfully?
	 *
	 * XXX KDM should we also track I/O latency?
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
	    io->io_hdr.io_type == CTL_IO_SCSI) {
#ifdef CTL_TIME_IO
		struct bintime cur_bt;
#endif
		int type;

		if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_IN)
			type = CTL_STATS_READ;
		else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_OUT)
			type = CTL_STATS_WRITE;
		else
			type = CTL_STATS_NO_IO;

		lun->stats.ports[targ_port].bytes[type] +=
		    io->scsiio.kern_total_len;
		lun->stats.ports[targ_port].operations[type]++;
#ifdef CTL_TIME_IO
		bintime_add(&lun->stats.ports[targ_port].dma_time[type],
		    &io->io_hdr.dma_bt);
		getbinuptime(&cur_bt);
		bintime_sub(&cur_bt, &io->io_hdr.start_bt);
		bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt);
#endif
		lun->stats.ports[targ_port].num_dmas[type] +=
		    io->io_hdr.num_dmas;
	}

	/*
	 * Remove this from the OOA queue.
	 */
	TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
#ifdef CTL_TIME_IO
	if (TAILQ_EMPTY(&lun->ooa_queue))
		lun->last_busy = getsbinuptime();
#endif

	/*
	 * Run through the blocked queue on this LUN and see if anything
	 * has become unblocked, now that this transaction is done.
	 */
	ctl_check_blocked(lun);

	/*
	 * If the LUN has been invalidated, free it if there is nothing
	 * left on its OOA queue.
	 */
	if ((lun->flags & CTL_LUN_INVALID)
	 && TAILQ_EMPTY(&lun->ooa_queue)) {
		mtx_unlock(&lun->lun_lock);
		mtx_lock(&softc->ctl_lock);
		ctl_free_lun(lun);
		mtx_unlock(&softc->ctl_lock);
	} else
		mtx_unlock(&lun->lun_lock);

bailout:

	/*
	 * If this command has been aborted, make sure we set the status
	 * properly.  The FETD is responsible for freeing the I/O and doing
	 * whatever it needs to do to clean up its state.
	 */
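	/*
	 * Everything from here on is shared by the LUN and no-LUN paths:
	 * fix up the status of aborted commands, optionally log the error,
	 * notify the peer controller if it was involved in the command,
	 * and finally hand the I/O back to the frontend.
	 */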
	if (io->io_hdr.flags & CTL_FLAG_ABORT)
		ctl_set_task_aborted(&io->scsiio);

	/*
	 * If enabled, print command error status.
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS &&
	    (ctl_debug & CTL_DEBUG_INFO) != 0)
		ctl_io_error_print(io, NULL);

	/*
	 * Tell the FETD or the other shelf controller we're done with this
	 * command.  Note that only SCSI commands get to this point.  Task
	 * management commands are completed above.
	 */
	if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
	    (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
		msg.hdr.nexus = io->io_hdr.nexus;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
		    M_WAITOK);
	}

	fe_done(io);
}

#ifdef CTL_WITH_CA
/*
 * Front end should call this if it doesn't do autosense.  When the request
 * sense comes back in from the initiator, we'll dequeue this and send it.
 */
int
ctl_queue_sense(union ctl_io *io)
{
	struct ctl_lun *lun;
	struct ctl_port *port;
	struct ctl_softc *softc;
	uint32_t initidx, targ_lun;

	CTL_DEBUG_PRINT(("ctl_queue_sense\n"));

	softc = control_softc;
	port = ctl_io_port(&io->io_hdr);
	targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	/*
	 * LUN lookup will likely move to the ctl_work_thread() once we
	 * have our new queueing infrastructure (that doesn't put things on
	 * a per-LUN queue initially).  That is so that we can handle
	 * things like an INQUIRY to a LUN that we don't have enabled.  We
	 * can't deal with that right now.
	 * If we don't have a LUN for this, just toss the sense information.
	 */
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= CTL_MAX_LUNS ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		goto bailout;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);

	/*
	 * Already have CA set for this LUN...toss the sense information.
	 */
	initidx = ctl_get_initindex(&io->io_hdr.nexus);
	if (ctl_is_set(lun->have_ca, initidx)) {
		mtx_unlock(&lun->lun_lock);
		goto bailout;
	}

	memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data,
	    MIN(sizeof(lun->pending_sense[initidx]),
	    sizeof(io->scsiio.sense_data)));
	ctl_set_mask(lun->have_ca, initidx);
	mtx_unlock(&lun->lun_lock);

bailout:
	ctl_free_io(io);
	return (CTL_RETVAL_COMPLETE);
}
#endif

/*
 * Primary command inlet from frontend ports.  All SCSI and task I/O
 * requests must go through this function.
 */
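/*
 * A rough calling sketch, for illustration only; real frontends fill in
 * many more fields, and the angle-bracketed values are placeholders:
 *
 *	io = ctl_alloc_io(port->ctl_pool_ref);
 *	ctl_zero_io(io);
 *	io->io_hdr.io_type = CTL_IO_SCSI;
 *	io->io_hdr.nexus.targ_port = port->targ_port;
 *	io->io_hdr.nexus.initid = <initiator index>;
 *	io->io_hdr.nexus.targ_lun = <port-relative LUN>;
 *	... fill in the CDB, tag and data buffer, then ...
 *	ctl_queue(io);
 */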
int
ctl_queue(union ctl_io *io)
{
	struct ctl_port *port;

	CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));

#ifdef CTL_TIME_IO
	io->io_hdr.start_time = time_uptime;
	getbinuptime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */

	/* Map FE-specific LUN ID into global one. */
	port = ctl_io_port(&io->io_hdr);
	io->io_hdr.nexus.targ_mapped_lun =
	    ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
	case CTL_IO_TASK:
		if (ctl_debug & CTL_DEBUG_CDB)
			ctl_io_print(io);
		ctl_enqueue_incoming(io);
		break;
	default:
		printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
		return (EINVAL);
	}

	return (CTL_RETVAL_COMPLETE);
}

#ifdef CTL_IO_DELAY
static void
ctl_done_timer_wakeup(void *arg)
{
	union ctl_io *io;

	io = (union ctl_io *)arg;
	ctl_done(io);
}
#endif /* CTL_IO_DELAY */

void
ctl_serseq_done(union ctl_io *io)
{
	struct ctl_lun *lun;

	lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	if (lun->be_lun == NULL ||
	    lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF)
		return;
	mtx_lock(&lun->lun_lock);
	io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
	ctl_check_blocked(lun);
	mtx_unlock(&lun->lun_lock);
}

void
ctl_done(union ctl_io *io)
{

	/*
	 * Enable this to catch duplicate completion issues.
	 */
#if 0
	if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
		printf("%s: type %d msg %d cdb %x iptl: "
		    "%u:%u:%u tag 0x%04x "
		    "flag %#x status %x\n",
		    __func__,
		    io->io_hdr.io_type,
		    io->io_hdr.msg_type,
		    io->scsiio.cdb[0],
		    io->io_hdr.nexus.initid,
		    io->io_hdr.nexus.targ_port,
		    io->io_hdr.nexus.targ_lun,
		    (io->io_hdr.io_type == CTL_IO_TASK) ?
		    io->taskio.tag_num : io->scsiio.tag_num,
		    io->io_hdr.flags,
		    io->io_hdr.status);
	} else
		io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
#endif

	/*
	 * This is an internal copy of an I/O, and should not go through
	 * the normal done processing logic.
	 */
	if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
		return;

#ifdef CTL_IO_DELAY
	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
	} else {
		struct ctl_lun *lun;

		lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

		if ((lun != NULL)
		 && (lun->delay_info.done_delay > 0)) {

			callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
			callout_reset(&io->io_hdr.delay_callout,
			    lun->delay_info.done_delay * hz,
			    ctl_done_timer_wakeup, io);
			if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
				lun->delay_info.done_delay = 0;
			return;
		}
	}
#endif /* CTL_IO_DELAY */

	ctl_enqueue_done(io);
}

static void
ctl_work_thread(void *arg)
{
	struct ctl_thread *thr = (struct ctl_thread *)arg;
	struct ctl_softc *softc = thr->ctl_softc;
	union ctl_io *io;
	int retval;

	CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));

	for (;;) {
		/*
		 * We handle the queues in this order:
		 * - ISC
		 * - done queue (to free up resources, unblock other commands)
		 * - incoming queue
		 * - RtR queue
		 *
		 * If all of those queues are empty, we sleep until new work
		 * is queued.
		 */
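		/*
		 * Only one entry is taken per iteration; after handling it
		 * we go back to the top of the loop, so the higher-priority
		 * queues are always rechecked before the lower-priority
		 * ones are drained.
		 */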
		mtx_lock(&thr->queue_lock);
		io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
			mtx_unlock(&thr->queue_lock);
			ctl_handle_isc(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->done_queue, links);
			/* clear any blocked commands, call fe_done */
			mtx_unlock(&thr->queue_lock);
			ctl_process_done(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
			mtx_unlock(&thr->queue_lock);
			if (io->io_hdr.io_type == CTL_IO_TASK)
				ctl_run_task(io);
			else
				ctl_scsiio_precheck(softc, &io->scsiio);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
			mtx_unlock(&thr->queue_lock);
			retval = ctl_scsiio(&io->scsiio);
			if (retval != CTL_RETVAL_COMPLETE)
				CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
			continue;
		}

		/* Sleep until we have something to do. */
		mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
	}
}

static void
ctl_lun_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_be_lun *be_lun;

	CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));

	for (;;) {
		mtx_lock(&softc->ctl_lock);
		be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
		if (be_lun != NULL) {
			STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
			mtx_unlock(&softc->ctl_lock);
			ctl_create_lun(be_lun);
			continue;
		}

		/* Sleep until we have something to do. */
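		/*
		 * The pending-LUN queue itself is the wait channel, and
		 * PDROP releases ctl_lock for the duration of the sleep;
		 * whoever queues a new LUN is expected to wakeup() on the
		 * same channel.
		 */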
		mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
		    PDROP | PRIBIO, "-", 0);
	}
}

static void
ctl_thresh_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_lun *lun;
	struct ctl_logical_block_provisioning_page *page;
	const char *attr;
	union ctl_ha_msg msg;
	uint64_t thres, val;
	int i, e, set;

	CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));

	for (;;) {
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			if ((lun->flags & CTL_LUN_DISABLED) ||
			    (lun->flags & CTL_LUN_NO_MEDIA) ||
			    lun->backend->lun_attr == NULL)
				continue;
			if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
			    softc->ha_mode == CTL_HA_MODE_XFER)
				continue;
			if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0)
				continue;
			e = 0;
			page = &lun->MODE_LBP;
			for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
				if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
					continue;
				thres = scsi_4btoul(page->descr[i].count);
				thres <<= CTL_LBP_EXPONENT;
				switch (page->descr[i].resource) {
				case 0x01:
					attr = "blocksavail";
					break;
				case 0x02:
					attr = "blocksused";
					break;
				case 0xf1:
					attr = "poolblocksavail";
					break;
				case 0xf2:
					attr = "poolblocksused";
					break;
				default:
					continue;
				}
				mtx_unlock(&softc->ctl_lock); // XXX
				val = lun->backend->lun_attr(
				    lun->be_lun->be_lun, attr);
				mtx_lock(&softc->ctl_lock);
				if (val == UINT64_MAX)
					continue;
				if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
				    == SLBPPD_ARMING_INC)
					e = (val >= thres);
				else
					e = (val <= thres);
				if (e)
					break;
			}
			mtx_lock(&lun->lun_lock);
			if (e) {
				scsi_u64to8b((uint8_t *)&page->descr[i] -
				    (uint8_t *)page, lun->ua_tpt_info);
				if (lun->lasttpt == 0 ||
				    time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
					lun->lasttpt = time_uptime;
					ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
					set = 1;
				} else
					set = 0;
			} else {
				lun->lasttpt = 0;
				ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
				set = -1;
			}
			mtx_unlock(&lun->lun_lock);
			if (set != 0 &&
			    lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
				/* Send msg to other side. */
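				/*
				 * ua_all broadcasts the unit attention to
				 * every initiator on the peer; ua_set
				 * distinguishes arming the threshold UA
				 * (set > 0) from clearing it.
				 */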
				bzero(&msg.ua, sizeof(msg.ua));
				msg.hdr.msg_type = CTL_MSG_UA;
				msg.hdr.nexus.initid = -1;
				msg.hdr.nexus.targ_port = -1;
				msg.hdr.nexus.targ_lun = lun->lun;
				msg.hdr.nexus.targ_mapped_lun = lun->lun;
				msg.ua.ua_all = 1;
				msg.ua.ua_set = (set > 0);
				msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
				memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8);
				mtx_unlock(&softc->ctl_lock); // XXX
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
				    sizeof(msg.ua), M_WAITOK);
				mtx_lock(&softc->ctl_lock);
			}
		}
		mtx_unlock(&softc->ctl_lock);
		pause("-", CTL_LBP_PERIOD * hz);
	}
}

static void
ctl_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;
	u_int idx;

	idx = (io->io_hdr.nexus.targ_port * 127 +
	    io->io_hdr.nexus.initid) % worker_threads;
	thr = &softc->threads[idx];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_rtr(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_done(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_isc(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * vim: ts=8
 */