1 /*- 2 * Copyright (c) 2003-2009 Silicon Graphics International Corp. 3 * Copyright (c) 2012 The FreeBSD Foundation 4 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 17 * substantially similar to the "NO WARRANTY" disclaimer below 18 * ("Disclaimer") and any redistribution must be conditioned upon 19 * including a substantially similar Disclaimer requirement for further 20 * binary redistribution. 21 * 22 * NO WARRANTY 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGES. 34 * 35 * $Id$ 36 */ 37 /* 38 * CAM Target Layer, a SCSI device emulation subsystem. 39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/ctype.h> 49 #include <sys/kernel.h> 50 #include <sys/types.h> 51 #include <sys/kthread.h> 52 #include <sys/bio.h> 53 #include <sys/fcntl.h> 54 #include <sys/lock.h> 55 #include <sys/module.h> 56 #include <sys/mutex.h> 57 #include <sys/condvar.h> 58 #include <sys/malloc.h> 59 #include <sys/conf.h> 60 #include <sys/ioccom.h> 61 #include <sys/queue.h> 62 #include <sys/sbuf.h> 63 #include <sys/smp.h> 64 #include <sys/endian.h> 65 #include <sys/sysctl.h> 66 #include <vm/uma.h> 67 68 #include <cam/cam.h> 69 #include <cam/scsi/scsi_all.h> 70 #include <cam/scsi/scsi_cd.h> 71 #include <cam/scsi/scsi_da.h> 72 #include <cam/ctl/ctl_io.h> 73 #include <cam/ctl/ctl.h> 74 #include <cam/ctl/ctl_frontend.h> 75 #include <cam/ctl/ctl_util.h> 76 #include <cam/ctl/ctl_backend.h> 77 #include <cam/ctl/ctl_ioctl.h> 78 #include <cam/ctl/ctl_ha.h> 79 #include <cam/ctl/ctl_private.h> 80 #include <cam/ctl/ctl_debug.h> 81 #include <cam/ctl/ctl_scsi_all.h> 82 #include <cam/ctl/ctl_error.h> 83 84 struct ctl_softc *control_softc = NULL; 85 86 /* 87 * Template mode pages. 88 */ 89 90 /* 91 * Note that these are default values only. The actual values will be 92 * filled in when the user does a mode sense. 
93 */ 94 const static struct scsi_da_rw_recovery_page rw_er_page_default = { 95 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 96 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 97 /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, 98 /*read_retry_count*/0, 99 /*correction_span*/0, 100 /*head_offset_count*/0, 101 /*data_strobe_offset_cnt*/0, 102 /*byte8*/SMS_RWER_LBPERE, 103 /*write_retry_count*/0, 104 /*reserved2*/0, 105 /*recovery_time_limit*/{0, 0}, 106 }; 107 108 const static struct scsi_da_rw_recovery_page rw_er_page_changeable = { 109 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 110 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 111 /*byte3*/SMS_RWER_PER, 112 /*read_retry_count*/0, 113 /*correction_span*/0, 114 /*head_offset_count*/0, 115 /*data_strobe_offset_cnt*/0, 116 /*byte8*/SMS_RWER_LBPERE, 117 /*write_retry_count*/0, 118 /*reserved2*/0, 119 /*recovery_time_limit*/{0, 0}, 120 }; 121 122 const static struct scsi_format_page format_page_default = { 123 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 124 /*page_length*/sizeof(struct scsi_format_page) - 2, 125 /*tracks_per_zone*/ {0, 0}, 126 /*alt_sectors_per_zone*/ {0, 0}, 127 /*alt_tracks_per_zone*/ {0, 0}, 128 /*alt_tracks_per_lun*/ {0, 0}, 129 /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, 130 CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, 131 /*bytes_per_sector*/ {0, 0}, 132 /*interleave*/ {0, 0}, 133 /*track_skew*/ {0, 0}, 134 /*cylinder_skew*/ {0, 0}, 135 /*flags*/ SFP_HSEC, 136 /*reserved*/ {0, 0, 0} 137 }; 138 139 const static struct scsi_format_page format_page_changeable = { 140 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 141 /*page_length*/sizeof(struct scsi_format_page) - 2, 142 /*tracks_per_zone*/ {0, 0}, 143 /*alt_sectors_per_zone*/ {0, 0}, 144 /*alt_tracks_per_zone*/ {0, 0}, 145 /*alt_tracks_per_lun*/ {0, 0}, 146 /*sectors_per_track*/ {0, 0}, 147 /*bytes_per_sector*/ {0, 0}, 148 /*interleave*/ {0, 0}, 149 /*track_skew*/ {0, 0}, 150 /*cylinder_skew*/ {0, 0}, 151 /*flags*/ 0, 152 /*reserved*/ {0, 0, 0} 153 }; 154 155 const static struct scsi_rigid_disk_page rigid_disk_page_default = { 156 /*page_code*/SMS_RIGID_DISK_PAGE, 157 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 158 /*cylinders*/ {0, 0, 0}, 159 /*heads*/ CTL_DEFAULT_HEADS, 160 /*start_write_precomp*/ {0, 0, 0}, 161 /*start_reduced_current*/ {0, 0, 0}, 162 /*step_rate*/ {0, 0}, 163 /*landing_zone_cylinder*/ {0, 0, 0}, 164 /*rpl*/ SRDP_RPL_DISABLED, 165 /*rotational_offset*/ 0, 166 /*reserved1*/ 0, 167 /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, 168 CTL_DEFAULT_ROTATION_RATE & 0xff}, 169 /*reserved2*/ {0, 0} 170 }; 171 172 const static struct scsi_rigid_disk_page rigid_disk_page_changeable = { 173 /*page_code*/SMS_RIGID_DISK_PAGE, 174 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 175 /*cylinders*/ {0, 0, 0}, 176 /*heads*/ 0, 177 /*start_write_precomp*/ {0, 0, 0}, 178 /*start_reduced_current*/ {0, 0, 0}, 179 /*step_rate*/ {0, 0}, 180 /*landing_zone_cylinder*/ {0, 0, 0}, 181 /*rpl*/ 0, 182 /*rotational_offset*/ 0, 183 /*reserved1*/ 0, 184 /*rotation_rate*/ {0, 0}, 185 /*reserved2*/ {0, 0} 186 }; 187 188 const static struct scsi_da_verify_recovery_page verify_er_page_default = { 189 /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, 190 /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, 191 /*byte3*/0, 192 /*read_retry_count*/0, 193 /*reserved*/{ 0, 0, 0, 0, 0, 0 }, 194 /*recovery_time_limit*/{0, 0}, 195 }; 196 197 const static struct scsi_da_verify_recovery_page verify_er_page_changeable = { 198 
/*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, 199 /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, 200 /*byte3*/SMS_VER_PER, 201 /*read_retry_count*/0, 202 /*reserved*/{ 0, 0, 0, 0, 0, 0 }, 203 /*recovery_time_limit*/{0, 0}, 204 }; 205 206 const static struct scsi_caching_page caching_page_default = { 207 /*page_code*/SMS_CACHING_PAGE, 208 /*page_length*/sizeof(struct scsi_caching_page) - 2, 209 /*flags1*/ SCP_DISC | SCP_WCE, 210 /*ret_priority*/ 0, 211 /*disable_pf_transfer_len*/ {0xff, 0xff}, 212 /*min_prefetch*/ {0, 0}, 213 /*max_prefetch*/ {0xff, 0xff}, 214 /*max_pf_ceiling*/ {0xff, 0xff}, 215 /*flags2*/ 0, 216 /*cache_segments*/ 0, 217 /*cache_seg_size*/ {0, 0}, 218 /*reserved*/ 0, 219 /*non_cache_seg_size*/ {0, 0, 0} 220 }; 221 222 const static struct scsi_caching_page caching_page_changeable = { 223 /*page_code*/SMS_CACHING_PAGE, 224 /*page_length*/sizeof(struct scsi_caching_page) - 2, 225 /*flags1*/ SCP_WCE | SCP_RCD, 226 /*ret_priority*/ 0, 227 /*disable_pf_transfer_len*/ {0, 0}, 228 /*min_prefetch*/ {0, 0}, 229 /*max_prefetch*/ {0, 0}, 230 /*max_pf_ceiling*/ {0, 0}, 231 /*flags2*/ 0, 232 /*cache_segments*/ 0, 233 /*cache_seg_size*/ {0, 0}, 234 /*reserved*/ 0, 235 /*non_cache_seg_size*/ {0, 0, 0} 236 }; 237 238 const static struct scsi_control_page control_page_default = { 239 /*page_code*/SMS_CONTROL_MODE_PAGE, 240 /*page_length*/sizeof(struct scsi_control_page) - 2, 241 /*rlec*/0, 242 /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, 243 /*eca_and_aen*/0, 244 /*flags4*/SCP_TAS, 245 /*aen_holdoff_period*/{0, 0}, 246 /*busy_timeout_period*/{0, 0}, 247 /*extended_selftest_completion_time*/{0, 0} 248 }; 249 250 const static struct scsi_control_page control_page_changeable = { 251 /*page_code*/SMS_CONTROL_MODE_PAGE, 252 /*page_length*/sizeof(struct scsi_control_page) - 2, 253 /*rlec*/SCP_DSENSE, 254 /*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR, 255 /*eca_and_aen*/SCP_SWP, 256 /*flags4*/0, 257 /*aen_holdoff_period*/{0, 0}, 258 /*busy_timeout_period*/{0, 0}, 259 /*extended_selftest_completion_time*/{0, 0} 260 }; 261 262 #define CTL_CEM_LEN (sizeof(struct scsi_control_ext_page) - 4) 263 264 const static struct scsi_control_ext_page control_ext_page_default = { 265 /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, 266 /*subpage_code*/0x01, 267 /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, 268 /*flags*/0, 269 /*prio*/0, 270 /*max_sense*/0 271 }; 272 273 const static struct scsi_control_ext_page control_ext_page_changeable = { 274 /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, 275 /*subpage_code*/0x01, 276 /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, 277 /*flags*/0, 278 /*prio*/0, 279 /*max_sense*/0xff 280 }; 281 282 const static struct scsi_info_exceptions_page ie_page_default = { 283 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 284 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 285 /*info_flags*/SIEP_FLAGS_EWASC, 286 /*mrie*/SIEP_MRIE_NO, 287 /*interval_timer*/{0, 0, 0, 0}, 288 /*report_count*/{0, 0, 0, 1} 289 }; 290 291 const static struct scsi_info_exceptions_page ie_page_changeable = { 292 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 293 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 294 /*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST | 295 SIEP_FLAGS_LOGERR, 296 /*mrie*/0x0f, 297 /*interval_timer*/{0xff, 0xff, 0xff, 0xff}, 298 /*report_count*/{0xff, 0xff, 0xff, 0xff} 299 }; 300 301 #define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4) 302 303 const static struct ctl_logical_block_provisioning_page lbp_page_default = 
{{ 304 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 305 /*subpage_code*/0x02, 306 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 307 /*flags*/0, 308 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 309 /*descr*/{}}, 310 {{/*flags*/0, 311 /*resource*/0x01, 312 /*reserved*/{0, 0}, 313 /*count*/{0, 0, 0, 0}}, 314 {/*flags*/0, 315 /*resource*/0x02, 316 /*reserved*/{0, 0}, 317 /*count*/{0, 0, 0, 0}}, 318 {/*flags*/0, 319 /*resource*/0xf1, 320 /*reserved*/{0, 0}, 321 /*count*/{0, 0, 0, 0}}, 322 {/*flags*/0, 323 /*resource*/0xf2, 324 /*reserved*/{0, 0}, 325 /*count*/{0, 0, 0, 0}} 326 } 327 }; 328 329 const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{ 330 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 331 /*subpage_code*/0x02, 332 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 333 /*flags*/SLBPP_SITUA, 334 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 335 /*descr*/{}}, 336 {{/*flags*/0, 337 /*resource*/0, 338 /*reserved*/{0, 0}, 339 /*count*/{0, 0, 0, 0}}, 340 {/*flags*/0, 341 /*resource*/0, 342 /*reserved*/{0, 0}, 343 /*count*/{0, 0, 0, 0}}, 344 {/*flags*/0, 345 /*resource*/0, 346 /*reserved*/{0, 0}, 347 /*count*/{0, 0, 0, 0}}, 348 {/*flags*/0, 349 /*resource*/0, 350 /*reserved*/{0, 0}, 351 /*count*/{0, 0, 0, 0}} 352 } 353 }; 354 355 const static struct scsi_cddvd_capabilities_page cddvd_page_default = { 356 /*page_code*/SMS_CDDVD_CAPS_PAGE, 357 /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, 358 /*caps1*/0x3f, 359 /*caps2*/0x00, 360 /*caps3*/0xf0, 361 /*caps4*/0x00, 362 /*caps5*/0x29, 363 /*caps6*/0x00, 364 /*obsolete*/{0, 0}, 365 /*nvol_levels*/{0, 0}, 366 /*buffer_size*/{8, 0}, 367 /*obsolete2*/{0, 0}, 368 /*reserved*/0, 369 /*digital*/0, 370 /*obsolete3*/0, 371 /*copy_management*/0, 372 /*reserved2*/0, 373 /*rotation_control*/0, 374 /*cur_write_speed*/0, 375 /*num_speed_descr*/0, 376 }; 377 378 const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = { 379 /*page_code*/SMS_CDDVD_CAPS_PAGE, 380 /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, 381 /*caps1*/0, 382 /*caps2*/0, 383 /*caps3*/0, 384 /*caps4*/0, 385 /*caps5*/0, 386 /*caps6*/0, 387 /*obsolete*/{0, 0}, 388 /*nvol_levels*/{0, 0}, 389 /*buffer_size*/{0, 0}, 390 /*obsolete2*/{0, 0}, 391 /*reserved*/0, 392 /*digital*/0, 393 /*obsolete3*/0, 394 /*copy_management*/0, 395 /*reserved2*/0, 396 /*rotation_control*/0, 397 /*cur_write_speed*/0, 398 /*num_speed_descr*/0, 399 }; 400 401 SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer"); 402 static int worker_threads = -1; 403 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, 404 &worker_threads, 1, "Number of worker threads"); 405 static int ctl_debug = CTL_DEBUG_NONE; 406 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN, 407 &ctl_debug, 0, "Enabled debug flags"); 408 static int ctl_lun_map_size = 1024; 409 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN, 410 &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)"); 411 #ifdef CTL_TIME_IO 412 static int ctl_time_io_secs = CTL_TIME_IO_DEFAULT_SECS; 413 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, time_io_secs, CTLFLAG_RWTUN, 414 &ctl_time_io_secs, 0, "Log requests taking more seconds"); 415 #endif 416 417 /* 418 * Supported pages (0x00), Serial number (0x80), Device ID (0x83), 419 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87), 420 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0), 421 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2) 422 */ 423 
#define SCSI_EVPD_NUM_SUPPORTED_PAGES 10 424 425 static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event, 426 int param); 427 static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); 428 static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest); 429 static int ctl_init(void); 430 static int ctl_shutdown(void); 431 static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); 432 static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); 433 static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio); 434 static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 435 struct ctl_ooa *ooa_hdr, 436 struct ctl_ooa_entry *kern_entries); 437 static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 438 struct thread *td); 439 static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 440 struct ctl_be_lun *be_lun); 441 static int ctl_free_lun(struct ctl_lun *lun); 442 static void ctl_create_lun(struct ctl_be_lun *be_lun); 443 444 static int ctl_do_mode_select(union ctl_io *io); 445 static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, 446 uint64_t res_key, uint64_t sa_res_key, 447 uint8_t type, uint32_t residx, 448 struct ctl_scsiio *ctsio, 449 struct scsi_per_res_out *cdb, 450 struct scsi_per_res_out_parms* param); 451 static void ctl_pro_preempt_other(struct ctl_lun *lun, 452 union ctl_ha_msg *msg); 453 static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io); 454 static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len); 455 static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len); 456 static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len); 457 static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len); 458 static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len); 459 static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, 460 int alloc_len); 461 static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, 462 int alloc_len); 463 static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len); 464 static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len); 465 static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio); 466 static int ctl_inquiry_std(struct ctl_scsiio *ctsio); 467 static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len); 468 static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, 469 bool seq); 470 static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2); 471 static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, 472 union ctl_io *pending_io, union ctl_io *ooa_io); 473 static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 474 union ctl_io *starting_io); 475 static int ctl_check_blocked(struct ctl_lun *lun); 476 static int ctl_scsiio_lun_check(struct ctl_lun *lun, 477 const struct ctl_cmd_entry *entry, 478 struct ctl_scsiio *ctsio); 479 static void ctl_failover_lun(union ctl_io *io); 480 static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, 481 struct ctl_scsiio *ctsio); 482 static int ctl_scsiio(struct ctl_scsiio *ctsio); 483 484 static int ctl_target_reset(union ctl_io *io); 485 static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, 486 ctl_ua_type ua_type); 487 static int ctl_lun_reset(union ctl_io *io); 488 static int ctl_abort_task(union ctl_io *io); 489 static int 
ctl_abort_task_set(union ctl_io *io); 490 static int ctl_query_task(union ctl_io *io, int task_set); 491 static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, 492 ctl_ua_type ua_type); 493 static int ctl_i_t_nexus_reset(union ctl_io *io); 494 static int ctl_query_async_event(union ctl_io *io); 495 static void ctl_run_task(union ctl_io *io); 496 #ifdef CTL_IO_DELAY 497 static void ctl_datamove_timer_wakeup(void *arg); 498 static void ctl_done_timer_wakeup(void *arg); 499 #endif /* CTL_IO_DELAY */ 500 501 static void ctl_send_datamove_done(union ctl_io *io, int have_lock); 502 static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq); 503 static int ctl_datamove_remote_dm_write_cb(union ctl_io *io); 504 static void ctl_datamove_remote_write(union ctl_io *io); 505 static int ctl_datamove_remote_dm_read_cb(union ctl_io *io); 506 static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq); 507 static int ctl_datamove_remote_sgl_setup(union ctl_io *io); 508 static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 509 ctl_ha_dt_cb callback); 510 static void ctl_datamove_remote_read(union ctl_io *io); 511 static void ctl_datamove_remote(union ctl_io *io); 512 static void ctl_process_done(union ctl_io *io); 513 static void ctl_lun_thread(void *arg); 514 static void ctl_thresh_thread(void *arg); 515 static void ctl_work_thread(void *arg); 516 static void ctl_enqueue_incoming(union ctl_io *io); 517 static void ctl_enqueue_rtr(union ctl_io *io); 518 static void ctl_enqueue_done(union ctl_io *io); 519 static void ctl_enqueue_isc(union ctl_io *io); 520 static const struct ctl_cmd_entry * 521 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); 522 static const struct ctl_cmd_entry * 523 ctl_validate_command(struct ctl_scsiio *ctsio); 524 static int ctl_cmd_applicable(uint8_t lun_type, 525 const struct ctl_cmd_entry *entry); 526 static int ctl_ha_init(void); 527 static int ctl_ha_shutdown(void); 528 529 static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx); 530 static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx); 531 static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx); 532 static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key); 533 534 /* 535 * Load the serialization table. This isn't very pretty, but is probably 536 * the easiest way to do it. 537 */ 538 #include "ctl_ser_table.c" 539 540 /* 541 * We only need to define open, close and ioctl routines for this driver. 
542 */ 543 static struct cdevsw ctl_cdevsw = { 544 .d_version = D_VERSION, 545 .d_flags = 0, 546 .d_open = ctl_open, 547 .d_close = ctl_close, 548 .d_ioctl = ctl_ioctl, 549 .d_name = "ctl", 550 }; 551 552 553 MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL"); 554 555 static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *); 556 557 static moduledata_t ctl_moduledata = { 558 "ctl", 559 ctl_module_event_handler, 560 NULL 561 }; 562 563 DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); 564 MODULE_VERSION(ctl, 1); 565 566 static struct ctl_frontend ha_frontend = 567 { 568 .name = "ha", 569 .init = ctl_ha_init, 570 .shutdown = ctl_ha_shutdown, 571 }; 572 573 static int 574 ctl_ha_init(void) 575 { 576 struct ctl_softc *softc = control_softc; 577 578 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, 579 &softc->othersc_pool) != 0) 580 return (ENOMEM); 581 if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) { 582 ctl_pool_free(softc->othersc_pool); 583 return (EIO); 584 } 585 if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler) 586 != CTL_HA_STATUS_SUCCESS) { 587 ctl_ha_msg_destroy(softc); 588 ctl_pool_free(softc->othersc_pool); 589 return (EIO); 590 } 591 return (0); 592 }; 593 594 static int 595 ctl_ha_shutdown(void) 596 { 597 struct ctl_softc *softc = control_softc; 598 struct ctl_port *port; 599 600 ctl_ha_msg_shutdown(softc); 601 if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS) 602 return (EIO); 603 if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS) 604 return (EIO); 605 ctl_pool_free(softc->othersc_pool); 606 while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) { 607 ctl_port_deregister(port); 608 free(port->port_name, M_CTL); 609 free(port, M_CTL); 610 } 611 return (0); 612 }; 613 614 static void 615 ctl_ha_datamove(union ctl_io *io) 616 { 617 struct ctl_lun *lun = CTL_LUN(io); 618 struct ctl_sg_entry *sgl; 619 union ctl_ha_msg msg; 620 uint32_t sg_entries_sent; 621 int do_sg_copy, i, j; 622 623 memset(&msg.dt, 0, sizeof(msg.dt)); 624 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 625 msg.hdr.original_sc = io->io_hdr.original_sc; 626 msg.hdr.serializing_sc = io; 627 msg.hdr.nexus = io->io_hdr.nexus; 628 msg.hdr.status = io->io_hdr.status; 629 msg.dt.flags = io->io_hdr.flags; 630 631 /* 632 * We convert everything into a S/G list here. We can't 633 * pass by reference, only by value between controllers. 634 * So we can't pass a pointer to the S/G list, only as many 635 * S/G entries as we can fit in here. If it's possible for 636 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 637 * then we need to break this up into multiple transfers. 638 */ 639 if (io->scsiio.kern_sg_entries == 0) { 640 msg.dt.kern_sg_entries = 1; 641 #if 0 642 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 643 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; 644 } else { 645 /* XXX KDM use busdma here! 
*/ 646 msg.dt.sg_list[0].addr = 647 (void *)vtophys(io->scsiio.kern_data_ptr); 648 } 649 #else 650 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 651 ("HA does not support BUS_ADDR")); 652 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; 653 #endif 654 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 655 do_sg_copy = 0; 656 } else { 657 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 658 do_sg_copy = 1; 659 } 660 661 msg.dt.kern_data_len = io->scsiio.kern_data_len; 662 msg.dt.kern_total_len = io->scsiio.kern_total_len; 663 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 664 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 665 msg.dt.sg_sequence = 0; 666 667 /* 668 * Loop until we've sent all of the S/G entries. On the 669 * other end, we'll recompose these S/G entries into one 670 * contiguous list before processing. 671 */ 672 for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries; 673 msg.dt.sg_sequence++) { 674 msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) / 675 sizeof(msg.dt.sg_list[0])), 676 msg.dt.kern_sg_entries - sg_entries_sent); 677 if (do_sg_copy != 0) { 678 sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 679 for (i = sg_entries_sent, j = 0; 680 i < msg.dt.cur_sg_entries; i++, j++) { 681 #if 0 682 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 683 msg.dt.sg_list[j].addr = sgl[i].addr; 684 } else { 685 /* XXX KDM use busdma here! */ 686 msg.dt.sg_list[j].addr = 687 (void *)vtophys(sgl[i].addr); 688 } 689 #else 690 KASSERT((io->io_hdr.flags & 691 CTL_FLAG_BUS_ADDR) == 0, 692 ("HA does not support BUS_ADDR")); 693 msg.dt.sg_list[j].addr = sgl[i].addr; 694 #endif 695 msg.dt.sg_list[j].len = sgl[i].len; 696 } 697 } 698 699 sg_entries_sent += msg.dt.cur_sg_entries; 700 msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries); 701 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 702 sizeof(msg.dt) - sizeof(msg.dt.sg_list) + 703 sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries, 704 M_WAITOK) > CTL_HA_STATUS_SUCCESS) { 705 io->io_hdr.port_status = 31341; 706 io->scsiio.be_move_done(io); 707 return; 708 } 709 msg.dt.sent_sg_entries = sg_entries_sent; 710 } 711 712 /* 713 * Officially handover the request from us to peer. 714 * If failover has just happened, then we must return error. 715 * If failover happen just after, then it is not our problem. 
716 */ 717 if (lun) 718 mtx_lock(&lun->lun_lock); 719 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 720 if (lun) 721 mtx_unlock(&lun->lun_lock); 722 io->io_hdr.port_status = 31342; 723 io->scsiio.be_move_done(io); 724 return; 725 } 726 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 727 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 728 if (lun) 729 mtx_unlock(&lun->lun_lock); 730 } 731 732 static void 733 ctl_ha_done(union ctl_io *io) 734 { 735 union ctl_ha_msg msg; 736 737 if (io->io_hdr.io_type == CTL_IO_SCSI) { 738 memset(&msg, 0, sizeof(msg)); 739 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 740 msg.hdr.original_sc = io->io_hdr.original_sc; 741 msg.hdr.nexus = io->io_hdr.nexus; 742 msg.hdr.status = io->io_hdr.status; 743 msg.scsi.scsi_status = io->scsiio.scsi_status; 744 msg.scsi.tag_num = io->scsiio.tag_num; 745 msg.scsi.tag_type = io->scsiio.tag_type; 746 msg.scsi.sense_len = io->scsiio.sense_len; 747 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 748 io->scsiio.sense_len); 749 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 750 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 751 msg.scsi.sense_len, M_WAITOK); 752 } 753 ctl_free_io(io); 754 } 755 756 static void 757 ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, 758 union ctl_ha_msg *msg_info) 759 { 760 struct ctl_scsiio *ctsio; 761 762 if (msg_info->hdr.original_sc == NULL) { 763 printf("%s: original_sc == NULL!\n", __func__); 764 /* XXX KDM now what? */ 765 return; 766 } 767 768 ctsio = &msg_info->hdr.original_sc->scsiio; 769 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 770 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 771 ctsio->io_hdr.status = msg_info->hdr.status; 772 ctsio->scsi_status = msg_info->scsi.scsi_status; 773 ctsio->sense_len = msg_info->scsi.sense_len; 774 memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, 775 msg_info->scsi.sense_len); 776 ctl_enqueue_isc((union ctl_io *)ctsio); 777 } 778 779 static void 780 ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, 781 union ctl_ha_msg *msg_info) 782 { 783 struct ctl_scsiio *ctsio; 784 785 if (msg_info->hdr.serializing_sc == NULL) { 786 printf("%s: serializing_sc == NULL!\n", __func__); 787 /* XXX KDM now what? 
*/ 788 return; 789 } 790 791 ctsio = &msg_info->hdr.serializing_sc->scsiio; 792 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 793 ctl_enqueue_isc((union ctl_io *)ctsio); 794 } 795 796 void 797 ctl_isc_announce_lun(struct ctl_lun *lun) 798 { 799 struct ctl_softc *softc = lun->ctl_softc; 800 union ctl_ha_msg *msg; 801 struct ctl_ha_msg_lun_pr_key pr_key; 802 int i, k; 803 804 if (softc->ha_link != CTL_HA_LINK_ONLINE) 805 return; 806 mtx_lock(&lun->lun_lock); 807 i = sizeof(msg->lun); 808 if (lun->lun_devid) 809 i += lun->lun_devid->len; 810 i += sizeof(pr_key) * lun->pr_key_count; 811 alloc: 812 mtx_unlock(&lun->lun_lock); 813 msg = malloc(i, M_CTL, M_WAITOK); 814 mtx_lock(&lun->lun_lock); 815 k = sizeof(msg->lun); 816 if (lun->lun_devid) 817 k += lun->lun_devid->len; 818 k += sizeof(pr_key) * lun->pr_key_count; 819 if (i < k) { 820 free(msg, M_CTL); 821 i = k; 822 goto alloc; 823 } 824 bzero(&msg->lun, sizeof(msg->lun)); 825 msg->hdr.msg_type = CTL_MSG_LUN_SYNC; 826 msg->hdr.nexus.targ_lun = lun->lun; 827 msg->hdr.nexus.targ_mapped_lun = lun->lun; 828 msg->lun.flags = lun->flags; 829 msg->lun.pr_generation = lun->pr_generation; 830 msg->lun.pr_res_idx = lun->pr_res_idx; 831 msg->lun.pr_res_type = lun->pr_res_type; 832 msg->lun.pr_key_count = lun->pr_key_count; 833 i = 0; 834 if (lun->lun_devid) { 835 msg->lun.lun_devid_len = lun->lun_devid->len; 836 memcpy(&msg->lun.data[i], lun->lun_devid->data, 837 msg->lun.lun_devid_len); 838 i += msg->lun.lun_devid_len; 839 } 840 for (k = 0; k < CTL_MAX_INITIATORS; k++) { 841 if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0) 842 continue; 843 pr_key.pr_iid = k; 844 memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key)); 845 i += sizeof(pr_key); 846 } 847 mtx_unlock(&lun->lun_lock); 848 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 849 M_WAITOK); 850 free(msg, M_CTL); 851 852 if (lun->flags & CTL_LUN_PRIMARY_SC) { 853 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 854 ctl_isc_announce_mode(lun, -1, 855 lun->mode_pages.index[i].page_code & SMPH_PC_MASK, 856 lun->mode_pages.index[i].subpage); 857 } 858 } 859 } 860 861 void 862 ctl_isc_announce_port(struct ctl_port *port) 863 { 864 struct ctl_softc *softc = port->ctl_softc; 865 union ctl_ha_msg *msg; 866 int i; 867 868 if (port->targ_port < softc->port_min || 869 port->targ_port >= softc->port_max || 870 softc->ha_link != CTL_HA_LINK_ONLINE) 871 return; 872 i = sizeof(msg->port) + strlen(port->port_name) + 1; 873 if (port->lun_map) 874 i += port->lun_map_size * sizeof(uint32_t); 875 if (port->port_devid) 876 i += port->port_devid->len; 877 if (port->target_devid) 878 i += port->target_devid->len; 879 if (port->init_devid) 880 i += port->init_devid->len; 881 msg = malloc(i, M_CTL, M_WAITOK); 882 bzero(&msg->port, sizeof(msg->port)); 883 msg->hdr.msg_type = CTL_MSG_PORT_SYNC; 884 msg->hdr.nexus.targ_port = port->targ_port; 885 msg->port.port_type = port->port_type; 886 msg->port.physical_port = port->physical_port; 887 msg->port.virtual_port = port->virtual_port; 888 msg->port.status = port->status; 889 i = 0; 890 msg->port.name_len = sprintf(&msg->port.data[i], 891 "%d:%s", softc->ha_id, port->port_name) + 1; 892 i += msg->port.name_len; 893 if (port->lun_map) { 894 msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t); 895 memcpy(&msg->port.data[i], port->lun_map, 896 msg->port.lun_map_len); 897 i += msg->port.lun_map_len; 898 } 899 if (port->port_devid) { 900 msg->port.port_devid_len = port->port_devid->len; 901 memcpy(&msg->port.data[i], port->port_devid->data, 902 
msg->port.port_devid_len); 903 i += msg->port.port_devid_len; 904 } 905 if (port->target_devid) { 906 msg->port.target_devid_len = port->target_devid->len; 907 memcpy(&msg->port.data[i], port->target_devid->data, 908 msg->port.target_devid_len); 909 i += msg->port.target_devid_len; 910 } 911 if (port->init_devid) { 912 msg->port.init_devid_len = port->init_devid->len; 913 memcpy(&msg->port.data[i], port->init_devid->data, 914 msg->port.init_devid_len); 915 i += msg->port.init_devid_len; 916 } 917 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 918 M_WAITOK); 919 free(msg, M_CTL); 920 } 921 922 void 923 ctl_isc_announce_iid(struct ctl_port *port, int iid) 924 { 925 struct ctl_softc *softc = port->ctl_softc; 926 union ctl_ha_msg *msg; 927 int i, l; 928 929 if (port->targ_port < softc->port_min || 930 port->targ_port >= softc->port_max || 931 softc->ha_link != CTL_HA_LINK_ONLINE) 932 return; 933 mtx_lock(&softc->ctl_lock); 934 i = sizeof(msg->iid); 935 l = 0; 936 if (port->wwpn_iid[iid].name) 937 l = strlen(port->wwpn_iid[iid].name) + 1; 938 i += l; 939 msg = malloc(i, M_CTL, M_NOWAIT); 940 if (msg == NULL) { 941 mtx_unlock(&softc->ctl_lock); 942 return; 943 } 944 bzero(&msg->iid, sizeof(msg->iid)); 945 msg->hdr.msg_type = CTL_MSG_IID_SYNC; 946 msg->hdr.nexus.targ_port = port->targ_port; 947 msg->hdr.nexus.initid = iid; 948 msg->iid.in_use = port->wwpn_iid[iid].in_use; 949 msg->iid.name_len = l; 950 msg->iid.wwpn = port->wwpn_iid[iid].wwpn; 951 if (port->wwpn_iid[iid].name) 952 strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l); 953 mtx_unlock(&softc->ctl_lock); 954 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT); 955 free(msg, M_CTL); 956 } 957 958 void 959 ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx, 960 uint8_t page, uint8_t subpage) 961 { 962 struct ctl_softc *softc = lun->ctl_softc; 963 union ctl_ha_msg msg; 964 u_int i; 965 966 if (softc->ha_link != CTL_HA_LINK_ONLINE) 967 return; 968 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 969 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == 970 page && lun->mode_pages.index[i].subpage == subpage) 971 break; 972 } 973 if (i == CTL_NUM_MODE_PAGES) 974 return; 975 976 /* Don't try to replicate pages not present on this device. */ 977 if (lun->mode_pages.index[i].page_data == NULL) 978 return; 979 980 bzero(&msg.mode, sizeof(msg.mode)); 981 msg.hdr.msg_type = CTL_MSG_MODE_SYNC; 982 msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT; 983 msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT; 984 msg.hdr.nexus.targ_lun = lun->lun; 985 msg.hdr.nexus.targ_mapped_lun = lun->lun; 986 msg.mode.page_code = page; 987 msg.mode.subpage = subpage; 988 msg.mode.page_len = lun->mode_pages.index[i].page_len; 989 memcpy(msg.mode.data, lun->mode_pages.index[i].page_data, 990 msg.mode.page_len); 991 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode), 992 M_WAITOK); 993 } 994 995 static void 996 ctl_isc_ha_link_up(struct ctl_softc *softc) 997 { 998 struct ctl_port *port; 999 struct ctl_lun *lun; 1000 union ctl_ha_msg msg; 1001 int i; 1002 1003 /* Announce this node parameters to peer for validation. 
*/ 1004 msg.login.msg_type = CTL_MSG_LOGIN; 1005 msg.login.version = CTL_HA_VERSION; 1006 msg.login.ha_mode = softc->ha_mode; 1007 msg.login.ha_id = softc->ha_id; 1008 msg.login.max_luns = CTL_MAX_LUNS; 1009 msg.login.max_ports = CTL_MAX_PORTS; 1010 msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT; 1011 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login), 1012 M_WAITOK); 1013 1014 STAILQ_FOREACH(port, &softc->port_list, links) { 1015 ctl_isc_announce_port(port); 1016 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1017 if (port->wwpn_iid[i].in_use) 1018 ctl_isc_announce_iid(port, i); 1019 } 1020 } 1021 STAILQ_FOREACH(lun, &softc->lun_list, links) 1022 ctl_isc_announce_lun(lun); 1023 } 1024 1025 static void 1026 ctl_isc_ha_link_down(struct ctl_softc *softc) 1027 { 1028 struct ctl_port *port; 1029 struct ctl_lun *lun; 1030 union ctl_io *io; 1031 int i; 1032 1033 mtx_lock(&softc->ctl_lock); 1034 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1035 mtx_lock(&lun->lun_lock); 1036 if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) { 1037 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; 1038 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 1039 } 1040 mtx_unlock(&lun->lun_lock); 1041 1042 mtx_unlock(&softc->ctl_lock); 1043 io = ctl_alloc_io(softc->othersc_pool); 1044 mtx_lock(&softc->ctl_lock); 1045 ctl_zero_io(io); 1046 io->io_hdr.msg_type = CTL_MSG_FAILOVER; 1047 io->io_hdr.nexus.targ_mapped_lun = lun->lun; 1048 ctl_enqueue_isc(io); 1049 } 1050 1051 STAILQ_FOREACH(port, &softc->port_list, links) { 1052 if (port->targ_port >= softc->port_min && 1053 port->targ_port < softc->port_max) 1054 continue; 1055 port->status &= ~CTL_PORT_STATUS_ONLINE; 1056 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1057 port->wwpn_iid[i].in_use = 0; 1058 free(port->wwpn_iid[i].name, M_CTL); 1059 port->wwpn_iid[i].name = NULL; 1060 } 1061 } 1062 mtx_unlock(&softc->ctl_lock); 1063 } 1064 1065 static void 1066 ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1067 { 1068 struct ctl_lun *lun; 1069 uint32_t iid = ctl_get_initindex(&msg->hdr.nexus); 1070 1071 mtx_lock(&softc->ctl_lock); 1072 if (msg->hdr.nexus.targ_mapped_lun >= CTL_MAX_LUNS || 1073 (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) { 1074 mtx_unlock(&softc->ctl_lock); 1075 return; 1076 } 1077 mtx_lock(&lun->lun_lock); 1078 mtx_unlock(&softc->ctl_lock); 1079 if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set) 1080 memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8); 1081 if (msg->ua.ua_all) { 1082 if (msg->ua.ua_set) 1083 ctl_est_ua_all(lun, iid, msg->ua.ua_type); 1084 else 1085 ctl_clr_ua_all(lun, iid, msg->ua.ua_type); 1086 } else { 1087 if (msg->ua.ua_set) 1088 ctl_est_ua(lun, iid, msg->ua.ua_type); 1089 else 1090 ctl_clr_ua(lun, iid, msg->ua.ua_type); 1091 } 1092 mtx_unlock(&lun->lun_lock); 1093 } 1094 1095 static void 1096 ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1097 { 1098 struct ctl_lun *lun; 1099 struct ctl_ha_msg_lun_pr_key pr_key; 1100 int i, k; 1101 ctl_lun_flags oflags; 1102 uint32_t targ_lun; 1103 1104 targ_lun = msg->hdr.nexus.targ_mapped_lun; 1105 mtx_lock(&softc->ctl_lock); 1106 if (targ_lun >= CTL_MAX_LUNS || 1107 (lun = softc->ctl_luns[targ_lun]) == NULL) { 1108 mtx_unlock(&softc->ctl_lock); 1109 return; 1110 } 1111 mtx_lock(&lun->lun_lock); 1112 mtx_unlock(&softc->ctl_lock); 1113 if (lun->flags & CTL_LUN_DISABLED) { 1114 mtx_unlock(&lun->lun_lock); 1115 return; 1116 } 1117 i = (lun->lun_devid != NULL) ? 
lun->lun_devid->len : 0; 1118 if (msg->lun.lun_devid_len != i || (i > 0 && 1119 memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) { 1120 mtx_unlock(&lun->lun_lock); 1121 printf("%s: Received conflicting HA LUN %d\n", 1122 __func__, targ_lun); 1123 return; 1124 } else { 1125 /* Record whether peer is primary. */ 1126 oflags = lun->flags; 1127 if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) && 1128 (msg->lun.flags & CTL_LUN_DISABLED) == 0) 1129 lun->flags |= CTL_LUN_PEER_SC_PRIMARY; 1130 else 1131 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; 1132 if (oflags != lun->flags) 1133 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 1134 1135 /* If peer is primary and we are not -- use data */ 1136 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 1137 (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) { 1138 lun->pr_generation = msg->lun.pr_generation; 1139 lun->pr_res_idx = msg->lun.pr_res_idx; 1140 lun->pr_res_type = msg->lun.pr_res_type; 1141 lun->pr_key_count = msg->lun.pr_key_count; 1142 for (k = 0; k < CTL_MAX_INITIATORS; k++) 1143 ctl_clr_prkey(lun, k); 1144 for (k = 0; k < msg->lun.pr_key_count; k++) { 1145 memcpy(&pr_key, &msg->lun.data[i], 1146 sizeof(pr_key)); 1147 ctl_alloc_prkey(lun, pr_key.pr_iid); 1148 ctl_set_prkey(lun, pr_key.pr_iid, 1149 pr_key.pr_key); 1150 i += sizeof(pr_key); 1151 } 1152 } 1153 1154 mtx_unlock(&lun->lun_lock); 1155 CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n", 1156 __func__, targ_lun, 1157 (msg->lun.flags & CTL_LUN_PRIMARY_SC) ? 1158 "primary" : "secondary")); 1159 1160 /* If we are primary but peer doesn't know -- notify */ 1161 if ((lun->flags & CTL_LUN_PRIMARY_SC) && 1162 (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0) 1163 ctl_isc_announce_lun(lun); 1164 } 1165 } 1166 1167 static void 1168 ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1169 { 1170 struct ctl_port *port; 1171 struct ctl_lun *lun; 1172 int i, new; 1173 1174 port = softc->ctl_ports[msg->hdr.nexus.targ_port]; 1175 if (port == NULL) { 1176 CTL_DEBUG_PRINT(("%s: New port %d\n", __func__, 1177 msg->hdr.nexus.targ_port)); 1178 new = 1; 1179 port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO); 1180 port->frontend = &ha_frontend; 1181 port->targ_port = msg->hdr.nexus.targ_port; 1182 port->fe_datamove = ctl_ha_datamove; 1183 port->fe_done = ctl_ha_done; 1184 } else if (port->frontend == &ha_frontend) { 1185 CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__, 1186 msg->hdr.nexus.targ_port)); 1187 new = 0; 1188 } else { 1189 printf("%s: Received conflicting HA port %d\n", 1190 __func__, msg->hdr.nexus.targ_port); 1191 return; 1192 } 1193 port->port_type = msg->port.port_type; 1194 port->physical_port = msg->port.physical_port; 1195 port->virtual_port = msg->port.virtual_port; 1196 port->status = msg->port.status; 1197 i = 0; 1198 free(port->port_name, M_CTL); 1199 port->port_name = strndup(&msg->port.data[i], msg->port.name_len, 1200 M_CTL); 1201 i += msg->port.name_len; 1202 if (msg->port.lun_map_len != 0) { 1203 if (port->lun_map == NULL || 1204 port->lun_map_size * sizeof(uint32_t) < 1205 msg->port.lun_map_len) { 1206 port->lun_map_size = 0; 1207 free(port->lun_map, M_CTL); 1208 port->lun_map = malloc(msg->port.lun_map_len, 1209 M_CTL, M_WAITOK); 1210 } 1211 memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len); 1212 port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t); 1213 i += msg->port.lun_map_len; 1214 } else { 1215 port->lun_map_size = 0; 1216 free(port->lun_map, M_CTL); 1217 port->lun_map = NULL; 1218 } 1219 if (msg->port.port_devid_len != 0) { 1220 if 
(port->port_devid == NULL || 1221 port->port_devid->len < msg->port.port_devid_len) { 1222 free(port->port_devid, M_CTL); 1223 port->port_devid = malloc(sizeof(struct ctl_devid) + 1224 msg->port.port_devid_len, M_CTL, M_WAITOK); 1225 } 1226 memcpy(port->port_devid->data, &msg->port.data[i], 1227 msg->port.port_devid_len); 1228 port->port_devid->len = msg->port.port_devid_len; 1229 i += msg->port.port_devid_len; 1230 } else { 1231 free(port->port_devid, M_CTL); 1232 port->port_devid = NULL; 1233 } 1234 if (msg->port.target_devid_len != 0) { 1235 if (port->target_devid == NULL || 1236 port->target_devid->len < msg->port.target_devid_len) { 1237 free(port->target_devid, M_CTL); 1238 port->target_devid = malloc(sizeof(struct ctl_devid) + 1239 msg->port.target_devid_len, M_CTL, M_WAITOK); 1240 } 1241 memcpy(port->target_devid->data, &msg->port.data[i], 1242 msg->port.target_devid_len); 1243 port->target_devid->len = msg->port.target_devid_len; 1244 i += msg->port.target_devid_len; 1245 } else { 1246 free(port->target_devid, M_CTL); 1247 port->target_devid = NULL; 1248 } 1249 if (msg->port.init_devid_len != 0) { 1250 if (port->init_devid == NULL || 1251 port->init_devid->len < msg->port.init_devid_len) { 1252 free(port->init_devid, M_CTL); 1253 port->init_devid = malloc(sizeof(struct ctl_devid) + 1254 msg->port.init_devid_len, M_CTL, M_WAITOK); 1255 } 1256 memcpy(port->init_devid->data, &msg->port.data[i], 1257 msg->port.init_devid_len); 1258 port->init_devid->len = msg->port.init_devid_len; 1259 i += msg->port.init_devid_len; 1260 } else { 1261 free(port->init_devid, M_CTL); 1262 port->init_devid = NULL; 1263 } 1264 if (new) { 1265 if (ctl_port_register(port) != 0) { 1266 printf("%s: ctl_port_register() failed with error\n", 1267 __func__); 1268 } 1269 } 1270 mtx_lock(&softc->ctl_lock); 1271 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1272 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 1273 continue; 1274 mtx_lock(&lun->lun_lock); 1275 ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE); 1276 mtx_unlock(&lun->lun_lock); 1277 } 1278 mtx_unlock(&softc->ctl_lock); 1279 } 1280 1281 static void 1282 ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1283 { 1284 struct ctl_port *port; 1285 int iid; 1286 1287 port = softc->ctl_ports[msg->hdr.nexus.targ_port]; 1288 if (port == NULL) { 1289 printf("%s: Received IID for unknown port %d\n", 1290 __func__, msg->hdr.nexus.targ_port); 1291 return; 1292 } 1293 iid = msg->hdr.nexus.initid; 1294 if (port->wwpn_iid[iid].in_use != 0 && 1295 msg->iid.in_use == 0) 1296 ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); 1297 port->wwpn_iid[iid].in_use = msg->iid.in_use; 1298 port->wwpn_iid[iid].wwpn = msg->iid.wwpn; 1299 free(port->wwpn_iid[iid].name, M_CTL); 1300 if (msg->iid.name_len) { 1301 port->wwpn_iid[iid].name = strndup(&msg->iid.data[0], 1302 msg->iid.name_len, M_CTL); 1303 } else 1304 port->wwpn_iid[iid].name = NULL; 1305 } 1306 1307 static void 1308 ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1309 { 1310 1311 if (msg->login.version != CTL_HA_VERSION) { 1312 printf("CTL HA peers have different versions %d != %d\n", 1313 msg->login.version, CTL_HA_VERSION); 1314 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1315 return; 1316 } 1317 if (msg->login.ha_mode != softc->ha_mode) { 1318 printf("CTL HA peers have different ha_mode %d != %d\n", 1319 msg->login.ha_mode, softc->ha_mode); 1320 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1321 return; 1322 } 1323 if (msg->login.ha_id == softc->ha_id) { 1324 printf("CTL HA peers have same 
ha_id %d\n", msg->login.ha_id); 1325 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1326 return; 1327 } 1328 if (msg->login.max_luns != CTL_MAX_LUNS || 1329 msg->login.max_ports != CTL_MAX_PORTS || 1330 msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) { 1331 printf("CTL HA peers have different limits\n"); 1332 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1333 return; 1334 } 1335 } 1336 1337 static void 1338 ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1339 { 1340 struct ctl_lun *lun; 1341 u_int i; 1342 uint32_t initidx, targ_lun; 1343 1344 targ_lun = msg->hdr.nexus.targ_mapped_lun; 1345 mtx_lock(&softc->ctl_lock); 1346 if (targ_lun >= CTL_MAX_LUNS || 1347 (lun = softc->ctl_luns[targ_lun]) == NULL) { 1348 mtx_unlock(&softc->ctl_lock); 1349 return; 1350 } 1351 mtx_lock(&lun->lun_lock); 1352 mtx_unlock(&softc->ctl_lock); 1353 if (lun->flags & CTL_LUN_DISABLED) { 1354 mtx_unlock(&lun->lun_lock); 1355 return; 1356 } 1357 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 1358 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == 1359 msg->mode.page_code && 1360 lun->mode_pages.index[i].subpage == msg->mode.subpage) 1361 break; 1362 } 1363 if (i == CTL_NUM_MODE_PAGES) { 1364 mtx_unlock(&lun->lun_lock); 1365 return; 1366 } 1367 memcpy(lun->mode_pages.index[i].page_data, msg->mode.data, 1368 lun->mode_pages.index[i].page_len); 1369 initidx = ctl_get_initindex(&msg->hdr.nexus); 1370 if (initidx != -1) 1371 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 1372 mtx_unlock(&lun->lun_lock); 1373 } 1374 1375 /* 1376 * ISC (Inter Shelf Communication) event handler. Events from the HA 1377 * subsystem come in here. 1378 */ 1379 static void 1380 ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) 1381 { 1382 struct ctl_softc *softc = control_softc; 1383 union ctl_io *io; 1384 struct ctl_prio *presio; 1385 ctl_ha_status isc_status; 1386 1387 CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event)); 1388 if (event == CTL_HA_EVT_MSG_RECV) { 1389 union ctl_ha_msg *msg, msgbuf; 1390 1391 if (param > sizeof(msgbuf)) 1392 msg = malloc(param, M_CTL, M_WAITOK); 1393 else 1394 msg = &msgbuf; 1395 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param, 1396 M_WAITOK); 1397 if (isc_status != CTL_HA_STATUS_SUCCESS) { 1398 printf("%s: Error receiving message: %d\n", 1399 __func__, isc_status); 1400 if (msg != &msgbuf) 1401 free(msg, M_CTL); 1402 return; 1403 } 1404 1405 CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type)); 1406 switch (msg->hdr.msg_type) { 1407 case CTL_MSG_SERIALIZE: 1408 io = ctl_alloc_io(softc->othersc_pool); 1409 ctl_zero_io(io); 1410 // populate ctsio from msg 1411 io->io_hdr.io_type = CTL_IO_SCSI; 1412 io->io_hdr.msg_type = CTL_MSG_SERIALIZE; 1413 io->io_hdr.original_sc = msg->hdr.original_sc; 1414 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | 1415 CTL_FLAG_IO_ACTIVE; 1416 /* 1417 * If we're in serialization-only mode, we don't 1418 * want to go through full done processing. Thus 1419 * the COPY flag. 1420 * 1421 * XXX KDM add another flag that is more specific. 
1422 */ 1423 if (softc->ha_mode != CTL_HA_MODE_XFER) 1424 io->io_hdr.flags |= CTL_FLAG_INT_COPY; 1425 io->io_hdr.nexus = msg->hdr.nexus; 1426 #if 0 1427 printf("port %u, iid %u, lun %u\n", 1428 io->io_hdr.nexus.targ_port, 1429 io->io_hdr.nexus.initid, 1430 io->io_hdr.nexus.targ_lun); 1431 #endif 1432 io->scsiio.tag_num = msg->scsi.tag_num; 1433 io->scsiio.tag_type = msg->scsi.tag_type; 1434 #ifdef CTL_TIME_IO 1435 io->io_hdr.start_time = time_uptime; 1436 getbinuptime(&io->io_hdr.start_bt); 1437 #endif /* CTL_TIME_IO */ 1438 io->scsiio.cdb_len = msg->scsi.cdb_len; 1439 memcpy(io->scsiio.cdb, msg->scsi.cdb, 1440 CTL_MAX_CDBLEN); 1441 if (softc->ha_mode == CTL_HA_MODE_XFER) { 1442 const struct ctl_cmd_entry *entry; 1443 1444 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 1445 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 1446 io->io_hdr.flags |= 1447 entry->flags & CTL_FLAG_DATA_MASK; 1448 } 1449 ctl_enqueue_isc(io); 1450 break; 1451 1452 /* Performed on the Originating SC, XFER mode only */ 1453 case CTL_MSG_DATAMOVE: { 1454 struct ctl_sg_entry *sgl; 1455 int i, j; 1456 1457 io = msg->hdr.original_sc; 1458 if (io == NULL) { 1459 printf("%s: original_sc == NULL!\n", __func__); 1460 /* XXX KDM do something here */ 1461 break; 1462 } 1463 io->io_hdr.msg_type = CTL_MSG_DATAMOVE; 1464 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1465 /* 1466 * Keep track of this, we need to send it back over 1467 * when the datamove is complete. 1468 */ 1469 io->io_hdr.serializing_sc = msg->hdr.serializing_sc; 1470 if (msg->hdr.status == CTL_SUCCESS) 1471 io->io_hdr.status = msg->hdr.status; 1472 1473 if (msg->dt.sg_sequence == 0) { 1474 #ifdef CTL_TIME_IO 1475 getbinuptime(&io->io_hdr.dma_start_bt); 1476 #endif 1477 i = msg->dt.kern_sg_entries + 1478 msg->dt.kern_data_len / 1479 CTL_HA_DATAMOVE_SEGMENT + 1; 1480 sgl = malloc(sizeof(*sgl) * i, M_CTL, 1481 M_WAITOK | M_ZERO); 1482 io->io_hdr.remote_sglist = sgl; 1483 io->io_hdr.local_sglist = 1484 &sgl[msg->dt.kern_sg_entries]; 1485 1486 io->scsiio.kern_data_ptr = (uint8_t *)sgl; 1487 1488 io->scsiio.kern_sg_entries = 1489 msg->dt.kern_sg_entries; 1490 io->scsiio.rem_sg_entries = 1491 msg->dt.kern_sg_entries; 1492 io->scsiio.kern_data_len = 1493 msg->dt.kern_data_len; 1494 io->scsiio.kern_total_len = 1495 msg->dt.kern_total_len; 1496 io->scsiio.kern_data_resid = 1497 msg->dt.kern_data_resid; 1498 io->scsiio.kern_rel_offset = 1499 msg->dt.kern_rel_offset; 1500 io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR; 1501 io->io_hdr.flags |= msg->dt.flags & 1502 CTL_FLAG_BUS_ADDR; 1503 } else 1504 sgl = (struct ctl_sg_entry *) 1505 io->scsiio.kern_data_ptr; 1506 1507 for (i = msg->dt.sent_sg_entries, j = 0; 1508 i < (msg->dt.sent_sg_entries + 1509 msg->dt.cur_sg_entries); i++, j++) { 1510 sgl[i].addr = msg->dt.sg_list[j].addr; 1511 sgl[i].len = msg->dt.sg_list[j].len; 1512 1513 #if 0 1514 printf("%s: DATAMOVE: %p,%lu j=%d, i=%d\n", 1515 __func__, sgl[i].addr, sgl[i].len, j, i); 1516 #endif 1517 } 1518 1519 /* 1520 * If this is the last piece of the I/O, we've got 1521 * the full S/G list. Queue processing in the thread. 1522 * Otherwise wait for the next piece. 1523 */ 1524 if (msg->dt.sg_last != 0) 1525 ctl_enqueue_isc(io); 1526 break; 1527 } 1528 /* Performed on the Serializing (primary) SC, XFER mode only */ 1529 case CTL_MSG_DATAMOVE_DONE: { 1530 if (msg->hdr.serializing_sc == NULL) { 1531 printf("%s: serializing_sc == NULL!\n", 1532 __func__); 1533 /* XXX KDM now what? 
*/ 1534 break; 1535 } 1536 /* 1537 * We grab the sense information here in case 1538 * there was a failure, so we can return status 1539 * back to the initiator. 1540 */ 1541 io = msg->hdr.serializing_sc; 1542 io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 1543 io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; 1544 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1545 io->io_hdr.port_status = msg->scsi.port_status; 1546 io->scsiio.kern_data_resid = msg->scsi.kern_data_resid; 1547 if (msg->hdr.status != CTL_STATUS_NONE) { 1548 io->io_hdr.status = msg->hdr.status; 1549 io->scsiio.scsi_status = msg->scsi.scsi_status; 1550 io->scsiio.sense_len = msg->scsi.sense_len; 1551 memcpy(&io->scsiio.sense_data, 1552 &msg->scsi.sense_data, 1553 msg->scsi.sense_len); 1554 if (msg->hdr.status == CTL_SUCCESS) 1555 io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; 1556 } 1557 ctl_enqueue_isc(io); 1558 break; 1559 } 1560 1561 /* Preformed on Originating SC, SER_ONLY mode */ 1562 case CTL_MSG_R2R: 1563 io = msg->hdr.original_sc; 1564 if (io == NULL) { 1565 printf("%s: original_sc == NULL!\n", 1566 __func__); 1567 break; 1568 } 1569 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1570 io->io_hdr.msg_type = CTL_MSG_R2R; 1571 io->io_hdr.serializing_sc = msg->hdr.serializing_sc; 1572 ctl_enqueue_isc(io); 1573 break; 1574 1575 /* 1576 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY 1577 * mode. 1578 * Performed on the Originating (i.e. secondary) SC in XFER 1579 * mode 1580 */ 1581 case CTL_MSG_FINISH_IO: 1582 if (softc->ha_mode == CTL_HA_MODE_XFER) 1583 ctl_isc_handler_finish_xfer(softc, msg); 1584 else 1585 ctl_isc_handler_finish_ser_only(softc, msg); 1586 break; 1587 1588 /* Preformed on Originating SC */ 1589 case CTL_MSG_BAD_JUJU: 1590 io = msg->hdr.original_sc; 1591 if (io == NULL) { 1592 printf("%s: Bad JUJU!, original_sc is NULL!\n", 1593 __func__); 1594 break; 1595 } 1596 ctl_copy_sense_data(msg, io); 1597 /* 1598 * IO should have already been cleaned up on other 1599 * SC so clear this flag so we won't send a message 1600 * back to finish the IO there. 
1601 */ 1602 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 1603 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1604 1605 /* io = msg->hdr.serializing_sc; */ 1606 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 1607 ctl_enqueue_isc(io); 1608 break; 1609 1610 /* Handle resets sent from the other side */ 1611 case CTL_MSG_MANAGE_TASKS: { 1612 struct ctl_taskio *taskio; 1613 taskio = (struct ctl_taskio *)ctl_alloc_io( 1614 softc->othersc_pool); 1615 ctl_zero_io((union ctl_io *)taskio); 1616 taskio->io_hdr.io_type = CTL_IO_TASK; 1617 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1618 taskio->io_hdr.nexus = msg->hdr.nexus; 1619 taskio->task_action = msg->task.task_action; 1620 taskio->tag_num = msg->task.tag_num; 1621 taskio->tag_type = msg->task.tag_type; 1622 #ifdef CTL_TIME_IO 1623 taskio->io_hdr.start_time = time_uptime; 1624 getbinuptime(&taskio->io_hdr.start_bt); 1625 #endif /* CTL_TIME_IO */ 1626 ctl_run_task((union ctl_io *)taskio); 1627 break; 1628 } 1629 /* Persistent Reserve action which needs attention */ 1630 case CTL_MSG_PERS_ACTION: 1631 presio = (struct ctl_prio *)ctl_alloc_io( 1632 softc->othersc_pool); 1633 ctl_zero_io((union ctl_io *)presio); 1634 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; 1635 presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1636 presio->io_hdr.nexus = msg->hdr.nexus; 1637 presio->pr_msg = msg->pr; 1638 ctl_enqueue_isc((union ctl_io *)presio); 1639 break; 1640 case CTL_MSG_UA: 1641 ctl_isc_ua(softc, msg, param); 1642 break; 1643 case CTL_MSG_PORT_SYNC: 1644 ctl_isc_port_sync(softc, msg, param); 1645 break; 1646 case CTL_MSG_LUN_SYNC: 1647 ctl_isc_lun_sync(softc, msg, param); 1648 break; 1649 case CTL_MSG_IID_SYNC: 1650 ctl_isc_iid_sync(softc, msg, param); 1651 break; 1652 case CTL_MSG_LOGIN: 1653 ctl_isc_login(softc, msg, param); 1654 break; 1655 case CTL_MSG_MODE_SYNC: 1656 ctl_isc_mode_sync(softc, msg, param); 1657 break; 1658 default: 1659 printf("Received HA message of unknown type %d\n", 1660 msg->hdr.msg_type); 1661 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1662 break; 1663 } 1664 if (msg != &msgbuf) 1665 free(msg, M_CTL); 1666 } else if (event == CTL_HA_EVT_LINK_CHANGE) { 1667 printf("CTL: HA link status changed from %d to %d\n", 1668 softc->ha_link, param); 1669 if (param == softc->ha_link) 1670 return; 1671 if (softc->ha_link == CTL_HA_LINK_ONLINE) { 1672 softc->ha_link = param; 1673 ctl_isc_ha_link_down(softc); 1674 } else { 1675 softc->ha_link = param; 1676 if (softc->ha_link == CTL_HA_LINK_ONLINE) 1677 ctl_isc_ha_link_up(softc); 1678 } 1679 return; 1680 } else { 1681 printf("ctl_isc_event_handler: Unknown event %d\n", event); 1682 return; 1683 } 1684 } 1685 1686 static void 1687 ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) 1688 { 1689 1690 memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data, 1691 src->scsi.sense_len); 1692 dest->scsiio.scsi_status = src->scsi.scsi_status; 1693 dest->scsiio.sense_len = src->scsi.sense_len; 1694 dest->io_hdr.status = src->hdr.status; 1695 } 1696 1697 static void 1698 ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest) 1699 { 1700 1701 memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data, 1702 src->scsiio.sense_len); 1703 dest->scsi.scsi_status = src->scsiio.scsi_status; 1704 dest->scsi.sense_len = src->scsiio.sense_len; 1705 dest->hdr.status = src->io_hdr.status; 1706 } 1707 1708 void 1709 ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1710 { 1711 struct ctl_softc *softc = lun->ctl_softc; 1712 ctl_ua_type *pu; 1713 1714 if (initidx < softc->init_min || initidx >= 
softc->init_max) 1715 return; 1716 mtx_assert(&lun->lun_lock, MA_OWNED); 1717 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1718 if (pu == NULL) 1719 return; 1720 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; 1721 } 1722 1723 void 1724 ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua) 1725 { 1726 int i; 1727 1728 mtx_assert(&lun->lun_lock, MA_OWNED); 1729 if (lun->pending_ua[port] == NULL) 1730 return; 1731 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1732 if (port * CTL_MAX_INIT_PER_PORT + i == except) 1733 continue; 1734 lun->pending_ua[port][i] |= ua; 1735 } 1736 } 1737 1738 void 1739 ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1740 { 1741 struct ctl_softc *softc = lun->ctl_softc; 1742 int i; 1743 1744 mtx_assert(&lun->lun_lock, MA_OWNED); 1745 for (i = softc->port_min; i < softc->port_max; i++) 1746 ctl_est_ua_port(lun, i, except, ua); 1747 } 1748 1749 void 1750 ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1751 { 1752 struct ctl_softc *softc = lun->ctl_softc; 1753 ctl_ua_type *pu; 1754 1755 if (initidx < softc->init_min || initidx >= softc->init_max) 1756 return; 1757 mtx_assert(&lun->lun_lock, MA_OWNED); 1758 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1759 if (pu == NULL) 1760 return; 1761 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; 1762 } 1763 1764 void 1765 ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1766 { 1767 struct ctl_softc *softc = lun->ctl_softc; 1768 int i, j; 1769 1770 mtx_assert(&lun->lun_lock, MA_OWNED); 1771 for (i = softc->port_min; i < softc->port_max; i++) { 1772 if (lun->pending_ua[i] == NULL) 1773 continue; 1774 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1775 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1776 continue; 1777 lun->pending_ua[i][j] &= ~ua; 1778 } 1779 } 1780 } 1781 1782 void 1783 ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, 1784 ctl_ua_type ua_type) 1785 { 1786 struct ctl_lun *lun; 1787 1788 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); 1789 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 1790 mtx_lock(&lun->lun_lock); 1791 ctl_clr_ua(lun, initidx, ua_type); 1792 mtx_unlock(&lun->lun_lock); 1793 } 1794 } 1795 1796 static int 1797 ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS) 1798 { 1799 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1800 struct ctl_lun *lun; 1801 struct ctl_lun_req ireq; 1802 int error, value; 1803 1804 value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 
0 : 1; 1805 error = sysctl_handle_int(oidp, &value, 0, req); 1806 if ((error != 0) || (req->newptr == NULL)) 1807 return (error); 1808 1809 mtx_lock(&softc->ctl_lock); 1810 if (value == 0) 1811 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1812 else 1813 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1814 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1815 mtx_unlock(&softc->ctl_lock); 1816 bzero(&ireq, sizeof(ireq)); 1817 ireq.reqtype = CTL_LUNREQ_MODIFY; 1818 ireq.reqdata.modify.lun_id = lun->lun; 1819 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, 1820 curthread); 1821 if (ireq.status != CTL_LUN_OK) { 1822 printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", 1823 __func__, ireq.status, ireq.error_str); 1824 } 1825 mtx_lock(&softc->ctl_lock); 1826 } 1827 mtx_unlock(&softc->ctl_lock); 1828 return (0); 1829 } 1830 1831 static int 1832 ctl_init(void) 1833 { 1834 struct make_dev_args args; 1835 struct ctl_softc *softc; 1836 int i, error; 1837 1838 softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1839 M_WAITOK | M_ZERO); 1840 1841 make_dev_args_init(&args); 1842 args.mda_devsw = &ctl_cdevsw; 1843 args.mda_uid = UID_ROOT; 1844 args.mda_gid = GID_OPERATOR; 1845 args.mda_mode = 0600; 1846 args.mda_si_drv1 = softc; 1847 error = make_dev_s(&args, &softc->dev, "cam/ctl"); 1848 if (error != 0) { 1849 free(softc, M_DEVBUF); 1850 control_softc = NULL; 1851 return (error); 1852 } 1853 1854 sysctl_ctx_init(&softc->sysctl_ctx); 1855 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1856 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1857 CTLFLAG_RD, 0, "CAM Target Layer"); 1858 1859 if (softc->sysctl_tree == NULL) { 1860 printf("%s: unable to allocate sysctl tree\n", __func__); 1861 destroy_dev(softc->dev); 1862 free(softc, M_DEVBUF); 1863 control_softc = NULL; 1864 return (ENOMEM); 1865 } 1866 1867 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1868 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1869 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1870 softc->flags = 0; 1871 1872 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1873 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, 1874 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); 1875 1876 /* 1877 * In Copan's HA scheme, the "master" and "slave" roles are 1878 * figured out through the slot the controller is in. Although it 1879 * is an active/active system, someone has to be in charge. 
1880 */ 1881 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1882 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1883 "HA head ID (0 - no HA)"); 1884 if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) { 1885 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1886 softc->is_single = 1; 1887 softc->port_cnt = CTL_MAX_PORTS; 1888 softc->port_min = 0; 1889 } else { 1890 softc->port_cnt = CTL_MAX_PORTS / NUM_HA_SHELVES; 1891 softc->port_min = (softc->ha_id - 1) * softc->port_cnt; 1892 } 1893 softc->port_max = softc->port_min + softc->port_cnt; 1894 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; 1895 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; 1896 1897 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1898 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, 1899 "HA link state (0 - offline, 1 - unknown, 2 - online)"); 1900 1901 STAILQ_INIT(&softc->lun_list); 1902 STAILQ_INIT(&softc->pending_lun_queue); 1903 STAILQ_INIT(&softc->fe_list); 1904 STAILQ_INIT(&softc->port_list); 1905 STAILQ_INIT(&softc->be_list); 1906 ctl_tpc_init(softc); 1907 1908 if (worker_threads <= 0) 1909 worker_threads = max(1, mp_ncpus / 4); 1910 if (worker_threads > CTL_MAX_THREADS) 1911 worker_threads = CTL_MAX_THREADS; 1912 1913 for (i = 0; i < worker_threads; i++) { 1914 struct ctl_thread *thr = &softc->threads[i]; 1915 1916 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); 1917 thr->ctl_softc = softc; 1918 STAILQ_INIT(&thr->incoming_queue); 1919 STAILQ_INIT(&thr->rtr_queue); 1920 STAILQ_INIT(&thr->done_queue); 1921 STAILQ_INIT(&thr->isc_queue); 1922 1923 error = kproc_kthread_add(ctl_work_thread, thr, 1924 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); 1925 if (error != 0) { 1926 printf("error creating CTL work thread!\n"); 1927 return (error); 1928 } 1929 } 1930 error = kproc_kthread_add(ctl_lun_thread, softc, 1931 &softc->ctl_proc, &softc->lun_thread, 0, 0, "ctl", "lun"); 1932 if (error != 0) { 1933 printf("error creating CTL lun thread!\n"); 1934 return (error); 1935 } 1936 error = kproc_kthread_add(ctl_thresh_thread, softc, 1937 &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh"); 1938 if (error != 0) { 1939 printf("error creating CTL threshold thread!\n"); 1940 return (error); 1941 } 1942 1943 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), 1944 OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN, 1945 softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); 1946 1947 if (softc->is_single == 0) { 1948 if (ctl_frontend_register(&ha_frontend) != 0) 1949 softc->is_single = 1; 1950 } 1951 return (0); 1952 } 1953 1954 static int 1955 ctl_shutdown(void) 1956 { 1957 struct ctl_softc *softc = control_softc; 1958 int i; 1959 1960 if (softc->is_single == 0) 1961 ctl_frontend_deregister(&ha_frontend); 1962 1963 destroy_dev(softc->dev); 1964 1965 /* Shutdown CTL threads. 
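 * Set softc->shutdown, wake each thread, and poll its thread pointer
 * until the thread has exited and cleared it.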
*/ 1966 softc->shutdown = 1; 1967 for (i = 0; i < worker_threads; i++) { 1968 struct ctl_thread *thr = &softc->threads[i]; 1969 while (thr->thread != NULL) { 1970 wakeup(thr); 1971 if (thr->thread != NULL) 1972 pause("CTL thr shutdown", 1); 1973 } 1974 mtx_destroy(&thr->queue_lock); 1975 } 1976 while (softc->lun_thread != NULL) { 1977 wakeup(&softc->pending_lun_queue); 1978 if (softc->lun_thread != NULL) 1979 pause("CTL thr shutdown", 1); 1980 } 1981 while (softc->thresh_thread != NULL) { 1982 wakeup(softc->thresh_thread); 1983 if (softc->thresh_thread != NULL) 1984 pause("CTL thr shutdown", 1); 1985 } 1986 1987 ctl_tpc_shutdown(softc); 1988 uma_zdestroy(softc->io_zone); 1989 mtx_destroy(&softc->ctl_lock); 1990 1991 sysctl_ctx_free(&softc->sysctl_ctx); 1992 1993 free(softc, M_DEVBUF); 1994 control_softc = NULL; 1995 return (0); 1996 } 1997 1998 static int 1999 ctl_module_event_handler(module_t mod, int what, void *arg) 2000 { 2001 2002 switch (what) { 2003 case MOD_LOAD: 2004 return (ctl_init()); 2005 case MOD_UNLOAD: 2006 return (ctl_shutdown()); 2007 default: 2008 return (EOPNOTSUPP); 2009 } 2010 } 2011 2012 /* 2013 * XXX KDM should we do some access checks here? Bump a reference count to 2014 * prevent a CTL module from being unloaded while someone has it open? 2015 */ 2016 static int 2017 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 2018 { 2019 return (0); 2020 } 2021 2022 static int 2023 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 2024 { 2025 return (0); 2026 } 2027 2028 /* 2029 * Remove an initiator by port number and initiator ID. 2030 * Returns 0 for success, -1 for failure. 2031 */ 2032 int 2033 ctl_remove_initiator(struct ctl_port *port, int iid) 2034 { 2035 struct ctl_softc *softc = port->ctl_softc; 2036 int last; 2037 2038 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2039 2040 if (iid > CTL_MAX_INIT_PER_PORT) { 2041 printf("%s: initiator ID %u > maximun %u!\n", 2042 __func__, iid, CTL_MAX_INIT_PER_PORT); 2043 return (-1); 2044 } 2045 2046 mtx_lock(&softc->ctl_lock); 2047 last = (--port->wwpn_iid[iid].in_use == 0); 2048 port->wwpn_iid[iid].last_use = time_uptime; 2049 mtx_unlock(&softc->ctl_lock); 2050 if (last) 2051 ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); 2052 ctl_isc_announce_iid(port, iid); 2053 2054 return (0); 2055 } 2056 2057 /* 2058 * Add an initiator to the initiator map. 2059 * Returns iid for success, < 0 for failure. 
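 * (-1 means the iid was out of range, -2 that no free slot could be found.)
 * The name string, if non-NULL, must come from M_CTL; this function takes
 * ownership of it and frees it on failure.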
2060 */ 2061 int 2062 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 2063 { 2064 struct ctl_softc *softc = port->ctl_softc; 2065 time_t best_time; 2066 int i, best; 2067 2068 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2069 2070 if (iid >= CTL_MAX_INIT_PER_PORT) { 2071 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 2072 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 2073 free(name, M_CTL); 2074 return (-1); 2075 } 2076 2077 mtx_lock(&softc->ctl_lock); 2078 2079 if (iid < 0 && (wwpn != 0 || name != NULL)) { 2080 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2081 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 2082 iid = i; 2083 break; 2084 } 2085 if (name != NULL && port->wwpn_iid[i].name != NULL && 2086 strcmp(name, port->wwpn_iid[i].name) == 0) { 2087 iid = i; 2088 break; 2089 } 2090 } 2091 } 2092 2093 if (iid < 0) { 2094 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2095 if (port->wwpn_iid[i].in_use == 0 && 2096 port->wwpn_iid[i].wwpn == 0 && 2097 port->wwpn_iid[i].name == NULL) { 2098 iid = i; 2099 break; 2100 } 2101 } 2102 } 2103 2104 if (iid < 0) { 2105 best = -1; 2106 best_time = INT32_MAX; 2107 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2108 if (port->wwpn_iid[i].in_use == 0) { 2109 if (port->wwpn_iid[i].last_use < best_time) { 2110 best = i; 2111 best_time = port->wwpn_iid[i].last_use; 2112 } 2113 } 2114 } 2115 iid = best; 2116 } 2117 2118 if (iid < 0) { 2119 mtx_unlock(&softc->ctl_lock); 2120 free(name, M_CTL); 2121 return (-2); 2122 } 2123 2124 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 2125 /* 2126 * This is not an error yet. 2127 */ 2128 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 2129 #if 0 2130 printf("%s: port %d iid %u WWPN %#jx arrived" 2131 " again\n", __func__, port->targ_port, 2132 iid, (uintmax_t)wwpn); 2133 #endif 2134 goto take; 2135 } 2136 if (name != NULL && port->wwpn_iid[iid].name != NULL && 2137 strcmp(name, port->wwpn_iid[iid].name) == 0) { 2138 #if 0 2139 printf("%s: port %d iid %u name '%s' arrived" 2140 " again\n", __func__, port->targ_port, 2141 iid, name); 2142 #endif 2143 goto take; 2144 } 2145 2146 /* 2147 * This is an error, but what do we do about it? The 2148 * driver is telling us we have a new WWPN for this 2149 * initiator ID, so we pretty much need to use it. 
2150 */ 2151 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 2152 " but WWPN %#jx '%s' is still at that address\n", 2153 __func__, port->targ_port, iid, wwpn, name, 2154 (uintmax_t)port->wwpn_iid[iid].wwpn, 2155 port->wwpn_iid[iid].name); 2156 } 2157 take: 2158 free(port->wwpn_iid[iid].name, M_CTL); 2159 port->wwpn_iid[iid].name = name; 2160 port->wwpn_iid[iid].wwpn = wwpn; 2161 port->wwpn_iid[iid].in_use++; 2162 mtx_unlock(&softc->ctl_lock); 2163 ctl_isc_announce_iid(port, iid); 2164 2165 return (iid); 2166 } 2167 2168 static int 2169 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 2170 { 2171 int len; 2172 2173 switch (port->port_type) { 2174 case CTL_PORT_FC: 2175 { 2176 struct scsi_transportid_fcp *id = 2177 (struct scsi_transportid_fcp *)buf; 2178 if (port->wwpn_iid[iid].wwpn == 0) 2179 return (0); 2180 memset(id, 0, sizeof(*id)); 2181 id->format_protocol = SCSI_PROTO_FC; 2182 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 2183 return (sizeof(*id)); 2184 } 2185 case CTL_PORT_ISCSI: 2186 { 2187 struct scsi_transportid_iscsi_port *id = 2188 (struct scsi_transportid_iscsi_port *)buf; 2189 if (port->wwpn_iid[iid].name == NULL) 2190 return (0); 2191 memset(id, 0, 256); 2192 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 2193 SCSI_PROTO_ISCSI; 2194 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 2195 len = roundup2(min(len, 252), 4); 2196 scsi_ulto2b(len, id->additional_length); 2197 return (sizeof(*id) + len); 2198 } 2199 case CTL_PORT_SAS: 2200 { 2201 struct scsi_transportid_sas *id = 2202 (struct scsi_transportid_sas *)buf; 2203 if (port->wwpn_iid[iid].wwpn == 0) 2204 return (0); 2205 memset(id, 0, sizeof(*id)); 2206 id->format_protocol = SCSI_PROTO_SAS; 2207 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 2208 return (sizeof(*id)); 2209 } 2210 default: 2211 { 2212 struct scsi_transportid_spi *id = 2213 (struct scsi_transportid_spi *)buf; 2214 memset(id, 0, sizeof(*id)); 2215 id->format_protocol = SCSI_PROTO_SPI; 2216 scsi_ulto2b(iid, id->scsi_addr); 2217 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 2218 return (sizeof(*id)); 2219 } 2220 } 2221 } 2222 2223 /* 2224 * Serialize a command that went down the "wrong" side, and so was sent to 2225 * this controller for execution. The logic is a little different than the 2226 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 2227 * sent back to the other side, but in the success case, we execute the 2228 * command on this side (XFER mode) or tell the other side to execute it 2229 * (SER_ONLY mode). 2230 */ 2231 static void 2232 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 2233 { 2234 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2235 struct ctl_port *port = CTL_PORT(ctsio); 2236 union ctl_ha_msg msg_info; 2237 struct ctl_lun *lun; 2238 const struct ctl_cmd_entry *entry; 2239 uint32_t targ_lun; 2240 2241 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 2242 2243 /* Make sure that we know about this port. */ 2244 if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) { 2245 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2246 /*retry_count*/ 1); 2247 goto badjuju; 2248 } 2249 2250 /* Make sure that we know about this LUN. */ 2251 mtx_lock(&softc->ctl_lock); 2252 if (targ_lun >= CTL_MAX_LUNS || 2253 (lun = softc->ctl_luns[targ_lun]) == NULL) { 2254 mtx_unlock(&softc->ctl_lock); 2255 2256 /* 2257 * The other node would not send this request to us unless 2258 * received announce that we are primary node for this LUN. 
2259 * If this LUN does not exist now, it is probably result of 2260 * a race, so respond to initiator in the most opaque way. 2261 */ 2262 ctl_set_busy(ctsio); 2263 goto badjuju; 2264 } 2265 mtx_lock(&lun->lun_lock); 2266 mtx_unlock(&softc->ctl_lock); 2267 2268 /* 2269 * If the LUN is invalid, pretend that it doesn't exist. 2270 * It will go away as soon as all pending I/Os completed. 2271 */ 2272 if (lun->flags & CTL_LUN_DISABLED) { 2273 mtx_unlock(&lun->lun_lock); 2274 ctl_set_busy(ctsio); 2275 goto badjuju; 2276 } 2277 2278 entry = ctl_get_cmd_entry(ctsio, NULL); 2279 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 2280 mtx_unlock(&lun->lun_lock); 2281 goto badjuju; 2282 } 2283 2284 CTL_LUN(ctsio) = lun; 2285 CTL_BACKEND_LUN(ctsio) = lun->be_lun; 2286 2287 /* 2288 * Every I/O goes into the OOA queue for a 2289 * particular LUN, and stays there until completion. 2290 */ 2291 #ifdef CTL_TIME_IO 2292 if (TAILQ_EMPTY(&lun->ooa_queue)) 2293 lun->idle_time += getsbinuptime() - lun->last_busy; 2294 #endif 2295 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2296 2297 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 2298 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, 2299 ooa_links))) { 2300 case CTL_ACTION_BLOCK: 2301 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 2302 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 2303 blocked_links); 2304 mtx_unlock(&lun->lun_lock); 2305 break; 2306 case CTL_ACTION_PASS: 2307 case CTL_ACTION_SKIP: 2308 if (softc->ha_mode == CTL_HA_MODE_XFER) { 2309 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 2310 ctl_enqueue_rtr((union ctl_io *)ctsio); 2311 mtx_unlock(&lun->lun_lock); 2312 } else { 2313 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 2314 mtx_unlock(&lun->lun_lock); 2315 2316 /* send msg back to other side */ 2317 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 2318 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 2319 msg_info.hdr.msg_type = CTL_MSG_R2R; 2320 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2321 sizeof(msg_info.hdr), M_WAITOK); 2322 } 2323 break; 2324 case CTL_ACTION_OVERLAP: 2325 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2326 mtx_unlock(&lun->lun_lock); 2327 ctl_set_overlapped_cmd(ctsio); 2328 goto badjuju; 2329 case CTL_ACTION_OVERLAP_TAG: 2330 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2331 mtx_unlock(&lun->lun_lock); 2332 ctl_set_overlapped_tag(ctsio, ctsio->tag_num); 2333 goto badjuju; 2334 case CTL_ACTION_ERROR: 2335 default: 2336 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2337 mtx_unlock(&lun->lun_lock); 2338 2339 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2340 /*retry_count*/ 0); 2341 badjuju: 2342 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 2343 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 2344 msg_info.hdr.serializing_sc = NULL; 2345 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 2346 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2347 sizeof(msg_info.scsi), M_WAITOK); 2348 ctl_free_io((union ctl_io *)ctsio); 2349 break; 2350 } 2351 } 2352 2353 /* 2354 * Returns 0 for success, errno for failure. 
2355 */ 2356 static void 2357 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 2358 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 2359 { 2360 union ctl_io *io; 2361 2362 mtx_lock(&lun->lun_lock); 2363 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); 2364 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2365 ooa_links)) { 2366 struct ctl_ooa_entry *entry; 2367 2368 /* 2369 * If we've got more than we can fit, just count the 2370 * remaining entries. 2371 */ 2372 if (*cur_fill_num >= ooa_hdr->alloc_num) 2373 continue; 2374 2375 entry = &kern_entries[*cur_fill_num]; 2376 2377 entry->tag_num = io->scsiio.tag_num; 2378 entry->lun_num = lun->lun; 2379 #ifdef CTL_TIME_IO 2380 entry->start_bt = io->io_hdr.start_bt; 2381 #endif 2382 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 2383 entry->cdb_len = io->scsiio.cdb_len; 2384 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 2385 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 2386 2387 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 2388 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 2389 2390 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2391 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2392 2393 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2394 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2395 2396 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2397 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2398 } 2399 mtx_unlock(&lun->lun_lock); 2400 } 2401 2402 static void * 2403 ctl_copyin_alloc(void *user_addr, unsigned int len, char *error_str, 2404 size_t error_str_len) 2405 { 2406 void *kptr; 2407 2408 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 2409 2410 if (copyin(user_addr, kptr, len) != 0) { 2411 snprintf(error_str, error_str_len, "Error copying %d bytes " 2412 "from user address %p to kernel address %p", len, 2413 user_addr, kptr); 2414 free(kptr, M_CTL); 2415 return (NULL); 2416 } 2417 2418 return (kptr); 2419 } 2420 2421 static void 2422 ctl_free_args(int num_args, struct ctl_be_arg *args) 2423 { 2424 int i; 2425 2426 if (args == NULL) 2427 return; 2428 2429 for (i = 0; i < num_args; i++) { 2430 free(args[i].kname, M_CTL); 2431 free(args[i].kvalue, M_CTL); 2432 } 2433 2434 free(args, M_CTL); 2435 } 2436 2437 static struct ctl_be_arg * 2438 ctl_copyin_args(int num_args, struct ctl_be_arg *uargs, 2439 char *error_str, size_t error_str_len) 2440 { 2441 struct ctl_be_arg *args; 2442 int i; 2443 2444 args = ctl_copyin_alloc(uargs, num_args * sizeof(*args), 2445 error_str, error_str_len); 2446 2447 if (args == NULL) 2448 goto bailout; 2449 2450 for (i = 0; i < num_args; i++) { 2451 args[i].kname = NULL; 2452 args[i].kvalue = NULL; 2453 } 2454 2455 for (i = 0; i < num_args; i++) { 2456 uint8_t *tmpptr; 2457 2458 if (args[i].namelen == 0) { 2459 snprintf(error_str, error_str_len, "Argument %d " 2460 "name length is zero", i); 2461 goto bailout; 2462 } 2463 2464 args[i].kname = ctl_copyin_alloc(args[i].name, 2465 args[i].namelen, error_str, error_str_len); 2466 if (args[i].kname == NULL) 2467 goto bailout; 2468 2469 if (args[i].kname[args[i].namelen - 1] != '\0') { 2470 snprintf(error_str, error_str_len, "Argument %d " 2471 "name is not NUL-terminated", i); 2472 goto bailout; 2473 } 2474 2475 if (args[i].flags & CTL_BEARG_RD) { 2476 if (args[i].vallen == 0) { 2477 snprintf(error_str, error_str_len, "Argument %d " 2478 "value length is zero", i); 2479 goto bailout; 2480 } 2481 2482 tmpptr = ctl_copyin_alloc(args[i].value, 2483 args[i].vallen, error_str, error_str_len); 2484 if (tmpptr == NULL) 2485 goto bailout; 
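			/* ASCII values must be NUL-terminated before we accept them. */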
2486 2487 if ((args[i].flags & CTL_BEARG_ASCII) 2488 && (tmpptr[args[i].vallen - 1] != '\0')) { 2489 snprintf(error_str, error_str_len, "Argument " 2490 "%d value is not NUL-terminated", i); 2491 free(tmpptr, M_CTL); 2492 goto bailout; 2493 } 2494 args[i].kvalue = tmpptr; 2495 } else { 2496 args[i].kvalue = malloc(args[i].vallen, 2497 M_CTL, M_WAITOK | M_ZERO); 2498 } 2499 } 2500 2501 return (args); 2502 bailout: 2503 2504 ctl_free_args(num_args, args); 2505 2506 return (NULL); 2507 } 2508 2509 static void 2510 ctl_copyout_args(int num_args, struct ctl_be_arg *args) 2511 { 2512 int i; 2513 2514 for (i = 0; i < num_args; i++) { 2515 if (args[i].flags & CTL_BEARG_WR) 2516 copyout(args[i].kvalue, args[i].value, args[i].vallen); 2517 } 2518 } 2519 2520 /* 2521 * Escape characters that are illegal or not recommended in XML. 2522 */ 2523 int 2524 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 2525 { 2526 char *end = str + size; 2527 int retval; 2528 2529 retval = 0; 2530 2531 for (; *str && str < end; str++) { 2532 switch (*str) { 2533 case '&': 2534 retval = sbuf_printf(sb, "&"); 2535 break; 2536 case '>': 2537 retval = sbuf_printf(sb, ">"); 2538 break; 2539 case '<': 2540 retval = sbuf_printf(sb, "<"); 2541 break; 2542 default: 2543 retval = sbuf_putc(sb, *str); 2544 break; 2545 } 2546 2547 if (retval != 0) 2548 break; 2549 2550 } 2551 2552 return (retval); 2553 } 2554 2555 static void 2556 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2557 { 2558 struct scsi_vpd_id_descriptor *desc; 2559 int i; 2560 2561 if (id == NULL || id->len < 4) 2562 return; 2563 desc = (struct scsi_vpd_id_descriptor *)id->data; 2564 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2565 case SVPD_ID_TYPE_T10: 2566 sbuf_printf(sb, "t10."); 2567 break; 2568 case SVPD_ID_TYPE_EUI64: 2569 sbuf_printf(sb, "eui."); 2570 break; 2571 case SVPD_ID_TYPE_NAA: 2572 sbuf_printf(sb, "naa."); 2573 break; 2574 case SVPD_ID_TYPE_SCSI_NAME: 2575 break; 2576 } 2577 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2578 case SVPD_ID_CODESET_BINARY: 2579 for (i = 0; i < desc->length; i++) 2580 sbuf_printf(sb, "%02x", desc->identifier[i]); 2581 break; 2582 case SVPD_ID_CODESET_ASCII: 2583 sbuf_printf(sb, "%.*s", (int)desc->length, 2584 (char *)desc->identifier); 2585 break; 2586 case SVPD_ID_CODESET_UTF8: 2587 sbuf_printf(sb, "%s", (char *)desc->identifier); 2588 break; 2589 } 2590 } 2591 2592 static int 2593 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2594 struct thread *td) 2595 { 2596 struct ctl_softc *softc = dev->si_drv1; 2597 struct ctl_port *port; 2598 struct ctl_lun *lun; 2599 int retval; 2600 2601 retval = 0; 2602 2603 switch (cmd) { 2604 case CTL_IO: 2605 retval = ctl_ioctl_io(dev, cmd, addr, flag, td); 2606 break; 2607 case CTL_ENABLE_PORT: 2608 case CTL_DISABLE_PORT: 2609 case CTL_SET_PORT_WWNS: { 2610 struct ctl_port *port; 2611 struct ctl_port_entry *entry; 2612 2613 entry = (struct ctl_port_entry *)addr; 2614 2615 mtx_lock(&softc->ctl_lock); 2616 STAILQ_FOREACH(port, &softc->port_list, links) { 2617 int action, done; 2618 2619 if (port->targ_port < softc->port_min || 2620 port->targ_port >= softc->port_max) 2621 continue; 2622 2623 action = 0; 2624 done = 0; 2625 if ((entry->port_type == CTL_PORT_NONE) 2626 && (entry->targ_port == port->targ_port)) { 2627 /* 2628 * If the user only wants to enable or 2629 * disable or set WWNs on a specific port, 2630 * do the operation and we're done. 
2631 */ 2632 action = 1; 2633 done = 1; 2634 } else if (entry->port_type & port->port_type) { 2635 /* 2636 * Compare the user's type mask with the 2637 * particular frontend type to see if we 2638 * have a match. 2639 */ 2640 action = 1; 2641 done = 0; 2642 2643 /* 2644 * Make sure the user isn't trying to set 2645 * WWNs on multiple ports at the same time. 2646 */ 2647 if (cmd == CTL_SET_PORT_WWNS) { 2648 printf("%s: Can't set WWNs on " 2649 "multiple ports\n", __func__); 2650 retval = EINVAL; 2651 break; 2652 } 2653 } 2654 if (action == 0) 2655 continue; 2656 2657 /* 2658 * XXX KDM we have to drop the lock here, because 2659 * the online/offline operations can potentially 2660 * block. We need to reference count the frontends 2661 * so they can't go away, 2662 */ 2663 if (cmd == CTL_ENABLE_PORT) { 2664 mtx_unlock(&softc->ctl_lock); 2665 ctl_port_online(port); 2666 mtx_lock(&softc->ctl_lock); 2667 } else if (cmd == CTL_DISABLE_PORT) { 2668 mtx_unlock(&softc->ctl_lock); 2669 ctl_port_offline(port); 2670 mtx_lock(&softc->ctl_lock); 2671 } else if (cmd == CTL_SET_PORT_WWNS) { 2672 ctl_port_set_wwns(port, 2673 (entry->flags & CTL_PORT_WWNN_VALID) ? 2674 1 : 0, entry->wwnn, 2675 (entry->flags & CTL_PORT_WWPN_VALID) ? 2676 1 : 0, entry->wwpn); 2677 } 2678 if (done != 0) 2679 break; 2680 } 2681 mtx_unlock(&softc->ctl_lock); 2682 break; 2683 } 2684 case CTL_GET_OOA: { 2685 struct ctl_ooa *ooa_hdr; 2686 struct ctl_ooa_entry *entries; 2687 uint32_t cur_fill_num; 2688 2689 ooa_hdr = (struct ctl_ooa *)addr; 2690 2691 if ((ooa_hdr->alloc_len == 0) 2692 || (ooa_hdr->alloc_num == 0)) { 2693 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2694 "must be non-zero\n", __func__, 2695 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2696 retval = EINVAL; 2697 break; 2698 } 2699 2700 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2701 sizeof(struct ctl_ooa_entry))) { 2702 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2703 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2704 __func__, ooa_hdr->alloc_len, 2705 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2706 retval = EINVAL; 2707 break; 2708 } 2709 2710 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2711 if (entries == NULL) { 2712 printf("%s: could not allocate %d bytes for OOA " 2713 "dump\n", __func__, ooa_hdr->alloc_len); 2714 retval = ENOMEM; 2715 break; 2716 } 2717 2718 mtx_lock(&softc->ctl_lock); 2719 if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 && 2720 (ooa_hdr->lun_num >= CTL_MAX_LUNS || 2721 softc->ctl_luns[ooa_hdr->lun_num] == NULL)) { 2722 mtx_unlock(&softc->ctl_lock); 2723 free(entries, M_CTL); 2724 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2725 __func__, (uintmax_t)ooa_hdr->lun_num); 2726 retval = EINVAL; 2727 break; 2728 } 2729 2730 cur_fill_num = 0; 2731 2732 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2733 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2734 ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2735 ooa_hdr, entries); 2736 } 2737 } else { 2738 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2739 ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, 2740 entries); 2741 } 2742 mtx_unlock(&softc->ctl_lock); 2743 2744 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2745 ooa_hdr->fill_len = ooa_hdr->fill_num * 2746 sizeof(struct ctl_ooa_entry); 2747 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2748 if (retval != 0) { 2749 printf("%s: error copying out %d bytes for OOA dump\n", 2750 __func__, ooa_hdr->fill_len); 2751 } 2752 2753 getbinuptime(&ooa_hdr->cur_bt); 2754 2755 if (cur_fill_num > 
ooa_hdr->alloc_num) { 2756 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2757 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2758 } else { 2759 ooa_hdr->dropped_num = 0; 2760 ooa_hdr->status = CTL_OOA_OK; 2761 } 2762 2763 free(entries, M_CTL); 2764 break; 2765 } 2766 case CTL_DELAY_IO: { 2767 struct ctl_io_delay_info *delay_info; 2768 2769 delay_info = (struct ctl_io_delay_info *)addr; 2770 2771 #ifdef CTL_IO_DELAY 2772 mtx_lock(&softc->ctl_lock); 2773 if (delay_info->lun_id >= CTL_MAX_LUNS || 2774 (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) { 2775 mtx_unlock(&softc->ctl_lock); 2776 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2777 break; 2778 } 2779 mtx_lock(&lun->lun_lock); 2780 mtx_unlock(&softc->ctl_lock); 2781 delay_info->status = CTL_DELAY_STATUS_OK; 2782 switch (delay_info->delay_type) { 2783 case CTL_DELAY_TYPE_CONT: 2784 case CTL_DELAY_TYPE_ONESHOT: 2785 break; 2786 default: 2787 delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE; 2788 break; 2789 } 2790 switch (delay_info->delay_loc) { 2791 case CTL_DELAY_LOC_DATAMOVE: 2792 lun->delay_info.datamove_type = delay_info->delay_type; 2793 lun->delay_info.datamove_delay = delay_info->delay_secs; 2794 break; 2795 case CTL_DELAY_LOC_DONE: 2796 lun->delay_info.done_type = delay_info->delay_type; 2797 lun->delay_info.done_delay = delay_info->delay_secs; 2798 break; 2799 default: 2800 delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; 2801 break; 2802 } 2803 mtx_unlock(&lun->lun_lock); 2804 #else 2805 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2806 #endif /* CTL_IO_DELAY */ 2807 break; 2808 } 2809 #ifdef CTL_LEGACY_STATS 2810 case CTL_GETSTATS: { 2811 struct ctl_stats *stats = (struct ctl_stats *)addr; 2812 int i; 2813 2814 /* 2815 * XXX KDM no locking here. If the LUN list changes, 2816 * things can blow up. 2817 */ 2818 i = 0; 2819 stats->status = CTL_SS_OK; 2820 stats->fill_len = 0; 2821 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2822 if (stats->fill_len + sizeof(lun->legacy_stats) > 2823 stats->alloc_len) { 2824 stats->status = CTL_SS_NEED_MORE_SPACE; 2825 break; 2826 } 2827 retval = copyout(&lun->legacy_stats, &stats->lun_stats[i++], 2828 sizeof(lun->legacy_stats)); 2829 if (retval != 0) 2830 break; 2831 stats->fill_len += sizeof(lun->legacy_stats); 2832 } 2833 stats->num_luns = softc->num_luns; 2834 stats->flags = CTL_STATS_FLAG_NONE; 2835 #ifdef CTL_TIME_IO 2836 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 2837 #endif 2838 getnanouptime(&stats->timestamp); 2839 break; 2840 } 2841 #endif /* CTL_LEGACY_STATS */ 2842 case CTL_ERROR_INJECT: { 2843 struct ctl_error_desc *err_desc, *new_err_desc; 2844 2845 err_desc = (struct ctl_error_desc *)addr; 2846 2847 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2848 M_WAITOK | M_ZERO); 2849 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2850 2851 mtx_lock(&softc->ctl_lock); 2852 if (err_desc->lun_id >= CTL_MAX_LUNS || 2853 (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) { 2854 mtx_unlock(&softc->ctl_lock); 2855 free(new_err_desc, M_CTL); 2856 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2857 __func__, (uintmax_t)err_desc->lun_id); 2858 retval = EINVAL; 2859 break; 2860 } 2861 mtx_lock(&lun->lun_lock); 2862 mtx_unlock(&softc->ctl_lock); 2863 2864 /* 2865 * We could do some checking here to verify the validity 2866 * of the request, but given the complexity of error 2867 * injection requests, the checking logic would be fairly 2868 * complex. 
2869 * 2870 * For now, if the request is invalid, it just won't get 2871 * executed and might get deleted. 2872 */ 2873 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2874 2875 /* 2876 * XXX KDM check to make sure the serial number is unique, 2877 * in case we somehow manage to wrap. That shouldn't 2878 * happen for a very long time, but it's the right thing to 2879 * do. 2880 */ 2881 new_err_desc->serial = lun->error_serial; 2882 err_desc->serial = lun->error_serial; 2883 lun->error_serial++; 2884 2885 mtx_unlock(&lun->lun_lock); 2886 break; 2887 } 2888 case CTL_ERROR_INJECT_DELETE: { 2889 struct ctl_error_desc *delete_desc, *desc, *desc2; 2890 int delete_done; 2891 2892 delete_desc = (struct ctl_error_desc *)addr; 2893 delete_done = 0; 2894 2895 mtx_lock(&softc->ctl_lock); 2896 if (delete_desc->lun_id >= CTL_MAX_LUNS || 2897 (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) { 2898 mtx_unlock(&softc->ctl_lock); 2899 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2900 __func__, (uintmax_t)delete_desc->lun_id); 2901 retval = EINVAL; 2902 break; 2903 } 2904 mtx_lock(&lun->lun_lock); 2905 mtx_unlock(&softc->ctl_lock); 2906 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2907 if (desc->serial != delete_desc->serial) 2908 continue; 2909 2910 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2911 links); 2912 free(desc, M_CTL); 2913 delete_done = 1; 2914 } 2915 mtx_unlock(&lun->lun_lock); 2916 if (delete_done == 0) { 2917 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2918 "error serial %ju on LUN %u\n", __func__, 2919 delete_desc->serial, delete_desc->lun_id); 2920 retval = EINVAL; 2921 break; 2922 } 2923 break; 2924 } 2925 case CTL_DUMP_STRUCTS: { 2926 int j, k; 2927 struct ctl_port *port; 2928 struct ctl_frontend *fe; 2929 2930 mtx_lock(&softc->ctl_lock); 2931 printf("CTL Persistent Reservation information start:\n"); 2932 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2933 mtx_lock(&lun->lun_lock); 2934 if ((lun->flags & CTL_LUN_DISABLED) != 0) { 2935 mtx_unlock(&lun->lun_lock); 2936 continue; 2937 } 2938 2939 for (j = 0; j < CTL_MAX_PORTS; j++) { 2940 if (lun->pr_keys[j] == NULL) 2941 continue; 2942 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2943 if (lun->pr_keys[j][k] == 0) 2944 continue; 2945 printf(" LUN %ju port %d iid %d key " 2946 "%#jx\n", lun->lun, j, k, 2947 (uintmax_t)lun->pr_keys[j][k]); 2948 } 2949 } 2950 mtx_unlock(&lun->lun_lock); 2951 } 2952 printf("CTL Persistent Reservation information end\n"); 2953 printf("CTL Ports:\n"); 2954 STAILQ_FOREACH(port, &softc->port_list, links) { 2955 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2956 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2957 port->frontend->name, port->port_type, 2958 port->physical_port, port->virtual_port, 2959 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2960 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2961 if (port->wwpn_iid[j].in_use == 0 && 2962 port->wwpn_iid[j].wwpn == 0 && 2963 port->wwpn_iid[j].name == NULL) 2964 continue; 2965 2966 printf(" iid %u use %d WWPN %#jx '%s'\n", 2967 j, port->wwpn_iid[j].in_use, 2968 (uintmax_t)port->wwpn_iid[j].wwpn, 2969 port->wwpn_iid[j].name); 2970 } 2971 } 2972 printf("CTL Port information end\n"); 2973 mtx_unlock(&softc->ctl_lock); 2974 /* 2975 * XXX KDM calling this without a lock. We'd likely want 2976 * to drop the lock before calling the frontend's dump 2977 * routine anyway. 
2978 */ 2979 printf("CTL Frontends:\n"); 2980 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2981 printf(" Frontend '%s'\n", fe->name); 2982 if (fe->fe_dump != NULL) 2983 fe->fe_dump(); 2984 } 2985 printf("CTL Frontend information end\n"); 2986 break; 2987 } 2988 case CTL_LUN_REQ: { 2989 struct ctl_lun_req *lun_req; 2990 struct ctl_backend_driver *backend; 2991 2992 lun_req = (struct ctl_lun_req *)addr; 2993 2994 backend = ctl_backend_find(lun_req->backend); 2995 if (backend == NULL) { 2996 lun_req->status = CTL_LUN_ERROR; 2997 snprintf(lun_req->error_str, 2998 sizeof(lun_req->error_str), 2999 "Backend \"%s\" not found.", 3000 lun_req->backend); 3001 break; 3002 } 3003 if (lun_req->num_be_args > 0) { 3004 lun_req->kern_be_args = ctl_copyin_args( 3005 lun_req->num_be_args, 3006 lun_req->be_args, 3007 lun_req->error_str, 3008 sizeof(lun_req->error_str)); 3009 if (lun_req->kern_be_args == NULL) { 3010 lun_req->status = CTL_LUN_ERROR; 3011 break; 3012 } 3013 } 3014 3015 retval = backend->ioctl(dev, cmd, addr, flag, td); 3016 3017 if (lun_req->num_be_args > 0) { 3018 ctl_copyout_args(lun_req->num_be_args, 3019 lun_req->kern_be_args); 3020 ctl_free_args(lun_req->num_be_args, 3021 lun_req->kern_be_args); 3022 } 3023 break; 3024 } 3025 case CTL_LUN_LIST: { 3026 struct sbuf *sb; 3027 struct ctl_lun_list *list; 3028 struct ctl_option *opt; 3029 3030 list = (struct ctl_lun_list *)addr; 3031 3032 /* 3033 * Allocate a fixed length sbuf here, based on the length 3034 * of the user's buffer. We could allocate an auto-extending 3035 * buffer, and then tell the user how much larger our 3036 * amount of data is than his buffer, but that presents 3037 * some problems: 3038 * 3039 * 1. The sbuf(9) routines use a blocking malloc, and so 3040 * we can't hold a lock while calling them with an 3041 * auto-extending buffer. 3042 * 3043 * 2. There is not currently a LUN reference counting 3044 * mechanism, outside of outstanding transactions on 3045 * the LUN's OOA queue. So a LUN could go away on us 3046 * while we're getting the LUN number, backend-specific 3047 * information, etc. Thus, given the way things 3048 * currently work, we need to hold the CTL lock while 3049 * grabbing LUN information. 3050 * 3051 * So, from the user's standpoint, the best thing to do is 3052 * allocate what he thinks is a reasonable buffer length, 3053 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 3054 * double the buffer length and try again. (And repeat 3055 * that until he succeeds.) 3056 */ 3057 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3058 if (sb == NULL) { 3059 list->status = CTL_LUN_LIST_ERROR; 3060 snprintf(list->error_str, sizeof(list->error_str), 3061 "Unable to allocate %d bytes for LUN list", 3062 list->alloc_len); 3063 break; 3064 } 3065 3066 sbuf_printf(sb, "<ctllunlist>\n"); 3067 3068 mtx_lock(&softc->ctl_lock); 3069 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3070 mtx_lock(&lun->lun_lock); 3071 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3072 (uintmax_t)lun->lun); 3073 3074 /* 3075 * Bail out as soon as we see that we've overfilled 3076 * the buffer. 3077 */ 3078 if (retval != 0) 3079 break; 3080 3081 retval = sbuf_printf(sb, "\t<backend_type>%s" 3082 "</backend_type>\n", 3083 (lun->backend == NULL) ? 
"none" : 3084 lun->backend->name); 3085 3086 if (retval != 0) 3087 break; 3088 3089 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3090 lun->be_lun->lun_type); 3091 3092 if (retval != 0) 3093 break; 3094 3095 if (lun->backend == NULL) { 3096 retval = sbuf_printf(sb, "</lun>\n"); 3097 if (retval != 0) 3098 break; 3099 continue; 3100 } 3101 3102 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3103 (lun->be_lun->maxlba > 0) ? 3104 lun->be_lun->maxlba + 1 : 0); 3105 3106 if (retval != 0) 3107 break; 3108 3109 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3110 lun->be_lun->blocksize); 3111 3112 if (retval != 0) 3113 break; 3114 3115 retval = sbuf_printf(sb, "\t<serial_number>"); 3116 3117 if (retval != 0) 3118 break; 3119 3120 retval = ctl_sbuf_printf_esc(sb, 3121 lun->be_lun->serial_num, 3122 sizeof(lun->be_lun->serial_num)); 3123 3124 if (retval != 0) 3125 break; 3126 3127 retval = sbuf_printf(sb, "</serial_number>\n"); 3128 3129 if (retval != 0) 3130 break; 3131 3132 retval = sbuf_printf(sb, "\t<device_id>"); 3133 3134 if (retval != 0) 3135 break; 3136 3137 retval = ctl_sbuf_printf_esc(sb, 3138 lun->be_lun->device_id, 3139 sizeof(lun->be_lun->device_id)); 3140 3141 if (retval != 0) 3142 break; 3143 3144 retval = sbuf_printf(sb, "</device_id>\n"); 3145 3146 if (retval != 0) 3147 break; 3148 3149 if (lun->backend->lun_info != NULL) { 3150 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 3151 if (retval != 0) 3152 break; 3153 } 3154 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 3155 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3156 opt->name, opt->value, opt->name); 3157 if (retval != 0) 3158 break; 3159 } 3160 3161 retval = sbuf_printf(sb, "</lun>\n"); 3162 3163 if (retval != 0) 3164 break; 3165 mtx_unlock(&lun->lun_lock); 3166 } 3167 if (lun != NULL) 3168 mtx_unlock(&lun->lun_lock); 3169 mtx_unlock(&softc->ctl_lock); 3170 3171 if ((retval != 0) 3172 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3173 retval = 0; 3174 sbuf_delete(sb); 3175 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3176 snprintf(list->error_str, sizeof(list->error_str), 3177 "Out of space, %d bytes is too small", 3178 list->alloc_len); 3179 break; 3180 } 3181 3182 sbuf_finish(sb); 3183 3184 retval = copyout(sbuf_data(sb), list->lun_xml, 3185 sbuf_len(sb) + 1); 3186 3187 list->fill_len = sbuf_len(sb) + 1; 3188 list->status = CTL_LUN_LIST_OK; 3189 sbuf_delete(sb); 3190 break; 3191 } 3192 case CTL_ISCSI: { 3193 struct ctl_iscsi *ci; 3194 struct ctl_frontend *fe; 3195 3196 ci = (struct ctl_iscsi *)addr; 3197 3198 fe = ctl_frontend_find("iscsi"); 3199 if (fe == NULL) { 3200 ci->status = CTL_ISCSI_ERROR; 3201 snprintf(ci->error_str, sizeof(ci->error_str), 3202 "Frontend \"iscsi\" not found."); 3203 break; 3204 } 3205 3206 retval = fe->ioctl(dev, cmd, addr, flag, td); 3207 break; 3208 } 3209 case CTL_PORT_REQ: { 3210 struct ctl_req *req; 3211 struct ctl_frontend *fe; 3212 3213 req = (struct ctl_req *)addr; 3214 3215 fe = ctl_frontend_find(req->driver); 3216 if (fe == NULL) { 3217 req->status = CTL_LUN_ERROR; 3218 snprintf(req->error_str, sizeof(req->error_str), 3219 "Frontend \"%s\" not found.", req->driver); 3220 break; 3221 } 3222 if (req->num_args > 0) { 3223 req->kern_args = ctl_copyin_args(req->num_args, 3224 req->args, req->error_str, sizeof(req->error_str)); 3225 if (req->kern_args == NULL) { 3226 req->status = CTL_LUN_ERROR; 3227 break; 3228 } 3229 } 3230 3231 if (fe->ioctl) 3232 retval = fe->ioctl(dev, cmd, addr, flag, td); 3233 else 3234 retval = ENODEV; 3235 3236 if 
(req->num_args > 0) { 3237 ctl_copyout_args(req->num_args, req->kern_args); 3238 ctl_free_args(req->num_args, req->kern_args); 3239 } 3240 break; 3241 } 3242 case CTL_PORT_LIST: { 3243 struct sbuf *sb; 3244 struct ctl_port *port; 3245 struct ctl_lun_list *list; 3246 struct ctl_option *opt; 3247 int j; 3248 uint32_t plun; 3249 3250 list = (struct ctl_lun_list *)addr; 3251 3252 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3253 if (sb == NULL) { 3254 list->status = CTL_LUN_LIST_ERROR; 3255 snprintf(list->error_str, sizeof(list->error_str), 3256 "Unable to allocate %d bytes for LUN list", 3257 list->alloc_len); 3258 break; 3259 } 3260 3261 sbuf_printf(sb, "<ctlportlist>\n"); 3262 3263 mtx_lock(&softc->ctl_lock); 3264 STAILQ_FOREACH(port, &softc->port_list, links) { 3265 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3266 (uintmax_t)port->targ_port); 3267 3268 /* 3269 * Bail out as soon as we see that we've overfilled 3270 * the buffer. 3271 */ 3272 if (retval != 0) 3273 break; 3274 3275 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3276 "</frontend_type>\n", port->frontend->name); 3277 if (retval != 0) 3278 break; 3279 3280 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3281 port->port_type); 3282 if (retval != 0) 3283 break; 3284 3285 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3286 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 3287 if (retval != 0) 3288 break; 3289 3290 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3291 port->port_name); 3292 if (retval != 0) 3293 break; 3294 3295 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3296 port->physical_port); 3297 if (retval != 0) 3298 break; 3299 3300 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3301 port->virtual_port); 3302 if (retval != 0) 3303 break; 3304 3305 if (port->target_devid != NULL) { 3306 sbuf_printf(sb, "\t<target>"); 3307 ctl_id_sbuf(port->target_devid, sb); 3308 sbuf_printf(sb, "</target>\n"); 3309 } 3310 3311 if (port->port_devid != NULL) { 3312 sbuf_printf(sb, "\t<port>"); 3313 ctl_id_sbuf(port->port_devid, sb); 3314 sbuf_printf(sb, "</port>\n"); 3315 } 3316 3317 if (port->port_info != NULL) { 3318 retval = port->port_info(port->onoff_arg, sb); 3319 if (retval != 0) 3320 break; 3321 } 3322 STAILQ_FOREACH(opt, &port->options, links) { 3323 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3324 opt->name, opt->value, opt->name); 3325 if (retval != 0) 3326 break; 3327 } 3328 3329 if (port->lun_map != NULL) { 3330 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 3331 for (j = 0; j < port->lun_map_size; j++) { 3332 plun = ctl_lun_map_from_port(port, j); 3333 if (plun == UINT32_MAX) 3334 continue; 3335 sbuf_printf(sb, 3336 "\t<lun id=\"%u\">%u</lun>\n", 3337 j, plun); 3338 } 3339 } 3340 3341 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3342 if (port->wwpn_iid[j].in_use == 0 || 3343 (port->wwpn_iid[j].wwpn == 0 && 3344 port->wwpn_iid[j].name == NULL)) 3345 continue; 3346 3347 if (port->wwpn_iid[j].name != NULL) 3348 retval = sbuf_printf(sb, 3349 "\t<initiator id=\"%u\">%s</initiator>\n", 3350 j, port->wwpn_iid[j].name); 3351 else 3352 retval = sbuf_printf(sb, 3353 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3354 j, port->wwpn_iid[j].wwpn); 3355 if (retval != 0) 3356 break; 3357 } 3358 if (retval != 0) 3359 break; 3360 3361 retval = sbuf_printf(sb, "</targ_port>\n"); 3362 if (retval != 0) 3363 break; 3364 } 3365 mtx_unlock(&softc->ctl_lock); 3366 3367 if ((retval != 0) 3368 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3369 
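		/*
		 * The fixed-length sbuf overflowed; tell userland to retry
		 * with a larger buffer.
		 */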
retval = 0; 3370 sbuf_delete(sb); 3371 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3372 snprintf(list->error_str, sizeof(list->error_str), 3373 "Out of space, %d bytes is too small", 3374 list->alloc_len); 3375 break; 3376 } 3377 3378 sbuf_finish(sb); 3379 3380 retval = copyout(sbuf_data(sb), list->lun_xml, 3381 sbuf_len(sb) + 1); 3382 3383 list->fill_len = sbuf_len(sb) + 1; 3384 list->status = CTL_LUN_LIST_OK; 3385 sbuf_delete(sb); 3386 break; 3387 } 3388 case CTL_LUN_MAP: { 3389 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3390 struct ctl_port *port; 3391 3392 mtx_lock(&softc->ctl_lock); 3393 if (lm->port < softc->port_min || 3394 lm->port >= softc->port_max || 3395 (port = softc->ctl_ports[lm->port]) == NULL) { 3396 mtx_unlock(&softc->ctl_lock); 3397 return (ENXIO); 3398 } 3399 if (port->status & CTL_PORT_STATUS_ONLINE) { 3400 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3401 if (ctl_lun_map_to_port(port, lun->lun) == 3402 UINT32_MAX) 3403 continue; 3404 mtx_lock(&lun->lun_lock); 3405 ctl_est_ua_port(lun, lm->port, -1, 3406 CTL_UA_LUN_CHANGE); 3407 mtx_unlock(&lun->lun_lock); 3408 } 3409 } 3410 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3411 if (lm->plun != UINT32_MAX) { 3412 if (lm->lun == UINT32_MAX) 3413 retval = ctl_lun_map_unset(port, lm->plun); 3414 else if (lm->lun < CTL_MAX_LUNS && 3415 softc->ctl_luns[lm->lun] != NULL) 3416 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3417 else 3418 return (ENXIO); 3419 } else { 3420 if (lm->lun == UINT32_MAX) 3421 retval = ctl_lun_map_deinit(port); 3422 else 3423 retval = ctl_lun_map_init(port); 3424 } 3425 if (port->status & CTL_PORT_STATUS_ONLINE) 3426 ctl_isc_announce_port(port); 3427 break; 3428 } 3429 case CTL_GET_LUN_STATS: { 3430 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3431 int i; 3432 3433 /* 3434 * XXX KDM no locking here. If the LUN list changes, 3435 * things can blow up. 3436 */ 3437 i = 0; 3438 stats->status = CTL_SS_OK; 3439 stats->fill_len = 0; 3440 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3441 if (lun->lun < stats->first_item) 3442 continue; 3443 if (stats->fill_len + sizeof(lun->stats) > 3444 stats->alloc_len) { 3445 stats->status = CTL_SS_NEED_MORE_SPACE; 3446 break; 3447 } 3448 retval = copyout(&lun->stats, &stats->stats[i++], 3449 sizeof(lun->stats)); 3450 if (retval != 0) 3451 break; 3452 stats->fill_len += sizeof(lun->stats); 3453 } 3454 stats->num_items = softc->num_luns; 3455 stats->flags = CTL_STATS_FLAG_NONE; 3456 #ifdef CTL_TIME_IO 3457 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3458 #endif 3459 getnanouptime(&stats->timestamp); 3460 break; 3461 } 3462 case CTL_GET_PORT_STATS: { 3463 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3464 int i; 3465 3466 /* 3467 * XXX KDM no locking here. If the LUN list changes, 3468 * things can blow up. 
3469 */ 3470 i = 0; 3471 stats->status = CTL_SS_OK; 3472 stats->fill_len = 0; 3473 STAILQ_FOREACH(port, &softc->port_list, links) { 3474 if (port->targ_port < stats->first_item) 3475 continue; 3476 if (stats->fill_len + sizeof(port->stats) > 3477 stats->alloc_len) { 3478 stats->status = CTL_SS_NEED_MORE_SPACE; 3479 break; 3480 } 3481 retval = copyout(&port->stats, &stats->stats[i++], 3482 sizeof(port->stats)); 3483 if (retval != 0) 3484 break; 3485 stats->fill_len += sizeof(port->stats); 3486 } 3487 stats->num_items = softc->num_ports; 3488 stats->flags = CTL_STATS_FLAG_NONE; 3489 #ifdef CTL_TIME_IO 3490 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3491 #endif 3492 getnanouptime(&stats->timestamp); 3493 break; 3494 } 3495 default: { 3496 /* XXX KDM should we fix this? */ 3497 #if 0 3498 struct ctl_backend_driver *backend; 3499 unsigned int type; 3500 int found; 3501 3502 found = 0; 3503 3504 /* 3505 * We encode the backend type as the ioctl type for backend 3506 * ioctls. So parse it out here, and then search for a 3507 * backend of this type. 3508 */ 3509 type = _IOC_TYPE(cmd); 3510 3511 STAILQ_FOREACH(backend, &softc->be_list, links) { 3512 if (backend->type == type) { 3513 found = 1; 3514 break; 3515 } 3516 } 3517 if (found == 0) { 3518 printf("ctl: unknown ioctl command %#lx or backend " 3519 "%d\n", cmd, type); 3520 retval = EINVAL; 3521 break; 3522 } 3523 retval = backend->ioctl(dev, cmd, addr, flag, td); 3524 #endif 3525 retval = ENOTTY; 3526 break; 3527 } 3528 } 3529 return (retval); 3530 } 3531 3532 uint32_t 3533 ctl_get_initindex(struct ctl_nexus *nexus) 3534 { 3535 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3536 } 3537 3538 int 3539 ctl_lun_map_init(struct ctl_port *port) 3540 { 3541 struct ctl_softc *softc = port->ctl_softc; 3542 struct ctl_lun *lun; 3543 int size = ctl_lun_map_size; 3544 uint32_t i; 3545 3546 if (port->lun_map == NULL || port->lun_map_size < size) { 3547 port->lun_map_size = 0; 3548 free(port->lun_map, M_CTL); 3549 port->lun_map = malloc(size * sizeof(uint32_t), 3550 M_CTL, M_NOWAIT); 3551 } 3552 if (port->lun_map == NULL) 3553 return (ENOMEM); 3554 for (i = 0; i < size; i++) 3555 port->lun_map[i] = UINT32_MAX; 3556 port->lun_map_size = size; 3557 if (port->status & CTL_PORT_STATUS_ONLINE) { 3558 if (port->lun_disable != NULL) { 3559 STAILQ_FOREACH(lun, &softc->lun_list, links) 3560 port->lun_disable(port->targ_lun_arg, lun->lun); 3561 } 3562 ctl_isc_announce_port(port); 3563 } 3564 return (0); 3565 } 3566 3567 int 3568 ctl_lun_map_deinit(struct ctl_port *port) 3569 { 3570 struct ctl_softc *softc = port->ctl_softc; 3571 struct ctl_lun *lun; 3572 3573 if (port->lun_map == NULL) 3574 return (0); 3575 port->lun_map_size = 0; 3576 free(port->lun_map, M_CTL); 3577 port->lun_map = NULL; 3578 if (port->status & CTL_PORT_STATUS_ONLINE) { 3579 if (port->lun_enable != NULL) { 3580 STAILQ_FOREACH(lun, &softc->lun_list, links) 3581 port->lun_enable(port->targ_lun_arg, lun->lun); 3582 } 3583 ctl_isc_announce_port(port); 3584 } 3585 return (0); 3586 } 3587 3588 int 3589 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3590 { 3591 int status; 3592 uint32_t old; 3593 3594 if (port->lun_map == NULL) { 3595 status = ctl_lun_map_init(port); 3596 if (status != 0) 3597 return (status); 3598 } 3599 if (plun >= port->lun_map_size) 3600 return (EINVAL); 3601 old = port->lun_map[plun]; 3602 port->lun_map[plun] = glun; 3603 if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) { 3604 if (port->lun_enable != NULL) 3605 
port->lun_enable(port->targ_lun_arg, plun); 3606 ctl_isc_announce_port(port); 3607 } 3608 return (0); 3609 } 3610 3611 int 3612 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3613 { 3614 uint32_t old; 3615 3616 if (port->lun_map == NULL || plun >= port->lun_map_size) 3617 return (0); 3618 old = port->lun_map[plun]; 3619 port->lun_map[plun] = UINT32_MAX; 3620 if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) { 3621 if (port->lun_disable != NULL) 3622 port->lun_disable(port->targ_lun_arg, plun); 3623 ctl_isc_announce_port(port); 3624 } 3625 return (0); 3626 } 3627 3628 uint32_t 3629 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3630 { 3631 3632 if (port == NULL) 3633 return (UINT32_MAX); 3634 if (port->lun_map == NULL) 3635 return (lun_id); 3636 if (lun_id > port->lun_map_size) 3637 return (UINT32_MAX); 3638 return (port->lun_map[lun_id]); 3639 } 3640 3641 uint32_t 3642 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3643 { 3644 uint32_t i; 3645 3646 if (port == NULL) 3647 return (UINT32_MAX); 3648 if (port->lun_map == NULL) 3649 return (lun_id); 3650 for (i = 0; i < port->lun_map_size; i++) { 3651 if (port->lun_map[i] == lun_id) 3652 return (i); 3653 } 3654 return (UINT32_MAX); 3655 } 3656 3657 uint32_t 3658 ctl_decode_lun(uint64_t encoded) 3659 { 3660 uint8_t lun[8]; 3661 uint32_t result = 0xffffffff; 3662 3663 be64enc(lun, encoded); 3664 switch (lun[0] & RPL_LUNDATA_ATYP_MASK) { 3665 case RPL_LUNDATA_ATYP_PERIPH: 3666 if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 && 3667 lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0) 3668 result = lun[1]; 3669 break; 3670 case RPL_LUNDATA_ATYP_FLAT: 3671 if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 && 3672 lun[6] == 0 && lun[7] == 0) 3673 result = ((lun[0] & 0x3f) << 8) + lun[1]; 3674 break; 3675 case RPL_LUNDATA_ATYP_EXTLUN: 3676 switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) { 3677 case 0x02: 3678 switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) { 3679 case 0x00: 3680 result = lun[1]; 3681 break; 3682 case 0x10: 3683 result = (lun[1] << 16) + (lun[2] << 8) + 3684 lun[3]; 3685 break; 3686 case 0x20: 3687 if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0) 3688 result = (lun[2] << 24) + 3689 (lun[3] << 16) + (lun[4] << 8) + 3690 lun[5]; 3691 break; 3692 } 3693 break; 3694 case RPL_LUNDATA_EXT_EAM_NOT_SPEC: 3695 result = 0xffffffff; 3696 break; 3697 } 3698 break; 3699 } 3700 return (result); 3701 } 3702 3703 uint64_t 3704 ctl_encode_lun(uint32_t decoded) 3705 { 3706 uint64_t l = decoded; 3707 3708 if (l <= 0xff) 3709 return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48)); 3710 if (l <= 0x3fff) 3711 return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48)); 3712 if (l <= 0xffffff) 3713 return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) | 3714 (l << 32)); 3715 return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16)); 3716 } 3717 3718 int 3719 ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) 3720 { 3721 int i; 3722 3723 for (i = first; i < last; i++) { 3724 if ((mask[i / 32] & (1 << (i % 32))) == 0) 3725 return (i); 3726 } 3727 return (-1); 3728 } 3729 3730 int 3731 ctl_set_mask(uint32_t *mask, uint32_t bit) 3732 { 3733 uint32_t chunk, piece; 3734 3735 chunk = bit >> 5; 3736 piece = bit % (sizeof(uint32_t) * 8); 3737 3738 if ((mask[chunk] & (1 << piece)) != 0) 3739 return (-1); 3740 else 3741 mask[chunk] |= (1 << piece); 3742 3743 return (0); 3744 } 3745 3746 int 3747 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3748 { 3749 uint32_t chunk, piece; 
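	/* chunk indexes the 32-bit word in the mask; piece is the bit within it. */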
3750 3751 chunk = bit >> 5; 3752 piece = bit % (sizeof(uint32_t) * 8); 3753 3754 if ((mask[chunk] & (1 << piece)) == 0) 3755 return (-1); 3756 else 3757 mask[chunk] &= ~(1 << piece); 3758 3759 return (0); 3760 } 3761 3762 int 3763 ctl_is_set(uint32_t *mask, uint32_t bit) 3764 { 3765 uint32_t chunk, piece; 3766 3767 chunk = bit >> 5; 3768 piece = bit % (sizeof(uint32_t) * 8); 3769 3770 if ((mask[chunk] & (1 << piece)) == 0) 3771 return (0); 3772 else 3773 return (1); 3774 } 3775 3776 static uint64_t 3777 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3778 { 3779 uint64_t *t; 3780 3781 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3782 if (t == NULL) 3783 return (0); 3784 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3785 } 3786 3787 static void 3788 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3789 { 3790 uint64_t *t; 3791 3792 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3793 if (t == NULL) 3794 return; 3795 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3796 } 3797 3798 static void 3799 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3800 { 3801 uint64_t *p; 3802 u_int i; 3803 3804 i = residx/CTL_MAX_INIT_PER_PORT; 3805 if (lun->pr_keys[i] != NULL) 3806 return; 3807 mtx_unlock(&lun->lun_lock); 3808 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3809 M_WAITOK | M_ZERO); 3810 mtx_lock(&lun->lun_lock); 3811 if (lun->pr_keys[i] == NULL) 3812 lun->pr_keys[i] = p; 3813 else 3814 free(p, M_CTL); 3815 } 3816 3817 static void 3818 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3819 { 3820 uint64_t *t; 3821 3822 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3823 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3824 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3825 } 3826 3827 /* 3828 * ctl_softc, pool_name, total_ctl_io are passed in. 3829 * npool is passed out. 
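 * Returns 0 on success or ENOMEM if the pool structure cannot be allocated.
 *
 * Typical usage (sketch only; variable names are illustrative):
 *
 *	void *pool;
 *
 *	if (ctl_pool_create(softc, "myfe", 100, &pool) == 0)
 *		io = ctl_alloc_io(pool);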
3830 */ 3831 int 3832 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3833 uint32_t total_ctl_io, void **npool) 3834 { 3835 struct ctl_io_pool *pool; 3836 3837 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3838 M_NOWAIT | M_ZERO); 3839 if (pool == NULL) 3840 return (ENOMEM); 3841 3842 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3843 pool->ctl_softc = ctl_softc; 3844 #ifdef IO_POOLS 3845 pool->zone = uma_zsecond_create(pool->name, NULL, 3846 NULL, NULL, NULL, ctl_softc->io_zone); 3847 /* uma_prealloc(pool->zone, total_ctl_io); */ 3848 #else 3849 pool->zone = ctl_softc->io_zone; 3850 #endif 3851 3852 *npool = pool; 3853 return (0); 3854 } 3855 3856 void 3857 ctl_pool_free(struct ctl_io_pool *pool) 3858 { 3859 3860 if (pool == NULL) 3861 return; 3862 3863 #ifdef IO_POOLS 3864 uma_zdestroy(pool->zone); 3865 #endif 3866 free(pool, M_CTL); 3867 } 3868 3869 union ctl_io * 3870 ctl_alloc_io(void *pool_ref) 3871 { 3872 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3873 union ctl_io *io; 3874 3875 io = uma_zalloc(pool->zone, M_WAITOK); 3876 if (io != NULL) { 3877 io->io_hdr.pool = pool_ref; 3878 CTL_SOFTC(io) = pool->ctl_softc; 3879 } 3880 return (io); 3881 } 3882 3883 union ctl_io * 3884 ctl_alloc_io_nowait(void *pool_ref) 3885 { 3886 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3887 union ctl_io *io; 3888 3889 io = uma_zalloc(pool->zone, M_NOWAIT); 3890 if (io != NULL) { 3891 io->io_hdr.pool = pool_ref; 3892 CTL_SOFTC(io) = pool->ctl_softc; 3893 } 3894 return (io); 3895 } 3896 3897 void 3898 ctl_free_io(union ctl_io *io) 3899 { 3900 struct ctl_io_pool *pool; 3901 3902 if (io == NULL) 3903 return; 3904 3905 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3906 uma_zfree(pool->zone, io); 3907 } 3908 3909 void 3910 ctl_zero_io(union ctl_io *io) 3911 { 3912 struct ctl_io_pool *pool; 3913 3914 if (io == NULL) 3915 return; 3916 3917 /* 3918 * May need to preserve linked list pointers at some point too. 3919 */ 3920 pool = io->io_hdr.pool; 3921 memset(io, 0, sizeof(*io)); 3922 io->io_hdr.pool = pool; 3923 CTL_SOFTC(io) = pool->ctl_softc; 3924 } 3925 3926 int 3927 ctl_expand_number(const char *buf, uint64_t *num) 3928 { 3929 char *endptr; 3930 uint64_t number; 3931 unsigned shift; 3932 3933 number = strtoq(buf, &endptr, 0); 3934 3935 switch (tolower((unsigned char)*endptr)) { 3936 case 'e': 3937 shift = 60; 3938 break; 3939 case 'p': 3940 shift = 50; 3941 break; 3942 case 't': 3943 shift = 40; 3944 break; 3945 case 'g': 3946 shift = 30; 3947 break; 3948 case 'm': 3949 shift = 20; 3950 break; 3951 case 'k': 3952 shift = 10; 3953 break; 3954 case 'b': 3955 case '\0': /* No unit. */ 3956 *num = number; 3957 return (0); 3958 default: 3959 /* Unrecognized unit. */ 3960 return (-1); 3961 } 3962 3963 if ((number << shift) >> shift != number) { 3964 /* Overflow */ 3965 return (-1); 3966 } 3967 *num = number << shift; 3968 return (0); 3969 } 3970 3971 3972 /* 3973 * This routine could be used in the future to load default and/or saved 3974 * mode page parameters for a particuar lun. 
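 * For now it copies the static page templates into the per-LUN
 * current/changeable/default/saved copies and fills in per-LUN fields
 * such as block size and disk geometry.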
3975 */ 3976 static int 3977 ctl_init_page_index(struct ctl_lun *lun) 3978 { 3979 int i, page_code; 3980 struct ctl_page_index *page_index; 3981 const char *value; 3982 uint64_t ival; 3983 3984 memcpy(&lun->mode_pages.index, page_index_template, 3985 sizeof(page_index_template)); 3986 3987 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 3988 3989 page_index = &lun->mode_pages.index[i]; 3990 if (lun->be_lun->lun_type == T_DIRECT && 3991 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 3992 continue; 3993 if (lun->be_lun->lun_type == T_PROCESSOR && 3994 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 3995 continue; 3996 if (lun->be_lun->lun_type == T_CDROM && 3997 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 3998 continue; 3999 4000 page_code = page_index->page_code & SMPH_PC_MASK; 4001 switch (page_code) { 4002 case SMS_RW_ERROR_RECOVERY_PAGE: { 4003 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4004 ("subpage %#x for page %#x is incorrect!", 4005 page_index->subpage, page_code)); 4006 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 4007 &rw_er_page_default, 4008 sizeof(rw_er_page_default)); 4009 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 4010 &rw_er_page_changeable, 4011 sizeof(rw_er_page_changeable)); 4012 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 4013 &rw_er_page_default, 4014 sizeof(rw_er_page_default)); 4015 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 4016 &rw_er_page_default, 4017 sizeof(rw_er_page_default)); 4018 page_index->page_data = 4019 (uint8_t *)lun->mode_pages.rw_er_page; 4020 break; 4021 } 4022 case SMS_FORMAT_DEVICE_PAGE: { 4023 struct scsi_format_page *format_page; 4024 4025 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4026 ("subpage %#x for page %#x is incorrect!", 4027 page_index->subpage, page_code)); 4028 4029 /* 4030 * Sectors per track are set above. Bytes per 4031 * sector need to be set here on a per-LUN basis. 4032 */ 4033 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 4034 &format_page_default, 4035 sizeof(format_page_default)); 4036 memcpy(&lun->mode_pages.format_page[ 4037 CTL_PAGE_CHANGEABLE], &format_page_changeable, 4038 sizeof(format_page_changeable)); 4039 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 4040 &format_page_default, 4041 sizeof(format_page_default)); 4042 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 4043 &format_page_default, 4044 sizeof(format_page_default)); 4045 4046 format_page = &lun->mode_pages.format_page[ 4047 CTL_PAGE_CURRENT]; 4048 scsi_ulto2b(lun->be_lun->blocksize, 4049 format_page->bytes_per_sector); 4050 4051 format_page = &lun->mode_pages.format_page[ 4052 CTL_PAGE_DEFAULT]; 4053 scsi_ulto2b(lun->be_lun->blocksize, 4054 format_page->bytes_per_sector); 4055 4056 format_page = &lun->mode_pages.format_page[ 4057 CTL_PAGE_SAVED]; 4058 scsi_ulto2b(lun->be_lun->blocksize, 4059 format_page->bytes_per_sector); 4060 4061 page_index->page_data = 4062 (uint8_t *)lun->mode_pages.format_page; 4063 break; 4064 } 4065 case SMS_RIGID_DISK_PAGE: { 4066 struct scsi_rigid_disk_page *rigid_disk_page; 4067 uint32_t sectors_per_cylinder; 4068 uint64_t cylinders; 4069 #ifndef __XSCALE__ 4070 int shift; 4071 #endif /* !__XSCALE__ */ 4072 4073 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4074 ("subpage %#x for page %#x is incorrect!", 4075 page_index->subpage, page_code)); 4076 4077 /* 4078 * Rotation rate and sectors per track are set 4079 * above. We calculate the cylinders here based on 4080 * capacity. 
Due to the number of heads and 4081 * sectors per track we're using, smaller arrays 4082 * may turn out to have 0 cylinders. Linux and 4083 * FreeBSD don't pay attention to these mode pages 4084 * to figure out capacity, but Solaris does. It 4085 * seems to deal with 0 cylinders just fine, and 4086 * works out a fake geometry based on the capacity. 4087 */ 4088 memcpy(&lun->mode_pages.rigid_disk_page[ 4089 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 4090 sizeof(rigid_disk_page_default)); 4091 memcpy(&lun->mode_pages.rigid_disk_page[ 4092 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 4093 sizeof(rigid_disk_page_changeable)); 4094 4095 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 4096 CTL_DEFAULT_HEADS; 4097 4098 /* 4099 * The divide method here will be more accurate, 4100 * probably, but results in floating point being 4101 * used in the kernel on i386 (__udivdi3()). On the 4102 * XScale, though, __udivdi3() is implemented in 4103 * software. 4104 * 4105 * The shift method for cylinder calculation is 4106 * accurate if sectors_per_cylinder is a power of 4107 * 2. Otherwise it might be slightly off -- you 4108 * might have a bit of a truncation problem. 4109 */ 4110 #ifdef __XSCALE__ 4111 cylinders = (lun->be_lun->maxlba + 1) / 4112 sectors_per_cylinder; 4113 #else 4114 for (shift = 31; shift > 0; shift--) { 4115 if (sectors_per_cylinder & (1 << shift)) 4116 break; 4117 } 4118 cylinders = (lun->be_lun->maxlba + 1) >> shift; 4119 #endif 4120 4121 /* 4122 * We've basically got 3 bytes, or 24 bits for the 4123 * cylinder size in the mode page. If we're over, 4124 * just round down to 2^24. 4125 */ 4126 if (cylinders > 0xffffff) 4127 cylinders = 0xffffff; 4128 4129 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4130 CTL_PAGE_DEFAULT]; 4131 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4132 4133 if ((value = ctl_get_opt(&lun->be_lun->options, 4134 "rpm")) != NULL) { 4135 scsi_ulto2b(strtol(value, NULL, 0), 4136 rigid_disk_page->rotation_rate); 4137 } 4138 4139 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 4140 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4141 sizeof(rigid_disk_page_default)); 4142 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 4143 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4144 sizeof(rigid_disk_page_default)); 4145 4146 page_index->page_data = 4147 (uint8_t *)lun->mode_pages.rigid_disk_page; 4148 break; 4149 } 4150 case SMS_VERIFY_ERROR_RECOVERY_PAGE: { 4151 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4152 ("subpage %#x for page %#x is incorrect!", 4153 page_index->subpage, page_code)); 4154 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT], 4155 &verify_er_page_default, 4156 sizeof(verify_er_page_default)); 4157 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE], 4158 &verify_er_page_changeable, 4159 sizeof(verify_er_page_changeable)); 4160 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT], 4161 &verify_er_page_default, 4162 sizeof(verify_er_page_default)); 4163 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED], 4164 &verify_er_page_default, 4165 sizeof(verify_er_page_default)); 4166 page_index->page_data = 4167 (uint8_t *)lun->mode_pages.verify_er_page; 4168 break; 4169 } 4170 case SMS_CACHING_PAGE: { 4171 struct scsi_caching_page *caching_page; 4172 4173 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4174 ("subpage %#x for page %#x is incorrect!", 4175 page_index->subpage, page_code)); 4176 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 4177 &caching_page_default, 
4178 sizeof(caching_page_default)); 4179 memcpy(&lun->mode_pages.caching_page[ 4180 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 4181 sizeof(caching_page_changeable)); 4182 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4183 &caching_page_default, 4184 sizeof(caching_page_default)); 4185 caching_page = &lun->mode_pages.caching_page[ 4186 CTL_PAGE_SAVED]; 4187 value = ctl_get_opt(&lun->be_lun->options, "writecache"); 4188 if (value != NULL && strcmp(value, "off") == 0) 4189 caching_page->flags1 &= ~SCP_WCE; 4190 value = ctl_get_opt(&lun->be_lun->options, "readcache"); 4191 if (value != NULL && strcmp(value, "off") == 0) 4192 caching_page->flags1 |= SCP_RCD; 4193 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 4194 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4195 sizeof(caching_page_default)); 4196 page_index->page_data = 4197 (uint8_t *)lun->mode_pages.caching_page; 4198 break; 4199 } 4200 case SMS_CONTROL_MODE_PAGE: { 4201 switch (page_index->subpage) { 4202 case SMS_SUBPAGE_PAGE_0: { 4203 struct scsi_control_page *control_page; 4204 4205 memcpy(&lun->mode_pages.control_page[ 4206 CTL_PAGE_DEFAULT], 4207 &control_page_default, 4208 sizeof(control_page_default)); 4209 memcpy(&lun->mode_pages.control_page[ 4210 CTL_PAGE_CHANGEABLE], 4211 &control_page_changeable, 4212 sizeof(control_page_changeable)); 4213 memcpy(&lun->mode_pages.control_page[ 4214 CTL_PAGE_SAVED], 4215 &control_page_default, 4216 sizeof(control_page_default)); 4217 control_page = &lun->mode_pages.control_page[ 4218 CTL_PAGE_SAVED]; 4219 value = ctl_get_opt(&lun->be_lun->options, 4220 "reordering"); 4221 if (value != NULL && 4222 strcmp(value, "unrestricted") == 0) { 4223 control_page->queue_flags &= 4224 ~SCP_QUEUE_ALG_MASK; 4225 control_page->queue_flags |= 4226 SCP_QUEUE_ALG_UNRESTRICTED; 4227 } 4228 memcpy(&lun->mode_pages.control_page[ 4229 CTL_PAGE_CURRENT], 4230 &lun->mode_pages.control_page[ 4231 CTL_PAGE_SAVED], 4232 sizeof(control_page_default)); 4233 page_index->page_data = 4234 (uint8_t *)lun->mode_pages.control_page; 4235 break; 4236 } 4237 case 0x01: 4238 memcpy(&lun->mode_pages.control_ext_page[ 4239 CTL_PAGE_DEFAULT], 4240 &control_ext_page_default, 4241 sizeof(control_ext_page_default)); 4242 memcpy(&lun->mode_pages.control_ext_page[ 4243 CTL_PAGE_CHANGEABLE], 4244 &control_ext_page_changeable, 4245 sizeof(control_ext_page_changeable)); 4246 memcpy(&lun->mode_pages.control_ext_page[ 4247 CTL_PAGE_SAVED], 4248 &control_ext_page_default, 4249 sizeof(control_ext_page_default)); 4250 memcpy(&lun->mode_pages.control_ext_page[ 4251 CTL_PAGE_CURRENT], 4252 &lun->mode_pages.control_ext_page[ 4253 CTL_PAGE_SAVED], 4254 sizeof(control_ext_page_default)); 4255 page_index->page_data = 4256 (uint8_t *)lun->mode_pages.control_ext_page; 4257 break; 4258 default: 4259 panic("subpage %#x for page %#x is incorrect!", 4260 page_index->subpage, page_code); 4261 } 4262 break; 4263 } 4264 case SMS_INFO_EXCEPTIONS_PAGE: { 4265 switch (page_index->subpage) { 4266 case SMS_SUBPAGE_PAGE_0: 4267 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 4268 &ie_page_default, 4269 sizeof(ie_page_default)); 4270 memcpy(&lun->mode_pages.ie_page[ 4271 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 4272 sizeof(ie_page_changeable)); 4273 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 4274 &ie_page_default, 4275 sizeof(ie_page_default)); 4276 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 4277 &ie_page_default, 4278 sizeof(ie_page_default)); 4279 page_index->page_data = 4280 (uint8_t *)lun->mode_pages.ie_page; 4281 break; 
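			/*
			 * Subpage 0x02 holds the logical block provisioning
			 * threshold descriptors.  They are armed from the
			 * "avail-threshold", "used-threshold",
			 * "pool-avail-threshold" and "pool-used-threshold"
			 * LUN options below; each option value is converted
			 * from bytes to blocks and then stored in units of
			 * 2^CTL_LBP_EXPONENT blocks.
			 */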
4282 case 0x02: { 4283 struct ctl_logical_block_provisioning_page *page; 4284 4285 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 4286 &lbp_page_default, 4287 sizeof(lbp_page_default)); 4288 memcpy(&lun->mode_pages.lbp_page[ 4289 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 4290 sizeof(lbp_page_changeable)); 4291 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4292 &lbp_page_default, 4293 sizeof(lbp_page_default)); 4294 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 4295 value = ctl_get_opt(&lun->be_lun->options, 4296 "avail-threshold"); 4297 if (value != NULL && 4298 ctl_expand_number(value, &ival) == 0) { 4299 page->descr[0].flags |= SLBPPD_ENABLED | 4300 SLBPPD_ARMING_DEC; 4301 if (lun->be_lun->blocksize) 4302 ival /= lun->be_lun->blocksize; 4303 else 4304 ival /= 512; 4305 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4306 page->descr[0].count); 4307 } 4308 value = ctl_get_opt(&lun->be_lun->options, 4309 "used-threshold"); 4310 if (value != NULL && 4311 ctl_expand_number(value, &ival) == 0) { 4312 page->descr[1].flags |= SLBPPD_ENABLED | 4313 SLBPPD_ARMING_INC; 4314 if (lun->be_lun->blocksize) 4315 ival /= lun->be_lun->blocksize; 4316 else 4317 ival /= 512; 4318 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4319 page->descr[1].count); 4320 } 4321 value = ctl_get_opt(&lun->be_lun->options, 4322 "pool-avail-threshold"); 4323 if (value != NULL && 4324 ctl_expand_number(value, &ival) == 0) { 4325 page->descr[2].flags |= SLBPPD_ENABLED | 4326 SLBPPD_ARMING_DEC; 4327 if (lun->be_lun->blocksize) 4328 ival /= lun->be_lun->blocksize; 4329 else 4330 ival /= 512; 4331 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4332 page->descr[2].count); 4333 } 4334 value = ctl_get_opt(&lun->be_lun->options, 4335 "pool-used-threshold"); 4336 if (value != NULL && 4337 ctl_expand_number(value, &ival) == 0) { 4338 page->descr[3].flags |= SLBPPD_ENABLED | 4339 SLBPPD_ARMING_INC; 4340 if (lun->be_lun->blocksize) 4341 ival /= lun->be_lun->blocksize; 4342 else 4343 ival /= 512; 4344 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4345 page->descr[3].count); 4346 } 4347 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 4348 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4349 sizeof(lbp_page_default)); 4350 page_index->page_data = 4351 (uint8_t *)lun->mode_pages.lbp_page; 4352 break; 4353 } 4354 default: 4355 panic("subpage %#x for page %#x is incorrect!", 4356 page_index->subpage, page_code); 4357 } 4358 break; 4359 } 4360 case SMS_CDDVD_CAPS_PAGE:{ 4361 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4362 ("subpage %#x for page %#x is incorrect!", 4363 page_index->subpage, page_code)); 4364 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT], 4365 &cddvd_page_default, 4366 sizeof(cddvd_page_default)); 4367 memcpy(&lun->mode_pages.cddvd_page[ 4368 CTL_PAGE_CHANGEABLE], &cddvd_page_changeable, 4369 sizeof(cddvd_page_changeable)); 4370 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4371 &cddvd_page_default, 4372 sizeof(cddvd_page_default)); 4373 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT], 4374 &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4375 sizeof(cddvd_page_default)); 4376 page_index->page_data = 4377 (uint8_t *)lun->mode_pages.cddvd_page; 4378 break; 4379 } 4380 default: 4381 panic("invalid page code value %#x", page_code); 4382 } 4383 } 4384 4385 return (CTL_RETVAL_COMPLETE); 4386 } 4387 4388 static int 4389 ctl_init_log_page_index(struct ctl_lun *lun) 4390 { 4391 struct ctl_page_index *page_index; 4392 int i, j, k, prev; 4393 4394 memcpy(&lun->log_pages.index, log_page_index_template, 4395 
	    sizeof(log_page_index_template));
4396
4397 	prev = -1;
4398 	for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) {
4399
4400 		page_index = &lun->log_pages.index[i];
4401 		if (lun->be_lun->lun_type == T_DIRECT &&
4402 		    (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
4403 			continue;
4404 		if (lun->be_lun->lun_type == T_PROCESSOR &&
4405 		    (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
4406 			continue;
4407 		if (lun->be_lun->lun_type == T_CDROM &&
4408 		    (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
4409 			continue;
4410
4411 		if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING &&
4412 		    lun->backend->lun_attr == NULL)
4413 			continue;
4414
4415 		if (page_index->page_code != prev) {
4416 			lun->log_pages.pages_page[j] = page_index->page_code;
4417 			prev = page_index->page_code;
4418 			j++;
4419 		}
4420 		lun->log_pages.subpages_page[k*2] = page_index->page_code;
4421 		lun->log_pages.subpages_page[k*2+1] = page_index->subpage;
4422 		k++;
4423 	}
4424 	lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0];
4425 	lun->log_pages.index[0].page_len = j;
4426 	lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0];
4427 	lun->log_pages.index[1].page_len = k * 2;
4428 	lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0];
4429 	lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS;
4430 	lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page;
4431 	lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page);
4432 	lun->log_pages.index[4].page_data = (uint8_t *)&lun->log_pages.ie_page;
4433 	lun->log_pages.index[4].page_len = sizeof(lun->log_pages.ie_page);
4434
4435 	return (CTL_RETVAL_COMPLETE);
4436 }
4437
4438 static int
4439 hex2bin(const char *str, uint8_t *buf, int buf_size)
4440 {
4441 	int i;
4442 	u_char c;
4443
4444 	memset(buf, 0, buf_size);
4445 	while (isspace(str[0]))
4446 		str++;
4447 	if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
4448 		str += 2;
4449 	buf_size *= 2;
4450 	for (i = 0; str[i] != 0 && i < buf_size; i++) {
4451 		while (str[i] == '-')	/* Skip dashes in UUIDs. */
4452 			str++;
4453 		c = str[i];
4454 		if (isdigit(c))
4455 			c -= '0';
4456 		else if (isalpha(c))
4457 			c -= isupper(c) ? 'A' - 10 : 'a' - 10;
4458 		else
4459 			break;
4460 		if (c >= 16)
4461 			break;
4462 		if ((i & 1) == 0)
4463 			buf[i / 2] |= (c << 4);
4464 		else
4465 			buf[i / 2] |= c;
4466 	}
4467 	return ((i + 1) / 2);
4468 }
4469
4470 /*
4471  * LUN allocation.
4472  *
4473  * Requirements:
4474  * - caller allocates and zeros LUN storage, or passes in a NULL LUN to
4475  *   have it allocated here (the caller must then be able to block).
4476  * - ctl_softc is always set
4477  * - be_lun is set if the LUN has a backend (needed for disk LUNs)
4478  *
4479  * Returns 0 for success, non-zero (errno) for failure.
4480  */
4481 static int
4482 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
4483 	      struct ctl_be_lun *const be_lun)
4484 {
4485 	struct ctl_lun *nlun, *lun;
4486 	struct scsi_vpd_id_descriptor *desc;
4487 	struct scsi_vpd_id_t10 *t10id;
4488 	const char *eui, *naa, *scsiname, *uuid, *vendor, *value;
4489 	int lun_number, lun_malloced;
4490 	int devidlen, idlen1, idlen2 = 0, len;
4491
4492 	if (be_lun == NULL)
4493 		return (EINVAL);
4494
4495 	/*
4496 	 * We currently only support Direct Access, Processor and CD-ROM LUN types.
4497 */ 4498 switch (be_lun->lun_type) { 4499 case T_DIRECT: 4500 case T_PROCESSOR: 4501 case T_CDROM: 4502 break; 4503 case T_SEQUENTIAL: 4504 case T_CHANGER: 4505 default: 4506 be_lun->lun_config_status(be_lun->be_lun, 4507 CTL_LUN_CONFIG_FAILURE); 4508 break; 4509 } 4510 if (ctl_lun == NULL) { 4511 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4512 lun_malloced = 1; 4513 } else { 4514 lun_malloced = 0; 4515 lun = ctl_lun; 4516 } 4517 4518 memset(lun, 0, sizeof(*lun)); 4519 if (lun_malloced) 4520 lun->flags = CTL_LUN_MALLOCED; 4521 4522 /* Generate LUN ID. */ 4523 devidlen = max(CTL_DEVID_MIN_LEN, 4524 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4525 idlen1 = sizeof(*t10id) + devidlen; 4526 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4527 scsiname = ctl_get_opt(&be_lun->options, "scsiname"); 4528 if (scsiname != NULL) { 4529 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4530 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4531 } 4532 eui = ctl_get_opt(&be_lun->options, "eui"); 4533 if (eui != NULL) { 4534 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4535 } 4536 naa = ctl_get_opt(&be_lun->options, "naa"); 4537 if (naa != NULL) { 4538 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4539 } 4540 uuid = ctl_get_opt(&be_lun->options, "uuid"); 4541 if (uuid != NULL) { 4542 len += sizeof(struct scsi_vpd_id_descriptor) + 18; 4543 } 4544 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4545 M_CTL, M_WAITOK | M_ZERO); 4546 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4547 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4548 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4549 desc->length = idlen1; 4550 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4551 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4552 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { 4553 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4554 } else { 4555 strncpy(t10id->vendor, vendor, 4556 min(sizeof(t10id->vendor), strlen(vendor))); 4557 } 4558 strncpy((char *)t10id->vendor_spec_id, 4559 (char *)be_lun->device_id, devidlen); 4560 if (scsiname != NULL) { 4561 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4562 desc->length); 4563 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4564 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4565 SVPD_ID_TYPE_SCSI_NAME; 4566 desc->length = idlen2; 4567 strlcpy(desc->identifier, scsiname, idlen2); 4568 } 4569 if (eui != NULL) { 4570 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4571 desc->length); 4572 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4573 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4574 SVPD_ID_TYPE_EUI64; 4575 desc->length = hex2bin(eui, desc->identifier, 16); 4576 desc->length = desc->length > 12 ? 16 : 4577 (desc->length > 8 ? 12 : 8); 4578 len -= 16 - desc->length; 4579 } 4580 if (naa != NULL) { 4581 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4582 desc->length); 4583 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4584 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4585 SVPD_ID_TYPE_NAA; 4586 desc->length = hex2bin(naa, desc->identifier, 16); 4587 desc->length = desc->length > 8 ? 
16 : 8; 4588 len -= 16 - desc->length; 4589 } 4590 if (uuid != NULL) { 4591 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4592 desc->length); 4593 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4594 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4595 SVPD_ID_TYPE_UUID; 4596 desc->identifier[0] = 0x10; 4597 hex2bin(uuid, &desc->identifier[2], 16); 4598 desc->length = 18; 4599 } 4600 lun->lun_devid->len = len; 4601 4602 mtx_lock(&ctl_softc->ctl_lock); 4603 /* 4604 * See if the caller requested a particular LUN number. If so, see 4605 * if it is available. Otherwise, allocate the first available LUN. 4606 */ 4607 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4608 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4609 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4610 mtx_unlock(&ctl_softc->ctl_lock); 4611 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4612 printf("ctl: requested LUN ID %d is higher " 4613 "than CTL_MAX_LUNS - 1 (%d)\n", 4614 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4615 } else { 4616 /* 4617 * XXX KDM return an error, or just assign 4618 * another LUN ID in this case?? 4619 */ 4620 printf("ctl: requested LUN ID %d is already " 4621 "in use\n", be_lun->req_lun_id); 4622 } 4623 fail: 4624 free(lun->lun_devid, M_CTL); 4625 if (lun->flags & CTL_LUN_MALLOCED) 4626 free(lun, M_CTL); 4627 be_lun->lun_config_status(be_lun->be_lun, 4628 CTL_LUN_CONFIG_FAILURE); 4629 return (ENOSPC); 4630 } 4631 lun_number = be_lun->req_lun_id; 4632 } else { 4633 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS); 4634 if (lun_number == -1) { 4635 mtx_unlock(&ctl_softc->ctl_lock); 4636 printf("ctl: can't allocate LUN, out of LUNs\n"); 4637 goto fail; 4638 } 4639 } 4640 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4641 mtx_unlock(&ctl_softc->ctl_lock); 4642 4643 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4644 lun->lun = lun_number; 4645 lun->be_lun = be_lun; 4646 /* 4647 * The processor LUN is always enabled. Disk LUNs come on line 4648 * disabled, and must be enabled by the backend. 4649 */ 4650 lun->flags |= CTL_LUN_DISABLED; 4651 lun->backend = be_lun->be; 4652 be_lun->ctl_lun = lun; 4653 be_lun->lun_id = lun_number; 4654 atomic_add_int(&be_lun->be->num_luns, 1); 4655 if (be_lun->flags & CTL_LUN_FLAG_EJECTED) 4656 lun->flags |= CTL_LUN_EJECTED; 4657 if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) 4658 lun->flags |= CTL_LUN_NO_MEDIA; 4659 if (be_lun->flags & CTL_LUN_FLAG_STOPPED) 4660 lun->flags |= CTL_LUN_STOPPED; 4661 4662 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4663 lun->flags |= CTL_LUN_PRIMARY_SC; 4664 4665 value = ctl_get_opt(&be_lun->options, "removable"); 4666 if (value != NULL) { 4667 if (strcmp(value, "on") == 0) 4668 lun->flags |= CTL_LUN_REMOVABLE; 4669 } else if (be_lun->lun_type == T_CDROM) 4670 lun->flags |= CTL_LUN_REMOVABLE; 4671 4672 lun->ctl_softc = ctl_softc; 4673 #ifdef CTL_TIME_IO 4674 lun->last_busy = getsbinuptime(); 4675 #endif 4676 TAILQ_INIT(&lun->ooa_queue); 4677 TAILQ_INIT(&lun->blocked_queue); 4678 STAILQ_INIT(&lun->error_list); 4679 lun->ie_reported = 1; 4680 callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0); 4681 ctl_tpc_lun_init(lun); 4682 if (lun->flags & CTL_LUN_REMOVABLE) { 4683 lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4, 4684 M_CTL, M_WAITOK); 4685 } 4686 4687 /* 4688 * Initialize the mode and log page index. 
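	 * Both ctl_init_page_index() and ctl_init_log_page_index() filter on
	 * the LUN type, so only the pages that apply to this kind of LUN get
	 * instantiated.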
4689 */ 4690 ctl_init_page_index(lun); 4691 ctl_init_log_page_index(lun); 4692 4693 /* Setup statistics gathering */ 4694 #ifdef CTL_LEGACY_STATS 4695 lun->legacy_stats.device_type = be_lun->lun_type; 4696 lun->legacy_stats.lun_number = lun_number; 4697 lun->legacy_stats.blocksize = be_lun->blocksize; 4698 if (be_lun->blocksize == 0) 4699 lun->legacy_stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4700 for (len = 0; len < CTL_MAX_PORTS; len++) 4701 lun->legacy_stats.ports[len].targ_port = len; 4702 #endif /* CTL_LEGACY_STATS */ 4703 lun->stats.item = lun_number; 4704 4705 /* 4706 * Now, before we insert this lun on the lun list, set the lun 4707 * inventory changed UA for all other luns. 4708 */ 4709 mtx_lock(&ctl_softc->ctl_lock); 4710 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4711 mtx_lock(&nlun->lun_lock); 4712 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4713 mtx_unlock(&nlun->lun_lock); 4714 } 4715 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4716 ctl_softc->ctl_luns[lun_number] = lun; 4717 ctl_softc->num_luns++; 4718 mtx_unlock(&ctl_softc->ctl_lock); 4719 4720 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4721 return (0); 4722 } 4723 4724 /* 4725 * Delete a LUN. 4726 * Assumptions: 4727 * - LUN has already been marked invalid and any pending I/O has been taken 4728 * care of. 4729 */ 4730 static int 4731 ctl_free_lun(struct ctl_lun *lun) 4732 { 4733 struct ctl_softc *softc = lun->ctl_softc; 4734 struct ctl_lun *nlun; 4735 int i; 4736 4737 KASSERT(TAILQ_EMPTY(&lun->ooa_queue), 4738 ("Freeing a LUN %p with outstanding I/O!\n", lun)); 4739 4740 mtx_lock(&softc->ctl_lock); 4741 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4742 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4743 softc->ctl_luns[lun->lun] = NULL; 4744 softc->num_luns--; 4745 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4746 mtx_lock(&nlun->lun_lock); 4747 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4748 mtx_unlock(&nlun->lun_lock); 4749 } 4750 mtx_unlock(&softc->ctl_lock); 4751 4752 /* 4753 * Tell the backend to free resources, if this LUN has a backend. 4754 */ 4755 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4756 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4757 4758 lun->ie_reportcnt = UINT32_MAX; 4759 callout_drain(&lun->ie_callout); 4760 ctl_tpc_lun_shutdown(lun); 4761 mtx_destroy(&lun->lun_lock); 4762 free(lun->lun_devid, M_CTL); 4763 for (i = 0; i < CTL_MAX_PORTS; i++) 4764 free(lun->pending_ua[i], M_CTL); 4765 for (i = 0; i < CTL_MAX_PORTS; i++) 4766 free(lun->pr_keys[i], M_CTL); 4767 free(lun->write_buffer, M_CTL); 4768 free(lun->prevent, M_CTL); 4769 if (lun->flags & CTL_LUN_MALLOCED) 4770 free(lun, M_CTL); 4771 4772 return (0); 4773 } 4774 4775 static void 4776 ctl_create_lun(struct ctl_be_lun *be_lun) 4777 { 4778 4779 /* 4780 * ctl_alloc_lun() should handle all potential failure cases. 
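	 * Failures are reported to the backend through
	 * be_lun->lun_config_status(..., CTL_LUN_CONFIG_FAILURE) rather than
	 * through a return value, which is why the result is ignored here.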
4781 */ 4782 ctl_alloc_lun(control_softc, NULL, be_lun); 4783 } 4784 4785 int 4786 ctl_add_lun(struct ctl_be_lun *be_lun) 4787 { 4788 struct ctl_softc *softc = control_softc; 4789 4790 mtx_lock(&softc->ctl_lock); 4791 STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links); 4792 mtx_unlock(&softc->ctl_lock); 4793 wakeup(&softc->pending_lun_queue); 4794 4795 return (0); 4796 } 4797 4798 int 4799 ctl_enable_lun(struct ctl_be_lun *be_lun) 4800 { 4801 struct ctl_softc *softc; 4802 struct ctl_port *port, *nport; 4803 struct ctl_lun *lun; 4804 int retval; 4805 4806 lun = (struct ctl_lun *)be_lun->ctl_lun; 4807 softc = lun->ctl_softc; 4808 4809 mtx_lock(&softc->ctl_lock); 4810 mtx_lock(&lun->lun_lock); 4811 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4812 /* 4813 * eh? Why did we get called if the LUN is already 4814 * enabled? 4815 */ 4816 mtx_unlock(&lun->lun_lock); 4817 mtx_unlock(&softc->ctl_lock); 4818 return (0); 4819 } 4820 lun->flags &= ~CTL_LUN_DISABLED; 4821 mtx_unlock(&lun->lun_lock); 4822 4823 STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) { 4824 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4825 port->lun_map != NULL || port->lun_enable == NULL) 4826 continue; 4827 4828 /* 4829 * Drop the lock while we call the FETD's enable routine. 4830 * This can lead to a callback into CTL (at least in the 4831 * case of the internal initiator frontend. 4832 */ 4833 mtx_unlock(&softc->ctl_lock); 4834 retval = port->lun_enable(port->targ_lun_arg, lun->lun); 4835 mtx_lock(&softc->ctl_lock); 4836 if (retval != 0) { 4837 printf("%s: FETD %s port %d returned error " 4838 "%d for lun_enable on lun %jd\n", 4839 __func__, port->port_name, port->targ_port, 4840 retval, (intmax_t)lun->lun); 4841 } 4842 } 4843 4844 mtx_unlock(&softc->ctl_lock); 4845 ctl_isc_announce_lun(lun); 4846 4847 return (0); 4848 } 4849 4850 int 4851 ctl_disable_lun(struct ctl_be_lun *be_lun) 4852 { 4853 struct ctl_softc *softc; 4854 struct ctl_port *port; 4855 struct ctl_lun *lun; 4856 int retval; 4857 4858 lun = (struct ctl_lun *)be_lun->ctl_lun; 4859 softc = lun->ctl_softc; 4860 4861 mtx_lock(&softc->ctl_lock); 4862 mtx_lock(&lun->lun_lock); 4863 if (lun->flags & CTL_LUN_DISABLED) { 4864 mtx_unlock(&lun->lun_lock); 4865 mtx_unlock(&softc->ctl_lock); 4866 return (0); 4867 } 4868 lun->flags |= CTL_LUN_DISABLED; 4869 mtx_unlock(&lun->lun_lock); 4870 4871 STAILQ_FOREACH(port, &softc->port_list, links) { 4872 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4873 port->lun_map != NULL || port->lun_disable == NULL) 4874 continue; 4875 4876 /* 4877 * Drop the lock before we call the frontend's disable 4878 * routine, to avoid lock order reversals. 4879 * 4880 * XXX KDM what happens if the frontend list changes while 4881 * we're traversing it? It's unlikely, but should be handled. 
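		 * (The enable path above walks the same list with
		 * STAILQ_FOREACH_SAFE, which only protects against the
		 * current entry being removed, not against other changes.)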
4882 */ 4883 mtx_unlock(&softc->ctl_lock); 4884 retval = port->lun_disable(port->targ_lun_arg, lun->lun); 4885 mtx_lock(&softc->ctl_lock); 4886 if (retval != 0) { 4887 printf("%s: FETD %s port %d returned error " 4888 "%d for lun_disable on lun %jd\n", 4889 __func__, port->port_name, port->targ_port, 4890 retval, (intmax_t)lun->lun); 4891 } 4892 } 4893 4894 mtx_unlock(&softc->ctl_lock); 4895 ctl_isc_announce_lun(lun); 4896 4897 return (0); 4898 } 4899 4900 int 4901 ctl_start_lun(struct ctl_be_lun *be_lun) 4902 { 4903 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4904 4905 mtx_lock(&lun->lun_lock); 4906 lun->flags &= ~CTL_LUN_STOPPED; 4907 mtx_unlock(&lun->lun_lock); 4908 return (0); 4909 } 4910 4911 int 4912 ctl_stop_lun(struct ctl_be_lun *be_lun) 4913 { 4914 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4915 4916 mtx_lock(&lun->lun_lock); 4917 lun->flags |= CTL_LUN_STOPPED; 4918 mtx_unlock(&lun->lun_lock); 4919 return (0); 4920 } 4921 4922 int 4923 ctl_lun_no_media(struct ctl_be_lun *be_lun) 4924 { 4925 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4926 4927 mtx_lock(&lun->lun_lock); 4928 lun->flags |= CTL_LUN_NO_MEDIA; 4929 mtx_unlock(&lun->lun_lock); 4930 return (0); 4931 } 4932 4933 int 4934 ctl_lun_has_media(struct ctl_be_lun *be_lun) 4935 { 4936 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4937 union ctl_ha_msg msg; 4938 4939 mtx_lock(&lun->lun_lock); 4940 lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED); 4941 if (lun->flags & CTL_LUN_REMOVABLE) 4942 ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE); 4943 mtx_unlock(&lun->lun_lock); 4944 if ((lun->flags & CTL_LUN_REMOVABLE) && 4945 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 4946 bzero(&msg.ua, sizeof(msg.ua)); 4947 msg.hdr.msg_type = CTL_MSG_UA; 4948 msg.hdr.nexus.initid = -1; 4949 msg.hdr.nexus.targ_port = -1; 4950 msg.hdr.nexus.targ_lun = lun->lun; 4951 msg.hdr.nexus.targ_mapped_lun = lun->lun; 4952 msg.ua.ua_all = 1; 4953 msg.ua.ua_set = 1; 4954 msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE; 4955 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 4956 M_WAITOK); 4957 } 4958 return (0); 4959 } 4960 4961 int 4962 ctl_lun_ejected(struct ctl_be_lun *be_lun) 4963 { 4964 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4965 4966 mtx_lock(&lun->lun_lock); 4967 lun->flags |= CTL_LUN_EJECTED; 4968 mtx_unlock(&lun->lun_lock); 4969 return (0); 4970 } 4971 4972 int 4973 ctl_lun_primary(struct ctl_be_lun *be_lun) 4974 { 4975 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4976 4977 mtx_lock(&lun->lun_lock); 4978 lun->flags |= CTL_LUN_PRIMARY_SC; 4979 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4980 mtx_unlock(&lun->lun_lock); 4981 ctl_isc_announce_lun(lun); 4982 return (0); 4983 } 4984 4985 int 4986 ctl_lun_secondary(struct ctl_be_lun *be_lun) 4987 { 4988 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4989 4990 mtx_lock(&lun->lun_lock); 4991 lun->flags &= ~CTL_LUN_PRIMARY_SC; 4992 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4993 mtx_unlock(&lun->lun_lock); 4994 ctl_isc_announce_lun(lun); 4995 return (0); 4996 } 4997 4998 int 4999 ctl_invalidate_lun(struct ctl_be_lun *be_lun) 5000 { 5001 struct ctl_softc *softc; 5002 struct ctl_lun *lun; 5003 5004 lun = (struct ctl_lun *)be_lun->ctl_lun; 5005 softc = lun->ctl_softc; 5006 5007 mtx_lock(&lun->lun_lock); 5008 5009 /* 5010 * The LUN needs to be disabled before it can be marked invalid. 
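	 * If it is still enabled we return -1 and leave the LUN untouched.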
5011 */ 5012 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 5013 mtx_unlock(&lun->lun_lock); 5014 return (-1); 5015 } 5016 /* 5017 * Mark the LUN invalid. 5018 */ 5019 lun->flags |= CTL_LUN_INVALID; 5020 5021 /* 5022 * If there is nothing in the OOA queue, go ahead and free the LUN. 5023 * If we have something in the OOA queue, we'll free it when the 5024 * last I/O completes. 5025 */ 5026 if (TAILQ_EMPTY(&lun->ooa_queue)) { 5027 mtx_unlock(&lun->lun_lock); 5028 ctl_free_lun(lun); 5029 } else 5030 mtx_unlock(&lun->lun_lock); 5031 5032 return (0); 5033 } 5034 5035 void 5036 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 5037 { 5038 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5039 union ctl_ha_msg msg; 5040 5041 mtx_lock(&lun->lun_lock); 5042 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE); 5043 mtx_unlock(&lun->lun_lock); 5044 if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 5045 /* Send msg to other side. */ 5046 bzero(&msg.ua, sizeof(msg.ua)); 5047 msg.hdr.msg_type = CTL_MSG_UA; 5048 msg.hdr.nexus.initid = -1; 5049 msg.hdr.nexus.targ_port = -1; 5050 msg.hdr.nexus.targ_lun = lun->lun; 5051 msg.hdr.nexus.targ_mapped_lun = lun->lun; 5052 msg.ua.ua_all = 1; 5053 msg.ua.ua_set = 1; 5054 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE; 5055 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 5056 M_WAITOK); 5057 } 5058 } 5059 5060 /* 5061 * Backend "memory move is complete" callback for requests that never 5062 * make it down to say RAIDCore's configuration code. 5063 */ 5064 int 5065 ctl_config_move_done(union ctl_io *io) 5066 { 5067 int retval; 5068 5069 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 5070 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 5071 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 5072 5073 if ((io->io_hdr.port_status != 0) && 5074 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5075 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5076 ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, 5077 /*retry_count*/ io->io_hdr.port_status); 5078 } else if (io->scsiio.kern_data_resid != 0 && 5079 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && 5080 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5081 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5082 ctl_set_invalid_field_ciu(&io->scsiio); 5083 } 5084 5085 if (ctl_debug & CTL_DEBUG_CDB_DATA) 5086 ctl_data_print(io); 5087 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 5088 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5089 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 5090 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 5091 /* 5092 * XXX KDM just assuming a single pointer here, and not a 5093 * S/G list. If we start using S/G lists for config data, 5094 * we'll need to know how to clean them up here as well. 5095 */ 5096 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5097 free(io->scsiio.kern_data_ptr, M_CTL); 5098 ctl_done(io); 5099 retval = CTL_RETVAL_COMPLETE; 5100 } else { 5101 /* 5102 * XXX KDM now we need to continue data movement. Some 5103 * options: 5104 * - call ctl_scsiio() again? We don't do this for data 5105 * writes, because for those at least we know ahead of 5106 * time where the write will go and how long it is. For 5107 * config writes, though, that information is largely 5108 * contained within the write itself, thus we need to 5109 * parse out the data again. 5110 * 5111 * - Call some other function once the data is in? 
5112 */ 5113 5114 /* 5115 * XXX KDM call ctl_scsiio() again for now, and check flag 5116 * bits to see whether we're allocated or not. 5117 */ 5118 retval = ctl_scsiio(&io->scsiio); 5119 } 5120 return (retval); 5121 } 5122 5123 /* 5124 * This gets called by a backend driver when it is done with a 5125 * data_submit method. 5126 */ 5127 void 5128 ctl_data_submit_done(union ctl_io *io) 5129 { 5130 /* 5131 * If the IO_CONT flag is set, we need to call the supplied 5132 * function to continue processing the I/O, instead of completing 5133 * the I/O just yet. 5134 * 5135 * If there is an error, though, we don't want to keep processing. 5136 * Instead, just send status back to the initiator. 5137 */ 5138 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5139 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5140 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5141 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5142 io->scsiio.io_cont(io); 5143 return; 5144 } 5145 ctl_done(io); 5146 } 5147 5148 /* 5149 * This gets called by a backend driver when it is done with a 5150 * configuration write. 5151 */ 5152 void 5153 ctl_config_write_done(union ctl_io *io) 5154 { 5155 uint8_t *buf; 5156 5157 /* 5158 * If the IO_CONT flag is set, we need to call the supplied 5159 * function to continue processing the I/O, instead of completing 5160 * the I/O just yet. 5161 * 5162 * If there is an error, though, we don't want to keep processing. 5163 * Instead, just send status back to the initiator. 5164 */ 5165 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5166 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5167 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5168 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5169 io->scsiio.io_cont(io); 5170 return; 5171 } 5172 /* 5173 * Since a configuration write can be done for commands that actually 5174 * have data allocated, like write buffer, and commands that have 5175 * no data, like start/stop unit, we need to check here. 5176 */ 5177 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5178 buf = io->scsiio.kern_data_ptr; 5179 else 5180 buf = NULL; 5181 ctl_done(io); 5182 if (buf) 5183 free(buf, M_CTL); 5184 } 5185 5186 void 5187 ctl_config_read_done(union ctl_io *io) 5188 { 5189 uint8_t *buf; 5190 5191 /* 5192 * If there is some error -- we are done, skip data transfer. 5193 */ 5194 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 5195 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5196 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 5197 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5198 buf = io->scsiio.kern_data_ptr; 5199 else 5200 buf = NULL; 5201 ctl_done(io); 5202 if (buf) 5203 free(buf, M_CTL); 5204 return; 5205 } 5206 5207 /* 5208 * If the IO_CONT flag is set, we need to call the supplied 5209 * function to continue processing the I/O, instead of completing 5210 * the I/O just yet. 5211 */ 5212 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 5213 io->scsiio.io_cont(io); 5214 return; 5215 } 5216 5217 ctl_datamove(io); 5218 } 5219 5220 /* 5221 * SCSI release command. 5222 */ 5223 int 5224 ctl_scsi_release(struct ctl_scsiio *ctsio) 5225 { 5226 struct ctl_lun *lun = CTL_LUN(ctsio); 5227 uint32_t residx; 5228 5229 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 5230 5231 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5232 5233 /* 5234 * XXX KDM right now, we only support LUN reservation. We don't 5235 * support 3rd party reservations, or extent reservations, which 5236 * might actually need the parameter list. 
If we've gotten this 5237 * far, we've got a LUN reservation. Anything else got kicked out 5238 * above. So, according to SPC, ignore the length. 5239 */ 5240 5241 mtx_lock(&lun->lun_lock); 5242 5243 /* 5244 * According to SPC, it is not an error for an intiator to attempt 5245 * to release a reservation on a LUN that isn't reserved, or that 5246 * is reserved by another initiator. The reservation can only be 5247 * released, though, by the initiator who made it or by one of 5248 * several reset type events. 5249 */ 5250 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 5251 lun->flags &= ~CTL_LUN_RESERVED; 5252 5253 mtx_unlock(&lun->lun_lock); 5254 5255 ctl_set_success(ctsio); 5256 ctl_done((union ctl_io *)ctsio); 5257 return (CTL_RETVAL_COMPLETE); 5258 } 5259 5260 int 5261 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5262 { 5263 struct ctl_lun *lun = CTL_LUN(ctsio); 5264 uint32_t residx; 5265 5266 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5267 5268 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5269 5270 /* 5271 * XXX KDM right now, we only support LUN reservation. We don't 5272 * support 3rd party reservations, or extent reservations, which 5273 * might actually need the parameter list. If we've gotten this 5274 * far, we've got a LUN reservation. Anything else got kicked out 5275 * above. So, according to SPC, ignore the length. 5276 */ 5277 5278 mtx_lock(&lun->lun_lock); 5279 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 5280 ctl_set_reservation_conflict(ctsio); 5281 goto bailout; 5282 } 5283 5284 /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. */ 5285 if (lun->flags & CTL_LUN_PR_RESERVED) { 5286 ctl_set_success(ctsio); 5287 goto bailout; 5288 } 5289 5290 lun->flags |= CTL_LUN_RESERVED; 5291 lun->res_idx = residx; 5292 ctl_set_success(ctsio); 5293 5294 bailout: 5295 mtx_unlock(&lun->lun_lock); 5296 ctl_done((union ctl_io *)ctsio); 5297 return (CTL_RETVAL_COMPLETE); 5298 } 5299 5300 int 5301 ctl_start_stop(struct ctl_scsiio *ctsio) 5302 { 5303 struct ctl_lun *lun = CTL_LUN(ctsio); 5304 struct scsi_start_stop_unit *cdb; 5305 int retval; 5306 5307 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5308 5309 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5310 5311 if ((cdb->how & SSS_PC_MASK) == 0) { 5312 if ((lun->flags & CTL_LUN_PR_RESERVED) && 5313 (cdb->how & SSS_START) == 0) { 5314 uint32_t residx; 5315 5316 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5317 if (ctl_get_prkey(lun, residx) == 0 || 5318 (lun->pr_res_idx != residx && lun->pr_res_type < 4)) { 5319 5320 ctl_set_reservation_conflict(ctsio); 5321 ctl_done((union ctl_io *)ctsio); 5322 return (CTL_RETVAL_COMPLETE); 5323 } 5324 } 5325 5326 if ((cdb->how & SSS_LOEJ) && 5327 (lun->flags & CTL_LUN_REMOVABLE) == 0) { 5328 ctl_set_invalid_field(ctsio, 5329 /*sks_valid*/ 1, 5330 /*command*/ 1, 5331 /*field*/ 4, 5332 /*bit_valid*/ 1, 5333 /*bit*/ 1); 5334 ctl_done((union ctl_io *)ctsio); 5335 return (CTL_RETVAL_COMPLETE); 5336 } 5337 5338 if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) && 5339 lun->prevent_count > 0) { 5340 /* "Medium removal prevented" */ 5341 ctl_set_sense(ctsio, /*current_error*/ 1, 5342 /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ? 
5343 SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST, 5344 /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE); 5345 ctl_done((union ctl_io *)ctsio); 5346 return (CTL_RETVAL_COMPLETE); 5347 } 5348 } 5349 5350 retval = lun->backend->config_write((union ctl_io *)ctsio); 5351 return (retval); 5352 } 5353 5354 int 5355 ctl_prevent_allow(struct ctl_scsiio *ctsio) 5356 { 5357 struct ctl_lun *lun = CTL_LUN(ctsio); 5358 struct scsi_prevent *cdb; 5359 int retval; 5360 uint32_t initidx; 5361 5362 CTL_DEBUG_PRINT(("ctl_prevent_allow\n")); 5363 5364 cdb = (struct scsi_prevent *)ctsio->cdb; 5365 5366 if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) { 5367 ctl_set_invalid_opcode(ctsio); 5368 ctl_done((union ctl_io *)ctsio); 5369 return (CTL_RETVAL_COMPLETE); 5370 } 5371 5372 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5373 mtx_lock(&lun->lun_lock); 5374 if ((cdb->how & PR_PREVENT) && 5375 ctl_is_set(lun->prevent, initidx) == 0) { 5376 ctl_set_mask(lun->prevent, initidx); 5377 lun->prevent_count++; 5378 } else if ((cdb->how & PR_PREVENT) == 0 && 5379 ctl_is_set(lun->prevent, initidx)) { 5380 ctl_clear_mask(lun->prevent, initidx); 5381 lun->prevent_count--; 5382 } 5383 mtx_unlock(&lun->lun_lock); 5384 retval = lun->backend->config_write((union ctl_io *)ctsio); 5385 return (retval); 5386 } 5387 5388 /* 5389 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5390 * we don't really do anything with the LBA and length fields if the user 5391 * passes them in. Instead we'll just flush out the cache for the entire 5392 * LUN. 5393 */ 5394 int 5395 ctl_sync_cache(struct ctl_scsiio *ctsio) 5396 { 5397 struct ctl_lun *lun = CTL_LUN(ctsio); 5398 struct ctl_lba_len_flags *lbalen; 5399 uint64_t starting_lba; 5400 uint32_t block_count; 5401 int retval; 5402 uint8_t byte2; 5403 5404 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5405 5406 retval = 0; 5407 5408 switch (ctsio->cdb[0]) { 5409 case SYNCHRONIZE_CACHE: { 5410 struct scsi_sync_cache *cdb; 5411 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5412 5413 starting_lba = scsi_4btoul(cdb->begin_lba); 5414 block_count = scsi_2btoul(cdb->lb_count); 5415 byte2 = cdb->byte2; 5416 break; 5417 } 5418 case SYNCHRONIZE_CACHE_16: { 5419 struct scsi_sync_cache_16 *cdb; 5420 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5421 5422 starting_lba = scsi_8btou64(cdb->begin_lba); 5423 block_count = scsi_4btoul(cdb->lb_count); 5424 byte2 = cdb->byte2; 5425 break; 5426 } 5427 default: 5428 ctl_set_invalid_opcode(ctsio); 5429 ctl_done((union ctl_io *)ctsio); 5430 goto bailout; 5431 break; /* NOTREACHED */ 5432 } 5433 5434 /* 5435 * We check the LBA and length, but don't do anything with them. 5436 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5437 * get flushed. This check will just help satisfy anyone who wants 5438 * to see an error for an out of range LBA. 
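	 *
	 * For example (illustrative numbers only): with maxlba == 999 the
	 * valid LBAs are 0-999, so starting_lba 990 with block_count 10
	 * passes (990 + 10 == maxlba + 1), while block_count 11 trips the
	 * check below.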
5439 */ 5440 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5441 ctl_set_lba_out_of_range(ctsio, 5442 MAX(starting_lba, lun->be_lun->maxlba + 1)); 5443 ctl_done((union ctl_io *)ctsio); 5444 goto bailout; 5445 } 5446 5447 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5448 lbalen->lba = starting_lba; 5449 lbalen->len = block_count; 5450 lbalen->flags = byte2; 5451 retval = lun->backend->config_write((union ctl_io *)ctsio); 5452 5453 bailout: 5454 return (retval); 5455 } 5456 5457 int 5458 ctl_format(struct ctl_scsiio *ctsio) 5459 { 5460 struct scsi_format *cdb; 5461 int length, defect_list_len; 5462 5463 CTL_DEBUG_PRINT(("ctl_format\n")); 5464 5465 cdb = (struct scsi_format *)ctsio->cdb; 5466 5467 length = 0; 5468 if (cdb->byte2 & SF_FMTDATA) { 5469 if (cdb->byte2 & SF_LONGLIST) 5470 length = sizeof(struct scsi_format_header_long); 5471 else 5472 length = sizeof(struct scsi_format_header_short); 5473 } 5474 5475 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5476 && (length > 0)) { 5477 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5478 ctsio->kern_data_len = length; 5479 ctsio->kern_total_len = length; 5480 ctsio->kern_rel_offset = 0; 5481 ctsio->kern_sg_entries = 0; 5482 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5483 ctsio->be_move_done = ctl_config_move_done; 5484 ctl_datamove((union ctl_io *)ctsio); 5485 5486 return (CTL_RETVAL_COMPLETE); 5487 } 5488 5489 defect_list_len = 0; 5490 5491 if (cdb->byte2 & SF_FMTDATA) { 5492 if (cdb->byte2 & SF_LONGLIST) { 5493 struct scsi_format_header_long *header; 5494 5495 header = (struct scsi_format_header_long *) 5496 ctsio->kern_data_ptr; 5497 5498 defect_list_len = scsi_4btoul(header->defect_list_len); 5499 if (defect_list_len != 0) { 5500 ctl_set_invalid_field(ctsio, 5501 /*sks_valid*/ 1, 5502 /*command*/ 0, 5503 /*field*/ 2, 5504 /*bit_valid*/ 0, 5505 /*bit*/ 0); 5506 goto bailout; 5507 } 5508 } else { 5509 struct scsi_format_header_short *header; 5510 5511 header = (struct scsi_format_header_short *) 5512 ctsio->kern_data_ptr; 5513 5514 defect_list_len = scsi_2btoul(header->defect_list_len); 5515 if (defect_list_len != 0) { 5516 ctl_set_invalid_field(ctsio, 5517 /*sks_valid*/ 1, 5518 /*command*/ 0, 5519 /*field*/ 2, 5520 /*bit_valid*/ 0, 5521 /*bit*/ 0); 5522 goto bailout; 5523 } 5524 } 5525 } 5526 5527 ctl_set_success(ctsio); 5528 bailout: 5529 5530 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5531 free(ctsio->kern_data_ptr, M_CTL); 5532 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5533 } 5534 5535 ctl_done((union ctl_io *)ctsio); 5536 return (CTL_RETVAL_COMPLETE); 5537 } 5538 5539 int 5540 ctl_read_buffer(struct ctl_scsiio *ctsio) 5541 { 5542 struct ctl_lun *lun = CTL_LUN(ctsio); 5543 uint64_t buffer_offset; 5544 uint32_t len; 5545 uint8_t byte2; 5546 static uint8_t descr[4]; 5547 static uint8_t echo_descr[4] = { 0 }; 5548 5549 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5550 5551 switch (ctsio->cdb[0]) { 5552 case READ_BUFFER: { 5553 struct scsi_read_buffer *cdb; 5554 5555 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5556 buffer_offset = scsi_3btoul(cdb->offset); 5557 len = scsi_3btoul(cdb->length); 5558 byte2 = cdb->byte2; 5559 break; 5560 } 5561 case READ_BUFFER_16: { 5562 struct scsi_read_buffer_16 *cdb; 5563 5564 cdb = (struct scsi_read_buffer_16 *)ctsio->cdb; 5565 buffer_offset = scsi_8btou64(cdb->offset); 5566 len = scsi_4btoul(cdb->length); 5567 byte2 = cdb->byte2; 5568 break; 5569 } 5570 default: /* This shouldn't happen. 
*/ 5571 ctl_set_invalid_opcode(ctsio); 5572 ctl_done((union ctl_io *)ctsio); 5573 return (CTL_RETVAL_COMPLETE); 5574 } 5575 5576 if (buffer_offset > CTL_WRITE_BUFFER_SIZE || 5577 buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5578 ctl_set_invalid_field(ctsio, 5579 /*sks_valid*/ 1, 5580 /*command*/ 1, 5581 /*field*/ 6, 5582 /*bit_valid*/ 0, 5583 /*bit*/ 0); 5584 ctl_done((union ctl_io *)ctsio); 5585 return (CTL_RETVAL_COMPLETE); 5586 } 5587 5588 if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5589 descr[0] = 0; 5590 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5591 ctsio->kern_data_ptr = descr; 5592 len = min(len, sizeof(descr)); 5593 } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5594 ctsio->kern_data_ptr = echo_descr; 5595 len = min(len, sizeof(echo_descr)); 5596 } else { 5597 if (lun->write_buffer == NULL) { 5598 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5599 M_CTL, M_WAITOK); 5600 } 5601 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5602 } 5603 ctsio->kern_data_len = len; 5604 ctsio->kern_total_len = len; 5605 ctsio->kern_rel_offset = 0; 5606 ctsio->kern_sg_entries = 0; 5607 ctl_set_success(ctsio); 5608 ctsio->be_move_done = ctl_config_move_done; 5609 ctl_datamove((union ctl_io *)ctsio); 5610 return (CTL_RETVAL_COMPLETE); 5611 } 5612 5613 int 5614 ctl_write_buffer(struct ctl_scsiio *ctsio) 5615 { 5616 struct ctl_lun *lun = CTL_LUN(ctsio); 5617 struct scsi_write_buffer *cdb; 5618 int buffer_offset, len; 5619 5620 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5621 5622 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5623 5624 len = scsi_3btoul(cdb->length); 5625 buffer_offset = scsi_3btoul(cdb->offset); 5626 5627 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5628 ctl_set_invalid_field(ctsio, 5629 /*sks_valid*/ 1, 5630 /*command*/ 1, 5631 /*field*/ 6, 5632 /*bit_valid*/ 0, 5633 /*bit*/ 0); 5634 ctl_done((union ctl_io *)ctsio); 5635 return (CTL_RETVAL_COMPLETE); 5636 } 5637 5638 /* 5639 * If we've got a kernel request that hasn't been malloced yet, 5640 * malloc it and tell the caller the data buffer is here. 
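	 * The data is written into lun->write_buffer (CTL_WRITE_BUFFER_SIZE
	 * bytes, allocated lazily) and can be read back later with
	 * READ BUFFER.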
5641 */ 5642 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5643 if (lun->write_buffer == NULL) { 5644 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5645 M_CTL, M_WAITOK); 5646 } 5647 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5648 ctsio->kern_data_len = len; 5649 ctsio->kern_total_len = len; 5650 ctsio->kern_rel_offset = 0; 5651 ctsio->kern_sg_entries = 0; 5652 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5653 ctsio->be_move_done = ctl_config_move_done; 5654 ctl_datamove((union ctl_io *)ctsio); 5655 5656 return (CTL_RETVAL_COMPLETE); 5657 } 5658 5659 ctl_set_success(ctsio); 5660 ctl_done((union ctl_io *)ctsio); 5661 return (CTL_RETVAL_COMPLETE); 5662 } 5663 5664 int 5665 ctl_write_same(struct ctl_scsiio *ctsio) 5666 { 5667 struct ctl_lun *lun = CTL_LUN(ctsio); 5668 struct ctl_lba_len_flags *lbalen; 5669 uint64_t lba; 5670 uint32_t num_blocks; 5671 int len, retval; 5672 uint8_t byte2; 5673 5674 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5675 5676 switch (ctsio->cdb[0]) { 5677 case WRITE_SAME_10: { 5678 struct scsi_write_same_10 *cdb; 5679 5680 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5681 5682 lba = scsi_4btoul(cdb->addr); 5683 num_blocks = scsi_2btoul(cdb->length); 5684 byte2 = cdb->byte2; 5685 break; 5686 } 5687 case WRITE_SAME_16: { 5688 struct scsi_write_same_16 *cdb; 5689 5690 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5691 5692 lba = scsi_8btou64(cdb->addr); 5693 num_blocks = scsi_4btoul(cdb->length); 5694 byte2 = cdb->byte2; 5695 break; 5696 } 5697 default: 5698 /* 5699 * We got a command we don't support. This shouldn't 5700 * happen, commands should be filtered out above us. 5701 */ 5702 ctl_set_invalid_opcode(ctsio); 5703 ctl_done((union ctl_io *)ctsio); 5704 5705 return (CTL_RETVAL_COMPLETE); 5706 break; /* NOTREACHED */ 5707 } 5708 5709 /* ANCHOR flag can be used only together with UNMAP */ 5710 if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) { 5711 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5712 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5713 ctl_done((union ctl_io *)ctsio); 5714 return (CTL_RETVAL_COMPLETE); 5715 } 5716 5717 /* 5718 * The first check is to make sure we're in bounds, the second 5719 * check is to catch wrap-around problems. If the lba + num blocks 5720 * is less than the lba, then we've wrapped around and the block 5721 * range is invalid anyway. 5722 */ 5723 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5724 || ((lba + num_blocks) < lba)) { 5725 ctl_set_lba_out_of_range(ctsio, 5726 MAX(lba, lun->be_lun->maxlba + 1)); 5727 ctl_done((union ctl_io *)ctsio); 5728 return (CTL_RETVAL_COMPLETE); 5729 } 5730 5731 /* Zero number of blocks means "to the last logical block" */ 5732 if (num_blocks == 0) { 5733 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5734 ctl_set_invalid_field(ctsio, 5735 /*sks_valid*/ 0, 5736 /*command*/ 1, 5737 /*field*/ 0, 5738 /*bit_valid*/ 0, 5739 /*bit*/ 0); 5740 ctl_done((union ctl_io *)ctsio); 5741 return (CTL_RETVAL_COMPLETE); 5742 } 5743 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5744 } 5745 5746 len = lun->be_lun->blocksize; 5747 5748 /* 5749 * If we've got a kernel request that hasn't been malloced yet, 5750 * malloc it and tell the caller the data buffer is here. 
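	 * With the NDOB (no data-out buffer) bit set there is no parameter
	 * data at all, so the allocation below is skipped and the request
	 * goes straight to the backend.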
5751 */ 5752 if ((byte2 & SWS_NDOB) == 0 && 5753 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5754 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5755 ctsio->kern_data_len = len; 5756 ctsio->kern_total_len = len; 5757 ctsio->kern_rel_offset = 0; 5758 ctsio->kern_sg_entries = 0; 5759 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5760 ctsio->be_move_done = ctl_config_move_done; 5761 ctl_datamove((union ctl_io *)ctsio); 5762 5763 return (CTL_RETVAL_COMPLETE); 5764 } 5765 5766 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5767 lbalen->lba = lba; 5768 lbalen->len = num_blocks; 5769 lbalen->flags = byte2; 5770 retval = lun->backend->config_write((union ctl_io *)ctsio); 5771 5772 return (retval); 5773 } 5774 5775 int 5776 ctl_unmap(struct ctl_scsiio *ctsio) 5777 { 5778 struct ctl_lun *lun = CTL_LUN(ctsio); 5779 struct scsi_unmap *cdb; 5780 struct ctl_ptr_len_flags *ptrlen; 5781 struct scsi_unmap_header *hdr; 5782 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5783 uint64_t lba; 5784 uint32_t num_blocks; 5785 int len, retval; 5786 uint8_t byte2; 5787 5788 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5789 5790 cdb = (struct scsi_unmap *)ctsio->cdb; 5791 len = scsi_2btoul(cdb->length); 5792 byte2 = cdb->byte2; 5793 5794 /* 5795 * If we've got a kernel request that hasn't been malloced yet, 5796 * malloc it and tell the caller the data buffer is here. 5797 */ 5798 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5799 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5800 ctsio->kern_data_len = len; 5801 ctsio->kern_total_len = len; 5802 ctsio->kern_rel_offset = 0; 5803 ctsio->kern_sg_entries = 0; 5804 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5805 ctsio->be_move_done = ctl_config_move_done; 5806 ctl_datamove((union ctl_io *)ctsio); 5807 5808 return (CTL_RETVAL_COMPLETE); 5809 } 5810 5811 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5812 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5813 if (len < sizeof (*hdr) || 5814 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5815 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5816 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5817 ctl_set_invalid_field(ctsio, 5818 /*sks_valid*/ 0, 5819 /*command*/ 0, 5820 /*field*/ 0, 5821 /*bit_valid*/ 0, 5822 /*bit*/ 0); 5823 goto done; 5824 } 5825 len = scsi_2btoul(hdr->desc_length); 5826 buf = (struct scsi_unmap_desc *)(hdr + 1); 5827 end = buf + len / sizeof(*buf); 5828 5829 endnz = buf; 5830 for (range = buf; range < end; range++) { 5831 lba = scsi_8btou64(range->lba); 5832 num_blocks = scsi_4btoul(range->length); 5833 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5834 || ((lba + num_blocks) < lba)) { 5835 ctl_set_lba_out_of_range(ctsio, 5836 MAX(lba, lun->be_lun->maxlba + 1)); 5837 ctl_done((union ctl_io *)ctsio); 5838 return (CTL_RETVAL_COMPLETE); 5839 } 5840 if (num_blocks != 0) 5841 endnz = range + 1; 5842 } 5843 5844 /* 5845 * Block backend can not handle zero last range. 5846 * Filter it out and return if there is nothing left. 
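	 * For example, a descriptor list whose trailing entries all have a
	 * zero block count is truncated at 'endnz'; if every descriptor is
	 * zero-length the command simply succeeds without touching the
	 * backend.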
5847 */ 5848 len = (uint8_t *)endnz - (uint8_t *)buf; 5849 if (len == 0) { 5850 ctl_set_success(ctsio); 5851 goto done; 5852 } 5853 5854 mtx_lock(&lun->lun_lock); 5855 ptrlen = (struct ctl_ptr_len_flags *) 5856 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5857 ptrlen->ptr = (void *)buf; 5858 ptrlen->len = len; 5859 ptrlen->flags = byte2; 5860 ctl_check_blocked(lun); 5861 mtx_unlock(&lun->lun_lock); 5862 5863 retval = lun->backend->config_write((union ctl_io *)ctsio); 5864 return (retval); 5865 5866 done: 5867 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5868 free(ctsio->kern_data_ptr, M_CTL); 5869 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5870 } 5871 ctl_done((union ctl_io *)ctsio); 5872 return (CTL_RETVAL_COMPLETE); 5873 } 5874 5875 int 5876 ctl_default_page_handler(struct ctl_scsiio *ctsio, 5877 struct ctl_page_index *page_index, uint8_t *page_ptr) 5878 { 5879 struct ctl_lun *lun = CTL_LUN(ctsio); 5880 uint8_t *current_cp; 5881 int set_ua; 5882 uint32_t initidx; 5883 5884 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5885 set_ua = 0; 5886 5887 current_cp = (page_index->page_data + (page_index->page_len * 5888 CTL_PAGE_CURRENT)); 5889 5890 mtx_lock(&lun->lun_lock); 5891 if (memcmp(current_cp, page_ptr, page_index->page_len)) { 5892 memcpy(current_cp, page_ptr, page_index->page_len); 5893 set_ua = 1; 5894 } 5895 if (set_ua != 0) 5896 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5897 mtx_unlock(&lun->lun_lock); 5898 if (set_ua) { 5899 ctl_isc_announce_mode(lun, 5900 ctl_get_initindex(&ctsio->io_hdr.nexus), 5901 page_index->page_code, page_index->subpage); 5902 } 5903 return (CTL_RETVAL_COMPLETE); 5904 } 5905 5906 static void 5907 ctl_ie_timer(void *arg) 5908 { 5909 struct ctl_lun *lun = arg; 5910 uint64_t t; 5911 5912 if (lun->ie_asc == 0) 5913 return; 5914 5915 if (lun->MODE_IE.mrie == SIEP_MRIE_UA) 5916 ctl_est_ua_all(lun, -1, CTL_UA_IE); 5917 else 5918 lun->ie_reported = 0; 5919 5920 if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) { 5921 lun->ie_reportcnt++; 5922 t = scsi_4btoul(lun->MODE_IE.interval_timer); 5923 if (t == 0 || t == UINT32_MAX) 5924 t = 3000; /* 5 min */ 5925 callout_schedule(&lun->ie_callout, t * hz / 10); 5926 } 5927 } 5928 5929 int 5930 ctl_ie_page_handler(struct ctl_scsiio *ctsio, 5931 struct ctl_page_index *page_index, uint8_t *page_ptr) 5932 { 5933 struct ctl_lun *lun = CTL_LUN(ctsio); 5934 struct scsi_info_exceptions_page *pg; 5935 uint64_t t; 5936 5937 (void)ctl_default_page_handler(ctsio, page_index, page_ptr); 5938 5939 pg = (struct scsi_info_exceptions_page *)page_ptr; 5940 mtx_lock(&lun->lun_lock); 5941 if (pg->info_flags & SIEP_FLAGS_TEST) { 5942 lun->ie_asc = 0x5d; 5943 lun->ie_ascq = 0xff; 5944 if (pg->mrie == SIEP_MRIE_UA) { 5945 ctl_est_ua_all(lun, -1, CTL_UA_IE); 5946 lun->ie_reported = 1; 5947 } else { 5948 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 5949 lun->ie_reported = -1; 5950 } 5951 lun->ie_reportcnt = 1; 5952 if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) { 5953 lun->ie_reportcnt++; 5954 t = scsi_4btoul(pg->interval_timer); 5955 if (t == 0 || t == UINT32_MAX) 5956 t = 3000; /* 5 min */ 5957 callout_reset(&lun->ie_callout, t * hz / 10, 5958 ctl_ie_timer, lun); 5959 } 5960 } else { 5961 lun->ie_asc = 0; 5962 lun->ie_ascq = 0; 5963 lun->ie_reported = 1; 5964 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 5965 lun->ie_reportcnt = UINT32_MAX; 5966 callout_stop(&lun->ie_callout); 5967 } 5968 mtx_unlock(&lun->lun_lock); 5969 return (CTL_RETVAL_COMPLETE); 5970 } 5971 5972 static int 5973 ctl_do_mode_select(union ctl_io *io) 5974 { 
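/* * Walk the MODE SELECT parameter list one mode page at a time: find the * matching entry in the LUN's mode page index, verify that the initiator * only modified changeable bits, then pass the page to its select handler. * The len_left/len_used counters live in the io's private modepage area so * the walk can resume here if a handler queues the I/O and we are called * back via io_cont. */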
5975 struct ctl_lun *lun = CTL_LUN(io); 5976 struct scsi_mode_page_header *page_header; 5977 struct ctl_page_index *page_index; 5978 struct ctl_scsiio *ctsio; 5979 int page_len, page_len_offset, page_len_size; 5980 union ctl_modepage_info *modepage_info; 5981 uint16_t *len_left, *len_used; 5982 int retval, i; 5983 5984 ctsio = &io->scsiio; 5985 page_index = NULL; 5986 page_len = 0; 5987 5988 modepage_info = (union ctl_modepage_info *) 5989 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 5990 len_left = &modepage_info->header.len_left; 5991 len_used = &modepage_info->header.len_used; 5992 5993 do_next_page: 5994 5995 page_header = (struct scsi_mode_page_header *) 5996 (ctsio->kern_data_ptr + *len_used); 5997 5998 if (*len_left == 0) { 5999 free(ctsio->kern_data_ptr, M_CTL); 6000 ctl_set_success(ctsio); 6001 ctl_done((union ctl_io *)ctsio); 6002 return (CTL_RETVAL_COMPLETE); 6003 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 6004 6005 free(ctsio->kern_data_ptr, M_CTL); 6006 ctl_set_param_len_error(ctsio); 6007 ctl_done((union ctl_io *)ctsio); 6008 return (CTL_RETVAL_COMPLETE); 6009 6010 } else if ((page_header->page_code & SMPH_SPF) 6011 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6012 6013 free(ctsio->kern_data_ptr, M_CTL); 6014 ctl_set_param_len_error(ctsio); 6015 ctl_done((union ctl_io *)ctsio); 6016 return (CTL_RETVAL_COMPLETE); 6017 } 6018 6019 6020 /* 6021 * XXX KDM should we do something with the block descriptor? 6022 */ 6023 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6024 page_index = &lun->mode_pages.index[i]; 6025 if (lun->be_lun->lun_type == T_DIRECT && 6026 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6027 continue; 6028 if (lun->be_lun->lun_type == T_PROCESSOR && 6029 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6030 continue; 6031 if (lun->be_lun->lun_type == T_CDROM && 6032 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6033 continue; 6034 6035 if ((page_index->page_code & SMPH_PC_MASK) != 6036 (page_header->page_code & SMPH_PC_MASK)) 6037 continue; 6038 6039 /* 6040 * If neither page has a subpage code, then we've got a 6041 * match. 6042 */ 6043 if (((page_index->page_code & SMPH_SPF) == 0) 6044 && ((page_header->page_code & SMPH_SPF) == 0)) { 6045 page_len = page_header->page_length; 6046 break; 6047 } 6048 6049 /* 6050 * If both pages have subpages, then the subpage numbers 6051 * have to match. 6052 */ 6053 if ((page_index->page_code & SMPH_SPF) 6054 && (page_header->page_code & SMPH_SPF)) { 6055 struct scsi_mode_page_header_sp *sph; 6056 6057 sph = (struct scsi_mode_page_header_sp *)page_header; 6058 if (page_index->subpage == sph->subpage) { 6059 page_len = scsi_2btoul(sph->page_length); 6060 break; 6061 } 6062 } 6063 } 6064 6065 /* 6066 * If we couldn't find the page, or if we don't have a mode select 6067 * handler for it, send back an error to the user. 
6068 */ 6069 if ((i >= CTL_NUM_MODE_PAGES) 6070 || (page_index->select_handler == NULL)) { 6071 ctl_set_invalid_field(ctsio, 6072 /*sks_valid*/ 1, 6073 /*command*/ 0, 6074 /*field*/ *len_used, 6075 /*bit_valid*/ 0, 6076 /*bit*/ 0); 6077 free(ctsio->kern_data_ptr, M_CTL); 6078 ctl_done((union ctl_io *)ctsio); 6079 return (CTL_RETVAL_COMPLETE); 6080 } 6081 6082 if (page_index->page_code & SMPH_SPF) { 6083 page_len_offset = 2; 6084 page_len_size = 2; 6085 } else { 6086 page_len_size = 1; 6087 page_len_offset = 1; 6088 } 6089 6090 /* 6091 * If the length the initiator gives us isn't the one we specify in 6092 * the mode page header, or if they didn't specify enough data in 6093 * the CDB to avoid truncating this page, kick out the request. 6094 */ 6095 if (page_len != page_index->page_len - page_len_offset - page_len_size) { 6096 ctl_set_invalid_field(ctsio, 6097 /*sks_valid*/ 1, 6098 /*command*/ 0, 6099 /*field*/ *len_used + page_len_offset, 6100 /*bit_valid*/ 0, 6101 /*bit*/ 0); 6102 free(ctsio->kern_data_ptr, M_CTL); 6103 ctl_done((union ctl_io *)ctsio); 6104 return (CTL_RETVAL_COMPLETE); 6105 } 6106 if (*len_left < page_index->page_len) { 6107 free(ctsio->kern_data_ptr, M_CTL); 6108 ctl_set_param_len_error(ctsio); 6109 ctl_done((union ctl_io *)ctsio); 6110 return (CTL_RETVAL_COMPLETE); 6111 } 6112 6113 /* 6114 * Run through the mode page, checking to make sure that the bits 6115 * the user changed are actually legal for him to change. 6116 */ 6117 for (i = 0; i < page_index->page_len; i++) { 6118 uint8_t *user_byte, *change_mask, *current_byte; 6119 int bad_bit; 6120 int j; 6121 6122 user_byte = (uint8_t *)page_header + i; 6123 change_mask = page_index->page_data + 6124 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6125 current_byte = page_index->page_data + 6126 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6127 6128 /* 6129 * Check to see whether the user set any bits in this byte 6130 * that he is not allowed to set. 6131 */ 6132 if ((*user_byte & ~(*change_mask)) == 6133 (*current_byte & ~(*change_mask))) 6134 continue; 6135 6136 /* 6137 * Go through bit by bit to determine which one is illegal. 6138 */ 6139 bad_bit = 0; 6140 for (j = 7; j >= 0; j--) { 6141 if ((((1 << j) & ~(*change_mask)) & *user_byte) != 6142 (((1 << j) & ~(*change_mask)) & *current_byte)) { 6143 bad_bit = j; 6144 break; 6145 } 6146 } 6147 ctl_set_invalid_field(ctsio, 6148 /*sks_valid*/ 1, 6149 /*command*/ 0, 6150 /*field*/ *len_used + i, 6151 /*bit_valid*/ 1, 6152 /*bit*/ bad_bit); 6153 free(ctsio->kern_data_ptr, M_CTL); 6154 ctl_done((union ctl_io *)ctsio); 6155 return (CTL_RETVAL_COMPLETE); 6156 } 6157 6158 /* 6159 * Update these before we call the page handler, since we may 6160 * end up getting called back one way or another before the handler 6161 * returns to this context. 6162 */ 6163 *len_left -= page_index->page_len; 6164 *len_used += page_index->page_len; 6165 6166 retval = page_index->select_handler(ctsio, page_index, 6167 (uint8_t *)page_header); 6168 6169 /* 6170 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 6171 * wait until this queued command completes to finish processing 6172 * the mode page. If it returns anything other than 6173 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 6174 * already set the sense information, freed the data pointer, and 6175 * completed the io for us. 6176 */ 6177 if (retval != CTL_RETVAL_COMPLETE) 6178 goto bailout_no_done; 6179 6180 /* 6181 * If the initiator sent us more than one page, parse the next one.
6182 */ 6183 if (*len_left > 0) 6184 goto do_next_page; 6185 6186 ctl_set_success(ctsio); 6187 free(ctsio->kern_data_ptr, M_CTL); 6188 ctl_done((union ctl_io *)ctsio); 6189 6190 bailout_no_done: 6191 6192 return (CTL_RETVAL_COMPLETE); 6193 6194 } 6195 6196 int 6197 ctl_mode_select(struct ctl_scsiio *ctsio) 6198 { 6199 struct ctl_lun *lun = CTL_LUN(ctsio); 6200 union ctl_modepage_info *modepage_info; 6201 int bd_len, i, header_size, param_len, pf, rtd, sp; 6202 uint32_t initidx; 6203 6204 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 6205 switch (ctsio->cdb[0]) { 6206 case MODE_SELECT_6: { 6207 struct scsi_mode_select_6 *cdb; 6208 6209 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6210 6211 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6212 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6213 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6214 param_len = cdb->length; 6215 header_size = sizeof(struct scsi_mode_header_6); 6216 break; 6217 } 6218 case MODE_SELECT_10: { 6219 struct scsi_mode_select_10 *cdb; 6220 6221 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6222 6223 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6224 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6225 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6226 param_len = scsi_2btoul(cdb->length); 6227 header_size = sizeof(struct scsi_mode_header_10); 6228 break; 6229 } 6230 default: 6231 ctl_set_invalid_opcode(ctsio); 6232 ctl_done((union ctl_io *)ctsio); 6233 return (CTL_RETVAL_COMPLETE); 6234 } 6235 6236 if (rtd) { 6237 if (param_len != 0) { 6238 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, 6239 /*command*/ 1, /*field*/ 0, 6240 /*bit_valid*/ 0, /*bit*/ 0); 6241 ctl_done((union ctl_io *)ctsio); 6242 return (CTL_RETVAL_COMPLETE); 6243 } 6244 6245 /* Revert to defaults. */ 6246 ctl_init_page_index(lun); 6247 mtx_lock(&lun->lun_lock); 6248 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 6249 mtx_unlock(&lun->lun_lock); 6250 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6251 ctl_isc_announce_mode(lun, -1, 6252 lun->mode_pages.index[i].page_code & SMPH_PC_MASK, 6253 lun->mode_pages.index[i].subpage); 6254 } 6255 ctl_set_success(ctsio); 6256 ctl_done((union ctl_io *)ctsio); 6257 return (CTL_RETVAL_COMPLETE); 6258 } 6259 6260 /* 6261 * From SPC-3: 6262 * "A parameter list length of zero indicates that the Data-Out Buffer 6263 * shall be empty. This condition shall not be considered as an error." 6264 */ 6265 if (param_len == 0) { 6266 ctl_set_success(ctsio); 6267 ctl_done((union ctl_io *)ctsio); 6268 return (CTL_RETVAL_COMPLETE); 6269 } 6270 6271 /* 6272 * Since we'll hit this the first time through, prior to 6273 * allocation, we don't need to free a data buffer here. 6274 */ 6275 if (param_len < header_size) { 6276 ctl_set_param_len_error(ctsio); 6277 ctl_done((union ctl_io *)ctsio); 6278 return (CTL_RETVAL_COMPLETE); 6279 } 6280 6281 /* 6282 * Allocate the data buffer and grab the user's data. In theory, 6283 * we shouldn't have to sanity check the parameter list length here 6284 * because the maximum size is 64K. We should be able to malloc 6285 * that much without too many problems. 
6286 */ 6287 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6288 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6289 ctsio->kern_data_len = param_len; 6290 ctsio->kern_total_len = param_len; 6291 ctsio->kern_rel_offset = 0; 6292 ctsio->kern_sg_entries = 0; 6293 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6294 ctsio->be_move_done = ctl_config_move_done; 6295 ctl_datamove((union ctl_io *)ctsio); 6296 6297 return (CTL_RETVAL_COMPLETE); 6298 } 6299 6300 switch (ctsio->cdb[0]) { 6301 case MODE_SELECT_6: { 6302 struct scsi_mode_header_6 *mh6; 6303 6304 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6305 bd_len = mh6->blk_desc_len; 6306 break; 6307 } 6308 case MODE_SELECT_10: { 6309 struct scsi_mode_header_10 *mh10; 6310 6311 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6312 bd_len = scsi_2btoul(mh10->blk_desc_len); 6313 break; 6314 } 6315 default: 6316 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6317 } 6318 6319 if (param_len < (header_size + bd_len)) { 6320 free(ctsio->kern_data_ptr, M_CTL); 6321 ctl_set_param_len_error(ctsio); 6322 ctl_done((union ctl_io *)ctsio); 6323 return (CTL_RETVAL_COMPLETE); 6324 } 6325 6326 /* 6327 * Set the IO_CONT flag, so that if this I/O gets passed to 6328 * ctl_config_write_done(), it'll get passed back to 6329 * ctl_do_mode_select() for further processing, or completion if 6330 * we're all done. 6331 */ 6332 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6333 ctsio->io_cont = ctl_do_mode_select; 6334 6335 modepage_info = (union ctl_modepage_info *) 6336 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6337 memset(modepage_info, 0, sizeof(*modepage_info)); 6338 modepage_info->header.len_left = param_len - header_size - bd_len; 6339 modepage_info->header.len_used = header_size + bd_len; 6340 6341 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6342 } 6343 6344 int 6345 ctl_mode_sense(struct ctl_scsiio *ctsio) 6346 { 6347 struct ctl_lun *lun = CTL_LUN(ctsio); 6348 int pc, page_code, dbd, llba, subpage; 6349 int alloc_len, page_len, header_len, total_len; 6350 struct scsi_mode_block_descr *block_desc; 6351 struct ctl_page_index *page_index; 6352 6353 dbd = 0; 6354 llba = 0; 6355 block_desc = NULL; 6356 6357 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6358 6359 switch (ctsio->cdb[0]) { 6360 case MODE_SENSE_6: { 6361 struct scsi_mode_sense_6 *cdb; 6362 6363 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6364 6365 header_len = sizeof(struct scsi_mode_hdr_6); 6366 if (cdb->byte2 & SMS_DBD) 6367 dbd = 1; 6368 else 6369 header_len += sizeof(struct scsi_mode_block_descr); 6370 6371 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6372 page_code = cdb->page & SMS_PAGE_CODE; 6373 subpage = cdb->subpage; 6374 alloc_len = cdb->length; 6375 break; 6376 } 6377 case MODE_SENSE_10: { 6378 struct scsi_mode_sense_10 *cdb; 6379 6380 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6381 6382 header_len = sizeof(struct scsi_mode_hdr_10); 6383 6384 if (cdb->byte2 & SMS_DBD) 6385 dbd = 1; 6386 else 6387 header_len += sizeof(struct scsi_mode_block_descr); 6388 if (cdb->byte2 & SMS10_LLBAA) 6389 llba = 1; 6390 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6391 page_code = cdb->page & SMS_PAGE_CODE; 6392 subpage = cdb->subpage; 6393 alloc_len = scsi_2btoul(cdb->length); 6394 break; 6395 } 6396 default: 6397 ctl_set_invalid_opcode(ctsio); 6398 ctl_done((union ctl_io *)ctsio); 6399 return (CTL_RETVAL_COMPLETE); 6400 break; /* NOTREACHED */ 6401 } 6402 6403 /* 6404 * We have to make a first pass through to calculate the size of 6405 * the pages that 
match the user's query. Then we allocate enough 6406 * memory to hold it, and actually copy the data into the buffer. 6407 */ 6408 switch (page_code) { 6409 case SMS_ALL_PAGES_PAGE: { 6410 u_int i; 6411 6412 page_len = 0; 6413 6414 /* 6415 * At the moment, values other than 0 and 0xff here are 6416 * reserved according to SPC-3. 6417 */ 6418 if ((subpage != SMS_SUBPAGE_PAGE_0) 6419 && (subpage != SMS_SUBPAGE_ALL)) { 6420 ctl_set_invalid_field(ctsio, 6421 /*sks_valid*/ 1, 6422 /*command*/ 1, 6423 /*field*/ 3, 6424 /*bit_valid*/ 0, 6425 /*bit*/ 0); 6426 ctl_done((union ctl_io *)ctsio); 6427 return (CTL_RETVAL_COMPLETE); 6428 } 6429 6430 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6431 page_index = &lun->mode_pages.index[i]; 6432 6433 /* Make sure the page is supported for this dev type */ 6434 if (lun->be_lun->lun_type == T_DIRECT && 6435 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6436 continue; 6437 if (lun->be_lun->lun_type == T_PROCESSOR && 6438 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6439 continue; 6440 if (lun->be_lun->lun_type == T_CDROM && 6441 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6442 continue; 6443 6444 /* 6445 * We don't use this subpage if the user didn't 6446 * request all subpages. 6447 */ 6448 if ((page_index->subpage != 0) 6449 && (subpage == SMS_SUBPAGE_PAGE_0)) 6450 continue; 6451 6452 #if 0 6453 printf("found page %#x len %d\n", 6454 page_index->page_code & SMPH_PC_MASK, 6455 page_index->page_len); 6456 #endif 6457 page_len += page_index->page_len; 6458 } 6459 break; 6460 } 6461 default: { 6462 u_int i; 6463 6464 page_len = 0; 6465 6466 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6467 page_index = &lun->mode_pages.index[i]; 6468 6469 /* Make sure the page is supported for this dev type */ 6470 if (lun->be_lun->lun_type == T_DIRECT && 6471 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6472 continue; 6473 if (lun->be_lun->lun_type == T_PROCESSOR && 6474 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6475 continue; 6476 if (lun->be_lun->lun_type == T_CDROM && 6477 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6478 continue; 6479 6480 /* Look for the right page code */ 6481 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6482 continue; 6483 6484 /* Look for the right subpage or the subpage wildcard*/ 6485 if ((page_index->subpage != subpage) 6486 && (subpage != SMS_SUBPAGE_ALL)) 6487 continue; 6488 6489 #if 0 6490 printf("found page %#x len %d\n", 6491 page_index->page_code & SMPH_PC_MASK, 6492 page_index->page_len); 6493 #endif 6494 6495 page_len += page_index->page_len; 6496 } 6497 6498 if (page_len == 0) { 6499 ctl_set_invalid_field(ctsio, 6500 /*sks_valid*/ 1, 6501 /*command*/ 1, 6502 /*field*/ 2, 6503 /*bit_valid*/ 1, 6504 /*bit*/ 5); 6505 ctl_done((union ctl_io *)ctsio); 6506 return (CTL_RETVAL_COMPLETE); 6507 } 6508 break; 6509 } 6510 } 6511 6512 total_len = header_len + page_len; 6513 #if 0 6514 printf("header_len = %d, page_len = %d, total_len = %d\n", 6515 header_len, page_len, total_len); 6516 #endif 6517 6518 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6519 ctsio->kern_sg_entries = 0; 6520 ctsio->kern_rel_offset = 0; 6521 ctsio->kern_data_len = min(total_len, alloc_len); 6522 ctsio->kern_total_len = ctsio->kern_data_len; 6523 6524 switch (ctsio->cdb[0]) { 6525 case MODE_SENSE_6: { 6526 struct scsi_mode_hdr_6 *header; 6527 6528 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6529 6530 header->datalen = MIN(total_len - 1, 254); 6531 if (lun->be_lun->lun_type == T_DIRECT) 
{ 6532 header->dev_specific = 0x10; /* DPOFUA */ 6533 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6534 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6535 header->dev_specific |= 0x80; /* WP */ 6536 } 6537 if (dbd) 6538 header->block_descr_len = 0; 6539 else 6540 header->block_descr_len = 6541 sizeof(struct scsi_mode_block_descr); 6542 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6543 break; 6544 } 6545 case MODE_SENSE_10: { 6546 struct scsi_mode_hdr_10 *header; 6547 int datalen; 6548 6549 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6550 6551 datalen = MIN(total_len - 2, 65533); 6552 scsi_ulto2b(datalen, header->datalen); 6553 if (lun->be_lun->lun_type == T_DIRECT) { 6554 header->dev_specific = 0x10; /* DPOFUA */ 6555 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6556 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6557 header->dev_specific |= 0x80; /* WP */ 6558 } 6559 if (dbd) 6560 scsi_ulto2b(0, header->block_descr_len); 6561 else 6562 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6563 header->block_descr_len); 6564 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6565 break; 6566 } 6567 default: 6568 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6569 } 6570 6571 /* 6572 * If we've got a disk, use its blocksize in the block 6573 * descriptor. Otherwise, just set it to 0. 6574 */ 6575 if (dbd == 0) { 6576 if (lun->be_lun->lun_type == T_DIRECT) 6577 scsi_ulto3b(lun->be_lun->blocksize, 6578 block_desc->block_len); 6579 else 6580 scsi_ulto3b(0, block_desc->block_len); 6581 } 6582 6583 switch (page_code) { 6584 case SMS_ALL_PAGES_PAGE: { 6585 int i, data_used; 6586 6587 data_used = header_len; 6588 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6589 struct ctl_page_index *page_index; 6590 6591 page_index = &lun->mode_pages.index[i]; 6592 if (lun->be_lun->lun_type == T_DIRECT && 6593 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6594 continue; 6595 if (lun->be_lun->lun_type == T_PROCESSOR && 6596 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6597 continue; 6598 if (lun->be_lun->lun_type == T_CDROM && 6599 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6600 continue; 6601 6602 /* 6603 * We don't use this subpage if the user didn't 6604 * request all subpages. We already checked (above) 6605 * to make sure the user only specified a subpage 6606 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6607 */ 6608 if ((page_index->subpage != 0) 6609 && (subpage == SMS_SUBPAGE_PAGE_0)) 6610 continue; 6611 6612 /* 6613 * Call the handler, if it exists, to update the 6614 * page to the latest values. 
6615 */ 6616 if (page_index->sense_handler != NULL) 6617 page_index->sense_handler(ctsio, page_index,pc); 6618 6619 memcpy(ctsio->kern_data_ptr + data_used, 6620 page_index->page_data + 6621 (page_index->page_len * pc), 6622 page_index->page_len); 6623 data_used += page_index->page_len; 6624 } 6625 break; 6626 } 6627 default: { 6628 int i, data_used; 6629 6630 data_used = header_len; 6631 6632 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6633 struct ctl_page_index *page_index; 6634 6635 page_index = &lun->mode_pages.index[i]; 6636 6637 /* Look for the right page code */ 6638 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6639 continue; 6640 6641 /* Look for the right subpage or the subpage wildcard*/ 6642 if ((page_index->subpage != subpage) 6643 && (subpage != SMS_SUBPAGE_ALL)) 6644 continue; 6645 6646 /* Make sure the page is supported for this dev type */ 6647 if (lun->be_lun->lun_type == T_DIRECT && 6648 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6649 continue; 6650 if (lun->be_lun->lun_type == T_PROCESSOR && 6651 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6652 continue; 6653 if (lun->be_lun->lun_type == T_CDROM && 6654 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6655 continue; 6656 6657 /* 6658 * Call the handler, if it exists, to update the 6659 * page to the latest values. 6660 */ 6661 if (page_index->sense_handler != NULL) 6662 page_index->sense_handler(ctsio, page_index,pc); 6663 6664 memcpy(ctsio->kern_data_ptr + data_used, 6665 page_index->page_data + 6666 (page_index->page_len * pc), 6667 page_index->page_len); 6668 data_used += page_index->page_len; 6669 } 6670 break; 6671 } 6672 } 6673 6674 ctl_set_success(ctsio); 6675 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6676 ctsio->be_move_done = ctl_config_move_done; 6677 ctl_datamove((union ctl_io *)ctsio); 6678 return (CTL_RETVAL_COMPLETE); 6679 } 6680 6681 int 6682 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6683 struct ctl_page_index *page_index, 6684 int pc) 6685 { 6686 struct ctl_lun *lun = CTL_LUN(ctsio); 6687 struct scsi_log_param_header *phdr; 6688 uint8_t *data; 6689 uint64_t val; 6690 6691 data = page_index->page_data; 6692 6693 if (lun->backend->lun_attr != NULL && 6694 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) 6695 != UINT64_MAX) { 6696 phdr = (struct scsi_log_param_header *)data; 6697 scsi_ulto2b(0x0001, phdr->param_code); 6698 phdr->param_control = SLP_LBIN | SLP_LP; 6699 phdr->param_len = 8; 6700 data = (uint8_t *)(phdr + 1); 6701 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6702 data[4] = 0x02; /* per-pool */ 6703 data += phdr->param_len; 6704 } 6705 6706 if (lun->backend->lun_attr != NULL && 6707 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) 6708 != UINT64_MAX) { 6709 phdr = (struct scsi_log_param_header *)data; 6710 scsi_ulto2b(0x0002, phdr->param_code); 6711 phdr->param_control = SLP_LBIN | SLP_LP; 6712 phdr->param_len = 8; 6713 data = (uint8_t *)(phdr + 1); 6714 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6715 data[4] = 0x01; /* per-LUN */ 6716 data += phdr->param_len; 6717 } 6718 6719 if (lun->backend->lun_attr != NULL && 6720 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail")) 6721 != UINT64_MAX) { 6722 phdr = (struct scsi_log_param_header *)data; 6723 scsi_ulto2b(0x00f1, phdr->param_code); 6724 phdr->param_control = SLP_LBIN | SLP_LP; 6725 phdr->param_len = 8; 6726 data = (uint8_t *)(phdr + 1); 6727 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6728 data[4] = 0x02; /* per-pool */ 6729 data += 
phdr->param_len; 6730 } 6731 6732 if (lun->backend->lun_attr != NULL && 6733 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) 6734 != UINT64_MAX) { 6735 phdr = (struct scsi_log_param_header *)data; 6736 scsi_ulto2b(0x00f2, phdr->param_code); 6737 phdr->param_control = SLP_LBIN | SLP_LP; 6738 phdr->param_len = 8; 6739 data = (uint8_t *)(phdr + 1); 6740 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6741 data[4] = 0x02; /* per-pool */ 6742 data += phdr->param_len; 6743 } 6744 6745 page_index->page_len = data - page_index->page_data; 6746 return (0); 6747 } 6748 6749 int 6750 ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, 6751 struct ctl_page_index *page_index, 6752 int pc) 6753 { 6754 struct ctl_lun *lun = CTL_LUN(ctsio); 6755 struct stat_page *data; 6756 struct bintime *t; 6757 6758 data = (struct stat_page *)page_index->page_data; 6759 6760 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); 6761 data->sap.hdr.param_control = SLP_LBIN; 6762 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - 6763 sizeof(struct scsi_log_param_header); 6764 scsi_u64to8b(lun->stats.operations[CTL_STATS_READ], 6765 data->sap.read_num); 6766 scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE], 6767 data->sap.write_num); 6768 if (lun->be_lun->blocksize > 0) { 6769 scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] / 6770 lun->be_lun->blocksize, data->sap.recvieved_lba); 6771 scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] / 6772 lun->be_lun->blocksize, data->sap.transmitted_lba); 6773 } 6774 t = &lun->stats.time[CTL_STATS_READ]; 6775 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), 6776 data->sap.read_int); 6777 t = &lun->stats.time[CTL_STATS_WRITE]; 6778 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), 6779 data->sap.write_int); 6780 scsi_u64to8b(0, data->sap.weighted_num); 6781 scsi_u64to8b(0, data->sap.weighted_int); 6782 scsi_ulto2b(SLP_IT, data->it.hdr.param_code); 6783 data->it.hdr.param_control = SLP_LBIN; 6784 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - 6785 sizeof(struct scsi_log_param_header); 6786 #ifdef CTL_TIME_IO 6787 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); 6788 #endif 6789 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); 6790 data->ti.hdr.param_control = SLP_LBIN; 6791 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - 6792 sizeof(struct scsi_log_param_header); 6793 scsi_ulto4b(3, data->ti.exponent); 6794 scsi_ulto4b(1, data->ti.integer); 6795 return (0); 6796 } 6797 6798 int 6799 ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio, 6800 struct ctl_page_index *page_index, 6801 int pc) 6802 { 6803 struct ctl_lun *lun = CTL_LUN(ctsio); 6804 struct scsi_log_informational_exceptions *data; 6805 6806 data = (struct scsi_log_informational_exceptions *)page_index->page_data; 6807 6808 scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code); 6809 data->hdr.param_control = SLP_LBIN; 6810 data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) - 6811 sizeof(struct scsi_log_param_header); 6812 data->ie_asc = lun->ie_asc; 6813 data->ie_ascq = lun->ie_ascq; 6814 data->temperature = 0xff; 6815 return (0); 6816 } 6817 6818 int 6819 ctl_log_sense(struct ctl_scsiio *ctsio) 6820 { 6821 struct ctl_lun *lun = CTL_LUN(ctsio); 6822 int i, pc, page_code, subpage; 6823 int alloc_len, total_len; 6824 struct ctl_page_index *page_index; 6825 struct scsi_log_sense *cdb; 6826 struct scsi_log_header *header; 6827 6828 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 6829 6830 cdb = (struct scsi_log_sense *)ctsio->cdb;
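/* * Decode the page control, page code and subpage from the CDB, then search * the LUN's log page index for a matching entry; unsupported pages are * rejected with INVALID FIELD IN CDB. */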
6831 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 6832 page_code = cdb->page & SLS_PAGE_CODE; 6833 subpage = cdb->subpage; 6834 alloc_len = scsi_2btoul(cdb->length); 6835 6836 page_index = NULL; 6837 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 6838 page_index = &lun->log_pages.index[i]; 6839 6840 /* Look for the right page code */ 6841 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 6842 continue; 6843 6844 /* Look for the right subpage or the subpage wildcard*/ 6845 if (page_index->subpage != subpage) 6846 continue; 6847 6848 break; 6849 } 6850 if (i >= CTL_NUM_LOG_PAGES) { 6851 ctl_set_invalid_field(ctsio, 6852 /*sks_valid*/ 1, 6853 /*command*/ 1, 6854 /*field*/ 2, 6855 /*bit_valid*/ 0, 6856 /*bit*/ 0); 6857 ctl_done((union ctl_io *)ctsio); 6858 return (CTL_RETVAL_COMPLETE); 6859 } 6860 6861 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 6862 6863 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6864 ctsio->kern_sg_entries = 0; 6865 ctsio->kern_rel_offset = 0; 6866 ctsio->kern_data_len = min(total_len, alloc_len); 6867 ctsio->kern_total_len = ctsio->kern_data_len; 6868 6869 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 6870 header->page = page_index->page_code; 6871 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING) 6872 header->page |= SL_DS; 6873 if (page_index->subpage) { 6874 header->page |= SL_SPF; 6875 header->subpage = page_index->subpage; 6876 } 6877 scsi_ulto2b(page_index->page_len, header->datalen); 6878 6879 /* 6880 * Call the handler, if it exists, to update the 6881 * page to the latest values. 6882 */ 6883 if (page_index->sense_handler != NULL) 6884 page_index->sense_handler(ctsio, page_index, pc); 6885 6886 memcpy(header + 1, page_index->page_data, page_index->page_len); 6887 6888 ctl_set_success(ctsio); 6889 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6890 ctsio->be_move_done = ctl_config_move_done; 6891 ctl_datamove((union ctl_io *)ctsio); 6892 return (CTL_RETVAL_COMPLETE); 6893 } 6894 6895 int 6896 ctl_read_capacity(struct ctl_scsiio *ctsio) 6897 { 6898 struct ctl_lun *lun = CTL_LUN(ctsio); 6899 struct scsi_read_capacity *cdb; 6900 struct scsi_read_capacity_data *data; 6901 uint32_t lba; 6902 6903 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 6904 6905 cdb = (struct scsi_read_capacity *)ctsio->cdb; 6906 6907 lba = scsi_4btoul(cdb->addr); 6908 if (((cdb->pmi & SRC_PMI) == 0) 6909 && (lba != 0)) { 6910 ctl_set_invalid_field(/*ctsio*/ ctsio, 6911 /*sks_valid*/ 1, 6912 /*command*/ 1, 6913 /*field*/ 2, 6914 /*bit_valid*/ 0, 6915 /*bit*/ 0); 6916 ctl_done((union ctl_io *)ctsio); 6917 return (CTL_RETVAL_COMPLETE); 6918 } 6919 6920 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6921 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 6922 ctsio->kern_data_len = sizeof(*data); 6923 ctsio->kern_total_len = sizeof(*data); 6924 ctsio->kern_rel_offset = 0; 6925 ctsio->kern_sg_entries = 0; 6926 6927 /* 6928 * If the maximum LBA is greater than 0xfffffffe, the user must 6929 * issue a SERVICE ACTION IN (16) command, with the read capacity 6930 * serivce action set. 6931 */ 6932 if (lun->be_lun->maxlba > 0xfffffffe) 6933 scsi_ulto4b(0xffffffff, data->addr); 6934 else 6935 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 6936 6937 /* 6938 * XXX KDM this may not be 512 bytes... 
6939 */ 6940 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6941 6942 ctl_set_success(ctsio); 6943 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6944 ctsio->be_move_done = ctl_config_move_done; 6945 ctl_datamove((union ctl_io *)ctsio); 6946 return (CTL_RETVAL_COMPLETE); 6947 } 6948 6949 int 6950 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 6951 { 6952 struct ctl_lun *lun = CTL_LUN(ctsio); 6953 struct scsi_read_capacity_16 *cdb; 6954 struct scsi_read_capacity_data_long *data; 6955 uint64_t lba; 6956 uint32_t alloc_len; 6957 6958 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 6959 6960 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 6961 6962 alloc_len = scsi_4btoul(cdb->alloc_len); 6963 lba = scsi_8btou64(cdb->addr); 6964 6965 if ((cdb->reladr & SRC16_PMI) 6966 && (lba != 0)) { 6967 ctl_set_invalid_field(/*ctsio*/ ctsio, 6968 /*sks_valid*/ 1, 6969 /*command*/ 1, 6970 /*field*/ 2, 6971 /*bit_valid*/ 0, 6972 /*bit*/ 0); 6973 ctl_done((union ctl_io *)ctsio); 6974 return (CTL_RETVAL_COMPLETE); 6975 } 6976 6977 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6978 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 6979 ctsio->kern_rel_offset = 0; 6980 ctsio->kern_sg_entries = 0; 6981 ctsio->kern_data_len = min(sizeof(*data), alloc_len); 6982 ctsio->kern_total_len = ctsio->kern_data_len; 6983 6984 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 6985 /* XXX KDM this may not be 512 bytes... */ 6986 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6987 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 6988 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 6989 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 6990 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 6991 6992 ctl_set_success(ctsio); 6993 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6994 ctsio->be_move_done = ctl_config_move_done; 6995 ctl_datamove((union ctl_io *)ctsio); 6996 return (CTL_RETVAL_COMPLETE); 6997 } 6998 6999 int 7000 ctl_get_lba_status(struct ctl_scsiio *ctsio) 7001 { 7002 struct ctl_lun *lun = CTL_LUN(ctsio); 7003 struct scsi_get_lba_status *cdb; 7004 struct scsi_get_lba_status_data *data; 7005 struct ctl_lba_len_flags *lbalen; 7006 uint64_t lba; 7007 uint32_t alloc_len, total_len; 7008 int retval; 7009 7010 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 7011 7012 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 7013 lba = scsi_8btou64(cdb->addr); 7014 alloc_len = scsi_4btoul(cdb->alloc_len); 7015 7016 if (lba > lun->be_lun->maxlba) { 7017 ctl_set_lba_out_of_range(ctsio, lba); 7018 ctl_done((union ctl_io *)ctsio); 7019 return (CTL_RETVAL_COMPLETE); 7020 } 7021 7022 total_len = sizeof(*data) + sizeof(data->descr[0]); 7023 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7024 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 7025 ctsio->kern_rel_offset = 0; 7026 ctsio->kern_sg_entries = 0; 7027 ctsio->kern_data_len = min(total_len, alloc_len); 7028 ctsio->kern_total_len = ctsio->kern_data_len; 7029 7030 /* Fill dummy data in case backend can't tell anything. */ 7031 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 7032 scsi_u64to8b(lba, data->descr[0].addr); 7033 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 7034 data->descr[0].length); 7035 data->descr[0].status = 0; /* Mapped or unknown. 
*/ 7036 7037 ctl_set_success(ctsio); 7038 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7039 ctsio->be_move_done = ctl_config_move_done; 7040 7041 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 7042 lbalen->lba = lba; 7043 lbalen->len = total_len; 7044 lbalen->flags = 0; 7045 retval = lun->backend->config_read((union ctl_io *)ctsio); 7046 return (retval); 7047 } 7048 7049 int 7050 ctl_read_defect(struct ctl_scsiio *ctsio) 7051 { 7052 struct scsi_read_defect_data_10 *ccb10; 7053 struct scsi_read_defect_data_12 *ccb12; 7054 struct scsi_read_defect_data_hdr_10 *data10; 7055 struct scsi_read_defect_data_hdr_12 *data12; 7056 uint32_t alloc_len, data_len; 7057 uint8_t format; 7058 7059 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7060 7061 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7062 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7063 format = ccb10->format; 7064 alloc_len = scsi_2btoul(ccb10->alloc_length); 7065 data_len = sizeof(*data10); 7066 } else { 7067 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7068 format = ccb12->format; 7069 alloc_len = scsi_4btoul(ccb12->alloc_length); 7070 data_len = sizeof(*data12); 7071 } 7072 if (alloc_len == 0) { 7073 ctl_set_success(ctsio); 7074 ctl_done((union ctl_io *)ctsio); 7075 return (CTL_RETVAL_COMPLETE); 7076 } 7077 7078 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7079 ctsio->kern_rel_offset = 0; 7080 ctsio->kern_sg_entries = 0; 7081 ctsio->kern_data_len = min(data_len, alloc_len); 7082 ctsio->kern_total_len = ctsio->kern_data_len; 7083 7084 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7085 data10 = (struct scsi_read_defect_data_hdr_10 *) 7086 ctsio->kern_data_ptr; 7087 data10->format = format; 7088 scsi_ulto2b(0, data10->length); 7089 } else { 7090 data12 = (struct scsi_read_defect_data_hdr_12 *) 7091 ctsio->kern_data_ptr; 7092 data12->format = format; 7093 scsi_ulto2b(0, data12->generation); 7094 scsi_ulto4b(0, data12->length); 7095 } 7096 7097 ctl_set_success(ctsio); 7098 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7099 ctsio->be_move_done = ctl_config_move_done; 7100 ctl_datamove((union ctl_io *)ctsio); 7101 return (CTL_RETVAL_COMPLETE); 7102 } 7103 7104 int 7105 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7106 { 7107 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7108 struct ctl_lun *lun = CTL_LUN(ctsio); 7109 struct scsi_maintenance_in *cdb; 7110 int retval; 7111 int alloc_len, ext, total_len = 0, g, pc, pg, ts, os; 7112 int num_ha_groups, num_target_ports, shared_group; 7113 struct ctl_port *port; 7114 struct scsi_target_group_data *rtg_ptr; 7115 struct scsi_target_group_data_extended *rtg_ext_ptr; 7116 struct scsi_target_port_group_descriptor *tpg_desc; 7117 7118 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7119 7120 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7121 retval = CTL_RETVAL_COMPLETE; 7122 7123 switch (cdb->byte2 & STG_PDF_MASK) { 7124 case STG_PDF_LENGTH: 7125 ext = 0; 7126 break; 7127 case STG_PDF_EXTENDED: 7128 ext = 1; 7129 break; 7130 default: 7131 ctl_set_invalid_field(/*ctsio*/ ctsio, 7132 /*sks_valid*/ 1, 7133 /*command*/ 1, 7134 /*field*/ 2, 7135 /*bit_valid*/ 1, 7136 /*bit*/ 5); 7137 ctl_done((union ctl_io *)ctsio); 7138 return(retval); 7139 } 7140 7141 num_target_ports = 0; 7142 shared_group = (softc->is_single != 0); 7143 mtx_lock(&softc->ctl_lock); 7144 STAILQ_FOREACH(port, &softc->port_list, links) { 7145 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7146 continue; 7147 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 
7148 continue; 7149 num_target_ports++; 7150 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7151 shared_group = 1; 7152 } 7153 mtx_unlock(&softc->ctl_lock); 7154 num_ha_groups = (softc->is_single) ? 0 : NUM_HA_SHELVES; 7155 7156 if (ext) 7157 total_len = sizeof(struct scsi_target_group_data_extended); 7158 else 7159 total_len = sizeof(struct scsi_target_group_data); 7160 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7161 (shared_group + num_ha_groups) + 7162 sizeof(struct scsi_target_port_descriptor) * num_target_ports; 7163 7164 alloc_len = scsi_4btoul(cdb->length); 7165 7166 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7167 ctsio->kern_sg_entries = 0; 7168 ctsio->kern_rel_offset = 0; 7169 ctsio->kern_data_len = min(total_len, alloc_len); 7170 ctsio->kern_total_len = ctsio->kern_data_len; 7171 7172 if (ext) { 7173 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7174 ctsio->kern_data_ptr; 7175 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7176 rtg_ext_ptr->format_type = 0x10; 7177 rtg_ext_ptr->implicit_transition_time = 0; 7178 tpg_desc = &rtg_ext_ptr->groups[0]; 7179 } else { 7180 rtg_ptr = (struct scsi_target_group_data *) 7181 ctsio->kern_data_ptr; 7182 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7183 tpg_desc = &rtg_ptr->groups[0]; 7184 } 7185 7186 mtx_lock(&softc->ctl_lock); 7187 pg = softc->port_min / softc->port_cnt; 7188 if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { 7189 /* Some shelf is known to be primary. */ 7190 if (softc->ha_link == CTL_HA_LINK_OFFLINE) 7191 os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7192 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) 7193 os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7194 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) 7195 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 7196 else 7197 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7198 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7199 ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7200 } else { 7201 ts = os; 7202 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7203 } 7204 } else { 7205 /* No known primary shelf. */ 7206 if (softc->ha_link == CTL_HA_LINK_OFFLINE) { 7207 ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7208 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7209 } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { 7210 ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7211 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7212 } else { 7213 ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7214 } 7215 } 7216 if (shared_group) { 7217 tpg_desc->pref_state = ts; 7218 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7219 TPG_U_SUP | TPG_T_SUP; 7220 scsi_ulto2b(1, tpg_desc->target_port_group); 7221 tpg_desc->status = TPG_IMPLICIT; 7222 pc = 0; 7223 STAILQ_FOREACH(port, &softc->port_list, links) { 7224 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7225 continue; 7226 if (!softc->is_single && 7227 (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) 7228 continue; 7229 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7230 continue; 7231 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7232 relative_target_port_identifier); 7233 pc++; 7234 } 7235 tpg_desc->target_port_count = pc; 7236 tpg_desc = (struct scsi_target_port_group_descriptor *) 7237 &tpg_desc->descriptors[pc]; 7238 } 7239 for (g = 0; g < num_ha_groups; g++) { 7240 tpg_desc->pref_state = (g == pg) ? 
ts : os; 7241 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7242 TPG_U_SUP | TPG_T_SUP; 7243 scsi_ulto2b(2 + g, tpg_desc->target_port_group); 7244 tpg_desc->status = TPG_IMPLICIT; 7245 pc = 0; 7246 STAILQ_FOREACH(port, &softc->port_list, links) { 7247 if (port->targ_port < g * softc->port_cnt || 7248 port->targ_port >= (g + 1) * softc->port_cnt) 7249 continue; 7250 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7251 continue; 7252 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7253 continue; 7254 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7255 continue; 7256 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7257 relative_target_port_identifier); 7258 pc++; 7259 } 7260 tpg_desc->target_port_count = pc; 7261 tpg_desc = (struct scsi_target_port_group_descriptor *) 7262 &tpg_desc->descriptors[pc]; 7263 } 7264 mtx_unlock(&softc->ctl_lock); 7265 7266 ctl_set_success(ctsio); 7267 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7268 ctsio->be_move_done = ctl_config_move_done; 7269 ctl_datamove((union ctl_io *)ctsio); 7270 return(retval); 7271 } 7272 7273 int 7274 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7275 { 7276 struct ctl_lun *lun = CTL_LUN(ctsio); 7277 struct scsi_report_supported_opcodes *cdb; 7278 const struct ctl_cmd_entry *entry, *sentry; 7279 struct scsi_report_supported_opcodes_all *all; 7280 struct scsi_report_supported_opcodes_descr *descr; 7281 struct scsi_report_supported_opcodes_one *one; 7282 int retval; 7283 int alloc_len, total_len; 7284 int opcode, service_action, i, j, num; 7285 7286 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7287 7288 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7289 retval = CTL_RETVAL_COMPLETE; 7290 7291 opcode = cdb->requested_opcode; 7292 service_action = scsi_2btoul(cdb->requested_service_action); 7293 switch (cdb->options & RSO_OPTIONS_MASK) { 7294 case RSO_OPTIONS_ALL: 7295 num = 0; 7296 for (i = 0; i < 256; i++) { 7297 entry = &ctl_cmd_table[i]; 7298 if (entry->flags & CTL_CMD_FLAG_SA5) { 7299 for (j = 0; j < 32; j++) { 7300 sentry = &((const struct ctl_cmd_entry *) 7301 entry->execute)[j]; 7302 if (ctl_cmd_applicable( 7303 lun->be_lun->lun_type, sentry)) 7304 num++; 7305 } 7306 } else { 7307 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7308 entry)) 7309 num++; 7310 } 7311 } 7312 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7313 num * sizeof(struct scsi_report_supported_opcodes_descr); 7314 break; 7315 case RSO_OPTIONS_OC: 7316 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7317 ctl_set_invalid_field(/*ctsio*/ ctsio, 7318 /*sks_valid*/ 1, 7319 /*command*/ 1, 7320 /*field*/ 2, 7321 /*bit_valid*/ 1, 7322 /*bit*/ 2); 7323 ctl_done((union ctl_io *)ctsio); 7324 return (CTL_RETVAL_COMPLETE); 7325 } 7326 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7327 break; 7328 case RSO_OPTIONS_OC_SA: 7329 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7330 service_action >= 32) { 7331 ctl_set_invalid_field(/*ctsio*/ ctsio, 7332 /*sks_valid*/ 1, 7333 /*command*/ 1, 7334 /*field*/ 2, 7335 /*bit_valid*/ 1, 7336 /*bit*/ 2); 7337 ctl_done((union ctl_io *)ctsio); 7338 return (CTL_RETVAL_COMPLETE); 7339 } 7340 /* FALLTHROUGH */ 7341 case RSO_OPTIONS_OC_ASA: 7342 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7343 break; 7344 default: 7345 ctl_set_invalid_field(/*ctsio*/ ctsio, 7346 /*sks_valid*/ 1, 7347 /*command*/ 1, 7348 /*field*/ 2, 7349 /*bit_valid*/ 1, 7350 /*bit*/ 2); 7351 ctl_done((union ctl_io *)ctsio); 7352 return 
(CTL_RETVAL_COMPLETE); 7353 } 7354 7355 alloc_len = scsi_4btoul(cdb->length); 7356 7357 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7358 ctsio->kern_sg_entries = 0; 7359 ctsio->kern_rel_offset = 0; 7360 ctsio->kern_data_len = min(total_len, alloc_len); 7361 ctsio->kern_total_len = ctsio->kern_data_len; 7362 7363 switch (cdb->options & RSO_OPTIONS_MASK) { 7364 case RSO_OPTIONS_ALL: 7365 all = (struct scsi_report_supported_opcodes_all *) 7366 ctsio->kern_data_ptr; 7367 num = 0; 7368 for (i = 0; i < 256; i++) { 7369 entry = &ctl_cmd_table[i]; 7370 if (entry->flags & CTL_CMD_FLAG_SA5) { 7371 for (j = 0; j < 32; j++) { 7372 sentry = &((const struct ctl_cmd_entry *) 7373 entry->execute)[j]; 7374 if (!ctl_cmd_applicable( 7375 lun->be_lun->lun_type, sentry)) 7376 continue; 7377 descr = &all->descr[num++]; 7378 descr->opcode = i; 7379 scsi_ulto2b(j, descr->service_action); 7380 descr->flags = RSO_SERVACTV; 7381 scsi_ulto2b(sentry->length, 7382 descr->cdb_length); 7383 } 7384 } else { 7385 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7386 entry)) 7387 continue; 7388 descr = &all->descr[num++]; 7389 descr->opcode = i; 7390 scsi_ulto2b(0, descr->service_action); 7391 descr->flags = 0; 7392 scsi_ulto2b(entry->length, descr->cdb_length); 7393 } 7394 } 7395 scsi_ulto4b( 7396 num * sizeof(struct scsi_report_supported_opcodes_descr), 7397 all->length); 7398 break; 7399 case RSO_OPTIONS_OC: 7400 one = (struct scsi_report_supported_opcodes_one *) 7401 ctsio->kern_data_ptr; 7402 entry = &ctl_cmd_table[opcode]; 7403 goto fill_one; 7404 case RSO_OPTIONS_OC_SA: 7405 one = (struct scsi_report_supported_opcodes_one *) 7406 ctsio->kern_data_ptr; 7407 entry = &ctl_cmd_table[opcode]; 7408 entry = &((const struct ctl_cmd_entry *) 7409 entry->execute)[service_action]; 7410 fill_one: 7411 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7412 one->support = 3; 7413 scsi_ulto2b(entry->length, one->cdb_length); 7414 one->cdb_usage[0] = opcode; 7415 memcpy(&one->cdb_usage[1], entry->usage, 7416 entry->length - 1); 7417 } else 7418 one->support = 1; 7419 break; 7420 case RSO_OPTIONS_OC_ASA: 7421 one = (struct scsi_report_supported_opcodes_one *) 7422 ctsio->kern_data_ptr; 7423 entry = &ctl_cmd_table[opcode]; 7424 if (entry->flags & CTL_CMD_FLAG_SA5) { 7425 entry = &((const struct ctl_cmd_entry *) 7426 entry->execute)[service_action]; 7427 } else if (service_action != 0) { 7428 one->support = 1; 7429 break; 7430 } 7431 goto fill_one; 7432 } 7433 7434 ctl_set_success(ctsio); 7435 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7436 ctsio->be_move_done = ctl_config_move_done; 7437 ctl_datamove((union ctl_io *)ctsio); 7438 return(retval); 7439 } 7440 7441 int 7442 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7443 { 7444 struct scsi_report_supported_tmf *cdb; 7445 struct scsi_report_supported_tmf_ext_data *data; 7446 int retval; 7447 int alloc_len, total_len; 7448 7449 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7450 7451 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7452 7453 retval = CTL_RETVAL_COMPLETE; 7454 7455 if (cdb->options & RST_REPD) 7456 total_len = sizeof(struct scsi_report_supported_tmf_ext_data); 7457 else 7458 total_len = sizeof(struct scsi_report_supported_tmf_data); 7459 alloc_len = scsi_4btoul(cdb->length); 7460 7461 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7462 ctsio->kern_sg_entries = 0; 7463 ctsio->kern_rel_offset = 0; 7464 ctsio->kern_data_len = min(total_len, alloc_len); 7465 ctsio->kern_total_len = ctsio->kern_data_len; 7466 7467 
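/* * Both reply formats are filled in through the extended structure (the basic * format corresponds to its leading bytes); kern_data_len, computed above as * the minimum of total_len and the allocation length, bounds how much is * actually returned to the initiator. */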
data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr; 7468 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | 7469 RST_TRS; 7470 data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; 7471 data->length = total_len - 4; 7472 7473 ctl_set_success(ctsio); 7474 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7475 ctsio->be_move_done = ctl_config_move_done; 7476 ctl_datamove((union ctl_io *)ctsio); 7477 return (retval); 7478 } 7479 7480 int 7481 ctl_report_timestamp(struct ctl_scsiio *ctsio) 7482 { 7483 struct scsi_report_timestamp *cdb; 7484 struct scsi_report_timestamp_data *data; 7485 struct timeval tv; 7486 int64_t timestamp; 7487 int retval; 7488 int alloc_len, total_len; 7489 7490 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7491 7492 cdb = (struct scsi_report_timestamp *)ctsio->cdb; 7493 7494 retval = CTL_RETVAL_COMPLETE; 7495 7496 total_len = sizeof(struct scsi_report_timestamp_data); 7497 alloc_len = scsi_4btoul(cdb->length); 7498 7499 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7500 ctsio->kern_sg_entries = 0; 7501 ctsio->kern_rel_offset = 0; 7502 ctsio->kern_data_len = min(total_len, alloc_len); 7503 ctsio->kern_total_len = ctsio->kern_data_len; 7504 7505 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7506 scsi_ulto2b(sizeof(*data) - 2, data->length); 7507 data->origin = RTS_ORIG_OUTSIDE; 7508 getmicrotime(&tv); 7509 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7510 scsi_ulto4b(timestamp >> 16, data->timestamp); 7511 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7512 7513 ctl_set_success(ctsio); 7514 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7515 ctsio->be_move_done = ctl_config_move_done; 7516 ctl_datamove((union ctl_io *)ctsio); 7517 return (retval); 7518 } 7519 7520 int 7521 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7522 { 7523 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7524 struct ctl_lun *lun = CTL_LUN(ctsio); 7525 struct scsi_per_res_in *cdb; 7526 int alloc_len, total_len = 0; 7527 /* struct scsi_per_res_in_rsrv in_data; */ 7528 uint64_t key; 7529 7530 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7531 7532 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7533 7534 alloc_len = scsi_2btoul(cdb->length); 7535 7536 retry: 7537 mtx_lock(&lun->lun_lock); 7538 switch (cdb->action) { 7539 case SPRI_RK: /* read keys */ 7540 total_len = sizeof(struct scsi_per_res_in_keys) + 7541 lun->pr_key_count * 7542 sizeof(struct scsi_per_res_key); 7543 break; 7544 case SPRI_RR: /* read reservation */ 7545 if (lun->flags & CTL_LUN_PR_RESERVED) 7546 total_len = sizeof(struct scsi_per_res_in_rsrv); 7547 else 7548 total_len = sizeof(struct scsi_per_res_in_header); 7549 break; 7550 case SPRI_RC: /* report capabilities */ 7551 total_len = sizeof(struct scsi_per_res_cap); 7552 break; 7553 case SPRI_RS: /* read full status */ 7554 total_len = sizeof(struct scsi_per_res_in_header) + 7555 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7556 lun->pr_key_count; 7557 break; 7558 default: 7559 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7560 } 7561 mtx_unlock(&lun->lun_lock); 7562 7563 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7564 ctsio->kern_rel_offset = 0; 7565 ctsio->kern_sg_entries = 0; 7566 ctsio->kern_data_len = min(total_len, alloc_len); 7567 ctsio->kern_total_len = ctsio->kern_data_len; 7568 7569 mtx_lock(&lun->lun_lock); 7570 switch (cdb->action) { 7571 case SPRI_RK: { // read keys 7572 struct scsi_per_res_in_keys *res_keys; 7573 int i, key_count; 7574 7575 
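/* * READ KEYS: return the current PR generation followed by every reservation * key that is still registered. */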
res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7576 7577 /* 7578 * We had to drop the lock to allocate our buffer, which 7579 * leaves time for someone to come in with another 7580 * persistent reservation. (That is unlikely, though, 7581 * since this should be the only persistent reservation 7582 * command active right now.) 7583 */ 7584 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7585 (lun->pr_key_count * 7586 sizeof(struct scsi_per_res_key)))){ 7587 mtx_unlock(&lun->lun_lock); 7588 free(ctsio->kern_data_ptr, M_CTL); 7589 printf("%s: reservation length changed, retrying\n", 7590 __func__); 7591 goto retry; 7592 } 7593 7594 scsi_ulto4b(lun->pr_generation, res_keys->header.generation); 7595 7596 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7597 lun->pr_key_count, res_keys->header.length); 7598 7599 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { 7600 if ((key = ctl_get_prkey(lun, i)) == 0) 7601 continue; 7602 7603 /* 7604 * We used lun->pr_key_count to calculate the 7605 * size to allocate. If it turns out the number of 7606 * initiators with the registered flag set is 7607 * larger than that (i.e. they haven't been kept in 7608 * sync), we've got a problem. 7609 */ 7610 if (key_count >= lun->pr_key_count) { 7611 key_count++; 7612 continue; 7613 } 7614 scsi_u64to8b(key, res_keys->keys[key_count].key); 7615 key_count++; 7616 } 7617 break; 7618 } 7619 case SPRI_RR: { // read reservation 7620 struct scsi_per_res_in_rsrv *res; 7621 int tmp_len, header_only; 7622 7623 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7624 7625 scsi_ulto4b(lun->pr_generation, res->header.generation); 7626 7627 if (lun->flags & CTL_LUN_PR_RESERVED) 7628 { 7629 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7630 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7631 res->header.length); 7632 header_only = 0; 7633 } else { 7634 tmp_len = sizeof(struct scsi_per_res_in_header); 7635 scsi_ulto4b(0, res->header.length); 7636 header_only = 1; 7637 } 7638 7639 /* 7640 * We had to drop the lock to allocate our buffer, which 7641 * leaves time for someone to come in with another 7642 * persistent reservation. (That is unlikely, though, 7643 * since this should be the only persistent reservation 7644 * command active right now.) 7645 */ 7646 if (tmp_len != total_len) { 7647 mtx_unlock(&lun->lun_lock); 7648 free(ctsio->kern_data_ptr, M_CTL); 7649 printf("%s: reservation status changed, retrying\n", 7650 __func__); 7651 goto retry; 7652 } 7653 7654 /* 7655 * No reservation held, so we're done. 7656 */ 7657 if (header_only != 0) 7658 break; 7659 7660 /* 7661 * If the registration is an All Registrants type, the key 7662 * is 0, since it doesn't really matter. 
7663 */ 7664 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7665 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7666 res->data.reservation); 7667 } 7668 res->data.scopetype = lun->pr_res_type; 7669 break; 7670 } 7671 case SPRI_RC: //report capabilities 7672 { 7673 struct scsi_per_res_cap *res_cap; 7674 uint16_t type_mask; 7675 7676 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7677 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7678 res_cap->flags1 = SPRI_CRH; 7679 res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5; 7680 type_mask = SPRI_TM_WR_EX_AR | 7681 SPRI_TM_EX_AC_RO | 7682 SPRI_TM_WR_EX_RO | 7683 SPRI_TM_EX_AC | 7684 SPRI_TM_WR_EX | 7685 SPRI_TM_EX_AC_AR; 7686 scsi_ulto2b(type_mask, res_cap->type_mask); 7687 break; 7688 } 7689 case SPRI_RS: { // read full status 7690 struct scsi_per_res_in_full *res_status; 7691 struct scsi_per_res_in_full_desc *res_desc; 7692 struct ctl_port *port; 7693 int i, len; 7694 7695 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7696 7697 /* 7698 * We had to drop the lock to allocate our buffer, which 7699 * leaves time for someone to come in with another 7700 * persistent reservation. (That is unlikely, though, 7701 * since this should be the only persistent reservation 7702 * command active right now.) 7703 */ 7704 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7705 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7706 lun->pr_key_count)){ 7707 mtx_unlock(&lun->lun_lock); 7708 free(ctsio->kern_data_ptr, M_CTL); 7709 printf("%s: reservation length changed, retrying\n", 7710 __func__); 7711 goto retry; 7712 } 7713 7714 scsi_ulto4b(lun->pr_generation, res_status->header.generation); 7715 7716 res_desc = &res_status->desc[0]; 7717 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7718 if ((key = ctl_get_prkey(lun, i)) == 0) 7719 continue; 7720 7721 scsi_u64to8b(key, res_desc->res_key.key); 7722 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7723 (lun->pr_res_idx == i || 7724 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7725 res_desc->flags = SPRI_FULL_R_HOLDER; 7726 res_desc->scopetype = lun->pr_res_type; 7727 } 7728 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7729 res_desc->rel_trgt_port_id); 7730 len = 0; 7731 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; 7732 if (port != NULL) 7733 len = ctl_create_iid(port, 7734 i % CTL_MAX_INIT_PER_PORT, 7735 res_desc->transport_id); 7736 scsi_ulto4b(len, res_desc->additional_length); 7737 res_desc = (struct scsi_per_res_in_full_desc *) 7738 &res_desc->transport_id[len]; 7739 } 7740 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7741 res_status->header.length); 7742 break; 7743 } 7744 default: 7745 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7746 } 7747 mtx_unlock(&lun->lun_lock); 7748 7749 ctl_set_success(ctsio); 7750 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7751 ctsio->be_move_done = ctl_config_move_done; 7752 ctl_datamove((union ctl_io *)ctsio); 7753 return (CTL_RETVAL_COMPLETE); 7754 } 7755 7756 /* 7757 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7758 * it should return. 
7759 */ 7760 static int 7761 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7762 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7763 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7764 struct scsi_per_res_out_parms* param) 7765 { 7766 union ctl_ha_msg persis_io; 7767 int i; 7768 7769 mtx_lock(&lun->lun_lock); 7770 if (sa_res_key == 0) { 7771 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7772 /* validate scope and type */ 7773 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7774 SPR_LU_SCOPE) { 7775 mtx_unlock(&lun->lun_lock); 7776 ctl_set_invalid_field(/*ctsio*/ ctsio, 7777 /*sks_valid*/ 1, 7778 /*command*/ 1, 7779 /*field*/ 2, 7780 /*bit_valid*/ 1, 7781 /*bit*/ 4); 7782 ctl_done((union ctl_io *)ctsio); 7783 return (1); 7784 } 7785 7786 if (type>8 || type==2 || type==4 || type==0) { 7787 mtx_unlock(&lun->lun_lock); 7788 ctl_set_invalid_field(/*ctsio*/ ctsio, 7789 /*sks_valid*/ 1, 7790 /*command*/ 1, 7791 /*field*/ 2, 7792 /*bit_valid*/ 1, 7793 /*bit*/ 0); 7794 ctl_done((union ctl_io *)ctsio); 7795 return (1); 7796 } 7797 7798 /* 7799 * Unregister everybody else and build UA for 7800 * them 7801 */ 7802 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7803 if (i == residx || ctl_get_prkey(lun, i) == 0) 7804 continue; 7805 7806 ctl_clr_prkey(lun, i); 7807 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7808 } 7809 lun->pr_key_count = 1; 7810 lun->pr_res_type = type; 7811 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 7812 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 7813 lun->pr_res_idx = residx; 7814 lun->pr_generation++; 7815 mtx_unlock(&lun->lun_lock); 7816 7817 /* send msg to other side */ 7818 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7819 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7820 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7821 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7822 persis_io.pr.pr_info.res_type = type; 7823 memcpy(persis_io.pr.pr_info.sa_res_key, 7824 param->serv_act_res_key, 7825 sizeof(param->serv_act_res_key)); 7826 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7827 sizeof(persis_io.pr), M_WAITOK); 7828 } else { 7829 /* not all registrants */ 7830 mtx_unlock(&lun->lun_lock); 7831 free(ctsio->kern_data_ptr, M_CTL); 7832 ctl_set_invalid_field(ctsio, 7833 /*sks_valid*/ 1, 7834 /*command*/ 0, 7835 /*field*/ 8, 7836 /*bit_valid*/ 0, 7837 /*bit*/ 0); 7838 ctl_done((union ctl_io *)ctsio); 7839 return (1); 7840 } 7841 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7842 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7843 int found = 0; 7844 7845 if (res_key == sa_res_key) { 7846 /* special case */ 7847 /* 7848 * The spec implies this is not good but doesn't 7849 * say what to do. There are two choices either 7850 * generate a res conflict or check condition 7851 * with illegal field in parameter data. Since 7852 * that is what is done when the sa_res_key is 7853 * zero I'll take that approach since this has 7854 * to do with the sa_res_key. 
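 * (In other words, respond the same way as the sa_res_key == 0 case: an
 * invalid field in parameter list check condition pointing at byte 8, the
 * SERVICE ACTION RESERVATION KEY, as done below.)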
7855 */ 7856 mtx_unlock(&lun->lun_lock); 7857 free(ctsio->kern_data_ptr, M_CTL); 7858 ctl_set_invalid_field(ctsio, 7859 /*sks_valid*/ 1, 7860 /*command*/ 0, 7861 /*field*/ 8, 7862 /*bit_valid*/ 0, 7863 /*bit*/ 0); 7864 ctl_done((union ctl_io *)ctsio); 7865 return (1); 7866 } 7867 7868 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7869 if (ctl_get_prkey(lun, i) != sa_res_key) 7870 continue; 7871 7872 found = 1; 7873 ctl_clr_prkey(lun, i); 7874 lun->pr_key_count--; 7875 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7876 } 7877 if (!found) { 7878 mtx_unlock(&lun->lun_lock); 7879 free(ctsio->kern_data_ptr, M_CTL); 7880 ctl_set_reservation_conflict(ctsio); 7881 ctl_done((union ctl_io *)ctsio); 7882 return (CTL_RETVAL_COMPLETE); 7883 } 7884 lun->pr_generation++; 7885 mtx_unlock(&lun->lun_lock); 7886 7887 /* send msg to other side */ 7888 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7889 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7890 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7891 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7892 persis_io.pr.pr_info.res_type = type; 7893 memcpy(persis_io.pr.pr_info.sa_res_key, 7894 param->serv_act_res_key, 7895 sizeof(param->serv_act_res_key)); 7896 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7897 sizeof(persis_io.pr), M_WAITOK); 7898 } else { 7899 /* Reserved but not all registrants */ 7900 /* sa_res_key is res holder */ 7901 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7902 /* validate scope and type */ 7903 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7904 SPR_LU_SCOPE) { 7905 mtx_unlock(&lun->lun_lock); 7906 ctl_set_invalid_field(/*ctsio*/ ctsio, 7907 /*sks_valid*/ 1, 7908 /*command*/ 1, 7909 /*field*/ 2, 7910 /*bit_valid*/ 1, 7911 /*bit*/ 4); 7912 ctl_done((union ctl_io *)ctsio); 7913 return (1); 7914 } 7915 7916 if (type>8 || type==2 || type==4 || type==0) { 7917 mtx_unlock(&lun->lun_lock); 7918 ctl_set_invalid_field(/*ctsio*/ ctsio, 7919 /*sks_valid*/ 1, 7920 /*command*/ 1, 7921 /*field*/ 2, 7922 /*bit_valid*/ 1, 7923 /*bit*/ 0); 7924 ctl_done((union ctl_io *)ctsio); 7925 return (1); 7926 } 7927 7928 /* 7929 * Do the following: 7930 * if sa_res_key != res_key remove all 7931 * registrants w/sa_res_key and generate UA 7932 * for these registrants(Registrations 7933 * Preempted) if it wasn't an exclusive 7934 * reservation generate UA(Reservations 7935 * Preempted) for all other registered nexuses 7936 * if the type has changed. Establish the new 7937 * reservation and holder. If res_key and 7938 * sa_res_key are the same do the above 7939 * except don't unregister the res holder. 
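 * (Rough summary of the loop below: registrants whose key equals
 * sa_res_key are unregistered and get a REGISTRATIONS PREEMPTED UA; if the
 * type changes away from a registrants-only type, the remaining
 * registrants get a reservation-released UA; the reservation then moves to
 * this nexus, or to all registrants for the AR types.)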
7940 */ 7941 7942 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7943 if (i == residx || ctl_get_prkey(lun, i) == 0) 7944 continue; 7945 7946 if (sa_res_key == ctl_get_prkey(lun, i)) { 7947 ctl_clr_prkey(lun, i); 7948 lun->pr_key_count--; 7949 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7950 } else if (type != lun->pr_res_type && 7951 (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 7952 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 7953 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 7954 } 7955 } 7956 lun->pr_res_type = type; 7957 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 7958 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 7959 lun->pr_res_idx = residx; 7960 else 7961 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 7962 lun->pr_generation++; 7963 mtx_unlock(&lun->lun_lock); 7964 7965 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7966 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7967 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7968 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7969 persis_io.pr.pr_info.res_type = type; 7970 memcpy(persis_io.pr.pr_info.sa_res_key, 7971 param->serv_act_res_key, 7972 sizeof(param->serv_act_res_key)); 7973 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7974 sizeof(persis_io.pr), M_WAITOK); 7975 } else { 7976 /* 7977 * sa_res_key is not the res holder just 7978 * remove registrants 7979 */ 7980 int found=0; 7981 7982 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7983 if (sa_res_key != ctl_get_prkey(lun, i)) 7984 continue; 7985 7986 found = 1; 7987 ctl_clr_prkey(lun, i); 7988 lun->pr_key_count--; 7989 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7990 } 7991 7992 if (!found) { 7993 mtx_unlock(&lun->lun_lock); 7994 free(ctsio->kern_data_ptr, M_CTL); 7995 ctl_set_reservation_conflict(ctsio); 7996 ctl_done((union ctl_io *)ctsio); 7997 return (1); 7998 } 7999 lun->pr_generation++; 8000 mtx_unlock(&lun->lun_lock); 8001 8002 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8003 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8004 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8005 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8006 persis_io.pr.pr_info.res_type = type; 8007 memcpy(persis_io.pr.pr_info.sa_res_key, 8008 param->serv_act_res_key, 8009 sizeof(param->serv_act_res_key)); 8010 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8011 sizeof(persis_io.pr), M_WAITOK); 8012 } 8013 } 8014 return (0); 8015 } 8016 8017 static void 8018 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8019 { 8020 uint64_t sa_res_key; 8021 int i; 8022 8023 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8024 8025 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8026 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8027 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8028 if (sa_res_key == 0) { 8029 /* 8030 * Unregister everybody else and build UA for 8031 * them 8032 */ 8033 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8034 if (i == msg->pr.pr_info.residx || 8035 ctl_get_prkey(lun, i) == 0) 8036 continue; 8037 8038 ctl_clr_prkey(lun, i); 8039 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8040 } 8041 8042 lun->pr_key_count = 1; 8043 lun->pr_res_type = msg->pr.pr_info.res_type; 8044 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8045 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8046 lun->pr_res_idx = msg->pr.pr_info.residx; 8047 } else { 8048 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8049 if (sa_res_key == ctl_get_prkey(lun, i)) 8050 continue; 8051 8052 ctl_clr_prkey(lun, i); 8053 lun->pr_key_count--; 8054 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8055 } 8056 } 8057 } else { 8058 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8059 if (i == 
msg->pr.pr_info.residx || 8060 ctl_get_prkey(lun, i) == 0) 8061 continue; 8062 8063 if (sa_res_key == ctl_get_prkey(lun, i)) { 8064 ctl_clr_prkey(lun, i); 8065 lun->pr_key_count--; 8066 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8067 } else if (msg->pr.pr_info.res_type != lun->pr_res_type 8068 && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8069 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8070 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8071 } 8072 } 8073 lun->pr_res_type = msg->pr.pr_info.res_type; 8074 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8075 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8076 lun->pr_res_idx = msg->pr.pr_info.residx; 8077 else 8078 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8079 } 8080 lun->pr_generation++; 8081 8082 } 8083 8084 8085 int 8086 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8087 { 8088 struct ctl_softc *softc = CTL_SOFTC(ctsio); 8089 struct ctl_lun *lun = CTL_LUN(ctsio); 8090 int retval; 8091 u_int32_t param_len; 8092 struct scsi_per_res_out *cdb; 8093 struct scsi_per_res_out_parms* param; 8094 uint32_t residx; 8095 uint64_t res_key, sa_res_key, key; 8096 uint8_t type; 8097 union ctl_ha_msg persis_io; 8098 int i; 8099 8100 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8101 8102 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8103 retval = CTL_RETVAL_COMPLETE; 8104 8105 /* 8106 * We only support whole-LUN scope. The scope & type are ignored for 8107 * register, register and ignore existing key and clear. 8108 * We sometimes ignore scope and type on preempts too!! 8109 * Verify reservation type here as well. 8110 */ 8111 type = cdb->scope_type & SPR_TYPE_MASK; 8112 if ((cdb->action == SPRO_RESERVE) 8113 || (cdb->action == SPRO_RELEASE)) { 8114 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8115 ctl_set_invalid_field(/*ctsio*/ ctsio, 8116 /*sks_valid*/ 1, 8117 /*command*/ 1, 8118 /*field*/ 2, 8119 /*bit_valid*/ 1, 8120 /*bit*/ 4); 8121 ctl_done((union ctl_io *)ctsio); 8122 return (CTL_RETVAL_COMPLETE); 8123 } 8124 8125 if (type>8 || type==2 || type==4 || type==0) { 8126 ctl_set_invalid_field(/*ctsio*/ ctsio, 8127 /*sks_valid*/ 1, 8128 /*command*/ 1, 8129 /*field*/ 2, 8130 /*bit_valid*/ 1, 8131 /*bit*/ 0); 8132 ctl_done((union ctl_io *)ctsio); 8133 return (CTL_RETVAL_COMPLETE); 8134 } 8135 } 8136 8137 param_len = scsi_4btoul(cdb->length); 8138 8139 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8140 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8141 ctsio->kern_data_len = param_len; 8142 ctsio->kern_total_len = param_len; 8143 ctsio->kern_rel_offset = 0; 8144 ctsio->kern_sg_entries = 0; 8145 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8146 ctsio->be_move_done = ctl_config_move_done; 8147 ctl_datamove((union ctl_io *)ctsio); 8148 8149 return (CTL_RETVAL_COMPLETE); 8150 } 8151 8152 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8153 8154 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8155 res_key = scsi_8btou64(param->res_key.key); 8156 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8157 8158 /* 8159 * Validate the reservation key here except for SPRO_REG_IGNO 8160 * This must be done for all other service actions 8161 */ 8162 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8163 mtx_lock(&lun->lun_lock); 8164 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8165 if (res_key != key) { 8166 /* 8167 * The current key passed in doesn't match 8168 * the one the initiator previously 8169 * registered. 
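 * (So the command is terminated with RESERVATION CONFLICT status below,
 * per the standard PERSISTENT RESERVE OUT reservation key check.)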
8170 */ 8171 mtx_unlock(&lun->lun_lock); 8172 free(ctsio->kern_data_ptr, M_CTL); 8173 ctl_set_reservation_conflict(ctsio); 8174 ctl_done((union ctl_io *)ctsio); 8175 return (CTL_RETVAL_COMPLETE); 8176 } 8177 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8178 /* 8179 * We are not registered 8180 */ 8181 mtx_unlock(&lun->lun_lock); 8182 free(ctsio->kern_data_ptr, M_CTL); 8183 ctl_set_reservation_conflict(ctsio); 8184 ctl_done((union ctl_io *)ctsio); 8185 return (CTL_RETVAL_COMPLETE); 8186 } else if (res_key != 0) { 8187 /* 8188 * We are not registered and trying to register but 8189 * the register key isn't zero. 8190 */ 8191 mtx_unlock(&lun->lun_lock); 8192 free(ctsio->kern_data_ptr, M_CTL); 8193 ctl_set_reservation_conflict(ctsio); 8194 ctl_done((union ctl_io *)ctsio); 8195 return (CTL_RETVAL_COMPLETE); 8196 } 8197 mtx_unlock(&lun->lun_lock); 8198 } 8199 8200 switch (cdb->action & SPRO_ACTION_MASK) { 8201 case SPRO_REGISTER: 8202 case SPRO_REG_IGNO: { 8203 8204 #if 0 8205 printf("Registration received\n"); 8206 #endif 8207 8208 /* 8209 * We don't support any of these options, as we report in 8210 * the read capabilities request (see 8211 * ctl_persistent_reserve_in(), above). 8212 */ 8213 if ((param->flags & SPR_SPEC_I_PT) 8214 || (param->flags & SPR_ALL_TG_PT) 8215 || (param->flags & SPR_APTPL)) { 8216 int bit_ptr; 8217 8218 if (param->flags & SPR_APTPL) 8219 bit_ptr = 0; 8220 else if (param->flags & SPR_ALL_TG_PT) 8221 bit_ptr = 2; 8222 else /* SPR_SPEC_I_PT */ 8223 bit_ptr = 3; 8224 8225 free(ctsio->kern_data_ptr, M_CTL); 8226 ctl_set_invalid_field(ctsio, 8227 /*sks_valid*/ 1, 8228 /*command*/ 0, 8229 /*field*/ 20, 8230 /*bit_valid*/ 1, 8231 /*bit*/ bit_ptr); 8232 ctl_done((union ctl_io *)ctsio); 8233 return (CTL_RETVAL_COMPLETE); 8234 } 8235 8236 mtx_lock(&lun->lun_lock); 8237 8238 /* 8239 * The initiator wants to clear the 8240 * key/unregister. 8241 */ 8242 if (sa_res_key == 0) { 8243 if ((res_key == 0 8244 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8245 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8246 && ctl_get_prkey(lun, residx) == 0)) { 8247 mtx_unlock(&lun->lun_lock); 8248 goto done; 8249 } 8250 8251 ctl_clr_prkey(lun, residx); 8252 lun->pr_key_count--; 8253 8254 if (residx == lun->pr_res_idx) { 8255 lun->flags &= ~CTL_LUN_PR_RESERVED; 8256 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8257 8258 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8259 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8260 lun->pr_key_count) { 8261 /* 8262 * If the reservation is a registrants 8263 * only type we need to generate a UA 8264 * for other registered inits. 
The 8265 * sense code should be RESERVATIONS 8266 * RELEASED 8267 */ 8268 8269 for (i = softc->init_min; i < softc->init_max; i++){ 8270 if (ctl_get_prkey(lun, i) == 0) 8271 continue; 8272 ctl_est_ua(lun, i, 8273 CTL_UA_RES_RELEASE); 8274 } 8275 } 8276 lun->pr_res_type = 0; 8277 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8278 if (lun->pr_key_count==0) { 8279 lun->flags &= ~CTL_LUN_PR_RESERVED; 8280 lun->pr_res_type = 0; 8281 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8282 } 8283 } 8284 lun->pr_generation++; 8285 mtx_unlock(&lun->lun_lock); 8286 8287 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8288 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8289 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8290 persis_io.pr.pr_info.residx = residx; 8291 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8292 sizeof(persis_io.pr), M_WAITOK); 8293 } else /* sa_res_key != 0 */ { 8294 8295 /* 8296 * If we aren't registered currently then increment 8297 * the key count and set the registered flag. 8298 */ 8299 ctl_alloc_prkey(lun, residx); 8300 if (ctl_get_prkey(lun, residx) == 0) 8301 lun->pr_key_count++; 8302 ctl_set_prkey(lun, residx, sa_res_key); 8303 lun->pr_generation++; 8304 mtx_unlock(&lun->lun_lock); 8305 8306 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8307 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8308 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8309 persis_io.pr.pr_info.residx = residx; 8310 memcpy(persis_io.pr.pr_info.sa_res_key, 8311 param->serv_act_res_key, 8312 sizeof(param->serv_act_res_key)); 8313 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8314 sizeof(persis_io.pr), M_WAITOK); 8315 } 8316 8317 break; 8318 } 8319 case SPRO_RESERVE: 8320 #if 0 8321 printf("Reserve executed type %d\n", type); 8322 #endif 8323 mtx_lock(&lun->lun_lock); 8324 if (lun->flags & CTL_LUN_PR_RESERVED) { 8325 /* 8326 * if this isn't the reservation holder and it's 8327 * not a "all registrants" type or if the type is 8328 * different then we have a conflict 8329 */ 8330 if ((lun->pr_res_idx != residx 8331 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8332 || lun->pr_res_type != type) { 8333 mtx_unlock(&lun->lun_lock); 8334 free(ctsio->kern_data_ptr, M_CTL); 8335 ctl_set_reservation_conflict(ctsio); 8336 ctl_done((union ctl_io *)ctsio); 8337 return (CTL_RETVAL_COMPLETE); 8338 } 8339 mtx_unlock(&lun->lun_lock); 8340 } else /* create a reservation */ { 8341 /* 8342 * If it's not an "all registrants" type record 8343 * reservation holder 8344 */ 8345 if (type != SPR_TYPE_WR_EX_AR 8346 && type != SPR_TYPE_EX_AC_AR) 8347 lun->pr_res_idx = residx; /* Res holder */ 8348 else 8349 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8350 8351 lun->flags |= CTL_LUN_PR_RESERVED; 8352 lun->pr_res_type = type; 8353 8354 mtx_unlock(&lun->lun_lock); 8355 8356 /* send msg to other side */ 8357 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8358 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8359 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8360 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8361 persis_io.pr.pr_info.res_type = type; 8362 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8363 sizeof(persis_io.pr), M_WAITOK); 8364 } 8365 break; 8366 8367 case SPRO_RELEASE: 8368 mtx_lock(&lun->lun_lock); 8369 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8370 /* No reservation exists return good status */ 8371 mtx_unlock(&lun->lun_lock); 8372 goto done; 8373 } 8374 /* 8375 * Is this nexus a reservation holder? 
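 * (Per SPC-3, a registrant that is not the reservation holder releasing a
 * non-all-registrants reservation is a no-op that still completes with
 * GOOD status, which is what the code below does.)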
8376 */ 8377 if (lun->pr_res_idx != residx 8378 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8379 /* 8380 * not a res holder return good status but 8381 * do nothing 8382 */ 8383 mtx_unlock(&lun->lun_lock); 8384 goto done; 8385 } 8386 8387 if (lun->pr_res_type != type) { 8388 mtx_unlock(&lun->lun_lock); 8389 free(ctsio->kern_data_ptr, M_CTL); 8390 ctl_set_illegal_pr_release(ctsio); 8391 ctl_done((union ctl_io *)ctsio); 8392 return (CTL_RETVAL_COMPLETE); 8393 } 8394 8395 /* okay to release */ 8396 lun->flags &= ~CTL_LUN_PR_RESERVED; 8397 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8398 lun->pr_res_type = 0; 8399 8400 /* 8401 * If this isn't an exclusive access reservation and NUAR 8402 * is not set, generate UA for all other registrants. 8403 */ 8404 if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX && 8405 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8406 for (i = softc->init_min; i < softc->init_max; i++) { 8407 if (i == residx || ctl_get_prkey(lun, i) == 0) 8408 continue; 8409 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8410 } 8411 } 8412 mtx_unlock(&lun->lun_lock); 8413 8414 /* Send msg to other side */ 8415 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8416 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8417 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8418 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8419 sizeof(persis_io.pr), M_WAITOK); 8420 break; 8421 8422 case SPRO_CLEAR: 8423 /* send msg to other side */ 8424 8425 mtx_lock(&lun->lun_lock); 8426 lun->flags &= ~CTL_LUN_PR_RESERVED; 8427 lun->pr_res_type = 0; 8428 lun->pr_key_count = 0; 8429 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8430 8431 ctl_clr_prkey(lun, residx); 8432 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8433 if (ctl_get_prkey(lun, i) != 0) { 8434 ctl_clr_prkey(lun, i); 8435 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8436 } 8437 lun->pr_generation++; 8438 mtx_unlock(&lun->lun_lock); 8439 8440 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8441 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8442 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8443 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8444 sizeof(persis_io.pr), M_WAITOK); 8445 break; 8446 8447 case SPRO_PREEMPT: 8448 case SPRO_PRE_ABO: { 8449 int nretval; 8450 8451 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8452 residx, ctsio, cdb, param); 8453 if (nretval != 0) 8454 return (CTL_RETVAL_COMPLETE); 8455 break; 8456 } 8457 default: 8458 panic("%s: Invalid PR type %#x", __func__, cdb->action); 8459 } 8460 8461 done: 8462 free(ctsio->kern_data_ptr, M_CTL); 8463 ctl_set_success(ctsio); 8464 ctl_done((union ctl_io *)ctsio); 8465 8466 return (retval); 8467 } 8468 8469 /* 8470 * This routine is for handling a message from the other SC pertaining to 8471 * persistent reserve out. All the error checking will have been done 8472 * so only perorming the action need be done here to keep the two 8473 * in sync. 
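 * (The message carries one of the CTL_PR_* actions built by
 * ctl_persistent_reserve_out(); the switch below just applies the same
 * state change on this controller without re-validating anything.)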
8474 */ 8475 static void 8476 ctl_hndl_per_res_out_on_other_sc(union ctl_io *io) 8477 { 8478 struct ctl_softc *softc = CTL_SOFTC(io); 8479 union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg; 8480 struct ctl_lun *lun; 8481 int i; 8482 uint32_t residx, targ_lun; 8483 8484 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8485 mtx_lock(&softc->ctl_lock); 8486 if (targ_lun >= CTL_MAX_LUNS || 8487 (lun = softc->ctl_luns[targ_lun]) == NULL) { 8488 mtx_unlock(&softc->ctl_lock); 8489 return; 8490 } 8491 mtx_lock(&lun->lun_lock); 8492 mtx_unlock(&softc->ctl_lock); 8493 if (lun->flags & CTL_LUN_DISABLED) { 8494 mtx_unlock(&lun->lun_lock); 8495 return; 8496 } 8497 residx = ctl_get_initindex(&msg->hdr.nexus); 8498 switch(msg->pr.pr_info.action) { 8499 case CTL_PR_REG_KEY: 8500 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8501 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8502 lun->pr_key_count++; 8503 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8504 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8505 lun->pr_generation++; 8506 break; 8507 8508 case CTL_PR_UNREG_KEY: 8509 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8510 lun->pr_key_count--; 8511 8512 /* XXX Need to see if the reservation has been released */ 8513 /* if so do we need to generate UA? */ 8514 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8515 lun->flags &= ~CTL_LUN_PR_RESERVED; 8516 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8517 8518 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8519 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8520 lun->pr_key_count) { 8521 /* 8522 * If the reservation is a registrants 8523 * only type we need to generate a UA 8524 * for other registered inits. The 8525 * sense code should be RESERVATIONS 8526 * RELEASED 8527 */ 8528 8529 for (i = softc->init_min; i < softc->init_max; i++) { 8530 if (ctl_get_prkey(lun, i) == 0) 8531 continue; 8532 8533 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8534 } 8535 } 8536 lun->pr_res_type = 0; 8537 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8538 if (lun->pr_key_count==0) { 8539 lun->flags &= ~CTL_LUN_PR_RESERVED; 8540 lun->pr_res_type = 0; 8541 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8542 } 8543 } 8544 lun->pr_generation++; 8545 break; 8546 8547 case CTL_PR_RESERVE: 8548 lun->flags |= CTL_LUN_PR_RESERVED; 8549 lun->pr_res_type = msg->pr.pr_info.res_type; 8550 lun->pr_res_idx = msg->pr.pr_info.residx; 8551 8552 break; 8553 8554 case CTL_PR_RELEASE: 8555 /* 8556 * If this isn't an exclusive access reservation and NUAR 8557 * is not set, generate UA for all other registrants. 
8558 */ 8559 if (lun->pr_res_type != SPR_TYPE_EX_AC && 8560 lun->pr_res_type != SPR_TYPE_WR_EX && 8561 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8562 for (i = softc->init_min; i < softc->init_max; i++) 8563 if (i == residx || ctl_get_prkey(lun, i) == 0) 8564 continue; 8565 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8566 } 8567 8568 lun->flags &= ~CTL_LUN_PR_RESERVED; 8569 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8570 lun->pr_res_type = 0; 8571 break; 8572 8573 case CTL_PR_PREEMPT: 8574 ctl_pro_preempt_other(lun, msg); 8575 break; 8576 case CTL_PR_CLEAR: 8577 lun->flags &= ~CTL_LUN_PR_RESERVED; 8578 lun->pr_res_type = 0; 8579 lun->pr_key_count = 0; 8580 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8581 8582 for (i=0; i < CTL_MAX_INITIATORS; i++) { 8583 if (ctl_get_prkey(lun, i) == 0) 8584 continue; 8585 ctl_clr_prkey(lun, i); 8586 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8587 } 8588 lun->pr_generation++; 8589 break; 8590 } 8591 8592 mtx_unlock(&lun->lun_lock); 8593 } 8594 8595 int 8596 ctl_read_write(struct ctl_scsiio *ctsio) 8597 { 8598 struct ctl_lun *lun = CTL_LUN(ctsio); 8599 struct ctl_lba_len_flags *lbalen; 8600 uint64_t lba; 8601 uint32_t num_blocks; 8602 int flags, retval; 8603 int isread; 8604 8605 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8606 8607 flags = 0; 8608 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8609 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8610 switch (ctsio->cdb[0]) { 8611 case READ_6: 8612 case WRITE_6: { 8613 struct scsi_rw_6 *cdb; 8614 8615 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8616 8617 lba = scsi_3btoul(cdb->addr); 8618 /* only 5 bits are valid in the most significant address byte */ 8619 lba &= 0x1fffff; 8620 num_blocks = cdb->length; 8621 /* 8622 * This is correct according to SBC-2. 
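 * (The 6-byte CDB has only a one-byte transfer length, and SBC-2 defines
 * a value of zero there to mean 256 blocks, hence the substitution below.)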
8623 */ 8624 if (num_blocks == 0) 8625 num_blocks = 256; 8626 break; 8627 } 8628 case READ_10: 8629 case WRITE_10: { 8630 struct scsi_rw_10 *cdb; 8631 8632 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8633 if (cdb->byte2 & SRW10_FUA) 8634 flags |= CTL_LLF_FUA; 8635 if (cdb->byte2 & SRW10_DPO) 8636 flags |= CTL_LLF_DPO; 8637 lba = scsi_4btoul(cdb->addr); 8638 num_blocks = scsi_2btoul(cdb->length); 8639 break; 8640 } 8641 case WRITE_VERIFY_10: { 8642 struct scsi_write_verify_10 *cdb; 8643 8644 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8645 flags |= CTL_LLF_FUA; 8646 if (cdb->byte2 & SWV_DPO) 8647 flags |= CTL_LLF_DPO; 8648 lba = scsi_4btoul(cdb->addr); 8649 num_blocks = scsi_2btoul(cdb->length); 8650 break; 8651 } 8652 case READ_12: 8653 case WRITE_12: { 8654 struct scsi_rw_12 *cdb; 8655 8656 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8657 if (cdb->byte2 & SRW12_FUA) 8658 flags |= CTL_LLF_FUA; 8659 if (cdb->byte2 & SRW12_DPO) 8660 flags |= CTL_LLF_DPO; 8661 lba = scsi_4btoul(cdb->addr); 8662 num_blocks = scsi_4btoul(cdb->length); 8663 break; 8664 } 8665 case WRITE_VERIFY_12: { 8666 struct scsi_write_verify_12 *cdb; 8667 8668 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8669 flags |= CTL_LLF_FUA; 8670 if (cdb->byte2 & SWV_DPO) 8671 flags |= CTL_LLF_DPO; 8672 lba = scsi_4btoul(cdb->addr); 8673 num_blocks = scsi_4btoul(cdb->length); 8674 break; 8675 } 8676 case READ_16: 8677 case WRITE_16: { 8678 struct scsi_rw_16 *cdb; 8679 8680 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8681 if (cdb->byte2 & SRW12_FUA) 8682 flags |= CTL_LLF_FUA; 8683 if (cdb->byte2 & SRW12_DPO) 8684 flags |= CTL_LLF_DPO; 8685 lba = scsi_8btou64(cdb->addr); 8686 num_blocks = scsi_4btoul(cdb->length); 8687 break; 8688 } 8689 case WRITE_ATOMIC_16: { 8690 struct scsi_write_atomic_16 *cdb; 8691 8692 if (lun->be_lun->atomicblock == 0) { 8693 ctl_set_invalid_opcode(ctsio); 8694 ctl_done((union ctl_io *)ctsio); 8695 return (CTL_RETVAL_COMPLETE); 8696 } 8697 8698 cdb = (struct scsi_write_atomic_16 *)ctsio->cdb; 8699 if (cdb->byte2 & SRW12_FUA) 8700 flags |= CTL_LLF_FUA; 8701 if (cdb->byte2 & SRW12_DPO) 8702 flags |= CTL_LLF_DPO; 8703 lba = scsi_8btou64(cdb->addr); 8704 num_blocks = scsi_2btoul(cdb->length); 8705 if (num_blocks > lun->be_lun->atomicblock) { 8706 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8707 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8708 /*bit*/ 0); 8709 ctl_done((union ctl_io *)ctsio); 8710 return (CTL_RETVAL_COMPLETE); 8711 } 8712 break; 8713 } 8714 case WRITE_VERIFY_16: { 8715 struct scsi_write_verify_16 *cdb; 8716 8717 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8718 flags |= CTL_LLF_FUA; 8719 if (cdb->byte2 & SWV_DPO) 8720 flags |= CTL_LLF_DPO; 8721 lba = scsi_8btou64(cdb->addr); 8722 num_blocks = scsi_4btoul(cdb->length); 8723 break; 8724 } 8725 default: 8726 /* 8727 * We got a command we don't support. This shouldn't 8728 * happen, commands should be filtered out above us. 8729 */ 8730 ctl_set_invalid_opcode(ctsio); 8731 ctl_done((union ctl_io *)ctsio); 8732 8733 return (CTL_RETVAL_COMPLETE); 8734 break; /* NOTREACHED */ 8735 } 8736 8737 /* 8738 * The first check is to make sure we're in bounds, the second 8739 * check is to catch wrap-around problems. If the lba + num blocks 8740 * is less than the lba, then we've wrapped around and the block 8741 * range is invalid anyway. 
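 * (Illustrative numbers: with maxlba == 999, lba 990 with 16 blocks fails
 * the first test since 990 + 16 > 1000, while an lba near UINT64_MAX with
 * a non-zero length wraps around and fails the second test.)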
8742 */ 8743 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8744 || ((lba + num_blocks) < lba)) { 8745 ctl_set_lba_out_of_range(ctsio, 8746 MAX(lba, lun->be_lun->maxlba + 1)); 8747 ctl_done((union ctl_io *)ctsio); 8748 return (CTL_RETVAL_COMPLETE); 8749 } 8750 8751 /* 8752 * According to SBC-3, a transfer length of 0 is not an error. 8753 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8754 * translates to 256 blocks for those commands. 8755 */ 8756 if (num_blocks == 0) { 8757 ctl_set_success(ctsio); 8758 ctl_done((union ctl_io *)ctsio); 8759 return (CTL_RETVAL_COMPLETE); 8760 } 8761 8762 /* Set FUA and/or DPO if caches are disabled. */ 8763 if (isread) { 8764 if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0) 8765 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8766 } else { 8767 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8768 flags |= CTL_LLF_FUA; 8769 } 8770 8771 lbalen = (struct ctl_lba_len_flags *) 8772 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8773 lbalen->lba = lba; 8774 lbalen->len = num_blocks; 8775 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8776 8777 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8778 ctsio->kern_rel_offset = 0; 8779 8780 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8781 8782 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8783 return (retval); 8784 } 8785 8786 static int 8787 ctl_cnw_cont(union ctl_io *io) 8788 { 8789 struct ctl_lun *lun = CTL_LUN(io); 8790 struct ctl_scsiio *ctsio; 8791 struct ctl_lba_len_flags *lbalen; 8792 int retval; 8793 8794 ctsio = &io->scsiio; 8795 ctsio->io_hdr.status = CTL_STATUS_NONE; 8796 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8797 lbalen = (struct ctl_lba_len_flags *) 8798 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8799 lbalen->flags &= ~CTL_LLF_COMPARE; 8800 lbalen->flags |= CTL_LLF_WRITE; 8801 8802 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8803 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8804 return (retval); 8805 } 8806 8807 int 8808 ctl_cnw(struct ctl_scsiio *ctsio) 8809 { 8810 struct ctl_lun *lun = CTL_LUN(ctsio); 8811 struct ctl_lba_len_flags *lbalen; 8812 uint64_t lba; 8813 uint32_t num_blocks; 8814 int flags, retval; 8815 8816 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8817 8818 flags = 0; 8819 switch (ctsio->cdb[0]) { 8820 case COMPARE_AND_WRITE: { 8821 struct scsi_compare_and_write *cdb; 8822 8823 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8824 if (cdb->byte2 & SRW10_FUA) 8825 flags |= CTL_LLF_FUA; 8826 if (cdb->byte2 & SRW10_DPO) 8827 flags |= CTL_LLF_DPO; 8828 lba = scsi_8btou64(cdb->addr); 8829 num_blocks = cdb->length; 8830 break; 8831 } 8832 default: 8833 /* 8834 * We got a command we don't support. This shouldn't 8835 * happen, commands should be filtered out above us. 8836 */ 8837 ctl_set_invalid_opcode(ctsio); 8838 ctl_done((union ctl_io *)ctsio); 8839 8840 return (CTL_RETVAL_COMPLETE); 8841 break; /* NOTREACHED */ 8842 } 8843 8844 /* 8845 * The first check is to make sure we're in bounds, the second 8846 * check is to catch wrap-around problems. If the lba + num blocks 8847 * is less than the lba, then we've wrapped around and the block 8848 * range is invalid anyway. 
8849 */ 8850 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8851 || ((lba + num_blocks) < lba)) { 8852 ctl_set_lba_out_of_range(ctsio, 8853 MAX(lba, lun->be_lun->maxlba + 1)); 8854 ctl_done((union ctl_io *)ctsio); 8855 return (CTL_RETVAL_COMPLETE); 8856 } 8857 8858 /* 8859 * According to SBC-3, a transfer length of 0 is not an error. 8860 */ 8861 if (num_blocks == 0) { 8862 ctl_set_success(ctsio); 8863 ctl_done((union ctl_io *)ctsio); 8864 return (CTL_RETVAL_COMPLETE); 8865 } 8866 8867 /* Set FUA if write cache is disabled. */ 8868 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8869 flags |= CTL_LLF_FUA; 8870 8871 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 8872 ctsio->kern_rel_offset = 0; 8873 8874 /* 8875 * Set the IO_CONT flag, so that if this I/O gets passed to 8876 * ctl_data_submit_done(), it'll get passed back to 8877 * ctl_ctl_cnw_cont() for further processing. 8878 */ 8879 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 8880 ctsio->io_cont = ctl_cnw_cont; 8881 8882 lbalen = (struct ctl_lba_len_flags *) 8883 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8884 lbalen->lba = lba; 8885 lbalen->len = num_blocks; 8886 lbalen->flags = CTL_LLF_COMPARE | flags; 8887 8888 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 8889 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8890 return (retval); 8891 } 8892 8893 int 8894 ctl_verify(struct ctl_scsiio *ctsio) 8895 { 8896 struct ctl_lun *lun = CTL_LUN(ctsio); 8897 struct ctl_lba_len_flags *lbalen; 8898 uint64_t lba; 8899 uint32_t num_blocks; 8900 int bytchk, flags; 8901 int retval; 8902 8903 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 8904 8905 bytchk = 0; 8906 flags = CTL_LLF_FUA; 8907 switch (ctsio->cdb[0]) { 8908 case VERIFY_10: { 8909 struct scsi_verify_10 *cdb; 8910 8911 cdb = (struct scsi_verify_10 *)ctsio->cdb; 8912 if (cdb->byte2 & SVFY_BYTCHK) 8913 bytchk = 1; 8914 if (cdb->byte2 & SVFY_DPO) 8915 flags |= CTL_LLF_DPO; 8916 lba = scsi_4btoul(cdb->addr); 8917 num_blocks = scsi_2btoul(cdb->length); 8918 break; 8919 } 8920 case VERIFY_12: { 8921 struct scsi_verify_12 *cdb; 8922 8923 cdb = (struct scsi_verify_12 *)ctsio->cdb; 8924 if (cdb->byte2 & SVFY_BYTCHK) 8925 bytchk = 1; 8926 if (cdb->byte2 & SVFY_DPO) 8927 flags |= CTL_LLF_DPO; 8928 lba = scsi_4btoul(cdb->addr); 8929 num_blocks = scsi_4btoul(cdb->length); 8930 break; 8931 } 8932 case VERIFY_16: { 8933 struct scsi_rw_16 *cdb; 8934 8935 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8936 if (cdb->byte2 & SVFY_BYTCHK) 8937 bytchk = 1; 8938 if (cdb->byte2 & SVFY_DPO) 8939 flags |= CTL_LLF_DPO; 8940 lba = scsi_8btou64(cdb->addr); 8941 num_blocks = scsi_4btoul(cdb->length); 8942 break; 8943 } 8944 default: 8945 /* 8946 * We got a command we don't support. This shouldn't 8947 * happen, commands should be filtered out above us. 8948 */ 8949 ctl_set_invalid_opcode(ctsio); 8950 ctl_done((union ctl_io *)ctsio); 8951 return (CTL_RETVAL_COMPLETE); 8952 } 8953 8954 /* 8955 * The first check is to make sure we're in bounds, the second 8956 * check is to catch wrap-around problems. If the lba + num blocks 8957 * is less than the lba, then we've wrapped around and the block 8958 * range is invalid anyway. 8959 */ 8960 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8961 || ((lba + num_blocks) < lba)) { 8962 ctl_set_lba_out_of_range(ctsio, 8963 MAX(lba, lun->be_lun->maxlba + 1)); 8964 ctl_done((union ctl_io *)ctsio); 8965 return (CTL_RETVAL_COMPLETE); 8966 } 8967 8968 /* 8969 * According to SBC-3, a transfer length of 0 is not an error. 
8970 */ 8971 if (num_blocks == 0) { 8972 ctl_set_success(ctsio); 8973 ctl_done((union ctl_io *)ctsio); 8974 return (CTL_RETVAL_COMPLETE); 8975 } 8976 8977 lbalen = (struct ctl_lba_len_flags *) 8978 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8979 lbalen->lba = lba; 8980 lbalen->len = num_blocks; 8981 if (bytchk) { 8982 lbalen->flags = CTL_LLF_COMPARE | flags; 8983 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8984 } else { 8985 lbalen->flags = CTL_LLF_VERIFY | flags; 8986 ctsio->kern_total_len = 0; 8987 } 8988 ctsio->kern_rel_offset = 0; 8989 8990 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 8991 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8992 return (retval); 8993 } 8994 8995 int 8996 ctl_report_luns(struct ctl_scsiio *ctsio) 8997 { 8998 struct ctl_softc *softc = CTL_SOFTC(ctsio); 8999 struct ctl_port *port = CTL_PORT(ctsio); 9000 struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio); 9001 struct scsi_report_luns *cdb; 9002 struct scsi_report_luns_data *lun_data; 9003 int num_filled, num_luns, num_port_luns, retval; 9004 uint32_t alloc_len, lun_datalen; 9005 uint32_t initidx, targ_lun_id, lun_id; 9006 9007 retval = CTL_RETVAL_COMPLETE; 9008 cdb = (struct scsi_report_luns *)ctsio->cdb; 9009 9010 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9011 9012 num_luns = 0; 9013 num_port_luns = port->lun_map ? port->lun_map_size : CTL_MAX_LUNS; 9014 mtx_lock(&softc->ctl_lock); 9015 for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) { 9016 if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX) 9017 num_luns++; 9018 } 9019 mtx_unlock(&softc->ctl_lock); 9020 9021 switch (cdb->select_report) { 9022 case RPL_REPORT_DEFAULT: 9023 case RPL_REPORT_ALL: 9024 case RPL_REPORT_NONSUBSID: 9025 break; 9026 case RPL_REPORT_WELLKNOWN: 9027 case RPL_REPORT_ADMIN: 9028 case RPL_REPORT_CONGLOM: 9029 num_luns = 0; 9030 break; 9031 default: 9032 ctl_set_invalid_field(ctsio, 9033 /*sks_valid*/ 1, 9034 /*command*/ 1, 9035 /*field*/ 2, 9036 /*bit_valid*/ 0, 9037 /*bit*/ 0); 9038 ctl_done((union ctl_io *)ctsio); 9039 return (retval); 9040 break; /* NOTREACHED */ 9041 } 9042 9043 alloc_len = scsi_4btoul(cdb->length); 9044 /* 9045 * The initiator has to allocate at least 16 bytes for this request, 9046 * so he can at least get the header and the first LUN. Otherwise 9047 * we reject the request (per SPC-3 rev 14, section 6.21). 
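 * (Those 16 bytes are the 8-byte REPORT LUNS header plus one 8-byte LUN
 * entry, which is what the sizeof() sum below works out to.)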
9048 */ 9049 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9050 sizeof(struct scsi_report_luns_lundata))) { 9051 ctl_set_invalid_field(ctsio, 9052 /*sks_valid*/ 1, 9053 /*command*/ 1, 9054 /*field*/ 6, 9055 /*bit_valid*/ 0, 9056 /*bit*/ 0); 9057 ctl_done((union ctl_io *)ctsio); 9058 return (retval); 9059 } 9060 9061 lun_datalen = sizeof(*lun_data) + 9062 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9063 9064 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9065 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9066 ctsio->kern_sg_entries = 0; 9067 9068 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9069 9070 mtx_lock(&softc->ctl_lock); 9071 for (targ_lun_id = 0, num_filled = 0; 9072 targ_lun_id < num_port_luns && num_filled < num_luns; 9073 targ_lun_id++) { 9074 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 9075 if (lun_id == UINT32_MAX) 9076 continue; 9077 lun = softc->ctl_luns[lun_id]; 9078 if (lun == NULL) 9079 continue; 9080 9081 be64enc(lun_data->luns[num_filled++].lundata, 9082 ctl_encode_lun(targ_lun_id)); 9083 9084 /* 9085 * According to SPC-3, rev 14 section 6.21: 9086 * 9087 * "The execution of a REPORT LUNS command to any valid and 9088 * installed logical unit shall clear the REPORTED LUNS DATA 9089 * HAS CHANGED unit attention condition for all logical 9090 * units of that target with respect to the requesting 9091 * initiator. A valid and installed logical unit is one 9092 * having a PERIPHERAL QUALIFIER of 000b in the standard 9093 * INQUIRY data (see 6.4.2)." 9094 * 9095 * If request_lun is NULL, the LUN this report luns command 9096 * was issued to is either disabled or doesn't exist. In that 9097 * case, we shouldn't clear any pending lun change unit 9098 * attention. 9099 */ 9100 if (request_lun != NULL) { 9101 mtx_lock(&lun->lun_lock); 9102 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 9103 mtx_unlock(&lun->lun_lock); 9104 } 9105 } 9106 mtx_unlock(&softc->ctl_lock); 9107 9108 /* 9109 * It's quite possible that we've returned fewer LUNs than we allocated 9110 * space for. Trim it. 9111 */ 9112 lun_datalen = sizeof(*lun_data) + 9113 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9114 ctsio->kern_rel_offset = 0; 9115 ctsio->kern_sg_entries = 0; 9116 ctsio->kern_data_len = min(lun_datalen, alloc_len); 9117 ctsio->kern_total_len = ctsio->kern_data_len; 9118 9119 /* 9120 * We set this to the actual data length, regardless of how much 9121 * space we actually have to return results. If the user looks at 9122 * this value, he'll know whether or not he allocated enough space 9123 * and reissue the command if necessary. We don't support well 9124 * known logical units, so if the user asks for that, return none. 9125 */ 9126 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9127 9128 /* 9129 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9130 * this request. 
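 * (A short allocation length is not such a case: the data is simply
 * truncated, which the min(lun_datalen, alloc_len) assignment above
 * already handles, while the LUN LIST LENGTH field still reports the
 * full size.)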
9131 */ 9132 ctl_set_success(ctsio); 9133 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9134 ctsio->be_move_done = ctl_config_move_done; 9135 ctl_datamove((union ctl_io *)ctsio); 9136 return (retval); 9137 } 9138 9139 int 9140 ctl_request_sense(struct ctl_scsiio *ctsio) 9141 { 9142 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9143 struct ctl_lun *lun = CTL_LUN(ctsio); 9144 struct scsi_request_sense *cdb; 9145 struct scsi_sense_data *sense_ptr, *ps; 9146 uint32_t initidx; 9147 int have_error; 9148 u_int sense_len = SSD_FULL_SIZE; 9149 scsi_sense_data_type sense_format; 9150 ctl_ua_type ua_type; 9151 uint8_t asc = 0, ascq = 0; 9152 9153 cdb = (struct scsi_request_sense *)ctsio->cdb; 9154 9155 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9156 9157 /* 9158 * Determine which sense format the user wants. 9159 */ 9160 if (cdb->byte2 & SRS_DESC) 9161 sense_format = SSD_TYPE_DESC; 9162 else 9163 sense_format = SSD_TYPE_FIXED; 9164 9165 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9166 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9167 ctsio->kern_sg_entries = 0; 9168 ctsio->kern_rel_offset = 0; 9169 9170 /* 9171 * struct scsi_sense_data, which is currently set to 256 bytes, is 9172 * larger than the largest allowed value for the length field in the 9173 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9174 */ 9175 ctsio->kern_data_len = cdb->length; 9176 ctsio->kern_total_len = cdb->length; 9177 9178 /* 9179 * If we don't have a LUN, we don't have any pending sense. 9180 */ 9181 if (lun == NULL || 9182 ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 9183 softc->ha_link < CTL_HA_LINK_UNKNOWN)) { 9184 /* "Logical unit not supported" */ 9185 ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format, 9186 /*current_error*/ 1, 9187 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 9188 /*asc*/ 0x25, 9189 /*ascq*/ 0x00, 9190 SSD_ELEM_NONE); 9191 goto send; 9192 } 9193 9194 have_error = 0; 9195 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9196 /* 9197 * Check for pending sense, and then for pending unit attentions. 9198 * Pending sense gets returned first, then pending unit attentions. 9199 */ 9200 mtx_lock(&lun->lun_lock); 9201 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; 9202 if (ps != NULL) 9203 ps += initidx % CTL_MAX_INIT_PER_PORT; 9204 if (ps != NULL && ps->error_code != 0) { 9205 scsi_sense_data_type stored_format; 9206 9207 /* 9208 * Check to see which sense format was used for the stored 9209 * sense data. 9210 */ 9211 stored_format = scsi_sense_type(ps); 9212 9213 /* 9214 * If the user requested a different sense format than the 9215 * one we stored, then we need to convert it to the other 9216 * format. If we're going from descriptor to fixed format 9217 * sense data, we may lose things in translation, depending 9218 * on what options were used. 9219 * 9220 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9221 * for some reason we'll just copy it out as-is. 
9222 */ 9223 if ((stored_format == SSD_TYPE_FIXED) 9224 && (sense_format == SSD_TYPE_DESC)) 9225 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9226 ps, (struct scsi_sense_data_desc *)sense_ptr); 9227 else if ((stored_format == SSD_TYPE_DESC) 9228 && (sense_format == SSD_TYPE_FIXED)) 9229 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9230 ps, (struct scsi_sense_data_fixed *)sense_ptr); 9231 else 9232 memcpy(sense_ptr, ps, sizeof(*sense_ptr)); 9233 9234 ps->error_code = 0; 9235 have_error = 1; 9236 } else { 9237 ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len, 9238 sense_format); 9239 if (ua_type != CTL_UA_NONE) 9240 have_error = 1; 9241 } 9242 if (have_error == 0) { 9243 /* 9244 * Report informational exception if have one and allowed. 9245 */ 9246 if (lun->MODE_IE.mrie != SIEP_MRIE_NO) { 9247 asc = lun->ie_asc; 9248 ascq = lun->ie_ascq; 9249 } 9250 ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format, 9251 /*current_error*/ 1, 9252 /*sense_key*/ SSD_KEY_NO_SENSE, 9253 /*asc*/ asc, 9254 /*ascq*/ ascq, 9255 SSD_ELEM_NONE); 9256 } 9257 mtx_unlock(&lun->lun_lock); 9258 9259 send: 9260 /* 9261 * We report the SCSI status as OK, since the status of the command 9262 * itself is OK. We're reporting sense as parameter data. 9263 */ 9264 ctl_set_success(ctsio); 9265 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9266 ctsio->be_move_done = ctl_config_move_done; 9267 ctl_datamove((union ctl_io *)ctsio); 9268 return (CTL_RETVAL_COMPLETE); 9269 } 9270 9271 int 9272 ctl_tur(struct ctl_scsiio *ctsio) 9273 { 9274 9275 CTL_DEBUG_PRINT(("ctl_tur\n")); 9276 9277 ctl_set_success(ctsio); 9278 ctl_done((union ctl_io *)ctsio); 9279 9280 return (CTL_RETVAL_COMPLETE); 9281 } 9282 9283 /* 9284 * SCSI VPD page 0x00, the Supported VPD Pages page. 9285 */ 9286 static int 9287 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9288 { 9289 struct ctl_lun *lun = CTL_LUN(ctsio); 9290 struct scsi_vpd_supported_pages *pages; 9291 int sup_page_size; 9292 int p; 9293 9294 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9295 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9296 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9297 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9298 ctsio->kern_rel_offset = 0; 9299 ctsio->kern_sg_entries = 0; 9300 ctsio->kern_data_len = min(sup_page_size, alloc_len); 9301 ctsio->kern_total_len = ctsio->kern_data_len; 9302 9303 /* 9304 * The control device is always connected. The disk device, on the 9305 * other hand, may not be online all the time. Need to change this 9306 * to figure out whether the disk device is actually online or not. 
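 * (Added note: the value built below is standard INQUIRY byte 0, i.e. a
 * 3-bit peripheral qualifier (SID_QUAL_*) in the top bits and the
 * peripheral device type of the backing LUN in the low five bits.)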
9307 */ 9308 if (lun != NULL) 9309 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9310 lun->be_lun->lun_type; 9311 else 9312 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9313 9314 p = 0; 9315 /* Supported VPD pages */ 9316 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9317 /* Serial Number */ 9318 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9319 /* Device Identification */ 9320 pages->page_list[p++] = SVPD_DEVICE_ID; 9321 /* Extended INQUIRY Data */ 9322 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9323 /* Mode Page Policy */ 9324 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9325 /* SCSI Ports */ 9326 pages->page_list[p++] = SVPD_SCSI_PORTS; 9327 /* Third-party Copy */ 9328 pages->page_list[p++] = SVPD_SCSI_TPC; 9329 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9330 /* Block limits */ 9331 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9332 /* Block Device Characteristics */ 9333 pages->page_list[p++] = SVPD_BDC; 9334 /* Logical Block Provisioning */ 9335 pages->page_list[p++] = SVPD_LBP; 9336 } 9337 pages->length = p; 9338 9339 ctl_set_success(ctsio); 9340 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9341 ctsio->be_move_done = ctl_config_move_done; 9342 ctl_datamove((union ctl_io *)ctsio); 9343 return (CTL_RETVAL_COMPLETE); 9344 } 9345 9346 /* 9347 * SCSI VPD page 0x80, the Unit Serial Number page. 9348 */ 9349 static int 9350 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9351 { 9352 struct ctl_lun *lun = CTL_LUN(ctsio); 9353 struct scsi_vpd_unit_serial_number *sn_ptr; 9354 int data_len; 9355 9356 data_len = 4 + CTL_SN_LEN; 9357 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9358 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9359 ctsio->kern_rel_offset = 0; 9360 ctsio->kern_sg_entries = 0; 9361 ctsio->kern_data_len = min(data_len, alloc_len); 9362 ctsio->kern_total_len = ctsio->kern_data_len; 9363 9364 /* 9365 * The control device is always connected. The disk device, on the 9366 * other hand, may not be online all the time. Need to change this 9367 * to figure out whether the disk device is actually online or not. 9368 */ 9369 if (lun != NULL) 9370 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9371 lun->be_lun->lun_type; 9372 else 9373 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9374 9375 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9376 sn_ptr->length = CTL_SN_LEN; 9377 /* 9378 * If we don't have a LUN, we just leave the serial number as 9379 * all spaces. 9380 */ 9381 if (lun != NULL) { 9382 strncpy((char *)sn_ptr->serial_num, 9383 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9384 } else 9385 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9386 9387 ctl_set_success(ctsio); 9388 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9389 ctsio->be_move_done = ctl_config_move_done; 9390 ctl_datamove((union ctl_io *)ctsio); 9391 return (CTL_RETVAL_COMPLETE); 9392 } 9393 9394 9395 /* 9396 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 
9397 */ 9398 static int 9399 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9400 { 9401 struct ctl_lun *lun = CTL_LUN(ctsio); 9402 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9403 int data_len; 9404 9405 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9406 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9407 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9408 ctsio->kern_sg_entries = 0; 9409 ctsio->kern_rel_offset = 0; 9410 ctsio->kern_data_len = min(data_len, alloc_len); 9411 ctsio->kern_total_len = ctsio->kern_data_len; 9412 9413 /* 9414 * The control device is always connected. The disk device, on the 9415 * other hand, may not be online all the time. 9416 */ 9417 if (lun != NULL) 9418 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9419 lun->be_lun->lun_type; 9420 else 9421 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9422 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9423 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9424 /* 9425 * We support head of queue, ordered and simple tags. 9426 */ 9427 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9428 /* 9429 * Volatile cache supported. 9430 */ 9431 eid_ptr->flags3 = SVPD_EID_V_SUP; 9432 9433 /* 9434 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9435 * attention for a particular IT nexus on all LUNs once we report 9436 * it to that nexus once. This bit is required as of SPC-4. 9437 */ 9438 eid_ptr->flags4 = SVPD_EID_LUICLR; 9439 9440 /* 9441 * We support revert to defaults (RTD) bit in MODE SELECT. 9442 */ 9443 eid_ptr->flags5 = SVPD_EID_RTD_SUP; 9444 9445 /* 9446 * XXX KDM in order to correctly answer this, we would need 9447 * information from the SIM to determine how much sense data it 9448 * can send. So this would really be a path inquiry field, most 9449 * likely. This can be set to a maximum of 252 according to SPC-4, 9450 * but the hardware may or may not be able to support that much. 9451 * 0 just means that the maximum sense data length is not reported. 9452 */ 9453 eid_ptr->max_sense_length = 0; 9454 9455 ctl_set_success(ctsio); 9456 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9457 ctsio->be_move_done = ctl_config_move_done; 9458 ctl_datamove((union ctl_io *)ctsio); 9459 return (CTL_RETVAL_COMPLETE); 9460 } 9461 9462 static int 9463 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9464 { 9465 struct ctl_lun *lun = CTL_LUN(ctsio); 9466 struct scsi_vpd_mode_page_policy *mpp_ptr; 9467 int data_len; 9468 9469 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9470 sizeof(struct scsi_vpd_mode_page_policy_descr); 9471 9472 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9473 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9474 ctsio->kern_rel_offset = 0; 9475 ctsio->kern_sg_entries = 0; 9476 ctsio->kern_data_len = min(data_len, alloc_len); 9477 ctsio->kern_total_len = ctsio->kern_data_len; 9478 9479 /* 9480 * The control device is always connected. The disk device, on the 9481 * other hand, may not be online all the time. 
9482 */ 9483 if (lun != NULL) 9484 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9485 lun->be_lun->lun_type; 9486 else 9487 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9488 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9489 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9490 mpp_ptr->descr[0].page_code = 0x3f; 9491 mpp_ptr->descr[0].subpage_code = 0xff; 9492 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9493 9494 ctl_set_success(ctsio); 9495 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9496 ctsio->be_move_done = ctl_config_move_done; 9497 ctl_datamove((union ctl_io *)ctsio); 9498 return (CTL_RETVAL_COMPLETE); 9499 } 9500 9501 /* 9502 * SCSI VPD page 0x83, the Device Identification page. 9503 */ 9504 static int 9505 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9506 { 9507 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9508 struct ctl_port *port = CTL_PORT(ctsio); 9509 struct ctl_lun *lun = CTL_LUN(ctsio); 9510 struct scsi_vpd_device_id *devid_ptr; 9511 struct scsi_vpd_id_descriptor *desc; 9512 int data_len, g; 9513 uint8_t proto; 9514 9515 data_len = sizeof(struct scsi_vpd_device_id) + 9516 sizeof(struct scsi_vpd_id_descriptor) + 9517 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9518 sizeof(struct scsi_vpd_id_descriptor) + 9519 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9520 if (lun && lun->lun_devid) 9521 data_len += lun->lun_devid->len; 9522 if (port && port->port_devid) 9523 data_len += port->port_devid->len; 9524 if (port && port->target_devid) 9525 data_len += port->target_devid->len; 9526 9527 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9528 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9529 ctsio->kern_sg_entries = 0; 9530 ctsio->kern_rel_offset = 0; 9531 ctsio->kern_sg_entries = 0; 9532 ctsio->kern_data_len = min(data_len, alloc_len); 9533 ctsio->kern_total_len = ctsio->kern_data_len; 9534 9535 /* 9536 * The control device is always connected. The disk device, on the 9537 * other hand, may not be online all the time. 9538 */ 9539 if (lun != NULL) 9540 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9541 lun->be_lun->lun_type; 9542 else 9543 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9544 devid_ptr->page_code = SVPD_DEVICE_ID; 9545 scsi_ulto2b(data_len - 4, devid_ptr->length); 9546 9547 if (port && port->port_type == CTL_PORT_FC) 9548 proto = SCSI_PROTO_FC << 4; 9549 else if (port && port->port_type == CTL_PORT_SAS) 9550 proto = SCSI_PROTO_SAS << 4; 9551 else if (port && port->port_type == CTL_PORT_ISCSI) 9552 proto = SCSI_PROTO_ISCSI << 4; 9553 else 9554 proto = SCSI_PROTO_SPI << 4; 9555 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9556 9557 /* 9558 * We're using a LUN association here. i.e., this device ID is a 9559 * per-LUN identifier. 9560 */ 9561 if (lun && lun->lun_devid) { 9562 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9563 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9564 lun->lun_devid->len); 9565 } 9566 9567 /* 9568 * This is for the WWPN which is a port association. 
9569 */ 9570 if (port && port->port_devid) { 9571 memcpy(desc, port->port_devid->data, port->port_devid->len); 9572 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9573 port->port_devid->len); 9574 } 9575 9576 /* 9577 * This is for the Relative Target Port(type 4h) identifier 9578 */ 9579 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9580 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9581 SVPD_ID_TYPE_RELTARG; 9582 desc->length = 4; 9583 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9584 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9585 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9586 9587 /* 9588 * This is for the Target Port Group(type 5h) identifier 9589 */ 9590 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9591 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9592 SVPD_ID_TYPE_TPORTGRP; 9593 desc->length = 4; 9594 if (softc->is_single || 9595 (port && port->status & CTL_PORT_STATUS_HA_SHARED)) 9596 g = 1; 9597 else 9598 g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt; 9599 scsi_ulto2b(g, &desc->identifier[2]); 9600 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9601 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9602 9603 /* 9604 * This is for the Target identifier 9605 */ 9606 if (port && port->target_devid) { 9607 memcpy(desc, port->target_devid->data, port->target_devid->len); 9608 } 9609 9610 ctl_set_success(ctsio); 9611 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9612 ctsio->be_move_done = ctl_config_move_done; 9613 ctl_datamove((union ctl_io *)ctsio); 9614 return (CTL_RETVAL_COMPLETE); 9615 } 9616 9617 static int 9618 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9619 { 9620 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9621 struct ctl_lun *lun = CTL_LUN(ctsio); 9622 struct scsi_vpd_scsi_ports *sp; 9623 struct scsi_vpd_port_designation *pd; 9624 struct scsi_vpd_port_designation_cont *pdc; 9625 struct ctl_port *port; 9626 int data_len, num_target_ports, iid_len, id_len; 9627 9628 num_target_ports = 0; 9629 iid_len = 0; 9630 id_len = 0; 9631 mtx_lock(&softc->ctl_lock); 9632 STAILQ_FOREACH(port, &softc->port_list, links) { 9633 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9634 continue; 9635 if (lun != NULL && 9636 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9637 continue; 9638 num_target_ports++; 9639 if (port->init_devid) 9640 iid_len += port->init_devid->len; 9641 if (port->port_devid) 9642 id_len += port->port_devid->len; 9643 } 9644 mtx_unlock(&softc->ctl_lock); 9645 9646 data_len = sizeof(struct scsi_vpd_scsi_ports) + 9647 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9648 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9649 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9650 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9651 ctsio->kern_sg_entries = 0; 9652 ctsio->kern_rel_offset = 0; 9653 ctsio->kern_sg_entries = 0; 9654 ctsio->kern_data_len = min(data_len, alloc_len); 9655 ctsio->kern_total_len = ctsio->kern_data_len; 9656 9657 /* 9658 * The control device is always connected. The disk device, on the 9659 * other hand, may not be online all the time. Need to change this 9660 * to figure out whether the disk device is actually online or not. 
9661 */ 9662 if (lun != NULL) 9663 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 9664 lun->be_lun->lun_type; 9665 else 9666 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9667 9668 sp->page_code = SVPD_SCSI_PORTS; 9669 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9670 sp->page_length); 9671 pd = &sp->design[0]; 9672 9673 mtx_lock(&softc->ctl_lock); 9674 STAILQ_FOREACH(port, &softc->port_list, links) { 9675 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9676 continue; 9677 if (lun != NULL && 9678 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9679 continue; 9680 scsi_ulto2b(port->targ_port, pd->relative_port_id); 9681 if (port->init_devid) { 9682 iid_len = port->init_devid->len; 9683 memcpy(pd->initiator_transportid, 9684 port->init_devid->data, port->init_devid->len); 9685 } else 9686 iid_len = 0; 9687 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9688 pdc = (struct scsi_vpd_port_designation_cont *) 9689 (&pd->initiator_transportid[iid_len]); 9690 if (port->port_devid) { 9691 id_len = port->port_devid->len; 9692 memcpy(pdc->target_port_descriptors, 9693 port->port_devid->data, port->port_devid->len); 9694 } else 9695 id_len = 0; 9696 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9697 pd = (struct scsi_vpd_port_designation *) 9698 ((uint8_t *)pdc->target_port_descriptors + id_len); 9699 } 9700 mtx_unlock(&softc->ctl_lock); 9701 9702 ctl_set_success(ctsio); 9703 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9704 ctsio->be_move_done = ctl_config_move_done; 9705 ctl_datamove((union ctl_io *)ctsio); 9706 return (CTL_RETVAL_COMPLETE); 9707 } 9708 9709 static int 9710 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9711 { 9712 struct ctl_lun *lun = CTL_LUN(ctsio); 9713 struct scsi_vpd_block_limits *bl_ptr; 9714 uint64_t ival; 9715 9716 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9717 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 9718 ctsio->kern_sg_entries = 0; 9719 ctsio->kern_rel_offset = 0; 9720 ctsio->kern_sg_entries = 0; 9721 ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len); 9722 ctsio->kern_total_len = ctsio->kern_data_len; 9723 9724 /* 9725 * The control device is always connected. The disk device, on the 9726 * other hand, may not be online all the time. Need to change this 9727 * to figure out whether the disk device is actually online or not. 
9728 */ 9729 if (lun != NULL) 9730 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9731 lun->be_lun->lun_type; 9732 else 9733 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9734 9735 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9736 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 9737 bl_ptr->max_cmp_write_len = 0xff; 9738 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9739 if (lun != NULL) { 9740 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 9741 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9742 ival = 0xffffffff; 9743 ctl_get_opt_number(&lun->be_lun->options, 9744 "unmap_max_lba", &ival); 9745 scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt); 9746 ival = 0xffffffff; 9747 ctl_get_opt_number(&lun->be_lun->options, 9748 "unmap_max_descr", &ival); 9749 scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt); 9750 if (lun->be_lun->ublockexp != 0) { 9751 scsi_ulto4b((1 << lun->be_lun->ublockexp), 9752 bl_ptr->opt_unmap_grain); 9753 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 9754 bl_ptr->unmap_grain_align); 9755 } 9756 } 9757 scsi_ulto4b(lun->be_lun->atomicblock, 9758 bl_ptr->max_atomic_transfer_length); 9759 scsi_ulto4b(0, bl_ptr->atomic_alignment); 9760 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 9761 scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary); 9762 scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size); 9763 ival = UINT64_MAX; 9764 ctl_get_opt_number(&lun->be_lun->options, "write_same_max_lba", &ival); 9765 scsi_u64to8b(ival, bl_ptr->max_write_same_length); 9766 } 9767 9768 ctl_set_success(ctsio); 9769 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9770 ctsio->be_move_done = ctl_config_move_done; 9771 ctl_datamove((union ctl_io *)ctsio); 9772 return (CTL_RETVAL_COMPLETE); 9773 } 9774 9775 static int 9776 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 9777 { 9778 struct ctl_lun *lun = CTL_LUN(ctsio); 9779 struct scsi_vpd_block_device_characteristics *bdc_ptr; 9780 const char *value; 9781 u_int i; 9782 9783 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 9784 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 9785 ctsio->kern_sg_entries = 0; 9786 ctsio->kern_rel_offset = 0; 9787 ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len); 9788 ctsio->kern_total_len = ctsio->kern_data_len; 9789 9790 /* 9791 * The control device is always connected. The disk device, on the 9792 * other hand, may not be online all the time. Need to change this 9793 * to figure out whether the disk device is actually online or not. 
9794 */ 9795 if (lun != NULL) 9796 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9797 lun->be_lun->lun_type; 9798 else 9799 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9800 bdc_ptr->page_code = SVPD_BDC; 9801 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 9802 if (lun != NULL && 9803 (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) 9804 i = strtol(value, NULL, 0); 9805 else 9806 i = CTL_DEFAULT_ROTATION_RATE; 9807 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 9808 if (lun != NULL && 9809 (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL) 9810 i = strtol(value, NULL, 0); 9811 else 9812 i = 0; 9813 bdc_ptr->wab_wac_ff = (i & 0x0f); 9814 bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; 9815 9816 ctl_set_success(ctsio); 9817 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9818 ctsio->be_move_done = ctl_config_move_done; 9819 ctl_datamove((union ctl_io *)ctsio); 9820 return (CTL_RETVAL_COMPLETE); 9821 } 9822 9823 static int 9824 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 9825 { 9826 struct ctl_lun *lun = CTL_LUN(ctsio); 9827 struct scsi_vpd_logical_block_prov *lbp_ptr; 9828 const char *value; 9829 9830 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 9831 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 9832 ctsio->kern_sg_entries = 0; 9833 ctsio->kern_rel_offset = 0; 9834 ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len); 9835 ctsio->kern_total_len = ctsio->kern_data_len; 9836 9837 /* 9838 * The control device is always connected. The disk device, on the 9839 * other hand, may not be online all the time. Need to change this 9840 * to figure out whether the disk device is actually online or not. 9841 */ 9842 if (lun != NULL) 9843 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9844 lun->be_lun->lun_type; 9845 else 9846 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9847 9848 lbp_ptr->page_code = SVPD_LBP; 9849 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 9850 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 9851 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9852 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 9853 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 9854 value = ctl_get_opt(&lun->be_lun->options, "provisioning_type"); 9855 if (value != NULL) { 9856 if (strcmp(value, "resource") == 0) 9857 lbp_ptr->prov_type = SVPD_LBP_RESOURCE; 9858 else if (strcmp(value, "thin") == 0) 9859 lbp_ptr->prov_type = SVPD_LBP_THIN; 9860 } else 9861 lbp_ptr->prov_type = SVPD_LBP_THIN; 9862 } 9863 9864 ctl_set_success(ctsio); 9865 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9866 ctsio->be_move_done = ctl_config_move_done; 9867 ctl_datamove((union ctl_io *)ctsio); 9868 return (CTL_RETVAL_COMPLETE); 9869 } 9870 9871 /* 9872 * INQUIRY with the EVPD bit set. 
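 * The VPD page code in byte 2 of the CDB selects which page to build below;
 * pages we do not implement, and the block-device pages on non-disk LUNs,
 * are rejected with INVALID FIELD IN CDB.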
9873 */ 9874 static int 9875 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 9876 { 9877 struct ctl_lun *lun = CTL_LUN(ctsio); 9878 struct scsi_inquiry *cdb; 9879 int alloc_len, retval; 9880 9881 cdb = (struct scsi_inquiry *)ctsio->cdb; 9882 alloc_len = scsi_2btoul(cdb->length); 9883 9884 switch (cdb->page_code) { 9885 case SVPD_SUPPORTED_PAGES: 9886 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 9887 break; 9888 case SVPD_UNIT_SERIAL_NUMBER: 9889 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 9890 break; 9891 case SVPD_DEVICE_ID: 9892 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 9893 break; 9894 case SVPD_EXTENDED_INQUIRY_DATA: 9895 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 9896 break; 9897 case SVPD_MODE_PAGE_POLICY: 9898 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 9899 break; 9900 case SVPD_SCSI_PORTS: 9901 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 9902 break; 9903 case SVPD_SCSI_TPC: 9904 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 9905 break; 9906 case SVPD_BLOCK_LIMITS: 9907 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9908 goto err; 9909 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 9910 break; 9911 case SVPD_BDC: 9912 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9913 goto err; 9914 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 9915 break; 9916 case SVPD_LBP: 9917 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9918 goto err; 9919 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 9920 break; 9921 default: 9922 err: 9923 ctl_set_invalid_field(ctsio, 9924 /*sks_valid*/ 1, 9925 /*command*/ 1, 9926 /*field*/ 2, 9927 /*bit_valid*/ 0, 9928 /*bit*/ 0); 9929 ctl_done((union ctl_io *)ctsio); 9930 retval = CTL_RETVAL_COMPLETE; 9931 break; 9932 } 9933 9934 return (retval); 9935 } 9936 9937 /* 9938 * Standard INQUIRY data. 9939 */ 9940 static int 9941 ctl_inquiry_std(struct ctl_scsiio *ctsio) 9942 { 9943 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9944 struct ctl_port *port = CTL_PORT(ctsio); 9945 struct ctl_lun *lun = CTL_LUN(ctsio); 9946 struct scsi_inquiry_data *inq_ptr; 9947 struct scsi_inquiry *cdb; 9948 char *val; 9949 uint32_t alloc_len, data_len; 9950 ctl_port_type port_type; 9951 9952 port_type = port->port_type; 9953 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 9954 port_type = CTL_PORT_SCSI; 9955 9956 cdb = (struct scsi_inquiry *)ctsio->cdb; 9957 alloc_len = scsi_2btoul(cdb->length); 9958 9959 /* 9960 * We malloc the full inquiry data size here and fill it 9961 * in. If the user only asks for less, we'll give him 9962 * that much. 
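 * (kern_data_len below is clamped to the smaller of what we build and the
 * allocation length from the CDB, so the frontend only moves that much.)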
9963 */ 9964 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); 9965 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9966 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 9967 ctsio->kern_sg_entries = 0; 9968 ctsio->kern_rel_offset = 0; 9969 ctsio->kern_data_len = min(data_len, alloc_len); 9970 ctsio->kern_total_len = ctsio->kern_data_len; 9971 9972 if (lun != NULL) { 9973 if ((lun->flags & CTL_LUN_PRIMARY_SC) || 9974 softc->ha_link >= CTL_HA_LINK_UNKNOWN) { 9975 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9976 lun->be_lun->lun_type; 9977 } else { 9978 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | 9979 lun->be_lun->lun_type; 9980 } 9981 if (lun->flags & CTL_LUN_REMOVABLE) 9982 inq_ptr->dev_qual2 |= SID_RMB; 9983 } else 9984 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 9985 9986 /* RMB in byte 2 is 0 */ 9987 inq_ptr->version = SCSI_REV_SPC5; 9988 9989 /* 9990 * According to SAM-3, even if a device only supports a single 9991 * level of LUN addressing, it should still set the HISUP bit: 9992 * 9993 * 4.9.1 Logical unit numbers overview 9994 * 9995 * All logical unit number formats described in this standard are 9996 * hierarchical in structure even when only a single level in that 9997 * hierarchy is used. The HISUP bit shall be set to one in the 9998 * standard INQUIRY data (see SPC-2) when any logical unit number 9999 * format described in this standard is used. Non-hierarchical 10000 * formats are outside the scope of this standard. 10001 * 10002 * Therefore we set the HiSup bit here. 10003 * 10004 * The response format is 2, per SPC-3. 10005 */ 10006 inq_ptr->response_format = SID_HiSup | 2; 10007 10008 inq_ptr->additional_length = data_len - 10009 (offsetof(struct scsi_inquiry_data, additional_length) + 1); 10010 CTL_DEBUG_PRINT(("additional_length = %d\n", 10011 inq_ptr->additional_length)); 10012 10013 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; 10014 if (port_type == CTL_PORT_SCSI) 10015 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10016 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10017 inq_ptr->flags = SID_CmdQue; 10018 if (port_type == CTL_PORT_SCSI) 10019 inq_ptr->flags |= SID_WBus16 | SID_Sync; 10020 10021 /* 10022 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10023 * We have 8 bytes for the vendor name, and 16 bytes for the device 10024 * name and 4 bytes for the revision. 
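 * The strncpy() calls below deliberately do not NUL-terminate: these are
 * fixed-width fields, and shorter user-supplied values are laid over the
 * space fill from the preceding memset().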
10025 */ 10026 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10027 "vendor")) == NULL) { 10028 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10029 } else { 10030 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10031 strncpy(inq_ptr->vendor, val, 10032 min(sizeof(inq_ptr->vendor), strlen(val))); 10033 } 10034 if (lun == NULL) { 10035 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10036 sizeof(inq_ptr->product)); 10037 } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) { 10038 switch (lun->be_lun->lun_type) { 10039 case T_DIRECT: 10040 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10041 sizeof(inq_ptr->product)); 10042 break; 10043 case T_PROCESSOR: 10044 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10045 sizeof(inq_ptr->product)); 10046 break; 10047 case T_CDROM: 10048 strncpy(inq_ptr->product, CTL_CDROM_PRODUCT, 10049 sizeof(inq_ptr->product)); 10050 break; 10051 default: 10052 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 10053 sizeof(inq_ptr->product)); 10054 break; 10055 } 10056 } else { 10057 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10058 strncpy(inq_ptr->product, val, 10059 min(sizeof(inq_ptr->product), strlen(val))); 10060 } 10061 10062 /* 10063 * XXX make this a macro somewhere so it automatically gets 10064 * incremented when we make changes. 10065 */ 10066 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10067 "revision")) == NULL) { 10068 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10069 } else { 10070 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10071 strncpy(inq_ptr->revision, val, 10072 min(sizeof(inq_ptr->revision), strlen(val))); 10073 } 10074 10075 /* 10076 * For parallel SCSI, we support double transition and single 10077 * transition clocking. We also support QAS (Quick Arbitration 10078 * and Selection) and Information Unit transfers on both the 10079 * control and array devices. 
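 * The version descriptors filled in below advertise SAM-6 and SPC-5, plus a
 * transport document matching the port type and a device-type document
 * (SBC-4 or MMC-6) for the LUN.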
10080 */ 10081 if (port_type == CTL_PORT_SCSI) 10082 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10083 SID_SPI_IUS; 10084 10085 /* SAM-6 (no version claimed) */ 10086 scsi_ulto2b(0x00C0, inq_ptr->version1); 10087 /* SPC-5 (no version claimed) */ 10088 scsi_ulto2b(0x05C0, inq_ptr->version2); 10089 if (port_type == CTL_PORT_FC) { 10090 /* FCP-2 ANSI INCITS.350:2003 */ 10091 scsi_ulto2b(0x0917, inq_ptr->version3); 10092 } else if (port_type == CTL_PORT_SCSI) { 10093 /* SPI-4 ANSI INCITS.362:200x */ 10094 scsi_ulto2b(0x0B56, inq_ptr->version3); 10095 } else if (port_type == CTL_PORT_ISCSI) { 10096 /* iSCSI (no version claimed) */ 10097 scsi_ulto2b(0x0960, inq_ptr->version3); 10098 } else if (port_type == CTL_PORT_SAS) { 10099 /* SAS (no version claimed) */ 10100 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10101 } else if (port_type == CTL_PORT_UMASS) { 10102 /* USB Mass Storage Class Bulk-Only Transport, Revision 1.0 */ 10103 scsi_ulto2b(0x1730, inq_ptr->version3); 10104 } 10105 10106 if (lun == NULL) { 10107 /* SBC-4 (no version claimed) */ 10108 scsi_ulto2b(0x0600, inq_ptr->version4); 10109 } else { 10110 switch (lun->be_lun->lun_type) { 10111 case T_DIRECT: 10112 /* SBC-4 (no version claimed) */ 10113 scsi_ulto2b(0x0600, inq_ptr->version4); 10114 break; 10115 case T_PROCESSOR: 10116 break; 10117 case T_CDROM: 10118 /* MMC-6 (no version claimed) */ 10119 scsi_ulto2b(0x04E0, inq_ptr->version4); 10120 break; 10121 default: 10122 break; 10123 } 10124 } 10125 10126 ctl_set_success(ctsio); 10127 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10128 ctsio->be_move_done = ctl_config_move_done; 10129 ctl_datamove((union ctl_io *)ctsio); 10130 return (CTL_RETVAL_COMPLETE); 10131 } 10132 10133 int 10134 ctl_inquiry(struct ctl_scsiio *ctsio) 10135 { 10136 struct scsi_inquiry *cdb; 10137 int retval; 10138 10139 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10140 10141 cdb = (struct scsi_inquiry *)ctsio->cdb; 10142 if (cdb->byte2 & SI_EVPD) 10143 retval = ctl_inquiry_evpd(ctsio); 10144 else if (cdb->page_code == 0) 10145 retval = ctl_inquiry_std(ctsio); 10146 else { 10147 ctl_set_invalid_field(ctsio, 10148 /*sks_valid*/ 1, 10149 /*command*/ 1, 10150 /*field*/ 2, 10151 /*bit_valid*/ 0, 10152 /*bit*/ 0); 10153 ctl_done((union ctl_io *)ctsio); 10154 return (CTL_RETVAL_COMPLETE); 10155 } 10156 10157 return (retval); 10158 } 10159 10160 int 10161 ctl_get_config(struct ctl_scsiio *ctsio) 10162 { 10163 struct ctl_lun *lun = CTL_LUN(ctsio); 10164 struct scsi_get_config_header *hdr; 10165 struct scsi_get_config_feature *feature; 10166 struct scsi_get_config *cdb; 10167 uint32_t alloc_len, data_len; 10168 int rt, starting; 10169 10170 cdb = (struct scsi_get_config *)ctsio->cdb; 10171 rt = (cdb->rt & SGC_RT_MASK); 10172 starting = scsi_2btoul(cdb->starting_feature); 10173 alloc_len = scsi_2btoul(cdb->length); 10174 10175 data_len = sizeof(struct scsi_get_config_header) + 10176 sizeof(struct scsi_get_config_feature) + 8 + 10177 sizeof(struct scsi_get_config_feature) + 8 + 10178 sizeof(struct scsi_get_config_feature) + 4 + 10179 sizeof(struct scsi_get_config_feature) + 4 + 10180 sizeof(struct scsi_get_config_feature) + 8 + 10181 sizeof(struct scsi_get_config_feature) + 10182 sizeof(struct scsi_get_config_feature) + 4 + 10183 sizeof(struct scsi_get_config_feature) + 4 + 10184 sizeof(struct scsi_get_config_feature) + 4 + 10185 sizeof(struct scsi_get_config_feature) + 4 + 10186 sizeof(struct scsi_get_config_feature) + 4 + 10187 sizeof(struct scsi_get_config_feature) + 4; 10188 ctsio->kern_data_ptr = malloc(data_len, M_CTL, 
M_WAITOK | M_ZERO); 10189 ctsio->kern_sg_entries = 0; 10190 ctsio->kern_rel_offset = 0; 10191 10192 hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr; 10193 if (lun->flags & CTL_LUN_NO_MEDIA) 10194 scsi_ulto2b(0x0000, hdr->current_profile); 10195 else 10196 scsi_ulto2b(0x0010, hdr->current_profile); 10197 feature = (struct scsi_get_config_feature *)(hdr + 1); 10198 10199 if (starting > 0x003b) 10200 goto done; 10201 if (starting > 0x003a) 10202 goto f3b; 10203 if (starting > 0x002b) 10204 goto f3a; 10205 if (starting > 0x002a) 10206 goto f2b; 10207 if (starting > 0x001f) 10208 goto f2a; 10209 if (starting > 0x001e) 10210 goto f1f; 10211 if (starting > 0x001d) 10212 goto f1e; 10213 if (starting > 0x0010) 10214 goto f1d; 10215 if (starting > 0x0003) 10216 goto f10; 10217 if (starting > 0x0002) 10218 goto f3; 10219 if (starting > 0x0001) 10220 goto f2; 10221 if (starting > 0x0000) 10222 goto f1; 10223 10224 /* Profile List */ 10225 scsi_ulto2b(0x0000, feature->feature_code); 10226 feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT; 10227 feature->add_length = 8; 10228 scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */ 10229 feature->feature_data[2] = 0x00; 10230 scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */ 10231 feature->feature_data[6] = 0x01; 10232 feature = (struct scsi_get_config_feature *) 10233 &feature->feature_data[feature->add_length]; 10234 10235 f1: /* Core */ 10236 scsi_ulto2b(0x0001, feature->feature_code); 10237 feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10238 feature->add_length = 8; 10239 scsi_ulto4b(0x00000000, &feature->feature_data[0]); 10240 feature->feature_data[4] = 0x03; 10241 feature = (struct scsi_get_config_feature *) 10242 &feature->feature_data[feature->add_length]; 10243 10244 f2: /* Morphing */ 10245 scsi_ulto2b(0x0002, feature->feature_code); 10246 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10247 feature->add_length = 4; 10248 feature->feature_data[0] = 0x02; 10249 feature = (struct scsi_get_config_feature *) 10250 &feature->feature_data[feature->add_length]; 10251 10252 f3: /* Removable Medium */ 10253 scsi_ulto2b(0x0003, feature->feature_code); 10254 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10255 feature->add_length = 4; 10256 feature->feature_data[0] = 0x39; 10257 feature = (struct scsi_get_config_feature *) 10258 &feature->feature_data[feature->add_length]; 10259 10260 if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA)) 10261 goto done; 10262 10263 f10: /* Random Read */ 10264 scsi_ulto2b(0x0010, feature->feature_code); 10265 feature->flags = 0x00; 10266 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10267 feature->flags |= SGC_F_CURRENT; 10268 feature->add_length = 8; 10269 scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]); 10270 scsi_ulto2b(1, &feature->feature_data[4]); 10271 feature->feature_data[6] = 0x00; 10272 feature = (struct scsi_get_config_feature *) 10273 &feature->feature_data[feature->add_length]; 10274 10275 f1d: /* Multi-Read */ 10276 scsi_ulto2b(0x001D, feature->feature_code); 10277 feature->flags = 0x00; 10278 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10279 feature->flags |= SGC_F_CURRENT; 10280 feature->add_length = 0; 10281 feature = (struct scsi_get_config_feature *) 10282 &feature->feature_data[feature->add_length]; 10283 10284 f1e: /* CD Read */ 10285 scsi_ulto2b(0x001E, feature->feature_code); 10286 feature->flags = 0x00; 10287 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10288 feature->flags |= SGC_F_CURRENT; 10289 feature->add_length = 
4; 10290 feature->feature_data[0] = 0x00; 10291 feature = (struct scsi_get_config_feature *) 10292 &feature->feature_data[feature->add_length]; 10293 10294 f1f: /* DVD Read */ 10295 scsi_ulto2b(0x001F, feature->feature_code); 10296 feature->flags = 0x08; 10297 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10298 feature->flags |= SGC_F_CURRENT; 10299 feature->add_length = 4; 10300 feature->feature_data[0] = 0x01; 10301 feature->feature_data[2] = 0x03; 10302 feature = (struct scsi_get_config_feature *) 10303 &feature->feature_data[feature->add_length]; 10304 10305 f2a: /* DVD+RW */ 10306 scsi_ulto2b(0x002A, feature->feature_code); 10307 feature->flags = 0x04; 10308 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10309 feature->flags |= SGC_F_CURRENT; 10310 feature->add_length = 4; 10311 feature->feature_data[0] = 0x00; 10312 feature->feature_data[1] = 0x00; 10313 feature = (struct scsi_get_config_feature *) 10314 &feature->feature_data[feature->add_length]; 10315 10316 f2b: /* DVD+R */ 10317 scsi_ulto2b(0x002B, feature->feature_code); 10318 feature->flags = 0x00; 10319 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10320 feature->flags |= SGC_F_CURRENT; 10321 feature->add_length = 4; 10322 feature->feature_data[0] = 0x00; 10323 feature = (struct scsi_get_config_feature *) 10324 &feature->feature_data[feature->add_length]; 10325 10326 f3a: /* DVD+RW Dual Layer */ 10327 scsi_ulto2b(0x003A, feature->feature_code); 10328 feature->flags = 0x00; 10329 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10330 feature->flags |= SGC_F_CURRENT; 10331 feature->add_length = 4; 10332 feature->feature_data[0] = 0x00; 10333 feature->feature_data[1] = 0x00; 10334 feature = (struct scsi_get_config_feature *) 10335 &feature->feature_data[feature->add_length]; 10336 10337 f3b: /* DVD+R Dual Layer */ 10338 scsi_ulto2b(0x003B, feature->feature_code); 10339 feature->flags = 0x00; 10340 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10341 feature->flags |= SGC_F_CURRENT; 10342 feature->add_length = 4; 10343 feature->feature_data[0] = 0x00; 10344 feature = (struct scsi_get_config_feature *) 10345 &feature->feature_data[feature->add_length]; 10346 10347 done: 10348 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10349 if (rt == SGC_RT_SPECIFIC && data_len > 4) { 10350 feature = (struct scsi_get_config_feature *)(hdr + 1); 10351 if (scsi_2btoul(feature->feature_code) == starting) 10352 feature = (struct scsi_get_config_feature *) 10353 &feature->feature_data[feature->add_length]; 10354 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10355 } 10356 scsi_ulto4b(data_len - 4, hdr->data_length); 10357 ctsio->kern_data_len = min(data_len, alloc_len); 10358 ctsio->kern_total_len = ctsio->kern_data_len; 10359 10360 ctl_set_success(ctsio); 10361 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10362 ctsio->be_move_done = ctl_config_move_done; 10363 ctl_datamove((union ctl_io *)ctsio); 10364 return (CTL_RETVAL_COMPLETE); 10365 } 10366 10367 int 10368 ctl_get_event_status(struct ctl_scsiio *ctsio) 10369 { 10370 struct scsi_get_event_status_header *hdr; 10371 struct scsi_get_event_status *cdb; 10372 uint32_t alloc_len, data_len; 10373 int notif_class; 10374 10375 cdb = (struct scsi_get_event_status *)ctsio->cdb; 10376 if ((cdb->byte2 & SGESN_POLLED) == 0) { 10377 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 10378 /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 10379 ctl_done((union ctl_io *)ctsio); 10380 return (CTL_RETVAL_COMPLETE); 10381 } 10382 notif_class = cdb->notif_class; 10383 alloc_len = scsi_2btoul(cdb->length); 10384 10385 data_len = 
sizeof(struct scsi_get_event_status_header); 10386 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10387 ctsio->kern_sg_entries = 0; 10388 ctsio->kern_rel_offset = 0; 10389 ctsio->kern_data_len = min(data_len, alloc_len); 10390 ctsio->kern_total_len = ctsio->kern_data_len; 10391 10392 hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr; 10393 scsi_ulto2b(0, hdr->descr_length); 10394 hdr->nea_class = SGESN_NEA; 10395 hdr->supported_class = 0; 10396 10397 ctl_set_success(ctsio); 10398 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10399 ctsio->be_move_done = ctl_config_move_done; 10400 ctl_datamove((union ctl_io *)ctsio); 10401 return (CTL_RETVAL_COMPLETE); 10402 } 10403 10404 int 10405 ctl_mechanism_status(struct ctl_scsiio *ctsio) 10406 { 10407 struct scsi_mechanism_status_header *hdr; 10408 struct scsi_mechanism_status *cdb; 10409 uint32_t alloc_len, data_len; 10410 10411 cdb = (struct scsi_mechanism_status *)ctsio->cdb; 10412 alloc_len = scsi_2btoul(cdb->length); 10413 10414 data_len = sizeof(struct scsi_mechanism_status_header); 10415 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10416 ctsio->kern_sg_entries = 0; 10417 ctsio->kern_rel_offset = 0; 10418 ctsio->kern_data_len = min(data_len, alloc_len); 10419 ctsio->kern_total_len = ctsio->kern_data_len; 10420 10421 hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr; 10422 hdr->state1 = 0x00; 10423 hdr->state2 = 0xe0; 10424 scsi_ulto3b(0, hdr->lba); 10425 hdr->slots_num = 0; 10426 scsi_ulto2b(0, hdr->slots_length); 10427 10428 ctl_set_success(ctsio); 10429 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10430 ctsio->be_move_done = ctl_config_move_done; 10431 ctl_datamove((union ctl_io *)ctsio); 10432 return (CTL_RETVAL_COMPLETE); 10433 } 10434 10435 static void 10436 ctl_ultomsf(uint32_t lba, uint8_t *buf) 10437 { 10438 10439 lba += 150; 10440 buf[0] = 0; 10441 buf[1] = bin2bcd((lba / 75) / 60); 10442 buf[2] = bin2bcd((lba / 75) % 60); 10443 buf[3] = bin2bcd(lba % 75); 10444 } 10445 10446 int 10447 ctl_read_toc(struct ctl_scsiio *ctsio) 10448 { 10449 struct ctl_lun *lun = CTL_LUN(ctsio); 10450 struct scsi_read_toc_hdr *hdr; 10451 struct scsi_read_toc_type01_descr *descr; 10452 struct scsi_read_toc *cdb; 10453 uint32_t alloc_len, data_len; 10454 int format, msf; 10455 10456 cdb = (struct scsi_read_toc *)ctsio->cdb; 10457 msf = (cdb->byte2 & CD_MSF) != 0; 10458 format = cdb->format; 10459 alloc_len = scsi_2btoul(cdb->data_len); 10460 10461 data_len = sizeof(struct scsi_read_toc_hdr); 10462 if (format == 0) 10463 data_len += 2 * sizeof(struct scsi_read_toc_type01_descr); 10464 else 10465 data_len += sizeof(struct scsi_read_toc_type01_descr); 10466 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10467 ctsio->kern_sg_entries = 0; 10468 ctsio->kern_rel_offset = 0; 10469 ctsio->kern_data_len = min(data_len, alloc_len); 10470 ctsio->kern_total_len = ctsio->kern_data_len; 10471 10472 hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr; 10473 if (format == 0) { 10474 scsi_ulto2b(0x12, hdr->data_length); 10475 hdr->first = 1; 10476 hdr->last = 1; 10477 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10478 descr->addr_ctl = 0x14; 10479 descr->track_number = 1; 10480 if (msf) 10481 ctl_ultomsf(0, descr->track_start); 10482 else 10483 scsi_ulto4b(0, descr->track_start); 10484 descr++; 10485 descr->addr_ctl = 0x14; 10486 descr->track_number = 0xaa; 10487 if (msf) 10488 ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start); 10489 else 10490 
scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start); 10491 } else { 10492 scsi_ulto2b(0x0a, hdr->data_length); 10493 hdr->first = 1; 10494 hdr->last = 1; 10495 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10496 descr->addr_ctl = 0x14; 10497 descr->track_number = 1; 10498 if (msf) 10499 ctl_ultomsf(0, descr->track_start); 10500 else 10501 scsi_ulto4b(0, descr->track_start); 10502 } 10503 10504 ctl_set_success(ctsio); 10505 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10506 ctsio->be_move_done = ctl_config_move_done; 10507 ctl_datamove((union ctl_io *)ctsio); 10508 return (CTL_RETVAL_COMPLETE); 10509 } 10510 10511 /* 10512 * For known CDB types, parse the LBA and length. 10513 */ 10514 static int 10515 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10516 { 10517 if (io->io_hdr.io_type != CTL_IO_SCSI) 10518 return (1); 10519 10520 switch (io->scsiio.cdb[0]) { 10521 case COMPARE_AND_WRITE: { 10522 struct scsi_compare_and_write *cdb; 10523 10524 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10525 10526 *lba = scsi_8btou64(cdb->addr); 10527 *len = cdb->length; 10528 break; 10529 } 10530 case READ_6: 10531 case WRITE_6: { 10532 struct scsi_rw_6 *cdb; 10533 10534 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10535 10536 *lba = scsi_3btoul(cdb->addr); 10537 /* only 5 bits are valid in the most significant address byte */ 10538 *lba &= 0x1fffff; 10539 *len = cdb->length; 10540 break; 10541 } 10542 case READ_10: 10543 case WRITE_10: { 10544 struct scsi_rw_10 *cdb; 10545 10546 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10547 10548 *lba = scsi_4btoul(cdb->addr); 10549 *len = scsi_2btoul(cdb->length); 10550 break; 10551 } 10552 case WRITE_VERIFY_10: { 10553 struct scsi_write_verify_10 *cdb; 10554 10555 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10556 10557 *lba = scsi_4btoul(cdb->addr); 10558 *len = scsi_2btoul(cdb->length); 10559 break; 10560 } 10561 case READ_12: 10562 case WRITE_12: { 10563 struct scsi_rw_12 *cdb; 10564 10565 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10566 10567 *lba = scsi_4btoul(cdb->addr); 10568 *len = scsi_4btoul(cdb->length); 10569 break; 10570 } 10571 case WRITE_VERIFY_12: { 10572 struct scsi_write_verify_12 *cdb; 10573 10574 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10575 10576 *lba = scsi_4btoul(cdb->addr); 10577 *len = scsi_4btoul(cdb->length); 10578 break; 10579 } 10580 case READ_16: 10581 case WRITE_16: { 10582 struct scsi_rw_16 *cdb; 10583 10584 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10585 10586 *lba = scsi_8btou64(cdb->addr); 10587 *len = scsi_4btoul(cdb->length); 10588 break; 10589 } 10590 case WRITE_ATOMIC_16: { 10591 struct scsi_write_atomic_16 *cdb; 10592 10593 cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb; 10594 10595 *lba = scsi_8btou64(cdb->addr); 10596 *len = scsi_2btoul(cdb->length); 10597 break; 10598 } 10599 case WRITE_VERIFY_16: { 10600 struct scsi_write_verify_16 *cdb; 10601 10602 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10603 10604 *lba = scsi_8btou64(cdb->addr); 10605 *len = scsi_4btoul(cdb->length); 10606 break; 10607 } 10608 case WRITE_SAME_10: { 10609 struct scsi_write_same_10 *cdb; 10610 10611 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10612 10613 *lba = scsi_4btoul(cdb->addr); 10614 *len = scsi_2btoul(cdb->length); 10615 break; 10616 } 10617 case WRITE_SAME_16: { 10618 struct scsi_write_same_16 *cdb; 10619 10620 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10621 10622 *lba = scsi_8btou64(cdb->addr); 10623 *len = scsi_4btoul(cdb->length); 10624 break; 10625 } 
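/* The VERIFY variants below carry a verification length in blocks and are parsed the same way as the corresponding reads and writes. */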
10626 case VERIFY_10: { 10627 struct scsi_verify_10 *cdb; 10628 10629 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10630 10631 *lba = scsi_4btoul(cdb->addr); 10632 *len = scsi_2btoul(cdb->length); 10633 break; 10634 } 10635 case VERIFY_12: { 10636 struct scsi_verify_12 *cdb; 10637 10638 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10639 10640 *lba = scsi_4btoul(cdb->addr); 10641 *len = scsi_4btoul(cdb->length); 10642 break; 10643 } 10644 case VERIFY_16: { 10645 struct scsi_verify_16 *cdb; 10646 10647 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10648 10649 *lba = scsi_8btou64(cdb->addr); 10650 *len = scsi_4btoul(cdb->length); 10651 break; 10652 } 10653 case UNMAP: { 10654 *lba = 0; 10655 *len = UINT64_MAX; 10656 break; 10657 } 10658 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10659 struct scsi_get_lba_status *cdb; 10660 10661 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 10662 *lba = scsi_8btou64(cdb->addr); 10663 *len = UINT32_MAX; 10664 break; 10665 } 10666 default: 10667 return (1); 10668 break; /* NOTREACHED */ 10669 } 10670 10671 return (0); 10672 } 10673 10674 static ctl_action 10675 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, 10676 bool seq) 10677 { 10678 uint64_t endlba1, endlba2; 10679 10680 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10681 endlba2 = lba2 + len2 - 1; 10682 10683 if ((endlba1 < lba2) || (endlba2 < lba1)) 10684 return (CTL_ACTION_PASS); 10685 else 10686 return (CTL_ACTION_BLOCK); 10687 } 10688 10689 static int 10690 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10691 { 10692 struct ctl_ptr_len_flags *ptrlen; 10693 struct scsi_unmap_desc *buf, *end, *range; 10694 uint64_t lba; 10695 uint32_t len; 10696 10697 /* If not UNMAP -- go other way. */ 10698 if (io->io_hdr.io_type != CTL_IO_SCSI || 10699 io->scsiio.cdb[0] != UNMAP) 10700 return (CTL_ACTION_ERROR); 10701 10702 /* If UNMAP without data -- block and wait for data. */ 10703 ptrlen = (struct ctl_ptr_len_flags *) 10704 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10705 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10706 ptrlen->ptr == NULL) 10707 return (CTL_ACTION_BLOCK); 10708 10709 /* UNMAP with data -- check for collision. 
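 * Each descriptor in the UNMAP parameter data is an 8-byte LBA followed by
 * a 4-byte block count; if any descriptor overlaps the [lba2, lba2 + len2)
 * range, the new I/O has to block.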
*/ 10710 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10711 end = buf + ptrlen->len / sizeof(*buf); 10712 for (range = buf; range < end; range++) { 10713 lba = scsi_8btou64(range->lba); 10714 len = scsi_4btoul(range->length); 10715 if ((lba < lba2 + len2) && (lba + len > lba2)) 10716 return (CTL_ACTION_BLOCK); 10717 } 10718 return (CTL_ACTION_PASS); 10719 } 10720 10721 static ctl_action 10722 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10723 { 10724 uint64_t lba1, lba2; 10725 uint64_t len1, len2; 10726 int retval; 10727 10728 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10729 return (CTL_ACTION_ERROR); 10730 10731 retval = ctl_extent_check_unmap(io1, lba2, len2); 10732 if (retval != CTL_ACTION_ERROR) 10733 return (retval); 10734 10735 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10736 return (CTL_ACTION_ERROR); 10737 10738 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10739 seq = FALSE; 10740 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 10741 } 10742 10743 static ctl_action 10744 ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) 10745 { 10746 uint64_t lba1, lba2; 10747 uint64_t len1, len2; 10748 10749 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10750 return (CTL_ACTION_PASS); 10751 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10752 return (CTL_ACTION_ERROR); 10753 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10754 return (CTL_ACTION_ERROR); 10755 10756 if (lba1 + len1 == lba2) 10757 return (CTL_ACTION_BLOCK); 10758 return (CTL_ACTION_PASS); 10759 } 10760 10761 static ctl_action 10762 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 10763 union ctl_io *ooa_io) 10764 { 10765 const struct ctl_cmd_entry *pending_entry, *ooa_entry; 10766 const ctl_serialize_action *serialize_row; 10767 10768 /* 10769 * The initiator attempted multiple untagged commands at the same 10770 * time. Can't do that. 10771 */ 10772 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10773 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10774 && ((pending_io->io_hdr.nexus.targ_port == 10775 ooa_io->io_hdr.nexus.targ_port) 10776 && (pending_io->io_hdr.nexus.initid == 10777 ooa_io->io_hdr.nexus.initid)) 10778 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10779 CTL_FLAG_STATUS_SENT)) == 0)) 10780 return (CTL_ACTION_OVERLAP); 10781 10782 /* 10783 * The initiator attempted to send multiple tagged commands with 10784 * the same ID. (It's fine if different initiators have the same 10785 * tag ID.) 10786 * 10787 * Even if all of those conditions are true, we don't kill the I/O 10788 * if the command ahead of us has been aborted. We won't end up 10789 * sending it to the FETD, and it's perfectly legal to resend a 10790 * command with the same tag number as long as the previous 10791 * instance of this tag number has been aborted somehow. 10792 */ 10793 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10794 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10795 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 10796 && ((pending_io->io_hdr.nexus.targ_port == 10797 ooa_io->io_hdr.nexus.targ_port) 10798 && (pending_io->io_hdr.nexus.initid == 10799 ooa_io->io_hdr.nexus.initid)) 10800 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10801 CTL_FLAG_STATUS_SENT)) == 0)) 10802 return (CTL_ACTION_OVERLAP_TAG); 10803 10804 /* 10805 * If we get a head of queue tag, SAM-3 says that we should 10806 * immediately execute it. 10807 * 10808 * What happens if this command would normally block for some other 10809 * reason? e.g. 
a request sense with a head of queue tag 10810 * immediately after a write. Normally that would block, but this 10811 * will result in its getting executed immediately... 10812 * 10813 * We currently return "pass" instead of "skip", so we'll end up 10814 * going through the rest of the queue to check for overlapped tags. 10815 * 10816 * XXX KDM check for other types of blockage first?? 10817 */ 10818 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10819 return (CTL_ACTION_PASS); 10820 10821 /* 10822 * Ordered tags have to block until all items ahead of them 10823 * have completed. If we get called with an ordered tag, we always 10824 * block, if something else is ahead of us in the queue. 10825 */ 10826 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 10827 return (CTL_ACTION_BLOCK); 10828 10829 /* 10830 * Simple tags get blocked until all head of queue and ordered tags 10831 * ahead of them have completed. I'm lumping untagged commands in 10832 * with simple tags here. XXX KDM is that the right thing to do? 10833 */ 10834 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10835 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 10836 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10837 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 10838 return (CTL_ACTION_BLOCK); 10839 10840 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); 10841 KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT, 10842 ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p", 10843 __func__, pending_entry->seridx, pending_io->scsiio.cdb[0], 10844 pending_io->scsiio.cdb[1], pending_io)); 10845 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); 10846 if (ooa_entry->seridx == CTL_SERIDX_INVLD) 10847 return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */ 10848 KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT, 10849 ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p", 10850 __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0], 10851 ooa_io->scsiio.cdb[1], ooa_io)); 10852 10853 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 10854 10855 switch (serialize_row[pending_entry->seridx]) { 10856 case CTL_SER_BLOCK: 10857 return (CTL_ACTION_BLOCK); 10858 case CTL_SER_EXTENT: 10859 return (ctl_extent_check(ooa_io, pending_io, 10860 (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10861 case CTL_SER_EXTENTOPT: 10862 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != 10863 SCP_QUEUE_ALG_UNRESTRICTED) 10864 return (ctl_extent_check(ooa_io, pending_io, 10865 (lun->be_lun && 10866 lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10867 return (CTL_ACTION_PASS); 10868 case CTL_SER_EXTENTSEQ: 10869 if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) 10870 return (ctl_extent_check_seq(ooa_io, pending_io)); 10871 return (CTL_ACTION_PASS); 10872 case CTL_SER_PASS: 10873 return (CTL_ACTION_PASS); 10874 case CTL_SER_BLOCKOPT: 10875 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != 10876 SCP_QUEUE_ALG_UNRESTRICTED) 10877 return (CTL_ACTION_BLOCK); 10878 return (CTL_ACTION_PASS); 10879 case CTL_SER_SKIP: 10880 return (CTL_ACTION_SKIP); 10881 default: 10882 panic("%s: Invalid serialization value %d for %d => %d", 10883 __func__, serialize_row[pending_entry->seridx], 10884 pending_entry->seridx, ooa_entry->seridx); 10885 } 10886 10887 return (CTL_ACTION_ERROR); 10888 } 10889 10890 /* 10891 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 
10892 * Assumptions: 10893 * - pending_io is generally either incoming, or on the blocked queue 10894 * - starting I/O is the I/O we want to start the check with. 10895 */ 10896 static ctl_action 10897 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 10898 union ctl_io *starting_io) 10899 { 10900 union ctl_io *ooa_io; 10901 ctl_action action; 10902 10903 mtx_assert(&lun->lun_lock, MA_OWNED); 10904 10905 /* 10906 * Run back along the OOA queue, starting with the current 10907 * blocked I/O and going through every I/O before it on the 10908 * queue. If starting_io is NULL, we'll just end up returning 10909 * CTL_ACTION_PASS. 10910 */ 10911 for (ooa_io = starting_io; ooa_io != NULL; 10912 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 10913 ooa_links)){ 10914 10915 /* 10916 * This routine just checks to see whether 10917 * cur_blocked is blocked by ooa_io, which is ahead 10918 * of it in the queue. It doesn't queue/dequeue 10919 * cur_blocked. 10920 */ 10921 action = ctl_check_for_blockage(lun, pending_io, ooa_io); 10922 switch (action) { 10923 case CTL_ACTION_BLOCK: 10924 case CTL_ACTION_OVERLAP: 10925 case CTL_ACTION_OVERLAP_TAG: 10926 case CTL_ACTION_SKIP: 10927 case CTL_ACTION_ERROR: 10928 return (action); 10929 break; /* NOTREACHED */ 10930 case CTL_ACTION_PASS: 10931 break; 10932 default: 10933 panic("%s: Invalid action %d\n", __func__, action); 10934 } 10935 } 10936 10937 return (CTL_ACTION_PASS); 10938 } 10939 10940 /* 10941 * Assumptions: 10942 * - An I/O has just completed, and has been removed from the per-LUN OOA 10943 * queue, so some items on the blocked queue may now be unblocked. 10944 */ 10945 static int 10946 ctl_check_blocked(struct ctl_lun *lun) 10947 { 10948 struct ctl_softc *softc = lun->ctl_softc; 10949 union ctl_io *cur_blocked, *next_blocked; 10950 10951 mtx_assert(&lun->lun_lock, MA_OWNED); 10952 10953 /* 10954 * Run forward from the head of the blocked queue, checking each 10955 * entry against the I/Os prior to it on the OOA queue to see if 10956 * there is still any blockage. 10957 * 10958 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 10959 * with our removing a variable on it while it is traversing the 10960 * list. 10961 */ 10962 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 10963 cur_blocked != NULL; cur_blocked = next_blocked) { 10964 union ctl_io *prev_ooa; 10965 ctl_action action; 10966 10967 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 10968 blocked_links); 10969 10970 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 10971 ctl_ooaq, ooa_links); 10972 10973 /* 10974 * If cur_blocked happens to be the first item in the OOA 10975 * queue now, prev_ooa will be NULL, and the action 10976 * returned will just be CTL_ACTION_PASS. 10977 */ 10978 action = ctl_check_ooa(lun, cur_blocked, prev_ooa); 10979 10980 switch (action) { 10981 case CTL_ACTION_BLOCK: 10982 /* Nothing to do here, still blocked */ 10983 break; 10984 case CTL_ACTION_OVERLAP: 10985 case CTL_ACTION_OVERLAP_TAG: 10986 /* 10987 * This shouldn't happen! In theory we've already 10988 * checked this command for overlap... 10989 */ 10990 break; 10991 case CTL_ACTION_PASS: 10992 case CTL_ACTION_SKIP: { 10993 const struct ctl_cmd_entry *entry; 10994 10995 /* 10996 * The skip case shouldn't happen, this transaction 10997 * should have never made it onto the blocked queue. 10998 */ 10999 /* 11000 * This I/O is no longer blocked, we can remove it 11001 * from the blocked queue. 
Since this is a TAILQ 11002 * (doubly linked list), we can do O(1) removals 11003 * from any place on the list. 11004 */ 11005 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 11006 blocked_links); 11007 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 11008 11009 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 11010 (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){ 11011 /* 11012 * Need to send IO back to original side to 11013 * run 11014 */ 11015 union ctl_ha_msg msg_info; 11016 11017 cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11018 msg_info.hdr.original_sc = 11019 cur_blocked->io_hdr.original_sc; 11020 msg_info.hdr.serializing_sc = cur_blocked; 11021 msg_info.hdr.msg_type = CTL_MSG_R2R; 11022 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11023 sizeof(msg_info.hdr), M_NOWAIT); 11024 break; 11025 } 11026 entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL); 11027 11028 /* 11029 * Check this I/O for LUN state changes that may 11030 * have happened while this command was blocked. 11031 * The LUN state may have been changed by a command 11032 * ahead of us in the queue, so we need to re-check 11033 * for any states that can be caused by SCSI 11034 * commands. 11035 */ 11036 if (ctl_scsiio_lun_check(lun, entry, 11037 &cur_blocked->scsiio) == 0) { 11038 cur_blocked->io_hdr.flags |= 11039 CTL_FLAG_IS_WAS_ON_RTR; 11040 ctl_enqueue_rtr(cur_blocked); 11041 } else 11042 ctl_done(cur_blocked); 11043 break; 11044 } 11045 default: 11046 /* 11047 * This probably shouldn't happen -- we shouldn't 11048 * get CTL_ACTION_ERROR, or anything else. 11049 */ 11050 break; 11051 } 11052 } 11053 11054 return (CTL_RETVAL_COMPLETE); 11055 } 11056 11057 /* 11058 * This routine (with one exception) checks LUN flags that can be set by 11059 * commands ahead of us in the OOA queue. These flags have to be checked 11060 * when a command initially comes in, and when we pull a command off the 11061 * blocked queue and are preparing to execute it. The reason we have to 11062 * check these flags for commands on the blocked queue is that the LUN 11063 * state may have been changed by a command ahead of us while we're on the 11064 * blocked queue. 11065 * 11066 * Ordering is somewhat important with these checks, so please pay 11067 * careful attention to the placement of any new checks. 11068 */ 11069 static int 11070 ctl_scsiio_lun_check(struct ctl_lun *lun, 11071 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 11072 { 11073 struct ctl_softc *softc = lun->ctl_softc; 11074 int retval; 11075 uint32_t residx; 11076 11077 retval = 0; 11078 11079 mtx_assert(&lun->lun_lock, MA_OWNED); 11080 11081 /* 11082 * If this shelf is a secondary shelf controller, we may have to 11083 * reject some commands disallowed by HA mode and link state. 
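 * The checks below reject the command when the HA link is offline, when the
 * peer has not yet taken over as primary, or when this side is the standby
 * in active/standby mode, unless the command entry explicitly allows running
 * in those states.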
11084 */ 11085 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11086 if (softc->ha_link == CTL_HA_LINK_OFFLINE && 11087 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11088 ctl_set_lun_unavail(ctsio); 11089 retval = 1; 11090 goto bailout; 11091 } 11092 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && 11093 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11094 ctl_set_lun_transit(ctsio); 11095 retval = 1; 11096 goto bailout; 11097 } 11098 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && 11099 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { 11100 ctl_set_lun_standby(ctsio); 11101 retval = 1; 11102 goto bailout; 11103 } 11104 11105 /* The rest of checks are only done on executing side */ 11106 if (softc->ha_mode == CTL_HA_MODE_XFER) 11107 goto bailout; 11108 } 11109 11110 if (entry->pattern & CTL_LUN_PAT_WRITE) { 11111 if (lun->be_lun && 11112 lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { 11113 ctl_set_hw_write_protected(ctsio); 11114 retval = 1; 11115 goto bailout; 11116 } 11117 if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) { 11118 ctl_set_sense(ctsio, /*current_error*/ 1, 11119 /*sense_key*/ SSD_KEY_DATA_PROTECT, 11120 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 11121 retval = 1; 11122 goto bailout; 11123 } 11124 } 11125 11126 /* 11127 * Check for a reservation conflict. If this command isn't allowed 11128 * even on reserved LUNs, and if this initiator isn't the one who 11129 * reserved us, reject the command with a reservation conflict. 11130 */ 11131 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11132 if ((lun->flags & CTL_LUN_RESERVED) 11133 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 11134 if (lun->res_idx != residx) { 11135 ctl_set_reservation_conflict(ctsio); 11136 retval = 1; 11137 goto bailout; 11138 } 11139 } 11140 11141 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 11142 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 11143 /* No reservation or command is allowed. */; 11144 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 11145 (lun->pr_res_type == SPR_TYPE_WR_EX || 11146 lun->pr_res_type == SPR_TYPE_WR_EX_RO || 11147 lun->pr_res_type == SPR_TYPE_WR_EX_AR)) { 11148 /* The command is allowed for Write Exclusive resv. */; 11149 } else { 11150 /* 11151 * if we aren't registered or it's a res holder type 11152 * reservation and this isn't the res holder then set a 11153 * conflict. 
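 * For the single-holder reservation types (Write Exclusive and Exclusive
 * Access) only the holder gets through; the registrants-only and
 * all-registrants types just require that this initiator hold a
 * registration key, which the key check covers.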
11154 */ 11155 if (ctl_get_prkey(lun, residx) == 0 || 11156 (residx != lun->pr_res_idx && lun->pr_res_type < 4)) { 11157 ctl_set_reservation_conflict(ctsio); 11158 retval = 1; 11159 goto bailout; 11160 } 11161 } 11162 11163 if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) { 11164 if (lun->flags & CTL_LUN_EJECTED) 11165 ctl_set_lun_ejected(ctsio); 11166 else if (lun->flags & CTL_LUN_NO_MEDIA) { 11167 if (lun->flags & CTL_LUN_REMOVABLE) 11168 ctl_set_lun_no_media(ctsio); 11169 else 11170 ctl_set_lun_int_reqd(ctsio); 11171 } else if (lun->flags & CTL_LUN_STOPPED) 11172 ctl_set_lun_stopped(ctsio); 11173 else 11174 goto bailout; 11175 retval = 1; 11176 goto bailout; 11177 } 11178 11179 bailout: 11180 return (retval); 11181 } 11182 11183 static void 11184 ctl_failover_io(union ctl_io *io, int have_lock) 11185 { 11186 ctl_set_busy(&io->scsiio); 11187 ctl_done(io); 11188 } 11189 11190 static void 11191 ctl_failover_lun(union ctl_io *rio) 11192 { 11193 struct ctl_softc *softc = CTL_SOFTC(rio); 11194 struct ctl_lun *lun; 11195 struct ctl_io_hdr *io, *next_io; 11196 uint32_t targ_lun; 11197 11198 targ_lun = rio->io_hdr.nexus.targ_mapped_lun; 11199 CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", targ_lun)); 11200 11201 /* Find and lock the LUN. */ 11202 mtx_lock(&softc->ctl_lock); 11203 if (targ_lun > CTL_MAX_LUNS || 11204 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11205 mtx_unlock(&softc->ctl_lock); 11206 return; 11207 } 11208 mtx_lock(&lun->lun_lock); 11209 mtx_unlock(&softc->ctl_lock); 11210 if (lun->flags & CTL_LUN_DISABLED) { 11211 mtx_unlock(&lun->lun_lock); 11212 return; 11213 } 11214 11215 if (softc->ha_mode == CTL_HA_MODE_XFER) { 11216 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11217 /* We are master */ 11218 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11219 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11220 io->flags |= CTL_FLAG_ABORT; 11221 io->flags |= CTL_FLAG_FAILOVER; 11222 } else { /* This can be only due to DATAMOVE */ 11223 io->msg_type = CTL_MSG_DATAMOVE_DONE; 11224 io->flags &= ~CTL_FLAG_DMA_INPROG; 11225 io->flags |= CTL_FLAG_IO_ACTIVE; 11226 io->port_status = 31340; 11227 ctl_enqueue_isc((union ctl_io *)io); 11228 } 11229 } 11230 /* We are slave */ 11231 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11232 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11233 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11234 io->flags |= CTL_FLAG_FAILOVER; 11235 } else { 11236 ctl_set_busy(&((union ctl_io *)io)-> 11237 scsiio); 11238 ctl_done((union ctl_io *)io); 11239 } 11240 } 11241 } 11242 } else { /* SERIALIZE modes */ 11243 TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links, 11244 next_io) { 11245 /* We are master */ 11246 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11247 TAILQ_REMOVE(&lun->blocked_queue, io, 11248 blocked_links); 11249 io->flags &= ~CTL_FLAG_BLOCKED; 11250 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11251 ctl_free_io((union ctl_io *)io); 11252 } 11253 } 11254 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11255 /* We are master */ 11256 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11257 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11258 ctl_free_io((union ctl_io *)io); 11259 } 11260 /* We are slave */ 11261 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11262 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11263 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { 11264 ctl_set_busy(&((union ctl_io *)io)-> 11265 scsiio); 11266 ctl_done((union ctl_io *)io); 11267 } 11268 } 11269 } 11270 ctl_check_blocked(lun); 11271 } 11272 mtx_unlock(&lun->lun_lock); 11273 } 11274 11275 static int 
11276 ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
11277 {
11278 struct ctl_lun *lun;
11279 const struct ctl_cmd_entry *entry;
11280 uint32_t initidx, targ_lun;
11281 int retval = 0;
11282
11283 lun = NULL;
11284 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
11285 if (targ_lun < CTL_MAX_LUNS)
11286 lun = softc->ctl_luns[targ_lun];
11287 if (lun) {
11288 /*
11289 * If the LUN is invalid, pretend that it doesn't exist.
11290 * It will go away as soon as all pending I/O has been
11291 * completed.
11292 */
11293 mtx_lock(&lun->lun_lock);
11294 if (lun->flags & CTL_LUN_DISABLED) {
11295 mtx_unlock(&lun->lun_lock);
11296 lun = NULL;
11297 }
11298 }
11299 CTL_LUN(ctsio) = lun;
11300 if (lun) {
11301 CTL_BACKEND_LUN(ctsio) = lun->be_lun;
11302
11303 /*
11304 * Every I/O goes into the OOA queue for a particular LUN,
11305 * and stays there until completion.
11306 */
11307 #ifdef CTL_TIME_IO
11308 if (TAILQ_EMPTY(&lun->ooa_queue))
11309 lun->idle_time += getsbinuptime() - lun->last_busy;
11310 #endif
11311 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
11312 }
11313
11314 /* Get command entry and return error if it is unsupported. */
11315 entry = ctl_validate_command(ctsio);
11316 if (entry == NULL) {
11317 if (lun)
11318 mtx_unlock(&lun->lun_lock);
11319 return (retval);
11320 }
11321
11322 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
11323 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;
11324
11325 /*
11326 * Check to see whether we can send this command to LUNs that don't
11327 * exist. This should pretty much only be the case for inquiry
11328 * and request sense. Further checks, below, really require having
11329 * a LUN, so we can't really check the command anymore. Just put
11330 * it on the rtr queue.
11331 */
11332 if (lun == NULL) {
11333 if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) {
11334 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11335 ctl_enqueue_rtr((union ctl_io *)ctsio);
11336 return (retval);
11337 }
11338
11339 ctl_set_unsupported_lun(ctsio);
11340 ctl_done((union ctl_io *)ctsio);
11341 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
11342 return (retval);
11343 } else {
11344 /*
11345 * Make sure we support this particular command on this LUN.
11346 * e.g., we don't support writes to the control LUN.
11347 */
11348 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
11349 mtx_unlock(&lun->lun_lock);
11350 ctl_set_invalid_opcode(ctsio);
11351 ctl_done((union ctl_io *)ctsio);
11352 return (retval);
11353 }
11354 }
11355
11356 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
11357
11358 /*
11359 * If we've got a request sense, it'll clear the contingent
11360 * allegiance condition. Otherwise, if we have a CA condition for
11361 * this initiator, clear it, because it sent down a command other
11362 * than request sense.
11363 */
11364 if (ctsio->cdb[0] != REQUEST_SENSE) {
11365 struct scsi_sense_data *ps;
11366
11367 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT];
11368 if (ps != NULL)
11369 ps[initidx % CTL_MAX_INIT_PER_PORT].error_code = 0;
11370 }
11371
11372 /*
11373 * If the command has this flag set, it handles its own unit
11374 * attention reporting, so we shouldn't do anything. Otherwise we
11375 * check for any pending unit attentions, and send them back to the
11376 * initiator. We only do this when a command initially comes in,
11377 * not when we pull it off the blocked queue.
11378 * 11379 * According to SAM-3, section 5.3.2, the order that things get 11380 * presented back to the host is basically unit attentions caused 11381 * by some sort of reset event, busy status, reservation conflicts 11382 * or task set full, and finally any other status. 11383 * 11384 * One issue here is that some of the unit attentions we report 11385 * don't fall into the "reset" category (e.g. "reported luns data 11386 * has changed"). So reporting it here, before the reservation 11387 * check, may be technically wrong. I guess the only thing to do 11388 * would be to check for and report the reset events here, and then 11389 * check for the other unit attention types after we check for a 11390 * reservation conflict. 11391 * 11392 * XXX KDM need to fix this 11393 */ 11394 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11395 ctl_ua_type ua_type; 11396 u_int sense_len = 0; 11397 11398 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, 11399 &sense_len, SSD_TYPE_NONE); 11400 if (ua_type != CTL_UA_NONE) { 11401 mtx_unlock(&lun->lun_lock); 11402 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11403 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11404 ctsio->sense_len = sense_len; 11405 ctl_done((union ctl_io *)ctsio); 11406 return (retval); 11407 } 11408 } 11409 11410 11411 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 11412 mtx_unlock(&lun->lun_lock); 11413 ctl_done((union ctl_io *)ctsio); 11414 return (retval); 11415 } 11416 11417 /* 11418 * XXX CHD this is where we want to send IO to other side if 11419 * this LUN is secondary on this SC. We will need to make a copy 11420 * of the IO and flag the IO on this side as SENT_2OTHER and flag 11421 * the copy we send as FROM_OTHER. 11422 * We also need to stuff the address of the original IO so we can 11423 * find it easily. Something similar will need to be done on the other 11424 * side so that when we are done we can find the copy.
11425 */ 11426 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 11427 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && 11428 (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { 11429 union ctl_ha_msg msg_info; 11430 int isc_retval; 11431 11432 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11433 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11434 mtx_unlock(&lun->lun_lock); 11435 11436 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11437 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11438 msg_info.hdr.serializing_sc = NULL; 11439 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11440 msg_info.scsi.tag_num = ctsio->tag_num; 11441 msg_info.scsi.tag_type = ctsio->tag_type; 11442 msg_info.scsi.cdb_len = ctsio->cdb_len; 11443 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11444 11445 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11446 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), 11447 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { 11448 ctl_set_busy(ctsio); 11449 ctl_done((union ctl_io *)ctsio); 11450 return (retval); 11451 } 11452 return (retval); 11453 } 11454 11455 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11456 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11457 ctl_ooaq, ooa_links))) { 11458 case CTL_ACTION_BLOCK: 11459 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 11460 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 11461 blocked_links); 11462 mtx_unlock(&lun->lun_lock); 11463 return (retval); 11464 case CTL_ACTION_PASS: 11465 case CTL_ACTION_SKIP: 11466 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11467 mtx_unlock(&lun->lun_lock); 11468 ctl_enqueue_rtr((union ctl_io *)ctsio); 11469 break; 11470 case CTL_ACTION_OVERLAP: 11471 mtx_unlock(&lun->lun_lock); 11472 ctl_set_overlapped_cmd(ctsio); 11473 ctl_done((union ctl_io *)ctsio); 11474 break; 11475 case CTL_ACTION_OVERLAP_TAG: 11476 mtx_unlock(&lun->lun_lock); 11477 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11478 ctl_done((union ctl_io *)ctsio); 11479 break; 11480 case CTL_ACTION_ERROR: 11481 default: 11482 mtx_unlock(&lun->lun_lock); 11483 ctl_set_internal_failure(ctsio, 11484 /*sks_valid*/ 0, 11485 /*retry_count*/ 0); 11486 ctl_done((union ctl_io *)ctsio); 11487 break; 11488 } 11489 return (retval); 11490 } 11491 11492 const struct ctl_cmd_entry * 11493 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11494 { 11495 const struct ctl_cmd_entry *entry; 11496 int service_action; 11497 11498 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11499 if (sa) 11500 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11501 if (entry->flags & CTL_CMD_FLAG_SA5) { 11502 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11503 entry = &((const struct ctl_cmd_entry *) 11504 entry->execute)[service_action]; 11505 } 11506 return (entry); 11507 } 11508 11509 const struct ctl_cmd_entry * 11510 ctl_validate_command(struct ctl_scsiio *ctsio) 11511 { 11512 const struct ctl_cmd_entry *entry; 11513 int i, sa; 11514 uint8_t diff; 11515 11516 entry = ctl_get_cmd_entry(ctsio, &sa); 11517 if (entry->execute == NULL) { 11518 if (sa) 11519 ctl_set_invalid_field(ctsio, 11520 /*sks_valid*/ 1, 11521 /*command*/ 1, 11522 /*field*/ 1, 11523 /*bit_valid*/ 1, 11524 /*bit*/ 4); 11525 else 11526 ctl_set_invalid_opcode(ctsio); 11527 ctl_done((union ctl_io *)ctsio); 11528 return (NULL); 11529 } 11530 KASSERT(entry->length > 0, 11531 ("Not defined length for command 0x%02x/0x%02x", 11532 ctsio->cdb[0], ctsio->cdb[1])); 11533 for (i = 1; i < entry->length; i++) { 11534 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11535 if (diff == 0) 11536 continue; 11537 
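		/*
		 * One or more bits are set in this CDB byte that the usage
		 * mask for this command does not allow; report the offending
		 * field and the highest such bit.
		 */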
ctl_set_invalid_field(ctsio, 11538 /*sks_valid*/ 1, 11539 /*command*/ 1, 11540 /*field*/ i, 11541 /*bit_valid*/ 1, 11542 /*bit*/ fls(diff) - 1); 11543 ctl_done((union ctl_io *)ctsio); 11544 return (NULL); 11545 } 11546 return (entry); 11547 } 11548 11549 static int 11550 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11551 { 11552 11553 switch (lun_type) { 11554 case T_DIRECT: 11555 if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0) 11556 return (0); 11557 break; 11558 case T_PROCESSOR: 11559 if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) 11560 return (0); 11561 break; 11562 case T_CDROM: 11563 if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0) 11564 return (0); 11565 break; 11566 default: 11567 return (0); 11568 } 11569 return (1); 11570 } 11571 11572 static int 11573 ctl_scsiio(struct ctl_scsiio *ctsio) 11574 { 11575 int retval; 11576 const struct ctl_cmd_entry *entry; 11577 11578 retval = CTL_RETVAL_COMPLETE; 11579 11580 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11581 11582 entry = ctl_get_cmd_entry(ctsio, NULL); 11583 11584 /* 11585 * If this I/O has been aborted, just send it straight to 11586 * ctl_done() without executing it. 11587 */ 11588 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11589 ctl_done((union ctl_io *)ctsio); 11590 goto bailout; 11591 } 11592 11593 /* 11594 * All the checks should have been handled by ctl_scsiio_precheck(). 11595 * We should be clear now to just execute the I/O. 11596 */ 11597 retval = entry->execute(ctsio); 11598 11599 bailout: 11600 return (retval); 11601 } 11602 11603 static int 11604 ctl_target_reset(union ctl_io *io) 11605 { 11606 struct ctl_softc *softc = CTL_SOFTC(io); 11607 struct ctl_port *port = CTL_PORT(io); 11608 struct ctl_lun *lun; 11609 uint32_t initidx; 11610 ctl_ua_type ua_type; 11611 11612 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11613 union ctl_ha_msg msg_info; 11614 11615 msg_info.hdr.nexus = io->io_hdr.nexus; 11616 msg_info.task.task_action = io->taskio.task_action; 11617 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11618 msg_info.hdr.original_sc = NULL; 11619 msg_info.hdr.serializing_sc = NULL; 11620 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11621 sizeof(msg_info.task), M_WAITOK); 11622 } 11623 11624 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11625 if (io->taskio.task_action == CTL_TASK_TARGET_RESET) 11626 ua_type = CTL_UA_TARG_RESET; 11627 else 11628 ua_type = CTL_UA_BUS_RESET; 11629 mtx_lock(&softc->ctl_lock); 11630 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11631 if (port != NULL && 11632 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 11633 continue; 11634 ctl_do_lun_reset(lun, initidx, ua_type); 11635 } 11636 mtx_unlock(&softc->ctl_lock); 11637 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11638 return (0); 11639 } 11640 11641 /* 11642 * The LUN should always be set. The I/O is optional, and is used to 11643 * distinguish between I/Os sent by this initiator, and by other 11644 * initiators. We set unit attention for initiators other than this one. 11645 * SAM-3 is vague on this point. It does say that a unit attention should 11646 * be established for other initiators when a LUN is reset (see section 11647 * 5.7.3), but it doesn't specifically say that the unit attention should 11648 * be established for this particular initiator when a LUN is reset. 
Here 11649 * is the relevant text, from SAM-3 rev 8: 11650 * 11651 * 5.7.2 When a SCSI initiator port aborts its own tasks 11652 * 11653 * When a SCSI initiator port causes its own task(s) to be aborted, no 11654 * notification that the task(s) have been aborted shall be returned to 11655 * the SCSI initiator port other than the completion response for the 11656 * command or task management function action that caused the task(s) to 11657 * be aborted and notification(s) associated with related effects of the 11658 * action (e.g., a reset unit attention condition). 11659 * 11660 * XXX KDM for now, we're setting unit attention for all initiators. 11661 */ 11662 static void 11663 ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type) 11664 { 11665 union ctl_io *xio; 11666 int i; 11667 11668 mtx_lock(&lun->lun_lock); 11669 /* Abort tasks. */ 11670 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11671 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11672 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; 11673 } 11674 /* Clear CA. */ 11675 for (i = 0; i < CTL_MAX_PORTS; i++) { 11676 free(lun->pending_sense[i], M_CTL); 11677 lun->pending_sense[i] = NULL; 11678 } 11679 /* Clear reservation. */ 11680 lun->flags &= ~CTL_LUN_RESERVED; 11681 /* Clear prevent media removal. */ 11682 if (lun->prevent) { 11683 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11684 ctl_clear_mask(lun->prevent, i); 11685 lun->prevent_count = 0; 11686 } 11687 /* Clear TPC status */ 11688 ctl_tpc_lun_clear(lun, -1); 11689 /* Establish UA. */ 11690 #if 0 11691 ctl_est_ua_all(lun, initidx, ua_type); 11692 #else 11693 ctl_est_ua_all(lun, -1, ua_type); 11694 #endif 11695 mtx_unlock(&lun->lun_lock); 11696 } 11697 11698 static int 11699 ctl_lun_reset(union ctl_io *io) 11700 { 11701 struct ctl_softc *softc = CTL_SOFTC(io); 11702 struct ctl_lun *lun; 11703 uint32_t targ_lun, initidx; 11704 11705 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11706 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11707 mtx_lock(&softc->ctl_lock); 11708 if (targ_lun >= CTL_MAX_LUNS || 11709 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11710 mtx_unlock(&softc->ctl_lock); 11711 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11712 return (1); 11713 } 11714 ctl_do_lun_reset(lun, initidx, CTL_UA_LUN_RESET); 11715 mtx_unlock(&softc->ctl_lock); 11716 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11717 11718 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { 11719 union ctl_ha_msg msg_info; 11720 11721 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11722 msg_info.hdr.nexus = io->io_hdr.nexus; 11723 msg_info.task.task_action = CTL_TASK_LUN_RESET; 11724 msg_info.hdr.original_sc = NULL; 11725 msg_info.hdr.serializing_sc = NULL; 11726 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11727 sizeof(msg_info.task), M_WAITOK); 11728 } 11729 return (0); 11730 } 11731 11732 static void 11733 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 11734 int other_sc) 11735 { 11736 union ctl_io *xio; 11737 11738 mtx_assert(&lun->lun_lock, MA_OWNED); 11739 11740 /* 11741 * Run through the OOA queue and attempt to find the given I/O. 11742 * The target port, initiator ID, tag type and tag number have to 11743 * match the values that we got from the initiator. If we have an 11744 * untagged command to abort, simply abort the first untagged command 11745 * we come to. We only allow one untagged command at a time of course. 
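 * A targ_port or init_id of UINT32_MAX is treated as a wildcard and
 * matches any port or initiator; CLEAR TASK SET uses that to abort
 * everything outstanding on the LUN.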
11746 */ 11747 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11748 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11749 11750 if ((targ_port == UINT32_MAX || 11751 targ_port == xio->io_hdr.nexus.targ_port) && 11752 (init_id == UINT32_MAX || 11753 init_id == xio->io_hdr.nexus.initid)) { 11754 if (targ_port != xio->io_hdr.nexus.targ_port || 11755 init_id != xio->io_hdr.nexus.initid) 11756 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; 11757 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11758 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11759 union ctl_ha_msg msg_info; 11760 11761 msg_info.hdr.nexus = xio->io_hdr.nexus; 11762 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11763 msg_info.task.tag_num = xio->scsiio.tag_num; 11764 msg_info.task.tag_type = xio->scsiio.tag_type; 11765 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11766 msg_info.hdr.original_sc = NULL; 11767 msg_info.hdr.serializing_sc = NULL; 11768 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11769 sizeof(msg_info.task), M_NOWAIT); 11770 } 11771 } 11772 } 11773 } 11774 11775 static int 11776 ctl_abort_task_set(union ctl_io *io) 11777 { 11778 struct ctl_softc *softc = CTL_SOFTC(io); 11779 struct ctl_lun *lun; 11780 uint32_t targ_lun; 11781 11782 /* 11783 * Look up the LUN. 11784 */ 11785 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11786 mtx_lock(&softc->ctl_lock); 11787 if (targ_lun >= CTL_MAX_LUNS || 11788 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11789 mtx_unlock(&softc->ctl_lock); 11790 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11791 return (1); 11792 } 11793 11794 mtx_lock(&lun->lun_lock); 11795 mtx_unlock(&softc->ctl_lock); 11796 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 11797 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11798 io->io_hdr.nexus.initid, 11799 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11800 } else { /* CTL_TASK_CLEAR_TASK_SET */ 11801 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 11802 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11803 } 11804 mtx_unlock(&lun->lun_lock); 11805 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11806 return (0); 11807 } 11808 11809 static void 11810 ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, 11811 ctl_ua_type ua_type) 11812 { 11813 struct ctl_lun *lun; 11814 struct scsi_sense_data *ps; 11815 uint32_t p, i; 11816 11817 p = initidx / CTL_MAX_INIT_PER_PORT; 11818 i = initidx % CTL_MAX_INIT_PER_PORT; 11819 mtx_lock(&softc->ctl_lock); 11820 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11821 mtx_lock(&lun->lun_lock); 11822 /* Abort tasks. */ 11823 ctl_abort_tasks_lun(lun, p, i, 1); 11824 /* Clear CA. */ 11825 ps = lun->pending_sense[p]; 11826 if (ps != NULL) 11827 ps[i].error_code = 0; 11828 /* Clear reservation. */ 11829 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) 11830 lun->flags &= ~CTL_LUN_RESERVED; 11831 /* Clear prevent media removal. */ 11832 if (lun->prevent && ctl_is_set(lun->prevent, initidx)) { 11833 ctl_clear_mask(lun->prevent, initidx); 11834 lun->prevent_count--; 11835 } 11836 /* Clear TPC status */ 11837 ctl_tpc_lun_clear(lun, initidx); 11838 /* Establish UA. 
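Only the initiator whose nexus was lost gets this UA.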
*/ 11839 ctl_est_ua(lun, initidx, ua_type); 11840 mtx_unlock(&lun->lun_lock); 11841 } 11842 mtx_unlock(&softc->ctl_lock); 11843 } 11844 11845 static int 11846 ctl_i_t_nexus_reset(union ctl_io *io) 11847 { 11848 struct ctl_softc *softc = CTL_SOFTC(io); 11849 uint32_t initidx; 11850 11851 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11852 union ctl_ha_msg msg_info; 11853 11854 msg_info.hdr.nexus = io->io_hdr.nexus; 11855 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; 11856 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11857 msg_info.hdr.original_sc = NULL; 11858 msg_info.hdr.serializing_sc = NULL; 11859 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11860 sizeof(msg_info.task), M_WAITOK); 11861 } 11862 11863 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11864 ctl_i_t_nexus_loss(softc, initidx, CTL_UA_I_T_NEXUS_LOSS); 11865 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11866 return (0); 11867 } 11868 11869 static int 11870 ctl_abort_task(union ctl_io *io) 11871 { 11872 struct ctl_softc *softc = CTL_SOFTC(io); 11873 union ctl_io *xio; 11874 struct ctl_lun *lun; 11875 #if 0 11876 struct sbuf sb; 11877 char printbuf[128]; 11878 #endif 11879 int found; 11880 uint32_t targ_lun; 11881 11882 found = 0; 11883 11884 /* 11885 * Look up the LUN. 11886 */ 11887 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11888 mtx_lock(&softc->ctl_lock); 11889 if (targ_lun >= CTL_MAX_LUNS || 11890 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11891 mtx_unlock(&softc->ctl_lock); 11892 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11893 return (1); 11894 } 11895 11896 #if 0 11897 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 11898 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 11899 #endif 11900 11901 mtx_lock(&lun->lun_lock); 11902 mtx_unlock(&softc->ctl_lock); 11903 /* 11904 * Run through the OOA queue and attempt to find the given I/O. 11905 * The target port, initiator ID, tag type and tag number have to 11906 * match the values that we got from the initiator. If we have an 11907 * untagged command to abort, simply abort the first untagged command 11908 * we come to. We only allow one untagged command at a time of course. 11909 */ 11910 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11911 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11912 #if 0 11913 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 11914 11915 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 11916 lun->lun, xio->scsiio.tag_num, 11917 xio->scsiio.tag_type, 11918 (xio->io_hdr.blocked_links.tqe_prev 11919 == NULL) ? "" : " BLOCKED", 11920 (xio->io_hdr.flags & 11921 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 11922 (xio->io_hdr.flags & 11923 CTL_FLAG_ABORT) ? " ABORT" : "", 11924 (xio->io_hdr.flags & 11925 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); 11926 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 11927 sbuf_finish(&sb); 11928 printf("%s\n", sbuf_data(&sb)); 11929 #endif 11930 11931 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 11932 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 11933 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 11934 continue; 11935 11936 /* 11937 * If the abort says that the task is untagged, the 11938 * task in the queue must be untagged. Otherwise, 11939 * we just check to see whether the tag numbers 11940 * match. This is because the QLogic firmware 11941 * doesn't pass back the tag type in an abort 11942 * request. 
11943 */ 11944 #if 0 11945 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 11946 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 11947 || (xio->scsiio.tag_num == io->taskio.tag_num)) 11948 #endif 11949 /* 11950 * XXX KDM we've got problems with FC, because it 11951 * doesn't send down a tag type with aborts. So we 11952 * can only really go by the tag number... 11953 * This may cause problems with parallel SCSI. 11954 * Need to figure that out!! 11955 */ 11956 if (xio->scsiio.tag_num == io->taskio.tag_num) { 11957 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11958 found = 1; 11959 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 11960 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11961 union ctl_ha_msg msg_info; 11962 11963 msg_info.hdr.nexus = io->io_hdr.nexus; 11964 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11965 msg_info.task.tag_num = io->taskio.tag_num; 11966 msg_info.task.tag_type = io->taskio.tag_type; 11967 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11968 msg_info.hdr.original_sc = NULL; 11969 msg_info.hdr.serializing_sc = NULL; 11970 #if 0 11971 printf("Sent Abort to other side\n"); 11972 #endif 11973 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11974 sizeof(msg_info.task), M_NOWAIT); 11975 } 11976 #if 0 11977 printf("ctl_abort_task: found I/O to abort\n"); 11978 #endif 11979 } 11980 } 11981 mtx_unlock(&lun->lun_lock); 11982 11983 if (found == 0) { 11984 /* 11985 * This isn't really an error. It's entirely possible for 11986 * the abort and command completion to cross on the wire. 11987 * This is more of an informative/diagnostic error. 11988 */ 11989 #if 0 11990 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " 11991 "%u:%u:%u tag %d type %d\n", 11992 io->io_hdr.nexus.initid, 11993 io->io_hdr.nexus.targ_port, 11994 io->io_hdr.nexus.targ_lun, io->taskio.tag_num, 11995 io->taskio.tag_type); 11996 #endif 11997 } 11998 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11999 return (0); 12000 } 12001 12002 static int 12003 ctl_query_task(union ctl_io *io, int task_set) 12004 { 12005 struct ctl_softc *softc = CTL_SOFTC(io); 12006 union ctl_io *xio; 12007 struct ctl_lun *lun; 12008 int found = 0; 12009 uint32_t targ_lun; 12010 12011 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12012 mtx_lock(&softc->ctl_lock); 12013 if (targ_lun >= CTL_MAX_LUNS || 12014 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12015 mtx_unlock(&softc->ctl_lock); 12016 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12017 return (1); 12018 } 12019 mtx_lock(&lun->lun_lock); 12020 mtx_unlock(&softc->ctl_lock); 12021 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12022 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12023 12024 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 12025 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 12026 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 12027 continue; 12028 12029 if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) { 12030 found = 1; 12031 break; 12032 } 12033 } 12034 mtx_unlock(&lun->lun_lock); 12035 if (found) 12036 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12037 else 12038 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12039 return (0); 12040 } 12041 12042 static int 12043 ctl_query_async_event(union ctl_io *io) 12044 { 12045 struct ctl_softc *softc = CTL_SOFTC(io); 12046 struct ctl_lun *lun; 12047 ctl_ua_type ua; 12048 uint32_t targ_lun, initidx; 12049 12050 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12051 mtx_lock(&softc->ctl_lock); 12052 if (targ_lun >= CTL_MAX_LUNS || 12053 
(lun = softc->ctl_luns[targ_lun]) == NULL) { 12054 mtx_unlock(&softc->ctl_lock); 12055 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12056 return (1); 12057 } 12058 mtx_lock(&lun->lun_lock); 12059 mtx_unlock(&softc->ctl_lock); 12060 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12061 ua = ctl_build_qae(lun, initidx, io->taskio.task_resp); 12062 mtx_unlock(&lun->lun_lock); 12063 if (ua != CTL_UA_NONE) 12064 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12065 else 12066 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12067 return (0); 12068 } 12069 12070 static void 12071 ctl_run_task(union ctl_io *io) 12072 { 12073 int retval = 1; 12074 12075 CTL_DEBUG_PRINT(("ctl_run_task\n")); 12076 KASSERT(io->io_hdr.io_type == CTL_IO_TASK, 12077 ("ctl_run_task: Unexpected io_type %d\n", io->io_hdr.io_type)); 12078 io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED; 12079 bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp)); 12080 switch (io->taskio.task_action) { 12081 case CTL_TASK_ABORT_TASK: 12082 retval = ctl_abort_task(io); 12083 break; 12084 case CTL_TASK_ABORT_TASK_SET: 12085 case CTL_TASK_CLEAR_TASK_SET: 12086 retval = ctl_abort_task_set(io); 12087 break; 12088 case CTL_TASK_CLEAR_ACA: 12089 break; 12090 case CTL_TASK_I_T_NEXUS_RESET: 12091 retval = ctl_i_t_nexus_reset(io); 12092 break; 12093 case CTL_TASK_LUN_RESET: 12094 retval = ctl_lun_reset(io); 12095 break; 12096 case CTL_TASK_TARGET_RESET: 12097 case CTL_TASK_BUS_RESET: 12098 retval = ctl_target_reset(io); 12099 break; 12100 case CTL_TASK_PORT_LOGIN: 12101 break; 12102 case CTL_TASK_PORT_LOGOUT: 12103 break; 12104 case CTL_TASK_QUERY_TASK: 12105 retval = ctl_query_task(io, 0); 12106 break; 12107 case CTL_TASK_QUERY_TASK_SET: 12108 retval = ctl_query_task(io, 1); 12109 break; 12110 case CTL_TASK_QUERY_ASYNC_EVENT: 12111 retval = ctl_query_async_event(io); 12112 break; 12113 default: 12114 printf("%s: got unknown task management event %d\n", 12115 __func__, io->taskio.task_action); 12116 break; 12117 } 12118 if (retval == 0) 12119 io->io_hdr.status = CTL_SUCCESS; 12120 else 12121 io->io_hdr.status = CTL_ERROR; 12122 ctl_done(io); 12123 } 12124 12125 /* 12126 * For HA operation. Handle commands that come in from the other 12127 * controller. 12128 */ 12129 static void 12130 ctl_handle_isc(union ctl_io *io) 12131 { 12132 struct ctl_softc *softc = CTL_SOFTC(io); 12133 struct ctl_lun *lun; 12134 const struct ctl_cmd_entry *entry; 12135 uint32_t targ_lun; 12136 12137 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12138 switch (io->io_hdr.msg_type) { 12139 case CTL_MSG_SERIALIZE: 12140 ctl_serialize_other_sc_cmd(&io->scsiio); 12141 break; 12142 case CTL_MSG_R2R: /* Only used in SER_ONLY mode.
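	 * The peer controller has finished serializing this command;
	 * re-check it against the LUN here and queue it to run.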
*/ 12143 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 12144 if (targ_lun >= CTL_MAX_LUNS || 12145 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12146 ctl_done(io); 12147 break; 12148 } 12149 mtx_lock(&lun->lun_lock); 12150 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { 12151 mtx_unlock(&lun->lun_lock); 12152 ctl_done(io); 12153 break; 12154 } 12155 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12156 mtx_unlock(&lun->lun_lock); 12157 ctl_enqueue_rtr(io); 12158 break; 12159 case CTL_MSG_FINISH_IO: 12160 if (softc->ha_mode == CTL_HA_MODE_XFER) { 12161 ctl_done(io); 12162 break; 12163 } 12164 if (targ_lun >= CTL_MAX_LUNS || 12165 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12166 ctl_free_io(io); 12167 break; 12168 } 12169 mtx_lock(&lun->lun_lock); 12170 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 12171 ctl_check_blocked(lun); 12172 mtx_unlock(&lun->lun_lock); 12173 ctl_free_io(io); 12174 break; 12175 case CTL_MSG_PERS_ACTION: 12176 ctl_hndl_per_res_out_on_other_sc(io); 12177 ctl_free_io(io); 12178 break; 12179 case CTL_MSG_BAD_JUJU: 12180 ctl_done(io); 12181 break; 12182 case CTL_MSG_DATAMOVE: /* Only used in XFER mode */ 12183 ctl_datamove_remote(io); 12184 break; 12185 case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */ 12186 io->scsiio.be_move_done(io); 12187 break; 12188 case CTL_MSG_FAILOVER: 12189 ctl_failover_lun(io); 12190 ctl_free_io(io); 12191 break; 12192 default: 12193 printf("%s: Invalid message type %d\n", 12194 __func__, io->io_hdr.msg_type); 12195 ctl_free_io(io); 12196 break; 12197 } 12198 12199 } 12200 12201 12202 /* 12203 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12204 * there is no match. 12205 */ 12206 static ctl_lun_error_pattern 12207 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12208 { 12209 const struct ctl_cmd_entry *entry; 12210 ctl_lun_error_pattern filtered_pattern, pattern; 12211 12212 pattern = desc->error_pattern; 12213 12214 /* 12215 * XXX KDM we need more data passed into this function to match a 12216 * custom pattern, and we actually need to implement custom pattern 12217 * matching. 12218 */ 12219 if (pattern & CTL_LUN_PAT_CMD) 12220 return (CTL_LUN_PAT_CMD); 12221 12222 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12223 return (CTL_LUN_PAT_ANY); 12224 12225 entry = ctl_get_cmd_entry(ctsio, NULL); 12226 12227 filtered_pattern = entry->pattern & pattern; 12228 12229 /* 12230 * If the user requested specific flags in the pattern (e.g. 12231 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12232 * flags. 12233 * 12234 * If the user did not specify any flags, it doesn't matter whether 12235 * or not the command supports the flags. 12236 */ 12237 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12238 (pattern & ~CTL_LUN_PAT_MASK)) 12239 return (CTL_LUN_PAT_NONE); 12240 12241 /* 12242 * If the user asked for a range check, see if the requested LBA 12243 * range overlaps with this command's LBA range. 12244 */ 12245 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12246 uint64_t lba1; 12247 uint64_t len1; 12248 ctl_action action; 12249 int retval; 12250 12251 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12252 if (retval != 0) 12253 return (CTL_LUN_PAT_NONE); 12254 12255 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12256 desc->lba_range.len, FALSE); 12257 /* 12258 * A "pass" means that the LBA ranges don't overlap, so 12259 * this doesn't match the user's range criteria. 
12260 */ 12261 if (action == CTL_ACTION_PASS) 12262 return (CTL_LUN_PAT_NONE); 12263 } 12264 12265 return (filtered_pattern); 12266 } 12267 12268 static void 12269 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12270 { 12271 struct ctl_error_desc *desc, *desc2; 12272 12273 mtx_assert(&lun->lun_lock, MA_OWNED); 12274 12275 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12276 ctl_lun_error_pattern pattern; 12277 /* 12278 * Check to see whether this particular command matches 12279 * the pattern in the descriptor. 12280 */ 12281 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12282 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12283 continue; 12284 12285 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12286 case CTL_LUN_INJ_ABORTED: 12287 ctl_set_aborted(&io->scsiio); 12288 break; 12289 case CTL_LUN_INJ_MEDIUM_ERR: 12290 ctl_set_medium_error(&io->scsiio, 12291 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != 12292 CTL_FLAG_DATA_OUT); 12293 break; 12294 case CTL_LUN_INJ_UA: 12295 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12296 * OCCURRED */ 12297 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12298 break; 12299 case CTL_LUN_INJ_CUSTOM: 12300 /* 12301 * We're assuming the user knows what he is doing. 12302 * Just copy the sense information without doing 12303 * checks. 12304 */ 12305 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12306 MIN(sizeof(desc->custom_sense), 12307 sizeof(io->scsiio.sense_data))); 12308 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12309 io->scsiio.sense_len = SSD_FULL_SIZE; 12310 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12311 break; 12312 case CTL_LUN_INJ_NONE: 12313 default: 12314 /* 12315 * If this is an error injection type we don't know 12316 * about, clear the continuous flag (if it is set) 12317 * so it will get deleted below. 12318 */ 12319 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12320 break; 12321 } 12322 /* 12323 * By default, each error injection action is a one-shot 12324 */ 12325 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12326 continue; 12327 12328 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12329 12330 free(desc, M_CTL); 12331 } 12332 } 12333 12334 #ifdef CTL_IO_DELAY 12335 static void 12336 ctl_datamove_timer_wakeup(void *arg) 12337 { 12338 union ctl_io *io; 12339 12340 io = (union ctl_io *)arg; 12341 12342 ctl_datamove(io); 12343 } 12344 #endif /* CTL_IO_DELAY */ 12345 12346 void 12347 ctl_datamove(union ctl_io *io) 12348 { 12349 void (*fe_datamove)(union ctl_io *io); 12350 12351 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12352 12353 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12354 12355 /* No data transferred yet. Frontend must update this when done. 
*/ 12356 io->scsiio.kern_data_resid = io->scsiio.kern_data_len; 12357 12358 #ifdef CTL_TIME_IO 12359 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12360 char str[256]; 12361 char path_str[64]; 12362 struct sbuf sb; 12363 12364 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12365 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12366 12367 sbuf_cat(&sb, path_str); 12368 switch (io->io_hdr.io_type) { 12369 case CTL_IO_SCSI: 12370 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12371 sbuf_printf(&sb, "\n"); 12372 sbuf_cat(&sb, path_str); 12373 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12374 io->scsiio.tag_num, io->scsiio.tag_type); 12375 break; 12376 case CTL_IO_TASK: 12377 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12378 "Tag Type: %d\n", io->taskio.task_action, 12379 io->taskio.tag_num, io->taskio.tag_type); 12380 break; 12381 default: 12382 panic("%s: Invalid CTL I/O type %d\n", 12383 __func__, io->io_hdr.io_type); 12384 } 12385 sbuf_cat(&sb, path_str); 12386 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12387 (intmax_t)time_uptime - io->io_hdr.start_time); 12388 sbuf_finish(&sb); 12389 printf("%s", sbuf_data(&sb)); 12390 } 12391 #endif /* CTL_TIME_IO */ 12392 12393 #ifdef CTL_IO_DELAY 12394 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12395 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12396 } else { 12397 struct ctl_lun *lun; 12398 12399 lun = CTL_LUN(io); 12400 if ((lun != NULL) 12401 && (lun->delay_info.datamove_delay > 0)) { 12402 12403 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 12404 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12405 callout_reset(&io->io_hdr.delay_callout, 12406 lun->delay_info.datamove_delay * hz, 12407 ctl_datamove_timer_wakeup, io); 12408 if (lun->delay_info.datamove_type == 12409 CTL_DELAY_TYPE_ONESHOT) 12410 lun->delay_info.datamove_delay = 0; 12411 return; 12412 } 12413 } 12414 #endif 12415 12416 /* 12417 * This command has been aborted. Set the port status, so we fail 12418 * the data move. 12419 */ 12420 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12421 printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", 12422 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12423 io->io_hdr.nexus.targ_port, 12424 io->io_hdr.nexus.targ_lun); 12425 io->io_hdr.port_status = 31337; 12426 /* 12427 * Note that the backend, in this case, will get the 12428 * callback in its context. In other cases it may get 12429 * called in the frontend's interrupt thread context. 12430 */ 12431 io->scsiio.be_move_done(io); 12432 return; 12433 } 12434 12435 /* Don't confuse frontend with zero length data move. 
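Just complete it here by calling be_move_done() directly.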
*/ 12436 if (io->scsiio.kern_data_len == 0) { 12437 io->scsiio.be_move_done(io); 12438 return; 12439 } 12440 12441 fe_datamove = CTL_PORT(io)->fe_datamove; 12442 fe_datamove(io); 12443 } 12444 12445 static void 12446 ctl_send_datamove_done(union ctl_io *io, int have_lock) 12447 { 12448 union ctl_ha_msg msg; 12449 #ifdef CTL_TIME_IO 12450 struct bintime cur_bt; 12451 #endif 12452 12453 memset(&msg, 0, sizeof(msg)); 12454 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12455 msg.hdr.original_sc = io; 12456 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12457 msg.hdr.nexus = io->io_hdr.nexus; 12458 msg.hdr.status = io->io_hdr.status; 12459 msg.scsi.kern_data_resid = io->scsiio.kern_data_resid; 12460 msg.scsi.tag_num = io->scsiio.tag_num; 12461 msg.scsi.tag_type = io->scsiio.tag_type; 12462 msg.scsi.scsi_status = io->scsiio.scsi_status; 12463 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12464 io->scsiio.sense_len); 12465 msg.scsi.sense_len = io->scsiio.sense_len; 12466 msg.scsi.port_status = io->io_hdr.port_status; 12467 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12468 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12469 ctl_failover_io(io, /*have_lock*/ have_lock); 12470 return; 12471 } 12472 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12473 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12474 msg.scsi.sense_len, M_WAITOK); 12475 12476 #ifdef CTL_TIME_IO 12477 getbinuptime(&cur_bt); 12478 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 12479 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 12480 #endif 12481 io->io_hdr.num_dmas++; 12482 } 12483 12484 /* 12485 * The DMA to the remote side is done, now we need to tell the other side 12486 * we're done so it can continue with its data movement. 12487 */ 12488 static void 12489 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12490 { 12491 union ctl_io *io; 12492 uint32_t i; 12493 12494 io = rq->context; 12495 12496 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12497 printf("%s: ISC DMA write failed with error %d", __func__, 12498 rq->ret); 12499 ctl_set_internal_failure(&io->scsiio, 12500 /*sks_valid*/ 1, 12501 /*retry_count*/ rq->ret); 12502 } 12503 12504 ctl_dt_req_free(rq); 12505 12506 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12507 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12508 free(io->io_hdr.remote_sglist, M_CTL); 12509 io->io_hdr.remote_sglist = NULL; 12510 io->io_hdr.local_sglist = NULL; 12511 12512 /* 12513 * The data is in local and remote memory, so now we need to send 12514 * status (good or back) back to the other side. 12515 */ 12516 ctl_send_datamove_done(io, /*have_lock*/ 0); 12517 } 12518 12519 /* 12520 * We've moved the data from the host/controller into local memory. Now we 12521 * need to push it over to the remote controller's memory. 12522 */ 12523 static int 12524 ctl_datamove_remote_dm_write_cb(union ctl_io *io) 12525 { 12526 int retval; 12527 12528 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 12529 ctl_datamove_remote_write_cb); 12530 return (retval); 12531 } 12532 12533 static void 12534 ctl_datamove_remote_write(union ctl_io *io) 12535 { 12536 int retval; 12537 void (*fe_datamove)(union ctl_io *io); 12538 12539 /* 12540 * - Get the data from the host/HBA into local memory. 12541 * - DMA memory from the local controller to the remote controller. 12542 * - Send status back to the remote controller. 
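 *
 * The local DMA is started at the bottom of this function;
 * ctl_datamove_remote_dm_write_cb() then pushes the data to the peer
 * and ctl_datamove_remote_write_cb() reports the final status back.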
12543 */ 12544 12545 retval = ctl_datamove_remote_sgl_setup(io); 12546 if (retval != 0) 12547 return; 12548 12549 /* Switch the pointer over so the FETD knows what to do */ 12550 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12551 12552 /* 12553 * Use a custom move done callback, since we need to send completion 12554 * back to the other controller, not to the backend on this side. 12555 */ 12556 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12557 12558 fe_datamove = CTL_PORT(io)->fe_datamove; 12559 fe_datamove(io); 12560 } 12561 12562 static int 12563 ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12564 { 12565 #if 0 12566 char str[256]; 12567 char path_str[64]; 12568 struct sbuf sb; 12569 #endif 12570 uint32_t i; 12571 12572 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12573 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12574 free(io->io_hdr.remote_sglist, M_CTL); 12575 io->io_hdr.remote_sglist = NULL; 12576 io->io_hdr.local_sglist = NULL; 12577 12578 #if 0 12579 scsi_path_string(io, path_str, sizeof(path_str)); 12580 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12581 sbuf_cat(&sb, path_str); 12582 scsi_command_string(&io->scsiio, NULL, &sb); 12583 sbuf_printf(&sb, "\n"); 12584 sbuf_cat(&sb, path_str); 12585 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12586 io->scsiio.tag_num, io->scsiio.tag_type); 12587 sbuf_cat(&sb, path_str); 12588 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 12589 io->io_hdr.flags, io->io_hdr.status); 12590 sbuf_finish(&sb); 12591 printk("%s", sbuf_data(&sb)); 12592 #endif 12593 12594 12595 /* 12596 * The read is done, now we need to send status (good or bad) back 12597 * to the other side. 12598 */ 12599 ctl_send_datamove_done(io, /*have_lock*/ 0); 12600 12601 return (0); 12602 } 12603 12604 static void 12605 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12606 { 12607 union ctl_io *io; 12608 void (*fe_datamove)(union ctl_io *io); 12609 12610 io = rq->context; 12611 12612 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12613 printf("%s: ISC DMA read failed with error %d\n", __func__, 12614 rq->ret); 12615 ctl_set_internal_failure(&io->scsiio, 12616 /*sks_valid*/ 1, 12617 /*retry_count*/ rq->ret); 12618 } 12619 12620 ctl_dt_req_free(rq); 12621 12622 /* Switch the pointer over so the FETD knows what to do */ 12623 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12624 12625 /* 12626 * Use a custom move done callback, since we need to send completion 12627 * back to the other controller, not to the backend on this side. 12628 */ 12629 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12630 12631 /* XXX KDM add checks like the ones in ctl_datamove? */ 12632 12633 fe_datamove = CTL_PORT(io)->fe_datamove; 12634 fe_datamove(io); 12635 } 12636 12637 static int 12638 ctl_datamove_remote_sgl_setup(union ctl_io *io) 12639 { 12640 struct ctl_sg_entry *local_sglist; 12641 uint32_t len_to_go; 12642 int retval; 12643 int i; 12644 12645 retval = 0; 12646 local_sglist = io->io_hdr.local_sglist; 12647 len_to_go = io->scsiio.kern_data_len; 12648 12649 /* 12650 * The difficult thing here is that the size of the various 12651 * S/G segments may be different than the size from the 12652 * remote controller. That'll make it harder when DMAing 12653 * the data back to the other side. 
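 * Here we simply carve the local buffer into CTL_HA_DATAMOVE_SEGMENT
 * sized pieces; ctl_datamove_remote_xfer() copes with local and remote
 * segments of different sizes when it copies the data.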
12654 */ 12655 for (i = 0; len_to_go > 0; i++) { 12656 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12657 local_sglist[i].addr = 12658 malloc(local_sglist[i].len, M_CTL, M_WAITOK); 12659 12660 len_to_go -= local_sglist[i].len; 12661 } 12662 /* 12663 * Reset the number of S/G entries accordingly. The original 12664 * number of S/G entries is available in rem_sg_entries. 12665 */ 12666 io->scsiio.kern_sg_entries = i; 12667 12668 #if 0 12669 printf("%s: kern_sg_entries = %d\n", __func__, 12670 io->scsiio.kern_sg_entries); 12671 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12672 printf("%s: sg[%d] = %p, %lu\n", __func__, i, 12673 local_sglist[i].addr, local_sglist[i].len); 12674 #endif 12675 12676 return (retval); 12677 } 12678 12679 static int 12680 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12681 ctl_ha_dt_cb callback) 12682 { 12683 struct ctl_ha_dt_req *rq; 12684 struct ctl_sg_entry *remote_sglist, *local_sglist; 12685 uint32_t local_used, remote_used, total_used; 12686 int i, j, isc_ret; 12687 12688 rq = ctl_dt_req_alloc(); 12689 12690 /* 12691 * If we failed to allocate the request, and if the DMA didn't fail 12692 * anyway, set busy status. This is just a resource allocation 12693 * failure. 12694 */ 12695 if ((rq == NULL) 12696 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12697 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) 12698 ctl_set_busy(&io->scsiio); 12699 12700 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12701 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { 12702 12703 if (rq != NULL) 12704 ctl_dt_req_free(rq); 12705 12706 /* 12707 * The data move failed. We need to return status back 12708 * to the other controller. No point in trying to DMA 12709 * data to the remote controller. 12710 */ 12711 12712 ctl_send_datamove_done(io, /*have_lock*/ 0); 12713 12714 return (1); 12715 } 12716 12717 local_sglist = io->io_hdr.local_sglist; 12718 remote_sglist = io->io_hdr.remote_sglist; 12719 local_used = 0; 12720 remote_used = 0; 12721 total_used = 0; 12722 12723 /* 12724 * Pull/push the data over the wire from/to the other controller. 12725 * This takes into account the possibility that the local and 12726 * remote sglists may not be identical in terms of the size of 12727 * the elements and the number of elements. 12728 * 12729 * One fundamental assumption here is that the length allocated for 12730 * both the local and remote sglists is identical. Otherwise, we've 12731 * essentially got a coding error of some sort. 12732 */ 12733 isc_ret = CTL_HA_STATUS_SUCCESS; 12734 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12735 uint32_t cur_len; 12736 uint8_t *tmp_ptr; 12737 12738 rq->command = command; 12739 rq->context = io; 12740 12741 /* 12742 * Both pointers should be aligned. But it is possible 12743 * that the allocation length is not. They should both 12744 * also have enough slack left over at the end, though, 12745 * to round up to the next 8 byte boundary. 
12746 */ 12747 cur_len = MIN(local_sglist[i].len - local_used, 12748 remote_sglist[j].len - remote_used); 12749 rq->size = cur_len; 12750 12751 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12752 tmp_ptr += local_used; 12753 12754 #if 0 12755 /* Use physical addresses when talking to ISC hardware */ 12756 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12757 /* XXX KDM use busdma */ 12758 rq->local = vtophys(tmp_ptr); 12759 } else 12760 rq->local = tmp_ptr; 12761 #else 12762 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12763 ("HA does not support BUS_ADDR")); 12764 rq->local = tmp_ptr; 12765 #endif 12766 12767 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12768 tmp_ptr += remote_used; 12769 rq->remote = tmp_ptr; 12770 12771 rq->callback = NULL; 12772 12773 local_used += cur_len; 12774 if (local_used >= local_sglist[i].len) { 12775 i++; 12776 local_used = 0; 12777 } 12778 12779 remote_used += cur_len; 12780 if (remote_used >= remote_sglist[j].len) { 12781 j++; 12782 remote_used = 0; 12783 } 12784 total_used += cur_len; 12785 12786 if (total_used >= io->scsiio.kern_data_len) 12787 rq->callback = callback; 12788 12789 #if 0 12790 printf("%s: %s: local %p remote %p size %d\n", __func__, 12791 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 12792 rq->local, rq->remote, rq->size); 12793 #endif 12794 12795 isc_ret = ctl_dt_single(rq); 12796 if (isc_ret > CTL_HA_STATUS_SUCCESS) 12797 break; 12798 } 12799 if (isc_ret != CTL_HA_STATUS_WAIT) { 12800 rq->ret = isc_ret; 12801 callback(rq); 12802 } 12803 12804 return (0); 12805 } 12806 12807 static void 12808 ctl_datamove_remote_read(union ctl_io *io) 12809 { 12810 int retval; 12811 uint32_t i; 12812 12813 /* 12814 * This will send an error to the other controller in the case of a 12815 * failure. 12816 */ 12817 retval = ctl_datamove_remote_sgl_setup(io); 12818 if (retval != 0) 12819 return; 12820 12821 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12822 ctl_datamove_remote_read_cb); 12823 if (retval != 0) { 12824 /* 12825 * Make sure we free memory if there was an error.. The 12826 * ctl_datamove_remote_xfer() function will send the 12827 * datamove done message, or call the callback with an 12828 * error if there is a problem. 12829 */ 12830 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12831 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12832 free(io->io_hdr.remote_sglist, M_CTL); 12833 io->io_hdr.remote_sglist = NULL; 12834 io->io_hdr.local_sglist = NULL; 12835 } 12836 } 12837 12838 /* 12839 * Process a datamove request from the other controller. This is used for 12840 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12841 * first. Once that is complete, the data gets DMAed into the remote 12842 * controller's memory. For reads, we DMA from the remote controller's 12843 * memory into our memory first, and then move it out to the FETD. 12844 */ 12845 static void 12846 ctl_datamove_remote(union ctl_io *io) 12847 { 12848 12849 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12850 12851 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12852 ctl_failover_io(io, /*have_lock*/ 0); 12853 return; 12854 } 12855 12856 /* 12857 * Note that we look for an aborted I/O here, but don't do some of 12858 * the other checks that ctl_datamove() normally does. 12859 * We don't need to run the datamove delay code, since that should 12860 * have been done if need be on the other controller. 
12861 */ 12862 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12863 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, 12864 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12865 io->io_hdr.nexus.targ_port, 12866 io->io_hdr.nexus.targ_lun); 12867 io->io_hdr.port_status = 31338; 12868 ctl_send_datamove_done(io, /*have_lock*/ 0); 12869 return; 12870 } 12871 12872 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) 12873 ctl_datamove_remote_write(io); 12874 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 12875 ctl_datamove_remote_read(io); 12876 else { 12877 io->io_hdr.port_status = 31339; 12878 ctl_send_datamove_done(io, /*have_lock*/ 0); 12879 } 12880 } 12881 12882 static void 12883 ctl_process_done(union ctl_io *io) 12884 { 12885 struct ctl_softc *softc = CTL_SOFTC(io); 12886 struct ctl_port *port = CTL_PORT(io); 12887 struct ctl_lun *lun = CTL_LUN(io); 12888 void (*fe_done)(union ctl_io *io); 12889 union ctl_ha_msg msg; 12890 12891 CTL_DEBUG_PRINT(("ctl_process_done\n")); 12892 fe_done = port->fe_done; 12893 12894 #ifdef CTL_TIME_IO 12895 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12896 char str[256]; 12897 char path_str[64]; 12898 struct sbuf sb; 12899 12900 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12901 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12902 12903 sbuf_cat(&sb, path_str); 12904 switch (io->io_hdr.io_type) { 12905 case CTL_IO_SCSI: 12906 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12907 sbuf_printf(&sb, "\n"); 12908 sbuf_cat(&sb, path_str); 12909 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12910 io->scsiio.tag_num, io->scsiio.tag_type); 12911 break; 12912 case CTL_IO_TASK: 12913 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12914 "Tag Type: %d\n", io->taskio.task_action, 12915 io->taskio.tag_num, io->taskio.tag_type); 12916 break; 12917 default: 12918 panic("%s: Invalid CTL I/O type %d\n", 12919 __func__, io->io_hdr.io_type); 12920 } 12921 sbuf_cat(&sb, path_str); 12922 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 12923 (intmax_t)time_uptime - io->io_hdr.start_time); 12924 sbuf_finish(&sb); 12925 printf("%s", sbuf_data(&sb)); 12926 } 12927 #endif /* CTL_TIME_IO */ 12928 12929 switch (io->io_hdr.io_type) { 12930 case CTL_IO_SCSI: 12931 break; 12932 case CTL_IO_TASK: 12933 if (ctl_debug & CTL_DEBUG_INFO) 12934 ctl_io_error_print(io, NULL); 12935 fe_done(io); 12936 return; 12937 default: 12938 panic("%s: Invalid CTL I/O type %d\n", 12939 __func__, io->io_hdr.io_type); 12940 } 12941 12942 if (lun == NULL) { 12943 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 12944 io->io_hdr.nexus.targ_mapped_lun)); 12945 goto bailout; 12946 } 12947 12948 mtx_lock(&lun->lun_lock); 12949 12950 /* 12951 * Check to see if we have any informational exception and status 12952 * of this command can be modified to report it in form of either 12953 * RECOVERED ERROR or NO SENSE, depending on MRIE mode page field. 
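 * (MRIE is the Method of Reporting Informational Exceptions field in
 * the Informational Exceptions Control mode page.)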
12954 */ 12955 if (lun->ie_reported == 0 && lun->ie_asc != 0 && 12956 io->io_hdr.status == CTL_SUCCESS && 12957 (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) { 12958 uint8_t mrie = lun->MODE_IE.mrie; 12959 uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) || 12960 (lun->MODE_VER.byte3 & SMS_VER_PER)); 12961 if (((mrie == SIEP_MRIE_REC_COND && per) || 12962 mrie == SIEP_MRIE_REC_UNCOND || 12963 mrie == SIEP_MRIE_NO_SENSE) && 12964 (ctl_get_cmd_entry(&io->scsiio, NULL)->flags & 12965 CTL_CMD_FLAG_NO_SENSE) == 0) { 12966 ctl_set_sense(&io->scsiio, 12967 /*current_error*/ 1, 12968 /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ? 12969 SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR, 12970 /*asc*/ lun->ie_asc, 12971 /*ascq*/ lun->ie_ascq, 12972 SSD_ELEM_NONE); 12973 lun->ie_reported = 1; 12974 } 12975 } else if (lun->ie_reported < 0) 12976 lun->ie_reported = 0; 12977 12978 /* 12979 * Check to see if we have any errors to inject here. We only 12980 * inject errors for commands that don't already have errors set. 12981 */ 12982 if (!STAILQ_EMPTY(&lun->error_list) && 12983 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && 12984 ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) 12985 ctl_inject_error(lun, io); 12986 12987 /* 12988 * XXX KDM how do we treat commands that aren't completed 12989 * successfully? 12990 * 12991 * XXX KDM should we also track I/O latency? 12992 */ 12993 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && 12994 io->io_hdr.io_type == CTL_IO_SCSI) { 12995 int type; 12996 #ifdef CTL_TIME_IO 12997 struct bintime bt; 12998 12999 getbinuptime(&bt); 13000 bintime_sub(&bt, &io->io_hdr.start_bt); 13001 #endif 13002 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13003 CTL_FLAG_DATA_IN) 13004 type = CTL_STATS_READ; 13005 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13006 CTL_FLAG_DATA_OUT) 13007 type = CTL_STATS_WRITE; 13008 else 13009 type = CTL_STATS_NO_IO; 13010 13011 #ifdef CTL_LEGACY_STATS 13012 uint32_t targ_port = port->targ_port; 13013 lun->legacy_stats.ports[targ_port].bytes[type] += 13014 io->scsiio.kern_total_len; 13015 lun->legacy_stats.ports[targ_port].operations[type] ++; 13016 lun->legacy_stats.ports[targ_port].num_dmas[type] += 13017 io->io_hdr.num_dmas; 13018 #ifdef CTL_TIME_IO 13019 bintime_add(&lun->legacy_stats.ports[targ_port].dma_time[type], 13020 &io->io_hdr.dma_bt); 13021 bintime_add(&lun->legacy_stats.ports[targ_port].time[type], 13022 &bt); 13023 #endif 13024 #endif /* CTL_LEGACY_STATS */ 13025 13026 lun->stats.bytes[type] += io->scsiio.kern_total_len; 13027 lun->stats.operations[type] ++; 13028 lun->stats.dmas[type] += io->io_hdr.num_dmas; 13029 #ifdef CTL_TIME_IO 13030 bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt); 13031 bintime_add(&lun->stats.time[type], &bt); 13032 #endif 13033 13034 mtx_lock(&port->port_lock); 13035 port->stats.bytes[type] += io->scsiio.kern_total_len; 13036 port->stats.operations[type] ++; 13037 port->stats.dmas[type] += io->io_hdr.num_dmas; 13038 #ifdef CTL_TIME_IO 13039 bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt); 13040 bintime_add(&port->stats.time[type], &bt); 13041 #endif 13042 mtx_unlock(&port->port_lock); 13043 } 13044 13045 /* 13046 * Remove this from the OOA queue. 
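 * If the queue becomes empty, record the time (under CTL_TIME_IO) so
 * the LUN's idle time can be accounted for.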
13047 */ 13048 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 13049 #ifdef CTL_TIME_IO 13050 if (TAILQ_EMPTY(&lun->ooa_queue)) 13051 lun->last_busy = getsbinuptime(); 13052 #endif 13053 13054 /* 13055 * Run through the blocked queue on this LUN and see if anything 13056 * has become unblocked, now that this transaction is done. 13057 */ 13058 ctl_check_blocked(lun); 13059 13060 /* 13061 * If the LUN has been invalidated, free it if there is nothing 13062 * left on its OOA queue. 13063 */ 13064 if ((lun->flags & CTL_LUN_INVALID) 13065 && TAILQ_EMPTY(&lun->ooa_queue)) { 13066 mtx_unlock(&lun->lun_lock); 13067 ctl_free_lun(lun); 13068 } else 13069 mtx_unlock(&lun->lun_lock); 13070 13071 bailout: 13072 13073 /* 13074 * If this command has been aborted, make sure we set the status 13075 * properly. The FETD is responsible for freeing the I/O and doing 13076 * whatever it needs to do to clean up its state. 13077 */ 13078 if (io->io_hdr.flags & CTL_FLAG_ABORT) 13079 ctl_set_task_aborted(&io->scsiio); 13080 13081 /* 13082 * If enabled, print command error status. 13083 */ 13084 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS && 13085 (ctl_debug & CTL_DEBUG_INFO) != 0) 13086 ctl_io_error_print(io, NULL); 13087 13088 /* 13089 * Tell the FETD or the other shelf controller we're done with this 13090 * command. Note that only SCSI commands get to this point. Task 13091 * management commands are completed above. 13092 */ 13093 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 13094 (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) { 13095 memset(&msg, 0, sizeof(msg)); 13096 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 13097 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 13098 msg.hdr.nexus = io->io_hdr.nexus; 13099 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13100 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data), 13101 M_WAITOK); 13102 } 13103 13104 fe_done(io); 13105 } 13106 13107 /* 13108 * Front end should call this if it doesn't do autosense. When the request 13109 * sense comes back in from the initiator, we'll dequeue this and send it. 13110 */ 13111 int 13112 ctl_queue_sense(union ctl_io *io) 13113 { 13114 struct ctl_softc *softc = CTL_SOFTC(io); 13115 struct ctl_port *port = CTL_PORT(io); 13116 struct ctl_lun *lun; 13117 struct scsi_sense_data *ps; 13118 uint32_t initidx, p, targ_lun; 13119 13120 CTL_DEBUG_PRINT(("ctl_queue_sense\n")); 13121 13122 targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13123 13124 /* 13125 * LUN lookup will likely move to the ctl_work_thread() once we 13126 * have our new queueing infrastructure (that doesn't put things on 13127 * a per-LUN queue initially). That is so that we can handle 13128 * things like an INQUIRY to a LUN that we don't have enabled. We 13129 * can't deal with that right now. 13130 * If we don't have a LUN for this, just toss the sense information. 
13131 */ 13132 mtx_lock(&softc->ctl_lock); 13133 if (targ_lun >= CTL_MAX_LUNS || 13134 (lun = softc->ctl_luns[targ_lun]) == NULL) { 13135 mtx_unlock(&softc->ctl_lock); 13136 goto bailout; 13137 } 13138 mtx_lock(&lun->lun_lock); 13139 mtx_unlock(&softc->ctl_lock); 13140 13141 initidx = ctl_get_initindex(&io->io_hdr.nexus); 13142 p = initidx / CTL_MAX_INIT_PER_PORT; 13143 if (lun->pending_sense[p] == NULL) { 13144 lun->pending_sense[p] = malloc(sizeof(*ps) * CTL_MAX_INIT_PER_PORT, 13145 M_CTL, M_NOWAIT | M_ZERO); 13146 } 13147 if ((ps = lun->pending_sense[p]) != NULL) { 13148 ps += initidx % CTL_MAX_INIT_PER_PORT; 13149 memset(ps, 0, sizeof(*ps)); 13150 memcpy(ps, &io->scsiio.sense_data, io->scsiio.sense_len); 13151 } 13152 mtx_unlock(&lun->lun_lock); 13153 13154 bailout: 13155 ctl_free_io(io); 13156 return (CTL_RETVAL_COMPLETE); 13157 } 13158 13159 /* 13160 * Primary command inlet from frontend ports. All SCSI and task I/O 13161 * requests must go through this function. 13162 */ 13163 int 13164 ctl_queue(union ctl_io *io) 13165 { 13166 struct ctl_port *port = CTL_PORT(io); 13167 13168 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 13169 13170 #ifdef CTL_TIME_IO 13171 io->io_hdr.start_time = time_uptime; 13172 getbinuptime(&io->io_hdr.start_bt); 13173 #endif /* CTL_TIME_IO */ 13174 13175 /* Map FE-specific LUN ID into global one. */ 13176 io->io_hdr.nexus.targ_mapped_lun = 13177 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13178 13179 switch (io->io_hdr.io_type) { 13180 case CTL_IO_SCSI: 13181 case CTL_IO_TASK: 13182 if (ctl_debug & CTL_DEBUG_CDB) 13183 ctl_io_print(io); 13184 ctl_enqueue_incoming(io); 13185 break; 13186 default: 13187 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 13188 return (EINVAL); 13189 } 13190 13191 return (CTL_RETVAL_COMPLETE); 13192 } 13193 13194 #ifdef CTL_IO_DELAY 13195 static void 13196 ctl_done_timer_wakeup(void *arg) 13197 { 13198 union ctl_io *io; 13199 13200 io = (union ctl_io *)arg; 13201 ctl_done(io); 13202 } 13203 #endif /* CTL_IO_DELAY */ 13204 13205 void 13206 ctl_serseq_done(union ctl_io *io) 13207 { 13208 struct ctl_lun *lun = CTL_LUN(io);; 13209 13210 if (lun->be_lun == NULL || 13211 lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF) 13212 return; 13213 mtx_lock(&lun->lun_lock); 13214 io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE; 13215 ctl_check_blocked(lun); 13216 mtx_unlock(&lun->lun_lock); 13217 } 13218 13219 void 13220 ctl_done(union ctl_io *io) 13221 { 13222 13223 /* 13224 * Enable this to catch duplicate completion issues. 13225 */ 13226 #if 0 13227 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { 13228 printf("%s: type %d msg %d cdb %x iptl: " 13229 "%u:%u:%u tag 0x%04x " 13230 "flag %#x status %x\n", 13231 __func__, 13232 io->io_hdr.io_type, 13233 io->io_hdr.msg_type, 13234 io->scsiio.cdb[0], 13235 io->io_hdr.nexus.initid, 13236 io->io_hdr.nexus.targ_port, 13237 io->io_hdr.nexus.targ_lun, 13238 (io->io_hdr.io_type == 13239 CTL_IO_TASK) ? 13240 io->taskio.tag_num : 13241 io->scsiio.tag_num, 13242 io->io_hdr.flags, 13243 io->io_hdr.status); 13244 } else 13245 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; 13246 #endif 13247 13248 /* 13249 * This is an internal copy of an I/O, and should not go through 13250 * the normal done processing logic. 
#ifdef CTL_IO_DELAY
	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
	} else {
		struct ctl_lun *lun = CTL_LUN(io);

		if ((lun != NULL)
		 && (lun->delay_info.done_delay > 0)) {

			callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
			callout_reset(&io->io_hdr.delay_callout,
			    lun->delay_info.done_delay * hz,
			    ctl_done_timer_wakeup, io);
			if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
				lun->delay_info.done_delay = 0;
			return;
		}
	}
#endif /* CTL_IO_DELAY */

	ctl_enqueue_done(io);
}

static void
ctl_work_thread(void *arg)
{
	struct ctl_thread *thr = (struct ctl_thread *)arg;
	struct ctl_softc *softc = thr->ctl_softc;
	union ctl_io *io;
	int retval;

	CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));

	while (!softc->shutdown) {
		/*
		 * We handle the queues in this order:
		 * - ISC
		 * - done queue (to free up resources, unblock other commands)
		 * - incoming queue
		 * - RtR queue
		 *
		 * If all of those queues are empty, we sleep until new work
		 * is queued.
		 */
		mtx_lock(&thr->queue_lock);
		io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
			mtx_unlock(&thr->queue_lock);
			ctl_handle_isc(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->done_queue, links);
			/* clear any blocked commands, call fe_done */
			mtx_unlock(&thr->queue_lock);
			ctl_process_done(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
			mtx_unlock(&thr->queue_lock);
			if (io->io_hdr.io_type == CTL_IO_TASK)
				ctl_run_task(io);
			else
				ctl_scsiio_precheck(softc, &io->scsiio);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
			mtx_unlock(&thr->queue_lock);
			retval = ctl_scsiio(&io->scsiio);
			if (retval != CTL_RETVAL_COMPLETE)
				CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
			continue;
		}

		/* Sleep until we have something to do. */
		mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
	}
	thr->thread = NULL;
	kthread_exit();
}
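/*
 * LUN creation thread: pulls pending ctl_be_lun entries off
 * pending_lun_queue and instantiates each one via ctl_create_lun().
 */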
static void
ctl_lun_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_be_lun *be_lun;

	CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));

	while (!softc->shutdown) {
		mtx_lock(&softc->ctl_lock);
		be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
		if (be_lun != NULL) {
			STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
			mtx_unlock(&softc->ctl_lock);
			ctl_create_lun(be_lun);
			continue;
		}

		/* Sleep until we have something to do. */
		mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
		    PDROP | PRIBIO, "-", 0);
	}
	softc->lun_thread = NULL;
	kthread_exit();
}

static void
ctl_thresh_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_lun *lun;
	struct ctl_logical_block_provisioning_page *page;
	const char *attr;
	union ctl_ha_msg msg;
	uint64_t thres, val;
	int i, e, set;

	CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));

	while (!softc->shutdown) {
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			if ((lun->flags & CTL_LUN_DISABLED) ||
			    (lun->flags & CTL_LUN_NO_MEDIA) ||
			    lun->backend->lun_attr == NULL)
				continue;
			if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
			    softc->ha_mode == CTL_HA_MODE_XFER)
				continue;
			if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0)
				continue;
			e = 0;
			page = &lun->MODE_LBP;
			for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
				if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
					continue;
				thres = scsi_4btoul(page->descr[i].count);
				thres <<= CTL_LBP_EXPONENT;
				switch (page->descr[i].resource) {
				case 0x01:
					attr = "blocksavail";
					break;
				case 0x02:
					attr = "blocksused";
					break;
				case 0xf1:
					attr = "poolblocksavail";
					break;
				case 0xf2:
					attr = "poolblocksused";
					break;
				default:
					continue;
				}
				mtx_unlock(&softc->ctl_lock); // XXX
				val = lun->backend->lun_attr(
				    lun->be_lun->be_lun, attr);
				mtx_lock(&softc->ctl_lock);
				if (val == UINT64_MAX)
					continue;
				if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
				    == SLBPPD_ARMING_INC)
					e = (val >= thres);
				else
					e = (val <= thres);
				if (e)
					break;
			}
			mtx_lock(&lun->lun_lock);
			if (e) {
				scsi_u64to8b((uint8_t *)&page->descr[i] -
				    (uint8_t *)page, lun->ua_tpt_info);
				if (lun->lasttpt == 0 ||
				    time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
					lun->lasttpt = time_uptime;
					ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
					set = 1;
				} else
					set = 0;
			} else {
				lun->lasttpt = 0;
				ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
				set = -1;
			}
			mtx_unlock(&lun->lun_lock);
			if (set != 0 &&
			    lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
				/* Send msg to other side. */
				bzero(&msg.ua, sizeof(msg.ua));
				msg.hdr.msg_type = CTL_MSG_UA;
				msg.hdr.nexus.initid = -1;
				msg.hdr.nexus.targ_port = -1;
				msg.hdr.nexus.targ_lun = lun->lun;
				msg.hdr.nexus.targ_mapped_lun = lun->lun;
				msg.ua.ua_all = 1;
				msg.ua.ua_set = (set > 0);
				msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
				memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8);
				mtx_unlock(&softc->ctl_lock); // XXX
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
				    sizeof(msg.ua), M_WAITOK);
				mtx_lock(&softc->ctl_lock);
			}
		}
		mtx_sleep(&softc->thresh_thread, &softc->ctl_lock,
		    PDROP | PRIBIO, "-", CTL_LBP_PERIOD * hz);
	}
	softc->thresh_thread = NULL;
	kthread_exit();
}
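/*
 * Queue insertion helpers.  Incoming I/O is spread across the worker
 * threads by a hash of target port and initiator ID; the RtR, done and
 * ISC queues hash on the mapped LUN, so all later processing for a given
 * LUN lands on the same thread.
 */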
static void
ctl_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;
	u_int idx;

	idx = (io->io_hdr.nexus.targ_port * 127 +
	    io->io_hdr.nexus.initid) % worker_threads;
	thr = &softc->threads[idx];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_rtr(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_done(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_isc(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * vim: ts=8
 */
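/*
 * Illustrative sketch only (not compiled, hence the #if 0): roughly how a
 * frontend port driver hands a newly received SCSI command to CTL through
 * ctl_queue() above.  The field names come from this file, but the
 * surrounding helper, its arguments, and the way the ctl_io was allocated
 * are hypothetical; the real frontends under sys/cam/ctl/ are the
 * authoritative examples.
 */
#if 0
static void
example_frontend_new_cmd(struct ctl_port *port, union ctl_io *io,
    uint32_t initid, uint32_t lun, uint8_t *cdb, u_int cdb_len)
{

	/* Describe the nexus this command arrived on. */
	io->io_hdr.io_type = CTL_IO_SCSI;
	io->io_hdr.nexus.initid = initid;
	io->io_hdr.nexus.targ_port = port->targ_port;
	io->io_hdr.nexus.targ_lun = lun;

	/*
	 * Copy in the CDB and submit it.  CTL calls the port's fe_done
	 * method once the command (and any data movement) is complete.
	 */
	io->scsiio.cdb_len = MIN(cdb_len, sizeof(io->scsiio.cdb));
	memcpy(io->scsiio.cdb, cdb, io->scsiio.cdb_len);
	ctl_queue(io);
}
#endif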