1 /*- 2 * Copyright (c) 2003-2009 Silicon Graphics International Corp. 3 * Copyright (c) 2012 The FreeBSD Foundation 4 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 17 * substantially similar to the "NO WARRANTY" disclaimer below 18 * ("Disclaimer") and any redistribution must be conditioned upon 19 * including a substantially similar Disclaimer requirement for further 20 * binary redistribution. 21 * 22 * NO WARRANTY 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGES. 34 * 35 * $Id$ 36 */ 37 /* 38 * CAM Target Layer, a SCSI device emulation subsystem. 39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42 43 #define _CTL_C 44 45 #include <sys/cdefs.h> 46 __FBSDID("$FreeBSD$"); 47 48 #include <sys/param.h> 49 #include <sys/systm.h> 50 #include <sys/ctype.h> 51 #include <sys/kernel.h> 52 #include <sys/types.h> 53 #include <sys/kthread.h> 54 #include <sys/bio.h> 55 #include <sys/fcntl.h> 56 #include <sys/lock.h> 57 #include <sys/module.h> 58 #include <sys/mutex.h> 59 #include <sys/condvar.h> 60 #include <sys/malloc.h> 61 #include <sys/conf.h> 62 #include <sys/ioccom.h> 63 #include <sys/queue.h> 64 #include <sys/sbuf.h> 65 #include <sys/smp.h> 66 #include <sys/endian.h> 67 #include <sys/sysctl.h> 68 #include <vm/uma.h> 69 70 #include <cam/cam.h> 71 #include <cam/scsi/scsi_all.h> 72 #include <cam/scsi/scsi_cd.h> 73 #include <cam/scsi/scsi_da.h> 74 #include <cam/ctl/ctl_io.h> 75 #include <cam/ctl/ctl.h> 76 #include <cam/ctl/ctl_frontend.h> 77 #include <cam/ctl/ctl_util.h> 78 #include <cam/ctl/ctl_backend.h> 79 #include <cam/ctl/ctl_ioctl.h> 80 #include <cam/ctl/ctl_ha.h> 81 #include <cam/ctl/ctl_private.h> 82 #include <cam/ctl/ctl_debug.h> 83 #include <cam/ctl/ctl_scsi_all.h> 84 #include <cam/ctl/ctl_error.h> 85 86 struct ctl_softc *control_softc = NULL; 87 88 /* 89 * Template mode pages. 90 */ 91 92 /* 93 * Note that these are default values only. The actual values will be 94 * filled in when the user does a mode sense. 
95 */ 96 const static struct scsi_da_rw_recovery_page rw_er_page_default = { 97 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 98 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 99 /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, 100 /*read_retry_count*/0, 101 /*correction_span*/0, 102 /*head_offset_count*/0, 103 /*data_strobe_offset_cnt*/0, 104 /*byte8*/SMS_RWER_LBPERE, 105 /*write_retry_count*/0, 106 /*reserved2*/0, 107 /*recovery_time_limit*/{0, 0}, 108 }; 109 110 const static struct scsi_da_rw_recovery_page rw_er_page_changeable = { 111 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 112 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 113 /*byte3*/SMS_RWER_PER, 114 /*read_retry_count*/0, 115 /*correction_span*/0, 116 /*head_offset_count*/0, 117 /*data_strobe_offset_cnt*/0, 118 /*byte8*/SMS_RWER_LBPERE, 119 /*write_retry_count*/0, 120 /*reserved2*/0, 121 /*recovery_time_limit*/{0, 0}, 122 }; 123 124 const static struct scsi_format_page format_page_default = { 125 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 126 /*page_length*/sizeof(struct scsi_format_page) - 2, 127 /*tracks_per_zone*/ {0, 0}, 128 /*alt_sectors_per_zone*/ {0, 0}, 129 /*alt_tracks_per_zone*/ {0, 0}, 130 /*alt_tracks_per_lun*/ {0, 0}, 131 /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, 132 CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, 133 /*bytes_per_sector*/ {0, 0}, 134 /*interleave*/ {0, 0}, 135 /*track_skew*/ {0, 0}, 136 /*cylinder_skew*/ {0, 0}, 137 /*flags*/ SFP_HSEC, 138 /*reserved*/ {0, 0, 0} 139 }; 140 141 const static struct scsi_format_page format_page_changeable = { 142 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 143 /*page_length*/sizeof(struct scsi_format_page) - 2, 144 /*tracks_per_zone*/ {0, 0}, 145 /*alt_sectors_per_zone*/ {0, 0}, 146 /*alt_tracks_per_zone*/ {0, 0}, 147 /*alt_tracks_per_lun*/ {0, 0}, 148 /*sectors_per_track*/ {0, 0}, 149 /*bytes_per_sector*/ {0, 0}, 150 /*interleave*/ {0, 0}, 151 /*track_skew*/ {0, 0}, 152 /*cylinder_skew*/ {0, 0}, 153 /*flags*/ 0, 154 /*reserved*/ {0, 0, 0} 155 }; 156 157 const static struct scsi_rigid_disk_page rigid_disk_page_default = { 158 /*page_code*/SMS_RIGID_DISK_PAGE, 159 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 160 /*cylinders*/ {0, 0, 0}, 161 /*heads*/ CTL_DEFAULT_HEADS, 162 /*start_write_precomp*/ {0, 0, 0}, 163 /*start_reduced_current*/ {0, 0, 0}, 164 /*step_rate*/ {0, 0}, 165 /*landing_zone_cylinder*/ {0, 0, 0}, 166 /*rpl*/ SRDP_RPL_DISABLED, 167 /*rotational_offset*/ 0, 168 /*reserved1*/ 0, 169 /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, 170 CTL_DEFAULT_ROTATION_RATE & 0xff}, 171 /*reserved2*/ {0, 0} 172 }; 173 174 const static struct scsi_rigid_disk_page rigid_disk_page_changeable = { 175 /*page_code*/SMS_RIGID_DISK_PAGE, 176 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 177 /*cylinders*/ {0, 0, 0}, 178 /*heads*/ 0, 179 /*start_write_precomp*/ {0, 0, 0}, 180 /*start_reduced_current*/ {0, 0, 0}, 181 /*step_rate*/ {0, 0}, 182 /*landing_zone_cylinder*/ {0, 0, 0}, 183 /*rpl*/ 0, 184 /*rotational_offset*/ 0, 185 /*reserved1*/ 0, 186 /*rotation_rate*/ {0, 0}, 187 /*reserved2*/ {0, 0} 188 }; 189 190 const static struct scsi_da_verify_recovery_page verify_er_page_default = { 191 /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, 192 /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, 193 /*byte3*/0, 194 /*read_retry_count*/0, 195 /*reserved*/{ 0, 0, 0, 0, 0, 0 }, 196 /*recovery_time_limit*/{0, 0}, 197 }; 198 199 const static struct scsi_da_verify_recovery_page verify_er_page_changeable = { 200 
/*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, 201 /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, 202 /*byte3*/SMS_VER_PER, 203 /*read_retry_count*/0, 204 /*reserved*/{ 0, 0, 0, 0, 0, 0 }, 205 /*recovery_time_limit*/{0, 0}, 206 }; 207 208 const static struct scsi_caching_page caching_page_default = { 209 /*page_code*/SMS_CACHING_PAGE, 210 /*page_length*/sizeof(struct scsi_caching_page) - 2, 211 /*flags1*/ SCP_DISC | SCP_WCE, 212 /*ret_priority*/ 0, 213 /*disable_pf_transfer_len*/ {0xff, 0xff}, 214 /*min_prefetch*/ {0, 0}, 215 /*max_prefetch*/ {0xff, 0xff}, 216 /*max_pf_ceiling*/ {0xff, 0xff}, 217 /*flags2*/ 0, 218 /*cache_segments*/ 0, 219 /*cache_seg_size*/ {0, 0}, 220 /*reserved*/ 0, 221 /*non_cache_seg_size*/ {0, 0, 0} 222 }; 223 224 const static struct scsi_caching_page caching_page_changeable = { 225 /*page_code*/SMS_CACHING_PAGE, 226 /*page_length*/sizeof(struct scsi_caching_page) - 2, 227 /*flags1*/ SCP_WCE | SCP_RCD, 228 /*ret_priority*/ 0, 229 /*disable_pf_transfer_len*/ {0, 0}, 230 /*min_prefetch*/ {0, 0}, 231 /*max_prefetch*/ {0, 0}, 232 /*max_pf_ceiling*/ {0, 0}, 233 /*flags2*/ 0, 234 /*cache_segments*/ 0, 235 /*cache_seg_size*/ {0, 0}, 236 /*reserved*/ 0, 237 /*non_cache_seg_size*/ {0, 0, 0} 238 }; 239 240 const static struct scsi_control_page control_page_default = { 241 /*page_code*/SMS_CONTROL_MODE_PAGE, 242 /*page_length*/sizeof(struct scsi_control_page) - 2, 243 /*rlec*/0, 244 /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, 245 /*eca_and_aen*/0, 246 /*flags4*/SCP_TAS, 247 /*aen_holdoff_period*/{0, 0}, 248 /*busy_timeout_period*/{0, 0}, 249 /*extended_selftest_completion_time*/{0, 0} 250 }; 251 252 const static struct scsi_control_page control_page_changeable = { 253 /*page_code*/SMS_CONTROL_MODE_PAGE, 254 /*page_length*/sizeof(struct scsi_control_page) - 2, 255 /*rlec*/SCP_DSENSE, 256 /*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR, 257 /*eca_and_aen*/SCP_SWP, 258 /*flags4*/0, 259 /*aen_holdoff_period*/{0, 0}, 260 /*busy_timeout_period*/{0, 0}, 261 /*extended_selftest_completion_time*/{0, 0} 262 }; 263 264 #define CTL_CEM_LEN (sizeof(struct scsi_control_ext_page) - 4) 265 266 const static struct scsi_control_ext_page control_ext_page_default = { 267 /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, 268 /*subpage_code*/0x01, 269 /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, 270 /*flags*/0, 271 /*prio*/0, 272 /*max_sense*/0 273 }; 274 275 const static struct scsi_control_ext_page control_ext_page_changeable = { 276 /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, 277 /*subpage_code*/0x01, 278 /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, 279 /*flags*/0, 280 /*prio*/0, 281 /*max_sense*/0xff 282 }; 283 284 const static struct scsi_info_exceptions_page ie_page_default = { 285 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 286 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 287 /*info_flags*/SIEP_FLAGS_EWASC, 288 /*mrie*/SIEP_MRIE_NO, 289 /*interval_timer*/{0, 0, 0, 0}, 290 /*report_count*/{0, 0, 0, 1} 291 }; 292 293 const static struct scsi_info_exceptions_page ie_page_changeable = { 294 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 295 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 296 /*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST | 297 SIEP_FLAGS_LOGERR, 298 /*mrie*/0x0f, 299 /*interval_timer*/{0xff, 0xff, 0xff, 0xff}, 300 /*report_count*/{0xff, 0xff, 0xff, 0xff} 301 }; 302 303 #define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4) 304 305 const static struct ctl_logical_block_provisioning_page lbp_page_default = 
{{ 306 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 307 /*subpage_code*/0x02, 308 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 309 /*flags*/0, 310 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 311 /*descr*/{}}, 312 {{/*flags*/0, 313 /*resource*/0x01, 314 /*reserved*/{0, 0}, 315 /*count*/{0, 0, 0, 0}}, 316 {/*flags*/0, 317 /*resource*/0x02, 318 /*reserved*/{0, 0}, 319 /*count*/{0, 0, 0, 0}}, 320 {/*flags*/0, 321 /*resource*/0xf1, 322 /*reserved*/{0, 0}, 323 /*count*/{0, 0, 0, 0}}, 324 {/*flags*/0, 325 /*resource*/0xf2, 326 /*reserved*/{0, 0}, 327 /*count*/{0, 0, 0, 0}} 328 } 329 }; 330 331 const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{ 332 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 333 /*subpage_code*/0x02, 334 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 335 /*flags*/SLBPP_SITUA, 336 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 337 /*descr*/{}}, 338 {{/*flags*/0, 339 /*resource*/0, 340 /*reserved*/{0, 0}, 341 /*count*/{0, 0, 0, 0}}, 342 {/*flags*/0, 343 /*resource*/0, 344 /*reserved*/{0, 0}, 345 /*count*/{0, 0, 0, 0}}, 346 {/*flags*/0, 347 /*resource*/0, 348 /*reserved*/{0, 0}, 349 /*count*/{0, 0, 0, 0}}, 350 {/*flags*/0, 351 /*resource*/0, 352 /*reserved*/{0, 0}, 353 /*count*/{0, 0, 0, 0}} 354 } 355 }; 356 357 const static struct scsi_cddvd_capabilities_page cddvd_page_default = { 358 /*page_code*/SMS_CDDVD_CAPS_PAGE, 359 /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, 360 /*caps1*/0x3f, 361 /*caps2*/0x00, 362 /*caps3*/0xf0, 363 /*caps4*/0x00, 364 /*caps5*/0x29, 365 /*caps6*/0x00, 366 /*obsolete*/{0, 0}, 367 /*nvol_levels*/{0, 0}, 368 /*buffer_size*/{8, 0}, 369 /*obsolete2*/{0, 0}, 370 /*reserved*/0, 371 /*digital*/0, 372 /*obsolete3*/0, 373 /*copy_management*/0, 374 /*reserved2*/0, 375 /*rotation_control*/0, 376 /*cur_write_speed*/0, 377 /*num_speed_descr*/0, 378 }; 379 380 const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = { 381 /*page_code*/SMS_CDDVD_CAPS_PAGE, 382 /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, 383 /*caps1*/0, 384 /*caps2*/0, 385 /*caps3*/0, 386 /*caps4*/0, 387 /*caps5*/0, 388 /*caps6*/0, 389 /*obsolete*/{0, 0}, 390 /*nvol_levels*/{0, 0}, 391 /*buffer_size*/{0, 0}, 392 /*obsolete2*/{0, 0}, 393 /*reserved*/0, 394 /*digital*/0, 395 /*obsolete3*/0, 396 /*copy_management*/0, 397 /*reserved2*/0, 398 /*rotation_control*/0, 399 /*cur_write_speed*/0, 400 /*num_speed_descr*/0, 401 }; 402 403 SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer"); 404 static int worker_threads = -1; 405 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, 406 &worker_threads, 1, "Number of worker threads"); 407 static int ctl_debug = CTL_DEBUG_NONE; 408 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN, 409 &ctl_debug, 0, "Enabled debug flags"); 410 static int ctl_lun_map_size = 1024; 411 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN, 412 &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)"); 413 414 /* 415 * Supported pages (0x00), Serial number (0x80), Device ID (0x83), 416 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87), 417 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0), 418 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2) 419 */ 420 #define SCSI_EVPD_NUM_SUPPORTED_PAGES 10 421 422 static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event, 423 int param); 424 static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); 425 
static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest); 426 static int ctl_init(void); 427 static int ctl_shutdown(void); 428 static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); 429 static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); 430 static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio); 431 static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 432 struct ctl_ooa *ooa_hdr, 433 struct ctl_ooa_entry *kern_entries); 434 static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 435 struct thread *td); 436 static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 437 struct ctl_be_lun *be_lun); 438 static int ctl_free_lun(struct ctl_lun *lun); 439 static void ctl_create_lun(struct ctl_be_lun *be_lun); 440 441 static int ctl_do_mode_select(union ctl_io *io); 442 static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, 443 uint64_t res_key, uint64_t sa_res_key, 444 uint8_t type, uint32_t residx, 445 struct ctl_scsiio *ctsio, 446 struct scsi_per_res_out *cdb, 447 struct scsi_per_res_out_parms* param); 448 static void ctl_pro_preempt_other(struct ctl_lun *lun, 449 union ctl_ha_msg *msg); 450 static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io); 451 static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len); 452 static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len); 453 static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len); 454 static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len); 455 static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len); 456 static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, 457 int alloc_len); 458 static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, 459 int alloc_len); 460 static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len); 461 static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len); 462 static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio); 463 static int ctl_inquiry_std(struct ctl_scsiio *ctsio); 464 static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len); 465 static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, 466 bool seq); 467 static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2); 468 static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, 469 union ctl_io *pending_io, union ctl_io *ooa_io); 470 static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 471 union ctl_io *starting_io); 472 static int ctl_check_blocked(struct ctl_lun *lun); 473 static int ctl_scsiio_lun_check(struct ctl_lun *lun, 474 const struct ctl_cmd_entry *entry, 475 struct ctl_scsiio *ctsio); 476 static void ctl_failover_lun(union ctl_io *io); 477 static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, 478 struct ctl_scsiio *ctsio); 479 static int ctl_scsiio(struct ctl_scsiio *ctsio); 480 481 static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io); 482 static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, 483 ctl_ua_type ua_type); 484 static int ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io, 485 ctl_ua_type ua_type); 486 static int ctl_lun_reset(struct ctl_softc *ctl_softc, union ctl_io *io); 487 static int ctl_abort_task(union ctl_io *io); 488 static int ctl_abort_task_set(union ctl_io *io); 489 static int ctl_query_task(union 
ctl_io *io, int task_set); 490 static int ctl_i_t_nexus_reset(union ctl_io *io); 491 static int ctl_query_async_event(union ctl_io *io); 492 static void ctl_run_task(union ctl_io *io); 493 #ifdef CTL_IO_DELAY 494 static void ctl_datamove_timer_wakeup(void *arg); 495 static void ctl_done_timer_wakeup(void *arg); 496 #endif /* CTL_IO_DELAY */ 497 498 static void ctl_send_datamove_done(union ctl_io *io, int have_lock); 499 static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq); 500 static int ctl_datamove_remote_dm_write_cb(union ctl_io *io); 501 static void ctl_datamove_remote_write(union ctl_io *io); 502 static int ctl_datamove_remote_dm_read_cb(union ctl_io *io); 503 static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq); 504 static int ctl_datamove_remote_sgl_setup(union ctl_io *io); 505 static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 506 ctl_ha_dt_cb callback); 507 static void ctl_datamove_remote_read(union ctl_io *io); 508 static void ctl_datamove_remote(union ctl_io *io); 509 static void ctl_process_done(union ctl_io *io); 510 static void ctl_lun_thread(void *arg); 511 static void ctl_thresh_thread(void *arg); 512 static void ctl_work_thread(void *arg); 513 static void ctl_enqueue_incoming(union ctl_io *io); 514 static void ctl_enqueue_rtr(union ctl_io *io); 515 static void ctl_enqueue_done(union ctl_io *io); 516 static void ctl_enqueue_isc(union ctl_io *io); 517 static const struct ctl_cmd_entry * 518 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); 519 static const struct ctl_cmd_entry * 520 ctl_validate_command(struct ctl_scsiio *ctsio); 521 static int ctl_cmd_applicable(uint8_t lun_type, 522 const struct ctl_cmd_entry *entry); 523 static int ctl_ha_init(void); 524 static int ctl_ha_shutdown(void); 525 526 static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx); 527 static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx); 528 static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx); 529 static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key); 530 531 /* 532 * Load the serialization table. This isn't very pretty, but is probably 533 * the easiest way to do it. 534 */ 535 #include "ctl_ser_table.c" 536 537 /* 538 * We only need to define open, close and ioctl routines for this driver. 
539 */ 540 static struct cdevsw ctl_cdevsw = { 541 .d_version = D_VERSION, 542 .d_flags = 0, 543 .d_open = ctl_open, 544 .d_close = ctl_close, 545 .d_ioctl = ctl_ioctl, 546 .d_name = "ctl", 547 }; 548 549 550 MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL"); 551 552 static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *); 553 554 static moduledata_t ctl_moduledata = { 555 "ctl", 556 ctl_module_event_handler, 557 NULL 558 }; 559 560 DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); 561 MODULE_VERSION(ctl, 1); 562 563 static struct ctl_frontend ha_frontend = 564 { 565 .name = "ha", 566 .init = ctl_ha_init, 567 .shutdown = ctl_ha_shutdown, 568 }; 569 570 static int 571 ctl_ha_init(void) 572 { 573 struct ctl_softc *softc = control_softc; 574 575 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, 576 &softc->othersc_pool) != 0) 577 return (ENOMEM); 578 if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) { 579 ctl_pool_free(softc->othersc_pool); 580 return (EIO); 581 } 582 if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler) 583 != CTL_HA_STATUS_SUCCESS) { 584 ctl_ha_msg_destroy(softc); 585 ctl_pool_free(softc->othersc_pool); 586 return (EIO); 587 } 588 return (0); 589 }; 590 591 static int 592 ctl_ha_shutdown(void) 593 { 594 struct ctl_softc *softc = control_softc; 595 struct ctl_port *port; 596 597 ctl_ha_msg_shutdown(softc); 598 if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS) 599 return (EIO); 600 if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS) 601 return (EIO); 602 ctl_pool_free(softc->othersc_pool); 603 while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) { 604 ctl_port_deregister(port); 605 free(port->port_name, M_CTL); 606 free(port, M_CTL); 607 } 608 return (0); 609 }; 610 611 static void 612 ctl_ha_datamove(union ctl_io *io) 613 { 614 struct ctl_lun *lun = CTL_LUN(io); 615 struct ctl_sg_entry *sgl; 616 union ctl_ha_msg msg; 617 uint32_t sg_entries_sent; 618 int do_sg_copy, i, j; 619 620 memset(&msg.dt, 0, sizeof(msg.dt)); 621 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 622 msg.hdr.original_sc = io->io_hdr.original_sc; 623 msg.hdr.serializing_sc = io; 624 msg.hdr.nexus = io->io_hdr.nexus; 625 msg.hdr.status = io->io_hdr.status; 626 msg.dt.flags = io->io_hdr.flags; 627 628 /* 629 * We convert everything into a S/G list here. We can't 630 * pass by reference, only by value between controllers. 631 * So we can't pass a pointer to the S/G list, only as many 632 * S/G entries as we can fit in here. If it's possible for 633 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 634 * then we need to break this up into multiple transfers. 635 */ 636 if (io->scsiio.kern_sg_entries == 0) { 637 msg.dt.kern_sg_entries = 1; 638 #if 0 639 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 640 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; 641 } else { 642 /* XXX KDM use busdma here! 
*/ 643 msg.dt.sg_list[0].addr = 644 (void *)vtophys(io->scsiio.kern_data_ptr); 645 } 646 #else 647 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 648 ("HA does not support BUS_ADDR")); 649 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; 650 #endif 651 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 652 do_sg_copy = 0; 653 } else { 654 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 655 do_sg_copy = 1; 656 } 657 658 msg.dt.kern_data_len = io->scsiio.kern_data_len; 659 msg.dt.kern_total_len = io->scsiio.kern_total_len; 660 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 661 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 662 msg.dt.sg_sequence = 0; 663 664 /* 665 * Loop until we've sent all of the S/G entries. On the 666 * other end, we'll recompose these S/G entries into one 667 * contiguous list before processing. 668 */ 669 for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries; 670 msg.dt.sg_sequence++) { 671 msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) / 672 sizeof(msg.dt.sg_list[0])), 673 msg.dt.kern_sg_entries - sg_entries_sent); 674 if (do_sg_copy != 0) { 675 sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 676 for (i = sg_entries_sent, j = 0; 677 i < msg.dt.cur_sg_entries; i++, j++) { 678 #if 0 679 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 680 msg.dt.sg_list[j].addr = sgl[i].addr; 681 } else { 682 /* XXX KDM use busdma here! */ 683 msg.dt.sg_list[j].addr = 684 (void *)vtophys(sgl[i].addr); 685 } 686 #else 687 KASSERT((io->io_hdr.flags & 688 CTL_FLAG_BUS_ADDR) == 0, 689 ("HA does not support BUS_ADDR")); 690 msg.dt.sg_list[j].addr = sgl[i].addr; 691 #endif 692 msg.dt.sg_list[j].len = sgl[i].len; 693 } 694 } 695 696 sg_entries_sent += msg.dt.cur_sg_entries; 697 msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries); 698 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 699 sizeof(msg.dt) - sizeof(msg.dt.sg_list) + 700 sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries, 701 M_WAITOK) > CTL_HA_STATUS_SUCCESS) { 702 io->io_hdr.port_status = 31341; 703 io->scsiio.be_move_done(io); 704 return; 705 } 706 msg.dt.sent_sg_entries = sg_entries_sent; 707 } 708 709 /* 710 * Officially handover the request from us to peer. 711 * If failover has just happened, then we must return error. 712 * If failover happen just after, then it is not our problem. 
713 */ 714 if (lun) 715 mtx_lock(&lun->lun_lock); 716 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 717 if (lun) 718 mtx_unlock(&lun->lun_lock); 719 io->io_hdr.port_status = 31342; 720 io->scsiio.be_move_done(io); 721 return; 722 } 723 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 724 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 725 if (lun) 726 mtx_unlock(&lun->lun_lock); 727 } 728 729 static void 730 ctl_ha_done(union ctl_io *io) 731 { 732 union ctl_ha_msg msg; 733 734 if (io->io_hdr.io_type == CTL_IO_SCSI) { 735 memset(&msg, 0, sizeof(msg)); 736 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 737 msg.hdr.original_sc = io->io_hdr.original_sc; 738 msg.hdr.nexus = io->io_hdr.nexus; 739 msg.hdr.status = io->io_hdr.status; 740 msg.scsi.scsi_status = io->scsiio.scsi_status; 741 msg.scsi.tag_num = io->scsiio.tag_num; 742 msg.scsi.tag_type = io->scsiio.tag_type; 743 msg.scsi.sense_len = io->scsiio.sense_len; 744 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 745 io->scsiio.sense_len); 746 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 747 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 748 msg.scsi.sense_len, M_WAITOK); 749 } 750 ctl_free_io(io); 751 } 752 753 static void 754 ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, 755 union ctl_ha_msg *msg_info) 756 { 757 struct ctl_scsiio *ctsio; 758 759 if (msg_info->hdr.original_sc == NULL) { 760 printf("%s: original_sc == NULL!\n", __func__); 761 /* XXX KDM now what? */ 762 return; 763 } 764 765 ctsio = &msg_info->hdr.original_sc->scsiio; 766 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 767 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 768 ctsio->io_hdr.status = msg_info->hdr.status; 769 ctsio->scsi_status = msg_info->scsi.scsi_status; 770 ctsio->sense_len = msg_info->scsi.sense_len; 771 memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, 772 msg_info->scsi.sense_len); 773 ctl_enqueue_isc((union ctl_io *)ctsio); 774 } 775 776 static void 777 ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, 778 union ctl_ha_msg *msg_info) 779 { 780 struct ctl_scsiio *ctsio; 781 782 if (msg_info->hdr.serializing_sc == NULL) { 783 printf("%s: serializing_sc == NULL!\n", __func__); 784 /* XXX KDM now what? 
*/ 785 return; 786 } 787 788 ctsio = &msg_info->hdr.serializing_sc->scsiio; 789 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 790 ctl_enqueue_isc((union ctl_io *)ctsio); 791 } 792 793 void 794 ctl_isc_announce_lun(struct ctl_lun *lun) 795 { 796 struct ctl_softc *softc = lun->ctl_softc; 797 union ctl_ha_msg *msg; 798 struct ctl_ha_msg_lun_pr_key pr_key; 799 int i, k; 800 801 if (softc->ha_link != CTL_HA_LINK_ONLINE) 802 return; 803 mtx_lock(&lun->lun_lock); 804 i = sizeof(msg->lun); 805 if (lun->lun_devid) 806 i += lun->lun_devid->len; 807 i += sizeof(pr_key) * lun->pr_key_count; 808 alloc: 809 mtx_unlock(&lun->lun_lock); 810 msg = malloc(i, M_CTL, M_WAITOK); 811 mtx_lock(&lun->lun_lock); 812 k = sizeof(msg->lun); 813 if (lun->lun_devid) 814 k += lun->lun_devid->len; 815 k += sizeof(pr_key) * lun->pr_key_count; 816 if (i < k) { 817 free(msg, M_CTL); 818 i = k; 819 goto alloc; 820 } 821 bzero(&msg->lun, sizeof(msg->lun)); 822 msg->hdr.msg_type = CTL_MSG_LUN_SYNC; 823 msg->hdr.nexus.targ_lun = lun->lun; 824 msg->hdr.nexus.targ_mapped_lun = lun->lun; 825 msg->lun.flags = lun->flags; 826 msg->lun.pr_generation = lun->pr_generation; 827 msg->lun.pr_res_idx = lun->pr_res_idx; 828 msg->lun.pr_res_type = lun->pr_res_type; 829 msg->lun.pr_key_count = lun->pr_key_count; 830 i = 0; 831 if (lun->lun_devid) { 832 msg->lun.lun_devid_len = lun->lun_devid->len; 833 memcpy(&msg->lun.data[i], lun->lun_devid->data, 834 msg->lun.lun_devid_len); 835 i += msg->lun.lun_devid_len; 836 } 837 for (k = 0; k < CTL_MAX_INITIATORS; k++) { 838 if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0) 839 continue; 840 pr_key.pr_iid = k; 841 memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key)); 842 i += sizeof(pr_key); 843 } 844 mtx_unlock(&lun->lun_lock); 845 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 846 M_WAITOK); 847 free(msg, M_CTL); 848 849 if (lun->flags & CTL_LUN_PRIMARY_SC) { 850 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 851 ctl_isc_announce_mode(lun, -1, 852 lun->mode_pages.index[i].page_code & SMPH_PC_MASK, 853 lun->mode_pages.index[i].subpage); 854 } 855 } 856 } 857 858 void 859 ctl_isc_announce_port(struct ctl_port *port) 860 { 861 struct ctl_softc *softc = port->ctl_softc; 862 union ctl_ha_msg *msg; 863 int i; 864 865 if (port->targ_port < softc->port_min || 866 port->targ_port >= softc->port_max || 867 softc->ha_link != CTL_HA_LINK_ONLINE) 868 return; 869 i = sizeof(msg->port) + strlen(port->port_name) + 1; 870 if (port->lun_map) 871 i += port->lun_map_size * sizeof(uint32_t); 872 if (port->port_devid) 873 i += port->port_devid->len; 874 if (port->target_devid) 875 i += port->target_devid->len; 876 if (port->init_devid) 877 i += port->init_devid->len; 878 msg = malloc(i, M_CTL, M_WAITOK); 879 bzero(&msg->port, sizeof(msg->port)); 880 msg->hdr.msg_type = CTL_MSG_PORT_SYNC; 881 msg->hdr.nexus.targ_port = port->targ_port; 882 msg->port.port_type = port->port_type; 883 msg->port.physical_port = port->physical_port; 884 msg->port.virtual_port = port->virtual_port; 885 msg->port.status = port->status; 886 i = 0; 887 msg->port.name_len = sprintf(&msg->port.data[i], 888 "%d:%s", softc->ha_id, port->port_name) + 1; 889 i += msg->port.name_len; 890 if (port->lun_map) { 891 msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t); 892 memcpy(&msg->port.data[i], port->lun_map, 893 msg->port.lun_map_len); 894 i += msg->port.lun_map_len; 895 } 896 if (port->port_devid) { 897 msg->port.port_devid_len = port->port_devid->len; 898 memcpy(&msg->port.data[i], port->port_devid->data, 899 
msg->port.port_devid_len); 900 i += msg->port.port_devid_len; 901 } 902 if (port->target_devid) { 903 msg->port.target_devid_len = port->target_devid->len; 904 memcpy(&msg->port.data[i], port->target_devid->data, 905 msg->port.target_devid_len); 906 i += msg->port.target_devid_len; 907 } 908 if (port->init_devid) { 909 msg->port.init_devid_len = port->init_devid->len; 910 memcpy(&msg->port.data[i], port->init_devid->data, 911 msg->port.init_devid_len); 912 i += msg->port.init_devid_len; 913 } 914 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 915 M_WAITOK); 916 free(msg, M_CTL); 917 } 918 919 void 920 ctl_isc_announce_iid(struct ctl_port *port, int iid) 921 { 922 struct ctl_softc *softc = port->ctl_softc; 923 union ctl_ha_msg *msg; 924 int i, l; 925 926 if (port->targ_port < softc->port_min || 927 port->targ_port >= softc->port_max || 928 softc->ha_link != CTL_HA_LINK_ONLINE) 929 return; 930 mtx_lock(&softc->ctl_lock); 931 i = sizeof(msg->iid); 932 l = 0; 933 if (port->wwpn_iid[iid].name) 934 l = strlen(port->wwpn_iid[iid].name) + 1; 935 i += l; 936 msg = malloc(i, M_CTL, M_NOWAIT); 937 if (msg == NULL) { 938 mtx_unlock(&softc->ctl_lock); 939 return; 940 } 941 bzero(&msg->iid, sizeof(msg->iid)); 942 msg->hdr.msg_type = CTL_MSG_IID_SYNC; 943 msg->hdr.nexus.targ_port = port->targ_port; 944 msg->hdr.nexus.initid = iid; 945 msg->iid.in_use = port->wwpn_iid[iid].in_use; 946 msg->iid.name_len = l; 947 msg->iid.wwpn = port->wwpn_iid[iid].wwpn; 948 if (port->wwpn_iid[iid].name) 949 strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l); 950 mtx_unlock(&softc->ctl_lock); 951 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT); 952 free(msg, M_CTL); 953 } 954 955 void 956 ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx, 957 uint8_t page, uint8_t subpage) 958 { 959 struct ctl_softc *softc = lun->ctl_softc; 960 union ctl_ha_msg msg; 961 u_int i; 962 963 if (softc->ha_link != CTL_HA_LINK_ONLINE) 964 return; 965 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 966 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == 967 page && lun->mode_pages.index[i].subpage == subpage) 968 break; 969 } 970 if (i == CTL_NUM_MODE_PAGES) 971 return; 972 973 /* Don't try to replicate pages not present on this device. */ 974 if (lun->mode_pages.index[i].page_data == NULL) 975 return; 976 977 bzero(&msg.mode, sizeof(msg.mode)); 978 msg.hdr.msg_type = CTL_MSG_MODE_SYNC; 979 msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT; 980 msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT; 981 msg.hdr.nexus.targ_lun = lun->lun; 982 msg.hdr.nexus.targ_mapped_lun = lun->lun; 983 msg.mode.page_code = page; 984 msg.mode.subpage = subpage; 985 msg.mode.page_len = lun->mode_pages.index[i].page_len; 986 memcpy(msg.mode.data, lun->mode_pages.index[i].page_data, 987 msg.mode.page_len); 988 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode), 989 M_WAITOK); 990 } 991 992 static void 993 ctl_isc_ha_link_up(struct ctl_softc *softc) 994 { 995 struct ctl_port *port; 996 struct ctl_lun *lun; 997 union ctl_ha_msg msg; 998 int i; 999 1000 /* Announce this node parameters to peer for validation. 
*/ 1001 msg.login.msg_type = CTL_MSG_LOGIN; 1002 msg.login.version = CTL_HA_VERSION; 1003 msg.login.ha_mode = softc->ha_mode; 1004 msg.login.ha_id = softc->ha_id; 1005 msg.login.max_luns = CTL_MAX_LUNS; 1006 msg.login.max_ports = CTL_MAX_PORTS; 1007 msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT; 1008 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login), 1009 M_WAITOK); 1010 1011 STAILQ_FOREACH(port, &softc->port_list, links) { 1012 ctl_isc_announce_port(port); 1013 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1014 if (port->wwpn_iid[i].in_use) 1015 ctl_isc_announce_iid(port, i); 1016 } 1017 } 1018 STAILQ_FOREACH(lun, &softc->lun_list, links) 1019 ctl_isc_announce_lun(lun); 1020 } 1021 1022 static void 1023 ctl_isc_ha_link_down(struct ctl_softc *softc) 1024 { 1025 struct ctl_port *port; 1026 struct ctl_lun *lun; 1027 union ctl_io *io; 1028 int i; 1029 1030 mtx_lock(&softc->ctl_lock); 1031 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1032 mtx_lock(&lun->lun_lock); 1033 if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) { 1034 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; 1035 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 1036 } 1037 mtx_unlock(&lun->lun_lock); 1038 1039 mtx_unlock(&softc->ctl_lock); 1040 io = ctl_alloc_io(softc->othersc_pool); 1041 mtx_lock(&softc->ctl_lock); 1042 ctl_zero_io(io); 1043 io->io_hdr.msg_type = CTL_MSG_FAILOVER; 1044 io->io_hdr.nexus.targ_mapped_lun = lun->lun; 1045 ctl_enqueue_isc(io); 1046 } 1047 1048 STAILQ_FOREACH(port, &softc->port_list, links) { 1049 if (port->targ_port >= softc->port_min && 1050 port->targ_port < softc->port_max) 1051 continue; 1052 port->status &= ~CTL_PORT_STATUS_ONLINE; 1053 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1054 port->wwpn_iid[i].in_use = 0; 1055 free(port->wwpn_iid[i].name, M_CTL); 1056 port->wwpn_iid[i].name = NULL; 1057 } 1058 } 1059 mtx_unlock(&softc->ctl_lock); 1060 } 1061 1062 static void 1063 ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1064 { 1065 struct ctl_lun *lun; 1066 uint32_t iid = ctl_get_initindex(&msg->hdr.nexus); 1067 1068 mtx_lock(&softc->ctl_lock); 1069 if (msg->hdr.nexus.targ_mapped_lun >= CTL_MAX_LUNS || 1070 (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) { 1071 mtx_unlock(&softc->ctl_lock); 1072 return; 1073 } 1074 mtx_lock(&lun->lun_lock); 1075 mtx_unlock(&softc->ctl_lock); 1076 if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set) 1077 memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8); 1078 if (msg->ua.ua_all) { 1079 if (msg->ua.ua_set) 1080 ctl_est_ua_all(lun, iid, msg->ua.ua_type); 1081 else 1082 ctl_clr_ua_all(lun, iid, msg->ua.ua_type); 1083 } else { 1084 if (msg->ua.ua_set) 1085 ctl_est_ua(lun, iid, msg->ua.ua_type); 1086 else 1087 ctl_clr_ua(lun, iid, msg->ua.ua_type); 1088 } 1089 mtx_unlock(&lun->lun_lock); 1090 } 1091 1092 static void 1093 ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1094 { 1095 struct ctl_lun *lun; 1096 struct ctl_ha_msg_lun_pr_key pr_key; 1097 int i, k; 1098 ctl_lun_flags oflags; 1099 uint32_t targ_lun; 1100 1101 targ_lun = msg->hdr.nexus.targ_mapped_lun; 1102 mtx_lock(&softc->ctl_lock); 1103 if (targ_lun >= CTL_MAX_LUNS || 1104 (lun = softc->ctl_luns[targ_lun]) == NULL) { 1105 mtx_unlock(&softc->ctl_lock); 1106 return; 1107 } 1108 mtx_lock(&lun->lun_lock); 1109 mtx_unlock(&softc->ctl_lock); 1110 if (lun->flags & CTL_LUN_DISABLED) { 1111 mtx_unlock(&lun->lun_lock); 1112 return; 1113 } 1114 i = (lun->lun_devid != NULL) ? 
lun->lun_devid->len : 0; 1115 if (msg->lun.lun_devid_len != i || (i > 0 && 1116 memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) { 1117 mtx_unlock(&lun->lun_lock); 1118 printf("%s: Received conflicting HA LUN %d\n", 1119 __func__, targ_lun); 1120 return; 1121 } else { 1122 /* Record whether peer is primary. */ 1123 oflags = lun->flags; 1124 if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) && 1125 (msg->lun.flags & CTL_LUN_DISABLED) == 0) 1126 lun->flags |= CTL_LUN_PEER_SC_PRIMARY; 1127 else 1128 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; 1129 if (oflags != lun->flags) 1130 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 1131 1132 /* If peer is primary and we are not -- use data */ 1133 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 1134 (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) { 1135 lun->pr_generation = msg->lun.pr_generation; 1136 lun->pr_res_idx = msg->lun.pr_res_idx; 1137 lun->pr_res_type = msg->lun.pr_res_type; 1138 lun->pr_key_count = msg->lun.pr_key_count; 1139 for (k = 0; k < CTL_MAX_INITIATORS; k++) 1140 ctl_clr_prkey(lun, k); 1141 for (k = 0; k < msg->lun.pr_key_count; k++) { 1142 memcpy(&pr_key, &msg->lun.data[i], 1143 sizeof(pr_key)); 1144 ctl_alloc_prkey(lun, pr_key.pr_iid); 1145 ctl_set_prkey(lun, pr_key.pr_iid, 1146 pr_key.pr_key); 1147 i += sizeof(pr_key); 1148 } 1149 } 1150 1151 mtx_unlock(&lun->lun_lock); 1152 CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n", 1153 __func__, targ_lun, 1154 (msg->lun.flags & CTL_LUN_PRIMARY_SC) ? 1155 "primary" : "secondary")); 1156 1157 /* If we are primary but peer doesn't know -- notify */ 1158 if ((lun->flags & CTL_LUN_PRIMARY_SC) && 1159 (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0) 1160 ctl_isc_announce_lun(lun); 1161 } 1162 } 1163 1164 static void 1165 ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1166 { 1167 struct ctl_port *port; 1168 struct ctl_lun *lun; 1169 int i, new; 1170 1171 port = softc->ctl_ports[msg->hdr.nexus.targ_port]; 1172 if (port == NULL) { 1173 CTL_DEBUG_PRINT(("%s: New port %d\n", __func__, 1174 msg->hdr.nexus.targ_port)); 1175 new = 1; 1176 port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO); 1177 port->frontend = &ha_frontend; 1178 port->targ_port = msg->hdr.nexus.targ_port; 1179 port->fe_datamove = ctl_ha_datamove; 1180 port->fe_done = ctl_ha_done; 1181 } else if (port->frontend == &ha_frontend) { 1182 CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__, 1183 msg->hdr.nexus.targ_port)); 1184 new = 0; 1185 } else { 1186 printf("%s: Received conflicting HA port %d\n", 1187 __func__, msg->hdr.nexus.targ_port); 1188 return; 1189 } 1190 port->port_type = msg->port.port_type; 1191 port->physical_port = msg->port.physical_port; 1192 port->virtual_port = msg->port.virtual_port; 1193 port->status = msg->port.status; 1194 i = 0; 1195 free(port->port_name, M_CTL); 1196 port->port_name = strndup(&msg->port.data[i], msg->port.name_len, 1197 M_CTL); 1198 i += msg->port.name_len; 1199 if (msg->port.lun_map_len != 0) { 1200 if (port->lun_map == NULL || 1201 port->lun_map_size * sizeof(uint32_t) < 1202 msg->port.lun_map_len) { 1203 port->lun_map_size = 0; 1204 free(port->lun_map, M_CTL); 1205 port->lun_map = malloc(msg->port.lun_map_len, 1206 M_CTL, M_WAITOK); 1207 } 1208 memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len); 1209 port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t); 1210 i += msg->port.lun_map_len; 1211 } else { 1212 port->lun_map_size = 0; 1213 free(port->lun_map, M_CTL); 1214 port->lun_map = NULL; 1215 } 1216 if (msg->port.port_devid_len != 0) { 1217 if 
(port->port_devid == NULL || 1218 port->port_devid->len < msg->port.port_devid_len) { 1219 free(port->port_devid, M_CTL); 1220 port->port_devid = malloc(sizeof(struct ctl_devid) + 1221 msg->port.port_devid_len, M_CTL, M_WAITOK); 1222 } 1223 memcpy(port->port_devid->data, &msg->port.data[i], 1224 msg->port.port_devid_len); 1225 port->port_devid->len = msg->port.port_devid_len; 1226 i += msg->port.port_devid_len; 1227 } else { 1228 free(port->port_devid, M_CTL); 1229 port->port_devid = NULL; 1230 } 1231 if (msg->port.target_devid_len != 0) { 1232 if (port->target_devid == NULL || 1233 port->target_devid->len < msg->port.target_devid_len) { 1234 free(port->target_devid, M_CTL); 1235 port->target_devid = malloc(sizeof(struct ctl_devid) + 1236 msg->port.target_devid_len, M_CTL, M_WAITOK); 1237 } 1238 memcpy(port->target_devid->data, &msg->port.data[i], 1239 msg->port.target_devid_len); 1240 port->target_devid->len = msg->port.target_devid_len; 1241 i += msg->port.target_devid_len; 1242 } else { 1243 free(port->target_devid, M_CTL); 1244 port->target_devid = NULL; 1245 } 1246 if (msg->port.init_devid_len != 0) { 1247 if (port->init_devid == NULL || 1248 port->init_devid->len < msg->port.init_devid_len) { 1249 free(port->init_devid, M_CTL); 1250 port->init_devid = malloc(sizeof(struct ctl_devid) + 1251 msg->port.init_devid_len, M_CTL, M_WAITOK); 1252 } 1253 memcpy(port->init_devid->data, &msg->port.data[i], 1254 msg->port.init_devid_len); 1255 port->init_devid->len = msg->port.init_devid_len; 1256 i += msg->port.init_devid_len; 1257 } else { 1258 free(port->init_devid, M_CTL); 1259 port->init_devid = NULL; 1260 } 1261 if (new) { 1262 if (ctl_port_register(port) != 0) { 1263 printf("%s: ctl_port_register() failed with error\n", 1264 __func__); 1265 } 1266 } 1267 mtx_lock(&softc->ctl_lock); 1268 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1269 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 1270 continue; 1271 mtx_lock(&lun->lun_lock); 1272 ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE); 1273 mtx_unlock(&lun->lun_lock); 1274 } 1275 mtx_unlock(&softc->ctl_lock); 1276 } 1277 1278 static void 1279 ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1280 { 1281 struct ctl_port *port; 1282 int iid; 1283 1284 port = softc->ctl_ports[msg->hdr.nexus.targ_port]; 1285 if (port == NULL) { 1286 printf("%s: Received IID for unknown port %d\n", 1287 __func__, msg->hdr.nexus.targ_port); 1288 return; 1289 } 1290 iid = msg->hdr.nexus.initid; 1291 port->wwpn_iid[iid].in_use = msg->iid.in_use; 1292 port->wwpn_iid[iid].wwpn = msg->iid.wwpn; 1293 free(port->wwpn_iid[iid].name, M_CTL); 1294 if (msg->iid.name_len) { 1295 port->wwpn_iid[iid].name = strndup(&msg->iid.data[0], 1296 msg->iid.name_len, M_CTL); 1297 } else 1298 port->wwpn_iid[iid].name = NULL; 1299 } 1300 1301 static void 1302 ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1303 { 1304 1305 if (msg->login.version != CTL_HA_VERSION) { 1306 printf("CTL HA peers have different versions %d != %d\n", 1307 msg->login.version, CTL_HA_VERSION); 1308 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1309 return; 1310 } 1311 if (msg->login.ha_mode != softc->ha_mode) { 1312 printf("CTL HA peers have different ha_mode %d != %d\n", 1313 msg->login.ha_mode, softc->ha_mode); 1314 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1315 return; 1316 } 1317 if (msg->login.ha_id == softc->ha_id) { 1318 printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id); 1319 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1320 return; 1321 } 1322 if (msg->login.max_luns != 
CTL_MAX_LUNS || 1323 msg->login.max_ports != CTL_MAX_PORTS || 1324 msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) { 1325 printf("CTL HA peers have different limits\n"); 1326 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1327 return; 1328 } 1329 } 1330 1331 static void 1332 ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1333 { 1334 struct ctl_lun *lun; 1335 u_int i; 1336 uint32_t initidx, targ_lun; 1337 1338 targ_lun = msg->hdr.nexus.targ_mapped_lun; 1339 mtx_lock(&softc->ctl_lock); 1340 if (targ_lun >= CTL_MAX_LUNS || 1341 (lun = softc->ctl_luns[targ_lun]) == NULL) { 1342 mtx_unlock(&softc->ctl_lock); 1343 return; 1344 } 1345 mtx_lock(&lun->lun_lock); 1346 mtx_unlock(&softc->ctl_lock); 1347 if (lun->flags & CTL_LUN_DISABLED) { 1348 mtx_unlock(&lun->lun_lock); 1349 return; 1350 } 1351 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 1352 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == 1353 msg->mode.page_code && 1354 lun->mode_pages.index[i].subpage == msg->mode.subpage) 1355 break; 1356 } 1357 if (i == CTL_NUM_MODE_PAGES) { 1358 mtx_unlock(&lun->lun_lock); 1359 return; 1360 } 1361 memcpy(lun->mode_pages.index[i].page_data, msg->mode.data, 1362 lun->mode_pages.index[i].page_len); 1363 initidx = ctl_get_initindex(&msg->hdr.nexus); 1364 if (initidx != -1) 1365 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 1366 mtx_unlock(&lun->lun_lock); 1367 } 1368 1369 /* 1370 * ISC (Inter Shelf Communication) event handler. Events from the HA 1371 * subsystem come in here. 1372 */ 1373 static void 1374 ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) 1375 { 1376 struct ctl_softc *softc = control_softc; 1377 union ctl_io *io; 1378 struct ctl_prio *presio; 1379 ctl_ha_status isc_status; 1380 1381 CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event)); 1382 if (event == CTL_HA_EVT_MSG_RECV) { 1383 union ctl_ha_msg *msg, msgbuf; 1384 1385 if (param > sizeof(msgbuf)) 1386 msg = malloc(param, M_CTL, M_WAITOK); 1387 else 1388 msg = &msgbuf; 1389 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param, 1390 M_WAITOK); 1391 if (isc_status != CTL_HA_STATUS_SUCCESS) { 1392 printf("%s: Error receiving message: %d\n", 1393 __func__, isc_status); 1394 if (msg != &msgbuf) 1395 free(msg, M_CTL); 1396 return; 1397 } 1398 1399 CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type)); 1400 switch (msg->hdr.msg_type) { 1401 case CTL_MSG_SERIALIZE: 1402 io = ctl_alloc_io(softc->othersc_pool); 1403 ctl_zero_io(io); 1404 // populate ctsio from msg 1405 io->io_hdr.io_type = CTL_IO_SCSI; 1406 io->io_hdr.msg_type = CTL_MSG_SERIALIZE; 1407 io->io_hdr.original_sc = msg->hdr.original_sc; 1408 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | 1409 CTL_FLAG_IO_ACTIVE; 1410 /* 1411 * If we're in serialization-only mode, we don't 1412 * want to go through full done processing. Thus 1413 * the COPY flag. 1414 * 1415 * XXX KDM add another flag that is more specific. 
1416 */ 1417 if (softc->ha_mode != CTL_HA_MODE_XFER) 1418 io->io_hdr.flags |= CTL_FLAG_INT_COPY; 1419 io->io_hdr.nexus = msg->hdr.nexus; 1420 #if 0 1421 printf("port %u, iid %u, lun %u\n", 1422 io->io_hdr.nexus.targ_port, 1423 io->io_hdr.nexus.initid, 1424 io->io_hdr.nexus.targ_lun); 1425 #endif 1426 io->scsiio.tag_num = msg->scsi.tag_num; 1427 io->scsiio.tag_type = msg->scsi.tag_type; 1428 #ifdef CTL_TIME_IO 1429 io->io_hdr.start_time = time_uptime; 1430 getbinuptime(&io->io_hdr.start_bt); 1431 #endif /* CTL_TIME_IO */ 1432 io->scsiio.cdb_len = msg->scsi.cdb_len; 1433 memcpy(io->scsiio.cdb, msg->scsi.cdb, 1434 CTL_MAX_CDBLEN); 1435 if (softc->ha_mode == CTL_HA_MODE_XFER) { 1436 const struct ctl_cmd_entry *entry; 1437 1438 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 1439 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 1440 io->io_hdr.flags |= 1441 entry->flags & CTL_FLAG_DATA_MASK; 1442 } 1443 ctl_enqueue_isc(io); 1444 break; 1445 1446 /* Performed on the Originating SC, XFER mode only */ 1447 case CTL_MSG_DATAMOVE: { 1448 struct ctl_sg_entry *sgl; 1449 int i, j; 1450 1451 io = msg->hdr.original_sc; 1452 if (io == NULL) { 1453 printf("%s: original_sc == NULL!\n", __func__); 1454 /* XXX KDM do something here */ 1455 break; 1456 } 1457 io->io_hdr.msg_type = CTL_MSG_DATAMOVE; 1458 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1459 /* 1460 * Keep track of this, we need to send it back over 1461 * when the datamove is complete. 1462 */ 1463 io->io_hdr.serializing_sc = msg->hdr.serializing_sc; 1464 if (msg->hdr.status == CTL_SUCCESS) 1465 io->io_hdr.status = msg->hdr.status; 1466 1467 if (msg->dt.sg_sequence == 0) { 1468 #ifdef CTL_TIME_IO 1469 getbinuptime(&io->io_hdr.dma_start_bt); 1470 #endif 1471 i = msg->dt.kern_sg_entries + 1472 msg->dt.kern_data_len / 1473 CTL_HA_DATAMOVE_SEGMENT + 1; 1474 sgl = malloc(sizeof(*sgl) * i, M_CTL, 1475 M_WAITOK | M_ZERO); 1476 io->io_hdr.remote_sglist = sgl; 1477 io->io_hdr.local_sglist = 1478 &sgl[msg->dt.kern_sg_entries]; 1479 1480 io->scsiio.kern_data_ptr = (uint8_t *)sgl; 1481 1482 io->scsiio.kern_sg_entries = 1483 msg->dt.kern_sg_entries; 1484 io->scsiio.rem_sg_entries = 1485 msg->dt.kern_sg_entries; 1486 io->scsiio.kern_data_len = 1487 msg->dt.kern_data_len; 1488 io->scsiio.kern_total_len = 1489 msg->dt.kern_total_len; 1490 io->scsiio.kern_data_resid = 1491 msg->dt.kern_data_resid; 1492 io->scsiio.kern_rel_offset = 1493 msg->dt.kern_rel_offset; 1494 io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR; 1495 io->io_hdr.flags |= msg->dt.flags & 1496 CTL_FLAG_BUS_ADDR; 1497 } else 1498 sgl = (struct ctl_sg_entry *) 1499 io->scsiio.kern_data_ptr; 1500 1501 for (i = msg->dt.sent_sg_entries, j = 0; 1502 i < (msg->dt.sent_sg_entries + 1503 msg->dt.cur_sg_entries); i++, j++) { 1504 sgl[i].addr = msg->dt.sg_list[j].addr; 1505 sgl[i].len = msg->dt.sg_list[j].len; 1506 1507 #if 0 1508 printf("%s: DATAMOVE: %p,%lu j=%d, i=%d\n", 1509 __func__, sgl[i].addr, sgl[i].len, j, i); 1510 #endif 1511 } 1512 1513 /* 1514 * If this is the last piece of the I/O, we've got 1515 * the full S/G list. Queue processing in the thread. 1516 * Otherwise wait for the next piece. 1517 */ 1518 if (msg->dt.sg_last != 0) 1519 ctl_enqueue_isc(io); 1520 break; 1521 } 1522 /* Performed on the Serializing (primary) SC, XFER mode only */ 1523 case CTL_MSG_DATAMOVE_DONE: { 1524 if (msg->hdr.serializing_sc == NULL) { 1525 printf("%s: serializing_sc == NULL!\n", 1526 __func__); 1527 /* XXX KDM now what? 
*/ 1528 break; 1529 } 1530 /* 1531 * We grab the sense information here in case 1532 * there was a failure, so we can return status 1533 * back to the initiator. 1534 */ 1535 io = msg->hdr.serializing_sc; 1536 io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 1537 io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; 1538 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1539 io->io_hdr.port_status = msg->scsi.port_status; 1540 io->scsiio.kern_data_resid = msg->scsi.kern_data_resid; 1541 if (msg->hdr.status != CTL_STATUS_NONE) { 1542 io->io_hdr.status = msg->hdr.status; 1543 io->scsiio.scsi_status = msg->scsi.scsi_status; 1544 io->scsiio.sense_len = msg->scsi.sense_len; 1545 memcpy(&io->scsiio.sense_data, 1546 &msg->scsi.sense_data, 1547 msg->scsi.sense_len); 1548 if (msg->hdr.status == CTL_SUCCESS) 1549 io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; 1550 } 1551 ctl_enqueue_isc(io); 1552 break; 1553 } 1554 1555 /* Preformed on Originating SC, SER_ONLY mode */ 1556 case CTL_MSG_R2R: 1557 io = msg->hdr.original_sc; 1558 if (io == NULL) { 1559 printf("%s: original_sc == NULL!\n", 1560 __func__); 1561 break; 1562 } 1563 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1564 io->io_hdr.msg_type = CTL_MSG_R2R; 1565 io->io_hdr.serializing_sc = msg->hdr.serializing_sc; 1566 ctl_enqueue_isc(io); 1567 break; 1568 1569 /* 1570 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY 1571 * mode. 1572 * Performed on the Originating (i.e. secondary) SC in XFER 1573 * mode 1574 */ 1575 case CTL_MSG_FINISH_IO: 1576 if (softc->ha_mode == CTL_HA_MODE_XFER) 1577 ctl_isc_handler_finish_xfer(softc, msg); 1578 else 1579 ctl_isc_handler_finish_ser_only(softc, msg); 1580 break; 1581 1582 /* Preformed on Originating SC */ 1583 case CTL_MSG_BAD_JUJU: 1584 io = msg->hdr.original_sc; 1585 if (io == NULL) { 1586 printf("%s: Bad JUJU!, original_sc is NULL!\n", 1587 __func__); 1588 break; 1589 } 1590 ctl_copy_sense_data(msg, io); 1591 /* 1592 * IO should have already been cleaned up on other 1593 * SC so clear this flag so we won't send a message 1594 * back to finish the IO there. 
1595 */ 1596 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 1597 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1598 1599 /* io = msg->hdr.serializing_sc; */ 1600 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 1601 ctl_enqueue_isc(io); 1602 break; 1603 1604 /* Handle resets sent from the other side */ 1605 case CTL_MSG_MANAGE_TASKS: { 1606 struct ctl_taskio *taskio; 1607 taskio = (struct ctl_taskio *)ctl_alloc_io( 1608 softc->othersc_pool); 1609 ctl_zero_io((union ctl_io *)taskio); 1610 taskio->io_hdr.io_type = CTL_IO_TASK; 1611 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1612 taskio->io_hdr.nexus = msg->hdr.nexus; 1613 taskio->task_action = msg->task.task_action; 1614 taskio->tag_num = msg->task.tag_num; 1615 taskio->tag_type = msg->task.tag_type; 1616 #ifdef CTL_TIME_IO 1617 taskio->io_hdr.start_time = time_uptime; 1618 getbinuptime(&taskio->io_hdr.start_bt); 1619 #endif /* CTL_TIME_IO */ 1620 ctl_run_task((union ctl_io *)taskio); 1621 break; 1622 } 1623 /* Persistent Reserve action which needs attention */ 1624 case CTL_MSG_PERS_ACTION: 1625 presio = (struct ctl_prio *)ctl_alloc_io( 1626 softc->othersc_pool); 1627 ctl_zero_io((union ctl_io *)presio); 1628 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; 1629 presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1630 presio->io_hdr.nexus = msg->hdr.nexus; 1631 presio->pr_msg = msg->pr; 1632 ctl_enqueue_isc((union ctl_io *)presio); 1633 break; 1634 case CTL_MSG_UA: 1635 ctl_isc_ua(softc, msg, param); 1636 break; 1637 case CTL_MSG_PORT_SYNC: 1638 ctl_isc_port_sync(softc, msg, param); 1639 break; 1640 case CTL_MSG_LUN_SYNC: 1641 ctl_isc_lun_sync(softc, msg, param); 1642 break; 1643 case CTL_MSG_IID_SYNC: 1644 ctl_isc_iid_sync(softc, msg, param); 1645 break; 1646 case CTL_MSG_LOGIN: 1647 ctl_isc_login(softc, msg, param); 1648 break; 1649 case CTL_MSG_MODE_SYNC: 1650 ctl_isc_mode_sync(softc, msg, param); 1651 break; 1652 default: 1653 printf("Received HA message of unknown type %d\n", 1654 msg->hdr.msg_type); 1655 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1656 break; 1657 } 1658 if (msg != &msgbuf) 1659 free(msg, M_CTL); 1660 } else if (event == CTL_HA_EVT_LINK_CHANGE) { 1661 printf("CTL: HA link status changed from %d to %d\n", 1662 softc->ha_link, param); 1663 if (param == softc->ha_link) 1664 return; 1665 if (softc->ha_link == CTL_HA_LINK_ONLINE) { 1666 softc->ha_link = param; 1667 ctl_isc_ha_link_down(softc); 1668 } else { 1669 softc->ha_link = param; 1670 if (softc->ha_link == CTL_HA_LINK_ONLINE) 1671 ctl_isc_ha_link_up(softc); 1672 } 1673 return; 1674 } else { 1675 printf("ctl_isc_event_handler: Unknown event %d\n", event); 1676 return; 1677 } 1678 } 1679 1680 static void 1681 ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) 1682 { 1683 1684 memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data, 1685 src->scsi.sense_len); 1686 dest->scsiio.scsi_status = src->scsi.scsi_status; 1687 dest->scsiio.sense_len = src->scsi.sense_len; 1688 dest->io_hdr.status = src->hdr.status; 1689 } 1690 1691 static void 1692 ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest) 1693 { 1694 1695 memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data, 1696 src->scsiio.sense_len); 1697 dest->scsi.scsi_status = src->scsiio.scsi_status; 1698 dest->scsi.sense_len = src->scsiio.sense_len; 1699 dest->hdr.status = src->io_hdr.status; 1700 } 1701 1702 void 1703 ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1704 { 1705 struct ctl_softc *softc = lun->ctl_softc; 1706 ctl_ua_type *pu; 1707 1708 if (initidx < softc->init_min || initidx >= 
softc->init_max) 1709 return; 1710 mtx_assert(&lun->lun_lock, MA_OWNED); 1711 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1712 if (pu == NULL) 1713 return; 1714 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; 1715 } 1716 1717 void 1718 ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua) 1719 { 1720 int i; 1721 1722 mtx_assert(&lun->lun_lock, MA_OWNED); 1723 if (lun->pending_ua[port] == NULL) 1724 return; 1725 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1726 if (port * CTL_MAX_INIT_PER_PORT + i == except) 1727 continue; 1728 lun->pending_ua[port][i] |= ua; 1729 } 1730 } 1731 1732 void 1733 ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1734 { 1735 struct ctl_softc *softc = lun->ctl_softc; 1736 int i; 1737 1738 mtx_assert(&lun->lun_lock, MA_OWNED); 1739 for (i = softc->port_min; i < softc->port_max; i++) 1740 ctl_est_ua_port(lun, i, except, ua); 1741 } 1742 1743 void 1744 ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1745 { 1746 struct ctl_softc *softc = lun->ctl_softc; 1747 ctl_ua_type *pu; 1748 1749 if (initidx < softc->init_min || initidx >= softc->init_max) 1750 return; 1751 mtx_assert(&lun->lun_lock, MA_OWNED); 1752 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1753 if (pu == NULL) 1754 return; 1755 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; 1756 } 1757 1758 void 1759 ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1760 { 1761 struct ctl_softc *softc = lun->ctl_softc; 1762 int i, j; 1763 1764 mtx_assert(&lun->lun_lock, MA_OWNED); 1765 for (i = softc->port_min; i < softc->port_max; i++) { 1766 if (lun->pending_ua[i] == NULL) 1767 continue; 1768 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1769 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1770 continue; 1771 lun->pending_ua[i][j] &= ~ua; 1772 } 1773 } 1774 } 1775 1776 void 1777 ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, 1778 ctl_ua_type ua_type) 1779 { 1780 struct ctl_lun *lun; 1781 1782 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); 1783 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 1784 mtx_lock(&lun->lun_lock); 1785 ctl_clr_ua(lun, initidx, ua_type); 1786 mtx_unlock(&lun->lun_lock); 1787 } 1788 } 1789 1790 static int 1791 ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS) 1792 { 1793 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1794 struct ctl_lun *lun; 1795 struct ctl_lun_req ireq; 1796 int error, value; 1797 1798 value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 
0 : 1; 1799 error = sysctl_handle_int(oidp, &value, 0, req); 1800 if ((error != 0) || (req->newptr == NULL)) 1801 return (error); 1802 1803 mtx_lock(&softc->ctl_lock); 1804 if (value == 0) 1805 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1806 else 1807 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1808 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1809 mtx_unlock(&softc->ctl_lock); 1810 bzero(&ireq, sizeof(ireq)); 1811 ireq.reqtype = CTL_LUNREQ_MODIFY; 1812 ireq.reqdata.modify.lun_id = lun->lun; 1813 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, 1814 curthread); 1815 if (ireq.status != CTL_LUN_OK) { 1816 printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", 1817 __func__, ireq.status, ireq.error_str); 1818 } 1819 mtx_lock(&softc->ctl_lock); 1820 } 1821 mtx_unlock(&softc->ctl_lock); 1822 return (0); 1823 } 1824 1825 static int 1826 ctl_init(void) 1827 { 1828 struct make_dev_args args; 1829 struct ctl_softc *softc; 1830 int i, error; 1831 1832 softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1833 M_WAITOK | M_ZERO); 1834 1835 make_dev_args_init(&args); 1836 args.mda_devsw = &ctl_cdevsw; 1837 args.mda_uid = UID_ROOT; 1838 args.mda_gid = GID_OPERATOR; 1839 args.mda_mode = 0600; 1840 args.mda_si_drv1 = softc; 1841 error = make_dev_s(&args, &softc->dev, "cam/ctl"); 1842 if (error != 0) { 1843 free(softc, M_DEVBUF); 1844 control_softc = NULL; 1845 return (error); 1846 } 1847 1848 sysctl_ctx_init(&softc->sysctl_ctx); 1849 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1850 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1851 CTLFLAG_RD, 0, "CAM Target Layer"); 1852 1853 if (softc->sysctl_tree == NULL) { 1854 printf("%s: unable to allocate sysctl tree\n", __func__); 1855 destroy_dev(softc->dev); 1856 free(softc, M_DEVBUF); 1857 control_softc = NULL; 1858 return (ENOMEM); 1859 } 1860 1861 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1862 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1863 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1864 softc->flags = 0; 1865 1866 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1867 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, 1868 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); 1869 1870 /* 1871 * In Copan's HA scheme, the "master" and "slave" roles are 1872 * figured out through the slot the controller is in. Although it 1873 * is an active/active system, someone has to be in charge. 
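 *
 * Illustrative sketch only (the actual values come from ctl.h, not from
 * this comment): with NUM_HA_SHELVES of 2 and CTL_MAX_PORTS of 256, the
 * head with ha_id 1 would get port_min 0 and port_max 128, the head with
 * ha_id 2 would get 128 and 256, and init_min/init_max scale those bounds
 * by CTL_MAX_INIT_PER_PORT, as computed below.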
1874 */ 1875 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1876 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1877 "HA head ID (0 - no HA)"); 1878 if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) { 1879 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1880 softc->is_single = 1; 1881 softc->port_cnt = CTL_MAX_PORTS; 1882 softc->port_min = 0; 1883 } else { 1884 softc->port_cnt = CTL_MAX_PORTS / NUM_HA_SHELVES; 1885 softc->port_min = (softc->ha_id - 1) * softc->port_cnt; 1886 } 1887 softc->port_max = softc->port_min + softc->port_cnt; 1888 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; 1889 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; 1890 1891 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1892 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, 1893 "HA link state (0 - offline, 1 - unknown, 2 - online)"); 1894 1895 STAILQ_INIT(&softc->lun_list); 1896 STAILQ_INIT(&softc->pending_lun_queue); 1897 STAILQ_INIT(&softc->fe_list); 1898 STAILQ_INIT(&softc->port_list); 1899 STAILQ_INIT(&softc->be_list); 1900 ctl_tpc_init(softc); 1901 1902 if (worker_threads <= 0) 1903 worker_threads = max(1, mp_ncpus / 4); 1904 if (worker_threads > CTL_MAX_THREADS) 1905 worker_threads = CTL_MAX_THREADS; 1906 1907 for (i = 0; i < worker_threads; i++) { 1908 struct ctl_thread *thr = &softc->threads[i]; 1909 1910 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); 1911 thr->ctl_softc = softc; 1912 STAILQ_INIT(&thr->incoming_queue); 1913 STAILQ_INIT(&thr->rtr_queue); 1914 STAILQ_INIT(&thr->done_queue); 1915 STAILQ_INIT(&thr->isc_queue); 1916 1917 error = kproc_kthread_add(ctl_work_thread, thr, 1918 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); 1919 if (error != 0) { 1920 printf("error creating CTL work thread!\n"); 1921 return (error); 1922 } 1923 } 1924 error = kproc_kthread_add(ctl_lun_thread, softc, 1925 &softc->ctl_proc, &softc->lun_thread, 0, 0, "ctl", "lun"); 1926 if (error != 0) { 1927 printf("error creating CTL lun thread!\n"); 1928 return (error); 1929 } 1930 error = kproc_kthread_add(ctl_thresh_thread, softc, 1931 &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh"); 1932 if (error != 0) { 1933 printf("error creating CTL threshold thread!\n"); 1934 return (error); 1935 } 1936 1937 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), 1938 OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN, 1939 softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); 1940 1941 if (softc->is_single == 0) { 1942 if (ctl_frontend_register(&ha_frontend) != 0) 1943 softc->is_single = 1; 1944 } 1945 return (0); 1946 } 1947 1948 static int 1949 ctl_shutdown(void) 1950 { 1951 struct ctl_softc *softc = control_softc; 1952 int i; 1953 1954 if (softc->is_single == 0) 1955 ctl_frontend_deregister(&ha_frontend); 1956 1957 destroy_dev(softc->dev); 1958 1959 /* Shutdown CTL threads. 
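 * Setting softc->shutdown below tells each thread to exit; we then wake
 * every thread and pause until it has cleared its thread pointer, after
 * which its queue lock can be torn down.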
*/ 1960 softc->shutdown = 1; 1961 for (i = 0; i < worker_threads; i++) { 1962 struct ctl_thread *thr = &softc->threads[i]; 1963 while (thr->thread != NULL) { 1964 wakeup(thr); 1965 if (thr->thread != NULL) 1966 pause("CTL thr shutdown", 1); 1967 } 1968 mtx_destroy(&thr->queue_lock); 1969 } 1970 while (softc->lun_thread != NULL) { 1971 wakeup(&softc->pending_lun_queue); 1972 if (softc->lun_thread != NULL) 1973 pause("CTL thr shutdown", 1); 1974 } 1975 while (softc->thresh_thread != NULL) { 1976 wakeup(softc->thresh_thread); 1977 if (softc->thresh_thread != NULL) 1978 pause("CTL thr shutdown", 1); 1979 } 1980 1981 ctl_tpc_shutdown(softc); 1982 uma_zdestroy(softc->io_zone); 1983 mtx_destroy(&softc->ctl_lock); 1984 1985 sysctl_ctx_free(&softc->sysctl_ctx); 1986 1987 free(softc, M_DEVBUF); 1988 control_softc = NULL; 1989 return (0); 1990 } 1991 1992 static int 1993 ctl_module_event_handler(module_t mod, int what, void *arg) 1994 { 1995 1996 switch (what) { 1997 case MOD_LOAD: 1998 return (ctl_init()); 1999 case MOD_UNLOAD: 2000 return (ctl_shutdown()); 2001 default: 2002 return (EOPNOTSUPP); 2003 } 2004 } 2005 2006 /* 2007 * XXX KDM should we do some access checks here? Bump a reference count to 2008 * prevent a CTL module from being unloaded while someone has it open? 2009 */ 2010 static int 2011 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 2012 { 2013 return (0); 2014 } 2015 2016 static int 2017 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 2018 { 2019 return (0); 2020 } 2021 2022 /* 2023 * Remove an initiator by port number and initiator ID. 2024 * Returns 0 for success, -1 for failure. 2025 */ 2026 int 2027 ctl_remove_initiator(struct ctl_port *port, int iid) 2028 { 2029 struct ctl_softc *softc = port->ctl_softc; 2030 2031 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2032 2033 if (iid >= CTL_MAX_INIT_PER_PORT) { 2034 printf("%s: initiator ID %u >= maximum %u!\n", 2035 __func__, iid, CTL_MAX_INIT_PER_PORT); 2036 return (-1); 2037 } 2038 2039 mtx_lock(&softc->ctl_lock); 2040 port->wwpn_iid[iid].in_use--; 2041 port->wwpn_iid[iid].last_use = time_uptime; 2042 mtx_unlock(&softc->ctl_lock); 2043 ctl_isc_announce_iid(port, iid); 2044 2045 return (0); 2046 } 2047 2048 /* 2049 * Add an initiator to the initiator map. 2050 * Returns iid for success, < 0 for failure.
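 * When the caller passes a negative iid, a slot is chosen by first
 * matching an existing entry by WWPN or name, then by taking a never-used
 * entry, and finally by recycling the least recently used free entry.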
2051 */ 2052 int 2053 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 2054 { 2055 struct ctl_softc *softc = port->ctl_softc; 2056 time_t best_time; 2057 int i, best; 2058 2059 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2060 2061 if (iid >= CTL_MAX_INIT_PER_PORT) { 2062 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 2063 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 2064 free(name, M_CTL); 2065 return (-1); 2066 } 2067 2068 mtx_lock(&softc->ctl_lock); 2069 2070 if (iid < 0 && (wwpn != 0 || name != NULL)) { 2071 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2072 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 2073 iid = i; 2074 break; 2075 } 2076 if (name != NULL && port->wwpn_iid[i].name != NULL && 2077 strcmp(name, port->wwpn_iid[i].name) == 0) { 2078 iid = i; 2079 break; 2080 } 2081 } 2082 } 2083 2084 if (iid < 0) { 2085 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2086 if (port->wwpn_iid[i].in_use == 0 && 2087 port->wwpn_iid[i].wwpn == 0 && 2088 port->wwpn_iid[i].name == NULL) { 2089 iid = i; 2090 break; 2091 } 2092 } 2093 } 2094 2095 if (iid < 0) { 2096 best = -1; 2097 best_time = INT32_MAX; 2098 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2099 if (port->wwpn_iid[i].in_use == 0) { 2100 if (port->wwpn_iid[i].last_use < best_time) { 2101 best = i; 2102 best_time = port->wwpn_iid[i].last_use; 2103 } 2104 } 2105 } 2106 iid = best; 2107 } 2108 2109 if (iid < 0) { 2110 mtx_unlock(&softc->ctl_lock); 2111 free(name, M_CTL); 2112 return (-2); 2113 } 2114 2115 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 2116 /* 2117 * This is not an error yet. 2118 */ 2119 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 2120 #if 0 2121 printf("%s: port %d iid %u WWPN %#jx arrived" 2122 " again\n", __func__, port->targ_port, 2123 iid, (uintmax_t)wwpn); 2124 #endif 2125 goto take; 2126 } 2127 if (name != NULL && port->wwpn_iid[iid].name != NULL && 2128 strcmp(name, port->wwpn_iid[iid].name) == 0) { 2129 #if 0 2130 printf("%s: port %d iid %u name '%s' arrived" 2131 " again\n", __func__, port->targ_port, 2132 iid, name); 2133 #endif 2134 goto take; 2135 } 2136 2137 /* 2138 * This is an error, but what do we do about it? The 2139 * driver is telling us we have a new WWPN for this 2140 * initiator ID, so we pretty much need to use it. 2141 */ 2142 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 2143 " but WWPN %#jx '%s' is still at that address\n", 2144 __func__, port->targ_port, iid, wwpn, name, 2145 (uintmax_t)port->wwpn_iid[iid].wwpn, 2146 port->wwpn_iid[iid].name); 2147 2148 /* 2149 * XXX KDM clear have_ca and ua_pending on each LUN for 2150 * this initiator. 
2151 */ 2152 } 2153 take: 2154 free(port->wwpn_iid[iid].name, M_CTL); 2155 port->wwpn_iid[iid].name = name; 2156 port->wwpn_iid[iid].wwpn = wwpn; 2157 port->wwpn_iid[iid].in_use++; 2158 mtx_unlock(&softc->ctl_lock); 2159 ctl_isc_announce_iid(port, iid); 2160 2161 return (iid); 2162 } 2163 2164 static int 2165 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 2166 { 2167 int len; 2168 2169 switch (port->port_type) { 2170 case CTL_PORT_FC: 2171 { 2172 struct scsi_transportid_fcp *id = 2173 (struct scsi_transportid_fcp *)buf; 2174 if (port->wwpn_iid[iid].wwpn == 0) 2175 return (0); 2176 memset(id, 0, sizeof(*id)); 2177 id->format_protocol = SCSI_PROTO_FC; 2178 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 2179 return (sizeof(*id)); 2180 } 2181 case CTL_PORT_ISCSI: 2182 { 2183 struct scsi_transportid_iscsi_port *id = 2184 (struct scsi_transportid_iscsi_port *)buf; 2185 if (port->wwpn_iid[iid].name == NULL) 2186 return (0); 2187 memset(id, 0, 256); 2188 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 2189 SCSI_PROTO_ISCSI; 2190 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 2191 len = roundup2(min(len, 252), 4); 2192 scsi_ulto2b(len, id->additional_length); 2193 return (sizeof(*id) + len); 2194 } 2195 case CTL_PORT_SAS: 2196 { 2197 struct scsi_transportid_sas *id = 2198 (struct scsi_transportid_sas *)buf; 2199 if (port->wwpn_iid[iid].wwpn == 0) 2200 return (0); 2201 memset(id, 0, sizeof(*id)); 2202 id->format_protocol = SCSI_PROTO_SAS; 2203 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 2204 return (sizeof(*id)); 2205 } 2206 default: 2207 { 2208 struct scsi_transportid_spi *id = 2209 (struct scsi_transportid_spi *)buf; 2210 memset(id, 0, sizeof(*id)); 2211 id->format_protocol = SCSI_PROTO_SPI; 2212 scsi_ulto2b(iid, id->scsi_addr); 2213 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 2214 return (sizeof(*id)); 2215 } 2216 } 2217 } 2218 2219 /* 2220 * Serialize a command that went down the "wrong" side, and so was sent to 2221 * this controller for execution. The logic is a little different than the 2222 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 2223 * sent back to the other side, but in the success case, we execute the 2224 * command on this side (XFER mode) or tell the other side to execute it 2225 * (SER_ONLY mode). 2226 */ 2227 static void 2228 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 2229 { 2230 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2231 struct ctl_port *port = CTL_PORT(ctsio); 2232 union ctl_ha_msg msg_info; 2233 struct ctl_lun *lun; 2234 const struct ctl_cmd_entry *entry; 2235 uint32_t targ_lun; 2236 2237 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 2238 2239 /* Make sure that we know about this port. */ 2240 if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) { 2241 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2242 /*retry_count*/ 1); 2243 goto badjuju; 2244 } 2245 2246 /* Make sure that we know about this LUN. */ 2247 mtx_lock(&softc->ctl_lock); 2248 if (targ_lun >= CTL_MAX_LUNS || 2249 (lun = softc->ctl_luns[targ_lun]) == NULL) { 2250 mtx_unlock(&softc->ctl_lock); 2251 2252 /* 2253 * The other node would not send this request to us unless 2254 * received announce that we are primary node for this LUN. 2255 * If this LUN does not exist now, it is probably result of 2256 * a race, so respond to initiator in the most opaque way. 
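 * A BUSY status simply makes the initiator retry the command later,
 * without revealing whether the LUN exists on this node.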
2257 */ 2258 ctl_set_busy(ctsio); 2259 goto badjuju; 2260 } 2261 mtx_lock(&lun->lun_lock); 2262 mtx_unlock(&softc->ctl_lock); 2263 2264 /* 2265 * If the LUN is invalid, pretend that it doesn't exist. 2266 * It will go away as soon as all pending I/Os completed. 2267 */ 2268 if (lun->flags & CTL_LUN_DISABLED) { 2269 mtx_unlock(&lun->lun_lock); 2270 ctl_set_busy(ctsio); 2271 goto badjuju; 2272 } 2273 2274 entry = ctl_get_cmd_entry(ctsio, NULL); 2275 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 2276 mtx_unlock(&lun->lun_lock); 2277 goto badjuju; 2278 } 2279 2280 CTL_LUN(ctsio) = lun; 2281 CTL_BACKEND_LUN(ctsio) = lun->be_lun; 2282 2283 /* 2284 * Every I/O goes into the OOA queue for a 2285 * particular LUN, and stays there until completion. 2286 */ 2287 #ifdef CTL_TIME_IO 2288 if (TAILQ_EMPTY(&lun->ooa_queue)) 2289 lun->idle_time += getsbinuptime() - lun->last_busy; 2290 #endif 2291 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2292 2293 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 2294 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, 2295 ooa_links))) { 2296 case CTL_ACTION_BLOCK: 2297 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 2298 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 2299 blocked_links); 2300 mtx_unlock(&lun->lun_lock); 2301 break; 2302 case CTL_ACTION_PASS: 2303 case CTL_ACTION_SKIP: 2304 if (softc->ha_mode == CTL_HA_MODE_XFER) { 2305 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 2306 ctl_enqueue_rtr((union ctl_io *)ctsio); 2307 mtx_unlock(&lun->lun_lock); 2308 } else { 2309 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 2310 mtx_unlock(&lun->lun_lock); 2311 2312 /* send msg back to other side */ 2313 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 2314 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 2315 msg_info.hdr.msg_type = CTL_MSG_R2R; 2316 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2317 sizeof(msg_info.hdr), M_WAITOK); 2318 } 2319 break; 2320 case CTL_ACTION_OVERLAP: 2321 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2322 mtx_unlock(&lun->lun_lock); 2323 ctl_set_overlapped_cmd(ctsio); 2324 goto badjuju; 2325 case CTL_ACTION_OVERLAP_TAG: 2326 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2327 mtx_unlock(&lun->lun_lock); 2328 ctl_set_overlapped_tag(ctsio, ctsio->tag_num); 2329 goto badjuju; 2330 case CTL_ACTION_ERROR: 2331 default: 2332 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2333 mtx_unlock(&lun->lun_lock); 2334 2335 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2336 /*retry_count*/ 0); 2337 badjuju: 2338 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 2339 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 2340 msg_info.hdr.serializing_sc = NULL; 2341 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 2342 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2343 sizeof(msg_info.scsi), M_WAITOK); 2344 ctl_free_io((union ctl_io *)ctsio); 2345 break; 2346 } 2347 } 2348 2349 /* 2350 * Returns 0 for success, errno for failure. 2351 */ 2352 static void 2353 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 2354 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 2355 { 2356 union ctl_io *io; 2357 2358 mtx_lock(&lun->lun_lock); 2359 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); 2360 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2361 ooa_links)) { 2362 struct ctl_ooa_entry *entry; 2363 2364 /* 2365 * If we've got more than we can fit, just count the 2366 * remaining entries. 
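 * The caller (ctl_ioctl) reports the overflow back to userland through
 * dropped_num and the CTL_OOA_NEED_MORE_SPACE status once the scan is done.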
2367 */ 2368 if (*cur_fill_num >= ooa_hdr->alloc_num) 2369 continue; 2370 2371 entry = &kern_entries[*cur_fill_num]; 2372 2373 entry->tag_num = io->scsiio.tag_num; 2374 entry->lun_num = lun->lun; 2375 #ifdef CTL_TIME_IO 2376 entry->start_bt = io->io_hdr.start_bt; 2377 #endif 2378 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 2379 entry->cdb_len = io->scsiio.cdb_len; 2380 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 2381 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 2382 2383 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 2384 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 2385 2386 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2387 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2388 2389 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2390 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2391 2392 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2393 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2394 } 2395 mtx_unlock(&lun->lun_lock); 2396 } 2397 2398 static void * 2399 ctl_copyin_alloc(void *user_addr, unsigned int len, char *error_str, 2400 size_t error_str_len) 2401 { 2402 void *kptr; 2403 2404 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 2405 2406 if (copyin(user_addr, kptr, len) != 0) { 2407 snprintf(error_str, error_str_len, "Error copying %d bytes " 2408 "from user address %p to kernel address %p", len, 2409 user_addr, kptr); 2410 free(kptr, M_CTL); 2411 return (NULL); 2412 } 2413 2414 return (kptr); 2415 } 2416 2417 static void 2418 ctl_free_args(int num_args, struct ctl_be_arg *args) 2419 { 2420 int i; 2421 2422 if (args == NULL) 2423 return; 2424 2425 for (i = 0; i < num_args; i++) { 2426 free(args[i].kname, M_CTL); 2427 free(args[i].kvalue, M_CTL); 2428 } 2429 2430 free(args, M_CTL); 2431 } 2432 2433 static struct ctl_be_arg * 2434 ctl_copyin_args(int num_args, struct ctl_be_arg *uargs, 2435 char *error_str, size_t error_str_len) 2436 { 2437 struct ctl_be_arg *args; 2438 int i; 2439 2440 args = ctl_copyin_alloc(uargs, num_args * sizeof(*args), 2441 error_str, error_str_len); 2442 2443 if (args == NULL) 2444 goto bailout; 2445 2446 for (i = 0; i < num_args; i++) { 2447 args[i].kname = NULL; 2448 args[i].kvalue = NULL; 2449 } 2450 2451 for (i = 0; i < num_args; i++) { 2452 uint8_t *tmpptr; 2453 2454 if (args[i].namelen == 0) { 2455 snprintf(error_str, error_str_len, "Argument %d " 2456 "name length is zero", i); 2457 goto bailout; 2458 } 2459 2460 args[i].kname = ctl_copyin_alloc(args[i].name, 2461 args[i].namelen, error_str, error_str_len); 2462 if (args[i].kname == NULL) 2463 goto bailout; 2464 2465 if (args[i].kname[args[i].namelen - 1] != '\0') { 2466 snprintf(error_str, error_str_len, "Argument %d " 2467 "name is not NUL-terminated", i); 2468 goto bailout; 2469 } 2470 2471 if (args[i].flags & CTL_BEARG_RD) { 2472 if (args[i].vallen == 0) { 2473 snprintf(error_str, error_str_len, "Argument %d " 2474 "value length is zero", i); 2475 goto bailout; 2476 } 2477 2478 tmpptr = ctl_copyin_alloc(args[i].value, 2479 args[i].vallen, error_str, error_str_len); 2480 if (tmpptr == NULL) 2481 goto bailout; 2482 2483 if ((args[i].flags & CTL_BEARG_ASCII) 2484 && (tmpptr[args[i].vallen - 1] != '\0')) { 2485 snprintf(error_str, error_str_len, "Argument " 2486 "%d value is not NUL-terminated", i); 2487 free(tmpptr, M_CTL); 2488 goto bailout; 2489 } 2490 args[i].kvalue = tmpptr; 2491 } else { 2492 args[i].kvalue = malloc(args[i].vallen, 2493 M_CTL, M_WAITOK | M_ZERO); 2494 } 2495 } 2496 2497 return (args); 2498 bailout: 2499 2500 ctl_free_args(num_args, args); 2501 2502 return (NULL); 2503 } 2504 2505 static void 2506 
ctl_copyout_args(int num_args, struct ctl_be_arg *args) 2507 { 2508 int i; 2509 2510 for (i = 0; i < num_args; i++) { 2511 if (args[i].flags & CTL_BEARG_WR) 2512 copyout(args[i].kvalue, args[i].value, args[i].vallen); 2513 } 2514 } 2515 2516 /* 2517 * Escape characters that are illegal or not recommended in XML. 2518 */ 2519 int 2520 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 2521 { 2522 char *end = str + size; 2523 int retval; 2524 2525 retval = 0; 2526 2527 for (; *str && str < end; str++) { 2528 switch (*str) { 2529 case '&': 2530 retval = sbuf_printf(sb, "&amp;"); 2531 break; 2532 case '>': 2533 retval = sbuf_printf(sb, "&gt;"); 2534 break; 2535 case '<': 2536 retval = sbuf_printf(sb, "&lt;"); 2537 break; 2538 default: 2539 retval = sbuf_putc(sb, *str); 2540 break; 2541 } 2542 2543 if (retval != 0) 2544 break; 2545 2546 } 2547 2548 return (retval); 2549 } 2550 2551 static void 2552 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2553 { 2554 struct scsi_vpd_id_descriptor *desc; 2555 int i; 2556 2557 if (id == NULL || id->len < 4) 2558 return; 2559 desc = (struct scsi_vpd_id_descriptor *)id->data; 2560 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2561 case SVPD_ID_TYPE_T10: 2562 sbuf_printf(sb, "t10."); 2563 break; 2564 case SVPD_ID_TYPE_EUI64: 2565 sbuf_printf(sb, "eui."); 2566 break; 2567 case SVPD_ID_TYPE_NAA: 2568 sbuf_printf(sb, "naa."); 2569 break; 2570 case SVPD_ID_TYPE_SCSI_NAME: 2571 break; 2572 } 2573 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2574 case SVPD_ID_CODESET_BINARY: 2575 for (i = 0; i < desc->length; i++) 2576 sbuf_printf(sb, "%02x", desc->identifier[i]); 2577 break; 2578 case SVPD_ID_CODESET_ASCII: 2579 sbuf_printf(sb, "%.*s", (int)desc->length, 2580 (char *)desc->identifier); 2581 break; 2582 case SVPD_ID_CODESET_UTF8: 2583 sbuf_printf(sb, "%s", (char *)desc->identifier); 2584 break; 2585 } 2586 } 2587 2588 static int 2589 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2590 struct thread *td) 2591 { 2592 struct ctl_softc *softc = dev->si_drv1; 2593 struct ctl_port *port; 2594 struct ctl_lun *lun; 2595 int retval; 2596 2597 retval = 0; 2598 2599 switch (cmd) { 2600 case CTL_IO: 2601 retval = ctl_ioctl_io(dev, cmd, addr, flag, td); 2602 break; 2603 case CTL_ENABLE_PORT: 2604 case CTL_DISABLE_PORT: 2605 case CTL_SET_PORT_WWNS: { 2606 struct ctl_port *port; 2607 struct ctl_port_entry *entry; 2608 2609 entry = (struct ctl_port_entry *)addr; 2610 2611 mtx_lock(&softc->ctl_lock); 2612 STAILQ_FOREACH(port, &softc->port_list, links) { 2613 int action, done; 2614 2615 if (port->targ_port < softc->port_min || 2616 port->targ_port >= softc->port_max) 2617 continue; 2618 2619 action = 0; 2620 done = 0; 2621 if ((entry->port_type == CTL_PORT_NONE) 2622 && (entry->targ_port == port->targ_port)) { 2623 /* 2624 * If the user only wants to enable or 2625 * disable or set WWNs on a specific port, 2626 * do the operation and we're done. 2627 */ 2628 action = 1; 2629 done = 1; 2630 } else if (entry->port_type & port->port_type) { 2631 /* 2632 * Compare the user's type mask with the 2633 * particular frontend type to see if we 2634 * have a match. 2635 */ 2636 action = 1; 2637 done = 0; 2638 2639 /* 2640 * Make sure the user isn't trying to set 2641 * WWNs on multiple ports at the same time. 
2642 */ 2643 if (cmd == CTL_SET_PORT_WWNS) { 2644 printf("%s: Can't set WWNs on " 2645 "multiple ports\n", __func__); 2646 retval = EINVAL; 2647 break; 2648 } 2649 } 2650 if (action == 0) 2651 continue; 2652 2653 /* 2654 * XXX KDM we have to drop the lock here, because 2655 * the online/offline operations can potentially 2656 * block. We need to reference count the frontends 2657 * so they can't go away, 2658 */ 2659 if (cmd == CTL_ENABLE_PORT) { 2660 mtx_unlock(&softc->ctl_lock); 2661 ctl_port_online(port); 2662 mtx_lock(&softc->ctl_lock); 2663 } else if (cmd == CTL_DISABLE_PORT) { 2664 mtx_unlock(&softc->ctl_lock); 2665 ctl_port_offline(port); 2666 mtx_lock(&softc->ctl_lock); 2667 } else if (cmd == CTL_SET_PORT_WWNS) { 2668 ctl_port_set_wwns(port, 2669 (entry->flags & CTL_PORT_WWNN_VALID) ? 2670 1 : 0, entry->wwnn, 2671 (entry->flags & CTL_PORT_WWPN_VALID) ? 2672 1 : 0, entry->wwpn); 2673 } 2674 if (done != 0) 2675 break; 2676 } 2677 mtx_unlock(&softc->ctl_lock); 2678 break; 2679 } 2680 case CTL_GET_OOA: { 2681 struct ctl_ooa *ooa_hdr; 2682 struct ctl_ooa_entry *entries; 2683 uint32_t cur_fill_num; 2684 2685 ooa_hdr = (struct ctl_ooa *)addr; 2686 2687 if ((ooa_hdr->alloc_len == 0) 2688 || (ooa_hdr->alloc_num == 0)) { 2689 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2690 "must be non-zero\n", __func__, 2691 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2692 retval = EINVAL; 2693 break; 2694 } 2695 2696 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2697 sizeof(struct ctl_ooa_entry))) { 2698 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2699 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2700 __func__, ooa_hdr->alloc_len, 2701 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2702 retval = EINVAL; 2703 break; 2704 } 2705 2706 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2707 if (entries == NULL) { 2708 printf("%s: could not allocate %d bytes for OOA " 2709 "dump\n", __func__, ooa_hdr->alloc_len); 2710 retval = ENOMEM; 2711 break; 2712 } 2713 2714 mtx_lock(&softc->ctl_lock); 2715 if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 && 2716 (ooa_hdr->lun_num >= CTL_MAX_LUNS || 2717 softc->ctl_luns[ooa_hdr->lun_num] == NULL)) { 2718 mtx_unlock(&softc->ctl_lock); 2719 free(entries, M_CTL); 2720 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2721 __func__, (uintmax_t)ooa_hdr->lun_num); 2722 retval = EINVAL; 2723 break; 2724 } 2725 2726 cur_fill_num = 0; 2727 2728 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2729 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2730 ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2731 ooa_hdr, entries); 2732 } 2733 } else { 2734 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2735 ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, 2736 entries); 2737 } 2738 mtx_unlock(&softc->ctl_lock); 2739 2740 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2741 ooa_hdr->fill_len = ooa_hdr->fill_num * 2742 sizeof(struct ctl_ooa_entry); 2743 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2744 if (retval != 0) { 2745 printf("%s: error copying out %d bytes for OOA dump\n", 2746 __func__, ooa_hdr->fill_len); 2747 } 2748 2749 getbinuptime(&ooa_hdr->cur_bt); 2750 2751 if (cur_fill_num > ooa_hdr->alloc_num) { 2752 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2753 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2754 } else { 2755 ooa_hdr->dropped_num = 0; 2756 ooa_hdr->status = CTL_OOA_OK; 2757 } 2758 2759 free(entries, M_CTL); 2760 break; 2761 } 2762 case CTL_DELAY_IO: { 2763 struct ctl_io_delay_info *delay_info; 2764 2765 delay_info = 
(struct ctl_io_delay_info *)addr; 2766 2767 #ifdef CTL_IO_DELAY 2768 mtx_lock(&softc->ctl_lock); 2769 if (delay_info->lun_id >= CTL_MAX_LUNS || 2770 (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) { 2771 mtx_unlock(&softc->ctl_lock); 2772 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2773 break; 2774 } 2775 mtx_lock(&lun->lun_lock); 2776 mtx_unlock(&softc->ctl_lock); 2777 delay_info->status = CTL_DELAY_STATUS_OK; 2778 switch (delay_info->delay_type) { 2779 case CTL_DELAY_TYPE_CONT: 2780 case CTL_DELAY_TYPE_ONESHOT: 2781 break; 2782 default: 2783 delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE; 2784 break; 2785 } 2786 switch (delay_info->delay_loc) { 2787 case CTL_DELAY_LOC_DATAMOVE: 2788 lun->delay_info.datamove_type = delay_info->delay_type; 2789 lun->delay_info.datamove_delay = delay_info->delay_secs; 2790 break; 2791 case CTL_DELAY_LOC_DONE: 2792 lun->delay_info.done_type = delay_info->delay_type; 2793 lun->delay_info.done_delay = delay_info->delay_secs; 2794 break; 2795 default: 2796 delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; 2797 break; 2798 } 2799 mtx_unlock(&lun->lun_lock); 2800 #else 2801 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2802 #endif /* CTL_IO_DELAY */ 2803 break; 2804 } 2805 #ifdef CTL_LEGACY_STATS 2806 case CTL_GETSTATS: { 2807 struct ctl_stats *stats = (struct ctl_stats *)addr; 2808 int i; 2809 2810 /* 2811 * XXX KDM no locking here. If the LUN list changes, 2812 * things can blow up. 2813 */ 2814 i = 0; 2815 stats->status = CTL_SS_OK; 2816 stats->fill_len = 0; 2817 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2818 if (stats->fill_len + sizeof(lun->legacy_stats) > 2819 stats->alloc_len) { 2820 stats->status = CTL_SS_NEED_MORE_SPACE; 2821 break; 2822 } 2823 retval = copyout(&lun->legacy_stats, &stats->lun_stats[i++], 2824 sizeof(lun->legacy_stats)); 2825 if (retval != 0) 2826 break; 2827 stats->fill_len += sizeof(lun->legacy_stats); 2828 } 2829 stats->num_luns = softc->num_luns; 2830 stats->flags = CTL_STATS_FLAG_NONE; 2831 #ifdef CTL_TIME_IO 2832 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 2833 #endif 2834 getnanouptime(&stats->timestamp); 2835 break; 2836 } 2837 #endif /* CTL_LEGACY_STATS */ 2838 case CTL_ERROR_INJECT: { 2839 struct ctl_error_desc *err_desc, *new_err_desc; 2840 2841 err_desc = (struct ctl_error_desc *)addr; 2842 2843 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2844 M_WAITOK | M_ZERO); 2845 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2846 2847 mtx_lock(&softc->ctl_lock); 2848 if (err_desc->lun_id >= CTL_MAX_LUNS || 2849 (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) { 2850 mtx_unlock(&softc->ctl_lock); 2851 free(new_err_desc, M_CTL); 2852 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2853 __func__, (uintmax_t)err_desc->lun_id); 2854 retval = EINVAL; 2855 break; 2856 } 2857 mtx_lock(&lun->lun_lock); 2858 mtx_unlock(&softc->ctl_lock); 2859 2860 /* 2861 * We could do some checking here to verify the validity 2862 * of the request, but given the complexity of error 2863 * injection requests, the checking logic would be fairly 2864 * complex. 2865 * 2866 * For now, if the request is invalid, it just won't get 2867 * executed and might get deleted. 2868 */ 2869 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2870 2871 /* 2872 * XXX KDM check to make sure the serial number is unique, 2873 * in case we somehow manage to wrap. That shouldn't 2874 * happen for a very long time, but it's the right thing to 2875 * do. 
2876 */ 2877 new_err_desc->serial = lun->error_serial; 2878 err_desc->serial = lun->error_serial; 2879 lun->error_serial++; 2880 2881 mtx_unlock(&lun->lun_lock); 2882 break; 2883 } 2884 case CTL_ERROR_INJECT_DELETE: { 2885 struct ctl_error_desc *delete_desc, *desc, *desc2; 2886 int delete_done; 2887 2888 delete_desc = (struct ctl_error_desc *)addr; 2889 delete_done = 0; 2890 2891 mtx_lock(&softc->ctl_lock); 2892 if (delete_desc->lun_id >= CTL_MAX_LUNS || 2893 (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) { 2894 mtx_unlock(&softc->ctl_lock); 2895 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2896 __func__, (uintmax_t)delete_desc->lun_id); 2897 retval = EINVAL; 2898 break; 2899 } 2900 mtx_lock(&lun->lun_lock); 2901 mtx_unlock(&softc->ctl_lock); 2902 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2903 if (desc->serial != delete_desc->serial) 2904 continue; 2905 2906 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2907 links); 2908 free(desc, M_CTL); 2909 delete_done = 1; 2910 } 2911 mtx_unlock(&lun->lun_lock); 2912 if (delete_done == 0) { 2913 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2914 "error serial %ju on LUN %u\n", __func__, 2915 delete_desc->serial, delete_desc->lun_id); 2916 retval = EINVAL; 2917 break; 2918 } 2919 break; 2920 } 2921 case CTL_DUMP_STRUCTS: { 2922 int j, k; 2923 struct ctl_port *port; 2924 struct ctl_frontend *fe; 2925 2926 mtx_lock(&softc->ctl_lock); 2927 printf("CTL Persistent Reservation information start:\n"); 2928 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2929 mtx_lock(&lun->lun_lock); 2930 if ((lun->flags & CTL_LUN_DISABLED) != 0) { 2931 mtx_unlock(&lun->lun_lock); 2932 continue; 2933 } 2934 2935 for (j = 0; j < CTL_MAX_PORTS; j++) { 2936 if (lun->pr_keys[j] == NULL) 2937 continue; 2938 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2939 if (lun->pr_keys[j][k] == 0) 2940 continue; 2941 printf(" LUN %ju port %d iid %d key " 2942 "%#jx\n", lun->lun, j, k, 2943 (uintmax_t)lun->pr_keys[j][k]); 2944 } 2945 } 2946 mtx_unlock(&lun->lun_lock); 2947 } 2948 printf("CTL Persistent Reservation information end\n"); 2949 printf("CTL Ports:\n"); 2950 STAILQ_FOREACH(port, &softc->port_list, links) { 2951 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2952 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2953 port->frontend->name, port->port_type, 2954 port->physical_port, port->virtual_port, 2955 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2956 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2957 if (port->wwpn_iid[j].in_use == 0 && 2958 port->wwpn_iid[j].wwpn == 0 && 2959 port->wwpn_iid[j].name == NULL) 2960 continue; 2961 2962 printf(" iid %u use %d WWPN %#jx '%s'\n", 2963 j, port->wwpn_iid[j].in_use, 2964 (uintmax_t)port->wwpn_iid[j].wwpn, 2965 port->wwpn_iid[j].name); 2966 } 2967 } 2968 printf("CTL Port information end\n"); 2969 mtx_unlock(&softc->ctl_lock); 2970 /* 2971 * XXX KDM calling this without a lock. We'd likely want 2972 * to drop the lock before calling the frontend's dump 2973 * routine anyway. 
2974 */ 2975 printf("CTL Frontends:\n"); 2976 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2977 printf(" Frontend '%s'\n", fe->name); 2978 if (fe->fe_dump != NULL) 2979 fe->fe_dump(); 2980 } 2981 printf("CTL Frontend information end\n"); 2982 break; 2983 } 2984 case CTL_LUN_REQ: { 2985 struct ctl_lun_req *lun_req; 2986 struct ctl_backend_driver *backend; 2987 2988 lun_req = (struct ctl_lun_req *)addr; 2989 2990 backend = ctl_backend_find(lun_req->backend); 2991 if (backend == NULL) { 2992 lun_req->status = CTL_LUN_ERROR; 2993 snprintf(lun_req->error_str, 2994 sizeof(lun_req->error_str), 2995 "Backend \"%s\" not found.", 2996 lun_req->backend); 2997 break; 2998 } 2999 if (lun_req->num_be_args > 0) { 3000 lun_req->kern_be_args = ctl_copyin_args( 3001 lun_req->num_be_args, 3002 lun_req->be_args, 3003 lun_req->error_str, 3004 sizeof(lun_req->error_str)); 3005 if (lun_req->kern_be_args == NULL) { 3006 lun_req->status = CTL_LUN_ERROR; 3007 break; 3008 } 3009 } 3010 3011 retval = backend->ioctl(dev, cmd, addr, flag, td); 3012 3013 if (lun_req->num_be_args > 0) { 3014 ctl_copyout_args(lun_req->num_be_args, 3015 lun_req->kern_be_args); 3016 ctl_free_args(lun_req->num_be_args, 3017 lun_req->kern_be_args); 3018 } 3019 break; 3020 } 3021 case CTL_LUN_LIST: { 3022 struct sbuf *sb; 3023 struct ctl_lun_list *list; 3024 struct ctl_option *opt; 3025 3026 list = (struct ctl_lun_list *)addr; 3027 3028 /* 3029 * Allocate a fixed length sbuf here, based on the length 3030 * of the user's buffer. We could allocate an auto-extending 3031 * buffer, and then tell the user how much larger our 3032 * amount of data is than his buffer, but that presents 3033 * some problems: 3034 * 3035 * 1. The sbuf(9) routines use a blocking malloc, and so 3036 * we can't hold a lock while calling them with an 3037 * auto-extending buffer. 3038 * 3039 * 2. There is not currently a LUN reference counting 3040 * mechanism, outside of outstanding transactions on 3041 * the LUN's OOA queue. So a LUN could go away on us 3042 * while we're getting the LUN number, backend-specific 3043 * information, etc. Thus, given the way things 3044 * currently work, we need to hold the CTL lock while 3045 * grabbing LUN information. 3046 * 3047 * So, from the user's standpoint, the best thing to do is 3048 * allocate what he thinks is a reasonable buffer length, 3049 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 3050 * double the buffer length and try again. (And repeat 3051 * that until he succeeds.) 3052 */ 3053 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3054 if (sb == NULL) { 3055 list->status = CTL_LUN_LIST_ERROR; 3056 snprintf(list->error_str, sizeof(list->error_str), 3057 "Unable to allocate %d bytes for LUN list", 3058 list->alloc_len); 3059 break; 3060 } 3061 3062 sbuf_printf(sb, "<ctllunlist>\n"); 3063 3064 mtx_lock(&softc->ctl_lock); 3065 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3066 mtx_lock(&lun->lun_lock); 3067 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3068 (uintmax_t)lun->lun); 3069 3070 /* 3071 * Bail out as soon as we see that we've overfilled 3072 * the buffer. 3073 */ 3074 if (retval != 0) 3075 break; 3076 3077 retval = sbuf_printf(sb, "\t<backend_type>%s" 3078 "</backend_type>\n", 3079 (lun->backend == NULL) ? 
"none" : 3080 lun->backend->name); 3081 3082 if (retval != 0) 3083 break; 3084 3085 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3086 lun->be_lun->lun_type); 3087 3088 if (retval != 0) 3089 break; 3090 3091 if (lun->backend == NULL) { 3092 retval = sbuf_printf(sb, "</lun>\n"); 3093 if (retval != 0) 3094 break; 3095 continue; 3096 } 3097 3098 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3099 (lun->be_lun->maxlba > 0) ? 3100 lun->be_lun->maxlba + 1 : 0); 3101 3102 if (retval != 0) 3103 break; 3104 3105 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3106 lun->be_lun->blocksize); 3107 3108 if (retval != 0) 3109 break; 3110 3111 retval = sbuf_printf(sb, "\t<serial_number>"); 3112 3113 if (retval != 0) 3114 break; 3115 3116 retval = ctl_sbuf_printf_esc(sb, 3117 lun->be_lun->serial_num, 3118 sizeof(lun->be_lun->serial_num)); 3119 3120 if (retval != 0) 3121 break; 3122 3123 retval = sbuf_printf(sb, "</serial_number>\n"); 3124 3125 if (retval != 0) 3126 break; 3127 3128 retval = sbuf_printf(sb, "\t<device_id>"); 3129 3130 if (retval != 0) 3131 break; 3132 3133 retval = ctl_sbuf_printf_esc(sb, 3134 lun->be_lun->device_id, 3135 sizeof(lun->be_lun->device_id)); 3136 3137 if (retval != 0) 3138 break; 3139 3140 retval = sbuf_printf(sb, "</device_id>\n"); 3141 3142 if (retval != 0) 3143 break; 3144 3145 if (lun->backend->lun_info != NULL) { 3146 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 3147 if (retval != 0) 3148 break; 3149 } 3150 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 3151 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3152 opt->name, opt->value, opt->name); 3153 if (retval != 0) 3154 break; 3155 } 3156 3157 retval = sbuf_printf(sb, "</lun>\n"); 3158 3159 if (retval != 0) 3160 break; 3161 mtx_unlock(&lun->lun_lock); 3162 } 3163 if (lun != NULL) 3164 mtx_unlock(&lun->lun_lock); 3165 mtx_unlock(&softc->ctl_lock); 3166 3167 if ((retval != 0) 3168 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3169 retval = 0; 3170 sbuf_delete(sb); 3171 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3172 snprintf(list->error_str, sizeof(list->error_str), 3173 "Out of space, %d bytes is too small", 3174 list->alloc_len); 3175 break; 3176 } 3177 3178 sbuf_finish(sb); 3179 3180 retval = copyout(sbuf_data(sb), list->lun_xml, 3181 sbuf_len(sb) + 1); 3182 3183 list->fill_len = sbuf_len(sb) + 1; 3184 list->status = CTL_LUN_LIST_OK; 3185 sbuf_delete(sb); 3186 break; 3187 } 3188 case CTL_ISCSI: { 3189 struct ctl_iscsi *ci; 3190 struct ctl_frontend *fe; 3191 3192 ci = (struct ctl_iscsi *)addr; 3193 3194 fe = ctl_frontend_find("iscsi"); 3195 if (fe == NULL) { 3196 ci->status = CTL_ISCSI_ERROR; 3197 snprintf(ci->error_str, sizeof(ci->error_str), 3198 "Frontend \"iscsi\" not found."); 3199 break; 3200 } 3201 3202 retval = fe->ioctl(dev, cmd, addr, flag, td); 3203 break; 3204 } 3205 case CTL_PORT_REQ: { 3206 struct ctl_req *req; 3207 struct ctl_frontend *fe; 3208 3209 req = (struct ctl_req *)addr; 3210 3211 fe = ctl_frontend_find(req->driver); 3212 if (fe == NULL) { 3213 req->status = CTL_LUN_ERROR; 3214 snprintf(req->error_str, sizeof(req->error_str), 3215 "Frontend \"%s\" not found.", req->driver); 3216 break; 3217 } 3218 if (req->num_args > 0) { 3219 req->kern_args = ctl_copyin_args(req->num_args, 3220 req->args, req->error_str, sizeof(req->error_str)); 3221 if (req->kern_args == NULL) { 3222 req->status = CTL_LUN_ERROR; 3223 break; 3224 } 3225 } 3226 3227 if (fe->ioctl) 3228 retval = fe->ioctl(dev, cmd, addr, flag, td); 3229 else 3230 retval = ENODEV; 3231 3232 if 
(req->num_args > 0) { 3233 ctl_copyout_args(req->num_args, req->kern_args); 3234 ctl_free_args(req->num_args, req->kern_args); 3235 } 3236 break; 3237 } 3238 case CTL_PORT_LIST: { 3239 struct sbuf *sb; 3240 struct ctl_port *port; 3241 struct ctl_lun_list *list; 3242 struct ctl_option *opt; 3243 int j; 3244 uint32_t plun; 3245 3246 list = (struct ctl_lun_list *)addr; 3247 3248 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3249 if (sb == NULL) { 3250 list->status = CTL_LUN_LIST_ERROR; 3251 snprintf(list->error_str, sizeof(list->error_str), 3252 "Unable to allocate %d bytes for LUN list", 3253 list->alloc_len); 3254 break; 3255 } 3256 3257 sbuf_printf(sb, "<ctlportlist>\n"); 3258 3259 mtx_lock(&softc->ctl_lock); 3260 STAILQ_FOREACH(port, &softc->port_list, links) { 3261 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3262 (uintmax_t)port->targ_port); 3263 3264 /* 3265 * Bail out as soon as we see that we've overfilled 3266 * the buffer. 3267 */ 3268 if (retval != 0) 3269 break; 3270 3271 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3272 "</frontend_type>\n", port->frontend->name); 3273 if (retval != 0) 3274 break; 3275 3276 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3277 port->port_type); 3278 if (retval != 0) 3279 break; 3280 3281 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3282 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 3283 if (retval != 0) 3284 break; 3285 3286 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3287 port->port_name); 3288 if (retval != 0) 3289 break; 3290 3291 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3292 port->physical_port); 3293 if (retval != 0) 3294 break; 3295 3296 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3297 port->virtual_port); 3298 if (retval != 0) 3299 break; 3300 3301 if (port->target_devid != NULL) { 3302 sbuf_printf(sb, "\t<target>"); 3303 ctl_id_sbuf(port->target_devid, sb); 3304 sbuf_printf(sb, "</target>\n"); 3305 } 3306 3307 if (port->port_devid != NULL) { 3308 sbuf_printf(sb, "\t<port>"); 3309 ctl_id_sbuf(port->port_devid, sb); 3310 sbuf_printf(sb, "</port>\n"); 3311 } 3312 3313 if (port->port_info != NULL) { 3314 retval = port->port_info(port->onoff_arg, sb); 3315 if (retval != 0) 3316 break; 3317 } 3318 STAILQ_FOREACH(opt, &port->options, links) { 3319 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3320 opt->name, opt->value, opt->name); 3321 if (retval != 0) 3322 break; 3323 } 3324 3325 if (port->lun_map != NULL) { 3326 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 3327 for (j = 0; j < port->lun_map_size; j++) { 3328 plun = ctl_lun_map_from_port(port, j); 3329 if (plun == UINT32_MAX) 3330 continue; 3331 sbuf_printf(sb, 3332 "\t<lun id=\"%u\">%u</lun>\n", 3333 j, plun); 3334 } 3335 } 3336 3337 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3338 if (port->wwpn_iid[j].in_use == 0 || 3339 (port->wwpn_iid[j].wwpn == 0 && 3340 port->wwpn_iid[j].name == NULL)) 3341 continue; 3342 3343 if (port->wwpn_iid[j].name != NULL) 3344 retval = sbuf_printf(sb, 3345 "\t<initiator id=\"%u\">%s</initiator>\n", 3346 j, port->wwpn_iid[j].name); 3347 else 3348 retval = sbuf_printf(sb, 3349 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3350 j, port->wwpn_iid[j].wwpn); 3351 if (retval != 0) 3352 break; 3353 } 3354 if (retval != 0) 3355 break; 3356 3357 retval = sbuf_printf(sb, "</targ_port>\n"); 3358 if (retval != 0) 3359 break; 3360 } 3361 mtx_unlock(&softc->ctl_lock); 3362 3363 if ((retval != 0) 3364 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3365 
retval = 0; 3366 sbuf_delete(sb); 3367 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3368 snprintf(list->error_str, sizeof(list->error_str), 3369 "Out of space, %d bytes is too small", 3370 list->alloc_len); 3371 break; 3372 } 3373 3374 sbuf_finish(sb); 3375 3376 retval = copyout(sbuf_data(sb), list->lun_xml, 3377 sbuf_len(sb) + 1); 3378 3379 list->fill_len = sbuf_len(sb) + 1; 3380 list->status = CTL_LUN_LIST_OK; 3381 sbuf_delete(sb); 3382 break; 3383 } 3384 case CTL_LUN_MAP: { 3385 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3386 struct ctl_port *port; 3387 3388 mtx_lock(&softc->ctl_lock); 3389 if (lm->port < softc->port_min || 3390 lm->port >= softc->port_max || 3391 (port = softc->ctl_ports[lm->port]) == NULL) { 3392 mtx_unlock(&softc->ctl_lock); 3393 return (ENXIO); 3394 } 3395 if (port->status & CTL_PORT_STATUS_ONLINE) { 3396 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3397 if (ctl_lun_map_to_port(port, lun->lun) == 3398 UINT32_MAX) 3399 continue; 3400 mtx_lock(&lun->lun_lock); 3401 ctl_est_ua_port(lun, lm->port, -1, 3402 CTL_UA_LUN_CHANGE); 3403 mtx_unlock(&lun->lun_lock); 3404 } 3405 } 3406 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3407 if (lm->plun != UINT32_MAX) { 3408 if (lm->lun == UINT32_MAX) 3409 retval = ctl_lun_map_unset(port, lm->plun); 3410 else if (lm->lun < CTL_MAX_LUNS && 3411 softc->ctl_luns[lm->lun] != NULL) 3412 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3413 else 3414 return (ENXIO); 3415 } else { 3416 if (lm->lun == UINT32_MAX) 3417 retval = ctl_lun_map_deinit(port); 3418 else 3419 retval = ctl_lun_map_init(port); 3420 } 3421 if (port->status & CTL_PORT_STATUS_ONLINE) 3422 ctl_isc_announce_port(port); 3423 break; 3424 } 3425 case CTL_GET_LUN_STATS: { 3426 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3427 int i; 3428 3429 /* 3430 * XXX KDM no locking here. If the LUN list changes, 3431 * things can blow up. 3432 */ 3433 i = 0; 3434 stats->status = CTL_SS_OK; 3435 stats->fill_len = 0; 3436 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3437 if (lun->lun < stats->first_item) 3438 continue; 3439 if (stats->fill_len + sizeof(lun->stats) > 3440 stats->alloc_len) { 3441 stats->status = CTL_SS_NEED_MORE_SPACE; 3442 break; 3443 } 3444 retval = copyout(&lun->stats, &stats->stats[i++], 3445 sizeof(lun->stats)); 3446 if (retval != 0) 3447 break; 3448 stats->fill_len += sizeof(lun->stats); 3449 } 3450 stats->num_items = softc->num_luns; 3451 stats->flags = CTL_STATS_FLAG_NONE; 3452 #ifdef CTL_TIME_IO 3453 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3454 #endif 3455 getnanouptime(&stats->timestamp); 3456 break; 3457 } 3458 case CTL_GET_PORT_STATS: { 3459 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3460 int i; 3461 3462 /* 3463 * XXX KDM no locking here. If the LUN list changes, 3464 * things can blow up. 
3465 */ 3466 i = 0; 3467 stats->status = CTL_SS_OK; 3468 stats->fill_len = 0; 3469 STAILQ_FOREACH(port, &softc->port_list, links) { 3470 if (port->targ_port < stats->first_item) 3471 continue; 3472 if (stats->fill_len + sizeof(port->stats) > 3473 stats->alloc_len) { 3474 stats->status = CTL_SS_NEED_MORE_SPACE; 3475 break; 3476 } 3477 retval = copyout(&port->stats, &stats->stats[i++], 3478 sizeof(port->stats)); 3479 if (retval != 0) 3480 break; 3481 stats->fill_len += sizeof(port->stats); 3482 } 3483 stats->num_items = softc->num_ports; 3484 stats->flags = CTL_STATS_FLAG_NONE; 3485 #ifdef CTL_TIME_IO 3486 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3487 #endif 3488 getnanouptime(&stats->timestamp); 3489 break; 3490 } 3491 default: { 3492 /* XXX KDM should we fix this? */ 3493 #if 0 3494 struct ctl_backend_driver *backend; 3495 unsigned int type; 3496 int found; 3497 3498 found = 0; 3499 3500 /* 3501 * We encode the backend type as the ioctl type for backend 3502 * ioctls. So parse it out here, and then search for a 3503 * backend of this type. 3504 */ 3505 type = _IOC_TYPE(cmd); 3506 3507 STAILQ_FOREACH(backend, &softc->be_list, links) { 3508 if (backend->type == type) { 3509 found = 1; 3510 break; 3511 } 3512 } 3513 if (found == 0) { 3514 printf("ctl: unknown ioctl command %#lx or backend " 3515 "%d\n", cmd, type); 3516 retval = EINVAL; 3517 break; 3518 } 3519 retval = backend->ioctl(dev, cmd, addr, flag, td); 3520 #endif 3521 retval = ENOTTY; 3522 break; 3523 } 3524 } 3525 return (retval); 3526 } 3527 3528 uint32_t 3529 ctl_get_initindex(struct ctl_nexus *nexus) 3530 { 3531 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3532 } 3533 3534 int 3535 ctl_lun_map_init(struct ctl_port *port) 3536 { 3537 struct ctl_softc *softc = port->ctl_softc; 3538 struct ctl_lun *lun; 3539 int size = ctl_lun_map_size; 3540 uint32_t i; 3541 3542 if (port->lun_map == NULL || port->lun_map_size < size) { 3543 port->lun_map_size = 0; 3544 free(port->lun_map, M_CTL); 3545 port->lun_map = malloc(size * sizeof(uint32_t), 3546 M_CTL, M_NOWAIT); 3547 } 3548 if (port->lun_map == NULL) 3549 return (ENOMEM); 3550 for (i = 0; i < size; i++) 3551 port->lun_map[i] = UINT32_MAX; 3552 port->lun_map_size = size; 3553 if (port->status & CTL_PORT_STATUS_ONLINE) { 3554 if (port->lun_disable != NULL) { 3555 STAILQ_FOREACH(lun, &softc->lun_list, links) 3556 port->lun_disable(port->targ_lun_arg, lun->lun); 3557 } 3558 ctl_isc_announce_port(port); 3559 } 3560 return (0); 3561 } 3562 3563 int 3564 ctl_lun_map_deinit(struct ctl_port *port) 3565 { 3566 struct ctl_softc *softc = port->ctl_softc; 3567 struct ctl_lun *lun; 3568 3569 if (port->lun_map == NULL) 3570 return (0); 3571 port->lun_map_size = 0; 3572 free(port->lun_map, M_CTL); 3573 port->lun_map = NULL; 3574 if (port->status & CTL_PORT_STATUS_ONLINE) { 3575 if (port->lun_enable != NULL) { 3576 STAILQ_FOREACH(lun, &softc->lun_list, links) 3577 port->lun_enable(port->targ_lun_arg, lun->lun); 3578 } 3579 ctl_isc_announce_port(port); 3580 } 3581 return (0); 3582 } 3583 3584 int 3585 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3586 { 3587 int status; 3588 uint32_t old; 3589 3590 if (port->lun_map == NULL) { 3591 status = ctl_lun_map_init(port); 3592 if (status != 0) 3593 return (status); 3594 } 3595 if (plun >= port->lun_map_size) 3596 return (EINVAL); 3597 old = port->lun_map[plun]; 3598 port->lun_map[plun] = glun; 3599 if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) { 3600 if (port->lun_enable != NULL) 3601 
port->lun_enable(port->targ_lun_arg, plun); 3602 ctl_isc_announce_port(port); 3603 } 3604 return (0); 3605 } 3606 3607 int 3608 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3609 { 3610 uint32_t old; 3611 3612 if (port->lun_map == NULL || plun >= port->lun_map_size) 3613 return (0); 3614 old = port->lun_map[plun]; 3615 port->lun_map[plun] = UINT32_MAX; 3616 if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) { 3617 if (port->lun_disable != NULL) 3618 port->lun_disable(port->targ_lun_arg, plun); 3619 ctl_isc_announce_port(port); 3620 } 3621 return (0); 3622 } 3623 3624 uint32_t 3625 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3626 { 3627 3628 if (port == NULL) 3629 return (UINT32_MAX); 3630 if (port->lun_map == NULL) 3631 return (lun_id); 3632 if (lun_id > port->lun_map_size) 3633 return (UINT32_MAX); 3634 return (port->lun_map[lun_id]); 3635 } 3636 3637 uint32_t 3638 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3639 { 3640 uint32_t i; 3641 3642 if (port == NULL) 3643 return (UINT32_MAX); 3644 if (port->lun_map == NULL) 3645 return (lun_id); 3646 for (i = 0; i < port->lun_map_size; i++) { 3647 if (port->lun_map[i] == lun_id) 3648 return (i); 3649 } 3650 return (UINT32_MAX); 3651 } 3652 3653 uint32_t 3654 ctl_decode_lun(uint64_t encoded) 3655 { 3656 uint8_t lun[8]; 3657 uint32_t result = 0xffffffff; 3658 3659 be64enc(lun, encoded); 3660 switch (lun[0] & RPL_LUNDATA_ATYP_MASK) { 3661 case RPL_LUNDATA_ATYP_PERIPH: 3662 if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 && 3663 lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0) 3664 result = lun[1]; 3665 break; 3666 case RPL_LUNDATA_ATYP_FLAT: 3667 if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 && 3668 lun[6] == 0 && lun[7] == 0) 3669 result = ((lun[0] & 0x3f) << 8) + lun[1]; 3670 break; 3671 case RPL_LUNDATA_ATYP_EXTLUN: 3672 switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) { 3673 case 0x02: 3674 switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) { 3675 case 0x00: 3676 result = lun[1]; 3677 break; 3678 case 0x10: 3679 result = (lun[1] << 16) + (lun[2] << 8) + 3680 lun[3]; 3681 break; 3682 case 0x20: 3683 if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0) 3684 result = (lun[2] << 24) + 3685 (lun[3] << 16) + (lun[4] << 8) + 3686 lun[5]; 3687 break; 3688 } 3689 break; 3690 case RPL_LUNDATA_EXT_EAM_NOT_SPEC: 3691 result = 0xffffffff; 3692 break; 3693 } 3694 break; 3695 } 3696 return (result); 3697 } 3698 3699 uint64_t 3700 ctl_encode_lun(uint32_t decoded) 3701 { 3702 uint64_t l = decoded; 3703 3704 if (l <= 0xff) 3705 return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48)); 3706 if (l <= 0x3fff) 3707 return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48)); 3708 if (l <= 0xffffff) 3709 return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) | 3710 (l << 32)); 3711 return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16)); 3712 } 3713 3714 int 3715 ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) 3716 { 3717 int i; 3718 3719 for (i = first; i < last; i++) { 3720 if ((mask[i / 32] & (1 << (i % 32))) == 0) 3721 return (i); 3722 } 3723 return (-1); 3724 } 3725 3726 int 3727 ctl_set_mask(uint32_t *mask, uint32_t bit) 3728 { 3729 uint32_t chunk, piece; 3730 3731 chunk = bit >> 5; 3732 piece = bit % (sizeof(uint32_t) * 8); 3733 3734 if ((mask[chunk] & (1 << piece)) != 0) 3735 return (-1); 3736 else 3737 mask[chunk] |= (1 << piece); 3738 3739 return (0); 3740 } 3741 3742 int 3743 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3744 { 3745 uint32_t chunk, piece; 
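	/* chunk selects the 32-bit word in the mask, piece the bit within it. */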
3746 3747 chunk = bit >> 5; 3748 piece = bit % (sizeof(uint32_t) * 8); 3749 3750 if ((mask[chunk] & (1 << piece)) == 0) 3751 return (-1); 3752 else 3753 mask[chunk] &= ~(1 << piece); 3754 3755 return (0); 3756 } 3757 3758 int 3759 ctl_is_set(uint32_t *mask, uint32_t bit) 3760 { 3761 uint32_t chunk, piece; 3762 3763 chunk = bit >> 5; 3764 piece = bit % (sizeof(uint32_t) * 8); 3765 3766 if ((mask[chunk] & (1 << piece)) == 0) 3767 return (0); 3768 else 3769 return (1); 3770 } 3771 3772 static uint64_t 3773 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3774 { 3775 uint64_t *t; 3776 3777 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3778 if (t == NULL) 3779 return (0); 3780 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3781 } 3782 3783 static void 3784 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3785 { 3786 uint64_t *t; 3787 3788 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3789 if (t == NULL) 3790 return; 3791 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3792 } 3793 3794 static void 3795 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3796 { 3797 uint64_t *p; 3798 u_int i; 3799 3800 i = residx/CTL_MAX_INIT_PER_PORT; 3801 if (lun->pr_keys[i] != NULL) 3802 return; 3803 mtx_unlock(&lun->lun_lock); 3804 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3805 M_WAITOK | M_ZERO); 3806 mtx_lock(&lun->lun_lock); 3807 if (lun->pr_keys[i] == NULL) 3808 lun->pr_keys[i] = p; 3809 else 3810 free(p, M_CTL); 3811 } 3812 3813 static void 3814 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3815 { 3816 uint64_t *t; 3817 3818 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3819 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3820 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3821 } 3822 3823 /* 3824 * ctl_softc, pool_name, total_ctl_io are passed in. 3825 * npool is passed out. 
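 *
 * A minimal usage sketch (illustrative only; the pool name, the
 * total_ctl_io value and the error handling are not taken from this file):
 *
 *	void *pool;
 *	union ctl_io *io;
 *
 *	if (ctl_pool_create(softc, "examplefe", 100, &pool) != 0)
 *		return (ENOMEM);
 *	io = ctl_alloc_io(pool);
 *	... fill in and submit the I/O, then eventually ...
 *	ctl_free_io(io);
 *	ctl_pool_free(pool);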
3826 */ 3827 int 3828 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3829 uint32_t total_ctl_io, void **npool) 3830 { 3831 struct ctl_io_pool *pool; 3832 3833 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3834 M_NOWAIT | M_ZERO); 3835 if (pool == NULL) 3836 return (ENOMEM); 3837 3838 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3839 pool->ctl_softc = ctl_softc; 3840 #ifdef IO_POOLS 3841 pool->zone = uma_zsecond_create(pool->name, NULL, 3842 NULL, NULL, NULL, ctl_softc->io_zone); 3843 /* uma_prealloc(pool->zone, total_ctl_io); */ 3844 #else 3845 pool->zone = ctl_softc->io_zone; 3846 #endif 3847 3848 *npool = pool; 3849 return (0); 3850 } 3851 3852 void 3853 ctl_pool_free(struct ctl_io_pool *pool) 3854 { 3855 3856 if (pool == NULL) 3857 return; 3858 3859 #ifdef IO_POOLS 3860 uma_zdestroy(pool->zone); 3861 #endif 3862 free(pool, M_CTL); 3863 } 3864 3865 union ctl_io * 3866 ctl_alloc_io(void *pool_ref) 3867 { 3868 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3869 union ctl_io *io; 3870 3871 io = uma_zalloc(pool->zone, M_WAITOK); 3872 if (io != NULL) { 3873 io->io_hdr.pool = pool_ref; 3874 CTL_SOFTC(io) = pool->ctl_softc; 3875 } 3876 return (io); 3877 } 3878 3879 union ctl_io * 3880 ctl_alloc_io_nowait(void *pool_ref) 3881 { 3882 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3883 union ctl_io *io; 3884 3885 io = uma_zalloc(pool->zone, M_NOWAIT); 3886 if (io != NULL) { 3887 io->io_hdr.pool = pool_ref; 3888 CTL_SOFTC(io) = pool->ctl_softc; 3889 } 3890 return (io); 3891 } 3892 3893 void 3894 ctl_free_io(union ctl_io *io) 3895 { 3896 struct ctl_io_pool *pool; 3897 3898 if (io == NULL) 3899 return; 3900 3901 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3902 uma_zfree(pool->zone, io); 3903 } 3904 3905 void 3906 ctl_zero_io(union ctl_io *io) 3907 { 3908 struct ctl_io_pool *pool; 3909 3910 if (io == NULL) 3911 return; 3912 3913 /* 3914 * May need to preserve linked list pointers at some point too. 3915 */ 3916 pool = io->io_hdr.pool; 3917 memset(io, 0, sizeof(*io)); 3918 io->io_hdr.pool = pool; 3919 CTL_SOFTC(io) = pool->ctl_softc; 3920 } 3921 3922 int 3923 ctl_expand_number(const char *buf, uint64_t *num) 3924 { 3925 char *endptr; 3926 uint64_t number; 3927 unsigned shift; 3928 3929 number = strtoq(buf, &endptr, 0); 3930 3931 switch (tolower((unsigned char)*endptr)) { 3932 case 'e': 3933 shift = 60; 3934 break; 3935 case 'p': 3936 shift = 50; 3937 break; 3938 case 't': 3939 shift = 40; 3940 break; 3941 case 'g': 3942 shift = 30; 3943 break; 3944 case 'm': 3945 shift = 20; 3946 break; 3947 case 'k': 3948 shift = 10; 3949 break; 3950 case 'b': 3951 case '\0': /* No unit. */ 3952 *num = number; 3953 return (0); 3954 default: 3955 /* Unrecognized unit. */ 3956 return (-1); 3957 } 3958 3959 if ((number << shift) >> shift != number) { 3960 /* Overflow */ 3961 return (-1); 3962 } 3963 *num = number << shift; 3964 return (0); 3965 } 3966 3967 3968 /* 3969 * This routine could be used in the future to load default and/or saved 3970 * mode page parameters for a particuar lun. 
3971 */ 3972 static int 3973 ctl_init_page_index(struct ctl_lun *lun) 3974 { 3975 int i, page_code; 3976 struct ctl_page_index *page_index; 3977 const char *value; 3978 uint64_t ival; 3979 3980 memcpy(&lun->mode_pages.index, page_index_template, 3981 sizeof(page_index_template)); 3982 3983 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 3984 3985 page_index = &lun->mode_pages.index[i]; 3986 if (lun->be_lun->lun_type == T_DIRECT && 3987 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 3988 continue; 3989 if (lun->be_lun->lun_type == T_PROCESSOR && 3990 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 3991 continue; 3992 if (lun->be_lun->lun_type == T_CDROM && 3993 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 3994 continue; 3995 3996 page_code = page_index->page_code & SMPH_PC_MASK; 3997 switch (page_code) { 3998 case SMS_RW_ERROR_RECOVERY_PAGE: { 3999 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4000 ("subpage %#x for page %#x is incorrect!", 4001 page_index->subpage, page_code)); 4002 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 4003 &rw_er_page_default, 4004 sizeof(rw_er_page_default)); 4005 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 4006 &rw_er_page_changeable, 4007 sizeof(rw_er_page_changeable)); 4008 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 4009 &rw_er_page_default, 4010 sizeof(rw_er_page_default)); 4011 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 4012 &rw_er_page_default, 4013 sizeof(rw_er_page_default)); 4014 page_index->page_data = 4015 (uint8_t *)lun->mode_pages.rw_er_page; 4016 break; 4017 } 4018 case SMS_FORMAT_DEVICE_PAGE: { 4019 struct scsi_format_page *format_page; 4020 4021 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4022 ("subpage %#x for page %#x is incorrect!", 4023 page_index->subpage, page_code)); 4024 4025 /* 4026 * Sectors per track are set above. Bytes per 4027 * sector need to be set here on a per-LUN basis. 4028 */ 4029 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 4030 &format_page_default, 4031 sizeof(format_page_default)); 4032 memcpy(&lun->mode_pages.format_page[ 4033 CTL_PAGE_CHANGEABLE], &format_page_changeable, 4034 sizeof(format_page_changeable)); 4035 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 4036 &format_page_default, 4037 sizeof(format_page_default)); 4038 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 4039 &format_page_default, 4040 sizeof(format_page_default)); 4041 4042 format_page = &lun->mode_pages.format_page[ 4043 CTL_PAGE_CURRENT]; 4044 scsi_ulto2b(lun->be_lun->blocksize, 4045 format_page->bytes_per_sector); 4046 4047 format_page = &lun->mode_pages.format_page[ 4048 CTL_PAGE_DEFAULT]; 4049 scsi_ulto2b(lun->be_lun->blocksize, 4050 format_page->bytes_per_sector); 4051 4052 format_page = &lun->mode_pages.format_page[ 4053 CTL_PAGE_SAVED]; 4054 scsi_ulto2b(lun->be_lun->blocksize, 4055 format_page->bytes_per_sector); 4056 4057 page_index->page_data = 4058 (uint8_t *)lun->mode_pages.format_page; 4059 break; 4060 } 4061 case SMS_RIGID_DISK_PAGE: { 4062 struct scsi_rigid_disk_page *rigid_disk_page; 4063 uint32_t sectors_per_cylinder; 4064 uint64_t cylinders; 4065 #ifndef __XSCALE__ 4066 int shift; 4067 #endif /* !__XSCALE__ */ 4068 4069 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4070 ("subpage %#x for page %#x is incorrect!", 4071 page_index->subpage, page_code)); 4072 4073 /* 4074 * Rotation rate and sectors per track are set 4075 * above. We calculate the cylinders here based on 4076 * capacity. 
Due to the number of heads and 4077 * sectors per track we're using, smaller arrays 4078 * may turn out to have 0 cylinders. Linux and 4079 * FreeBSD don't pay attention to these mode pages 4080 * to figure out capacity, but Solaris does. It 4081 * seems to deal with 0 cylinders just fine, and 4082 * works out a fake geometry based on the capacity. 4083 */ 4084 memcpy(&lun->mode_pages.rigid_disk_page[ 4085 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 4086 sizeof(rigid_disk_page_default)); 4087 memcpy(&lun->mode_pages.rigid_disk_page[ 4088 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 4089 sizeof(rigid_disk_page_changeable)); 4090 4091 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 4092 CTL_DEFAULT_HEADS; 4093 4094 /* 4095 * The divide method here will be more accurate, 4096 * probably, but results in floating point being 4097 * used in the kernel on i386 (__udivdi3()). On the 4098 * XScale, though, __udivdi3() is implemented in 4099 * software. 4100 * 4101 * The shift method for cylinder calculation is 4102 * accurate if sectors_per_cylinder is a power of 4103 * 2. Otherwise it might be slightly off -- you 4104 * might have a bit of a truncation problem. 4105 */ 4106 #ifdef __XSCALE__ 4107 cylinders = (lun->be_lun->maxlba + 1) / 4108 sectors_per_cylinder; 4109 #else 4110 for (shift = 31; shift > 0; shift--) { 4111 if (sectors_per_cylinder & (1 << shift)) 4112 break; 4113 } 4114 cylinders = (lun->be_lun->maxlba + 1) >> shift; 4115 #endif 4116 4117 /* 4118 * We've basically got 3 bytes, or 24 bits for the 4119 * cylinder size in the mode page. If we're over, 4120 * just round down to 2^24. 4121 */ 4122 if (cylinders > 0xffffff) 4123 cylinders = 0xffffff; 4124 4125 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4126 CTL_PAGE_DEFAULT]; 4127 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4128 4129 if ((value = ctl_get_opt(&lun->be_lun->options, 4130 "rpm")) != NULL) { 4131 scsi_ulto2b(strtol(value, NULL, 0), 4132 rigid_disk_page->rotation_rate); 4133 } 4134 4135 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 4136 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4137 sizeof(rigid_disk_page_default)); 4138 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 4139 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4140 sizeof(rigid_disk_page_default)); 4141 4142 page_index->page_data = 4143 (uint8_t *)lun->mode_pages.rigid_disk_page; 4144 break; 4145 } 4146 case SMS_VERIFY_ERROR_RECOVERY_PAGE: { 4147 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4148 ("subpage %#x for page %#x is incorrect!", 4149 page_index->subpage, page_code)); 4150 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT], 4151 &verify_er_page_default, 4152 sizeof(verify_er_page_default)); 4153 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE], 4154 &verify_er_page_changeable, 4155 sizeof(verify_er_page_changeable)); 4156 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT], 4157 &verify_er_page_default, 4158 sizeof(verify_er_page_default)); 4159 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED], 4160 &verify_er_page_default, 4161 sizeof(verify_er_page_default)); 4162 page_index->page_data = 4163 (uint8_t *)lun->mode_pages.verify_er_page; 4164 break; 4165 } 4166 case SMS_CACHING_PAGE: { 4167 struct scsi_caching_page *caching_page; 4168 4169 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4170 ("subpage %#x for page %#x is incorrect!", 4171 page_index->subpage, page_code)); 4172 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 4173 &caching_page_default, 
4174 sizeof(caching_page_default)); 4175 memcpy(&lun->mode_pages.caching_page[ 4176 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 4177 sizeof(caching_page_changeable)); 4178 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4179 &caching_page_default, 4180 sizeof(caching_page_default)); 4181 caching_page = &lun->mode_pages.caching_page[ 4182 CTL_PAGE_SAVED]; 4183 value = ctl_get_opt(&lun->be_lun->options, "writecache"); 4184 if (value != NULL && strcmp(value, "off") == 0) 4185 caching_page->flags1 &= ~SCP_WCE; 4186 value = ctl_get_opt(&lun->be_lun->options, "readcache"); 4187 if (value != NULL && strcmp(value, "off") == 0) 4188 caching_page->flags1 |= SCP_RCD; 4189 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 4190 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4191 sizeof(caching_page_default)); 4192 page_index->page_data = 4193 (uint8_t *)lun->mode_pages.caching_page; 4194 break; 4195 } 4196 case SMS_CONTROL_MODE_PAGE: { 4197 switch (page_index->subpage) { 4198 case SMS_SUBPAGE_PAGE_0: { 4199 struct scsi_control_page *control_page; 4200 4201 memcpy(&lun->mode_pages.control_page[ 4202 CTL_PAGE_DEFAULT], 4203 &control_page_default, 4204 sizeof(control_page_default)); 4205 memcpy(&lun->mode_pages.control_page[ 4206 CTL_PAGE_CHANGEABLE], 4207 &control_page_changeable, 4208 sizeof(control_page_changeable)); 4209 memcpy(&lun->mode_pages.control_page[ 4210 CTL_PAGE_SAVED], 4211 &control_page_default, 4212 sizeof(control_page_default)); 4213 control_page = &lun->mode_pages.control_page[ 4214 CTL_PAGE_SAVED]; 4215 value = ctl_get_opt(&lun->be_lun->options, 4216 "reordering"); 4217 if (value != NULL && 4218 strcmp(value, "unrestricted") == 0) { 4219 control_page->queue_flags &= 4220 ~SCP_QUEUE_ALG_MASK; 4221 control_page->queue_flags |= 4222 SCP_QUEUE_ALG_UNRESTRICTED; 4223 } 4224 memcpy(&lun->mode_pages.control_page[ 4225 CTL_PAGE_CURRENT], 4226 &lun->mode_pages.control_page[ 4227 CTL_PAGE_SAVED], 4228 sizeof(control_page_default)); 4229 page_index->page_data = 4230 (uint8_t *)lun->mode_pages.control_page; 4231 break; 4232 } 4233 case 0x01: 4234 memcpy(&lun->mode_pages.control_ext_page[ 4235 CTL_PAGE_DEFAULT], 4236 &control_ext_page_default, 4237 sizeof(control_ext_page_default)); 4238 memcpy(&lun->mode_pages.control_ext_page[ 4239 CTL_PAGE_CHANGEABLE], 4240 &control_ext_page_changeable, 4241 sizeof(control_ext_page_changeable)); 4242 memcpy(&lun->mode_pages.control_ext_page[ 4243 CTL_PAGE_SAVED], 4244 &control_ext_page_default, 4245 sizeof(control_ext_page_default)); 4246 memcpy(&lun->mode_pages.control_ext_page[ 4247 CTL_PAGE_CURRENT], 4248 &lun->mode_pages.control_ext_page[ 4249 CTL_PAGE_SAVED], 4250 sizeof(control_ext_page_default)); 4251 page_index->page_data = 4252 (uint8_t *)lun->mode_pages.control_ext_page; 4253 break; 4254 default: 4255 panic("subpage %#x for page %#x is incorrect!", 4256 page_index->subpage, page_code); 4257 } 4258 break; 4259 } 4260 case SMS_INFO_EXCEPTIONS_PAGE: { 4261 switch (page_index->subpage) { 4262 case SMS_SUBPAGE_PAGE_0: 4263 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 4264 &ie_page_default, 4265 sizeof(ie_page_default)); 4266 memcpy(&lun->mode_pages.ie_page[ 4267 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 4268 sizeof(ie_page_changeable)); 4269 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 4270 &ie_page_default, 4271 sizeof(ie_page_default)); 4272 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 4273 &ie_page_default, 4274 sizeof(ie_page_default)); 4275 page_index->page_data = 4276 (uint8_t *)lun->mode_pages.ie_page; 4277 break; 
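			/*
			 * Subpage 0x02 below is the logical block
			 * provisioning threshold subpage.  The four
			 * "*-threshold" LUN options are parsed as byte
			 * counts by ctl_expand_number(), converted to
			 * blocks and scaled down by 2^CTL_LBP_EXPONENT
			 * before being stored in the threshold descriptors.
			 */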
4278 case 0x02: { 4279 struct ctl_logical_block_provisioning_page *page; 4280 4281 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 4282 &lbp_page_default, 4283 sizeof(lbp_page_default)); 4284 memcpy(&lun->mode_pages.lbp_page[ 4285 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 4286 sizeof(lbp_page_changeable)); 4287 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4288 &lbp_page_default, 4289 sizeof(lbp_page_default)); 4290 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 4291 value = ctl_get_opt(&lun->be_lun->options, 4292 "avail-threshold"); 4293 if (value != NULL && 4294 ctl_expand_number(value, &ival) == 0) { 4295 page->descr[0].flags |= SLBPPD_ENABLED | 4296 SLBPPD_ARMING_DEC; 4297 if (lun->be_lun->blocksize) 4298 ival /= lun->be_lun->blocksize; 4299 else 4300 ival /= 512; 4301 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4302 page->descr[0].count); 4303 } 4304 value = ctl_get_opt(&lun->be_lun->options, 4305 "used-threshold"); 4306 if (value != NULL && 4307 ctl_expand_number(value, &ival) == 0) { 4308 page->descr[1].flags |= SLBPPD_ENABLED | 4309 SLBPPD_ARMING_INC; 4310 if (lun->be_lun->blocksize) 4311 ival /= lun->be_lun->blocksize; 4312 else 4313 ival /= 512; 4314 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4315 page->descr[1].count); 4316 } 4317 value = ctl_get_opt(&lun->be_lun->options, 4318 "pool-avail-threshold"); 4319 if (value != NULL && 4320 ctl_expand_number(value, &ival) == 0) { 4321 page->descr[2].flags |= SLBPPD_ENABLED | 4322 SLBPPD_ARMING_DEC; 4323 if (lun->be_lun->blocksize) 4324 ival /= lun->be_lun->blocksize; 4325 else 4326 ival /= 512; 4327 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4328 page->descr[2].count); 4329 } 4330 value = ctl_get_opt(&lun->be_lun->options, 4331 "pool-used-threshold"); 4332 if (value != NULL && 4333 ctl_expand_number(value, &ival) == 0) { 4334 page->descr[3].flags |= SLBPPD_ENABLED | 4335 SLBPPD_ARMING_INC; 4336 if (lun->be_lun->blocksize) 4337 ival /= lun->be_lun->blocksize; 4338 else 4339 ival /= 512; 4340 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4341 page->descr[3].count); 4342 } 4343 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 4344 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4345 sizeof(lbp_page_default)); 4346 page_index->page_data = 4347 (uint8_t *)lun->mode_pages.lbp_page; 4348 break; 4349 } 4350 default: 4351 panic("subpage %#x for page %#x is incorrect!", 4352 page_index->subpage, page_code); 4353 } 4354 break; 4355 } 4356 case SMS_CDDVD_CAPS_PAGE:{ 4357 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4358 ("subpage %#x for page %#x is incorrect!", 4359 page_index->subpage, page_code)); 4360 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT], 4361 &cddvd_page_default, 4362 sizeof(cddvd_page_default)); 4363 memcpy(&lun->mode_pages.cddvd_page[ 4364 CTL_PAGE_CHANGEABLE], &cddvd_page_changeable, 4365 sizeof(cddvd_page_changeable)); 4366 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4367 &cddvd_page_default, 4368 sizeof(cddvd_page_default)); 4369 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT], 4370 &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4371 sizeof(cddvd_page_default)); 4372 page_index->page_data = 4373 (uint8_t *)lun->mode_pages.cddvd_page; 4374 break; 4375 } 4376 default: 4377 panic("invalid page code value %#x", page_code); 4378 } 4379 } 4380 4381 return (CTL_RETVAL_COMPLETE); 4382 } 4383 4384 static int 4385 ctl_init_log_page_index(struct ctl_lun *lun) 4386 { 4387 struct ctl_page_index *page_index; 4388 int i, j, k, prev; 4389 4390 memcpy(&lun->log_pages.index, log_page_index_template, 4391 
	    sizeof(log_page_index_template));

	prev = -1;
	for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) {

		page_index = &lun->log_pages.index[i];
		if (lun->be_lun->lun_type == T_DIRECT &&
		    (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
			continue;
		if (lun->be_lun->lun_type == T_PROCESSOR &&
		    (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
			continue;
		if (lun->be_lun->lun_type == T_CDROM &&
		    (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
			continue;

		if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING &&
		    lun->backend->lun_attr == NULL)
			continue;

		if (page_index->page_code != prev) {
			lun->log_pages.pages_page[j] = page_index->page_code;
			prev = page_index->page_code;
			j++;
		}
		lun->log_pages.subpages_page[k*2] = page_index->page_code;
		lun->log_pages.subpages_page[k*2+1] = page_index->subpage;
		k++;
	}
	lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0];
	lun->log_pages.index[0].page_len = j;
	lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0];
	lun->log_pages.index[1].page_len = k * 2;
	lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0];
	lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS;
	lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page;
	lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page);
	lun->log_pages.index[4].page_data = (uint8_t *)&lun->log_pages.ie_page;
	lun->log_pages.index[4].page_len = sizeof(lun->log_pages.ie_page);

	return (CTL_RETVAL_COMPLETE);
}

static int
hex2bin(const char *str, uint8_t *buf, int buf_size)
{
	int i;
	u_char c;

	memset(buf, 0, buf_size);
	while (isspace(str[0]))
		str++;
	if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
		str += 2;
	buf_size *= 2;
	for (i = 0; str[i] != 0 && i < buf_size; i++) {
		while (str[i] == '-')	/* Skip dashes in UUIDs. */
			str++;
		c = str[i];
		if (isdigit(c))
			c -= '0';
		else if (isalpha(c))
			c -= isupper(c) ? 'A' - 10 : 'a' - 10;
		else
			break;
		if (c >= 16)
			break;
		if ((i & 1) == 0)
			buf[i / 2] |= (c << 4);
		else
			buf[i / 2] |= c;
	}
	return ((i + 1) / 2);
}

/*
 * LUN allocation.
 *
 * Requirements:
 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he
 *   wants us to allocate the LUN and he can block.
 * - ctl_softc is always set
 * - be_lun is set if the LUN has a backend (needed for disk LUNs)
 *
 * Returns 0 for success, non-zero (errno) for failure.
 */
static int
ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
	      struct ctl_be_lun *const be_lun)
{
	struct ctl_lun *nlun, *lun;
	struct scsi_vpd_id_descriptor *desc;
	struct scsi_vpd_id_t10 *t10id;
	const char *eui, *naa, *scsiname, *uuid, *vendor, *value;
	int lun_number, lun_malloced;
	int devidlen, idlen1, idlen2 = 0, len;

	if (be_lun == NULL)
		return (EINVAL);

	/*
	 * We currently only support Direct Access, Processor and CD-ROM
	 * LUN types.
4493 */ 4494 switch (be_lun->lun_type) { 4495 case T_DIRECT: 4496 case T_PROCESSOR: 4497 case T_CDROM: 4498 break; 4499 case T_SEQUENTIAL: 4500 case T_CHANGER: 4501 default: 4502 be_lun->lun_config_status(be_lun->be_lun, 4503 CTL_LUN_CONFIG_FAILURE); 4504 break; 4505 } 4506 if (ctl_lun == NULL) { 4507 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4508 lun_malloced = 1; 4509 } else { 4510 lun_malloced = 0; 4511 lun = ctl_lun; 4512 } 4513 4514 memset(lun, 0, sizeof(*lun)); 4515 if (lun_malloced) 4516 lun->flags = CTL_LUN_MALLOCED; 4517 4518 /* Generate LUN ID. */ 4519 devidlen = max(CTL_DEVID_MIN_LEN, 4520 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4521 idlen1 = sizeof(*t10id) + devidlen; 4522 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4523 scsiname = ctl_get_opt(&be_lun->options, "scsiname"); 4524 if (scsiname != NULL) { 4525 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4526 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4527 } 4528 eui = ctl_get_opt(&be_lun->options, "eui"); 4529 if (eui != NULL) { 4530 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4531 } 4532 naa = ctl_get_opt(&be_lun->options, "naa"); 4533 if (naa != NULL) { 4534 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4535 } 4536 uuid = ctl_get_opt(&be_lun->options, "uuid"); 4537 if (uuid != NULL) { 4538 len += sizeof(struct scsi_vpd_id_descriptor) + 18; 4539 } 4540 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4541 M_CTL, M_WAITOK | M_ZERO); 4542 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4543 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4544 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4545 desc->length = idlen1; 4546 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4547 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4548 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { 4549 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4550 } else { 4551 strncpy(t10id->vendor, vendor, 4552 min(sizeof(t10id->vendor), strlen(vendor))); 4553 } 4554 strncpy((char *)t10id->vendor_spec_id, 4555 (char *)be_lun->device_id, devidlen); 4556 if (scsiname != NULL) { 4557 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4558 desc->length); 4559 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4560 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4561 SVPD_ID_TYPE_SCSI_NAME; 4562 desc->length = idlen2; 4563 strlcpy(desc->identifier, scsiname, idlen2); 4564 } 4565 if (eui != NULL) { 4566 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4567 desc->length); 4568 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4569 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4570 SVPD_ID_TYPE_EUI64; 4571 desc->length = hex2bin(eui, desc->identifier, 16); 4572 desc->length = desc->length > 12 ? 16 : 4573 (desc->length > 8 ? 12 : 8); 4574 len -= 16 - desc->length; 4575 } 4576 if (naa != NULL) { 4577 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4578 desc->length); 4579 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4580 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4581 SVPD_ID_TYPE_NAA; 4582 desc->length = hex2bin(naa, desc->identifier, 16); 4583 desc->length = desc->length > 8 ? 
16 : 8; 4584 len -= 16 - desc->length; 4585 } 4586 if (uuid != NULL) { 4587 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4588 desc->length); 4589 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4590 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4591 SVPD_ID_TYPE_UUID; 4592 desc->identifier[0] = 0x10; 4593 hex2bin(uuid, &desc->identifier[2], 16); 4594 desc->length = 18; 4595 } 4596 lun->lun_devid->len = len; 4597 4598 mtx_lock(&ctl_softc->ctl_lock); 4599 /* 4600 * See if the caller requested a particular LUN number. If so, see 4601 * if it is available. Otherwise, allocate the first available LUN. 4602 */ 4603 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4604 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4605 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4606 mtx_unlock(&ctl_softc->ctl_lock); 4607 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4608 printf("ctl: requested LUN ID %d is higher " 4609 "than CTL_MAX_LUNS - 1 (%d)\n", 4610 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4611 } else { 4612 /* 4613 * XXX KDM return an error, or just assign 4614 * another LUN ID in this case?? 4615 */ 4616 printf("ctl: requested LUN ID %d is already " 4617 "in use\n", be_lun->req_lun_id); 4618 } 4619 fail: 4620 free(lun->lun_devid, M_CTL); 4621 if (lun->flags & CTL_LUN_MALLOCED) 4622 free(lun, M_CTL); 4623 be_lun->lun_config_status(be_lun->be_lun, 4624 CTL_LUN_CONFIG_FAILURE); 4625 return (ENOSPC); 4626 } 4627 lun_number = be_lun->req_lun_id; 4628 } else { 4629 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS); 4630 if (lun_number == -1) { 4631 mtx_unlock(&ctl_softc->ctl_lock); 4632 printf("ctl: can't allocate LUN, out of LUNs\n"); 4633 goto fail; 4634 } 4635 } 4636 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4637 mtx_unlock(&ctl_softc->ctl_lock); 4638 4639 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4640 lun->lun = lun_number; 4641 lun->be_lun = be_lun; 4642 /* 4643 * The processor LUN is always enabled. Disk LUNs come on line 4644 * disabled, and must be enabled by the backend. 4645 */ 4646 lun->flags |= CTL_LUN_DISABLED; 4647 lun->backend = be_lun->be; 4648 be_lun->ctl_lun = lun; 4649 be_lun->lun_id = lun_number; 4650 atomic_add_int(&be_lun->be->num_luns, 1); 4651 if (be_lun->flags & CTL_LUN_FLAG_EJECTED) 4652 lun->flags |= CTL_LUN_EJECTED; 4653 if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) 4654 lun->flags |= CTL_LUN_NO_MEDIA; 4655 if (be_lun->flags & CTL_LUN_FLAG_STOPPED) 4656 lun->flags |= CTL_LUN_STOPPED; 4657 4658 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4659 lun->flags |= CTL_LUN_PRIMARY_SC; 4660 4661 value = ctl_get_opt(&be_lun->options, "removable"); 4662 if (value != NULL) { 4663 if (strcmp(value, "on") == 0) 4664 lun->flags |= CTL_LUN_REMOVABLE; 4665 } else if (be_lun->lun_type == T_CDROM) 4666 lun->flags |= CTL_LUN_REMOVABLE; 4667 4668 lun->ctl_softc = ctl_softc; 4669 #ifdef CTL_TIME_IO 4670 lun->last_busy = getsbinuptime(); 4671 #endif 4672 TAILQ_INIT(&lun->ooa_queue); 4673 TAILQ_INIT(&lun->blocked_queue); 4674 STAILQ_INIT(&lun->error_list); 4675 lun->ie_reported = 1; 4676 callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0); 4677 ctl_tpc_lun_init(lun); 4678 if (lun->flags & CTL_LUN_REMOVABLE) { 4679 lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4, 4680 M_CTL, M_WAITOK); 4681 } 4682 4683 /* 4684 * Initialize the mode and log page index. 
4685 */ 4686 ctl_init_page_index(lun); 4687 ctl_init_log_page_index(lun); 4688 4689 /* Setup statistics gathering */ 4690 #ifdef CTL_LEGACY_STATS 4691 lun->legacy_stats.device_type = be_lun->lun_type; 4692 lun->legacy_stats.lun_number = lun_number; 4693 lun->legacy_stats.blocksize = be_lun->blocksize; 4694 if (be_lun->blocksize == 0) 4695 lun->legacy_stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4696 for (len = 0; len < CTL_MAX_PORTS; len++) 4697 lun->legacy_stats.ports[len].targ_port = len; 4698 #endif /* CTL_LEGACY_STATS */ 4699 lun->stats.item = lun_number; 4700 4701 /* 4702 * Now, before we insert this lun on the lun list, set the lun 4703 * inventory changed UA for all other luns. 4704 */ 4705 mtx_lock(&ctl_softc->ctl_lock); 4706 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4707 mtx_lock(&nlun->lun_lock); 4708 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4709 mtx_unlock(&nlun->lun_lock); 4710 } 4711 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4712 ctl_softc->ctl_luns[lun_number] = lun; 4713 ctl_softc->num_luns++; 4714 mtx_unlock(&ctl_softc->ctl_lock); 4715 4716 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4717 return (0); 4718 } 4719 4720 /* 4721 * Delete a LUN. 4722 * Assumptions: 4723 * - LUN has already been marked invalid and any pending I/O has been taken 4724 * care of. 4725 */ 4726 static int 4727 ctl_free_lun(struct ctl_lun *lun) 4728 { 4729 struct ctl_softc *softc = lun->ctl_softc; 4730 struct ctl_lun *nlun; 4731 int i; 4732 4733 mtx_assert(&softc->ctl_lock, MA_OWNED); 4734 4735 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4736 4737 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4738 4739 softc->ctl_luns[lun->lun] = NULL; 4740 4741 if (!TAILQ_EMPTY(&lun->ooa_queue)) 4742 panic("Freeing a LUN %p with outstanding I/O!!\n", lun); 4743 4744 softc->num_luns--; 4745 4746 /* 4747 * Tell the backend to free resources, if this LUN has a backend. 4748 */ 4749 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4750 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4751 4752 lun->ie_reportcnt = UINT32_MAX; 4753 callout_drain(&lun->ie_callout); 4754 4755 ctl_tpc_lun_shutdown(lun); 4756 mtx_destroy(&lun->lun_lock); 4757 free(lun->lun_devid, M_CTL); 4758 for (i = 0; i < CTL_MAX_PORTS; i++) 4759 free(lun->pending_ua[i], M_CTL); 4760 for (i = 0; i < CTL_MAX_PORTS; i++) 4761 free(lun->pr_keys[i], M_CTL); 4762 free(lun->write_buffer, M_CTL); 4763 free(lun->prevent, M_CTL); 4764 if (lun->flags & CTL_LUN_MALLOCED) 4765 free(lun, M_CTL); 4766 4767 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4768 mtx_lock(&nlun->lun_lock); 4769 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4770 mtx_unlock(&nlun->lun_lock); 4771 } 4772 4773 return (0); 4774 } 4775 4776 static void 4777 ctl_create_lun(struct ctl_be_lun *be_lun) 4778 { 4779 4780 /* 4781 * ctl_alloc_lun() should handle all potential failure cases. 
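	 * On failure it reports CTL_LUN_CONFIG_FAILURE to the backend via
	 * lun_config_status() rather than propagating an error here, so the
	 * return value is ignored.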
4782 */ 4783 ctl_alloc_lun(control_softc, NULL, be_lun); 4784 } 4785 4786 int 4787 ctl_add_lun(struct ctl_be_lun *be_lun) 4788 { 4789 struct ctl_softc *softc = control_softc; 4790 4791 mtx_lock(&softc->ctl_lock); 4792 STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links); 4793 mtx_unlock(&softc->ctl_lock); 4794 wakeup(&softc->pending_lun_queue); 4795 4796 return (0); 4797 } 4798 4799 int 4800 ctl_enable_lun(struct ctl_be_lun *be_lun) 4801 { 4802 struct ctl_softc *softc; 4803 struct ctl_port *port, *nport; 4804 struct ctl_lun *lun; 4805 int retval; 4806 4807 lun = (struct ctl_lun *)be_lun->ctl_lun; 4808 softc = lun->ctl_softc; 4809 4810 mtx_lock(&softc->ctl_lock); 4811 mtx_lock(&lun->lun_lock); 4812 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4813 /* 4814 * eh? Why did we get called if the LUN is already 4815 * enabled? 4816 */ 4817 mtx_unlock(&lun->lun_lock); 4818 mtx_unlock(&softc->ctl_lock); 4819 return (0); 4820 } 4821 lun->flags &= ~CTL_LUN_DISABLED; 4822 mtx_unlock(&lun->lun_lock); 4823 4824 STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) { 4825 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4826 port->lun_map != NULL || port->lun_enable == NULL) 4827 continue; 4828 4829 /* 4830 * Drop the lock while we call the FETD's enable routine. 4831 * This can lead to a callback into CTL (at least in the 4832 * case of the internal initiator frontend. 4833 */ 4834 mtx_unlock(&softc->ctl_lock); 4835 retval = port->lun_enable(port->targ_lun_arg, lun->lun); 4836 mtx_lock(&softc->ctl_lock); 4837 if (retval != 0) { 4838 printf("%s: FETD %s port %d returned error " 4839 "%d for lun_enable on lun %jd\n", 4840 __func__, port->port_name, port->targ_port, 4841 retval, (intmax_t)lun->lun); 4842 } 4843 } 4844 4845 mtx_unlock(&softc->ctl_lock); 4846 ctl_isc_announce_lun(lun); 4847 4848 return (0); 4849 } 4850 4851 int 4852 ctl_disable_lun(struct ctl_be_lun *be_lun) 4853 { 4854 struct ctl_softc *softc; 4855 struct ctl_port *port; 4856 struct ctl_lun *lun; 4857 int retval; 4858 4859 lun = (struct ctl_lun *)be_lun->ctl_lun; 4860 softc = lun->ctl_softc; 4861 4862 mtx_lock(&softc->ctl_lock); 4863 mtx_lock(&lun->lun_lock); 4864 if (lun->flags & CTL_LUN_DISABLED) { 4865 mtx_unlock(&lun->lun_lock); 4866 mtx_unlock(&softc->ctl_lock); 4867 return (0); 4868 } 4869 lun->flags |= CTL_LUN_DISABLED; 4870 mtx_unlock(&lun->lun_lock); 4871 4872 STAILQ_FOREACH(port, &softc->port_list, links) { 4873 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4874 port->lun_map != NULL || port->lun_disable == NULL) 4875 continue; 4876 4877 /* 4878 * Drop the lock before we call the frontend's disable 4879 * routine, to avoid lock order reversals. 4880 * 4881 * XXX KDM what happens if the frontend list changes while 4882 * we're traversing it? It's unlikely, but should be handled. 
4883 */ 4884 mtx_unlock(&softc->ctl_lock); 4885 retval = port->lun_disable(port->targ_lun_arg, lun->lun); 4886 mtx_lock(&softc->ctl_lock); 4887 if (retval != 0) { 4888 printf("%s: FETD %s port %d returned error " 4889 "%d for lun_disable on lun %jd\n", 4890 __func__, port->port_name, port->targ_port, 4891 retval, (intmax_t)lun->lun); 4892 } 4893 } 4894 4895 mtx_unlock(&softc->ctl_lock); 4896 ctl_isc_announce_lun(lun); 4897 4898 return (0); 4899 } 4900 4901 int 4902 ctl_start_lun(struct ctl_be_lun *be_lun) 4903 { 4904 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4905 4906 mtx_lock(&lun->lun_lock); 4907 lun->flags &= ~CTL_LUN_STOPPED; 4908 mtx_unlock(&lun->lun_lock); 4909 return (0); 4910 } 4911 4912 int 4913 ctl_stop_lun(struct ctl_be_lun *be_lun) 4914 { 4915 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4916 4917 mtx_lock(&lun->lun_lock); 4918 lun->flags |= CTL_LUN_STOPPED; 4919 mtx_unlock(&lun->lun_lock); 4920 return (0); 4921 } 4922 4923 int 4924 ctl_lun_no_media(struct ctl_be_lun *be_lun) 4925 { 4926 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4927 4928 mtx_lock(&lun->lun_lock); 4929 lun->flags |= CTL_LUN_NO_MEDIA; 4930 mtx_unlock(&lun->lun_lock); 4931 return (0); 4932 } 4933 4934 int 4935 ctl_lun_has_media(struct ctl_be_lun *be_lun) 4936 { 4937 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4938 union ctl_ha_msg msg; 4939 4940 mtx_lock(&lun->lun_lock); 4941 lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED); 4942 if (lun->flags & CTL_LUN_REMOVABLE) 4943 ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE); 4944 mtx_unlock(&lun->lun_lock); 4945 if ((lun->flags & CTL_LUN_REMOVABLE) && 4946 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 4947 bzero(&msg.ua, sizeof(msg.ua)); 4948 msg.hdr.msg_type = CTL_MSG_UA; 4949 msg.hdr.nexus.initid = -1; 4950 msg.hdr.nexus.targ_port = -1; 4951 msg.hdr.nexus.targ_lun = lun->lun; 4952 msg.hdr.nexus.targ_mapped_lun = lun->lun; 4953 msg.ua.ua_all = 1; 4954 msg.ua.ua_set = 1; 4955 msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE; 4956 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 4957 M_WAITOK); 4958 } 4959 return (0); 4960 } 4961 4962 int 4963 ctl_lun_ejected(struct ctl_be_lun *be_lun) 4964 { 4965 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4966 4967 mtx_lock(&lun->lun_lock); 4968 lun->flags |= CTL_LUN_EJECTED; 4969 mtx_unlock(&lun->lun_lock); 4970 return (0); 4971 } 4972 4973 int 4974 ctl_lun_primary(struct ctl_be_lun *be_lun) 4975 { 4976 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4977 4978 mtx_lock(&lun->lun_lock); 4979 lun->flags |= CTL_LUN_PRIMARY_SC; 4980 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4981 mtx_unlock(&lun->lun_lock); 4982 ctl_isc_announce_lun(lun); 4983 return (0); 4984 } 4985 4986 int 4987 ctl_lun_secondary(struct ctl_be_lun *be_lun) 4988 { 4989 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4990 4991 mtx_lock(&lun->lun_lock); 4992 lun->flags &= ~CTL_LUN_PRIMARY_SC; 4993 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4994 mtx_unlock(&lun->lun_lock); 4995 ctl_isc_announce_lun(lun); 4996 return (0); 4997 } 4998 4999 int 5000 ctl_invalidate_lun(struct ctl_be_lun *be_lun) 5001 { 5002 struct ctl_softc *softc; 5003 struct ctl_lun *lun; 5004 5005 lun = (struct ctl_lun *)be_lun->ctl_lun; 5006 softc = lun->ctl_softc; 5007 5008 mtx_lock(&lun->lun_lock); 5009 5010 /* 5011 * The LUN needs to be disabled before it can be marked invalid. 
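	 * The usual teardown order is ctl_disable_lun() followed by this
	 * routine.  Once marked invalid, the LUN is freed right away if its
	 * OOA queue is empty, or when the last outstanding I/O completes.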
5012 */ 5013 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 5014 mtx_unlock(&lun->lun_lock); 5015 return (-1); 5016 } 5017 /* 5018 * Mark the LUN invalid. 5019 */ 5020 lun->flags |= CTL_LUN_INVALID; 5021 5022 /* 5023 * If there is nothing in the OOA queue, go ahead and free the LUN. 5024 * If we have something in the OOA queue, we'll free it when the 5025 * last I/O completes. 5026 */ 5027 if (TAILQ_EMPTY(&lun->ooa_queue)) { 5028 mtx_unlock(&lun->lun_lock); 5029 mtx_lock(&softc->ctl_lock); 5030 ctl_free_lun(lun); 5031 mtx_unlock(&softc->ctl_lock); 5032 } else 5033 mtx_unlock(&lun->lun_lock); 5034 5035 return (0); 5036 } 5037 5038 void 5039 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 5040 { 5041 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5042 union ctl_ha_msg msg; 5043 5044 mtx_lock(&lun->lun_lock); 5045 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE); 5046 mtx_unlock(&lun->lun_lock); 5047 if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 5048 /* Send msg to other side. */ 5049 bzero(&msg.ua, sizeof(msg.ua)); 5050 msg.hdr.msg_type = CTL_MSG_UA; 5051 msg.hdr.nexus.initid = -1; 5052 msg.hdr.nexus.targ_port = -1; 5053 msg.hdr.nexus.targ_lun = lun->lun; 5054 msg.hdr.nexus.targ_mapped_lun = lun->lun; 5055 msg.ua.ua_all = 1; 5056 msg.ua.ua_set = 1; 5057 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE; 5058 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 5059 M_WAITOK); 5060 } 5061 } 5062 5063 /* 5064 * Backend "memory move is complete" callback for requests that never 5065 * make it down to say RAIDCore's configuration code. 5066 */ 5067 int 5068 ctl_config_move_done(union ctl_io *io) 5069 { 5070 int retval; 5071 5072 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 5073 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 5074 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 5075 5076 if ((io->io_hdr.port_status != 0) && 5077 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5078 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5079 ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, 5080 /*retry_count*/ io->io_hdr.port_status); 5081 } else if (io->scsiio.kern_data_resid != 0 && 5082 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && 5083 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5084 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5085 ctl_set_invalid_field_ciu(&io->scsiio); 5086 } 5087 5088 if (ctl_debug & CTL_DEBUG_CDB_DATA) 5089 ctl_data_print(io); 5090 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 5091 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5092 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 5093 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 5094 /* 5095 * XXX KDM just assuming a single pointer here, and not a 5096 * S/G list. If we start using S/G lists for config data, 5097 * we'll need to know how to clean them up here as well. 5098 */ 5099 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5100 free(io->scsiio.kern_data_ptr, M_CTL); 5101 ctl_done(io); 5102 retval = CTL_RETVAL_COMPLETE; 5103 } else { 5104 /* 5105 * XXX KDM now we need to continue data movement. Some 5106 * options: 5107 * - call ctl_scsiio() again? We don't do this for data 5108 * writes, because for those at least we know ahead of 5109 * time where the write will go and how long it is. For 5110 * config writes, though, that information is largely 5111 * contained within the write itself, thus we need to 5112 * parse out the data again. 
5113 * 5114 * - Call some other function once the data is in? 5115 */ 5116 5117 /* 5118 * XXX KDM call ctl_scsiio() again for now, and check flag 5119 * bits to see whether we're allocated or not. 5120 */ 5121 retval = ctl_scsiio(&io->scsiio); 5122 } 5123 return (retval); 5124 } 5125 5126 /* 5127 * This gets called by a backend driver when it is done with a 5128 * data_submit method. 5129 */ 5130 void 5131 ctl_data_submit_done(union ctl_io *io) 5132 { 5133 /* 5134 * If the IO_CONT flag is set, we need to call the supplied 5135 * function to continue processing the I/O, instead of completing 5136 * the I/O just yet. 5137 * 5138 * If there is an error, though, we don't want to keep processing. 5139 * Instead, just send status back to the initiator. 5140 */ 5141 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5142 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5143 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5144 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5145 io->scsiio.io_cont(io); 5146 return; 5147 } 5148 ctl_done(io); 5149 } 5150 5151 /* 5152 * This gets called by a backend driver when it is done with a 5153 * configuration write. 5154 */ 5155 void 5156 ctl_config_write_done(union ctl_io *io) 5157 { 5158 uint8_t *buf; 5159 5160 /* 5161 * If the IO_CONT flag is set, we need to call the supplied 5162 * function to continue processing the I/O, instead of completing 5163 * the I/O just yet. 5164 * 5165 * If there is an error, though, we don't want to keep processing. 5166 * Instead, just send status back to the initiator. 5167 */ 5168 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5169 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5170 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5171 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5172 io->scsiio.io_cont(io); 5173 return; 5174 } 5175 /* 5176 * Since a configuration write can be done for commands that actually 5177 * have data allocated, like write buffer, and commands that have 5178 * no data, like start/stop unit, we need to check here. 5179 */ 5180 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5181 buf = io->scsiio.kern_data_ptr; 5182 else 5183 buf = NULL; 5184 ctl_done(io); 5185 if (buf) 5186 free(buf, M_CTL); 5187 } 5188 5189 void 5190 ctl_config_read_done(union ctl_io *io) 5191 { 5192 uint8_t *buf; 5193 5194 /* 5195 * If there is some error -- we are done, skip data transfer. 5196 */ 5197 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 5198 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5199 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 5200 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5201 buf = io->scsiio.kern_data_ptr; 5202 else 5203 buf = NULL; 5204 ctl_done(io); 5205 if (buf) 5206 free(buf, M_CTL); 5207 return; 5208 } 5209 5210 /* 5211 * If the IO_CONT flag is set, we need to call the supplied 5212 * function to continue processing the I/O, instead of completing 5213 * the I/O just yet. 5214 */ 5215 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 5216 io->scsiio.io_cont(io); 5217 return; 5218 } 5219 5220 ctl_datamove(io); 5221 } 5222 5223 /* 5224 * SCSI release command. 5225 */ 5226 int 5227 ctl_scsi_release(struct ctl_scsiio *ctsio) 5228 { 5229 struct ctl_lun *lun = CTL_LUN(ctsio); 5230 uint32_t residx; 5231 5232 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 5233 5234 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5235 5236 /* 5237 * XXX KDM right now, we only support LUN reservation. 
We don't 5238 * support 3rd party reservations, or extent reservations, which 5239 * might actually need the parameter list. If we've gotten this 5240 * far, we've got a LUN reservation. Anything else got kicked out 5241 * above. So, according to SPC, ignore the length. 5242 */ 5243 5244 mtx_lock(&lun->lun_lock); 5245 5246 /* 5247 * According to SPC, it is not an error for an intiator to attempt 5248 * to release a reservation on a LUN that isn't reserved, or that 5249 * is reserved by another initiator. The reservation can only be 5250 * released, though, by the initiator who made it or by one of 5251 * several reset type events. 5252 */ 5253 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 5254 lun->flags &= ~CTL_LUN_RESERVED; 5255 5256 mtx_unlock(&lun->lun_lock); 5257 5258 ctl_set_success(ctsio); 5259 ctl_done((union ctl_io *)ctsio); 5260 return (CTL_RETVAL_COMPLETE); 5261 } 5262 5263 int 5264 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5265 { 5266 struct ctl_lun *lun = CTL_LUN(ctsio); 5267 uint32_t residx; 5268 5269 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5270 5271 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5272 5273 /* 5274 * XXX KDM right now, we only support LUN reservation. We don't 5275 * support 3rd party reservations, or extent reservations, which 5276 * might actually need the parameter list. If we've gotten this 5277 * far, we've got a LUN reservation. Anything else got kicked out 5278 * above. So, according to SPC, ignore the length. 5279 */ 5280 5281 mtx_lock(&lun->lun_lock); 5282 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 5283 ctl_set_reservation_conflict(ctsio); 5284 goto bailout; 5285 } 5286 5287 /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. */ 5288 if (lun->flags & CTL_LUN_PR_RESERVED) { 5289 ctl_set_success(ctsio); 5290 goto bailout; 5291 } 5292 5293 lun->flags |= CTL_LUN_RESERVED; 5294 lun->res_idx = residx; 5295 ctl_set_success(ctsio); 5296 5297 bailout: 5298 mtx_unlock(&lun->lun_lock); 5299 ctl_done((union ctl_io *)ctsio); 5300 return (CTL_RETVAL_COMPLETE); 5301 } 5302 5303 int 5304 ctl_start_stop(struct ctl_scsiio *ctsio) 5305 { 5306 struct ctl_lun *lun = CTL_LUN(ctsio); 5307 struct scsi_start_stop_unit *cdb; 5308 int retval; 5309 5310 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5311 5312 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5313 5314 if ((cdb->how & SSS_PC_MASK) == 0) { 5315 if ((lun->flags & CTL_LUN_PR_RESERVED) && 5316 (cdb->how & SSS_START) == 0) { 5317 uint32_t residx; 5318 5319 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5320 if (ctl_get_prkey(lun, residx) == 0 || 5321 (lun->pr_res_idx != residx && lun->pr_res_type < 4)) { 5322 5323 ctl_set_reservation_conflict(ctsio); 5324 ctl_done((union ctl_io *)ctsio); 5325 return (CTL_RETVAL_COMPLETE); 5326 } 5327 } 5328 5329 if ((cdb->how & SSS_LOEJ) && 5330 (lun->flags & CTL_LUN_REMOVABLE) == 0) { 5331 ctl_set_invalid_field(ctsio, 5332 /*sks_valid*/ 1, 5333 /*command*/ 1, 5334 /*field*/ 4, 5335 /*bit_valid*/ 1, 5336 /*bit*/ 1); 5337 ctl_done((union ctl_io *)ctsio); 5338 return (CTL_RETVAL_COMPLETE); 5339 } 5340 5341 if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) && 5342 lun->prevent_count > 0) { 5343 /* "Medium removal prevented" */ 5344 ctl_set_sense(ctsio, /*current_error*/ 1, 5345 /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ? 
5346 SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST, 5347 /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE); 5348 ctl_done((union ctl_io *)ctsio); 5349 return (CTL_RETVAL_COMPLETE); 5350 } 5351 } 5352 5353 retval = lun->backend->config_write((union ctl_io *)ctsio); 5354 return (retval); 5355 } 5356 5357 int 5358 ctl_prevent_allow(struct ctl_scsiio *ctsio) 5359 { 5360 struct ctl_lun *lun = CTL_LUN(ctsio); 5361 struct scsi_prevent *cdb; 5362 int retval; 5363 uint32_t initidx; 5364 5365 CTL_DEBUG_PRINT(("ctl_prevent_allow\n")); 5366 5367 cdb = (struct scsi_prevent *)ctsio->cdb; 5368 5369 if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) { 5370 ctl_set_invalid_opcode(ctsio); 5371 ctl_done((union ctl_io *)ctsio); 5372 return (CTL_RETVAL_COMPLETE); 5373 } 5374 5375 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5376 mtx_lock(&lun->lun_lock); 5377 if ((cdb->how & PR_PREVENT) && 5378 ctl_is_set(lun->prevent, initidx) == 0) { 5379 ctl_set_mask(lun->prevent, initidx); 5380 lun->prevent_count++; 5381 } else if ((cdb->how & PR_PREVENT) == 0 && 5382 ctl_is_set(lun->prevent, initidx)) { 5383 ctl_clear_mask(lun->prevent, initidx); 5384 lun->prevent_count--; 5385 } 5386 mtx_unlock(&lun->lun_lock); 5387 retval = lun->backend->config_write((union ctl_io *)ctsio); 5388 return (retval); 5389 } 5390 5391 /* 5392 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5393 * we don't really do anything with the LBA and length fields if the user 5394 * passes them in. Instead we'll just flush out the cache for the entire 5395 * LUN. 5396 */ 5397 int 5398 ctl_sync_cache(struct ctl_scsiio *ctsio) 5399 { 5400 struct ctl_lun *lun = CTL_LUN(ctsio); 5401 struct ctl_lba_len_flags *lbalen; 5402 uint64_t starting_lba; 5403 uint32_t block_count; 5404 int retval; 5405 uint8_t byte2; 5406 5407 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5408 5409 retval = 0; 5410 5411 switch (ctsio->cdb[0]) { 5412 case SYNCHRONIZE_CACHE: { 5413 struct scsi_sync_cache *cdb; 5414 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5415 5416 starting_lba = scsi_4btoul(cdb->begin_lba); 5417 block_count = scsi_2btoul(cdb->lb_count); 5418 byte2 = cdb->byte2; 5419 break; 5420 } 5421 case SYNCHRONIZE_CACHE_16: { 5422 struct scsi_sync_cache_16 *cdb; 5423 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5424 5425 starting_lba = scsi_8btou64(cdb->begin_lba); 5426 block_count = scsi_4btoul(cdb->lb_count); 5427 byte2 = cdb->byte2; 5428 break; 5429 } 5430 default: 5431 ctl_set_invalid_opcode(ctsio); 5432 ctl_done((union ctl_io *)ctsio); 5433 goto bailout; 5434 break; /* NOTREACHED */ 5435 } 5436 5437 /* 5438 * We check the LBA and length, but don't do anything with them. 5439 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5440 * get flushed. This check will just help satisfy anyone who wants 5441 * to see an error for an out of range LBA. 
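	 * For example, a request to flush 10 blocks starting at
	 * (maxlba - 4) fails here, since starting_lba + block_count would
	 * exceed maxlba + 1.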
5442 */ 5443 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5444 ctl_set_lba_out_of_range(ctsio, 5445 MAX(starting_lba, lun->be_lun->maxlba + 1)); 5446 ctl_done((union ctl_io *)ctsio); 5447 goto bailout; 5448 } 5449 5450 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5451 lbalen->lba = starting_lba; 5452 lbalen->len = block_count; 5453 lbalen->flags = byte2; 5454 retval = lun->backend->config_write((union ctl_io *)ctsio); 5455 5456 bailout: 5457 return (retval); 5458 } 5459 5460 int 5461 ctl_format(struct ctl_scsiio *ctsio) 5462 { 5463 struct scsi_format *cdb; 5464 int length, defect_list_len; 5465 5466 CTL_DEBUG_PRINT(("ctl_format\n")); 5467 5468 cdb = (struct scsi_format *)ctsio->cdb; 5469 5470 length = 0; 5471 if (cdb->byte2 & SF_FMTDATA) { 5472 if (cdb->byte2 & SF_LONGLIST) 5473 length = sizeof(struct scsi_format_header_long); 5474 else 5475 length = sizeof(struct scsi_format_header_short); 5476 } 5477 5478 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5479 && (length > 0)) { 5480 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5481 ctsio->kern_data_len = length; 5482 ctsio->kern_total_len = length; 5483 ctsio->kern_rel_offset = 0; 5484 ctsio->kern_sg_entries = 0; 5485 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5486 ctsio->be_move_done = ctl_config_move_done; 5487 ctl_datamove((union ctl_io *)ctsio); 5488 5489 return (CTL_RETVAL_COMPLETE); 5490 } 5491 5492 defect_list_len = 0; 5493 5494 if (cdb->byte2 & SF_FMTDATA) { 5495 if (cdb->byte2 & SF_LONGLIST) { 5496 struct scsi_format_header_long *header; 5497 5498 header = (struct scsi_format_header_long *) 5499 ctsio->kern_data_ptr; 5500 5501 defect_list_len = scsi_4btoul(header->defect_list_len); 5502 if (defect_list_len != 0) { 5503 ctl_set_invalid_field(ctsio, 5504 /*sks_valid*/ 1, 5505 /*command*/ 0, 5506 /*field*/ 2, 5507 /*bit_valid*/ 0, 5508 /*bit*/ 0); 5509 goto bailout; 5510 } 5511 } else { 5512 struct scsi_format_header_short *header; 5513 5514 header = (struct scsi_format_header_short *) 5515 ctsio->kern_data_ptr; 5516 5517 defect_list_len = scsi_2btoul(header->defect_list_len); 5518 if (defect_list_len != 0) { 5519 ctl_set_invalid_field(ctsio, 5520 /*sks_valid*/ 1, 5521 /*command*/ 0, 5522 /*field*/ 2, 5523 /*bit_valid*/ 0, 5524 /*bit*/ 0); 5525 goto bailout; 5526 } 5527 } 5528 } 5529 5530 ctl_set_success(ctsio); 5531 bailout: 5532 5533 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5534 free(ctsio->kern_data_ptr, M_CTL); 5535 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5536 } 5537 5538 ctl_done((union ctl_io *)ctsio); 5539 return (CTL_RETVAL_COMPLETE); 5540 } 5541 5542 int 5543 ctl_read_buffer(struct ctl_scsiio *ctsio) 5544 { 5545 struct ctl_lun *lun = CTL_LUN(ctsio); 5546 uint64_t buffer_offset; 5547 uint32_t len; 5548 uint8_t byte2; 5549 static uint8_t descr[4]; 5550 static uint8_t echo_descr[4] = { 0 }; 5551 5552 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5553 5554 switch (ctsio->cdb[0]) { 5555 case READ_BUFFER: { 5556 struct scsi_read_buffer *cdb; 5557 5558 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5559 buffer_offset = scsi_3btoul(cdb->offset); 5560 len = scsi_3btoul(cdb->length); 5561 byte2 = cdb->byte2; 5562 break; 5563 } 5564 case READ_BUFFER_16: { 5565 struct scsi_read_buffer_16 *cdb; 5566 5567 cdb = (struct scsi_read_buffer_16 *)ctsio->cdb; 5568 buffer_offset = scsi_8btou64(cdb->offset); 5569 len = scsi_4btoul(cdb->length); 5570 byte2 = cdb->byte2; 5571 break; 5572 } 5573 default: /* This shouldn't happen. 
*/ 5574 ctl_set_invalid_opcode(ctsio); 5575 ctl_done((union ctl_io *)ctsio); 5576 return (CTL_RETVAL_COMPLETE); 5577 } 5578 5579 if (buffer_offset > CTL_WRITE_BUFFER_SIZE || 5580 buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5581 ctl_set_invalid_field(ctsio, 5582 /*sks_valid*/ 1, 5583 /*command*/ 1, 5584 /*field*/ 6, 5585 /*bit_valid*/ 0, 5586 /*bit*/ 0); 5587 ctl_done((union ctl_io *)ctsio); 5588 return (CTL_RETVAL_COMPLETE); 5589 } 5590 5591 if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5592 descr[0] = 0; 5593 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5594 ctsio->kern_data_ptr = descr; 5595 len = min(len, sizeof(descr)); 5596 } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5597 ctsio->kern_data_ptr = echo_descr; 5598 len = min(len, sizeof(echo_descr)); 5599 } else { 5600 if (lun->write_buffer == NULL) { 5601 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5602 M_CTL, M_WAITOK); 5603 } 5604 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5605 } 5606 ctsio->kern_data_len = len; 5607 ctsio->kern_total_len = len; 5608 ctsio->kern_rel_offset = 0; 5609 ctsio->kern_sg_entries = 0; 5610 ctl_set_success(ctsio); 5611 ctsio->be_move_done = ctl_config_move_done; 5612 ctl_datamove((union ctl_io *)ctsio); 5613 return (CTL_RETVAL_COMPLETE); 5614 } 5615 5616 int 5617 ctl_write_buffer(struct ctl_scsiio *ctsio) 5618 { 5619 struct ctl_lun *lun = CTL_LUN(ctsio); 5620 struct scsi_write_buffer *cdb; 5621 int buffer_offset, len; 5622 5623 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5624 5625 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5626 5627 len = scsi_3btoul(cdb->length); 5628 buffer_offset = scsi_3btoul(cdb->offset); 5629 5630 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5631 ctl_set_invalid_field(ctsio, 5632 /*sks_valid*/ 1, 5633 /*command*/ 1, 5634 /*field*/ 6, 5635 /*bit_valid*/ 0, 5636 /*bit*/ 0); 5637 ctl_done((union ctl_io *)ctsio); 5638 return (CTL_RETVAL_COMPLETE); 5639 } 5640 5641 /* 5642 * If we've got a kernel request that hasn't been malloced yet, 5643 * malloc it and tell the caller the data buffer is here. 
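	 * This is the usual two-pass pattern for config writes that carry a
	 * data-out phase: the first pass allocates the buffer and starts
	 * ctl_datamove(); once the data has arrived, ctl_config_move_done()
	 * re-dispatches the command and the second pass falls through to the
	 * completion code below with CTL_FLAG_ALLOCATED set.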
5644 */ 5645 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5646 if (lun->write_buffer == NULL) { 5647 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5648 M_CTL, M_WAITOK); 5649 } 5650 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5651 ctsio->kern_data_len = len; 5652 ctsio->kern_total_len = len; 5653 ctsio->kern_rel_offset = 0; 5654 ctsio->kern_sg_entries = 0; 5655 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5656 ctsio->be_move_done = ctl_config_move_done; 5657 ctl_datamove((union ctl_io *)ctsio); 5658 5659 return (CTL_RETVAL_COMPLETE); 5660 } 5661 5662 ctl_set_success(ctsio); 5663 ctl_done((union ctl_io *)ctsio); 5664 return (CTL_RETVAL_COMPLETE); 5665 } 5666 5667 int 5668 ctl_write_same(struct ctl_scsiio *ctsio) 5669 { 5670 struct ctl_lun *lun = CTL_LUN(ctsio); 5671 struct ctl_lba_len_flags *lbalen; 5672 uint64_t lba; 5673 uint32_t num_blocks; 5674 int len, retval; 5675 uint8_t byte2; 5676 5677 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5678 5679 switch (ctsio->cdb[0]) { 5680 case WRITE_SAME_10: { 5681 struct scsi_write_same_10 *cdb; 5682 5683 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5684 5685 lba = scsi_4btoul(cdb->addr); 5686 num_blocks = scsi_2btoul(cdb->length); 5687 byte2 = cdb->byte2; 5688 break; 5689 } 5690 case WRITE_SAME_16: { 5691 struct scsi_write_same_16 *cdb; 5692 5693 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5694 5695 lba = scsi_8btou64(cdb->addr); 5696 num_blocks = scsi_4btoul(cdb->length); 5697 byte2 = cdb->byte2; 5698 break; 5699 } 5700 default: 5701 /* 5702 * We got a command we don't support. This shouldn't 5703 * happen, commands should be filtered out above us. 5704 */ 5705 ctl_set_invalid_opcode(ctsio); 5706 ctl_done((union ctl_io *)ctsio); 5707 5708 return (CTL_RETVAL_COMPLETE); 5709 break; /* NOTREACHED */ 5710 } 5711 5712 /* ANCHOR flag can be used only together with UNMAP */ 5713 if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) { 5714 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5715 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5716 ctl_done((union ctl_io *)ctsio); 5717 return (CTL_RETVAL_COMPLETE); 5718 } 5719 5720 /* 5721 * The first check is to make sure we're in bounds, the second 5722 * check is to catch wrap-around problems. If the lba + num blocks 5723 * is less than the lba, then we've wrapped around and the block 5724 * range is invalid anyway. 5725 */ 5726 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5727 || ((lba + num_blocks) < lba)) { 5728 ctl_set_lba_out_of_range(ctsio, 5729 MAX(lba, lun->be_lun->maxlba + 1)); 5730 ctl_done((union ctl_io *)ctsio); 5731 return (CTL_RETVAL_COMPLETE); 5732 } 5733 5734 /* Zero number of blocks means "to the last logical block" */ 5735 if (num_blocks == 0) { 5736 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5737 ctl_set_invalid_field(ctsio, 5738 /*sks_valid*/ 0, 5739 /*command*/ 1, 5740 /*field*/ 0, 5741 /*bit_valid*/ 0, 5742 /*bit*/ 0); 5743 ctl_done((union ctl_io *)ctsio); 5744 return (CTL_RETVAL_COMPLETE); 5745 } 5746 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5747 } 5748 5749 len = lun->be_lun->blocksize; 5750 5751 /* 5752 * If we've got a kernel request that hasn't been malloced yet, 5753 * malloc it and tell the caller the data buffer is here. 
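	 * When the NDOB bit is set there is no data-out buffer at all, so in
	 * that case we skip the allocation and data movement below and go
	 * straight to the backend with the LBA/length range.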
5754 */ 5755 if ((byte2 & SWS_NDOB) == 0 && 5756 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5757 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5758 ctsio->kern_data_len = len; 5759 ctsio->kern_total_len = len; 5760 ctsio->kern_rel_offset = 0; 5761 ctsio->kern_sg_entries = 0; 5762 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5763 ctsio->be_move_done = ctl_config_move_done; 5764 ctl_datamove((union ctl_io *)ctsio); 5765 5766 return (CTL_RETVAL_COMPLETE); 5767 } 5768 5769 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5770 lbalen->lba = lba; 5771 lbalen->len = num_blocks; 5772 lbalen->flags = byte2; 5773 retval = lun->backend->config_write((union ctl_io *)ctsio); 5774 5775 return (retval); 5776 } 5777 5778 int 5779 ctl_unmap(struct ctl_scsiio *ctsio) 5780 { 5781 struct ctl_lun *lun = CTL_LUN(ctsio); 5782 struct scsi_unmap *cdb; 5783 struct ctl_ptr_len_flags *ptrlen; 5784 struct scsi_unmap_header *hdr; 5785 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5786 uint64_t lba; 5787 uint32_t num_blocks; 5788 int len, retval; 5789 uint8_t byte2; 5790 5791 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5792 5793 cdb = (struct scsi_unmap *)ctsio->cdb; 5794 len = scsi_2btoul(cdb->length); 5795 byte2 = cdb->byte2; 5796 5797 /* 5798 * If we've got a kernel request that hasn't been malloced yet, 5799 * malloc it and tell the caller the data buffer is here. 5800 */ 5801 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5802 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5803 ctsio->kern_data_len = len; 5804 ctsio->kern_total_len = len; 5805 ctsio->kern_rel_offset = 0; 5806 ctsio->kern_sg_entries = 0; 5807 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5808 ctsio->be_move_done = ctl_config_move_done; 5809 ctl_datamove((union ctl_io *)ctsio); 5810 5811 return (CTL_RETVAL_COMPLETE); 5812 } 5813 5814 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5815 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5816 if (len < sizeof (*hdr) || 5817 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5818 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5819 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5820 ctl_set_invalid_field(ctsio, 5821 /*sks_valid*/ 0, 5822 /*command*/ 0, 5823 /*field*/ 0, 5824 /*bit_valid*/ 0, 5825 /*bit*/ 0); 5826 goto done; 5827 } 5828 len = scsi_2btoul(hdr->desc_length); 5829 buf = (struct scsi_unmap_desc *)(hdr + 1); 5830 end = buf + len / sizeof(*buf); 5831 5832 endnz = buf; 5833 for (range = buf; range < end; range++) { 5834 lba = scsi_8btou64(range->lba); 5835 num_blocks = scsi_4btoul(range->length); 5836 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5837 || ((lba + num_blocks) < lba)) { 5838 ctl_set_lba_out_of_range(ctsio, 5839 MAX(lba, lun->be_lun->maxlba + 1)); 5840 ctl_done((union ctl_io *)ctsio); 5841 return (CTL_RETVAL_COMPLETE); 5842 } 5843 if (num_blocks != 0) 5844 endnz = range + 1; 5845 } 5846 5847 /* 5848 * Block backend can not handle zero last range. 5849 * Filter it out and return if there is nothing left. 
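	 * (endnz was advanced just past the last descriptor with a non-zero
	 * block count above, so any trailing zero-length descriptors are
	 * simply dropped here.)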
5850 */ 5851 len = (uint8_t *)endnz - (uint8_t *)buf; 5852 if (len == 0) { 5853 ctl_set_success(ctsio); 5854 goto done; 5855 } 5856 5857 mtx_lock(&lun->lun_lock); 5858 ptrlen = (struct ctl_ptr_len_flags *) 5859 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5860 ptrlen->ptr = (void *)buf; 5861 ptrlen->len = len; 5862 ptrlen->flags = byte2; 5863 ctl_check_blocked(lun); 5864 mtx_unlock(&lun->lun_lock); 5865 5866 retval = lun->backend->config_write((union ctl_io *)ctsio); 5867 return (retval); 5868 5869 done: 5870 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5871 free(ctsio->kern_data_ptr, M_CTL); 5872 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5873 } 5874 ctl_done((union ctl_io *)ctsio); 5875 return (CTL_RETVAL_COMPLETE); 5876 } 5877 5878 int 5879 ctl_default_page_handler(struct ctl_scsiio *ctsio, 5880 struct ctl_page_index *page_index, uint8_t *page_ptr) 5881 { 5882 struct ctl_lun *lun = CTL_LUN(ctsio); 5883 uint8_t *current_cp; 5884 int set_ua; 5885 uint32_t initidx; 5886 5887 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5888 set_ua = 0; 5889 5890 current_cp = (page_index->page_data + (page_index->page_len * 5891 CTL_PAGE_CURRENT)); 5892 5893 mtx_lock(&lun->lun_lock); 5894 if (memcmp(current_cp, page_ptr, page_index->page_len)) { 5895 memcpy(current_cp, page_ptr, page_index->page_len); 5896 set_ua = 1; 5897 } 5898 if (set_ua != 0) 5899 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5900 mtx_unlock(&lun->lun_lock); 5901 if (set_ua) { 5902 ctl_isc_announce_mode(lun, 5903 ctl_get_initindex(&ctsio->io_hdr.nexus), 5904 page_index->page_code, page_index->subpage); 5905 } 5906 return (CTL_RETVAL_COMPLETE); 5907 } 5908 5909 static void 5910 ctl_ie_timer(void *arg) 5911 { 5912 struct ctl_lun *lun = arg; 5913 uint64_t t; 5914 5915 if (lun->ie_asc == 0) 5916 return; 5917 5918 if (lun->MODE_IE.mrie == SIEP_MRIE_UA) 5919 ctl_est_ua_all(lun, -1, CTL_UA_IE); 5920 else 5921 lun->ie_reported = 0; 5922 5923 if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) { 5924 lun->ie_reportcnt++; 5925 t = scsi_4btoul(lun->MODE_IE.interval_timer); 5926 if (t == 0 || t == UINT32_MAX) 5927 t = 3000; /* 5 min */ 5928 callout_schedule(&lun->ie_callout, t * hz / 10); 5929 } 5930 } 5931 5932 int 5933 ctl_ie_page_handler(struct ctl_scsiio *ctsio, 5934 struct ctl_page_index *page_index, uint8_t *page_ptr) 5935 { 5936 struct ctl_lun *lun = CTL_LUN(ctsio); 5937 struct scsi_info_exceptions_page *pg; 5938 uint64_t t; 5939 5940 (void)ctl_default_page_handler(ctsio, page_index, page_ptr); 5941 5942 pg = (struct scsi_info_exceptions_page *)page_ptr; 5943 mtx_lock(&lun->lun_lock); 5944 if (pg->info_flags & SIEP_FLAGS_TEST) { 5945 lun->ie_asc = 0x5d; 5946 lun->ie_ascq = 0xff; 5947 if (pg->mrie == SIEP_MRIE_UA) { 5948 ctl_est_ua_all(lun, -1, CTL_UA_IE); 5949 lun->ie_reported = 1; 5950 } else { 5951 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 5952 lun->ie_reported = -1; 5953 } 5954 lun->ie_reportcnt = 1; 5955 if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) { 5956 lun->ie_reportcnt++; 5957 t = scsi_4btoul(pg->interval_timer); 5958 if (t == 0 || t == UINT32_MAX) 5959 t = 3000; /* 5 min */ 5960 callout_reset(&lun->ie_callout, t * hz / 10, 5961 ctl_ie_timer, lun); 5962 } 5963 } else { 5964 lun->ie_asc = 0; 5965 lun->ie_ascq = 0; 5966 lun->ie_reported = 1; 5967 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 5968 lun->ie_reportcnt = UINT32_MAX; 5969 callout_stop(&lun->ie_callout); 5970 } 5971 mtx_unlock(&lun->lun_lock); 5972 return (CTL_RETVAL_COMPLETE); 5973 } 5974 5975 static int 5976 ctl_do_mode_select(union ctl_io *io) 5977 { 
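/*
 * Walk the MODE SELECT parameter list one mode page at a time: match each
 * page/subpage against the LUN's mode page index, verify the page length
 * and that only changeable bits differ from the current values, then hand
 * the page off to its select handler.  ctl_mode_select() installs this
 * function as io_cont, so a handler that queues backend I/O re-enters here
 * once that I/O completes.
 */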
5978 struct ctl_lun *lun = CTL_LUN(io); 5979 struct scsi_mode_page_header *page_header; 5980 struct ctl_page_index *page_index; 5981 struct ctl_scsiio *ctsio; 5982 int page_len, page_len_offset, page_len_size; 5983 union ctl_modepage_info *modepage_info; 5984 uint16_t *len_left, *len_used; 5985 int retval, i; 5986 5987 ctsio = &io->scsiio; 5988 page_index = NULL; 5989 page_len = 0; 5990 5991 modepage_info = (union ctl_modepage_info *) 5992 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 5993 len_left = &modepage_info->header.len_left; 5994 len_used = &modepage_info->header.len_used; 5995 5996 do_next_page: 5997 5998 page_header = (struct scsi_mode_page_header *) 5999 (ctsio->kern_data_ptr + *len_used); 6000 6001 if (*len_left == 0) { 6002 free(ctsio->kern_data_ptr, M_CTL); 6003 ctl_set_success(ctsio); 6004 ctl_done((union ctl_io *)ctsio); 6005 return (CTL_RETVAL_COMPLETE); 6006 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 6007 6008 free(ctsio->kern_data_ptr, M_CTL); 6009 ctl_set_param_len_error(ctsio); 6010 ctl_done((union ctl_io *)ctsio); 6011 return (CTL_RETVAL_COMPLETE); 6012 6013 } else if ((page_header->page_code & SMPH_SPF) 6014 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6015 6016 free(ctsio->kern_data_ptr, M_CTL); 6017 ctl_set_param_len_error(ctsio); 6018 ctl_done((union ctl_io *)ctsio); 6019 return (CTL_RETVAL_COMPLETE); 6020 } 6021 6022 6023 /* 6024 * XXX KDM should we do something with the block descriptor? 6025 */ 6026 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6027 page_index = &lun->mode_pages.index[i]; 6028 if (lun->be_lun->lun_type == T_DIRECT && 6029 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6030 continue; 6031 if (lun->be_lun->lun_type == T_PROCESSOR && 6032 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6033 continue; 6034 if (lun->be_lun->lun_type == T_CDROM && 6035 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6036 continue; 6037 6038 if ((page_index->page_code & SMPH_PC_MASK) != 6039 (page_header->page_code & SMPH_PC_MASK)) 6040 continue; 6041 6042 /* 6043 * If neither page has a subpage code, then we've got a 6044 * match. 6045 */ 6046 if (((page_index->page_code & SMPH_SPF) == 0) 6047 && ((page_header->page_code & SMPH_SPF) == 0)) { 6048 page_len = page_header->page_length; 6049 break; 6050 } 6051 6052 /* 6053 * If both pages have subpages, then the subpage numbers 6054 * have to match. 6055 */ 6056 if ((page_index->page_code & SMPH_SPF) 6057 && (page_header->page_code & SMPH_SPF)) { 6058 struct scsi_mode_page_header_sp *sph; 6059 6060 sph = (struct scsi_mode_page_header_sp *)page_header; 6061 if (page_index->subpage == sph->subpage) { 6062 page_len = scsi_2btoul(sph->page_length); 6063 break; 6064 } 6065 } 6066 } 6067 6068 /* 6069 * If we couldn't find the page, or if we don't have a mode select 6070 * handler for it, send back an error to the user. 
6071 */
6072 if ((i >= CTL_NUM_MODE_PAGES)
6073 || (page_index->select_handler == NULL)) {
6074 ctl_set_invalid_field(ctsio,
6075 /*sks_valid*/ 1,
6076 /*command*/ 0,
6077 /*field*/ *len_used,
6078 /*bit_valid*/ 0,
6079 /*bit*/ 0);
6080 free(ctsio->kern_data_ptr, M_CTL);
6081 ctl_done((union ctl_io *)ctsio);
6082 return (CTL_RETVAL_COMPLETE);
6083 }
6084
6085 if (page_index->page_code & SMPH_SPF) {
6086 page_len_offset = 2;
6087 page_len_size = 2;
6088 } else {
6089 page_len_size = 1;
6090 page_len_offset = 1;
6091 }
6092
6093 /*
6094 * If the length the initiator gives us isn't the one we specify in
6095 * the mode page header, or if they didn't specify enough data in
6096 * the CDB to avoid truncating this page, kick out the request.
6097 */
6098 if (page_len != page_index->page_len - page_len_offset - page_len_size) {
6099 ctl_set_invalid_field(ctsio,
6100 /*sks_valid*/ 1,
6101 /*command*/ 0,
6102 /*field*/ *len_used + page_len_offset,
6103 /*bit_valid*/ 0,
6104 /*bit*/ 0);
6105 free(ctsio->kern_data_ptr, M_CTL);
6106 ctl_done((union ctl_io *)ctsio);
6107 return (CTL_RETVAL_COMPLETE);
6108 }
6109 if (*len_left < page_index->page_len) {
6110 free(ctsio->kern_data_ptr, M_CTL);
6111 ctl_set_param_len_error(ctsio);
6112 ctl_done((union ctl_io *)ctsio);
6113 return (CTL_RETVAL_COMPLETE);
6114 }
6115
6116 /*
6117 * Run through the mode page, checking to make sure that the bits
6118 * the user changed are actually legal for him to change.
6119 */
6120 for (i = 0; i < page_index->page_len; i++) {
6121 uint8_t *user_byte, *change_mask, *current_byte;
6122 int bad_bit;
6123 int j;
6124
6125 user_byte = (uint8_t *)page_header + i;
6126 change_mask = page_index->page_data +
6127 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i;
6128 current_byte = page_index->page_data +
6129 (page_index->page_len * CTL_PAGE_CURRENT) + i;
6130
6131 /*
6132 * Check to see whether the user set any bits in this byte
6133 * that he is not allowed to set.
6134 */
6135 if ((*user_byte & ~(*change_mask)) ==
6136 (*current_byte & ~(*change_mask)))
6137 continue;
6138
6139 /*
6140 * Go through bit by bit to determine which one is illegal.
6141 */
6142 bad_bit = 0;
6143 for (j = 7; j >= 0; j--) {
6144 if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
6145 (((1 << j) & ~(*change_mask)) & *current_byte)) {
6146 bad_bit = j;
6147 break;
6148 }
6149 }
6150 ctl_set_invalid_field(ctsio,
6151 /*sks_valid*/ 1,
6152 /*command*/ 0,
6153 /*field*/ *len_used + i,
6154 /*bit_valid*/ 1,
6155 /*bit*/ bad_bit);
6156 free(ctsio->kern_data_ptr, M_CTL);
6157 ctl_done((union ctl_io *)ctsio);
6158 return (CTL_RETVAL_COMPLETE);
6159 }
6160
6161 /*
6162 * Decrement these before we call the page handler, since we may
6163 * end up getting called back one way or another before the handler
6164 * returns to this context.
6165 */
6166 *len_left -= page_index->page_len;
6167 *len_used += page_index->page_len;
6168
6169 retval = page_index->select_handler(ctsio, page_index,
6170 (uint8_t *)page_header);
6171
6172 /*
6173 * If the page handler returns CTL_RETVAL_QUEUED, then we need to
6174 * wait until this queued command completes to finish processing
6175 * the mode page. If it returns anything other than
6176 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
6177 * already set the sense information, freed the data pointer, and
6178 * completed the io for us.
6179 */
6180 if (retval != CTL_RETVAL_COMPLETE)
6181 goto bailout_no_done;
6182
6183 /*
6184 * If the initiator sent us more than one page, parse the next one.
6185 */ 6186 if (*len_left > 0) 6187 goto do_next_page; 6188 6189 ctl_set_success(ctsio); 6190 free(ctsio->kern_data_ptr, M_CTL); 6191 ctl_done((union ctl_io *)ctsio); 6192 6193 bailout_no_done: 6194 6195 return (CTL_RETVAL_COMPLETE); 6196 6197 } 6198 6199 int 6200 ctl_mode_select(struct ctl_scsiio *ctsio) 6201 { 6202 struct ctl_lun *lun = CTL_LUN(ctsio); 6203 union ctl_modepage_info *modepage_info; 6204 int bd_len, i, header_size, param_len, pf, rtd, sp; 6205 uint32_t initidx; 6206 6207 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 6208 switch (ctsio->cdb[0]) { 6209 case MODE_SELECT_6: { 6210 struct scsi_mode_select_6 *cdb; 6211 6212 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6213 6214 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6215 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6216 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6217 param_len = cdb->length; 6218 header_size = sizeof(struct scsi_mode_header_6); 6219 break; 6220 } 6221 case MODE_SELECT_10: { 6222 struct scsi_mode_select_10 *cdb; 6223 6224 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6225 6226 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6227 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6228 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6229 param_len = scsi_2btoul(cdb->length); 6230 header_size = sizeof(struct scsi_mode_header_10); 6231 break; 6232 } 6233 default: 6234 ctl_set_invalid_opcode(ctsio); 6235 ctl_done((union ctl_io *)ctsio); 6236 return (CTL_RETVAL_COMPLETE); 6237 } 6238 6239 if (rtd) { 6240 if (param_len != 0) { 6241 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, 6242 /*command*/ 1, /*field*/ 0, 6243 /*bit_valid*/ 0, /*bit*/ 0); 6244 ctl_done((union ctl_io *)ctsio); 6245 return (CTL_RETVAL_COMPLETE); 6246 } 6247 6248 /* Revert to defaults. */ 6249 ctl_init_page_index(lun); 6250 mtx_lock(&lun->lun_lock); 6251 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 6252 mtx_unlock(&lun->lun_lock); 6253 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6254 ctl_isc_announce_mode(lun, -1, 6255 lun->mode_pages.index[i].page_code & SMPH_PC_MASK, 6256 lun->mode_pages.index[i].subpage); 6257 } 6258 ctl_set_success(ctsio); 6259 ctl_done((union ctl_io *)ctsio); 6260 return (CTL_RETVAL_COMPLETE); 6261 } 6262 6263 /* 6264 * From SPC-3: 6265 * "A parameter list length of zero indicates that the Data-Out Buffer 6266 * shall be empty. This condition shall not be considered as an error." 6267 */ 6268 if (param_len == 0) { 6269 ctl_set_success(ctsio); 6270 ctl_done((union ctl_io *)ctsio); 6271 return (CTL_RETVAL_COMPLETE); 6272 } 6273 6274 /* 6275 * Since we'll hit this the first time through, prior to 6276 * allocation, we don't need to free a data buffer here. 6277 */ 6278 if (param_len < header_size) { 6279 ctl_set_param_len_error(ctsio); 6280 ctl_done((union ctl_io *)ctsio); 6281 return (CTL_RETVAL_COMPLETE); 6282 } 6283 6284 /* 6285 * Allocate the data buffer and grab the user's data. In theory, 6286 * we shouldn't have to sanity check the parameter list length here 6287 * because the maximum size is 64K. We should be able to malloc 6288 * that much without too many problems. 
6289 */ 6290 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6291 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6292 ctsio->kern_data_len = param_len; 6293 ctsio->kern_total_len = param_len; 6294 ctsio->kern_rel_offset = 0; 6295 ctsio->kern_sg_entries = 0; 6296 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6297 ctsio->be_move_done = ctl_config_move_done; 6298 ctl_datamove((union ctl_io *)ctsio); 6299 6300 return (CTL_RETVAL_COMPLETE); 6301 } 6302 6303 switch (ctsio->cdb[0]) { 6304 case MODE_SELECT_6: { 6305 struct scsi_mode_header_6 *mh6; 6306 6307 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6308 bd_len = mh6->blk_desc_len; 6309 break; 6310 } 6311 case MODE_SELECT_10: { 6312 struct scsi_mode_header_10 *mh10; 6313 6314 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6315 bd_len = scsi_2btoul(mh10->blk_desc_len); 6316 break; 6317 } 6318 default: 6319 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6320 } 6321 6322 if (param_len < (header_size + bd_len)) { 6323 free(ctsio->kern_data_ptr, M_CTL); 6324 ctl_set_param_len_error(ctsio); 6325 ctl_done((union ctl_io *)ctsio); 6326 return (CTL_RETVAL_COMPLETE); 6327 } 6328 6329 /* 6330 * Set the IO_CONT flag, so that if this I/O gets passed to 6331 * ctl_config_write_done(), it'll get passed back to 6332 * ctl_do_mode_select() for further processing, or completion if 6333 * we're all done. 6334 */ 6335 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6336 ctsio->io_cont = ctl_do_mode_select; 6337 6338 modepage_info = (union ctl_modepage_info *) 6339 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6340 memset(modepage_info, 0, sizeof(*modepage_info)); 6341 modepage_info->header.len_left = param_len - header_size - bd_len; 6342 modepage_info->header.len_used = header_size + bd_len; 6343 6344 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6345 } 6346 6347 int 6348 ctl_mode_sense(struct ctl_scsiio *ctsio) 6349 { 6350 struct ctl_lun *lun = CTL_LUN(ctsio); 6351 int pc, page_code, dbd, llba, subpage; 6352 int alloc_len, page_len, header_len, total_len; 6353 struct scsi_mode_block_descr *block_desc; 6354 struct ctl_page_index *page_index; 6355 6356 dbd = 0; 6357 llba = 0; 6358 block_desc = NULL; 6359 6360 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6361 6362 switch (ctsio->cdb[0]) { 6363 case MODE_SENSE_6: { 6364 struct scsi_mode_sense_6 *cdb; 6365 6366 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6367 6368 header_len = sizeof(struct scsi_mode_hdr_6); 6369 if (cdb->byte2 & SMS_DBD) 6370 dbd = 1; 6371 else 6372 header_len += sizeof(struct scsi_mode_block_descr); 6373 6374 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6375 page_code = cdb->page & SMS_PAGE_CODE; 6376 subpage = cdb->subpage; 6377 alloc_len = cdb->length; 6378 break; 6379 } 6380 case MODE_SENSE_10: { 6381 struct scsi_mode_sense_10 *cdb; 6382 6383 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6384 6385 header_len = sizeof(struct scsi_mode_hdr_10); 6386 6387 if (cdb->byte2 & SMS_DBD) 6388 dbd = 1; 6389 else 6390 header_len += sizeof(struct scsi_mode_block_descr); 6391 if (cdb->byte2 & SMS10_LLBAA) 6392 llba = 1; 6393 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6394 page_code = cdb->page & SMS_PAGE_CODE; 6395 subpage = cdb->subpage; 6396 alloc_len = scsi_2btoul(cdb->length); 6397 break; 6398 } 6399 default: 6400 ctl_set_invalid_opcode(ctsio); 6401 ctl_done((union ctl_io *)ctsio); 6402 return (CTL_RETVAL_COMPLETE); 6403 break; /* NOTREACHED */ 6404 } 6405 6406 /* 6407 * We have to make a first pass through to calculate the size of 6408 * the pages that 
match the user's query. Then we allocate enough 6409 * memory to hold it, and actually copy the data into the buffer. 6410 */ 6411 switch (page_code) { 6412 case SMS_ALL_PAGES_PAGE: { 6413 u_int i; 6414 6415 page_len = 0; 6416 6417 /* 6418 * At the moment, values other than 0 and 0xff here are 6419 * reserved according to SPC-3. 6420 */ 6421 if ((subpage != SMS_SUBPAGE_PAGE_0) 6422 && (subpage != SMS_SUBPAGE_ALL)) { 6423 ctl_set_invalid_field(ctsio, 6424 /*sks_valid*/ 1, 6425 /*command*/ 1, 6426 /*field*/ 3, 6427 /*bit_valid*/ 0, 6428 /*bit*/ 0); 6429 ctl_done((union ctl_io *)ctsio); 6430 return (CTL_RETVAL_COMPLETE); 6431 } 6432 6433 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6434 page_index = &lun->mode_pages.index[i]; 6435 6436 /* Make sure the page is supported for this dev type */ 6437 if (lun->be_lun->lun_type == T_DIRECT && 6438 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6439 continue; 6440 if (lun->be_lun->lun_type == T_PROCESSOR && 6441 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6442 continue; 6443 if (lun->be_lun->lun_type == T_CDROM && 6444 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6445 continue; 6446 6447 /* 6448 * We don't use this subpage if the user didn't 6449 * request all subpages. 6450 */ 6451 if ((page_index->subpage != 0) 6452 && (subpage == SMS_SUBPAGE_PAGE_0)) 6453 continue; 6454 6455 #if 0 6456 printf("found page %#x len %d\n", 6457 page_index->page_code & SMPH_PC_MASK, 6458 page_index->page_len); 6459 #endif 6460 page_len += page_index->page_len; 6461 } 6462 break; 6463 } 6464 default: { 6465 u_int i; 6466 6467 page_len = 0; 6468 6469 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6470 page_index = &lun->mode_pages.index[i]; 6471 6472 /* Make sure the page is supported for this dev type */ 6473 if (lun->be_lun->lun_type == T_DIRECT && 6474 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6475 continue; 6476 if (lun->be_lun->lun_type == T_PROCESSOR && 6477 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6478 continue; 6479 if (lun->be_lun->lun_type == T_CDROM && 6480 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6481 continue; 6482 6483 /* Look for the right page code */ 6484 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6485 continue; 6486 6487 /* Look for the right subpage or the subpage wildcard*/ 6488 if ((page_index->subpage != subpage) 6489 && (subpage != SMS_SUBPAGE_ALL)) 6490 continue; 6491 6492 #if 0 6493 printf("found page %#x len %d\n", 6494 page_index->page_code & SMPH_PC_MASK, 6495 page_index->page_len); 6496 #endif 6497 6498 page_len += page_index->page_len; 6499 } 6500 6501 if (page_len == 0) { 6502 ctl_set_invalid_field(ctsio, 6503 /*sks_valid*/ 1, 6504 /*command*/ 1, 6505 /*field*/ 2, 6506 /*bit_valid*/ 1, 6507 /*bit*/ 5); 6508 ctl_done((union ctl_io *)ctsio); 6509 return (CTL_RETVAL_COMPLETE); 6510 } 6511 break; 6512 } 6513 } 6514 6515 total_len = header_len + page_len; 6516 #if 0 6517 printf("header_len = %d, page_len = %d, total_len = %d\n", 6518 header_len, page_len, total_len); 6519 #endif 6520 6521 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6522 ctsio->kern_sg_entries = 0; 6523 ctsio->kern_rel_offset = 0; 6524 ctsio->kern_data_len = min(total_len, alloc_len); 6525 ctsio->kern_total_len = ctsio->kern_data_len; 6526 6527 switch (ctsio->cdb[0]) { 6528 case MODE_SENSE_6: { 6529 struct scsi_mode_hdr_6 *header; 6530 6531 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6532 6533 header->datalen = MIN(total_len - 1, 254); 6534 if (lun->be_lun->lun_type == T_DIRECT) 
{ 6535 header->dev_specific = 0x10; /* DPOFUA */ 6536 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6537 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6538 header->dev_specific |= 0x80; /* WP */ 6539 } 6540 if (dbd) 6541 header->block_descr_len = 0; 6542 else 6543 header->block_descr_len = 6544 sizeof(struct scsi_mode_block_descr); 6545 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6546 break; 6547 } 6548 case MODE_SENSE_10: { 6549 struct scsi_mode_hdr_10 *header; 6550 int datalen; 6551 6552 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6553 6554 datalen = MIN(total_len - 2, 65533); 6555 scsi_ulto2b(datalen, header->datalen); 6556 if (lun->be_lun->lun_type == T_DIRECT) { 6557 header->dev_specific = 0x10; /* DPOFUA */ 6558 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6559 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6560 header->dev_specific |= 0x80; /* WP */ 6561 } 6562 if (dbd) 6563 scsi_ulto2b(0, header->block_descr_len); 6564 else 6565 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6566 header->block_descr_len); 6567 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6568 break; 6569 } 6570 default: 6571 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6572 } 6573 6574 /* 6575 * If we've got a disk, use its blocksize in the block 6576 * descriptor. Otherwise, just set it to 0. 6577 */ 6578 if (dbd == 0) { 6579 if (lun->be_lun->lun_type == T_DIRECT) 6580 scsi_ulto3b(lun->be_lun->blocksize, 6581 block_desc->block_len); 6582 else 6583 scsi_ulto3b(0, block_desc->block_len); 6584 } 6585 6586 switch (page_code) { 6587 case SMS_ALL_PAGES_PAGE: { 6588 int i, data_used; 6589 6590 data_used = header_len; 6591 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6592 struct ctl_page_index *page_index; 6593 6594 page_index = &lun->mode_pages.index[i]; 6595 if (lun->be_lun->lun_type == T_DIRECT && 6596 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6597 continue; 6598 if (lun->be_lun->lun_type == T_PROCESSOR && 6599 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6600 continue; 6601 if (lun->be_lun->lun_type == T_CDROM && 6602 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6603 continue; 6604 6605 /* 6606 * We don't use this subpage if the user didn't 6607 * request all subpages. We already checked (above) 6608 * to make sure the user only specified a subpage 6609 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6610 */ 6611 if ((page_index->subpage != 0) 6612 && (subpage == SMS_SUBPAGE_PAGE_0)) 6613 continue; 6614 6615 /* 6616 * Call the handler, if it exists, to update the 6617 * page to the latest values. 
6618 */ 6619 if (page_index->sense_handler != NULL) 6620 page_index->sense_handler(ctsio, page_index,pc); 6621 6622 memcpy(ctsio->kern_data_ptr + data_used, 6623 page_index->page_data + 6624 (page_index->page_len * pc), 6625 page_index->page_len); 6626 data_used += page_index->page_len; 6627 } 6628 break; 6629 } 6630 default: { 6631 int i, data_used; 6632 6633 data_used = header_len; 6634 6635 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6636 struct ctl_page_index *page_index; 6637 6638 page_index = &lun->mode_pages.index[i]; 6639 6640 /* Look for the right page code */ 6641 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6642 continue; 6643 6644 /* Look for the right subpage or the subpage wildcard*/ 6645 if ((page_index->subpage != subpage) 6646 && (subpage != SMS_SUBPAGE_ALL)) 6647 continue; 6648 6649 /* Make sure the page is supported for this dev type */ 6650 if (lun->be_lun->lun_type == T_DIRECT && 6651 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6652 continue; 6653 if (lun->be_lun->lun_type == T_PROCESSOR && 6654 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6655 continue; 6656 if (lun->be_lun->lun_type == T_CDROM && 6657 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6658 continue; 6659 6660 /* 6661 * Call the handler, if it exists, to update the 6662 * page to the latest values. 6663 */ 6664 if (page_index->sense_handler != NULL) 6665 page_index->sense_handler(ctsio, page_index,pc); 6666 6667 memcpy(ctsio->kern_data_ptr + data_used, 6668 page_index->page_data + 6669 (page_index->page_len * pc), 6670 page_index->page_len); 6671 data_used += page_index->page_len; 6672 } 6673 break; 6674 } 6675 } 6676 6677 ctl_set_success(ctsio); 6678 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6679 ctsio->be_move_done = ctl_config_move_done; 6680 ctl_datamove((union ctl_io *)ctsio); 6681 return (CTL_RETVAL_COMPLETE); 6682 } 6683 6684 int 6685 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6686 struct ctl_page_index *page_index, 6687 int pc) 6688 { 6689 struct ctl_lun *lun = CTL_LUN(ctsio); 6690 struct scsi_log_param_header *phdr; 6691 uint8_t *data; 6692 uint64_t val; 6693 6694 data = page_index->page_data; 6695 6696 if (lun->backend->lun_attr != NULL && 6697 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) 6698 != UINT64_MAX) { 6699 phdr = (struct scsi_log_param_header *)data; 6700 scsi_ulto2b(0x0001, phdr->param_code); 6701 phdr->param_control = SLP_LBIN | SLP_LP; 6702 phdr->param_len = 8; 6703 data = (uint8_t *)(phdr + 1); 6704 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6705 data[4] = 0x02; /* per-pool */ 6706 data += phdr->param_len; 6707 } 6708 6709 if (lun->backend->lun_attr != NULL && 6710 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) 6711 != UINT64_MAX) { 6712 phdr = (struct scsi_log_param_header *)data; 6713 scsi_ulto2b(0x0002, phdr->param_code); 6714 phdr->param_control = SLP_LBIN | SLP_LP; 6715 phdr->param_len = 8; 6716 data = (uint8_t *)(phdr + 1); 6717 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6718 data[4] = 0x01; /* per-LUN */ 6719 data += phdr->param_len; 6720 } 6721 6722 if (lun->backend->lun_attr != NULL && 6723 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail")) 6724 != UINT64_MAX) { 6725 phdr = (struct scsi_log_param_header *)data; 6726 scsi_ulto2b(0x00f1, phdr->param_code); 6727 phdr->param_control = SLP_LBIN | SLP_LP; 6728 phdr->param_len = 8; 6729 data = (uint8_t *)(phdr + 1); 6730 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6731 data[4] = 0x02; /* per-pool */ 6732 data += 
phdr->param_len;
6733 }
6734
6735 if (lun->backend->lun_attr != NULL &&
6736 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused"))
6737 != UINT64_MAX) {
6738 phdr = (struct scsi_log_param_header *)data;
6739 scsi_ulto2b(0x00f2, phdr->param_code);
6740 phdr->param_control = SLP_LBIN | SLP_LP;
6741 phdr->param_len = 8;
6742 data = (uint8_t *)(phdr + 1);
6743 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6744 data[4] = 0x02; /* per-pool */
6745 data += phdr->param_len;
6746 }
6747
6748 page_index->page_len = data - page_index->page_data;
6749 return (0);
6750 }
6751
6752 int
6753 ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio,
6754 struct ctl_page_index *page_index,
6755 int pc)
6756 {
6757 struct ctl_lun *lun = CTL_LUN(ctsio);
6758 struct stat_page *data;
6759 struct bintime *t;
6760
6761 data = (struct stat_page *)page_index->page_data;
6762
6763 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code);
6764 data->sap.hdr.param_control = SLP_LBIN;
6765 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) -
6766 sizeof(struct scsi_log_param_header);
6767 scsi_u64to8b(lun->stats.operations[CTL_STATS_READ],
6768 data->sap.read_num);
6769 scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE],
6770 data->sap.write_num);
6771 if (lun->be_lun->blocksize > 0) {
6772 scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] /
6773 lun->be_lun->blocksize, data->sap.recvieved_lba);
6774 scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] /
6775 lun->be_lun->blocksize, data->sap.transmitted_lba);
6776 }
6777 t = &lun->stats.time[CTL_STATS_READ];
6778 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000),
6779 data->sap.read_int);
6780 t = &lun->stats.time[CTL_STATS_WRITE];
6781 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000),
6782 data->sap.write_int);
6783 scsi_u64to8b(0, data->sap.weighted_num);
6784 scsi_u64to8b(0, data->sap.weighted_int);
6785 scsi_ulto2b(SLP_IT, data->it.hdr.param_code);
6786 data->it.hdr.param_control = SLP_LBIN;
6787 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) -
6788 sizeof(struct scsi_log_param_header);
6789 #ifdef CTL_TIME_IO
6790 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int);
6791 #endif
6792 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code);
6793 data->ti.hdr.param_control = SLP_LBIN;
6794 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) -
6795 sizeof(struct scsi_log_param_header);
6796 scsi_ulto4b(3, data->ti.exponent);
6797 scsi_ulto4b(1, data->ti.integer);
6798 return (0);
6799 }
6800
6801 int
6802 ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio,
6803 struct ctl_page_index *page_index,
6804 int pc)
6805 {
6806 struct ctl_lun *lun = CTL_LUN(ctsio);
6807 struct scsi_log_informational_exceptions *data;
6808
6809 data = (struct scsi_log_informational_exceptions *)page_index->page_data;
6810
6811 scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code);
6812 data->hdr.param_control = SLP_LBIN;
6813 data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) -
6814 sizeof(struct scsi_log_param_header);
6815 data->ie_asc = lun->ie_asc;
6816 data->ie_ascq = lun->ie_ascq;
6817 data->temperature = 0xff;
6818 return (0);
6819 }
6820
6821 int
6822 ctl_log_sense(struct ctl_scsiio *ctsio)
6823 {
6824 struct ctl_lun *lun = CTL_LUN(ctsio);
6825 int i, pc, page_code, subpage;
6826 int alloc_len, total_len;
6827 struct ctl_page_index *page_index;
6828 struct scsi_log_sense *cdb;
6829 struct scsi_log_header *header;
6830
6831 CTL_DEBUG_PRINT(("ctl_log_sense\n"));
6832
6833 cdb = (struct scsi_log_sense *)ctsio->cdb;
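/*
 * The page control (PC) field occupies the top two bits of the CDB's page
 * byte and the page code the low six bits; the subpage code and the
 * two-byte allocation length come from their own CDB fields.
 */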
6834 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 6835 page_code = cdb->page & SLS_PAGE_CODE; 6836 subpage = cdb->subpage; 6837 alloc_len = scsi_2btoul(cdb->length); 6838 6839 page_index = NULL; 6840 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 6841 page_index = &lun->log_pages.index[i]; 6842 6843 /* Look for the right page code */ 6844 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 6845 continue; 6846 6847 /* Look for the right subpage or the subpage wildcard*/ 6848 if (page_index->subpage != subpage) 6849 continue; 6850 6851 break; 6852 } 6853 if (i >= CTL_NUM_LOG_PAGES) { 6854 ctl_set_invalid_field(ctsio, 6855 /*sks_valid*/ 1, 6856 /*command*/ 1, 6857 /*field*/ 2, 6858 /*bit_valid*/ 0, 6859 /*bit*/ 0); 6860 ctl_done((union ctl_io *)ctsio); 6861 return (CTL_RETVAL_COMPLETE); 6862 } 6863 6864 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 6865 6866 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6867 ctsio->kern_sg_entries = 0; 6868 ctsio->kern_rel_offset = 0; 6869 ctsio->kern_data_len = min(total_len, alloc_len); 6870 ctsio->kern_total_len = ctsio->kern_data_len; 6871 6872 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 6873 header->page = page_index->page_code; 6874 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING) 6875 header->page |= SL_DS; 6876 if (page_index->subpage) { 6877 header->page |= SL_SPF; 6878 header->subpage = page_index->subpage; 6879 } 6880 scsi_ulto2b(page_index->page_len, header->datalen); 6881 6882 /* 6883 * Call the handler, if it exists, to update the 6884 * page to the latest values. 6885 */ 6886 if (page_index->sense_handler != NULL) 6887 page_index->sense_handler(ctsio, page_index, pc); 6888 6889 memcpy(header + 1, page_index->page_data, page_index->page_len); 6890 6891 ctl_set_success(ctsio); 6892 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6893 ctsio->be_move_done = ctl_config_move_done; 6894 ctl_datamove((union ctl_io *)ctsio); 6895 return (CTL_RETVAL_COMPLETE); 6896 } 6897 6898 int 6899 ctl_read_capacity(struct ctl_scsiio *ctsio) 6900 { 6901 struct ctl_lun *lun = CTL_LUN(ctsio); 6902 struct scsi_read_capacity *cdb; 6903 struct scsi_read_capacity_data *data; 6904 uint32_t lba; 6905 6906 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 6907 6908 cdb = (struct scsi_read_capacity *)ctsio->cdb; 6909 6910 lba = scsi_4btoul(cdb->addr); 6911 if (((cdb->pmi & SRC_PMI) == 0) 6912 && (lba != 0)) { 6913 ctl_set_invalid_field(/*ctsio*/ ctsio, 6914 /*sks_valid*/ 1, 6915 /*command*/ 1, 6916 /*field*/ 2, 6917 /*bit_valid*/ 0, 6918 /*bit*/ 0); 6919 ctl_done((union ctl_io *)ctsio); 6920 return (CTL_RETVAL_COMPLETE); 6921 } 6922 6923 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6924 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 6925 ctsio->kern_data_len = sizeof(*data); 6926 ctsio->kern_total_len = sizeof(*data); 6927 ctsio->kern_rel_offset = 0; 6928 ctsio->kern_sg_entries = 0; 6929 6930 /* 6931 * If the maximum LBA is greater than 0xfffffffe, the user must 6932 * issue a SERVICE ACTION IN (16) command, with the read capacity 6933 * serivce action set. 6934 */ 6935 if (lun->be_lun->maxlba > 0xfffffffe) 6936 scsi_ulto4b(0xffffffff, data->addr); 6937 else 6938 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 6939 6940 /* 6941 * XXX KDM this may not be 512 bytes... 
6942 */ 6943 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6944 6945 ctl_set_success(ctsio); 6946 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6947 ctsio->be_move_done = ctl_config_move_done; 6948 ctl_datamove((union ctl_io *)ctsio); 6949 return (CTL_RETVAL_COMPLETE); 6950 } 6951 6952 int 6953 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 6954 { 6955 struct ctl_lun *lun = CTL_LUN(ctsio); 6956 struct scsi_read_capacity_16 *cdb; 6957 struct scsi_read_capacity_data_long *data; 6958 uint64_t lba; 6959 uint32_t alloc_len; 6960 6961 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 6962 6963 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 6964 6965 alloc_len = scsi_4btoul(cdb->alloc_len); 6966 lba = scsi_8btou64(cdb->addr); 6967 6968 if ((cdb->reladr & SRC16_PMI) 6969 && (lba != 0)) { 6970 ctl_set_invalid_field(/*ctsio*/ ctsio, 6971 /*sks_valid*/ 1, 6972 /*command*/ 1, 6973 /*field*/ 2, 6974 /*bit_valid*/ 0, 6975 /*bit*/ 0); 6976 ctl_done((union ctl_io *)ctsio); 6977 return (CTL_RETVAL_COMPLETE); 6978 } 6979 6980 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6981 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 6982 ctsio->kern_rel_offset = 0; 6983 ctsio->kern_sg_entries = 0; 6984 ctsio->kern_data_len = min(sizeof(*data), alloc_len); 6985 ctsio->kern_total_len = ctsio->kern_data_len; 6986 6987 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 6988 /* XXX KDM this may not be 512 bytes... */ 6989 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6990 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 6991 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 6992 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 6993 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 6994 6995 ctl_set_success(ctsio); 6996 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6997 ctsio->be_move_done = ctl_config_move_done; 6998 ctl_datamove((union ctl_io *)ctsio); 6999 return (CTL_RETVAL_COMPLETE); 7000 } 7001 7002 int 7003 ctl_get_lba_status(struct ctl_scsiio *ctsio) 7004 { 7005 struct ctl_lun *lun = CTL_LUN(ctsio); 7006 struct scsi_get_lba_status *cdb; 7007 struct scsi_get_lba_status_data *data; 7008 struct ctl_lba_len_flags *lbalen; 7009 uint64_t lba; 7010 uint32_t alloc_len, total_len; 7011 int retval; 7012 7013 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 7014 7015 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 7016 lba = scsi_8btou64(cdb->addr); 7017 alloc_len = scsi_4btoul(cdb->alloc_len); 7018 7019 if (lba > lun->be_lun->maxlba) { 7020 ctl_set_lba_out_of_range(ctsio, lba); 7021 ctl_done((union ctl_io *)ctsio); 7022 return (CTL_RETVAL_COMPLETE); 7023 } 7024 7025 total_len = sizeof(*data) + sizeof(data->descr[0]); 7026 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7027 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 7028 ctsio->kern_rel_offset = 0; 7029 ctsio->kern_sg_entries = 0; 7030 ctsio->kern_data_len = min(total_len, alloc_len); 7031 ctsio->kern_total_len = ctsio->kern_data_len; 7032 7033 /* Fill dummy data in case backend can't tell anything. */ 7034 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 7035 scsi_u64to8b(lba, data->descr[0].addr); 7036 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 7037 data->descr[0].length); 7038 data->descr[0].status = 0; /* Mapped or unknown. 
*/ 7039 7040 ctl_set_success(ctsio); 7041 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7042 ctsio->be_move_done = ctl_config_move_done; 7043 7044 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 7045 lbalen->lba = lba; 7046 lbalen->len = total_len; 7047 lbalen->flags = 0; 7048 retval = lun->backend->config_read((union ctl_io *)ctsio); 7049 return (CTL_RETVAL_COMPLETE); 7050 } 7051 7052 int 7053 ctl_read_defect(struct ctl_scsiio *ctsio) 7054 { 7055 struct scsi_read_defect_data_10 *ccb10; 7056 struct scsi_read_defect_data_12 *ccb12; 7057 struct scsi_read_defect_data_hdr_10 *data10; 7058 struct scsi_read_defect_data_hdr_12 *data12; 7059 uint32_t alloc_len, data_len; 7060 uint8_t format; 7061 7062 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7063 7064 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7065 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7066 format = ccb10->format; 7067 alloc_len = scsi_2btoul(ccb10->alloc_length); 7068 data_len = sizeof(*data10); 7069 } else { 7070 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7071 format = ccb12->format; 7072 alloc_len = scsi_4btoul(ccb12->alloc_length); 7073 data_len = sizeof(*data12); 7074 } 7075 if (alloc_len == 0) { 7076 ctl_set_success(ctsio); 7077 ctl_done((union ctl_io *)ctsio); 7078 return (CTL_RETVAL_COMPLETE); 7079 } 7080 7081 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7082 ctsio->kern_rel_offset = 0; 7083 ctsio->kern_sg_entries = 0; 7084 ctsio->kern_data_len = min(data_len, alloc_len); 7085 ctsio->kern_total_len = ctsio->kern_data_len; 7086 7087 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7088 data10 = (struct scsi_read_defect_data_hdr_10 *) 7089 ctsio->kern_data_ptr; 7090 data10->format = format; 7091 scsi_ulto2b(0, data10->length); 7092 } else { 7093 data12 = (struct scsi_read_defect_data_hdr_12 *) 7094 ctsio->kern_data_ptr; 7095 data12->format = format; 7096 scsi_ulto2b(0, data12->generation); 7097 scsi_ulto4b(0, data12->length); 7098 } 7099 7100 ctl_set_success(ctsio); 7101 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7102 ctsio->be_move_done = ctl_config_move_done; 7103 ctl_datamove((union ctl_io *)ctsio); 7104 return (CTL_RETVAL_COMPLETE); 7105 } 7106 7107 int 7108 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7109 { 7110 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7111 struct ctl_lun *lun = CTL_LUN(ctsio); 7112 struct scsi_maintenance_in *cdb; 7113 int retval; 7114 int alloc_len, ext, total_len = 0, g, pc, pg, ts, os; 7115 int num_ha_groups, num_target_ports, shared_group; 7116 struct ctl_port *port; 7117 struct scsi_target_group_data *rtg_ptr; 7118 struct scsi_target_group_data_extended *rtg_ext_ptr; 7119 struct scsi_target_port_group_descriptor *tpg_desc; 7120 7121 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7122 7123 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7124 retval = CTL_RETVAL_COMPLETE; 7125 7126 switch (cdb->byte2 & STG_PDF_MASK) { 7127 case STG_PDF_LENGTH: 7128 ext = 0; 7129 break; 7130 case STG_PDF_EXTENDED: 7131 ext = 1; 7132 break; 7133 default: 7134 ctl_set_invalid_field(/*ctsio*/ ctsio, 7135 /*sks_valid*/ 1, 7136 /*command*/ 1, 7137 /*field*/ 2, 7138 /*bit_valid*/ 1, 7139 /*bit*/ 5); 7140 ctl_done((union ctl_io *)ctsio); 7141 return(retval); 7142 } 7143 7144 num_target_ports = 0; 7145 shared_group = (softc->is_single != 0); 7146 mtx_lock(&softc->ctl_lock); 7147 STAILQ_FOREACH(port, &softc->port_list, links) { 7148 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7149 continue; 7150 if (ctl_lun_map_to_port(port, lun->lun) 
== UINT32_MAX) 7151 continue; 7152 num_target_ports++; 7153 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7154 shared_group = 1; 7155 } 7156 mtx_unlock(&softc->ctl_lock); 7157 num_ha_groups = (softc->is_single) ? 0 : NUM_HA_SHELVES; 7158 7159 if (ext) 7160 total_len = sizeof(struct scsi_target_group_data_extended); 7161 else 7162 total_len = sizeof(struct scsi_target_group_data); 7163 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7164 (shared_group + num_ha_groups) + 7165 sizeof(struct scsi_target_port_descriptor) * num_target_ports; 7166 7167 alloc_len = scsi_4btoul(cdb->length); 7168 7169 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7170 ctsio->kern_sg_entries = 0; 7171 ctsio->kern_rel_offset = 0; 7172 ctsio->kern_data_len = min(total_len, alloc_len); 7173 ctsio->kern_total_len = ctsio->kern_data_len; 7174 7175 if (ext) { 7176 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7177 ctsio->kern_data_ptr; 7178 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7179 rtg_ext_ptr->format_type = 0x10; 7180 rtg_ext_ptr->implicit_transition_time = 0; 7181 tpg_desc = &rtg_ext_ptr->groups[0]; 7182 } else { 7183 rtg_ptr = (struct scsi_target_group_data *) 7184 ctsio->kern_data_ptr; 7185 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7186 tpg_desc = &rtg_ptr->groups[0]; 7187 } 7188 7189 mtx_lock(&softc->ctl_lock); 7190 pg = softc->port_min / softc->port_cnt; 7191 if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { 7192 /* Some shelf is known to be primary. */ 7193 if (softc->ha_link == CTL_HA_LINK_OFFLINE) 7194 os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7195 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) 7196 os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7197 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) 7198 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 7199 else 7200 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7201 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7202 ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7203 } else { 7204 ts = os; 7205 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7206 } 7207 } else { 7208 /* No known primary shelf. */ 7209 if (softc->ha_link == CTL_HA_LINK_OFFLINE) { 7210 ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7211 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7212 } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { 7213 ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7214 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7215 } else { 7216 ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7217 } 7218 } 7219 if (shared_group) { 7220 tpg_desc->pref_state = ts; 7221 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7222 TPG_U_SUP | TPG_T_SUP; 7223 scsi_ulto2b(1, tpg_desc->target_port_group); 7224 tpg_desc->status = TPG_IMPLICIT; 7225 pc = 0; 7226 STAILQ_FOREACH(port, &softc->port_list, links) { 7227 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7228 continue; 7229 if (!softc->is_single && 7230 (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) 7231 continue; 7232 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7233 continue; 7234 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7235 relative_target_port_identifier); 7236 pc++; 7237 } 7238 tpg_desc->target_port_count = pc; 7239 tpg_desc = (struct scsi_target_port_group_descriptor *) 7240 &tpg_desc->descriptors[pc]; 7241 } 7242 for (g = 0; g < num_ha_groups; g++) { 7243 tpg_desc->pref_state = (g == pg) ? 
ts : os; 7244 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7245 TPG_U_SUP | TPG_T_SUP; 7246 scsi_ulto2b(2 + g, tpg_desc->target_port_group); 7247 tpg_desc->status = TPG_IMPLICIT; 7248 pc = 0; 7249 STAILQ_FOREACH(port, &softc->port_list, links) { 7250 if (port->targ_port < g * softc->port_cnt || 7251 port->targ_port >= (g + 1) * softc->port_cnt) 7252 continue; 7253 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7254 continue; 7255 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7256 continue; 7257 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7258 continue; 7259 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7260 relative_target_port_identifier); 7261 pc++; 7262 } 7263 tpg_desc->target_port_count = pc; 7264 tpg_desc = (struct scsi_target_port_group_descriptor *) 7265 &tpg_desc->descriptors[pc]; 7266 } 7267 mtx_unlock(&softc->ctl_lock); 7268 7269 ctl_set_success(ctsio); 7270 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7271 ctsio->be_move_done = ctl_config_move_done; 7272 ctl_datamove((union ctl_io *)ctsio); 7273 return(retval); 7274 } 7275 7276 int 7277 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7278 { 7279 struct ctl_lun *lun = CTL_LUN(ctsio); 7280 struct scsi_report_supported_opcodes *cdb; 7281 const struct ctl_cmd_entry *entry, *sentry; 7282 struct scsi_report_supported_opcodes_all *all; 7283 struct scsi_report_supported_opcodes_descr *descr; 7284 struct scsi_report_supported_opcodes_one *one; 7285 int retval; 7286 int alloc_len, total_len; 7287 int opcode, service_action, i, j, num; 7288 7289 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7290 7291 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7292 retval = CTL_RETVAL_COMPLETE; 7293 7294 opcode = cdb->requested_opcode; 7295 service_action = scsi_2btoul(cdb->requested_service_action); 7296 switch (cdb->options & RSO_OPTIONS_MASK) { 7297 case RSO_OPTIONS_ALL: 7298 num = 0; 7299 for (i = 0; i < 256; i++) { 7300 entry = &ctl_cmd_table[i]; 7301 if (entry->flags & CTL_CMD_FLAG_SA5) { 7302 for (j = 0; j < 32; j++) { 7303 sentry = &((const struct ctl_cmd_entry *) 7304 entry->execute)[j]; 7305 if (ctl_cmd_applicable( 7306 lun->be_lun->lun_type, sentry)) 7307 num++; 7308 } 7309 } else { 7310 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7311 entry)) 7312 num++; 7313 } 7314 } 7315 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7316 num * sizeof(struct scsi_report_supported_opcodes_descr); 7317 break; 7318 case RSO_OPTIONS_OC: 7319 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7320 ctl_set_invalid_field(/*ctsio*/ ctsio, 7321 /*sks_valid*/ 1, 7322 /*command*/ 1, 7323 /*field*/ 2, 7324 /*bit_valid*/ 1, 7325 /*bit*/ 2); 7326 ctl_done((union ctl_io *)ctsio); 7327 return (CTL_RETVAL_COMPLETE); 7328 } 7329 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7330 break; 7331 case RSO_OPTIONS_OC_SA: 7332 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7333 service_action >= 32) { 7334 ctl_set_invalid_field(/*ctsio*/ ctsio, 7335 /*sks_valid*/ 1, 7336 /*command*/ 1, 7337 /*field*/ 2, 7338 /*bit_valid*/ 1, 7339 /*bit*/ 2); 7340 ctl_done((union ctl_io *)ctsio); 7341 return (CTL_RETVAL_COMPLETE); 7342 } 7343 /* FALLTHROUGH */ 7344 case RSO_OPTIONS_OC_ASA: 7345 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7346 break; 7347 default: 7348 ctl_set_invalid_field(/*ctsio*/ ctsio, 7349 /*sks_valid*/ 1, 7350 /*command*/ 1, 7351 /*field*/ 2, 7352 /*bit_valid*/ 1, 7353 /*bit*/ 2); 7354 ctl_done((union ctl_io *)ctsio); 7355 return 
(CTL_RETVAL_COMPLETE); 7356 } 7357 7358 alloc_len = scsi_4btoul(cdb->length); 7359 7360 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7361 ctsio->kern_sg_entries = 0; 7362 ctsio->kern_rel_offset = 0; 7363 ctsio->kern_data_len = min(total_len, alloc_len); 7364 ctsio->kern_total_len = ctsio->kern_data_len; 7365 7366 switch (cdb->options & RSO_OPTIONS_MASK) { 7367 case RSO_OPTIONS_ALL: 7368 all = (struct scsi_report_supported_opcodes_all *) 7369 ctsio->kern_data_ptr; 7370 num = 0; 7371 for (i = 0; i < 256; i++) { 7372 entry = &ctl_cmd_table[i]; 7373 if (entry->flags & CTL_CMD_FLAG_SA5) { 7374 for (j = 0; j < 32; j++) { 7375 sentry = &((const struct ctl_cmd_entry *) 7376 entry->execute)[j]; 7377 if (!ctl_cmd_applicable( 7378 lun->be_lun->lun_type, sentry)) 7379 continue; 7380 descr = &all->descr[num++]; 7381 descr->opcode = i; 7382 scsi_ulto2b(j, descr->service_action); 7383 descr->flags = RSO_SERVACTV; 7384 scsi_ulto2b(sentry->length, 7385 descr->cdb_length); 7386 } 7387 } else { 7388 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7389 entry)) 7390 continue; 7391 descr = &all->descr[num++]; 7392 descr->opcode = i; 7393 scsi_ulto2b(0, descr->service_action); 7394 descr->flags = 0; 7395 scsi_ulto2b(entry->length, descr->cdb_length); 7396 } 7397 } 7398 scsi_ulto4b( 7399 num * sizeof(struct scsi_report_supported_opcodes_descr), 7400 all->length); 7401 break; 7402 case RSO_OPTIONS_OC: 7403 one = (struct scsi_report_supported_opcodes_one *) 7404 ctsio->kern_data_ptr; 7405 entry = &ctl_cmd_table[opcode]; 7406 goto fill_one; 7407 case RSO_OPTIONS_OC_SA: 7408 one = (struct scsi_report_supported_opcodes_one *) 7409 ctsio->kern_data_ptr; 7410 entry = &ctl_cmd_table[opcode]; 7411 entry = &((const struct ctl_cmd_entry *) 7412 entry->execute)[service_action]; 7413 fill_one: 7414 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7415 one->support = 3; 7416 scsi_ulto2b(entry->length, one->cdb_length); 7417 one->cdb_usage[0] = opcode; 7418 memcpy(&one->cdb_usage[1], entry->usage, 7419 entry->length - 1); 7420 } else 7421 one->support = 1; 7422 break; 7423 case RSO_OPTIONS_OC_ASA: 7424 one = (struct scsi_report_supported_opcodes_one *) 7425 ctsio->kern_data_ptr; 7426 entry = &ctl_cmd_table[opcode]; 7427 if (entry->flags & CTL_CMD_FLAG_SA5) { 7428 entry = &((const struct ctl_cmd_entry *) 7429 entry->execute)[service_action]; 7430 } else if (service_action != 0) { 7431 one->support = 1; 7432 break; 7433 } 7434 goto fill_one; 7435 } 7436 7437 ctl_set_success(ctsio); 7438 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7439 ctsio->be_move_done = ctl_config_move_done; 7440 ctl_datamove((union ctl_io *)ctsio); 7441 return(retval); 7442 } 7443 7444 int 7445 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7446 { 7447 struct scsi_report_supported_tmf *cdb; 7448 struct scsi_report_supported_tmf_ext_data *data; 7449 int retval; 7450 int alloc_len, total_len; 7451 7452 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7453 7454 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7455 7456 retval = CTL_RETVAL_COMPLETE; 7457 7458 if (cdb->options & RST_REPD) 7459 total_len = sizeof(struct scsi_report_supported_tmf_ext_data); 7460 else 7461 total_len = sizeof(struct scsi_report_supported_tmf_data); 7462 alloc_len = scsi_4btoul(cdb->length); 7463 7464 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7465 ctsio->kern_sg_entries = 0; 7466 ctsio->kern_rel_offset = 0; 7467 ctsio->kern_data_len = min(total_len, alloc_len); 7468 ctsio->kern_total_len = ctsio->kern_data_len; 7469 7470 
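/*
 * The response is filled in through the extended layout; the short format
 * returned when the REPD bit is clear shares the same leading bytes, so a
 * single fill path serves both cases.
 */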
data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr; 7471 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | 7472 RST_TRS; 7473 data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; 7474 data->length = total_len - 4; 7475 7476 ctl_set_success(ctsio); 7477 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7478 ctsio->be_move_done = ctl_config_move_done; 7479 ctl_datamove((union ctl_io *)ctsio); 7480 return (retval); 7481 } 7482 7483 int 7484 ctl_report_timestamp(struct ctl_scsiio *ctsio) 7485 { 7486 struct scsi_report_timestamp *cdb; 7487 struct scsi_report_timestamp_data *data; 7488 struct timeval tv; 7489 int64_t timestamp; 7490 int retval; 7491 int alloc_len, total_len; 7492 7493 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7494 7495 cdb = (struct scsi_report_timestamp *)ctsio->cdb; 7496 7497 retval = CTL_RETVAL_COMPLETE; 7498 7499 total_len = sizeof(struct scsi_report_timestamp_data); 7500 alloc_len = scsi_4btoul(cdb->length); 7501 7502 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7503 ctsio->kern_sg_entries = 0; 7504 ctsio->kern_rel_offset = 0; 7505 ctsio->kern_data_len = min(total_len, alloc_len); 7506 ctsio->kern_total_len = ctsio->kern_data_len; 7507 7508 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7509 scsi_ulto2b(sizeof(*data) - 2, data->length); 7510 data->origin = RTS_ORIG_OUTSIDE; 7511 getmicrotime(&tv); 7512 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7513 scsi_ulto4b(timestamp >> 16, data->timestamp); 7514 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7515 7516 ctl_set_success(ctsio); 7517 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7518 ctsio->be_move_done = ctl_config_move_done; 7519 ctl_datamove((union ctl_io *)ctsio); 7520 return (retval); 7521 } 7522 7523 int 7524 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7525 { 7526 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7527 struct ctl_lun *lun = CTL_LUN(ctsio); 7528 struct scsi_per_res_in *cdb; 7529 int alloc_len, total_len = 0; 7530 /* struct scsi_per_res_in_rsrv in_data; */ 7531 uint64_t key; 7532 7533 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7534 7535 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7536 7537 alloc_len = scsi_2btoul(cdb->length); 7538 7539 retry: 7540 mtx_lock(&lun->lun_lock); 7541 switch (cdb->action) { 7542 case SPRI_RK: /* read keys */ 7543 total_len = sizeof(struct scsi_per_res_in_keys) + 7544 lun->pr_key_count * 7545 sizeof(struct scsi_per_res_key); 7546 break; 7547 case SPRI_RR: /* read reservation */ 7548 if (lun->flags & CTL_LUN_PR_RESERVED) 7549 total_len = sizeof(struct scsi_per_res_in_rsrv); 7550 else 7551 total_len = sizeof(struct scsi_per_res_in_header); 7552 break; 7553 case SPRI_RC: /* report capabilities */ 7554 total_len = sizeof(struct scsi_per_res_cap); 7555 break; 7556 case SPRI_RS: /* read full status */ 7557 total_len = sizeof(struct scsi_per_res_in_header) + 7558 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7559 lun->pr_key_count; 7560 break; 7561 default: 7562 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7563 } 7564 mtx_unlock(&lun->lun_lock); 7565 7566 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7567 ctsio->kern_rel_offset = 0; 7568 ctsio->kern_sg_entries = 0; 7569 ctsio->kern_data_len = min(total_len, alloc_len); 7570 ctsio->kern_total_len = ctsio->kern_data_len; 7571 7572 mtx_lock(&lun->lun_lock); 7573 switch (cdb->action) { 7574 case SPRI_RK: { // read keys 7575 struct scsi_per_res_in_keys *res_keys; 7576 int i, key_count; 7577 7578 
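/*
 * READ KEYS returns the PR generation and a byte count in the header,
 * followed by one 8-byte reservation key for each registered initiator.
 */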
res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7579 7580 /* 7581 * We had to drop the lock to allocate our buffer, which 7582 * leaves time for someone to come in with another 7583 * persistent reservation. (That is unlikely, though, 7584 * since this should be the only persistent reservation 7585 * command active right now.) 7586 */ 7587 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7588 (lun->pr_key_count * 7589 sizeof(struct scsi_per_res_key)))){ 7590 mtx_unlock(&lun->lun_lock); 7591 free(ctsio->kern_data_ptr, M_CTL); 7592 printf("%s: reservation length changed, retrying\n", 7593 __func__); 7594 goto retry; 7595 } 7596 7597 scsi_ulto4b(lun->pr_generation, res_keys->header.generation); 7598 7599 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7600 lun->pr_key_count, res_keys->header.length); 7601 7602 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { 7603 if ((key = ctl_get_prkey(lun, i)) == 0) 7604 continue; 7605 7606 /* 7607 * We used lun->pr_key_count to calculate the 7608 * size to allocate. If it turns out the number of 7609 * initiators with the registered flag set is 7610 * larger than that (i.e. they haven't been kept in 7611 * sync), we've got a problem. 7612 */ 7613 if (key_count >= lun->pr_key_count) { 7614 key_count++; 7615 continue; 7616 } 7617 scsi_u64to8b(key, res_keys->keys[key_count].key); 7618 key_count++; 7619 } 7620 break; 7621 } 7622 case SPRI_RR: { // read reservation 7623 struct scsi_per_res_in_rsrv *res; 7624 int tmp_len, header_only; 7625 7626 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7627 7628 scsi_ulto4b(lun->pr_generation, res->header.generation); 7629 7630 if (lun->flags & CTL_LUN_PR_RESERVED) 7631 { 7632 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7633 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7634 res->header.length); 7635 header_only = 0; 7636 } else { 7637 tmp_len = sizeof(struct scsi_per_res_in_header); 7638 scsi_ulto4b(0, res->header.length); 7639 header_only = 1; 7640 } 7641 7642 /* 7643 * We had to drop the lock to allocate our buffer, which 7644 * leaves time for someone to come in with another 7645 * persistent reservation. (That is unlikely, though, 7646 * since this should be the only persistent reservation 7647 * command active right now.) 7648 */ 7649 if (tmp_len != total_len) { 7650 mtx_unlock(&lun->lun_lock); 7651 free(ctsio->kern_data_ptr, M_CTL); 7652 printf("%s: reservation status changed, retrying\n", 7653 __func__); 7654 goto retry; 7655 } 7656 7657 /* 7658 * No reservation held, so we're done. 7659 */ 7660 if (header_only != 0) 7661 break; 7662 7663 /* 7664 * If the registration is an All Registrants type, the key 7665 * is 0, since it doesn't really matter. 
7666 */ 7667 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7668 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7669 res->data.reservation); 7670 } 7671 res->data.scopetype = lun->pr_res_type; 7672 break; 7673 } 7674 case SPRI_RC: //report capabilities 7675 { 7676 struct scsi_per_res_cap *res_cap; 7677 uint16_t type_mask; 7678 7679 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7680 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7681 res_cap->flags1 = SPRI_CRH; 7682 res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5; 7683 type_mask = SPRI_TM_WR_EX_AR | 7684 SPRI_TM_EX_AC_RO | 7685 SPRI_TM_WR_EX_RO | 7686 SPRI_TM_EX_AC | 7687 SPRI_TM_WR_EX | 7688 SPRI_TM_EX_AC_AR; 7689 scsi_ulto2b(type_mask, res_cap->type_mask); 7690 break; 7691 } 7692 case SPRI_RS: { // read full status 7693 struct scsi_per_res_in_full *res_status; 7694 struct scsi_per_res_in_full_desc *res_desc; 7695 struct ctl_port *port; 7696 int i, len; 7697 7698 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7699 7700 /* 7701 * We had to drop the lock to allocate our buffer, which 7702 * leaves time for someone to come in with another 7703 * persistent reservation. (That is unlikely, though, 7704 * since this should be the only persistent reservation 7705 * command active right now.) 7706 */ 7707 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7708 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7709 lun->pr_key_count)){ 7710 mtx_unlock(&lun->lun_lock); 7711 free(ctsio->kern_data_ptr, M_CTL); 7712 printf("%s: reservation length changed, retrying\n", 7713 __func__); 7714 goto retry; 7715 } 7716 7717 scsi_ulto4b(lun->pr_generation, res_status->header.generation); 7718 7719 res_desc = &res_status->desc[0]; 7720 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7721 if ((key = ctl_get_prkey(lun, i)) == 0) 7722 continue; 7723 7724 scsi_u64to8b(key, res_desc->res_key.key); 7725 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7726 (lun->pr_res_idx == i || 7727 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7728 res_desc->flags = SPRI_FULL_R_HOLDER; 7729 res_desc->scopetype = lun->pr_res_type; 7730 } 7731 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7732 res_desc->rel_trgt_port_id); 7733 len = 0; 7734 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; 7735 if (port != NULL) 7736 len = ctl_create_iid(port, 7737 i % CTL_MAX_INIT_PER_PORT, 7738 res_desc->transport_id); 7739 scsi_ulto4b(len, res_desc->additional_length); 7740 res_desc = (struct scsi_per_res_in_full_desc *) 7741 &res_desc->transport_id[len]; 7742 } 7743 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7744 res_status->header.length); 7745 break; 7746 } 7747 default: 7748 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7749 } 7750 mtx_unlock(&lun->lun_lock); 7751 7752 ctl_set_success(ctsio); 7753 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7754 ctsio->be_move_done = ctl_config_move_done; 7755 ctl_datamove((union ctl_io *)ctsio); 7756 return (CTL_RETVAL_COMPLETE); 7757 } 7758 7759 /* 7760 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7761 * it should return. 
7762 */ 7763 static int 7764 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7765 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7766 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7767 struct scsi_per_res_out_parms* param) 7768 { 7769 union ctl_ha_msg persis_io; 7770 int i; 7771 7772 mtx_lock(&lun->lun_lock); 7773 if (sa_res_key == 0) { 7774 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7775 /* validate scope and type */ 7776 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7777 SPR_LU_SCOPE) { 7778 mtx_unlock(&lun->lun_lock); 7779 ctl_set_invalid_field(/*ctsio*/ ctsio, 7780 /*sks_valid*/ 1, 7781 /*command*/ 1, 7782 /*field*/ 2, 7783 /*bit_valid*/ 1, 7784 /*bit*/ 4); 7785 ctl_done((union ctl_io *)ctsio); 7786 return (1); 7787 } 7788 7789 if (type>8 || type==2 || type==4 || type==0) { 7790 mtx_unlock(&lun->lun_lock); 7791 ctl_set_invalid_field(/*ctsio*/ ctsio, 7792 /*sks_valid*/ 1, 7793 /*command*/ 1, 7794 /*field*/ 2, 7795 /*bit_valid*/ 1, 7796 /*bit*/ 0); 7797 ctl_done((union ctl_io *)ctsio); 7798 return (1); 7799 } 7800 7801 /* 7802 * Unregister everybody else and build UA for 7803 * them 7804 */ 7805 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7806 if (i == residx || ctl_get_prkey(lun, i) == 0) 7807 continue; 7808 7809 ctl_clr_prkey(lun, i); 7810 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7811 } 7812 lun->pr_key_count = 1; 7813 lun->pr_res_type = type; 7814 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 7815 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 7816 lun->pr_res_idx = residx; 7817 lun->pr_generation++; 7818 mtx_unlock(&lun->lun_lock); 7819 7820 /* send msg to other side */ 7821 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7822 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7823 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7824 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7825 persis_io.pr.pr_info.res_type = type; 7826 memcpy(persis_io.pr.pr_info.sa_res_key, 7827 param->serv_act_res_key, 7828 sizeof(param->serv_act_res_key)); 7829 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7830 sizeof(persis_io.pr), M_WAITOK); 7831 } else { 7832 /* not all registrants */ 7833 mtx_unlock(&lun->lun_lock); 7834 free(ctsio->kern_data_ptr, M_CTL); 7835 ctl_set_invalid_field(ctsio, 7836 /*sks_valid*/ 1, 7837 /*command*/ 0, 7838 /*field*/ 8, 7839 /*bit_valid*/ 0, 7840 /*bit*/ 0); 7841 ctl_done((union ctl_io *)ctsio); 7842 return (1); 7843 } 7844 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7845 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7846 int found = 0; 7847 7848 if (res_key == sa_res_key) { 7849 /* special case */ 7850 /* 7851 * The spec implies this is not good but doesn't 7852 * say what to do. There are two choices either 7853 * generate a res conflict or check condition 7854 * with illegal field in parameter data. Since 7855 * that is what is done when the sa_res_key is 7856 * zero I'll take that approach since this has 7857 * to do with the sa_res_key. 
7858 */ 7859 mtx_unlock(&lun->lun_lock); 7860 free(ctsio->kern_data_ptr, M_CTL); 7861 ctl_set_invalid_field(ctsio, 7862 /*sks_valid*/ 1, 7863 /*command*/ 0, 7864 /*field*/ 8, 7865 /*bit_valid*/ 0, 7866 /*bit*/ 0); 7867 ctl_done((union ctl_io *)ctsio); 7868 return (1); 7869 } 7870 7871 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7872 if (ctl_get_prkey(lun, i) != sa_res_key) 7873 continue; 7874 7875 found = 1; 7876 ctl_clr_prkey(lun, i); 7877 lun->pr_key_count--; 7878 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7879 } 7880 if (!found) { 7881 mtx_unlock(&lun->lun_lock); 7882 free(ctsio->kern_data_ptr, M_CTL); 7883 ctl_set_reservation_conflict(ctsio); 7884 ctl_done((union ctl_io *)ctsio); 7885 return (CTL_RETVAL_COMPLETE); 7886 } 7887 lun->pr_generation++; 7888 mtx_unlock(&lun->lun_lock); 7889 7890 /* send msg to other side */ 7891 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7892 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7893 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7894 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7895 persis_io.pr.pr_info.res_type = type; 7896 memcpy(persis_io.pr.pr_info.sa_res_key, 7897 param->serv_act_res_key, 7898 sizeof(param->serv_act_res_key)); 7899 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7900 sizeof(persis_io.pr), M_WAITOK); 7901 } else { 7902 /* Reserved but not all registrants */ 7903 /* sa_res_key is res holder */ 7904 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7905 /* validate scope and type */ 7906 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7907 SPR_LU_SCOPE) { 7908 mtx_unlock(&lun->lun_lock); 7909 ctl_set_invalid_field(/*ctsio*/ ctsio, 7910 /*sks_valid*/ 1, 7911 /*command*/ 1, 7912 /*field*/ 2, 7913 /*bit_valid*/ 1, 7914 /*bit*/ 4); 7915 ctl_done((union ctl_io *)ctsio); 7916 return (1); 7917 } 7918 7919 if (type>8 || type==2 || type==4 || type==0) { 7920 mtx_unlock(&lun->lun_lock); 7921 ctl_set_invalid_field(/*ctsio*/ ctsio, 7922 /*sks_valid*/ 1, 7923 /*command*/ 1, 7924 /*field*/ 2, 7925 /*bit_valid*/ 1, 7926 /*bit*/ 0); 7927 ctl_done((union ctl_io *)ctsio); 7928 return (1); 7929 } 7930 7931 /* 7932 * Do the following: 7933 * if sa_res_key != res_key remove all 7934 * registrants w/sa_res_key and generate UA 7935 * for these registrants(Registrations 7936 * Preempted) if it wasn't an exclusive 7937 * reservation generate UA(Reservations 7938 * Preempted) for all other registered nexuses 7939 * if the type has changed. Establish the new 7940 * reservation and holder. If res_key and 7941 * sa_res_key are the same do the above 7942 * except don't unregister the res holder. 
7943 */ 7944 7945 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7946 if (i == residx || ctl_get_prkey(lun, i) == 0) 7947 continue; 7948 7949 if (sa_res_key == ctl_get_prkey(lun, i)) { 7950 ctl_clr_prkey(lun, i); 7951 lun->pr_key_count--; 7952 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7953 } else if (type != lun->pr_res_type && 7954 (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 7955 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 7956 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 7957 } 7958 } 7959 lun->pr_res_type = type; 7960 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 7961 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 7962 lun->pr_res_idx = residx; 7963 else 7964 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 7965 lun->pr_generation++; 7966 mtx_unlock(&lun->lun_lock); 7967 7968 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7969 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7970 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7971 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7972 persis_io.pr.pr_info.res_type = type; 7973 memcpy(persis_io.pr.pr_info.sa_res_key, 7974 param->serv_act_res_key, 7975 sizeof(param->serv_act_res_key)); 7976 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7977 sizeof(persis_io.pr), M_WAITOK); 7978 } else { 7979 /* 7980 * sa_res_key is not the res holder just 7981 * remove registrants 7982 */ 7983 int found=0; 7984 7985 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7986 if (sa_res_key != ctl_get_prkey(lun, i)) 7987 continue; 7988 7989 found = 1; 7990 ctl_clr_prkey(lun, i); 7991 lun->pr_key_count--; 7992 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7993 } 7994 7995 if (!found) { 7996 mtx_unlock(&lun->lun_lock); 7997 free(ctsio->kern_data_ptr, M_CTL); 7998 ctl_set_reservation_conflict(ctsio); 7999 ctl_done((union ctl_io *)ctsio); 8000 return (1); 8001 } 8002 lun->pr_generation++; 8003 mtx_unlock(&lun->lun_lock); 8004 8005 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8006 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8007 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8008 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8009 persis_io.pr.pr_info.res_type = type; 8010 memcpy(persis_io.pr.pr_info.sa_res_key, 8011 param->serv_act_res_key, 8012 sizeof(param->serv_act_res_key)); 8013 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8014 sizeof(persis_io.pr), M_WAITOK); 8015 } 8016 } 8017 return (0); 8018 } 8019 8020 static void 8021 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8022 { 8023 uint64_t sa_res_key; 8024 int i; 8025 8026 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8027 8028 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8029 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8030 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8031 if (sa_res_key == 0) { 8032 /* 8033 * Unregister everybody else and build UA for 8034 * them 8035 */ 8036 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8037 if (i == msg->pr.pr_info.residx || 8038 ctl_get_prkey(lun, i) == 0) 8039 continue; 8040 8041 ctl_clr_prkey(lun, i); 8042 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8043 } 8044 8045 lun->pr_key_count = 1; 8046 lun->pr_res_type = msg->pr.pr_info.res_type; 8047 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8048 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8049 lun->pr_res_idx = msg->pr.pr_info.residx; 8050 } else { 8051 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8052 if (sa_res_key == ctl_get_prkey(lun, i)) 8053 continue; 8054 8055 ctl_clr_prkey(lun, i); 8056 lun->pr_key_count--; 8057 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8058 } 8059 } 8060 } else { 8061 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8062 if (i == 
msg->pr.pr_info.residx || 8063 ctl_get_prkey(lun, i) == 0) 8064 continue; 8065 8066 if (sa_res_key == ctl_get_prkey(lun, i)) { 8067 ctl_clr_prkey(lun, i); 8068 lun->pr_key_count--; 8069 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8070 } else if (msg->pr.pr_info.res_type != lun->pr_res_type 8071 && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8072 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8073 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8074 } 8075 } 8076 lun->pr_res_type = msg->pr.pr_info.res_type; 8077 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8078 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8079 lun->pr_res_idx = msg->pr.pr_info.residx; 8080 else 8081 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8082 } 8083 lun->pr_generation++; 8084 8085 } 8086 8087 8088 int 8089 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8090 { 8091 struct ctl_softc *softc = CTL_SOFTC(ctsio); 8092 struct ctl_lun *lun = CTL_LUN(ctsio); 8093 int retval; 8094 u_int32_t param_len; 8095 struct scsi_per_res_out *cdb; 8096 struct scsi_per_res_out_parms* param; 8097 uint32_t residx; 8098 uint64_t res_key, sa_res_key, key; 8099 uint8_t type; 8100 union ctl_ha_msg persis_io; 8101 int i; 8102 8103 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8104 8105 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8106 retval = CTL_RETVAL_COMPLETE; 8107 8108 /* 8109 * We only support whole-LUN scope. The scope & type are ignored for 8110 * register, register and ignore existing key and clear. 8111 * We sometimes ignore scope and type on preempts too!! 8112 * Verify reservation type here as well. 8113 */ 8114 type = cdb->scope_type & SPR_TYPE_MASK; 8115 if ((cdb->action == SPRO_RESERVE) 8116 || (cdb->action == SPRO_RELEASE)) { 8117 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8118 ctl_set_invalid_field(/*ctsio*/ ctsio, 8119 /*sks_valid*/ 1, 8120 /*command*/ 1, 8121 /*field*/ 2, 8122 /*bit_valid*/ 1, 8123 /*bit*/ 4); 8124 ctl_done((union ctl_io *)ctsio); 8125 return (CTL_RETVAL_COMPLETE); 8126 } 8127 8128 if (type>8 || type==2 || type==4 || type==0) { 8129 ctl_set_invalid_field(/*ctsio*/ ctsio, 8130 /*sks_valid*/ 1, 8131 /*command*/ 1, 8132 /*field*/ 2, 8133 /*bit_valid*/ 1, 8134 /*bit*/ 0); 8135 ctl_done((union ctl_io *)ctsio); 8136 return (CTL_RETVAL_COMPLETE); 8137 } 8138 } 8139 8140 param_len = scsi_4btoul(cdb->length); 8141 8142 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8143 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8144 ctsio->kern_data_len = param_len; 8145 ctsio->kern_total_len = param_len; 8146 ctsio->kern_rel_offset = 0; 8147 ctsio->kern_sg_entries = 0; 8148 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8149 ctsio->be_move_done = ctl_config_move_done; 8150 ctl_datamove((union ctl_io *)ctsio); 8151 8152 return (CTL_RETVAL_COMPLETE); 8153 } 8154 8155 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8156 8157 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8158 res_key = scsi_8btou64(param->res_key.key); 8159 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8160 8161 /* 8162 * Validate the reservation key here except for SPRO_REG_IGNO 8163 * This must be done for all other service actions 8164 */ 8165 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8166 mtx_lock(&lun->lun_lock); 8167 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8168 if (res_key != key) { 8169 /* 8170 * The current key passed in doesn't match 8171 * the one the initiator previously 8172 * registered. 
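				 * In that case the command is terminated
				 * with RESERVATION CONFLICT status.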
8173 */ 8174 mtx_unlock(&lun->lun_lock); 8175 free(ctsio->kern_data_ptr, M_CTL); 8176 ctl_set_reservation_conflict(ctsio); 8177 ctl_done((union ctl_io *)ctsio); 8178 return (CTL_RETVAL_COMPLETE); 8179 } 8180 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8181 /* 8182 * We are not registered 8183 */ 8184 mtx_unlock(&lun->lun_lock); 8185 free(ctsio->kern_data_ptr, M_CTL); 8186 ctl_set_reservation_conflict(ctsio); 8187 ctl_done((union ctl_io *)ctsio); 8188 return (CTL_RETVAL_COMPLETE); 8189 } else if (res_key != 0) { 8190 /* 8191 * We are not registered and trying to register but 8192 * the register key isn't zero. 8193 */ 8194 mtx_unlock(&lun->lun_lock); 8195 free(ctsio->kern_data_ptr, M_CTL); 8196 ctl_set_reservation_conflict(ctsio); 8197 ctl_done((union ctl_io *)ctsio); 8198 return (CTL_RETVAL_COMPLETE); 8199 } 8200 mtx_unlock(&lun->lun_lock); 8201 } 8202 8203 switch (cdb->action & SPRO_ACTION_MASK) { 8204 case SPRO_REGISTER: 8205 case SPRO_REG_IGNO: { 8206 8207 #if 0 8208 printf("Registration received\n"); 8209 #endif 8210 8211 /* 8212 * We don't support any of these options, as we report in 8213 * the read capabilities request (see 8214 * ctl_persistent_reserve_in(), above). 8215 */ 8216 if ((param->flags & SPR_SPEC_I_PT) 8217 || (param->flags & SPR_ALL_TG_PT) 8218 || (param->flags & SPR_APTPL)) { 8219 int bit_ptr; 8220 8221 if (param->flags & SPR_APTPL) 8222 bit_ptr = 0; 8223 else if (param->flags & SPR_ALL_TG_PT) 8224 bit_ptr = 2; 8225 else /* SPR_SPEC_I_PT */ 8226 bit_ptr = 3; 8227 8228 free(ctsio->kern_data_ptr, M_CTL); 8229 ctl_set_invalid_field(ctsio, 8230 /*sks_valid*/ 1, 8231 /*command*/ 0, 8232 /*field*/ 20, 8233 /*bit_valid*/ 1, 8234 /*bit*/ bit_ptr); 8235 ctl_done((union ctl_io *)ctsio); 8236 return (CTL_RETVAL_COMPLETE); 8237 } 8238 8239 mtx_lock(&lun->lun_lock); 8240 8241 /* 8242 * The initiator wants to clear the 8243 * key/unregister. 8244 */ 8245 if (sa_res_key == 0) { 8246 if ((res_key == 0 8247 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8248 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8249 && ctl_get_prkey(lun, residx) == 0)) { 8250 mtx_unlock(&lun->lun_lock); 8251 goto done; 8252 } 8253 8254 ctl_clr_prkey(lun, residx); 8255 lun->pr_key_count--; 8256 8257 if (residx == lun->pr_res_idx) { 8258 lun->flags &= ~CTL_LUN_PR_RESERVED; 8259 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8260 8261 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8262 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8263 lun->pr_key_count) { 8264 /* 8265 * If the reservation is a registrants 8266 * only type we need to generate a UA 8267 * for other registered inits. 
The 8268 * sense code should be RESERVATIONS 8269 * RELEASED 8270 */ 8271 8272 for (i = softc->init_min; i < softc->init_max; i++){ 8273 if (ctl_get_prkey(lun, i) == 0) 8274 continue; 8275 ctl_est_ua(lun, i, 8276 CTL_UA_RES_RELEASE); 8277 } 8278 } 8279 lun->pr_res_type = 0; 8280 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8281 if (lun->pr_key_count==0) { 8282 lun->flags &= ~CTL_LUN_PR_RESERVED; 8283 lun->pr_res_type = 0; 8284 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8285 } 8286 } 8287 lun->pr_generation++; 8288 mtx_unlock(&lun->lun_lock); 8289 8290 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8291 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8292 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8293 persis_io.pr.pr_info.residx = residx; 8294 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8295 sizeof(persis_io.pr), M_WAITOK); 8296 } else /* sa_res_key != 0 */ { 8297 8298 /* 8299 * If we aren't registered currently then increment 8300 * the key count and set the registered flag. 8301 */ 8302 ctl_alloc_prkey(lun, residx); 8303 if (ctl_get_prkey(lun, residx) == 0) 8304 lun->pr_key_count++; 8305 ctl_set_prkey(lun, residx, sa_res_key); 8306 lun->pr_generation++; 8307 mtx_unlock(&lun->lun_lock); 8308 8309 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8310 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8311 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8312 persis_io.pr.pr_info.residx = residx; 8313 memcpy(persis_io.pr.pr_info.sa_res_key, 8314 param->serv_act_res_key, 8315 sizeof(param->serv_act_res_key)); 8316 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8317 sizeof(persis_io.pr), M_WAITOK); 8318 } 8319 8320 break; 8321 } 8322 case SPRO_RESERVE: 8323 #if 0 8324 printf("Reserve executed type %d\n", type); 8325 #endif 8326 mtx_lock(&lun->lun_lock); 8327 if (lun->flags & CTL_LUN_PR_RESERVED) { 8328 /* 8329 * if this isn't the reservation holder and it's 8330 * not a "all registrants" type or if the type is 8331 * different then we have a conflict 8332 */ 8333 if ((lun->pr_res_idx != residx 8334 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8335 || lun->pr_res_type != type) { 8336 mtx_unlock(&lun->lun_lock); 8337 free(ctsio->kern_data_ptr, M_CTL); 8338 ctl_set_reservation_conflict(ctsio); 8339 ctl_done((union ctl_io *)ctsio); 8340 return (CTL_RETVAL_COMPLETE); 8341 } 8342 mtx_unlock(&lun->lun_lock); 8343 } else /* create a reservation */ { 8344 /* 8345 * If it's not an "all registrants" type record 8346 * reservation holder 8347 */ 8348 if (type != SPR_TYPE_WR_EX_AR 8349 && type != SPR_TYPE_EX_AC_AR) 8350 lun->pr_res_idx = residx; /* Res holder */ 8351 else 8352 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8353 8354 lun->flags |= CTL_LUN_PR_RESERVED; 8355 lun->pr_res_type = type; 8356 8357 mtx_unlock(&lun->lun_lock); 8358 8359 /* send msg to other side */ 8360 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8361 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8362 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8363 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8364 persis_io.pr.pr_info.res_type = type; 8365 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8366 sizeof(persis_io.pr), M_WAITOK); 8367 } 8368 break; 8369 8370 case SPRO_RELEASE: 8371 mtx_lock(&lun->lun_lock); 8372 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8373 /* No reservation exists return good status */ 8374 mtx_unlock(&lun->lun_lock); 8375 goto done; 8376 } 8377 /* 8378 * Is this nexus a reservation holder? 
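		 * Only the reservation holder (or, for an all-registrants
		 * type, any registrant) may release; any other nexus is
		 * returned GOOD status and the release is a no-op.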
		 */
		if (lun->pr_res_idx != residx
		 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
			/*
			 * Not the reservation holder; return good status
			 * but do nothing.
			 */
			mtx_unlock(&lun->lun_lock);
			goto done;
		}

		if (lun->pr_res_type != type) {
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_illegal_pr_release(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		/* okay to release */
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
		lun->pr_res_type = 0;

		/*
		 * If this isn't an exclusive access reservation and NUAR
		 * is not set, generate UA for all other registrants.
		 */
		if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX &&
		    (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) {
			for (i = softc->init_min; i < softc->init_max; i++) {
				if (i == residx || ctl_get_prkey(lun, i) == 0)
					continue;
				ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
			}
		}
		mtx_unlock(&lun->lun_lock);

		/* Send msg to other side */
		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
		persis_io.pr.pr_info.action = CTL_PR_RELEASE;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
		    sizeof(persis_io.pr), M_WAITOK);
		break;

	case SPRO_CLEAR:
		/* send msg to other side */

		mtx_lock(&lun->lun_lock);
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_type = 0;
		lun->pr_key_count = 0;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;

		ctl_clr_prkey(lun, residx);
		for (i = 0; i < CTL_MAX_INITIATORS; i++)
			if (ctl_get_prkey(lun, i) != 0) {
				ctl_clr_prkey(lun, i);
				ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
			}
		lun->pr_generation++;
		mtx_unlock(&lun->lun_lock);

		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
		persis_io.pr.pr_info.action = CTL_PR_CLEAR;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
		    sizeof(persis_io.pr), M_WAITOK);
		break;

	case SPRO_PREEMPT:
	case SPRO_PRE_ABO: {
		int nretval;

		nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type,
		    residx, ctsio, cdb, param);
		if (nretval != 0)
			return (CTL_RETVAL_COMPLETE);
		break;
	}
	default:
		panic("%s: Invalid PR type %#x", __func__, cdb->action);
	}

done:
	free(ctsio->kern_data_ptr, M_CTL);
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);

	return (retval);
}

/*
 * This routine is for handling a message from the other SC pertaining to
 * persistent reserve out.  All the error checking will have been done,
 * so only the action needs to be performed here to keep the two sides
 * in sync.
8477 */ 8478 static void 8479 ctl_hndl_per_res_out_on_other_sc(union ctl_io *io) 8480 { 8481 struct ctl_softc *softc = CTL_SOFTC(io); 8482 union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg; 8483 struct ctl_lun *lun; 8484 int i; 8485 uint32_t residx, targ_lun; 8486 8487 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8488 mtx_lock(&softc->ctl_lock); 8489 if (targ_lun >= CTL_MAX_LUNS || 8490 (lun = softc->ctl_luns[targ_lun]) == NULL) { 8491 mtx_unlock(&softc->ctl_lock); 8492 return; 8493 } 8494 mtx_lock(&lun->lun_lock); 8495 mtx_unlock(&softc->ctl_lock); 8496 if (lun->flags & CTL_LUN_DISABLED) { 8497 mtx_unlock(&lun->lun_lock); 8498 return; 8499 } 8500 residx = ctl_get_initindex(&msg->hdr.nexus); 8501 switch(msg->pr.pr_info.action) { 8502 case CTL_PR_REG_KEY: 8503 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8504 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8505 lun->pr_key_count++; 8506 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8507 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8508 lun->pr_generation++; 8509 break; 8510 8511 case CTL_PR_UNREG_KEY: 8512 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8513 lun->pr_key_count--; 8514 8515 /* XXX Need to see if the reservation has been released */ 8516 /* if so do we need to generate UA? */ 8517 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8518 lun->flags &= ~CTL_LUN_PR_RESERVED; 8519 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8520 8521 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8522 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8523 lun->pr_key_count) { 8524 /* 8525 * If the reservation is a registrants 8526 * only type we need to generate a UA 8527 * for other registered inits. The 8528 * sense code should be RESERVATIONS 8529 * RELEASED 8530 */ 8531 8532 for (i = softc->init_min; i < softc->init_max; i++) { 8533 if (ctl_get_prkey(lun, i) == 0) 8534 continue; 8535 8536 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8537 } 8538 } 8539 lun->pr_res_type = 0; 8540 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8541 if (lun->pr_key_count==0) { 8542 lun->flags &= ~CTL_LUN_PR_RESERVED; 8543 lun->pr_res_type = 0; 8544 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8545 } 8546 } 8547 lun->pr_generation++; 8548 break; 8549 8550 case CTL_PR_RESERVE: 8551 lun->flags |= CTL_LUN_PR_RESERVED; 8552 lun->pr_res_type = msg->pr.pr_info.res_type; 8553 lun->pr_res_idx = msg->pr.pr_info.residx; 8554 8555 break; 8556 8557 case CTL_PR_RELEASE: 8558 /* 8559 * If this isn't an exclusive access reservation and NUAR 8560 * is not set, generate UA for all other registrants. 
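		 * (The UA established here is CTL_UA_RES_RELEASE, reported
		 * as RESERVATIONS RELEASED, matching the local release path
		 * in ctl_persistent_reserve_out().)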
		 */
		if (lun->pr_res_type != SPR_TYPE_EX_AC &&
		    lun->pr_res_type != SPR_TYPE_WR_EX &&
		    (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) {
			for (i = softc->init_min; i < softc->init_max; i++) {
				if (i == residx || ctl_get_prkey(lun, i) == 0)
					continue;
				ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
			}
		}

		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
		lun->pr_res_type = 0;
		break;

	case CTL_PR_PREEMPT:
		ctl_pro_preempt_other(lun, msg);
		break;
	case CTL_PR_CLEAR:
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_type = 0;
		lun->pr_key_count = 0;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;

		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
			if (ctl_get_prkey(lun, i) == 0)
				continue;
			ctl_clr_prkey(lun, i);
			ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
		}
		lun->pr_generation++;
		break;
	}

	mtx_unlock(&lun->lun_lock);
}

int
ctl_read_write(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int flags, retval;
	int isread;

	CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));

	flags = 0;
	isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10
	      || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16;
	switch (ctsio->cdb[0]) {
	case READ_6:
	case WRITE_6: {
		struct scsi_rw_6 *cdb;

		cdb = (struct scsi_rw_6 *)ctsio->cdb;

		lba = scsi_3btoul(cdb->addr);
		/* only 5 bits are valid in the most significant address byte */
		lba &= 0x1fffff;
		num_blocks = cdb->length;
		/*
		 * This is correct according to SBC-2.
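		 * A transfer length of zero in READ(6)/WRITE(6) means 256
		 * blocks, so the zero value is mapped to 256 below.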
8626 */ 8627 if (num_blocks == 0) 8628 num_blocks = 256; 8629 break; 8630 } 8631 case READ_10: 8632 case WRITE_10: { 8633 struct scsi_rw_10 *cdb; 8634 8635 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8636 if (cdb->byte2 & SRW10_FUA) 8637 flags |= CTL_LLF_FUA; 8638 if (cdb->byte2 & SRW10_DPO) 8639 flags |= CTL_LLF_DPO; 8640 lba = scsi_4btoul(cdb->addr); 8641 num_blocks = scsi_2btoul(cdb->length); 8642 break; 8643 } 8644 case WRITE_VERIFY_10: { 8645 struct scsi_write_verify_10 *cdb; 8646 8647 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8648 flags |= CTL_LLF_FUA; 8649 if (cdb->byte2 & SWV_DPO) 8650 flags |= CTL_LLF_DPO; 8651 lba = scsi_4btoul(cdb->addr); 8652 num_blocks = scsi_2btoul(cdb->length); 8653 break; 8654 } 8655 case READ_12: 8656 case WRITE_12: { 8657 struct scsi_rw_12 *cdb; 8658 8659 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8660 if (cdb->byte2 & SRW12_FUA) 8661 flags |= CTL_LLF_FUA; 8662 if (cdb->byte2 & SRW12_DPO) 8663 flags |= CTL_LLF_DPO; 8664 lba = scsi_4btoul(cdb->addr); 8665 num_blocks = scsi_4btoul(cdb->length); 8666 break; 8667 } 8668 case WRITE_VERIFY_12: { 8669 struct scsi_write_verify_12 *cdb; 8670 8671 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8672 flags |= CTL_LLF_FUA; 8673 if (cdb->byte2 & SWV_DPO) 8674 flags |= CTL_LLF_DPO; 8675 lba = scsi_4btoul(cdb->addr); 8676 num_blocks = scsi_4btoul(cdb->length); 8677 break; 8678 } 8679 case READ_16: 8680 case WRITE_16: { 8681 struct scsi_rw_16 *cdb; 8682 8683 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8684 if (cdb->byte2 & SRW12_FUA) 8685 flags |= CTL_LLF_FUA; 8686 if (cdb->byte2 & SRW12_DPO) 8687 flags |= CTL_LLF_DPO; 8688 lba = scsi_8btou64(cdb->addr); 8689 num_blocks = scsi_4btoul(cdb->length); 8690 break; 8691 } 8692 case WRITE_ATOMIC_16: { 8693 struct scsi_write_atomic_16 *cdb; 8694 8695 if (lun->be_lun->atomicblock == 0) { 8696 ctl_set_invalid_opcode(ctsio); 8697 ctl_done((union ctl_io *)ctsio); 8698 return (CTL_RETVAL_COMPLETE); 8699 } 8700 8701 cdb = (struct scsi_write_atomic_16 *)ctsio->cdb; 8702 if (cdb->byte2 & SRW12_FUA) 8703 flags |= CTL_LLF_FUA; 8704 if (cdb->byte2 & SRW12_DPO) 8705 flags |= CTL_LLF_DPO; 8706 lba = scsi_8btou64(cdb->addr); 8707 num_blocks = scsi_2btoul(cdb->length); 8708 if (num_blocks > lun->be_lun->atomicblock) { 8709 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8710 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8711 /*bit*/ 0); 8712 ctl_done((union ctl_io *)ctsio); 8713 return (CTL_RETVAL_COMPLETE); 8714 } 8715 break; 8716 } 8717 case WRITE_VERIFY_16: { 8718 struct scsi_write_verify_16 *cdb; 8719 8720 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8721 flags |= CTL_LLF_FUA; 8722 if (cdb->byte2 & SWV_DPO) 8723 flags |= CTL_LLF_DPO; 8724 lba = scsi_8btou64(cdb->addr); 8725 num_blocks = scsi_4btoul(cdb->length); 8726 break; 8727 } 8728 default: 8729 /* 8730 * We got a command we don't support. This shouldn't 8731 * happen, commands should be filtered out above us. 8732 */ 8733 ctl_set_invalid_opcode(ctsio); 8734 ctl_done((union ctl_io *)ctsio); 8735 8736 return (CTL_RETVAL_COMPLETE); 8737 break; /* NOTREACHED */ 8738 } 8739 8740 /* 8741 * The first check is to make sure we're in bounds, the second 8742 * check is to catch wrap-around problems. If the lba + num blocks 8743 * is less than the lba, then we've wrapped around and the block 8744 * range is invalid anyway. 
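	 * For example, a 64-bit LBA of 0xfffffffffffffffe with num_blocks
	 * of 4 wraps around to 2; the first check would pass, but the
	 * second catches it.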
8745 */ 8746 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8747 || ((lba + num_blocks) < lba)) { 8748 ctl_set_lba_out_of_range(ctsio, 8749 MAX(lba, lun->be_lun->maxlba + 1)); 8750 ctl_done((union ctl_io *)ctsio); 8751 return (CTL_RETVAL_COMPLETE); 8752 } 8753 8754 /* 8755 * According to SBC-3, a transfer length of 0 is not an error. 8756 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8757 * translates to 256 blocks for those commands. 8758 */ 8759 if (num_blocks == 0) { 8760 ctl_set_success(ctsio); 8761 ctl_done((union ctl_io *)ctsio); 8762 return (CTL_RETVAL_COMPLETE); 8763 } 8764 8765 /* Set FUA and/or DPO if caches are disabled. */ 8766 if (isread) { 8767 if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0) 8768 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8769 } else { 8770 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8771 flags |= CTL_LLF_FUA; 8772 } 8773 8774 lbalen = (struct ctl_lba_len_flags *) 8775 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8776 lbalen->lba = lba; 8777 lbalen->len = num_blocks; 8778 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8779 8780 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8781 ctsio->kern_rel_offset = 0; 8782 8783 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8784 8785 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8786 return (retval); 8787 } 8788 8789 static int 8790 ctl_cnw_cont(union ctl_io *io) 8791 { 8792 struct ctl_lun *lun = CTL_LUN(io); 8793 struct ctl_scsiio *ctsio; 8794 struct ctl_lba_len_flags *lbalen; 8795 int retval; 8796 8797 ctsio = &io->scsiio; 8798 ctsio->io_hdr.status = CTL_STATUS_NONE; 8799 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8800 lbalen = (struct ctl_lba_len_flags *) 8801 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8802 lbalen->flags &= ~CTL_LLF_COMPARE; 8803 lbalen->flags |= CTL_LLF_WRITE; 8804 8805 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8806 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8807 return (retval); 8808 } 8809 8810 int 8811 ctl_cnw(struct ctl_scsiio *ctsio) 8812 { 8813 struct ctl_lun *lun = CTL_LUN(ctsio); 8814 struct ctl_lba_len_flags *lbalen; 8815 uint64_t lba; 8816 uint32_t num_blocks; 8817 int flags, retval; 8818 8819 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8820 8821 flags = 0; 8822 switch (ctsio->cdb[0]) { 8823 case COMPARE_AND_WRITE: { 8824 struct scsi_compare_and_write *cdb; 8825 8826 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8827 if (cdb->byte2 & SRW10_FUA) 8828 flags |= CTL_LLF_FUA; 8829 if (cdb->byte2 & SRW10_DPO) 8830 flags |= CTL_LLF_DPO; 8831 lba = scsi_8btou64(cdb->addr); 8832 num_blocks = cdb->length; 8833 break; 8834 } 8835 default: 8836 /* 8837 * We got a command we don't support. This shouldn't 8838 * happen, commands should be filtered out above us. 8839 */ 8840 ctl_set_invalid_opcode(ctsio); 8841 ctl_done((union ctl_io *)ctsio); 8842 8843 return (CTL_RETVAL_COMPLETE); 8844 break; /* NOTREACHED */ 8845 } 8846 8847 /* 8848 * The first check is to make sure we're in bounds, the second 8849 * check is to catch wrap-around problems. If the lba + num blocks 8850 * is less than the lba, then we've wrapped around and the block 8851 * range is invalid anyway. 
8852 */ 8853 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8854 || ((lba + num_blocks) < lba)) { 8855 ctl_set_lba_out_of_range(ctsio, 8856 MAX(lba, lun->be_lun->maxlba + 1)); 8857 ctl_done((union ctl_io *)ctsio); 8858 return (CTL_RETVAL_COMPLETE); 8859 } 8860 8861 /* 8862 * According to SBC-3, a transfer length of 0 is not an error. 8863 */ 8864 if (num_blocks == 0) { 8865 ctl_set_success(ctsio); 8866 ctl_done((union ctl_io *)ctsio); 8867 return (CTL_RETVAL_COMPLETE); 8868 } 8869 8870 /* Set FUA if write cache is disabled. */ 8871 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8872 flags |= CTL_LLF_FUA; 8873 8874 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 8875 ctsio->kern_rel_offset = 0; 8876 8877 /* 8878 * Set the IO_CONT flag, so that if this I/O gets passed to 8879 * ctl_data_submit_done(), it'll get passed back to 8880 * ctl_ctl_cnw_cont() for further processing. 8881 */ 8882 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 8883 ctsio->io_cont = ctl_cnw_cont; 8884 8885 lbalen = (struct ctl_lba_len_flags *) 8886 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8887 lbalen->lba = lba; 8888 lbalen->len = num_blocks; 8889 lbalen->flags = CTL_LLF_COMPARE | flags; 8890 8891 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 8892 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8893 return (retval); 8894 } 8895 8896 int 8897 ctl_verify(struct ctl_scsiio *ctsio) 8898 { 8899 struct ctl_lun *lun = CTL_LUN(ctsio); 8900 struct ctl_lba_len_flags *lbalen; 8901 uint64_t lba; 8902 uint32_t num_blocks; 8903 int bytchk, flags; 8904 int retval; 8905 8906 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 8907 8908 bytchk = 0; 8909 flags = CTL_LLF_FUA; 8910 switch (ctsio->cdb[0]) { 8911 case VERIFY_10: { 8912 struct scsi_verify_10 *cdb; 8913 8914 cdb = (struct scsi_verify_10 *)ctsio->cdb; 8915 if (cdb->byte2 & SVFY_BYTCHK) 8916 bytchk = 1; 8917 if (cdb->byte2 & SVFY_DPO) 8918 flags |= CTL_LLF_DPO; 8919 lba = scsi_4btoul(cdb->addr); 8920 num_blocks = scsi_2btoul(cdb->length); 8921 break; 8922 } 8923 case VERIFY_12: { 8924 struct scsi_verify_12 *cdb; 8925 8926 cdb = (struct scsi_verify_12 *)ctsio->cdb; 8927 if (cdb->byte2 & SVFY_BYTCHK) 8928 bytchk = 1; 8929 if (cdb->byte2 & SVFY_DPO) 8930 flags |= CTL_LLF_DPO; 8931 lba = scsi_4btoul(cdb->addr); 8932 num_blocks = scsi_4btoul(cdb->length); 8933 break; 8934 } 8935 case VERIFY_16: { 8936 struct scsi_rw_16 *cdb; 8937 8938 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8939 if (cdb->byte2 & SVFY_BYTCHK) 8940 bytchk = 1; 8941 if (cdb->byte2 & SVFY_DPO) 8942 flags |= CTL_LLF_DPO; 8943 lba = scsi_8btou64(cdb->addr); 8944 num_blocks = scsi_4btoul(cdb->length); 8945 break; 8946 } 8947 default: 8948 /* 8949 * We got a command we don't support. This shouldn't 8950 * happen, commands should be filtered out above us. 8951 */ 8952 ctl_set_invalid_opcode(ctsio); 8953 ctl_done((union ctl_io *)ctsio); 8954 return (CTL_RETVAL_COMPLETE); 8955 } 8956 8957 /* 8958 * The first check is to make sure we're in bounds, the second 8959 * check is to catch wrap-around problems. If the lba + num blocks 8960 * is less than the lba, then we've wrapped around and the block 8961 * range is invalid anyway. 8962 */ 8963 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8964 || ((lba + num_blocks) < lba)) { 8965 ctl_set_lba_out_of_range(ctsio, 8966 MAX(lba, lun->be_lun->maxlba + 1)); 8967 ctl_done((union ctl_io *)ctsio); 8968 return (CTL_RETVAL_COMPLETE); 8969 } 8970 8971 /* 8972 * According to SBC-3, a transfer length of 0 is not an error. 
8973 */ 8974 if (num_blocks == 0) { 8975 ctl_set_success(ctsio); 8976 ctl_done((union ctl_io *)ctsio); 8977 return (CTL_RETVAL_COMPLETE); 8978 } 8979 8980 lbalen = (struct ctl_lba_len_flags *) 8981 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8982 lbalen->lba = lba; 8983 lbalen->len = num_blocks; 8984 if (bytchk) { 8985 lbalen->flags = CTL_LLF_COMPARE | flags; 8986 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8987 } else { 8988 lbalen->flags = CTL_LLF_VERIFY | flags; 8989 ctsio->kern_total_len = 0; 8990 } 8991 ctsio->kern_rel_offset = 0; 8992 8993 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 8994 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8995 return (retval); 8996 } 8997 8998 int 8999 ctl_report_luns(struct ctl_scsiio *ctsio) 9000 { 9001 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9002 struct ctl_port *port = CTL_PORT(ctsio); 9003 struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio); 9004 struct scsi_report_luns *cdb; 9005 struct scsi_report_luns_data *lun_data; 9006 int num_filled, num_luns, num_port_luns, retval; 9007 uint32_t alloc_len, lun_datalen; 9008 uint32_t initidx, targ_lun_id, lun_id; 9009 9010 retval = CTL_RETVAL_COMPLETE; 9011 cdb = (struct scsi_report_luns *)ctsio->cdb; 9012 9013 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9014 9015 num_luns = 0; 9016 num_port_luns = port->lun_map ? port->lun_map_size : CTL_MAX_LUNS; 9017 mtx_lock(&softc->ctl_lock); 9018 for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) { 9019 if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX) 9020 num_luns++; 9021 } 9022 mtx_unlock(&softc->ctl_lock); 9023 9024 switch (cdb->select_report) { 9025 case RPL_REPORT_DEFAULT: 9026 case RPL_REPORT_ALL: 9027 case RPL_REPORT_NONSUBSID: 9028 break; 9029 case RPL_REPORT_WELLKNOWN: 9030 case RPL_REPORT_ADMIN: 9031 case RPL_REPORT_CONGLOM: 9032 num_luns = 0; 9033 break; 9034 default: 9035 ctl_set_invalid_field(ctsio, 9036 /*sks_valid*/ 1, 9037 /*command*/ 1, 9038 /*field*/ 2, 9039 /*bit_valid*/ 0, 9040 /*bit*/ 0); 9041 ctl_done((union ctl_io *)ctsio); 9042 return (retval); 9043 break; /* NOTREACHED */ 9044 } 9045 9046 alloc_len = scsi_4btoul(cdb->length); 9047 /* 9048 * The initiator has to allocate at least 16 bytes for this request, 9049 * so he can at least get the header and the first LUN. Otherwise 9050 * we reject the request (per SPC-3 rev 14, section 6.21). 
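	 * (The report header is 8 bytes and each LUN entry is 8 bytes,
	 * which is where the 16 byte minimum enforced below comes from.)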
9051 */ 9052 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9053 sizeof(struct scsi_report_luns_lundata))) { 9054 ctl_set_invalid_field(ctsio, 9055 /*sks_valid*/ 1, 9056 /*command*/ 1, 9057 /*field*/ 6, 9058 /*bit_valid*/ 0, 9059 /*bit*/ 0); 9060 ctl_done((union ctl_io *)ctsio); 9061 return (retval); 9062 } 9063 9064 lun_datalen = sizeof(*lun_data) + 9065 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9066 9067 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9068 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9069 ctsio->kern_sg_entries = 0; 9070 9071 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9072 9073 mtx_lock(&softc->ctl_lock); 9074 for (targ_lun_id = 0, num_filled = 0; 9075 targ_lun_id < num_port_luns && num_filled < num_luns; 9076 targ_lun_id++) { 9077 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 9078 if (lun_id == UINT32_MAX) 9079 continue; 9080 lun = softc->ctl_luns[lun_id]; 9081 if (lun == NULL) 9082 continue; 9083 9084 be64enc(lun_data->luns[num_filled++].lundata, 9085 ctl_encode_lun(targ_lun_id)); 9086 9087 /* 9088 * According to SPC-3, rev 14 section 6.21: 9089 * 9090 * "The execution of a REPORT LUNS command to any valid and 9091 * installed logical unit shall clear the REPORTED LUNS DATA 9092 * HAS CHANGED unit attention condition for all logical 9093 * units of that target with respect to the requesting 9094 * initiator. A valid and installed logical unit is one 9095 * having a PERIPHERAL QUALIFIER of 000b in the standard 9096 * INQUIRY data (see 6.4.2)." 9097 * 9098 * If request_lun is NULL, the LUN this report luns command 9099 * was issued to is either disabled or doesn't exist. In that 9100 * case, we shouldn't clear any pending lun change unit 9101 * attention. 9102 */ 9103 if (request_lun != NULL) { 9104 mtx_lock(&lun->lun_lock); 9105 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 9106 mtx_unlock(&lun->lun_lock); 9107 } 9108 } 9109 mtx_unlock(&softc->ctl_lock); 9110 9111 /* 9112 * It's quite possible that we've returned fewer LUNs than we allocated 9113 * space for. Trim it. 9114 */ 9115 lun_datalen = sizeof(*lun_data) + 9116 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9117 ctsio->kern_rel_offset = 0; 9118 ctsio->kern_sg_entries = 0; 9119 ctsio->kern_data_len = min(lun_datalen, alloc_len); 9120 ctsio->kern_total_len = ctsio->kern_data_len; 9121 9122 /* 9123 * We set this to the actual data length, regardless of how much 9124 * space we actually have to return results. If the user looks at 9125 * this value, he'll know whether or not he allocated enough space 9126 * and reissue the command if necessary. We don't support well 9127 * known logical units, so if the user asks for that, return none. 9128 */ 9129 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9130 9131 /* 9132 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9133 * this request. 
9134 */ 9135 ctl_set_success(ctsio); 9136 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9137 ctsio->be_move_done = ctl_config_move_done; 9138 ctl_datamove((union ctl_io *)ctsio); 9139 return (retval); 9140 } 9141 9142 int 9143 ctl_request_sense(struct ctl_scsiio *ctsio) 9144 { 9145 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9146 struct ctl_lun *lun = CTL_LUN(ctsio); 9147 struct scsi_request_sense *cdb; 9148 struct scsi_sense_data *sense_ptr; 9149 uint32_t initidx; 9150 int have_error; 9151 u_int sense_len = SSD_FULL_SIZE; 9152 scsi_sense_data_type sense_format; 9153 ctl_ua_type ua_type; 9154 uint8_t asc = 0, ascq = 0; 9155 9156 cdb = (struct scsi_request_sense *)ctsio->cdb; 9157 9158 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9159 9160 /* 9161 * Determine which sense format the user wants. 9162 */ 9163 if (cdb->byte2 & SRS_DESC) 9164 sense_format = SSD_TYPE_DESC; 9165 else 9166 sense_format = SSD_TYPE_FIXED; 9167 9168 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9169 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9170 ctsio->kern_sg_entries = 0; 9171 ctsio->kern_rel_offset = 0; 9172 9173 /* 9174 * struct scsi_sense_data, which is currently set to 256 bytes, is 9175 * larger than the largest allowed value for the length field in the 9176 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9177 */ 9178 ctsio->kern_data_len = cdb->length; 9179 ctsio->kern_total_len = cdb->length; 9180 9181 /* 9182 * If we don't have a LUN, we don't have any pending sense. 9183 */ 9184 if (lun == NULL || 9185 ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 9186 softc->ha_link < CTL_HA_LINK_UNKNOWN)) { 9187 /* "Logical unit not supported" */ 9188 ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format, 9189 /*current_error*/ 1, 9190 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 9191 /*asc*/ 0x25, 9192 /*ascq*/ 0x00, 9193 SSD_ELEM_NONE); 9194 goto send; 9195 } 9196 9197 have_error = 0; 9198 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9199 /* 9200 * Check for pending sense, and then for pending unit attentions. 9201 * Pending sense gets returned first, then pending unit attentions. 9202 */ 9203 mtx_lock(&lun->lun_lock); 9204 #ifdef CTL_WITH_CA 9205 if (ctl_is_set(lun->have_ca, initidx)) { 9206 scsi_sense_data_type stored_format; 9207 9208 /* 9209 * Check to see which sense format was used for the stored 9210 * sense data. 9211 */ 9212 stored_format = scsi_sense_type(&lun->pending_sense[initidx]); 9213 9214 /* 9215 * If the user requested a different sense format than the 9216 * one we stored, then we need to convert it to the other 9217 * format. If we're going from descriptor to fixed format 9218 * sense data, we may lose things in translation, depending 9219 * on what options were used. 9220 * 9221 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9222 * for some reason we'll just copy it out as-is. 
9223 */ 9224 if ((stored_format == SSD_TYPE_FIXED) 9225 && (sense_format == SSD_TYPE_DESC)) 9226 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9227 &lun->pending_sense[initidx], 9228 (struct scsi_sense_data_desc *)sense_ptr); 9229 else if ((stored_format == SSD_TYPE_DESC) 9230 && (sense_format == SSD_TYPE_FIXED)) 9231 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9232 &lun->pending_sense[initidx], 9233 (struct scsi_sense_data_fixed *)sense_ptr); 9234 else 9235 memcpy(sense_ptr, &lun->pending_sense[initidx], 9236 MIN(sizeof(*sense_ptr), 9237 sizeof(lun->pending_sense[initidx]))); 9238 9239 ctl_clear_mask(lun->have_ca, initidx); 9240 have_error = 1; 9241 } else 9242 #endif 9243 if (have_error == 0) { 9244 ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len, 9245 sense_format); 9246 if (ua_type != CTL_UA_NONE) 9247 have_error = 1; 9248 } 9249 if (have_error == 0) { 9250 /* 9251 * Report informational exception if have one and allowed. 9252 */ 9253 if (lun->MODE_IE.mrie != SIEP_MRIE_NO) { 9254 asc = lun->ie_asc; 9255 ascq = lun->ie_ascq; 9256 } 9257 ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format, 9258 /*current_error*/ 1, 9259 /*sense_key*/ SSD_KEY_NO_SENSE, 9260 /*asc*/ asc, 9261 /*ascq*/ ascq, 9262 SSD_ELEM_NONE); 9263 } 9264 mtx_unlock(&lun->lun_lock); 9265 9266 send: 9267 /* 9268 * We report the SCSI status as OK, since the status of the command 9269 * itself is OK. We're reporting sense as parameter data. 9270 */ 9271 ctl_set_success(ctsio); 9272 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9273 ctsio->be_move_done = ctl_config_move_done; 9274 ctl_datamove((union ctl_io *)ctsio); 9275 return (CTL_RETVAL_COMPLETE); 9276 } 9277 9278 int 9279 ctl_tur(struct ctl_scsiio *ctsio) 9280 { 9281 9282 CTL_DEBUG_PRINT(("ctl_tur\n")); 9283 9284 ctl_set_success(ctsio); 9285 ctl_done((union ctl_io *)ctsio); 9286 9287 return (CTL_RETVAL_COMPLETE); 9288 } 9289 9290 /* 9291 * SCSI VPD page 0x00, the Supported VPD Pages page. 9292 */ 9293 static int 9294 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9295 { 9296 struct ctl_lun *lun = CTL_LUN(ctsio); 9297 struct scsi_vpd_supported_pages *pages; 9298 int sup_page_size; 9299 int p; 9300 9301 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9302 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9303 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9304 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9305 ctsio->kern_rel_offset = 0; 9306 ctsio->kern_sg_entries = 0; 9307 ctsio->kern_data_len = min(sup_page_size, alloc_len); 9308 ctsio->kern_total_len = ctsio->kern_data_len; 9309 9310 /* 9311 * The control device is always connected. The disk device, on the 9312 * other hand, may not be online all the time. Need to change this 9313 * to figure out whether the disk device is actually online or not. 
9314 */ 9315 if (lun != NULL) 9316 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9317 lun->be_lun->lun_type; 9318 else 9319 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9320 9321 p = 0; 9322 /* Supported VPD pages */ 9323 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9324 /* Serial Number */ 9325 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9326 /* Device Identification */ 9327 pages->page_list[p++] = SVPD_DEVICE_ID; 9328 /* Extended INQUIRY Data */ 9329 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9330 /* Mode Page Policy */ 9331 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9332 /* SCSI Ports */ 9333 pages->page_list[p++] = SVPD_SCSI_PORTS; 9334 /* Third-party Copy */ 9335 pages->page_list[p++] = SVPD_SCSI_TPC; 9336 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9337 /* Block limits */ 9338 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9339 /* Block Device Characteristics */ 9340 pages->page_list[p++] = SVPD_BDC; 9341 /* Logical Block Provisioning */ 9342 pages->page_list[p++] = SVPD_LBP; 9343 } 9344 pages->length = p; 9345 9346 ctl_set_success(ctsio); 9347 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9348 ctsio->be_move_done = ctl_config_move_done; 9349 ctl_datamove((union ctl_io *)ctsio); 9350 return (CTL_RETVAL_COMPLETE); 9351 } 9352 9353 /* 9354 * SCSI VPD page 0x80, the Unit Serial Number page. 9355 */ 9356 static int 9357 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9358 { 9359 struct ctl_lun *lun = CTL_LUN(ctsio); 9360 struct scsi_vpd_unit_serial_number *sn_ptr; 9361 int data_len; 9362 9363 data_len = 4 + CTL_SN_LEN; 9364 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9365 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9366 ctsio->kern_rel_offset = 0; 9367 ctsio->kern_sg_entries = 0; 9368 ctsio->kern_data_len = min(data_len, alloc_len); 9369 ctsio->kern_total_len = ctsio->kern_data_len; 9370 9371 /* 9372 * The control device is always connected. The disk device, on the 9373 * other hand, may not be online all the time. Need to change this 9374 * to figure out whether the disk device is actually online or not. 9375 */ 9376 if (lun != NULL) 9377 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9378 lun->be_lun->lun_type; 9379 else 9380 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9381 9382 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9383 sn_ptr->length = CTL_SN_LEN; 9384 /* 9385 * If we don't have a LUN, we just leave the serial number as 9386 * all spaces. 9387 */ 9388 if (lun != NULL) { 9389 strncpy((char *)sn_ptr->serial_num, 9390 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9391 } else 9392 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9393 9394 ctl_set_success(ctsio); 9395 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9396 ctsio->be_move_done = ctl_config_move_done; 9397 ctl_datamove((union ctl_io *)ctsio); 9398 return (CTL_RETVAL_COMPLETE); 9399 } 9400 9401 9402 /* 9403 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 
9404 */ 9405 static int 9406 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9407 { 9408 struct ctl_lun *lun = CTL_LUN(ctsio); 9409 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9410 int data_len; 9411 9412 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9413 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9414 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9415 ctsio->kern_sg_entries = 0; 9416 ctsio->kern_rel_offset = 0; 9417 ctsio->kern_data_len = min(data_len, alloc_len); 9418 ctsio->kern_total_len = ctsio->kern_data_len; 9419 9420 /* 9421 * The control device is always connected. The disk device, on the 9422 * other hand, may not be online all the time. 9423 */ 9424 if (lun != NULL) 9425 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9426 lun->be_lun->lun_type; 9427 else 9428 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9429 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9430 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9431 /* 9432 * We support head of queue, ordered and simple tags. 9433 */ 9434 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9435 /* 9436 * Volatile cache supported. 9437 */ 9438 eid_ptr->flags3 = SVPD_EID_V_SUP; 9439 9440 /* 9441 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9442 * attention for a particular IT nexus on all LUNs once we report 9443 * it to that nexus once. This bit is required as of SPC-4. 9444 */ 9445 eid_ptr->flags4 = SVPD_EID_LUICLR; 9446 9447 /* 9448 * We support revert to defaults (RTD) bit in MODE SELECT. 9449 */ 9450 eid_ptr->flags5 = SVPD_EID_RTD_SUP; 9451 9452 /* 9453 * XXX KDM in order to correctly answer this, we would need 9454 * information from the SIM to determine how much sense data it 9455 * can send. So this would really be a path inquiry field, most 9456 * likely. This can be set to a maximum of 252 according to SPC-4, 9457 * but the hardware may or may not be able to support that much. 9458 * 0 just means that the maximum sense data length is not reported. 9459 */ 9460 eid_ptr->max_sense_length = 0; 9461 9462 ctl_set_success(ctsio); 9463 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9464 ctsio->be_move_done = ctl_config_move_done; 9465 ctl_datamove((union ctl_io *)ctsio); 9466 return (CTL_RETVAL_COMPLETE); 9467 } 9468 9469 static int 9470 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9471 { 9472 struct ctl_lun *lun = CTL_LUN(ctsio); 9473 struct scsi_vpd_mode_page_policy *mpp_ptr; 9474 int data_len; 9475 9476 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9477 sizeof(struct scsi_vpd_mode_page_policy_descr); 9478 9479 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9480 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9481 ctsio->kern_rel_offset = 0; 9482 ctsio->kern_sg_entries = 0; 9483 ctsio->kern_data_len = min(data_len, alloc_len); 9484 ctsio->kern_total_len = ctsio->kern_data_len; 9485 9486 /* 9487 * The control device is always connected. The disk device, on the 9488 * other hand, may not be online all the time. 
9489 */ 9490 if (lun != NULL) 9491 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9492 lun->be_lun->lun_type; 9493 else 9494 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9495 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9496 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9497 mpp_ptr->descr[0].page_code = 0x3f; 9498 mpp_ptr->descr[0].subpage_code = 0xff; 9499 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9500 9501 ctl_set_success(ctsio); 9502 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9503 ctsio->be_move_done = ctl_config_move_done; 9504 ctl_datamove((union ctl_io *)ctsio); 9505 return (CTL_RETVAL_COMPLETE); 9506 } 9507 9508 /* 9509 * SCSI VPD page 0x83, the Device Identification page. 9510 */ 9511 static int 9512 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9513 { 9514 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9515 struct ctl_port *port = CTL_PORT(ctsio); 9516 struct ctl_lun *lun = CTL_LUN(ctsio); 9517 struct scsi_vpd_device_id *devid_ptr; 9518 struct scsi_vpd_id_descriptor *desc; 9519 int data_len, g; 9520 uint8_t proto; 9521 9522 data_len = sizeof(struct scsi_vpd_device_id) + 9523 sizeof(struct scsi_vpd_id_descriptor) + 9524 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9525 sizeof(struct scsi_vpd_id_descriptor) + 9526 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9527 if (lun && lun->lun_devid) 9528 data_len += lun->lun_devid->len; 9529 if (port && port->port_devid) 9530 data_len += port->port_devid->len; 9531 if (port && port->target_devid) 9532 data_len += port->target_devid->len; 9533 9534 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9535 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9536 ctsio->kern_sg_entries = 0; 9537 ctsio->kern_rel_offset = 0; 9538 ctsio->kern_sg_entries = 0; 9539 ctsio->kern_data_len = min(data_len, alloc_len); 9540 ctsio->kern_total_len = ctsio->kern_data_len; 9541 9542 /* 9543 * The control device is always connected. The disk device, on the 9544 * other hand, may not be online all the time. 9545 */ 9546 if (lun != NULL) 9547 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9548 lun->be_lun->lun_type; 9549 else 9550 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9551 devid_ptr->page_code = SVPD_DEVICE_ID; 9552 scsi_ulto2b(data_len - 4, devid_ptr->length); 9553 9554 if (port && port->port_type == CTL_PORT_FC) 9555 proto = SCSI_PROTO_FC << 4; 9556 else if (port && port->port_type == CTL_PORT_ISCSI) 9557 proto = SCSI_PROTO_ISCSI << 4; 9558 else 9559 proto = SCSI_PROTO_SPI << 4; 9560 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9561 9562 /* 9563 * We're using a LUN association here. i.e., this device ID is a 9564 * per-LUN identifier. 9565 */ 9566 if (lun && lun->lun_devid) { 9567 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9568 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9569 lun->lun_devid->len); 9570 } 9571 9572 /* 9573 * This is for the WWPN which is a port association. 
9574 */ 9575 if (port && port->port_devid) { 9576 memcpy(desc, port->port_devid->data, port->port_devid->len); 9577 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9578 port->port_devid->len); 9579 } 9580 9581 /* 9582 * This is for the Relative Target Port(type 4h) identifier 9583 */ 9584 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9585 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9586 SVPD_ID_TYPE_RELTARG; 9587 desc->length = 4; 9588 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9589 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9590 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9591 9592 /* 9593 * This is for the Target Port Group(type 5h) identifier 9594 */ 9595 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9596 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9597 SVPD_ID_TYPE_TPORTGRP; 9598 desc->length = 4; 9599 if (softc->is_single || 9600 (port && port->status & CTL_PORT_STATUS_HA_SHARED)) 9601 g = 1; 9602 else 9603 g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt; 9604 scsi_ulto2b(g, &desc->identifier[2]); 9605 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9606 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9607 9608 /* 9609 * This is for the Target identifier 9610 */ 9611 if (port && port->target_devid) { 9612 memcpy(desc, port->target_devid->data, port->target_devid->len); 9613 } 9614 9615 ctl_set_success(ctsio); 9616 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9617 ctsio->be_move_done = ctl_config_move_done; 9618 ctl_datamove((union ctl_io *)ctsio); 9619 return (CTL_RETVAL_COMPLETE); 9620 } 9621 9622 static int 9623 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9624 { 9625 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9626 struct ctl_lun *lun = CTL_LUN(ctsio); 9627 struct scsi_vpd_scsi_ports *sp; 9628 struct scsi_vpd_port_designation *pd; 9629 struct scsi_vpd_port_designation_cont *pdc; 9630 struct ctl_port *port; 9631 int data_len, num_target_ports, iid_len, id_len; 9632 9633 num_target_ports = 0; 9634 iid_len = 0; 9635 id_len = 0; 9636 mtx_lock(&softc->ctl_lock); 9637 STAILQ_FOREACH(port, &softc->port_list, links) { 9638 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9639 continue; 9640 if (lun != NULL && 9641 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9642 continue; 9643 num_target_ports++; 9644 if (port->init_devid) 9645 iid_len += port->init_devid->len; 9646 if (port->port_devid) 9647 id_len += port->port_devid->len; 9648 } 9649 mtx_unlock(&softc->ctl_lock); 9650 9651 data_len = sizeof(struct scsi_vpd_scsi_ports) + 9652 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9653 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9654 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9655 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9656 ctsio->kern_sg_entries = 0; 9657 ctsio->kern_rel_offset = 0; 9658 ctsio->kern_sg_entries = 0; 9659 ctsio->kern_data_len = min(data_len, alloc_len); 9660 ctsio->kern_total_len = ctsio->kern_data_len; 9661 9662 /* 9663 * The control device is always connected. The disk device, on the 9664 * other hand, may not be online all the time. Need to change this 9665 * to figure out whether the disk device is actually online or not. 
 */
        if (lun != NULL)
                sp->device = (SID_QUAL_LU_CONNECTED << 5) |
                    lun->be_lun->lun_type;
        else
                sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

        sp->page_code = SVPD_SCSI_PORTS;
        scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports),
            sp->page_length);
        pd = &sp->design[0];

        mtx_lock(&softc->ctl_lock);
        STAILQ_FOREACH(port, &softc->port_list, links) {
                if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
                        continue;
                if (lun != NULL &&
                    ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
                        continue;
                scsi_ulto2b(port->targ_port, pd->relative_port_id);
                if (port->init_devid) {
                        iid_len = port->init_devid->len;
                        memcpy(pd->initiator_transportid,
                            port->init_devid->data, port->init_devid->len);
                } else
                        iid_len = 0;
                scsi_ulto2b(iid_len, pd->initiator_transportid_length);
                pdc = (struct scsi_vpd_port_designation_cont *)
                    (&pd->initiator_transportid[iid_len]);
                if (port->port_devid) {
                        id_len = port->port_devid->len;
                        memcpy(pdc->target_port_descriptors,
                            port->port_devid->data, port->port_devid->len);
                } else
                        id_len = 0;
                scsi_ulto2b(id_len, pdc->target_port_descriptors_length);
                pd = (struct scsi_vpd_port_designation *)
                    ((uint8_t *)pdc->target_port_descriptors + id_len);
        }
        mtx_unlock(&softc->ctl_lock);

        ctl_set_success(ctsio);
        ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
        ctsio->be_move_done = ctl_config_move_done;
        ctl_datamove((union ctl_io *)ctsio);
        return (CTL_RETVAL_COMPLETE);
}

static int
ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
{
        struct ctl_lun *lun = CTL_LUN(ctsio);
        struct scsi_vpd_block_limits *bl_ptr;
        uint64_t ival;

        ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO);
        bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr;
        ctsio->kern_sg_entries = 0;
        ctsio->kern_rel_offset = 0;
        ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len);
        ctsio->kern_total_len = ctsio->kern_data_len;

        /*
         * The control device is always connected.  The disk device, on the
         * other hand, may not be online all the time.  Need to change this
         * to figure out whether the disk device is actually online or not.
9733 */ 9734 if (lun != NULL) 9735 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9736 lun->be_lun->lun_type; 9737 else 9738 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9739 9740 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9741 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 9742 bl_ptr->max_cmp_write_len = 0xff; 9743 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9744 if (lun != NULL) { 9745 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 9746 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9747 ival = 0xffffffff; 9748 ctl_get_opt_number(&lun->be_lun->options, 9749 "unmap_max_lba", &ival); 9750 scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt); 9751 ival = 0xffffffff; 9752 ctl_get_opt_number(&lun->be_lun->options, 9753 "unmap_max_descr", &ival); 9754 scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt); 9755 if (lun->be_lun->ublockexp != 0) { 9756 scsi_ulto4b((1 << lun->be_lun->ublockexp), 9757 bl_ptr->opt_unmap_grain); 9758 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 9759 bl_ptr->unmap_grain_align); 9760 } 9761 } 9762 scsi_ulto4b(lun->be_lun->atomicblock, 9763 bl_ptr->max_atomic_transfer_length); 9764 scsi_ulto4b(0, bl_ptr->atomic_alignment); 9765 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 9766 scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary); 9767 scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size); 9768 ival = UINT64_MAX; 9769 ctl_get_opt_number(&lun->be_lun->options, "write_same_max_lba", &ival); 9770 scsi_u64to8b(ival, bl_ptr->max_write_same_length); 9771 } 9772 9773 ctl_set_success(ctsio); 9774 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9775 ctsio->be_move_done = ctl_config_move_done; 9776 ctl_datamove((union ctl_io *)ctsio); 9777 return (CTL_RETVAL_COMPLETE); 9778 } 9779 9780 static int 9781 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 9782 { 9783 struct ctl_lun *lun = CTL_LUN(ctsio); 9784 struct scsi_vpd_block_device_characteristics *bdc_ptr; 9785 const char *value; 9786 u_int i; 9787 9788 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 9789 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 9790 ctsio->kern_sg_entries = 0; 9791 ctsio->kern_rel_offset = 0; 9792 ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len); 9793 ctsio->kern_total_len = ctsio->kern_data_len; 9794 9795 /* 9796 * The control device is always connected. The disk device, on the 9797 * other hand, may not be online all the time. Need to change this 9798 * to figure out whether the disk device is actually online or not. 
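 *
 * As an illustration of the tunables consumed below (example values,
 * not defaults): an "rpm" LUN option of 7200 is reported verbatim in
 * MEDIUM ROTATION RATE, while rpm=1 is the SBC convention for a
 * non-rotating (solid state) medium; "formfactor" follows the SBC
 * nominal form factor codes, e.g. 3 for a 2.5" device.  Something like
 *
 *   ctladm create -b block -o file=/dev/zvol/tank/vol0 -o rpm=1 -o formfactor=3
 *
 * (a hypothetical invocation; see ctladm(8) for the real syntax) would
 * make the LUN look like a 2.5" SSD to initiators that parse this page.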
9799 */ 9800 if (lun != NULL) 9801 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9802 lun->be_lun->lun_type; 9803 else 9804 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9805 bdc_ptr->page_code = SVPD_BDC; 9806 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 9807 if (lun != NULL && 9808 (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) 9809 i = strtol(value, NULL, 0); 9810 else 9811 i = CTL_DEFAULT_ROTATION_RATE; 9812 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 9813 if (lun != NULL && 9814 (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL) 9815 i = strtol(value, NULL, 0); 9816 else 9817 i = 0; 9818 bdc_ptr->wab_wac_ff = (i & 0x0f); 9819 bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; 9820 9821 ctl_set_success(ctsio); 9822 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9823 ctsio->be_move_done = ctl_config_move_done; 9824 ctl_datamove((union ctl_io *)ctsio); 9825 return (CTL_RETVAL_COMPLETE); 9826 } 9827 9828 static int 9829 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 9830 { 9831 struct ctl_lun *lun = CTL_LUN(ctsio); 9832 struct scsi_vpd_logical_block_prov *lbp_ptr; 9833 const char *value; 9834 9835 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 9836 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 9837 ctsio->kern_sg_entries = 0; 9838 ctsio->kern_rel_offset = 0; 9839 ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len); 9840 ctsio->kern_total_len = ctsio->kern_data_len; 9841 9842 /* 9843 * The control device is always connected. The disk device, on the 9844 * other hand, may not be online all the time. Need to change this 9845 * to figure out whether the disk device is actually online or not. 9846 */ 9847 if (lun != NULL) 9848 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9849 lun->be_lun->lun_type; 9850 else 9851 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9852 9853 lbp_ptr->page_code = SVPD_LBP; 9854 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 9855 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 9856 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9857 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 9858 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 9859 value = ctl_get_opt(&lun->be_lun->options, "provisioning_type"); 9860 if (value != NULL) { 9861 if (strcmp(value, "resource") == 0) 9862 lbp_ptr->prov_type = SVPD_LBP_RESOURCE; 9863 else if (strcmp(value, "thin") == 0) 9864 lbp_ptr->prov_type = SVPD_LBP_THIN; 9865 } else 9866 lbp_ptr->prov_type = SVPD_LBP_THIN; 9867 } 9868 9869 ctl_set_success(ctsio); 9870 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9871 ctsio->be_move_done = ctl_config_move_done; 9872 ctl_datamove((union ctl_io *)ctsio); 9873 return (CTL_RETVAL_COMPLETE); 9874 } 9875 9876 /* 9877 * INQUIRY with the EVPD bit set. 
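 *
 * The CDB parsed here looks roughly like this (SPC INQUIRY, shown only
 * as a reminder of where the fields used below come from):
 *
 *   byte 0:    12h (INQUIRY)
 *   byte 1:    bit 0 = EVPD
 *   byte 2:    VPD page code (the switch below)
 *   bytes 3-4: allocation length (scsi_2btoul(cdb->length))
 *   byte 5:    control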
9878 */ 9879 static int 9880 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 9881 { 9882 struct ctl_lun *lun = CTL_LUN(ctsio); 9883 struct scsi_inquiry *cdb; 9884 int alloc_len, retval; 9885 9886 cdb = (struct scsi_inquiry *)ctsio->cdb; 9887 alloc_len = scsi_2btoul(cdb->length); 9888 9889 switch (cdb->page_code) { 9890 case SVPD_SUPPORTED_PAGES: 9891 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 9892 break; 9893 case SVPD_UNIT_SERIAL_NUMBER: 9894 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 9895 break; 9896 case SVPD_DEVICE_ID: 9897 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 9898 break; 9899 case SVPD_EXTENDED_INQUIRY_DATA: 9900 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 9901 break; 9902 case SVPD_MODE_PAGE_POLICY: 9903 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 9904 break; 9905 case SVPD_SCSI_PORTS: 9906 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 9907 break; 9908 case SVPD_SCSI_TPC: 9909 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 9910 break; 9911 case SVPD_BLOCK_LIMITS: 9912 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9913 goto err; 9914 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 9915 break; 9916 case SVPD_BDC: 9917 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9918 goto err; 9919 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 9920 break; 9921 case SVPD_LBP: 9922 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9923 goto err; 9924 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 9925 break; 9926 default: 9927 err: 9928 ctl_set_invalid_field(ctsio, 9929 /*sks_valid*/ 1, 9930 /*command*/ 1, 9931 /*field*/ 2, 9932 /*bit_valid*/ 0, 9933 /*bit*/ 0); 9934 ctl_done((union ctl_io *)ctsio); 9935 retval = CTL_RETVAL_COMPLETE; 9936 break; 9937 } 9938 9939 return (retval); 9940 } 9941 9942 /* 9943 * Standard INQUIRY data. 9944 */ 9945 static int 9946 ctl_inquiry_std(struct ctl_scsiio *ctsio) 9947 { 9948 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9949 struct ctl_port *port = CTL_PORT(ctsio); 9950 struct ctl_lun *lun = CTL_LUN(ctsio); 9951 struct scsi_inquiry_data *inq_ptr; 9952 struct scsi_inquiry *cdb; 9953 char *val; 9954 uint32_t alloc_len, data_len; 9955 ctl_port_type port_type; 9956 9957 port_type = port->port_type; 9958 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 9959 port_type = CTL_PORT_SCSI; 9960 9961 cdb = (struct scsi_inquiry *)ctsio->cdb; 9962 alloc_len = scsi_2btoul(cdb->length); 9963 9964 /* 9965 * We malloc the full inquiry data size here and fill it 9966 * in. If the user only asks for less, we'll give him 9967 * that much. 
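 *
 * For example, an initiator sending the classic 36-byte allocation
 * length still causes the whole structure to be filled in below, but
 * kern_data_len = min(data_len, alloc_len) means only the first 36
 * bytes are actually moved back; the remainder is simply never
 * transferred.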
9968 */ 9969 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); 9970 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9971 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 9972 ctsio->kern_sg_entries = 0; 9973 ctsio->kern_rel_offset = 0; 9974 ctsio->kern_data_len = min(data_len, alloc_len); 9975 ctsio->kern_total_len = ctsio->kern_data_len; 9976 9977 if (lun != NULL) { 9978 if ((lun->flags & CTL_LUN_PRIMARY_SC) || 9979 softc->ha_link >= CTL_HA_LINK_UNKNOWN) { 9980 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9981 lun->be_lun->lun_type; 9982 } else { 9983 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | 9984 lun->be_lun->lun_type; 9985 } 9986 if (lun->flags & CTL_LUN_REMOVABLE) 9987 inq_ptr->dev_qual2 |= SID_RMB; 9988 } else 9989 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 9990 9991 /* RMB in byte 2 is 0 */ 9992 inq_ptr->version = SCSI_REV_SPC5; 9993 9994 /* 9995 * According to SAM-3, even if a device only supports a single 9996 * level of LUN addressing, it should still set the HISUP bit: 9997 * 9998 * 4.9.1 Logical unit numbers overview 9999 * 10000 * All logical unit number formats described in this standard are 10001 * hierarchical in structure even when only a single level in that 10002 * hierarchy is used. The HISUP bit shall be set to one in the 10003 * standard INQUIRY data (see SPC-2) when any logical unit number 10004 * format described in this standard is used. Non-hierarchical 10005 * formats are outside the scope of this standard. 10006 * 10007 * Therefore we set the HiSup bit here. 10008 * 10009 * The response format is 2, per SPC-3. 10010 */ 10011 inq_ptr->response_format = SID_HiSup | 2; 10012 10013 inq_ptr->additional_length = data_len - 10014 (offsetof(struct scsi_inquiry_data, additional_length) + 1); 10015 CTL_DEBUG_PRINT(("additional_length = %d\n", 10016 inq_ptr->additional_length)); 10017 10018 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; 10019 if (port_type == CTL_PORT_SCSI) 10020 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10021 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10022 inq_ptr->flags = SID_CmdQue; 10023 if (port_type == CTL_PORT_SCSI) 10024 inq_ptr->flags |= SID_WBus16 | SID_Sync; 10025 10026 /* 10027 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10028 * We have 8 bytes for the vendor name, and 16 bytes for the device 10029 * name and 4 bytes for the revision. 
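 *
 * Note that none of these fields is NUL-terminated: the memset to ' '
 * followed by a length-limited strncpy below is what produces, for a
 * hypothetical "vendor" option of "ACME", the 8 on-the-wire bytes
 * 'A' 'C' 'M' 'E' ' ' ' ' ' ' ' '; the same idea applies to the
 * product and revision fields.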
10030 */ 10031 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10032 "vendor")) == NULL) { 10033 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10034 } else { 10035 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10036 strncpy(inq_ptr->vendor, val, 10037 min(sizeof(inq_ptr->vendor), strlen(val))); 10038 } 10039 if (lun == NULL) { 10040 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10041 sizeof(inq_ptr->product)); 10042 } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) { 10043 switch (lun->be_lun->lun_type) { 10044 case T_DIRECT: 10045 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10046 sizeof(inq_ptr->product)); 10047 break; 10048 case T_PROCESSOR: 10049 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10050 sizeof(inq_ptr->product)); 10051 break; 10052 case T_CDROM: 10053 strncpy(inq_ptr->product, CTL_CDROM_PRODUCT, 10054 sizeof(inq_ptr->product)); 10055 break; 10056 default: 10057 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 10058 sizeof(inq_ptr->product)); 10059 break; 10060 } 10061 } else { 10062 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10063 strncpy(inq_ptr->product, val, 10064 min(sizeof(inq_ptr->product), strlen(val))); 10065 } 10066 10067 /* 10068 * XXX make this a macro somewhere so it automatically gets 10069 * incremented when we make changes. 10070 */ 10071 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10072 "revision")) == NULL) { 10073 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10074 } else { 10075 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10076 strncpy(inq_ptr->revision, val, 10077 min(sizeof(inq_ptr->revision), strlen(val))); 10078 } 10079 10080 /* 10081 * For parallel SCSI, we support double transition and single 10082 * transition clocking. We also support QAS (Quick Arbitration 10083 * and Selection) and Information Unit transfers on both the 10084 * control and array devices. 
10085 */ 10086 if (port_type == CTL_PORT_SCSI) 10087 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10088 SID_SPI_IUS; 10089 10090 /* SAM-6 (no version claimed) */ 10091 scsi_ulto2b(0x00C0, inq_ptr->version1); 10092 /* SPC-5 (no version claimed) */ 10093 scsi_ulto2b(0x05C0, inq_ptr->version2); 10094 if (port_type == CTL_PORT_FC) { 10095 /* FCP-2 ANSI INCITS.350:2003 */ 10096 scsi_ulto2b(0x0917, inq_ptr->version3); 10097 } else if (port_type == CTL_PORT_SCSI) { 10098 /* SPI-4 ANSI INCITS.362:200x */ 10099 scsi_ulto2b(0x0B56, inq_ptr->version3); 10100 } else if (port_type == CTL_PORT_ISCSI) { 10101 /* iSCSI (no version claimed) */ 10102 scsi_ulto2b(0x0960, inq_ptr->version3); 10103 } else if (port_type == CTL_PORT_SAS) { 10104 /* SAS (no version claimed) */ 10105 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10106 } else if (port_type == CTL_PORT_UMASS) { 10107 /* USB Mass Storage Class Bulk-Only Transport, Revision 1.0 */ 10108 scsi_ulto2b(0x1730, inq_ptr->version3); 10109 } 10110 10111 if (lun == NULL) { 10112 /* SBC-4 (no version claimed) */ 10113 scsi_ulto2b(0x0600, inq_ptr->version4); 10114 } else { 10115 switch (lun->be_lun->lun_type) { 10116 case T_DIRECT: 10117 /* SBC-4 (no version claimed) */ 10118 scsi_ulto2b(0x0600, inq_ptr->version4); 10119 break; 10120 case T_PROCESSOR: 10121 break; 10122 case T_CDROM: 10123 /* MMC-6 (no version claimed) */ 10124 scsi_ulto2b(0x04E0, inq_ptr->version4); 10125 break; 10126 default: 10127 break; 10128 } 10129 } 10130 10131 ctl_set_success(ctsio); 10132 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10133 ctsio->be_move_done = ctl_config_move_done; 10134 ctl_datamove((union ctl_io *)ctsio); 10135 return (CTL_RETVAL_COMPLETE); 10136 } 10137 10138 int 10139 ctl_inquiry(struct ctl_scsiio *ctsio) 10140 { 10141 struct scsi_inquiry *cdb; 10142 int retval; 10143 10144 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10145 10146 cdb = (struct scsi_inquiry *)ctsio->cdb; 10147 if (cdb->byte2 & SI_EVPD) 10148 retval = ctl_inquiry_evpd(ctsio); 10149 else if (cdb->page_code == 0) 10150 retval = ctl_inquiry_std(ctsio); 10151 else { 10152 ctl_set_invalid_field(ctsio, 10153 /*sks_valid*/ 1, 10154 /*command*/ 1, 10155 /*field*/ 2, 10156 /*bit_valid*/ 0, 10157 /*bit*/ 0); 10158 ctl_done((union ctl_io *)ctsio); 10159 return (CTL_RETVAL_COMPLETE); 10160 } 10161 10162 return (retval); 10163 } 10164 10165 int 10166 ctl_get_config(struct ctl_scsiio *ctsio) 10167 { 10168 struct ctl_lun *lun = CTL_LUN(ctsio); 10169 struct scsi_get_config_header *hdr; 10170 struct scsi_get_config_feature *feature; 10171 struct scsi_get_config *cdb; 10172 uint32_t alloc_len, data_len; 10173 int rt, starting; 10174 10175 cdb = (struct scsi_get_config *)ctsio->cdb; 10176 rt = (cdb->rt & SGC_RT_MASK); 10177 starting = scsi_2btoul(cdb->starting_feature); 10178 alloc_len = scsi_2btoul(cdb->length); 10179 10180 data_len = sizeof(struct scsi_get_config_header) + 10181 sizeof(struct scsi_get_config_feature) + 8 + 10182 sizeof(struct scsi_get_config_feature) + 8 + 10183 sizeof(struct scsi_get_config_feature) + 4 + 10184 sizeof(struct scsi_get_config_feature) + 4 + 10185 sizeof(struct scsi_get_config_feature) + 8 + 10186 sizeof(struct scsi_get_config_feature) + 10187 sizeof(struct scsi_get_config_feature) + 4 + 10188 sizeof(struct scsi_get_config_feature) + 4 + 10189 sizeof(struct scsi_get_config_feature) + 4 + 10190 sizeof(struct scsi_get_config_feature) + 4 + 10191 sizeof(struct scsi_get_config_feature) + 4 + 10192 sizeof(struct scsi_get_config_feature) + 4; 10193 ctsio->kern_data_ptr = malloc(data_len, M_CTL, 
M_WAITOK | M_ZERO); 10194 ctsio->kern_sg_entries = 0; 10195 ctsio->kern_rel_offset = 0; 10196 10197 hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr; 10198 if (lun->flags & CTL_LUN_NO_MEDIA) 10199 scsi_ulto2b(0x0000, hdr->current_profile); 10200 else 10201 scsi_ulto2b(0x0010, hdr->current_profile); 10202 feature = (struct scsi_get_config_feature *)(hdr + 1); 10203 10204 if (starting > 0x003b) 10205 goto done; 10206 if (starting > 0x003a) 10207 goto f3b; 10208 if (starting > 0x002b) 10209 goto f3a; 10210 if (starting > 0x002a) 10211 goto f2b; 10212 if (starting > 0x001f) 10213 goto f2a; 10214 if (starting > 0x001e) 10215 goto f1f; 10216 if (starting > 0x001d) 10217 goto f1e; 10218 if (starting > 0x0010) 10219 goto f1d; 10220 if (starting > 0x0003) 10221 goto f10; 10222 if (starting > 0x0002) 10223 goto f3; 10224 if (starting > 0x0001) 10225 goto f2; 10226 if (starting > 0x0000) 10227 goto f1; 10228 10229 /* Profile List */ 10230 scsi_ulto2b(0x0000, feature->feature_code); 10231 feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT; 10232 feature->add_length = 8; 10233 scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */ 10234 feature->feature_data[2] = 0x00; 10235 scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */ 10236 feature->feature_data[6] = 0x01; 10237 feature = (struct scsi_get_config_feature *) 10238 &feature->feature_data[feature->add_length]; 10239 10240 f1: /* Core */ 10241 scsi_ulto2b(0x0001, feature->feature_code); 10242 feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10243 feature->add_length = 8; 10244 scsi_ulto4b(0x00000000, &feature->feature_data[0]); 10245 feature->feature_data[4] = 0x03; 10246 feature = (struct scsi_get_config_feature *) 10247 &feature->feature_data[feature->add_length]; 10248 10249 f2: /* Morphing */ 10250 scsi_ulto2b(0x0002, feature->feature_code); 10251 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10252 feature->add_length = 4; 10253 feature->feature_data[0] = 0x02; 10254 feature = (struct scsi_get_config_feature *) 10255 &feature->feature_data[feature->add_length]; 10256 10257 f3: /* Removable Medium */ 10258 scsi_ulto2b(0x0003, feature->feature_code); 10259 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10260 feature->add_length = 4; 10261 feature->feature_data[0] = 0x39; 10262 feature = (struct scsi_get_config_feature *) 10263 &feature->feature_data[feature->add_length]; 10264 10265 if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA)) 10266 goto done; 10267 10268 f10: /* Random Read */ 10269 scsi_ulto2b(0x0010, feature->feature_code); 10270 feature->flags = 0x00; 10271 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10272 feature->flags |= SGC_F_CURRENT; 10273 feature->add_length = 8; 10274 scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]); 10275 scsi_ulto2b(1, &feature->feature_data[4]); 10276 feature->feature_data[6] = 0x00; 10277 feature = (struct scsi_get_config_feature *) 10278 &feature->feature_data[feature->add_length]; 10279 10280 f1d: /* Multi-Read */ 10281 scsi_ulto2b(0x001D, feature->feature_code); 10282 feature->flags = 0x00; 10283 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10284 feature->flags |= SGC_F_CURRENT; 10285 feature->add_length = 0; 10286 feature = (struct scsi_get_config_feature *) 10287 &feature->feature_data[feature->add_length]; 10288 10289 f1e: /* CD Read */ 10290 scsi_ulto2b(0x001E, feature->feature_code); 10291 feature->flags = 0x00; 10292 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10293 feature->flags |= SGC_F_CURRENT; 10294 feature->add_length = 
4; 10295 feature->feature_data[0] = 0x00; 10296 feature = (struct scsi_get_config_feature *) 10297 &feature->feature_data[feature->add_length]; 10298 10299 f1f: /* DVD Read */ 10300 scsi_ulto2b(0x001F, feature->feature_code); 10301 feature->flags = 0x08; 10302 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10303 feature->flags |= SGC_F_CURRENT; 10304 feature->add_length = 4; 10305 feature->feature_data[0] = 0x01; 10306 feature->feature_data[2] = 0x03; 10307 feature = (struct scsi_get_config_feature *) 10308 &feature->feature_data[feature->add_length]; 10309 10310 f2a: /* DVD+RW */ 10311 scsi_ulto2b(0x002A, feature->feature_code); 10312 feature->flags = 0x04; 10313 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10314 feature->flags |= SGC_F_CURRENT; 10315 feature->add_length = 4; 10316 feature->feature_data[0] = 0x00; 10317 feature->feature_data[1] = 0x00; 10318 feature = (struct scsi_get_config_feature *) 10319 &feature->feature_data[feature->add_length]; 10320 10321 f2b: /* DVD+R */ 10322 scsi_ulto2b(0x002B, feature->feature_code); 10323 feature->flags = 0x00; 10324 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10325 feature->flags |= SGC_F_CURRENT; 10326 feature->add_length = 4; 10327 feature->feature_data[0] = 0x00; 10328 feature = (struct scsi_get_config_feature *) 10329 &feature->feature_data[feature->add_length]; 10330 10331 f3a: /* DVD+RW Dual Layer */ 10332 scsi_ulto2b(0x003A, feature->feature_code); 10333 feature->flags = 0x00; 10334 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10335 feature->flags |= SGC_F_CURRENT; 10336 feature->add_length = 4; 10337 feature->feature_data[0] = 0x00; 10338 feature->feature_data[1] = 0x00; 10339 feature = (struct scsi_get_config_feature *) 10340 &feature->feature_data[feature->add_length]; 10341 10342 f3b: /* DVD+R Dual Layer */ 10343 scsi_ulto2b(0x003B, feature->feature_code); 10344 feature->flags = 0x00; 10345 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10346 feature->flags |= SGC_F_CURRENT; 10347 feature->add_length = 4; 10348 feature->feature_data[0] = 0x00; 10349 feature = (struct scsi_get_config_feature *) 10350 &feature->feature_data[feature->add_length]; 10351 10352 done: 10353 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10354 if (rt == SGC_RT_SPECIFIC && data_len > 4) { 10355 feature = (struct scsi_get_config_feature *)(hdr + 1); 10356 if (scsi_2btoul(feature->feature_code) == starting) 10357 feature = (struct scsi_get_config_feature *) 10358 &feature->feature_data[feature->add_length]; 10359 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10360 } 10361 scsi_ulto4b(data_len - 4, hdr->data_length); 10362 ctsio->kern_data_len = min(data_len, alloc_len); 10363 ctsio->kern_total_len = ctsio->kern_data_len; 10364 10365 ctl_set_success(ctsio); 10366 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10367 ctsio->be_move_done = ctl_config_move_done; 10368 ctl_datamove((union ctl_io *)ctsio); 10369 return (CTL_RETVAL_COMPLETE); 10370 } 10371 10372 int 10373 ctl_get_event_status(struct ctl_scsiio *ctsio) 10374 { 10375 struct scsi_get_event_status_header *hdr; 10376 struct scsi_get_event_status *cdb; 10377 uint32_t alloc_len, data_len; 10378 int notif_class; 10379 10380 cdb = (struct scsi_get_event_status *)ctsio->cdb; 10381 if ((cdb->byte2 & SGESN_POLLED) == 0) { 10382 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 10383 /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 10384 ctl_done((union ctl_io *)ctsio); 10385 return (CTL_RETVAL_COMPLETE); 10386 } 10387 notif_class = cdb->notif_class; 10388 alloc_len = scsi_2btoul(cdb->length); 10389 10390 data_len = 
sizeof(struct scsi_get_event_status_header); 10391 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10392 ctsio->kern_sg_entries = 0; 10393 ctsio->kern_rel_offset = 0; 10394 ctsio->kern_data_len = min(data_len, alloc_len); 10395 ctsio->kern_total_len = ctsio->kern_data_len; 10396 10397 hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr; 10398 scsi_ulto2b(0, hdr->descr_length); 10399 hdr->nea_class = SGESN_NEA; 10400 hdr->supported_class = 0; 10401 10402 ctl_set_success(ctsio); 10403 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10404 ctsio->be_move_done = ctl_config_move_done; 10405 ctl_datamove((union ctl_io *)ctsio); 10406 return (CTL_RETVAL_COMPLETE); 10407 } 10408 10409 int 10410 ctl_mechanism_status(struct ctl_scsiio *ctsio) 10411 { 10412 struct scsi_mechanism_status_header *hdr; 10413 struct scsi_mechanism_status *cdb; 10414 uint32_t alloc_len, data_len; 10415 10416 cdb = (struct scsi_mechanism_status *)ctsio->cdb; 10417 alloc_len = scsi_2btoul(cdb->length); 10418 10419 data_len = sizeof(struct scsi_mechanism_status_header); 10420 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10421 ctsio->kern_sg_entries = 0; 10422 ctsio->kern_rel_offset = 0; 10423 ctsio->kern_data_len = min(data_len, alloc_len); 10424 ctsio->kern_total_len = ctsio->kern_data_len; 10425 10426 hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr; 10427 hdr->state1 = 0x00; 10428 hdr->state2 = 0xe0; 10429 scsi_ulto3b(0, hdr->lba); 10430 hdr->slots_num = 0; 10431 scsi_ulto2b(0, hdr->slots_length); 10432 10433 ctl_set_success(ctsio); 10434 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10435 ctsio->be_move_done = ctl_config_move_done; 10436 ctl_datamove((union ctl_io *)ctsio); 10437 return (CTL_RETVAL_COMPLETE); 10438 } 10439 10440 static void 10441 ctl_ultomsf(uint32_t lba, uint8_t *buf) 10442 { 10443 10444 lba += 150; 10445 buf[0] = 0; 10446 buf[1] = bin2bcd((lba / 75) / 60); 10447 buf[2] = bin2bcd((lba / 75) % 60); 10448 buf[3] = bin2bcd(lba % 75); 10449 } 10450 10451 int 10452 ctl_read_toc(struct ctl_scsiio *ctsio) 10453 { 10454 struct ctl_lun *lun = CTL_LUN(ctsio); 10455 struct scsi_read_toc_hdr *hdr; 10456 struct scsi_read_toc_type01_descr *descr; 10457 struct scsi_read_toc *cdb; 10458 uint32_t alloc_len, data_len; 10459 int format, msf; 10460 10461 cdb = (struct scsi_read_toc *)ctsio->cdb; 10462 msf = (cdb->byte2 & CD_MSF) != 0; 10463 format = cdb->format; 10464 alloc_len = scsi_2btoul(cdb->data_len); 10465 10466 data_len = sizeof(struct scsi_read_toc_hdr); 10467 if (format == 0) 10468 data_len += 2 * sizeof(struct scsi_read_toc_type01_descr); 10469 else 10470 data_len += sizeof(struct scsi_read_toc_type01_descr); 10471 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10472 ctsio->kern_sg_entries = 0; 10473 ctsio->kern_rel_offset = 0; 10474 ctsio->kern_data_len = min(data_len, alloc_len); 10475 ctsio->kern_total_len = ctsio->kern_data_len; 10476 10477 hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr; 10478 if (format == 0) { 10479 scsi_ulto2b(0x12, hdr->data_length); 10480 hdr->first = 1; 10481 hdr->last = 1; 10482 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10483 descr->addr_ctl = 0x14; 10484 descr->track_number = 1; 10485 if (msf) 10486 ctl_ultomsf(0, descr->track_start); 10487 else 10488 scsi_ulto4b(0, descr->track_start); 10489 descr++; 10490 descr->addr_ctl = 0x14; 10491 descr->track_number = 0xaa; 10492 if (msf) 10493 ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start); 10494 else 10495 
scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start); 10496 } else { 10497 scsi_ulto2b(0x0a, hdr->data_length); 10498 hdr->first = 1; 10499 hdr->last = 1; 10500 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10501 descr->addr_ctl = 0x14; 10502 descr->track_number = 1; 10503 if (msf) 10504 ctl_ultomsf(0, descr->track_start); 10505 else 10506 scsi_ulto4b(0, descr->track_start); 10507 } 10508 10509 ctl_set_success(ctsio); 10510 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10511 ctsio->be_move_done = ctl_config_move_done; 10512 ctl_datamove((union ctl_io *)ctsio); 10513 return (CTL_RETVAL_COMPLETE); 10514 } 10515 10516 /* 10517 * For known CDB types, parse the LBA and length. 10518 */ 10519 static int 10520 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10521 { 10522 if (io->io_hdr.io_type != CTL_IO_SCSI) 10523 return (1); 10524 10525 switch (io->scsiio.cdb[0]) { 10526 case COMPARE_AND_WRITE: { 10527 struct scsi_compare_and_write *cdb; 10528 10529 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10530 10531 *lba = scsi_8btou64(cdb->addr); 10532 *len = cdb->length; 10533 break; 10534 } 10535 case READ_6: 10536 case WRITE_6: { 10537 struct scsi_rw_6 *cdb; 10538 10539 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10540 10541 *lba = scsi_3btoul(cdb->addr); 10542 /* only 5 bits are valid in the most significant address byte */ 10543 *lba &= 0x1fffff; 10544 *len = cdb->length; 10545 break; 10546 } 10547 case READ_10: 10548 case WRITE_10: { 10549 struct scsi_rw_10 *cdb; 10550 10551 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10552 10553 *lba = scsi_4btoul(cdb->addr); 10554 *len = scsi_2btoul(cdb->length); 10555 break; 10556 } 10557 case WRITE_VERIFY_10: { 10558 struct scsi_write_verify_10 *cdb; 10559 10560 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10561 10562 *lba = scsi_4btoul(cdb->addr); 10563 *len = scsi_2btoul(cdb->length); 10564 break; 10565 } 10566 case READ_12: 10567 case WRITE_12: { 10568 struct scsi_rw_12 *cdb; 10569 10570 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10571 10572 *lba = scsi_4btoul(cdb->addr); 10573 *len = scsi_4btoul(cdb->length); 10574 break; 10575 } 10576 case WRITE_VERIFY_12: { 10577 struct scsi_write_verify_12 *cdb; 10578 10579 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10580 10581 *lba = scsi_4btoul(cdb->addr); 10582 *len = scsi_4btoul(cdb->length); 10583 break; 10584 } 10585 case READ_16: 10586 case WRITE_16: { 10587 struct scsi_rw_16 *cdb; 10588 10589 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10590 10591 *lba = scsi_8btou64(cdb->addr); 10592 *len = scsi_4btoul(cdb->length); 10593 break; 10594 } 10595 case WRITE_ATOMIC_16: { 10596 struct scsi_write_atomic_16 *cdb; 10597 10598 cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb; 10599 10600 *lba = scsi_8btou64(cdb->addr); 10601 *len = scsi_2btoul(cdb->length); 10602 break; 10603 } 10604 case WRITE_VERIFY_16: { 10605 struct scsi_write_verify_16 *cdb; 10606 10607 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10608 10609 *lba = scsi_8btou64(cdb->addr); 10610 *len = scsi_4btoul(cdb->length); 10611 break; 10612 } 10613 case WRITE_SAME_10: { 10614 struct scsi_write_same_10 *cdb; 10615 10616 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10617 10618 *lba = scsi_4btoul(cdb->addr); 10619 *len = scsi_2btoul(cdb->length); 10620 break; 10621 } 10622 case WRITE_SAME_16: { 10623 struct scsi_write_same_16 *cdb; 10624 10625 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10626 10627 *lba = scsi_8btou64(cdb->addr); 10628 *len = scsi_4btoul(cdb->length); 10629 break; 10630 } 
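/*
 * The (lba, len) pairs extracted in this switch feed the extent overlap
 * test in ctl_extent_check_lba().  As a worked example (illustrative
 * numbers): if the serialization table asks for an extent check, a
 * WRITE(10) at LBA 100 for 8 blocks and a later WRITE(10) at LBA 104
 * for 8 blocks give end LBAs 107 and 111; neither range ends before the
 * other starts, so the second command gets CTL_ACTION_BLOCK until the
 * first completes.
 */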
10631 case VERIFY_10: { 10632 struct scsi_verify_10 *cdb; 10633 10634 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10635 10636 *lba = scsi_4btoul(cdb->addr); 10637 *len = scsi_2btoul(cdb->length); 10638 break; 10639 } 10640 case VERIFY_12: { 10641 struct scsi_verify_12 *cdb; 10642 10643 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10644 10645 *lba = scsi_4btoul(cdb->addr); 10646 *len = scsi_4btoul(cdb->length); 10647 break; 10648 } 10649 case VERIFY_16: { 10650 struct scsi_verify_16 *cdb; 10651 10652 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10653 10654 *lba = scsi_8btou64(cdb->addr); 10655 *len = scsi_4btoul(cdb->length); 10656 break; 10657 } 10658 case UNMAP: { 10659 *lba = 0; 10660 *len = UINT64_MAX; 10661 break; 10662 } 10663 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10664 struct scsi_get_lba_status *cdb; 10665 10666 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 10667 *lba = scsi_8btou64(cdb->addr); 10668 *len = UINT32_MAX; 10669 break; 10670 } 10671 default: 10672 return (1); 10673 break; /* NOTREACHED */ 10674 } 10675 10676 return (0); 10677 } 10678 10679 static ctl_action 10680 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, 10681 bool seq) 10682 { 10683 uint64_t endlba1, endlba2; 10684 10685 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10686 endlba2 = lba2 + len2 - 1; 10687 10688 if ((endlba1 < lba2) || (endlba2 < lba1)) 10689 return (CTL_ACTION_PASS); 10690 else 10691 return (CTL_ACTION_BLOCK); 10692 } 10693 10694 static int 10695 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10696 { 10697 struct ctl_ptr_len_flags *ptrlen; 10698 struct scsi_unmap_desc *buf, *end, *range; 10699 uint64_t lba; 10700 uint32_t len; 10701 10702 /* If not UNMAP -- go other way. */ 10703 if (io->io_hdr.io_type != CTL_IO_SCSI || 10704 io->scsiio.cdb[0] != UNMAP) 10705 return (CTL_ACTION_ERROR); 10706 10707 /* If UNMAP without data -- block and wait for data. */ 10708 ptrlen = (struct ctl_ptr_len_flags *) 10709 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10710 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10711 ptrlen->ptr == NULL) 10712 return (CTL_ACTION_BLOCK); 10713 10714 /* UNMAP with data -- check for collision. 
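 *
 * The per-range test below is the usual half-open interval overlap
 * check: a descriptor covering [lba, lba+len) collides with the
 * command's [lba2, lba2+len2) iff lba < lba2+len2 and lba+len > lba2.
 * For example (illustrative numbers), an in-flight UNMAP covering
 * LBAs 0-15 blocks a WRITE at LBA 8, but not one starting at LBA 16.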
*/ 10715 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10716 end = buf + ptrlen->len / sizeof(*buf); 10717 for (range = buf; range < end; range++) { 10718 lba = scsi_8btou64(range->lba); 10719 len = scsi_4btoul(range->length); 10720 if ((lba < lba2 + len2) && (lba + len > lba2)) 10721 return (CTL_ACTION_BLOCK); 10722 } 10723 return (CTL_ACTION_PASS); 10724 } 10725 10726 static ctl_action 10727 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10728 { 10729 uint64_t lba1, lba2; 10730 uint64_t len1, len2; 10731 int retval; 10732 10733 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10734 return (CTL_ACTION_ERROR); 10735 10736 retval = ctl_extent_check_unmap(io1, lba2, len2); 10737 if (retval != CTL_ACTION_ERROR) 10738 return (retval); 10739 10740 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10741 return (CTL_ACTION_ERROR); 10742 10743 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10744 seq = FALSE; 10745 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 10746 } 10747 10748 static ctl_action 10749 ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) 10750 { 10751 uint64_t lba1, lba2; 10752 uint64_t len1, len2; 10753 10754 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10755 return (CTL_ACTION_PASS); 10756 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10757 return (CTL_ACTION_ERROR); 10758 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10759 return (CTL_ACTION_ERROR); 10760 10761 if (lba1 + len1 == lba2) 10762 return (CTL_ACTION_BLOCK); 10763 return (CTL_ACTION_PASS); 10764 } 10765 10766 static ctl_action 10767 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 10768 union ctl_io *ooa_io) 10769 { 10770 const struct ctl_cmd_entry *pending_entry, *ooa_entry; 10771 const ctl_serialize_action *serialize_row; 10772 10773 /* 10774 * The initiator attempted multiple untagged commands at the same 10775 * time. Can't do that. 10776 */ 10777 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10778 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10779 && ((pending_io->io_hdr.nexus.targ_port == 10780 ooa_io->io_hdr.nexus.targ_port) 10781 && (pending_io->io_hdr.nexus.initid == 10782 ooa_io->io_hdr.nexus.initid)) 10783 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10784 CTL_FLAG_STATUS_SENT)) == 0)) 10785 return (CTL_ACTION_OVERLAP); 10786 10787 /* 10788 * The initiator attempted to send multiple tagged commands with 10789 * the same ID. (It's fine if different initiators have the same 10790 * tag ID.) 10791 * 10792 * Even if all of those conditions are true, we don't kill the I/O 10793 * if the command ahead of us has been aborted. We won't end up 10794 * sending it to the FETD, and it's perfectly legal to resend a 10795 * command with the same tag number as long as the previous 10796 * instance of this tag number has been aborted somehow. 10797 */ 10798 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10799 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10800 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 10801 && ((pending_io->io_hdr.nexus.targ_port == 10802 ooa_io->io_hdr.nexus.targ_port) 10803 && (pending_io->io_hdr.nexus.initid == 10804 ooa_io->io_hdr.nexus.initid)) 10805 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10806 CTL_FLAG_STATUS_SENT)) == 0)) 10807 return (CTL_ACTION_OVERLAP_TAG); 10808 10809 /* 10810 * If we get a head of queue tag, SAM-3 says that we should 10811 * immediately execute it. 10812 * 10813 * What happens if this command would normally block for some other 10814 * reason? e.g. 
a request sense with a head of queue tag 10815 * immediately after a write. Normally that would block, but this 10816 * will result in its getting executed immediately... 10817 * 10818 * We currently return "pass" instead of "skip", so we'll end up 10819 * going through the rest of the queue to check for overlapped tags. 10820 * 10821 * XXX KDM check for other types of blockage first?? 10822 */ 10823 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10824 return (CTL_ACTION_PASS); 10825 10826 /* 10827 * Ordered tags have to block until all items ahead of them 10828 * have completed. If we get called with an ordered tag, we always 10829 * block, if something else is ahead of us in the queue. 10830 */ 10831 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 10832 return (CTL_ACTION_BLOCK); 10833 10834 /* 10835 * Simple tags get blocked until all head of queue and ordered tags 10836 * ahead of them have completed. I'm lumping untagged commands in 10837 * with simple tags here. XXX KDM is that the right thing to do? 10838 */ 10839 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10840 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 10841 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10842 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 10843 return (CTL_ACTION_BLOCK); 10844 10845 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); 10846 KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT, 10847 ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p", 10848 __func__, pending_entry->seridx, pending_io->scsiio.cdb[0], 10849 pending_io->scsiio.cdb[1], pending_io)); 10850 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); 10851 if (ooa_entry->seridx == CTL_SERIDX_INVLD) 10852 return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */ 10853 KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT, 10854 ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p", 10855 __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0], 10856 ooa_io->scsiio.cdb[1], ooa_io)); 10857 10858 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 10859 10860 switch (serialize_row[pending_entry->seridx]) { 10861 case CTL_SER_BLOCK: 10862 return (CTL_ACTION_BLOCK); 10863 case CTL_SER_EXTENT: 10864 return (ctl_extent_check(ooa_io, pending_io, 10865 (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10866 case CTL_SER_EXTENTOPT: 10867 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != 10868 SCP_QUEUE_ALG_UNRESTRICTED) 10869 return (ctl_extent_check(ooa_io, pending_io, 10870 (lun->be_lun && 10871 lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10872 return (CTL_ACTION_PASS); 10873 case CTL_SER_EXTENTSEQ: 10874 if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) 10875 return (ctl_extent_check_seq(ooa_io, pending_io)); 10876 return (CTL_ACTION_PASS); 10877 case CTL_SER_PASS: 10878 return (CTL_ACTION_PASS); 10879 case CTL_SER_BLOCKOPT: 10880 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != 10881 SCP_QUEUE_ALG_UNRESTRICTED) 10882 return (CTL_ACTION_BLOCK); 10883 return (CTL_ACTION_PASS); 10884 case CTL_SER_SKIP: 10885 return (CTL_ACTION_SKIP); 10886 default: 10887 panic("%s: Invalid serialization value %d for %d => %d", 10888 __func__, serialize_row[pending_entry->seridx], 10889 pending_entry->seridx, ooa_entry->seridx); 10890 } 10891 10892 return (CTL_ACTION_ERROR); 10893 } 10894 10895 /* 10896 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 
10897 * Assumptions: 10898 * - pending_io is generally either incoming, or on the blocked queue 10899 * - starting I/O is the I/O we want to start the check with. 10900 */ 10901 static ctl_action 10902 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 10903 union ctl_io *starting_io) 10904 { 10905 union ctl_io *ooa_io; 10906 ctl_action action; 10907 10908 mtx_assert(&lun->lun_lock, MA_OWNED); 10909 10910 /* 10911 * Run back along the OOA queue, starting with the current 10912 * blocked I/O and going through every I/O before it on the 10913 * queue. If starting_io is NULL, we'll just end up returning 10914 * CTL_ACTION_PASS. 10915 */ 10916 for (ooa_io = starting_io; ooa_io != NULL; 10917 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 10918 ooa_links)){ 10919 10920 /* 10921 * This routine just checks to see whether 10922 * cur_blocked is blocked by ooa_io, which is ahead 10923 * of it in the queue. It doesn't queue/dequeue 10924 * cur_blocked. 10925 */ 10926 action = ctl_check_for_blockage(lun, pending_io, ooa_io); 10927 switch (action) { 10928 case CTL_ACTION_BLOCK: 10929 case CTL_ACTION_OVERLAP: 10930 case CTL_ACTION_OVERLAP_TAG: 10931 case CTL_ACTION_SKIP: 10932 case CTL_ACTION_ERROR: 10933 return (action); 10934 break; /* NOTREACHED */ 10935 case CTL_ACTION_PASS: 10936 break; 10937 default: 10938 panic("%s: Invalid action %d\n", __func__, action); 10939 } 10940 } 10941 10942 return (CTL_ACTION_PASS); 10943 } 10944 10945 /* 10946 * Assumptions: 10947 * - An I/O has just completed, and has been removed from the per-LUN OOA 10948 * queue, so some items on the blocked queue may now be unblocked. 10949 */ 10950 static int 10951 ctl_check_blocked(struct ctl_lun *lun) 10952 { 10953 struct ctl_softc *softc = lun->ctl_softc; 10954 union ctl_io *cur_blocked, *next_blocked; 10955 10956 mtx_assert(&lun->lun_lock, MA_OWNED); 10957 10958 /* 10959 * Run forward from the head of the blocked queue, checking each 10960 * entry against the I/Os prior to it on the OOA queue to see if 10961 * there is still any blockage. 10962 * 10963 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 10964 * with our removing a variable on it while it is traversing the 10965 * list. 10966 */ 10967 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 10968 cur_blocked != NULL; cur_blocked = next_blocked) { 10969 union ctl_io *prev_ooa; 10970 ctl_action action; 10971 10972 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 10973 blocked_links); 10974 10975 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 10976 ctl_ooaq, ooa_links); 10977 10978 /* 10979 * If cur_blocked happens to be the first item in the OOA 10980 * queue now, prev_ooa will be NULL, and the action 10981 * returned will just be CTL_ACTION_PASS. 10982 */ 10983 action = ctl_check_ooa(lun, cur_blocked, prev_ooa); 10984 10985 switch (action) { 10986 case CTL_ACTION_BLOCK: 10987 /* Nothing to do here, still blocked */ 10988 break; 10989 case CTL_ACTION_OVERLAP: 10990 case CTL_ACTION_OVERLAP_TAG: 10991 /* 10992 * This shouldn't happen! In theory we've already 10993 * checked this command for overlap... 10994 */ 10995 break; 10996 case CTL_ACTION_PASS: 10997 case CTL_ACTION_SKIP: { 10998 const struct ctl_cmd_entry *entry; 10999 11000 /* 11001 * The skip case shouldn't happen, this transaction 11002 * should have never made it onto the blocked queue. 11003 */ 11004 /* 11005 * This I/O is no longer blocked, we can remove it 11006 * from the blocked queue. 
Since this is a TAILQ 11007 * (doubly linked list), we can do O(1) removals 11008 * from any place on the list. 11009 */ 11010 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 11011 blocked_links); 11012 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 11013 11014 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 11015 (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){ 11016 /* 11017 * Need to send IO back to original side to 11018 * run 11019 */ 11020 union ctl_ha_msg msg_info; 11021 11022 cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11023 msg_info.hdr.original_sc = 11024 cur_blocked->io_hdr.original_sc; 11025 msg_info.hdr.serializing_sc = cur_blocked; 11026 msg_info.hdr.msg_type = CTL_MSG_R2R; 11027 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11028 sizeof(msg_info.hdr), M_NOWAIT); 11029 break; 11030 } 11031 entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL); 11032 11033 /* 11034 * Check this I/O for LUN state changes that may 11035 * have happened while this command was blocked. 11036 * The LUN state may have been changed by a command 11037 * ahead of us in the queue, so we need to re-check 11038 * for any states that can be caused by SCSI 11039 * commands. 11040 */ 11041 if (ctl_scsiio_lun_check(lun, entry, 11042 &cur_blocked->scsiio) == 0) { 11043 cur_blocked->io_hdr.flags |= 11044 CTL_FLAG_IS_WAS_ON_RTR; 11045 ctl_enqueue_rtr(cur_blocked); 11046 } else 11047 ctl_done(cur_blocked); 11048 break; 11049 } 11050 default: 11051 /* 11052 * This probably shouldn't happen -- we shouldn't 11053 * get CTL_ACTION_ERROR, or anything else. 11054 */ 11055 break; 11056 } 11057 } 11058 11059 return (CTL_RETVAL_COMPLETE); 11060 } 11061 11062 /* 11063 * This routine (with one exception) checks LUN flags that can be set by 11064 * commands ahead of us in the OOA queue. These flags have to be checked 11065 * when a command initially comes in, and when we pull a command off the 11066 * blocked queue and are preparing to execute it. The reason we have to 11067 * check these flags for commands on the blocked queue is that the LUN 11068 * state may have been changed by a command ahead of us while we're on the 11069 * blocked queue. 11070 * 11071 * Ordering is somewhat important with these checks, so please pay 11072 * careful attention to the placement of any new checks. 11073 */ 11074 static int 11075 ctl_scsiio_lun_check(struct ctl_lun *lun, 11076 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 11077 { 11078 struct ctl_softc *softc = lun->ctl_softc; 11079 int retval; 11080 uint32_t residx; 11081 11082 retval = 0; 11083 11084 mtx_assert(&lun->lun_lock, MA_OWNED); 11085 11086 /* 11087 * If this shelf is a secondary shelf controller, we may have to 11088 * reject some commands disallowed by HA mode and link state. 
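 *
 * Summarizing the checks below (no new behavior implied):
 *
 *   HA link offline              -> "target port in unavailable state"
 *                                   unless the command is OK_ON_UNAVAIL
 *   peer SC not primary either   -> "asymmetric access state transition"
 *                                   unless OK_ON_UNAVAIL
 *   active/standby HA mode       -> "target port in standby state"
 *                                   unless OK_ON_STANDBY
 *
 * In XFER mode the remaining checks are skipped here and left to the
 * side that will actually execute the command.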
11089 */ 11090 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11091 if (softc->ha_link == CTL_HA_LINK_OFFLINE && 11092 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11093 ctl_set_lun_unavail(ctsio); 11094 retval = 1; 11095 goto bailout; 11096 } 11097 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && 11098 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11099 ctl_set_lun_transit(ctsio); 11100 retval = 1; 11101 goto bailout; 11102 } 11103 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && 11104 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { 11105 ctl_set_lun_standby(ctsio); 11106 retval = 1; 11107 goto bailout; 11108 } 11109 11110 /* The rest of checks are only done on executing side */ 11111 if (softc->ha_mode == CTL_HA_MODE_XFER) 11112 goto bailout; 11113 } 11114 11115 if (entry->pattern & CTL_LUN_PAT_WRITE) { 11116 if (lun->be_lun && 11117 lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { 11118 ctl_set_hw_write_protected(ctsio); 11119 retval = 1; 11120 goto bailout; 11121 } 11122 if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) { 11123 ctl_set_sense(ctsio, /*current_error*/ 1, 11124 /*sense_key*/ SSD_KEY_DATA_PROTECT, 11125 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 11126 retval = 1; 11127 goto bailout; 11128 } 11129 } 11130 11131 /* 11132 * Check for a reservation conflict. If this command isn't allowed 11133 * even on reserved LUNs, and if this initiator isn't the one who 11134 * reserved us, reject the command with a reservation conflict. 11135 */ 11136 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11137 if ((lun->flags & CTL_LUN_RESERVED) 11138 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 11139 if (lun->res_idx != residx) { 11140 ctl_set_reservation_conflict(ctsio); 11141 retval = 1; 11142 goto bailout; 11143 } 11144 } 11145 11146 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 11147 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 11148 /* No reservation or command is allowed. */; 11149 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 11150 (lun->pr_res_type == SPR_TYPE_WR_EX || 11151 lun->pr_res_type == SPR_TYPE_WR_EX_RO || 11152 lun->pr_res_type == SPR_TYPE_WR_EX_AR)) { 11153 /* The command is allowed for Write Exclusive resv. */; 11154 } else { 11155 /* 11156 * if we aren't registered or it's a res holder type 11157 * reservation and this isn't the res holder then set a 11158 * conflict. 
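 * ("res holder type" means the single-holder persistent reservation
 * types: a pr_res_type below 4 is Write Exclusive or Exclusive Access,
 * which have exactly one holder; for the registrants-only and
 * all-registrants variants it is enough that this initiator is
 * registered, i.e. has a non-zero key in the test below.)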
11159 */ 11160 if (ctl_get_prkey(lun, residx) == 0 || 11161 (residx != lun->pr_res_idx && lun->pr_res_type < 4)) { 11162 ctl_set_reservation_conflict(ctsio); 11163 retval = 1; 11164 goto bailout; 11165 } 11166 } 11167 11168 if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) { 11169 if (lun->flags & CTL_LUN_EJECTED) 11170 ctl_set_lun_ejected(ctsio); 11171 else if (lun->flags & CTL_LUN_NO_MEDIA) { 11172 if (lun->flags & CTL_LUN_REMOVABLE) 11173 ctl_set_lun_no_media(ctsio); 11174 else 11175 ctl_set_lun_int_reqd(ctsio); 11176 } else if (lun->flags & CTL_LUN_STOPPED) 11177 ctl_set_lun_stopped(ctsio); 11178 else 11179 goto bailout; 11180 retval = 1; 11181 goto bailout; 11182 } 11183 11184 bailout: 11185 return (retval); 11186 } 11187 11188 static void 11189 ctl_failover_io(union ctl_io *io, int have_lock) 11190 { 11191 ctl_set_busy(&io->scsiio); 11192 ctl_done(io); 11193 } 11194 11195 static void 11196 ctl_failover_lun(union ctl_io *rio) 11197 { 11198 struct ctl_softc *softc = CTL_SOFTC(rio); 11199 struct ctl_lun *lun; 11200 struct ctl_io_hdr *io, *next_io; 11201 uint32_t targ_lun; 11202 11203 targ_lun = rio->io_hdr.nexus.targ_mapped_lun; 11204 CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", targ_lun)); 11205 11206 /* Find and lock the LUN. */ 11207 mtx_lock(&softc->ctl_lock); 11208 if (targ_lun > CTL_MAX_LUNS || 11209 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11210 mtx_unlock(&softc->ctl_lock); 11211 return; 11212 } 11213 mtx_lock(&lun->lun_lock); 11214 mtx_unlock(&softc->ctl_lock); 11215 if (lun->flags & CTL_LUN_DISABLED) { 11216 mtx_unlock(&lun->lun_lock); 11217 return; 11218 } 11219 11220 if (softc->ha_mode == CTL_HA_MODE_XFER) { 11221 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11222 /* We are master */ 11223 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11224 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11225 io->flags |= CTL_FLAG_ABORT; 11226 io->flags |= CTL_FLAG_FAILOVER; 11227 } else { /* This can be only due to DATAMOVE */ 11228 io->msg_type = CTL_MSG_DATAMOVE_DONE; 11229 io->flags &= ~CTL_FLAG_DMA_INPROG; 11230 io->flags |= CTL_FLAG_IO_ACTIVE; 11231 io->port_status = 31340; 11232 ctl_enqueue_isc((union ctl_io *)io); 11233 } 11234 } 11235 /* We are slave */ 11236 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11237 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11238 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11239 io->flags |= CTL_FLAG_FAILOVER; 11240 } else { 11241 ctl_set_busy(&((union ctl_io *)io)-> 11242 scsiio); 11243 ctl_done((union ctl_io *)io); 11244 } 11245 } 11246 } 11247 } else { /* SERIALIZE modes */ 11248 TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links, 11249 next_io) { 11250 /* We are master */ 11251 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11252 TAILQ_REMOVE(&lun->blocked_queue, io, 11253 blocked_links); 11254 io->flags &= ~CTL_FLAG_BLOCKED; 11255 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11256 ctl_free_io((union ctl_io *)io); 11257 } 11258 } 11259 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11260 /* We are master */ 11261 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11262 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11263 ctl_free_io((union ctl_io *)io); 11264 } 11265 /* We are slave */ 11266 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11267 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11268 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { 11269 ctl_set_busy(&((union ctl_io *)io)-> 11270 scsiio); 11271 ctl_done((union ctl_io *)io); 11272 } 11273 } 11274 } 11275 ctl_check_blocked(lun); 11276 } 11277 mtx_unlock(&lun->lun_lock); 11278 } 11279 11280 static int 
11281 ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) 11282 { 11283 struct ctl_lun *lun; 11284 const struct ctl_cmd_entry *entry; 11285 uint32_t initidx, targ_lun; 11286 int retval = 0; 11287 11288 lun = NULL; 11289 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 11290 if (targ_lun < CTL_MAX_LUNS) 11291 lun = softc->ctl_luns[targ_lun]; 11292 if (lun) { 11293 /* 11294 * If the LUN is invalid, pretend that it doesn't exist. 11295 * It will go away as soon as all pending I/O has been 11296 * completed. 11297 */ 11298 mtx_lock(&lun->lun_lock); 11299 if (lun->flags & CTL_LUN_DISABLED) { 11300 mtx_unlock(&lun->lun_lock); 11301 lun = NULL; 11302 } 11303 } 11304 CTL_LUN(ctsio) = lun; 11305 if (lun) { 11306 CTL_BACKEND_LUN(ctsio) = lun->be_lun; 11307 11308 /* 11309 * Every I/O goes into the OOA queue for a particular LUN, 11310 * and stays there until completion. 11311 */ 11312 #ifdef CTL_TIME_IO 11313 if (TAILQ_EMPTY(&lun->ooa_queue)) 11314 lun->idle_time += getsbinuptime() - lun->last_busy; 11315 #endif 11316 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 11317 } 11318 11319 /* Get command entry and return error if it is unsuppotyed. */ 11320 entry = ctl_validate_command(ctsio); 11321 if (entry == NULL) { 11322 if (lun) 11323 mtx_unlock(&lun->lun_lock); 11324 return (retval); 11325 } 11326 11327 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 11328 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 11329 11330 /* 11331 * Check to see whether we can send this command to LUNs that don't 11332 * exist. This should pretty much only be the case for inquiry 11333 * and request sense. Further checks, below, really require having 11334 * a LUN, so we can't really check the command anymore. Just put 11335 * it on the rtr queue. 11336 */ 11337 if (lun == NULL) { 11338 if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) { 11339 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11340 ctl_enqueue_rtr((union ctl_io *)ctsio); 11341 return (retval); 11342 } 11343 11344 ctl_set_unsupported_lun(ctsio); 11345 ctl_done((union ctl_io *)ctsio); 11346 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); 11347 return (retval); 11348 } else { 11349 /* 11350 * Make sure we support this particular command on this LUN. 11351 * e.g., we don't support writes to the control LUN. 11352 */ 11353 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 11354 mtx_unlock(&lun->lun_lock); 11355 ctl_set_invalid_opcode(ctsio); 11356 ctl_done((union ctl_io *)ctsio); 11357 return (retval); 11358 } 11359 } 11360 11361 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11362 11363 #ifdef CTL_WITH_CA 11364 /* 11365 * If we've got a request sense, it'll clear the contingent 11366 * allegiance condition. Otherwise, if we have a CA condition for 11367 * this initiator, clear it, because it sent down a command other 11368 * than request sense. 11369 */ 11370 if ((ctsio->cdb[0] != REQUEST_SENSE) 11371 && (ctl_is_set(lun->have_ca, initidx))) 11372 ctl_clear_mask(lun->have_ca, initidx); 11373 #endif 11374 11375 /* 11376 * If the command has this flag set, it handles its own unit 11377 * attention reporting, we shouldn't do anything. Otherwise we 11378 * check for any pending unit attentions, and send them back to the 11379 * initiator. We only do this when a command initially comes in, 11380 * not when we pull it off the blocked queue. 
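 *
 * A concrete example of what ctl_build_ua() produces here: after a LUN
 * is added or removed, the next command from each initiator completes
 * with CHECK CONDITION and UNIT ATTENTION sense of "reported LUNs data
 * has changed" (ASC/ASCQ 3Fh/0Eh), which is exactly the case discussed
 * below.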
	 *
	 * According to SAM-3, section 5.3.2, the order that things get
	 * presented back to the host is basically unit attentions caused
	 * by some sort of reset event, busy status, reservation conflicts
	 * or task set full, and finally any other status.
	 *
	 * One issue here is that some of the unit attentions we report
	 * don't fall into the "reset" category (e.g. "reported luns data
	 * has changed").  So reporting it here, before the reservation
	 * check, may be technically wrong.  I guess the only thing to do
	 * would be to check for and report the reset events here, and then
	 * check for the other unit attention types after we check for a
	 * reservation conflict.
	 *
	 * XXX KDM need to fix this
	 */
	if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
		ctl_ua_type ua_type;
		u_int sense_len = 0;

		ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data,
		    &sense_len, SSD_TYPE_NONE);
		if (ua_type != CTL_UA_NONE) {
			mtx_unlock(&lun->lun_lock);
			ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
			ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
			ctsio->sense_len = sense_len;
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
	}

	if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
		mtx_unlock(&lun->lun_lock);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}

	/*
	 * XXX CHD this is where we want to send IO to the other side if
	 * this LUN is secondary on this SC.  We will need to make a copy
	 * of the IO and flag the IO on this side as SENT_2OTHER and flag
	 * the copy we send as FROM_OTHER.
	 * We also need to stuff the address of the original IO so we can
	 * find it easily.  Something similar will need to be done on the
	 * other side so when we are done we can find the copy.
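	 * (The block below does just that: the I/O is flagged
	 * CTL_FLAG_SENT_2OTHER_SC, and a CTL_MSG_SERIALIZE message carrying
	 * the nexus, tag and CDB is sent to the peer with ctl_ha_msg_send();
	 * msg_info.hdr.original_sc records this I/O so it can be found
	 * again when the peer answers.)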
11428 */ 11429 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 11430 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && 11431 (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { 11432 union ctl_ha_msg msg_info; 11433 int isc_retval; 11434 11435 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11436 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11437 mtx_unlock(&lun->lun_lock); 11438 11439 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11440 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11441 msg_info.hdr.serializing_sc = NULL; 11442 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11443 msg_info.scsi.tag_num = ctsio->tag_num; 11444 msg_info.scsi.tag_type = ctsio->tag_type; 11445 msg_info.scsi.cdb_len = ctsio->cdb_len; 11446 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11447 11448 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11449 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), 11450 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { 11451 ctl_set_busy(ctsio); 11452 ctl_done((union ctl_io *)ctsio); 11453 return (retval); 11454 } 11455 return (retval); 11456 } 11457 11458 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11459 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11460 ctl_ooaq, ooa_links))) { 11461 case CTL_ACTION_BLOCK: 11462 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 11463 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 11464 blocked_links); 11465 mtx_unlock(&lun->lun_lock); 11466 return (retval); 11467 case CTL_ACTION_PASS: 11468 case CTL_ACTION_SKIP: 11469 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11470 mtx_unlock(&lun->lun_lock); 11471 ctl_enqueue_rtr((union ctl_io *)ctsio); 11472 break; 11473 case CTL_ACTION_OVERLAP: 11474 mtx_unlock(&lun->lun_lock); 11475 ctl_set_overlapped_cmd(ctsio); 11476 ctl_done((union ctl_io *)ctsio); 11477 break; 11478 case CTL_ACTION_OVERLAP_TAG: 11479 mtx_unlock(&lun->lun_lock); 11480 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11481 ctl_done((union ctl_io *)ctsio); 11482 break; 11483 case CTL_ACTION_ERROR: 11484 default: 11485 mtx_unlock(&lun->lun_lock); 11486 ctl_set_internal_failure(ctsio, 11487 /*sks_valid*/ 0, 11488 /*retry_count*/ 0); 11489 ctl_done((union ctl_io *)ctsio); 11490 break; 11491 } 11492 return (retval); 11493 } 11494 11495 const struct ctl_cmd_entry * 11496 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11497 { 11498 const struct ctl_cmd_entry *entry; 11499 int service_action; 11500 11501 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11502 if (sa) 11503 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11504 if (entry->flags & CTL_CMD_FLAG_SA5) { 11505 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11506 entry = &((const struct ctl_cmd_entry *) 11507 entry->execute)[service_action]; 11508 } 11509 return (entry); 11510 } 11511 11512 const struct ctl_cmd_entry * 11513 ctl_validate_command(struct ctl_scsiio *ctsio) 11514 { 11515 const struct ctl_cmd_entry *entry; 11516 int i, sa; 11517 uint8_t diff; 11518 11519 entry = ctl_get_cmd_entry(ctsio, &sa); 11520 if (entry->execute == NULL) { 11521 if (sa) 11522 ctl_set_invalid_field(ctsio, 11523 /*sks_valid*/ 1, 11524 /*command*/ 1, 11525 /*field*/ 1, 11526 /*bit_valid*/ 1, 11527 /*bit*/ 4); 11528 else 11529 ctl_set_invalid_opcode(ctsio); 11530 ctl_done((union ctl_io *)ctsio); 11531 return (NULL); 11532 } 11533 KASSERT(entry->length > 0, 11534 ("Not defined length for command 0x%02x/0x%02x", 11535 ctsio->cdb[0], ctsio->cdb[1])); 11536 for (i = 1; i < entry->length; i++) { 11537 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11538 if (diff == 0) 11539 continue; 11540 
ctl_set_invalid_field(ctsio, 11541 /*sks_valid*/ 1, 11542 /*command*/ 1, 11543 /*field*/ i, 11544 /*bit_valid*/ 1, 11545 /*bit*/ fls(diff) - 1); 11546 ctl_done((union ctl_io *)ctsio); 11547 return (NULL); 11548 } 11549 return (entry); 11550 } 11551 11552 static int 11553 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11554 { 11555 11556 switch (lun_type) { 11557 case T_DIRECT: 11558 if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0) 11559 return (0); 11560 break; 11561 case T_PROCESSOR: 11562 if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) 11563 return (0); 11564 break; 11565 case T_CDROM: 11566 if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0) 11567 return (0); 11568 break; 11569 default: 11570 return (0); 11571 } 11572 return (1); 11573 } 11574 11575 static int 11576 ctl_scsiio(struct ctl_scsiio *ctsio) 11577 { 11578 int retval; 11579 const struct ctl_cmd_entry *entry; 11580 11581 retval = CTL_RETVAL_COMPLETE; 11582 11583 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11584 11585 entry = ctl_get_cmd_entry(ctsio, NULL); 11586 11587 /* 11588 * If this I/O has been aborted, just send it straight to 11589 * ctl_done() without executing it. 11590 */ 11591 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11592 ctl_done((union ctl_io *)ctsio); 11593 goto bailout; 11594 } 11595 11596 /* 11597 * All the checks should have been handled by ctl_scsiio_precheck(). 11598 * We should be clear now to just execute the I/O. 11599 */ 11600 retval = entry->execute(ctsio); 11601 11602 bailout: 11603 return (retval); 11604 } 11605 11606 /* 11607 * Since we only implement one target right now, a bus reset simply resets 11608 * our single target. 11609 */ 11610 static int 11611 ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io) 11612 { 11613 return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET)); 11614 } 11615 11616 static int 11617 ctl_target_reset(struct ctl_softc *softc, union ctl_io *io, 11618 ctl_ua_type ua_type) 11619 { 11620 struct ctl_port *port = CTL_PORT(io); 11621 struct ctl_lun *lun; 11622 int retval; 11623 11624 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11625 union ctl_ha_msg msg_info; 11626 11627 msg_info.hdr.nexus = io->io_hdr.nexus; 11628 if (ua_type==CTL_UA_TARG_RESET) 11629 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 11630 else 11631 msg_info.task.task_action = CTL_TASK_BUS_RESET; 11632 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11633 msg_info.hdr.original_sc = NULL; 11634 msg_info.hdr.serializing_sc = NULL; 11635 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11636 sizeof(msg_info.task), M_WAITOK); 11637 } 11638 retval = 0; 11639 11640 mtx_lock(&softc->ctl_lock); 11641 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11642 if (port != NULL && 11643 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 11644 continue; 11645 retval += ctl_do_lun_reset(lun, io, ua_type); 11646 } 11647 mtx_unlock(&softc->ctl_lock); 11648 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11649 return (retval); 11650 } 11651 11652 /* 11653 * The LUN should always be set. The I/O is optional, and is used to 11654 * distinguish between I/Os sent by this initiator, and by other 11655 * initiators. We set unit attention for initiators other than this one. 11656 * SAM-3 is vague on this point. It does say that a unit attention should 11657 * be established for other initiators when a LUN is reset (see section 11658 * 5.7.3), but it doesn't specifically say that the unit attention should 11659 * be established for this particular initiator when a LUN is reset. 
Here
 * is the relevant text, from SAM-3 rev 8:
 *
 * 5.7.2 When a SCSI initiator port aborts its own tasks
 *
 * When a SCSI initiator port causes its own task(s) to be aborted, no
 * notification that the task(s) have been aborted shall be returned to
 * the SCSI initiator port other than the completion response for the
 * command or task management function action that caused the task(s) to
 * be aborted and notification(s) associated with related effects of the
 * action (e.g., a reset unit attention condition).
 *
 * XXX KDM for now, we're setting unit attention for all initiators.
 */
static int
ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
{
	union ctl_io *xio;
#if 0
	uint32_t initidx;
#endif
	int i;

	mtx_lock(&lun->lun_lock);
	/*
	 * Run through the OOA queue and abort each I/O.
	 */
	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
		xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
	}

	/*
	 * This version sets unit attention for every initiator.
	 */
#if 0
	initidx = ctl_get_initindex(&io->io_hdr.nexus);
	ctl_est_ua_all(lun, initidx, ua_type);
#else
	ctl_est_ua_all(lun, -1, ua_type);
#endif

	/*
	 * A reset (any kind, really) clears reservations established with
	 * RESERVE/RELEASE.  It does not clear reservations established
	 * with PERSISTENT RESERVE OUT, but we don't support that at the
	 * moment anyway.  See SPC-2, section 5.6.  SPC-3 doesn't address
	 * reservations made with the RESERVE/RELEASE commands, because
	 * those commands are obsolete in SPC-3.
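	 * (Note that the code below also drops any PREVENT ALLOW MEDIUM
	 * REMOVAL state: prevent_count is reset and each initiator's bit
	 * in the prevent mask is cleared.)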
11708 */ 11709 lun->flags &= ~CTL_LUN_RESERVED; 11710 11711 #ifdef CTL_WITH_CA 11712 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11713 ctl_clear_mask(lun->have_ca, i); 11714 #endif 11715 lun->prevent_count = 0; 11716 if (lun->prevent) { 11717 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11718 ctl_clear_mask(lun->prevent, i); 11719 } 11720 mtx_unlock(&lun->lun_lock); 11721 11722 return (0); 11723 } 11724 11725 static int 11726 ctl_lun_reset(struct ctl_softc *softc, union ctl_io *io) 11727 { 11728 struct ctl_lun *lun; 11729 uint32_t targ_lun; 11730 int retval; 11731 11732 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11733 mtx_lock(&softc->ctl_lock); 11734 if (targ_lun >= CTL_MAX_LUNS || 11735 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11736 mtx_unlock(&softc->ctl_lock); 11737 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11738 return (1); 11739 } 11740 retval = ctl_do_lun_reset(lun, io, CTL_UA_LUN_RESET); 11741 mtx_unlock(&softc->ctl_lock); 11742 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11743 11744 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { 11745 union ctl_ha_msg msg_info; 11746 11747 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11748 msg_info.hdr.nexus = io->io_hdr.nexus; 11749 msg_info.task.task_action = CTL_TASK_LUN_RESET; 11750 msg_info.hdr.original_sc = NULL; 11751 msg_info.hdr.serializing_sc = NULL; 11752 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11753 sizeof(msg_info.task), M_WAITOK); 11754 } 11755 return (retval); 11756 } 11757 11758 static void 11759 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 11760 int other_sc) 11761 { 11762 union ctl_io *xio; 11763 11764 mtx_assert(&lun->lun_lock, MA_OWNED); 11765 11766 /* 11767 * Run through the OOA queue and attempt to find the given I/O. 11768 * The target port, initiator ID, tag type and tag number have to 11769 * match the values that we got from the initiator. If we have an 11770 * untagged command to abort, simply abort the first untagged command 11771 * we come to. We only allow one untagged command at a time of course. 11772 */ 11773 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11774 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11775 11776 if ((targ_port == UINT32_MAX || 11777 targ_port == xio->io_hdr.nexus.targ_port) && 11778 (init_id == UINT32_MAX || 11779 init_id == xio->io_hdr.nexus.initid)) { 11780 if (targ_port != xio->io_hdr.nexus.targ_port || 11781 init_id != xio->io_hdr.nexus.initid) 11782 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; 11783 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11784 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11785 union ctl_ha_msg msg_info; 11786 11787 msg_info.hdr.nexus = xio->io_hdr.nexus; 11788 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11789 msg_info.task.tag_num = xio->scsiio.tag_num; 11790 msg_info.task.tag_type = xio->scsiio.tag_type; 11791 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11792 msg_info.hdr.original_sc = NULL; 11793 msg_info.hdr.serializing_sc = NULL; 11794 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11795 sizeof(msg_info.task), M_NOWAIT); 11796 } 11797 } 11798 } 11799 } 11800 11801 static int 11802 ctl_abort_task_set(union ctl_io *io) 11803 { 11804 struct ctl_softc *softc = CTL_SOFTC(io); 11805 struct ctl_lun *lun; 11806 uint32_t targ_lun; 11807 11808 /* 11809 * Look up the LUN. 
11810 */ 11811 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11812 mtx_lock(&softc->ctl_lock); 11813 if (targ_lun >= CTL_MAX_LUNS || 11814 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11815 mtx_unlock(&softc->ctl_lock); 11816 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11817 return (1); 11818 } 11819 11820 mtx_lock(&lun->lun_lock); 11821 mtx_unlock(&softc->ctl_lock); 11822 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 11823 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11824 io->io_hdr.nexus.initid, 11825 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11826 } else { /* CTL_TASK_CLEAR_TASK_SET */ 11827 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 11828 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11829 } 11830 mtx_unlock(&lun->lun_lock); 11831 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11832 return (0); 11833 } 11834 11835 static int 11836 ctl_i_t_nexus_reset(union ctl_io *io) 11837 { 11838 struct ctl_softc *softc = CTL_SOFTC(io); 11839 struct ctl_lun *lun; 11840 uint32_t initidx; 11841 11842 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11843 union ctl_ha_msg msg_info; 11844 11845 msg_info.hdr.nexus = io->io_hdr.nexus; 11846 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; 11847 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11848 msg_info.hdr.original_sc = NULL; 11849 msg_info.hdr.serializing_sc = NULL; 11850 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11851 sizeof(msg_info.task), M_WAITOK); 11852 } 11853 11854 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11855 mtx_lock(&softc->ctl_lock); 11856 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11857 mtx_lock(&lun->lun_lock); 11858 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11859 io->io_hdr.nexus.initid, 1); 11860 #ifdef CTL_WITH_CA 11861 ctl_clear_mask(lun->have_ca, initidx); 11862 #endif 11863 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) 11864 lun->flags &= ~CTL_LUN_RESERVED; 11865 if (lun->prevent && ctl_is_set(lun->prevent, initidx)) { 11866 ctl_clear_mask(lun->prevent, initidx); 11867 lun->prevent_count--; 11868 } 11869 ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS); 11870 mtx_unlock(&lun->lun_lock); 11871 } 11872 mtx_unlock(&softc->ctl_lock); 11873 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11874 return (0); 11875 } 11876 11877 static int 11878 ctl_abort_task(union ctl_io *io) 11879 { 11880 struct ctl_softc *softc = CTL_SOFTC(io); 11881 union ctl_io *xio; 11882 struct ctl_lun *lun; 11883 #if 0 11884 struct sbuf sb; 11885 char printbuf[128]; 11886 #endif 11887 int found; 11888 uint32_t targ_lun; 11889 11890 found = 0; 11891 11892 /* 11893 * Look up the LUN. 11894 */ 11895 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11896 mtx_lock(&softc->ctl_lock); 11897 if (targ_lun >= CTL_MAX_LUNS || 11898 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11899 mtx_unlock(&softc->ctl_lock); 11900 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11901 return (1); 11902 } 11903 11904 #if 0 11905 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 11906 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 11907 #endif 11908 11909 mtx_lock(&lun->lun_lock); 11910 mtx_unlock(&softc->ctl_lock); 11911 /* 11912 * Run through the OOA queue and attempt to find the given I/O. 11913 * The target port, initiator ID, tag type and tag number have to 11914 * match the values that we got from the initiator. If we have an 11915 * untagged command to abort, simply abort the first untagged command 11916 * we come to. 
We only allow one untagged command at a time of course. 11917 */ 11918 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11919 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11920 #if 0 11921 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 11922 11923 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 11924 lun->lun, xio->scsiio.tag_num, 11925 xio->scsiio.tag_type, 11926 (xio->io_hdr.blocked_links.tqe_prev 11927 == NULL) ? "" : " BLOCKED", 11928 (xio->io_hdr.flags & 11929 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 11930 (xio->io_hdr.flags & 11931 CTL_FLAG_ABORT) ? " ABORT" : "", 11932 (xio->io_hdr.flags & 11933 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); 11934 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 11935 sbuf_finish(&sb); 11936 printf("%s\n", sbuf_data(&sb)); 11937 #endif 11938 11939 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 11940 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 11941 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 11942 continue; 11943 11944 /* 11945 * If the abort says that the task is untagged, the 11946 * task in the queue must be untagged. Otherwise, 11947 * we just check to see whether the tag numbers 11948 * match. This is because the QLogic firmware 11949 * doesn't pass back the tag type in an abort 11950 * request. 11951 */ 11952 #if 0 11953 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 11954 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 11955 || (xio->scsiio.tag_num == io->taskio.tag_num)) 11956 #endif 11957 /* 11958 * XXX KDM we've got problems with FC, because it 11959 * doesn't send down a tag type with aborts. So we 11960 * can only really go by the tag number... 11961 * This may cause problems with parallel SCSI. 11962 * Need to figure that out!! 11963 */ 11964 if (xio->scsiio.tag_num == io->taskio.tag_num) { 11965 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11966 found = 1; 11967 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 11968 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11969 union ctl_ha_msg msg_info; 11970 11971 msg_info.hdr.nexus = io->io_hdr.nexus; 11972 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11973 msg_info.task.tag_num = io->taskio.tag_num; 11974 msg_info.task.tag_type = io->taskio.tag_type; 11975 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11976 msg_info.hdr.original_sc = NULL; 11977 msg_info.hdr.serializing_sc = NULL; 11978 #if 0 11979 printf("Sent Abort to other side\n"); 11980 #endif 11981 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11982 sizeof(msg_info.task), M_NOWAIT); 11983 } 11984 #if 0 11985 printf("ctl_abort_task: found I/O to abort\n"); 11986 #endif 11987 } 11988 } 11989 mtx_unlock(&lun->lun_lock); 11990 11991 if (found == 0) { 11992 /* 11993 * This isn't really an error. It's entirely possible for 11994 * the abort and command completion to cross on the wire. 11995 * This is more of an informative/diagnostic error. 
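	 * (Either way, the task management function is completed below
	 * with a status of CTL_TASK_FUNCTION_COMPLETE.)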
11996 */ 11997 #if 0 11998 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " 11999 "%u:%u:%u tag %d type %d\n", 12000 io->io_hdr.nexus.initid, 12001 io->io_hdr.nexus.targ_port, 12002 io->io_hdr.nexus.targ_lun, io->taskio.tag_num, 12003 io->taskio.tag_type); 12004 #endif 12005 } 12006 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12007 return (0); 12008 } 12009 12010 static int 12011 ctl_query_task(union ctl_io *io, int task_set) 12012 { 12013 struct ctl_softc *softc = CTL_SOFTC(io); 12014 union ctl_io *xio; 12015 struct ctl_lun *lun; 12016 int found = 0; 12017 uint32_t targ_lun; 12018 12019 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12020 mtx_lock(&softc->ctl_lock); 12021 if (targ_lun >= CTL_MAX_LUNS || 12022 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12023 mtx_unlock(&softc->ctl_lock); 12024 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12025 return (1); 12026 } 12027 mtx_lock(&lun->lun_lock); 12028 mtx_unlock(&softc->ctl_lock); 12029 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12030 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12031 12032 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 12033 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 12034 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 12035 continue; 12036 12037 if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) { 12038 found = 1; 12039 break; 12040 } 12041 } 12042 mtx_unlock(&lun->lun_lock); 12043 if (found) 12044 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12045 else 12046 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12047 return (0); 12048 } 12049 12050 static int 12051 ctl_query_async_event(union ctl_io *io) 12052 { 12053 struct ctl_softc *softc = CTL_SOFTC(io); 12054 struct ctl_lun *lun; 12055 ctl_ua_type ua; 12056 uint32_t targ_lun, initidx; 12057 12058 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12059 mtx_lock(&softc->ctl_lock); 12060 if (targ_lun >= CTL_MAX_LUNS || 12061 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12062 mtx_unlock(&softc->ctl_lock); 12063 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12064 return (1); 12065 } 12066 mtx_lock(&lun->lun_lock); 12067 mtx_unlock(&softc->ctl_lock); 12068 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12069 ua = ctl_build_qae(lun, initidx, io->taskio.task_resp); 12070 mtx_unlock(&lun->lun_lock); 12071 if (ua != CTL_UA_NONE) 12072 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12073 else 12074 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12075 return (0); 12076 } 12077 12078 static void 12079 ctl_run_task(union ctl_io *io) 12080 { 12081 struct ctl_softc *softc = CTL_SOFTC(io); 12082 int retval = 1; 12083 12084 CTL_DEBUG_PRINT(("ctl_run_task\n")); 12085 KASSERT(io->io_hdr.io_type == CTL_IO_TASK, 12086 ("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type)); 12087 io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED; 12088 bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp)); 12089 switch (io->taskio.task_action) { 12090 case CTL_TASK_ABORT_TASK: 12091 retval = ctl_abort_task(io); 12092 break; 12093 case CTL_TASK_ABORT_TASK_SET: 12094 case CTL_TASK_CLEAR_TASK_SET: 12095 retval = ctl_abort_task_set(io); 12096 break; 12097 case CTL_TASK_CLEAR_ACA: 12098 break; 12099 case CTL_TASK_I_T_NEXUS_RESET: 12100 retval = ctl_i_t_nexus_reset(io); 12101 break; 12102 case CTL_TASK_LUN_RESET: 12103 retval = ctl_lun_reset(softc, io); 12104 break; 12105 case CTL_TASK_TARGET_RESET: 12106 retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET); 12107 
break; 12108 case CTL_TASK_BUS_RESET: 12109 retval = ctl_bus_reset(softc, io); 12110 break; 12111 case CTL_TASK_PORT_LOGIN: 12112 break; 12113 case CTL_TASK_PORT_LOGOUT: 12114 break; 12115 case CTL_TASK_QUERY_TASK: 12116 retval = ctl_query_task(io, 0); 12117 break; 12118 case CTL_TASK_QUERY_TASK_SET: 12119 retval = ctl_query_task(io, 1); 12120 break; 12121 case CTL_TASK_QUERY_ASYNC_EVENT: 12122 retval = ctl_query_async_event(io); 12123 break; 12124 default: 12125 printf("%s: got unknown task management event %d\n", 12126 __func__, io->taskio.task_action); 12127 break; 12128 } 12129 if (retval == 0) 12130 io->io_hdr.status = CTL_SUCCESS; 12131 else 12132 io->io_hdr.status = CTL_ERROR; 12133 ctl_done(io); 12134 } 12135 12136 /* 12137 * For HA operation. Handle commands that come in from the other 12138 * controller. 12139 */ 12140 static void 12141 ctl_handle_isc(union ctl_io *io) 12142 { 12143 struct ctl_softc *softc = CTL_SOFTC(io); 12144 struct ctl_lun *lun; 12145 const struct ctl_cmd_entry *entry; 12146 uint32_t targ_lun; 12147 12148 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12149 switch (io->io_hdr.msg_type) { 12150 case CTL_MSG_SERIALIZE: 12151 ctl_serialize_other_sc_cmd(&io->scsiio); 12152 break; 12153 case CTL_MSG_R2R: /* Only used in SER_ONLY mode. */ 12154 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 12155 if (targ_lun >= CTL_MAX_LUNS || 12156 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12157 ctl_done(io); 12158 break; 12159 } 12160 mtx_lock(&lun->lun_lock); 12161 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { 12162 mtx_unlock(&lun->lun_lock); 12163 ctl_done(io); 12164 break; 12165 } 12166 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12167 mtx_unlock(&lun->lun_lock); 12168 ctl_enqueue_rtr(io); 12169 break; 12170 case CTL_MSG_FINISH_IO: 12171 if (softc->ha_mode == CTL_HA_MODE_XFER) { 12172 ctl_done(io); 12173 break; 12174 } 12175 if (targ_lun >= CTL_MAX_LUNS || 12176 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12177 ctl_free_io(io); 12178 break; 12179 } 12180 mtx_lock(&lun->lun_lock); 12181 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 12182 ctl_check_blocked(lun); 12183 mtx_unlock(&lun->lun_lock); 12184 ctl_free_io(io); 12185 break; 12186 case CTL_MSG_PERS_ACTION: 12187 ctl_hndl_per_res_out_on_other_sc(io); 12188 ctl_free_io(io); 12189 break; 12190 case CTL_MSG_BAD_JUJU: 12191 ctl_done(io); 12192 break; 12193 case CTL_MSG_DATAMOVE: /* Only used in XFER mode */ 12194 ctl_datamove_remote(io); 12195 break; 12196 case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */ 12197 io->scsiio.be_move_done(io); 12198 break; 12199 case CTL_MSG_FAILOVER: 12200 ctl_failover_lun(io); 12201 ctl_free_io(io); 12202 break; 12203 default: 12204 printf("%s: Invalid message type %d\n", 12205 __func__, io->io_hdr.msg_type); 12206 ctl_free_io(io); 12207 break; 12208 } 12209 12210 } 12211 12212 12213 /* 12214 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12215 * there is no match. 12216 */ 12217 static ctl_lun_error_pattern 12218 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12219 { 12220 const struct ctl_cmd_entry *entry; 12221 ctl_lun_error_pattern filtered_pattern, pattern; 12222 12223 pattern = desc->error_pattern; 12224 12225 /* 12226 * XXX KDM we need more data passed into this function to match a 12227 * custom pattern, and we actually need to implement custom pattern 12228 * matching. 
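	 * (As implemented below, CTL_LUN_PAT_CMD and CTL_LUN_PAT_ANY match
	 * unconditionally, and a CTL_LUN_PAT_RANGE request additionally
	 * requires the descriptor's LBA range to overlap the command's.)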
12229 */ 12230 if (pattern & CTL_LUN_PAT_CMD) 12231 return (CTL_LUN_PAT_CMD); 12232 12233 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12234 return (CTL_LUN_PAT_ANY); 12235 12236 entry = ctl_get_cmd_entry(ctsio, NULL); 12237 12238 filtered_pattern = entry->pattern & pattern; 12239 12240 /* 12241 * If the user requested specific flags in the pattern (e.g. 12242 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12243 * flags. 12244 * 12245 * If the user did not specify any flags, it doesn't matter whether 12246 * or not the command supports the flags. 12247 */ 12248 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12249 (pattern & ~CTL_LUN_PAT_MASK)) 12250 return (CTL_LUN_PAT_NONE); 12251 12252 /* 12253 * If the user asked for a range check, see if the requested LBA 12254 * range overlaps with this command's LBA range. 12255 */ 12256 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12257 uint64_t lba1; 12258 uint64_t len1; 12259 ctl_action action; 12260 int retval; 12261 12262 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12263 if (retval != 0) 12264 return (CTL_LUN_PAT_NONE); 12265 12266 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12267 desc->lba_range.len, FALSE); 12268 /* 12269 * A "pass" means that the LBA ranges don't overlap, so 12270 * this doesn't match the user's range criteria. 12271 */ 12272 if (action == CTL_ACTION_PASS) 12273 return (CTL_LUN_PAT_NONE); 12274 } 12275 12276 return (filtered_pattern); 12277 } 12278 12279 static void 12280 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12281 { 12282 struct ctl_error_desc *desc, *desc2; 12283 12284 mtx_assert(&lun->lun_lock, MA_OWNED); 12285 12286 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12287 ctl_lun_error_pattern pattern; 12288 /* 12289 * Check to see whether this particular command matches 12290 * the pattern in the descriptor. 12291 */ 12292 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12293 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12294 continue; 12295 12296 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12297 case CTL_LUN_INJ_ABORTED: 12298 ctl_set_aborted(&io->scsiio); 12299 break; 12300 case CTL_LUN_INJ_MEDIUM_ERR: 12301 ctl_set_medium_error(&io->scsiio, 12302 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != 12303 CTL_FLAG_DATA_OUT); 12304 break; 12305 case CTL_LUN_INJ_UA: 12306 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12307 * OCCURRED */ 12308 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12309 break; 12310 case CTL_LUN_INJ_CUSTOM: 12311 /* 12312 * We're assuming the user knows what he is doing. 12313 * Just copy the sense information without doing 12314 * checks. 12315 */ 12316 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12317 MIN(sizeof(desc->custom_sense), 12318 sizeof(io->scsiio.sense_data))); 12319 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12320 io->scsiio.sense_len = SSD_FULL_SIZE; 12321 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12322 break; 12323 case CTL_LUN_INJ_NONE: 12324 default: 12325 /* 12326 * If this is an error injection type we don't know 12327 * about, clear the continuous flag (if it is set) 12328 * so it will get deleted below. 
12329 */ 12330 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12331 break; 12332 } 12333 /* 12334 * By default, each error injection action is a one-shot 12335 */ 12336 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12337 continue; 12338 12339 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12340 12341 free(desc, M_CTL); 12342 } 12343 } 12344 12345 #ifdef CTL_IO_DELAY 12346 static void 12347 ctl_datamove_timer_wakeup(void *arg) 12348 { 12349 union ctl_io *io; 12350 12351 io = (union ctl_io *)arg; 12352 12353 ctl_datamove(io); 12354 } 12355 #endif /* CTL_IO_DELAY */ 12356 12357 void 12358 ctl_datamove(union ctl_io *io) 12359 { 12360 void (*fe_datamove)(union ctl_io *io); 12361 12362 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12363 12364 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12365 12366 /* No data transferred yet. Frontend must update this when done. */ 12367 io->scsiio.kern_data_resid = io->scsiio.kern_data_len; 12368 12369 #ifdef CTL_TIME_IO 12370 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12371 char str[256]; 12372 char path_str[64]; 12373 struct sbuf sb; 12374 12375 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12376 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12377 12378 sbuf_cat(&sb, path_str); 12379 switch (io->io_hdr.io_type) { 12380 case CTL_IO_SCSI: 12381 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12382 sbuf_printf(&sb, "\n"); 12383 sbuf_cat(&sb, path_str); 12384 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12385 io->scsiio.tag_num, io->scsiio.tag_type); 12386 break; 12387 case CTL_IO_TASK: 12388 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12389 "Tag Type: %d\n", io->taskio.task_action, 12390 io->taskio.tag_num, io->taskio.tag_type); 12391 break; 12392 default: 12393 panic("%s: Invalid CTL I/O type %d\n", 12394 __func__, io->io_hdr.io_type); 12395 } 12396 sbuf_cat(&sb, path_str); 12397 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12398 (intmax_t)time_uptime - io->io_hdr.start_time); 12399 sbuf_finish(&sb); 12400 printf("%s", sbuf_data(&sb)); 12401 } 12402 #endif /* CTL_TIME_IO */ 12403 12404 #ifdef CTL_IO_DELAY 12405 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12406 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12407 } else { 12408 if ((lun != NULL) 12409 && (lun->delay_info.datamove_delay > 0)) { 12410 12411 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 12412 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12413 callout_reset(&io->io_hdr.delay_callout, 12414 lun->delay_info.datamove_delay * hz, 12415 ctl_datamove_timer_wakeup, io); 12416 if (lun->delay_info.datamove_type == 12417 CTL_DELAY_TYPE_ONESHOT) 12418 lun->delay_info.datamove_delay = 0; 12419 return; 12420 } 12421 } 12422 #endif 12423 12424 /* 12425 * This command has been aborted. Set the port status, so we fail 12426 * the data move. 12427 */ 12428 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12429 printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", 12430 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12431 io->io_hdr.nexus.targ_port, 12432 io->io_hdr.nexus.targ_lun); 12433 io->io_hdr.port_status = 31337; 12434 /* 12435 * Note that the backend, in this case, will get the 12436 * callback in its context. In other cases it may get 12437 * called in the frontend's interrupt thread context. 12438 */ 12439 io->scsiio.be_move_done(io); 12440 return; 12441 } 12442 12443 /* Don't confuse frontend with zero length data move. 
*/ 12444 if (io->scsiio.kern_data_len == 0) { 12445 io->scsiio.be_move_done(io); 12446 return; 12447 } 12448 12449 fe_datamove = CTL_PORT(io)->fe_datamove; 12450 fe_datamove(io); 12451 } 12452 12453 static void 12454 ctl_send_datamove_done(union ctl_io *io, int have_lock) 12455 { 12456 union ctl_ha_msg msg; 12457 #ifdef CTL_TIME_IO 12458 struct bintime cur_bt; 12459 #endif 12460 12461 memset(&msg, 0, sizeof(msg)); 12462 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12463 msg.hdr.original_sc = io; 12464 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12465 msg.hdr.nexus = io->io_hdr.nexus; 12466 msg.hdr.status = io->io_hdr.status; 12467 msg.scsi.kern_data_resid = io->scsiio.kern_data_resid; 12468 msg.scsi.tag_num = io->scsiio.tag_num; 12469 msg.scsi.tag_type = io->scsiio.tag_type; 12470 msg.scsi.scsi_status = io->scsiio.scsi_status; 12471 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12472 io->scsiio.sense_len); 12473 msg.scsi.sense_len = io->scsiio.sense_len; 12474 msg.scsi.port_status = io->io_hdr.port_status; 12475 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12476 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12477 ctl_failover_io(io, /*have_lock*/ have_lock); 12478 return; 12479 } 12480 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12481 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12482 msg.scsi.sense_len, M_WAITOK); 12483 12484 #ifdef CTL_TIME_IO 12485 getbinuptime(&cur_bt); 12486 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 12487 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 12488 #endif 12489 io->io_hdr.num_dmas++; 12490 } 12491 12492 /* 12493 * The DMA to the remote side is done, now we need to tell the other side 12494 * we're done so it can continue with its data movement. 12495 */ 12496 static void 12497 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12498 { 12499 union ctl_io *io; 12500 uint32_t i; 12501 12502 io = rq->context; 12503 12504 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12505 printf("%s: ISC DMA write failed with error %d", __func__, 12506 rq->ret); 12507 ctl_set_internal_failure(&io->scsiio, 12508 /*sks_valid*/ 1, 12509 /*retry_count*/ rq->ret); 12510 } 12511 12512 ctl_dt_req_free(rq); 12513 12514 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12515 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12516 free(io->io_hdr.remote_sglist, M_CTL); 12517 io->io_hdr.remote_sglist = NULL; 12518 io->io_hdr.local_sglist = NULL; 12519 12520 /* 12521 * The data is in local and remote memory, so now we need to send 12522 * status (good or back) back to the other side. 12523 */ 12524 ctl_send_datamove_done(io, /*have_lock*/ 0); 12525 } 12526 12527 /* 12528 * We've moved the data from the host/controller into local memory. Now we 12529 * need to push it over to the remote controller's memory. 12530 */ 12531 static int 12532 ctl_datamove_remote_dm_write_cb(union ctl_io *io) 12533 { 12534 int retval; 12535 12536 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 12537 ctl_datamove_remote_write_cb); 12538 return (retval); 12539 } 12540 12541 static void 12542 ctl_datamove_remote_write(union ctl_io *io) 12543 { 12544 int retval; 12545 void (*fe_datamove)(union ctl_io *io); 12546 12547 /* 12548 * - Get the data from the host/HBA into local memory. 12549 * - DMA memory from the local controller to the remote controller. 12550 * - Send status back to the remote controller. 
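	 * (Step 1 is done by the frontend via fe_datamove() with
	 * be_move_done pointed at ctl_datamove_remote_dm_write_cb(), which
	 * then drives steps 2 and 3 through ctl_datamove_remote_xfer() and
	 * ctl_datamove_remote_write_cb().)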
12551 */ 12552 12553 retval = ctl_datamove_remote_sgl_setup(io); 12554 if (retval != 0) 12555 return; 12556 12557 /* Switch the pointer over so the FETD knows what to do */ 12558 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12559 12560 /* 12561 * Use a custom move done callback, since we need to send completion 12562 * back to the other controller, not to the backend on this side. 12563 */ 12564 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12565 12566 fe_datamove = CTL_PORT(io)->fe_datamove; 12567 fe_datamove(io); 12568 } 12569 12570 static int 12571 ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12572 { 12573 #if 0 12574 char str[256]; 12575 char path_str[64]; 12576 struct sbuf sb; 12577 #endif 12578 uint32_t i; 12579 12580 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12581 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12582 free(io->io_hdr.remote_sglist, M_CTL); 12583 io->io_hdr.remote_sglist = NULL; 12584 io->io_hdr.local_sglist = NULL; 12585 12586 #if 0 12587 scsi_path_string(io, path_str, sizeof(path_str)); 12588 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12589 sbuf_cat(&sb, path_str); 12590 scsi_command_string(&io->scsiio, NULL, &sb); 12591 sbuf_printf(&sb, "\n"); 12592 sbuf_cat(&sb, path_str); 12593 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12594 io->scsiio.tag_num, io->scsiio.tag_type); 12595 sbuf_cat(&sb, path_str); 12596 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 12597 io->io_hdr.flags, io->io_hdr.status); 12598 sbuf_finish(&sb); 12599 printk("%s", sbuf_data(&sb)); 12600 #endif 12601 12602 12603 /* 12604 * The read is done, now we need to send status (good or bad) back 12605 * to the other side. 12606 */ 12607 ctl_send_datamove_done(io, /*have_lock*/ 0); 12608 12609 return (0); 12610 } 12611 12612 static void 12613 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12614 { 12615 union ctl_io *io; 12616 void (*fe_datamove)(union ctl_io *io); 12617 12618 io = rq->context; 12619 12620 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12621 printf("%s: ISC DMA read failed with error %d\n", __func__, 12622 rq->ret); 12623 ctl_set_internal_failure(&io->scsiio, 12624 /*sks_valid*/ 1, 12625 /*retry_count*/ rq->ret); 12626 } 12627 12628 ctl_dt_req_free(rq); 12629 12630 /* Switch the pointer over so the FETD knows what to do */ 12631 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12632 12633 /* 12634 * Use a custom move done callback, since we need to send completion 12635 * back to the other controller, not to the backend on this side. 12636 */ 12637 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12638 12639 /* XXX KDM add checks like the ones in ctl_datamove? */ 12640 12641 fe_datamove = CTL_PORT(io)->fe_datamove; 12642 fe_datamove(io); 12643 } 12644 12645 static int 12646 ctl_datamove_remote_sgl_setup(union ctl_io *io) 12647 { 12648 struct ctl_sg_entry *local_sglist; 12649 uint32_t len_to_go; 12650 int retval; 12651 int i; 12652 12653 retval = 0; 12654 local_sglist = io->io_hdr.local_sglist; 12655 len_to_go = io->scsiio.kern_data_len; 12656 12657 /* 12658 * The difficult thing here is that the size of the various 12659 * S/G segments may be different than the size from the 12660 * remote controller. That'll make it harder when DMAing 12661 * the data back to the other side. 
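	 * (ctl_datamove_remote_xfer() copes with this by walking the local
	 * and remote S/G lists in parallel and sizing each HA DT request
	 * to the overlap of the two current segments.)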
12662 */ 12663 for (i = 0; len_to_go > 0; i++) { 12664 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12665 local_sglist[i].addr = 12666 malloc(local_sglist[i].len, M_CTL, M_WAITOK); 12667 12668 len_to_go -= local_sglist[i].len; 12669 } 12670 /* 12671 * Reset the number of S/G entries accordingly. The original 12672 * number of S/G entries is available in rem_sg_entries. 12673 */ 12674 io->scsiio.kern_sg_entries = i; 12675 12676 #if 0 12677 printf("%s: kern_sg_entries = %d\n", __func__, 12678 io->scsiio.kern_sg_entries); 12679 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12680 printf("%s: sg[%d] = %p, %lu\n", __func__, i, 12681 local_sglist[i].addr, local_sglist[i].len); 12682 #endif 12683 12684 return (retval); 12685 } 12686 12687 static int 12688 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12689 ctl_ha_dt_cb callback) 12690 { 12691 struct ctl_ha_dt_req *rq; 12692 struct ctl_sg_entry *remote_sglist, *local_sglist; 12693 uint32_t local_used, remote_used, total_used; 12694 int i, j, isc_ret; 12695 12696 rq = ctl_dt_req_alloc(); 12697 12698 /* 12699 * If we failed to allocate the request, and if the DMA didn't fail 12700 * anyway, set busy status. This is just a resource allocation 12701 * failure. 12702 */ 12703 if ((rq == NULL) 12704 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12705 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) 12706 ctl_set_busy(&io->scsiio); 12707 12708 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12709 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { 12710 12711 if (rq != NULL) 12712 ctl_dt_req_free(rq); 12713 12714 /* 12715 * The data move failed. We need to return status back 12716 * to the other controller. No point in trying to DMA 12717 * data to the remote controller. 12718 */ 12719 12720 ctl_send_datamove_done(io, /*have_lock*/ 0); 12721 12722 return (1); 12723 } 12724 12725 local_sglist = io->io_hdr.local_sglist; 12726 remote_sglist = io->io_hdr.remote_sglist; 12727 local_used = 0; 12728 remote_used = 0; 12729 total_used = 0; 12730 12731 /* 12732 * Pull/push the data over the wire from/to the other controller. 12733 * This takes into account the possibility that the local and 12734 * remote sglists may not be identical in terms of the size of 12735 * the elements and the number of elements. 12736 * 12737 * One fundamental assumption here is that the length allocated for 12738 * both the local and remote sglists is identical. Otherwise, we've 12739 * essentially got a coding error of some sort. 12740 */ 12741 isc_ret = CTL_HA_STATUS_SUCCESS; 12742 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12743 uint32_t cur_len; 12744 uint8_t *tmp_ptr; 12745 12746 rq->command = command; 12747 rq->context = io; 12748 12749 /* 12750 * Both pointers should be aligned. But it is possible 12751 * that the allocation length is not. They should both 12752 * also have enough slack left over at the end, though, 12753 * to round up to the next 8 byte boundary. 
12754 */ 12755 cur_len = MIN(local_sglist[i].len - local_used, 12756 remote_sglist[j].len - remote_used); 12757 rq->size = cur_len; 12758 12759 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12760 tmp_ptr += local_used; 12761 12762 #if 0 12763 /* Use physical addresses when talking to ISC hardware */ 12764 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12765 /* XXX KDM use busdma */ 12766 rq->local = vtophys(tmp_ptr); 12767 } else 12768 rq->local = tmp_ptr; 12769 #else 12770 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12771 ("HA does not support BUS_ADDR")); 12772 rq->local = tmp_ptr; 12773 #endif 12774 12775 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12776 tmp_ptr += remote_used; 12777 rq->remote = tmp_ptr; 12778 12779 rq->callback = NULL; 12780 12781 local_used += cur_len; 12782 if (local_used >= local_sglist[i].len) { 12783 i++; 12784 local_used = 0; 12785 } 12786 12787 remote_used += cur_len; 12788 if (remote_used >= remote_sglist[j].len) { 12789 j++; 12790 remote_used = 0; 12791 } 12792 total_used += cur_len; 12793 12794 if (total_used >= io->scsiio.kern_data_len) 12795 rq->callback = callback; 12796 12797 #if 0 12798 printf("%s: %s: local %p remote %p size %d\n", __func__, 12799 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 12800 rq->local, rq->remote, rq->size); 12801 #endif 12802 12803 isc_ret = ctl_dt_single(rq); 12804 if (isc_ret > CTL_HA_STATUS_SUCCESS) 12805 break; 12806 } 12807 if (isc_ret != CTL_HA_STATUS_WAIT) { 12808 rq->ret = isc_ret; 12809 callback(rq); 12810 } 12811 12812 return (0); 12813 } 12814 12815 static void 12816 ctl_datamove_remote_read(union ctl_io *io) 12817 { 12818 int retval; 12819 uint32_t i; 12820 12821 /* 12822 * This will send an error to the other controller in the case of a 12823 * failure. 12824 */ 12825 retval = ctl_datamove_remote_sgl_setup(io); 12826 if (retval != 0) 12827 return; 12828 12829 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12830 ctl_datamove_remote_read_cb); 12831 if (retval != 0) { 12832 /* 12833 * Make sure we free memory if there was an error.. The 12834 * ctl_datamove_remote_xfer() function will send the 12835 * datamove done message, or call the callback with an 12836 * error if there is a problem. 12837 */ 12838 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12839 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12840 free(io->io_hdr.remote_sglist, M_CTL); 12841 io->io_hdr.remote_sglist = NULL; 12842 io->io_hdr.local_sglist = NULL; 12843 } 12844 } 12845 12846 /* 12847 * Process a datamove request from the other controller. This is used for 12848 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12849 * first. Once that is complete, the data gets DMAed into the remote 12850 * controller's memory. For reads, we DMA from the remote controller's 12851 * memory into our memory first, and then move it out to the FETD. 12852 */ 12853 static void 12854 ctl_datamove_remote(union ctl_io *io) 12855 { 12856 12857 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12858 12859 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12860 ctl_failover_io(io, /*have_lock*/ 0); 12861 return; 12862 } 12863 12864 /* 12865 * Note that we look for an aborted I/O here, but don't do some of 12866 * the other checks that ctl_datamove() normally does. 12867 * We don't need to run the datamove delay code, since that should 12868 * have been done if need be on the other controller. 
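	 * (If the I/O was aborted we simply set a port_status below and
	 * send a DATAMOVE_DONE message back instead of starting a transfer.)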
12869 */ 12870 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12871 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, 12872 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12873 io->io_hdr.nexus.targ_port, 12874 io->io_hdr.nexus.targ_lun); 12875 io->io_hdr.port_status = 31338; 12876 ctl_send_datamove_done(io, /*have_lock*/ 0); 12877 return; 12878 } 12879 12880 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) 12881 ctl_datamove_remote_write(io); 12882 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 12883 ctl_datamove_remote_read(io); 12884 else { 12885 io->io_hdr.port_status = 31339; 12886 ctl_send_datamove_done(io, /*have_lock*/ 0); 12887 } 12888 } 12889 12890 static void 12891 ctl_process_done(union ctl_io *io) 12892 { 12893 struct ctl_softc *softc = CTL_SOFTC(io); 12894 struct ctl_port *port = CTL_PORT(io); 12895 struct ctl_lun *lun = CTL_LUN(io); 12896 void (*fe_done)(union ctl_io *io); 12897 union ctl_ha_msg msg; 12898 12899 CTL_DEBUG_PRINT(("ctl_process_done\n")); 12900 fe_done = port->fe_done; 12901 12902 #ifdef CTL_TIME_IO 12903 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12904 char str[256]; 12905 char path_str[64]; 12906 struct sbuf sb; 12907 12908 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12909 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12910 12911 sbuf_cat(&sb, path_str); 12912 switch (io->io_hdr.io_type) { 12913 case CTL_IO_SCSI: 12914 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12915 sbuf_printf(&sb, "\n"); 12916 sbuf_cat(&sb, path_str); 12917 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12918 io->scsiio.tag_num, io->scsiio.tag_type); 12919 break; 12920 case CTL_IO_TASK: 12921 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12922 "Tag Type: %d\n", io->taskio.task_action, 12923 io->taskio.tag_num, io->taskio.tag_type); 12924 break; 12925 default: 12926 panic("%s: Invalid CTL I/O type %d\n", 12927 __func__, io->io_hdr.io_type); 12928 } 12929 sbuf_cat(&sb, path_str); 12930 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 12931 (intmax_t)time_uptime - io->io_hdr.start_time); 12932 sbuf_finish(&sb); 12933 printf("%s", sbuf_data(&sb)); 12934 } 12935 #endif /* CTL_TIME_IO */ 12936 12937 switch (io->io_hdr.io_type) { 12938 case CTL_IO_SCSI: 12939 break; 12940 case CTL_IO_TASK: 12941 if (ctl_debug & CTL_DEBUG_INFO) 12942 ctl_io_error_print(io, NULL); 12943 fe_done(io); 12944 return; 12945 default: 12946 panic("%s: Invalid CTL I/O type %d\n", 12947 __func__, io->io_hdr.io_type); 12948 } 12949 12950 if (lun == NULL) { 12951 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 12952 io->io_hdr.nexus.targ_mapped_lun)); 12953 goto bailout; 12954 } 12955 12956 mtx_lock(&lun->lun_lock); 12957 12958 /* 12959 * Check to see if we have any informational exception and status 12960 * of this command can be modified to report it in form of either 12961 * RECOVERED ERROR or NO SENSE, depending on MRIE mode page field. 
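	 * (MRIE is the Method of Reporting Informational Exceptions field
	 * of the Informational Exceptions Control mode page; the code below
	 * only reports for the "recovered, conditional" method when PER is
	 * set, and for the unconditional-recovered and no-sense methods.)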
12962 */ 12963 if (lun->ie_reported == 0 && lun->ie_asc != 0 && 12964 io->io_hdr.status == CTL_SUCCESS && 12965 (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) { 12966 uint8_t mrie = lun->MODE_IE.mrie; 12967 uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) || 12968 (lun->MODE_VER.byte3 & SMS_VER_PER)); 12969 if (((mrie == SIEP_MRIE_REC_COND && per) || 12970 mrie == SIEP_MRIE_REC_UNCOND || 12971 mrie == SIEP_MRIE_NO_SENSE) && 12972 (ctl_get_cmd_entry(&io->scsiio, NULL)->flags & 12973 CTL_CMD_FLAG_NO_SENSE) == 0) { 12974 ctl_set_sense(&io->scsiio, 12975 /*current_error*/ 1, 12976 /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ? 12977 SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR, 12978 /*asc*/ lun->ie_asc, 12979 /*ascq*/ lun->ie_ascq, 12980 SSD_ELEM_NONE); 12981 lun->ie_reported = 1; 12982 } 12983 } else if (lun->ie_reported < 0) 12984 lun->ie_reported = 0; 12985 12986 /* 12987 * Check to see if we have any errors to inject here. We only 12988 * inject errors for commands that don't already have errors set. 12989 */ 12990 if (!STAILQ_EMPTY(&lun->error_list) && 12991 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && 12992 ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) 12993 ctl_inject_error(lun, io); 12994 12995 /* 12996 * XXX KDM how do we treat commands that aren't completed 12997 * successfully? 12998 * 12999 * XXX KDM should we also track I/O latency? 13000 */ 13001 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && 13002 io->io_hdr.io_type == CTL_IO_SCSI) { 13003 int type; 13004 #ifdef CTL_TIME_IO 13005 struct bintime bt; 13006 13007 getbinuptime(&bt); 13008 bintime_sub(&bt, &io->io_hdr.start_bt); 13009 #endif 13010 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13011 CTL_FLAG_DATA_IN) 13012 type = CTL_STATS_READ; 13013 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13014 CTL_FLAG_DATA_OUT) 13015 type = CTL_STATS_WRITE; 13016 else 13017 type = CTL_STATS_NO_IO; 13018 13019 #ifdef CTL_LEGACY_STATS 13020 uint32_t targ_port = port->targ_port; 13021 lun->legacy_stats.ports[targ_port].bytes[type] += 13022 io->scsiio.kern_total_len; 13023 lun->legacy_stats.ports[targ_port].operations[type] ++; 13024 lun->legacy_stats.ports[targ_port].num_dmas[type] += 13025 io->io_hdr.num_dmas; 13026 #ifdef CTL_TIME_IO 13027 bintime_add(&lun->legacy_stats.ports[targ_port].dma_time[type], 13028 &io->io_hdr.dma_bt); 13029 bintime_add(&lun->legacy_stats.ports[targ_port].time[type], 13030 &bt); 13031 #endif 13032 #endif /* CTL_LEGACY_STATS */ 13033 13034 lun->stats.bytes[type] += io->scsiio.kern_total_len; 13035 lun->stats.operations[type] ++; 13036 lun->stats.dmas[type] += io->io_hdr.num_dmas; 13037 #ifdef CTL_TIME_IO 13038 bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt); 13039 bintime_add(&lun->stats.time[type], &bt); 13040 #endif 13041 13042 mtx_lock(&port->port_lock); 13043 port->stats.bytes[type] += io->scsiio.kern_total_len; 13044 port->stats.operations[type] ++; 13045 port->stats.dmas[type] += io->io_hdr.num_dmas; 13046 #ifdef CTL_TIME_IO 13047 bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt); 13048 bintime_add(&port->stats.time[type], &bt); 13049 #endif 13050 mtx_unlock(&port->port_lock); 13051 } 13052 13053 /* 13054 * Remove this from the OOA queue. 
13055 */ 13056 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 13057 #ifdef CTL_TIME_IO 13058 if (TAILQ_EMPTY(&lun->ooa_queue)) 13059 lun->last_busy = getsbinuptime(); 13060 #endif 13061 13062 /* 13063 * Run through the blocked queue on this LUN and see if anything 13064 * has become unblocked, now that this transaction is done. 13065 */ 13066 ctl_check_blocked(lun); 13067 13068 /* 13069 * If the LUN has been invalidated, free it if there is nothing 13070 * left on its OOA queue. 13071 */ 13072 if ((lun->flags & CTL_LUN_INVALID) 13073 && TAILQ_EMPTY(&lun->ooa_queue)) { 13074 mtx_unlock(&lun->lun_lock); 13075 mtx_lock(&softc->ctl_lock); 13076 ctl_free_lun(lun); 13077 mtx_unlock(&softc->ctl_lock); 13078 } else 13079 mtx_unlock(&lun->lun_lock); 13080 13081 bailout: 13082 13083 /* 13084 * If this command has been aborted, make sure we set the status 13085 * properly. The FETD is responsible for freeing the I/O and doing 13086 * whatever it needs to do to clean up its state. 13087 */ 13088 if (io->io_hdr.flags & CTL_FLAG_ABORT) 13089 ctl_set_task_aborted(&io->scsiio); 13090 13091 /* 13092 * If enabled, print command error status. 13093 */ 13094 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS && 13095 (ctl_debug & CTL_DEBUG_INFO) != 0) 13096 ctl_io_error_print(io, NULL); 13097 13098 /* 13099 * Tell the FETD or the other shelf controller we're done with this 13100 * command. Note that only SCSI commands get to this point. Task 13101 * management commands are completed above. 13102 */ 13103 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 13104 (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) { 13105 memset(&msg, 0, sizeof(msg)); 13106 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 13107 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 13108 msg.hdr.nexus = io->io_hdr.nexus; 13109 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13110 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data), 13111 M_WAITOK); 13112 } 13113 13114 fe_done(io); 13115 } 13116 13117 #ifdef CTL_WITH_CA 13118 /* 13119 * Front end should call this if it doesn't do autosense. When the request 13120 * sense comes back in from the initiator, we'll dequeue this and send it. 13121 */ 13122 int 13123 ctl_queue_sense(union ctl_io *io) 13124 { 13125 struct ctl_softc *softc = CTL_SOFTC(io); 13126 struct ctl_port *port = CTL_PORT(io); 13127 struct ctl_lun *lun; 13128 uint32_t initidx, targ_lun; 13129 13130 CTL_DEBUG_PRINT(("ctl_queue_sense\n")); 13131 13132 targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13133 13134 /* 13135 * LUN lookup will likely move to the ctl_work_thread() once we 13136 * have our new queueing infrastructure (that doesn't put things on 13137 * a per-LUN queue initially). That is so that we can handle 13138 * things like an INQUIRY to a LUN that we don't have enabled. We 13139 * can't deal with that right now. 13140 * If we don't have a LUN for this, just toss the sense information. 13141 */ 13142 mtx_lock(&softc->ctl_lock); 13143 if (targ_lun >= CTL_MAX_LUNS || 13144 (lun = softc->ctl_luns[targ_lun]) == NULL) { 13145 mtx_unlock(&softc->ctl_lock); 13146 goto bailout; 13147 } 13148 mtx_lock(&lun->lun_lock); 13149 mtx_unlock(&softc->ctl_lock); 13150 13151 /* 13152 * Already have CA set for this LUN...toss the sense information. 
13153 */ 13154 initidx = ctl_get_initindex(&io->io_hdr.nexus); 13155 if (ctl_is_set(lun->have_ca, initidx)) { 13156 mtx_unlock(&lun->lun_lock); 13157 goto bailout; 13158 } 13159 13160 memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data, 13161 MIN(sizeof(lun->pending_sense[initidx]), 13162 sizeof(io->scsiio.sense_data))); 13163 ctl_set_mask(lun->have_ca, initidx); 13164 mtx_unlock(&lun->lun_lock); 13165 13166 bailout: 13167 ctl_free_io(io); 13168 return (CTL_RETVAL_COMPLETE); 13169 } 13170 #endif 13171 13172 /* 13173 * Primary command inlet from frontend ports. All SCSI and task I/O 13174 * requests must go through this function. 13175 */ 13176 int 13177 ctl_queue(union ctl_io *io) 13178 { 13179 struct ctl_port *port = CTL_PORT(io); 13180 13181 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 13182 13183 #ifdef CTL_TIME_IO 13184 io->io_hdr.start_time = time_uptime; 13185 getbinuptime(&io->io_hdr.start_bt); 13186 #endif /* CTL_TIME_IO */ 13187 13188 /* Map FE-specific LUN ID into global one. */ 13189 io->io_hdr.nexus.targ_mapped_lun = 13190 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13191 13192 switch (io->io_hdr.io_type) { 13193 case CTL_IO_SCSI: 13194 case CTL_IO_TASK: 13195 if (ctl_debug & CTL_DEBUG_CDB) 13196 ctl_io_print(io); 13197 ctl_enqueue_incoming(io); 13198 break; 13199 default: 13200 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 13201 return (EINVAL); 13202 } 13203 13204 return (CTL_RETVAL_COMPLETE); 13205 } 13206 13207 #ifdef CTL_IO_DELAY 13208 static void 13209 ctl_done_timer_wakeup(void *arg) 13210 { 13211 union ctl_io *io; 13212 13213 io = (union ctl_io *)arg; 13214 ctl_done(io); 13215 } 13216 #endif /* CTL_IO_DELAY */ 13217 13218 void 13219 ctl_serseq_done(union ctl_io *io) 13220 { 13221 struct ctl_lun *lun = CTL_LUN(io);; 13222 13223 if (lun->be_lun == NULL || 13224 lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF) 13225 return; 13226 mtx_lock(&lun->lun_lock); 13227 io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE; 13228 ctl_check_blocked(lun); 13229 mtx_unlock(&lun->lun_lock); 13230 } 13231 13232 void 13233 ctl_done(union ctl_io *io) 13234 { 13235 13236 /* 13237 * Enable this to catch duplicate completion issues. 13238 */ 13239 #if 0 13240 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { 13241 printf("%s: type %d msg %d cdb %x iptl: " 13242 "%u:%u:%u tag 0x%04x " 13243 "flag %#x status %x\n", 13244 __func__, 13245 io->io_hdr.io_type, 13246 io->io_hdr.msg_type, 13247 io->scsiio.cdb[0], 13248 io->io_hdr.nexus.initid, 13249 io->io_hdr.nexus.targ_port, 13250 io->io_hdr.nexus.targ_lun, 13251 (io->io_hdr.io_type == 13252 CTL_IO_TASK) ? 13253 io->taskio.tag_num : 13254 io->scsiio.tag_num, 13255 io->io_hdr.flags, 13256 io->io_hdr.status); 13257 } else 13258 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; 13259 #endif 13260 13261 /* 13262 * This is an internal copy of an I/O, and should not go through 13263 * the normal done processing logic. 
void
ctl_done(union ctl_io *io)
{

	/*
	 * Enable this to catch duplicate completion issues.
	 */
#if 0
	if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
		printf("%s: type %d msg %d cdb %x iptl: "
		       "%u:%u:%u tag 0x%04x "
		       "flag %#x status %x\n",
		       __func__,
		       io->io_hdr.io_type,
		       io->io_hdr.msg_type,
		       io->scsiio.cdb[0],
		       io->io_hdr.nexus.initid,
		       io->io_hdr.nexus.targ_port,
		       io->io_hdr.nexus.targ_lun,
		       (io->io_hdr.io_type == CTL_IO_TASK) ?
		       io->taskio.tag_num :
		       io->scsiio.tag_num,
		       io->io_hdr.flags,
		       io->io_hdr.status);
	} else
		io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
#endif

	/*
	 * This is an internal copy of an I/O, and should not go through
	 * the normal done processing logic.
	 */
	if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
		return;

#ifdef CTL_IO_DELAY
	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
	} else {
		struct ctl_lun *lun = CTL_LUN(io);

		if ((lun != NULL)
		 && (lun->delay_info.done_delay > 0)) {

			callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
			callout_reset(&io->io_hdr.delay_callout,
			    lun->delay_info.done_delay * hz,
			    ctl_done_timer_wakeup, io);
			if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
				lun->delay_info.done_delay = 0;
			return;
		}
	}
#endif /* CTL_IO_DELAY */

	ctl_enqueue_done(io);
}

/*
 * Worker thread.  Drains its ISC, done, incoming and RtR queues in that
 * priority order and sleeps when all of them are empty.
 */
static void
ctl_work_thread(void *arg)
{
	struct ctl_thread *thr = (struct ctl_thread *)arg;
	struct ctl_softc *softc = thr->ctl_softc;
	union ctl_io *io;
	int retval;

	CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));

	while (!softc->shutdown) {
		/*
		 * We handle the queues in this order:
		 * - ISC
		 * - done queue (to free up resources, unblock other commands)
		 * - incoming queue
		 * - RtR queue
		 *
		 * If all of those queues are empty, we sleep until new work
		 * is queued.
		 */
		mtx_lock(&thr->queue_lock);
		io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
			mtx_unlock(&thr->queue_lock);
			ctl_handle_isc(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->done_queue, links);
			/* clear any blocked commands, call fe_done */
			mtx_unlock(&thr->queue_lock);
			ctl_process_done(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
			mtx_unlock(&thr->queue_lock);
			if (io->io_hdr.io_type == CTL_IO_TASK)
				ctl_run_task(io);
			else
				ctl_scsiio_precheck(softc, &io->scsiio);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
			mtx_unlock(&thr->queue_lock);
			retval = ctl_scsiio(&io->scsiio);
			if (retval != CTL_RETVAL_COMPLETE)
				CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
			continue;
		}

		/* Sleep until we have something to do. */
		mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
	}
	thr->thread = NULL;
	kthread_exit();
}

/*
 * LUN creation thread.  Picks backend LUNs off the pending queue and
 * instantiates them.
 */
static void
ctl_lun_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_be_lun *be_lun;

	CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));

	while (!softc->shutdown) {
		mtx_lock(&softc->ctl_lock);
		be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
		if (be_lun != NULL) {
			STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
			mtx_unlock(&softc->ctl_lock);
			ctl_create_lun(be_lun);
			continue;
		}

		/* Sleep until we have something to do. */
		mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
		    PDROP | PRIBIO, "-", 0);
	}
	softc->lun_thread = NULL;
	kthread_exit();
}
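/*
 * Thin-provisioning threshold thread.  Periodically compares each LUN's
 * backend resource counters against the thresholds armed in its Logical
 * Block Provisioning mode page and raises or clears the corresponding
 * CTL_UA_THIN_PROV_THRES unit attention, notifying the peer controller
 * when running in HA XFER mode.
 */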
static void
ctl_thresh_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_lun *lun;
	struct ctl_logical_block_provisioning_page *page;
	const char *attr;
	union ctl_ha_msg msg;
	uint64_t thres, val;
	int i, e, set;

	CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));

	while (!softc->shutdown) {
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			if ((lun->flags & CTL_LUN_DISABLED) ||
			    (lun->flags & CTL_LUN_NO_MEDIA) ||
			    lun->backend->lun_attr == NULL)
				continue;
			if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
			    softc->ha_mode == CTL_HA_MODE_XFER)
				continue;
			if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0)
				continue;
			e = 0;
			page = &lun->MODE_LBP;
			for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
				if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
					continue;
				thres = scsi_4btoul(page->descr[i].count);
				thres <<= CTL_LBP_EXPONENT;
				switch (page->descr[i].resource) {
				case 0x01:
					attr = "blocksavail";
					break;
				case 0x02:
					attr = "blocksused";
					break;
				case 0xf1:
					attr = "poolblocksavail";
					break;
				case 0xf2:
					attr = "poolblocksused";
					break;
				default:
					continue;
				}
				mtx_unlock(&softc->ctl_lock); // XXX
				val = lun->backend->lun_attr(
				    lun->be_lun->be_lun, attr);
				mtx_lock(&softc->ctl_lock);
				if (val == UINT64_MAX)
					continue;
				if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
				    == SLBPPD_ARMING_INC)
					e = (val >= thres);
				else
					e = (val <= thres);
				if (e)
					break;
			}
			mtx_lock(&lun->lun_lock);
			if (e) {
				scsi_u64to8b((uint8_t *)&page->descr[i] -
				    (uint8_t *)page, lun->ua_tpt_info);
				if (lun->lasttpt == 0 ||
				    time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
					lun->lasttpt = time_uptime;
					ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
					set = 1;
				} else
					set = 0;
			} else {
				lun->lasttpt = 0;
				ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
				set = -1;
			}
			mtx_unlock(&lun->lun_lock);
			if (set != 0 &&
			    lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
				/* Send msg to other side. */
				bzero(&msg.ua, sizeof(msg.ua));
				msg.hdr.msg_type = CTL_MSG_UA;
				msg.hdr.nexus.initid = -1;
				msg.hdr.nexus.targ_port = -1;
				msg.hdr.nexus.targ_lun = lun->lun;
				msg.hdr.nexus.targ_mapped_lun = lun->lun;
				msg.ua.ua_all = 1;
				msg.ua.ua_set = (set > 0);
				msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
				memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8);
				mtx_unlock(&softc->ctl_lock); // XXX
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
				    sizeof(msg.ua), M_WAITOK);
				mtx_lock(&softc->ctl_lock);
			}
		}
		mtx_sleep(&softc->thresh_thread, &softc->ctl_lock,
		    PDROP | PRIBIO, "-", CTL_LBP_PERIOD * hz);
	}
	softc->thresh_thread = NULL;
	kthread_exit();
}
static void
ctl_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;
	u_int idx;

	idx = (io->io_hdr.nexus.targ_port * 127 +
	       io->io_hdr.nexus.initid) % worker_threads;
	thr = &softc->threads[idx];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_rtr(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_done(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_isc(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * vim: ts=8
 */