/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id$
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#define _CTL_C

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ctype.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_cd.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
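 * The *_changeable page variants serve as masks: they indicate which
 * fields are reported as changeable and may be modified via MODE SELECT.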
 */
const static struct scsi_da_rw_recovery_page rw_er_page_default = {
    /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
    /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
    /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE,
    /*read_retry_count*/0,
    /*correction_span*/0,
    /*head_offset_count*/0,
    /*data_strobe_offset_cnt*/0,
    /*byte8*/SMS_RWER_LBPERE,
    /*write_retry_count*/0,
    /*reserved2*/0,
    /*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_rw_recovery_page rw_er_page_changeable = {
    /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
    /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
    /*byte3*/SMS_RWER_PER,
    /*read_retry_count*/0,
    /*correction_span*/0,
    /*head_offset_count*/0,
    /*data_strobe_offset_cnt*/0,
    /*byte8*/SMS_RWER_LBPERE,
    /*write_retry_count*/0,
    /*reserved2*/0,
    /*recovery_time_limit*/{0, 0},
};

const static struct scsi_format_page format_page_default = {
    /*page_code*/SMS_FORMAT_DEVICE_PAGE,
    /*page_length*/sizeof(struct scsi_format_page) - 2,
    /*tracks_per_zone*/ {0, 0},
    /*alt_sectors_per_zone*/ {0, 0},
    /*alt_tracks_per_zone*/ {0, 0},
    /*alt_tracks_per_lun*/ {0, 0},
    /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
                           CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
    /*bytes_per_sector*/ {0, 0},
    /*interleave*/ {0, 0},
    /*track_skew*/ {0, 0},
    /*cylinder_skew*/ {0, 0},
    /*flags*/ SFP_HSEC,
    /*reserved*/ {0, 0, 0}
};

const static struct scsi_format_page format_page_changeable = {
    /*page_code*/SMS_FORMAT_DEVICE_PAGE,
    /*page_length*/sizeof(struct scsi_format_page) - 2,
    /*tracks_per_zone*/ {0, 0},
    /*alt_sectors_per_zone*/ {0, 0},
    /*alt_tracks_per_zone*/ {0, 0},
    /*alt_tracks_per_lun*/ {0, 0},
    /*sectors_per_track*/ {0, 0},
    /*bytes_per_sector*/ {0, 0},
    /*interleave*/ {0, 0},
    /*track_skew*/ {0, 0},
    /*cylinder_skew*/ {0, 0},
    /*flags*/ 0,
    /*reserved*/ {0, 0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_default = {
    /*page_code*/SMS_RIGID_DISK_PAGE,
    /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
    /*cylinders*/ {0, 0, 0},
    /*heads*/ CTL_DEFAULT_HEADS,
    /*start_write_precomp*/ {0, 0, 0},
    /*start_reduced_current*/ {0, 0, 0},
    /*step_rate*/ {0, 0},
    /*landing_zone_cylinder*/ {0, 0, 0},
    /*rpl*/ SRDP_RPL_DISABLED,
    /*rotational_offset*/ 0,
    /*reserved1*/ 0,
    /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
                       CTL_DEFAULT_ROTATION_RATE & 0xff},
    /*reserved2*/ {0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
    /*page_code*/SMS_RIGID_DISK_PAGE,
    /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
    /*cylinders*/ {0, 0, 0},
    /*heads*/ 0,
    /*start_write_precomp*/ {0, 0, 0},
    /*start_reduced_current*/ {0, 0, 0},
    /*step_rate*/ {0, 0},
    /*landing_zone_cylinder*/ {0, 0, 0},
    /*rpl*/ 0,
    /*rotational_offset*/ 0,
    /*reserved1*/ 0,
    /*rotation_rate*/ {0, 0},
    /*reserved2*/ {0, 0}
};

const static struct scsi_da_verify_recovery_page verify_er_page_default = {
    /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
    /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
    /*byte3*/0,
    /*read_retry_count*/0,
    /*reserved*/{ 0, 0, 0, 0, 0, 0 },
    /*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_verify_recovery_page verify_er_page_changeable = {
    /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
    /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
    /*byte3*/SMS_VER_PER,
    /*read_retry_count*/0,
    /*reserved*/{ 0, 0, 0, 0, 0, 0 },
    /*recovery_time_limit*/{0, 0},
};

const static struct scsi_caching_page caching_page_default = {
    /*page_code*/SMS_CACHING_PAGE,
    /*page_length*/sizeof(struct scsi_caching_page) - 2,
    /*flags1*/ SCP_DISC | SCP_WCE,
    /*ret_priority*/ 0,
    /*disable_pf_transfer_len*/ {0xff, 0xff},
    /*min_prefetch*/ {0, 0},
    /*max_prefetch*/ {0xff, 0xff},
    /*max_pf_ceiling*/ {0xff, 0xff},
    /*flags2*/ 0,
    /*cache_segments*/ 0,
    /*cache_seg_size*/ {0, 0},
    /*reserved*/ 0,
    /*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_caching_page caching_page_changeable = {
    /*page_code*/SMS_CACHING_PAGE,
    /*page_length*/sizeof(struct scsi_caching_page) - 2,
    /*flags1*/ SCP_WCE | SCP_RCD,
    /*ret_priority*/ 0,
    /*disable_pf_transfer_len*/ {0, 0},
    /*min_prefetch*/ {0, 0},
    /*max_prefetch*/ {0, 0},
    /*max_pf_ceiling*/ {0, 0},
    /*flags2*/ 0,
    /*cache_segments*/ 0,
    /*cache_seg_size*/ {0, 0},
    /*reserved*/ 0,
    /*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_control_page control_page_default = {
    /*page_code*/SMS_CONTROL_MODE_PAGE,
    /*page_length*/sizeof(struct scsi_control_page) - 2,
    /*rlec*/0,
    /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
    /*eca_and_aen*/0,
    /*flags4*/SCP_TAS,
    /*aen_holdoff_period*/{0, 0},
    /*busy_timeout_period*/{0, 0},
    /*extended_selftest_completion_time*/{0, 0}
};

const static struct scsi_control_page control_page_changeable = {
    /*page_code*/SMS_CONTROL_MODE_PAGE,
    /*page_length*/sizeof(struct scsi_control_page) - 2,
    /*rlec*/SCP_DSENSE,
    /*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR,
    /*eca_and_aen*/SCP_SWP,
    /*flags4*/0,
    /*aen_holdoff_period*/{0, 0},
    /*busy_timeout_period*/{0, 0},
    /*extended_selftest_completion_time*/{0, 0}
};

#define CTL_CEM_LEN (sizeof(struct scsi_control_ext_page) - 4)

const static struct scsi_control_ext_page control_ext_page_default = {
    /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
    /*subpage_code*/0x01,
    /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
    /*flags*/0,
    /*prio*/0,
    /*max_sense*/0
};

const static struct scsi_control_ext_page control_ext_page_changeable = {
    /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
    /*subpage_code*/0x01,
    /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
    /*flags*/0,
    /*prio*/0,
    /*max_sense*/0xff
};

const static struct scsi_info_exceptions_page ie_page_default = {
    /*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
    /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
    /*info_flags*/SIEP_FLAGS_EWASC,
    /*mrie*/SIEP_MRIE_NO,
    /*interval_timer*/{0, 0, 0, 0},
    /*report_count*/{0, 0, 0, 1}
};

const static struct scsi_info_exceptions_page ie_page_changeable = {
    /*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
    /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
    /*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST |
        SIEP_FLAGS_LOGERR,
    /*mrie*/0x0f,
    /*interval_timer*/{0xff, 0xff, 0xff, 0xff},
    /*report_count*/{0xff, 0xff, 0xff, 0xff}
};

#define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4)

const static struct ctl_logical_block_provisioning_page lbp_page_default = {{
    /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
    /*subpage_code*/0x02,
    /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
    /*flags*/0,
    /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
    /*descr*/{}},
    {{/*flags*/0,
      /*resource*/0x01,
      /*reserved*/{0, 0},
      /*count*/{0, 0, 0, 0}},
     {/*flags*/0,
      /*resource*/0x02,
      /*reserved*/{0, 0},
      /*count*/{0, 0, 0, 0}},
     {/*flags*/0,
      /*resource*/0xf1,
      /*reserved*/{0, 0},
      /*count*/{0, 0, 0, 0}},
     {/*flags*/0,
      /*resource*/0xf2,
      /*reserved*/{0, 0},
      /*count*/{0, 0, 0, 0}}
    }
};

const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
    /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
    /*subpage_code*/0x02,
    /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
    /*flags*/SLBPP_SITUA,
    /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
    /*descr*/{}},
    {{/*flags*/0,
      /*resource*/0,
      /*reserved*/{0, 0},
      /*count*/{0, 0, 0, 0}},
     {/*flags*/0,
      /*resource*/0,
      /*reserved*/{0, 0},
      /*count*/{0, 0, 0, 0}},
     {/*flags*/0,
      /*resource*/0,
      /*reserved*/{0, 0},
      /*count*/{0, 0, 0, 0}},
     {/*flags*/0,
      /*resource*/0,
      /*reserved*/{0, 0},
      /*count*/{0, 0, 0, 0}}
    }
};

const static struct scsi_cddvd_capabilities_page cddvd_page_default = {
    /*page_code*/SMS_CDDVD_CAPS_PAGE,
    /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
    /*caps1*/0x3f,
    /*caps2*/0x00,
    /*caps3*/0xf0,
    /*caps4*/0x00,
    /*caps5*/0x29,
    /*caps6*/0x00,
    /*obsolete*/{0, 0},
    /*nvol_levels*/{0, 0},
    /*buffer_size*/{8, 0},
    /*obsolete2*/{0, 0},
    /*reserved*/0,
    /*digital*/0,
    /*obsolete3*/0,
    /*copy_management*/0,
    /*reserved2*/0,
    /*rotation_control*/0,
    /*cur_write_speed*/0,
    /*num_speed_descr*/0,
};

const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = {
    /*page_code*/SMS_CDDVD_CAPS_PAGE,
    /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
    /*caps1*/0,
    /*caps2*/0,
    /*caps3*/0,
    /*caps4*/0,
    /*caps5*/0,
    /*caps6*/0,
    /*obsolete*/{0, 0},
    /*nvol_levels*/{0, 0},
    /*buffer_size*/{0, 0},
    /*obsolete2*/{0, 0},
    /*reserved*/0,
    /*digital*/0,
    /*obsolete3*/0,
    /*copy_management*/0,
    /*reserved2*/0,
    /*rotation_control*/0,
    /*cur_write_speed*/0,
    /*num_speed_descr*/0,
};

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int ctl_debug = CTL_DEBUG_NONE;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
    &ctl_debug, 0, "Enabled debug flags");
static int ctl_lun_map_size = 1024;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN,
    &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)");

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0),
 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES 10

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
    int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
    struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td);
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
    struct ctl_be_lun *be_lun);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
    uint64_t res_key, uint64_t sa_res_key, uint8_t type, uint32_t residx,
    struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb,
    struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
    union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
    int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
    int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
    bool seq);
static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
    union ctl_io *pending_io, union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io *starting_io);
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
    const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio);
static void ctl_failover_lun(union ctl_io *io);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
    struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
    ctl_ua_type ua_type);
static int ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io,
    ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_query_task(union ctl_io *io, int task_set);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static int ctl_query_async_event(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static void ctl_process_done(union ctl_io *io);
static void ctl_lun_thread(void *arg);
static void ctl_thresh_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);
static const struct ctl_cmd_entry *
    ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
    ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
    const struct ctl_cmd_entry *entry);

static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
    .d_version =    D_VERSION,
    .d_flags =      0,
    .d_open =       ctl_open,
    .d_close =      ctl_close,
    .d_ioctl =      ctl_ioctl,
    .d_name =       "ctl",
};

MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
    "ctl",
    ctl_module_event_handler,
    NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

static struct ctl_frontend ha_frontend =
{
    .name = "ha",
};

static void
ctl_ha_datamove(union ctl_io *io)
{
    struct ctl_lun *lun = CTL_LUN(io);
    struct ctl_sg_entry *sgl;
    union ctl_ha_msg msg;
    uint32_t sg_entries_sent;
    int do_sg_copy, i, j;

    memset(&msg.dt, 0, sizeof(msg.dt));
    msg.hdr.msg_type = CTL_MSG_DATAMOVE;
    msg.hdr.original_sc = io->io_hdr.original_sc;
    msg.hdr.serializing_sc = io;
    msg.hdr.nexus = io->io_hdr.nexus;
    msg.hdr.status = io->io_hdr.status;
    msg.dt.flags = io->io_hdr.flags;

    /*
     * We convert everything into a S/G list here.  We can't
     * pass by reference, only by value between controllers.
     * So we can't pass a pointer to the S/G list, only as many
     * S/G entries as we can fit in here.  If it's possible for
     * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
     * then we need to break this up into multiple transfers.
     */
    if (io->scsiio.kern_sg_entries == 0) {
        msg.dt.kern_sg_entries = 1;
#if 0
        if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
            msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
        } else {
            /* XXX KDM use busdma here! */
            msg.dt.sg_list[0].addr =
                (void *)vtophys(io->scsiio.kern_data_ptr);
        }
#else
        KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
            ("HA does not support BUS_ADDR"));
        msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
#endif
        msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
        do_sg_copy = 0;
    } else {
        msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
        do_sg_copy = 1;
    }

    msg.dt.kern_data_len = io->scsiio.kern_data_len;
    msg.dt.kern_total_len = io->scsiio.kern_total_len;
    msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
    msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
    msg.dt.sg_sequence = 0;

    /*
     * Loop until we've sent all of the S/G entries.  On the
     * other end, we'll recompose these S/G entries into one
     * contiguous list before processing.
     */
    for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries;
         msg.dt.sg_sequence++) {
        msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) /
            sizeof(msg.dt.sg_list[0])),
            msg.dt.kern_sg_entries - sg_entries_sent);
        if (do_sg_copy != 0) {
            sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
            for (i = sg_entries_sent, j = 0;
                 i < msg.dt.cur_sg_entries; i++, j++) {
#if 0
                if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
                    msg.dt.sg_list[j].addr = sgl[i].addr;
                } else {
                    /* XXX KDM use busdma here! */
                    msg.dt.sg_list[j].addr =
                        (void *)vtophys(sgl[i].addr);
                }
#else
                KASSERT((io->io_hdr.flags &
                    CTL_FLAG_BUS_ADDR) == 0,
                    ("HA does not support BUS_ADDR"));
                msg.dt.sg_list[j].addr = sgl[i].addr;
#endif
                msg.dt.sg_list[j].len = sgl[i].len;
            }
        }

        sg_entries_sent += msg.dt.cur_sg_entries;
        msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries);
        if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
            sizeof(msg.dt) - sizeof(msg.dt.sg_list) +
            sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries,
            M_WAITOK) > CTL_HA_STATUS_SUCCESS) {
            io->io_hdr.port_status = 31341;
            io->scsiio.be_move_done(io);
            return;
        }
        msg.dt.sent_sg_entries = sg_entries_sent;
    }

    /*
     * Officially hand over the request from us to the peer.
     * If failover has just happened, then we must return an error.
     * If failover happens just after, then it is not our problem.
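     * (We take the LUN lock and re-check CTL_FLAG_FAILOVER below to close
     * that race before marking the DMA as in progress on the peer.)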
     */
    if (lun)
        mtx_lock(&lun->lun_lock);
    if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
        if (lun)
            mtx_unlock(&lun->lun_lock);
        io->io_hdr.port_status = 31342;
        io->scsiio.be_move_done(io);
        return;
    }
    io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
    io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
    if (lun)
        mtx_unlock(&lun->lun_lock);
}

static void
ctl_ha_done(union ctl_io *io)
{
    union ctl_ha_msg msg;

    if (io->io_hdr.io_type == CTL_IO_SCSI) {
        memset(&msg, 0, sizeof(msg));
        msg.hdr.msg_type = CTL_MSG_FINISH_IO;
        msg.hdr.original_sc = io->io_hdr.original_sc;
        msg.hdr.nexus = io->io_hdr.nexus;
        msg.hdr.status = io->io_hdr.status;
        msg.scsi.scsi_status = io->scsiio.scsi_status;
        msg.scsi.tag_num = io->scsiio.tag_num;
        msg.scsi.tag_type = io->scsiio.tag_type;
        msg.scsi.sense_len = io->scsiio.sense_len;
        memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
            io->scsiio.sense_len);
        ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
            sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
            msg.scsi.sense_len, M_WAITOK);
    }
    ctl_free_io(io);
}

static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
    union ctl_ha_msg *msg_info)
{
    struct ctl_scsiio *ctsio;

    if (msg_info->hdr.original_sc == NULL) {
        printf("%s: original_sc == NULL!\n", __func__);
        /* XXX KDM now what? */
        return;
    }

    ctsio = &msg_info->hdr.original_sc->scsiio;
    ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
    ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
    ctsio->io_hdr.status = msg_info->hdr.status;
    ctsio->scsi_status = msg_info->scsi.scsi_status;
    ctsio->sense_len = msg_info->scsi.sense_len;
    memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
        msg_info->scsi.sense_len);
    ctl_enqueue_isc((union ctl_io *)ctsio);
}

static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
    union ctl_ha_msg *msg_info)
{
    struct ctl_scsiio *ctsio;

    if (msg_info->hdr.serializing_sc == NULL) {
        printf("%s: serializing_sc == NULL!\n", __func__);
        /* XXX KDM now what? */
        return;
    }

    ctsio = &msg_info->hdr.serializing_sc->scsiio;
    ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
    ctl_enqueue_isc((union ctl_io *)ctsio);
}

void
ctl_isc_announce_lun(struct ctl_lun *lun)
{
    struct ctl_softc *softc = lun->ctl_softc;
    union ctl_ha_msg *msg;
    struct ctl_ha_msg_lun_pr_key pr_key;
    int i, k;

    if (softc->ha_link != CTL_HA_LINK_ONLINE)
        return;
    mtx_lock(&lun->lun_lock);
    i = sizeof(msg->lun);
    if (lun->lun_devid)
        i += lun->lun_devid->len;
    i += sizeof(pr_key) * lun->pr_key_count;
alloc:
    mtx_unlock(&lun->lun_lock);
    msg = malloc(i, M_CTL, M_WAITOK);
    mtx_lock(&lun->lun_lock);
    k = sizeof(msg->lun);
    if (lun->lun_devid)
        k += lun->lun_devid->len;
    k += sizeof(pr_key) * lun->pr_key_count;
    if (i < k) {
        free(msg, M_CTL);
        i = k;
        goto alloc;
    }
    bzero(&msg->lun, sizeof(msg->lun));
    msg->hdr.msg_type = CTL_MSG_LUN_SYNC;
    msg->hdr.nexus.targ_lun = lun->lun;
    msg->hdr.nexus.targ_mapped_lun = lun->lun;
    msg->lun.flags = lun->flags;
    msg->lun.pr_generation = lun->pr_generation;
    msg->lun.pr_res_idx = lun->pr_res_idx;
    msg->lun.pr_res_type = lun->pr_res_type;
    msg->lun.pr_key_count = lun->pr_key_count;
    i = 0;
    if (lun->lun_devid) {
        msg->lun.lun_devid_len = lun->lun_devid->len;
        memcpy(&msg->lun.data[i], lun->lun_devid->data,
            msg->lun.lun_devid_len);
        i += msg->lun.lun_devid_len;
    }
    for (k = 0; k < CTL_MAX_INITIATORS; k++) {
        if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0)
            continue;
        pr_key.pr_iid = k;
        memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key));
        i += sizeof(pr_key);
    }
    mtx_unlock(&lun->lun_lock);
    ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
        M_WAITOK);
    free(msg, M_CTL);

    if (lun->flags & CTL_LUN_PRIMARY_SC) {
        for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
            ctl_isc_announce_mode(lun, -1,
                lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
                lun->mode_pages.index[i].subpage);
        }
    }
}

void
ctl_isc_announce_port(struct ctl_port *port)
{
    struct ctl_softc *softc = port->ctl_softc;
    union ctl_ha_msg *msg;
    int i;

    if (port->targ_port < softc->port_min ||
        port->targ_port >= softc->port_max ||
        softc->ha_link != CTL_HA_LINK_ONLINE)
        return;
    i = sizeof(msg->port) + strlen(port->port_name) + 1;
    if (port->lun_map)
        i += port->lun_map_size * sizeof(uint32_t);
    if (port->port_devid)
        i += port->port_devid->len;
    if (port->target_devid)
        i += port->target_devid->len;
    if (port->init_devid)
        i += port->init_devid->len;
    msg = malloc(i, M_CTL, M_WAITOK);
    bzero(&msg->port, sizeof(msg->port));
    msg->hdr.msg_type = CTL_MSG_PORT_SYNC;
    msg->hdr.nexus.targ_port = port->targ_port;
    msg->port.port_type = port->port_type;
    msg->port.physical_port = port->physical_port;
    msg->port.virtual_port = port->virtual_port;
    msg->port.status = port->status;
    i = 0;
    msg->port.name_len = sprintf(&msg->port.data[i],
        "%d:%s", softc->ha_id, port->port_name) + 1;
    i += msg->port.name_len;
    if (port->lun_map) {
        msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t);
        memcpy(&msg->port.data[i], port->lun_map,
            msg->port.lun_map_len);
        i += msg->port.lun_map_len;
    }
    if (port->port_devid) {
        msg->port.port_devid_len = port->port_devid->len;
        memcpy(&msg->port.data[i], port->port_devid->data,
            msg->port.port_devid_len);
        i += msg->port.port_devid_len;
    }
    if (port->target_devid) {
        msg->port.target_devid_len = port->target_devid->len;
        memcpy(&msg->port.data[i], port->target_devid->data,
            msg->port.target_devid_len);
        i += msg->port.target_devid_len;
    }
    if (port->init_devid) {
        msg->port.init_devid_len = port->init_devid->len;
        memcpy(&msg->port.data[i], port->init_devid->data,
            msg->port.init_devid_len);
        i += msg->port.init_devid_len;
    }
    ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
        M_WAITOK);
    free(msg, M_CTL);
}

void
ctl_isc_announce_iid(struct ctl_port *port, int iid)
{
    struct ctl_softc *softc = port->ctl_softc;
    union ctl_ha_msg *msg;
    int i, l;

    if (port->targ_port < softc->port_min ||
        port->targ_port >= softc->port_max ||
        softc->ha_link != CTL_HA_LINK_ONLINE)
        return;
    mtx_lock(&softc->ctl_lock);
    i = sizeof(msg->iid);
    l = 0;
    if (port->wwpn_iid[iid].name)
        l = strlen(port->wwpn_iid[iid].name) + 1;
    i += l;
    msg = malloc(i, M_CTL, M_NOWAIT);
    if (msg == NULL) {
        mtx_unlock(&softc->ctl_lock);
        return;
    }
    bzero(&msg->iid, sizeof(msg->iid));
    msg->hdr.msg_type = CTL_MSG_IID_SYNC;
    msg->hdr.nexus.targ_port = port->targ_port;
    msg->hdr.nexus.initid = iid;
    msg->iid.in_use = port->wwpn_iid[iid].in_use;
    msg->iid.name_len = l;
    msg->iid.wwpn = port->wwpn_iid[iid].wwpn;
    if (port->wwpn_iid[iid].name)
        strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l);
    mtx_unlock(&softc->ctl_lock);
    ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT);
    free(msg, M_CTL);
}

void
ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx,
    uint8_t page, uint8_t subpage)
{
    struct ctl_softc *softc = lun->ctl_softc;
    union ctl_ha_msg msg;
    u_int i;

    if (softc->ha_link != CTL_HA_LINK_ONLINE)
        return;
    for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
        if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
            page && lun->mode_pages.index[i].subpage == subpage)
            break;
    }
    if (i == CTL_NUM_MODE_PAGES)
        return;

    /* Don't try to replicate pages not present on this device. */
    if (lun->mode_pages.index[i].page_data == NULL)
        return;

    bzero(&msg.mode, sizeof(msg.mode));
    msg.hdr.msg_type = CTL_MSG_MODE_SYNC;
    msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT;
    msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT;
    msg.hdr.nexus.targ_lun = lun->lun;
    msg.hdr.nexus.targ_mapped_lun = lun->lun;
    msg.mode.page_code = page;
    msg.mode.subpage = subpage;
    msg.mode.page_len = lun->mode_pages.index[i].page_len;
    memcpy(msg.mode.data, lun->mode_pages.index[i].page_data,
        msg.mode.page_len);
    ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode),
        M_WAITOK);
}

static void
ctl_isc_ha_link_up(struct ctl_softc *softc)
{
    struct ctl_port *port;
    struct ctl_lun *lun;
    union ctl_ha_msg msg;
    int i;

    /*
     * Announce this node's parameters to the peer for validation.
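     * The peer checks these against its own HA version, mode, and
     * compile-time limits in ctl_isc_login() and aborts the HA channel
     * on any mismatch.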
     */
    msg.login.msg_type = CTL_MSG_LOGIN;
    msg.login.version = CTL_HA_VERSION;
    msg.login.ha_mode = softc->ha_mode;
    msg.login.ha_id = softc->ha_id;
    msg.login.max_luns = CTL_MAX_LUNS;
    msg.login.max_ports = CTL_MAX_PORTS;
    msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT;
    ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login),
        M_WAITOK);

    STAILQ_FOREACH(port, &softc->port_list, links) {
        ctl_isc_announce_port(port);
        for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
            if (port->wwpn_iid[i].in_use)
                ctl_isc_announce_iid(port, i);
        }
    }
    STAILQ_FOREACH(lun, &softc->lun_list, links)
        ctl_isc_announce_lun(lun);
}

static void
ctl_isc_ha_link_down(struct ctl_softc *softc)
{
    struct ctl_port *port;
    struct ctl_lun *lun;
    union ctl_io *io;
    int i;

    mtx_lock(&softc->ctl_lock);
    STAILQ_FOREACH(lun, &softc->lun_list, links) {
        mtx_lock(&lun->lun_lock);
        if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) {
            lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
            ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
        }
        mtx_unlock(&lun->lun_lock);

        mtx_unlock(&softc->ctl_lock);
        io = ctl_alloc_io(softc->othersc_pool);
        mtx_lock(&softc->ctl_lock);
        ctl_zero_io(io);
        io->io_hdr.msg_type = CTL_MSG_FAILOVER;
        io->io_hdr.nexus.targ_mapped_lun = lun->lun;
        ctl_enqueue_isc(io);
    }

    STAILQ_FOREACH(port, &softc->port_list, links) {
        if (port->targ_port >= softc->port_min &&
            port->targ_port < softc->port_max)
            continue;
        port->status &= ~CTL_PORT_STATUS_ONLINE;
        for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
            port->wwpn_iid[i].in_use = 0;
            free(port->wwpn_iid[i].name, M_CTL);
            port->wwpn_iid[i].name = NULL;
        }
    }
    mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
    struct ctl_lun *lun;
    uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);

    mtx_lock(&softc->ctl_lock);
    if (msg->hdr.nexus.targ_mapped_lun >= CTL_MAX_LUNS ||
        (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) {
        mtx_unlock(&softc->ctl_lock);
        return;
    }
    mtx_lock(&lun->lun_lock);
    mtx_unlock(&softc->ctl_lock);
    if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set)
        memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8);
    if (msg->ua.ua_all) {
        if (msg->ua.ua_set)
            ctl_est_ua_all(lun, iid, msg->ua.ua_type);
        else
            ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
    } else {
        if (msg->ua.ua_set)
            ctl_est_ua(lun, iid, msg->ua.ua_type);
        else
            ctl_clr_ua(lun, iid, msg->ua.ua_type);
    }
    mtx_unlock(&lun->lun_lock);
}

static void
ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
    struct ctl_lun *lun;
    struct ctl_ha_msg_lun_pr_key pr_key;
    int i, k;
    ctl_lun_flags oflags;
    uint32_t targ_lun;

    targ_lun = msg->hdr.nexus.targ_mapped_lun;
    mtx_lock(&softc->ctl_lock);
    if (targ_lun >= CTL_MAX_LUNS ||
        (lun = softc->ctl_luns[targ_lun]) == NULL) {
        mtx_unlock(&softc->ctl_lock);
        return;
    }
    mtx_lock(&lun->lun_lock);
    mtx_unlock(&softc->ctl_lock);
    if (lun->flags & CTL_LUN_DISABLED) {
        mtx_unlock(&lun->lun_lock);
        return;
    }
    i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
    if (msg->lun.lun_devid_len != i || (i > 0 &&
        memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
        mtx_unlock(&lun->lun_lock);
        printf("%s: Received conflicting HA LUN %d\n",
            __func__, targ_lun);
        return;
    } else {
        /* Record whether peer is primary. */
        oflags = lun->flags;
        if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
            (msg->lun.flags & CTL_LUN_DISABLED) == 0)
            lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
        else
            lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
        if (oflags != lun->flags)
            ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);

        /* If peer is primary and we are not -- use data */
        if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
            (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
            lun->pr_generation = msg->lun.pr_generation;
            lun->pr_res_idx = msg->lun.pr_res_idx;
            lun->pr_res_type = msg->lun.pr_res_type;
            lun->pr_key_count = msg->lun.pr_key_count;
            for (k = 0; k < CTL_MAX_INITIATORS; k++)
                ctl_clr_prkey(lun, k);
            for (k = 0; k < msg->lun.pr_key_count; k++) {
                memcpy(&pr_key, &msg->lun.data[i],
                    sizeof(pr_key));
                ctl_alloc_prkey(lun, pr_key.pr_iid);
                ctl_set_prkey(lun, pr_key.pr_iid,
                    pr_key.pr_key);
                i += sizeof(pr_key);
            }
        }

        mtx_unlock(&lun->lun_lock);
        CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
            __func__, targ_lun,
            (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
            "primary" : "secondary"));

        /* If we are primary but peer doesn't know -- notify */
        if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
            (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
            ctl_isc_announce_lun(lun);
    }
}

static void
ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
    struct ctl_port *port;
    struct ctl_lun *lun;
    int i, new;

    port = softc->ctl_ports[msg->hdr.nexus.targ_port];
    if (port == NULL) {
        CTL_DEBUG_PRINT(("%s: New port %d\n", __func__,
            msg->hdr.nexus.targ_port));
        new = 1;
        port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO);
        port->frontend = &ha_frontend;
        port->targ_port = msg->hdr.nexus.targ_port;
        port->fe_datamove = ctl_ha_datamove;
        port->fe_done = ctl_ha_done;
    } else if (port->frontend == &ha_frontend) {
        CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__,
            msg->hdr.nexus.targ_port));
        new = 0;
    } else {
        printf("%s: Received conflicting HA port %d\n",
            __func__, msg->hdr.nexus.targ_port);
        return;
    }
    port->port_type = msg->port.port_type;
    port->physical_port = msg->port.physical_port;
    port->virtual_port = msg->port.virtual_port;
    port->status = msg->port.status;
    i = 0;
    free(port->port_name, M_CTL);
    port->port_name = strndup(&msg->port.data[i], msg->port.name_len,
        M_CTL);
    i += msg->port.name_len;
    if (msg->port.lun_map_len != 0) {
        if (port->lun_map == NULL ||
            port->lun_map_size * sizeof(uint32_t) <
            msg->port.lun_map_len) {
            port->lun_map_size = 0;
            free(port->lun_map, M_CTL);
            port->lun_map = malloc(msg->port.lun_map_len,
                M_CTL, M_WAITOK);
        }
        memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len);
        port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t);
        i += msg->port.lun_map_len;
    } else {
        port->lun_map_size = 0;
        free(port->lun_map, M_CTL);
        port->lun_map = NULL;
    }
    if (msg->port.port_devid_len != 0) {
        if (port->port_devid == NULL ||
            port->port_devid->len < msg->port.port_devid_len) {
            free(port->port_devid, M_CTL);
            port->port_devid = malloc(sizeof(struct ctl_devid) +
                msg->port.port_devid_len, M_CTL, M_WAITOK);
        }
        memcpy(port->port_devid->data, &msg->port.data[i],
            msg->port.port_devid_len);
        port->port_devid->len = msg->port.port_devid_len;
        i += msg->port.port_devid_len;
    } else {
        free(port->port_devid, M_CTL);
        port->port_devid = NULL;
    }
    if (msg->port.target_devid_len != 0) {
        if (port->target_devid == NULL ||
            port->target_devid->len < msg->port.target_devid_len) {
            free(port->target_devid, M_CTL);
            port->target_devid = malloc(sizeof(struct ctl_devid) +
                msg->port.target_devid_len, M_CTL, M_WAITOK);
        }
        memcpy(port->target_devid->data, &msg->port.data[i],
            msg->port.target_devid_len);
        port->target_devid->len = msg->port.target_devid_len;
        i += msg->port.target_devid_len;
    } else {
        free(port->target_devid, M_CTL);
        port->target_devid = NULL;
    }
    if (msg->port.init_devid_len != 0) {
        if (port->init_devid == NULL ||
            port->init_devid->len < msg->port.init_devid_len) {
            free(port->init_devid, M_CTL);
            port->init_devid = malloc(sizeof(struct ctl_devid) +
                msg->port.init_devid_len, M_CTL, M_WAITOK);
        }
        memcpy(port->init_devid->data, &msg->port.data[i],
            msg->port.init_devid_len);
        port->init_devid->len = msg->port.init_devid_len;
        i += msg->port.init_devid_len;
    } else {
        free(port->init_devid, M_CTL);
        port->init_devid = NULL;
    }
    if (new) {
        if (ctl_port_register(port) != 0) {
            printf("%s: ctl_port_register() failed with error\n",
                __func__);
        }
    }
    mtx_lock(&softc->ctl_lock);
    STAILQ_FOREACH(lun, &softc->lun_list, links) {
        if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
            continue;
        mtx_lock(&lun->lun_lock);
        ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
        mtx_unlock(&lun->lun_lock);
    }
    mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
    struct ctl_port *port;
    int iid;

    port = softc->ctl_ports[msg->hdr.nexus.targ_port];
    if (port == NULL) {
        printf("%s: Received IID for unknown port %d\n",
            __func__, msg->hdr.nexus.targ_port);
        return;
    }
    iid = msg->hdr.nexus.initid;
    port->wwpn_iid[iid].in_use = msg->iid.in_use;
    port->wwpn_iid[iid].wwpn = msg->iid.wwpn;
    free(port->wwpn_iid[iid].name, M_CTL);
    if (msg->iid.name_len) {
        port->wwpn_iid[iid].name = strndup(&msg->iid.data[0],
            msg->iid.name_len, M_CTL);
    } else
        port->wwpn_iid[iid].name = NULL;
}

static void
ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{

    if (msg->login.version != CTL_HA_VERSION) {
        printf("CTL HA peers have different versions %d != %d\n",
            msg->login.version, CTL_HA_VERSION);
        ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
        return;
    }
    if (msg->login.ha_mode != softc->ha_mode) {
        printf("CTL HA peers have different ha_mode %d != %d\n",
            msg->login.ha_mode, softc->ha_mode);
        ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
        return;
    }
    if (msg->login.ha_id == softc->ha_id) {
        printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id);
        ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
        return;
    }
    if (msg->login.max_luns != CTL_MAX_LUNS ||
        msg->login.max_ports != CTL_MAX_PORTS ||
        msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) {
        printf("CTL HA peers have different limits\n");
        ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
        return;
    }
}

static void
ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
    struct ctl_lun *lun;
    u_int i;
    uint32_t initidx, targ_lun;

    targ_lun = msg->hdr.nexus.targ_mapped_lun;
    mtx_lock(&softc->ctl_lock);
    if (targ_lun >= CTL_MAX_LUNS ||
        (lun = softc->ctl_luns[targ_lun]) == NULL) {
        mtx_unlock(&softc->ctl_lock);
        return;
    }
    mtx_lock(&lun->lun_lock);
    mtx_unlock(&softc->ctl_lock);
    if (lun->flags & CTL_LUN_DISABLED) {
        mtx_unlock(&lun->lun_lock);
        return;
    }
    for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
        if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
            msg->mode.page_code &&
            lun->mode_pages.index[i].subpage == msg->mode.subpage)
            break;
    }
    if (i == CTL_NUM_MODE_PAGES) {
        mtx_unlock(&lun->lun_lock);
        return;
    }
    memcpy(lun->mode_pages.index[i].page_data, msg->mode.data,
        lun->mode_pages.index[i].page_len);
    initidx = ctl_get_initindex(&msg->hdr.nexus);
    if (initidx != -1)
        ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
    mtx_unlock(&lun->lun_lock);
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
    struct ctl_softc *softc = control_softc;
    union ctl_io *io;
    struct ctl_prio *presio;
    ctl_ha_status isc_status;

    CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event));
    if (event == CTL_HA_EVT_MSG_RECV) {
        union ctl_ha_msg *msg, msgbuf;

        if (param > sizeof(msgbuf))
            msg = malloc(param, M_CTL, M_WAITOK);
        else
            msg = &msgbuf;
        isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param,
            M_WAITOK);
        if (isc_status != CTL_HA_STATUS_SUCCESS) {
            printf("%s: Error receiving message: %d\n",
                __func__, isc_status);
            if (msg != &msgbuf)
                free(msg, M_CTL);
            return;
        }

        CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type));
        switch (msg->hdr.msg_type) {
        case CTL_MSG_SERIALIZE:
            io = ctl_alloc_io(softc->othersc_pool);
            ctl_zero_io(io);
            // populate ctsio from msg
            io->io_hdr.io_type = CTL_IO_SCSI;
            io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
            io->io_hdr.original_sc = msg->hdr.original_sc;
            io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
                CTL_FLAG_IO_ACTIVE;
            /*
             * If we're in serialization-only mode, we don't
             * want to go through full done processing.  Thus
             * the COPY flag.
             *
             * XXX KDM add another flag that is more specific.
             */
            if (softc->ha_mode != CTL_HA_MODE_XFER)
                io->io_hdr.flags |= CTL_FLAG_INT_COPY;
            io->io_hdr.nexus = msg->hdr.nexus;
#if 0
            printf("port %u, iid %u, lun %u\n",
                io->io_hdr.nexus.targ_port,
                io->io_hdr.nexus.initid,
                io->io_hdr.nexus.targ_lun);
#endif
            io->scsiio.tag_num = msg->scsi.tag_num;
            io->scsiio.tag_type = msg->scsi.tag_type;
#ifdef CTL_TIME_IO
            io->io_hdr.start_time = time_uptime;
            getbinuptime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
            io->scsiio.cdb_len = msg->scsi.cdb_len;
            memcpy(io->scsiio.cdb, msg->scsi.cdb,
                CTL_MAX_CDBLEN);
            if (softc->ha_mode == CTL_HA_MODE_XFER) {
                const struct ctl_cmd_entry *entry;

                entry = ctl_get_cmd_entry(&io->scsiio, NULL);
                io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
                io->io_hdr.flags |=
                    entry->flags & CTL_FLAG_DATA_MASK;
            }
            ctl_enqueue_isc(io);
            break;

        /* Performed on the Originating SC, XFER mode only */
        case CTL_MSG_DATAMOVE: {
            struct ctl_sg_entry *sgl;
            int i, j;

            io = msg->hdr.original_sc;
            if (io == NULL) {
                printf("%s: original_sc == NULL!\n", __func__);
                /* XXX KDM do something here */
                break;
            }
            io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
            io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
            /*
             * Keep track of this, we need to send it back over
             * when the datamove is complete.
             */
            io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
            if (msg->hdr.status == CTL_SUCCESS)
                io->io_hdr.status = msg->hdr.status;

            if (msg->dt.sg_sequence == 0) {
#ifdef CTL_TIME_IO
                getbinuptime(&io->io_hdr.dma_start_bt);
#endif
                i = msg->dt.kern_sg_entries +
                    msg->dt.kern_data_len /
                    CTL_HA_DATAMOVE_SEGMENT + 1;
                sgl = malloc(sizeof(*sgl) * i, M_CTL,
                    M_WAITOK | M_ZERO);
                io->io_hdr.remote_sglist = sgl;
                io->io_hdr.local_sglist =
                    &sgl[msg->dt.kern_sg_entries];

                io->scsiio.kern_data_ptr = (uint8_t *)sgl;

                io->scsiio.kern_sg_entries =
                    msg->dt.kern_sg_entries;
                io->scsiio.rem_sg_entries =
                    msg->dt.kern_sg_entries;
                io->scsiio.kern_data_len =
                    msg->dt.kern_data_len;
                io->scsiio.kern_total_len =
                    msg->dt.kern_total_len;
                io->scsiio.kern_data_resid =
                    msg->dt.kern_data_resid;
                io->scsiio.kern_rel_offset =
                    msg->dt.kern_rel_offset;
                io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
                io->io_hdr.flags |= msg->dt.flags &
                    CTL_FLAG_BUS_ADDR;
            } else
                sgl = (struct ctl_sg_entry *)
                    io->scsiio.kern_data_ptr;

            for (i = msg->dt.sent_sg_entries, j = 0;
                 i < (msg->dt.sent_sg_entries +
                 msg->dt.cur_sg_entries); i++, j++) {
                sgl[i].addr = msg->dt.sg_list[j].addr;
                sgl[i].len = msg->dt.sg_list[j].len;

#if 0
                printf("%s: DATAMOVE: %p,%lu j=%d, i=%d\n",
                    __func__, sgl[i].addr, sgl[i].len, j, i);
#endif
            }

            /*
             * If this is the last piece of the I/O, we've got
             * the full S/G list.  Queue processing in the thread.
             * Otherwise wait for the next piece.
             */
            if (msg->dt.sg_last != 0)
                ctl_enqueue_isc(io);
            break;
        }
        /* Performed on the Serializing (primary) SC, XFER mode only */
        case CTL_MSG_DATAMOVE_DONE: {
            if (msg->hdr.serializing_sc == NULL) {
                printf("%s: serializing_sc == NULL!\n",
                    __func__);
                /* XXX KDM now what? */
                break;
            }
            /*
             * We grab the sense information here in case
             * there was a failure, so we can return status
             * back to the initiator.
             */
            io = msg->hdr.serializing_sc;
            io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
            io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
            io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
            io->io_hdr.port_status = msg->scsi.port_status;
            io->scsiio.kern_data_resid = msg->scsi.kern_data_resid;
            if (msg->hdr.status != CTL_STATUS_NONE) {
                io->io_hdr.status = msg->hdr.status;
                io->scsiio.scsi_status = msg->scsi.scsi_status;
                io->scsiio.sense_len = msg->scsi.sense_len;
                memcpy(&io->scsiio.sense_data,
                    &msg->scsi.sense_data,
                    msg->scsi.sense_len);
                if (msg->hdr.status == CTL_SUCCESS)
                    io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
            }
            ctl_enqueue_isc(io);
            break;
        }

        /* Performed on the Originating SC, SER_ONLY mode */
        case CTL_MSG_R2R:
            io = msg->hdr.original_sc;
            if (io == NULL) {
                printf("%s: original_sc == NULL!\n",
                    __func__);
                break;
            }
            io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
            io->io_hdr.msg_type = CTL_MSG_R2R;
            io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
            ctl_enqueue_isc(io);
            break;

        /*
         * Performed on the Serializing (i.e. primary) SC in SER_ONLY
         * mode.
         * Performed on the Originating (i.e. secondary) SC in XFER
         * mode.
         */
        case CTL_MSG_FINISH_IO:
            if (softc->ha_mode == CTL_HA_MODE_XFER)
                ctl_isc_handler_finish_xfer(softc, msg);
            else
                ctl_isc_handler_finish_ser_only(softc, msg);
            break;

        /* Performed on the Originating SC */
        case CTL_MSG_BAD_JUJU:
            io = msg->hdr.original_sc;
            if (io == NULL) {
                printf("%s: Bad JUJU!, original_sc is NULL!\n",
                    __func__);
                break;
            }
            ctl_copy_sense_data(msg, io);
            /*
             * IO should have already been cleaned up on other
             * SC so clear this flag so we won't send a message
             * back to finish the IO there.
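             * (The flag cleared below, CTL_FLAG_SENT_2OTHER_SC, is what
             * would otherwise cause a finish message to be sent to the
             * peer when this I/O completes.)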
             */
            io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
            io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;

            /* io = msg->hdr.serializing_sc; */
            io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
            ctl_enqueue_isc(io);
            break;

        /* Handle resets sent from the other side */
        case CTL_MSG_MANAGE_TASKS: {
            struct ctl_taskio *taskio;
            taskio = (struct ctl_taskio *)ctl_alloc_io(
                softc->othersc_pool);
            ctl_zero_io((union ctl_io *)taskio);
            taskio->io_hdr.io_type = CTL_IO_TASK;
            taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
            taskio->io_hdr.nexus = msg->hdr.nexus;
            taskio->task_action = msg->task.task_action;
            taskio->tag_num = msg->task.tag_num;
            taskio->tag_type = msg->task.tag_type;
#ifdef CTL_TIME_IO
            taskio->io_hdr.start_time = time_uptime;
            getbinuptime(&taskio->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
            ctl_run_task((union ctl_io *)taskio);
            break;
        }
        /* Persistent Reserve action which needs attention */
        case CTL_MSG_PERS_ACTION:
            presio = (struct ctl_prio *)ctl_alloc_io(
                softc->othersc_pool);
            ctl_zero_io((union ctl_io *)presio);
            presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
            presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
            presio->io_hdr.nexus = msg->hdr.nexus;
            presio->pr_msg = msg->pr;
            ctl_enqueue_isc((union ctl_io *)presio);
            break;
        case CTL_MSG_UA:
            ctl_isc_ua(softc, msg, param);
            break;
        case CTL_MSG_PORT_SYNC:
            ctl_isc_port_sync(softc, msg, param);
            break;
        case CTL_MSG_LUN_SYNC:
            ctl_isc_lun_sync(softc, msg, param);
            break;
        case CTL_MSG_IID_SYNC:
            ctl_isc_iid_sync(softc, msg, param);
            break;
        case CTL_MSG_LOGIN:
            ctl_isc_login(softc, msg, param);
            break;
        case CTL_MSG_MODE_SYNC:
            ctl_isc_mode_sync(softc, msg, param);
            break;
        default:
            printf("Received HA message of unknown type %d\n",
                msg->hdr.msg_type);
            ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
            break;
        }
        if (msg != &msgbuf)
            free(msg, M_CTL);
    } else if (event == CTL_HA_EVT_LINK_CHANGE) {
        printf("CTL: HA link status changed from %d to %d\n",
            softc->ha_link, param);
        if (param == softc->ha_link)
            return;
        if (softc->ha_link == CTL_HA_LINK_ONLINE) {
            softc->ha_link = param;
            ctl_isc_ha_link_down(softc);
        } else {
            softc->ha_link = param;
            if (softc->ha_link == CTL_HA_LINK_ONLINE)
                ctl_isc_ha_link_up(softc);
        }
        return;
    } else {
        printf("ctl_isc_event_handler: Unknown event %d\n", event);
        return;
    }
}

static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{

    memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data,
        src->scsi.sense_len);
    dest->scsiio.scsi_status = src->scsi.scsi_status;
    dest->scsiio.sense_len = src->scsi.sense_len;
    dest->io_hdr.status = src->hdr.status;
}

static void
ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest)
{

    memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data,
        src->scsiio.sense_len);
    dest->scsi.scsi_status = src->scsiio.scsi_status;
    dest->scsi.sense_len = src->scsiio.sense_len;
    dest->hdr.status = src->io_hdr.status;
}

void
ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
    struct ctl_softc *softc = lun->ctl_softc;
    ctl_ua_type *pu;

    if (initidx < softc->init_min || initidx >= softc->init_max)
        return;
    mtx_assert(&lun->lun_lock, MA_OWNED);
    pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
    if (pu == NULL)
        return;
    pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
}

void
ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua)
{
    int i;

    mtx_assert(&lun->lun_lock, MA_OWNED);
    if (lun->pending_ua[port] == NULL)
        return;
    for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
        if (port * CTL_MAX_INIT_PER_PORT + i == except)
            continue;
        lun->pending_ua[port][i] |= ua;
    }
}

void
ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
    struct ctl_softc *softc = lun->ctl_softc;
    int i;

    mtx_assert(&lun->lun_lock, MA_OWNED);
    for (i = softc->port_min; i < softc->port_max; i++)
        ctl_est_ua_port(lun, i, except, ua);
}

void
ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
    struct ctl_softc *softc = lun->ctl_softc;
    ctl_ua_type *pu;

    if (initidx < softc->init_min || initidx >= softc->init_max)
        return;
    mtx_assert(&lun->lun_lock, MA_OWNED);
    pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
    if (pu == NULL)
        return;
    pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua;
}

void
ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
    struct ctl_softc *softc = lun->ctl_softc;
    int i, j;

    mtx_assert(&lun->lun_lock, MA_OWNED);
    for (i = softc->port_min; i < softc->port_max; i++) {
        if (lun->pending_ua[i] == NULL)
            continue;
        for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
            if (i * CTL_MAX_INIT_PER_PORT + j == except)
                continue;
            lun->pending_ua[i][j] &= ~ua;
        }
    }
}

void
ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
    ctl_ua_type ua_type)
{
    struct ctl_lun *lun;

    mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
    STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) {
        mtx_lock(&lun->lun_lock);
        ctl_clr_ua(lun, initidx, ua_type);
        mtx_unlock(&lun->lun_lock);
    }
}

static int
ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct ctl_softc *softc = (struct ctl_softc *)arg1;
    struct ctl_lun *lun;
    struct ctl_lun_req ireq;
    int error, value;

    value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1;
0 : 1; 1754 error = sysctl_handle_int(oidp, &value, 0, req); 1755 if ((error != 0) || (req->newptr == NULL)) 1756 return (error); 1757 1758 mtx_lock(&softc->ctl_lock); 1759 if (value == 0) 1760 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1761 else 1762 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1763 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1764 mtx_unlock(&softc->ctl_lock); 1765 bzero(&ireq, sizeof(ireq)); 1766 ireq.reqtype = CTL_LUNREQ_MODIFY; 1767 ireq.reqdata.modify.lun_id = lun->lun; 1768 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, 1769 curthread); 1770 if (ireq.status != CTL_LUN_OK) { 1771 printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", 1772 __func__, ireq.status, ireq.error_str); 1773 } 1774 mtx_lock(&softc->ctl_lock); 1775 } 1776 mtx_unlock(&softc->ctl_lock); 1777 return (0); 1778 } 1779 1780 static int 1781 ctl_init(void) 1782 { 1783 struct make_dev_args args; 1784 struct ctl_softc *softc; 1785 void *other_pool; 1786 int i, error; 1787 1788 softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1789 M_WAITOK | M_ZERO); 1790 1791 make_dev_args_init(&args); 1792 args.mda_devsw = &ctl_cdevsw; 1793 args.mda_uid = UID_ROOT; 1794 args.mda_gid = GID_OPERATOR; 1795 args.mda_mode = 0600; 1796 args.mda_si_drv1 = softc; 1797 error = make_dev_s(&args, &softc->dev, "cam/ctl"); 1798 if (error != 0) { 1799 free(softc, M_DEVBUF); 1800 control_softc = NULL; 1801 return (error); 1802 } 1803 1804 sysctl_ctx_init(&softc->sysctl_ctx); 1805 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1806 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1807 CTLFLAG_RD, 0, "CAM Target Layer"); 1808 1809 if (softc->sysctl_tree == NULL) { 1810 printf("%s: unable to allocate sysctl tree\n", __func__); 1811 destroy_dev(softc->dev); 1812 free(softc, M_DEVBUF); 1813 control_softc = NULL; 1814 return (ENOMEM); 1815 } 1816 1817 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1818 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1819 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1820 softc->flags = 0; 1821 1822 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1823 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, 1824 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); 1825 1826 /* 1827 * In Copan's HA scheme, the "master" and "slave" roles are 1828 * figured out through the slot the controller is in. Although it 1829 * is an active/active system, someone has to be in charge. 
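 *
 * A rough sketch of what ha_id ends up meaning below (assuming
 * NUM_HA_SHELVES == 2): ha_id == 0 or an out-of-range value leaves this
 * head single, owning all CTL_MAX_PORTS ports; ha_id == 1 owns ports
 * [0, CTL_MAX_PORTS / 2) and ha_id == 2 owns ports
 * [CTL_MAX_PORTS / 2, CTL_MAX_PORTS).  init_min/init_max follow the
 * port range, scaled by CTL_MAX_INIT_PER_PORT.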
1830 */ 1831 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1832 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1833 "HA head ID (0 - no HA)"); 1834 if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) { 1835 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1836 softc->is_single = 1; 1837 softc->port_cnt = CTL_MAX_PORTS; 1838 softc->port_min = 0; 1839 } else { 1840 softc->port_cnt = CTL_MAX_PORTS / NUM_HA_SHELVES; 1841 softc->port_min = (softc->ha_id - 1) * softc->port_cnt; 1842 } 1843 softc->port_max = softc->port_min + softc->port_cnt; 1844 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; 1845 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; 1846 1847 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1848 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, 1849 "HA link state (0 - offline, 1 - unknown, 2 - online)"); 1850 1851 STAILQ_INIT(&softc->lun_list); 1852 STAILQ_INIT(&softc->pending_lun_queue); 1853 STAILQ_INIT(&softc->fe_list); 1854 STAILQ_INIT(&softc->port_list); 1855 STAILQ_INIT(&softc->be_list); 1856 ctl_tpc_init(softc); 1857 1858 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, 1859 &other_pool) != 0) 1860 { 1861 printf("ctl: can't allocate %d entry other SC pool, " 1862 "exiting\n", CTL_POOL_ENTRIES_OTHER_SC); 1863 return (ENOMEM); 1864 } 1865 softc->othersc_pool = other_pool; 1866 1867 if (worker_threads <= 0) 1868 worker_threads = max(1, mp_ncpus / 4); 1869 if (worker_threads > CTL_MAX_THREADS) 1870 worker_threads = CTL_MAX_THREADS; 1871 1872 for (i = 0; i < worker_threads; i++) { 1873 struct ctl_thread *thr = &softc->threads[i]; 1874 1875 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); 1876 thr->ctl_softc = softc; 1877 STAILQ_INIT(&thr->incoming_queue); 1878 STAILQ_INIT(&thr->rtr_queue); 1879 STAILQ_INIT(&thr->done_queue); 1880 STAILQ_INIT(&thr->isc_queue); 1881 1882 error = kproc_kthread_add(ctl_work_thread, thr, 1883 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); 1884 if (error != 0) { 1885 printf("error creating CTL work thread!\n"); 1886 ctl_pool_free(other_pool); 1887 return (error); 1888 } 1889 } 1890 error = kproc_kthread_add(ctl_lun_thread, softc, 1891 &softc->ctl_proc, NULL, 0, 0, "ctl", "lun"); 1892 if (error != 0) { 1893 printf("error creating CTL lun thread!\n"); 1894 ctl_pool_free(other_pool); 1895 return (error); 1896 } 1897 error = kproc_kthread_add(ctl_thresh_thread, softc, 1898 &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh"); 1899 if (error != 0) { 1900 printf("error creating CTL threshold thread!\n"); 1901 ctl_pool_free(other_pool); 1902 return (error); 1903 } 1904 1905 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), 1906 OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN, 1907 softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); 1908 1909 if (softc->is_single == 0) { 1910 ctl_frontend_register(&ha_frontend); 1911 if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) { 1912 printf("ctl_init: ctl_ha_msg_init failed.\n"); 1913 softc->is_single = 1; 1914 } else 1915 if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler) 1916 != CTL_HA_STATUS_SUCCESS) { 1917 printf("ctl_init: ctl_ha_msg_register failed.\n"); 1918 softc->is_single = 1; 1919 } 1920 } 1921 return (0); 1922 } 1923 1924 void 1925 ctl_shutdown(void) 1926 { 1927 struct ctl_softc *softc = control_softc; 1928 struct ctl_lun *lun, *next_lun; 1929 1930 if (softc->is_single == 0) { 1931 ctl_ha_msg_shutdown(softc); 1932 if 
(ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) 1933 != CTL_HA_STATUS_SUCCESS) 1934 printf("%s: ctl_ha_msg_deregister failed.\n", __func__); 1935 if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS) 1936 printf("%s: ctl_ha_msg_destroy failed.\n", __func__); 1937 ctl_frontend_deregister(&ha_frontend); 1938 } 1939 1940 mtx_lock(&softc->ctl_lock); 1941 1942 STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) 1943 ctl_free_lun(lun); 1944 1945 mtx_unlock(&softc->ctl_lock); 1946 1947 #if 0 1948 ctl_shutdown_thread(softc->work_thread); 1949 mtx_destroy(&softc->queue_lock); 1950 #endif 1951 1952 ctl_tpc_shutdown(softc); 1953 uma_zdestroy(softc->io_zone); 1954 mtx_destroy(&softc->ctl_lock); 1955 1956 destroy_dev(softc->dev); 1957 1958 sysctl_ctx_free(&softc->sysctl_ctx); 1959 1960 free(softc, M_DEVBUF); 1961 control_softc = NULL; 1962 } 1963 1964 static int 1965 ctl_module_event_handler(module_t mod, int what, void *arg) 1966 { 1967 1968 switch (what) { 1969 case MOD_LOAD: 1970 return (ctl_init()); 1971 case MOD_UNLOAD: 1972 return (EBUSY); 1973 default: 1974 return (EOPNOTSUPP); 1975 } 1976 } 1977 1978 /* 1979 * XXX KDM should we do some access checks here? Bump a reference count to 1980 * prevent a CTL module from being unloaded while someone has it open? 1981 */ 1982 static int 1983 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 1984 { 1985 return (0); 1986 } 1987 1988 static int 1989 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 1990 { 1991 return (0); 1992 } 1993 1994 /* 1995 * Remove an initiator by port number and initiator ID. 1996 * Returns 0 for success, -1 for failure. 1997 */ 1998 int 1999 ctl_remove_initiator(struct ctl_port *port, int iid) 2000 { 2001 struct ctl_softc *softc = port->ctl_softc; 2002 2003 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2004 2005 if (iid >= CTL_MAX_INIT_PER_PORT) { 2006 printf("%s: initiator ID %u > maximum %u!\n", 2007 __func__, iid, CTL_MAX_INIT_PER_PORT); 2008 return (-1); 2009 } 2010 2011 mtx_lock(&softc->ctl_lock); 2012 port->wwpn_iid[iid].in_use--; 2013 port->wwpn_iid[iid].last_use = time_uptime; 2014 mtx_unlock(&softc->ctl_lock); 2015 ctl_isc_announce_iid(port, iid); 2016 2017 return (0); 2018 } 2019 2020 /* 2021 * Add an initiator to the initiator map. 2022 * Returns iid for success, < 0 for failure.
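 *
 * Slot selection, as implemented below: an existing entry with a
 * matching WWPN or name is reused; failing that, a completely unused
 * slot (no WWPN, no name, not in use) is taken; failing that, the
 * least recently used slot that is not currently in use is recycled.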
2023 */ 2024 int 2025 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 2026 { 2027 struct ctl_softc *softc = port->ctl_softc; 2028 time_t best_time; 2029 int i, best; 2030 2031 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2032 2033 if (iid >= CTL_MAX_INIT_PER_PORT) { 2034 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 2035 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 2036 free(name, M_CTL); 2037 return (-1); 2038 } 2039 2040 mtx_lock(&softc->ctl_lock); 2041 2042 if (iid < 0 && (wwpn != 0 || name != NULL)) { 2043 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2044 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 2045 iid = i; 2046 break; 2047 } 2048 if (name != NULL && port->wwpn_iid[i].name != NULL && 2049 strcmp(name, port->wwpn_iid[i].name) == 0) { 2050 iid = i; 2051 break; 2052 } 2053 } 2054 } 2055 2056 if (iid < 0) { 2057 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2058 if (port->wwpn_iid[i].in_use == 0 && 2059 port->wwpn_iid[i].wwpn == 0 && 2060 port->wwpn_iid[i].name == NULL) { 2061 iid = i; 2062 break; 2063 } 2064 } 2065 } 2066 2067 if (iid < 0) { 2068 best = -1; 2069 best_time = INT32_MAX; 2070 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2071 if (port->wwpn_iid[i].in_use == 0) { 2072 if (port->wwpn_iid[i].last_use < best_time) { 2073 best = i; 2074 best_time = port->wwpn_iid[i].last_use; 2075 } 2076 } 2077 } 2078 iid = best; 2079 } 2080 2081 if (iid < 0) { 2082 mtx_unlock(&softc->ctl_lock); 2083 free(name, M_CTL); 2084 return (-2); 2085 } 2086 2087 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 2088 /* 2089 * This is not an error yet. 2090 */ 2091 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 2092 #if 0 2093 printf("%s: port %d iid %u WWPN %#jx arrived" 2094 " again\n", __func__, port->targ_port, 2095 iid, (uintmax_t)wwpn); 2096 #endif 2097 goto take; 2098 } 2099 if (name != NULL && port->wwpn_iid[iid].name != NULL && 2100 strcmp(name, port->wwpn_iid[iid].name) == 0) { 2101 #if 0 2102 printf("%s: port %d iid %u name '%s' arrived" 2103 " again\n", __func__, port->targ_port, 2104 iid, name); 2105 #endif 2106 goto take; 2107 } 2108 2109 /* 2110 * This is an error, but what do we do about it? The 2111 * driver is telling us we have a new WWPN for this 2112 * initiator ID, so we pretty much need to use it. 2113 */ 2114 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 2115 " but WWPN %#jx '%s' is still at that address\n", 2116 __func__, port->targ_port, iid, wwpn, name, 2117 (uintmax_t)port->wwpn_iid[iid].wwpn, 2118 port->wwpn_iid[iid].name); 2119 2120 /* 2121 * XXX KDM clear have_ca and ua_pending on each LUN for 2122 * this initiator. 
2123 */ 2124 } 2125 take: 2126 free(port->wwpn_iid[iid].name, M_CTL); 2127 port->wwpn_iid[iid].name = name; 2128 port->wwpn_iid[iid].wwpn = wwpn; 2129 port->wwpn_iid[iid].in_use++; 2130 mtx_unlock(&softc->ctl_lock); 2131 ctl_isc_announce_iid(port, iid); 2132 2133 return (iid); 2134 } 2135 2136 static int 2137 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 2138 { 2139 int len; 2140 2141 switch (port->port_type) { 2142 case CTL_PORT_FC: 2143 { 2144 struct scsi_transportid_fcp *id = 2145 (struct scsi_transportid_fcp *)buf; 2146 if (port->wwpn_iid[iid].wwpn == 0) 2147 return (0); 2148 memset(id, 0, sizeof(*id)); 2149 id->format_protocol = SCSI_PROTO_FC; 2150 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 2151 return (sizeof(*id)); 2152 } 2153 case CTL_PORT_ISCSI: 2154 { 2155 struct scsi_transportid_iscsi_port *id = 2156 (struct scsi_transportid_iscsi_port *)buf; 2157 if (port->wwpn_iid[iid].name == NULL) 2158 return (0); 2159 memset(id, 0, 256); 2160 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 2161 SCSI_PROTO_ISCSI; 2162 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 2163 len = roundup2(min(len, 252), 4); 2164 scsi_ulto2b(len, id->additional_length); 2165 return (sizeof(*id) + len); 2166 } 2167 case CTL_PORT_SAS: 2168 { 2169 struct scsi_transportid_sas *id = 2170 (struct scsi_transportid_sas *)buf; 2171 if (port->wwpn_iid[iid].wwpn == 0) 2172 return (0); 2173 memset(id, 0, sizeof(*id)); 2174 id->format_protocol = SCSI_PROTO_SAS; 2175 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 2176 return (sizeof(*id)); 2177 } 2178 default: 2179 { 2180 struct scsi_transportid_spi *id = 2181 (struct scsi_transportid_spi *)buf; 2182 memset(id, 0, sizeof(*id)); 2183 id->format_protocol = SCSI_PROTO_SPI; 2184 scsi_ulto2b(iid, id->scsi_addr); 2185 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 2186 return (sizeof(*id)); 2187 } 2188 } 2189 } 2190 2191 /* 2192 * Serialize a command that went down the "wrong" side, and so was sent to 2193 * this controller for execution. The logic is a little different than the 2194 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 2195 * sent back to the other side, but in the success case, we execute the 2196 * command on this side (XFER mode) or tell the other side to execute it 2197 * (SER_ONLY mode). 2198 */ 2199 static void 2200 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 2201 { 2202 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2203 struct ctl_port *port = CTL_PORT(ctsio); 2204 union ctl_ha_msg msg_info; 2205 struct ctl_lun *lun; 2206 const struct ctl_cmd_entry *entry; 2207 uint32_t targ_lun; 2208 2209 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 2210 2211 /* Make sure that we know about this port. */ 2212 if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) { 2213 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2214 /*retry_count*/ 1); 2215 goto badjuju; 2216 } 2217 2218 /* Make sure that we know about this LUN. */ 2219 mtx_lock(&softc->ctl_lock); 2220 if (targ_lun >= CTL_MAX_LUNS || 2221 (lun = softc->ctl_luns[targ_lun]) == NULL) { 2222 mtx_unlock(&softc->ctl_lock); 2223 2224 /* 2225 * The other node would not send this request to us unless 2226 * received announce that we are primary node for this LUN. 2227 * If this LUN does not exist now, it is probably result of 2228 * a race, so respond to initiator in the most opaque way. 
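 * Reporting BUSY below does just that: the initiator is simply asked
 * to retry later and learns nothing about whether the LUN exists on
 * this node.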
2229 */ 2230 ctl_set_busy(ctsio); 2231 goto badjuju; 2232 } 2233 mtx_lock(&lun->lun_lock); 2234 mtx_unlock(&softc->ctl_lock); 2235 2236 /* 2237 * If the LUN is invalid, pretend that it doesn't exist. 2238 * It will go away as soon as all pending I/Os completed. 2239 */ 2240 if (lun->flags & CTL_LUN_DISABLED) { 2241 mtx_unlock(&lun->lun_lock); 2242 ctl_set_busy(ctsio); 2243 goto badjuju; 2244 } 2245 2246 entry = ctl_get_cmd_entry(ctsio, NULL); 2247 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 2248 mtx_unlock(&lun->lun_lock); 2249 goto badjuju; 2250 } 2251 2252 CTL_LUN(ctsio) = lun; 2253 CTL_BACKEND_LUN(ctsio) = lun->be_lun; 2254 2255 /* 2256 * Every I/O goes into the OOA queue for a 2257 * particular LUN, and stays there until completion. 2258 */ 2259 #ifdef CTL_TIME_IO 2260 if (TAILQ_EMPTY(&lun->ooa_queue)) 2261 lun->idle_time += getsbinuptime() - lun->last_busy; 2262 #endif 2263 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2264 2265 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 2266 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, 2267 ooa_links))) { 2268 case CTL_ACTION_BLOCK: 2269 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 2270 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 2271 blocked_links); 2272 mtx_unlock(&lun->lun_lock); 2273 break; 2274 case CTL_ACTION_PASS: 2275 case CTL_ACTION_SKIP: 2276 if (softc->ha_mode == CTL_HA_MODE_XFER) { 2277 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 2278 ctl_enqueue_rtr((union ctl_io *)ctsio); 2279 mtx_unlock(&lun->lun_lock); 2280 } else { 2281 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 2282 mtx_unlock(&lun->lun_lock); 2283 2284 /* send msg back to other side */ 2285 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 2286 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 2287 msg_info.hdr.msg_type = CTL_MSG_R2R; 2288 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2289 sizeof(msg_info.hdr), M_WAITOK); 2290 } 2291 break; 2292 case CTL_ACTION_OVERLAP: 2293 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2294 mtx_unlock(&lun->lun_lock); 2295 ctl_set_overlapped_cmd(ctsio); 2296 goto badjuju; 2297 case CTL_ACTION_OVERLAP_TAG: 2298 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2299 mtx_unlock(&lun->lun_lock); 2300 ctl_set_overlapped_tag(ctsio, ctsio->tag_num); 2301 goto badjuju; 2302 case CTL_ACTION_ERROR: 2303 default: 2304 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2305 mtx_unlock(&lun->lun_lock); 2306 2307 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2308 /*retry_count*/ 0); 2309 badjuju: 2310 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 2311 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 2312 msg_info.hdr.serializing_sc = NULL; 2313 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 2314 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2315 sizeof(msg_info.scsi), M_WAITOK); 2316 ctl_free_io((union ctl_io *)ctsio); 2317 break; 2318 } 2319 } 2320 2321 /* 2322 * Returns 0 for success, errno for failure. 2323 */ 2324 static void 2325 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 2326 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 2327 { 2328 union ctl_io *io; 2329 2330 mtx_lock(&lun->lun_lock); 2331 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); 2332 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2333 ooa_links)) { 2334 struct ctl_ooa_entry *entry; 2335 2336 /* 2337 * If we've got more than we can fit, just count the 2338 * remaining entries. 
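 * The count keeps running past the allocation on purpose: the
 * CTL_GET_OOA handler below uses the excess to fill in dropped_num and
 * to report CTL_OOA_NEED_MORE_SPACE back to userland.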
2339 */ 2340 if (*cur_fill_num >= ooa_hdr->alloc_num) 2341 continue; 2342 2343 entry = &kern_entries[*cur_fill_num]; 2344 2345 entry->tag_num = io->scsiio.tag_num; 2346 entry->lun_num = lun->lun; 2347 #ifdef CTL_TIME_IO 2348 entry->start_bt = io->io_hdr.start_bt; 2349 #endif 2350 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 2351 entry->cdb_len = io->scsiio.cdb_len; 2352 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 2353 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 2354 2355 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 2356 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 2357 2358 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2359 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2360 2361 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2362 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2363 2364 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2365 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2366 } 2367 mtx_unlock(&lun->lun_lock); 2368 } 2369 2370 static void * 2371 ctl_copyin_alloc(void *user_addr, unsigned int len, char *error_str, 2372 size_t error_str_len) 2373 { 2374 void *kptr; 2375 2376 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 2377 2378 if (copyin(user_addr, kptr, len) != 0) { 2379 snprintf(error_str, error_str_len, "Error copying %d bytes " 2380 "from user address %p to kernel address %p", len, 2381 user_addr, kptr); 2382 free(kptr, M_CTL); 2383 return (NULL); 2384 } 2385 2386 return (kptr); 2387 } 2388 2389 static void 2390 ctl_free_args(int num_args, struct ctl_be_arg *args) 2391 { 2392 int i; 2393 2394 if (args == NULL) 2395 return; 2396 2397 for (i = 0; i < num_args; i++) { 2398 free(args[i].kname, M_CTL); 2399 free(args[i].kvalue, M_CTL); 2400 } 2401 2402 free(args, M_CTL); 2403 } 2404 2405 static struct ctl_be_arg * 2406 ctl_copyin_args(int num_args, struct ctl_be_arg *uargs, 2407 char *error_str, size_t error_str_len) 2408 { 2409 struct ctl_be_arg *args; 2410 int i; 2411 2412 args = ctl_copyin_alloc(uargs, num_args * sizeof(*args), 2413 error_str, error_str_len); 2414 2415 if (args == NULL) 2416 goto bailout; 2417 2418 for (i = 0; i < num_args; i++) { 2419 args[i].kname = NULL; 2420 args[i].kvalue = NULL; 2421 } 2422 2423 for (i = 0; i < num_args; i++) { 2424 uint8_t *tmpptr; 2425 2426 if (args[i].namelen == 0) { 2427 snprintf(error_str, error_str_len, "Argument %d " 2428 "name length is zero", i); 2429 goto bailout; 2430 } 2431 2432 args[i].kname = ctl_copyin_alloc(args[i].name, 2433 args[i].namelen, error_str, error_str_len); 2434 if (args[i].kname == NULL) 2435 goto bailout; 2436 2437 if (args[i].kname[args[i].namelen - 1] != '\0') { 2438 snprintf(error_str, error_str_len, "Argument %d " 2439 "name is not NUL-terminated", i); 2440 goto bailout; 2441 } 2442 2443 if (args[i].flags & CTL_BEARG_RD) { 2444 if (args[i].vallen == 0) { 2445 snprintf(error_str, error_str_len, "Argument %d " 2446 "value length is zero", i); 2447 goto bailout; 2448 } 2449 2450 tmpptr = ctl_copyin_alloc(args[i].value, 2451 args[i].vallen, error_str, error_str_len); 2452 if (tmpptr == NULL) 2453 goto bailout; 2454 2455 if ((args[i].flags & CTL_BEARG_ASCII) 2456 && (tmpptr[args[i].vallen - 1] != '\0')) { 2457 snprintf(error_str, error_str_len, "Argument " 2458 "%d value is not NUL-terminated", i); 2459 free(tmpptr, M_CTL); 2460 goto bailout; 2461 } 2462 args[i].kvalue = tmpptr; 2463 } else { 2464 args[i].kvalue = malloc(args[i].vallen, 2465 M_CTL, M_WAITOK | M_ZERO); 2466 } 2467 } 2468 2469 return (args); 2470 bailout: 2471 2472 ctl_free_args(num_args, args); 2473 2474 return (NULL); 2475 } 2476 2477 static void 2478 
ctl_copyout_args(int num_args, struct ctl_be_arg *args) 2479 { 2480 int i; 2481 2482 for (i = 0; i < num_args; i++) { 2483 if (args[i].flags & CTL_BEARG_WR) 2484 copyout(args[i].kvalue, args[i].value, args[i].vallen); 2485 } 2486 } 2487 2488 /* 2489 * Escape characters that are illegal or not recommended in XML. 2490 */ 2491 int 2492 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 2493 { 2494 char *end = str + size; 2495 int retval; 2496 2497 retval = 0; 2498 2499 for (; *str && str < end; str++) { 2500 switch (*str) { 2501 case '&': 2502 retval = sbuf_printf(sb, "&amp;"); 2503 break; 2504 case '>': 2505 retval = sbuf_printf(sb, "&gt;"); 2506 break; 2507 case '<': 2508 retval = sbuf_printf(sb, "&lt;"); 2509 break; 2510 default: 2511 retval = sbuf_putc(sb, *str); 2512 break; 2513 } 2514 2515 if (retval != 0) 2516 break; 2517 2518 } 2519 2520 return (retval); 2521 } 2522 2523 static void 2524 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2525 { 2526 struct scsi_vpd_id_descriptor *desc; 2527 int i; 2528 2529 if (id == NULL || id->len < 4) 2530 return; 2531 desc = (struct scsi_vpd_id_descriptor *)id->data; 2532 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2533 case SVPD_ID_TYPE_T10: 2534 sbuf_printf(sb, "t10."); 2535 break; 2536 case SVPD_ID_TYPE_EUI64: 2537 sbuf_printf(sb, "eui."); 2538 break; 2539 case SVPD_ID_TYPE_NAA: 2540 sbuf_printf(sb, "naa."); 2541 break; 2542 case SVPD_ID_TYPE_SCSI_NAME: 2543 break; 2544 } 2545 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2546 case SVPD_ID_CODESET_BINARY: 2547 for (i = 0; i < desc->length; i++) 2548 sbuf_printf(sb, "%02x", desc->identifier[i]); 2549 break; 2550 case SVPD_ID_CODESET_ASCII: 2551 sbuf_printf(sb, "%.*s", (int)desc->length, 2552 (char *)desc->identifier); 2553 break; 2554 case SVPD_ID_CODESET_UTF8: 2555 sbuf_printf(sb, "%s", (char *)desc->identifier); 2556 break; 2557 } 2558 } 2559 2560 static int 2561 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2562 struct thread *td) 2563 { 2564 struct ctl_softc *softc = dev->si_drv1; 2565 struct ctl_port *port; 2566 struct ctl_lun *lun; 2567 int retval; 2568 2569 retval = 0; 2570 2571 switch (cmd) { 2572 case CTL_IO: 2573 retval = ctl_ioctl_io(dev, cmd, addr, flag, td); 2574 break; 2575 case CTL_ENABLE_PORT: 2576 case CTL_DISABLE_PORT: 2577 case CTL_SET_PORT_WWNS: { 2578 struct ctl_port *port; 2579 struct ctl_port_entry *entry; 2580 2581 entry = (struct ctl_port_entry *)addr; 2582 2583 mtx_lock(&softc->ctl_lock); 2584 STAILQ_FOREACH(port, &softc->port_list, links) { 2585 int action, done; 2586 2587 if (port->targ_port < softc->port_min || 2588 port->targ_port >= softc->port_max) 2589 continue; 2590 2591 action = 0; 2592 done = 0; 2593 if ((entry->port_type == CTL_PORT_NONE) 2594 && (entry->targ_port == port->targ_port)) { 2595 /* 2596 * If the user only wants to enable or 2597 * disable or set WWNs on a specific port, 2598 * do the operation and we're done. 2599 */ 2600 action = 1; 2601 done = 1; 2602 } else if (entry->port_type & port->port_type) { 2603 /* 2604 * Compare the user's type mask with the 2605 * particular frontend type to see if we 2606 * have a match. 2607 */ 2608 action = 1; 2609 done = 0; 2610 2611 /* 2612 * Make sure the user isn't trying to set 2613 * WWNs on multiple ports at the same time.
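 * A port_type mask can match more than one port, while a WWN is
 * meaningful only for one specific port, hence the restriction.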
2614 */ 2615 if (cmd == CTL_SET_PORT_WWNS) { 2616 printf("%s: Can't set WWNs on " 2617 "multiple ports\n", __func__); 2618 retval = EINVAL; 2619 break; 2620 } 2621 } 2622 if (action == 0) 2623 continue; 2624 2625 /* 2626 * XXX KDM we have to drop the lock here, because 2627 * the online/offline operations can potentially 2628 * block. We need to reference count the frontends 2629 * so they can't go away, 2630 */ 2631 if (cmd == CTL_ENABLE_PORT) { 2632 mtx_unlock(&softc->ctl_lock); 2633 ctl_port_online(port); 2634 mtx_lock(&softc->ctl_lock); 2635 } else if (cmd == CTL_DISABLE_PORT) { 2636 mtx_unlock(&softc->ctl_lock); 2637 ctl_port_offline(port); 2638 mtx_lock(&softc->ctl_lock); 2639 } else if (cmd == CTL_SET_PORT_WWNS) { 2640 ctl_port_set_wwns(port, 2641 (entry->flags & CTL_PORT_WWNN_VALID) ? 2642 1 : 0, entry->wwnn, 2643 (entry->flags & CTL_PORT_WWPN_VALID) ? 2644 1 : 0, entry->wwpn); 2645 } 2646 if (done != 0) 2647 break; 2648 } 2649 mtx_unlock(&softc->ctl_lock); 2650 break; 2651 } 2652 case CTL_GET_OOA: { 2653 struct ctl_ooa *ooa_hdr; 2654 struct ctl_ooa_entry *entries; 2655 uint32_t cur_fill_num; 2656 2657 ooa_hdr = (struct ctl_ooa *)addr; 2658 2659 if ((ooa_hdr->alloc_len == 0) 2660 || (ooa_hdr->alloc_num == 0)) { 2661 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2662 "must be non-zero\n", __func__, 2663 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2664 retval = EINVAL; 2665 break; 2666 } 2667 2668 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2669 sizeof(struct ctl_ooa_entry))) { 2670 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2671 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2672 __func__, ooa_hdr->alloc_len, 2673 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2674 retval = EINVAL; 2675 break; 2676 } 2677 2678 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2679 if (entries == NULL) { 2680 printf("%s: could not allocate %d bytes for OOA " 2681 "dump\n", __func__, ooa_hdr->alloc_len); 2682 retval = ENOMEM; 2683 break; 2684 } 2685 2686 mtx_lock(&softc->ctl_lock); 2687 if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 && 2688 (ooa_hdr->lun_num >= CTL_MAX_LUNS || 2689 softc->ctl_luns[ooa_hdr->lun_num] == NULL)) { 2690 mtx_unlock(&softc->ctl_lock); 2691 free(entries, M_CTL); 2692 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2693 __func__, (uintmax_t)ooa_hdr->lun_num); 2694 retval = EINVAL; 2695 break; 2696 } 2697 2698 cur_fill_num = 0; 2699 2700 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2701 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2702 ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2703 ooa_hdr, entries); 2704 } 2705 } else { 2706 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2707 ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, 2708 entries); 2709 } 2710 mtx_unlock(&softc->ctl_lock); 2711 2712 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2713 ooa_hdr->fill_len = ooa_hdr->fill_num * 2714 sizeof(struct ctl_ooa_entry); 2715 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2716 if (retval != 0) { 2717 printf("%s: error copying out %d bytes for OOA dump\n", 2718 __func__, ooa_hdr->fill_len); 2719 } 2720 2721 getbinuptime(&ooa_hdr->cur_bt); 2722 2723 if (cur_fill_num > ooa_hdr->alloc_num) { 2724 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2725 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2726 } else { 2727 ooa_hdr->dropped_num = 0; 2728 ooa_hdr->status = CTL_OOA_OK; 2729 } 2730 2731 free(entries, M_CTL); 2732 break; 2733 } 2734 case CTL_DELAY_IO: { 2735 struct ctl_io_delay_info *delay_info; 2736 2737 delay_info = 
(struct ctl_io_delay_info *)addr; 2738 2739 #ifdef CTL_IO_DELAY 2740 mtx_lock(&softc->ctl_lock); 2741 if (delay_info->lun_id >= CTL_MAX_LUNS || 2742 (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) { 2743 mtx_unlock(&softc->ctl_lock); 2744 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2745 break; 2746 } 2747 mtx_lock(&lun->lun_lock); 2748 mtx_unlock(&softc->ctl_lock); 2749 delay_info->status = CTL_DELAY_STATUS_OK; 2750 switch (delay_info->delay_type) { 2751 case CTL_DELAY_TYPE_CONT: 2752 case CTL_DELAY_TYPE_ONESHOT: 2753 break; 2754 default: 2755 delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE; 2756 break; 2757 } 2758 switch (delay_info->delay_loc) { 2759 case CTL_DELAY_LOC_DATAMOVE: 2760 lun->delay_info.datamove_type = delay_info->delay_type; 2761 lun->delay_info.datamove_delay = delay_info->delay_secs; 2762 break; 2763 case CTL_DELAY_LOC_DONE: 2764 lun->delay_info.done_type = delay_info->delay_type; 2765 lun->delay_info.done_delay = delay_info->delay_secs; 2766 break; 2767 default: 2768 delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; 2769 break; 2770 } 2771 mtx_unlock(&lun->lun_lock); 2772 #else 2773 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2774 #endif /* CTL_IO_DELAY */ 2775 break; 2776 } 2777 #ifdef CTL_LEGACY_STATS 2778 case CTL_GETSTATS: { 2779 struct ctl_stats *stats = (struct ctl_stats *)addr; 2780 int i; 2781 2782 /* 2783 * XXX KDM no locking here. If the LUN list changes, 2784 * things can blow up. 2785 */ 2786 i = 0; 2787 stats->status = CTL_SS_OK; 2788 stats->fill_len = 0; 2789 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2790 if (stats->fill_len + sizeof(lun->legacy_stats) > 2791 stats->alloc_len) { 2792 stats->status = CTL_SS_NEED_MORE_SPACE; 2793 break; 2794 } 2795 retval = copyout(&lun->legacy_stats, &stats->lun_stats[i++], 2796 sizeof(lun->legacy_stats)); 2797 if (retval != 0) 2798 break; 2799 stats->fill_len += sizeof(lun->legacy_stats); 2800 } 2801 stats->num_luns = softc->num_luns; 2802 stats->flags = CTL_STATS_FLAG_NONE; 2803 #ifdef CTL_TIME_IO 2804 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 2805 #endif 2806 getnanouptime(&stats->timestamp); 2807 break; 2808 } 2809 #endif /* CTL_LEGACY_STATS */ 2810 case CTL_ERROR_INJECT: { 2811 struct ctl_error_desc *err_desc, *new_err_desc; 2812 2813 err_desc = (struct ctl_error_desc *)addr; 2814 2815 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2816 M_WAITOK | M_ZERO); 2817 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2818 2819 mtx_lock(&softc->ctl_lock); 2820 if (err_desc->lun_id >= CTL_MAX_LUNS || 2821 (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) { 2822 mtx_unlock(&softc->ctl_lock); 2823 free(new_err_desc, M_CTL); 2824 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2825 __func__, (uintmax_t)err_desc->lun_id); 2826 retval = EINVAL; 2827 break; 2828 } 2829 mtx_lock(&lun->lun_lock); 2830 mtx_unlock(&softc->ctl_lock); 2831 2832 /* 2833 * We could do some checking here to verify the validity 2834 * of the request, but given the complexity of error 2835 * injection requests, the checking logic would be fairly 2836 * complex. 2837 * 2838 * For now, if the request is invalid, it just won't get 2839 * executed and might get deleted. 2840 */ 2841 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2842 2843 /* 2844 * XXX KDM check to make sure the serial number is unique, 2845 * in case we somehow manage to wrap. That shouldn't 2846 * happen for a very long time, but it's the right thing to 2847 * do. 
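 *
 * Both the kernel copy and the user's descriptor get the same serial
 * below; that serial is what CTL_ERROR_INJECT_DELETE later matches on
 * to find and remove the entry.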
2848 */ 2849 new_err_desc->serial = lun->error_serial; 2850 err_desc->serial = lun->error_serial; 2851 lun->error_serial++; 2852 2853 mtx_unlock(&lun->lun_lock); 2854 break; 2855 } 2856 case CTL_ERROR_INJECT_DELETE: { 2857 struct ctl_error_desc *delete_desc, *desc, *desc2; 2858 int delete_done; 2859 2860 delete_desc = (struct ctl_error_desc *)addr; 2861 delete_done = 0; 2862 2863 mtx_lock(&softc->ctl_lock); 2864 if (delete_desc->lun_id >= CTL_MAX_LUNS || 2865 (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) { 2866 mtx_unlock(&softc->ctl_lock); 2867 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2868 __func__, (uintmax_t)delete_desc->lun_id); 2869 retval = EINVAL; 2870 break; 2871 } 2872 mtx_lock(&lun->lun_lock); 2873 mtx_unlock(&softc->ctl_lock); 2874 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2875 if (desc->serial != delete_desc->serial) 2876 continue; 2877 2878 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2879 links); 2880 free(desc, M_CTL); 2881 delete_done = 1; 2882 } 2883 mtx_unlock(&lun->lun_lock); 2884 if (delete_done == 0) { 2885 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2886 "error serial %ju on LUN %u\n", __func__, 2887 delete_desc->serial, delete_desc->lun_id); 2888 retval = EINVAL; 2889 break; 2890 } 2891 break; 2892 } 2893 case CTL_DUMP_STRUCTS: { 2894 int j, k; 2895 struct ctl_port *port; 2896 struct ctl_frontend *fe; 2897 2898 mtx_lock(&softc->ctl_lock); 2899 printf("CTL Persistent Reservation information start:\n"); 2900 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2901 mtx_lock(&lun->lun_lock); 2902 if ((lun->flags & CTL_LUN_DISABLED) != 0) { 2903 mtx_unlock(&lun->lun_lock); 2904 continue; 2905 } 2906 2907 for (j = 0; j < CTL_MAX_PORTS; j++) { 2908 if (lun->pr_keys[j] == NULL) 2909 continue; 2910 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2911 if (lun->pr_keys[j][k] == 0) 2912 continue; 2913 printf(" LUN %ju port %d iid %d key " 2914 "%#jx\n", lun->lun, j, k, 2915 (uintmax_t)lun->pr_keys[j][k]); 2916 } 2917 } 2918 mtx_unlock(&lun->lun_lock); 2919 } 2920 printf("CTL Persistent Reservation information end\n"); 2921 printf("CTL Ports:\n"); 2922 STAILQ_FOREACH(port, &softc->port_list, links) { 2923 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2924 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2925 port->frontend->name, port->port_type, 2926 port->physical_port, port->virtual_port, 2927 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2928 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2929 if (port->wwpn_iid[j].in_use == 0 && 2930 port->wwpn_iid[j].wwpn == 0 && 2931 port->wwpn_iid[j].name == NULL) 2932 continue; 2933 2934 printf(" iid %u use %d WWPN %#jx '%s'\n", 2935 j, port->wwpn_iid[j].in_use, 2936 (uintmax_t)port->wwpn_iid[j].wwpn, 2937 port->wwpn_iid[j].name); 2938 } 2939 } 2940 printf("CTL Port information end\n"); 2941 mtx_unlock(&softc->ctl_lock); 2942 /* 2943 * XXX KDM calling this without a lock. We'd likely want 2944 * to drop the lock before calling the frontend's dump 2945 * routine anyway. 
2946 */ 2947 printf("CTL Frontends:\n"); 2948 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2949 printf(" Frontend '%s'\n", fe->name); 2950 if (fe->fe_dump != NULL) 2951 fe->fe_dump(); 2952 } 2953 printf("CTL Frontend information end\n"); 2954 break; 2955 } 2956 case CTL_LUN_REQ: { 2957 struct ctl_lun_req *lun_req; 2958 struct ctl_backend_driver *backend; 2959 2960 lun_req = (struct ctl_lun_req *)addr; 2961 2962 backend = ctl_backend_find(lun_req->backend); 2963 if (backend == NULL) { 2964 lun_req->status = CTL_LUN_ERROR; 2965 snprintf(lun_req->error_str, 2966 sizeof(lun_req->error_str), 2967 "Backend \"%s\" not found.", 2968 lun_req->backend); 2969 break; 2970 } 2971 if (lun_req->num_be_args > 0) { 2972 lun_req->kern_be_args = ctl_copyin_args( 2973 lun_req->num_be_args, 2974 lun_req->be_args, 2975 lun_req->error_str, 2976 sizeof(lun_req->error_str)); 2977 if (lun_req->kern_be_args == NULL) { 2978 lun_req->status = CTL_LUN_ERROR; 2979 break; 2980 } 2981 } 2982 2983 retval = backend->ioctl(dev, cmd, addr, flag, td); 2984 2985 if (lun_req->num_be_args > 0) { 2986 ctl_copyout_args(lun_req->num_be_args, 2987 lun_req->kern_be_args); 2988 ctl_free_args(lun_req->num_be_args, 2989 lun_req->kern_be_args); 2990 } 2991 break; 2992 } 2993 case CTL_LUN_LIST: { 2994 struct sbuf *sb; 2995 struct ctl_lun_list *list; 2996 struct ctl_option *opt; 2997 2998 list = (struct ctl_lun_list *)addr; 2999 3000 /* 3001 * Allocate a fixed length sbuf here, based on the length 3002 * of the user's buffer. We could allocate an auto-extending 3003 * buffer, and then tell the user how much larger our 3004 * amount of data is than his buffer, but that presents 3005 * some problems: 3006 * 3007 * 1. The sbuf(9) routines use a blocking malloc, and so 3008 * we can't hold a lock while calling them with an 3009 * auto-extending buffer. 3010 * 3011 * 2. There is not currently a LUN reference counting 3012 * mechanism, outside of outstanding transactions on 3013 * the LUN's OOA queue. So a LUN could go away on us 3014 * while we're getting the LUN number, backend-specific 3015 * information, etc. Thus, given the way things 3016 * currently work, we need to hold the CTL lock while 3017 * grabbing LUN information. 3018 * 3019 * So, from the user's standpoint, the best thing to do is 3020 * allocate what he thinks is a reasonable buffer length, 3021 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 3022 * double the buffer length and try again. (And repeat 3023 * that until he succeeds.) 3024 */ 3025 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3026 if (sb == NULL) { 3027 list->status = CTL_LUN_LIST_ERROR; 3028 snprintf(list->error_str, sizeof(list->error_str), 3029 "Unable to allocate %d bytes for LUN list", 3030 list->alloc_len); 3031 break; 3032 } 3033 3034 sbuf_printf(sb, "<ctllunlist>\n"); 3035 3036 mtx_lock(&softc->ctl_lock); 3037 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3038 mtx_lock(&lun->lun_lock); 3039 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3040 (uintmax_t)lun->lun); 3041 3042 /* 3043 * Bail out as soon as we see that we've overfilled 3044 * the buffer. 3045 */ 3046 if (retval != 0) 3047 break; 3048 3049 retval = sbuf_printf(sb, "\t<backend_type>%s" 3050 "</backend_type>\n", 3051 (lun->backend == NULL) ? 
"none" : 3052 lun->backend->name); 3053 3054 if (retval != 0) 3055 break; 3056 3057 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3058 lun->be_lun->lun_type); 3059 3060 if (retval != 0) 3061 break; 3062 3063 if (lun->backend == NULL) { 3064 retval = sbuf_printf(sb, "</lun>\n"); 3065 if (retval != 0) 3066 break; 3067 continue; 3068 } 3069 3070 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3071 (lun->be_lun->maxlba > 0) ? 3072 lun->be_lun->maxlba + 1 : 0); 3073 3074 if (retval != 0) 3075 break; 3076 3077 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3078 lun->be_lun->blocksize); 3079 3080 if (retval != 0) 3081 break; 3082 3083 retval = sbuf_printf(sb, "\t<serial_number>"); 3084 3085 if (retval != 0) 3086 break; 3087 3088 retval = ctl_sbuf_printf_esc(sb, 3089 lun->be_lun->serial_num, 3090 sizeof(lun->be_lun->serial_num)); 3091 3092 if (retval != 0) 3093 break; 3094 3095 retval = sbuf_printf(sb, "</serial_number>\n"); 3096 3097 if (retval != 0) 3098 break; 3099 3100 retval = sbuf_printf(sb, "\t<device_id>"); 3101 3102 if (retval != 0) 3103 break; 3104 3105 retval = ctl_sbuf_printf_esc(sb, 3106 lun->be_lun->device_id, 3107 sizeof(lun->be_lun->device_id)); 3108 3109 if (retval != 0) 3110 break; 3111 3112 retval = sbuf_printf(sb, "</device_id>\n"); 3113 3114 if (retval != 0) 3115 break; 3116 3117 if (lun->backend->lun_info != NULL) { 3118 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 3119 if (retval != 0) 3120 break; 3121 } 3122 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 3123 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3124 opt->name, opt->value, opt->name); 3125 if (retval != 0) 3126 break; 3127 } 3128 3129 retval = sbuf_printf(sb, "</lun>\n"); 3130 3131 if (retval != 0) 3132 break; 3133 mtx_unlock(&lun->lun_lock); 3134 } 3135 if (lun != NULL) 3136 mtx_unlock(&lun->lun_lock); 3137 mtx_unlock(&softc->ctl_lock); 3138 3139 if ((retval != 0) 3140 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3141 retval = 0; 3142 sbuf_delete(sb); 3143 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3144 snprintf(list->error_str, sizeof(list->error_str), 3145 "Out of space, %d bytes is too small", 3146 list->alloc_len); 3147 break; 3148 } 3149 3150 sbuf_finish(sb); 3151 3152 retval = copyout(sbuf_data(sb), list->lun_xml, 3153 sbuf_len(sb) + 1); 3154 3155 list->fill_len = sbuf_len(sb) + 1; 3156 list->status = CTL_LUN_LIST_OK; 3157 sbuf_delete(sb); 3158 break; 3159 } 3160 case CTL_ISCSI: { 3161 struct ctl_iscsi *ci; 3162 struct ctl_frontend *fe; 3163 3164 ci = (struct ctl_iscsi *)addr; 3165 3166 fe = ctl_frontend_find("iscsi"); 3167 if (fe == NULL) { 3168 ci->status = CTL_ISCSI_ERROR; 3169 snprintf(ci->error_str, sizeof(ci->error_str), 3170 "Frontend \"iscsi\" not found."); 3171 break; 3172 } 3173 3174 retval = fe->ioctl(dev, cmd, addr, flag, td); 3175 break; 3176 } 3177 case CTL_PORT_REQ: { 3178 struct ctl_req *req; 3179 struct ctl_frontend *fe; 3180 3181 req = (struct ctl_req *)addr; 3182 3183 fe = ctl_frontend_find(req->driver); 3184 if (fe == NULL) { 3185 req->status = CTL_LUN_ERROR; 3186 snprintf(req->error_str, sizeof(req->error_str), 3187 "Frontend \"%s\" not found.", req->driver); 3188 break; 3189 } 3190 if (req->num_args > 0) { 3191 req->kern_args = ctl_copyin_args(req->num_args, 3192 req->args, req->error_str, sizeof(req->error_str)); 3193 if (req->kern_args == NULL) { 3194 req->status = CTL_LUN_ERROR; 3195 break; 3196 } 3197 } 3198 3199 if (fe->ioctl) 3200 retval = fe->ioctl(dev, cmd, addr, flag, td); 3201 else 3202 retval = ENODEV; 3203 3204 if 
(req->num_args > 0) { 3205 ctl_copyout_args(req->num_args, req->kern_args); 3206 ctl_free_args(req->num_args, req->kern_args); 3207 } 3208 break; 3209 } 3210 case CTL_PORT_LIST: { 3211 struct sbuf *sb; 3212 struct ctl_port *port; 3213 struct ctl_lun_list *list; 3214 struct ctl_option *opt; 3215 int j; 3216 uint32_t plun; 3217 3218 list = (struct ctl_lun_list *)addr; 3219 3220 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3221 if (sb == NULL) { 3222 list->status = CTL_LUN_LIST_ERROR; 3223 snprintf(list->error_str, sizeof(list->error_str), 3224 "Unable to allocate %d bytes for LUN list", 3225 list->alloc_len); 3226 break; 3227 } 3228 3229 sbuf_printf(sb, "<ctlportlist>\n"); 3230 3231 mtx_lock(&softc->ctl_lock); 3232 STAILQ_FOREACH(port, &softc->port_list, links) { 3233 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3234 (uintmax_t)port->targ_port); 3235 3236 /* 3237 * Bail out as soon as we see that we've overfilled 3238 * the buffer. 3239 */ 3240 if (retval != 0) 3241 break; 3242 3243 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3244 "</frontend_type>\n", port->frontend->name); 3245 if (retval != 0) 3246 break; 3247 3248 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3249 port->port_type); 3250 if (retval != 0) 3251 break; 3252 3253 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3254 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 3255 if (retval != 0) 3256 break; 3257 3258 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3259 port->port_name); 3260 if (retval != 0) 3261 break; 3262 3263 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3264 port->physical_port); 3265 if (retval != 0) 3266 break; 3267 3268 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3269 port->virtual_port); 3270 if (retval != 0) 3271 break; 3272 3273 if (port->target_devid != NULL) { 3274 sbuf_printf(sb, "\t<target>"); 3275 ctl_id_sbuf(port->target_devid, sb); 3276 sbuf_printf(sb, "</target>\n"); 3277 } 3278 3279 if (port->port_devid != NULL) { 3280 sbuf_printf(sb, "\t<port>"); 3281 ctl_id_sbuf(port->port_devid, sb); 3282 sbuf_printf(sb, "</port>\n"); 3283 } 3284 3285 if (port->port_info != NULL) { 3286 retval = port->port_info(port->onoff_arg, sb); 3287 if (retval != 0) 3288 break; 3289 } 3290 STAILQ_FOREACH(opt, &port->options, links) { 3291 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3292 opt->name, opt->value, opt->name); 3293 if (retval != 0) 3294 break; 3295 } 3296 3297 if (port->lun_map != NULL) { 3298 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 3299 for (j = 0; j < port->lun_map_size; j++) { 3300 plun = ctl_lun_map_from_port(port, j); 3301 if (plun == UINT32_MAX) 3302 continue; 3303 sbuf_printf(sb, 3304 "\t<lun id=\"%u\">%u</lun>\n", 3305 j, plun); 3306 } 3307 } 3308 3309 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3310 if (port->wwpn_iid[j].in_use == 0 || 3311 (port->wwpn_iid[j].wwpn == 0 && 3312 port->wwpn_iid[j].name == NULL)) 3313 continue; 3314 3315 if (port->wwpn_iid[j].name != NULL) 3316 retval = sbuf_printf(sb, 3317 "\t<initiator id=\"%u\">%s</initiator>\n", 3318 j, port->wwpn_iid[j].name); 3319 else 3320 retval = sbuf_printf(sb, 3321 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3322 j, port->wwpn_iid[j].wwpn); 3323 if (retval != 0) 3324 break; 3325 } 3326 if (retval != 0) 3327 break; 3328 3329 retval = sbuf_printf(sb, "</targ_port>\n"); 3330 if (retval != 0) 3331 break; 3332 } 3333 mtx_unlock(&softc->ctl_lock); 3334 3335 if ((retval != 0) 3336 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3337 
retval = 0; 3338 sbuf_delete(sb); 3339 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3340 snprintf(list->error_str, sizeof(list->error_str), 3341 "Out of space, %d bytes is too small", 3342 list->alloc_len); 3343 break; 3344 } 3345 3346 sbuf_finish(sb); 3347 3348 retval = copyout(sbuf_data(sb), list->lun_xml, 3349 sbuf_len(sb) + 1); 3350 3351 list->fill_len = sbuf_len(sb) + 1; 3352 list->status = CTL_LUN_LIST_OK; 3353 sbuf_delete(sb); 3354 break; 3355 } 3356 case CTL_LUN_MAP: { 3357 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3358 struct ctl_port *port; 3359 3360 mtx_lock(&softc->ctl_lock); 3361 if (lm->port < softc->port_min || 3362 lm->port >= softc->port_max || 3363 (port = softc->ctl_ports[lm->port]) == NULL) { 3364 mtx_unlock(&softc->ctl_lock); 3365 return (ENXIO); 3366 } 3367 if (port->status & CTL_PORT_STATUS_ONLINE) { 3368 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3369 if (ctl_lun_map_to_port(port, lun->lun) == 3370 UINT32_MAX) 3371 continue; 3372 mtx_lock(&lun->lun_lock); 3373 ctl_est_ua_port(lun, lm->port, -1, 3374 CTL_UA_LUN_CHANGE); 3375 mtx_unlock(&lun->lun_lock); 3376 } 3377 } 3378 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3379 if (lm->plun != UINT32_MAX) { 3380 if (lm->lun == UINT32_MAX) 3381 retval = ctl_lun_map_unset(port, lm->plun); 3382 else if (lm->lun < CTL_MAX_LUNS && 3383 softc->ctl_luns[lm->lun] != NULL) 3384 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3385 else 3386 return (ENXIO); 3387 } else { 3388 if (lm->lun == UINT32_MAX) 3389 retval = ctl_lun_map_deinit(port); 3390 else 3391 retval = ctl_lun_map_init(port); 3392 } 3393 if (port->status & CTL_PORT_STATUS_ONLINE) 3394 ctl_isc_announce_port(port); 3395 break; 3396 } 3397 case CTL_GET_LUN_STATS: { 3398 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3399 int i; 3400 3401 /* 3402 * XXX KDM no locking here. If the LUN list changes, 3403 * things can blow up. 3404 */ 3405 i = 0; 3406 stats->status = CTL_SS_OK; 3407 stats->fill_len = 0; 3408 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3409 if (lun->lun < stats->first_item) 3410 continue; 3411 if (stats->fill_len + sizeof(lun->stats) > 3412 stats->alloc_len) { 3413 stats->status = CTL_SS_NEED_MORE_SPACE; 3414 break; 3415 } 3416 retval = copyout(&lun->stats, &stats->stats[i++], 3417 sizeof(lun->stats)); 3418 if (retval != 0) 3419 break; 3420 stats->fill_len += sizeof(lun->stats); 3421 } 3422 stats->num_items = softc->num_luns; 3423 stats->flags = CTL_STATS_FLAG_NONE; 3424 #ifdef CTL_TIME_IO 3425 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3426 #endif 3427 getnanouptime(&stats->timestamp); 3428 break; 3429 } 3430 case CTL_GET_PORT_STATS: { 3431 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3432 int i; 3433 3434 /* 3435 * XXX KDM no locking here. If the LUN list changes, 3436 * things can blow up. 
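 * (For CTL_GET_PORT_STATS it is really the port list being walked,
 * but the same caveat applies.)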
3437 */ 3438 i = 0; 3439 stats->status = CTL_SS_OK; 3440 stats->fill_len = 0; 3441 STAILQ_FOREACH(port, &softc->port_list, links) { 3442 if (port->targ_port < stats->first_item) 3443 continue; 3444 if (stats->fill_len + sizeof(port->stats) > 3445 stats->alloc_len) { 3446 stats->status = CTL_SS_NEED_MORE_SPACE; 3447 break; 3448 } 3449 retval = copyout(&port->stats, &stats->stats[i++], 3450 sizeof(port->stats)); 3451 if (retval != 0) 3452 break; 3453 stats->fill_len += sizeof(port->stats); 3454 } 3455 stats->num_items = softc->num_ports; 3456 stats->flags = CTL_STATS_FLAG_NONE; 3457 #ifdef CTL_TIME_IO 3458 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3459 #endif 3460 getnanouptime(&stats->timestamp); 3461 break; 3462 } 3463 default: { 3464 /* XXX KDM should we fix this? */ 3465 #if 0 3466 struct ctl_backend_driver *backend; 3467 unsigned int type; 3468 int found; 3469 3470 found = 0; 3471 3472 /* 3473 * We encode the backend type as the ioctl type for backend 3474 * ioctls. So parse it out here, and then search for a 3475 * backend of this type. 3476 */ 3477 type = _IOC_TYPE(cmd); 3478 3479 STAILQ_FOREACH(backend, &softc->be_list, links) { 3480 if (backend->type == type) { 3481 found = 1; 3482 break; 3483 } 3484 } 3485 if (found == 0) { 3486 printf("ctl: unknown ioctl command %#lx or backend " 3487 "%d\n", cmd, type); 3488 retval = EINVAL; 3489 break; 3490 } 3491 retval = backend->ioctl(dev, cmd, addr, flag, td); 3492 #endif 3493 retval = ENOTTY; 3494 break; 3495 } 3496 } 3497 return (retval); 3498 } 3499 3500 uint32_t 3501 ctl_get_initindex(struct ctl_nexus *nexus) 3502 { 3503 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3504 } 3505 3506 int 3507 ctl_lun_map_init(struct ctl_port *port) 3508 { 3509 struct ctl_softc *softc = port->ctl_softc; 3510 struct ctl_lun *lun; 3511 int size = ctl_lun_map_size; 3512 uint32_t i; 3513 3514 if (port->lun_map == NULL || port->lun_map_size < size) { 3515 port->lun_map_size = 0; 3516 free(port->lun_map, M_CTL); 3517 port->lun_map = malloc(size * sizeof(uint32_t), 3518 M_CTL, M_NOWAIT); 3519 } 3520 if (port->lun_map == NULL) 3521 return (ENOMEM); 3522 for (i = 0; i < size; i++) 3523 port->lun_map[i] = UINT32_MAX; 3524 port->lun_map_size = size; 3525 if (port->status & CTL_PORT_STATUS_ONLINE) { 3526 if (port->lun_disable != NULL) { 3527 STAILQ_FOREACH(lun, &softc->lun_list, links) 3528 port->lun_disable(port->targ_lun_arg, lun->lun); 3529 } 3530 ctl_isc_announce_port(port); 3531 } 3532 return (0); 3533 } 3534 3535 int 3536 ctl_lun_map_deinit(struct ctl_port *port) 3537 { 3538 struct ctl_softc *softc = port->ctl_softc; 3539 struct ctl_lun *lun; 3540 3541 if (port->lun_map == NULL) 3542 return (0); 3543 port->lun_map_size = 0; 3544 free(port->lun_map, M_CTL); 3545 port->lun_map = NULL; 3546 if (port->status & CTL_PORT_STATUS_ONLINE) { 3547 if (port->lun_enable != NULL) { 3548 STAILQ_FOREACH(lun, &softc->lun_list, links) 3549 port->lun_enable(port->targ_lun_arg, lun->lun); 3550 } 3551 ctl_isc_announce_port(port); 3552 } 3553 return (0); 3554 } 3555 3556 int 3557 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3558 { 3559 int status; 3560 uint32_t old; 3561 3562 if (port->lun_map == NULL) { 3563 status = ctl_lun_map_init(port); 3564 if (status != 0) 3565 return (status); 3566 } 3567 if (plun >= port->lun_map_size) 3568 return (EINVAL); 3569 old = port->lun_map[plun]; 3570 port->lun_map[plun] = glun; 3571 if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) { 3572 if (port->lun_enable != NULL) 3573 
port->lun_enable(port->targ_lun_arg, plun); 3574 ctl_isc_announce_port(port); 3575 } 3576 return (0); 3577 } 3578 3579 int 3580 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3581 { 3582 uint32_t old; 3583 3584 if (port->lun_map == NULL || plun >= port->lun_map_size) 3585 return (0); 3586 old = port->lun_map[plun]; 3587 port->lun_map[plun] = UINT32_MAX; 3588 if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) { 3589 if (port->lun_disable != NULL) 3590 port->lun_disable(port->targ_lun_arg, plun); 3591 ctl_isc_announce_port(port); 3592 } 3593 return (0); 3594 } 3595 3596 uint32_t 3597 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3598 { 3599 3600 if (port == NULL) 3601 return (UINT32_MAX); 3602 if (port->lun_map == NULL) 3603 return (lun_id); 3604 if (lun_id > port->lun_map_size) 3605 return (UINT32_MAX); 3606 return (port->lun_map[lun_id]); 3607 } 3608 3609 uint32_t 3610 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3611 { 3612 uint32_t i; 3613 3614 if (port == NULL) 3615 return (UINT32_MAX); 3616 if (port->lun_map == NULL) 3617 return (lun_id); 3618 for (i = 0; i < port->lun_map_size; i++) { 3619 if (port->lun_map[i] == lun_id) 3620 return (i); 3621 } 3622 return (UINT32_MAX); 3623 } 3624 3625 uint32_t 3626 ctl_decode_lun(uint64_t encoded) 3627 { 3628 uint8_t lun[8]; 3629 uint32_t result = 0xffffffff; 3630 3631 be64enc(lun, encoded); 3632 switch (lun[0] & RPL_LUNDATA_ATYP_MASK) { 3633 case RPL_LUNDATA_ATYP_PERIPH: 3634 if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 && 3635 lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0) 3636 result = lun[1]; 3637 break; 3638 case RPL_LUNDATA_ATYP_FLAT: 3639 if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 && 3640 lun[6] == 0 && lun[7] == 0) 3641 result = ((lun[0] & 0x3f) << 8) + lun[1]; 3642 break; 3643 case RPL_LUNDATA_ATYP_EXTLUN: 3644 switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) { 3645 case 0x02: 3646 switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) { 3647 case 0x00: 3648 result = lun[1]; 3649 break; 3650 case 0x10: 3651 result = (lun[1] << 16) + (lun[2] << 8) + 3652 lun[3]; 3653 break; 3654 case 0x20: 3655 if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0) 3656 result = (lun[2] << 24) + 3657 (lun[3] << 16) + (lun[4] << 8) + 3658 lun[5]; 3659 break; 3660 } 3661 break; 3662 case RPL_LUNDATA_EXT_EAM_NOT_SPEC: 3663 result = 0xffffffff; 3664 break; 3665 } 3666 break; 3667 } 3668 return (result); 3669 } 3670 3671 uint64_t 3672 ctl_encode_lun(uint32_t decoded) 3673 { 3674 uint64_t l = decoded; 3675 3676 if (l <= 0xff) 3677 return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48)); 3678 if (l <= 0x3fff) 3679 return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48)); 3680 if (l <= 0xffffff) 3681 return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) | 3682 (l << 32)); 3683 return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16)); 3684 } 3685 3686 int 3687 ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) 3688 { 3689 int i; 3690 3691 for (i = first; i < last; i++) { 3692 if ((mask[i / 32] & (1 << (i % 32))) == 0) 3693 return (i); 3694 } 3695 return (-1); 3696 } 3697 3698 int 3699 ctl_set_mask(uint32_t *mask, uint32_t bit) 3700 { 3701 uint32_t chunk, piece; 3702 3703 chunk = bit >> 5; 3704 piece = bit % (sizeof(uint32_t) * 8); 3705 3706 if ((mask[chunk] & (1 << piece)) != 0) 3707 return (-1); 3708 else 3709 mask[chunk] |= (1 << piece); 3710 3711 return (0); 3712 } 3713 3714 int 3715 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3716 { 3717 uint32_t chunk, piece; 
3718 3719 chunk = bit >> 5; 3720 piece = bit % (sizeof(uint32_t) * 8); 3721 3722 if ((mask[chunk] & (1 << piece)) == 0) 3723 return (-1); 3724 else 3725 mask[chunk] &= ~(1 << piece); 3726 3727 return (0); 3728 } 3729 3730 int 3731 ctl_is_set(uint32_t *mask, uint32_t bit) 3732 { 3733 uint32_t chunk, piece; 3734 3735 chunk = bit >> 5; 3736 piece = bit % (sizeof(uint32_t) * 8); 3737 3738 if ((mask[chunk] & (1 << piece)) == 0) 3739 return (0); 3740 else 3741 return (1); 3742 } 3743 3744 static uint64_t 3745 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3746 { 3747 uint64_t *t; 3748 3749 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3750 if (t == NULL) 3751 return (0); 3752 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3753 } 3754 3755 static void 3756 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3757 { 3758 uint64_t *t; 3759 3760 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3761 if (t == NULL) 3762 return; 3763 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3764 } 3765 3766 static void 3767 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3768 { 3769 uint64_t *p; 3770 u_int i; 3771 3772 i = residx/CTL_MAX_INIT_PER_PORT; 3773 if (lun->pr_keys[i] != NULL) 3774 return; 3775 mtx_unlock(&lun->lun_lock); 3776 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3777 M_WAITOK | M_ZERO); 3778 mtx_lock(&lun->lun_lock); 3779 if (lun->pr_keys[i] == NULL) 3780 lun->pr_keys[i] = p; 3781 else 3782 free(p, M_CTL); 3783 } 3784 3785 static void 3786 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3787 { 3788 uint64_t *t; 3789 3790 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3791 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3792 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3793 } 3794 3795 /* 3796 * ctl_softc, pool_name, total_ctl_io are passed in. 3797 * npool is passed out. 
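 *
 * With IO_POOLS defined the pool gets its own UMA secondary zone backed
 * by io_zone; otherwise the "pool" is just a named reference to the
 * shared io_zone and total_ctl_io only matters to the (currently
 * disabled) preallocation.  A hypothetical usage sketch, not taken from
 * any caller in this file:
 *
 *	void *pool;
 *	union ctl_io *io;
 *
 *	if (ctl_pool_create(softc, "example", 1024, &pool) != 0)
 *		return (ENOMEM);
 *	io = ctl_alloc_io(pool);
 *	ctl_zero_io(io);
 *	(... fill in and submit the request; once it completes ...)
 *	ctl_free_io(io);
 *	ctl_pool_free(pool);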
3798 */ 3799 int 3800 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3801 uint32_t total_ctl_io, void **npool) 3802 { 3803 struct ctl_io_pool *pool; 3804 3805 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3806 M_NOWAIT | M_ZERO); 3807 if (pool == NULL) 3808 return (ENOMEM); 3809 3810 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3811 pool->ctl_softc = ctl_softc; 3812 #ifdef IO_POOLS 3813 pool->zone = uma_zsecond_create(pool->name, NULL, 3814 NULL, NULL, NULL, ctl_softc->io_zone); 3815 /* uma_prealloc(pool->zone, total_ctl_io); */ 3816 #else 3817 pool->zone = ctl_softc->io_zone; 3818 #endif 3819 3820 *npool = pool; 3821 return (0); 3822 } 3823 3824 void 3825 ctl_pool_free(struct ctl_io_pool *pool) 3826 { 3827 3828 if (pool == NULL) 3829 return; 3830 3831 #ifdef IO_POOLS 3832 uma_zdestroy(pool->zone); 3833 #endif 3834 free(pool, M_CTL); 3835 } 3836 3837 union ctl_io * 3838 ctl_alloc_io(void *pool_ref) 3839 { 3840 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3841 union ctl_io *io; 3842 3843 io = uma_zalloc(pool->zone, M_WAITOK); 3844 if (io != NULL) { 3845 io->io_hdr.pool = pool_ref; 3846 CTL_SOFTC(io) = pool->ctl_softc; 3847 } 3848 return (io); 3849 } 3850 3851 union ctl_io * 3852 ctl_alloc_io_nowait(void *pool_ref) 3853 { 3854 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3855 union ctl_io *io; 3856 3857 io = uma_zalloc(pool->zone, M_NOWAIT); 3858 if (io != NULL) { 3859 io->io_hdr.pool = pool_ref; 3860 CTL_SOFTC(io) = pool->ctl_softc; 3861 } 3862 return (io); 3863 } 3864 3865 void 3866 ctl_free_io(union ctl_io *io) 3867 { 3868 struct ctl_io_pool *pool; 3869 3870 if (io == NULL) 3871 return; 3872 3873 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3874 uma_zfree(pool->zone, io); 3875 } 3876 3877 void 3878 ctl_zero_io(union ctl_io *io) 3879 { 3880 struct ctl_io_pool *pool; 3881 3882 if (io == NULL) 3883 return; 3884 3885 /* 3886 * May need to preserve linked list pointers at some point too. 3887 */ 3888 pool = io->io_hdr.pool; 3889 memset(io, 0, sizeof(*io)); 3890 io->io_hdr.pool = pool; 3891 CTL_SOFTC(io) = pool->ctl_softc; 3892 } 3893 3894 int 3895 ctl_expand_number(const char *buf, uint64_t *num) 3896 { 3897 char *endptr; 3898 uint64_t number; 3899 unsigned shift; 3900 3901 number = strtoq(buf, &endptr, 0); 3902 3903 switch (tolower((unsigned char)*endptr)) { 3904 case 'e': 3905 shift = 60; 3906 break; 3907 case 'p': 3908 shift = 50; 3909 break; 3910 case 't': 3911 shift = 40; 3912 break; 3913 case 'g': 3914 shift = 30; 3915 break; 3916 case 'm': 3917 shift = 20; 3918 break; 3919 case 'k': 3920 shift = 10; 3921 break; 3922 case 'b': 3923 case '\0': /* No unit. */ 3924 *num = number; 3925 return (0); 3926 default: 3927 /* Unrecognized unit. */ 3928 return (-1); 3929 } 3930 3931 if ((number << shift) >> shift != number) { 3932 /* Overflow */ 3933 return (-1); 3934 } 3935 *num = number << shift; 3936 return (0); 3937 } 3938 3939 3940 /* 3941 * This routine could be used in the future to load default and/or saved 3942 * mode page parameters for a particuar lun. 
3943 */ 3944 static int 3945 ctl_init_page_index(struct ctl_lun *lun) 3946 { 3947 int i, page_code; 3948 struct ctl_page_index *page_index; 3949 const char *value; 3950 uint64_t ival; 3951 3952 memcpy(&lun->mode_pages.index, page_index_template, 3953 sizeof(page_index_template)); 3954 3955 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 3956 3957 page_index = &lun->mode_pages.index[i]; 3958 if (lun->be_lun->lun_type == T_DIRECT && 3959 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 3960 continue; 3961 if (lun->be_lun->lun_type == T_PROCESSOR && 3962 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 3963 continue; 3964 if (lun->be_lun->lun_type == T_CDROM && 3965 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 3966 continue; 3967 3968 page_code = page_index->page_code & SMPH_PC_MASK; 3969 switch (page_code) { 3970 case SMS_RW_ERROR_RECOVERY_PAGE: { 3971 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 3972 ("subpage %#x for page %#x is incorrect!", 3973 page_index->subpage, page_code)); 3974 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 3975 &rw_er_page_default, 3976 sizeof(rw_er_page_default)); 3977 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 3978 &rw_er_page_changeable, 3979 sizeof(rw_er_page_changeable)); 3980 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 3981 &rw_er_page_default, 3982 sizeof(rw_er_page_default)); 3983 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 3984 &rw_er_page_default, 3985 sizeof(rw_er_page_default)); 3986 page_index->page_data = 3987 (uint8_t *)lun->mode_pages.rw_er_page; 3988 break; 3989 } 3990 case SMS_FORMAT_DEVICE_PAGE: { 3991 struct scsi_format_page *format_page; 3992 3993 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 3994 ("subpage %#x for page %#x is incorrect!", 3995 page_index->subpage, page_code)); 3996 3997 /* 3998 * Sectors per track are set above. Bytes per 3999 * sector need to be set here on a per-LUN basis. 4000 */ 4001 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 4002 &format_page_default, 4003 sizeof(format_page_default)); 4004 memcpy(&lun->mode_pages.format_page[ 4005 CTL_PAGE_CHANGEABLE], &format_page_changeable, 4006 sizeof(format_page_changeable)); 4007 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 4008 &format_page_default, 4009 sizeof(format_page_default)); 4010 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 4011 &format_page_default, 4012 sizeof(format_page_default)); 4013 4014 format_page = &lun->mode_pages.format_page[ 4015 CTL_PAGE_CURRENT]; 4016 scsi_ulto2b(lun->be_lun->blocksize, 4017 format_page->bytes_per_sector); 4018 4019 format_page = &lun->mode_pages.format_page[ 4020 CTL_PAGE_DEFAULT]; 4021 scsi_ulto2b(lun->be_lun->blocksize, 4022 format_page->bytes_per_sector); 4023 4024 format_page = &lun->mode_pages.format_page[ 4025 CTL_PAGE_SAVED]; 4026 scsi_ulto2b(lun->be_lun->blocksize, 4027 format_page->bytes_per_sector); 4028 4029 page_index->page_data = 4030 (uint8_t *)lun->mode_pages.format_page; 4031 break; 4032 } 4033 case SMS_RIGID_DISK_PAGE: { 4034 struct scsi_rigid_disk_page *rigid_disk_page; 4035 uint32_t sectors_per_cylinder; 4036 uint64_t cylinders; 4037 #ifndef __XSCALE__ 4038 int shift; 4039 #endif /* !__XSCALE__ */ 4040 4041 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4042 ("subpage %#x for page %#x is incorrect!", 4043 page_index->subpage, page_code)); 4044 4045 /* 4046 * Rotation rate and sectors per track are set 4047 * above. We calculate the cylinders here based on 4048 * capacity. 
Due to the number of heads and 4049 * sectors per track we're using, smaller arrays 4050 * may turn out to have 0 cylinders. Linux and 4051 * FreeBSD don't pay attention to these mode pages 4052 * to figure out capacity, but Solaris does. It 4053 * seems to deal with 0 cylinders just fine, and 4054 * works out a fake geometry based on the capacity. 4055 */ 4056 memcpy(&lun->mode_pages.rigid_disk_page[ 4057 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 4058 sizeof(rigid_disk_page_default)); 4059 memcpy(&lun->mode_pages.rigid_disk_page[ 4060 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 4061 sizeof(rigid_disk_page_changeable)); 4062 4063 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 4064 CTL_DEFAULT_HEADS; 4065 4066 /* 4067 * The divide method here will be more accurate, 4068 * probably, but results in floating point being 4069 * used in the kernel on i386 (__udivdi3()). On the 4070 * XScale, though, __udivdi3() is implemented in 4071 * software. 4072 * 4073 * The shift method for cylinder calculation is 4074 * accurate if sectors_per_cylinder is a power of 4075 * 2. Otherwise it might be slightly off -- you 4076 * might have a bit of a truncation problem. 4077 */ 4078 #ifdef __XSCALE__ 4079 cylinders = (lun->be_lun->maxlba + 1) / 4080 sectors_per_cylinder; 4081 #else 4082 for (shift = 31; shift > 0; shift--) { 4083 if (sectors_per_cylinder & (1 << shift)) 4084 break; 4085 } 4086 cylinders = (lun->be_lun->maxlba + 1) >> shift; 4087 #endif 4088 4089 /* 4090 * We've basically got 3 bytes, or 24 bits for the 4091 * cylinder size in the mode page. If we're over, 4092 * just round down to 2^24. 4093 */ 4094 if (cylinders > 0xffffff) 4095 cylinders = 0xffffff; 4096 4097 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4098 CTL_PAGE_DEFAULT]; 4099 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4100 4101 if ((value = ctl_get_opt(&lun->be_lun->options, 4102 "rpm")) != NULL) { 4103 scsi_ulto2b(strtol(value, NULL, 0), 4104 rigid_disk_page->rotation_rate); 4105 } 4106 4107 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 4108 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4109 sizeof(rigid_disk_page_default)); 4110 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 4111 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4112 sizeof(rigid_disk_page_default)); 4113 4114 page_index->page_data = 4115 (uint8_t *)lun->mode_pages.rigid_disk_page; 4116 break; 4117 } 4118 case SMS_VERIFY_ERROR_RECOVERY_PAGE: { 4119 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4120 ("subpage %#x for page %#x is incorrect!", 4121 page_index->subpage, page_code)); 4122 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT], 4123 &verify_er_page_default, 4124 sizeof(verify_er_page_default)); 4125 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE], 4126 &verify_er_page_changeable, 4127 sizeof(verify_er_page_changeable)); 4128 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT], 4129 &verify_er_page_default, 4130 sizeof(verify_er_page_default)); 4131 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED], 4132 &verify_er_page_default, 4133 sizeof(verify_er_page_default)); 4134 page_index->page_data = 4135 (uint8_t *)lun->mode_pages.verify_er_page; 4136 break; 4137 } 4138 case SMS_CACHING_PAGE: { 4139 struct scsi_caching_page *caching_page; 4140 4141 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4142 ("subpage %#x for page %#x is incorrect!", 4143 page_index->subpage, page_code)); 4144 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 4145 &caching_page_default, 
4146 sizeof(caching_page_default)); 4147 memcpy(&lun->mode_pages.caching_page[ 4148 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 4149 sizeof(caching_page_changeable)); 4150 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4151 &caching_page_default, 4152 sizeof(caching_page_default)); 4153 caching_page = &lun->mode_pages.caching_page[ 4154 CTL_PAGE_SAVED]; 4155 value = ctl_get_opt(&lun->be_lun->options, "writecache"); 4156 if (value != NULL && strcmp(value, "off") == 0) 4157 caching_page->flags1 &= ~SCP_WCE; 4158 value = ctl_get_opt(&lun->be_lun->options, "readcache"); 4159 if (value != NULL && strcmp(value, "off") == 0) 4160 caching_page->flags1 |= SCP_RCD; 4161 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 4162 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4163 sizeof(caching_page_default)); 4164 page_index->page_data = 4165 (uint8_t *)lun->mode_pages.caching_page; 4166 break; 4167 } 4168 case SMS_CONTROL_MODE_PAGE: { 4169 switch (page_index->subpage) { 4170 case SMS_SUBPAGE_PAGE_0: { 4171 struct scsi_control_page *control_page; 4172 4173 memcpy(&lun->mode_pages.control_page[ 4174 CTL_PAGE_DEFAULT], 4175 &control_page_default, 4176 sizeof(control_page_default)); 4177 memcpy(&lun->mode_pages.control_page[ 4178 CTL_PAGE_CHANGEABLE], 4179 &control_page_changeable, 4180 sizeof(control_page_changeable)); 4181 memcpy(&lun->mode_pages.control_page[ 4182 CTL_PAGE_SAVED], 4183 &control_page_default, 4184 sizeof(control_page_default)); 4185 control_page = &lun->mode_pages.control_page[ 4186 CTL_PAGE_SAVED]; 4187 value = ctl_get_opt(&lun->be_lun->options, 4188 "reordering"); 4189 if (value != NULL && 4190 strcmp(value, "unrestricted") == 0) { 4191 control_page->queue_flags &= 4192 ~SCP_QUEUE_ALG_MASK; 4193 control_page->queue_flags |= 4194 SCP_QUEUE_ALG_UNRESTRICTED; 4195 } 4196 memcpy(&lun->mode_pages.control_page[ 4197 CTL_PAGE_CURRENT], 4198 &lun->mode_pages.control_page[ 4199 CTL_PAGE_SAVED], 4200 sizeof(control_page_default)); 4201 page_index->page_data = 4202 (uint8_t *)lun->mode_pages.control_page; 4203 break; 4204 } 4205 case 0x01: 4206 memcpy(&lun->mode_pages.control_ext_page[ 4207 CTL_PAGE_DEFAULT], 4208 &control_ext_page_default, 4209 sizeof(control_ext_page_default)); 4210 memcpy(&lun->mode_pages.control_ext_page[ 4211 CTL_PAGE_CHANGEABLE], 4212 &control_ext_page_changeable, 4213 sizeof(control_ext_page_changeable)); 4214 memcpy(&lun->mode_pages.control_ext_page[ 4215 CTL_PAGE_SAVED], 4216 &control_ext_page_default, 4217 sizeof(control_ext_page_default)); 4218 memcpy(&lun->mode_pages.control_ext_page[ 4219 CTL_PAGE_CURRENT], 4220 &lun->mode_pages.control_ext_page[ 4221 CTL_PAGE_SAVED], 4222 sizeof(control_ext_page_default)); 4223 page_index->page_data = 4224 (uint8_t *)lun->mode_pages.control_ext_page; 4225 break; 4226 default: 4227 panic("subpage %#x for page %#x is incorrect!", 4228 page_index->subpage, page_code); 4229 } 4230 break; 4231 } 4232 case SMS_INFO_EXCEPTIONS_PAGE: { 4233 switch (page_index->subpage) { 4234 case SMS_SUBPAGE_PAGE_0: 4235 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 4236 &ie_page_default, 4237 sizeof(ie_page_default)); 4238 memcpy(&lun->mode_pages.ie_page[ 4239 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 4240 sizeof(ie_page_changeable)); 4241 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 4242 &ie_page_default, 4243 sizeof(ie_page_default)); 4244 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 4245 &ie_page_default, 4246 sizeof(ie_page_default)); 4247 page_index->page_data = 4248 (uint8_t *)lun->mode_pages.ie_page; 4249 break; 
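		/*
		 * The next case handles the logical block provisioning
		 * thresholds subpage.  Illustrative arithmetic for the
		 * option parsing below (the option value and block size are
		 * made up, not taken from a real configuration): setting
		 * "avail-threshold" to "1G" expands to 2^30 bytes; with a
		 * 512-byte block size that is 2097152 blocks, which is then
		 * shifted right by CTL_LBP_EXPONENT before being stored in
		 * the descriptor count field.
		 */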
4250 case 0x02: { 4251 struct ctl_logical_block_provisioning_page *page; 4252 4253 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 4254 &lbp_page_default, 4255 sizeof(lbp_page_default)); 4256 memcpy(&lun->mode_pages.lbp_page[ 4257 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 4258 sizeof(lbp_page_changeable)); 4259 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4260 &lbp_page_default, 4261 sizeof(lbp_page_default)); 4262 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 4263 value = ctl_get_opt(&lun->be_lun->options, 4264 "avail-threshold"); 4265 if (value != NULL && 4266 ctl_expand_number(value, &ival) == 0) { 4267 page->descr[0].flags |= SLBPPD_ENABLED | 4268 SLBPPD_ARMING_DEC; 4269 if (lun->be_lun->blocksize) 4270 ival /= lun->be_lun->blocksize; 4271 else 4272 ival /= 512; 4273 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4274 page->descr[0].count); 4275 } 4276 value = ctl_get_opt(&lun->be_lun->options, 4277 "used-threshold"); 4278 if (value != NULL && 4279 ctl_expand_number(value, &ival) == 0) { 4280 page->descr[1].flags |= SLBPPD_ENABLED | 4281 SLBPPD_ARMING_INC; 4282 if (lun->be_lun->blocksize) 4283 ival /= lun->be_lun->blocksize; 4284 else 4285 ival /= 512; 4286 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4287 page->descr[1].count); 4288 } 4289 value = ctl_get_opt(&lun->be_lun->options, 4290 "pool-avail-threshold"); 4291 if (value != NULL && 4292 ctl_expand_number(value, &ival) == 0) { 4293 page->descr[2].flags |= SLBPPD_ENABLED | 4294 SLBPPD_ARMING_DEC; 4295 if (lun->be_lun->blocksize) 4296 ival /= lun->be_lun->blocksize; 4297 else 4298 ival /= 512; 4299 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4300 page->descr[2].count); 4301 } 4302 value = ctl_get_opt(&lun->be_lun->options, 4303 "pool-used-threshold"); 4304 if (value != NULL && 4305 ctl_expand_number(value, &ival) == 0) { 4306 page->descr[3].flags |= SLBPPD_ENABLED | 4307 SLBPPD_ARMING_INC; 4308 if (lun->be_lun->blocksize) 4309 ival /= lun->be_lun->blocksize; 4310 else 4311 ival /= 512; 4312 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4313 page->descr[3].count); 4314 } 4315 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 4316 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4317 sizeof(lbp_page_default)); 4318 page_index->page_data = 4319 (uint8_t *)lun->mode_pages.lbp_page; 4320 break; 4321 } 4322 default: 4323 panic("subpage %#x for page %#x is incorrect!", 4324 page_index->subpage, page_code); 4325 } 4326 break; 4327 } 4328 case SMS_CDDVD_CAPS_PAGE:{ 4329 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4330 ("subpage %#x for page %#x is incorrect!", 4331 page_index->subpage, page_code)); 4332 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT], 4333 &cddvd_page_default, 4334 sizeof(cddvd_page_default)); 4335 memcpy(&lun->mode_pages.cddvd_page[ 4336 CTL_PAGE_CHANGEABLE], &cddvd_page_changeable, 4337 sizeof(cddvd_page_changeable)); 4338 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4339 &cddvd_page_default, 4340 sizeof(cddvd_page_default)); 4341 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT], 4342 &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4343 sizeof(cddvd_page_default)); 4344 page_index->page_data = 4345 (uint8_t *)lun->mode_pages.cddvd_page; 4346 break; 4347 } 4348 default: 4349 panic("invalid page code value %#x", page_code); 4350 } 4351 } 4352 4353 return (CTL_RETVAL_COMPLETE); 4354 } 4355 4356 static int 4357 ctl_init_log_page_index(struct ctl_lun *lun) 4358 { 4359 struct ctl_page_index *page_index; 4360 int i, j, k, prev; 4361 4362 memcpy(&lun->log_pages.index, log_page_index_template, 4363 
sizeof(log_page_index_template)); 4364 4365 prev = -1; 4366 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 4367 4368 page_index = &lun->log_pages.index[i]; 4369 if (lun->be_lun->lun_type == T_DIRECT && 4370 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 4371 continue; 4372 if (lun->be_lun->lun_type == T_PROCESSOR && 4373 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 4374 continue; 4375 if (lun->be_lun->lun_type == T_CDROM && 4376 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 4377 continue; 4378 4379 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 4380 lun->backend->lun_attr == NULL) 4381 continue; 4382 4383 if (page_index->page_code != prev) { 4384 lun->log_pages.pages_page[j] = page_index->page_code; 4385 prev = page_index->page_code; 4386 j++; 4387 } 4388 lun->log_pages.subpages_page[k*2] = page_index->page_code; 4389 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 4390 k++; 4391 } 4392 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 4393 lun->log_pages.index[0].page_len = j; 4394 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 4395 lun->log_pages.index[1].page_len = k * 2; 4396 lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0]; 4397 lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS; 4398 lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page; 4399 lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page); 4400 lun->log_pages.index[4].page_data = (uint8_t *)&lun->log_pages.ie_page; 4401 lun->log_pages.index[4].page_len = sizeof(lun->log_pages.ie_page); 4402 4403 return (CTL_RETVAL_COMPLETE); 4404 } 4405 4406 static int 4407 hex2bin(const char *str, uint8_t *buf, int buf_size) 4408 { 4409 int i; 4410 u_char c; 4411 4412 memset(buf, 0, buf_size); 4413 while (isspace(str[0])) 4414 str++; 4415 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) 4416 str += 2; 4417 buf_size *= 2; 4418 for (i = 0; str[i] != 0 && i < buf_size; i++) { 4419 while (str[i] == '-') /* Skip dashes in UUIDs. */ 4420 str++; 4421 c = str[i]; 4422 if (isdigit(c)) 4423 c -= '0'; 4424 else if (isalpha(c)) 4425 c -= isupper(c) ? 'A' - 10 : 'a' - 10; 4426 else 4427 break; 4428 if (c >= 16) 4429 break; 4430 if ((i & 1) == 0) 4431 buf[i / 2] |= (c << 4); 4432 else 4433 buf[i / 2] |= c; 4434 } 4435 return ((i + 1) / 2); 4436 } 4437 4438 /* 4439 * LUN allocation. 4440 * 4441 * Requirements: 4442 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 4443 * wants us to allocate the LUN and he can block. 4444 * - ctl_softc is always set 4445 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 4446 * 4447 * Returns 0 for success, non-zero (errno) for failure. 4448 */ 4449 static int 4450 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4451 struct ctl_be_lun *const be_lun) 4452 { 4453 struct ctl_lun *nlun, *lun; 4454 struct scsi_vpd_id_descriptor *desc; 4455 struct scsi_vpd_id_t10 *t10id; 4456 const char *eui, *naa, *scsiname, *uuid, *vendor, *value; 4457 int lun_number, lun_malloced; 4458 int devidlen, idlen1, idlen2 = 0, len; 4459 4460 if (be_lun == NULL) 4461 return (EINVAL); 4462 4463 /* 4464 * We currently only support Direct Access or Processor LUN types. 
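 * (T_CDROM is accepted as well; see the switch below.  Unsupported types
 * are reported to the backend via lun_config_status() with
 * CTL_LUN_CONFIG_FAILURE.)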
4465 */ 4466 switch (be_lun->lun_type) { 4467 case T_DIRECT: 4468 case T_PROCESSOR: 4469 case T_CDROM: 4470 break; 4471 case T_SEQUENTIAL: 4472 case T_CHANGER: 4473 default: 4474 be_lun->lun_config_status(be_lun->be_lun, 4475 CTL_LUN_CONFIG_FAILURE); 4476 break; 4477 } 4478 if (ctl_lun == NULL) { 4479 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4480 lun_malloced = 1; 4481 } else { 4482 lun_malloced = 0; 4483 lun = ctl_lun; 4484 } 4485 4486 memset(lun, 0, sizeof(*lun)); 4487 if (lun_malloced) 4488 lun->flags = CTL_LUN_MALLOCED; 4489 4490 /* Generate LUN ID. */ 4491 devidlen = max(CTL_DEVID_MIN_LEN, 4492 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4493 idlen1 = sizeof(*t10id) + devidlen; 4494 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4495 scsiname = ctl_get_opt(&be_lun->options, "scsiname"); 4496 if (scsiname != NULL) { 4497 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4498 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4499 } 4500 eui = ctl_get_opt(&be_lun->options, "eui"); 4501 if (eui != NULL) { 4502 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4503 } 4504 naa = ctl_get_opt(&be_lun->options, "naa"); 4505 if (naa != NULL) { 4506 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4507 } 4508 uuid = ctl_get_opt(&be_lun->options, "uuid"); 4509 if (uuid != NULL) { 4510 len += sizeof(struct scsi_vpd_id_descriptor) + 18; 4511 } 4512 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4513 M_CTL, M_WAITOK | M_ZERO); 4514 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4515 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4516 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4517 desc->length = idlen1; 4518 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4519 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4520 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { 4521 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4522 } else { 4523 strncpy(t10id->vendor, vendor, 4524 min(sizeof(t10id->vendor), strlen(vendor))); 4525 } 4526 strncpy((char *)t10id->vendor_spec_id, 4527 (char *)be_lun->device_id, devidlen); 4528 if (scsiname != NULL) { 4529 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4530 desc->length); 4531 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4532 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4533 SVPD_ID_TYPE_SCSI_NAME; 4534 desc->length = idlen2; 4535 strlcpy(desc->identifier, scsiname, idlen2); 4536 } 4537 if (eui != NULL) { 4538 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4539 desc->length); 4540 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4541 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4542 SVPD_ID_TYPE_EUI64; 4543 desc->length = hex2bin(eui, desc->identifier, 16); 4544 desc->length = desc->length > 12 ? 16 : 4545 (desc->length > 8 ? 12 : 8); 4546 len -= 16 - desc->length; 4547 } 4548 if (naa != NULL) { 4549 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4550 desc->length); 4551 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4552 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4553 SVPD_ID_TYPE_NAA; 4554 desc->length = hex2bin(naa, desc->identifier, 16); 4555 desc->length = desc->length > 8 ? 
16 : 8; 4556 len -= 16 - desc->length; 4557 } 4558 if (uuid != NULL) { 4559 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4560 desc->length); 4561 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4562 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4563 SVPD_ID_TYPE_UUID; 4564 desc->identifier[0] = 0x10; 4565 hex2bin(uuid, &desc->identifier[2], 16); 4566 desc->length = 18; 4567 } 4568 lun->lun_devid->len = len; 4569 4570 mtx_lock(&ctl_softc->ctl_lock); 4571 /* 4572 * See if the caller requested a particular LUN number. If so, see 4573 * if it is available. Otherwise, allocate the first available LUN. 4574 */ 4575 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4576 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4577 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4578 mtx_unlock(&ctl_softc->ctl_lock); 4579 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4580 printf("ctl: requested LUN ID %d is higher " 4581 "than CTL_MAX_LUNS - 1 (%d)\n", 4582 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4583 } else { 4584 /* 4585 * XXX KDM return an error, or just assign 4586 * another LUN ID in this case?? 4587 */ 4588 printf("ctl: requested LUN ID %d is already " 4589 "in use\n", be_lun->req_lun_id); 4590 } 4591 fail: 4592 free(lun->lun_devid, M_CTL); 4593 if (lun->flags & CTL_LUN_MALLOCED) 4594 free(lun, M_CTL); 4595 be_lun->lun_config_status(be_lun->be_lun, 4596 CTL_LUN_CONFIG_FAILURE); 4597 return (ENOSPC); 4598 } 4599 lun_number = be_lun->req_lun_id; 4600 } else { 4601 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS); 4602 if (lun_number == -1) { 4603 mtx_unlock(&ctl_softc->ctl_lock); 4604 printf("ctl: can't allocate LUN, out of LUNs\n"); 4605 goto fail; 4606 } 4607 } 4608 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4609 mtx_unlock(&ctl_softc->ctl_lock); 4610 4611 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4612 lun->lun = lun_number; 4613 lun->be_lun = be_lun; 4614 /* 4615 * The processor LUN is always enabled. Disk LUNs come on line 4616 * disabled, and must be enabled by the backend. 4617 */ 4618 lun->flags |= CTL_LUN_DISABLED; 4619 lun->backend = be_lun->be; 4620 be_lun->ctl_lun = lun; 4621 be_lun->lun_id = lun_number; 4622 atomic_add_int(&be_lun->be->num_luns, 1); 4623 if (be_lun->flags & CTL_LUN_FLAG_EJECTED) 4624 lun->flags |= CTL_LUN_EJECTED; 4625 if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) 4626 lun->flags |= CTL_LUN_NO_MEDIA; 4627 if (be_lun->flags & CTL_LUN_FLAG_STOPPED) 4628 lun->flags |= CTL_LUN_STOPPED; 4629 4630 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4631 lun->flags |= CTL_LUN_PRIMARY_SC; 4632 4633 value = ctl_get_opt(&be_lun->options, "removable"); 4634 if (value != NULL) { 4635 if (strcmp(value, "on") == 0) 4636 lun->flags |= CTL_LUN_REMOVABLE; 4637 } else if (be_lun->lun_type == T_CDROM) 4638 lun->flags |= CTL_LUN_REMOVABLE; 4639 4640 lun->ctl_softc = ctl_softc; 4641 #ifdef CTL_TIME_IO 4642 lun->last_busy = getsbinuptime(); 4643 #endif 4644 TAILQ_INIT(&lun->ooa_queue); 4645 TAILQ_INIT(&lun->blocked_queue); 4646 STAILQ_INIT(&lun->error_list); 4647 lun->ie_reported = 1; 4648 callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0); 4649 ctl_tpc_lun_init(lun); 4650 if (lun->flags & CTL_LUN_REMOVABLE) { 4651 lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4, 4652 M_CTL, M_WAITOK); 4653 } 4654 4655 /* 4656 * Initialize the mode and log page index. 
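 * ctl_init_page_index() fills in the per-LUN mode page copies;
 * ctl_init_log_page_index() builds the lists of supported log pages and
 * subpages and points the index entries at the statistics, LBP and
 * informational exceptions page data.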
4657 */ 4658 ctl_init_page_index(lun); 4659 ctl_init_log_page_index(lun); 4660 4661 /* Setup statistics gathering */ 4662 #ifdef CTL_LEGACY_STATS 4663 lun->legacy_stats.device_type = be_lun->lun_type; 4664 lun->legacy_stats.lun_number = lun_number; 4665 lun->legacy_stats.blocksize = be_lun->blocksize; 4666 if (be_lun->blocksize == 0) 4667 lun->legacy_stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4668 for (len = 0; len < CTL_MAX_PORTS; len++) 4669 lun->legacy_stats.ports[len].targ_port = len; 4670 #endif /* CTL_LEGACY_STATS */ 4671 lun->stats.item = lun_number; 4672 4673 /* 4674 * Now, before we insert this lun on the lun list, set the lun 4675 * inventory changed UA for all other luns. 4676 */ 4677 mtx_lock(&ctl_softc->ctl_lock); 4678 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4679 mtx_lock(&nlun->lun_lock); 4680 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4681 mtx_unlock(&nlun->lun_lock); 4682 } 4683 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4684 ctl_softc->ctl_luns[lun_number] = lun; 4685 ctl_softc->num_luns++; 4686 mtx_unlock(&ctl_softc->ctl_lock); 4687 4688 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4689 return (0); 4690 } 4691 4692 /* 4693 * Delete a LUN. 4694 * Assumptions: 4695 * - LUN has already been marked invalid and any pending I/O has been taken 4696 * care of. 4697 */ 4698 static int 4699 ctl_free_lun(struct ctl_lun *lun) 4700 { 4701 struct ctl_softc *softc = lun->ctl_softc; 4702 struct ctl_lun *nlun; 4703 int i; 4704 4705 mtx_assert(&softc->ctl_lock, MA_OWNED); 4706 4707 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4708 4709 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4710 4711 softc->ctl_luns[lun->lun] = NULL; 4712 4713 if (!TAILQ_EMPTY(&lun->ooa_queue)) 4714 panic("Freeing a LUN %p with outstanding I/O!!\n", lun); 4715 4716 softc->num_luns--; 4717 4718 /* 4719 * Tell the backend to free resources, if this LUN has a backend. 4720 */ 4721 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4722 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4723 4724 lun->ie_reportcnt = UINT32_MAX; 4725 callout_drain(&lun->ie_callout); 4726 4727 ctl_tpc_lun_shutdown(lun); 4728 mtx_destroy(&lun->lun_lock); 4729 free(lun->lun_devid, M_CTL); 4730 for (i = 0; i < CTL_MAX_PORTS; i++) 4731 free(lun->pending_ua[i], M_CTL); 4732 for (i = 0; i < CTL_MAX_PORTS; i++) 4733 free(lun->pr_keys[i], M_CTL); 4734 free(lun->write_buffer, M_CTL); 4735 free(lun->prevent, M_CTL); 4736 if (lun->flags & CTL_LUN_MALLOCED) 4737 free(lun, M_CTL); 4738 4739 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4740 mtx_lock(&nlun->lun_lock); 4741 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4742 mtx_unlock(&nlun->lun_lock); 4743 } 4744 4745 return (0); 4746 } 4747 4748 static void 4749 ctl_create_lun(struct ctl_be_lun *be_lun) 4750 { 4751 4752 /* 4753 * ctl_alloc_lun() should handle all potential failure cases. 
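 * On failure it reports CTL_LUN_CONFIG_FAILURE to the backend through
 * lun_config_status(), so nothing more needs to be done here.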
4754 */ 4755 ctl_alloc_lun(control_softc, NULL, be_lun); 4756 } 4757 4758 int 4759 ctl_add_lun(struct ctl_be_lun *be_lun) 4760 { 4761 struct ctl_softc *softc = control_softc; 4762 4763 mtx_lock(&softc->ctl_lock); 4764 STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links); 4765 mtx_unlock(&softc->ctl_lock); 4766 wakeup(&softc->pending_lun_queue); 4767 4768 return (0); 4769 } 4770 4771 int 4772 ctl_enable_lun(struct ctl_be_lun *be_lun) 4773 { 4774 struct ctl_softc *softc; 4775 struct ctl_port *port, *nport; 4776 struct ctl_lun *lun; 4777 int retval; 4778 4779 lun = (struct ctl_lun *)be_lun->ctl_lun; 4780 softc = lun->ctl_softc; 4781 4782 mtx_lock(&softc->ctl_lock); 4783 mtx_lock(&lun->lun_lock); 4784 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4785 /* 4786 * eh? Why did we get called if the LUN is already 4787 * enabled? 4788 */ 4789 mtx_unlock(&lun->lun_lock); 4790 mtx_unlock(&softc->ctl_lock); 4791 return (0); 4792 } 4793 lun->flags &= ~CTL_LUN_DISABLED; 4794 mtx_unlock(&lun->lun_lock); 4795 4796 STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) { 4797 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4798 port->lun_map != NULL || port->lun_enable == NULL) 4799 continue; 4800 4801 /* 4802 * Drop the lock while we call the FETD's enable routine. 4803 * This can lead to a callback into CTL (at least in the 4804 * case of the internal initiator frontend. 4805 */ 4806 mtx_unlock(&softc->ctl_lock); 4807 retval = port->lun_enable(port->targ_lun_arg, lun->lun); 4808 mtx_lock(&softc->ctl_lock); 4809 if (retval != 0) { 4810 printf("%s: FETD %s port %d returned error " 4811 "%d for lun_enable on lun %jd\n", 4812 __func__, port->port_name, port->targ_port, 4813 retval, (intmax_t)lun->lun); 4814 } 4815 } 4816 4817 mtx_unlock(&softc->ctl_lock); 4818 ctl_isc_announce_lun(lun); 4819 4820 return (0); 4821 } 4822 4823 int 4824 ctl_disable_lun(struct ctl_be_lun *be_lun) 4825 { 4826 struct ctl_softc *softc; 4827 struct ctl_port *port; 4828 struct ctl_lun *lun; 4829 int retval; 4830 4831 lun = (struct ctl_lun *)be_lun->ctl_lun; 4832 softc = lun->ctl_softc; 4833 4834 mtx_lock(&softc->ctl_lock); 4835 mtx_lock(&lun->lun_lock); 4836 if (lun->flags & CTL_LUN_DISABLED) { 4837 mtx_unlock(&lun->lun_lock); 4838 mtx_unlock(&softc->ctl_lock); 4839 return (0); 4840 } 4841 lun->flags |= CTL_LUN_DISABLED; 4842 mtx_unlock(&lun->lun_lock); 4843 4844 STAILQ_FOREACH(port, &softc->port_list, links) { 4845 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4846 port->lun_map != NULL || port->lun_disable == NULL) 4847 continue; 4848 4849 /* 4850 * Drop the lock before we call the frontend's disable 4851 * routine, to avoid lock order reversals. 4852 * 4853 * XXX KDM what happens if the frontend list changes while 4854 * we're traversing it? It's unlikely, but should be handled. 
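 * The same caveat applies to the lun_enable loop in ctl_enable_lun()
 * above, which also drops softc->ctl_lock around the frontend callback.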
4855 */ 4856 mtx_unlock(&softc->ctl_lock); 4857 retval = port->lun_disable(port->targ_lun_arg, lun->lun); 4858 mtx_lock(&softc->ctl_lock); 4859 if (retval != 0) { 4860 printf("%s: FETD %s port %d returned error " 4861 "%d for lun_disable on lun %jd\n", 4862 __func__, port->port_name, port->targ_port, 4863 retval, (intmax_t)lun->lun); 4864 } 4865 } 4866 4867 mtx_unlock(&softc->ctl_lock); 4868 ctl_isc_announce_lun(lun); 4869 4870 return (0); 4871 } 4872 4873 int 4874 ctl_start_lun(struct ctl_be_lun *be_lun) 4875 { 4876 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4877 4878 mtx_lock(&lun->lun_lock); 4879 lun->flags &= ~CTL_LUN_STOPPED; 4880 mtx_unlock(&lun->lun_lock); 4881 return (0); 4882 } 4883 4884 int 4885 ctl_stop_lun(struct ctl_be_lun *be_lun) 4886 { 4887 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4888 4889 mtx_lock(&lun->lun_lock); 4890 lun->flags |= CTL_LUN_STOPPED; 4891 mtx_unlock(&lun->lun_lock); 4892 return (0); 4893 } 4894 4895 int 4896 ctl_lun_no_media(struct ctl_be_lun *be_lun) 4897 { 4898 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4899 4900 mtx_lock(&lun->lun_lock); 4901 lun->flags |= CTL_LUN_NO_MEDIA; 4902 mtx_unlock(&lun->lun_lock); 4903 return (0); 4904 } 4905 4906 int 4907 ctl_lun_has_media(struct ctl_be_lun *be_lun) 4908 { 4909 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4910 union ctl_ha_msg msg; 4911 4912 mtx_lock(&lun->lun_lock); 4913 lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED); 4914 if (lun->flags & CTL_LUN_REMOVABLE) 4915 ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE); 4916 mtx_unlock(&lun->lun_lock); 4917 if ((lun->flags & CTL_LUN_REMOVABLE) && 4918 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 4919 bzero(&msg.ua, sizeof(msg.ua)); 4920 msg.hdr.msg_type = CTL_MSG_UA; 4921 msg.hdr.nexus.initid = -1; 4922 msg.hdr.nexus.targ_port = -1; 4923 msg.hdr.nexus.targ_lun = lun->lun; 4924 msg.hdr.nexus.targ_mapped_lun = lun->lun; 4925 msg.ua.ua_all = 1; 4926 msg.ua.ua_set = 1; 4927 msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE; 4928 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 4929 M_WAITOK); 4930 } 4931 return (0); 4932 } 4933 4934 int 4935 ctl_lun_ejected(struct ctl_be_lun *be_lun) 4936 { 4937 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4938 4939 mtx_lock(&lun->lun_lock); 4940 lun->flags |= CTL_LUN_EJECTED; 4941 mtx_unlock(&lun->lun_lock); 4942 return (0); 4943 } 4944 4945 int 4946 ctl_lun_primary(struct ctl_be_lun *be_lun) 4947 { 4948 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4949 4950 mtx_lock(&lun->lun_lock); 4951 lun->flags |= CTL_LUN_PRIMARY_SC; 4952 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4953 mtx_unlock(&lun->lun_lock); 4954 ctl_isc_announce_lun(lun); 4955 return (0); 4956 } 4957 4958 int 4959 ctl_lun_secondary(struct ctl_be_lun *be_lun) 4960 { 4961 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4962 4963 mtx_lock(&lun->lun_lock); 4964 lun->flags &= ~CTL_LUN_PRIMARY_SC; 4965 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4966 mtx_unlock(&lun->lun_lock); 4967 ctl_isc_announce_lun(lun); 4968 return (0); 4969 } 4970 4971 int 4972 ctl_invalidate_lun(struct ctl_be_lun *be_lun) 4973 { 4974 struct ctl_softc *softc; 4975 struct ctl_lun *lun; 4976 4977 lun = (struct ctl_lun *)be_lun->ctl_lun; 4978 softc = lun->ctl_softc; 4979 4980 mtx_lock(&lun->lun_lock); 4981 4982 /* 4983 * The LUN needs to be disabled before it can be marked invalid. 
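 * If it is still enabled we return -1 and leave the LUN untouched; the
 * backend is expected to call ctl_disable_lun() first.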
4984 */ 4985 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4986 mtx_unlock(&lun->lun_lock); 4987 return (-1); 4988 } 4989 /* 4990 * Mark the LUN invalid. 4991 */ 4992 lun->flags |= CTL_LUN_INVALID; 4993 4994 /* 4995 * If there is nothing in the OOA queue, go ahead and free the LUN. 4996 * If we have something in the OOA queue, we'll free it when the 4997 * last I/O completes. 4998 */ 4999 if (TAILQ_EMPTY(&lun->ooa_queue)) { 5000 mtx_unlock(&lun->lun_lock); 5001 mtx_lock(&softc->ctl_lock); 5002 ctl_free_lun(lun); 5003 mtx_unlock(&softc->ctl_lock); 5004 } else 5005 mtx_unlock(&lun->lun_lock); 5006 5007 return (0); 5008 } 5009 5010 void 5011 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 5012 { 5013 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5014 union ctl_ha_msg msg; 5015 5016 mtx_lock(&lun->lun_lock); 5017 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE); 5018 mtx_unlock(&lun->lun_lock); 5019 if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 5020 /* Send msg to other side. */ 5021 bzero(&msg.ua, sizeof(msg.ua)); 5022 msg.hdr.msg_type = CTL_MSG_UA; 5023 msg.hdr.nexus.initid = -1; 5024 msg.hdr.nexus.targ_port = -1; 5025 msg.hdr.nexus.targ_lun = lun->lun; 5026 msg.hdr.nexus.targ_mapped_lun = lun->lun; 5027 msg.ua.ua_all = 1; 5028 msg.ua.ua_set = 1; 5029 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE; 5030 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 5031 M_WAITOK); 5032 } 5033 } 5034 5035 /* 5036 * Backend "memory move is complete" callback for requests that never 5037 * make it down to say RAIDCore's configuration code. 5038 */ 5039 int 5040 ctl_config_move_done(union ctl_io *io) 5041 { 5042 int retval; 5043 5044 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 5045 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 5046 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 5047 5048 if ((io->io_hdr.port_status != 0) && 5049 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5050 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5051 ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, 5052 /*retry_count*/ io->io_hdr.port_status); 5053 } else if (io->scsiio.kern_data_resid != 0 && 5054 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && 5055 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5056 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5057 ctl_set_invalid_field_ciu(&io->scsiio); 5058 } 5059 5060 if (ctl_debug & CTL_DEBUG_CDB_DATA) 5061 ctl_data_print(io); 5062 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 5063 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5064 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 5065 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 5066 /* 5067 * XXX KDM just assuming a single pointer here, and not a 5068 * S/G list. If we start using S/G lists for config data, 5069 * we'll need to know how to clean them up here as well. 5070 */ 5071 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5072 free(io->scsiio.kern_data_ptr, M_CTL); 5073 ctl_done(io); 5074 retval = CTL_RETVAL_COMPLETE; 5075 } else { 5076 /* 5077 * XXX KDM now we need to continue data movement. Some 5078 * options: 5079 * - call ctl_scsiio() again? We don't do this for data 5080 * writes, because for those at least we know ahead of 5081 * time where the write will go and how long it is. For 5082 * config writes, though, that information is largely 5083 * contained within the write itself, thus we need to 5084 * parse out the data again. 
5085 * 5086 * - Call some other function once the data is in? 5087 */ 5088 5089 /* 5090 * XXX KDM call ctl_scsiio() again for now, and check flag 5091 * bits to see whether we're allocated or not. 5092 */ 5093 retval = ctl_scsiio(&io->scsiio); 5094 } 5095 return (retval); 5096 } 5097 5098 /* 5099 * This gets called by a backend driver when it is done with a 5100 * data_submit method. 5101 */ 5102 void 5103 ctl_data_submit_done(union ctl_io *io) 5104 { 5105 /* 5106 * If the IO_CONT flag is set, we need to call the supplied 5107 * function to continue processing the I/O, instead of completing 5108 * the I/O just yet. 5109 * 5110 * If there is an error, though, we don't want to keep processing. 5111 * Instead, just send status back to the initiator. 5112 */ 5113 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5114 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5115 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5116 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5117 io->scsiio.io_cont(io); 5118 return; 5119 } 5120 ctl_done(io); 5121 } 5122 5123 /* 5124 * This gets called by a backend driver when it is done with a 5125 * configuration write. 5126 */ 5127 void 5128 ctl_config_write_done(union ctl_io *io) 5129 { 5130 uint8_t *buf; 5131 5132 /* 5133 * If the IO_CONT flag is set, we need to call the supplied 5134 * function to continue processing the I/O, instead of completing 5135 * the I/O just yet. 5136 * 5137 * If there is an error, though, we don't want to keep processing. 5138 * Instead, just send status back to the initiator. 5139 */ 5140 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5141 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5142 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5143 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5144 io->scsiio.io_cont(io); 5145 return; 5146 } 5147 /* 5148 * Since a configuration write can be done for commands that actually 5149 * have data allocated, like write buffer, and commands that have 5150 * no data, like start/stop unit, we need to check here. 5151 */ 5152 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5153 buf = io->scsiio.kern_data_ptr; 5154 else 5155 buf = NULL; 5156 ctl_done(io); 5157 if (buf) 5158 free(buf, M_CTL); 5159 } 5160 5161 void 5162 ctl_config_read_done(union ctl_io *io) 5163 { 5164 uint8_t *buf; 5165 5166 /* 5167 * If there is some error -- we are done, skip data transfer. 5168 */ 5169 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 5170 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5171 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 5172 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5173 buf = io->scsiio.kern_data_ptr; 5174 else 5175 buf = NULL; 5176 ctl_done(io); 5177 if (buf) 5178 free(buf, M_CTL); 5179 return; 5180 } 5181 5182 /* 5183 * If the IO_CONT flag is set, we need to call the supplied 5184 * function to continue processing the I/O, instead of completing 5185 * the I/O just yet. 5186 */ 5187 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 5188 io->scsiio.io_cont(io); 5189 return; 5190 } 5191 5192 ctl_datamove(io); 5193 } 5194 5195 /* 5196 * SCSI release command. 5197 */ 5198 int 5199 ctl_scsi_release(struct ctl_scsiio *ctsio) 5200 { 5201 struct ctl_lun *lun = CTL_LUN(ctsio); 5202 uint32_t residx; 5203 5204 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 5205 5206 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5207 5208 /* 5209 * XXX KDM right now, we only support LUN reservation. 
We don't 5210 * support 3rd party reservations, or extent reservations, which 5211 * might actually need the parameter list. If we've gotten this 5212 * far, we've got a LUN reservation. Anything else got kicked out 5213 * above. So, according to SPC, ignore the length. 5214 */ 5215 5216 mtx_lock(&lun->lun_lock); 5217 5218 /* 5219 * According to SPC, it is not an error for an intiator to attempt 5220 * to release a reservation on a LUN that isn't reserved, or that 5221 * is reserved by another initiator. The reservation can only be 5222 * released, though, by the initiator who made it or by one of 5223 * several reset type events. 5224 */ 5225 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 5226 lun->flags &= ~CTL_LUN_RESERVED; 5227 5228 mtx_unlock(&lun->lun_lock); 5229 5230 ctl_set_success(ctsio); 5231 ctl_done((union ctl_io *)ctsio); 5232 return (CTL_RETVAL_COMPLETE); 5233 } 5234 5235 int 5236 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5237 { 5238 struct ctl_lun *lun = CTL_LUN(ctsio); 5239 uint32_t residx; 5240 5241 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5242 5243 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5244 5245 /* 5246 * XXX KDM right now, we only support LUN reservation. We don't 5247 * support 3rd party reservations, or extent reservations, which 5248 * might actually need the parameter list. If we've gotten this 5249 * far, we've got a LUN reservation. Anything else got kicked out 5250 * above. So, according to SPC, ignore the length. 5251 */ 5252 5253 mtx_lock(&lun->lun_lock); 5254 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 5255 ctl_set_reservation_conflict(ctsio); 5256 goto bailout; 5257 } 5258 5259 /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. */ 5260 if (lun->flags & CTL_LUN_PR_RESERVED) { 5261 ctl_set_success(ctsio); 5262 goto bailout; 5263 } 5264 5265 lun->flags |= CTL_LUN_RESERVED; 5266 lun->res_idx = residx; 5267 ctl_set_success(ctsio); 5268 5269 bailout: 5270 mtx_unlock(&lun->lun_lock); 5271 ctl_done((union ctl_io *)ctsio); 5272 return (CTL_RETVAL_COMPLETE); 5273 } 5274 5275 int 5276 ctl_start_stop(struct ctl_scsiio *ctsio) 5277 { 5278 struct ctl_lun *lun = CTL_LUN(ctsio); 5279 struct scsi_start_stop_unit *cdb; 5280 int retval; 5281 5282 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5283 5284 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5285 5286 if ((cdb->how & SSS_PC_MASK) == 0) { 5287 if ((lun->flags & CTL_LUN_PR_RESERVED) && 5288 (cdb->how & SSS_START) == 0) { 5289 uint32_t residx; 5290 5291 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5292 if (ctl_get_prkey(lun, residx) == 0 || 5293 (lun->pr_res_idx != residx && lun->pr_res_type < 4)) { 5294 5295 ctl_set_reservation_conflict(ctsio); 5296 ctl_done((union ctl_io *)ctsio); 5297 return (CTL_RETVAL_COMPLETE); 5298 } 5299 } 5300 5301 if ((cdb->how & SSS_LOEJ) && 5302 (lun->flags & CTL_LUN_REMOVABLE) == 0) { 5303 ctl_set_invalid_field(ctsio, 5304 /*sks_valid*/ 1, 5305 /*command*/ 1, 5306 /*field*/ 4, 5307 /*bit_valid*/ 1, 5308 /*bit*/ 1); 5309 ctl_done((union ctl_io *)ctsio); 5310 return (CTL_RETVAL_COMPLETE); 5311 } 5312 5313 if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) && 5314 lun->prevent_count > 0) { 5315 /* "Medium removal prevented" */ 5316 ctl_set_sense(ctsio, /*current_error*/ 1, 5317 /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ? 
5318 SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST, 5319 /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE); 5320 ctl_done((union ctl_io *)ctsio); 5321 return (CTL_RETVAL_COMPLETE); 5322 } 5323 } 5324 5325 retval = lun->backend->config_write((union ctl_io *)ctsio); 5326 return (retval); 5327 } 5328 5329 int 5330 ctl_prevent_allow(struct ctl_scsiio *ctsio) 5331 { 5332 struct ctl_lun *lun = CTL_LUN(ctsio); 5333 struct scsi_prevent *cdb; 5334 int retval; 5335 uint32_t initidx; 5336 5337 CTL_DEBUG_PRINT(("ctl_prevent_allow\n")); 5338 5339 cdb = (struct scsi_prevent *)ctsio->cdb; 5340 5341 if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) { 5342 ctl_set_invalid_opcode(ctsio); 5343 ctl_done((union ctl_io *)ctsio); 5344 return (CTL_RETVAL_COMPLETE); 5345 } 5346 5347 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5348 mtx_lock(&lun->lun_lock); 5349 if ((cdb->how & PR_PREVENT) && 5350 ctl_is_set(lun->prevent, initidx) == 0) { 5351 ctl_set_mask(lun->prevent, initidx); 5352 lun->prevent_count++; 5353 } else if ((cdb->how & PR_PREVENT) == 0 && 5354 ctl_is_set(lun->prevent, initidx)) { 5355 ctl_clear_mask(lun->prevent, initidx); 5356 lun->prevent_count--; 5357 } 5358 mtx_unlock(&lun->lun_lock); 5359 retval = lun->backend->config_write((union ctl_io *)ctsio); 5360 return (retval); 5361 } 5362 5363 /* 5364 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5365 * we don't really do anything with the LBA and length fields if the user 5366 * passes them in. Instead we'll just flush out the cache for the entire 5367 * LUN. 5368 */ 5369 int 5370 ctl_sync_cache(struct ctl_scsiio *ctsio) 5371 { 5372 struct ctl_lun *lun = CTL_LUN(ctsio); 5373 struct ctl_lba_len_flags *lbalen; 5374 uint64_t starting_lba; 5375 uint32_t block_count; 5376 int retval; 5377 uint8_t byte2; 5378 5379 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5380 5381 retval = 0; 5382 5383 switch (ctsio->cdb[0]) { 5384 case SYNCHRONIZE_CACHE: { 5385 struct scsi_sync_cache *cdb; 5386 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5387 5388 starting_lba = scsi_4btoul(cdb->begin_lba); 5389 block_count = scsi_2btoul(cdb->lb_count); 5390 byte2 = cdb->byte2; 5391 break; 5392 } 5393 case SYNCHRONIZE_CACHE_16: { 5394 struct scsi_sync_cache_16 *cdb; 5395 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5396 5397 starting_lba = scsi_8btou64(cdb->begin_lba); 5398 block_count = scsi_4btoul(cdb->lb_count); 5399 byte2 = cdb->byte2; 5400 break; 5401 } 5402 default: 5403 ctl_set_invalid_opcode(ctsio); 5404 ctl_done((union ctl_io *)ctsio); 5405 goto bailout; 5406 break; /* NOTREACHED */ 5407 } 5408 5409 /* 5410 * We check the LBA and length, but don't do anything with them. 5411 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5412 * get flushed. This check will just help satisfy anyone who wants 5413 * to see an error for an out of range LBA. 
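 * For example, on a LUN whose maxlba is 2047, a request starting at LBA
 * 2000 for 100 blocks (2000 + 100 > 2048) is rejected with LBA OUT OF
 * RANGE; anything within bounds is simply passed on to the backend, which
 * flushes the whole LUN.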
5414 */ 5415 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5416 ctl_set_lba_out_of_range(ctsio, 5417 MAX(starting_lba, lun->be_lun->maxlba + 1)); 5418 ctl_done((union ctl_io *)ctsio); 5419 goto bailout; 5420 } 5421 5422 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5423 lbalen->lba = starting_lba; 5424 lbalen->len = block_count; 5425 lbalen->flags = byte2; 5426 retval = lun->backend->config_write((union ctl_io *)ctsio); 5427 5428 bailout: 5429 return (retval); 5430 } 5431 5432 int 5433 ctl_format(struct ctl_scsiio *ctsio) 5434 { 5435 struct scsi_format *cdb; 5436 int length, defect_list_len; 5437 5438 CTL_DEBUG_PRINT(("ctl_format\n")); 5439 5440 cdb = (struct scsi_format *)ctsio->cdb; 5441 5442 length = 0; 5443 if (cdb->byte2 & SF_FMTDATA) { 5444 if (cdb->byte2 & SF_LONGLIST) 5445 length = sizeof(struct scsi_format_header_long); 5446 else 5447 length = sizeof(struct scsi_format_header_short); 5448 } 5449 5450 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5451 && (length > 0)) { 5452 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5453 ctsio->kern_data_len = length; 5454 ctsio->kern_total_len = length; 5455 ctsio->kern_rel_offset = 0; 5456 ctsio->kern_sg_entries = 0; 5457 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5458 ctsio->be_move_done = ctl_config_move_done; 5459 ctl_datamove((union ctl_io *)ctsio); 5460 5461 return (CTL_RETVAL_COMPLETE); 5462 } 5463 5464 defect_list_len = 0; 5465 5466 if (cdb->byte2 & SF_FMTDATA) { 5467 if (cdb->byte2 & SF_LONGLIST) { 5468 struct scsi_format_header_long *header; 5469 5470 header = (struct scsi_format_header_long *) 5471 ctsio->kern_data_ptr; 5472 5473 defect_list_len = scsi_4btoul(header->defect_list_len); 5474 if (defect_list_len != 0) { 5475 ctl_set_invalid_field(ctsio, 5476 /*sks_valid*/ 1, 5477 /*command*/ 0, 5478 /*field*/ 2, 5479 /*bit_valid*/ 0, 5480 /*bit*/ 0); 5481 goto bailout; 5482 } 5483 } else { 5484 struct scsi_format_header_short *header; 5485 5486 header = (struct scsi_format_header_short *) 5487 ctsio->kern_data_ptr; 5488 5489 defect_list_len = scsi_2btoul(header->defect_list_len); 5490 if (defect_list_len != 0) { 5491 ctl_set_invalid_field(ctsio, 5492 /*sks_valid*/ 1, 5493 /*command*/ 0, 5494 /*field*/ 2, 5495 /*bit_valid*/ 0, 5496 /*bit*/ 0); 5497 goto bailout; 5498 } 5499 } 5500 } 5501 5502 ctl_set_success(ctsio); 5503 bailout: 5504 5505 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5506 free(ctsio->kern_data_ptr, M_CTL); 5507 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5508 } 5509 5510 ctl_done((union ctl_io *)ctsio); 5511 return (CTL_RETVAL_COMPLETE); 5512 } 5513 5514 int 5515 ctl_read_buffer(struct ctl_scsiio *ctsio) 5516 { 5517 struct ctl_lun *lun = CTL_LUN(ctsio); 5518 uint64_t buffer_offset; 5519 uint32_t len; 5520 uint8_t byte2; 5521 static uint8_t descr[4]; 5522 static uint8_t echo_descr[4] = { 0 }; 5523 5524 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5525 5526 switch (ctsio->cdb[0]) { 5527 case READ_BUFFER: { 5528 struct scsi_read_buffer *cdb; 5529 5530 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5531 buffer_offset = scsi_3btoul(cdb->offset); 5532 len = scsi_3btoul(cdb->length); 5533 byte2 = cdb->byte2; 5534 break; 5535 } 5536 case READ_BUFFER_16: { 5537 struct scsi_read_buffer_16 *cdb; 5538 5539 cdb = (struct scsi_read_buffer_16 *)ctsio->cdb; 5540 buffer_offset = scsi_8btou64(cdb->offset); 5541 len = scsi_4btoul(cdb->length); 5542 byte2 = cdb->byte2; 5543 break; 5544 } 5545 default: /* This shouldn't happen. 
*/ 5546 ctl_set_invalid_opcode(ctsio); 5547 ctl_done((union ctl_io *)ctsio); 5548 return (CTL_RETVAL_COMPLETE); 5549 } 5550 5551 if (buffer_offset > CTL_WRITE_BUFFER_SIZE || 5552 buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5553 ctl_set_invalid_field(ctsio, 5554 /*sks_valid*/ 1, 5555 /*command*/ 1, 5556 /*field*/ 6, 5557 /*bit_valid*/ 0, 5558 /*bit*/ 0); 5559 ctl_done((union ctl_io *)ctsio); 5560 return (CTL_RETVAL_COMPLETE); 5561 } 5562 5563 if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5564 descr[0] = 0; 5565 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5566 ctsio->kern_data_ptr = descr; 5567 len = min(len, sizeof(descr)); 5568 } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5569 ctsio->kern_data_ptr = echo_descr; 5570 len = min(len, sizeof(echo_descr)); 5571 } else { 5572 if (lun->write_buffer == NULL) { 5573 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5574 M_CTL, M_WAITOK); 5575 } 5576 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5577 } 5578 ctsio->kern_data_len = len; 5579 ctsio->kern_total_len = len; 5580 ctsio->kern_rel_offset = 0; 5581 ctsio->kern_sg_entries = 0; 5582 ctl_set_success(ctsio); 5583 ctsio->be_move_done = ctl_config_move_done; 5584 ctl_datamove((union ctl_io *)ctsio); 5585 return (CTL_RETVAL_COMPLETE); 5586 } 5587 5588 int 5589 ctl_write_buffer(struct ctl_scsiio *ctsio) 5590 { 5591 struct ctl_lun *lun = CTL_LUN(ctsio); 5592 struct scsi_write_buffer *cdb; 5593 int buffer_offset, len; 5594 5595 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5596 5597 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5598 5599 len = scsi_3btoul(cdb->length); 5600 buffer_offset = scsi_3btoul(cdb->offset); 5601 5602 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5603 ctl_set_invalid_field(ctsio, 5604 /*sks_valid*/ 1, 5605 /*command*/ 1, 5606 /*field*/ 6, 5607 /*bit_valid*/ 0, 5608 /*bit*/ 0); 5609 ctl_done((union ctl_io *)ctsio); 5610 return (CTL_RETVAL_COMPLETE); 5611 } 5612 5613 /* 5614 * If we've got a kernel request that hasn't been malloced yet, 5615 * malloc it and tell the caller the data buffer is here. 
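 * The data lands in the per-LUN write_buffer (CTL_WRITE_BUFFER_SIZE
 * bytes), which is allocated lazily on first use and shared with
 * READ BUFFER above.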
5616 */ 5617 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5618 if (lun->write_buffer == NULL) { 5619 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5620 M_CTL, M_WAITOK); 5621 } 5622 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5623 ctsio->kern_data_len = len; 5624 ctsio->kern_total_len = len; 5625 ctsio->kern_rel_offset = 0; 5626 ctsio->kern_sg_entries = 0; 5627 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5628 ctsio->be_move_done = ctl_config_move_done; 5629 ctl_datamove((union ctl_io *)ctsio); 5630 5631 return (CTL_RETVAL_COMPLETE); 5632 } 5633 5634 ctl_set_success(ctsio); 5635 ctl_done((union ctl_io *)ctsio); 5636 return (CTL_RETVAL_COMPLETE); 5637 } 5638 5639 int 5640 ctl_write_same(struct ctl_scsiio *ctsio) 5641 { 5642 struct ctl_lun *lun = CTL_LUN(ctsio); 5643 struct ctl_lba_len_flags *lbalen; 5644 uint64_t lba; 5645 uint32_t num_blocks; 5646 int len, retval; 5647 uint8_t byte2; 5648 5649 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5650 5651 switch (ctsio->cdb[0]) { 5652 case WRITE_SAME_10: { 5653 struct scsi_write_same_10 *cdb; 5654 5655 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5656 5657 lba = scsi_4btoul(cdb->addr); 5658 num_blocks = scsi_2btoul(cdb->length); 5659 byte2 = cdb->byte2; 5660 break; 5661 } 5662 case WRITE_SAME_16: { 5663 struct scsi_write_same_16 *cdb; 5664 5665 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5666 5667 lba = scsi_8btou64(cdb->addr); 5668 num_blocks = scsi_4btoul(cdb->length); 5669 byte2 = cdb->byte2; 5670 break; 5671 } 5672 default: 5673 /* 5674 * We got a command we don't support. This shouldn't 5675 * happen, commands should be filtered out above us. 5676 */ 5677 ctl_set_invalid_opcode(ctsio); 5678 ctl_done((union ctl_io *)ctsio); 5679 5680 return (CTL_RETVAL_COMPLETE); 5681 break; /* NOTREACHED */ 5682 } 5683 5684 /* ANCHOR flag can be used only together with UNMAP */ 5685 if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) { 5686 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5687 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5688 ctl_done((union ctl_io *)ctsio); 5689 return (CTL_RETVAL_COMPLETE); 5690 } 5691 5692 /* 5693 * The first check is to make sure we're in bounds, the second 5694 * check is to catch wrap-around problems. If the lba + num blocks 5695 * is less than the lba, then we've wrapped around and the block 5696 * range is invalid anyway. 5697 */ 5698 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5699 || ((lba + num_blocks) < lba)) { 5700 ctl_set_lba_out_of_range(ctsio, 5701 MAX(lba, lun->be_lun->maxlba + 1)); 5702 ctl_done((union ctl_io *)ctsio); 5703 return (CTL_RETVAL_COMPLETE); 5704 } 5705 5706 /* Zero number of blocks means "to the last logical block" */ 5707 if (num_blocks == 0) { 5708 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5709 ctl_set_invalid_field(ctsio, 5710 /*sks_valid*/ 0, 5711 /*command*/ 1, 5712 /*field*/ 0, 5713 /*bit_valid*/ 0, 5714 /*bit*/ 0); 5715 ctl_done((union ctl_io *)ctsio); 5716 return (CTL_RETVAL_COMPLETE); 5717 } 5718 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5719 } 5720 5721 len = lun->be_lun->blocksize; 5722 5723 /* 5724 * If we've got a kernel request that hasn't been malloced yet, 5725 * malloc it and tell the caller the data buffer is here. 
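 * When SWS_NDOB (no data-out buffer) is set, the allocation below is
 * skipped and the request goes straight to the backend.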
5726 */ 5727 if ((byte2 & SWS_NDOB) == 0 && 5728 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5729 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5730 ctsio->kern_data_len = len; 5731 ctsio->kern_total_len = len; 5732 ctsio->kern_rel_offset = 0; 5733 ctsio->kern_sg_entries = 0; 5734 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5735 ctsio->be_move_done = ctl_config_move_done; 5736 ctl_datamove((union ctl_io *)ctsio); 5737 5738 return (CTL_RETVAL_COMPLETE); 5739 } 5740 5741 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5742 lbalen->lba = lba; 5743 lbalen->len = num_blocks; 5744 lbalen->flags = byte2; 5745 retval = lun->backend->config_write((union ctl_io *)ctsio); 5746 5747 return (retval); 5748 } 5749 5750 int 5751 ctl_unmap(struct ctl_scsiio *ctsio) 5752 { 5753 struct ctl_lun *lun = CTL_LUN(ctsio); 5754 struct scsi_unmap *cdb; 5755 struct ctl_ptr_len_flags *ptrlen; 5756 struct scsi_unmap_header *hdr; 5757 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5758 uint64_t lba; 5759 uint32_t num_blocks; 5760 int len, retval; 5761 uint8_t byte2; 5762 5763 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5764 5765 cdb = (struct scsi_unmap *)ctsio->cdb; 5766 len = scsi_2btoul(cdb->length); 5767 byte2 = cdb->byte2; 5768 5769 /* 5770 * If we've got a kernel request that hasn't been malloced yet, 5771 * malloc it and tell the caller the data buffer is here. 5772 */ 5773 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5774 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5775 ctsio->kern_data_len = len; 5776 ctsio->kern_total_len = len; 5777 ctsio->kern_rel_offset = 0; 5778 ctsio->kern_sg_entries = 0; 5779 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5780 ctsio->be_move_done = ctl_config_move_done; 5781 ctl_datamove((union ctl_io *)ctsio); 5782 5783 return (CTL_RETVAL_COMPLETE); 5784 } 5785 5786 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5787 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5788 if (len < sizeof (*hdr) || 5789 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5790 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5791 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5792 ctl_set_invalid_field(ctsio, 5793 /*sks_valid*/ 0, 5794 /*command*/ 0, 5795 /*field*/ 0, 5796 /*bit_valid*/ 0, 5797 /*bit*/ 0); 5798 goto done; 5799 } 5800 len = scsi_2btoul(hdr->desc_length); 5801 buf = (struct scsi_unmap_desc *)(hdr + 1); 5802 end = buf + len / sizeof(*buf); 5803 5804 endnz = buf; 5805 for (range = buf; range < end; range++) { 5806 lba = scsi_8btou64(range->lba); 5807 num_blocks = scsi_4btoul(range->length); 5808 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5809 || ((lba + num_blocks) < lba)) { 5810 ctl_set_lba_out_of_range(ctsio, 5811 MAX(lba, lun->be_lun->maxlba + 1)); 5812 ctl_done((union ctl_io *)ctsio); 5813 return (CTL_RETVAL_COMPLETE); 5814 } 5815 if (num_blocks != 0) 5816 endnz = range + 1; 5817 } 5818 5819 /* 5820 * Block backend can not handle zero last range. 5821 * Filter it out and return if there is nothing left. 
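	 *
	 * The validation loop above already checked every descriptor and
	 * remembered the last one with a non-zero length in 'endnz', so
	 * trimming the list to that point drops any trailing zero-length
	 * descriptors.  If every descriptor had a zero length, the UNMAP
	 * is completed as a successful no-op.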
5822 */ 5823 len = (uint8_t *)endnz - (uint8_t *)buf; 5824 if (len == 0) { 5825 ctl_set_success(ctsio); 5826 goto done; 5827 } 5828 5829 mtx_lock(&lun->lun_lock); 5830 ptrlen = (struct ctl_ptr_len_flags *) 5831 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5832 ptrlen->ptr = (void *)buf; 5833 ptrlen->len = len; 5834 ptrlen->flags = byte2; 5835 ctl_check_blocked(lun); 5836 mtx_unlock(&lun->lun_lock); 5837 5838 retval = lun->backend->config_write((union ctl_io *)ctsio); 5839 return (retval); 5840 5841 done: 5842 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5843 free(ctsio->kern_data_ptr, M_CTL); 5844 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5845 } 5846 ctl_done((union ctl_io *)ctsio); 5847 return (CTL_RETVAL_COMPLETE); 5848 } 5849 5850 int 5851 ctl_default_page_handler(struct ctl_scsiio *ctsio, 5852 struct ctl_page_index *page_index, uint8_t *page_ptr) 5853 { 5854 struct ctl_lun *lun = CTL_LUN(ctsio); 5855 uint8_t *current_cp; 5856 int set_ua; 5857 uint32_t initidx; 5858 5859 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5860 set_ua = 0; 5861 5862 current_cp = (page_index->page_data + (page_index->page_len * 5863 CTL_PAGE_CURRENT)); 5864 5865 mtx_lock(&lun->lun_lock); 5866 if (memcmp(current_cp, page_ptr, page_index->page_len)) { 5867 memcpy(current_cp, page_ptr, page_index->page_len); 5868 set_ua = 1; 5869 } 5870 if (set_ua != 0) 5871 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5872 mtx_unlock(&lun->lun_lock); 5873 if (set_ua) { 5874 ctl_isc_announce_mode(lun, 5875 ctl_get_initindex(&ctsio->io_hdr.nexus), 5876 page_index->page_code, page_index->subpage); 5877 } 5878 return (CTL_RETVAL_COMPLETE); 5879 } 5880 5881 static void 5882 ctl_ie_timer(void *arg) 5883 { 5884 struct ctl_lun *lun = arg; 5885 uint64_t t; 5886 5887 if (lun->ie_asc == 0) 5888 return; 5889 5890 if (lun->MODE_IE.mrie == SIEP_MRIE_UA) 5891 ctl_est_ua_all(lun, -1, CTL_UA_IE); 5892 else 5893 lun->ie_reported = 0; 5894 5895 if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) { 5896 lun->ie_reportcnt++; 5897 t = scsi_4btoul(lun->MODE_IE.interval_timer); 5898 if (t == 0 || t == UINT32_MAX) 5899 t = 3000; /* 5 min */ 5900 callout_schedule(&lun->ie_callout, t * hz / 10); 5901 } 5902 } 5903 5904 int 5905 ctl_ie_page_handler(struct ctl_scsiio *ctsio, 5906 struct ctl_page_index *page_index, uint8_t *page_ptr) 5907 { 5908 struct ctl_lun *lun = CTL_LUN(ctsio); 5909 struct scsi_info_exceptions_page *pg; 5910 uint64_t t; 5911 5912 (void)ctl_default_page_handler(ctsio, page_index, page_ptr); 5913 5914 pg = (struct scsi_info_exceptions_page *)page_ptr; 5915 mtx_lock(&lun->lun_lock); 5916 if (pg->info_flags & SIEP_FLAGS_TEST) { 5917 lun->ie_asc = 0x5d; 5918 lun->ie_ascq = 0xff; 5919 if (pg->mrie == SIEP_MRIE_UA) { 5920 ctl_est_ua_all(lun, -1, CTL_UA_IE); 5921 lun->ie_reported = 1; 5922 } else { 5923 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 5924 lun->ie_reported = -1; 5925 } 5926 lun->ie_reportcnt = 1; 5927 if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) { 5928 lun->ie_reportcnt++; 5929 t = scsi_4btoul(pg->interval_timer); 5930 if (t == 0 || t == UINT32_MAX) 5931 t = 3000; /* 5 min */ 5932 callout_reset(&lun->ie_callout, t * hz / 10, 5933 ctl_ie_timer, lun); 5934 } 5935 } else { 5936 lun->ie_asc = 0; 5937 lun->ie_ascq = 0; 5938 lun->ie_reported = 1; 5939 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 5940 lun->ie_reportcnt = UINT32_MAX; 5941 callout_stop(&lun->ie_callout); 5942 } 5943 mtx_unlock(&lun->lun_lock); 5944 return (CTL_RETVAL_COMPLETE); 5945 } 5946 5947 static int 5948 ctl_do_mode_select(union ctl_io *io) 5949 { 
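	/*
	 * This function consumes the MODE SELECT parameter list one mode
	 * page per pass.  It is called first from ctl_mode_select() and
	 * then again (via the io_cont hook) each time a page's select
	 * handler queues further I/O; the len_left/len_used counters kept
	 * in the CTL_PRIV_MODEPAGE area carry the parsing state across
	 * those calls.
	 */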
5950 struct ctl_lun *lun = CTL_LUN(io); 5951 struct scsi_mode_page_header *page_header; 5952 struct ctl_page_index *page_index; 5953 struct ctl_scsiio *ctsio; 5954 int page_len, page_len_offset, page_len_size; 5955 union ctl_modepage_info *modepage_info; 5956 uint16_t *len_left, *len_used; 5957 int retval, i; 5958 5959 ctsio = &io->scsiio; 5960 page_index = NULL; 5961 page_len = 0; 5962 5963 modepage_info = (union ctl_modepage_info *) 5964 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 5965 len_left = &modepage_info->header.len_left; 5966 len_used = &modepage_info->header.len_used; 5967 5968 do_next_page: 5969 5970 page_header = (struct scsi_mode_page_header *) 5971 (ctsio->kern_data_ptr + *len_used); 5972 5973 if (*len_left == 0) { 5974 free(ctsio->kern_data_ptr, M_CTL); 5975 ctl_set_success(ctsio); 5976 ctl_done((union ctl_io *)ctsio); 5977 return (CTL_RETVAL_COMPLETE); 5978 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 5979 5980 free(ctsio->kern_data_ptr, M_CTL); 5981 ctl_set_param_len_error(ctsio); 5982 ctl_done((union ctl_io *)ctsio); 5983 return (CTL_RETVAL_COMPLETE); 5984 5985 } else if ((page_header->page_code & SMPH_SPF) 5986 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 5987 5988 free(ctsio->kern_data_ptr, M_CTL); 5989 ctl_set_param_len_error(ctsio); 5990 ctl_done((union ctl_io *)ctsio); 5991 return (CTL_RETVAL_COMPLETE); 5992 } 5993 5994 5995 /* 5996 * XXX KDM should we do something with the block descriptor? 5997 */ 5998 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 5999 page_index = &lun->mode_pages.index[i]; 6000 if (lun->be_lun->lun_type == T_DIRECT && 6001 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6002 continue; 6003 if (lun->be_lun->lun_type == T_PROCESSOR && 6004 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6005 continue; 6006 if (lun->be_lun->lun_type == T_CDROM && 6007 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6008 continue; 6009 6010 if ((page_index->page_code & SMPH_PC_MASK) != 6011 (page_header->page_code & SMPH_PC_MASK)) 6012 continue; 6013 6014 /* 6015 * If neither page has a subpage code, then we've got a 6016 * match. 6017 */ 6018 if (((page_index->page_code & SMPH_SPF) == 0) 6019 && ((page_header->page_code & SMPH_SPF) == 0)) { 6020 page_len = page_header->page_length; 6021 break; 6022 } 6023 6024 /* 6025 * If both pages have subpages, then the subpage numbers 6026 * have to match. 6027 */ 6028 if ((page_index->page_code & SMPH_SPF) 6029 && (page_header->page_code & SMPH_SPF)) { 6030 struct scsi_mode_page_header_sp *sph; 6031 6032 sph = (struct scsi_mode_page_header_sp *)page_header; 6033 if (page_index->subpage == sph->subpage) { 6034 page_len = scsi_2btoul(sph->page_length); 6035 break; 6036 } 6037 } 6038 } 6039 6040 /* 6041 * If we couldn't find the page, or if we don't have a mode select 6042 * handler for it, send back an error to the user. 
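	 * The field offset returned in the sense data is *len_used, i.e.
	 * the offset of the offending page header within the parameter
	 * list, so the initiator can tell which page was rejected.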
6043  */
6044 	if ((i >= CTL_NUM_MODE_PAGES)
6045 	 || (page_index->select_handler == NULL)) {
6046 		ctl_set_invalid_field(ctsio,
6047 				      /*sks_valid*/ 1,
6048 				      /*command*/ 0,
6049 				      /*field*/ *len_used,
6050 				      /*bit_valid*/ 0,
6051 				      /*bit*/ 0);
6052 		free(ctsio->kern_data_ptr, M_CTL);
6053 		ctl_done((union ctl_io *)ctsio);
6054 		return (CTL_RETVAL_COMPLETE);
6055 	}
6056 
6057 	if (page_index->page_code & SMPH_SPF) {
6058 		page_len_offset = 2;
6059 		page_len_size = 2;
6060 	} else {
6061 		page_len_size = 1;
6062 		page_len_offset = 1;
6063 	}
6064 
6065 	/*
6066 	 * If the length the initiator gives us isn't the one we specify in
6067 	 * the mode page header, or if they didn't specify enough data in
6068 	 * the CDB to avoid truncating this page, kick out the request.
6069 	 */
6070 	if (page_len != page_index->page_len - page_len_offset - page_len_size) {
6071 		ctl_set_invalid_field(ctsio,
6072 				      /*sks_valid*/ 1,
6073 				      /*command*/ 0,
6074 				      /*field*/ *len_used + page_len_offset,
6075 				      /*bit_valid*/ 0,
6076 				      /*bit*/ 0);
6077 		free(ctsio->kern_data_ptr, M_CTL);
6078 		ctl_done((union ctl_io *)ctsio);
6079 		return (CTL_RETVAL_COMPLETE);
6080 	}
6081 	if (*len_left < page_index->page_len) {
6082 		free(ctsio->kern_data_ptr, M_CTL);
6083 		ctl_set_param_len_error(ctsio);
6084 		ctl_done((union ctl_io *)ctsio);
6085 		return (CTL_RETVAL_COMPLETE);
6086 	}
6087 
6088 	/*
6089 	 * Run through the mode page, checking to make sure that the bits
6090 	 * the user changed are actually legal for him to change.
6091 	 */
6092 	for (i = 0; i < page_index->page_len; i++) {
6093 		uint8_t *user_byte, *change_mask, *current_byte;
6094 		int bad_bit;
6095 		int j;
6096 
6097 		user_byte = (uint8_t *)page_header + i;
6098 		change_mask = page_index->page_data +
6099 		    (page_index->page_len * CTL_PAGE_CHANGEABLE) + i;
6100 		current_byte = page_index->page_data +
6101 		    (page_index->page_len * CTL_PAGE_CURRENT) + i;
6102 
6103 		/*
6104 		 * Check to see whether the user set any bits in this byte
6105 		 * that he is not allowed to set.
6106 		 */
6107 		if ((*user_byte & ~(*change_mask)) ==
6108 		    (*current_byte & ~(*change_mask)))
6109 			continue;
6110 
6111 		/*
6112 		 * Go through bit by bit to determine which one is illegal.
6113 		 */
6114 		bad_bit = 0;
6115 		for (j = 7; j >= 0; j--) {
6116 			if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
6117 			    (((1 << j) & ~(*change_mask)) & *current_byte)) {
6118 				bad_bit = j;
6119 				break;
6120 			}
6121 		}
6122 		ctl_set_invalid_field(ctsio,
6123 				      /*sks_valid*/ 1,
6124 				      /*command*/ 0,
6125 				      /*field*/ *len_used + i,
6126 				      /*bit_valid*/ 1,
6127 				      /*bit*/ bad_bit);
6128 		free(ctsio->kern_data_ptr, M_CTL);
6129 		ctl_done((union ctl_io *)ctsio);
6130 		return (CTL_RETVAL_COMPLETE);
6131 	}
6132 
6133 	/*
6134 	 * Adjust these before we call the page handler, since we may
6135 	 * end up getting called back one way or another before the handler
6136 	 * returns to this context.
6137 	 */
6138 	*len_left -= page_index->page_len;
6139 	*len_used += page_index->page_len;
6140 
6141 	retval = page_index->select_handler(ctsio, page_index,
6142 					    (uint8_t *)page_header);
6143 
6144 	/*
6145 	 * If the page handler returns CTL_RETVAL_QUEUED, then we need to
6146 	 * wait until this queued command completes to finish processing
6147 	 * the mode page.  If it returns anything other than
6148 	 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
6149 	 * already set the sense information, freed the data pointer, and
6150 	 * completed the io for us.
6151 	 */
6152 	if (retval != CTL_RETVAL_COMPLETE)
6153 		goto bailout_no_done;
6154 
6155 	/*
6156 	 * If the initiator sent us more than one page, parse the next one.
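	 * len_left and len_used were already advanced past the page we just
	 * handled, so jumping back to do_next_page resumes parsing at the
	 * following page header.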
6157 */ 6158 if (*len_left > 0) 6159 goto do_next_page; 6160 6161 ctl_set_success(ctsio); 6162 free(ctsio->kern_data_ptr, M_CTL); 6163 ctl_done((union ctl_io *)ctsio); 6164 6165 bailout_no_done: 6166 6167 return (CTL_RETVAL_COMPLETE); 6168 6169 } 6170 6171 int 6172 ctl_mode_select(struct ctl_scsiio *ctsio) 6173 { 6174 struct ctl_lun *lun = CTL_LUN(ctsio); 6175 union ctl_modepage_info *modepage_info; 6176 int bd_len, i, header_size, param_len, pf, rtd, sp; 6177 uint32_t initidx; 6178 6179 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 6180 switch (ctsio->cdb[0]) { 6181 case MODE_SELECT_6: { 6182 struct scsi_mode_select_6 *cdb; 6183 6184 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6185 6186 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6187 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6188 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6189 param_len = cdb->length; 6190 header_size = sizeof(struct scsi_mode_header_6); 6191 break; 6192 } 6193 case MODE_SELECT_10: { 6194 struct scsi_mode_select_10 *cdb; 6195 6196 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6197 6198 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6199 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6200 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6201 param_len = scsi_2btoul(cdb->length); 6202 header_size = sizeof(struct scsi_mode_header_10); 6203 break; 6204 } 6205 default: 6206 ctl_set_invalid_opcode(ctsio); 6207 ctl_done((union ctl_io *)ctsio); 6208 return (CTL_RETVAL_COMPLETE); 6209 } 6210 6211 if (rtd) { 6212 if (param_len != 0) { 6213 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, 6214 /*command*/ 1, /*field*/ 0, 6215 /*bit_valid*/ 0, /*bit*/ 0); 6216 ctl_done((union ctl_io *)ctsio); 6217 return (CTL_RETVAL_COMPLETE); 6218 } 6219 6220 /* Revert to defaults. */ 6221 ctl_init_page_index(lun); 6222 mtx_lock(&lun->lun_lock); 6223 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 6224 mtx_unlock(&lun->lun_lock); 6225 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6226 ctl_isc_announce_mode(lun, -1, 6227 lun->mode_pages.index[i].page_code & SMPH_PC_MASK, 6228 lun->mode_pages.index[i].subpage); 6229 } 6230 ctl_set_success(ctsio); 6231 ctl_done((union ctl_io *)ctsio); 6232 return (CTL_RETVAL_COMPLETE); 6233 } 6234 6235 /* 6236 * From SPC-3: 6237 * "A parameter list length of zero indicates that the Data-Out Buffer 6238 * shall be empty. This condition shall not be considered as an error." 6239 */ 6240 if (param_len == 0) { 6241 ctl_set_success(ctsio); 6242 ctl_done((union ctl_io *)ctsio); 6243 return (CTL_RETVAL_COMPLETE); 6244 } 6245 6246 /* 6247 * Since we'll hit this the first time through, prior to 6248 * allocation, we don't need to free a data buffer here. 6249 */ 6250 if (param_len < header_size) { 6251 ctl_set_param_len_error(ctsio); 6252 ctl_done((union ctl_io *)ctsio); 6253 return (CTL_RETVAL_COMPLETE); 6254 } 6255 6256 /* 6257 * Allocate the data buffer and grab the user's data. In theory, 6258 * we shouldn't have to sanity check the parameter list length here 6259 * because the maximum size is 64K. We should be able to malloc 6260 * that much without too many problems. 
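	 * (MODE SELECT(6) carries the parameter list length in a single
	 * byte and MODE SELECT(10) in two, so param_len can never exceed
	 * 65535 and the M_WAITOK allocation below stays small.)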
6261 */ 6262 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6263 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6264 ctsio->kern_data_len = param_len; 6265 ctsio->kern_total_len = param_len; 6266 ctsio->kern_rel_offset = 0; 6267 ctsio->kern_sg_entries = 0; 6268 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6269 ctsio->be_move_done = ctl_config_move_done; 6270 ctl_datamove((union ctl_io *)ctsio); 6271 6272 return (CTL_RETVAL_COMPLETE); 6273 } 6274 6275 switch (ctsio->cdb[0]) { 6276 case MODE_SELECT_6: { 6277 struct scsi_mode_header_6 *mh6; 6278 6279 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6280 bd_len = mh6->blk_desc_len; 6281 break; 6282 } 6283 case MODE_SELECT_10: { 6284 struct scsi_mode_header_10 *mh10; 6285 6286 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6287 bd_len = scsi_2btoul(mh10->blk_desc_len); 6288 break; 6289 } 6290 default: 6291 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6292 } 6293 6294 if (param_len < (header_size + bd_len)) { 6295 free(ctsio->kern_data_ptr, M_CTL); 6296 ctl_set_param_len_error(ctsio); 6297 ctl_done((union ctl_io *)ctsio); 6298 return (CTL_RETVAL_COMPLETE); 6299 } 6300 6301 /* 6302 * Set the IO_CONT flag, so that if this I/O gets passed to 6303 * ctl_config_write_done(), it'll get passed back to 6304 * ctl_do_mode_select() for further processing, or completion if 6305 * we're all done. 6306 */ 6307 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6308 ctsio->io_cont = ctl_do_mode_select; 6309 6310 modepage_info = (union ctl_modepage_info *) 6311 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6312 memset(modepage_info, 0, sizeof(*modepage_info)); 6313 modepage_info->header.len_left = param_len - header_size - bd_len; 6314 modepage_info->header.len_used = header_size + bd_len; 6315 6316 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6317 } 6318 6319 int 6320 ctl_mode_sense(struct ctl_scsiio *ctsio) 6321 { 6322 struct ctl_lun *lun = CTL_LUN(ctsio); 6323 int pc, page_code, dbd, llba, subpage; 6324 int alloc_len, page_len, header_len, total_len; 6325 struct scsi_mode_block_descr *block_desc; 6326 struct ctl_page_index *page_index; 6327 6328 dbd = 0; 6329 llba = 0; 6330 block_desc = NULL; 6331 6332 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6333 6334 switch (ctsio->cdb[0]) { 6335 case MODE_SENSE_6: { 6336 struct scsi_mode_sense_6 *cdb; 6337 6338 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6339 6340 header_len = sizeof(struct scsi_mode_hdr_6); 6341 if (cdb->byte2 & SMS_DBD) 6342 dbd = 1; 6343 else 6344 header_len += sizeof(struct scsi_mode_block_descr); 6345 6346 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6347 page_code = cdb->page & SMS_PAGE_CODE; 6348 subpage = cdb->subpage; 6349 alloc_len = cdb->length; 6350 break; 6351 } 6352 case MODE_SENSE_10: { 6353 struct scsi_mode_sense_10 *cdb; 6354 6355 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6356 6357 header_len = sizeof(struct scsi_mode_hdr_10); 6358 6359 if (cdb->byte2 & SMS_DBD) 6360 dbd = 1; 6361 else 6362 header_len += sizeof(struct scsi_mode_block_descr); 6363 if (cdb->byte2 & SMS10_LLBAA) 6364 llba = 1; 6365 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6366 page_code = cdb->page & SMS_PAGE_CODE; 6367 subpage = cdb->subpage; 6368 alloc_len = scsi_2btoul(cdb->length); 6369 break; 6370 } 6371 default: 6372 ctl_set_invalid_opcode(ctsio); 6373 ctl_done((union ctl_io *)ctsio); 6374 return (CTL_RETVAL_COMPLETE); 6375 break; /* NOTREACHED */ 6376 } 6377 6378 /* 6379 * We have to make a first pass through to calculate the size of 6380 * the pages that 
match the user's query. Then we allocate enough 6381 * memory to hold it, and actually copy the data into the buffer. 6382 */ 6383 switch (page_code) { 6384 case SMS_ALL_PAGES_PAGE: { 6385 u_int i; 6386 6387 page_len = 0; 6388 6389 /* 6390 * At the moment, values other than 0 and 0xff here are 6391 * reserved according to SPC-3. 6392 */ 6393 if ((subpage != SMS_SUBPAGE_PAGE_0) 6394 && (subpage != SMS_SUBPAGE_ALL)) { 6395 ctl_set_invalid_field(ctsio, 6396 /*sks_valid*/ 1, 6397 /*command*/ 1, 6398 /*field*/ 3, 6399 /*bit_valid*/ 0, 6400 /*bit*/ 0); 6401 ctl_done((union ctl_io *)ctsio); 6402 return (CTL_RETVAL_COMPLETE); 6403 } 6404 6405 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6406 page_index = &lun->mode_pages.index[i]; 6407 6408 /* Make sure the page is supported for this dev type */ 6409 if (lun->be_lun->lun_type == T_DIRECT && 6410 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6411 continue; 6412 if (lun->be_lun->lun_type == T_PROCESSOR && 6413 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6414 continue; 6415 if (lun->be_lun->lun_type == T_CDROM && 6416 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6417 continue; 6418 6419 /* 6420 * We don't use this subpage if the user didn't 6421 * request all subpages. 6422 */ 6423 if ((page_index->subpage != 0) 6424 && (subpage == SMS_SUBPAGE_PAGE_0)) 6425 continue; 6426 6427 #if 0 6428 printf("found page %#x len %d\n", 6429 page_index->page_code & SMPH_PC_MASK, 6430 page_index->page_len); 6431 #endif 6432 page_len += page_index->page_len; 6433 } 6434 break; 6435 } 6436 default: { 6437 u_int i; 6438 6439 page_len = 0; 6440 6441 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6442 page_index = &lun->mode_pages.index[i]; 6443 6444 /* Make sure the page is supported for this dev type */ 6445 if (lun->be_lun->lun_type == T_DIRECT && 6446 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6447 continue; 6448 if (lun->be_lun->lun_type == T_PROCESSOR && 6449 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6450 continue; 6451 if (lun->be_lun->lun_type == T_CDROM && 6452 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6453 continue; 6454 6455 /* Look for the right page code */ 6456 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6457 continue; 6458 6459 /* Look for the right subpage or the subpage wildcard*/ 6460 if ((page_index->subpage != subpage) 6461 && (subpage != SMS_SUBPAGE_ALL)) 6462 continue; 6463 6464 #if 0 6465 printf("found page %#x len %d\n", 6466 page_index->page_code & SMPH_PC_MASK, 6467 page_index->page_len); 6468 #endif 6469 6470 page_len += page_index->page_len; 6471 } 6472 6473 if (page_len == 0) { 6474 ctl_set_invalid_field(ctsio, 6475 /*sks_valid*/ 1, 6476 /*command*/ 1, 6477 /*field*/ 2, 6478 /*bit_valid*/ 1, 6479 /*bit*/ 5); 6480 ctl_done((union ctl_io *)ctsio); 6481 return (CTL_RETVAL_COMPLETE); 6482 } 6483 break; 6484 } 6485 } 6486 6487 total_len = header_len + page_len; 6488 #if 0 6489 printf("header_len = %d, page_len = %d, total_len = %d\n", 6490 header_len, page_len, total_len); 6491 #endif 6492 6493 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6494 ctsio->kern_sg_entries = 0; 6495 ctsio->kern_rel_offset = 0; 6496 ctsio->kern_data_len = min(total_len, alloc_len); 6497 ctsio->kern_total_len = ctsio->kern_data_len; 6498 6499 switch (ctsio->cdb[0]) { 6500 case MODE_SENSE_6: { 6501 struct scsi_mode_hdr_6 *header; 6502 6503 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6504 6505 header->datalen = MIN(total_len - 1, 254); 6506 if (lun->be_lun->lun_type == T_DIRECT) 
{ 6507 header->dev_specific = 0x10; /* DPOFUA */ 6508 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6509 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6510 header->dev_specific |= 0x80; /* WP */ 6511 } 6512 if (dbd) 6513 header->block_descr_len = 0; 6514 else 6515 header->block_descr_len = 6516 sizeof(struct scsi_mode_block_descr); 6517 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6518 break; 6519 } 6520 case MODE_SENSE_10: { 6521 struct scsi_mode_hdr_10 *header; 6522 int datalen; 6523 6524 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6525 6526 datalen = MIN(total_len - 2, 65533); 6527 scsi_ulto2b(datalen, header->datalen); 6528 if (lun->be_lun->lun_type == T_DIRECT) { 6529 header->dev_specific = 0x10; /* DPOFUA */ 6530 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6531 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6532 header->dev_specific |= 0x80; /* WP */ 6533 } 6534 if (dbd) 6535 scsi_ulto2b(0, header->block_descr_len); 6536 else 6537 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6538 header->block_descr_len); 6539 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6540 break; 6541 } 6542 default: 6543 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6544 } 6545 6546 /* 6547 * If we've got a disk, use its blocksize in the block 6548 * descriptor. Otherwise, just set it to 0. 6549 */ 6550 if (dbd == 0) { 6551 if (lun->be_lun->lun_type == T_DIRECT) 6552 scsi_ulto3b(lun->be_lun->blocksize, 6553 block_desc->block_len); 6554 else 6555 scsi_ulto3b(0, block_desc->block_len); 6556 } 6557 6558 switch (page_code) { 6559 case SMS_ALL_PAGES_PAGE: { 6560 int i, data_used; 6561 6562 data_used = header_len; 6563 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6564 struct ctl_page_index *page_index; 6565 6566 page_index = &lun->mode_pages.index[i]; 6567 if (lun->be_lun->lun_type == T_DIRECT && 6568 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6569 continue; 6570 if (lun->be_lun->lun_type == T_PROCESSOR && 6571 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6572 continue; 6573 if (lun->be_lun->lun_type == T_CDROM && 6574 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6575 continue; 6576 6577 /* 6578 * We don't use this subpage if the user didn't 6579 * request all subpages. We already checked (above) 6580 * to make sure the user only specified a subpage 6581 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6582 */ 6583 if ((page_index->subpage != 0) 6584 && (subpage == SMS_SUBPAGE_PAGE_0)) 6585 continue; 6586 6587 /* 6588 * Call the handler, if it exists, to update the 6589 * page to the latest values. 
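			 * The handler, given the page control value, brings
			 * the stored copy of the page up to date; the memcpy
			 * below then returns whichever copy (current,
			 * changeable, default or saved) the initiator asked
			 * for via the pc field.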
6590 */ 6591 if (page_index->sense_handler != NULL) 6592 page_index->sense_handler(ctsio, page_index,pc); 6593 6594 memcpy(ctsio->kern_data_ptr + data_used, 6595 page_index->page_data + 6596 (page_index->page_len * pc), 6597 page_index->page_len); 6598 data_used += page_index->page_len; 6599 } 6600 break; 6601 } 6602 default: { 6603 int i, data_used; 6604 6605 data_used = header_len; 6606 6607 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6608 struct ctl_page_index *page_index; 6609 6610 page_index = &lun->mode_pages.index[i]; 6611 6612 /* Look for the right page code */ 6613 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6614 continue; 6615 6616 /* Look for the right subpage or the subpage wildcard*/ 6617 if ((page_index->subpage != subpage) 6618 && (subpage != SMS_SUBPAGE_ALL)) 6619 continue; 6620 6621 /* Make sure the page is supported for this dev type */ 6622 if (lun->be_lun->lun_type == T_DIRECT && 6623 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6624 continue; 6625 if (lun->be_lun->lun_type == T_PROCESSOR && 6626 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6627 continue; 6628 if (lun->be_lun->lun_type == T_CDROM && 6629 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6630 continue; 6631 6632 /* 6633 * Call the handler, if it exists, to update the 6634 * page to the latest values. 6635 */ 6636 if (page_index->sense_handler != NULL) 6637 page_index->sense_handler(ctsio, page_index,pc); 6638 6639 memcpy(ctsio->kern_data_ptr + data_used, 6640 page_index->page_data + 6641 (page_index->page_len * pc), 6642 page_index->page_len); 6643 data_used += page_index->page_len; 6644 } 6645 break; 6646 } 6647 } 6648 6649 ctl_set_success(ctsio); 6650 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6651 ctsio->be_move_done = ctl_config_move_done; 6652 ctl_datamove((union ctl_io *)ctsio); 6653 return (CTL_RETVAL_COMPLETE); 6654 } 6655 6656 int 6657 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6658 struct ctl_page_index *page_index, 6659 int pc) 6660 { 6661 struct ctl_lun *lun = CTL_LUN(ctsio); 6662 struct scsi_log_param_header *phdr; 6663 uint8_t *data; 6664 uint64_t val; 6665 6666 data = page_index->page_data; 6667 6668 if (lun->backend->lun_attr != NULL && 6669 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) 6670 != UINT64_MAX) { 6671 phdr = (struct scsi_log_param_header *)data; 6672 scsi_ulto2b(0x0001, phdr->param_code); 6673 phdr->param_control = SLP_LBIN | SLP_LP; 6674 phdr->param_len = 8; 6675 data = (uint8_t *)(phdr + 1); 6676 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6677 data[4] = 0x02; /* per-pool */ 6678 data += phdr->param_len; 6679 } 6680 6681 if (lun->backend->lun_attr != NULL && 6682 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) 6683 != UINT64_MAX) { 6684 phdr = (struct scsi_log_param_header *)data; 6685 scsi_ulto2b(0x0002, phdr->param_code); 6686 phdr->param_control = SLP_LBIN | SLP_LP; 6687 phdr->param_len = 8; 6688 data = (uint8_t *)(phdr + 1); 6689 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6690 data[4] = 0x01; /* per-LUN */ 6691 data += phdr->param_len; 6692 } 6693 6694 if (lun->backend->lun_attr != NULL && 6695 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail")) 6696 != UINT64_MAX) { 6697 phdr = (struct scsi_log_param_header *)data; 6698 scsi_ulto2b(0x00f1, phdr->param_code); 6699 phdr->param_control = SLP_LBIN | SLP_LP; 6700 phdr->param_len = 8; 6701 data = (uint8_t *)(phdr + 1); 6702 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6703 data[4] = 0x02; /* per-pool */ 6704 data += 
phdr->param_len; 6705 } 6706 6707 if (lun->backend->lun_attr != NULL && 6708 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) 6709 != UINT64_MAX) { 6710 phdr = (struct scsi_log_param_header *)data; 6711 scsi_ulto2b(0x00f2, phdr->param_code); 6712 phdr->param_control = SLP_LBIN | SLP_LP; 6713 phdr->param_len = 8; 6714 data = (uint8_t *)(phdr + 1); 6715 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6716 data[4] = 0x02; /* per-pool */ 6717 data += phdr->param_len; 6718 } 6719 6720 page_index->page_len = data - page_index->page_data; 6721 return (0); 6722 } 6723 6724 int 6725 ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, 6726 struct ctl_page_index *page_index, 6727 int pc) 6728 { 6729 struct ctl_lun *lun = CTL_LUN(ctsio); 6730 struct stat_page *data; 6731 struct bintime *t; 6732 6733 data = (struct stat_page *)page_index->page_data; 6734 6735 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); 6736 data->sap.hdr.param_control = SLP_LBIN; 6737 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - 6738 sizeof(struct scsi_log_param_header); 6739 scsi_u64to8b(lun->stats.operations[CTL_STATS_READ], 6740 data->sap.read_num); 6741 scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE], 6742 data->sap.write_num); 6743 if (lun->be_lun->blocksize > 0) { 6744 scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] / 6745 lun->be_lun->blocksize, data->sap.recvieved_lba); 6746 scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] / 6747 lun->be_lun->blocksize, data->sap.transmitted_lba); 6748 } 6749 t = &lun->stats.time[CTL_STATS_READ]; 6750 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), 6751 data->sap.read_int); 6752 t = &lun->stats.time[CTL_STATS_WRITE]; 6753 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), 6754 data->sap.write_int); 6755 scsi_u64to8b(0, data->sap.weighted_num); 6756 scsi_u64to8b(0, data->sap.weighted_int); 6757 scsi_ulto2b(SLP_IT, data->it.hdr.param_code); 6758 data->it.hdr.param_control = SLP_LBIN; 6759 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - 6760 sizeof(struct scsi_log_param_header); 6761 #ifdef CTL_TIME_IO 6762 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); 6763 #endif 6764 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); 6765 data->it.hdr.param_control = SLP_LBIN; 6766 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - 6767 sizeof(struct scsi_log_param_header); 6768 scsi_ulto4b(3, data->ti.exponent); 6769 scsi_ulto4b(1, data->ti.integer); 6770 return (0); 6771 } 6772 6773 int 6774 ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio, 6775 struct ctl_page_index *page_index, 6776 int pc) 6777 { 6778 struct ctl_lun *lun = CTL_LUN(ctsio); 6779 struct scsi_log_informational_exceptions *data; 6780 6781 data = (struct scsi_log_informational_exceptions *)page_index->page_data; 6782 6783 scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code); 6784 data->hdr.param_control = SLP_LBIN; 6785 data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) - 6786 sizeof(struct scsi_log_param_header); 6787 data->ie_asc = lun->ie_asc; 6788 data->ie_ascq = lun->ie_ascq; 6789 data->temperature = 0xff; 6790 return (0); 6791 } 6792 6793 int 6794 ctl_log_sense(struct ctl_scsiio *ctsio) 6795 { 6796 struct ctl_lun *lun = CTL_LUN(ctsio); 6797 int i, pc, page_code, subpage; 6798 int alloc_len, total_len; 6799 struct ctl_page_index *page_index; 6800 struct scsi_log_sense *cdb; 6801 struct scsi_log_header *header; 6802 6803 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 6804 6805 cdb = (struct scsi_log_sense *)ctsio->cdb; 
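	/*
	 * Decode the LOG SENSE CDB: the page control (PC) field lives in
	 * the top two bits of the page byte, the page code in the low six
	 * bits, the subpage code in its own byte and the allocation length
	 * in a two-byte field; e.g. a request for the general statistics
	 * and performance page would carry page code 0x19 with subpage 0.
	 */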
6806 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 6807 page_code = cdb->page & SLS_PAGE_CODE; 6808 subpage = cdb->subpage; 6809 alloc_len = scsi_2btoul(cdb->length); 6810 6811 page_index = NULL; 6812 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 6813 page_index = &lun->log_pages.index[i]; 6814 6815 /* Look for the right page code */ 6816 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 6817 continue; 6818 6819 /* Look for the right subpage or the subpage wildcard*/ 6820 if (page_index->subpage != subpage) 6821 continue; 6822 6823 break; 6824 } 6825 if (i >= CTL_NUM_LOG_PAGES) { 6826 ctl_set_invalid_field(ctsio, 6827 /*sks_valid*/ 1, 6828 /*command*/ 1, 6829 /*field*/ 2, 6830 /*bit_valid*/ 0, 6831 /*bit*/ 0); 6832 ctl_done((union ctl_io *)ctsio); 6833 return (CTL_RETVAL_COMPLETE); 6834 } 6835 6836 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 6837 6838 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6839 ctsio->kern_sg_entries = 0; 6840 ctsio->kern_rel_offset = 0; 6841 ctsio->kern_data_len = min(total_len, alloc_len); 6842 ctsio->kern_total_len = ctsio->kern_data_len; 6843 6844 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 6845 header->page = page_index->page_code; 6846 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING) 6847 header->page |= SL_DS; 6848 if (page_index->subpage) { 6849 header->page |= SL_SPF; 6850 header->subpage = page_index->subpage; 6851 } 6852 scsi_ulto2b(page_index->page_len, header->datalen); 6853 6854 /* 6855 * Call the handler, if it exists, to update the 6856 * page to the latest values. 6857 */ 6858 if (page_index->sense_handler != NULL) 6859 page_index->sense_handler(ctsio, page_index, pc); 6860 6861 memcpy(header + 1, page_index->page_data, page_index->page_len); 6862 6863 ctl_set_success(ctsio); 6864 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6865 ctsio->be_move_done = ctl_config_move_done; 6866 ctl_datamove((union ctl_io *)ctsio); 6867 return (CTL_RETVAL_COMPLETE); 6868 } 6869 6870 int 6871 ctl_read_capacity(struct ctl_scsiio *ctsio) 6872 { 6873 struct ctl_lun *lun = CTL_LUN(ctsio); 6874 struct scsi_read_capacity *cdb; 6875 struct scsi_read_capacity_data *data; 6876 uint32_t lba; 6877 6878 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 6879 6880 cdb = (struct scsi_read_capacity *)ctsio->cdb; 6881 6882 lba = scsi_4btoul(cdb->addr); 6883 if (((cdb->pmi & SRC_PMI) == 0) 6884 && (lba != 0)) { 6885 ctl_set_invalid_field(/*ctsio*/ ctsio, 6886 /*sks_valid*/ 1, 6887 /*command*/ 1, 6888 /*field*/ 2, 6889 /*bit_valid*/ 0, 6890 /*bit*/ 0); 6891 ctl_done((union ctl_io *)ctsio); 6892 return (CTL_RETVAL_COMPLETE); 6893 } 6894 6895 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6896 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 6897 ctsio->kern_data_len = sizeof(*data); 6898 ctsio->kern_total_len = sizeof(*data); 6899 ctsio->kern_rel_offset = 0; 6900 ctsio->kern_sg_entries = 0; 6901 6902 /* 6903 * If the maximum LBA is greater than 0xfffffffe, the user must 6904 * issue a SERVICE ACTION IN (16) command, with the read capacity 6905 * serivce action set. 6906 */ 6907 if (lun->be_lun->maxlba > 0xfffffffe) 6908 scsi_ulto4b(0xffffffff, data->addr); 6909 else 6910 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 6911 6912 /* 6913 * XXX KDM this may not be 512 bytes... 
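	 * The value reported is simply whatever block size the backend LUN
	 * advertises, so 4K-native or other non-512-byte devices are passed
	 * through as-is.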
6914 */ 6915 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6916 6917 ctl_set_success(ctsio); 6918 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6919 ctsio->be_move_done = ctl_config_move_done; 6920 ctl_datamove((union ctl_io *)ctsio); 6921 return (CTL_RETVAL_COMPLETE); 6922 } 6923 6924 int 6925 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 6926 { 6927 struct ctl_lun *lun = CTL_LUN(ctsio); 6928 struct scsi_read_capacity_16 *cdb; 6929 struct scsi_read_capacity_data_long *data; 6930 uint64_t lba; 6931 uint32_t alloc_len; 6932 6933 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 6934 6935 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 6936 6937 alloc_len = scsi_4btoul(cdb->alloc_len); 6938 lba = scsi_8btou64(cdb->addr); 6939 6940 if ((cdb->reladr & SRC16_PMI) 6941 && (lba != 0)) { 6942 ctl_set_invalid_field(/*ctsio*/ ctsio, 6943 /*sks_valid*/ 1, 6944 /*command*/ 1, 6945 /*field*/ 2, 6946 /*bit_valid*/ 0, 6947 /*bit*/ 0); 6948 ctl_done((union ctl_io *)ctsio); 6949 return (CTL_RETVAL_COMPLETE); 6950 } 6951 6952 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6953 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 6954 ctsio->kern_rel_offset = 0; 6955 ctsio->kern_sg_entries = 0; 6956 ctsio->kern_data_len = min(sizeof(*data), alloc_len); 6957 ctsio->kern_total_len = ctsio->kern_data_len; 6958 6959 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 6960 /* XXX KDM this may not be 512 bytes... */ 6961 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6962 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 6963 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 6964 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 6965 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 6966 6967 ctl_set_success(ctsio); 6968 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6969 ctsio->be_move_done = ctl_config_move_done; 6970 ctl_datamove((union ctl_io *)ctsio); 6971 return (CTL_RETVAL_COMPLETE); 6972 } 6973 6974 int 6975 ctl_get_lba_status(struct ctl_scsiio *ctsio) 6976 { 6977 struct ctl_lun *lun = CTL_LUN(ctsio); 6978 struct scsi_get_lba_status *cdb; 6979 struct scsi_get_lba_status_data *data; 6980 struct ctl_lba_len_flags *lbalen; 6981 uint64_t lba; 6982 uint32_t alloc_len, total_len; 6983 int retval; 6984 6985 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 6986 6987 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 6988 lba = scsi_8btou64(cdb->addr); 6989 alloc_len = scsi_4btoul(cdb->alloc_len); 6990 6991 if (lba > lun->be_lun->maxlba) { 6992 ctl_set_lba_out_of_range(ctsio, lba); 6993 ctl_done((union ctl_io *)ctsio); 6994 return (CTL_RETVAL_COMPLETE); 6995 } 6996 6997 total_len = sizeof(*data) + sizeof(data->descr[0]); 6998 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6999 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 7000 ctsio->kern_rel_offset = 0; 7001 ctsio->kern_sg_entries = 0; 7002 ctsio->kern_data_len = min(total_len, alloc_len); 7003 ctsio->kern_total_len = ctsio->kern_data_len; 7004 7005 /* Fill dummy data in case backend can't tell anything. */ 7006 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 7007 scsi_u64to8b(lba, data->descr[0].addr); 7008 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 7009 data->descr[0].length); 7010 data->descr[0].status = 0; /* Mapped or unknown. 
*/ 7011 7012 ctl_set_success(ctsio); 7013 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7014 ctsio->be_move_done = ctl_config_move_done; 7015 7016 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 7017 lbalen->lba = lba; 7018 lbalen->len = total_len; 7019 lbalen->flags = 0; 7020 retval = lun->backend->config_read((union ctl_io *)ctsio); 7021 return (CTL_RETVAL_COMPLETE); 7022 } 7023 7024 int 7025 ctl_read_defect(struct ctl_scsiio *ctsio) 7026 { 7027 struct scsi_read_defect_data_10 *ccb10; 7028 struct scsi_read_defect_data_12 *ccb12; 7029 struct scsi_read_defect_data_hdr_10 *data10; 7030 struct scsi_read_defect_data_hdr_12 *data12; 7031 uint32_t alloc_len, data_len; 7032 uint8_t format; 7033 7034 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7035 7036 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7037 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7038 format = ccb10->format; 7039 alloc_len = scsi_2btoul(ccb10->alloc_length); 7040 data_len = sizeof(*data10); 7041 } else { 7042 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7043 format = ccb12->format; 7044 alloc_len = scsi_4btoul(ccb12->alloc_length); 7045 data_len = sizeof(*data12); 7046 } 7047 if (alloc_len == 0) { 7048 ctl_set_success(ctsio); 7049 ctl_done((union ctl_io *)ctsio); 7050 return (CTL_RETVAL_COMPLETE); 7051 } 7052 7053 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7054 ctsio->kern_rel_offset = 0; 7055 ctsio->kern_sg_entries = 0; 7056 ctsio->kern_data_len = min(data_len, alloc_len); 7057 ctsio->kern_total_len = ctsio->kern_data_len; 7058 7059 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7060 data10 = (struct scsi_read_defect_data_hdr_10 *) 7061 ctsio->kern_data_ptr; 7062 data10->format = format; 7063 scsi_ulto2b(0, data10->length); 7064 } else { 7065 data12 = (struct scsi_read_defect_data_hdr_12 *) 7066 ctsio->kern_data_ptr; 7067 data12->format = format; 7068 scsi_ulto2b(0, data12->generation); 7069 scsi_ulto4b(0, data12->length); 7070 } 7071 7072 ctl_set_success(ctsio); 7073 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7074 ctsio->be_move_done = ctl_config_move_done; 7075 ctl_datamove((union ctl_io *)ctsio); 7076 return (CTL_RETVAL_COMPLETE); 7077 } 7078 7079 int 7080 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7081 { 7082 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7083 struct ctl_lun *lun = CTL_LUN(ctsio); 7084 struct scsi_maintenance_in *cdb; 7085 int retval; 7086 int alloc_len, ext, total_len = 0, g, pc, pg, ts, os; 7087 int num_ha_groups, num_target_ports, shared_group; 7088 struct ctl_port *port; 7089 struct scsi_target_group_data *rtg_ptr; 7090 struct scsi_target_group_data_extended *rtg_ext_ptr; 7091 struct scsi_target_port_group_descriptor *tpg_desc; 7092 7093 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7094 7095 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7096 retval = CTL_RETVAL_COMPLETE; 7097 7098 switch (cdb->byte2 & STG_PDF_MASK) { 7099 case STG_PDF_LENGTH: 7100 ext = 0; 7101 break; 7102 case STG_PDF_EXTENDED: 7103 ext = 1; 7104 break; 7105 default: 7106 ctl_set_invalid_field(/*ctsio*/ ctsio, 7107 /*sks_valid*/ 1, 7108 /*command*/ 1, 7109 /*field*/ 2, 7110 /*bit_valid*/ 1, 7111 /*bit*/ 5); 7112 ctl_done((union ctl_io *)ctsio); 7113 return(retval); 7114 } 7115 7116 num_target_ports = 0; 7117 shared_group = (softc->is_single != 0); 7118 mtx_lock(&softc->ctl_lock); 7119 STAILQ_FOREACH(port, &softc->port_list, links) { 7120 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7121 continue; 7122 if (ctl_lun_map_to_port(port, lun->lun) 
== UINT32_MAX) 7123 continue; 7124 num_target_ports++; 7125 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7126 shared_group = 1; 7127 } 7128 mtx_unlock(&softc->ctl_lock); 7129 num_ha_groups = (softc->is_single) ? 0 : NUM_HA_SHELVES; 7130 7131 if (ext) 7132 total_len = sizeof(struct scsi_target_group_data_extended); 7133 else 7134 total_len = sizeof(struct scsi_target_group_data); 7135 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7136 (shared_group + num_ha_groups) + 7137 sizeof(struct scsi_target_port_descriptor) * num_target_ports; 7138 7139 alloc_len = scsi_4btoul(cdb->length); 7140 7141 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7142 ctsio->kern_sg_entries = 0; 7143 ctsio->kern_rel_offset = 0; 7144 ctsio->kern_data_len = min(total_len, alloc_len); 7145 ctsio->kern_total_len = ctsio->kern_data_len; 7146 7147 if (ext) { 7148 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7149 ctsio->kern_data_ptr; 7150 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7151 rtg_ext_ptr->format_type = 0x10; 7152 rtg_ext_ptr->implicit_transition_time = 0; 7153 tpg_desc = &rtg_ext_ptr->groups[0]; 7154 } else { 7155 rtg_ptr = (struct scsi_target_group_data *) 7156 ctsio->kern_data_ptr; 7157 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7158 tpg_desc = &rtg_ptr->groups[0]; 7159 } 7160 7161 mtx_lock(&softc->ctl_lock); 7162 pg = softc->port_min / softc->port_cnt; 7163 if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { 7164 /* Some shelf is known to be primary. */ 7165 if (softc->ha_link == CTL_HA_LINK_OFFLINE) 7166 os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7167 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) 7168 os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7169 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) 7170 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 7171 else 7172 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7173 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7174 ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7175 } else { 7176 ts = os; 7177 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7178 } 7179 } else { 7180 /* No known primary shelf. */ 7181 if (softc->ha_link == CTL_HA_LINK_OFFLINE) { 7182 ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7183 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7184 } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { 7185 ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7186 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7187 } else { 7188 ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7189 } 7190 } 7191 if (shared_group) { 7192 tpg_desc->pref_state = ts; 7193 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7194 TPG_U_SUP | TPG_T_SUP; 7195 scsi_ulto2b(1, tpg_desc->target_port_group); 7196 tpg_desc->status = TPG_IMPLICIT; 7197 pc = 0; 7198 STAILQ_FOREACH(port, &softc->port_list, links) { 7199 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7200 continue; 7201 if (!softc->is_single && 7202 (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) 7203 continue; 7204 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7205 continue; 7206 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7207 relative_target_port_identifier); 7208 pc++; 7209 } 7210 tpg_desc->target_port_count = pc; 7211 tpg_desc = (struct scsi_target_port_group_descriptor *) 7212 &tpg_desc->descriptors[pc]; 7213 } 7214 for (g = 0; g < num_ha_groups; g++) { 7215 tpg_desc->pref_state = (g == pg) ? 
ts : os; 7216 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7217 TPG_U_SUP | TPG_T_SUP; 7218 scsi_ulto2b(2 + g, tpg_desc->target_port_group); 7219 tpg_desc->status = TPG_IMPLICIT; 7220 pc = 0; 7221 STAILQ_FOREACH(port, &softc->port_list, links) { 7222 if (port->targ_port < g * softc->port_cnt || 7223 port->targ_port >= (g + 1) * softc->port_cnt) 7224 continue; 7225 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7226 continue; 7227 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7228 continue; 7229 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7230 continue; 7231 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7232 relative_target_port_identifier); 7233 pc++; 7234 } 7235 tpg_desc->target_port_count = pc; 7236 tpg_desc = (struct scsi_target_port_group_descriptor *) 7237 &tpg_desc->descriptors[pc]; 7238 } 7239 mtx_unlock(&softc->ctl_lock); 7240 7241 ctl_set_success(ctsio); 7242 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7243 ctsio->be_move_done = ctl_config_move_done; 7244 ctl_datamove((union ctl_io *)ctsio); 7245 return(retval); 7246 } 7247 7248 int 7249 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7250 { 7251 struct ctl_lun *lun = CTL_LUN(ctsio); 7252 struct scsi_report_supported_opcodes *cdb; 7253 const struct ctl_cmd_entry *entry, *sentry; 7254 struct scsi_report_supported_opcodes_all *all; 7255 struct scsi_report_supported_opcodes_descr *descr; 7256 struct scsi_report_supported_opcodes_one *one; 7257 int retval; 7258 int alloc_len, total_len; 7259 int opcode, service_action, i, j, num; 7260 7261 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7262 7263 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7264 retval = CTL_RETVAL_COMPLETE; 7265 7266 opcode = cdb->requested_opcode; 7267 service_action = scsi_2btoul(cdb->requested_service_action); 7268 switch (cdb->options & RSO_OPTIONS_MASK) { 7269 case RSO_OPTIONS_ALL: 7270 num = 0; 7271 for (i = 0; i < 256; i++) { 7272 entry = &ctl_cmd_table[i]; 7273 if (entry->flags & CTL_CMD_FLAG_SA5) { 7274 for (j = 0; j < 32; j++) { 7275 sentry = &((const struct ctl_cmd_entry *) 7276 entry->execute)[j]; 7277 if (ctl_cmd_applicable( 7278 lun->be_lun->lun_type, sentry)) 7279 num++; 7280 } 7281 } else { 7282 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7283 entry)) 7284 num++; 7285 } 7286 } 7287 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7288 num * sizeof(struct scsi_report_supported_opcodes_descr); 7289 break; 7290 case RSO_OPTIONS_OC: 7291 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7292 ctl_set_invalid_field(/*ctsio*/ ctsio, 7293 /*sks_valid*/ 1, 7294 /*command*/ 1, 7295 /*field*/ 2, 7296 /*bit_valid*/ 1, 7297 /*bit*/ 2); 7298 ctl_done((union ctl_io *)ctsio); 7299 return (CTL_RETVAL_COMPLETE); 7300 } 7301 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7302 break; 7303 case RSO_OPTIONS_OC_SA: 7304 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7305 service_action >= 32) { 7306 ctl_set_invalid_field(/*ctsio*/ ctsio, 7307 /*sks_valid*/ 1, 7308 /*command*/ 1, 7309 /*field*/ 2, 7310 /*bit_valid*/ 1, 7311 /*bit*/ 2); 7312 ctl_done((union ctl_io *)ctsio); 7313 return (CTL_RETVAL_COMPLETE); 7314 } 7315 /* FALLTHROUGH */ 7316 case RSO_OPTIONS_OC_ASA: 7317 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7318 break; 7319 default: 7320 ctl_set_invalid_field(/*ctsio*/ ctsio, 7321 /*sks_valid*/ 1, 7322 /*command*/ 1, 7323 /*field*/ 2, 7324 /*bit_valid*/ 1, 7325 /*bit*/ 2); 7326 ctl_done((union ctl_io *)ctsio); 7327 return 
(CTL_RETVAL_COMPLETE); 7328 } 7329 7330 alloc_len = scsi_4btoul(cdb->length); 7331 7332 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7333 ctsio->kern_sg_entries = 0; 7334 ctsio->kern_rel_offset = 0; 7335 ctsio->kern_data_len = min(total_len, alloc_len); 7336 ctsio->kern_total_len = ctsio->kern_data_len; 7337 7338 switch (cdb->options & RSO_OPTIONS_MASK) { 7339 case RSO_OPTIONS_ALL: 7340 all = (struct scsi_report_supported_opcodes_all *) 7341 ctsio->kern_data_ptr; 7342 num = 0; 7343 for (i = 0; i < 256; i++) { 7344 entry = &ctl_cmd_table[i]; 7345 if (entry->flags & CTL_CMD_FLAG_SA5) { 7346 for (j = 0; j < 32; j++) { 7347 sentry = &((const struct ctl_cmd_entry *) 7348 entry->execute)[j]; 7349 if (!ctl_cmd_applicable( 7350 lun->be_lun->lun_type, sentry)) 7351 continue; 7352 descr = &all->descr[num++]; 7353 descr->opcode = i; 7354 scsi_ulto2b(j, descr->service_action); 7355 descr->flags = RSO_SERVACTV; 7356 scsi_ulto2b(sentry->length, 7357 descr->cdb_length); 7358 } 7359 } else { 7360 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7361 entry)) 7362 continue; 7363 descr = &all->descr[num++]; 7364 descr->opcode = i; 7365 scsi_ulto2b(0, descr->service_action); 7366 descr->flags = 0; 7367 scsi_ulto2b(entry->length, descr->cdb_length); 7368 } 7369 } 7370 scsi_ulto4b( 7371 num * sizeof(struct scsi_report_supported_opcodes_descr), 7372 all->length); 7373 break; 7374 case RSO_OPTIONS_OC: 7375 one = (struct scsi_report_supported_opcodes_one *) 7376 ctsio->kern_data_ptr; 7377 entry = &ctl_cmd_table[opcode]; 7378 goto fill_one; 7379 case RSO_OPTIONS_OC_SA: 7380 one = (struct scsi_report_supported_opcodes_one *) 7381 ctsio->kern_data_ptr; 7382 entry = &ctl_cmd_table[opcode]; 7383 entry = &((const struct ctl_cmd_entry *) 7384 entry->execute)[service_action]; 7385 fill_one: 7386 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7387 one->support = 3; 7388 scsi_ulto2b(entry->length, one->cdb_length); 7389 one->cdb_usage[0] = opcode; 7390 memcpy(&one->cdb_usage[1], entry->usage, 7391 entry->length - 1); 7392 } else 7393 one->support = 1; 7394 break; 7395 case RSO_OPTIONS_OC_ASA: 7396 one = (struct scsi_report_supported_opcodes_one *) 7397 ctsio->kern_data_ptr; 7398 entry = &ctl_cmd_table[opcode]; 7399 if (entry->flags & CTL_CMD_FLAG_SA5) { 7400 entry = &((const struct ctl_cmd_entry *) 7401 entry->execute)[service_action]; 7402 } else if (service_action != 0) { 7403 one->support = 1; 7404 break; 7405 } 7406 goto fill_one; 7407 } 7408 7409 ctl_set_success(ctsio); 7410 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7411 ctsio->be_move_done = ctl_config_move_done; 7412 ctl_datamove((union ctl_io *)ctsio); 7413 return(retval); 7414 } 7415 7416 int 7417 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7418 { 7419 struct scsi_report_supported_tmf *cdb; 7420 struct scsi_report_supported_tmf_ext_data *data; 7421 int retval; 7422 int alloc_len, total_len; 7423 7424 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7425 7426 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7427 7428 retval = CTL_RETVAL_COMPLETE; 7429 7430 if (cdb->options & RST_REPD) 7431 total_len = sizeof(struct scsi_report_supported_tmf_ext_data); 7432 else 7433 total_len = sizeof(struct scsi_report_supported_tmf_data); 7434 alloc_len = scsi_4btoul(cdb->length); 7435 7436 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7437 ctsio->kern_sg_entries = 0; 7438 ctsio->kern_rel_offset = 0; 7439 ctsio->kern_data_len = min(total_len, alloc_len); 7440 ctsio->kern_total_len = ctsio->kern_data_len; 7441 7442 
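	/*
	 * total_len above already reflects whether the initiator asked for
	 * the extended (REPD) format, and kern_data_len was clipped to the
	 * CDB allocation length, so only the requested portion of the
	 * structure filled in below is actually returned.
	 */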
data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr; 7443 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | 7444 RST_TRS; 7445 data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; 7446 data->length = total_len - 4; 7447 7448 ctl_set_success(ctsio); 7449 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7450 ctsio->be_move_done = ctl_config_move_done; 7451 ctl_datamove((union ctl_io *)ctsio); 7452 return (retval); 7453 } 7454 7455 int 7456 ctl_report_timestamp(struct ctl_scsiio *ctsio) 7457 { 7458 struct scsi_report_timestamp *cdb; 7459 struct scsi_report_timestamp_data *data; 7460 struct timeval tv; 7461 int64_t timestamp; 7462 int retval; 7463 int alloc_len, total_len; 7464 7465 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7466 7467 cdb = (struct scsi_report_timestamp *)ctsio->cdb; 7468 7469 retval = CTL_RETVAL_COMPLETE; 7470 7471 total_len = sizeof(struct scsi_report_timestamp_data); 7472 alloc_len = scsi_4btoul(cdb->length); 7473 7474 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7475 ctsio->kern_sg_entries = 0; 7476 ctsio->kern_rel_offset = 0; 7477 ctsio->kern_data_len = min(total_len, alloc_len); 7478 ctsio->kern_total_len = ctsio->kern_data_len; 7479 7480 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7481 scsi_ulto2b(sizeof(*data) - 2, data->length); 7482 data->origin = RTS_ORIG_OUTSIDE; 7483 getmicrotime(&tv); 7484 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7485 scsi_ulto4b(timestamp >> 16, data->timestamp); 7486 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7487 7488 ctl_set_success(ctsio); 7489 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7490 ctsio->be_move_done = ctl_config_move_done; 7491 ctl_datamove((union ctl_io *)ctsio); 7492 return (retval); 7493 } 7494 7495 int 7496 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7497 { 7498 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7499 struct ctl_lun *lun = CTL_LUN(ctsio); 7500 struct scsi_per_res_in *cdb; 7501 int alloc_len, total_len = 0; 7502 /* struct scsi_per_res_in_rsrv in_data; */ 7503 uint64_t key; 7504 7505 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7506 7507 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7508 7509 alloc_len = scsi_2btoul(cdb->length); 7510 7511 retry: 7512 mtx_lock(&lun->lun_lock); 7513 switch (cdb->action) { 7514 case SPRI_RK: /* read keys */ 7515 total_len = sizeof(struct scsi_per_res_in_keys) + 7516 lun->pr_key_count * 7517 sizeof(struct scsi_per_res_key); 7518 break; 7519 case SPRI_RR: /* read reservation */ 7520 if (lun->flags & CTL_LUN_PR_RESERVED) 7521 total_len = sizeof(struct scsi_per_res_in_rsrv); 7522 else 7523 total_len = sizeof(struct scsi_per_res_in_header); 7524 break; 7525 case SPRI_RC: /* report capabilities */ 7526 total_len = sizeof(struct scsi_per_res_cap); 7527 break; 7528 case SPRI_RS: /* read full status */ 7529 total_len = sizeof(struct scsi_per_res_in_header) + 7530 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7531 lun->pr_key_count; 7532 break; 7533 default: 7534 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7535 } 7536 mtx_unlock(&lun->lun_lock); 7537 7538 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7539 ctsio->kern_rel_offset = 0; 7540 ctsio->kern_sg_entries = 0; 7541 ctsio->kern_data_len = min(total_len, alloc_len); 7542 ctsio->kern_total_len = ctsio->kern_data_len; 7543 7544 mtx_lock(&lun->lun_lock); 7545 switch (cdb->action) { 7546 case SPRI_RK: { // read keys 7547 struct scsi_per_res_in_keys *res_keys; 7548 int i, key_count; 7549 7550 
res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7551 7552 /* 7553 * We had to drop the lock to allocate our buffer, which 7554 * leaves time for someone to come in with another 7555 * persistent reservation. (That is unlikely, though, 7556 * since this should be the only persistent reservation 7557 * command active right now.) 7558 */ 7559 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7560 (lun->pr_key_count * 7561 sizeof(struct scsi_per_res_key)))){ 7562 mtx_unlock(&lun->lun_lock); 7563 free(ctsio->kern_data_ptr, M_CTL); 7564 printf("%s: reservation length changed, retrying\n", 7565 __func__); 7566 goto retry; 7567 } 7568 7569 scsi_ulto4b(lun->pr_generation, res_keys->header.generation); 7570 7571 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7572 lun->pr_key_count, res_keys->header.length); 7573 7574 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { 7575 if ((key = ctl_get_prkey(lun, i)) == 0) 7576 continue; 7577 7578 /* 7579 * We used lun->pr_key_count to calculate the 7580 * size to allocate. If it turns out the number of 7581 * initiators with the registered flag set is 7582 * larger than that (i.e. they haven't been kept in 7583 * sync), we've got a problem. 7584 */ 7585 if (key_count >= lun->pr_key_count) { 7586 key_count++; 7587 continue; 7588 } 7589 scsi_u64to8b(key, res_keys->keys[key_count].key); 7590 key_count++; 7591 } 7592 break; 7593 } 7594 case SPRI_RR: { // read reservation 7595 struct scsi_per_res_in_rsrv *res; 7596 int tmp_len, header_only; 7597 7598 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7599 7600 scsi_ulto4b(lun->pr_generation, res->header.generation); 7601 7602 if (lun->flags & CTL_LUN_PR_RESERVED) 7603 { 7604 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7605 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7606 res->header.length); 7607 header_only = 0; 7608 } else { 7609 tmp_len = sizeof(struct scsi_per_res_in_header); 7610 scsi_ulto4b(0, res->header.length); 7611 header_only = 1; 7612 } 7613 7614 /* 7615 * We had to drop the lock to allocate our buffer, which 7616 * leaves time for someone to come in with another 7617 * persistent reservation. (That is unlikely, though, 7618 * since this should be the only persistent reservation 7619 * command active right now.) 7620 */ 7621 if (tmp_len != total_len) { 7622 mtx_unlock(&lun->lun_lock); 7623 free(ctsio->kern_data_ptr, M_CTL); 7624 printf("%s: reservation status changed, retrying\n", 7625 __func__); 7626 goto retry; 7627 } 7628 7629 /* 7630 * No reservation held, so we're done. 7631 */ 7632 if (header_only != 0) 7633 break; 7634 7635 /* 7636 * If the registration is an All Registrants type, the key 7637 * is 0, since it doesn't really matter. 
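	 * (With an All Registrants reservation every registrant holds the
	 * reservation, so no single key identifies the holder.)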
7638 */ 7639 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7640 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7641 res->data.reservation); 7642 } 7643 res->data.scopetype = lun->pr_res_type; 7644 break; 7645 } 7646 case SPRI_RC: //report capabilities 7647 { 7648 struct scsi_per_res_cap *res_cap; 7649 uint16_t type_mask; 7650 7651 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7652 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7653 res_cap->flags1 = SPRI_CRH; 7654 res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5; 7655 type_mask = SPRI_TM_WR_EX_AR | 7656 SPRI_TM_EX_AC_RO | 7657 SPRI_TM_WR_EX_RO | 7658 SPRI_TM_EX_AC | 7659 SPRI_TM_WR_EX | 7660 SPRI_TM_EX_AC_AR; 7661 scsi_ulto2b(type_mask, res_cap->type_mask); 7662 break; 7663 } 7664 case SPRI_RS: { // read full status 7665 struct scsi_per_res_in_full *res_status; 7666 struct scsi_per_res_in_full_desc *res_desc; 7667 struct ctl_port *port; 7668 int i, len; 7669 7670 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7671 7672 /* 7673 * We had to drop the lock to allocate our buffer, which 7674 * leaves time for someone to come in with another 7675 * persistent reservation. (That is unlikely, though, 7676 * since this should be the only persistent reservation 7677 * command active right now.) 7678 */ 7679 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7680 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7681 lun->pr_key_count)){ 7682 mtx_unlock(&lun->lun_lock); 7683 free(ctsio->kern_data_ptr, M_CTL); 7684 printf("%s: reservation length changed, retrying\n", 7685 __func__); 7686 goto retry; 7687 } 7688 7689 scsi_ulto4b(lun->pr_generation, res_status->header.generation); 7690 7691 res_desc = &res_status->desc[0]; 7692 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7693 if ((key = ctl_get_prkey(lun, i)) == 0) 7694 continue; 7695 7696 scsi_u64to8b(key, res_desc->res_key.key); 7697 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7698 (lun->pr_res_idx == i || 7699 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7700 res_desc->flags = SPRI_FULL_R_HOLDER; 7701 res_desc->scopetype = lun->pr_res_type; 7702 } 7703 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7704 res_desc->rel_trgt_port_id); 7705 len = 0; 7706 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; 7707 if (port != NULL) 7708 len = ctl_create_iid(port, 7709 i % CTL_MAX_INIT_PER_PORT, 7710 res_desc->transport_id); 7711 scsi_ulto4b(len, res_desc->additional_length); 7712 res_desc = (struct scsi_per_res_in_full_desc *) 7713 &res_desc->transport_id[len]; 7714 } 7715 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7716 res_status->header.length); 7717 break; 7718 } 7719 default: 7720 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7721 } 7722 mtx_unlock(&lun->lun_lock); 7723 7724 ctl_set_success(ctsio); 7725 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7726 ctsio->be_move_done = ctl_config_move_done; 7727 ctl_datamove((union ctl_io *)ctsio); 7728 return (CTL_RETVAL_COMPLETE); 7729 } 7730 7731 /* 7732 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7733 * it should return. 
7734 */ 7735 static int 7736 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7737 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7738 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7739 struct scsi_per_res_out_parms* param) 7740 { 7741 union ctl_ha_msg persis_io; 7742 int i; 7743 7744 mtx_lock(&lun->lun_lock); 7745 if (sa_res_key == 0) { 7746 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7747 /* validate scope and type */ 7748 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7749 SPR_LU_SCOPE) { 7750 mtx_unlock(&lun->lun_lock); 7751 ctl_set_invalid_field(/*ctsio*/ ctsio, 7752 /*sks_valid*/ 1, 7753 /*command*/ 1, 7754 /*field*/ 2, 7755 /*bit_valid*/ 1, 7756 /*bit*/ 4); 7757 ctl_done((union ctl_io *)ctsio); 7758 return (1); 7759 } 7760 7761 if (type>8 || type==2 || type==4 || type==0) { 7762 mtx_unlock(&lun->lun_lock); 7763 ctl_set_invalid_field(/*ctsio*/ ctsio, 7764 /*sks_valid*/ 1, 7765 /*command*/ 1, 7766 /*field*/ 2, 7767 /*bit_valid*/ 1, 7768 /*bit*/ 0); 7769 ctl_done((union ctl_io *)ctsio); 7770 return (1); 7771 } 7772 7773 /* 7774 * Unregister everybody else and build UA for 7775 * them 7776 */ 7777 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7778 if (i == residx || ctl_get_prkey(lun, i) == 0) 7779 continue; 7780 7781 ctl_clr_prkey(lun, i); 7782 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7783 } 7784 lun->pr_key_count = 1; 7785 lun->pr_res_type = type; 7786 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 7787 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 7788 lun->pr_res_idx = residx; 7789 lun->pr_generation++; 7790 mtx_unlock(&lun->lun_lock); 7791 7792 /* send msg to other side */ 7793 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7794 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7795 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7796 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7797 persis_io.pr.pr_info.res_type = type; 7798 memcpy(persis_io.pr.pr_info.sa_res_key, 7799 param->serv_act_res_key, 7800 sizeof(param->serv_act_res_key)); 7801 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7802 sizeof(persis_io.pr), M_WAITOK); 7803 } else { 7804 /* not all registrants */ 7805 mtx_unlock(&lun->lun_lock); 7806 free(ctsio->kern_data_ptr, M_CTL); 7807 ctl_set_invalid_field(ctsio, 7808 /*sks_valid*/ 1, 7809 /*command*/ 0, 7810 /*field*/ 8, 7811 /*bit_valid*/ 0, 7812 /*bit*/ 0); 7813 ctl_done((union ctl_io *)ctsio); 7814 return (1); 7815 } 7816 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7817 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7818 int found = 0; 7819 7820 if (res_key == sa_res_key) { 7821 /* special case */ 7822 /* 7823 * The spec implies this is not good but doesn't 7824 * say what to do. There are two choices either 7825 * generate a res conflict or check condition 7826 * with illegal field in parameter data. Since 7827 * that is what is done when the sa_res_key is 7828 * zero I'll take that approach since this has 7829 * to do with the sa_res_key. 
7830 */ 7831 mtx_unlock(&lun->lun_lock); 7832 free(ctsio->kern_data_ptr, M_CTL); 7833 ctl_set_invalid_field(ctsio, 7834 /*sks_valid*/ 1, 7835 /*command*/ 0, 7836 /*field*/ 8, 7837 /*bit_valid*/ 0, 7838 /*bit*/ 0); 7839 ctl_done((union ctl_io *)ctsio); 7840 return (1); 7841 } 7842 7843 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7844 if (ctl_get_prkey(lun, i) != sa_res_key) 7845 continue; 7846 7847 found = 1; 7848 ctl_clr_prkey(lun, i); 7849 lun->pr_key_count--; 7850 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7851 } 7852 if (!found) { 7853 mtx_unlock(&lun->lun_lock); 7854 free(ctsio->kern_data_ptr, M_CTL); 7855 ctl_set_reservation_conflict(ctsio); 7856 ctl_done((union ctl_io *)ctsio); 7857 return (CTL_RETVAL_COMPLETE); 7858 } 7859 lun->pr_generation++; 7860 mtx_unlock(&lun->lun_lock); 7861 7862 /* send msg to other side */ 7863 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7864 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7865 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7866 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7867 persis_io.pr.pr_info.res_type = type; 7868 memcpy(persis_io.pr.pr_info.sa_res_key, 7869 param->serv_act_res_key, 7870 sizeof(param->serv_act_res_key)); 7871 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7872 sizeof(persis_io.pr), M_WAITOK); 7873 } else { 7874 /* Reserved but not all registrants */ 7875 /* sa_res_key is res holder */ 7876 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7877 /* validate scope and type */ 7878 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7879 SPR_LU_SCOPE) { 7880 mtx_unlock(&lun->lun_lock); 7881 ctl_set_invalid_field(/*ctsio*/ ctsio, 7882 /*sks_valid*/ 1, 7883 /*command*/ 1, 7884 /*field*/ 2, 7885 /*bit_valid*/ 1, 7886 /*bit*/ 4); 7887 ctl_done((union ctl_io *)ctsio); 7888 return (1); 7889 } 7890 7891 if (type>8 || type==2 || type==4 || type==0) { 7892 mtx_unlock(&lun->lun_lock); 7893 ctl_set_invalid_field(/*ctsio*/ ctsio, 7894 /*sks_valid*/ 1, 7895 /*command*/ 1, 7896 /*field*/ 2, 7897 /*bit_valid*/ 1, 7898 /*bit*/ 0); 7899 ctl_done((union ctl_io *)ctsio); 7900 return (1); 7901 } 7902 7903 /* 7904 * Do the following: 7905 * if sa_res_key != res_key remove all 7906 * registrants w/sa_res_key and generate UA 7907 * for these registrants(Registrations 7908 * Preempted) if it wasn't an exclusive 7909 * reservation generate UA(Reservations 7910 * Preempted) for all other registered nexuses 7911 * if the type has changed. Establish the new 7912 * reservation and holder. If res_key and 7913 * sa_res_key are the same do the above 7914 * except don't unregister the res holder. 
7915 */ 7916 7917 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7918 if (i == residx || ctl_get_prkey(lun, i) == 0) 7919 continue; 7920 7921 if (sa_res_key == ctl_get_prkey(lun, i)) { 7922 ctl_clr_prkey(lun, i); 7923 lun->pr_key_count--; 7924 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7925 } else if (type != lun->pr_res_type && 7926 (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 7927 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 7928 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 7929 } 7930 } 7931 lun->pr_res_type = type; 7932 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 7933 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 7934 lun->pr_res_idx = residx; 7935 else 7936 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 7937 lun->pr_generation++; 7938 mtx_unlock(&lun->lun_lock); 7939 7940 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7941 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7942 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7943 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7944 persis_io.pr.pr_info.res_type = type; 7945 memcpy(persis_io.pr.pr_info.sa_res_key, 7946 param->serv_act_res_key, 7947 sizeof(param->serv_act_res_key)); 7948 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7949 sizeof(persis_io.pr), M_WAITOK); 7950 } else { 7951 /* 7952 * sa_res_key is not the res holder just 7953 * remove registrants 7954 */ 7955 int found=0; 7956 7957 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7958 if (sa_res_key != ctl_get_prkey(lun, i)) 7959 continue; 7960 7961 found = 1; 7962 ctl_clr_prkey(lun, i); 7963 lun->pr_key_count--; 7964 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7965 } 7966 7967 if (!found) { 7968 mtx_unlock(&lun->lun_lock); 7969 free(ctsio->kern_data_ptr, M_CTL); 7970 ctl_set_reservation_conflict(ctsio); 7971 ctl_done((union ctl_io *)ctsio); 7972 return (1); 7973 } 7974 lun->pr_generation++; 7975 mtx_unlock(&lun->lun_lock); 7976 7977 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7978 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7979 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7980 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7981 persis_io.pr.pr_info.res_type = type; 7982 memcpy(persis_io.pr.pr_info.sa_res_key, 7983 param->serv_act_res_key, 7984 sizeof(param->serv_act_res_key)); 7985 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7986 sizeof(persis_io.pr), M_WAITOK); 7987 } 7988 } 7989 return (0); 7990 } 7991 7992 static void 7993 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 7994 { 7995 uint64_t sa_res_key; 7996 int i; 7997 7998 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 7999 8000 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8001 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8002 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8003 if (sa_res_key == 0) { 8004 /* 8005 * Unregister everybody else and build UA for 8006 * them 8007 */ 8008 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8009 if (i == msg->pr.pr_info.residx || 8010 ctl_get_prkey(lun, i) == 0) 8011 continue; 8012 8013 ctl_clr_prkey(lun, i); 8014 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8015 } 8016 8017 lun->pr_key_count = 1; 8018 lun->pr_res_type = msg->pr.pr_info.res_type; 8019 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8020 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8021 lun->pr_res_idx = msg->pr.pr_info.residx; 8022 } else { 8023 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8024 if (sa_res_key == ctl_get_prkey(lun, i)) 8025 continue; 8026 8027 ctl_clr_prkey(lun, i); 8028 lun->pr_key_count--; 8029 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8030 } 8031 } 8032 } else { 8033 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8034 if (i == 
msg->pr.pr_info.residx || 8035 ctl_get_prkey(lun, i) == 0) 8036 continue; 8037 8038 if (sa_res_key == ctl_get_prkey(lun, i)) { 8039 ctl_clr_prkey(lun, i); 8040 lun->pr_key_count--; 8041 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8042 } else if (msg->pr.pr_info.res_type != lun->pr_res_type 8043 && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8044 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8045 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8046 } 8047 } 8048 lun->pr_res_type = msg->pr.pr_info.res_type; 8049 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8050 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8051 lun->pr_res_idx = msg->pr.pr_info.residx; 8052 else 8053 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8054 } 8055 lun->pr_generation++; 8056 8057 } 8058 8059 8060 int 8061 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8062 { 8063 struct ctl_softc *softc = CTL_SOFTC(ctsio); 8064 struct ctl_lun *lun = CTL_LUN(ctsio); 8065 int retval; 8066 u_int32_t param_len; 8067 struct scsi_per_res_out *cdb; 8068 struct scsi_per_res_out_parms* param; 8069 uint32_t residx; 8070 uint64_t res_key, sa_res_key, key; 8071 uint8_t type; 8072 union ctl_ha_msg persis_io; 8073 int i; 8074 8075 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8076 8077 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8078 retval = CTL_RETVAL_COMPLETE; 8079 8080 /* 8081 * We only support whole-LUN scope. The scope & type are ignored for 8082 * register, register and ignore existing key and clear. 8083 * We sometimes ignore scope and type on preempts too!! 8084 * Verify reservation type here as well. 8085 */ 8086 type = cdb->scope_type & SPR_TYPE_MASK; 8087 if ((cdb->action == SPRO_RESERVE) 8088 || (cdb->action == SPRO_RELEASE)) { 8089 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8090 ctl_set_invalid_field(/*ctsio*/ ctsio, 8091 /*sks_valid*/ 1, 8092 /*command*/ 1, 8093 /*field*/ 2, 8094 /*bit_valid*/ 1, 8095 /*bit*/ 4); 8096 ctl_done((union ctl_io *)ctsio); 8097 return (CTL_RETVAL_COMPLETE); 8098 } 8099 8100 if (type>8 || type==2 || type==4 || type==0) { 8101 ctl_set_invalid_field(/*ctsio*/ ctsio, 8102 /*sks_valid*/ 1, 8103 /*command*/ 1, 8104 /*field*/ 2, 8105 /*bit_valid*/ 1, 8106 /*bit*/ 0); 8107 ctl_done((union ctl_io *)ctsio); 8108 return (CTL_RETVAL_COMPLETE); 8109 } 8110 } 8111 8112 param_len = scsi_4btoul(cdb->length); 8113 8114 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8115 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8116 ctsio->kern_data_len = param_len; 8117 ctsio->kern_total_len = param_len; 8118 ctsio->kern_rel_offset = 0; 8119 ctsio->kern_sg_entries = 0; 8120 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8121 ctsio->be_move_done = ctl_config_move_done; 8122 ctl_datamove((union ctl_io *)ctsio); 8123 8124 return (CTL_RETVAL_COMPLETE); 8125 } 8126 8127 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8128 8129 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8130 res_key = scsi_8btou64(param->res_key.key); 8131 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8132 8133 /* 8134 * Validate the reservation key here except for SPRO_REG_IGNO 8135 * This must be done for all other service actions 8136 */ 8137 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8138 mtx_lock(&lun->lun_lock); 8139 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8140 if (res_key != key) { 8141 /* 8142 * The current key passed in doesn't match 8143 * the one the initiator previously 8144 * registered. 
8145 */ 8146 mtx_unlock(&lun->lun_lock); 8147 free(ctsio->kern_data_ptr, M_CTL); 8148 ctl_set_reservation_conflict(ctsio); 8149 ctl_done((union ctl_io *)ctsio); 8150 return (CTL_RETVAL_COMPLETE); 8151 } 8152 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8153 /* 8154 * We are not registered 8155 */ 8156 mtx_unlock(&lun->lun_lock); 8157 free(ctsio->kern_data_ptr, M_CTL); 8158 ctl_set_reservation_conflict(ctsio); 8159 ctl_done((union ctl_io *)ctsio); 8160 return (CTL_RETVAL_COMPLETE); 8161 } else if (res_key != 0) { 8162 /* 8163 * We are not registered and trying to register but 8164 * the register key isn't zero. 8165 */ 8166 mtx_unlock(&lun->lun_lock); 8167 free(ctsio->kern_data_ptr, M_CTL); 8168 ctl_set_reservation_conflict(ctsio); 8169 ctl_done((union ctl_io *)ctsio); 8170 return (CTL_RETVAL_COMPLETE); 8171 } 8172 mtx_unlock(&lun->lun_lock); 8173 } 8174 8175 switch (cdb->action & SPRO_ACTION_MASK) { 8176 case SPRO_REGISTER: 8177 case SPRO_REG_IGNO: { 8178 8179 #if 0 8180 printf("Registration received\n"); 8181 #endif 8182 8183 /* 8184 * We don't support any of these options, as we report in 8185 * the read capabilities request (see 8186 * ctl_persistent_reserve_in(), above). 8187 */ 8188 if ((param->flags & SPR_SPEC_I_PT) 8189 || (param->flags & SPR_ALL_TG_PT) 8190 || (param->flags & SPR_APTPL)) { 8191 int bit_ptr; 8192 8193 if (param->flags & SPR_APTPL) 8194 bit_ptr = 0; 8195 else if (param->flags & SPR_ALL_TG_PT) 8196 bit_ptr = 2; 8197 else /* SPR_SPEC_I_PT */ 8198 bit_ptr = 3; 8199 8200 free(ctsio->kern_data_ptr, M_CTL); 8201 ctl_set_invalid_field(ctsio, 8202 /*sks_valid*/ 1, 8203 /*command*/ 0, 8204 /*field*/ 20, 8205 /*bit_valid*/ 1, 8206 /*bit*/ bit_ptr); 8207 ctl_done((union ctl_io *)ctsio); 8208 return (CTL_RETVAL_COMPLETE); 8209 } 8210 8211 mtx_lock(&lun->lun_lock); 8212 8213 /* 8214 * The initiator wants to clear the 8215 * key/unregister. 8216 */ 8217 if (sa_res_key == 0) { 8218 if ((res_key == 0 8219 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8220 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8221 && ctl_get_prkey(lun, residx) == 0)) { 8222 mtx_unlock(&lun->lun_lock); 8223 goto done; 8224 } 8225 8226 ctl_clr_prkey(lun, residx); 8227 lun->pr_key_count--; 8228 8229 if (residx == lun->pr_res_idx) { 8230 lun->flags &= ~CTL_LUN_PR_RESERVED; 8231 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8232 8233 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8234 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8235 lun->pr_key_count) { 8236 /* 8237 * If the reservation is a registrants 8238 * only type we need to generate a UA 8239 * for other registered inits. 
The 8240 * sense code should be RESERVATIONS 8241 * RELEASED 8242 */ 8243 8244 for (i = softc->init_min; i < softc->init_max; i++){ 8245 if (ctl_get_prkey(lun, i) == 0) 8246 continue; 8247 ctl_est_ua(lun, i, 8248 CTL_UA_RES_RELEASE); 8249 } 8250 } 8251 lun->pr_res_type = 0; 8252 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8253 if (lun->pr_key_count==0) { 8254 lun->flags &= ~CTL_LUN_PR_RESERVED; 8255 lun->pr_res_type = 0; 8256 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8257 } 8258 } 8259 lun->pr_generation++; 8260 mtx_unlock(&lun->lun_lock); 8261 8262 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8263 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8264 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8265 persis_io.pr.pr_info.residx = residx; 8266 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8267 sizeof(persis_io.pr), M_WAITOK); 8268 } else /* sa_res_key != 0 */ { 8269 8270 /* 8271 * If we aren't registered currently then increment 8272 * the key count and set the registered flag. 8273 */ 8274 ctl_alloc_prkey(lun, residx); 8275 if (ctl_get_prkey(lun, residx) == 0) 8276 lun->pr_key_count++; 8277 ctl_set_prkey(lun, residx, sa_res_key); 8278 lun->pr_generation++; 8279 mtx_unlock(&lun->lun_lock); 8280 8281 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8282 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8283 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8284 persis_io.pr.pr_info.residx = residx; 8285 memcpy(persis_io.pr.pr_info.sa_res_key, 8286 param->serv_act_res_key, 8287 sizeof(param->serv_act_res_key)); 8288 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8289 sizeof(persis_io.pr), M_WAITOK); 8290 } 8291 8292 break; 8293 } 8294 case SPRO_RESERVE: 8295 #if 0 8296 printf("Reserve executed type %d\n", type); 8297 #endif 8298 mtx_lock(&lun->lun_lock); 8299 if (lun->flags & CTL_LUN_PR_RESERVED) { 8300 /* 8301 * if this isn't the reservation holder and it's 8302 * not a "all registrants" type or if the type is 8303 * different then we have a conflict 8304 */ 8305 if ((lun->pr_res_idx != residx 8306 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8307 || lun->pr_res_type != type) { 8308 mtx_unlock(&lun->lun_lock); 8309 free(ctsio->kern_data_ptr, M_CTL); 8310 ctl_set_reservation_conflict(ctsio); 8311 ctl_done((union ctl_io *)ctsio); 8312 return (CTL_RETVAL_COMPLETE); 8313 } 8314 mtx_unlock(&lun->lun_lock); 8315 } else /* create a reservation */ { 8316 /* 8317 * If it's not an "all registrants" type record 8318 * reservation holder 8319 */ 8320 if (type != SPR_TYPE_WR_EX_AR 8321 && type != SPR_TYPE_EX_AC_AR) 8322 lun->pr_res_idx = residx; /* Res holder */ 8323 else 8324 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8325 8326 lun->flags |= CTL_LUN_PR_RESERVED; 8327 lun->pr_res_type = type; 8328 8329 mtx_unlock(&lun->lun_lock); 8330 8331 /* send msg to other side */ 8332 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8333 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8334 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8335 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8336 persis_io.pr.pr_info.res_type = type; 8337 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8338 sizeof(persis_io.pr), M_WAITOK); 8339 } 8340 break; 8341 8342 case SPRO_RELEASE: 8343 mtx_lock(&lun->lun_lock); 8344 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8345 /* No reservation exists return good status */ 8346 mtx_unlock(&lun->lun_lock); 8347 goto done; 8348 } 8349 /* 8350 * Is this nexus a reservation holder? 
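	 * (It is if it is the registered holder, or if the reservation is of
	 * the All Registrants type, in which case any registrant may
	 * release it.)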
8351 */ 8352 if (lun->pr_res_idx != residx 8353 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8354 /* 8355 * not a res holder; return good status but 8356 * do nothing 8357 */ 8358 mtx_unlock(&lun->lun_lock); 8359 goto done; 8360 } 8361 8362 if (lun->pr_res_type != type) { 8363 mtx_unlock(&lun->lun_lock); 8364 free(ctsio->kern_data_ptr, M_CTL); 8365 ctl_set_illegal_pr_release(ctsio); 8366 ctl_done((union ctl_io *)ctsio); 8367 return (CTL_RETVAL_COMPLETE); 8368 } 8369 8370 /* okay to release */ 8371 lun->flags &= ~CTL_LUN_PR_RESERVED; 8372 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8373 lun->pr_res_type = 0; 8374 8375 /* 8376 * If this isn't an exclusive access reservation and NUAR 8377 * is not set, generate UA for all other registrants. 8378 */ 8379 if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX && 8380 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8381 for (i = softc->init_min; i < softc->init_max; i++) { 8382 if (i == residx || ctl_get_prkey(lun, i) == 0) 8383 continue; 8384 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8385 } 8386 } 8387 mtx_unlock(&lun->lun_lock); 8388 8389 /* Send msg to other side */ 8390 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8391 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8392 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8393 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8394 sizeof(persis_io.pr), M_WAITOK); 8395 break; 8396 8397 case SPRO_CLEAR: 8398 /* send msg to other side */ 8399 8400 mtx_lock(&lun->lun_lock); 8401 lun->flags &= ~CTL_LUN_PR_RESERVED; 8402 lun->pr_res_type = 0; 8403 lun->pr_key_count = 0; 8404 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8405 8406 ctl_clr_prkey(lun, residx); 8407 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8408 if (ctl_get_prkey(lun, i) != 0) { 8409 ctl_clr_prkey(lun, i); 8410 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8411 } 8412 lun->pr_generation++; 8413 mtx_unlock(&lun->lun_lock); 8414 8415 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8416 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8417 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8418 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8419 sizeof(persis_io.pr), M_WAITOK); 8420 break; 8421 8422 case SPRO_PREEMPT: 8423 case SPRO_PRE_ABO: { 8424 int nretval; 8425 8426 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8427 residx, ctsio, cdb, param); 8428 if (nretval != 0) 8429 return (CTL_RETVAL_COMPLETE); 8430 break; 8431 } 8432 default: 8433 panic("%s: Invalid PR type %#x", __func__, cdb->action); 8434 } 8435 8436 done: 8437 free(ctsio->kern_data_ptr, M_CTL); 8438 ctl_set_success(ctsio); 8439 ctl_done((union ctl_io *)ctsio); 8440 8441 return (retval); 8442 } 8443 8444 /* 8445 * This routine is for handling a message from the other SC pertaining to 8446 * persistent reserve out. All the error checking will have been done, 8447 * so only performing the action need be done here to keep the two 8448 * in sync. 
8449 */ 8450 static void 8451 ctl_hndl_per_res_out_on_other_sc(union ctl_io *io) 8452 { 8453 struct ctl_softc *softc = CTL_SOFTC(io); 8454 union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg; 8455 struct ctl_lun *lun; 8456 int i; 8457 uint32_t residx, targ_lun; 8458 8459 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8460 mtx_lock(&softc->ctl_lock); 8461 if (targ_lun >= CTL_MAX_LUNS || 8462 (lun = softc->ctl_luns[targ_lun]) == NULL) { 8463 mtx_unlock(&softc->ctl_lock); 8464 return; 8465 } 8466 mtx_lock(&lun->lun_lock); 8467 mtx_unlock(&softc->ctl_lock); 8468 if (lun->flags & CTL_LUN_DISABLED) { 8469 mtx_unlock(&lun->lun_lock); 8470 return; 8471 } 8472 residx = ctl_get_initindex(&msg->hdr.nexus); 8473 switch(msg->pr.pr_info.action) { 8474 case CTL_PR_REG_KEY: 8475 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8476 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8477 lun->pr_key_count++; 8478 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8479 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8480 lun->pr_generation++; 8481 break; 8482 8483 case CTL_PR_UNREG_KEY: 8484 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8485 lun->pr_key_count--; 8486 8487 /* XXX Need to see if the reservation has been released */ 8488 /* if so do we need to generate UA? */ 8489 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8490 lun->flags &= ~CTL_LUN_PR_RESERVED; 8491 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8492 8493 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8494 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8495 lun->pr_key_count) { 8496 /* 8497 * If the reservation is a registrants 8498 * only type we need to generate a UA 8499 * for other registered inits. The 8500 * sense code should be RESERVATIONS 8501 * RELEASED 8502 */ 8503 8504 for (i = softc->init_min; i < softc->init_max; i++) { 8505 if (ctl_get_prkey(lun, i) == 0) 8506 continue; 8507 8508 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8509 } 8510 } 8511 lun->pr_res_type = 0; 8512 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8513 if (lun->pr_key_count==0) { 8514 lun->flags &= ~CTL_LUN_PR_RESERVED; 8515 lun->pr_res_type = 0; 8516 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8517 } 8518 } 8519 lun->pr_generation++; 8520 break; 8521 8522 case CTL_PR_RESERVE: 8523 lun->flags |= CTL_LUN_PR_RESERVED; 8524 lun->pr_res_type = msg->pr.pr_info.res_type; 8525 lun->pr_res_idx = msg->pr.pr_info.residx; 8526 8527 break; 8528 8529 case CTL_PR_RELEASE: 8530 /* 8531 * If this isn't an exclusive access reservation and NUAR 8532 * is not set, generate UA for all other registrants. 
8533 */ 8534 if (lun->pr_res_type != SPR_TYPE_EX_AC && 8535 lun->pr_res_type != SPR_TYPE_WR_EX && 8536 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8537 for (i = softc->init_min; i < softc->init_max; i++) { 8538 if (i == residx || ctl_get_prkey(lun, i) == 0) 8539 continue; 8540 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8541 } 8542 } 8543 lun->flags &= ~CTL_LUN_PR_RESERVED; 8544 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8545 lun->pr_res_type = 0; 8546 break; 8547 8548 case CTL_PR_PREEMPT: 8549 ctl_pro_preempt_other(lun, msg); 8550 break; 8551 case CTL_PR_CLEAR: 8552 lun->flags &= ~CTL_LUN_PR_RESERVED; 8553 lun->pr_res_type = 0; 8554 lun->pr_key_count = 0; 8555 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8556 8557 for (i=0; i < CTL_MAX_INITIATORS; i++) { 8558 if (ctl_get_prkey(lun, i) == 0) 8559 continue; 8560 ctl_clr_prkey(lun, i); 8561 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8562 } 8563 lun->pr_generation++; 8564 break; 8565 } 8566 8567 mtx_unlock(&lun->lun_lock); 8568 } 8569 8570 int 8571 ctl_read_write(struct ctl_scsiio *ctsio) 8572 { 8573 struct ctl_lun *lun = CTL_LUN(ctsio); 8574 struct ctl_lba_len_flags *lbalen; 8575 uint64_t lba; 8576 uint32_t num_blocks; 8577 int flags, retval; 8578 int isread; 8579 8580 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8581 8582 flags = 0; 8583 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8584 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8585 switch (ctsio->cdb[0]) { 8586 case READ_6: 8587 case WRITE_6: { 8588 struct scsi_rw_6 *cdb; 8589 8590 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8591 8592 lba = scsi_3btoul(cdb->addr); 8593 /* only 5 bits are valid in the most significant address byte */ 8594 lba &= 0x1fffff; 8595 num_blocks = cdb->length; 8596 /* 8597 * This is correct according to SBC-2: a transfer length of zero means 256 blocks for these 6-byte CDBs. 
8598 */ 8599 if (num_blocks == 0) 8600 num_blocks = 256; 8601 break; 8602 } 8603 case READ_10: 8604 case WRITE_10: { 8605 struct scsi_rw_10 *cdb; 8606 8607 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8608 if (cdb->byte2 & SRW10_FUA) 8609 flags |= CTL_LLF_FUA; 8610 if (cdb->byte2 & SRW10_DPO) 8611 flags |= CTL_LLF_DPO; 8612 lba = scsi_4btoul(cdb->addr); 8613 num_blocks = scsi_2btoul(cdb->length); 8614 break; 8615 } 8616 case WRITE_VERIFY_10: { 8617 struct scsi_write_verify_10 *cdb; 8618 8619 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8620 flags |= CTL_LLF_FUA; 8621 if (cdb->byte2 & SWV_DPO) 8622 flags |= CTL_LLF_DPO; 8623 lba = scsi_4btoul(cdb->addr); 8624 num_blocks = scsi_2btoul(cdb->length); 8625 break; 8626 } 8627 case READ_12: 8628 case WRITE_12: { 8629 struct scsi_rw_12 *cdb; 8630 8631 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8632 if (cdb->byte2 & SRW12_FUA) 8633 flags |= CTL_LLF_FUA; 8634 if (cdb->byte2 & SRW12_DPO) 8635 flags |= CTL_LLF_DPO; 8636 lba = scsi_4btoul(cdb->addr); 8637 num_blocks = scsi_4btoul(cdb->length); 8638 break; 8639 } 8640 case WRITE_VERIFY_12: { 8641 struct scsi_write_verify_12 *cdb; 8642 8643 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8644 flags |= CTL_LLF_FUA; 8645 if (cdb->byte2 & SWV_DPO) 8646 flags |= CTL_LLF_DPO; 8647 lba = scsi_4btoul(cdb->addr); 8648 num_blocks = scsi_4btoul(cdb->length); 8649 break; 8650 } 8651 case READ_16: 8652 case WRITE_16: { 8653 struct scsi_rw_16 *cdb; 8654 8655 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8656 if (cdb->byte2 & SRW12_FUA) 8657 flags |= CTL_LLF_FUA; 8658 if (cdb->byte2 & SRW12_DPO) 8659 flags |= CTL_LLF_DPO; 8660 lba = scsi_8btou64(cdb->addr); 8661 num_blocks = scsi_4btoul(cdb->length); 8662 break; 8663 } 8664 case WRITE_ATOMIC_16: { 8665 struct scsi_write_atomic_16 *cdb; 8666 8667 if (lun->be_lun->atomicblock == 0) { 8668 ctl_set_invalid_opcode(ctsio); 8669 ctl_done((union ctl_io *)ctsio); 8670 return (CTL_RETVAL_COMPLETE); 8671 } 8672 8673 cdb = (struct scsi_write_atomic_16 *)ctsio->cdb; 8674 if (cdb->byte2 & SRW12_FUA) 8675 flags |= CTL_LLF_FUA; 8676 if (cdb->byte2 & SRW12_DPO) 8677 flags |= CTL_LLF_DPO; 8678 lba = scsi_8btou64(cdb->addr); 8679 num_blocks = scsi_2btoul(cdb->length); 8680 if (num_blocks > lun->be_lun->atomicblock) { 8681 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8682 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8683 /*bit*/ 0); 8684 ctl_done((union ctl_io *)ctsio); 8685 return (CTL_RETVAL_COMPLETE); 8686 } 8687 break; 8688 } 8689 case WRITE_VERIFY_16: { 8690 struct scsi_write_verify_16 *cdb; 8691 8692 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8693 flags |= CTL_LLF_FUA; 8694 if (cdb->byte2 & SWV_DPO) 8695 flags |= CTL_LLF_DPO; 8696 lba = scsi_8btou64(cdb->addr); 8697 num_blocks = scsi_4btoul(cdb->length); 8698 break; 8699 } 8700 default: 8701 /* 8702 * We got a command we don't support. This shouldn't 8703 * happen, commands should be filtered out above us. 8704 */ 8705 ctl_set_invalid_opcode(ctsio); 8706 ctl_done((union ctl_io *)ctsio); 8707 8708 return (CTL_RETVAL_COMPLETE); 8709 break; /* NOTREACHED */ 8710 } 8711 8712 /* 8713 * The first check is to make sure we're in bounds, the second 8714 * check is to catch wrap-around problems. If the lba + num blocks 8715 * is less than the lba, then we've wrapped around and the block 8716 * range is invalid anyway. 
8717 */ 8718 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8719 || ((lba + num_blocks) < lba)) { 8720 ctl_set_lba_out_of_range(ctsio, 8721 MAX(lba, lun->be_lun->maxlba + 1)); 8722 ctl_done((union ctl_io *)ctsio); 8723 return (CTL_RETVAL_COMPLETE); 8724 } 8725 8726 /* 8727 * According to SBC-3, a transfer length of 0 is not an error. 8728 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8729 * translates to 256 blocks for those commands. 8730 */ 8731 if (num_blocks == 0) { 8732 ctl_set_success(ctsio); 8733 ctl_done((union ctl_io *)ctsio); 8734 return (CTL_RETVAL_COMPLETE); 8735 } 8736 8737 /* Set FUA and/or DPO if caches are disabled. */ 8738 if (isread) { 8739 if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0) 8740 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8741 } else { 8742 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8743 flags |= CTL_LLF_FUA; 8744 } 8745 8746 lbalen = (struct ctl_lba_len_flags *) 8747 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8748 lbalen->lba = lba; 8749 lbalen->len = num_blocks; 8750 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8751 8752 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8753 ctsio->kern_rel_offset = 0; 8754 8755 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8756 8757 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8758 return (retval); 8759 } 8760 8761 static int 8762 ctl_cnw_cont(union ctl_io *io) 8763 { 8764 struct ctl_lun *lun = CTL_LUN(io); 8765 struct ctl_scsiio *ctsio; 8766 struct ctl_lba_len_flags *lbalen; 8767 int retval; 8768 8769 ctsio = &io->scsiio; 8770 ctsio->io_hdr.status = CTL_STATUS_NONE; 8771 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8772 lbalen = (struct ctl_lba_len_flags *) 8773 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8774 lbalen->flags &= ~CTL_LLF_COMPARE; 8775 lbalen->flags |= CTL_LLF_WRITE; 8776 8777 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8778 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8779 return (retval); 8780 } 8781 8782 int 8783 ctl_cnw(struct ctl_scsiio *ctsio) 8784 { 8785 struct ctl_lun *lun = CTL_LUN(ctsio); 8786 struct ctl_lba_len_flags *lbalen; 8787 uint64_t lba; 8788 uint32_t num_blocks; 8789 int flags, retval; 8790 8791 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8792 8793 flags = 0; 8794 switch (ctsio->cdb[0]) { 8795 case COMPARE_AND_WRITE: { 8796 struct scsi_compare_and_write *cdb; 8797 8798 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8799 if (cdb->byte2 & SRW10_FUA) 8800 flags |= CTL_LLF_FUA; 8801 if (cdb->byte2 & SRW10_DPO) 8802 flags |= CTL_LLF_DPO; 8803 lba = scsi_8btou64(cdb->addr); 8804 num_blocks = cdb->length; 8805 break; 8806 } 8807 default: 8808 /* 8809 * We got a command we don't support. This shouldn't 8810 * happen, commands should be filtered out above us. 8811 */ 8812 ctl_set_invalid_opcode(ctsio); 8813 ctl_done((union ctl_io *)ctsio); 8814 8815 return (CTL_RETVAL_COMPLETE); 8816 break; /* NOTREACHED */ 8817 } 8818 8819 /* 8820 * The first check is to make sure we're in bounds, the second 8821 * check is to catch wrap-around problems. If the lba + num blocks 8822 * is less than the lba, then we've wrapped around and the block 8823 * range is invalid anyway. 
8824 */ 8825 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8826 || ((lba + num_blocks) < lba)) { 8827 ctl_set_lba_out_of_range(ctsio, 8828 MAX(lba, lun->be_lun->maxlba + 1)); 8829 ctl_done((union ctl_io *)ctsio); 8830 return (CTL_RETVAL_COMPLETE); 8831 } 8832 8833 /* 8834 * According to SBC-3, a transfer length of 0 is not an error. 8835 */ 8836 if (num_blocks == 0) { 8837 ctl_set_success(ctsio); 8838 ctl_done((union ctl_io *)ctsio); 8839 return (CTL_RETVAL_COMPLETE); 8840 } 8841 8842 /* Set FUA if write cache is disabled. */ 8843 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8844 flags |= CTL_LLF_FUA; 8845 8846 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 8847 ctsio->kern_rel_offset = 0; 8848 8849 /* 8850 * Set the IO_CONT flag, so that if this I/O gets passed to 8851 * ctl_data_submit_done(), it'll get passed back to 8852 * ctl_ctl_cnw_cont() for further processing. 8853 */ 8854 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 8855 ctsio->io_cont = ctl_cnw_cont; 8856 8857 lbalen = (struct ctl_lba_len_flags *) 8858 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8859 lbalen->lba = lba; 8860 lbalen->len = num_blocks; 8861 lbalen->flags = CTL_LLF_COMPARE | flags; 8862 8863 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 8864 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8865 return (retval); 8866 } 8867 8868 int 8869 ctl_verify(struct ctl_scsiio *ctsio) 8870 { 8871 struct ctl_lun *lun = CTL_LUN(ctsio); 8872 struct ctl_lba_len_flags *lbalen; 8873 uint64_t lba; 8874 uint32_t num_blocks; 8875 int bytchk, flags; 8876 int retval; 8877 8878 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 8879 8880 bytchk = 0; 8881 flags = CTL_LLF_FUA; 8882 switch (ctsio->cdb[0]) { 8883 case VERIFY_10: { 8884 struct scsi_verify_10 *cdb; 8885 8886 cdb = (struct scsi_verify_10 *)ctsio->cdb; 8887 if (cdb->byte2 & SVFY_BYTCHK) 8888 bytchk = 1; 8889 if (cdb->byte2 & SVFY_DPO) 8890 flags |= CTL_LLF_DPO; 8891 lba = scsi_4btoul(cdb->addr); 8892 num_blocks = scsi_2btoul(cdb->length); 8893 break; 8894 } 8895 case VERIFY_12: { 8896 struct scsi_verify_12 *cdb; 8897 8898 cdb = (struct scsi_verify_12 *)ctsio->cdb; 8899 if (cdb->byte2 & SVFY_BYTCHK) 8900 bytchk = 1; 8901 if (cdb->byte2 & SVFY_DPO) 8902 flags |= CTL_LLF_DPO; 8903 lba = scsi_4btoul(cdb->addr); 8904 num_blocks = scsi_4btoul(cdb->length); 8905 break; 8906 } 8907 case VERIFY_16: { 8908 struct scsi_rw_16 *cdb; 8909 8910 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8911 if (cdb->byte2 & SVFY_BYTCHK) 8912 bytchk = 1; 8913 if (cdb->byte2 & SVFY_DPO) 8914 flags |= CTL_LLF_DPO; 8915 lba = scsi_8btou64(cdb->addr); 8916 num_blocks = scsi_4btoul(cdb->length); 8917 break; 8918 } 8919 default: 8920 /* 8921 * We got a command we don't support. This shouldn't 8922 * happen, commands should be filtered out above us. 8923 */ 8924 ctl_set_invalid_opcode(ctsio); 8925 ctl_done((union ctl_io *)ctsio); 8926 return (CTL_RETVAL_COMPLETE); 8927 } 8928 8929 /* 8930 * The first check is to make sure we're in bounds, the second 8931 * check is to catch wrap-around problems. If the lba + num blocks 8932 * is less than the lba, then we've wrapped around and the block 8933 * range is invalid anyway. 8934 */ 8935 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8936 || ((lba + num_blocks) < lba)) { 8937 ctl_set_lba_out_of_range(ctsio, 8938 MAX(lba, lun->be_lun->maxlba + 1)); 8939 ctl_done((union ctl_io *)ctsio); 8940 return (CTL_RETVAL_COMPLETE); 8941 } 8942 8943 /* 8944 * According to SBC-3, a transfer length of 0 is not an error. 
8945 */ 8946 if (num_blocks == 0) { 8947 ctl_set_success(ctsio); 8948 ctl_done((union ctl_io *)ctsio); 8949 return (CTL_RETVAL_COMPLETE); 8950 } 8951 8952 lbalen = (struct ctl_lba_len_flags *) 8953 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8954 lbalen->lba = lba; 8955 lbalen->len = num_blocks; 8956 if (bytchk) { 8957 lbalen->flags = CTL_LLF_COMPARE | flags; 8958 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8959 } else { 8960 lbalen->flags = CTL_LLF_VERIFY | flags; 8961 ctsio->kern_total_len = 0; 8962 } 8963 ctsio->kern_rel_offset = 0; 8964 8965 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 8966 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8967 return (retval); 8968 } 8969 8970 int 8971 ctl_report_luns(struct ctl_scsiio *ctsio) 8972 { 8973 struct ctl_softc *softc = CTL_SOFTC(ctsio); 8974 struct ctl_port *port = CTL_PORT(ctsio); 8975 struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio); 8976 struct scsi_report_luns *cdb; 8977 struct scsi_report_luns_data *lun_data; 8978 int num_filled, num_luns, num_port_luns, retval; 8979 uint32_t alloc_len, lun_datalen; 8980 uint32_t initidx, targ_lun_id, lun_id; 8981 8982 retval = CTL_RETVAL_COMPLETE; 8983 cdb = (struct scsi_report_luns *)ctsio->cdb; 8984 8985 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 8986 8987 num_luns = 0; 8988 num_port_luns = port->lun_map ? port->lun_map_size : CTL_MAX_LUNS; 8989 mtx_lock(&softc->ctl_lock); 8990 for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) { 8991 if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX) 8992 num_luns++; 8993 } 8994 mtx_unlock(&softc->ctl_lock); 8995 8996 switch (cdb->select_report) { 8997 case RPL_REPORT_DEFAULT: 8998 case RPL_REPORT_ALL: 8999 case RPL_REPORT_NONSUBSID: 9000 break; 9001 case RPL_REPORT_WELLKNOWN: 9002 case RPL_REPORT_ADMIN: 9003 case RPL_REPORT_CONGLOM: 9004 num_luns = 0; 9005 break; 9006 default: 9007 ctl_set_invalid_field(ctsio, 9008 /*sks_valid*/ 1, 9009 /*command*/ 1, 9010 /*field*/ 2, 9011 /*bit_valid*/ 0, 9012 /*bit*/ 0); 9013 ctl_done((union ctl_io *)ctsio); 9014 return (retval); 9015 break; /* NOTREACHED */ 9016 } 9017 9018 alloc_len = scsi_4btoul(cdb->length); 9019 /* 9020 * The initiator has to allocate at least 16 bytes for this request, 9021 * so he can at least get the header and the first LUN. Otherwise 9022 * we reject the request (per SPC-3 rev 14, section 6.21). 
9023 */ 9024 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9025 sizeof(struct scsi_report_luns_lundata))) { 9026 ctl_set_invalid_field(ctsio, 9027 /*sks_valid*/ 1, 9028 /*command*/ 1, 9029 /*field*/ 6, 9030 /*bit_valid*/ 0, 9031 /*bit*/ 0); 9032 ctl_done((union ctl_io *)ctsio); 9033 return (retval); 9034 } 9035 9036 lun_datalen = sizeof(*lun_data) + 9037 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9038 9039 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9040 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9041 ctsio->kern_sg_entries = 0; 9042 9043 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9044 9045 mtx_lock(&softc->ctl_lock); 9046 for (targ_lun_id = 0, num_filled = 0; 9047 targ_lun_id < num_port_luns && num_filled < num_luns; 9048 targ_lun_id++) { 9049 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 9050 if (lun_id == UINT32_MAX) 9051 continue; 9052 lun = softc->ctl_luns[lun_id]; 9053 if (lun == NULL) 9054 continue; 9055 9056 be64enc(lun_data->luns[num_filled++].lundata, 9057 ctl_encode_lun(targ_lun_id)); 9058 9059 /* 9060 * According to SPC-3, rev 14 section 6.21: 9061 * 9062 * "The execution of a REPORT LUNS command to any valid and 9063 * installed logical unit shall clear the REPORTED LUNS DATA 9064 * HAS CHANGED unit attention condition for all logical 9065 * units of that target with respect to the requesting 9066 * initiator. A valid and installed logical unit is one 9067 * having a PERIPHERAL QUALIFIER of 000b in the standard 9068 * INQUIRY data (see 6.4.2)." 9069 * 9070 * If request_lun is NULL, the LUN this report luns command 9071 * was issued to is either disabled or doesn't exist. In that 9072 * case, we shouldn't clear any pending lun change unit 9073 * attention. 9074 */ 9075 if (request_lun != NULL) { 9076 mtx_lock(&lun->lun_lock); 9077 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 9078 mtx_unlock(&lun->lun_lock); 9079 } 9080 } 9081 mtx_unlock(&softc->ctl_lock); 9082 9083 /* 9084 * It's quite possible that we've returned fewer LUNs than we allocated 9085 * space for. Trim it. 9086 */ 9087 lun_datalen = sizeof(*lun_data) + 9088 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9089 ctsio->kern_rel_offset = 0; 9090 ctsio->kern_sg_entries = 0; 9091 ctsio->kern_data_len = min(lun_datalen, alloc_len); 9092 ctsio->kern_total_len = ctsio->kern_data_len; 9093 9094 /* 9095 * We set this to the actual data length, regardless of how much 9096 * space we actually have to return results. If the user looks at 9097 * this value, he'll know whether or not he allocated enough space 9098 * and reissue the command if necessary. We don't support well 9099 * known logical units, so if the user asks for that, return none. 9100 */ 9101 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9102 9103 /* 9104 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9105 * this request. 
9106 */ 9107 ctl_set_success(ctsio); 9108 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9109 ctsio->be_move_done = ctl_config_move_done; 9110 ctl_datamove((union ctl_io *)ctsio); 9111 return (retval); 9112 } 9113 9114 int 9115 ctl_request_sense(struct ctl_scsiio *ctsio) 9116 { 9117 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9118 struct ctl_lun *lun = CTL_LUN(ctsio); 9119 struct scsi_request_sense *cdb; 9120 struct scsi_sense_data *sense_ptr; 9121 uint32_t initidx; 9122 int have_error; 9123 u_int sense_len = SSD_FULL_SIZE; 9124 scsi_sense_data_type sense_format; 9125 ctl_ua_type ua_type; 9126 uint8_t asc = 0, ascq = 0; 9127 9128 cdb = (struct scsi_request_sense *)ctsio->cdb; 9129 9130 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9131 9132 /* 9133 * Determine which sense format the user wants. 9134 */ 9135 if (cdb->byte2 & SRS_DESC) 9136 sense_format = SSD_TYPE_DESC; 9137 else 9138 sense_format = SSD_TYPE_FIXED; 9139 9140 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9141 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9142 ctsio->kern_sg_entries = 0; 9143 ctsio->kern_rel_offset = 0; 9144 9145 /* 9146 * struct scsi_sense_data, which is currently set to 256 bytes, is 9147 * larger than the largest allowed value for the length field in the 9148 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9149 */ 9150 ctsio->kern_data_len = cdb->length; 9151 ctsio->kern_total_len = cdb->length; 9152 9153 /* 9154 * If we don't have a LUN, we don't have any pending sense. 9155 */ 9156 if (lun == NULL || 9157 ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 9158 softc->ha_link < CTL_HA_LINK_UNKNOWN)) { 9159 /* "Logical unit not supported" */ 9160 ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format, 9161 /*current_error*/ 1, 9162 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 9163 /*asc*/ 0x25, 9164 /*ascq*/ 0x00, 9165 SSD_ELEM_NONE); 9166 goto send; 9167 } 9168 9169 have_error = 0; 9170 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9171 /* 9172 * Check for pending sense, and then for pending unit attentions. 9173 * Pending sense gets returned first, then pending unit attentions. 9174 */ 9175 mtx_lock(&lun->lun_lock); 9176 #ifdef CTL_WITH_CA 9177 if (ctl_is_set(lun->have_ca, initidx)) { 9178 scsi_sense_data_type stored_format; 9179 9180 /* 9181 * Check to see which sense format was used for the stored 9182 * sense data. 9183 */ 9184 stored_format = scsi_sense_type(&lun->pending_sense[initidx]); 9185 9186 /* 9187 * If the user requested a different sense format than the 9188 * one we stored, then we need to convert it to the other 9189 * format. If we're going from descriptor to fixed format 9190 * sense data, we may lose things in translation, depending 9191 * on what options were used. 9192 * 9193 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9194 * for some reason we'll just copy it out as-is. 
9195 */ 9196 if ((stored_format == SSD_TYPE_FIXED) 9197 && (sense_format == SSD_TYPE_DESC)) 9198 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9199 &lun->pending_sense[initidx], 9200 (struct scsi_sense_data_desc *)sense_ptr); 9201 else if ((stored_format == SSD_TYPE_DESC) 9202 && (sense_format == SSD_TYPE_FIXED)) 9203 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9204 &lun->pending_sense[initidx], 9205 (struct scsi_sense_data_fixed *)sense_ptr); 9206 else 9207 memcpy(sense_ptr, &lun->pending_sense[initidx], 9208 MIN(sizeof(*sense_ptr), 9209 sizeof(lun->pending_sense[initidx]))); 9210 9211 ctl_clear_mask(lun->have_ca, initidx); 9212 have_error = 1; 9213 } else 9214 #endif 9215 if (have_error == 0) { 9216 ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len, 9217 sense_format); 9218 if (ua_type != CTL_UA_NONE) 9219 have_error = 1; 9220 } 9221 if (have_error == 0) { 9222 /* 9223 * Report informational exception if have one and allowed. 9224 */ 9225 if (lun->MODE_IE.mrie != SIEP_MRIE_NO) { 9226 asc = lun->ie_asc; 9227 ascq = lun->ie_ascq; 9228 } 9229 ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format, 9230 /*current_error*/ 1, 9231 /*sense_key*/ SSD_KEY_NO_SENSE, 9232 /*asc*/ asc, 9233 /*ascq*/ ascq, 9234 SSD_ELEM_NONE); 9235 } 9236 mtx_unlock(&lun->lun_lock); 9237 9238 send: 9239 /* 9240 * We report the SCSI status as OK, since the status of the command 9241 * itself is OK. We're reporting sense as parameter data. 9242 */ 9243 ctl_set_success(ctsio); 9244 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9245 ctsio->be_move_done = ctl_config_move_done; 9246 ctl_datamove((union ctl_io *)ctsio); 9247 return (CTL_RETVAL_COMPLETE); 9248 } 9249 9250 int 9251 ctl_tur(struct ctl_scsiio *ctsio) 9252 { 9253 9254 CTL_DEBUG_PRINT(("ctl_tur\n")); 9255 9256 ctl_set_success(ctsio); 9257 ctl_done((union ctl_io *)ctsio); 9258 9259 return (CTL_RETVAL_COMPLETE); 9260 } 9261 9262 /* 9263 * SCSI VPD page 0x00, the Supported VPD Pages page. 9264 */ 9265 static int 9266 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9267 { 9268 struct ctl_lun *lun = CTL_LUN(ctsio); 9269 struct scsi_vpd_supported_pages *pages; 9270 int sup_page_size; 9271 int p; 9272 9273 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9274 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9275 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9276 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9277 ctsio->kern_rel_offset = 0; 9278 ctsio->kern_sg_entries = 0; 9279 ctsio->kern_data_len = min(sup_page_size, alloc_len); 9280 ctsio->kern_total_len = ctsio->kern_data_len; 9281 9282 /* 9283 * The control device is always connected. The disk device, on the 9284 * other hand, may not be online all the time. Need to change this 9285 * to figure out whether the disk device is actually online or not. 
9286 */ 9287 if (lun != NULL) 9288 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9289 lun->be_lun->lun_type; 9290 else 9291 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9292 9293 p = 0; 9294 /* Supported VPD pages */ 9295 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9296 /* Serial Number */ 9297 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9298 /* Device Identification */ 9299 pages->page_list[p++] = SVPD_DEVICE_ID; 9300 /* Extended INQUIRY Data */ 9301 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9302 /* Mode Page Policy */ 9303 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9304 /* SCSI Ports */ 9305 pages->page_list[p++] = SVPD_SCSI_PORTS; 9306 /* Third-party Copy */ 9307 pages->page_list[p++] = SVPD_SCSI_TPC; 9308 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9309 /* Block limits */ 9310 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9311 /* Block Device Characteristics */ 9312 pages->page_list[p++] = SVPD_BDC; 9313 /* Logical Block Provisioning */ 9314 pages->page_list[p++] = SVPD_LBP; 9315 } 9316 pages->length = p; 9317 9318 ctl_set_success(ctsio); 9319 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9320 ctsio->be_move_done = ctl_config_move_done; 9321 ctl_datamove((union ctl_io *)ctsio); 9322 return (CTL_RETVAL_COMPLETE); 9323 } 9324 9325 /* 9326 * SCSI VPD page 0x80, the Unit Serial Number page. 9327 */ 9328 static int 9329 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9330 { 9331 struct ctl_lun *lun = CTL_LUN(ctsio); 9332 struct scsi_vpd_unit_serial_number *sn_ptr; 9333 int data_len; 9334 9335 data_len = 4 + CTL_SN_LEN; 9336 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9337 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9338 ctsio->kern_rel_offset = 0; 9339 ctsio->kern_sg_entries = 0; 9340 ctsio->kern_data_len = min(data_len, alloc_len); 9341 ctsio->kern_total_len = ctsio->kern_data_len; 9342 9343 /* 9344 * The control device is always connected. The disk device, on the 9345 * other hand, may not be online all the time. Need to change this 9346 * to figure out whether the disk device is actually online or not. 9347 */ 9348 if (lun != NULL) 9349 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9350 lun->be_lun->lun_type; 9351 else 9352 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9353 9354 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9355 sn_ptr->length = CTL_SN_LEN; 9356 /* 9357 * If we don't have a LUN, we just leave the serial number as 9358 * all spaces. 9359 */ 9360 if (lun != NULL) { 9361 strncpy((char *)sn_ptr->serial_num, 9362 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9363 } else 9364 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9365 9366 ctl_set_success(ctsio); 9367 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9368 ctsio->be_move_done = ctl_config_move_done; 9369 ctl_datamove((union ctl_io *)ctsio); 9370 return (CTL_RETVAL_COMPLETE); 9371 } 9372 9373 9374 /* 9375 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 
9376 */ 9377 static int 9378 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9379 { 9380 struct ctl_lun *lun = CTL_LUN(ctsio); 9381 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9382 int data_len; 9383 9384 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9385 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9386 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9387 ctsio->kern_sg_entries = 0; 9388 ctsio->kern_rel_offset = 0; 9389 ctsio->kern_data_len = min(data_len, alloc_len); 9390 ctsio->kern_total_len = ctsio->kern_data_len; 9391 9392 /* 9393 * The control device is always connected. The disk device, on the 9394 * other hand, may not be online all the time. 9395 */ 9396 if (lun != NULL) 9397 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9398 lun->be_lun->lun_type; 9399 else 9400 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9401 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9402 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9403 /* 9404 * We support head of queue, ordered and simple tags. 9405 */ 9406 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9407 /* 9408 * Volatile cache supported. 9409 */ 9410 eid_ptr->flags3 = SVPD_EID_V_SUP; 9411 9412 /* 9413 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9414 * attention for a particular IT nexus on all LUNs once we report 9415 * it to that nexus once. This bit is required as of SPC-4. 9416 */ 9417 eid_ptr->flags4 = SVPD_EID_LUICLR; 9418 9419 /* 9420 * We support revert to defaults (RTD) bit in MODE SELECT. 9421 */ 9422 eid_ptr->flags5 = SVPD_EID_RTD_SUP; 9423 9424 /* 9425 * XXX KDM in order to correctly answer this, we would need 9426 * information from the SIM to determine how much sense data it 9427 * can send. So this would really be a path inquiry field, most 9428 * likely. This can be set to a maximum of 252 according to SPC-4, 9429 * but the hardware may or may not be able to support that much. 9430 * 0 just means that the maximum sense data length is not reported. 9431 */ 9432 eid_ptr->max_sense_length = 0; 9433 9434 ctl_set_success(ctsio); 9435 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9436 ctsio->be_move_done = ctl_config_move_done; 9437 ctl_datamove((union ctl_io *)ctsio); 9438 return (CTL_RETVAL_COMPLETE); 9439 } 9440 9441 static int 9442 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9443 { 9444 struct ctl_lun *lun = CTL_LUN(ctsio); 9445 struct scsi_vpd_mode_page_policy *mpp_ptr; 9446 int data_len; 9447 9448 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9449 sizeof(struct scsi_vpd_mode_page_policy_descr); 9450 9451 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9452 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9453 ctsio->kern_rel_offset = 0; 9454 ctsio->kern_sg_entries = 0; 9455 ctsio->kern_data_len = min(data_len, alloc_len); 9456 ctsio->kern_total_len = ctsio->kern_data_len; 9457 9458 /* 9459 * The control device is always connected. The disk device, on the 9460 * other hand, may not be online all the time. 
9461 */ 9462 if (lun != NULL) 9463 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9464 lun->be_lun->lun_type; 9465 else 9466 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9467 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9468 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9469 mpp_ptr->descr[0].page_code = 0x3f; 9470 mpp_ptr->descr[0].subpage_code = 0xff; 9471 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9472 9473 ctl_set_success(ctsio); 9474 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9475 ctsio->be_move_done = ctl_config_move_done; 9476 ctl_datamove((union ctl_io *)ctsio); 9477 return (CTL_RETVAL_COMPLETE); 9478 } 9479 9480 /* 9481 * SCSI VPD page 0x83, the Device Identification page. 9482 */ 9483 static int 9484 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9485 { 9486 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9487 struct ctl_port *port = CTL_PORT(ctsio); 9488 struct ctl_lun *lun = CTL_LUN(ctsio); 9489 struct scsi_vpd_device_id *devid_ptr; 9490 struct scsi_vpd_id_descriptor *desc; 9491 int data_len, g; 9492 uint8_t proto; 9493 9494 data_len = sizeof(struct scsi_vpd_device_id) + 9495 sizeof(struct scsi_vpd_id_descriptor) + 9496 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9497 sizeof(struct scsi_vpd_id_descriptor) + 9498 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9499 if (lun && lun->lun_devid) 9500 data_len += lun->lun_devid->len; 9501 if (port && port->port_devid) 9502 data_len += port->port_devid->len; 9503 if (port && port->target_devid) 9504 data_len += port->target_devid->len; 9505 9506 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9507 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9508 ctsio->kern_sg_entries = 0; 9509 ctsio->kern_rel_offset = 0; 9510 ctsio->kern_sg_entries = 0; 9511 ctsio->kern_data_len = min(data_len, alloc_len); 9512 ctsio->kern_total_len = ctsio->kern_data_len; 9513 9514 /* 9515 * The control device is always connected. The disk device, on the 9516 * other hand, may not be online all the time. 9517 */ 9518 if (lun != NULL) 9519 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9520 lun->be_lun->lun_type; 9521 else 9522 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9523 devid_ptr->page_code = SVPD_DEVICE_ID; 9524 scsi_ulto2b(data_len - 4, devid_ptr->length); 9525 9526 if (port && port->port_type == CTL_PORT_FC) 9527 proto = SCSI_PROTO_FC << 4; 9528 else if (port && port->port_type == CTL_PORT_ISCSI) 9529 proto = SCSI_PROTO_ISCSI << 4; 9530 else 9531 proto = SCSI_PROTO_SPI << 4; 9532 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9533 9534 /* 9535 * We're using a LUN association here. i.e., this device ID is a 9536 * per-LUN identifier. 9537 */ 9538 if (lun && lun->lun_devid) { 9539 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9540 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9541 lun->lun_devid->len); 9542 } 9543 9544 /* 9545 * This is for the WWPN which is a port association. 
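 * Each identifier below is a struct scsi_vpd_id_descriptor header
 * (protocol/code set, PIV/association/type, length) followed by the
 * identifier bytes; "desc" is advanced past each descriptor as it is
 * filled in.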
9546 */ 9547 if (port && port->port_devid) { 9548 memcpy(desc, port->port_devid->data, port->port_devid->len); 9549 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9550 port->port_devid->len); 9551 } 9552 9553 /* 9554 * This is for the Relative Target Port(type 4h) identifier 9555 */ 9556 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9557 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9558 SVPD_ID_TYPE_RELTARG; 9559 desc->length = 4; 9560 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9561 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9562 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9563 9564 /* 9565 * This is for the Target Port Group(type 5h) identifier 9566 */ 9567 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9568 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9569 SVPD_ID_TYPE_TPORTGRP; 9570 desc->length = 4; 9571 if (softc->is_single || 9572 (port && port->status & CTL_PORT_STATUS_HA_SHARED)) 9573 g = 1; 9574 else 9575 g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt; 9576 scsi_ulto2b(g, &desc->identifier[2]); 9577 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9578 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9579 9580 /* 9581 * This is for the Target identifier 9582 */ 9583 if (port && port->target_devid) { 9584 memcpy(desc, port->target_devid->data, port->target_devid->len); 9585 } 9586 9587 ctl_set_success(ctsio); 9588 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9589 ctsio->be_move_done = ctl_config_move_done; 9590 ctl_datamove((union ctl_io *)ctsio); 9591 return (CTL_RETVAL_COMPLETE); 9592 } 9593 9594 static int 9595 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9596 { 9597 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9598 struct ctl_lun *lun = CTL_LUN(ctsio); 9599 struct scsi_vpd_scsi_ports *sp; 9600 struct scsi_vpd_port_designation *pd; 9601 struct scsi_vpd_port_designation_cont *pdc; 9602 struct ctl_port *port; 9603 int data_len, num_target_ports, iid_len, id_len; 9604 9605 num_target_ports = 0; 9606 iid_len = 0; 9607 id_len = 0; 9608 mtx_lock(&softc->ctl_lock); 9609 STAILQ_FOREACH(port, &softc->port_list, links) { 9610 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9611 continue; 9612 if (lun != NULL && 9613 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9614 continue; 9615 num_target_ports++; 9616 if (port->init_devid) 9617 iid_len += port->init_devid->len; 9618 if (port->port_devid) 9619 id_len += port->port_devid->len; 9620 } 9621 mtx_unlock(&softc->ctl_lock); 9622 9623 data_len = sizeof(struct scsi_vpd_scsi_ports) + 9624 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9625 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9626 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9627 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9628 ctsio->kern_sg_entries = 0; 9629 ctsio->kern_rel_offset = 0; 9630 ctsio->kern_sg_entries = 0; 9631 ctsio->kern_data_len = min(data_len, alloc_len); 9632 ctsio->kern_total_len = ctsio->kern_data_len; 9633 9634 /* 9635 * The control device is always connected. The disk device, on the 9636 * other hand, may not be online all the time. Need to change this 9637 * to figure out whether the disk device is actually online or not. 
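 * The first pass over the port list above only sized the buffer; the
 * second pass below fills in one designation descriptor (relative port
 * id, initiator transport id, target port descriptors) per online port
 * on which this LUN is mapped.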
9638 */ 9639 if (lun != NULL) 9640 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 9641 lun->be_lun->lun_type; 9642 else 9643 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9644 9645 sp->page_code = SVPD_SCSI_PORTS; 9646 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9647 sp->page_length); 9648 pd = &sp->design[0]; 9649 9650 mtx_lock(&softc->ctl_lock); 9651 STAILQ_FOREACH(port, &softc->port_list, links) { 9652 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9653 continue; 9654 if (lun != NULL && 9655 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9656 continue; 9657 scsi_ulto2b(port->targ_port, pd->relative_port_id); 9658 if (port->init_devid) { 9659 iid_len = port->init_devid->len; 9660 memcpy(pd->initiator_transportid, 9661 port->init_devid->data, port->init_devid->len); 9662 } else 9663 iid_len = 0; 9664 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9665 pdc = (struct scsi_vpd_port_designation_cont *) 9666 (&pd->initiator_transportid[iid_len]); 9667 if (port->port_devid) { 9668 id_len = port->port_devid->len; 9669 memcpy(pdc->target_port_descriptors, 9670 port->port_devid->data, port->port_devid->len); 9671 } else 9672 id_len = 0; 9673 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9674 pd = (struct scsi_vpd_port_designation *) 9675 ((uint8_t *)pdc->target_port_descriptors + id_len); 9676 } 9677 mtx_unlock(&softc->ctl_lock); 9678 9679 ctl_set_success(ctsio); 9680 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9681 ctsio->be_move_done = ctl_config_move_done; 9682 ctl_datamove((union ctl_io *)ctsio); 9683 return (CTL_RETVAL_COMPLETE); 9684 } 9685 9686 static int 9687 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9688 { 9689 struct ctl_lun *lun = CTL_LUN(ctsio); 9690 struct scsi_vpd_block_limits *bl_ptr; 9691 uint64_t ival; 9692 9693 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9694 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 9695 ctsio->kern_sg_entries = 0; 9696 ctsio->kern_rel_offset = 0; 9697 ctsio->kern_sg_entries = 0; 9698 ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len); 9699 ctsio->kern_total_len = ctsio->kern_data_len; 9700 9701 /* 9702 * The control device is always connected. The disk device, on the 9703 * other hand, may not be online all the time. Need to change this 9704 * to figure out whether the disk device is actually online or not. 
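 * The UNMAP limits below default to the maximum value (all ones) and can
 * be overridden with the "unmap_max_lba" and "unmap_max_descr" backend
 * options; WRITE SAME is capped by "write_same_max_lba" the same way.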
9705 */ 9706 if (lun != NULL) 9707 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9708 lun->be_lun->lun_type; 9709 else 9710 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9711 9712 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9713 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 9714 bl_ptr->max_cmp_write_len = 0xff; 9715 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9716 if (lun != NULL) { 9717 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 9718 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9719 ival = 0xffffffff; 9720 ctl_get_opt_number(&lun->be_lun->options, 9721 "unmap_max_lba", &ival); 9722 scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt); 9723 ival = 0xffffffff; 9724 ctl_get_opt_number(&lun->be_lun->options, 9725 "unmap_max_descr", &ival); 9726 scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt); 9727 if (lun->be_lun->ublockexp != 0) { 9728 scsi_ulto4b((1 << lun->be_lun->ublockexp), 9729 bl_ptr->opt_unmap_grain); 9730 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 9731 bl_ptr->unmap_grain_align); 9732 } 9733 } 9734 scsi_ulto4b(lun->be_lun->atomicblock, 9735 bl_ptr->max_atomic_transfer_length); 9736 scsi_ulto4b(0, bl_ptr->atomic_alignment); 9737 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 9738 scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary); 9739 scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size); 9740 ival = UINT64_MAX; 9741 ctl_get_opt_number(&lun->be_lun->options, "write_same_max_lba", &ival); 9742 scsi_u64to8b(ival, bl_ptr->max_write_same_length); 9743 } 9744 9745 ctl_set_success(ctsio); 9746 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9747 ctsio->be_move_done = ctl_config_move_done; 9748 ctl_datamove((union ctl_io *)ctsio); 9749 return (CTL_RETVAL_COMPLETE); 9750 } 9751 9752 static int 9753 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 9754 { 9755 struct ctl_lun *lun = CTL_LUN(ctsio); 9756 struct scsi_vpd_block_device_characteristics *bdc_ptr; 9757 const char *value; 9758 u_int i; 9759 9760 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 9761 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 9762 ctsio->kern_sg_entries = 0; 9763 ctsio->kern_rel_offset = 0; 9764 ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len); 9765 ctsio->kern_total_len = ctsio->kern_data_len; 9766 9767 /* 9768 * The control device is always connected. The disk device, on the 9769 * other hand, may not be online all the time. Need to change this 9770 * to figure out whether the disk device is actually online or not. 
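 * The "rpm" and "formfactor" backend options, when set, override the
 * reported medium rotation rate and nominal form factor; otherwise the
 * default rotation rate is used and the form factor is reported as zero
 * ("not reported").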
9771 */ 9772 if (lun != NULL) 9773 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9774 lun->be_lun->lun_type; 9775 else 9776 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9777 bdc_ptr->page_code = SVPD_BDC; 9778 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 9779 if (lun != NULL && 9780 (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) 9781 i = strtol(value, NULL, 0); 9782 else 9783 i = CTL_DEFAULT_ROTATION_RATE; 9784 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 9785 if (lun != NULL && 9786 (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL) 9787 i = strtol(value, NULL, 0); 9788 else 9789 i = 0; 9790 bdc_ptr->wab_wac_ff = (i & 0x0f); 9791 bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; 9792 9793 ctl_set_success(ctsio); 9794 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9795 ctsio->be_move_done = ctl_config_move_done; 9796 ctl_datamove((union ctl_io *)ctsio); 9797 return (CTL_RETVAL_COMPLETE); 9798 } 9799 9800 static int 9801 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 9802 { 9803 struct ctl_lun *lun = CTL_LUN(ctsio); 9804 struct scsi_vpd_logical_block_prov *lbp_ptr; 9805 const char *value; 9806 9807 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 9808 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 9809 ctsio->kern_sg_entries = 0; 9810 ctsio->kern_rel_offset = 0; 9811 ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len); 9812 ctsio->kern_total_len = ctsio->kern_data_len; 9813 9814 /* 9815 * The control device is always connected. The disk device, on the 9816 * other hand, may not be online all the time. Need to change this 9817 * to figure out whether the disk device is actually online or not. 9818 */ 9819 if (lun != NULL) 9820 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9821 lun->be_lun->lun_type; 9822 else 9823 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9824 9825 lbp_ptr->page_code = SVPD_LBP; 9826 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 9827 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 9828 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9829 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 9830 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 9831 value = ctl_get_opt(&lun->be_lun->options, "provisioning_type"); 9832 if (value != NULL) { 9833 if (strcmp(value, "resource") == 0) 9834 lbp_ptr->prov_type = SVPD_LBP_RESOURCE; 9835 else if (strcmp(value, "thin") == 0) 9836 lbp_ptr->prov_type = SVPD_LBP_THIN; 9837 } else 9838 lbp_ptr->prov_type = SVPD_LBP_THIN; 9839 } 9840 9841 ctl_set_success(ctsio); 9842 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9843 ctsio->be_move_done = ctl_config_move_done; 9844 ctl_datamove((union ctl_io *)ctsio); 9845 return (CTL_RETVAL_COMPLETE); 9846 } 9847 9848 /* 9849 * INQUIRY with the EVPD bit set. 
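 * The VPD page code in byte 2 of the CDB selects one of the handlers
 * below.  The block-device-only pages (Block Limits, Block Device
 * Characteristics, Logical Block Provisioning) are rejected with
 * INVALID FIELD IN CDB for LUNs that are not direct access devices.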
9850 */ 9851 static int 9852 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 9853 { 9854 struct ctl_lun *lun = CTL_LUN(ctsio); 9855 struct scsi_inquiry *cdb; 9856 int alloc_len, retval; 9857 9858 cdb = (struct scsi_inquiry *)ctsio->cdb; 9859 alloc_len = scsi_2btoul(cdb->length); 9860 9861 switch (cdb->page_code) { 9862 case SVPD_SUPPORTED_PAGES: 9863 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 9864 break; 9865 case SVPD_UNIT_SERIAL_NUMBER: 9866 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 9867 break; 9868 case SVPD_DEVICE_ID: 9869 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 9870 break; 9871 case SVPD_EXTENDED_INQUIRY_DATA: 9872 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 9873 break; 9874 case SVPD_MODE_PAGE_POLICY: 9875 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 9876 break; 9877 case SVPD_SCSI_PORTS: 9878 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 9879 break; 9880 case SVPD_SCSI_TPC: 9881 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 9882 break; 9883 case SVPD_BLOCK_LIMITS: 9884 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9885 goto err; 9886 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 9887 break; 9888 case SVPD_BDC: 9889 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9890 goto err; 9891 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 9892 break; 9893 case SVPD_LBP: 9894 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9895 goto err; 9896 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 9897 break; 9898 default: 9899 err: 9900 ctl_set_invalid_field(ctsio, 9901 /*sks_valid*/ 1, 9902 /*command*/ 1, 9903 /*field*/ 2, 9904 /*bit_valid*/ 0, 9905 /*bit*/ 0); 9906 ctl_done((union ctl_io *)ctsio); 9907 retval = CTL_RETVAL_COMPLETE; 9908 break; 9909 } 9910 9911 return (retval); 9912 } 9913 9914 /* 9915 * Standard INQUIRY data. 9916 */ 9917 static int 9918 ctl_inquiry_std(struct ctl_scsiio *ctsio) 9919 { 9920 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9921 struct ctl_port *port = CTL_PORT(ctsio); 9922 struct ctl_lun *lun = CTL_LUN(ctsio); 9923 struct scsi_inquiry_data *inq_ptr; 9924 struct scsi_inquiry *cdb; 9925 char *val; 9926 uint32_t alloc_len, data_len; 9927 ctl_port_type port_type; 9928 9929 port_type = port->port_type; 9930 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 9931 port_type = CTL_PORT_SCSI; 9932 9933 cdb = (struct scsi_inquiry *)ctsio->cdb; 9934 alloc_len = scsi_2btoul(cdb->length); 9935 9936 /* 9937 * We malloc the full inquiry data size here and fill it 9938 * in. If the user only asks for less, we'll give him 9939 * that much. 
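 * kern_data_len is clamped to the allocation length from the CDB, so a
 * short allocation simply truncates what is returned to the initiator.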
9940 */ 9941 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); 9942 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9943 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 9944 ctsio->kern_sg_entries = 0; 9945 ctsio->kern_rel_offset = 0; 9946 ctsio->kern_data_len = min(data_len, alloc_len); 9947 ctsio->kern_total_len = ctsio->kern_data_len; 9948 9949 if (lun != NULL) { 9950 if ((lun->flags & CTL_LUN_PRIMARY_SC) || 9951 softc->ha_link >= CTL_HA_LINK_UNKNOWN) { 9952 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9953 lun->be_lun->lun_type; 9954 } else { 9955 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | 9956 lun->be_lun->lun_type; 9957 } 9958 if (lun->flags & CTL_LUN_REMOVABLE) 9959 inq_ptr->dev_qual2 |= SID_RMB; 9960 } else 9961 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 9962 9963 /* RMB in byte 2 is 0 */ 9964 inq_ptr->version = SCSI_REV_SPC5; 9965 9966 /* 9967 * According to SAM-3, even if a device only supports a single 9968 * level of LUN addressing, it should still set the HISUP bit: 9969 * 9970 * 4.9.1 Logical unit numbers overview 9971 * 9972 * All logical unit number formats described in this standard are 9973 * hierarchical in structure even when only a single level in that 9974 * hierarchy is used. The HISUP bit shall be set to one in the 9975 * standard INQUIRY data (see SPC-2) when any logical unit number 9976 * format described in this standard is used. Non-hierarchical 9977 * formats are outside the scope of this standard. 9978 * 9979 * Therefore we set the HiSup bit here. 9980 * 9981 * The response format is 2, per SPC-3. 9982 */ 9983 inq_ptr->response_format = SID_HiSup | 2; 9984 9985 inq_ptr->additional_length = data_len - 9986 (offsetof(struct scsi_inquiry_data, additional_length) + 1); 9987 CTL_DEBUG_PRINT(("additional_length = %d\n", 9988 inq_ptr->additional_length)); 9989 9990 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; 9991 if (port_type == CTL_PORT_SCSI) 9992 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 9993 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 9994 inq_ptr->flags = SID_CmdQue; 9995 if (port_type == CTL_PORT_SCSI) 9996 inq_ptr->flags |= SID_WBus16 | SID_Sync; 9997 9998 /* 9999 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10000 * We have 8 bytes for the vendor name, and 16 bytes for the device 10001 * name and 4 bytes for the revision. 
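 * When a user-supplied value is shorter than a field, the field is
 * pre-filled with spaces and strncpy() copies the value without NUL
 * termination, as the fixed-length ASCII INQUIRY fields require.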
10002 */ 10003 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10004 "vendor")) == NULL) { 10005 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10006 } else { 10007 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10008 strncpy(inq_ptr->vendor, val, 10009 min(sizeof(inq_ptr->vendor), strlen(val))); 10010 } 10011 if (lun == NULL) { 10012 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10013 sizeof(inq_ptr->product)); 10014 } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) { 10015 switch (lun->be_lun->lun_type) { 10016 case T_DIRECT: 10017 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10018 sizeof(inq_ptr->product)); 10019 break; 10020 case T_PROCESSOR: 10021 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10022 sizeof(inq_ptr->product)); 10023 break; 10024 case T_CDROM: 10025 strncpy(inq_ptr->product, CTL_CDROM_PRODUCT, 10026 sizeof(inq_ptr->product)); 10027 break; 10028 default: 10029 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 10030 sizeof(inq_ptr->product)); 10031 break; 10032 } 10033 } else { 10034 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10035 strncpy(inq_ptr->product, val, 10036 min(sizeof(inq_ptr->product), strlen(val))); 10037 } 10038 10039 /* 10040 * XXX make this a macro somewhere so it automatically gets 10041 * incremented when we make changes. 10042 */ 10043 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10044 "revision")) == NULL) { 10045 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10046 } else { 10047 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10048 strncpy(inq_ptr->revision, val, 10049 min(sizeof(inq_ptr->revision), strlen(val))); 10050 } 10051 10052 /* 10053 * For parallel SCSI, we support double transition and single 10054 * transition clocking. We also support QAS (Quick Arbitration 10055 * and Selection) and Information Unit transfers on both the 10056 * control and array devices. 
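 * The version descriptors that follow claim conformance to SAM-6 and
 * SPC-5, plus a transport standard chosen by port type (FCP-2, SPI-4,
 * iSCSI or SAS) and a device-type standard (SBC-4 for direct access,
 * MMC-6 for CD/DVD).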
10057 */ 10058 if (port_type == CTL_PORT_SCSI) 10059 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10060 SID_SPI_IUS; 10061 10062 /* SAM-6 (no version claimed) */ 10063 scsi_ulto2b(0x00C0, inq_ptr->version1); 10064 /* SPC-5 (no version claimed) */ 10065 scsi_ulto2b(0x05C0, inq_ptr->version2); 10066 if (port_type == CTL_PORT_FC) { 10067 /* FCP-2 ANSI INCITS.350:2003 */ 10068 scsi_ulto2b(0x0917, inq_ptr->version3); 10069 } else if (port_type == CTL_PORT_SCSI) { 10070 /* SPI-4 ANSI INCITS.362:200x */ 10071 scsi_ulto2b(0x0B56, inq_ptr->version3); 10072 } else if (port_type == CTL_PORT_ISCSI) { 10073 /* iSCSI (no version claimed) */ 10074 scsi_ulto2b(0x0960, inq_ptr->version3); 10075 } else if (port_type == CTL_PORT_SAS) { 10076 /* SAS (no version claimed) */ 10077 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10078 } 10079 10080 if (lun == NULL) { 10081 /* SBC-4 (no version claimed) */ 10082 scsi_ulto2b(0x0600, inq_ptr->version4); 10083 } else { 10084 switch (lun->be_lun->lun_type) { 10085 case T_DIRECT: 10086 /* SBC-4 (no version claimed) */ 10087 scsi_ulto2b(0x0600, inq_ptr->version4); 10088 break; 10089 case T_PROCESSOR: 10090 break; 10091 case T_CDROM: 10092 /* MMC-6 (no version claimed) */ 10093 scsi_ulto2b(0x04E0, inq_ptr->version4); 10094 break; 10095 default: 10096 break; 10097 } 10098 } 10099 10100 ctl_set_success(ctsio); 10101 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10102 ctsio->be_move_done = ctl_config_move_done; 10103 ctl_datamove((union ctl_io *)ctsio); 10104 return (CTL_RETVAL_COMPLETE); 10105 } 10106 10107 int 10108 ctl_inquiry(struct ctl_scsiio *ctsio) 10109 { 10110 struct scsi_inquiry *cdb; 10111 int retval; 10112 10113 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10114 10115 cdb = (struct scsi_inquiry *)ctsio->cdb; 10116 if (cdb->byte2 & SI_EVPD) 10117 retval = ctl_inquiry_evpd(ctsio); 10118 else if (cdb->page_code == 0) 10119 retval = ctl_inquiry_std(ctsio); 10120 else { 10121 ctl_set_invalid_field(ctsio, 10122 /*sks_valid*/ 1, 10123 /*command*/ 1, 10124 /*field*/ 2, 10125 /*bit_valid*/ 0, 10126 /*bit*/ 0); 10127 ctl_done((union ctl_io *)ctsio); 10128 return (CTL_RETVAL_COMPLETE); 10129 } 10130 10131 return (retval); 10132 } 10133 10134 int 10135 ctl_get_config(struct ctl_scsiio *ctsio) 10136 { 10137 struct ctl_lun *lun = CTL_LUN(ctsio); 10138 struct scsi_get_config_header *hdr; 10139 struct scsi_get_config_feature *feature; 10140 struct scsi_get_config *cdb; 10141 uint32_t alloc_len, data_len; 10142 int rt, starting; 10143 10144 cdb = (struct scsi_get_config *)ctsio->cdb; 10145 rt = (cdb->rt & SGC_RT_MASK); 10146 starting = scsi_2btoul(cdb->starting_feature); 10147 alloc_len = scsi_2btoul(cdb->length); 10148 10149 data_len = sizeof(struct scsi_get_config_header) + 10150 sizeof(struct scsi_get_config_feature) + 8 + 10151 sizeof(struct scsi_get_config_feature) + 8 + 10152 sizeof(struct scsi_get_config_feature) + 4 + 10153 sizeof(struct scsi_get_config_feature) + 4 + 10154 sizeof(struct scsi_get_config_feature) + 8 + 10155 sizeof(struct scsi_get_config_feature) + 10156 sizeof(struct scsi_get_config_feature) + 4 + 10157 sizeof(struct scsi_get_config_feature) + 4 + 10158 sizeof(struct scsi_get_config_feature) + 4 + 10159 sizeof(struct scsi_get_config_feature) + 4 + 10160 sizeof(struct scsi_get_config_feature) + 4 + 10161 sizeof(struct scsi_get_config_feature) + 4; 10162 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10163 ctsio->kern_sg_entries = 0; 10164 ctsio->kern_rel_offset = 0; 10165 10166 hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr; 10167 
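	/*
	 * The current profile is 0x0010 (DVD-ROM) while media is present and
	 * 0x0000 when the drive is empty.  The goto ladder below emits the
	 * feature descriptors in ascending feature-code order, skipping any
	 * whose code is below the starting feature number from the CDB.
	 */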
if (lun->flags & CTL_LUN_NO_MEDIA) 10168 scsi_ulto2b(0x0000, hdr->current_profile); 10169 else 10170 scsi_ulto2b(0x0010, hdr->current_profile); 10171 feature = (struct scsi_get_config_feature *)(hdr + 1); 10172 10173 if (starting > 0x003b) 10174 goto done; 10175 if (starting > 0x003a) 10176 goto f3b; 10177 if (starting > 0x002b) 10178 goto f3a; 10179 if (starting > 0x002a) 10180 goto f2b; 10181 if (starting > 0x001f) 10182 goto f2a; 10183 if (starting > 0x001e) 10184 goto f1f; 10185 if (starting > 0x001d) 10186 goto f1e; 10187 if (starting > 0x0010) 10188 goto f1d; 10189 if (starting > 0x0003) 10190 goto f10; 10191 if (starting > 0x0002) 10192 goto f3; 10193 if (starting > 0x0001) 10194 goto f2; 10195 if (starting > 0x0000) 10196 goto f1; 10197 10198 /* Profile List */ 10199 scsi_ulto2b(0x0000, feature->feature_code); 10200 feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT; 10201 feature->add_length = 8; 10202 scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */ 10203 feature->feature_data[2] = 0x00; 10204 scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */ 10205 feature->feature_data[6] = 0x01; 10206 feature = (struct scsi_get_config_feature *) 10207 &feature->feature_data[feature->add_length]; 10208 10209 f1: /* Core */ 10210 scsi_ulto2b(0x0001, feature->feature_code); 10211 feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10212 feature->add_length = 8; 10213 scsi_ulto4b(0x00000000, &feature->feature_data[0]); 10214 feature->feature_data[4] = 0x03; 10215 feature = (struct scsi_get_config_feature *) 10216 &feature->feature_data[feature->add_length]; 10217 10218 f2: /* Morphing */ 10219 scsi_ulto2b(0x0002, feature->feature_code); 10220 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10221 feature->add_length = 4; 10222 feature->feature_data[0] = 0x02; 10223 feature = (struct scsi_get_config_feature *) 10224 &feature->feature_data[feature->add_length]; 10225 10226 f3: /* Removable Medium */ 10227 scsi_ulto2b(0x0003, feature->feature_code); 10228 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10229 feature->add_length = 4; 10230 feature->feature_data[0] = 0x39; 10231 feature = (struct scsi_get_config_feature *) 10232 &feature->feature_data[feature->add_length]; 10233 10234 if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA)) 10235 goto done; 10236 10237 f10: /* Random Read */ 10238 scsi_ulto2b(0x0010, feature->feature_code); 10239 feature->flags = 0x00; 10240 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10241 feature->flags |= SGC_F_CURRENT; 10242 feature->add_length = 8; 10243 scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]); 10244 scsi_ulto2b(1, &feature->feature_data[4]); 10245 feature->feature_data[6] = 0x00; 10246 feature = (struct scsi_get_config_feature *) 10247 &feature->feature_data[feature->add_length]; 10248 10249 f1d: /* Multi-Read */ 10250 scsi_ulto2b(0x001D, feature->feature_code); 10251 feature->flags = 0x00; 10252 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10253 feature->flags |= SGC_F_CURRENT; 10254 feature->add_length = 0; 10255 feature = (struct scsi_get_config_feature *) 10256 &feature->feature_data[feature->add_length]; 10257 10258 f1e: /* CD Read */ 10259 scsi_ulto2b(0x001E, feature->feature_code); 10260 feature->flags = 0x00; 10261 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10262 feature->flags |= SGC_F_CURRENT; 10263 feature->add_length = 4; 10264 feature->feature_data[0] = 0x00; 10265 feature = (struct scsi_get_config_feature *) 10266 &feature->feature_data[feature->add_length]; 10267 10268 f1f: /* DVD 
Read */ 10269 scsi_ulto2b(0x001F, feature->feature_code); 10270 feature->flags = 0x08; 10271 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10272 feature->flags |= SGC_F_CURRENT; 10273 feature->add_length = 4; 10274 feature->feature_data[0] = 0x01; 10275 feature->feature_data[2] = 0x03; 10276 feature = (struct scsi_get_config_feature *) 10277 &feature->feature_data[feature->add_length]; 10278 10279 f2a: /* DVD+RW */ 10280 scsi_ulto2b(0x002A, feature->feature_code); 10281 feature->flags = 0x04; 10282 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10283 feature->flags |= SGC_F_CURRENT; 10284 feature->add_length = 4; 10285 feature->feature_data[0] = 0x00; 10286 feature->feature_data[1] = 0x00; 10287 feature = (struct scsi_get_config_feature *) 10288 &feature->feature_data[feature->add_length]; 10289 10290 f2b: /* DVD+R */ 10291 scsi_ulto2b(0x002B, feature->feature_code); 10292 feature->flags = 0x00; 10293 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10294 feature->flags |= SGC_F_CURRENT; 10295 feature->add_length = 4; 10296 feature->feature_data[0] = 0x00; 10297 feature = (struct scsi_get_config_feature *) 10298 &feature->feature_data[feature->add_length]; 10299 10300 f3a: /* DVD+RW Dual Layer */ 10301 scsi_ulto2b(0x003A, feature->feature_code); 10302 feature->flags = 0x00; 10303 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10304 feature->flags |= SGC_F_CURRENT; 10305 feature->add_length = 4; 10306 feature->feature_data[0] = 0x00; 10307 feature->feature_data[1] = 0x00; 10308 feature = (struct scsi_get_config_feature *) 10309 &feature->feature_data[feature->add_length]; 10310 10311 f3b: /* DVD+R Dual Layer */ 10312 scsi_ulto2b(0x003B, feature->feature_code); 10313 feature->flags = 0x00; 10314 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10315 feature->flags |= SGC_F_CURRENT; 10316 feature->add_length = 4; 10317 feature->feature_data[0] = 0x00; 10318 feature = (struct scsi_get_config_feature *) 10319 &feature->feature_data[feature->add_length]; 10320 10321 done: 10322 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10323 if (rt == SGC_RT_SPECIFIC && data_len > 4) { 10324 feature = (struct scsi_get_config_feature *)(hdr + 1); 10325 if (scsi_2btoul(feature->feature_code) == starting) 10326 feature = (struct scsi_get_config_feature *) 10327 &feature->feature_data[feature->add_length]; 10328 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10329 } 10330 scsi_ulto4b(data_len - 4, hdr->data_length); 10331 ctsio->kern_data_len = min(data_len, alloc_len); 10332 ctsio->kern_total_len = ctsio->kern_data_len; 10333 10334 ctl_set_success(ctsio); 10335 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10336 ctsio->be_move_done = ctl_config_move_done; 10337 ctl_datamove((union ctl_io *)ctsio); 10338 return (CTL_RETVAL_COMPLETE); 10339 } 10340 10341 int 10342 ctl_get_event_status(struct ctl_scsiio *ctsio) 10343 { 10344 struct scsi_get_event_status_header *hdr; 10345 struct scsi_get_event_status *cdb; 10346 uint32_t alloc_len, data_len; 10347 int notif_class; 10348 10349 cdb = (struct scsi_get_event_status *)ctsio->cdb; 10350 if ((cdb->byte2 & SGESN_POLLED) == 0) { 10351 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 10352 /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 10353 ctl_done((union ctl_io *)ctsio); 10354 return (CTL_RETVAL_COMPLETE); 10355 } 10356 notif_class = cdb->notif_class; 10357 alloc_len = scsi_2btoul(cdb->length); 10358 10359 data_len = sizeof(struct scsi_get_event_status_header); 10360 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10361 ctsio->kern_sg_entries = 0; 10362 
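	/*
	 * Only the minimal header is returned below: the "no event available"
	 * (NEA) bit is set and no notification classes are reported as
	 * supported.
	 */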
ctsio->kern_rel_offset = 0; 10363 ctsio->kern_data_len = min(data_len, alloc_len); 10364 ctsio->kern_total_len = ctsio->kern_data_len; 10365 10366 hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr; 10367 scsi_ulto2b(0, hdr->descr_length); 10368 hdr->nea_class = SGESN_NEA; 10369 hdr->supported_class = 0; 10370 10371 ctl_set_success(ctsio); 10372 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10373 ctsio->be_move_done = ctl_config_move_done; 10374 ctl_datamove((union ctl_io *)ctsio); 10375 return (CTL_RETVAL_COMPLETE); 10376 } 10377 10378 int 10379 ctl_mechanism_status(struct ctl_scsiio *ctsio) 10380 { 10381 struct scsi_mechanism_status_header *hdr; 10382 struct scsi_mechanism_status *cdb; 10383 uint32_t alloc_len, data_len; 10384 10385 cdb = (struct scsi_mechanism_status *)ctsio->cdb; 10386 alloc_len = scsi_2btoul(cdb->length); 10387 10388 data_len = sizeof(struct scsi_mechanism_status_header); 10389 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10390 ctsio->kern_sg_entries = 0; 10391 ctsio->kern_rel_offset = 0; 10392 ctsio->kern_data_len = min(data_len, alloc_len); 10393 ctsio->kern_total_len = ctsio->kern_data_len; 10394 10395 hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr; 10396 hdr->state1 = 0x00; 10397 hdr->state2 = 0xe0; 10398 scsi_ulto3b(0, hdr->lba); 10399 hdr->slots_num = 0; 10400 scsi_ulto2b(0, hdr->slots_length); 10401 10402 ctl_set_success(ctsio); 10403 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10404 ctsio->be_move_done = ctl_config_move_done; 10405 ctl_datamove((union ctl_io *)ctsio); 10406 return (CTL_RETVAL_COMPLETE); 10407 } 10408 10409 static void 10410 ctl_ultomsf(uint32_t lba, uint8_t *buf) 10411 { 10412 10413 lba += 150; 10414 buf[0] = 0; 10415 buf[1] = bin2bcd((lba / 75) / 60); 10416 buf[2] = bin2bcd((lba / 75) % 60); 10417 buf[3] = bin2bcd(lba % 75); 10418 } 10419 10420 int 10421 ctl_read_toc(struct ctl_scsiio *ctsio) 10422 { 10423 struct ctl_lun *lun = CTL_LUN(ctsio); 10424 struct scsi_read_toc_hdr *hdr; 10425 struct scsi_read_toc_type01_descr *descr; 10426 struct scsi_read_toc *cdb; 10427 uint32_t alloc_len, data_len; 10428 int format, msf; 10429 10430 cdb = (struct scsi_read_toc *)ctsio->cdb; 10431 msf = (cdb->byte2 & CD_MSF) != 0; 10432 format = cdb->format; 10433 alloc_len = scsi_2btoul(cdb->data_len); 10434 10435 data_len = sizeof(struct scsi_read_toc_hdr); 10436 if (format == 0) 10437 data_len += 2 * sizeof(struct scsi_read_toc_type01_descr); 10438 else 10439 data_len += sizeof(struct scsi_read_toc_type01_descr); 10440 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10441 ctsio->kern_sg_entries = 0; 10442 ctsio->kern_rel_offset = 0; 10443 ctsio->kern_data_len = min(data_len, alloc_len); 10444 ctsio->kern_total_len = ctsio->kern_data_len; 10445 10446 hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr; 10447 if (format == 0) { 10448 scsi_ulto2b(0x12, hdr->data_length); 10449 hdr->first = 1; 10450 hdr->last = 1; 10451 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10452 descr->addr_ctl = 0x14; 10453 descr->track_number = 1; 10454 if (msf) 10455 ctl_ultomsf(0, descr->track_start); 10456 else 10457 scsi_ulto4b(0, descr->track_start); 10458 descr++; 10459 descr->addr_ctl = 0x14; 10460 descr->track_number = 0xaa; 10461 if (msf) 10462 ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start); 10463 else 10464 scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start); 10465 } else { 10466 scsi_ulto2b(0x0a, hdr->data_length); 10467 hdr->first = 1; 10468 hdr->last = 1; 10469 descr = (struct 
scsi_read_toc_type01_descr *)(hdr + 1); 10470 descr->addr_ctl = 0x14; 10471 descr->track_number = 1; 10472 if (msf) 10473 ctl_ultomsf(0, descr->track_start); 10474 else 10475 scsi_ulto4b(0, descr->track_start); 10476 } 10477 10478 ctl_set_success(ctsio); 10479 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10480 ctsio->be_move_done = ctl_config_move_done; 10481 ctl_datamove((union ctl_io *)ctsio); 10482 return (CTL_RETVAL_COMPLETE); 10483 } 10484 10485 /* 10486 * For known CDB types, parse the LBA and length. 10487 */ 10488 static int 10489 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10490 { 10491 if (io->io_hdr.io_type != CTL_IO_SCSI) 10492 return (1); 10493 10494 switch (io->scsiio.cdb[0]) { 10495 case COMPARE_AND_WRITE: { 10496 struct scsi_compare_and_write *cdb; 10497 10498 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10499 10500 *lba = scsi_8btou64(cdb->addr); 10501 *len = cdb->length; 10502 break; 10503 } 10504 case READ_6: 10505 case WRITE_6: { 10506 struct scsi_rw_6 *cdb; 10507 10508 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10509 10510 *lba = scsi_3btoul(cdb->addr); 10511 /* only 5 bits are valid in the most significant address byte */ 10512 *lba &= 0x1fffff; 10513 *len = cdb->length; 10514 break; 10515 } 10516 case READ_10: 10517 case WRITE_10: { 10518 struct scsi_rw_10 *cdb; 10519 10520 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10521 10522 *lba = scsi_4btoul(cdb->addr); 10523 *len = scsi_2btoul(cdb->length); 10524 break; 10525 } 10526 case WRITE_VERIFY_10: { 10527 struct scsi_write_verify_10 *cdb; 10528 10529 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10530 10531 *lba = scsi_4btoul(cdb->addr); 10532 *len = scsi_2btoul(cdb->length); 10533 break; 10534 } 10535 case READ_12: 10536 case WRITE_12: { 10537 struct scsi_rw_12 *cdb; 10538 10539 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10540 10541 *lba = scsi_4btoul(cdb->addr); 10542 *len = scsi_4btoul(cdb->length); 10543 break; 10544 } 10545 case WRITE_VERIFY_12: { 10546 struct scsi_write_verify_12 *cdb; 10547 10548 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10549 10550 *lba = scsi_4btoul(cdb->addr); 10551 *len = scsi_4btoul(cdb->length); 10552 break; 10553 } 10554 case READ_16: 10555 case WRITE_16: { 10556 struct scsi_rw_16 *cdb; 10557 10558 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10559 10560 *lba = scsi_8btou64(cdb->addr); 10561 *len = scsi_4btoul(cdb->length); 10562 break; 10563 } 10564 case WRITE_ATOMIC_16: { 10565 struct scsi_write_atomic_16 *cdb; 10566 10567 cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb; 10568 10569 *lba = scsi_8btou64(cdb->addr); 10570 *len = scsi_2btoul(cdb->length); 10571 break; 10572 } 10573 case WRITE_VERIFY_16: { 10574 struct scsi_write_verify_16 *cdb; 10575 10576 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10577 10578 *lba = scsi_8btou64(cdb->addr); 10579 *len = scsi_4btoul(cdb->length); 10580 break; 10581 } 10582 case WRITE_SAME_10: { 10583 struct scsi_write_same_10 *cdb; 10584 10585 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10586 10587 *lba = scsi_4btoul(cdb->addr); 10588 *len = scsi_2btoul(cdb->length); 10589 break; 10590 } 10591 case WRITE_SAME_16: { 10592 struct scsi_write_same_16 *cdb; 10593 10594 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10595 10596 *lba = scsi_8btou64(cdb->addr); 10597 *len = scsi_4btoul(cdb->length); 10598 break; 10599 } 10600 case VERIFY_10: { 10601 struct scsi_verify_10 *cdb; 10602 10603 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10604 10605 *lba = scsi_4btoul(cdb->addr); 10606 *len = 
scsi_2btoul(cdb->length); 10607 break; 10608 } 10609 case VERIFY_12: { 10610 struct scsi_verify_12 *cdb; 10611 10612 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10613 10614 *lba = scsi_4btoul(cdb->addr); 10615 *len = scsi_4btoul(cdb->length); 10616 break; 10617 } 10618 case VERIFY_16: { 10619 struct scsi_verify_16 *cdb; 10620 10621 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10622 10623 *lba = scsi_8btou64(cdb->addr); 10624 *len = scsi_4btoul(cdb->length); 10625 break; 10626 } 10627 case UNMAP: { 10628 *lba = 0; 10629 *len = UINT64_MAX; 10630 break; 10631 } 10632 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10633 struct scsi_get_lba_status *cdb; 10634 10635 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 10636 *lba = scsi_8btou64(cdb->addr); 10637 *len = UINT32_MAX; 10638 break; 10639 } 10640 default: 10641 return (1); 10642 break; /* NOTREACHED */ 10643 } 10644 10645 return (0); 10646 } 10647 10648 static ctl_action 10649 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, 10650 bool seq) 10651 { 10652 uint64_t endlba1, endlba2; 10653 10654 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10655 endlba2 = lba2 + len2 - 1; 10656 10657 if ((endlba1 < lba2) || (endlba2 < lba1)) 10658 return (CTL_ACTION_PASS); 10659 else 10660 return (CTL_ACTION_BLOCK); 10661 } 10662 10663 static int 10664 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10665 { 10666 struct ctl_ptr_len_flags *ptrlen; 10667 struct scsi_unmap_desc *buf, *end, *range; 10668 uint64_t lba; 10669 uint32_t len; 10670 10671 /* If not UNMAP -- go other way. */ 10672 if (io->io_hdr.io_type != CTL_IO_SCSI || 10673 io->scsiio.cdb[0] != UNMAP) 10674 return (CTL_ACTION_ERROR); 10675 10676 /* If UNMAP without data -- block and wait for data. */ 10677 ptrlen = (struct ctl_ptr_len_flags *) 10678 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10679 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10680 ptrlen->ptr == NULL) 10681 return (CTL_ACTION_BLOCK); 10682 10683 /* UNMAP with data -- check for collision. 
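 * Two extents collide when (lba < lba2 + len2) && (lba + len > lba2),
 * i.e. when the descriptor and the range being checked overlap by at
 * least one block.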
*/ 10684 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10685 end = buf + ptrlen->len / sizeof(*buf); 10686 for (range = buf; range < end; range++) { 10687 lba = scsi_8btou64(range->lba); 10688 len = scsi_4btoul(range->length); 10689 if ((lba < lba2 + len2) && (lba + len > lba2)) 10690 return (CTL_ACTION_BLOCK); 10691 } 10692 return (CTL_ACTION_PASS); 10693 } 10694 10695 static ctl_action 10696 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10697 { 10698 uint64_t lba1, lba2; 10699 uint64_t len1, len2; 10700 int retval; 10701 10702 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10703 return (CTL_ACTION_ERROR); 10704 10705 retval = ctl_extent_check_unmap(io1, lba2, len2); 10706 if (retval != CTL_ACTION_ERROR) 10707 return (retval); 10708 10709 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10710 return (CTL_ACTION_ERROR); 10711 10712 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10713 seq = FALSE; 10714 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 10715 } 10716 10717 static ctl_action 10718 ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) 10719 { 10720 uint64_t lba1, lba2; 10721 uint64_t len1, len2; 10722 10723 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10724 return (CTL_ACTION_PASS); 10725 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10726 return (CTL_ACTION_ERROR); 10727 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10728 return (CTL_ACTION_ERROR); 10729 10730 if (lba1 + len1 == lba2) 10731 return (CTL_ACTION_BLOCK); 10732 return (CTL_ACTION_PASS); 10733 } 10734 10735 static ctl_action 10736 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 10737 union ctl_io *ooa_io) 10738 { 10739 const struct ctl_cmd_entry *pending_entry, *ooa_entry; 10740 const ctl_serialize_action *serialize_row; 10741 10742 /* 10743 * The initiator attempted multiple untagged commands at the same 10744 * time. Can't do that. 10745 */ 10746 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10747 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10748 && ((pending_io->io_hdr.nexus.targ_port == 10749 ooa_io->io_hdr.nexus.targ_port) 10750 && (pending_io->io_hdr.nexus.initid == 10751 ooa_io->io_hdr.nexus.initid)) 10752 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10753 CTL_FLAG_STATUS_SENT)) == 0)) 10754 return (CTL_ACTION_OVERLAP); 10755 10756 /* 10757 * The initiator attempted to send multiple tagged commands with 10758 * the same ID. (It's fine if different initiators have the same 10759 * tag ID.) 10760 * 10761 * Even if all of those conditions are true, we don't kill the I/O 10762 * if the command ahead of us has been aborted. We won't end up 10763 * sending it to the FETD, and it's perfectly legal to resend a 10764 * command with the same tag number as long as the previous 10765 * instance of this tag number has been aborted somehow. 10766 */ 10767 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10768 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10769 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 10770 && ((pending_io->io_hdr.nexus.targ_port == 10771 ooa_io->io_hdr.nexus.targ_port) 10772 && (pending_io->io_hdr.nexus.initid == 10773 ooa_io->io_hdr.nexus.initid)) 10774 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10775 CTL_FLAG_STATUS_SENT)) == 0)) 10776 return (CTL_ACTION_OVERLAP_TAG); 10777 10778 /* 10779 * If we get a head of queue tag, SAM-3 says that we should 10780 * immediately execute it. 10781 * 10782 * What happens if this command would normally block for some other 10783 * reason? e.g. 
a request sense with a head of queue tag 10784 * immediately after a write. Normally that would block, but this 10785 * will result in its getting executed immediately... 10786 * 10787 * We currently return "pass" instead of "skip", so we'll end up 10788 * going through the rest of the queue to check for overlapped tags. 10789 * 10790 * XXX KDM check for other types of blockage first?? 10791 */ 10792 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10793 return (CTL_ACTION_PASS); 10794 10795 /* 10796 * Ordered tags have to block until all items ahead of them 10797 * have completed. If we get called with an ordered tag, we always 10798 * block, if something else is ahead of us in the queue. 10799 */ 10800 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 10801 return (CTL_ACTION_BLOCK); 10802 10803 /* 10804 * Simple tags get blocked until all head of queue and ordered tags 10805 * ahead of them have completed. I'm lumping untagged commands in 10806 * with simple tags here. XXX KDM is that the right thing to do? 10807 */ 10808 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10809 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 10810 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10811 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 10812 return (CTL_ACTION_BLOCK); 10813 10814 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); 10815 KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT, 10816 ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p", 10817 __func__, pending_entry->seridx, pending_io->scsiio.cdb[0], 10818 pending_io->scsiio.cdb[1], pending_io)); 10819 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); 10820 if (ooa_entry->seridx == CTL_SERIDX_INVLD) 10821 return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */ 10822 KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT, 10823 ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p", 10824 __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0], 10825 ooa_io->scsiio.cdb[1], ooa_io)); 10826 10827 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 10828 10829 switch (serialize_row[pending_entry->seridx]) { 10830 case CTL_SER_BLOCK: 10831 return (CTL_ACTION_BLOCK); 10832 case CTL_SER_EXTENT: 10833 return (ctl_extent_check(ooa_io, pending_io, 10834 (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10835 case CTL_SER_EXTENTOPT: 10836 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != 10837 SCP_QUEUE_ALG_UNRESTRICTED) 10838 return (ctl_extent_check(ooa_io, pending_io, 10839 (lun->be_lun && 10840 lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10841 return (CTL_ACTION_PASS); 10842 case CTL_SER_EXTENTSEQ: 10843 if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) 10844 return (ctl_extent_check_seq(ooa_io, pending_io)); 10845 return (CTL_ACTION_PASS); 10846 case CTL_SER_PASS: 10847 return (CTL_ACTION_PASS); 10848 case CTL_SER_BLOCKOPT: 10849 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != 10850 SCP_QUEUE_ALG_UNRESTRICTED) 10851 return (CTL_ACTION_BLOCK); 10852 return (CTL_ACTION_PASS); 10853 case CTL_SER_SKIP: 10854 return (CTL_ACTION_SKIP); 10855 default: 10856 panic("%s: Invalid serialization value %d for %d => %d", 10857 __func__, serialize_row[pending_entry->seridx], 10858 pending_entry->seridx, ooa_entry->seridx); 10859 } 10860 10861 return (CTL_ACTION_ERROR); 10862 } 10863 10864 /* 10865 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 
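 * The OOA queue is walked backwards from starting_io (newest to oldest),
 * so pending_io is checked against every I/O that arrived before it.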
10866 * Assumptions: 10867 * - pending_io is generally either incoming, or on the blocked queue 10868 * - starting I/O is the I/O we want to start the check with. 10869 */ 10870 static ctl_action 10871 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 10872 union ctl_io *starting_io) 10873 { 10874 union ctl_io *ooa_io; 10875 ctl_action action; 10876 10877 mtx_assert(&lun->lun_lock, MA_OWNED); 10878 10879 /* 10880 * Run back along the OOA queue, starting with the current 10881 * blocked I/O and going through every I/O before it on the 10882 * queue. If starting_io is NULL, we'll just end up returning 10883 * CTL_ACTION_PASS. 10884 */ 10885 for (ooa_io = starting_io; ooa_io != NULL; 10886 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 10887 ooa_links)){ 10888 10889 /* 10890 * This routine just checks to see whether 10891 * cur_blocked is blocked by ooa_io, which is ahead 10892 * of it in the queue. It doesn't queue/dequeue 10893 * cur_blocked. 10894 */ 10895 action = ctl_check_for_blockage(lun, pending_io, ooa_io); 10896 switch (action) { 10897 case CTL_ACTION_BLOCK: 10898 case CTL_ACTION_OVERLAP: 10899 case CTL_ACTION_OVERLAP_TAG: 10900 case CTL_ACTION_SKIP: 10901 case CTL_ACTION_ERROR: 10902 return (action); 10903 break; /* NOTREACHED */ 10904 case CTL_ACTION_PASS: 10905 break; 10906 default: 10907 panic("%s: Invalid action %d\n", __func__, action); 10908 } 10909 } 10910 10911 return (CTL_ACTION_PASS); 10912 } 10913 10914 /* 10915 * Assumptions: 10916 * - An I/O has just completed, and has been removed from the per-LUN OOA 10917 * queue, so some items on the blocked queue may now be unblocked. 10918 */ 10919 static int 10920 ctl_check_blocked(struct ctl_lun *lun) 10921 { 10922 struct ctl_softc *softc = lun->ctl_softc; 10923 union ctl_io *cur_blocked, *next_blocked; 10924 10925 mtx_assert(&lun->lun_lock, MA_OWNED); 10926 10927 /* 10928 * Run forward from the head of the blocked queue, checking each 10929 * entry against the I/Os prior to it on the OOA queue to see if 10930 * there is still any blockage. 10931 * 10932 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 10933 * with our removing a variable on it while it is traversing the 10934 * list. 10935 */ 10936 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 10937 cur_blocked != NULL; cur_blocked = next_blocked) { 10938 union ctl_io *prev_ooa; 10939 ctl_action action; 10940 10941 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 10942 blocked_links); 10943 10944 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 10945 ctl_ooaq, ooa_links); 10946 10947 /* 10948 * If cur_blocked happens to be the first item in the OOA 10949 * queue now, prev_ooa will be NULL, and the action 10950 * returned will just be CTL_ACTION_PASS. 10951 */ 10952 action = ctl_check_ooa(lun, cur_blocked, prev_ooa); 10953 10954 switch (action) { 10955 case CTL_ACTION_BLOCK: 10956 /* Nothing to do here, still blocked */ 10957 break; 10958 case CTL_ACTION_OVERLAP: 10959 case CTL_ACTION_OVERLAP_TAG: 10960 /* 10961 * This shouldn't happen! In theory we've already 10962 * checked this command for overlap... 10963 */ 10964 break; 10965 case CTL_ACTION_PASS: 10966 case CTL_ACTION_SKIP: { 10967 const struct ctl_cmd_entry *entry; 10968 10969 /* 10970 * The skip case shouldn't happen, this transaction 10971 * should have never made it onto the blocked queue. 10972 */ 10973 /* 10974 * This I/O is no longer blocked, we can remove it 10975 * from the blocked queue. 
Since this is a TAILQ 10976 * (doubly linked list), we can do O(1) removals 10977 * from any place on the list. 10978 */ 10979 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 10980 blocked_links); 10981 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10982 10983 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 10984 (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){ 10985 /* 10986 * Need to send IO back to original side to 10987 * run 10988 */ 10989 union ctl_ha_msg msg_info; 10990 10991 cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 10992 msg_info.hdr.original_sc = 10993 cur_blocked->io_hdr.original_sc; 10994 msg_info.hdr.serializing_sc = cur_blocked; 10995 msg_info.hdr.msg_type = CTL_MSG_R2R; 10996 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 10997 sizeof(msg_info.hdr), M_NOWAIT); 10998 break; 10999 } 11000 entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL); 11001 11002 /* 11003 * Check this I/O for LUN state changes that may 11004 * have happened while this command was blocked. 11005 * The LUN state may have been changed by a command 11006 * ahead of us in the queue, so we need to re-check 11007 * for any states that can be caused by SCSI 11008 * commands. 11009 */ 11010 if (ctl_scsiio_lun_check(lun, entry, 11011 &cur_blocked->scsiio) == 0) { 11012 cur_blocked->io_hdr.flags |= 11013 CTL_FLAG_IS_WAS_ON_RTR; 11014 ctl_enqueue_rtr(cur_blocked); 11015 } else 11016 ctl_done(cur_blocked); 11017 break; 11018 } 11019 default: 11020 /* 11021 * This probably shouldn't happen -- we shouldn't 11022 * get CTL_ACTION_ERROR, or anything else. 11023 */ 11024 break; 11025 } 11026 } 11027 11028 return (CTL_RETVAL_COMPLETE); 11029 } 11030 11031 /* 11032 * This routine (with one exception) checks LUN flags that can be set by 11033 * commands ahead of us in the OOA queue. These flags have to be checked 11034 * when a command initially comes in, and when we pull a command off the 11035 * blocked queue and are preparing to execute it. The reason we have to 11036 * check these flags for commands on the blocked queue is that the LUN 11037 * state may have been changed by a command ahead of us while we're on the 11038 * blocked queue. 11039 * 11040 * Ordering is somewhat important with these checks, so please pay 11041 * careful attention to the placement of any new checks. 11042 */ 11043 static int 11044 ctl_scsiio_lun_check(struct ctl_lun *lun, 11045 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 11046 { 11047 struct ctl_softc *softc = lun->ctl_softc; 11048 int retval; 11049 uint32_t residx; 11050 11051 retval = 0; 11052 11053 mtx_assert(&lun->lun_lock, MA_OWNED); 11054 11055 /* 11056 * If this shelf is a secondary shelf controller, we may have to 11057 * reject some commands disallowed by HA mode and link state. 
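 * Depending on the HA link state and mode, the command is failed below
 * with LUN unavailable, LUN transitioning or LUN standby status.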
11058 */ 11059 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11060 if (softc->ha_link == CTL_HA_LINK_OFFLINE && 11061 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11062 ctl_set_lun_unavail(ctsio); 11063 retval = 1; 11064 goto bailout; 11065 } 11066 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && 11067 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11068 ctl_set_lun_transit(ctsio); 11069 retval = 1; 11070 goto bailout; 11071 } 11072 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && 11073 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { 11074 ctl_set_lun_standby(ctsio); 11075 retval = 1; 11076 goto bailout; 11077 } 11078 11079 /* The rest of checks are only done on executing side */ 11080 if (softc->ha_mode == CTL_HA_MODE_XFER) 11081 goto bailout; 11082 } 11083 11084 if (entry->pattern & CTL_LUN_PAT_WRITE) { 11085 if (lun->be_lun && 11086 lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { 11087 ctl_set_hw_write_protected(ctsio); 11088 retval = 1; 11089 goto bailout; 11090 } 11091 if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) { 11092 ctl_set_sense(ctsio, /*current_error*/ 1, 11093 /*sense_key*/ SSD_KEY_DATA_PROTECT, 11094 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 11095 retval = 1; 11096 goto bailout; 11097 } 11098 } 11099 11100 /* 11101 * Check for a reservation conflict. If this command isn't allowed 11102 * even on reserved LUNs, and if this initiator isn't the one who 11103 * reserved us, reject the command with a reservation conflict. 11104 */ 11105 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11106 if ((lun->flags & CTL_LUN_RESERVED) 11107 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 11108 if (lun->res_idx != residx) { 11109 ctl_set_reservation_conflict(ctsio); 11110 retval = 1; 11111 goto bailout; 11112 } 11113 } 11114 11115 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 11116 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 11117 /* No reservation or command is allowed. */; 11118 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 11119 (lun->pr_res_type == SPR_TYPE_WR_EX || 11120 lun->pr_res_type == SPR_TYPE_WR_EX_RO || 11121 lun->pr_res_type == SPR_TYPE_WR_EX_AR)) { 11122 /* The command is allowed for Write Exclusive resv. */; 11123 } else { 11124 /* 11125 * if we aren't registered or it's a res holder type 11126 * reservation and this isn't the res holder then set a 11127 * conflict. 
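 * (Persistent reservation type values below 4 are the single-holder
 * Write Exclusive and Exclusive Access types; for the registrants-only
 * and all-registrants variants any registered initiator passes this
 * check.)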
11128 */ 11129 if (ctl_get_prkey(lun, residx) == 0 || 11130 (residx != lun->pr_res_idx && lun->pr_res_type < 4)) { 11131 ctl_set_reservation_conflict(ctsio); 11132 retval = 1; 11133 goto bailout; 11134 } 11135 } 11136 11137 if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) { 11138 if (lun->flags & CTL_LUN_EJECTED) 11139 ctl_set_lun_ejected(ctsio); 11140 else if (lun->flags & CTL_LUN_NO_MEDIA) { 11141 if (lun->flags & CTL_LUN_REMOVABLE) 11142 ctl_set_lun_no_media(ctsio); 11143 else 11144 ctl_set_lun_int_reqd(ctsio); 11145 } else if (lun->flags & CTL_LUN_STOPPED) 11146 ctl_set_lun_stopped(ctsio); 11147 else 11148 goto bailout; 11149 retval = 1; 11150 goto bailout; 11151 } 11152 11153 bailout: 11154 return (retval); 11155 } 11156 11157 static void 11158 ctl_failover_io(union ctl_io *io, int have_lock) 11159 { 11160 ctl_set_busy(&io->scsiio); 11161 ctl_done(io); 11162 } 11163 11164 static void 11165 ctl_failover_lun(union ctl_io *rio) 11166 { 11167 struct ctl_softc *softc = CTL_SOFTC(rio); 11168 struct ctl_lun *lun; 11169 struct ctl_io_hdr *io, *next_io; 11170 uint32_t targ_lun; 11171 11172 targ_lun = rio->io_hdr.nexus.targ_mapped_lun; 11173 CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", targ_lun)); 11174 11175 /* Find and lock the LUN. */ 11176 mtx_lock(&softc->ctl_lock); 11177 if (targ_lun > CTL_MAX_LUNS || 11178 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11179 mtx_unlock(&softc->ctl_lock); 11180 return; 11181 } 11182 mtx_lock(&lun->lun_lock); 11183 mtx_unlock(&softc->ctl_lock); 11184 if (lun->flags & CTL_LUN_DISABLED) { 11185 mtx_unlock(&lun->lun_lock); 11186 return; 11187 } 11188 11189 if (softc->ha_mode == CTL_HA_MODE_XFER) { 11190 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11191 /* We are master */ 11192 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11193 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11194 io->flags |= CTL_FLAG_ABORT; 11195 io->flags |= CTL_FLAG_FAILOVER; 11196 } else { /* This can be only due to DATAMOVE */ 11197 io->msg_type = CTL_MSG_DATAMOVE_DONE; 11198 io->flags &= ~CTL_FLAG_DMA_INPROG; 11199 io->flags |= CTL_FLAG_IO_ACTIVE; 11200 io->port_status = 31340; 11201 ctl_enqueue_isc((union ctl_io *)io); 11202 } 11203 } 11204 /* We are slave */ 11205 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11206 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11207 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11208 io->flags |= CTL_FLAG_FAILOVER; 11209 } else { 11210 ctl_set_busy(&((union ctl_io *)io)-> 11211 scsiio); 11212 ctl_done((union ctl_io *)io); 11213 } 11214 } 11215 } 11216 } else { /* SERIALIZE modes */ 11217 TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links, 11218 next_io) { 11219 /* We are master */ 11220 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11221 TAILQ_REMOVE(&lun->blocked_queue, io, 11222 blocked_links); 11223 io->flags &= ~CTL_FLAG_BLOCKED; 11224 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11225 ctl_free_io((union ctl_io *)io); 11226 } 11227 } 11228 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11229 /* We are master */ 11230 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11231 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11232 ctl_free_io((union ctl_io *)io); 11233 } 11234 /* We are slave */ 11235 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11236 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11237 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { 11238 ctl_set_busy(&((union ctl_io *)io)-> 11239 scsiio); 11240 ctl_done((union ctl_io *)io); 11241 } 11242 } 11243 } 11244 ctl_check_blocked(lun); 11245 } 11246 mtx_unlock(&lun->lun_lock); 11247 } 11248 11249 static int 
ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	const struct ctl_cmd_entry *entry;
	uint32_t initidx, targ_lun;
	int retval = 0;

	lun = NULL;
	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	if (targ_lun < CTL_MAX_LUNS)
		lun = softc->ctl_luns[targ_lun];
	if (lun) {
		/*
		 * If the LUN is invalid, pretend that it doesn't exist.
		 * It will go away as soon as all pending I/O has been
		 * completed.
		 */
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_DISABLED) {
			mtx_unlock(&lun->lun_lock);
			lun = NULL;
		}
	}
	CTL_LUN(ctsio) = lun;
	if (lun) {
		CTL_BACKEND_LUN(ctsio) = lun->be_lun;

		/*
		 * Every I/O goes into the OOA queue for a particular LUN,
		 * and stays there until completion.
		 */
#ifdef CTL_TIME_IO
		if (TAILQ_EMPTY(&lun->ooa_queue))
			lun->idle_time += getsbinuptime() - lun->last_busy;
#endif
		TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
	}

	/* Get the command entry and return an error if it is unsupported. */
	entry = ctl_validate_command(ctsio);
	if (entry == NULL) {
		if (lun)
			mtx_unlock(&lun->lun_lock);
		return (retval);
	}

	ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
	ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;

	/*
	 * Check to see whether we can send this command to LUNs that don't
	 * exist.  This should pretty much only be the case for inquiry
	 * and request sense.  Further checks, below, really require having
	 * a LUN, so we can't check the command any further here.  Just put
	 * it on the rtr queue.
	 */
	if (lun == NULL) {
		if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) {
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
			return (retval);
		}

		ctl_set_unsupported_lun(ctsio);
		ctl_done((union ctl_io *)ctsio);
		CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
		return (retval);
	} else {
		/*
		 * Make sure we support this particular command on this LUN.
		 * e.g., we don't support writes to the control LUN.
		 */
		if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
			mtx_unlock(&lun->lun_lock);
			ctl_set_invalid_opcode(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
	}

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);

#ifdef CTL_WITH_CA
	/*
	 * If we've got a request sense, it'll clear the contingent
	 * allegiance condition.  Otherwise, if we have a CA condition for
	 * this initiator, clear it, because it sent down a command other
	 * than request sense.
	 */
	if ((ctsio->cdb[0] != REQUEST_SENSE)
	 && (ctl_is_set(lun->have_ca, initidx)))
		ctl_clear_mask(lun->have_ca, initidx);
#endif

	/*
	 * If the command has this flag set, it handles its own unit
	 * attention reporting, so we shouldn't do anything.  Otherwise we
	 * check for any pending unit attentions, and send them back to the
	 * initiator.  We only do this when a command initially comes in,
	 * not when we pull it off the blocked queue.
	 *
	 * According to SAM-3, section 5.3.2, the order that things get
	 * presented back to the host is basically unit attentions caused
	 * by some sort of reset event, busy status, reservation conflicts
	 * or task set full, and finally any other status.
	 *
	 * One issue here is that some of the unit attentions we report
	 * don't fall into the "reset" category (e.g. "reported luns data
	 * has changed").  So reporting it here, before the reservation
	 * check, may be technically wrong.  I guess the only thing to do
	 * would be to check for and report the reset events here, and then
	 * check for the other unit attention types after we check for a
	 * reservation conflict.
	 *
	 * XXX KDM need to fix this
	 */
	if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
		ctl_ua_type ua_type;
		u_int sense_len = 0;

		ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data,
		    &sense_len, SSD_TYPE_NONE);
		if (ua_type != CTL_UA_NONE) {
			mtx_unlock(&lun->lun_lock);
			ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
			ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
			ctsio->sense_len = sense_len;
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
	}

	if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
		mtx_unlock(&lun->lun_lock);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}

	/*
	 * XXX CHD this is where we want to send IO to other side if
	 * this LUN is secondary on this SC.  We will need to make a copy
	 * of the IO and flag the IO on this side as SENT_2OTHER and flag
	 * the copy we send as FROM_OTHER.
	 * We also need to stuff the address of the original IO so we can
	 * find it easily.  Something similar will need to be done on the
	 * other side so when we are done we can find the copy.
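	 *
	 * (The code below does just that: the I/O is flagged
	 * CTL_FLAG_SENT_2OTHER_SC, its address travels in
	 * msg_info.hdr.original_sc of the CTL_MSG_SERIALIZE message, and
	 * the peer's copy carries CTL_FLAG_FROM_OTHER_SC, which is what
	 * the failover and ISC code above keys on.)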
11397 */ 11398 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 11399 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && 11400 (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { 11401 union ctl_ha_msg msg_info; 11402 int isc_retval; 11403 11404 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11405 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11406 mtx_unlock(&lun->lun_lock); 11407 11408 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11409 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11410 msg_info.hdr.serializing_sc = NULL; 11411 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11412 msg_info.scsi.tag_num = ctsio->tag_num; 11413 msg_info.scsi.tag_type = ctsio->tag_type; 11414 msg_info.scsi.cdb_len = ctsio->cdb_len; 11415 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11416 11417 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11418 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), 11419 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { 11420 ctl_set_busy(ctsio); 11421 ctl_done((union ctl_io *)ctsio); 11422 return (retval); 11423 } 11424 return (retval); 11425 } 11426 11427 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11428 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11429 ctl_ooaq, ooa_links))) { 11430 case CTL_ACTION_BLOCK: 11431 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 11432 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 11433 blocked_links); 11434 mtx_unlock(&lun->lun_lock); 11435 return (retval); 11436 case CTL_ACTION_PASS: 11437 case CTL_ACTION_SKIP: 11438 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11439 mtx_unlock(&lun->lun_lock); 11440 ctl_enqueue_rtr((union ctl_io *)ctsio); 11441 break; 11442 case CTL_ACTION_OVERLAP: 11443 mtx_unlock(&lun->lun_lock); 11444 ctl_set_overlapped_cmd(ctsio); 11445 ctl_done((union ctl_io *)ctsio); 11446 break; 11447 case CTL_ACTION_OVERLAP_TAG: 11448 mtx_unlock(&lun->lun_lock); 11449 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11450 ctl_done((union ctl_io *)ctsio); 11451 break; 11452 case CTL_ACTION_ERROR: 11453 default: 11454 mtx_unlock(&lun->lun_lock); 11455 ctl_set_internal_failure(ctsio, 11456 /*sks_valid*/ 0, 11457 /*retry_count*/ 0); 11458 ctl_done((union ctl_io *)ctsio); 11459 break; 11460 } 11461 return (retval); 11462 } 11463 11464 const struct ctl_cmd_entry * 11465 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11466 { 11467 const struct ctl_cmd_entry *entry; 11468 int service_action; 11469 11470 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11471 if (sa) 11472 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11473 if (entry->flags & CTL_CMD_FLAG_SA5) { 11474 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11475 entry = &((const struct ctl_cmd_entry *) 11476 entry->execute)[service_action]; 11477 } 11478 return (entry); 11479 } 11480 11481 const struct ctl_cmd_entry * 11482 ctl_validate_command(struct ctl_scsiio *ctsio) 11483 { 11484 const struct ctl_cmd_entry *entry; 11485 int i, sa; 11486 uint8_t diff; 11487 11488 entry = ctl_get_cmd_entry(ctsio, &sa); 11489 if (entry->execute == NULL) { 11490 if (sa) 11491 ctl_set_invalid_field(ctsio, 11492 /*sks_valid*/ 1, 11493 /*command*/ 1, 11494 /*field*/ 1, 11495 /*bit_valid*/ 1, 11496 /*bit*/ 4); 11497 else 11498 ctl_set_invalid_opcode(ctsio); 11499 ctl_done((union ctl_io *)ctsio); 11500 return (NULL); 11501 } 11502 KASSERT(entry->length > 0, 11503 ("Not defined length for command 0x%02x/0x%02x", 11504 ctsio->cdb[0], ctsio->cdb[1])); 11505 for (i = 1; i < entry->length; i++) { 11506 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11507 if (diff == 0) 11508 continue; 11509 
ctl_set_invalid_field(ctsio, 11510 /*sks_valid*/ 1, 11511 /*command*/ 1, 11512 /*field*/ i, 11513 /*bit_valid*/ 1, 11514 /*bit*/ fls(diff) - 1); 11515 ctl_done((union ctl_io *)ctsio); 11516 return (NULL); 11517 } 11518 return (entry); 11519 } 11520 11521 static int 11522 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11523 { 11524 11525 switch (lun_type) { 11526 case T_DIRECT: 11527 if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0) 11528 return (0); 11529 break; 11530 case T_PROCESSOR: 11531 if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) 11532 return (0); 11533 break; 11534 case T_CDROM: 11535 if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0) 11536 return (0); 11537 break; 11538 default: 11539 return (0); 11540 } 11541 return (1); 11542 } 11543 11544 static int 11545 ctl_scsiio(struct ctl_scsiio *ctsio) 11546 { 11547 int retval; 11548 const struct ctl_cmd_entry *entry; 11549 11550 retval = CTL_RETVAL_COMPLETE; 11551 11552 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11553 11554 entry = ctl_get_cmd_entry(ctsio, NULL); 11555 11556 /* 11557 * If this I/O has been aborted, just send it straight to 11558 * ctl_done() without executing it. 11559 */ 11560 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11561 ctl_done((union ctl_io *)ctsio); 11562 goto bailout; 11563 } 11564 11565 /* 11566 * All the checks should have been handled by ctl_scsiio_precheck(). 11567 * We should be clear now to just execute the I/O. 11568 */ 11569 retval = entry->execute(ctsio); 11570 11571 bailout: 11572 return (retval); 11573 } 11574 11575 /* 11576 * Since we only implement one target right now, a bus reset simply resets 11577 * our single target. 11578 */ 11579 static int 11580 ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io) 11581 { 11582 return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET)); 11583 } 11584 11585 static int 11586 ctl_target_reset(struct ctl_softc *softc, union ctl_io *io, 11587 ctl_ua_type ua_type) 11588 { 11589 struct ctl_port *port = CTL_PORT(io); 11590 struct ctl_lun *lun; 11591 int retval; 11592 11593 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11594 union ctl_ha_msg msg_info; 11595 11596 msg_info.hdr.nexus = io->io_hdr.nexus; 11597 if (ua_type==CTL_UA_TARG_RESET) 11598 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 11599 else 11600 msg_info.task.task_action = CTL_TASK_BUS_RESET; 11601 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11602 msg_info.hdr.original_sc = NULL; 11603 msg_info.hdr.serializing_sc = NULL; 11604 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11605 sizeof(msg_info.task), M_WAITOK); 11606 } 11607 retval = 0; 11608 11609 mtx_lock(&softc->ctl_lock); 11610 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11611 if (port != NULL && 11612 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 11613 continue; 11614 retval += ctl_do_lun_reset(lun, io, ua_type); 11615 } 11616 mtx_unlock(&softc->ctl_lock); 11617 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11618 return (retval); 11619 } 11620 11621 /* 11622 * The LUN should always be set. The I/O is optional, and is used to 11623 * distinguish between I/Os sent by this initiator, and by other 11624 * initiators. We set unit attention for initiators other than this one. 11625 * SAM-3 is vague on this point. It does say that a unit attention should 11626 * be established for other initiators when a LUN is reset (see section 11627 * 5.7.3), but it doesn't specifically say that the unit attention should 11628 * be established for this particular initiator when a LUN is reset. 
Here
 * is the relevant text, from SAM-3 rev 8:
 *
 * 5.7.2 When a SCSI initiator port aborts its own tasks
 *
 * When a SCSI initiator port causes its own task(s) to be aborted, no
 * notification that the task(s) have been aborted shall be returned to
 * the SCSI initiator port other than the completion response for the
 * command or task management function action that caused the task(s) to
 * be aborted and notification(s) associated with related effects of the
 * action (e.g., a reset unit attention condition).
 *
 * XXX KDM for now, we're setting unit attention for all initiators.
 */
static int
ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
{
	union ctl_io *xio;
#if 0
	uint32_t initidx;
#endif
	int i;

	mtx_lock(&lun->lun_lock);
	/*
	 * Run through the OOA queue and abort each I/O.
	 */
	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
		xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
	}

	/*
	 * This version sets unit attention for every initiator.
	 */
#if 0
	initidx = ctl_get_initindex(&io->io_hdr.nexus);
	ctl_est_ua_all(lun, initidx, ua_type);
#else
	ctl_est_ua_all(lun, -1, ua_type);
#endif

	/*
	 * A reset (any kind, really) clears reservations established with
	 * RESERVE/RELEASE.  It does not clear reservations established
	 * with PERSISTENT RESERVE OUT, but we don't support that at the
	 * moment anyway.  See SPC-2, section 5.6.  SPC-3 doesn't address
	 * reservations made with the RESERVE/RELEASE commands, because
	 * those commands are obsolete in SPC-3.
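	 *
	 * A reset also releases any PREVENT ALLOW MEDIUM REMOVAL state:
	 * prevent_count and the per-initiator prevent mask are cleared
	 * below, so medium removal is no longer blocked afterwards.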
11677 */ 11678 lun->flags &= ~CTL_LUN_RESERVED; 11679 11680 #ifdef CTL_WITH_CA 11681 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11682 ctl_clear_mask(lun->have_ca, i); 11683 #endif 11684 lun->prevent_count = 0; 11685 if (lun->prevent) { 11686 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11687 ctl_clear_mask(lun->prevent, i); 11688 } 11689 mtx_unlock(&lun->lun_lock); 11690 11691 return (0); 11692 } 11693 11694 static int 11695 ctl_lun_reset(struct ctl_softc *softc, union ctl_io *io) 11696 { 11697 struct ctl_lun *lun; 11698 uint32_t targ_lun; 11699 int retval; 11700 11701 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11702 mtx_lock(&softc->ctl_lock); 11703 if (targ_lun >= CTL_MAX_LUNS || 11704 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11705 mtx_unlock(&softc->ctl_lock); 11706 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11707 return (1); 11708 } 11709 retval = ctl_do_lun_reset(lun, io, CTL_UA_LUN_RESET); 11710 mtx_unlock(&softc->ctl_lock); 11711 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11712 11713 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { 11714 union ctl_ha_msg msg_info; 11715 11716 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11717 msg_info.hdr.nexus = io->io_hdr.nexus; 11718 msg_info.task.task_action = CTL_TASK_LUN_RESET; 11719 msg_info.hdr.original_sc = NULL; 11720 msg_info.hdr.serializing_sc = NULL; 11721 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11722 sizeof(msg_info.task), M_WAITOK); 11723 } 11724 return (retval); 11725 } 11726 11727 static void 11728 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 11729 int other_sc) 11730 { 11731 union ctl_io *xio; 11732 11733 mtx_assert(&lun->lun_lock, MA_OWNED); 11734 11735 /* 11736 * Run through the OOA queue and attempt to find the given I/O. 11737 * The target port, initiator ID, tag type and tag number have to 11738 * match the values that we got from the initiator. If we have an 11739 * untagged command to abort, simply abort the first untagged command 11740 * we come to. We only allow one untagged command at a time of course. 11741 */ 11742 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11743 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11744 11745 if ((targ_port == UINT32_MAX || 11746 targ_port == xio->io_hdr.nexus.targ_port) && 11747 (init_id == UINT32_MAX || 11748 init_id == xio->io_hdr.nexus.initid)) { 11749 if (targ_port != xio->io_hdr.nexus.targ_port || 11750 init_id != xio->io_hdr.nexus.initid) 11751 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; 11752 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11753 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11754 union ctl_ha_msg msg_info; 11755 11756 msg_info.hdr.nexus = xio->io_hdr.nexus; 11757 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11758 msg_info.task.tag_num = xio->scsiio.tag_num; 11759 msg_info.task.tag_type = xio->scsiio.tag_type; 11760 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11761 msg_info.hdr.original_sc = NULL; 11762 msg_info.hdr.serializing_sc = NULL; 11763 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11764 sizeof(msg_info.task), M_NOWAIT); 11765 } 11766 } 11767 } 11768 } 11769 11770 static int 11771 ctl_abort_task_set(union ctl_io *io) 11772 { 11773 struct ctl_softc *softc = CTL_SOFTC(io); 11774 struct ctl_lun *lun; 11775 uint32_t targ_lun; 11776 11777 /* 11778 * Look up the LUN. 
11779 */ 11780 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11781 mtx_lock(&softc->ctl_lock); 11782 if (targ_lun >= CTL_MAX_LUNS || 11783 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11784 mtx_unlock(&softc->ctl_lock); 11785 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11786 return (1); 11787 } 11788 11789 mtx_lock(&lun->lun_lock); 11790 mtx_unlock(&softc->ctl_lock); 11791 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 11792 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11793 io->io_hdr.nexus.initid, 11794 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11795 } else { /* CTL_TASK_CLEAR_TASK_SET */ 11796 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 11797 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11798 } 11799 mtx_unlock(&lun->lun_lock); 11800 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11801 return (0); 11802 } 11803 11804 static int 11805 ctl_i_t_nexus_reset(union ctl_io *io) 11806 { 11807 struct ctl_softc *softc = CTL_SOFTC(io); 11808 struct ctl_lun *lun; 11809 uint32_t initidx; 11810 11811 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11812 union ctl_ha_msg msg_info; 11813 11814 msg_info.hdr.nexus = io->io_hdr.nexus; 11815 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; 11816 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11817 msg_info.hdr.original_sc = NULL; 11818 msg_info.hdr.serializing_sc = NULL; 11819 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11820 sizeof(msg_info.task), M_WAITOK); 11821 } 11822 11823 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11824 mtx_lock(&softc->ctl_lock); 11825 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11826 mtx_lock(&lun->lun_lock); 11827 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11828 io->io_hdr.nexus.initid, 1); 11829 #ifdef CTL_WITH_CA 11830 ctl_clear_mask(lun->have_ca, initidx); 11831 #endif 11832 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) 11833 lun->flags &= ~CTL_LUN_RESERVED; 11834 if (lun->prevent && ctl_is_set(lun->prevent, initidx)) { 11835 ctl_clear_mask(lun->prevent, initidx); 11836 lun->prevent_count--; 11837 } 11838 ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS); 11839 mtx_unlock(&lun->lun_lock); 11840 } 11841 mtx_unlock(&softc->ctl_lock); 11842 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11843 return (0); 11844 } 11845 11846 static int 11847 ctl_abort_task(union ctl_io *io) 11848 { 11849 struct ctl_softc *softc = CTL_SOFTC(io); 11850 union ctl_io *xio; 11851 struct ctl_lun *lun; 11852 #if 0 11853 struct sbuf sb; 11854 char printbuf[128]; 11855 #endif 11856 int found; 11857 uint32_t targ_lun; 11858 11859 found = 0; 11860 11861 /* 11862 * Look up the LUN. 11863 */ 11864 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11865 mtx_lock(&softc->ctl_lock); 11866 if (targ_lun >= CTL_MAX_LUNS || 11867 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11868 mtx_unlock(&softc->ctl_lock); 11869 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11870 return (1); 11871 } 11872 11873 #if 0 11874 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 11875 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 11876 #endif 11877 11878 mtx_lock(&lun->lun_lock); 11879 mtx_unlock(&softc->ctl_lock); 11880 /* 11881 * Run through the OOA queue and attempt to find the given I/O. 11882 * The target port, initiator ID, tag type and tag number have to 11883 * match the values that we got from the initiator. If we have an 11884 * untagged command to abort, simply abort the first untagged command 11885 * we come to. 
We only allow one untagged command at a time of course. 11886 */ 11887 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11888 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11889 #if 0 11890 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 11891 11892 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 11893 lun->lun, xio->scsiio.tag_num, 11894 xio->scsiio.tag_type, 11895 (xio->io_hdr.blocked_links.tqe_prev 11896 == NULL) ? "" : " BLOCKED", 11897 (xio->io_hdr.flags & 11898 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 11899 (xio->io_hdr.flags & 11900 CTL_FLAG_ABORT) ? " ABORT" : "", 11901 (xio->io_hdr.flags & 11902 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); 11903 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 11904 sbuf_finish(&sb); 11905 printf("%s\n", sbuf_data(&sb)); 11906 #endif 11907 11908 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 11909 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 11910 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 11911 continue; 11912 11913 /* 11914 * If the abort says that the task is untagged, the 11915 * task in the queue must be untagged. Otherwise, 11916 * we just check to see whether the tag numbers 11917 * match. This is because the QLogic firmware 11918 * doesn't pass back the tag type in an abort 11919 * request. 11920 */ 11921 #if 0 11922 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 11923 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 11924 || (xio->scsiio.tag_num == io->taskio.tag_num)) 11925 #endif 11926 /* 11927 * XXX KDM we've got problems with FC, because it 11928 * doesn't send down a tag type with aborts. So we 11929 * can only really go by the tag number... 11930 * This may cause problems with parallel SCSI. 11931 * Need to figure that out!! 11932 */ 11933 if (xio->scsiio.tag_num == io->taskio.tag_num) { 11934 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11935 found = 1; 11936 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 11937 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11938 union ctl_ha_msg msg_info; 11939 11940 msg_info.hdr.nexus = io->io_hdr.nexus; 11941 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11942 msg_info.task.tag_num = io->taskio.tag_num; 11943 msg_info.task.tag_type = io->taskio.tag_type; 11944 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11945 msg_info.hdr.original_sc = NULL; 11946 msg_info.hdr.serializing_sc = NULL; 11947 #if 0 11948 printf("Sent Abort to other side\n"); 11949 #endif 11950 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11951 sizeof(msg_info.task), M_NOWAIT); 11952 } 11953 #if 0 11954 printf("ctl_abort_task: found I/O to abort\n"); 11955 #endif 11956 } 11957 } 11958 mtx_unlock(&lun->lun_lock); 11959 11960 if (found == 0) { 11961 /* 11962 * This isn't really an error. It's entirely possible for 11963 * the abort and command completion to cross on the wire. 11964 * This is more of an informative/diagnostic error. 
11965 */ 11966 #if 0 11967 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " 11968 "%u:%u:%u tag %d type %d\n", 11969 io->io_hdr.nexus.initid, 11970 io->io_hdr.nexus.targ_port, 11971 io->io_hdr.nexus.targ_lun, io->taskio.tag_num, 11972 io->taskio.tag_type); 11973 #endif 11974 } 11975 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11976 return (0); 11977 } 11978 11979 static int 11980 ctl_query_task(union ctl_io *io, int task_set) 11981 { 11982 struct ctl_softc *softc = CTL_SOFTC(io); 11983 union ctl_io *xio; 11984 struct ctl_lun *lun; 11985 int found = 0; 11986 uint32_t targ_lun; 11987 11988 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11989 mtx_lock(&softc->ctl_lock); 11990 if (targ_lun >= CTL_MAX_LUNS || 11991 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11992 mtx_unlock(&softc->ctl_lock); 11993 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11994 return (1); 11995 } 11996 mtx_lock(&lun->lun_lock); 11997 mtx_unlock(&softc->ctl_lock); 11998 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11999 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12000 12001 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 12002 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 12003 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 12004 continue; 12005 12006 if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) { 12007 found = 1; 12008 break; 12009 } 12010 } 12011 mtx_unlock(&lun->lun_lock); 12012 if (found) 12013 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12014 else 12015 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12016 return (0); 12017 } 12018 12019 static int 12020 ctl_query_async_event(union ctl_io *io) 12021 { 12022 struct ctl_softc *softc = CTL_SOFTC(io); 12023 struct ctl_lun *lun; 12024 ctl_ua_type ua; 12025 uint32_t targ_lun, initidx; 12026 12027 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12028 mtx_lock(&softc->ctl_lock); 12029 if (targ_lun >= CTL_MAX_LUNS || 12030 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12031 mtx_unlock(&softc->ctl_lock); 12032 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12033 return (1); 12034 } 12035 mtx_lock(&lun->lun_lock); 12036 mtx_unlock(&softc->ctl_lock); 12037 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12038 ua = ctl_build_qae(lun, initidx, io->taskio.task_resp); 12039 mtx_unlock(&lun->lun_lock); 12040 if (ua != CTL_UA_NONE) 12041 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12042 else 12043 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12044 return (0); 12045 } 12046 12047 static void 12048 ctl_run_task(union ctl_io *io) 12049 { 12050 struct ctl_softc *softc = CTL_SOFTC(io); 12051 int retval = 1; 12052 12053 CTL_DEBUG_PRINT(("ctl_run_task\n")); 12054 KASSERT(io->io_hdr.io_type == CTL_IO_TASK, 12055 ("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type)); 12056 io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED; 12057 bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp)); 12058 switch (io->taskio.task_action) { 12059 case CTL_TASK_ABORT_TASK: 12060 retval = ctl_abort_task(io); 12061 break; 12062 case CTL_TASK_ABORT_TASK_SET: 12063 case CTL_TASK_CLEAR_TASK_SET: 12064 retval = ctl_abort_task_set(io); 12065 break; 12066 case CTL_TASK_CLEAR_ACA: 12067 break; 12068 case CTL_TASK_I_T_NEXUS_RESET: 12069 retval = ctl_i_t_nexus_reset(io); 12070 break; 12071 case CTL_TASK_LUN_RESET: 12072 retval = ctl_lun_reset(softc, io); 12073 break; 12074 case CTL_TASK_TARGET_RESET: 12075 retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET); 12076 
break; 12077 case CTL_TASK_BUS_RESET: 12078 retval = ctl_bus_reset(softc, io); 12079 break; 12080 case CTL_TASK_PORT_LOGIN: 12081 break; 12082 case CTL_TASK_PORT_LOGOUT: 12083 break; 12084 case CTL_TASK_QUERY_TASK: 12085 retval = ctl_query_task(io, 0); 12086 break; 12087 case CTL_TASK_QUERY_TASK_SET: 12088 retval = ctl_query_task(io, 1); 12089 break; 12090 case CTL_TASK_QUERY_ASYNC_EVENT: 12091 retval = ctl_query_async_event(io); 12092 break; 12093 default: 12094 printf("%s: got unknown task management event %d\n", 12095 __func__, io->taskio.task_action); 12096 break; 12097 } 12098 if (retval == 0) 12099 io->io_hdr.status = CTL_SUCCESS; 12100 else 12101 io->io_hdr.status = CTL_ERROR; 12102 ctl_done(io); 12103 } 12104 12105 /* 12106 * For HA operation. Handle commands that come in from the other 12107 * controller. 12108 */ 12109 static void 12110 ctl_handle_isc(union ctl_io *io) 12111 { 12112 struct ctl_softc *softc = CTL_SOFTC(io); 12113 struct ctl_lun *lun; 12114 const struct ctl_cmd_entry *entry; 12115 uint32_t targ_lun; 12116 12117 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12118 switch (io->io_hdr.msg_type) { 12119 case CTL_MSG_SERIALIZE: 12120 ctl_serialize_other_sc_cmd(&io->scsiio); 12121 break; 12122 case CTL_MSG_R2R: /* Only used in SER_ONLY mode. */ 12123 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 12124 if (targ_lun >= CTL_MAX_LUNS || 12125 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12126 ctl_done(io); 12127 break; 12128 } 12129 mtx_lock(&lun->lun_lock); 12130 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { 12131 mtx_unlock(&lun->lun_lock); 12132 ctl_done(io); 12133 break; 12134 } 12135 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12136 mtx_unlock(&lun->lun_lock); 12137 ctl_enqueue_rtr(io); 12138 break; 12139 case CTL_MSG_FINISH_IO: 12140 if (softc->ha_mode == CTL_HA_MODE_XFER) { 12141 ctl_done(io); 12142 break; 12143 } 12144 if (targ_lun >= CTL_MAX_LUNS || 12145 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12146 ctl_free_io(io); 12147 break; 12148 } 12149 mtx_lock(&lun->lun_lock); 12150 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 12151 ctl_check_blocked(lun); 12152 mtx_unlock(&lun->lun_lock); 12153 ctl_free_io(io); 12154 break; 12155 case CTL_MSG_PERS_ACTION: 12156 ctl_hndl_per_res_out_on_other_sc(io); 12157 ctl_free_io(io); 12158 break; 12159 case CTL_MSG_BAD_JUJU: 12160 ctl_done(io); 12161 break; 12162 case CTL_MSG_DATAMOVE: /* Only used in XFER mode */ 12163 ctl_datamove_remote(io); 12164 break; 12165 case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */ 12166 io->scsiio.be_move_done(io); 12167 break; 12168 case CTL_MSG_FAILOVER: 12169 ctl_failover_lun(io); 12170 ctl_free_io(io); 12171 break; 12172 default: 12173 printf("%s: Invalid message type %d\n", 12174 __func__, io->io_hdr.msg_type); 12175 ctl_free_io(io); 12176 break; 12177 } 12178 12179 } 12180 12181 12182 /* 12183 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12184 * there is no match. 12185 */ 12186 static ctl_lun_error_pattern 12187 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12188 { 12189 const struct ctl_cmd_entry *entry; 12190 ctl_lun_error_pattern filtered_pattern, pattern; 12191 12192 pattern = desc->error_pattern; 12193 12194 /* 12195 * XXX KDM we need more data passed into this function to match a 12196 * custom pattern, and we actually need to implement custom pattern 12197 * matching. 
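	 *
	 * As implemented below, CTL_LUN_PAT_CMD and CTL_LUN_PAT_ANY match
	 * unconditionally; only the command-entry flag comparison and the
	 * optional LBA range overlap check do any real filtering.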
12198 */ 12199 if (pattern & CTL_LUN_PAT_CMD) 12200 return (CTL_LUN_PAT_CMD); 12201 12202 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12203 return (CTL_LUN_PAT_ANY); 12204 12205 entry = ctl_get_cmd_entry(ctsio, NULL); 12206 12207 filtered_pattern = entry->pattern & pattern; 12208 12209 /* 12210 * If the user requested specific flags in the pattern (e.g. 12211 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12212 * flags. 12213 * 12214 * If the user did not specify any flags, it doesn't matter whether 12215 * or not the command supports the flags. 12216 */ 12217 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12218 (pattern & ~CTL_LUN_PAT_MASK)) 12219 return (CTL_LUN_PAT_NONE); 12220 12221 /* 12222 * If the user asked for a range check, see if the requested LBA 12223 * range overlaps with this command's LBA range. 12224 */ 12225 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12226 uint64_t lba1; 12227 uint64_t len1; 12228 ctl_action action; 12229 int retval; 12230 12231 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12232 if (retval != 0) 12233 return (CTL_LUN_PAT_NONE); 12234 12235 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12236 desc->lba_range.len, FALSE); 12237 /* 12238 * A "pass" means that the LBA ranges don't overlap, so 12239 * this doesn't match the user's range criteria. 12240 */ 12241 if (action == CTL_ACTION_PASS) 12242 return (CTL_LUN_PAT_NONE); 12243 } 12244 12245 return (filtered_pattern); 12246 } 12247 12248 static void 12249 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12250 { 12251 struct ctl_error_desc *desc, *desc2; 12252 12253 mtx_assert(&lun->lun_lock, MA_OWNED); 12254 12255 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12256 ctl_lun_error_pattern pattern; 12257 /* 12258 * Check to see whether this particular command matches 12259 * the pattern in the descriptor. 12260 */ 12261 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12262 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12263 continue; 12264 12265 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12266 case CTL_LUN_INJ_ABORTED: 12267 ctl_set_aborted(&io->scsiio); 12268 break; 12269 case CTL_LUN_INJ_MEDIUM_ERR: 12270 ctl_set_medium_error(&io->scsiio, 12271 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != 12272 CTL_FLAG_DATA_OUT); 12273 break; 12274 case CTL_LUN_INJ_UA: 12275 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12276 * OCCURRED */ 12277 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12278 break; 12279 case CTL_LUN_INJ_CUSTOM: 12280 /* 12281 * We're assuming the user knows what he is doing. 12282 * Just copy the sense information without doing 12283 * checks. 12284 */ 12285 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12286 MIN(sizeof(desc->custom_sense), 12287 sizeof(io->scsiio.sense_data))); 12288 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12289 io->scsiio.sense_len = SSD_FULL_SIZE; 12290 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12291 break; 12292 case CTL_LUN_INJ_NONE: 12293 default: 12294 /* 12295 * If this is an error injection type we don't know 12296 * about, clear the continuous flag (if it is set) 12297 * so it will get deleted below. 
12298 */ 12299 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12300 break; 12301 } 12302 /* 12303 * By default, each error injection action is a one-shot 12304 */ 12305 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12306 continue; 12307 12308 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12309 12310 free(desc, M_CTL); 12311 } 12312 } 12313 12314 #ifdef CTL_IO_DELAY 12315 static void 12316 ctl_datamove_timer_wakeup(void *arg) 12317 { 12318 union ctl_io *io; 12319 12320 io = (union ctl_io *)arg; 12321 12322 ctl_datamove(io); 12323 } 12324 #endif /* CTL_IO_DELAY */ 12325 12326 void 12327 ctl_datamove(union ctl_io *io) 12328 { 12329 void (*fe_datamove)(union ctl_io *io); 12330 12331 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12332 12333 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12334 12335 /* No data transferred yet. Frontend must update this when done. */ 12336 io->scsiio.kern_data_resid = io->scsiio.kern_data_len; 12337 12338 #ifdef CTL_TIME_IO 12339 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12340 char str[256]; 12341 char path_str[64]; 12342 struct sbuf sb; 12343 12344 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12345 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12346 12347 sbuf_cat(&sb, path_str); 12348 switch (io->io_hdr.io_type) { 12349 case CTL_IO_SCSI: 12350 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12351 sbuf_printf(&sb, "\n"); 12352 sbuf_cat(&sb, path_str); 12353 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12354 io->scsiio.tag_num, io->scsiio.tag_type); 12355 break; 12356 case CTL_IO_TASK: 12357 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12358 "Tag Type: %d\n", io->taskio.task_action, 12359 io->taskio.tag_num, io->taskio.tag_type); 12360 break; 12361 default: 12362 panic("%s: Invalid CTL I/O type %d\n", 12363 __func__, io->io_hdr.io_type); 12364 } 12365 sbuf_cat(&sb, path_str); 12366 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12367 (intmax_t)time_uptime - io->io_hdr.start_time); 12368 sbuf_finish(&sb); 12369 printf("%s", sbuf_data(&sb)); 12370 } 12371 #endif /* CTL_TIME_IO */ 12372 12373 #ifdef CTL_IO_DELAY 12374 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12375 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12376 } else { 12377 if ((lun != NULL) 12378 && (lun->delay_info.datamove_delay > 0)) { 12379 12380 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 12381 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12382 callout_reset(&io->io_hdr.delay_callout, 12383 lun->delay_info.datamove_delay * hz, 12384 ctl_datamove_timer_wakeup, io); 12385 if (lun->delay_info.datamove_type == 12386 CTL_DELAY_TYPE_ONESHOT) 12387 lun->delay_info.datamove_delay = 0; 12388 return; 12389 } 12390 } 12391 #endif 12392 12393 /* 12394 * This command has been aborted. Set the port status, so we fail 12395 * the data move. 12396 */ 12397 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12398 printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", 12399 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12400 io->io_hdr.nexus.targ_port, 12401 io->io_hdr.nexus.targ_lun); 12402 io->io_hdr.port_status = 31337; 12403 /* 12404 * Note that the backend, in this case, will get the 12405 * callback in its context. In other cases it may get 12406 * called in the frontend's interrupt thread context. 12407 */ 12408 io->scsiio.be_move_done(io); 12409 return; 12410 } 12411 12412 /* Don't confuse frontend with zero length data move. 
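	 * A zero-length transfer is simply completed right away by calling
	 * be_move_done() below instead of being handed to the port.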
	 */
	if (io->scsiio.kern_data_len == 0) {
		io->scsiio.be_move_done(io);
		return;
	}

	fe_datamove = CTL_PORT(io)->fe_datamove;
	fe_datamove(io);
}

static void
ctl_send_datamove_done(union ctl_io *io, int have_lock)
{
	union ctl_ha_msg msg;
#ifdef CTL_TIME_IO
	struct bintime cur_bt;
#endif

	memset(&msg, 0, sizeof(msg));
	msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
	msg.hdr.original_sc = io;
	msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
	msg.hdr.nexus = io->io_hdr.nexus;
	msg.hdr.status = io->io_hdr.status;
	msg.scsi.kern_data_resid = io->scsiio.kern_data_resid;
	msg.scsi.tag_num = io->scsiio.tag_num;
	msg.scsi.tag_type = io->scsiio.tag_type;
	msg.scsi.scsi_status = io->scsiio.scsi_status;
	memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
	    io->scsiio.sense_len);
	msg.scsi.sense_len = io->scsiio.sense_len;
	msg.scsi.port_status = io->io_hdr.port_status;
	io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
		ctl_failover_io(io, /*have_lock*/ have_lock);
		return;
	}
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
	    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
	    msg.scsi.sense_len, M_WAITOK);

#ifdef CTL_TIME_IO
	getbinuptime(&cur_bt);
	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
#endif
	io->io_hdr.num_dmas++;
}

/*
 * The DMA to the remote side is done, now we need to tell the other side
 * we're done so it can continue with its data movement.
 */
static void
ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
{
	union ctl_io *io;
	uint32_t i;

	io = rq->context;

	if (rq->ret != CTL_HA_STATUS_SUCCESS) {
		printf("%s: ISC DMA write failed with error %d\n", __func__,
		    rq->ret);
		ctl_set_internal_failure(&io->scsiio,
		    /*sks_valid*/ 1,
		    /*retry_count*/ rq->ret);
	}

	ctl_dt_req_free(rq);

	for (i = 0; i < io->scsiio.kern_sg_entries; i++)
		free(io->io_hdr.local_sglist[i].addr, M_CTL);
	free(io->io_hdr.remote_sglist, M_CTL);
	io->io_hdr.remote_sglist = NULL;
	io->io_hdr.local_sglist = NULL;

	/*
	 * The data is in local and remote memory, so now we need to send
	 * status (good or bad) back to the other side.
	 */
	ctl_send_datamove_done(io, /*have_lock*/ 0);
}

/*
 * We've moved the data from the host/controller into local memory.  Now we
 * need to push it over to the remote controller's memory.
 */
static int
ctl_datamove_remote_dm_write_cb(union ctl_io *io)
{
	int retval;

	retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE,
	    ctl_datamove_remote_write_cb);
	return (retval);
}

static void
ctl_datamove_remote_write(union ctl_io *io)
{
	int retval;
	void (*fe_datamove)(union ctl_io *io);

	/*
	 * - Get the data from the host/HBA into local memory.
	 * - DMA memory from the local controller to the remote controller.
	 * - Send status back to the remote controller.
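	 *
	 * Step one is done by handing the I/O to the frontend's
	 * fe_datamove() with be_move_done pointed at
	 * ctl_datamove_remote_dm_write_cb().  That callback starts the HA
	 * transfer via ctl_datamove_remote_xfer(); when the transfer
	 * completes, ctl_datamove_remote_write_cb() frees the local S/G
	 * list and reports status with ctl_send_datamove_done().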
12520 */ 12521 12522 retval = ctl_datamove_remote_sgl_setup(io); 12523 if (retval != 0) 12524 return; 12525 12526 /* Switch the pointer over so the FETD knows what to do */ 12527 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12528 12529 /* 12530 * Use a custom move done callback, since we need to send completion 12531 * back to the other controller, not to the backend on this side. 12532 */ 12533 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12534 12535 fe_datamove = CTL_PORT(io)->fe_datamove; 12536 fe_datamove(io); 12537 } 12538 12539 static int 12540 ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12541 { 12542 #if 0 12543 char str[256]; 12544 char path_str[64]; 12545 struct sbuf sb; 12546 #endif 12547 uint32_t i; 12548 12549 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12550 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12551 free(io->io_hdr.remote_sglist, M_CTL); 12552 io->io_hdr.remote_sglist = NULL; 12553 io->io_hdr.local_sglist = NULL; 12554 12555 #if 0 12556 scsi_path_string(io, path_str, sizeof(path_str)); 12557 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12558 sbuf_cat(&sb, path_str); 12559 scsi_command_string(&io->scsiio, NULL, &sb); 12560 sbuf_printf(&sb, "\n"); 12561 sbuf_cat(&sb, path_str); 12562 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12563 io->scsiio.tag_num, io->scsiio.tag_type); 12564 sbuf_cat(&sb, path_str); 12565 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 12566 io->io_hdr.flags, io->io_hdr.status); 12567 sbuf_finish(&sb); 12568 printk("%s", sbuf_data(&sb)); 12569 #endif 12570 12571 12572 /* 12573 * The read is done, now we need to send status (good or bad) back 12574 * to the other side. 12575 */ 12576 ctl_send_datamove_done(io, /*have_lock*/ 0); 12577 12578 return (0); 12579 } 12580 12581 static void 12582 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12583 { 12584 union ctl_io *io; 12585 void (*fe_datamove)(union ctl_io *io); 12586 12587 io = rq->context; 12588 12589 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12590 printf("%s: ISC DMA read failed with error %d\n", __func__, 12591 rq->ret); 12592 ctl_set_internal_failure(&io->scsiio, 12593 /*sks_valid*/ 1, 12594 /*retry_count*/ rq->ret); 12595 } 12596 12597 ctl_dt_req_free(rq); 12598 12599 /* Switch the pointer over so the FETD knows what to do */ 12600 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12601 12602 /* 12603 * Use a custom move done callback, since we need to send completion 12604 * back to the other controller, not to the backend on this side. 12605 */ 12606 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12607 12608 /* XXX KDM add checks like the ones in ctl_datamove? */ 12609 12610 fe_datamove = CTL_PORT(io)->fe_datamove; 12611 fe_datamove(io); 12612 } 12613 12614 static int 12615 ctl_datamove_remote_sgl_setup(union ctl_io *io) 12616 { 12617 struct ctl_sg_entry *local_sglist; 12618 uint32_t len_to_go; 12619 int retval; 12620 int i; 12621 12622 retval = 0; 12623 local_sglist = io->io_hdr.local_sglist; 12624 len_to_go = io->scsiio.kern_data_len; 12625 12626 /* 12627 * The difficult thing here is that the size of the various 12628 * S/G segments may be different than the size from the 12629 * remote controller. That'll make it harder when DMAing 12630 * the data back to the other side. 
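	 *
	 * The local segments are simply carved into chunks of at most
	 * CTL_HA_DATAMOVE_SEGMENT bytes below; ctl_datamove_remote_xfer()
	 * later walks the local and remote lists with independent offsets,
	 * so the two layouts never have to match.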
12631 */ 12632 for (i = 0; len_to_go > 0; i++) { 12633 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12634 local_sglist[i].addr = 12635 malloc(local_sglist[i].len, M_CTL, M_WAITOK); 12636 12637 len_to_go -= local_sglist[i].len; 12638 } 12639 /* 12640 * Reset the number of S/G entries accordingly. The original 12641 * number of S/G entries is available in rem_sg_entries. 12642 */ 12643 io->scsiio.kern_sg_entries = i; 12644 12645 #if 0 12646 printf("%s: kern_sg_entries = %d\n", __func__, 12647 io->scsiio.kern_sg_entries); 12648 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12649 printf("%s: sg[%d] = %p, %lu\n", __func__, i, 12650 local_sglist[i].addr, local_sglist[i].len); 12651 #endif 12652 12653 return (retval); 12654 } 12655 12656 static int 12657 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12658 ctl_ha_dt_cb callback) 12659 { 12660 struct ctl_ha_dt_req *rq; 12661 struct ctl_sg_entry *remote_sglist, *local_sglist; 12662 uint32_t local_used, remote_used, total_used; 12663 int i, j, isc_ret; 12664 12665 rq = ctl_dt_req_alloc(); 12666 12667 /* 12668 * If we failed to allocate the request, and if the DMA didn't fail 12669 * anyway, set busy status. This is just a resource allocation 12670 * failure. 12671 */ 12672 if ((rq == NULL) 12673 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12674 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) 12675 ctl_set_busy(&io->scsiio); 12676 12677 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12678 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { 12679 12680 if (rq != NULL) 12681 ctl_dt_req_free(rq); 12682 12683 /* 12684 * The data move failed. We need to return status back 12685 * to the other controller. No point in trying to DMA 12686 * data to the remote controller. 12687 */ 12688 12689 ctl_send_datamove_done(io, /*have_lock*/ 0); 12690 12691 return (1); 12692 } 12693 12694 local_sglist = io->io_hdr.local_sglist; 12695 remote_sglist = io->io_hdr.remote_sglist; 12696 local_used = 0; 12697 remote_used = 0; 12698 total_used = 0; 12699 12700 /* 12701 * Pull/push the data over the wire from/to the other controller. 12702 * This takes into account the possibility that the local and 12703 * remote sglists may not be identical in terms of the size of 12704 * the elements and the number of elements. 12705 * 12706 * One fundamental assumption here is that the length allocated for 12707 * both the local and remote sglists is identical. Otherwise, we've 12708 * essentially got a coding error of some sort. 12709 */ 12710 isc_ret = CTL_HA_STATUS_SUCCESS; 12711 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12712 uint32_t cur_len; 12713 uint8_t *tmp_ptr; 12714 12715 rq->command = command; 12716 rq->context = io; 12717 12718 /* 12719 * Both pointers should be aligned. But it is possible 12720 * that the allocation length is not. They should both 12721 * also have enough slack left over at the end, though, 12722 * to round up to the next 8 byte boundary. 
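		 *
		 * Each pass moves MIN(remaining local segment, remaining
		 * remote segment) bytes; whichever segment is exhausted
		 * advances its index, and the completion callback is only
		 * set on the final pass, the one that finishes
		 * kern_data_len.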
12723 */ 12724 cur_len = MIN(local_sglist[i].len - local_used, 12725 remote_sglist[j].len - remote_used); 12726 rq->size = cur_len; 12727 12728 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12729 tmp_ptr += local_used; 12730 12731 #if 0 12732 /* Use physical addresses when talking to ISC hardware */ 12733 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12734 /* XXX KDM use busdma */ 12735 rq->local = vtophys(tmp_ptr); 12736 } else 12737 rq->local = tmp_ptr; 12738 #else 12739 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12740 ("HA does not support BUS_ADDR")); 12741 rq->local = tmp_ptr; 12742 #endif 12743 12744 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12745 tmp_ptr += remote_used; 12746 rq->remote = tmp_ptr; 12747 12748 rq->callback = NULL; 12749 12750 local_used += cur_len; 12751 if (local_used >= local_sglist[i].len) { 12752 i++; 12753 local_used = 0; 12754 } 12755 12756 remote_used += cur_len; 12757 if (remote_used >= remote_sglist[j].len) { 12758 j++; 12759 remote_used = 0; 12760 } 12761 total_used += cur_len; 12762 12763 if (total_used >= io->scsiio.kern_data_len) 12764 rq->callback = callback; 12765 12766 #if 0 12767 printf("%s: %s: local %p remote %p size %d\n", __func__, 12768 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 12769 rq->local, rq->remote, rq->size); 12770 #endif 12771 12772 isc_ret = ctl_dt_single(rq); 12773 if (isc_ret > CTL_HA_STATUS_SUCCESS) 12774 break; 12775 } 12776 if (isc_ret != CTL_HA_STATUS_WAIT) { 12777 rq->ret = isc_ret; 12778 callback(rq); 12779 } 12780 12781 return (0); 12782 } 12783 12784 static void 12785 ctl_datamove_remote_read(union ctl_io *io) 12786 { 12787 int retval; 12788 uint32_t i; 12789 12790 /* 12791 * This will send an error to the other controller in the case of a 12792 * failure. 12793 */ 12794 retval = ctl_datamove_remote_sgl_setup(io); 12795 if (retval != 0) 12796 return; 12797 12798 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12799 ctl_datamove_remote_read_cb); 12800 if (retval != 0) { 12801 /* 12802 * Make sure we free memory if there was an error.. The 12803 * ctl_datamove_remote_xfer() function will send the 12804 * datamove done message, or call the callback with an 12805 * error if there is a problem. 12806 */ 12807 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12808 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12809 free(io->io_hdr.remote_sglist, M_CTL); 12810 io->io_hdr.remote_sglist = NULL; 12811 io->io_hdr.local_sglist = NULL; 12812 } 12813 } 12814 12815 /* 12816 * Process a datamove request from the other controller. This is used for 12817 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12818 * first. Once that is complete, the data gets DMAed into the remote 12819 * controller's memory. For reads, we DMA from the remote controller's 12820 * memory into our memory first, and then move it out to the FETD. 12821 */ 12822 static void 12823 ctl_datamove_remote(union ctl_io *io) 12824 { 12825 12826 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12827 12828 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12829 ctl_failover_io(io, /*have_lock*/ 0); 12830 return; 12831 } 12832 12833 /* 12834 * Note that we look for an aborted I/O here, but don't do some of 12835 * the other checks that ctl_datamove() normally does. 12836 * We don't need to run the datamove delay code, since that should 12837 * have been done if need be on the other controller. 
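	 *
	 * The port_status values used on the error paths here (31338,
	 * 31339) appear to be nothing more than distinct non-zero markers
	 * so the origin of a failed remote data move can be told apart
	 * when the status is reported.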
12838 */ 12839 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12840 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, 12841 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12842 io->io_hdr.nexus.targ_port, 12843 io->io_hdr.nexus.targ_lun); 12844 io->io_hdr.port_status = 31338; 12845 ctl_send_datamove_done(io, /*have_lock*/ 0); 12846 return; 12847 } 12848 12849 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) 12850 ctl_datamove_remote_write(io); 12851 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 12852 ctl_datamove_remote_read(io); 12853 else { 12854 io->io_hdr.port_status = 31339; 12855 ctl_send_datamove_done(io, /*have_lock*/ 0); 12856 } 12857 } 12858 12859 static void 12860 ctl_process_done(union ctl_io *io) 12861 { 12862 struct ctl_softc *softc = CTL_SOFTC(io); 12863 struct ctl_port *port = CTL_PORT(io); 12864 struct ctl_lun *lun = CTL_LUN(io); 12865 void (*fe_done)(union ctl_io *io); 12866 union ctl_ha_msg msg; 12867 12868 CTL_DEBUG_PRINT(("ctl_process_done\n")); 12869 fe_done = port->fe_done; 12870 12871 #ifdef CTL_TIME_IO 12872 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12873 char str[256]; 12874 char path_str[64]; 12875 struct sbuf sb; 12876 12877 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12878 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12879 12880 sbuf_cat(&sb, path_str); 12881 switch (io->io_hdr.io_type) { 12882 case CTL_IO_SCSI: 12883 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12884 sbuf_printf(&sb, "\n"); 12885 sbuf_cat(&sb, path_str); 12886 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12887 io->scsiio.tag_num, io->scsiio.tag_type); 12888 break; 12889 case CTL_IO_TASK: 12890 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12891 "Tag Type: %d\n", io->taskio.task_action, 12892 io->taskio.tag_num, io->taskio.tag_type); 12893 break; 12894 default: 12895 panic("%s: Invalid CTL I/O type %d\n", 12896 __func__, io->io_hdr.io_type); 12897 } 12898 sbuf_cat(&sb, path_str); 12899 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 12900 (intmax_t)time_uptime - io->io_hdr.start_time); 12901 sbuf_finish(&sb); 12902 printf("%s", sbuf_data(&sb)); 12903 } 12904 #endif /* CTL_TIME_IO */ 12905 12906 switch (io->io_hdr.io_type) { 12907 case CTL_IO_SCSI: 12908 break; 12909 case CTL_IO_TASK: 12910 if (ctl_debug & CTL_DEBUG_INFO) 12911 ctl_io_error_print(io, NULL); 12912 fe_done(io); 12913 return; 12914 default: 12915 panic("%s: Invalid CTL I/O type %d\n", 12916 __func__, io->io_hdr.io_type); 12917 } 12918 12919 if (lun == NULL) { 12920 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 12921 io->io_hdr.nexus.targ_mapped_lun)); 12922 goto bailout; 12923 } 12924 12925 mtx_lock(&lun->lun_lock); 12926 12927 /* 12928 * Check to see if we have any informational exception and status 12929 * of this command can be modified to report it in form of either 12930 * RECOVERED ERROR or NO SENSE, depending on MRIE mode page field. 
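	 *
	 * SIEP_MRIE_REC_COND only reports when a PER bit is set in the
	 * read/write or verify error recovery mode pages,
	 * SIEP_MRIE_REC_UNCOND always reports RECOVERED ERROR, and
	 * SIEP_MRIE_NO_SENSE reports the exception with NO SENSE; commands
	 * flagged CTL_CMD_FLAG_NO_SENSE are left untouched.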
12931 */ 12932 if (lun->ie_reported == 0 && lun->ie_asc != 0 && 12933 io->io_hdr.status == CTL_SUCCESS && 12934 (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) { 12935 uint8_t mrie = lun->MODE_IE.mrie; 12936 uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) || 12937 (lun->MODE_VER.byte3 & SMS_VER_PER)); 12938 if (((mrie == SIEP_MRIE_REC_COND && per) || 12939 mrie == SIEP_MRIE_REC_UNCOND || 12940 mrie == SIEP_MRIE_NO_SENSE) && 12941 (ctl_get_cmd_entry(&io->scsiio, NULL)->flags & 12942 CTL_CMD_FLAG_NO_SENSE) == 0) { 12943 ctl_set_sense(&io->scsiio, 12944 /*current_error*/ 1, 12945 /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ? 12946 SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR, 12947 /*asc*/ lun->ie_asc, 12948 /*ascq*/ lun->ie_ascq, 12949 SSD_ELEM_NONE); 12950 lun->ie_reported = 1; 12951 } 12952 } else if (lun->ie_reported < 0) 12953 lun->ie_reported = 0; 12954 12955 /* 12956 * Check to see if we have any errors to inject here. We only 12957 * inject errors for commands that don't already have errors set. 12958 */ 12959 if (!STAILQ_EMPTY(&lun->error_list) && 12960 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && 12961 ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) 12962 ctl_inject_error(lun, io); 12963 12964 /* 12965 * XXX KDM how do we treat commands that aren't completed 12966 * successfully? 12967 * 12968 * XXX KDM should we also track I/O latency? 12969 */ 12970 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && 12971 io->io_hdr.io_type == CTL_IO_SCSI) { 12972 int type; 12973 #ifdef CTL_TIME_IO 12974 struct bintime bt; 12975 12976 getbinuptime(&bt); 12977 bintime_sub(&bt, &io->io_hdr.start_bt); 12978 #endif 12979 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 12980 CTL_FLAG_DATA_IN) 12981 type = CTL_STATS_READ; 12982 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 12983 CTL_FLAG_DATA_OUT) 12984 type = CTL_STATS_WRITE; 12985 else 12986 type = CTL_STATS_NO_IO; 12987 12988 #ifdef CTL_LEGACY_STATS 12989 uint32_t targ_port = port->targ_port; 12990 lun->legacy_stats.ports[targ_port].bytes[type] += 12991 io->scsiio.kern_total_len; 12992 lun->legacy_stats.ports[targ_port].operations[type] ++; 12993 lun->legacy_stats.ports[targ_port].num_dmas[type] += 12994 io->io_hdr.num_dmas; 12995 #ifdef CTL_TIME_IO 12996 bintime_add(&lun->legacy_stats.ports[targ_port].dma_time[type], 12997 &io->io_hdr.dma_bt); 12998 bintime_add(&lun->legacy_stats.ports[targ_port].time[type], 12999 &bt); 13000 #endif 13001 #endif /* CTL_LEGACY_STATS */ 13002 13003 lun->stats.bytes[type] += io->scsiio.kern_total_len; 13004 lun->stats.operations[type] ++; 13005 lun->stats.dmas[type] += io->io_hdr.num_dmas; 13006 #ifdef CTL_TIME_IO 13007 bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt); 13008 bintime_add(&lun->stats.time[type], &bt); 13009 #endif 13010 13011 mtx_lock(&port->port_lock); 13012 port->stats.bytes[type] += io->scsiio.kern_total_len; 13013 port->stats.operations[type] ++; 13014 port->stats.dmas[type] += io->io_hdr.num_dmas; 13015 #ifdef CTL_TIME_IO 13016 bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt); 13017 bintime_add(&port->stats.time[type], &bt); 13018 #endif 13019 mtx_unlock(&port->port_lock); 13020 } 13021 13022 /* 13023 * Remove this from the OOA queue. 
	 */
	TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
#ifdef CTL_TIME_IO
	if (TAILQ_EMPTY(&lun->ooa_queue))
		lun->last_busy = getsbinuptime();
#endif

	/*
	 * Run through the blocked queue on this LUN and see if anything
	 * has become unblocked, now that this transaction is done.
	 */
	ctl_check_blocked(lun);

	/*
	 * If the LUN has been invalidated, free it if there is nothing
	 * left on its OOA queue.
	 */
	if ((lun->flags & CTL_LUN_INVALID)
	 && TAILQ_EMPTY(&lun->ooa_queue)) {
		mtx_unlock(&lun->lun_lock);
		mtx_lock(&softc->ctl_lock);
		ctl_free_lun(lun);
		mtx_unlock(&softc->ctl_lock);
	} else
		mtx_unlock(&lun->lun_lock);

bailout:

	/*
	 * If this command has been aborted, make sure we set the status
	 * properly.  The FETD is responsible for freeing the I/O and doing
	 * whatever it needs to do to clean up its state.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT)
		ctl_set_task_aborted(&io->scsiio);

	/*
	 * If enabled, print command error status.
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS &&
	    (ctl_debug & CTL_DEBUG_INFO) != 0)
		ctl_io_error_print(io, NULL);

	/*
	 * Tell the FETD or the other shelf controller we're done with this
	 * command.  Note that only SCSI commands get to this point.  Task
	 * management commands are completed above.
	 */
	if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
	    (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
		msg.hdr.nexus = io->io_hdr.nexus;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
		    M_WAITOK);
	}

	fe_done(io);
}

#ifdef CTL_WITH_CA
/*
 * Front end should call this if it doesn't do autosense.  When the request
 * sense comes back in from the initiator, we'll dequeue this and send it.
 */
int
ctl_queue_sense(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_port *port = CTL_PORT(io);
	struct ctl_lun *lun;
	uint32_t initidx, targ_lun;

	CTL_DEBUG_PRINT(("ctl_queue_sense\n"));

	targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	/*
	 * LUN lookup will likely move to the ctl_work_thread() once we
	 * have our new queueing infrastructure (that doesn't put things on
	 * a per-LUN queue initially).  That is so that we can handle
	 * things like an INQUIRY to a LUN that we don't have enabled.  We
	 * can't deal with that right now.
	 * If we don't have a LUN for this, just toss the sense information.
	 */
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= CTL_MAX_LUNS ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		goto bailout;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);

	/*
	 * Already have CA set for this LUN...toss the sense information.
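	 * Only one pending sense (Contingent Allegiance) entry is kept per
	 * initiator index, so any newer sense data for this initiator is
	 * simply dropped.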
	 */
	initidx = ctl_get_initindex(&io->io_hdr.nexus);
	if (ctl_is_set(lun->have_ca, initidx)) {
		mtx_unlock(&lun->lun_lock);
		goto bailout;
	}

	memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data,
	       MIN(sizeof(lun->pending_sense[initidx]),
	       sizeof(io->scsiio.sense_data)));
	ctl_set_mask(lun->have_ca, initidx);
	mtx_unlock(&lun->lun_lock);

bailout:
	ctl_free_io(io);
	return (CTL_RETVAL_COMPLETE);
}
#endif

/*
 * Primary command inlet from frontend ports.  All SCSI and task I/O
 * requests must go through this function.
 */
int
ctl_queue(union ctl_io *io)
{
	struct ctl_port *port = CTL_PORT(io);

	CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));

#ifdef CTL_TIME_IO
	io->io_hdr.start_time = time_uptime;
	getbinuptime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */

	/* Map FE-specific LUN ID into global one. */
	io->io_hdr.nexus.targ_mapped_lun =
	    ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
	case CTL_IO_TASK:
		if (ctl_debug & CTL_DEBUG_CDB)
			ctl_io_print(io);
		ctl_enqueue_incoming(io);
		break;
	default:
		printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
		return (EINVAL);
	}

	return (CTL_RETVAL_COMPLETE);
}

#ifdef CTL_IO_DELAY
static void
ctl_done_timer_wakeup(void *arg)
{
	union ctl_io *io;

	io = (union ctl_io *)arg;
	ctl_done(io);
}
#endif /* CTL_IO_DELAY */

void
ctl_serseq_done(union ctl_io *io)
{
	struct ctl_lun *lun = CTL_LUN(io);

	if (lun->be_lun == NULL ||
	    lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF)
		return;
	mtx_lock(&lun->lun_lock);
	io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
	ctl_check_blocked(lun);
	mtx_unlock(&lun->lun_lock);
}

void
ctl_done(union ctl_io *io)
{

	/*
	 * Enable this to catch duplicate completion issues.
	 */
#if 0
	if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
		printf("%s: type %d msg %d cdb %x iptl: "
		       "%u:%u:%u tag 0x%04x "
		       "flag %#x status %x\n",
		       __func__,
		       io->io_hdr.io_type,
		       io->io_hdr.msg_type,
		       io->scsiio.cdb[0],
		       io->io_hdr.nexus.initid,
		       io->io_hdr.nexus.targ_port,
		       io->io_hdr.nexus.targ_lun,
		       (io->io_hdr.io_type ==
		       CTL_IO_TASK) ?
		       io->taskio.tag_num :
		       io->scsiio.tag_num,
		       io->io_hdr.flags,
		       io->io_hdr.status);
	} else
		io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
#endif

	/*
	 * This is an internal copy of an I/O, and should not go through
	 * the normal done processing logic.
	 */
	if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
		return;

#ifdef CTL_IO_DELAY
	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
	} else {
		struct ctl_lun *lun = CTL_LUN(io);

		if ((lun != NULL)
		 && (lun->delay_info.done_delay > 0)) {

			callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
			callout_reset(&io->io_hdr.delay_callout,
				      lun->delay_info.done_delay * hz,
				      ctl_done_timer_wakeup, io);
			if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
				lun->delay_info.done_delay = 0;
			return;
		}
	}
#endif /* CTL_IO_DELAY */

	ctl_enqueue_done(io);
}

static void
ctl_work_thread(void *arg)
{
	struct ctl_thread *thr = (struct ctl_thread *)arg;
	struct ctl_softc *softc = thr->ctl_softc;
	union ctl_io *io;
	int retval;

	CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));

	for (;;) {
		/*
		 * We handle the queues in this order:
		 * - ISC
		 * - done queue (to free up resources, unblock other commands)
		 * - RtR queue
		 * - incoming queue
		 *
		 * If all of those queues are empty, we go to sleep until
		 * new work is queued.
		 */
		mtx_lock(&thr->queue_lock);
		io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
			mtx_unlock(&thr->queue_lock);
			ctl_handle_isc(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->done_queue, links);
			/* clear any blocked commands, call fe_done */
			mtx_unlock(&thr->queue_lock);
			ctl_process_done(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
			mtx_unlock(&thr->queue_lock);
			if (io->io_hdr.io_type == CTL_IO_TASK)
				ctl_run_task(io);
			else
				ctl_scsiio_precheck(softc, &io->scsiio);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
			mtx_unlock(&thr->queue_lock);
			retval = ctl_scsiio(&io->scsiio);
			if (retval != CTL_RETVAL_COMPLETE)
				CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
			continue;
		}

		/* Sleep until we have something to do. */
		mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
	}
}

static void
ctl_lun_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_be_lun *be_lun;

	CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));

	for (;;) {
		mtx_lock(&softc->ctl_lock);
		be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
		if (be_lun != NULL) {
			STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
			mtx_unlock(&softc->ctl_lock);
			ctl_create_lun(be_lun);
			continue;
		}

		/*
		 * Sleep until we have something to do.
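		 * PDROP releases the ctl_lock for the duration of the
		 * sleep; the top of the loop re-acquires it before looking
		 * at the pending LUN queue again.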
		 */
		mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
		    PDROP | PRIBIO, "-", 0);
	}
}

static void
ctl_thresh_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_lun *lun;
	struct ctl_logical_block_provisioning_page *page;
	const char *attr;
	union ctl_ha_msg msg;
	uint64_t thres, val;
	int i, e, set;

	CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));

	for (;;) {
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			if ((lun->flags & CTL_LUN_DISABLED) ||
			    (lun->flags & CTL_LUN_NO_MEDIA) ||
			    lun->backend->lun_attr == NULL)
				continue;
			if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
			    softc->ha_mode == CTL_HA_MODE_XFER)
				continue;
			if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0)
				continue;
			e = 0;
			page = &lun->MODE_LBP;
			for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
				if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
					continue;
				thres = scsi_4btoul(page->descr[i].count);
				thres <<= CTL_LBP_EXPONENT;
				switch (page->descr[i].resource) {
				case 0x01:
					attr = "blocksavail";
					break;
				case 0x02:
					attr = "blocksused";
					break;
				case 0xf1:
					attr = "poolblocksavail";
					break;
				case 0xf2:
					attr = "poolblocksused";
					break;
				default:
					continue;
				}
				mtx_unlock(&softc->ctl_lock); // XXX
				val = lun->backend->lun_attr(
				    lun->be_lun->be_lun, attr);
				mtx_lock(&softc->ctl_lock);
				if (val == UINT64_MAX)
					continue;
				if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
				    == SLBPPD_ARMING_INC)
					e = (val >= thres);
				else
					e = (val <= thres);
				if (e)
					break;
			}
			mtx_lock(&lun->lun_lock);
			if (e) {
				scsi_u64to8b((uint8_t *)&page->descr[i] -
				    (uint8_t *)page, lun->ua_tpt_info);
				if (lun->lasttpt == 0 ||
				    time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
					lun->lasttpt = time_uptime;
					ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
					set = 1;
				} else
					set = 0;
			} else {
				lun->lasttpt = 0;
				ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
				set = -1;
			}
			mtx_unlock(&lun->lun_lock);
			if (set != 0 &&
			    lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
				/*
				 * Send msg to other side.
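				 * The ctl_lock is dropped around the HA
				 * send below because ctl_ha_msg_send() is
				 * called with M_WAITOK and may sleep.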
				 */
				bzero(&msg.ua, sizeof(msg.ua));
				msg.hdr.msg_type = CTL_MSG_UA;
				msg.hdr.nexus.initid = -1;
				msg.hdr.nexus.targ_port = -1;
				msg.hdr.nexus.targ_lun = lun->lun;
				msg.hdr.nexus.targ_mapped_lun = lun->lun;
				msg.ua.ua_all = 1;
				msg.ua.ua_set = (set > 0);
				msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
				memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8);
				mtx_unlock(&softc->ctl_lock); // XXX
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
				    sizeof(msg.ua), M_WAITOK);
				mtx_lock(&softc->ctl_lock);
			}
		}
		mtx_unlock(&softc->ctl_lock);
		pause("-", CTL_LBP_PERIOD * hz);
	}
}

static void
ctl_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;
	u_int idx;

	idx = (io->io_hdr.nexus.targ_port * 127 +
	       io->io_hdr.nexus.initid) % worker_threads;
	thr = &softc->threads[idx];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_rtr(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_done(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_isc(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * vim: ts=8
 */