/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
 * Copyright (c) 2017 Jakub Wojciech Klama <jceel@FreeBSD.org>
 * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id$
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ctype.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>
#include <vm/uma.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_cd.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
 */
const static struct scsi_da_rw_recovery_page rw_er_page_default = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/SMS_RWER_LBPERE,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_rw_recovery_page rw_er_page_changeable = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/SMS_RWER_PER,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/SMS_RWER_LBPERE,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_format_page format_page_default = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
			       CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ SFP_HSEC,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_format_page format_page_changeable = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {0, 0},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ 0,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_default = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ CTL_DEFAULT_HEADS,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ SRDP_RPL_DISABLED,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
			   CTL_DEFAULT_ROTATION_RATE & 0xff},
	/*reserved2*/ {0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ 0,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ 0,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {0, 0},
	/*reserved2*/ {0, 0}
};

const static struct scsi_da_verify_recovery_page verify_er_page_default = {
	/*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
	/*byte3*/0,
	/*read_retry_count*/0,
	/*reserved*/{ 0, 0, 0, 0, 0, 0 },
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_verify_recovery_page verify_er_page_changeable = {
	/*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
	/*byte3*/SMS_VER_PER,
	/*read_retry_count*/0,
	/*reserved*/{ 0, 0, 0, 0, 0, 0 },
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_caching_page caching_page_default = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_DISC | SCP_WCE,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0xff, 0xff},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0xff, 0xff},
	/*max_pf_ceiling*/ {0xff, 0xff},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_caching_page caching_page_changeable = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_WCE | SCP_RCD,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0, 0},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0, 0},
	/*max_pf_ceiling*/ {0, 0},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_control_page control_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/0,
	/*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
	/*eca_and_aen*/0,
	/*flags4*/SCP_TAS,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

const static struct scsi_control_page control_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/SCP_DSENSE,
	/*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR,
	/*eca_and_aen*/SCP_SWP,
	/*flags4*/0,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

#define CTL_CEM_LEN	(sizeof(struct scsi_control_ext_page) - 4)

const static struct scsi_control_ext_page control_ext_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
	/*subpage_code*/0x01,
	/*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
	/*flags*/0,
	/*prio*/0,
	/*max_sense*/0
};

const static struct scsi_control_ext_page control_ext_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
	/*subpage_code*/0x01,
	/*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
	/*flags*/0,
	/*prio*/0,
	/*max_sense*/0xff
};

const static struct scsi_info_exceptions_page ie_page_default = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/SIEP_FLAGS_EWASC,
	/*mrie*/SIEP_MRIE_NO,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 1}
};

const static struct scsi_info_exceptions_page ie_page_changeable = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST |
	    SIEP_FLAGS_LOGERR,
	/*mrie*/0x0f,
	/*interval_timer*/{0xff, 0xff, 0xff, 0xff},
	/*report_count*/{0xff, 0xff, 0xff, 0xff}
};

#define CTL_LBPM_LEN	(sizeof(struct ctl_logical_block_provisioning_page) - 4)

const static struct ctl_logical_block_provisioning_page lbp_page_default = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0x01,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0x02,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf1,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf2,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/SLBPP_SITUA,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

const static struct scsi_cddvd_capabilities_page cddvd_page_default = {
	/*page_code*/SMS_CDDVD_CAPS_PAGE,
	/*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
	/*caps1*/0x3f,
	/*caps2*/0x00,
	/*caps3*/0xf0,
	/*caps4*/0x00,
	/*caps5*/0x29,
	/*caps6*/0x00,
	/*obsolete*/{0, 0},
	/*nvol_levels*/{0, 0},
	/*buffer_size*/{8, 0},
	/*obsolete2*/{0, 0},
	/*reserved*/0,
	/*digital*/0,
	/*obsolete3*/0,
	/*copy_management*/0,
	/*reserved2*/0,
	/*rotation_control*/0,
	/*cur_write_speed*/0,
	/*num_speed_descr*/0,
};

const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = {
	/*page_code*/SMS_CDDVD_CAPS_PAGE,
	/*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
	/*caps1*/0,
	/*caps2*/0,
	/*caps3*/0,
	/*caps4*/0,
	/*caps5*/0,
	/*caps6*/0,
	/*obsolete*/{0, 0},
	/*nvol_levels*/{0, 0},
	/*buffer_size*/{0, 0},
	/*obsolete2*/{0, 0},
	/*reserved*/0,
	/*digital*/0,
	/*obsolete3*/0,
	/*copy_management*/0,
	/*reserved2*/0,
	/*rotation_control*/0,
	/*cur_write_speed*/0,
	/*num_speed_descr*/0,
};
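
/*
 * The *_default/*_changeable template pairs above follow the usual SCSI
 * mode page convention: the "default" copy holds the values reported for
 * the default variant of the page (and is what the current/saved copies
 * typically start from), while the "changeable" copy acts as a mask of
 * which fields MODE SELECT may modify (non-zero bits are writable).  A
 * sketch of how an initiator selects a variant via the page control (PC)
 * field of MODE SENSE, per SPC:
 *
 *	PC = 0b00 -> current values
 *	PC = 0b01 -> changeable values mask
 *	PC = 0b10 -> default values
 *	PC = 0b11 -> saved values
 */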
428 */ 429 #define CTL_DEFAULT_MAX_LUNS 1024 430 static int ctl_max_luns = CTL_DEFAULT_MAX_LUNS; 431 TUNABLE_INT("kern.cam.ctl.max_luns", &ctl_max_luns); 432 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_luns, CTLFLAG_RDTUN, 433 &ctl_max_luns, CTL_DEFAULT_MAX_LUNS, "Maximum number of LUNs"); 434 435 /* 436 * Maximum number of ports registered at one time. 437 */ 438 #define CTL_DEFAULT_MAX_PORTS 256 439 static int ctl_max_ports = CTL_DEFAULT_MAX_PORTS; 440 TUNABLE_INT("kern.cam.ctl.max_ports", &ctl_max_ports); 441 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_ports, CTLFLAG_RDTUN, 442 &ctl_max_ports, CTL_DEFAULT_MAX_LUNS, "Maximum number of ports"); 443 444 /* 445 * Maximum number of initiators we support. 446 */ 447 #define CTL_MAX_INITIATORS (CTL_MAX_INIT_PER_PORT * ctl_max_ports) 448 449 /* 450 * Supported pages (0x00), Serial number (0x80), Device ID (0x83), 451 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87), 452 * SCSI Ports (0x88), Third-party Copy (0x8F), SCSI Feature Sets (0x92), 453 * Block limits (0xB0), Block Device Characteristics (0xB1) and 454 * Logical Block Provisioning (0xB2) 455 */ 456 #define SCSI_EVPD_NUM_SUPPORTED_PAGES 11 457 458 static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event, 459 int param); 460 static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); 461 static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest); 462 static int ctl_init(void); 463 static int ctl_shutdown(void); 464 static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); 465 static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); 466 static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio); 467 static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 468 struct ctl_ooa *ooa_hdr, 469 struct ctl_ooa_entry *kern_entries); 470 static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 471 struct thread *td); 472 static int ctl_enable_lun(struct ctl_lun *lun); 473 static int ctl_disable_lun(struct ctl_lun *lun); 474 static int ctl_free_lun(struct ctl_lun *lun); 475 476 static int ctl_do_mode_select(union ctl_io *io); 477 static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, 478 uint64_t res_key, uint64_t sa_res_key, 479 uint8_t type, uint32_t residx, 480 struct ctl_scsiio *ctsio, 481 struct scsi_per_res_out *cdb, 482 struct scsi_per_res_out_parms* param); 483 static void ctl_pro_preempt_other(struct ctl_lun *lun, 484 union ctl_ha_msg *msg); 485 static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io); 486 static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len); 487 static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len); 488 static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len); 489 static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len); 490 static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len); 491 static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, 492 int alloc_len); 493 static int ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len); 494 static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, 495 int alloc_len); 496 static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len); 497 static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len); 498 static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio); 499 static int ctl_inquiry_std(struct ctl_scsiio *ctsio); 500 static int 

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), SCSI Feature Sets (0x92),
 * Block limits (0xB0), Block Device Characteristics (0xB1) and
 * Logical Block Provisioning (0xB2)
 */
#define	SCSI_EVPD_NUM_SUPPORTED_PAGES	11

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
static int ctl_init(void);
static int ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			      struct ctl_ooa *ooa_hdr,
			      struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
static int ctl_enable_lun(struct ctl_lun *lun);
static int ctl_disable_lun(struct ctl_lun *lun);
static int ctl_free_lun(struct ctl_lun *lun);

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
				  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
				       int alloc_len);
static int ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
					 int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
    bool seq);
static ctl_action ctl_seq_check(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
    union ctl_io *pending_io, const uint8_t *serialize_row,
    union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io **starting_io);
static void ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io,
    bool skip);
static void ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *io,
    bool skip);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
				const struct ctl_cmd_entry *entry,
				struct ctl_scsiio *ctsio);
static void ctl_failover_lun(union ctl_io *io);
static void ctl_scsiio_precheck(struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_target_reset(union ctl_io *io);
static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx,
			 ctl_ua_type ua_type);
static int ctl_lun_reset(union ctl_io *io);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_query_task(union ctl_io *io, int task_set);
static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx,
			      ctl_ua_type ua_type);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static int ctl_query_async_event(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io, bool samethr);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io, bool samethr);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
				    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static void ctl_process_done(union ctl_io *io);
static void ctl_thresh_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);
static const struct ctl_cmd_entry *
    ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
    ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
    const struct ctl_cmd_entry *entry);
static int ctl_ha_init(void);
static int ctl_ha_shutdown(void);

static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};

MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

static struct ctl_frontend ha_frontend =
{
	.name = "ha",
	.init = ctl_ha_init,
	.shutdown = ctl_ha_shutdown,
};

static int
ctl_ha_init(void)
{
	struct ctl_softc *softc = control_softc;

	if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
	    &softc->othersc_pool) != 0)
		return (ENOMEM);
	if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
		ctl_pool_free(softc->othersc_pool);
		return (EIO);
	}
	if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
	    != CTL_HA_STATUS_SUCCESS) {
		ctl_ha_msg_destroy(softc);
		ctl_pool_free(softc->othersc_pool);
		return (EIO);
	}
	return (0);
}

static int
ctl_ha_shutdown(void)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_port *port;

	ctl_ha_msg_shutdown(softc);
	if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS)
		return (EIO);
	if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS)
		return (EIO);
	ctl_pool_free(softc->othersc_pool);
	while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) {
		ctl_port_deregister(port);
		free(port->port_name, M_CTL);
		free(port, M_CTL);
	}
	return (0);
}

static void
ctl_ha_datamove(union ctl_io *io)
{
	struct ctl_lun *lun = CTL_LUN(io);
	struct ctl_sg_entry *sgl;
	union ctl_ha_msg msg;
	uint32_t sg_entries_sent;
	int do_sg_copy, i, j;

	memset(&msg.dt, 0, sizeof(msg.dt));
	msg.hdr.msg_type = CTL_MSG_DATAMOVE;
	msg.hdr.original_sc = io->io_hdr.remote_io;
	msg.hdr.serializing_sc = io;
	msg.hdr.nexus = io->io_hdr.nexus;
	msg.hdr.status = io->io_hdr.status;
	msg.dt.flags = io->io_hdr.flags;

	/*
	 * We convert everything into a S/G list here.  We can't
	 * pass by reference, only by value between controllers.
	 * So we can't pass a pointer to the S/G list, only as many
	 * S/G entries as we can fit in here.  If it's possible for
	 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
	 * then we need to break this up into multiple transfers.
	 */
	if (io->scsiio.kern_sg_entries == 0) {
		msg.dt.kern_sg_entries = 1;
#if 0
		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
		} else {
			/* XXX KDM use busdma here! */
			msg.dt.sg_list[0].addr =
			    (void *)vtophys(io->scsiio.kern_data_ptr);
		}
#else
		KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
		    ("HA does not support BUS_ADDR"));
		msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
#endif
		msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
		do_sg_copy = 0;
	} else {
		msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
		do_sg_copy = 1;
	}

	msg.dt.kern_data_len = io->scsiio.kern_data_len;
	msg.dt.kern_total_len = io->scsiio.kern_total_len;
	msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
	msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
	msg.dt.sg_sequence = 0;

	/*
	 * Loop until we've sent all of the S/G entries.  On the
	 * other end, we'll recompose these S/G entries into one
	 * contiguous list before processing.
	 */
	for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries;
	    msg.dt.sg_sequence++) {
		msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) /
		    sizeof(msg.dt.sg_list[0])),
		    msg.dt.kern_sg_entries - sg_entries_sent);
		if (do_sg_copy != 0) {
			sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
			for (i = sg_entries_sent, j = 0;
			     i < msg.dt.cur_sg_entries; i++, j++) {
#if 0
				if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
					msg.dt.sg_list[j].addr = sgl[i].addr;
				} else {
					/* XXX KDM use busdma here! */
					msg.dt.sg_list[j].addr =
					    (void *)vtophys(sgl[i].addr);
				}
#else
				KASSERT((io->io_hdr.flags &
				    CTL_FLAG_BUS_ADDR) == 0,
				    ("HA does not support BUS_ADDR"));
				msg.dt.sg_list[j].addr = sgl[i].addr;
#endif
				msg.dt.sg_list[j].len = sgl[i].len;
			}
		}

		sg_entries_sent += msg.dt.cur_sg_entries;
		msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries);
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.dt) - sizeof(msg.dt.sg_list) +
		    sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries,
		    M_WAITOK) > CTL_HA_STATUS_SUCCESS) {
			io->io_hdr.port_status = 31341;
			ctl_datamove_done(io, true);
			return;
		}
		msg.dt.sent_sg_entries = sg_entries_sent;
	}

	/*
	 * Officially hand over the request from us to the peer.
	 * If failover has just happened, then we must return an error.
	 * If failover happens just after, then it is not our problem.
	 */
	if (lun)
		mtx_lock(&lun->lun_lock);
	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
		if (lun)
			mtx_unlock(&lun->lun_lock);
		io->io_hdr.port_status = 31342;
		ctl_datamove_done(io, true);
		return;
	}
	io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
	io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
	if (lun)
		mtx_unlock(&lun->lun_lock);
}

static void
ctl_ha_done(union ctl_io *io)
{
	union ctl_ha_msg msg;

	if (io->io_hdr.io_type == CTL_IO_SCSI) {
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.original_sc = io->io_hdr.remote_io;
		msg.hdr.nexus = io->io_hdr.nexus;
		msg.hdr.status = io->io_hdr.status;
		msg.scsi.scsi_status = io->scsiio.scsi_status;
		msg.scsi.tag_num = io->scsiio.tag_num;
		msg.scsi.tag_type = io->scsiio.tag_type;
		msg.scsi.sense_len = io->scsiio.sense_len;
		memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
		    io->scsiio.sense_len);
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
		    msg.scsi.sense_len, M_WAITOK);
	}
	ctl_free_io(io);
}

static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
			    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	    msg_info->scsi.sense_len);
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
				union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

void
ctl_isc_announce_lun(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_ha_msg *msg;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;

	if (softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	mtx_lock(&lun->lun_lock);
	i = sizeof(msg->lun);
	if (lun->lun_devid)
		i += lun->lun_devid->len;
	i += sizeof(pr_key) * lun->pr_key_count;
	/*
	 * Drop the lock for the M_WAITOK allocation, then recompute the
	 * required size and retry if it grew while the lock was released.
	 */
alloc:
	mtx_unlock(&lun->lun_lock);
	msg = malloc(i, M_CTL, M_WAITOK);
	mtx_lock(&lun->lun_lock);
	k = sizeof(msg->lun);
	if (lun->lun_devid)
		k += lun->lun_devid->len;
	k += sizeof(pr_key) * lun->pr_key_count;
	if (i < k) {
		free(msg, M_CTL);
		i = k;
		goto alloc;
	}
	bzero(&msg->lun, sizeof(msg->lun));
	msg->hdr.msg_type = CTL_MSG_LUN_SYNC;
	msg->hdr.nexus.targ_lun = lun->lun;
	msg->hdr.nexus.targ_mapped_lun = lun->lun;
	msg->lun.flags = lun->flags;
	msg->lun.pr_generation = lun->pr_generation;
	msg->lun.pr_res_idx = lun->pr_res_idx;
	msg->lun.pr_res_type = lun->pr_res_type;
	msg->lun.pr_key_count = lun->pr_key_count;
	i = 0;
	if (lun->lun_devid) {
		msg->lun.lun_devid_len = lun->lun_devid->len;
		memcpy(&msg->lun.data[i], lun->lun_devid->data,
		    msg->lun.lun_devid_len);
		i += msg->lun.lun_devid_len;
	}
	for (k = 0; k < CTL_MAX_INITIATORS; k++) {
		if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0)
			continue;
		pr_key.pr_iid = k;
		memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key));
		i += sizeof(pr_key);
	}
	mtx_unlock(&lun->lun_lock);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->lun, sizeof(msg->lun) + i,
	    M_WAITOK);
	free(msg, M_CTL);

	if (lun->flags & CTL_LUN_PRIMARY_SC) {
		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
			ctl_isc_announce_mode(lun, -1,
			    lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
			    lun->mode_pages.index[i].subpage);
		}
	}
}

void
ctl_isc_announce_port(struct ctl_port *port)
{
	struct ctl_softc *softc = port->ctl_softc;
	union ctl_ha_msg *msg;
	int i;

	if (port->targ_port < softc->port_min ||
	    port->targ_port >= softc->port_max ||
	    softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	i = sizeof(msg->port) + strlen(port->port_name) + 1;
	if (port->lun_map)
		i += port->lun_map_size * sizeof(uint32_t);
	if (port->port_devid)
		i += port->port_devid->len;
	if (port->target_devid)
		i += port->target_devid->len;
	if (port->init_devid)
		i += port->init_devid->len;
	msg = malloc(i, M_CTL, M_WAITOK);
	bzero(&msg->port, sizeof(msg->port));
	msg->hdr.msg_type = CTL_MSG_PORT_SYNC;
	msg->hdr.nexus.targ_port = port->targ_port;
	msg->port.port_type = port->port_type;
	msg->port.physical_port = port->physical_port;
	msg->port.virtual_port = port->virtual_port;
	msg->port.status = port->status;
	i = 0;
	msg->port.name_len = sprintf(&msg->port.data[i],
	    "%d:%s", softc->ha_id, port->port_name) + 1;
	i += msg->port.name_len;
	if (port->lun_map) {
		msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t);
		memcpy(&msg->port.data[i], port->lun_map,
		    msg->port.lun_map_len);
		i += msg->port.lun_map_len;
	}
	if (port->port_devid) {
		msg->port.port_devid_len = port->port_devid->len;
		memcpy(&msg->port.data[i], port->port_devid->data,
		    msg->port.port_devid_len);
		i += msg->port.port_devid_len;
	}
	if (port->target_devid) {
		msg->port.target_devid_len = port->target_devid->len;
		memcpy(&msg->port.data[i], port->target_devid->data,
		    msg->port.target_devid_len);
		i += msg->port.target_devid_len;
	}
	if (port->init_devid) {
		msg->port.init_devid_len = port->init_devid->len;
		memcpy(&msg->port.data[i], port->init_devid->data,
		    msg->port.init_devid_len);
		i += msg->port.init_devid_len;
	}
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
	    M_WAITOK);
	free(msg, M_CTL);
}
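
/*
 * Wire format note for the CTL_MSG_*_SYNC announcements
 * (ctl_isc_announce_lun()/ctl_isc_announce_port() above,
 * ctl_isc_announce_iid() below) and their ctl_isc_*_sync() receivers:
 * each message is a fixed header followed by variable-length blobs packed
 * back to back in the data[] array, with a *_len field in the header for
 * each blob.  For CTL_MSG_PORT_SYNC, for example, data[] is laid out as
 *
 *	"<ha_id>:<port_name>\0" | lun_map | port_devid | target_devid |
 *	init_devid
 *
 * and the receiver walks it with a running offset in the same order.
 */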

void
ctl_isc_announce_iid(struct ctl_port *port, int iid)
{
	struct ctl_softc *softc = port->ctl_softc;
	union ctl_ha_msg *msg;
	int i, l;

	if (port->targ_port < softc->port_min ||
	    port->targ_port >= softc->port_max ||
	    softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	mtx_lock(&softc->ctl_lock);
	i = sizeof(msg->iid);
	l = 0;
	if (port->wwpn_iid[iid].name)
		l = strlen(port->wwpn_iid[iid].name) + 1;
	i += l;
	msg = malloc(i, M_CTL, M_NOWAIT);
	if (msg == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	bzero(&msg->iid, sizeof(msg->iid));
	msg->hdr.msg_type = CTL_MSG_IID_SYNC;
	msg->hdr.nexus.targ_port = port->targ_port;
	msg->hdr.nexus.initid = iid;
	msg->iid.in_use = port->wwpn_iid[iid].in_use;
	msg->iid.name_len = l;
	msg->iid.wwpn = port->wwpn_iid[iid].wwpn;
	if (port->wwpn_iid[iid].name)
		strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l);
	mtx_unlock(&softc->ctl_lock);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT);
	free(msg, M_CTL);
}

void
ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx,
    uint8_t page, uint8_t subpage)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_ha_msg *msg;
	u_int i, l;

	if (softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
		    page && lun->mode_pages.index[i].subpage == subpage)
			break;
	}
	if (i == CTL_NUM_MODE_PAGES)
		return;

	/* Don't try to replicate pages not present on this device. */
	if (lun->mode_pages.index[i].page_data == NULL)
		return;

	l = sizeof(msg->mode) + lun->mode_pages.index[i].page_len;
	msg = malloc(l, M_CTL, M_WAITOK | M_ZERO);
	msg->hdr.msg_type = CTL_MSG_MODE_SYNC;
	msg->hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT;
	msg->hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT;
	msg->hdr.nexus.targ_lun = lun->lun;
	msg->hdr.nexus.targ_mapped_lun = lun->lun;
	msg->mode.page_code = page;
	msg->mode.subpage = subpage;
	msg->mode.page_len = lun->mode_pages.index[i].page_len;
	memcpy(msg->mode.data, lun->mode_pages.index[i].page_data,
	    msg->mode.page_len);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->mode, l, M_WAITOK);
	free(msg, M_CTL);
}

static void
ctl_isc_ha_link_up(struct ctl_softc *softc)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	union ctl_ha_msg msg;
	int i;

	/* Announce this node's parameters to the peer for validation. */
	msg.login.msg_type = CTL_MSG_LOGIN;
	msg.login.version = CTL_HA_VERSION;
	msg.login.ha_mode = softc->ha_mode;
	msg.login.ha_id = softc->ha_id;
	msg.login.max_luns = ctl_max_luns;
	msg.login.max_ports = ctl_max_ports;
	msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT;
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login),
	    M_WAITOK);

	STAILQ_FOREACH(port, &softc->port_list, links) {
		ctl_isc_announce_port(port);
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use)
				ctl_isc_announce_iid(port, i);
		}
	}
	STAILQ_FOREACH(lun, &softc->lun_list, links)
		ctl_isc_announce_lun(lun);
}

static void
ctl_isc_ha_link_down(struct ctl_softc *softc)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	union ctl_io *io;
	int i;

	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) {
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
		}
		mtx_unlock(&lun->lun_lock);

		mtx_unlock(&softc->ctl_lock);
		io = ctl_alloc_io(softc->othersc_pool);
		mtx_lock(&softc->ctl_lock);
		ctl_zero_io(io);
		io->io_hdr.msg_type = CTL_MSG_FAILOVER;
		io->io_hdr.nexus.targ_mapped_lun = lun->lun;
		ctl_enqueue_isc(io);
	}

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port->targ_port >= softc->port_min &&
		    port->targ_port < softc->port_max)
			continue;
		port->status &= ~CTL_PORT_STATUS_ONLINE;
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			port->wwpn_iid[i].in_use = 0;
			free(port->wwpn_iid[i].name, M_CTL);
			port->wwpn_iid[i].name = NULL;
		}
	}
	mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);

	mtx_lock(&softc->ctl_lock);
	if (msg->hdr.nexus.targ_mapped_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set)
		memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8);
	if (msg->ua.ua_all) {
		if (msg->ua.ua_set)
			ctl_est_ua_all(lun, iid, msg->ua.ua_type);
		else
			ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
	} else {
		if (msg->ua.ua_set)
			ctl_est_ua(lun, iid, msg->ua.ua_type);
		else
			ctl_clr_ua(lun, iid, msg->ua.ua_type);
	}
	mtx_unlock(&lun->lun_lock);
}

static void
ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;
	ctl_lun_flags oflags;
	uint32_t targ_lun;

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
	if (msg->lun.lun_devid_len != i || (i > 0 &&
	    memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
		mtx_unlock(&lun->lun_lock);
		printf("%s: Received conflicting HA LUN %d\n",
		    __func__, targ_lun);
		return;
	} else {
		/* Record whether peer is primary. */
		oflags = lun->flags;
		if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_DISABLED) == 0)
			lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
		else
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
		if (oflags != lun->flags)
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);

		/* If peer is primary and we are not -- use data. */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
		    (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
			lun->pr_generation = msg->lun.pr_generation;
			lun->pr_res_idx = msg->lun.pr_res_idx;
			lun->pr_res_type = msg->lun.pr_res_type;
			lun->pr_key_count = msg->lun.pr_key_count;
			for (k = 0; k < CTL_MAX_INITIATORS; k++)
				ctl_clr_prkey(lun, k);
			for (k = 0; k < msg->lun.pr_key_count; k++) {
				memcpy(&pr_key, &msg->lun.data[i],
				    sizeof(pr_key));
				ctl_alloc_prkey(lun, pr_key.pr_iid);
				ctl_set_prkey(lun, pr_key.pr_iid,
				    pr_key.pr_key);
				i += sizeof(pr_key);
			}
		}

		mtx_unlock(&lun->lun_lock);
		CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
		    __func__, targ_lun,
		    (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
		    "primary" : "secondary"));

		/* If we are primary but peer doesn't know -- notify. */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
			ctl_isc_announce_lun(lun);
	}
}

static void
ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	int i, new;

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		CTL_DEBUG_PRINT(("%s: New port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 1;
		port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO);
		port->frontend = &ha_frontend;
		port->targ_port = msg->hdr.nexus.targ_port;
		port->fe_datamove = ctl_ha_datamove;
		port->fe_done = ctl_ha_done;
	} else if (port->frontend == &ha_frontend) {
		CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 0;
	} else {
		printf("%s: Received conflicting HA port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	port->port_type = msg->port.port_type;
	port->physical_port = msg->port.physical_port;
	port->virtual_port = msg->port.virtual_port;
	port->status = msg->port.status;
	i = 0;
	free(port->port_name, M_CTL);
	port->port_name = strndup(&msg->port.data[i], msg->port.name_len,
	    M_CTL);
	i += msg->port.name_len;
	if (msg->port.lun_map_len != 0) {
		if (port->lun_map == NULL ||
		    port->lun_map_size * sizeof(uint32_t) <
		    msg->port.lun_map_len) {
			port->lun_map_size = 0;
			free(port->lun_map, M_CTL);
			port->lun_map = malloc(msg->port.lun_map_len,
			    M_CTL, M_WAITOK);
		}
		memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len);
		port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t);
		i += msg->port.lun_map_len;
	} else {
		port->lun_map_size = 0;
		free(port->lun_map, M_CTL);
		port->lun_map = NULL;
	}
	if (msg->port.port_devid_len != 0) {
		if (port->port_devid == NULL ||
		    port->port_devid->len < msg->port.port_devid_len) {
			free(port->port_devid, M_CTL);
			port->port_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.port_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->port_devid->data, &msg->port.data[i],
		    msg->port.port_devid_len);
		port->port_devid->len = msg->port.port_devid_len;
		i += msg->port.port_devid_len;
	} else {
		free(port->port_devid, M_CTL);
		port->port_devid = NULL;
	}
	if (msg->port.target_devid_len != 0) {
		if (port->target_devid == NULL ||
		    port->target_devid->len < msg->port.target_devid_len) {
			free(port->target_devid, M_CTL);
			port->target_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.target_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->target_devid->data, &msg->port.data[i],
		    msg->port.target_devid_len);
		port->target_devid->len = msg->port.target_devid_len;
		i += msg->port.target_devid_len;
	} else {
		free(port->target_devid, M_CTL);
		port->target_devid = NULL;
	}
	if (msg->port.init_devid_len != 0) {
		if (port->init_devid == NULL ||
		    port->init_devid->len < msg->port.init_devid_len) {
			free(port->init_devid, M_CTL);
			port->init_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.init_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->init_devid->data, &msg->port.data[i],
		    msg->port.init_devid_len);
		port->init_devid->len = msg->port.init_devid_len;
		i += msg->port.init_devid_len;
	} else {
		free(port->init_devid, M_CTL);
		port->init_devid = NULL;
	}
	if (new) {
		if (ctl_port_register(port) != 0) {
			printf("%s: ctl_port_register() failed with error\n",
			    __func__);
		}
	}
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
			continue;
		mtx_lock(&lun->lun_lock);
		ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
		mtx_unlock(&lun->lun_lock);
	}
	mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	int iid;

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		printf("%s: Received IID for unknown port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	iid = msg->hdr.nexus.initid;
	if (port->wwpn_iid[iid].in_use != 0 &&
	    msg->iid.in_use == 0)
		ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON);
	port->wwpn_iid[iid].in_use = msg->iid.in_use;
	port->wwpn_iid[iid].wwpn = msg->iid.wwpn;
	free(port->wwpn_iid[iid].name, M_CTL);
	if (msg->iid.name_len) {
		port->wwpn_iid[iid].name = strndup(&msg->iid.data[0],
		    msg->iid.name_len, M_CTL);
	} else
		port->wwpn_iid[iid].name = NULL;
}

static void
ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{

	if (msg->login.version != CTL_HA_VERSION) {
		printf("CTL HA peers have different versions %d != %d\n",
		    msg->login.version, CTL_HA_VERSION);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.ha_mode != softc->ha_mode) {
		printf("CTL HA peers have different ha_mode %d != %d\n",
		    msg->login.ha_mode, softc->ha_mode);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.ha_id == softc->ha_id) {
		printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.max_luns != ctl_max_luns ||
	    msg->login.max_ports != ctl_max_ports ||
	    msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) {
		printf("CTL HA peers have different limits\n");
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
}

static void
ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	u_int i;
	uint32_t initidx, targ_lun;

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
		    msg->mode.page_code &&
		    lun->mode_pages.index[i].subpage == msg->mode.subpage)
			break;
	}
	if (i == CTL_NUM_MODE_PAGES) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	memcpy(lun->mode_pages.index[i].page_data, msg->mode.data,
	    lun->mode_pages.index[i].page_len);
	initidx = ctl_get_initindex(&msg->hdr.nexus);
	if (initidx != -1)
		ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
	mtx_unlock(&lun->lun_lock);
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
	struct ctl_softc *softc = control_softc;
	union ctl_io *io;
	struct ctl_prio *presio;
	ctl_ha_status isc_status;

	CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event));
	if (event == CTL_HA_EVT_MSG_RECV) {
		union ctl_ha_msg *msg, msgbuf;

		if (param > sizeof(msgbuf))
			msg = malloc(param, M_CTL, M_WAITOK);
		else
			msg = &msgbuf;
		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param,
		    M_WAITOK);
		if (isc_status != CTL_HA_STATUS_SUCCESS) {
			printf("%s: Error receiving message: %d\n",
			    __func__, isc_status);
			if (msg != &msgbuf)
				free(msg, M_CTL);
			return;
		}

		CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->hdr.msg_type));
		switch (msg->hdr.msg_type) {
		case CTL_MSG_SERIALIZE:
			io = ctl_alloc_io(softc->othersc_pool);
			ctl_zero_io(io);
			// populate ctsio from msg
			io->io_hdr.io_type = CTL_IO_SCSI;
			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
			io->io_hdr.remote_io = msg->hdr.original_sc;
			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
					    CTL_FLAG_IO_ACTIVE;
			/*
			 * If we're in serialization-only mode, we don't
			 * want to go through full done processing.  Thus
			 * the COPY flag.
			 *
			 * XXX KDM add another flag that is more specific.
			 */
			if (softc->ha_mode != CTL_HA_MODE_XFER)
				io->io_hdr.flags |= CTL_FLAG_INT_COPY;
			io->io_hdr.nexus = msg->hdr.nexus;
			io->scsiio.priority = msg->scsi.priority;
			io->scsiio.tag_num = msg->scsi.tag_num;
			io->scsiio.tag_type = msg->scsi.tag_type;
#ifdef CTL_TIME_IO
			io->io_hdr.start_time = time_uptime;
			getbinuptime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
			io->scsiio.cdb_len = msg->scsi.cdb_len;
			memcpy(io->scsiio.cdb, msg->scsi.cdb,
			    CTL_MAX_CDBLEN);
			if (softc->ha_mode == CTL_HA_MODE_XFER) {
				const struct ctl_cmd_entry *entry;

				entry = ctl_get_cmd_entry(&io->scsiio, NULL);
				io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
				io->io_hdr.flags |=
				    entry->flags & CTL_FLAG_DATA_MASK;
			}
			ctl_enqueue_isc(io);
			break;

		/* Performed on the Originating SC, XFER mode only */
		case CTL_MSG_DATAMOVE: {
			struct ctl_sg_entry *sgl;
			int i, j;

			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n", __func__);
				/* XXX KDM do something here */
				break;
			}
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			/*
			 * Keep track of this, we need to send it back over
			 * when the datamove is complete.
			 */
			io->io_hdr.remote_io = msg->hdr.serializing_sc;
			if (msg->hdr.status == CTL_SUCCESS)
				io->io_hdr.status = msg->hdr.status;

			if (msg->dt.sg_sequence == 0) {
#ifdef CTL_TIME_IO
				getbinuptime(&io->io_hdr.dma_start_bt);
#endif
				i = msg->dt.kern_sg_entries +
				    msg->dt.kern_data_len /
				    CTL_HA_DATAMOVE_SEGMENT + 1;
				sgl = malloc(sizeof(*sgl) * i, M_CTL,
				    M_WAITOK | M_ZERO);
				CTL_RSGL(io) = sgl;
				CTL_LSGL(io) = &sgl[msg->dt.kern_sg_entries];

				io->scsiio.kern_data_ptr = (uint8_t *)sgl;

				io->scsiio.kern_sg_entries =
				    msg->dt.kern_sg_entries;
				io->scsiio.rem_sg_entries =
				    msg->dt.kern_sg_entries;
				io->scsiio.kern_data_len =
				    msg->dt.kern_data_len;
				io->scsiio.kern_total_len =
				    msg->dt.kern_total_len;
				io->scsiio.kern_data_resid =
				    msg->dt.kern_data_resid;
				io->scsiio.kern_rel_offset =
				    msg->dt.kern_rel_offset;
				io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
				io->io_hdr.flags |= msg->dt.flags &
				    CTL_FLAG_BUS_ADDR;
			} else
				sgl = (struct ctl_sg_entry *)
				    io->scsiio.kern_data_ptr;

			for (i = msg->dt.sent_sg_entries, j = 0;
			     i < (msg->dt.sent_sg_entries +
			     msg->dt.cur_sg_entries); i++, j++) {
				sgl[i].addr = msg->dt.sg_list[j].addr;
				sgl[i].len = msg->dt.sg_list[j].len;
			}

			/*
			 * If this is the last piece of the I/O, we've got
			 * the full S/G list.  Queue processing in the thread.
			 * Otherwise wait for the next piece.
			 */
			if (msg->dt.sg_last != 0)
				ctl_enqueue_isc(io);
			break;
		}
		/* Performed on the Serializing (primary) SC, XFER mode only */
		case CTL_MSG_DATAMOVE_DONE: {
			if (msg->hdr.serializing_sc == NULL) {
				printf("%s: serializing_sc == NULL!\n",
				    __func__);
				/* XXX KDM now what? */
				break;
			}
			/*
			 * We grab the sense information here in case
			 * there was a failure, so we can return status
			 * back to the initiator.
			 */
			io = msg->hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.port_status = msg->scsi.port_status;
			io->scsiio.kern_data_resid = msg->scsi.kern_data_resid;
			if (msg->hdr.status != CTL_STATUS_NONE) {
				io->io_hdr.status = msg->hdr.status;
				io->scsiio.scsi_status = msg->scsi.scsi_status;
				io->scsiio.sense_len = msg->scsi.sense_len;
				memcpy(&io->scsiio.sense_data,
				    &msg->scsi.sense_data,
				    msg->scsi.sense_len);
				if (msg->hdr.status == CTL_SUCCESS)
					io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
			}
			ctl_enqueue_isc(io);
			break;
		}

		/* Performed on the Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n",
				    __func__);
				break;
			}
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.remote_io = msg->hdr.serializing_sc;
			ctl_enqueue_isc(io);
			break;

		/*
		 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode.
		 */
		case CTL_MSG_FINISH_IO:
			if (softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(softc, msg);
			else
				ctl_isc_handler_finish_ser_only(softc, msg);
			break;

		/* Performed on the Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				    __func__);
				break;
			}
			ctl_copy_sense_data(msg, io);
			/*
			 * IO should have already been cleaned up on other
			 * SC so clear this flag so we won't send a message
			 * back to finish the IO there.
			 */
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;

			/* io = msg->hdr.serializing_sc; */
			io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
			ctl_enqueue_isc(io);
			break;

		/* Handle resets sent from the other side */
		case CTL_MSG_MANAGE_TASKS: {
			struct ctl_taskio *taskio;
			taskio = (struct ctl_taskio *)ctl_alloc_io(
			    softc->othersc_pool);
			ctl_zero_io((union ctl_io *)taskio);
			taskio->io_hdr.io_type = CTL_IO_TASK;
			taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			taskio->io_hdr.nexus = msg->hdr.nexus;
			taskio->task_action = msg->task.task_action;
			taskio->tag_num = msg->task.tag_num;
			taskio->tag_type = msg->task.tag_type;
#ifdef CTL_TIME_IO
			taskio->io_hdr.start_time = time_uptime;
			getbinuptime(&taskio->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
			ctl_run_task((union ctl_io *)taskio);
			break;
		}
		/* Persistent Reserve action which needs attention */
		case CTL_MSG_PERS_ACTION:
			presio = (struct ctl_prio *)ctl_alloc_io(
			    softc->othersc_pool);
			ctl_zero_io((union ctl_io *)presio);
			presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
			presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			presio->io_hdr.nexus = msg->hdr.nexus;
			presio->pr_msg = msg->pr;
			ctl_enqueue_isc((union ctl_io *)presio);
			break;
		case CTL_MSG_UA:
			ctl_isc_ua(softc, msg, param);
			break;
		case CTL_MSG_PORT_SYNC:
			ctl_isc_port_sync(softc, msg, param);
			break;
		case CTL_MSG_LUN_SYNC:
			ctl_isc_lun_sync(softc, msg, param);
			break;
		case CTL_MSG_IID_SYNC:
			ctl_isc_iid_sync(softc, msg, param);
			break;
		case CTL_MSG_LOGIN:
			ctl_isc_login(softc, msg, param);
			break;
		case CTL_MSG_MODE_SYNC:
			ctl_isc_mode_sync(softc, msg, param);
			break;
		default:
			printf("Received HA message of unknown type %d\n",
			    msg->hdr.msg_type);
			ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
			break;
		}
		if (msg != &msgbuf)
			free(msg, M_CTL);
	} else if (event == CTL_HA_EVT_LINK_CHANGE) {
		printf("CTL: HA link status changed from %d to %d\n",
		    softc->ha_link, param);
		if (param == softc->ha_link)
			return;
		if (softc->ha_link == CTL_HA_LINK_ONLINE) {
			softc->ha_link = param;
			ctl_isc_ha_link_down(softc);
		} else {
			softc->ha_link = param;
			if (softc->ha_link == CTL_HA_LINK_ONLINE)
				ctl_isc_ha_link_up(softc);
		}
		return;
	} else {
		printf("ctl_isc_event_handler: Unknown event %d\n", event);
		return;
	}
}
softc->init_max) 1739 return; 1740 mtx_assert(&lun->lun_lock, MA_OWNED); 1741 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1742 if (pu == NULL) 1743 return; 1744 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; 1745 } 1746 1747 void 1748 ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua) 1749 { 1750 int i; 1751 1752 mtx_assert(&lun->lun_lock, MA_OWNED); 1753 if (lun->pending_ua[port] == NULL) 1754 return; 1755 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1756 if (port * CTL_MAX_INIT_PER_PORT + i == except) 1757 continue; 1758 lun->pending_ua[port][i] |= ua; 1759 } 1760 } 1761 1762 void 1763 ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1764 { 1765 struct ctl_softc *softc = lun->ctl_softc; 1766 int i; 1767 1768 mtx_assert(&lun->lun_lock, MA_OWNED); 1769 for (i = softc->port_min; i < softc->port_max; i++) 1770 ctl_est_ua_port(lun, i, except, ua); 1771 } 1772 1773 void 1774 ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1775 { 1776 struct ctl_softc *softc = lun->ctl_softc; 1777 ctl_ua_type *pu; 1778 1779 if (initidx < softc->init_min || initidx >= softc->init_max) 1780 return; 1781 mtx_assert(&lun->lun_lock, MA_OWNED); 1782 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1783 if (pu == NULL) 1784 return; 1785 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; 1786 } 1787 1788 void 1789 ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1790 { 1791 struct ctl_softc *softc = lun->ctl_softc; 1792 int i, j; 1793 1794 mtx_assert(&lun->lun_lock, MA_OWNED); 1795 for (i = softc->port_min; i < softc->port_max; i++) { 1796 if (lun->pending_ua[i] == NULL) 1797 continue; 1798 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1799 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1800 continue; 1801 lun->pending_ua[i][j] &= ~ua; 1802 } 1803 } 1804 } 1805 1806 void 1807 ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, 1808 ctl_ua_type ua_type) 1809 { 1810 struct ctl_lun *lun; 1811 1812 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); 1813 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 1814 mtx_lock(&lun->lun_lock); 1815 ctl_clr_ua(lun, initidx, ua_type); 1816 mtx_unlock(&lun->lun_lock); 1817 } 1818 } 1819 1820 static int 1821 ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS) 1822 { 1823 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1824 struct ctl_lun *lun; 1825 struct ctl_lun_req ireq; 1826 int error, value; 1827 1828 value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 
0 : 1; 1829 error = sysctl_handle_int(oidp, &value, 0, req); 1830 if ((error != 0) || (req->newptr == NULL)) 1831 return (error); 1832 1833 mtx_lock(&softc->ctl_lock); 1834 if (value == 0) 1835 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1836 else 1837 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1838 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1839 mtx_unlock(&softc->ctl_lock); 1840 bzero(&ireq, sizeof(ireq)); 1841 ireq.reqtype = CTL_LUNREQ_MODIFY; 1842 ireq.reqdata.modify.lun_id = lun->lun; 1843 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, 1844 curthread); 1845 if (ireq.status != CTL_LUN_OK) { 1846 printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", 1847 __func__, ireq.status, ireq.error_str); 1848 } 1849 mtx_lock(&softc->ctl_lock); 1850 } 1851 mtx_unlock(&softc->ctl_lock); 1852 return (0); 1853 } 1854 1855 static int 1856 ctl_init(void) 1857 { 1858 struct make_dev_args args; 1859 struct ctl_softc *softc; 1860 int i, error; 1861 1862 softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1863 M_WAITOK | M_ZERO); 1864 1865 make_dev_args_init(&args); 1866 args.mda_devsw = &ctl_cdevsw; 1867 args.mda_uid = UID_ROOT; 1868 args.mda_gid = GID_OPERATOR; 1869 args.mda_mode = 0600; 1870 args.mda_si_drv1 = softc; 1871 args.mda_si_drv2 = NULL; 1872 error = make_dev_s(&args, &softc->dev, "cam/ctl"); 1873 if (error != 0) { 1874 free(softc, M_DEVBUF); 1875 control_softc = NULL; 1876 return (error); 1877 } 1878 1879 sysctl_ctx_init(&softc->sysctl_ctx); 1880 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1881 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1882 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Target Layer"); 1883 1884 if (softc->sysctl_tree == NULL) { 1885 printf("%s: unable to allocate sysctl tree\n", __func__); 1886 destroy_dev(softc->dev); 1887 free(softc, M_DEVBUF); 1888 control_softc = NULL; 1889 return (ENOMEM); 1890 } 1891 1892 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1893 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1894 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1895 softc->flags = 0; 1896 1897 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1898 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, 1899 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); 1900 1901 if (ctl_max_luns <= 0 || powerof2(ctl_max_luns) == 0) { 1902 printf("Bad value %d for kern.cam.ctl.max_luns, must be a power of two, using %d\n", 1903 ctl_max_luns, CTL_DEFAULT_MAX_LUNS); 1904 ctl_max_luns = CTL_DEFAULT_MAX_LUNS; 1905 } 1906 softc->ctl_luns = malloc(sizeof(struct ctl_lun *) * ctl_max_luns, 1907 M_DEVBUF, M_WAITOK | M_ZERO); 1908 softc->ctl_lun_mask = malloc(sizeof(uint32_t) * 1909 ((ctl_max_luns + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); 1910 if (ctl_max_ports <= 0 || powerof2(ctl_max_ports) == 0) { 1911 printf("Bad value %d for kern.cam.ctl.max_ports, must be a power of two, using %d\n", 1912 ctl_max_ports, CTL_DEFAULT_MAX_PORTS); 1913 ctl_max_ports = CTL_DEFAULT_MAX_PORTS; 1914 } 1915 softc->ctl_port_mask = malloc(sizeof(uint32_t) * 1916 ((ctl_max_ports + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); 1917 softc->ctl_ports = malloc(sizeof(struct ctl_port *) * ctl_max_ports, 1918 M_DEVBUF, M_WAITOK | M_ZERO); 1919 1920 /* 1921 * In Copan's HA scheme, the "master" and "slave" roles are 1922 * figured out through the slot the controller is in. Although it 1923 * is an active/active system, someone has to be in charge. 
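 * In this driver the role is instead derived from the kern.cam.ctl.ha_id
 * tunable read just below: 0 (or an out-of-range value) disables HA and
 * marks this head "is_single", while 1..NUM_HA_SHELVES selects which
 * slice of the port and initiator index space this head owns, roughly:
 *
 *	port_cnt = ctl_max_ports / NUM_HA_SHELVES;
 *	port_min = (ha_id - 1) * port_cnt;
 *	init_min = port_min * CTL_MAX_INIT_PER_PORT;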
1924 */ 1925 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1926 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1927 "HA head ID (0 - no HA)"); 1928 if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) { 1929 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1930 softc->is_single = 1; 1931 softc->port_cnt = ctl_max_ports; 1932 softc->port_min = 0; 1933 } else { 1934 softc->port_cnt = ctl_max_ports / NUM_HA_SHELVES; 1935 softc->port_min = (softc->ha_id - 1) * softc->port_cnt; 1936 } 1937 softc->port_max = softc->port_min + softc->port_cnt; 1938 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; 1939 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; 1940 1941 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1942 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, 1943 "HA link state (0 - offline, 1 - unknown, 2 - online)"); 1944 1945 STAILQ_INIT(&softc->lun_list); 1946 STAILQ_INIT(&softc->fe_list); 1947 STAILQ_INIT(&softc->port_list); 1948 STAILQ_INIT(&softc->be_list); 1949 ctl_tpc_init(softc); 1950 1951 if (worker_threads <= 0) 1952 worker_threads = max(1, mp_ncpus / 4); 1953 if (worker_threads > CTL_MAX_THREADS) 1954 worker_threads = CTL_MAX_THREADS; 1955 1956 for (i = 0; i < worker_threads; i++) { 1957 struct ctl_thread *thr = &softc->threads[i]; 1958 1959 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); 1960 thr->ctl_softc = softc; 1961 STAILQ_INIT(&thr->incoming_queue); 1962 STAILQ_INIT(&thr->rtr_queue); 1963 STAILQ_INIT(&thr->done_queue); 1964 STAILQ_INIT(&thr->isc_queue); 1965 1966 error = kproc_kthread_add(ctl_work_thread, thr, 1967 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); 1968 if (error != 0) { 1969 printf("error creating CTL work thread!\n"); 1970 return (error); 1971 } 1972 } 1973 error = kproc_kthread_add(ctl_thresh_thread, softc, 1974 &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh"); 1975 if (error != 0) { 1976 printf("error creating CTL threshold thread!\n"); 1977 return (error); 1978 } 1979 1980 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), 1981 OID_AUTO, "ha_role", 1982 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 1983 softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); 1984 1985 if (softc->is_single == 0) { 1986 if (ctl_frontend_register(&ha_frontend) != 0) 1987 softc->is_single = 1; 1988 } 1989 return (0); 1990 } 1991 1992 static int 1993 ctl_shutdown(void) 1994 { 1995 struct ctl_softc *softc = control_softc; 1996 int i; 1997 1998 if (softc->is_single == 0) 1999 ctl_frontend_deregister(&ha_frontend); 2000 2001 destroy_dev(softc->dev); 2002 2003 /* Shutdown CTL threads. 
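 * The handshake is simple: set softc->shutdown, wake each worker and
 * pause() until its thr->thread pointer goes NULL, then destroy its
 * queue lock; the threshold thread is drained the same way before the
 * UMA zone and the CTL mutex are torn down.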
*/ 2004 softc->shutdown = 1; 2005 for (i = 0; i < worker_threads; i++) { 2006 struct ctl_thread *thr = &softc->threads[i]; 2007 while (thr->thread != NULL) { 2008 wakeup(thr); 2009 if (thr->thread != NULL) 2010 pause("CTL thr shutdown", 1); 2011 } 2012 mtx_destroy(&thr->queue_lock); 2013 } 2014 while (softc->thresh_thread != NULL) { 2015 wakeup(softc->thresh_thread); 2016 if (softc->thresh_thread != NULL) 2017 pause("CTL thr shutdown", 1); 2018 } 2019 2020 ctl_tpc_shutdown(softc); 2021 uma_zdestroy(softc->io_zone); 2022 mtx_destroy(&softc->ctl_lock); 2023 2024 free(softc->ctl_luns, M_DEVBUF); 2025 free(softc->ctl_lun_mask, M_DEVBUF); 2026 free(softc->ctl_port_mask, M_DEVBUF); 2027 free(softc->ctl_ports, M_DEVBUF); 2028 2029 sysctl_ctx_free(&softc->sysctl_ctx); 2030 2031 free(softc, M_DEVBUF); 2032 control_softc = NULL; 2033 return (0); 2034 } 2035 2036 static int 2037 ctl_module_event_handler(module_t mod, int what, void *arg) 2038 { 2039 2040 switch (what) { 2041 case MOD_LOAD: 2042 return (ctl_init()); 2043 case MOD_UNLOAD: 2044 return (ctl_shutdown()); 2045 default: 2046 return (EOPNOTSUPP); 2047 } 2048 } 2049 2050 /* 2051 * XXX KDM should we do some access checks here? Bump a reference count to 2052 * prevent a CTL module from being unloaded while someone has it open? 2053 */ 2054 static int 2055 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 2056 { 2057 return (0); 2058 } 2059 2060 static int 2061 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 2062 { 2063 return (0); 2064 } 2065 2066 /* 2067 * Remove an initiator by port number and initiator ID. 2068 * Returns 0 for success, -1 for failure. 2069 */ 2070 int 2071 ctl_remove_initiator(struct ctl_port *port, int iid) 2072 { 2073 struct ctl_softc *softc = port->ctl_softc; 2074 int last; 2075 2076 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2077 2078 if (iid > CTL_MAX_INIT_PER_PORT) { 2079 printf("%s: initiator ID %u > maximun %u!\n", 2080 __func__, iid, CTL_MAX_INIT_PER_PORT); 2081 return (-1); 2082 } 2083 2084 mtx_lock(&softc->ctl_lock); 2085 last = (--port->wwpn_iid[iid].in_use == 0); 2086 port->wwpn_iid[iid].last_use = time_uptime; 2087 mtx_unlock(&softc->ctl_lock); 2088 if (last) 2089 ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); 2090 ctl_isc_announce_iid(port, iid); 2091 2092 return (0); 2093 } 2094 2095 /* 2096 * Add an initiator to the initiator map. 2097 * Returns iid for success, < 0 for failure. 
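 * When the caller passes a negative iid the slot is picked here: first
 * by matching an existing WWPN or name, then the first completely
 * unused slot, and finally the least recently used slot that is not
 * currently in use.  A return of -1 means the requested iid was out of
 * range, -2 that no slot could be found; in both failure cases the
 * passed-in name is consumed (freed).  A hypothetical frontend login
 * path would therefore look roughly like:
 *
 *	iid = ctl_add_initiator(port, -1, wwpn, name);
 *	if (iid < 0)
 *		return (error);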
2098 */ 2099 int 2100 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 2101 { 2102 struct ctl_softc *softc = port->ctl_softc; 2103 time_t best_time; 2104 int i, best; 2105 2106 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2107 2108 if (iid >= CTL_MAX_INIT_PER_PORT) { 2109 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 2110 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 2111 free(name, M_CTL); 2112 return (-1); 2113 } 2114 2115 mtx_lock(&softc->ctl_lock); 2116 2117 if (iid < 0 && (wwpn != 0 || name != NULL)) { 2118 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2119 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 2120 iid = i; 2121 break; 2122 } 2123 if (name != NULL && port->wwpn_iid[i].name != NULL && 2124 strcmp(name, port->wwpn_iid[i].name) == 0) { 2125 iid = i; 2126 break; 2127 } 2128 } 2129 } 2130 2131 if (iid < 0) { 2132 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2133 if (port->wwpn_iid[i].in_use == 0 && 2134 port->wwpn_iid[i].wwpn == 0 && 2135 port->wwpn_iid[i].name == NULL) { 2136 iid = i; 2137 break; 2138 } 2139 } 2140 } 2141 2142 if (iid < 0) { 2143 best = -1; 2144 best_time = INT32_MAX; 2145 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2146 if (port->wwpn_iid[i].in_use == 0) { 2147 if (port->wwpn_iid[i].last_use < best_time) { 2148 best = i; 2149 best_time = port->wwpn_iid[i].last_use; 2150 } 2151 } 2152 } 2153 iid = best; 2154 } 2155 2156 if (iid < 0) { 2157 mtx_unlock(&softc->ctl_lock); 2158 free(name, M_CTL); 2159 return (-2); 2160 } 2161 2162 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 2163 /* 2164 * This is not an error yet. 2165 */ 2166 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 2167 #if 0 2168 printf("%s: port %d iid %u WWPN %#jx arrived" 2169 " again\n", __func__, port->targ_port, 2170 iid, (uintmax_t)wwpn); 2171 #endif 2172 goto take; 2173 } 2174 if (name != NULL && port->wwpn_iid[iid].name != NULL && 2175 strcmp(name, port->wwpn_iid[iid].name) == 0) { 2176 #if 0 2177 printf("%s: port %d iid %u name '%s' arrived" 2178 " again\n", __func__, port->targ_port, 2179 iid, name); 2180 #endif 2181 goto take; 2182 } 2183 2184 /* 2185 * This is an error, but what do we do about it? The 2186 * driver is telling us we have a new WWPN for this 2187 * initiator ID, so we pretty much need to use it. 
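 * In that case we log the conflict below and still fall through to
 * the "take:" label, dropping the stale name, recording the new
 * WWPN/name and bumping in_use just as for a normal login.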
2188 */ 2189 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 2190 " but WWPN %#jx '%s' is still at that address\n", 2191 __func__, port->targ_port, iid, wwpn, name, 2192 (uintmax_t)port->wwpn_iid[iid].wwpn, 2193 port->wwpn_iid[iid].name); 2194 } 2195 take: 2196 free(port->wwpn_iid[iid].name, M_CTL); 2197 port->wwpn_iid[iid].name = name; 2198 port->wwpn_iid[iid].wwpn = wwpn; 2199 port->wwpn_iid[iid].in_use++; 2200 mtx_unlock(&softc->ctl_lock); 2201 ctl_isc_announce_iid(port, iid); 2202 2203 return (iid); 2204 } 2205 2206 static int 2207 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 2208 { 2209 int len; 2210 2211 switch (port->port_type) { 2212 case CTL_PORT_FC: 2213 { 2214 struct scsi_transportid_fcp *id = 2215 (struct scsi_transportid_fcp *)buf; 2216 if (port->wwpn_iid[iid].wwpn == 0) 2217 return (0); 2218 memset(id, 0, sizeof(*id)); 2219 id->format_protocol = SCSI_PROTO_FC; 2220 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 2221 return (sizeof(*id)); 2222 } 2223 case CTL_PORT_ISCSI: 2224 { 2225 struct scsi_transportid_iscsi_port *id = 2226 (struct scsi_transportid_iscsi_port *)buf; 2227 if (port->wwpn_iid[iid].name == NULL) 2228 return (0); 2229 memset(id, 0, 256); 2230 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 2231 SCSI_PROTO_ISCSI; 2232 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 2233 len = roundup2(min(len, 252), 4); 2234 scsi_ulto2b(len, id->additional_length); 2235 return (sizeof(*id) + len); 2236 } 2237 case CTL_PORT_SAS: 2238 { 2239 struct scsi_transportid_sas *id = 2240 (struct scsi_transportid_sas *)buf; 2241 if (port->wwpn_iid[iid].wwpn == 0) 2242 return (0); 2243 memset(id, 0, sizeof(*id)); 2244 id->format_protocol = SCSI_PROTO_SAS; 2245 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 2246 return (sizeof(*id)); 2247 } 2248 default: 2249 { 2250 struct scsi_transportid_spi *id = 2251 (struct scsi_transportid_spi *)buf; 2252 memset(id, 0, sizeof(*id)); 2253 id->format_protocol = SCSI_PROTO_SPI; 2254 scsi_ulto2b(iid, id->scsi_addr); 2255 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 2256 return (sizeof(*id)); 2257 } 2258 } 2259 } 2260 2261 /* 2262 * Serialize a command that went down the "wrong" side, and so was sent to 2263 * this controller for execution. The logic is a little different than the 2264 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 2265 * sent back to the other side, but in the success case, we execute the 2266 * command on this side (XFER mode) or tell the other side to execute it 2267 * (SER_ONLY mode). 2268 */ 2269 static void 2270 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 2271 { 2272 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2273 struct ctl_port *port = CTL_PORT(ctsio); 2274 union ctl_ha_msg msg_info; 2275 struct ctl_lun *lun; 2276 const struct ctl_cmd_entry *entry; 2277 union ctl_io *bio; 2278 uint32_t targ_lun; 2279 2280 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 2281 2282 /* Make sure that we know about this port. */ 2283 if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) { 2284 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2285 /*retry_count*/ 1); 2286 goto badjuju; 2287 } 2288 2289 /* Make sure that we know about this LUN. */ 2290 mtx_lock(&softc->ctl_lock); 2291 if (targ_lun >= ctl_max_luns || 2292 (lun = softc->ctl_luns[targ_lun]) == NULL) { 2293 mtx_unlock(&softc->ctl_lock); 2294 2295 /* 2296 * The other node would not send this request to us unless 2297 * received announce that we are primary node for this LUN. 
2298 * If this LUN does not exist now, it is probably result of 2299 * a race, so respond to initiator in the most opaque way. 2300 */ 2301 ctl_set_busy(ctsio); 2302 goto badjuju; 2303 } 2304 mtx_lock(&lun->lun_lock); 2305 mtx_unlock(&softc->ctl_lock); 2306 2307 /* 2308 * If the LUN is invalid, pretend that it doesn't exist. 2309 * It will go away as soon as all pending I/Os completed. 2310 */ 2311 if (lun->flags & CTL_LUN_DISABLED) { 2312 mtx_unlock(&lun->lun_lock); 2313 ctl_set_busy(ctsio); 2314 goto badjuju; 2315 } 2316 2317 entry = ctl_get_cmd_entry(ctsio, NULL); 2318 ctsio->seridx = entry->seridx; 2319 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 2320 mtx_unlock(&lun->lun_lock); 2321 goto badjuju; 2322 } 2323 2324 CTL_LUN(ctsio) = lun; 2325 CTL_BACKEND_LUN(ctsio) = lun->be_lun; 2326 2327 /* 2328 * Every I/O goes into the OOA queue for a 2329 * particular LUN, and stays there until completion. 2330 */ 2331 #ifdef CTL_TIME_IO 2332 if (LIST_EMPTY(&lun->ooa_queue)) 2333 lun->idle_time += getsbinuptime() - lun->last_busy; 2334 #endif 2335 LIST_INSERT_HEAD(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2336 2337 bio = (union ctl_io *)LIST_NEXT(&ctsio->io_hdr, ooa_links); 2338 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) { 2339 case CTL_ACTION_PASS: 2340 case CTL_ACTION_SKIP: 2341 if (softc->ha_mode == CTL_HA_MODE_XFER) { 2342 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 2343 ctl_enqueue_rtr((union ctl_io *)ctsio); 2344 mtx_unlock(&lun->lun_lock); 2345 } else { 2346 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 2347 mtx_unlock(&lun->lun_lock); 2348 2349 /* send msg back to other side */ 2350 msg_info.hdr.original_sc = ctsio->io_hdr.remote_io; 2351 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 2352 msg_info.hdr.msg_type = CTL_MSG_R2R; 2353 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2354 sizeof(msg_info.hdr), M_WAITOK); 2355 } 2356 break; 2357 case CTL_ACTION_BLOCK: 2358 ctsio->io_hdr.blocker = bio; 2359 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, 2360 blocked_links); 2361 mtx_unlock(&lun->lun_lock); 2362 break; 2363 case CTL_ACTION_OVERLAP: 2364 LIST_REMOVE(&ctsio->io_hdr, ooa_links); 2365 mtx_unlock(&lun->lun_lock); 2366 ctl_set_overlapped_cmd(ctsio); 2367 goto badjuju; 2368 case CTL_ACTION_OVERLAP_TAG: 2369 LIST_REMOVE(&ctsio->io_hdr, ooa_links); 2370 mtx_unlock(&lun->lun_lock); 2371 ctl_set_overlapped_tag(ctsio, ctsio->tag_num); 2372 badjuju: 2373 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 2374 msg_info.hdr.original_sc = ctsio->io_hdr.remote_io; 2375 msg_info.hdr.serializing_sc = NULL; 2376 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 2377 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2378 sizeof(msg_info.scsi), M_WAITOK); 2379 ctl_free_io((union ctl_io *)ctsio); 2380 break; 2381 default: 2382 __assert_unreachable(); 2383 } 2384 } 2385 2386 /* 2387 * Returns 0 for success, errno for failure. 
2388 */ 2389 static void 2390 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 2391 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 2392 { 2393 struct ctl_io_hdr *ioh; 2394 2395 mtx_lock(&lun->lun_lock); 2396 ioh = LIST_FIRST(&lun->ooa_queue); 2397 if (ioh == NULL) { 2398 mtx_unlock(&lun->lun_lock); 2399 return; 2400 } 2401 while (LIST_NEXT(ioh, ooa_links) != NULL) 2402 ioh = LIST_NEXT(ioh, ooa_links); 2403 for ( ; ioh; ioh = LIST_PREV(ioh, &lun->ooa_queue, ctl_io_hdr, ooa_links)) { 2404 union ctl_io *io = (union ctl_io *)ioh; 2405 struct ctl_ooa_entry *entry; 2406 2407 /* 2408 * If we've got more than we can fit, just count the 2409 * remaining entries. 2410 */ 2411 if (*cur_fill_num >= ooa_hdr->alloc_num) { 2412 (*cur_fill_num)++; 2413 continue; 2414 } 2415 2416 entry = &kern_entries[*cur_fill_num]; 2417 2418 entry->tag_num = io->scsiio.tag_num; 2419 entry->lun_num = lun->lun; 2420 #ifdef CTL_TIME_IO 2421 entry->start_bt = io->io_hdr.start_bt; 2422 #endif 2423 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 2424 entry->cdb_len = io->scsiio.cdb_len; 2425 if (io->io_hdr.blocker != NULL) 2426 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 2427 2428 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 2429 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 2430 2431 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2432 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2433 2434 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2435 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2436 2437 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2438 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2439 2440 if (io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) 2441 entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_QUEUED; 2442 2443 if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) 2444 entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_SENT; 2445 (*cur_fill_num)++; 2446 } 2447 mtx_unlock(&lun->lun_lock); 2448 } 2449 2450 /* 2451 * Escape characters that are illegal or not recommended in XML. 
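 * Only '&', '<' and '>' are rewritten to their escaped forms; any
 * other byte is copied through unchanged.  The walk stops at the
 * first NUL, after size bytes, or at the first sbuf error, and the
 * last sbuf return value is handed back to the caller.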
2452 */ 2453 int 2454 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 2455 { 2456 char *end = str + size; 2457 int retval; 2458 2459 retval = 0; 2460 2461 for (; *str && str < end; str++) { 2462 switch (*str) { 2463 case '&': 2464 retval = sbuf_printf(sb, "&"); 2465 break; 2466 case '>': 2467 retval = sbuf_printf(sb, ">"); 2468 break; 2469 case '<': 2470 retval = sbuf_printf(sb, "<"); 2471 break; 2472 default: 2473 retval = sbuf_putc(sb, *str); 2474 break; 2475 } 2476 2477 if (retval != 0) 2478 break; 2479 } 2480 2481 return (retval); 2482 } 2483 2484 static void 2485 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2486 { 2487 struct scsi_vpd_id_descriptor *desc; 2488 int i; 2489 2490 if (id == NULL || id->len < 4) 2491 return; 2492 desc = (struct scsi_vpd_id_descriptor *)id->data; 2493 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2494 case SVPD_ID_TYPE_T10: 2495 sbuf_printf(sb, "t10."); 2496 break; 2497 case SVPD_ID_TYPE_EUI64: 2498 sbuf_printf(sb, "eui."); 2499 break; 2500 case SVPD_ID_TYPE_NAA: 2501 sbuf_printf(sb, "naa."); 2502 break; 2503 case SVPD_ID_TYPE_SCSI_NAME: 2504 break; 2505 } 2506 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2507 case SVPD_ID_CODESET_BINARY: 2508 for (i = 0; i < desc->length; i++) 2509 sbuf_printf(sb, "%02x", desc->identifier[i]); 2510 break; 2511 case SVPD_ID_CODESET_ASCII: 2512 sbuf_printf(sb, "%.*s", (int)desc->length, 2513 (char *)desc->identifier); 2514 break; 2515 case SVPD_ID_CODESET_UTF8: 2516 sbuf_printf(sb, "%s", (char *)desc->identifier); 2517 break; 2518 } 2519 } 2520 2521 static int 2522 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2523 struct thread *td) 2524 { 2525 struct ctl_softc *softc = dev->si_drv1; 2526 struct ctl_port *port; 2527 struct ctl_lun *lun; 2528 int retval; 2529 2530 retval = 0; 2531 2532 switch (cmd) { 2533 case CTL_IO: 2534 retval = ctl_ioctl_io(dev, cmd, addr, flag, td); 2535 break; 2536 case CTL_ENABLE_PORT: 2537 case CTL_DISABLE_PORT: 2538 case CTL_SET_PORT_WWNS: { 2539 struct ctl_port *port; 2540 struct ctl_port_entry *entry; 2541 2542 entry = (struct ctl_port_entry *)addr; 2543 2544 mtx_lock(&softc->ctl_lock); 2545 STAILQ_FOREACH(port, &softc->port_list, links) { 2546 int action, done; 2547 2548 if (port->targ_port < softc->port_min || 2549 port->targ_port >= softc->port_max) 2550 continue; 2551 2552 action = 0; 2553 done = 0; 2554 if ((entry->port_type == CTL_PORT_NONE) 2555 && (entry->targ_port == port->targ_port)) { 2556 /* 2557 * If the user only wants to enable or 2558 * disable or set WWNs on a specific port, 2559 * do the operation and we're done. 2560 */ 2561 action = 1; 2562 done = 1; 2563 } else if (entry->port_type & port->port_type) { 2564 /* 2565 * Compare the user's type mask with the 2566 * particular frontend type to see if we 2567 * have a match. 2568 */ 2569 action = 1; 2570 done = 0; 2571 2572 /* 2573 * Make sure the user isn't trying to set 2574 * WWNs on multiple ports at the same time. 2575 */ 2576 if (cmd == CTL_SET_PORT_WWNS) { 2577 printf("%s: Can't set WWNs on " 2578 "multiple ports\n", __func__); 2579 retval = EINVAL; 2580 break; 2581 } 2582 } 2583 if (action == 0) 2584 continue; 2585 2586 /* 2587 * XXX KDM we have to drop the lock here, because 2588 * the online/offline operations can potentially 2589 * block. 
We need to reference count the frontends 2590 * so they can't go away, 2591 */ 2592 if (cmd == CTL_ENABLE_PORT) { 2593 mtx_unlock(&softc->ctl_lock); 2594 ctl_port_online(port); 2595 mtx_lock(&softc->ctl_lock); 2596 } else if (cmd == CTL_DISABLE_PORT) { 2597 mtx_unlock(&softc->ctl_lock); 2598 ctl_port_offline(port); 2599 mtx_lock(&softc->ctl_lock); 2600 } else if (cmd == CTL_SET_PORT_WWNS) { 2601 ctl_port_set_wwns(port, 2602 (entry->flags & CTL_PORT_WWNN_VALID) ? 2603 1 : 0, entry->wwnn, 2604 (entry->flags & CTL_PORT_WWPN_VALID) ? 2605 1 : 0, entry->wwpn); 2606 } 2607 if (done != 0) 2608 break; 2609 } 2610 mtx_unlock(&softc->ctl_lock); 2611 break; 2612 } 2613 case CTL_GET_OOA: { 2614 struct ctl_ooa *ooa_hdr; 2615 struct ctl_ooa_entry *entries; 2616 uint32_t cur_fill_num; 2617 2618 ooa_hdr = (struct ctl_ooa *)addr; 2619 2620 if ((ooa_hdr->alloc_len == 0) 2621 || (ooa_hdr->alloc_num == 0)) { 2622 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2623 "must be non-zero\n", __func__, 2624 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2625 retval = EINVAL; 2626 break; 2627 } 2628 2629 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2630 sizeof(struct ctl_ooa_entry))) { 2631 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2632 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2633 __func__, ooa_hdr->alloc_len, 2634 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2635 retval = EINVAL; 2636 break; 2637 } 2638 2639 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2640 if (entries == NULL) { 2641 printf("%s: could not allocate %d bytes for OOA " 2642 "dump\n", __func__, ooa_hdr->alloc_len); 2643 retval = ENOMEM; 2644 break; 2645 } 2646 2647 mtx_lock(&softc->ctl_lock); 2648 if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 && 2649 (ooa_hdr->lun_num >= ctl_max_luns || 2650 softc->ctl_luns[ooa_hdr->lun_num] == NULL)) { 2651 mtx_unlock(&softc->ctl_lock); 2652 free(entries, M_CTL); 2653 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2654 __func__, (uintmax_t)ooa_hdr->lun_num); 2655 retval = EINVAL; 2656 break; 2657 } 2658 2659 cur_fill_num = 0; 2660 2661 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2662 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2663 ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2664 ooa_hdr, entries); 2665 } 2666 } else { 2667 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2668 ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, 2669 entries); 2670 } 2671 mtx_unlock(&softc->ctl_lock); 2672 2673 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2674 ooa_hdr->fill_len = ooa_hdr->fill_num * 2675 sizeof(struct ctl_ooa_entry); 2676 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2677 if (retval != 0) { 2678 printf("%s: error copying out %d bytes for OOA dump\n", 2679 __func__, ooa_hdr->fill_len); 2680 } 2681 2682 getbinuptime(&ooa_hdr->cur_bt); 2683 2684 if (cur_fill_num > ooa_hdr->alloc_num) { 2685 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2686 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2687 } else { 2688 ooa_hdr->dropped_num = 0; 2689 ooa_hdr->status = CTL_OOA_OK; 2690 } 2691 2692 free(entries, M_CTL); 2693 break; 2694 } 2695 case CTL_DELAY_IO: { 2696 struct ctl_io_delay_info *delay_info; 2697 2698 delay_info = (struct ctl_io_delay_info *)addr; 2699 2700 #ifdef CTL_IO_DELAY 2701 mtx_lock(&softc->ctl_lock); 2702 if (delay_info->lun_id >= ctl_max_luns || 2703 (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) { 2704 mtx_unlock(&softc->ctl_lock); 2705 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2706 break; 2707 } 2708 
mtx_lock(&lun->lun_lock); 2709 mtx_unlock(&softc->ctl_lock); 2710 delay_info->status = CTL_DELAY_STATUS_OK; 2711 switch (delay_info->delay_type) { 2712 case CTL_DELAY_TYPE_CONT: 2713 case CTL_DELAY_TYPE_ONESHOT: 2714 break; 2715 default: 2716 delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE; 2717 break; 2718 } 2719 switch (delay_info->delay_loc) { 2720 case CTL_DELAY_LOC_DATAMOVE: 2721 lun->delay_info.datamove_type = delay_info->delay_type; 2722 lun->delay_info.datamove_delay = delay_info->delay_secs; 2723 break; 2724 case CTL_DELAY_LOC_DONE: 2725 lun->delay_info.done_type = delay_info->delay_type; 2726 lun->delay_info.done_delay = delay_info->delay_secs; 2727 break; 2728 default: 2729 delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; 2730 break; 2731 } 2732 mtx_unlock(&lun->lun_lock); 2733 #else 2734 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2735 #endif /* CTL_IO_DELAY */ 2736 break; 2737 } 2738 case CTL_ERROR_INJECT: { 2739 struct ctl_error_desc *err_desc, *new_err_desc; 2740 2741 err_desc = (struct ctl_error_desc *)addr; 2742 2743 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2744 M_WAITOK | M_ZERO); 2745 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2746 2747 mtx_lock(&softc->ctl_lock); 2748 if (err_desc->lun_id >= ctl_max_luns || 2749 (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) { 2750 mtx_unlock(&softc->ctl_lock); 2751 free(new_err_desc, M_CTL); 2752 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2753 __func__, (uintmax_t)err_desc->lun_id); 2754 retval = EINVAL; 2755 break; 2756 } 2757 mtx_lock(&lun->lun_lock); 2758 mtx_unlock(&softc->ctl_lock); 2759 2760 /* 2761 * We could do some checking here to verify the validity 2762 * of the request, but given the complexity of error 2763 * injection requests, the checking logic would be fairly 2764 * complex. 2765 * 2766 * For now, if the request is invalid, it just won't get 2767 * executed and might get deleted. 2768 */ 2769 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2770 2771 /* 2772 * XXX KDM check to make sure the serial number is unique, 2773 * in case we somehow manage to wrap. That shouldn't 2774 * happen for a very long time, but it's the right thing to 2775 * do. 
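 * The serial number is per-LUN (lun->error_serial) and is copied back
 * into the caller's descriptor, so the same value can later be used to
 * remove this injection with CTL_ERROR_INJECT_DELETE.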
2776 */ 2777 new_err_desc->serial = lun->error_serial; 2778 err_desc->serial = lun->error_serial; 2779 lun->error_serial++; 2780 2781 mtx_unlock(&lun->lun_lock); 2782 break; 2783 } 2784 case CTL_ERROR_INJECT_DELETE: { 2785 struct ctl_error_desc *delete_desc, *desc, *desc2; 2786 int delete_done; 2787 2788 delete_desc = (struct ctl_error_desc *)addr; 2789 delete_done = 0; 2790 2791 mtx_lock(&softc->ctl_lock); 2792 if (delete_desc->lun_id >= ctl_max_luns || 2793 (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) { 2794 mtx_unlock(&softc->ctl_lock); 2795 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2796 __func__, (uintmax_t)delete_desc->lun_id); 2797 retval = EINVAL; 2798 break; 2799 } 2800 mtx_lock(&lun->lun_lock); 2801 mtx_unlock(&softc->ctl_lock); 2802 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2803 if (desc->serial != delete_desc->serial) 2804 continue; 2805 2806 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2807 links); 2808 free(desc, M_CTL); 2809 delete_done = 1; 2810 } 2811 mtx_unlock(&lun->lun_lock); 2812 if (delete_done == 0) { 2813 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2814 "error serial %ju on LUN %u\n", __func__, 2815 delete_desc->serial, delete_desc->lun_id); 2816 retval = EINVAL; 2817 break; 2818 } 2819 break; 2820 } 2821 case CTL_DUMP_STRUCTS: { 2822 int j, k; 2823 struct ctl_port *port; 2824 struct ctl_frontend *fe; 2825 2826 mtx_lock(&softc->ctl_lock); 2827 printf("CTL Persistent Reservation information start:\n"); 2828 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2829 mtx_lock(&lun->lun_lock); 2830 if ((lun->flags & CTL_LUN_DISABLED) != 0) { 2831 mtx_unlock(&lun->lun_lock); 2832 continue; 2833 } 2834 2835 for (j = 0; j < ctl_max_ports; j++) { 2836 if (lun->pr_keys[j] == NULL) 2837 continue; 2838 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2839 if (lun->pr_keys[j][k] == 0) 2840 continue; 2841 printf(" LUN %ju port %d iid %d key " 2842 "%#jx\n", lun->lun, j, k, 2843 (uintmax_t)lun->pr_keys[j][k]); 2844 } 2845 } 2846 mtx_unlock(&lun->lun_lock); 2847 } 2848 printf("CTL Persistent Reservation information end\n"); 2849 printf("CTL Ports:\n"); 2850 STAILQ_FOREACH(port, &softc->port_list, links) { 2851 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2852 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2853 port->frontend->name, port->port_type, 2854 port->physical_port, port->virtual_port, 2855 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2856 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2857 if (port->wwpn_iid[j].in_use == 0 && 2858 port->wwpn_iid[j].wwpn == 0 && 2859 port->wwpn_iid[j].name == NULL) 2860 continue; 2861 2862 printf(" iid %u use %d WWPN %#jx '%s'\n", 2863 j, port->wwpn_iid[j].in_use, 2864 (uintmax_t)port->wwpn_iid[j].wwpn, 2865 port->wwpn_iid[j].name); 2866 } 2867 } 2868 printf("CTL Port information end\n"); 2869 mtx_unlock(&softc->ctl_lock); 2870 /* 2871 * XXX KDM calling this without a lock. We'd likely want 2872 * to drop the lock before calling the frontend's dump 2873 * routine anyway. 
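 * As written, both the fe_list walk and the fe_dump() calls below run
 * after ctl_lock has been dropped.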
2874 */ 2875 printf("CTL Frontends:\n"); 2876 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2877 printf(" Frontend '%s'\n", fe->name); 2878 if (fe->fe_dump != NULL) 2879 fe->fe_dump(); 2880 } 2881 printf("CTL Frontend information end\n"); 2882 break; 2883 } 2884 case CTL_LUN_REQ: { 2885 struct ctl_lun_req *lun_req; 2886 struct ctl_backend_driver *backend; 2887 void *packed; 2888 nvlist_t *tmp_args_nvl; 2889 size_t packed_len; 2890 2891 lun_req = (struct ctl_lun_req *)addr; 2892 tmp_args_nvl = lun_req->args_nvl; 2893 2894 backend = ctl_backend_find(lun_req->backend); 2895 if (backend == NULL) { 2896 lun_req->status = CTL_LUN_ERROR; 2897 snprintf(lun_req->error_str, 2898 sizeof(lun_req->error_str), 2899 "Backend \"%s\" not found.", 2900 lun_req->backend); 2901 break; 2902 } 2903 2904 if (lun_req->args != NULL) { 2905 packed = malloc(lun_req->args_len, M_CTL, M_WAITOK); 2906 if (copyin(lun_req->args, packed, lun_req->args_len) != 0) { 2907 free(packed, M_CTL); 2908 lun_req->status = CTL_LUN_ERROR; 2909 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 2910 "Cannot copyin args."); 2911 break; 2912 } 2913 lun_req->args_nvl = nvlist_unpack(packed, 2914 lun_req->args_len, 0); 2915 free(packed, M_CTL); 2916 2917 if (lun_req->args_nvl == NULL) { 2918 lun_req->status = CTL_LUN_ERROR; 2919 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 2920 "Cannot unpack args nvlist."); 2921 break; 2922 } 2923 } else 2924 lun_req->args_nvl = nvlist_create(0); 2925 2926 retval = backend->ioctl(dev, cmd, addr, flag, td); 2927 nvlist_destroy(lun_req->args_nvl); 2928 lun_req->args_nvl = tmp_args_nvl; 2929 2930 if (lun_req->result_nvl != NULL) { 2931 if (lun_req->result != NULL) { 2932 packed = nvlist_pack(lun_req->result_nvl, 2933 &packed_len); 2934 if (packed == NULL) { 2935 lun_req->status = CTL_LUN_ERROR; 2936 snprintf(lun_req->error_str, 2937 sizeof(lun_req->error_str), 2938 "Cannot pack result nvlist."); 2939 break; 2940 } 2941 2942 if (packed_len > lun_req->result_len) { 2943 lun_req->status = CTL_LUN_ERROR; 2944 snprintf(lun_req->error_str, 2945 sizeof(lun_req->error_str), 2946 "Result nvlist too large."); 2947 free(packed, M_NVLIST); 2948 break; 2949 } 2950 2951 if (copyout(packed, lun_req->result, packed_len)) { 2952 lun_req->status = CTL_LUN_ERROR; 2953 snprintf(lun_req->error_str, 2954 sizeof(lun_req->error_str), 2955 "Cannot copyout() the result."); 2956 free(packed, M_NVLIST); 2957 break; 2958 } 2959 2960 lun_req->result_len = packed_len; 2961 free(packed, M_NVLIST); 2962 } 2963 2964 nvlist_destroy(lun_req->result_nvl); 2965 } 2966 break; 2967 } 2968 case CTL_LUN_LIST: { 2969 struct sbuf *sb; 2970 struct ctl_lun_list *list; 2971 const char *name, *value; 2972 void *cookie; 2973 int type; 2974 2975 list = (struct ctl_lun_list *)addr; 2976 2977 /* 2978 * Allocate a fixed length sbuf here, based on the length 2979 * of the user's buffer. We could allocate an auto-extending 2980 * buffer, and then tell the user how much larger our 2981 * amount of data is than his buffer, but that presents 2982 * some problems: 2983 * 2984 * 1. The sbuf(9) routines use a blocking malloc, and so 2985 * we can't hold a lock while calling them with an 2986 * auto-extending buffer. 2987 * 2988 * 2. There is not currently a LUN reference counting 2989 * mechanism, outside of outstanding transactions on 2990 * the LUN's OOA queue. So a LUN could go away on us 2991 * while we're getting the LUN number, backend-specific 2992 * information, etc. 
Thus, given the way things 2993 * currently work, we need to hold the CTL lock while 2994 * grabbing LUN information. 2995 * 2996 * So, from the user's standpoint, the best thing to do is 2997 * allocate what he thinks is a reasonable buffer length, 2998 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 2999 * double the buffer length and try again. (And repeat 3000 * that until he succeeds.) 3001 */ 3002 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3003 if (sb == NULL) { 3004 list->status = CTL_LUN_LIST_ERROR; 3005 snprintf(list->error_str, sizeof(list->error_str), 3006 "Unable to allocate %d bytes for LUN list", 3007 list->alloc_len); 3008 break; 3009 } 3010 3011 sbuf_printf(sb, "<ctllunlist>\n"); 3012 3013 mtx_lock(&softc->ctl_lock); 3014 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3015 mtx_lock(&lun->lun_lock); 3016 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3017 (uintmax_t)lun->lun); 3018 3019 /* 3020 * Bail out as soon as we see that we've overfilled 3021 * the buffer. 3022 */ 3023 if (retval != 0) 3024 break; 3025 3026 retval = sbuf_printf(sb, "\t<backend_type>%s" 3027 "</backend_type>\n", 3028 (lun->backend == NULL) ? "none" : 3029 lun->backend->name); 3030 3031 if (retval != 0) 3032 break; 3033 3034 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3035 lun->be_lun->lun_type); 3036 3037 if (retval != 0) 3038 break; 3039 3040 if (lun->backend == NULL) { 3041 retval = sbuf_printf(sb, "</lun>\n"); 3042 if (retval != 0) 3043 break; 3044 continue; 3045 } 3046 3047 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3048 (lun->be_lun->maxlba > 0) ? 3049 lun->be_lun->maxlba + 1 : 0); 3050 3051 if (retval != 0) 3052 break; 3053 3054 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3055 lun->be_lun->blocksize); 3056 3057 if (retval != 0) 3058 break; 3059 3060 retval = sbuf_printf(sb, "\t<serial_number>"); 3061 3062 if (retval != 0) 3063 break; 3064 3065 retval = ctl_sbuf_printf_esc(sb, 3066 lun->be_lun->serial_num, 3067 sizeof(lun->be_lun->serial_num)); 3068 3069 if (retval != 0) 3070 break; 3071 3072 retval = sbuf_printf(sb, "</serial_number>\n"); 3073 3074 if (retval != 0) 3075 break; 3076 3077 retval = sbuf_printf(sb, "\t<device_id>"); 3078 3079 if (retval != 0) 3080 break; 3081 3082 retval = ctl_sbuf_printf_esc(sb, 3083 lun->be_lun->device_id, 3084 sizeof(lun->be_lun->device_id)); 3085 3086 if (retval != 0) 3087 break; 3088 3089 retval = sbuf_printf(sb, "</device_id>\n"); 3090 3091 if (retval != 0) 3092 break; 3093 3094 if (lun->backend->lun_info != NULL) { 3095 retval = lun->backend->lun_info(lun->be_lun, sb); 3096 if (retval != 0) 3097 break; 3098 } 3099 3100 cookie = NULL; 3101 while ((name = nvlist_next(lun->be_lun->options, &type, 3102 &cookie)) != NULL) { 3103 sbuf_printf(sb, "\t<%s>", name); 3104 3105 if (type == NV_TYPE_STRING) { 3106 value = dnvlist_get_string( 3107 lun->be_lun->options, name, NULL); 3108 if (value != NULL) 3109 sbuf_printf(sb, "%s", value); 3110 } 3111 3112 sbuf_printf(sb, "</%s>\n", name); 3113 } 3114 3115 retval = sbuf_printf(sb, "</lun>\n"); 3116 3117 if (retval != 0) 3118 break; 3119 mtx_unlock(&lun->lun_lock); 3120 } 3121 if (lun != NULL) 3122 mtx_unlock(&lun->lun_lock); 3123 mtx_unlock(&softc->ctl_lock); 3124 3125 if ((retval != 0) 3126 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3127 retval = 0; 3128 sbuf_delete(sb); 3129 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3130 snprintf(list->error_str, sizeof(list->error_str), 3131 "Out of space, %d bytes is too small", 3132 list->alloc_len); 3133 
break; 3134 } 3135 3136 sbuf_finish(sb); 3137 3138 retval = copyout(sbuf_data(sb), list->lun_xml, 3139 sbuf_len(sb) + 1); 3140 3141 list->fill_len = sbuf_len(sb) + 1; 3142 list->status = CTL_LUN_LIST_OK; 3143 sbuf_delete(sb); 3144 break; 3145 } 3146 case CTL_ISCSI: { 3147 struct ctl_iscsi *ci; 3148 struct ctl_frontend *fe; 3149 3150 ci = (struct ctl_iscsi *)addr; 3151 3152 fe = ctl_frontend_find("iscsi"); 3153 if (fe == NULL) { 3154 ci->status = CTL_ISCSI_ERROR; 3155 snprintf(ci->error_str, sizeof(ci->error_str), 3156 "Frontend \"iscsi\" not found."); 3157 break; 3158 } 3159 3160 retval = fe->ioctl(dev, cmd, addr, flag, td); 3161 break; 3162 } 3163 case CTL_PORT_REQ: { 3164 struct ctl_req *req; 3165 struct ctl_frontend *fe; 3166 void *packed; 3167 nvlist_t *tmp_args_nvl; 3168 size_t packed_len; 3169 3170 req = (struct ctl_req *)addr; 3171 tmp_args_nvl = req->args_nvl; 3172 3173 fe = ctl_frontend_find(req->driver); 3174 if (fe == NULL) { 3175 req->status = CTL_LUN_ERROR; 3176 snprintf(req->error_str, sizeof(req->error_str), 3177 "Frontend \"%s\" not found.", req->driver); 3178 break; 3179 } 3180 3181 if (req->args != NULL) { 3182 packed = malloc(req->args_len, M_CTL, M_WAITOK); 3183 if (copyin(req->args, packed, req->args_len) != 0) { 3184 free(packed, M_CTL); 3185 req->status = CTL_LUN_ERROR; 3186 snprintf(req->error_str, sizeof(req->error_str), 3187 "Cannot copyin args."); 3188 break; 3189 } 3190 req->args_nvl = nvlist_unpack(packed, 3191 req->args_len, 0); 3192 free(packed, M_CTL); 3193 3194 if (req->args_nvl == NULL) { 3195 req->status = CTL_LUN_ERROR; 3196 snprintf(req->error_str, sizeof(req->error_str), 3197 "Cannot unpack args nvlist."); 3198 break; 3199 } 3200 } else 3201 req->args_nvl = nvlist_create(0); 3202 3203 if (fe->ioctl) 3204 retval = fe->ioctl(dev, cmd, addr, flag, td); 3205 else 3206 retval = ENODEV; 3207 3208 nvlist_destroy(req->args_nvl); 3209 req->args_nvl = tmp_args_nvl; 3210 3211 if (req->result_nvl != NULL) { 3212 if (req->result != NULL) { 3213 packed = nvlist_pack(req->result_nvl, 3214 &packed_len); 3215 if (packed == NULL) { 3216 req->status = CTL_LUN_ERROR; 3217 snprintf(req->error_str, 3218 sizeof(req->error_str), 3219 "Cannot pack result nvlist."); 3220 break; 3221 } 3222 3223 if (packed_len > req->result_len) { 3224 req->status = CTL_LUN_ERROR; 3225 snprintf(req->error_str, 3226 sizeof(req->error_str), 3227 "Result nvlist too large."); 3228 free(packed, M_NVLIST); 3229 break; 3230 } 3231 3232 if (copyout(packed, req->result, packed_len)) { 3233 req->status = CTL_LUN_ERROR; 3234 snprintf(req->error_str, 3235 sizeof(req->error_str), 3236 "Cannot copyout() the result."); 3237 free(packed, M_NVLIST); 3238 break; 3239 } 3240 3241 req->result_len = packed_len; 3242 free(packed, M_NVLIST); 3243 } 3244 3245 nvlist_destroy(req->result_nvl); 3246 } 3247 break; 3248 } 3249 case CTL_PORT_LIST: { 3250 struct sbuf *sb; 3251 struct ctl_port *port; 3252 struct ctl_lun_list *list; 3253 const char *name, *value; 3254 void *cookie; 3255 int j, type; 3256 uint32_t plun; 3257 3258 list = (struct ctl_lun_list *)addr; 3259 3260 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3261 if (sb == NULL) { 3262 list->status = CTL_LUN_LIST_ERROR; 3263 snprintf(list->error_str, sizeof(list->error_str), 3264 "Unable to allocate %d bytes for LUN list", 3265 list->alloc_len); 3266 break; 3267 } 3268 3269 sbuf_printf(sb, "<ctlportlist>\n"); 3270 3271 mtx_lock(&softc->ctl_lock); 3272 STAILQ_FOREACH(port, &softc->port_list, links) { 3273 retval = sbuf_printf(sb, "<targ_port 
id=\"%ju\">\n", 3274 (uintmax_t)port->targ_port); 3275 3276 /* 3277 * Bail out as soon as we see that we've overfilled 3278 * the buffer. 3279 */ 3280 if (retval != 0) 3281 break; 3282 3283 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3284 "</frontend_type>\n", port->frontend->name); 3285 if (retval != 0) 3286 break; 3287 3288 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3289 port->port_type); 3290 if (retval != 0) 3291 break; 3292 3293 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3294 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 3295 if (retval != 0) 3296 break; 3297 3298 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3299 port->port_name); 3300 if (retval != 0) 3301 break; 3302 3303 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3304 port->physical_port); 3305 if (retval != 0) 3306 break; 3307 3308 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3309 port->virtual_port); 3310 if (retval != 0) 3311 break; 3312 3313 if (port->target_devid != NULL) { 3314 sbuf_printf(sb, "\t<target>"); 3315 ctl_id_sbuf(port->target_devid, sb); 3316 sbuf_printf(sb, "</target>\n"); 3317 } 3318 3319 if (port->port_devid != NULL) { 3320 sbuf_printf(sb, "\t<port>"); 3321 ctl_id_sbuf(port->port_devid, sb); 3322 sbuf_printf(sb, "</port>\n"); 3323 } 3324 3325 if (port->port_info != NULL) { 3326 retval = port->port_info(port->onoff_arg, sb); 3327 if (retval != 0) 3328 break; 3329 } 3330 3331 cookie = NULL; 3332 while ((name = nvlist_next(port->options, &type, 3333 &cookie)) != NULL) { 3334 sbuf_printf(sb, "\t<%s>", name); 3335 3336 if (type == NV_TYPE_STRING) { 3337 value = dnvlist_get_string(port->options, 3338 name, NULL); 3339 if (value != NULL) 3340 sbuf_printf(sb, "%s", value); 3341 } 3342 3343 sbuf_printf(sb, "</%s>\n", name); 3344 } 3345 3346 if (port->lun_map != NULL) { 3347 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 3348 for (j = 0; j < port->lun_map_size; j++) { 3349 plun = ctl_lun_map_from_port(port, j); 3350 if (plun == UINT32_MAX) 3351 continue; 3352 sbuf_printf(sb, 3353 "\t<lun id=\"%u\">%u</lun>\n", 3354 j, plun); 3355 } 3356 } 3357 3358 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3359 if (port->wwpn_iid[j].in_use == 0 || 3360 (port->wwpn_iid[j].wwpn == 0 && 3361 port->wwpn_iid[j].name == NULL)) 3362 continue; 3363 3364 if (port->wwpn_iid[j].name != NULL) 3365 retval = sbuf_printf(sb, 3366 "\t<initiator id=\"%u\">%s</initiator>\n", 3367 j, port->wwpn_iid[j].name); 3368 else 3369 retval = sbuf_printf(sb, 3370 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3371 j, port->wwpn_iid[j].wwpn); 3372 if (retval != 0) 3373 break; 3374 } 3375 if (retval != 0) 3376 break; 3377 3378 retval = sbuf_printf(sb, "</targ_port>\n"); 3379 if (retval != 0) 3380 break; 3381 } 3382 mtx_unlock(&softc->ctl_lock); 3383 3384 if ((retval != 0) 3385 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3386 retval = 0; 3387 sbuf_delete(sb); 3388 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3389 snprintf(list->error_str, sizeof(list->error_str), 3390 "Out of space, %d bytes is too small", 3391 list->alloc_len); 3392 break; 3393 } 3394 3395 sbuf_finish(sb); 3396 3397 retval = copyout(sbuf_data(sb), list->lun_xml, 3398 sbuf_len(sb) + 1); 3399 3400 list->fill_len = sbuf_len(sb) + 1; 3401 list->status = CTL_LUN_LIST_OK; 3402 sbuf_delete(sb); 3403 break; 3404 } 3405 case CTL_LUN_MAP: { 3406 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3407 struct ctl_port *port; 3408 3409 mtx_lock(&softc->ctl_lock); 3410 if (lm->port < softc->port_min 
|| 3411 lm->port >= softc->port_max || 3412 (port = softc->ctl_ports[lm->port]) == NULL) { 3413 mtx_unlock(&softc->ctl_lock); 3414 return (ENXIO); 3415 } 3416 if (port->status & CTL_PORT_STATUS_ONLINE) { 3417 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3418 if (ctl_lun_map_to_port(port, lun->lun) == 3419 UINT32_MAX) 3420 continue; 3421 mtx_lock(&lun->lun_lock); 3422 ctl_est_ua_port(lun, lm->port, -1, 3423 CTL_UA_LUN_CHANGE); 3424 mtx_unlock(&lun->lun_lock); 3425 } 3426 } 3427 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3428 if (lm->plun != UINT32_MAX) { 3429 if (lm->lun == UINT32_MAX) 3430 retval = ctl_lun_map_unset(port, lm->plun); 3431 else if (lm->lun < ctl_max_luns && 3432 softc->ctl_luns[lm->lun] != NULL) 3433 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3434 else 3435 return (ENXIO); 3436 } else { 3437 if (lm->lun == UINT32_MAX) 3438 retval = ctl_lun_map_deinit(port); 3439 else 3440 retval = ctl_lun_map_init(port); 3441 } 3442 if (port->status & CTL_PORT_STATUS_ONLINE) 3443 ctl_isc_announce_port(port); 3444 break; 3445 } 3446 case CTL_GET_LUN_STATS: { 3447 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3448 int i; 3449 3450 /* 3451 * XXX KDM no locking here. If the LUN list changes, 3452 * things can blow up. 3453 */ 3454 i = 0; 3455 stats->status = CTL_SS_OK; 3456 stats->fill_len = 0; 3457 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3458 if (lun->lun < stats->first_item) 3459 continue; 3460 if (stats->fill_len + sizeof(lun->stats) > 3461 stats->alloc_len) { 3462 stats->status = CTL_SS_NEED_MORE_SPACE; 3463 break; 3464 } 3465 retval = copyout(&lun->stats, &stats->stats[i++], 3466 sizeof(lun->stats)); 3467 if (retval != 0) 3468 break; 3469 stats->fill_len += sizeof(lun->stats); 3470 } 3471 stats->num_items = softc->num_luns; 3472 stats->flags = CTL_STATS_FLAG_NONE; 3473 #ifdef CTL_TIME_IO 3474 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3475 #endif 3476 getnanouptime(&stats->timestamp); 3477 break; 3478 } 3479 case CTL_GET_PORT_STATS: { 3480 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3481 int i; 3482 3483 /* 3484 * XXX KDM no locking here. If the LUN list changes, 3485 * things can blow up. 3486 */ 3487 i = 0; 3488 stats->status = CTL_SS_OK; 3489 stats->fill_len = 0; 3490 STAILQ_FOREACH(port, &softc->port_list, links) { 3491 if (port->targ_port < stats->first_item) 3492 continue; 3493 if (stats->fill_len + sizeof(port->stats) > 3494 stats->alloc_len) { 3495 stats->status = CTL_SS_NEED_MORE_SPACE; 3496 break; 3497 } 3498 retval = copyout(&port->stats, &stats->stats[i++], 3499 sizeof(port->stats)); 3500 if (retval != 0) 3501 break; 3502 stats->fill_len += sizeof(port->stats); 3503 } 3504 stats->num_items = softc->num_ports; 3505 stats->flags = CTL_STATS_FLAG_NONE; 3506 #ifdef CTL_TIME_IO 3507 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3508 #endif 3509 getnanouptime(&stats->timestamp); 3510 break; 3511 } 3512 default: { 3513 /* XXX KDM should we fix this? */ 3514 #if 0 3515 struct ctl_backend_driver *backend; 3516 unsigned int type; 3517 int found; 3518 3519 found = 0; 3520 3521 /* 3522 * We encode the backend type as the ioctl type for backend 3523 * ioctls. So parse it out here, and then search for a 3524 * backend of this type. 
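 * (This dispatch path is currently compiled out; with it disabled,
 * unrecognized ioctls simply fail with ENOTTY below.)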
3525 */ 3526 type = _IOC_TYPE(cmd); 3527 3528 STAILQ_FOREACH(backend, &softc->be_list, links) { 3529 if (backend->type == type) { 3530 found = 1; 3531 break; 3532 } 3533 } 3534 if (found == 0) { 3535 printf("ctl: unknown ioctl command %#lx or backend " 3536 "%d\n", cmd, type); 3537 retval = EINVAL; 3538 break; 3539 } 3540 retval = backend->ioctl(dev, cmd, addr, flag, td); 3541 #endif 3542 retval = ENOTTY; 3543 break; 3544 } 3545 } 3546 return (retval); 3547 } 3548 3549 uint32_t 3550 ctl_get_initindex(struct ctl_nexus *nexus) 3551 { 3552 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3553 } 3554 3555 int 3556 ctl_lun_map_init(struct ctl_port *port) 3557 { 3558 struct ctl_softc *softc = port->ctl_softc; 3559 struct ctl_lun *lun; 3560 int size = ctl_lun_map_size; 3561 uint32_t i; 3562 3563 if (port->lun_map == NULL || port->lun_map_size < size) { 3564 port->lun_map_size = 0; 3565 free(port->lun_map, M_CTL); 3566 port->lun_map = malloc(size * sizeof(uint32_t), 3567 M_CTL, M_NOWAIT); 3568 } 3569 if (port->lun_map == NULL) 3570 return (ENOMEM); 3571 for (i = 0; i < size; i++) 3572 port->lun_map[i] = UINT32_MAX; 3573 port->lun_map_size = size; 3574 if (port->status & CTL_PORT_STATUS_ONLINE) { 3575 if (port->lun_disable != NULL) { 3576 STAILQ_FOREACH(lun, &softc->lun_list, links) 3577 port->lun_disable(port->targ_lun_arg, lun->lun); 3578 } 3579 ctl_isc_announce_port(port); 3580 } 3581 return (0); 3582 } 3583 3584 int 3585 ctl_lun_map_deinit(struct ctl_port *port) 3586 { 3587 struct ctl_softc *softc = port->ctl_softc; 3588 struct ctl_lun *lun; 3589 3590 if (port->lun_map == NULL) 3591 return (0); 3592 port->lun_map_size = 0; 3593 free(port->lun_map, M_CTL); 3594 port->lun_map = NULL; 3595 if (port->status & CTL_PORT_STATUS_ONLINE) { 3596 if (port->lun_enable != NULL) { 3597 STAILQ_FOREACH(lun, &softc->lun_list, links) 3598 port->lun_enable(port->targ_lun_arg, lun->lun); 3599 } 3600 ctl_isc_announce_port(port); 3601 } 3602 return (0); 3603 } 3604 3605 int 3606 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3607 { 3608 int status; 3609 uint32_t old; 3610 3611 if (port->lun_map == NULL) { 3612 status = ctl_lun_map_init(port); 3613 if (status != 0) 3614 return (status); 3615 } 3616 if (plun >= port->lun_map_size) 3617 return (EINVAL); 3618 old = port->lun_map[plun]; 3619 port->lun_map[plun] = glun; 3620 if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) { 3621 if (port->lun_enable != NULL) 3622 port->lun_enable(port->targ_lun_arg, plun); 3623 ctl_isc_announce_port(port); 3624 } 3625 return (0); 3626 } 3627 3628 int 3629 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3630 { 3631 uint32_t old; 3632 3633 if (port->lun_map == NULL || plun >= port->lun_map_size) 3634 return (0); 3635 old = port->lun_map[plun]; 3636 port->lun_map[plun] = UINT32_MAX; 3637 if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) { 3638 if (port->lun_disable != NULL) 3639 port->lun_disable(port->targ_lun_arg, plun); 3640 ctl_isc_announce_port(port); 3641 } 3642 return (0); 3643 } 3644 3645 uint32_t 3646 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3647 { 3648 3649 if (port == NULL) 3650 return (UINT32_MAX); 3651 if (port->lun_map == NULL) 3652 return (lun_id); 3653 if (lun_id > port->lun_map_size) 3654 return (UINT32_MAX); 3655 return (port->lun_map[lun_id]); 3656 } 3657 3658 uint32_t 3659 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3660 { 3661 uint32_t i; 3662 3663 if (port == NULL) 3664 return (UINT32_MAX); 3665 if 
(port->lun_map == NULL) 3666 return (lun_id); 3667 for (i = 0; i < port->lun_map_size; i++) { 3668 if (port->lun_map[i] == lun_id) 3669 return (i); 3670 } 3671 return (UINT32_MAX); 3672 } 3673 3674 uint32_t 3675 ctl_decode_lun(uint64_t encoded) 3676 { 3677 uint8_t lun[8]; 3678 uint32_t result = 0xffffffff; 3679 3680 be64enc(lun, encoded); 3681 switch (lun[0] & RPL_LUNDATA_ATYP_MASK) { 3682 case RPL_LUNDATA_ATYP_PERIPH: 3683 if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 && 3684 lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0) 3685 result = lun[1]; 3686 break; 3687 case RPL_LUNDATA_ATYP_FLAT: 3688 if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 && 3689 lun[6] == 0 && lun[7] == 0) 3690 result = ((lun[0] & 0x3f) << 8) + lun[1]; 3691 break; 3692 case RPL_LUNDATA_ATYP_EXTLUN: 3693 switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) { 3694 case 0x02: 3695 switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) { 3696 case 0x00: 3697 result = lun[1]; 3698 break; 3699 case 0x10: 3700 result = (lun[1] << 16) + (lun[2] << 8) + 3701 lun[3]; 3702 break; 3703 case 0x20: 3704 if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0) 3705 result = (lun[2] << 24) + 3706 (lun[3] << 16) + (lun[4] << 8) + 3707 lun[5]; 3708 break; 3709 } 3710 break; 3711 case RPL_LUNDATA_EXT_EAM_NOT_SPEC: 3712 result = 0xffffffff; 3713 break; 3714 } 3715 break; 3716 } 3717 return (result); 3718 } 3719 3720 uint64_t 3721 ctl_encode_lun(uint32_t decoded) 3722 { 3723 uint64_t l = decoded; 3724 3725 if (l <= 0xff) 3726 return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48)); 3727 if (l <= 0x3fff) 3728 return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48)); 3729 if (l <= 0xffffff) 3730 return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) | 3731 (l << 32)); 3732 return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16)); 3733 } 3734 3735 int 3736 ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) 3737 { 3738 int i; 3739 3740 for (i = first; i < last; i++) { 3741 if ((mask[i / 32] & (1 << (i % 32))) == 0) 3742 return (i); 3743 } 3744 return (-1); 3745 } 3746 3747 int 3748 ctl_set_mask(uint32_t *mask, uint32_t bit) 3749 { 3750 uint32_t chunk, piece; 3751 3752 chunk = bit >> 5; 3753 piece = bit % (sizeof(uint32_t) * 8); 3754 3755 if ((mask[chunk] & (1 << piece)) != 0) 3756 return (-1); 3757 else 3758 mask[chunk] |= (1 << piece); 3759 3760 return (0); 3761 } 3762 3763 int 3764 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3765 { 3766 uint32_t chunk, piece; 3767 3768 chunk = bit >> 5; 3769 piece = bit % (sizeof(uint32_t) * 8); 3770 3771 if ((mask[chunk] & (1 << piece)) == 0) 3772 return (-1); 3773 else 3774 mask[chunk] &= ~(1 << piece); 3775 3776 return (0); 3777 } 3778 3779 int 3780 ctl_is_set(uint32_t *mask, uint32_t bit) 3781 { 3782 uint32_t chunk, piece; 3783 3784 chunk = bit >> 5; 3785 piece = bit % (sizeof(uint32_t) * 8); 3786 3787 if ((mask[chunk] & (1 << piece)) == 0) 3788 return (0); 3789 else 3790 return (1); 3791 } 3792 3793 static uint64_t 3794 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3795 { 3796 uint64_t *t; 3797 3798 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3799 if (t == NULL) 3800 return (0); 3801 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3802 } 3803 3804 static void 3805 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3806 { 3807 uint64_t *t; 3808 3809 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3810 if (t == NULL) 3811 return; 3812 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3813 } 3814 3815 static void 3816 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3817 
{ 3818 uint64_t *p; 3819 u_int i; 3820 3821 i = residx/CTL_MAX_INIT_PER_PORT; 3822 if (lun->pr_keys[i] != NULL) 3823 return; 3824 mtx_unlock(&lun->lun_lock); 3825 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3826 M_WAITOK | M_ZERO); 3827 mtx_lock(&lun->lun_lock); 3828 if (lun->pr_keys[i] == NULL) 3829 lun->pr_keys[i] = p; 3830 else 3831 free(p, M_CTL); 3832 } 3833 3834 static void 3835 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3836 { 3837 uint64_t *t; 3838 3839 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3840 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3841 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3842 } 3843 3844 /* 3845 * ctl_softc, pool_name, total_ctl_io are passed in. 3846 * npool is passed out. 3847 */ 3848 int 3849 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3850 uint32_t total_ctl_io, void **npool) 3851 { 3852 struct ctl_io_pool *pool; 3853 3854 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3855 M_NOWAIT | M_ZERO); 3856 if (pool == NULL) 3857 return (ENOMEM); 3858 3859 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3860 pool->ctl_softc = ctl_softc; 3861 #ifdef IO_POOLS 3862 pool->zone = uma_zsecond_create(pool->name, NULL, 3863 NULL, NULL, NULL, ctl_softc->io_zone); 3864 /* uma_prealloc(pool->zone, total_ctl_io); */ 3865 #else 3866 pool->zone = ctl_softc->io_zone; 3867 #endif 3868 3869 *npool = pool; 3870 return (0); 3871 } 3872 3873 void 3874 ctl_pool_free(struct ctl_io_pool *pool) 3875 { 3876 3877 if (pool == NULL) 3878 return; 3879 3880 #ifdef IO_POOLS 3881 uma_zdestroy(pool->zone); 3882 #endif 3883 free(pool, M_CTL); 3884 } 3885 3886 union ctl_io * 3887 ctl_alloc_io(void *pool_ref) 3888 { 3889 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3890 union ctl_io *io; 3891 3892 io = uma_zalloc(pool->zone, M_WAITOK); 3893 if (io != NULL) { 3894 io->io_hdr.pool = pool_ref; 3895 CTL_SOFTC(io) = pool->ctl_softc; 3896 TAILQ_INIT(&io->io_hdr.blocked_queue); 3897 } 3898 return (io); 3899 } 3900 3901 union ctl_io * 3902 ctl_alloc_io_nowait(void *pool_ref) 3903 { 3904 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3905 union ctl_io *io; 3906 3907 io = uma_zalloc(pool->zone, M_NOWAIT); 3908 if (io != NULL) { 3909 io->io_hdr.pool = pool_ref; 3910 CTL_SOFTC(io) = pool->ctl_softc; 3911 TAILQ_INIT(&io->io_hdr.blocked_queue); 3912 } 3913 return (io); 3914 } 3915 3916 void 3917 ctl_free_io(union ctl_io *io) 3918 { 3919 struct ctl_io_pool *pool; 3920 3921 if (io == NULL) 3922 return; 3923 3924 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3925 uma_zfree(pool->zone, io); 3926 } 3927 3928 void 3929 ctl_zero_io(union ctl_io *io) 3930 { 3931 struct ctl_io_pool *pool; 3932 3933 if (io == NULL) 3934 return; 3935 3936 /* 3937 * May need to preserve linked list pointers at some point too. 
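 *
 * A rough sketch of how a frontend might use this pool API
 * (illustrative only; the pool reference and queueing details vary
 * by frontend):
 *
 *	io = ctl_alloc_io(port->ctl_pool_ref);
 *	ctl_zero_io(io);
 *	io->io_hdr.io_type = CTL_IO_SCSI;
 *	(fill in io->io_hdr.nexus and io->scsiio.cdb)
 *	ctl_queue(io);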
3938 */ 3939 pool = io->io_hdr.pool; 3940 memset(io, 0, sizeof(*io)); 3941 io->io_hdr.pool = pool; 3942 CTL_SOFTC(io) = pool->ctl_softc; 3943 TAILQ_INIT(&io->io_hdr.blocked_queue); 3944 } 3945 3946 int 3947 ctl_expand_number(const char *buf, uint64_t *num) 3948 { 3949 char *endptr; 3950 uint64_t number; 3951 unsigned shift; 3952 3953 number = strtoq(buf, &endptr, 0); 3954 3955 switch (tolower((unsigned char)*endptr)) { 3956 case 'e': 3957 shift = 60; 3958 break; 3959 case 'p': 3960 shift = 50; 3961 break; 3962 case 't': 3963 shift = 40; 3964 break; 3965 case 'g': 3966 shift = 30; 3967 break; 3968 case 'm': 3969 shift = 20; 3970 break; 3971 case 'k': 3972 shift = 10; 3973 break; 3974 case 'b': 3975 case '\0': /* No unit. */ 3976 *num = number; 3977 return (0); 3978 default: 3979 /* Unrecognized unit. */ 3980 return (-1); 3981 } 3982 3983 if ((number << shift) >> shift != number) { 3984 /* Overflow */ 3985 return (-1); 3986 } 3987 *num = number << shift; 3988 return (0); 3989 } 3990 3991 /* 3992 * This routine could be used in the future to load default and/or saved 3993 * mode page parameters for a particuar lun. 3994 */ 3995 static int 3996 ctl_init_page_index(struct ctl_lun *lun) 3997 { 3998 int i, page_code; 3999 struct ctl_page_index *page_index; 4000 const char *value; 4001 uint64_t ival; 4002 4003 memcpy(&lun->mode_pages.index, page_index_template, 4004 sizeof(page_index_template)); 4005 4006 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 4007 page_index = &lun->mode_pages.index[i]; 4008 if (lun->be_lun->lun_type == T_DIRECT && 4009 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 4010 continue; 4011 if (lun->be_lun->lun_type == T_PROCESSOR && 4012 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 4013 continue; 4014 if (lun->be_lun->lun_type == T_CDROM && 4015 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 4016 continue; 4017 4018 page_code = page_index->page_code & SMPH_PC_MASK; 4019 switch (page_code) { 4020 case SMS_RW_ERROR_RECOVERY_PAGE: { 4021 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4022 ("subpage %#x for page %#x is incorrect!", 4023 page_index->subpage, page_code)); 4024 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 4025 &rw_er_page_default, 4026 sizeof(rw_er_page_default)); 4027 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 4028 &rw_er_page_changeable, 4029 sizeof(rw_er_page_changeable)); 4030 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 4031 &rw_er_page_default, 4032 sizeof(rw_er_page_default)); 4033 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 4034 &rw_er_page_default, 4035 sizeof(rw_er_page_default)); 4036 page_index->page_data = 4037 (uint8_t *)lun->mode_pages.rw_er_page; 4038 break; 4039 } 4040 case SMS_FORMAT_DEVICE_PAGE: { 4041 struct scsi_format_page *format_page; 4042 4043 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4044 ("subpage %#x for page %#x is incorrect!", 4045 page_index->subpage, page_code)); 4046 4047 /* 4048 * Sectors per track are set above. Bytes per 4049 * sector need to be set here on a per-LUN basis. 
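 *
 * Each mode page is kept in four copies indexed by CTL_PAGE_CURRENT,
 * CTL_PAGE_CHANGEABLE, CTL_PAGE_DEFAULT and CTL_PAGE_SAVED; the
 * changeable copy describes which bits an initiator may modify.
 * The backing store's block size is patched into the current,
 * default and saved copies below with
 *
 *	scsi_ulto2b(lun->be_lun->blocksize,
 *	    format_page->bytes_per_sector);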
4050 */ 4051 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 4052 &format_page_default, 4053 sizeof(format_page_default)); 4054 memcpy(&lun->mode_pages.format_page[ 4055 CTL_PAGE_CHANGEABLE], &format_page_changeable, 4056 sizeof(format_page_changeable)); 4057 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 4058 &format_page_default, 4059 sizeof(format_page_default)); 4060 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 4061 &format_page_default, 4062 sizeof(format_page_default)); 4063 4064 format_page = &lun->mode_pages.format_page[ 4065 CTL_PAGE_CURRENT]; 4066 scsi_ulto2b(lun->be_lun->blocksize, 4067 format_page->bytes_per_sector); 4068 4069 format_page = &lun->mode_pages.format_page[ 4070 CTL_PAGE_DEFAULT]; 4071 scsi_ulto2b(lun->be_lun->blocksize, 4072 format_page->bytes_per_sector); 4073 4074 format_page = &lun->mode_pages.format_page[ 4075 CTL_PAGE_SAVED]; 4076 scsi_ulto2b(lun->be_lun->blocksize, 4077 format_page->bytes_per_sector); 4078 4079 page_index->page_data = 4080 (uint8_t *)lun->mode_pages.format_page; 4081 break; 4082 } 4083 case SMS_RIGID_DISK_PAGE: { 4084 struct scsi_rigid_disk_page *rigid_disk_page; 4085 uint32_t sectors_per_cylinder; 4086 uint64_t cylinders; 4087 #ifndef __XSCALE__ 4088 int shift; 4089 #endif /* !__XSCALE__ */ 4090 4091 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4092 ("subpage %#x for page %#x is incorrect!", 4093 page_index->subpage, page_code)); 4094 4095 /* 4096 * Rotation rate and sectors per track are set 4097 * above. We calculate the cylinders here based on 4098 * capacity. Due to the number of heads and 4099 * sectors per track we're using, smaller arrays 4100 * may turn out to have 0 cylinders. Linux and 4101 * FreeBSD don't pay attention to these mode pages 4102 * to figure out capacity, but Solaris does. It 4103 * seems to deal with 0 cylinders just fine, and 4104 * works out a fake geometry based on the capacity. 4105 */ 4106 memcpy(&lun->mode_pages.rigid_disk_page[ 4107 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 4108 sizeof(rigid_disk_page_default)); 4109 memcpy(&lun->mode_pages.rigid_disk_page[ 4110 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 4111 sizeof(rigid_disk_page_changeable)); 4112 4113 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 4114 CTL_DEFAULT_HEADS; 4115 4116 /* 4117 * The divide method here will be more accurate, 4118 * probably, but results in floating point being 4119 * used in the kernel on i386 (__udivdi3()). On the 4120 * XScale, though, __udivdi3() is implemented in 4121 * software. 4122 * 4123 * The shift method for cylinder calculation is 4124 * accurate if sectors_per_cylinder is a power of 4125 * 2. Otherwise it might be slightly off -- you 4126 * might have a bit of a truncation problem. 4127 */ 4128 #ifdef __XSCALE__ 4129 cylinders = (lun->be_lun->maxlba + 1) / 4130 sectors_per_cylinder; 4131 #else 4132 for (shift = 31; shift > 0; shift--) { 4133 if (sectors_per_cylinder & (1 << shift)) 4134 break; 4135 } 4136 cylinders = (lun->be_lun->maxlba + 1) >> shift; 4137 #endif 4138 4139 /* 4140 * We've basically got 3 bytes, or 24 bits for the 4141 * cylinder size in the mode page. If we're over, 4142 * just round down to 2^24. 
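 *
 * For example, if sectors_per_cylinder were 32768 (a power of two)
 * the shift loop above picks shift = 15 and the result is exact.
 * A non-power-of-two value effectively divides by the largest power
 * of two below it, overstating the cylinder count; either way the
 * value is clamped to 0xffffff just below.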
4143 */ 4144 if (cylinders > 0xffffff) 4145 cylinders = 0xffffff; 4146 4147 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4148 CTL_PAGE_DEFAULT]; 4149 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4150 4151 if ((value = dnvlist_get_string(lun->be_lun->options, 4152 "rpm", NULL)) != NULL) { 4153 scsi_ulto2b(strtol(value, NULL, 0), 4154 rigid_disk_page->rotation_rate); 4155 } 4156 4157 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 4158 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4159 sizeof(rigid_disk_page_default)); 4160 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 4161 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4162 sizeof(rigid_disk_page_default)); 4163 4164 page_index->page_data = 4165 (uint8_t *)lun->mode_pages.rigid_disk_page; 4166 break; 4167 } 4168 case SMS_VERIFY_ERROR_RECOVERY_PAGE: { 4169 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4170 ("subpage %#x for page %#x is incorrect!", 4171 page_index->subpage, page_code)); 4172 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT], 4173 &verify_er_page_default, 4174 sizeof(verify_er_page_default)); 4175 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE], 4176 &verify_er_page_changeable, 4177 sizeof(verify_er_page_changeable)); 4178 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT], 4179 &verify_er_page_default, 4180 sizeof(verify_er_page_default)); 4181 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED], 4182 &verify_er_page_default, 4183 sizeof(verify_er_page_default)); 4184 page_index->page_data = 4185 (uint8_t *)lun->mode_pages.verify_er_page; 4186 break; 4187 } 4188 case SMS_CACHING_PAGE: { 4189 struct scsi_caching_page *caching_page; 4190 4191 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4192 ("subpage %#x for page %#x is incorrect!", 4193 page_index->subpage, page_code)); 4194 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 4195 &caching_page_default, 4196 sizeof(caching_page_default)); 4197 memcpy(&lun->mode_pages.caching_page[ 4198 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 4199 sizeof(caching_page_changeable)); 4200 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4201 &caching_page_default, 4202 sizeof(caching_page_default)); 4203 caching_page = &lun->mode_pages.caching_page[ 4204 CTL_PAGE_SAVED]; 4205 value = dnvlist_get_string(lun->be_lun->options, 4206 "writecache", NULL); 4207 if (value != NULL && strcmp(value, "off") == 0) 4208 caching_page->flags1 &= ~SCP_WCE; 4209 value = dnvlist_get_string(lun->be_lun->options, 4210 "readcache", NULL); 4211 if (value != NULL && strcmp(value, "off") == 0) 4212 caching_page->flags1 |= SCP_RCD; 4213 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 4214 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4215 sizeof(caching_page_default)); 4216 page_index->page_data = 4217 (uint8_t *)lun->mode_pages.caching_page; 4218 break; 4219 } 4220 case SMS_CONTROL_MODE_PAGE: { 4221 switch (page_index->subpage) { 4222 case SMS_SUBPAGE_PAGE_0: { 4223 struct scsi_control_page *control_page; 4224 4225 memcpy(&lun->mode_pages.control_page[ 4226 CTL_PAGE_DEFAULT], 4227 &control_page_default, 4228 sizeof(control_page_default)); 4229 memcpy(&lun->mode_pages.control_page[ 4230 CTL_PAGE_CHANGEABLE], 4231 &control_page_changeable, 4232 sizeof(control_page_changeable)); 4233 memcpy(&lun->mode_pages.control_page[ 4234 CTL_PAGE_SAVED], 4235 &control_page_default, 4236 sizeof(control_page_default)); 4237 control_page = &lun->mode_pages.control_page[ 4238 CTL_PAGE_SAVED]; 4239 value = 
dnvlist_get_string(lun->be_lun->options, 4240 "reordering", NULL); 4241 if (value != NULL && 4242 strcmp(value, "unrestricted") == 0) { 4243 control_page->queue_flags &= 4244 ~SCP_QUEUE_ALG_MASK; 4245 control_page->queue_flags |= 4246 SCP_QUEUE_ALG_UNRESTRICTED; 4247 } 4248 memcpy(&lun->mode_pages.control_page[ 4249 CTL_PAGE_CURRENT], 4250 &lun->mode_pages.control_page[ 4251 CTL_PAGE_SAVED], 4252 sizeof(control_page_default)); 4253 page_index->page_data = 4254 (uint8_t *)lun->mode_pages.control_page; 4255 break; 4256 } 4257 case 0x01: 4258 memcpy(&lun->mode_pages.control_ext_page[ 4259 CTL_PAGE_DEFAULT], 4260 &control_ext_page_default, 4261 sizeof(control_ext_page_default)); 4262 memcpy(&lun->mode_pages.control_ext_page[ 4263 CTL_PAGE_CHANGEABLE], 4264 &control_ext_page_changeable, 4265 sizeof(control_ext_page_changeable)); 4266 memcpy(&lun->mode_pages.control_ext_page[ 4267 CTL_PAGE_SAVED], 4268 &control_ext_page_default, 4269 sizeof(control_ext_page_default)); 4270 memcpy(&lun->mode_pages.control_ext_page[ 4271 CTL_PAGE_CURRENT], 4272 &lun->mode_pages.control_ext_page[ 4273 CTL_PAGE_SAVED], 4274 sizeof(control_ext_page_default)); 4275 page_index->page_data = 4276 (uint8_t *)lun->mode_pages.control_ext_page; 4277 break; 4278 default: 4279 panic("subpage %#x for page %#x is incorrect!", 4280 page_index->subpage, page_code); 4281 } 4282 break; 4283 } 4284 case SMS_INFO_EXCEPTIONS_PAGE: { 4285 switch (page_index->subpage) { 4286 case SMS_SUBPAGE_PAGE_0: 4287 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 4288 &ie_page_default, 4289 sizeof(ie_page_default)); 4290 memcpy(&lun->mode_pages.ie_page[ 4291 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 4292 sizeof(ie_page_changeable)); 4293 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 4294 &ie_page_default, 4295 sizeof(ie_page_default)); 4296 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 4297 &ie_page_default, 4298 sizeof(ie_page_default)); 4299 page_index->page_data = 4300 (uint8_t *)lun->mode_pages.ie_page; 4301 break; 4302 case 0x02: { 4303 struct ctl_logical_block_provisioning_page *page; 4304 4305 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 4306 &lbp_page_default, 4307 sizeof(lbp_page_default)); 4308 memcpy(&lun->mode_pages.lbp_page[ 4309 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 4310 sizeof(lbp_page_changeable)); 4311 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4312 &lbp_page_default, 4313 sizeof(lbp_page_default)); 4314 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 4315 value = dnvlist_get_string(lun->be_lun->options, 4316 "avail-threshold", NULL); 4317 if (value != NULL && 4318 ctl_expand_number(value, &ival) == 0) { 4319 page->descr[0].flags |= SLBPPD_ENABLED | 4320 SLBPPD_ARMING_DEC; 4321 if (lun->be_lun->blocksize) 4322 ival /= lun->be_lun->blocksize; 4323 else 4324 ival /= 512; 4325 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4326 page->descr[0].count); 4327 } 4328 value = dnvlist_get_string(lun->be_lun->options, 4329 "used-threshold", NULL); 4330 if (value != NULL && 4331 ctl_expand_number(value, &ival) == 0) { 4332 page->descr[1].flags |= SLBPPD_ENABLED | 4333 SLBPPD_ARMING_INC; 4334 if (lun->be_lun->blocksize) 4335 ival /= lun->be_lun->blocksize; 4336 else 4337 ival /= 512; 4338 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4339 page->descr[1].count); 4340 } 4341 value = dnvlist_get_string(lun->be_lun->options, 4342 "pool-avail-threshold", NULL); 4343 if (value != NULL && 4344 ctl_expand_number(value, &ival) == 0) { 4345 page->descr[2].flags |= SLBPPD_ENABLED | 4346 SLBPPD_ARMING_DEC; 4347 if (lun->be_lun->blocksize) 
4348 ival /= lun->be_lun->blocksize; 4349 else 4350 ival /= 512; 4351 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4352 page->descr[2].count); 4353 } 4354 value = dnvlist_get_string(lun->be_lun->options, 4355 "pool-used-threshold", NULL); 4356 if (value != NULL && 4357 ctl_expand_number(value, &ival) == 0) { 4358 page->descr[3].flags |= SLBPPD_ENABLED | 4359 SLBPPD_ARMING_INC; 4360 if (lun->be_lun->blocksize) 4361 ival /= lun->be_lun->blocksize; 4362 else 4363 ival /= 512; 4364 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4365 page->descr[3].count); 4366 } 4367 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 4368 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4369 sizeof(lbp_page_default)); 4370 page_index->page_data = 4371 (uint8_t *)lun->mode_pages.lbp_page; 4372 break; 4373 } 4374 default: 4375 panic("subpage %#x for page %#x is incorrect!", 4376 page_index->subpage, page_code); 4377 } 4378 break; 4379 } 4380 case SMS_CDDVD_CAPS_PAGE:{ 4381 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4382 ("subpage %#x for page %#x is incorrect!", 4383 page_index->subpage, page_code)); 4384 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT], 4385 &cddvd_page_default, 4386 sizeof(cddvd_page_default)); 4387 memcpy(&lun->mode_pages.cddvd_page[ 4388 CTL_PAGE_CHANGEABLE], &cddvd_page_changeable, 4389 sizeof(cddvd_page_changeable)); 4390 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4391 &cddvd_page_default, 4392 sizeof(cddvd_page_default)); 4393 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT], 4394 &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4395 sizeof(cddvd_page_default)); 4396 page_index->page_data = 4397 (uint8_t *)lun->mode_pages.cddvd_page; 4398 break; 4399 } 4400 default: 4401 panic("invalid page code value %#x", page_code); 4402 } 4403 } 4404 4405 return (CTL_RETVAL_COMPLETE); 4406 } 4407 4408 static int 4409 ctl_init_log_page_index(struct ctl_lun *lun) 4410 { 4411 struct ctl_page_index *page_index; 4412 int i, j, k, prev; 4413 4414 memcpy(&lun->log_pages.index, log_page_index_template, 4415 sizeof(log_page_index_template)); 4416 4417 prev = -1; 4418 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 4419 page_index = &lun->log_pages.index[i]; 4420 if (lun->be_lun->lun_type == T_DIRECT && 4421 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 4422 continue; 4423 if (lun->be_lun->lun_type == T_PROCESSOR && 4424 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 4425 continue; 4426 if (lun->be_lun->lun_type == T_CDROM && 4427 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 4428 continue; 4429 4430 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 4431 lun->backend->lun_attr == NULL) 4432 continue; 4433 4434 if (page_index->page_code != prev) { 4435 lun->log_pages.pages_page[j] = page_index->page_code; 4436 prev = page_index->page_code; 4437 j++; 4438 } 4439 lun->log_pages.subpages_page[k*2] = page_index->page_code; 4440 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 4441 k++; 4442 } 4443 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 4444 lun->log_pages.index[0].page_len = j; 4445 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 4446 lun->log_pages.index[1].page_len = k * 2; 4447 lun->log_pages.index[2].page_data = (uint8_t *)&lun->log_pages.temp_page; 4448 lun->log_pages.index[2].page_len = sizeof(lun->log_pages.temp_page); 4449 lun->log_pages.index[3].page_data = &lun->log_pages.lbp_page[0]; 4450 lun->log_pages.index[3].page_len = 12*CTL_NUM_LBP_PARAMS; 4451 lun->log_pages.index[4].page_data = (uint8_t 
*)&lun->log_pages.stat_page; 4452 lun->log_pages.index[4].page_len = sizeof(lun->log_pages.stat_page); 4453 lun->log_pages.index[5].page_data = (uint8_t *)&lun->log_pages.ie_page; 4454 lun->log_pages.index[5].page_len = sizeof(lun->log_pages.ie_page); 4455 4456 return (CTL_RETVAL_COMPLETE); 4457 } 4458 4459 static int 4460 hex2bin(const char *str, uint8_t *buf, int buf_size) 4461 { 4462 int i; 4463 u_char c; 4464 4465 memset(buf, 0, buf_size); 4466 while (isspace(str[0])) 4467 str++; 4468 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) 4469 str += 2; 4470 buf_size *= 2; 4471 for (i = 0; str[i] != 0 && i < buf_size; i++) { 4472 while (str[i] == '-') /* Skip dashes in UUIDs. */ 4473 str++; 4474 c = str[i]; 4475 if (isdigit(c)) 4476 c -= '0'; 4477 else if (isalpha(c)) 4478 c -= isupper(c) ? 'A' - 10 : 'a' - 10; 4479 else 4480 break; 4481 if (c >= 16) 4482 break; 4483 if ((i & 1) == 0) 4484 buf[i / 2] |= (c << 4); 4485 else 4486 buf[i / 2] |= c; 4487 } 4488 return ((i + 1) / 2); 4489 } 4490 4491 /* 4492 * Add LUN. 4493 * 4494 * Returns 0 for success, non-zero (errno) for failure. 4495 */ 4496 int 4497 ctl_add_lun(struct ctl_be_lun *be_lun) 4498 { 4499 struct ctl_softc *ctl_softc = control_softc; 4500 struct ctl_lun *nlun, *lun; 4501 struct scsi_vpd_id_descriptor *desc; 4502 struct scsi_vpd_id_t10 *t10id; 4503 const char *eui, *naa, *scsiname, *uuid, *vendor, *value; 4504 int lun_number; 4505 int devidlen, idlen1, idlen2 = 0, len; 4506 4507 /* 4508 * We support only Direct Access, CD-ROM or Processor LUN types. 4509 */ 4510 switch (be_lun->lun_type) { 4511 case T_DIRECT: 4512 case T_PROCESSOR: 4513 case T_CDROM: 4514 break; 4515 case T_SEQUENTIAL: 4516 case T_CHANGER: 4517 default: 4518 return (EINVAL); 4519 } 4520 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK | M_ZERO); 4521 4522 lun->pending_sense = malloc(sizeof(struct scsi_sense_data *) * 4523 ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); 4524 lun->pending_ua = malloc(sizeof(ctl_ua_type *) * ctl_max_ports, 4525 M_DEVBUF, M_WAITOK | M_ZERO); 4526 lun->pr_keys = malloc(sizeof(uint64_t *) * ctl_max_ports, 4527 M_DEVBUF, M_WAITOK | M_ZERO); 4528 4529 /* Generate LUN ID. 
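 *
 * The device ID page is built from a mandatory T10 vendor ID
 * descriptor (CTL_VENDOR or the "vendor" option, plus the backend
 * device_id) followed by optional SCSI name, EUI-64, NAA and UUID
 * descriptors taken from the "scsiname", "eui", "naa" and "uuid"
 * backend options.  The hex-encoded options are converted with
 * hex2bin() above, which tolerates an "0x" prefix and embedded
 * dashes, so an illustrative value such as
 *
 *	naa=0x600a098038303053743f463787301234
 *
 * yields a 16-byte NAA descriptor.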
*/ 4530 devidlen = max(CTL_DEVID_MIN_LEN, 4531 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4532 idlen1 = sizeof(*t10id) + devidlen; 4533 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4534 scsiname = dnvlist_get_string(be_lun->options, "scsiname", NULL); 4535 if (scsiname != NULL) { 4536 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4537 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4538 } 4539 eui = dnvlist_get_string(be_lun->options, "eui", NULL); 4540 if (eui != NULL) { 4541 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4542 } 4543 naa = dnvlist_get_string(be_lun->options, "naa", NULL); 4544 if (naa != NULL) { 4545 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4546 } 4547 uuid = dnvlist_get_string(be_lun->options, "uuid", NULL); 4548 if (uuid != NULL) { 4549 len += sizeof(struct scsi_vpd_id_descriptor) + 18; 4550 } 4551 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4552 M_CTL, M_WAITOK | M_ZERO); 4553 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4554 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4555 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4556 desc->length = idlen1; 4557 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4558 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4559 if ((vendor = dnvlist_get_string(be_lun->options, "vendor", NULL)) == NULL) { 4560 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4561 } else { 4562 strncpy(t10id->vendor, vendor, 4563 min(sizeof(t10id->vendor), strlen(vendor))); 4564 } 4565 strncpy((char *)t10id->vendor_spec_id, 4566 (char *)be_lun->device_id, devidlen); 4567 if (scsiname != NULL) { 4568 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4569 desc->length); 4570 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4571 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4572 SVPD_ID_TYPE_SCSI_NAME; 4573 desc->length = idlen2; 4574 strlcpy(desc->identifier, scsiname, idlen2); 4575 } 4576 if (eui != NULL) { 4577 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4578 desc->length); 4579 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4580 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4581 SVPD_ID_TYPE_EUI64; 4582 desc->length = hex2bin(eui, desc->identifier, 16); 4583 desc->length = desc->length > 12 ? 16 : 4584 (desc->length > 8 ? 12 : 8); 4585 len -= 16 - desc->length; 4586 } 4587 if (naa != NULL) { 4588 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4589 desc->length); 4590 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4591 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4592 SVPD_ID_TYPE_NAA; 4593 desc->length = hex2bin(naa, desc->identifier, 16); 4594 desc->length = desc->length > 8 ? 16 : 8; 4595 len -= 16 - desc->length; 4596 } 4597 if (uuid != NULL) { 4598 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4599 desc->length); 4600 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4601 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4602 SVPD_ID_TYPE_UUID; 4603 desc->identifier[0] = 0x10; 4604 hex2bin(uuid, &desc->identifier[2], 16); 4605 desc->length = 18; 4606 } 4607 lun->lun_devid->len = len; 4608 4609 mtx_lock(&ctl_softc->ctl_lock); 4610 /* 4611 * See if the caller requested a particular LUN number. If so, see 4612 * if it is available. Otherwise, allocate the first available LUN. 
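 *
 * LUN numbers are tracked in the ctl_lun_mask bitmap with the
 * ctl_ffz()/ctl_set_mask()/ctl_clear_mask() helpers defined earlier:
 * ctl_ffz() returns the first clear bit in [0, ctl_max_luns), which
 * becomes the new LUN id, and ctl_set_mask() marks it busy.  Both
 * steps happen under ctl_lock, so two concurrent ctl_add_lun() calls
 * cannot pick the same number.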
4613 */ 4614 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4615 if ((be_lun->req_lun_id > (ctl_max_luns - 1)) 4616 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4617 mtx_unlock(&ctl_softc->ctl_lock); 4618 if (be_lun->req_lun_id > (ctl_max_luns - 1)) { 4619 printf("ctl: requested LUN ID %d is higher " 4620 "than ctl_max_luns - 1 (%d)\n", 4621 be_lun->req_lun_id, ctl_max_luns - 1); 4622 } else { 4623 /* 4624 * XXX KDM return an error, or just assign 4625 * another LUN ID in this case?? 4626 */ 4627 printf("ctl: requested LUN ID %d is already " 4628 "in use\n", be_lun->req_lun_id); 4629 } 4630 fail: 4631 free(lun->lun_devid, M_CTL); 4632 free(lun, M_CTL); 4633 return (ENOSPC); 4634 } 4635 lun_number = be_lun->req_lun_id; 4636 } else { 4637 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, ctl_max_luns); 4638 if (lun_number == -1) { 4639 mtx_unlock(&ctl_softc->ctl_lock); 4640 printf("ctl: can't allocate LUN, out of LUNs\n"); 4641 goto fail; 4642 } 4643 } 4644 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4645 mtx_unlock(&ctl_softc->ctl_lock); 4646 4647 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4648 lun->lun = lun_number; 4649 lun->be_lun = be_lun; 4650 /* 4651 * The processor LUN is always enabled. Disk LUNs come on line 4652 * disabled, and must be enabled by the backend. 4653 */ 4654 lun->flags |= CTL_LUN_DISABLED; 4655 lun->backend = be_lun->be; 4656 be_lun->ctl_lun = lun; 4657 be_lun->lun_id = lun_number; 4658 if (be_lun->flags & CTL_LUN_FLAG_EJECTED) 4659 lun->flags |= CTL_LUN_EJECTED; 4660 if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) 4661 lun->flags |= CTL_LUN_NO_MEDIA; 4662 if (be_lun->flags & CTL_LUN_FLAG_STOPPED) 4663 lun->flags |= CTL_LUN_STOPPED; 4664 4665 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4666 lun->flags |= CTL_LUN_PRIMARY_SC; 4667 4668 value = dnvlist_get_string(be_lun->options, "removable", NULL); 4669 if (value != NULL) { 4670 if (strcmp(value, "on") == 0) 4671 lun->flags |= CTL_LUN_REMOVABLE; 4672 } else if (be_lun->lun_type == T_CDROM) 4673 lun->flags |= CTL_LUN_REMOVABLE; 4674 4675 lun->ctl_softc = ctl_softc; 4676 #ifdef CTL_TIME_IO 4677 lun->last_busy = getsbinuptime(); 4678 #endif 4679 LIST_INIT(&lun->ooa_queue); 4680 STAILQ_INIT(&lun->error_list); 4681 lun->ie_reported = 1; 4682 callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0); 4683 ctl_tpc_lun_init(lun); 4684 if (lun->flags & CTL_LUN_REMOVABLE) { 4685 lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4, 4686 M_CTL, M_WAITOK); 4687 } 4688 4689 /* 4690 * Initialize the mode and log page index. 4691 */ 4692 ctl_init_page_index(lun); 4693 ctl_init_log_page_index(lun); 4694 4695 /* Setup statistics gathering */ 4696 lun->stats.item = lun_number; 4697 4698 /* 4699 * Now, before we insert this lun on the lun list, set the lun 4700 * inventory changed UA for all other luns. 4701 */ 4702 mtx_lock(&ctl_softc->ctl_lock); 4703 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4704 mtx_lock(&nlun->lun_lock); 4705 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4706 mtx_unlock(&nlun->lun_lock); 4707 } 4708 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4709 ctl_softc->ctl_luns[lun_number] = lun; 4710 ctl_softc->num_luns++; 4711 mtx_unlock(&ctl_softc->ctl_lock); 4712 4713 /* 4714 * We successfully added the LUN, attempt to enable it. 
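 *
 * If ctl_enable_lun() fails, everything done above is unwound: the
 * LUN is removed from the list, its number is released in the bitmap
 * and the structure is freed, so the caller sees a clean EIO.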
4715 */ 4716 if (ctl_enable_lun(lun) != 0) { 4717 printf("%s: ctl_enable_lun() failed!\n", __func__); 4718 mtx_lock(&ctl_softc->ctl_lock); 4719 STAILQ_REMOVE(&ctl_softc->lun_list, lun, ctl_lun, links); 4720 ctl_clear_mask(ctl_softc->ctl_lun_mask, lun_number); 4721 ctl_softc->ctl_luns[lun_number] = NULL; 4722 ctl_softc->num_luns--; 4723 mtx_unlock(&ctl_softc->ctl_lock); 4724 free(lun->lun_devid, M_CTL); 4725 free(lun, M_CTL); 4726 return (EIO); 4727 } 4728 4729 return (0); 4730 } 4731 4732 /* 4733 * Free LUN that has no active requests. 4734 */ 4735 static int 4736 ctl_free_lun(struct ctl_lun *lun) 4737 { 4738 struct ctl_softc *softc = lun->ctl_softc; 4739 struct ctl_lun *nlun; 4740 int i; 4741 4742 KASSERT(LIST_EMPTY(&lun->ooa_queue), 4743 ("Freeing a LUN %p with outstanding I/O!\n", lun)); 4744 4745 mtx_lock(&softc->ctl_lock); 4746 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4747 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4748 softc->ctl_luns[lun->lun] = NULL; 4749 softc->num_luns--; 4750 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4751 mtx_lock(&nlun->lun_lock); 4752 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4753 mtx_unlock(&nlun->lun_lock); 4754 } 4755 mtx_unlock(&softc->ctl_lock); 4756 4757 /* 4758 * Tell the backend to free resources, if this LUN has a backend. 4759 */ 4760 lun->be_lun->lun_shutdown(lun->be_lun); 4761 4762 lun->ie_reportcnt = UINT32_MAX; 4763 callout_drain(&lun->ie_callout); 4764 ctl_tpc_lun_shutdown(lun); 4765 mtx_destroy(&lun->lun_lock); 4766 free(lun->lun_devid, M_CTL); 4767 for (i = 0; i < ctl_max_ports; i++) 4768 free(lun->pending_ua[i], M_CTL); 4769 free(lun->pending_ua, M_DEVBUF); 4770 for (i = 0; i < ctl_max_ports; i++) 4771 free(lun->pr_keys[i], M_CTL); 4772 free(lun->pr_keys, M_DEVBUF); 4773 free(lun->write_buffer, M_CTL); 4774 free(lun->prevent, M_CTL); 4775 free(lun, M_CTL); 4776 4777 return (0); 4778 } 4779 4780 static int 4781 ctl_enable_lun(struct ctl_lun *lun) 4782 { 4783 struct ctl_softc *softc; 4784 struct ctl_port *port, *nport; 4785 int retval; 4786 4787 softc = lun->ctl_softc; 4788 4789 mtx_lock(&softc->ctl_lock); 4790 mtx_lock(&lun->lun_lock); 4791 KASSERT((lun->flags & CTL_LUN_DISABLED) != 0, 4792 ("%s: LUN not disabled", __func__)); 4793 lun->flags &= ~CTL_LUN_DISABLED; 4794 mtx_unlock(&lun->lun_lock); 4795 4796 STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) { 4797 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4798 port->lun_map != NULL || port->lun_enable == NULL) 4799 continue; 4800 4801 /* 4802 * Drop the lock while we call the FETD's enable routine. 4803 * This can lead to a callback into CTL (at least in the 4804 * case of the internal initiator frontend. 
4805 */ 4806 mtx_unlock(&softc->ctl_lock); 4807 retval = port->lun_enable(port->targ_lun_arg, lun->lun); 4808 mtx_lock(&softc->ctl_lock); 4809 if (retval != 0) { 4810 printf("%s: FETD %s port %d returned error " 4811 "%d for lun_enable on lun %jd\n", 4812 __func__, port->port_name, port->targ_port, 4813 retval, (intmax_t)lun->lun); 4814 } 4815 } 4816 4817 mtx_unlock(&softc->ctl_lock); 4818 ctl_isc_announce_lun(lun); 4819 4820 return (0); 4821 } 4822 4823 static int 4824 ctl_disable_lun(struct ctl_lun *lun) 4825 { 4826 struct ctl_softc *softc; 4827 struct ctl_port *port; 4828 int retval; 4829 4830 softc = lun->ctl_softc; 4831 4832 mtx_lock(&softc->ctl_lock); 4833 mtx_lock(&lun->lun_lock); 4834 KASSERT((lun->flags & CTL_LUN_DISABLED) == 0, 4835 ("%s: LUN not enabled", __func__)); 4836 lun->flags |= CTL_LUN_DISABLED; 4837 mtx_unlock(&lun->lun_lock); 4838 4839 STAILQ_FOREACH(port, &softc->port_list, links) { 4840 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4841 port->lun_map != NULL || port->lun_disable == NULL) 4842 continue; 4843 4844 /* 4845 * Drop the lock before we call the frontend's disable 4846 * routine, to avoid lock order reversals. 4847 * 4848 * XXX KDM what happens if the frontend list changes while 4849 * we're traversing it? It's unlikely, but should be handled. 4850 */ 4851 mtx_unlock(&softc->ctl_lock); 4852 retval = port->lun_disable(port->targ_lun_arg, lun->lun); 4853 mtx_lock(&softc->ctl_lock); 4854 if (retval != 0) { 4855 printf("%s: FETD %s port %d returned error " 4856 "%d for lun_disable on lun %jd\n", 4857 __func__, port->port_name, port->targ_port, 4858 retval, (intmax_t)lun->lun); 4859 } 4860 } 4861 4862 mtx_unlock(&softc->ctl_lock); 4863 ctl_isc_announce_lun(lun); 4864 4865 return (0); 4866 } 4867 4868 int 4869 ctl_start_lun(struct ctl_be_lun *be_lun) 4870 { 4871 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4872 4873 mtx_lock(&lun->lun_lock); 4874 lun->flags &= ~CTL_LUN_STOPPED; 4875 mtx_unlock(&lun->lun_lock); 4876 return (0); 4877 } 4878 4879 int 4880 ctl_stop_lun(struct ctl_be_lun *be_lun) 4881 { 4882 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4883 4884 mtx_lock(&lun->lun_lock); 4885 lun->flags |= CTL_LUN_STOPPED; 4886 mtx_unlock(&lun->lun_lock); 4887 return (0); 4888 } 4889 4890 int 4891 ctl_lun_no_media(struct ctl_be_lun *be_lun) 4892 { 4893 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4894 4895 mtx_lock(&lun->lun_lock); 4896 lun->flags |= CTL_LUN_NO_MEDIA; 4897 mtx_unlock(&lun->lun_lock); 4898 return (0); 4899 } 4900 4901 int 4902 ctl_lun_has_media(struct ctl_be_lun *be_lun) 4903 { 4904 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4905 union ctl_ha_msg msg; 4906 4907 mtx_lock(&lun->lun_lock); 4908 lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED); 4909 if (lun->flags & CTL_LUN_REMOVABLE) 4910 ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE); 4911 mtx_unlock(&lun->lun_lock); 4912 if ((lun->flags & CTL_LUN_REMOVABLE) && 4913 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 4914 bzero(&msg.ua, sizeof(msg.ua)); 4915 msg.hdr.msg_type = CTL_MSG_UA; 4916 msg.hdr.nexus.initid = -1; 4917 msg.hdr.nexus.targ_port = -1; 4918 msg.hdr.nexus.targ_lun = lun->lun; 4919 msg.hdr.nexus.targ_mapped_lun = lun->lun; 4920 msg.ua.ua_all = 1; 4921 msg.ua.ua_set = 1; 4922 msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE; 4923 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 4924 M_WAITOK); 4925 } 4926 return (0); 4927 } 4928 4929 int 4930 ctl_lun_ejected(struct ctl_be_lun *be_lun) 4931 { 4932 struct ctl_lun *lun = (struct ctl_lun 
*)be_lun->ctl_lun; 4933 4934 mtx_lock(&lun->lun_lock); 4935 lun->flags |= CTL_LUN_EJECTED; 4936 mtx_unlock(&lun->lun_lock); 4937 return (0); 4938 } 4939 4940 int 4941 ctl_lun_primary(struct ctl_be_lun *be_lun) 4942 { 4943 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4944 4945 mtx_lock(&lun->lun_lock); 4946 lun->flags |= CTL_LUN_PRIMARY_SC; 4947 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4948 mtx_unlock(&lun->lun_lock); 4949 ctl_isc_announce_lun(lun); 4950 return (0); 4951 } 4952 4953 int 4954 ctl_lun_secondary(struct ctl_be_lun *be_lun) 4955 { 4956 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4957 4958 mtx_lock(&lun->lun_lock); 4959 lun->flags &= ~CTL_LUN_PRIMARY_SC; 4960 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4961 mtx_unlock(&lun->lun_lock); 4962 ctl_isc_announce_lun(lun); 4963 return (0); 4964 } 4965 4966 /* 4967 * Remove LUN. If there are active requests, wait for completion. 4968 * 4969 * Returns 0 for success, non-zero (errno) for failure. 4970 * Completion is reported to backed via the lun_shutdown() method. 4971 */ 4972 int 4973 ctl_remove_lun(struct ctl_be_lun *be_lun) 4974 { 4975 struct ctl_lun *lun; 4976 4977 lun = (struct ctl_lun *)be_lun->ctl_lun; 4978 4979 ctl_disable_lun(lun); 4980 4981 mtx_lock(&lun->lun_lock); 4982 lun->flags |= CTL_LUN_INVALID; 4983 4984 /* 4985 * If there is nothing in the OOA queue, go ahead and free the LUN. 4986 * If we have something in the OOA queue, we'll free it when the 4987 * last I/O completes. 4988 */ 4989 if (LIST_EMPTY(&lun->ooa_queue)) { 4990 mtx_unlock(&lun->lun_lock); 4991 ctl_free_lun(lun); 4992 } else 4993 mtx_unlock(&lun->lun_lock); 4994 4995 return (0); 4996 } 4997 4998 void 4999 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 5000 { 5001 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5002 union ctl_ha_msg msg; 5003 5004 mtx_lock(&lun->lun_lock); 5005 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE); 5006 mtx_unlock(&lun->lun_lock); 5007 if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 5008 /* Send msg to other side. */ 5009 bzero(&msg.ua, sizeof(msg.ua)); 5010 msg.hdr.msg_type = CTL_MSG_UA; 5011 msg.hdr.nexus.initid = -1; 5012 msg.hdr.nexus.targ_port = -1; 5013 msg.hdr.nexus.targ_lun = lun->lun; 5014 msg.hdr.nexus.targ_mapped_lun = lun->lun; 5015 msg.ua.ua_all = 1; 5016 msg.ua.ua_set = 1; 5017 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE; 5018 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 5019 M_WAITOK); 5020 } 5021 } 5022 5023 /* 5024 * Backend "memory move is complete" callback for requests that never 5025 * make it down to say RAIDCore's configuration code. 5026 */ 5027 int 5028 ctl_config_move_done(union ctl_io *io, bool samethr) 5029 { 5030 int retval; 5031 5032 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 5033 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 5034 ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); 5035 5036 if (ctl_debug & CTL_DEBUG_CDB_DATA) 5037 ctl_data_print(io); 5038 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 5039 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5040 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 5041 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 5042 /* 5043 * XXX KDM just assuming a single pointer here, and not a 5044 * S/G list. If we start using S/G lists for config data, 5045 * we'll need to know how to clean them up here as well. 
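 *
 * For now every config command uses a single contiguous buffer, so
 * freeing io->scsiio.kern_data_ptr (when CTL_FLAG_ALLOCATED is set)
 * is all the cleanup this path needs.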
5046 */ 5047 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5048 free(io->scsiio.kern_data_ptr, M_CTL); 5049 ctl_done(io); 5050 retval = CTL_RETVAL_COMPLETE; 5051 } else { 5052 /* 5053 * XXX KDM now we need to continue data movement. Some 5054 * options: 5055 * - call ctl_scsiio() again? We don't do this for data 5056 * writes, because for those at least we know ahead of 5057 * time where the write will go and how long it is. For 5058 * config writes, though, that information is largely 5059 * contained within the write itself, thus we need to 5060 * parse out the data again. 5061 * 5062 * - Call some other function once the data is in? 5063 */ 5064 5065 /* 5066 * XXX KDM call ctl_scsiio() again for now, and check flag 5067 * bits to see whether we're allocated or not. 5068 */ 5069 retval = ctl_scsiio(&io->scsiio); 5070 } 5071 return (retval); 5072 } 5073 5074 /* 5075 * This gets called by a backend driver when it is done with a 5076 * data_submit method. 5077 */ 5078 void 5079 ctl_data_submit_done(union ctl_io *io) 5080 { 5081 /* 5082 * If the IO_CONT flag is set, we need to call the supplied 5083 * function to continue processing the I/O, instead of completing 5084 * the I/O just yet. 5085 * 5086 * If there is an error, though, we don't want to keep processing. 5087 * Instead, just send status back to the initiator. 5088 */ 5089 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5090 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5091 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5092 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5093 io->scsiio.io_cont(io); 5094 return; 5095 } 5096 ctl_done(io); 5097 } 5098 5099 /* 5100 * This gets called by a backend driver when it is done with a 5101 * configuration write. 5102 */ 5103 void 5104 ctl_config_write_done(union ctl_io *io) 5105 { 5106 uint8_t *buf; 5107 5108 /* 5109 * If the IO_CONT flag is set, we need to call the supplied 5110 * function to continue processing the I/O, instead of completing 5111 * the I/O just yet. 5112 * 5113 * If there is an error, though, we don't want to keep processing. 5114 * Instead, just send status back to the initiator. 5115 */ 5116 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5117 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5118 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5119 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5120 io->scsiio.io_cont(io); 5121 return; 5122 } 5123 /* 5124 * Since a configuration write can be done for commands that actually 5125 * have data allocated, like write buffer, and commands that have 5126 * no data, like start/stop unit, we need to check here. 5127 */ 5128 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5129 buf = io->scsiio.kern_data_ptr; 5130 else 5131 buf = NULL; 5132 ctl_done(io); 5133 if (buf) 5134 free(buf, M_CTL); 5135 } 5136 5137 void 5138 ctl_config_read_done(union ctl_io *io) 5139 { 5140 uint8_t *buf; 5141 5142 /* 5143 * If there is some error -- we are done, skip data transfer. 5144 */ 5145 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 5146 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5147 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 5148 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5149 buf = io->scsiio.kern_data_ptr; 5150 else 5151 buf = NULL; 5152 ctl_done(io); 5153 if (buf) 5154 free(buf, M_CTL); 5155 return; 5156 } 5157 5158 /* 5159 * If the IO_CONT flag is set, we need to call the supplied 5160 * function to continue processing the I/O, instead of completing 5161 * the I/O just yet. 
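 *
 * The io_cont callback is supplied by the command handler itself when
 * a single pass through the backend is not enough to finish the
 * command; ctl_write_same() below uses the same mechanism on the
 * write side.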
5162 */ 5163 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 5164 io->scsiio.io_cont(io); 5165 return; 5166 } 5167 5168 ctl_datamove(io); 5169 } 5170 5171 /* 5172 * SCSI release command. 5173 */ 5174 int 5175 ctl_scsi_release(struct ctl_scsiio *ctsio) 5176 { 5177 struct ctl_lun *lun = CTL_LUN(ctsio); 5178 uint32_t residx; 5179 5180 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 5181 5182 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5183 5184 /* 5185 * XXX KDM right now, we only support LUN reservation. We don't 5186 * support 3rd party reservations, or extent reservations, which 5187 * might actually need the parameter list. If we've gotten this 5188 * far, we've got a LUN reservation. Anything else got kicked out 5189 * above. So, according to SPC, ignore the length. 5190 */ 5191 5192 mtx_lock(&lun->lun_lock); 5193 5194 /* 5195 * According to SPC, it is not an error for an intiator to attempt 5196 * to release a reservation on a LUN that isn't reserved, or that 5197 * is reserved by another initiator. The reservation can only be 5198 * released, though, by the initiator who made it or by one of 5199 * several reset type events. 5200 */ 5201 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 5202 lun->flags &= ~CTL_LUN_RESERVED; 5203 5204 mtx_unlock(&lun->lun_lock); 5205 5206 ctl_set_success(ctsio); 5207 ctl_done((union ctl_io *)ctsio); 5208 return (CTL_RETVAL_COMPLETE); 5209 } 5210 5211 int 5212 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5213 { 5214 struct ctl_lun *lun = CTL_LUN(ctsio); 5215 uint32_t residx; 5216 5217 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5218 5219 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5220 5221 /* 5222 * XXX KDM right now, we only support LUN reservation. We don't 5223 * support 3rd party reservations, or extent reservations, which 5224 * might actually need the parameter list. If we've gotten this 5225 * far, we've got a LUN reservation. Anything else got kicked out 5226 * above. So, according to SPC, ignore the length. 5227 */ 5228 5229 mtx_lock(&lun->lun_lock); 5230 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 5231 ctl_set_reservation_conflict(ctsio); 5232 goto bailout; 5233 } 5234 5235 /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. 
*/ 5236 if (lun->flags & CTL_LUN_PR_RESERVED) { 5237 ctl_set_success(ctsio); 5238 goto bailout; 5239 } 5240 5241 lun->flags |= CTL_LUN_RESERVED; 5242 lun->res_idx = residx; 5243 ctl_set_success(ctsio); 5244 5245 bailout: 5246 mtx_unlock(&lun->lun_lock); 5247 ctl_done((union ctl_io *)ctsio); 5248 return (CTL_RETVAL_COMPLETE); 5249 } 5250 5251 int 5252 ctl_start_stop(struct ctl_scsiio *ctsio) 5253 { 5254 struct ctl_lun *lun = CTL_LUN(ctsio); 5255 struct scsi_start_stop_unit *cdb; 5256 int retval; 5257 5258 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5259 5260 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5261 5262 if ((cdb->how & SSS_PC_MASK) == 0) { 5263 if ((lun->flags & CTL_LUN_PR_RESERVED) && 5264 (cdb->how & SSS_START) == 0) { 5265 uint32_t residx; 5266 5267 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5268 if (ctl_get_prkey(lun, residx) == 0 || 5269 (lun->pr_res_idx != residx && lun->pr_res_type < 4)) { 5270 ctl_set_reservation_conflict(ctsio); 5271 ctl_done((union ctl_io *)ctsio); 5272 return (CTL_RETVAL_COMPLETE); 5273 } 5274 } 5275 5276 if ((cdb->how & SSS_LOEJ) && 5277 (lun->flags & CTL_LUN_REMOVABLE) == 0) { 5278 ctl_set_invalid_field(ctsio, 5279 /*sks_valid*/ 1, 5280 /*command*/ 1, 5281 /*field*/ 4, 5282 /*bit_valid*/ 1, 5283 /*bit*/ 1); 5284 ctl_done((union ctl_io *)ctsio); 5285 return (CTL_RETVAL_COMPLETE); 5286 } 5287 5288 if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) && 5289 lun->prevent_count > 0) { 5290 /* "Medium removal prevented" */ 5291 ctl_set_sense(ctsio, /*current_error*/ 1, 5292 /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ? 5293 SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST, 5294 /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE); 5295 ctl_done((union ctl_io *)ctsio); 5296 return (CTL_RETVAL_COMPLETE); 5297 } 5298 } 5299 5300 retval = lun->backend->config_write((union ctl_io *)ctsio); 5301 return (retval); 5302 } 5303 5304 int 5305 ctl_prevent_allow(struct ctl_scsiio *ctsio) 5306 { 5307 struct ctl_lun *lun = CTL_LUN(ctsio); 5308 struct scsi_prevent *cdb; 5309 int retval; 5310 uint32_t initidx; 5311 5312 CTL_DEBUG_PRINT(("ctl_prevent_allow\n")); 5313 5314 cdb = (struct scsi_prevent *)ctsio->cdb; 5315 5316 if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) { 5317 ctl_set_invalid_opcode(ctsio); 5318 ctl_done((union ctl_io *)ctsio); 5319 return (CTL_RETVAL_COMPLETE); 5320 } 5321 5322 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5323 mtx_lock(&lun->lun_lock); 5324 if ((cdb->how & PR_PREVENT) && 5325 ctl_is_set(lun->prevent, initidx) == 0) { 5326 ctl_set_mask(lun->prevent, initidx); 5327 lun->prevent_count++; 5328 } else if ((cdb->how & PR_PREVENT) == 0 && 5329 ctl_is_set(lun->prevent, initidx)) { 5330 ctl_clear_mask(lun->prevent, initidx); 5331 lun->prevent_count--; 5332 } 5333 mtx_unlock(&lun->lun_lock); 5334 retval = lun->backend->config_write((union ctl_io *)ctsio); 5335 return (retval); 5336 } 5337 5338 /* 5339 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5340 * we don't really do anything with the LBA and length fields if the user 5341 * passes them in. Instead we'll just flush out the cache for the entire 5342 * LUN. 
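 *
 * Both the 10- and 16-byte CDB forms are accepted; the LBA and block
 * count are decoded only so that an out-of-range request can be
 * failed with LBA OUT OF RANGE before the flush is handed to the
 * backend.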
5343 */ 5344 int 5345 ctl_sync_cache(struct ctl_scsiio *ctsio) 5346 { 5347 struct ctl_lun *lun = CTL_LUN(ctsio); 5348 struct ctl_lba_len_flags *lbalen; 5349 uint64_t starting_lba; 5350 uint32_t block_count; 5351 int retval; 5352 uint8_t byte2; 5353 5354 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5355 5356 retval = 0; 5357 5358 switch (ctsio->cdb[0]) { 5359 case SYNCHRONIZE_CACHE: { 5360 struct scsi_sync_cache *cdb; 5361 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5362 5363 starting_lba = scsi_4btoul(cdb->begin_lba); 5364 block_count = scsi_2btoul(cdb->lb_count); 5365 byte2 = cdb->byte2; 5366 break; 5367 } 5368 case SYNCHRONIZE_CACHE_16: { 5369 struct scsi_sync_cache_16 *cdb; 5370 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5371 5372 starting_lba = scsi_8btou64(cdb->begin_lba); 5373 block_count = scsi_4btoul(cdb->lb_count); 5374 byte2 = cdb->byte2; 5375 break; 5376 } 5377 default: 5378 ctl_set_invalid_opcode(ctsio); 5379 ctl_done((union ctl_io *)ctsio); 5380 goto bailout; 5381 break; /* NOTREACHED */ 5382 } 5383 5384 /* 5385 * We check the LBA and length, but don't do anything with them. 5386 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5387 * get flushed. This check will just help satisfy anyone who wants 5388 * to see an error for an out of range LBA. 5389 */ 5390 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5391 ctl_set_lba_out_of_range(ctsio, 5392 MAX(starting_lba, lun->be_lun->maxlba + 1)); 5393 ctl_done((union ctl_io *)ctsio); 5394 goto bailout; 5395 } 5396 5397 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5398 lbalen->lba = starting_lba; 5399 lbalen->len = block_count; 5400 lbalen->flags = byte2; 5401 retval = lun->backend->config_write((union ctl_io *)ctsio); 5402 5403 bailout: 5404 return (retval); 5405 } 5406 5407 int 5408 ctl_format(struct ctl_scsiio *ctsio) 5409 { 5410 struct scsi_format *cdb; 5411 int length, defect_list_len; 5412 5413 CTL_DEBUG_PRINT(("ctl_format\n")); 5414 5415 cdb = (struct scsi_format *)ctsio->cdb; 5416 5417 length = 0; 5418 if (cdb->byte2 & SF_FMTDATA) { 5419 if (cdb->byte2 & SF_LONGLIST) 5420 length = sizeof(struct scsi_format_header_long); 5421 else 5422 length = sizeof(struct scsi_format_header_short); 5423 } 5424 5425 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5426 && (length > 0)) { 5427 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5428 ctsio->kern_data_len = length; 5429 ctsio->kern_total_len = length; 5430 ctsio->kern_rel_offset = 0; 5431 ctsio->kern_sg_entries = 0; 5432 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5433 ctsio->be_move_done = ctl_config_move_done; 5434 ctl_datamove((union ctl_io *)ctsio); 5435 5436 return (CTL_RETVAL_COMPLETE); 5437 } 5438 5439 defect_list_len = 0; 5440 5441 if (cdb->byte2 & SF_FMTDATA) { 5442 if (cdb->byte2 & SF_LONGLIST) { 5443 struct scsi_format_header_long *header; 5444 5445 header = (struct scsi_format_header_long *) 5446 ctsio->kern_data_ptr; 5447 5448 defect_list_len = scsi_4btoul(header->defect_list_len); 5449 if (defect_list_len != 0) { 5450 ctl_set_invalid_field(ctsio, 5451 /*sks_valid*/ 1, 5452 /*command*/ 0, 5453 /*field*/ 2, 5454 /*bit_valid*/ 0, 5455 /*bit*/ 0); 5456 goto bailout; 5457 } 5458 } else { 5459 struct scsi_format_header_short *header; 5460 5461 header = (struct scsi_format_header_short *) 5462 ctsio->kern_data_ptr; 5463 5464 defect_list_len = scsi_2btoul(header->defect_list_len); 5465 if (defect_list_len != 0) { 5466 ctl_set_invalid_field(ctsio, 5467 /*sks_valid*/ 1, 5468 /*command*/ 0, 5469 
/*field*/ 2, 5470 /*bit_valid*/ 0, 5471 /*bit*/ 0); 5472 goto bailout; 5473 } 5474 } 5475 } 5476 5477 ctl_set_success(ctsio); 5478 bailout: 5479 5480 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5481 free(ctsio->kern_data_ptr, M_CTL); 5482 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5483 } 5484 5485 ctl_done((union ctl_io *)ctsio); 5486 return (CTL_RETVAL_COMPLETE); 5487 } 5488 5489 int 5490 ctl_read_buffer(struct ctl_scsiio *ctsio) 5491 { 5492 struct ctl_lun *lun = CTL_LUN(ctsio); 5493 uint64_t buffer_offset; 5494 uint32_t len; 5495 uint8_t byte2; 5496 static uint8_t descr[4]; 5497 static uint8_t echo_descr[4] = { 0 }; 5498 5499 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5500 5501 switch (ctsio->cdb[0]) { 5502 case READ_BUFFER: { 5503 struct scsi_read_buffer *cdb; 5504 5505 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5506 buffer_offset = scsi_3btoul(cdb->offset); 5507 len = scsi_3btoul(cdb->length); 5508 byte2 = cdb->byte2; 5509 break; 5510 } 5511 case READ_BUFFER_16: { 5512 struct scsi_read_buffer_16 *cdb; 5513 5514 cdb = (struct scsi_read_buffer_16 *)ctsio->cdb; 5515 buffer_offset = scsi_8btou64(cdb->offset); 5516 len = scsi_4btoul(cdb->length); 5517 byte2 = cdb->byte2; 5518 break; 5519 } 5520 default: /* This shouldn't happen. */ 5521 ctl_set_invalid_opcode(ctsio); 5522 ctl_done((union ctl_io *)ctsio); 5523 return (CTL_RETVAL_COMPLETE); 5524 } 5525 5526 if (buffer_offset > CTL_WRITE_BUFFER_SIZE || 5527 buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5528 ctl_set_invalid_field(ctsio, 5529 /*sks_valid*/ 1, 5530 /*command*/ 1, 5531 /*field*/ 6, 5532 /*bit_valid*/ 0, 5533 /*bit*/ 0); 5534 ctl_done((union ctl_io *)ctsio); 5535 return (CTL_RETVAL_COMPLETE); 5536 } 5537 5538 if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5539 descr[0] = 0; 5540 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5541 ctsio->kern_data_ptr = descr; 5542 len = min(len, sizeof(descr)); 5543 } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5544 ctsio->kern_data_ptr = echo_descr; 5545 len = min(len, sizeof(echo_descr)); 5546 } else { 5547 if (lun->write_buffer == NULL) { 5548 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5549 M_CTL, M_WAITOK); 5550 } 5551 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5552 } 5553 ctsio->kern_data_len = len; 5554 ctsio->kern_total_len = len; 5555 ctsio->kern_rel_offset = 0; 5556 ctsio->kern_sg_entries = 0; 5557 ctl_set_success(ctsio); 5558 ctsio->be_move_done = ctl_config_move_done; 5559 ctl_datamove((union ctl_io *)ctsio); 5560 return (CTL_RETVAL_COMPLETE); 5561 } 5562 5563 int 5564 ctl_write_buffer(struct ctl_scsiio *ctsio) 5565 { 5566 struct ctl_lun *lun = CTL_LUN(ctsio); 5567 struct scsi_write_buffer *cdb; 5568 int buffer_offset, len; 5569 5570 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5571 5572 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5573 5574 len = scsi_3btoul(cdb->length); 5575 buffer_offset = scsi_3btoul(cdb->offset); 5576 5577 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5578 ctl_set_invalid_field(ctsio, 5579 /*sks_valid*/ 1, 5580 /*command*/ 1, 5581 /*field*/ 6, 5582 /*bit_valid*/ 0, 5583 /*bit*/ 0); 5584 ctl_done((union ctl_io *)ctsio); 5585 return (CTL_RETVAL_COMPLETE); 5586 } 5587 5588 /* 5589 * If we've got a kernel request that hasn't been malloced yet, 5590 * malloc it and tell the caller the data buffer is here. 
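 *
 * Like other config writes with a data-out phase, this handler runs
 * twice: on the first pass CTL_FLAG_ALLOCATED is clear, so the buffer
 * is set up and ctl_datamove() is started with ctl_config_move_done()
 * as the completion callback; that callback re-enters ctl_scsiio(),
 * and on the second pass the flag is set and the command completes
 * using the data that just arrived.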
5591 */ 5592 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5593 if (lun->write_buffer == NULL) { 5594 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5595 M_CTL, M_WAITOK); 5596 } 5597 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5598 ctsio->kern_data_len = len; 5599 ctsio->kern_total_len = len; 5600 ctsio->kern_rel_offset = 0; 5601 ctsio->kern_sg_entries = 0; 5602 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5603 ctsio->be_move_done = ctl_config_move_done; 5604 ctl_datamove((union ctl_io *)ctsio); 5605 5606 return (CTL_RETVAL_COMPLETE); 5607 } 5608 5609 ctl_set_success(ctsio); 5610 ctl_done((union ctl_io *)ctsio); 5611 return (CTL_RETVAL_COMPLETE); 5612 } 5613 5614 static int 5615 ctl_write_same_cont(union ctl_io *io) 5616 { 5617 struct ctl_lun *lun = CTL_LUN(io); 5618 struct ctl_scsiio *ctsio; 5619 struct ctl_lba_len_flags *lbalen; 5620 int retval; 5621 5622 ctsio = &io->scsiio; 5623 ctsio->io_hdr.status = CTL_STATUS_NONE; 5624 lbalen = (struct ctl_lba_len_flags *) 5625 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5626 lbalen->lba += lbalen->len; 5627 if ((lun->be_lun->maxlba + 1) - lbalen->lba <= UINT32_MAX) { 5628 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 5629 lbalen->len = (lun->be_lun->maxlba + 1) - lbalen->lba; 5630 } 5631 5632 CTL_DEBUG_PRINT(("ctl_write_same_cont: calling config_write()\n")); 5633 retval = lun->backend->config_write((union ctl_io *)ctsio); 5634 return (retval); 5635 } 5636 5637 int 5638 ctl_write_same(struct ctl_scsiio *ctsio) 5639 { 5640 struct ctl_lun *lun = CTL_LUN(ctsio); 5641 struct ctl_lba_len_flags *lbalen; 5642 const char *val; 5643 uint64_t lba, ival; 5644 uint32_t num_blocks; 5645 int len, retval; 5646 uint8_t byte2; 5647 5648 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5649 5650 switch (ctsio->cdb[0]) { 5651 case WRITE_SAME_10: { 5652 struct scsi_write_same_10 *cdb; 5653 5654 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5655 5656 lba = scsi_4btoul(cdb->addr); 5657 num_blocks = scsi_2btoul(cdb->length); 5658 byte2 = cdb->byte2; 5659 break; 5660 } 5661 case WRITE_SAME_16: { 5662 struct scsi_write_same_16 *cdb; 5663 5664 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5665 5666 lba = scsi_8btou64(cdb->addr); 5667 num_blocks = scsi_4btoul(cdb->length); 5668 byte2 = cdb->byte2; 5669 break; 5670 } 5671 default: 5672 /* 5673 * We got a command we don't support. This shouldn't 5674 * happen, commands should be filtered out above us. 5675 */ 5676 ctl_set_invalid_opcode(ctsio); 5677 ctl_done((union ctl_io *)ctsio); 5678 5679 return (CTL_RETVAL_COMPLETE); 5680 break; /* NOTREACHED */ 5681 } 5682 5683 /* ANCHOR flag can be used only together with UNMAP */ 5684 if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) { 5685 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5686 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5687 ctl_done((union ctl_io *)ctsio); 5688 return (CTL_RETVAL_COMPLETE); 5689 } 5690 5691 /* 5692 * The first check is to make sure we're in bounds, the second 5693 * check is to catch wrap-around problems. If the lba + num blocks 5694 * is less than the lba, then we've wrapped around and the block 5695 * range is invalid anyway. 
5696 */ 5697 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5698 || ((lba + num_blocks) < lba)) { 5699 ctl_set_lba_out_of_range(ctsio, 5700 MAX(lba, lun->be_lun->maxlba + 1)); 5701 ctl_done((union ctl_io *)ctsio); 5702 return (CTL_RETVAL_COMPLETE); 5703 } 5704 5705 /* Zero number of blocks means "to the last logical block" */ 5706 if (num_blocks == 0) { 5707 ival = UINT64_MAX; 5708 val = dnvlist_get_string(lun->be_lun->options, 5709 "write_same_max_lba", NULL); 5710 if (val != NULL) 5711 ctl_expand_number(val, &ival); 5712 if ((lun->be_lun->maxlba + 1) - lba > ival) { 5713 ctl_set_invalid_field(ctsio, 5714 /*sks_valid*/ 1, /*command*/ 1, 5715 /*field*/ ctsio->cdb[0] == WRITE_SAME_10 ? 7 : 10, 5716 /*bit_valid*/ 0, /*bit*/ 0); 5717 ctl_done((union ctl_io *)ctsio); 5718 return (CTL_RETVAL_COMPLETE); 5719 } 5720 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5721 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 5722 ctsio->io_cont = ctl_write_same_cont; 5723 num_blocks = 1 << 31; 5724 } else 5725 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5726 } 5727 5728 len = lun->be_lun->blocksize; 5729 5730 /* 5731 * If we've got a kernel request that hasn't been malloced yet, 5732 * malloc it and tell the caller the data buffer is here. 5733 */ 5734 if ((byte2 & SWS_NDOB) == 0 && 5735 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5736 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5737 ctsio->kern_data_len = len; 5738 ctsio->kern_total_len = len; 5739 ctsio->kern_rel_offset = 0; 5740 ctsio->kern_sg_entries = 0; 5741 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5742 ctsio->be_move_done = ctl_config_move_done; 5743 ctl_datamove((union ctl_io *)ctsio); 5744 5745 return (CTL_RETVAL_COMPLETE); 5746 } 5747 5748 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5749 lbalen->lba = lba; 5750 lbalen->len = num_blocks; 5751 lbalen->flags = byte2; 5752 retval = lun->backend->config_write((union ctl_io *)ctsio); 5753 5754 return (retval); 5755 } 5756 5757 int 5758 ctl_unmap(struct ctl_scsiio *ctsio) 5759 { 5760 struct ctl_lun *lun = CTL_LUN(ctsio); 5761 struct scsi_unmap *cdb; 5762 struct ctl_ptr_len_flags *ptrlen; 5763 struct scsi_unmap_header *hdr; 5764 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5765 uint64_t lba; 5766 uint32_t num_blocks; 5767 int len, retval; 5768 uint8_t byte2; 5769 5770 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5771 5772 cdb = (struct scsi_unmap *)ctsio->cdb; 5773 len = scsi_2btoul(cdb->length); 5774 byte2 = cdb->byte2; 5775 5776 /* 5777 * If we've got a kernel request that hasn't been malloced yet, 5778 * malloc it and tell the caller the data buffer is here. 
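 * As with the other configuration writes, the first pass allocates the
 * parameter buffer and starts the data-out transfer; the second pass,
 * with CTL_FLAG_ALLOCATED set, validates and parses the UNMAP block
 * descriptors below.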
5779 */ 5780 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5781 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5782 ctsio->kern_data_len = len; 5783 ctsio->kern_total_len = len; 5784 ctsio->kern_rel_offset = 0; 5785 ctsio->kern_sg_entries = 0; 5786 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5787 ctsio->be_move_done = ctl_config_move_done; 5788 ctl_datamove((union ctl_io *)ctsio); 5789 5790 return (CTL_RETVAL_COMPLETE); 5791 } 5792 5793 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5794 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5795 if (len < sizeof (*hdr) || 5796 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5797 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5798 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5799 ctl_set_invalid_field(ctsio, 5800 /*sks_valid*/ 0, 5801 /*command*/ 0, 5802 /*field*/ 0, 5803 /*bit_valid*/ 0, 5804 /*bit*/ 0); 5805 goto done; 5806 } 5807 len = scsi_2btoul(hdr->desc_length); 5808 buf = (struct scsi_unmap_desc *)(hdr + 1); 5809 end = buf + len / sizeof(*buf); 5810 5811 endnz = buf; 5812 for (range = buf; range < end; range++) { 5813 lba = scsi_8btou64(range->lba); 5814 num_blocks = scsi_4btoul(range->length); 5815 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5816 || ((lba + num_blocks) < lba)) { 5817 ctl_set_lba_out_of_range(ctsio, 5818 MAX(lba, lun->be_lun->maxlba + 1)); 5819 ctl_done((union ctl_io *)ctsio); 5820 return (CTL_RETVAL_COMPLETE); 5821 } 5822 if (num_blocks != 0) 5823 endnz = range + 1; 5824 } 5825 5826 /* 5827 * Block backend can not handle zero last range. 5828 * Filter it out and return if there is nothing left. 5829 */ 5830 len = (uint8_t *)endnz - (uint8_t *)buf; 5831 if (len == 0) { 5832 ctl_set_success(ctsio); 5833 goto done; 5834 } 5835 5836 mtx_lock(&lun->lun_lock); 5837 ptrlen = (struct ctl_ptr_len_flags *) 5838 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5839 ptrlen->ptr = (void *)buf; 5840 ptrlen->len = len; 5841 ptrlen->flags = byte2; 5842 ctl_try_unblock_others(lun, (union ctl_io *)ctsio, FALSE); 5843 mtx_unlock(&lun->lun_lock); 5844 5845 retval = lun->backend->config_write((union ctl_io *)ctsio); 5846 return (retval); 5847 5848 done: 5849 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5850 free(ctsio->kern_data_ptr, M_CTL); 5851 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5852 } 5853 ctl_done((union ctl_io *)ctsio); 5854 return (CTL_RETVAL_COMPLETE); 5855 } 5856 5857 int 5858 ctl_default_page_handler(struct ctl_scsiio *ctsio, 5859 struct ctl_page_index *page_index, uint8_t *page_ptr) 5860 { 5861 struct ctl_lun *lun = CTL_LUN(ctsio); 5862 uint8_t *current_cp; 5863 int set_ua; 5864 uint32_t initidx; 5865 5866 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5867 set_ua = 0; 5868 5869 current_cp = (page_index->page_data + (page_index->page_len * 5870 CTL_PAGE_CURRENT)); 5871 5872 mtx_lock(&lun->lun_lock); 5873 if (memcmp(current_cp, page_ptr, page_index->page_len)) { 5874 memcpy(current_cp, page_ptr, page_index->page_len); 5875 set_ua = 1; 5876 } 5877 if (set_ua != 0) 5878 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5879 mtx_unlock(&lun->lun_lock); 5880 if (set_ua) { 5881 ctl_isc_announce_mode(lun, 5882 ctl_get_initindex(&ctsio->io_hdr.nexus), 5883 page_index->page_code, page_index->subpage); 5884 } 5885 return (CTL_RETVAL_COMPLETE); 5886 } 5887 5888 static void 5889 ctl_ie_timer(void *arg) 5890 { 5891 struct ctl_lun *lun = arg; 5892 uint64_t t; 5893 5894 if (lun->ie_asc == 0) 5895 return; 5896 5897 if (lun->MODE_IE.mrie == SIEP_MRIE_UA) 5898 
ctl_est_ua_all(lun, -1, CTL_UA_IE); 5899 else 5900 lun->ie_reported = 0; 5901 5902 if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) { 5903 lun->ie_reportcnt++; 5904 t = scsi_4btoul(lun->MODE_IE.interval_timer); 5905 if (t == 0 || t == UINT32_MAX) 5906 t = 3000; /* 5 min */ 5907 callout_schedule_sbt(&lun->ie_callout, SBT_1S / 10 * t, 5908 SBT_1S / 10, 0); 5909 } 5910 } 5911 5912 int 5913 ctl_ie_page_handler(struct ctl_scsiio *ctsio, 5914 struct ctl_page_index *page_index, uint8_t *page_ptr) 5915 { 5916 struct ctl_lun *lun = CTL_LUN(ctsio); 5917 struct scsi_info_exceptions_page *pg; 5918 uint64_t t; 5919 5920 (void)ctl_default_page_handler(ctsio, page_index, page_ptr); 5921 5922 pg = (struct scsi_info_exceptions_page *)page_ptr; 5923 mtx_lock(&lun->lun_lock); 5924 if (pg->info_flags & SIEP_FLAGS_TEST) { 5925 lun->ie_asc = 0x5d; 5926 lun->ie_ascq = 0xff; 5927 if (pg->mrie == SIEP_MRIE_UA) { 5928 ctl_est_ua_all(lun, -1, CTL_UA_IE); 5929 lun->ie_reported = 1; 5930 } else { 5931 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 5932 lun->ie_reported = -1; 5933 } 5934 lun->ie_reportcnt = 1; 5935 if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) { 5936 lun->ie_reportcnt++; 5937 t = scsi_4btoul(pg->interval_timer); 5938 if (t == 0 || t == UINT32_MAX) 5939 t = 3000; /* 5 min */ 5940 callout_reset_sbt(&lun->ie_callout, SBT_1S / 10 * t, 5941 SBT_1S / 10, ctl_ie_timer, lun, 0); 5942 } 5943 } else { 5944 lun->ie_asc = 0; 5945 lun->ie_ascq = 0; 5946 lun->ie_reported = 1; 5947 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 5948 lun->ie_reportcnt = UINT32_MAX; 5949 callout_stop(&lun->ie_callout); 5950 } 5951 mtx_unlock(&lun->lun_lock); 5952 return (CTL_RETVAL_COMPLETE); 5953 } 5954 5955 static int 5956 ctl_do_mode_select(union ctl_io *io) 5957 { 5958 struct ctl_lun *lun = CTL_LUN(io); 5959 struct scsi_mode_page_header *page_header; 5960 struct ctl_page_index *page_index; 5961 struct ctl_scsiio *ctsio; 5962 int page_len, page_len_offset, page_len_size; 5963 union ctl_modepage_info *modepage_info; 5964 uint16_t *len_left, *len_used; 5965 int retval, i; 5966 5967 ctsio = &io->scsiio; 5968 page_index = NULL; 5969 page_len = 0; 5970 5971 modepage_info = (union ctl_modepage_info *) 5972 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 5973 len_left = &modepage_info->header.len_left; 5974 len_used = &modepage_info->header.len_used; 5975 5976 do_next_page: 5977 5978 page_header = (struct scsi_mode_page_header *) 5979 (ctsio->kern_data_ptr + *len_used); 5980 5981 if (*len_left == 0) { 5982 free(ctsio->kern_data_ptr, M_CTL); 5983 ctl_set_success(ctsio); 5984 ctl_done((union ctl_io *)ctsio); 5985 return (CTL_RETVAL_COMPLETE); 5986 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 5987 free(ctsio->kern_data_ptr, M_CTL); 5988 ctl_set_param_len_error(ctsio); 5989 ctl_done((union ctl_io *)ctsio); 5990 return (CTL_RETVAL_COMPLETE); 5991 5992 } else if ((page_header->page_code & SMPH_SPF) 5993 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 5994 free(ctsio->kern_data_ptr, M_CTL); 5995 ctl_set_param_len_error(ctsio); 5996 ctl_done((union ctl_io *)ctsio); 5997 return (CTL_RETVAL_COMPLETE); 5998 } 5999 6000 /* 6001 * XXX KDM should we do something with the block descriptor? 
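 * The loop below walks the LUN's mode page index, skipping pages that
 * do not apply to this device type, and matches the page code (and,
 * when the SPF bit is set, the subpage) given in the parameter data.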
6002 */ 6003 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6004 page_index = &lun->mode_pages.index[i]; 6005 if (lun->be_lun->lun_type == T_DIRECT && 6006 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6007 continue; 6008 if (lun->be_lun->lun_type == T_PROCESSOR && 6009 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6010 continue; 6011 if (lun->be_lun->lun_type == T_CDROM && 6012 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6013 continue; 6014 6015 if ((page_index->page_code & SMPH_PC_MASK) != 6016 (page_header->page_code & SMPH_PC_MASK)) 6017 continue; 6018 6019 /* 6020 * If neither page has a subpage code, then we've got a 6021 * match. 6022 */ 6023 if (((page_index->page_code & SMPH_SPF) == 0) 6024 && ((page_header->page_code & SMPH_SPF) == 0)) { 6025 page_len = page_header->page_length; 6026 break; 6027 } 6028 6029 /* 6030 * If both pages have subpages, then the subpage numbers 6031 * have to match. 6032 */ 6033 if ((page_index->page_code & SMPH_SPF) 6034 && (page_header->page_code & SMPH_SPF)) { 6035 struct scsi_mode_page_header_sp *sph; 6036 6037 sph = (struct scsi_mode_page_header_sp *)page_header; 6038 if (page_index->subpage == sph->subpage) { 6039 page_len = scsi_2btoul(sph->page_length); 6040 break; 6041 } 6042 } 6043 } 6044 6045 /* 6046 * If we couldn't find the page, or if we don't have a mode select 6047 * handler for it, send back an error to the user. 6048 */ 6049 if ((i >= CTL_NUM_MODE_PAGES) 6050 || (page_index->select_handler == NULL)) { 6051 ctl_set_invalid_field(ctsio, 6052 /*sks_valid*/ 1, 6053 /*command*/ 0, 6054 /*field*/ *len_used, 6055 /*bit_valid*/ 0, 6056 /*bit*/ 0); 6057 free(ctsio->kern_data_ptr, M_CTL); 6058 ctl_done((union ctl_io *)ctsio); 6059 return (CTL_RETVAL_COMPLETE); 6060 } 6061 6062 if (page_index->page_code & SMPH_SPF) { 6063 page_len_offset = 2; 6064 page_len_size = 2; 6065 } else { 6066 page_len_size = 1; 6067 page_len_offset = 1; 6068 } 6069 6070 /* 6071 * If the length the initiator gives us isn't the one we specify in 6072 * the mode page header, or if they didn't specify enough data in 6073 * the CDB to avoid truncating this page, kick out the request. 6074 */ 6075 if (page_len != page_index->page_len - page_len_offset - page_len_size) { 6076 ctl_set_invalid_field(ctsio, 6077 /*sks_valid*/ 1, 6078 /*command*/ 0, 6079 /*field*/ *len_used + page_len_offset, 6080 /*bit_valid*/ 0, 6081 /*bit*/ 0); 6082 free(ctsio->kern_data_ptr, M_CTL); 6083 ctl_done((union ctl_io *)ctsio); 6084 return (CTL_RETVAL_COMPLETE); 6085 } 6086 if (*len_left < page_index->page_len) { 6087 free(ctsio->kern_data_ptr, M_CTL); 6088 ctl_set_param_len_error(ctsio); 6089 ctl_done((union ctl_io *)ctsio); 6090 return (CTL_RETVAL_COMPLETE); 6091 } 6092 6093 /* 6094 * Run through the mode page, checking to make sure that the bits 6095 * the user changed are actually legal for him to change. 6096 */ 6097 for (i = 0; i < page_index->page_len; i++) { 6098 uint8_t *user_byte, *change_mask, *current_byte; 6099 int bad_bit; 6100 int j; 6101 6102 user_byte = (uint8_t *)page_header + i; 6103 change_mask = page_index->page_data + 6104 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6105 current_byte = page_index->page_data + 6106 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6107 6108 /* 6109 * Check to see whether the user set any bits in this byte 6110 * that he is not allowed to set. 6111 */ 6112 if ((*user_byte & ~(*change_mask)) == 6113 (*current_byte & ~(*change_mask))) 6114 continue; 6115 6116 /* 6117 * Go through bit by bit to determine which one is illegal. 
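 * The byte offset and bit number found here are reported back in the
 * sense-key specific bytes of the ILLEGAL REQUEST sense data built
 * just below.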
6118 */ 6119 bad_bit = 0; 6120 for (j = 7; j >= 0; j--) { 6121 if ((((1 << i) & ~(*change_mask)) & *user_byte) != 6122 (((1 << i) & ~(*change_mask)) & *current_byte)) { 6123 bad_bit = i; 6124 break; 6125 } 6126 } 6127 ctl_set_invalid_field(ctsio, 6128 /*sks_valid*/ 1, 6129 /*command*/ 0, 6130 /*field*/ *len_used + i, 6131 /*bit_valid*/ 1, 6132 /*bit*/ bad_bit); 6133 free(ctsio->kern_data_ptr, M_CTL); 6134 ctl_done((union ctl_io *)ctsio); 6135 return (CTL_RETVAL_COMPLETE); 6136 } 6137 6138 /* 6139 * Decrement these before we call the page handler, since we may 6140 * end up getting called back one way or another before the handler 6141 * returns to this context. 6142 */ 6143 *len_left -= page_index->page_len; 6144 *len_used += page_index->page_len; 6145 6146 retval = page_index->select_handler(ctsio, page_index, 6147 (uint8_t *)page_header); 6148 6149 /* 6150 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 6151 * wait until this queued command completes to finish processing 6152 * the mode page. If it returns anything other than 6153 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 6154 * already set the sense information, freed the data pointer, and 6155 * completed the io for us. 6156 */ 6157 if (retval != CTL_RETVAL_COMPLETE) 6158 goto bailout_no_done; 6159 6160 /* 6161 * If the initiator sent us more than one page, parse the next one. 6162 */ 6163 if (*len_left > 0) 6164 goto do_next_page; 6165 6166 ctl_set_success(ctsio); 6167 free(ctsio->kern_data_ptr, M_CTL); 6168 ctl_done((union ctl_io *)ctsio); 6169 6170 bailout_no_done: 6171 6172 return (CTL_RETVAL_COMPLETE); 6173 6174 } 6175 6176 int 6177 ctl_mode_select(struct ctl_scsiio *ctsio) 6178 { 6179 struct ctl_lun *lun = CTL_LUN(ctsio); 6180 union ctl_modepage_info *modepage_info; 6181 int bd_len, i, header_size, param_len, rtd; 6182 uint32_t initidx; 6183 6184 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 6185 switch (ctsio->cdb[0]) { 6186 case MODE_SELECT_6: { 6187 struct scsi_mode_select_6 *cdb; 6188 6189 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6190 6191 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6192 param_len = cdb->length; 6193 header_size = sizeof(struct scsi_mode_header_6); 6194 break; 6195 } 6196 case MODE_SELECT_10: { 6197 struct scsi_mode_select_10 *cdb; 6198 6199 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6200 6201 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6202 param_len = scsi_2btoul(cdb->length); 6203 header_size = sizeof(struct scsi_mode_header_10); 6204 break; 6205 } 6206 default: 6207 ctl_set_invalid_opcode(ctsio); 6208 ctl_done((union ctl_io *)ctsio); 6209 return (CTL_RETVAL_COMPLETE); 6210 } 6211 6212 if (rtd) { 6213 if (param_len != 0) { 6214 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, 6215 /*command*/ 1, /*field*/ 0, 6216 /*bit_valid*/ 0, /*bit*/ 0); 6217 ctl_done((union ctl_io *)ctsio); 6218 return (CTL_RETVAL_COMPLETE); 6219 } 6220 6221 /* Revert to defaults. */ 6222 ctl_init_page_index(lun); 6223 mtx_lock(&lun->lun_lock); 6224 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 6225 mtx_unlock(&lun->lun_lock); 6226 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6227 ctl_isc_announce_mode(lun, -1, 6228 lun->mode_pages.index[i].page_code & SMPH_PC_MASK, 6229 lun->mode_pages.index[i].subpage); 6230 } 6231 ctl_set_success(ctsio); 6232 ctl_done((union ctl_io *)ctsio); 6233 return (CTL_RETVAL_COMPLETE); 6234 } 6235 6236 /* 6237 * From SPC-3: 6238 * "A parameter list length of zero indicates that the Data-Out Buffer 6239 * shall be empty. 
This condition shall not be considered as an error." 6240 */ 6241 if (param_len == 0) { 6242 ctl_set_success(ctsio); 6243 ctl_done((union ctl_io *)ctsio); 6244 return (CTL_RETVAL_COMPLETE); 6245 } 6246 6247 /* 6248 * Since we'll hit this the first time through, prior to 6249 * allocation, we don't need to free a data buffer here. 6250 */ 6251 if (param_len < header_size) { 6252 ctl_set_param_len_error(ctsio); 6253 ctl_done((union ctl_io *)ctsio); 6254 return (CTL_RETVAL_COMPLETE); 6255 } 6256 6257 /* 6258 * Allocate the data buffer and grab the user's data. In theory, 6259 * we shouldn't have to sanity check the parameter list length here 6260 * because the maximum size is 64K. We should be able to malloc 6261 * that much without too many problems. 6262 */ 6263 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6264 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6265 ctsio->kern_data_len = param_len; 6266 ctsio->kern_total_len = param_len; 6267 ctsio->kern_rel_offset = 0; 6268 ctsio->kern_sg_entries = 0; 6269 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6270 ctsio->be_move_done = ctl_config_move_done; 6271 ctl_datamove((union ctl_io *)ctsio); 6272 6273 return (CTL_RETVAL_COMPLETE); 6274 } 6275 6276 switch (ctsio->cdb[0]) { 6277 case MODE_SELECT_6: { 6278 struct scsi_mode_header_6 *mh6; 6279 6280 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6281 bd_len = mh6->blk_desc_len; 6282 break; 6283 } 6284 case MODE_SELECT_10: { 6285 struct scsi_mode_header_10 *mh10; 6286 6287 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6288 bd_len = scsi_2btoul(mh10->blk_desc_len); 6289 break; 6290 } 6291 default: 6292 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6293 } 6294 6295 if (param_len < (header_size + bd_len)) { 6296 free(ctsio->kern_data_ptr, M_CTL); 6297 ctl_set_param_len_error(ctsio); 6298 ctl_done((union ctl_io *)ctsio); 6299 return (CTL_RETVAL_COMPLETE); 6300 } 6301 6302 /* 6303 * Set the IO_CONT flag, so that if this I/O gets passed to 6304 * ctl_config_write_done(), it'll get passed back to 6305 * ctl_do_mode_select() for further processing, or completion if 6306 * we're all done. 
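 * The len_left/len_used counters kept in the modepage_info scratch
 * area of the I/O header track how much of the parameter list has been
 * consumed across those calls.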
6307 */ 6308 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6309 ctsio->io_cont = ctl_do_mode_select; 6310 6311 modepage_info = (union ctl_modepage_info *) 6312 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6313 memset(modepage_info, 0, sizeof(*modepage_info)); 6314 modepage_info->header.len_left = param_len - header_size - bd_len; 6315 modepage_info->header.len_used = header_size + bd_len; 6316 6317 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6318 } 6319 6320 int 6321 ctl_mode_sense(struct ctl_scsiio *ctsio) 6322 { 6323 struct ctl_lun *lun = CTL_LUN(ctsio); 6324 int pc, page_code, llba, subpage; 6325 int alloc_len, page_len, header_len, bd_len, total_len; 6326 void *block_desc; 6327 struct ctl_page_index *page_index; 6328 6329 llba = 0; 6330 6331 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6332 6333 switch (ctsio->cdb[0]) { 6334 case MODE_SENSE_6: { 6335 struct scsi_mode_sense_6 *cdb; 6336 6337 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6338 6339 header_len = sizeof(struct scsi_mode_hdr_6); 6340 if (cdb->byte2 & SMS_DBD) 6341 bd_len = 0; 6342 else 6343 bd_len = sizeof(struct scsi_mode_block_descr); 6344 header_len += bd_len; 6345 6346 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6347 page_code = cdb->page & SMS_PAGE_CODE; 6348 subpage = cdb->subpage; 6349 alloc_len = cdb->length; 6350 break; 6351 } 6352 case MODE_SENSE_10: { 6353 struct scsi_mode_sense_10 *cdb; 6354 6355 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6356 6357 header_len = sizeof(struct scsi_mode_hdr_10); 6358 if (cdb->byte2 & SMS_DBD) { 6359 bd_len = 0; 6360 } else if (lun->be_lun->lun_type == T_DIRECT) { 6361 if (cdb->byte2 & SMS10_LLBAA) { 6362 llba = 1; 6363 bd_len = sizeof(struct scsi_mode_block_descr_dlong); 6364 } else 6365 bd_len = sizeof(struct scsi_mode_block_descr_dshort); 6366 } else 6367 bd_len = sizeof(struct scsi_mode_block_descr); 6368 header_len += bd_len; 6369 6370 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6371 page_code = cdb->page & SMS_PAGE_CODE; 6372 subpage = cdb->subpage; 6373 alloc_len = scsi_2btoul(cdb->length); 6374 break; 6375 } 6376 default: 6377 ctl_set_invalid_opcode(ctsio); 6378 ctl_done((union ctl_io *)ctsio); 6379 return (CTL_RETVAL_COMPLETE); 6380 break; /* NOTREACHED */ 6381 } 6382 6383 /* 6384 * We have to make a first pass through to calculate the size of 6385 * the pages that match the user's query. Then we allocate enough 6386 * memory to hold it, and actually copy the data into the buffer. 6387 */ 6388 switch (page_code) { 6389 case SMS_ALL_PAGES_PAGE: { 6390 u_int i; 6391 6392 page_len = 0; 6393 6394 /* 6395 * At the moment, values other than 0 and 0xff here are 6396 * reserved according to SPC-3. 
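 * (A subpage value of 0 asks for only the subpage-0 form of each page,
 * while 0xff asks for every subpage of every page.)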
6397 */ 6398 if ((subpage != SMS_SUBPAGE_PAGE_0) 6399 && (subpage != SMS_SUBPAGE_ALL)) { 6400 ctl_set_invalid_field(ctsio, 6401 /*sks_valid*/ 1, 6402 /*command*/ 1, 6403 /*field*/ 3, 6404 /*bit_valid*/ 0, 6405 /*bit*/ 0); 6406 ctl_done((union ctl_io *)ctsio); 6407 return (CTL_RETVAL_COMPLETE); 6408 } 6409 6410 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6411 page_index = &lun->mode_pages.index[i]; 6412 6413 /* Make sure the page is supported for this dev type */ 6414 if (lun->be_lun->lun_type == T_DIRECT && 6415 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6416 continue; 6417 if (lun->be_lun->lun_type == T_PROCESSOR && 6418 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6419 continue; 6420 if (lun->be_lun->lun_type == T_CDROM && 6421 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6422 continue; 6423 6424 /* 6425 * We don't use this subpage if the user didn't 6426 * request all subpages. 6427 */ 6428 if ((page_index->subpage != 0) 6429 && (subpage == SMS_SUBPAGE_PAGE_0)) 6430 continue; 6431 6432 page_len += page_index->page_len; 6433 } 6434 break; 6435 } 6436 default: { 6437 u_int i; 6438 6439 page_len = 0; 6440 6441 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6442 page_index = &lun->mode_pages.index[i]; 6443 6444 /* Make sure the page is supported for this dev type */ 6445 if (lun->be_lun->lun_type == T_DIRECT && 6446 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6447 continue; 6448 if (lun->be_lun->lun_type == T_PROCESSOR && 6449 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6450 continue; 6451 if (lun->be_lun->lun_type == T_CDROM && 6452 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6453 continue; 6454 6455 /* Look for the right page code */ 6456 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6457 continue; 6458 6459 /* Look for the right subpage or the subpage wildcard*/ 6460 if ((page_index->subpage != subpage) 6461 && (subpage != SMS_SUBPAGE_ALL)) 6462 continue; 6463 6464 page_len += page_index->page_len; 6465 } 6466 6467 if (page_len == 0) { 6468 ctl_set_invalid_field(ctsio, 6469 /*sks_valid*/ 1, 6470 /*command*/ 1, 6471 /*field*/ 2, 6472 /*bit_valid*/ 1, 6473 /*bit*/ 5); 6474 ctl_done((union ctl_io *)ctsio); 6475 return (CTL_RETVAL_COMPLETE); 6476 } 6477 break; 6478 } 6479 } 6480 6481 total_len = header_len + page_len; 6482 6483 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6484 ctsio->kern_sg_entries = 0; 6485 ctsio->kern_rel_offset = 0; 6486 ctsio->kern_data_len = min(total_len, alloc_len); 6487 ctsio->kern_total_len = ctsio->kern_data_len; 6488 6489 switch (ctsio->cdb[0]) { 6490 case MODE_SENSE_6: { 6491 struct scsi_mode_hdr_6 *header; 6492 6493 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6494 6495 header->datalen = MIN(total_len - 1, 254); 6496 if (lun->be_lun->lun_type == T_DIRECT) { 6497 header->dev_specific = 0x10; /* DPOFUA */ 6498 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6499 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6500 header->dev_specific |= 0x80; /* WP */ 6501 } 6502 header->block_descr_len = bd_len; 6503 block_desc = &header[1]; 6504 break; 6505 } 6506 case MODE_SENSE_10: { 6507 struct scsi_mode_hdr_10 *header; 6508 int datalen; 6509 6510 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6511 6512 datalen = MIN(total_len - 2, 65533); 6513 scsi_ulto2b(datalen, header->datalen); 6514 if (lun->be_lun->lun_type == T_DIRECT) { 6515 header->dev_specific = 0x10; /* DPOFUA */ 6516 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6517 (lun->MODE_CTRL.eca_and_aen & 
SCP_SWP) != 0) 6518 header->dev_specific |= 0x80; /* WP */ 6519 } 6520 if (llba) 6521 header->flags |= SMH_LONGLBA; 6522 scsi_ulto2b(bd_len, header->block_descr_len); 6523 block_desc = &header[1]; 6524 break; 6525 } 6526 default: 6527 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6528 } 6529 6530 /* 6531 * If we've got a disk, use its blocksize in the block 6532 * descriptor. Otherwise, just set it to 0. 6533 */ 6534 if (bd_len > 0) { 6535 if (lun->be_lun->lun_type == T_DIRECT) { 6536 if (llba) { 6537 struct scsi_mode_block_descr_dlong *bd = block_desc; 6538 if (lun->be_lun->maxlba != 0) 6539 scsi_u64to8b(lun->be_lun->maxlba + 1, 6540 bd->num_blocks); 6541 scsi_ulto4b(lun->be_lun->blocksize, 6542 bd->block_len); 6543 } else { 6544 struct scsi_mode_block_descr_dshort *bd = block_desc; 6545 if (lun->be_lun->maxlba != 0) 6546 scsi_ulto4b(MIN(lun->be_lun->maxlba+1, 6547 UINT32_MAX), bd->num_blocks); 6548 scsi_ulto3b(lun->be_lun->blocksize, 6549 bd->block_len); 6550 } 6551 } else { 6552 struct scsi_mode_block_descr *bd = block_desc; 6553 scsi_ulto3b(0, bd->block_len); 6554 } 6555 } 6556 6557 switch (page_code) { 6558 case SMS_ALL_PAGES_PAGE: { 6559 int i, data_used; 6560 6561 data_used = header_len; 6562 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6563 struct ctl_page_index *page_index; 6564 6565 page_index = &lun->mode_pages.index[i]; 6566 if (lun->be_lun->lun_type == T_DIRECT && 6567 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6568 continue; 6569 if (lun->be_lun->lun_type == T_PROCESSOR && 6570 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6571 continue; 6572 if (lun->be_lun->lun_type == T_CDROM && 6573 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6574 continue; 6575 6576 /* 6577 * We don't use this subpage if the user didn't 6578 * request all subpages. We already checked (above) 6579 * to make sure the user only specified a subpage 6580 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6581 */ 6582 if ((page_index->subpage != 0) 6583 && (subpage == SMS_SUBPAGE_PAGE_0)) 6584 continue; 6585 6586 /* 6587 * Call the handler, if it exists, to update the 6588 * page to the latest values. 6589 */ 6590 if (page_index->sense_handler != NULL) 6591 page_index->sense_handler(ctsio, page_index,pc); 6592 6593 memcpy(ctsio->kern_data_ptr + data_used, 6594 page_index->page_data + 6595 (page_index->page_len * pc), 6596 page_index->page_len); 6597 data_used += page_index->page_len; 6598 } 6599 break; 6600 } 6601 default: { 6602 int i, data_used; 6603 6604 data_used = header_len; 6605 6606 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6607 struct ctl_page_index *page_index; 6608 6609 page_index = &lun->mode_pages.index[i]; 6610 6611 /* Look for the right page code */ 6612 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6613 continue; 6614 6615 /* Look for the right subpage or the subpage wildcard*/ 6616 if ((page_index->subpage != subpage) 6617 && (subpage != SMS_SUBPAGE_ALL)) 6618 continue; 6619 6620 /* Make sure the page is supported for this dev type */ 6621 if (lun->be_lun->lun_type == T_DIRECT && 6622 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6623 continue; 6624 if (lun->be_lun->lun_type == T_PROCESSOR && 6625 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6626 continue; 6627 if (lun->be_lun->lun_type == T_CDROM && 6628 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6629 continue; 6630 6631 /* 6632 * Call the handler, if it exists, to update the 6633 * page to the latest values. 
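 * The copy below then pulls the bytes for the requested page control
 * (current, changeable, default or saved) out of the per-LUN page_data
 * array.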
6634 */ 6635 if (page_index->sense_handler != NULL) 6636 page_index->sense_handler(ctsio, page_index,pc); 6637 6638 memcpy(ctsio->kern_data_ptr + data_used, 6639 page_index->page_data + 6640 (page_index->page_len * pc), 6641 page_index->page_len); 6642 data_used += page_index->page_len; 6643 } 6644 break; 6645 } 6646 } 6647 6648 ctl_set_success(ctsio); 6649 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6650 ctsio->be_move_done = ctl_config_move_done; 6651 ctl_datamove((union ctl_io *)ctsio); 6652 return (CTL_RETVAL_COMPLETE); 6653 } 6654 6655 int 6656 ctl_temp_log_sense_handler(struct ctl_scsiio *ctsio, 6657 struct ctl_page_index *page_index, 6658 int pc) 6659 { 6660 struct ctl_lun *lun = CTL_LUN(ctsio); 6661 struct scsi_log_temperature *data; 6662 const char *value; 6663 6664 data = (struct scsi_log_temperature *)page_index->page_data; 6665 6666 scsi_ulto2b(SLP_TEMPERATURE, data->hdr.param_code); 6667 data->hdr.param_control = SLP_LBIN; 6668 data->hdr.param_len = sizeof(struct scsi_log_temperature) - 6669 sizeof(struct scsi_log_param_header); 6670 if ((value = dnvlist_get_string(lun->be_lun->options, "temperature", 6671 NULL)) != NULL) 6672 data->temperature = strtol(value, NULL, 0); 6673 else 6674 data->temperature = 0xff; 6675 data++; 6676 6677 scsi_ulto2b(SLP_REFTEMPERATURE, data->hdr.param_code); 6678 data->hdr.param_control = SLP_LBIN; 6679 data->hdr.param_len = sizeof(struct scsi_log_temperature) - 6680 sizeof(struct scsi_log_param_header); 6681 if ((value = dnvlist_get_string(lun->be_lun->options, "reftemperature", 6682 NULL)) != NULL) 6683 data->temperature = strtol(value, NULL, 0); 6684 else 6685 data->temperature = 0xff; 6686 return (0); 6687 } 6688 6689 int 6690 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6691 struct ctl_page_index *page_index, 6692 int pc) 6693 { 6694 struct ctl_lun *lun = CTL_LUN(ctsio); 6695 struct scsi_log_param_header *phdr; 6696 uint8_t *data; 6697 uint64_t val; 6698 6699 data = page_index->page_data; 6700 6701 if (lun->backend->lun_attr != NULL && 6702 (val = lun->backend->lun_attr(lun->be_lun, "blocksavail")) 6703 != UINT64_MAX) { 6704 phdr = (struct scsi_log_param_header *)data; 6705 scsi_ulto2b(0x0001, phdr->param_code); 6706 phdr->param_control = SLP_LBIN | SLP_LP; 6707 phdr->param_len = 8; 6708 data = (uint8_t *)(phdr + 1); 6709 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6710 data[4] = 0x02; /* per-pool */ 6711 data += phdr->param_len; 6712 } 6713 6714 if (lun->backend->lun_attr != NULL && 6715 (val = lun->backend->lun_attr(lun->be_lun, "blocksused")) 6716 != UINT64_MAX) { 6717 phdr = (struct scsi_log_param_header *)data; 6718 scsi_ulto2b(0x0002, phdr->param_code); 6719 phdr->param_control = SLP_LBIN | SLP_LP; 6720 phdr->param_len = 8; 6721 data = (uint8_t *)(phdr + 1); 6722 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6723 data[4] = 0x01; /* per-LUN */ 6724 data += phdr->param_len; 6725 } 6726 6727 if (lun->backend->lun_attr != NULL && 6728 (val = lun->backend->lun_attr(lun->be_lun, "poolblocksavail")) 6729 != UINT64_MAX) { 6730 phdr = (struct scsi_log_param_header *)data; 6731 scsi_ulto2b(0x00f1, phdr->param_code); 6732 phdr->param_control = SLP_LBIN | SLP_LP; 6733 phdr->param_len = 8; 6734 data = (uint8_t *)(phdr + 1); 6735 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6736 data[4] = 0x02; /* per-pool */ 6737 data += phdr->param_len; 6738 } 6739 6740 if (lun->backend->lun_attr != NULL && 6741 (val = lun->backend->lun_attr(lun->be_lun, "poolblocksused")) 6742 != UINT64_MAX) { 6743 phdr = (struct scsi_log_param_header *)data; 6744 
scsi_ulto2b(0x00f2, phdr->param_code); 6745 phdr->param_control = SLP_LBIN | SLP_LP; 6746 phdr->param_len = 8; 6747 data = (uint8_t *)(phdr + 1); 6748 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6749 data[4] = 0x02; /* per-pool */ 6750 data += phdr->param_len; 6751 } 6752 6753 page_index->page_len = data - page_index->page_data; 6754 return (0); 6755 } 6756 6757 int 6758 ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, 6759 struct ctl_page_index *page_index, 6760 int pc) 6761 { 6762 struct ctl_lun *lun = CTL_LUN(ctsio); 6763 struct stat_page *data; 6764 struct bintime *t; 6765 6766 data = (struct stat_page *)page_index->page_data; 6767 6768 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); 6769 data->sap.hdr.param_control = SLP_LBIN; 6770 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - 6771 sizeof(struct scsi_log_param_header); 6772 scsi_u64to8b(lun->stats.operations[CTL_STATS_READ], 6773 data->sap.read_num); 6774 scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE], 6775 data->sap.write_num); 6776 if (lun->be_lun->blocksize > 0) { 6777 scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] / 6778 lun->be_lun->blocksize, data->sap.recvieved_lba); 6779 scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] / 6780 lun->be_lun->blocksize, data->sap.transmitted_lba); 6781 } 6782 t = &lun->stats.time[CTL_STATS_READ]; 6783 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), 6784 data->sap.read_int); 6785 t = &lun->stats.time[CTL_STATS_WRITE]; 6786 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), 6787 data->sap.write_int); 6788 scsi_u64to8b(0, data->sap.weighted_num); 6789 scsi_u64to8b(0, data->sap.weighted_int); 6790 scsi_ulto2b(SLP_IT, data->it.hdr.param_code); 6791 data->it.hdr.param_control = SLP_LBIN; 6792 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - 6793 sizeof(struct scsi_log_param_header); 6794 #ifdef CTL_TIME_IO 6795 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); 6796 #endif 6797 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); 6798 data->it.hdr.param_control = SLP_LBIN; 6799 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - 6800 sizeof(struct scsi_log_param_header); 6801 scsi_ulto4b(3, data->ti.exponent); 6802 scsi_ulto4b(1, data->ti.integer); 6803 return (0); 6804 } 6805 6806 int 6807 ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio, 6808 struct ctl_page_index *page_index, 6809 int pc) 6810 { 6811 struct ctl_lun *lun = CTL_LUN(ctsio); 6812 struct scsi_log_informational_exceptions *data; 6813 const char *value; 6814 6815 data = (struct scsi_log_informational_exceptions *)page_index->page_data; 6816 6817 scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code); 6818 data->hdr.param_control = SLP_LBIN; 6819 data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) - 6820 sizeof(struct scsi_log_param_header); 6821 data->ie_asc = lun->ie_asc; 6822 data->ie_ascq = lun->ie_ascq; 6823 if ((value = dnvlist_get_string(lun->be_lun->options, "temperature", 6824 NULL)) != NULL) 6825 data->temperature = strtol(value, NULL, 0); 6826 else 6827 data->temperature = 0xff; 6828 return (0); 6829 } 6830 6831 int 6832 ctl_log_sense(struct ctl_scsiio *ctsio) 6833 { 6834 struct ctl_lun *lun = CTL_LUN(ctsio); 6835 int i, pc, page_code, subpage; 6836 int alloc_len, total_len; 6837 struct ctl_page_index *page_index; 6838 struct scsi_log_sense *cdb; 6839 struct scsi_log_header *header; 6840 6841 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 6842 6843 cdb = (struct scsi_log_sense *)ctsio->cdb; 6844 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 
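	/* The page control field occupies the top two bits of the page
	 * byte; the page code itself is in the low six bits. */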
6845 page_code = cdb->page & SLS_PAGE_CODE; 6846 subpage = cdb->subpage; 6847 alloc_len = scsi_2btoul(cdb->length); 6848 6849 page_index = NULL; 6850 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 6851 page_index = &lun->log_pages.index[i]; 6852 6853 /* Look for the right page code */ 6854 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 6855 continue; 6856 6857 /* Look for the right subpage or the subpage wildcard*/ 6858 if (page_index->subpage != subpage) 6859 continue; 6860 6861 break; 6862 } 6863 if (i >= CTL_NUM_LOG_PAGES) { 6864 ctl_set_invalid_field(ctsio, 6865 /*sks_valid*/ 1, 6866 /*command*/ 1, 6867 /*field*/ 2, 6868 /*bit_valid*/ 0, 6869 /*bit*/ 0); 6870 ctl_done((union ctl_io *)ctsio); 6871 return (CTL_RETVAL_COMPLETE); 6872 } 6873 6874 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 6875 6876 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6877 ctsio->kern_sg_entries = 0; 6878 ctsio->kern_rel_offset = 0; 6879 ctsio->kern_data_len = min(total_len, alloc_len); 6880 ctsio->kern_total_len = ctsio->kern_data_len; 6881 6882 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 6883 header->page = page_index->page_code; 6884 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING) 6885 header->page |= SL_DS; 6886 if (page_index->subpage) { 6887 header->page |= SL_SPF; 6888 header->subpage = page_index->subpage; 6889 } 6890 scsi_ulto2b(page_index->page_len, header->datalen); 6891 6892 /* 6893 * Call the handler, if it exists, to update the 6894 * page to the latest values. 6895 */ 6896 if (page_index->sense_handler != NULL) 6897 page_index->sense_handler(ctsio, page_index, pc); 6898 6899 memcpy(header + 1, page_index->page_data, page_index->page_len); 6900 6901 ctl_set_success(ctsio); 6902 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6903 ctsio->be_move_done = ctl_config_move_done; 6904 ctl_datamove((union ctl_io *)ctsio); 6905 return (CTL_RETVAL_COMPLETE); 6906 } 6907 6908 int 6909 ctl_read_capacity(struct ctl_scsiio *ctsio) 6910 { 6911 struct ctl_lun *lun = CTL_LUN(ctsio); 6912 struct scsi_read_capacity *cdb; 6913 struct scsi_read_capacity_data *data; 6914 uint32_t lba; 6915 6916 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 6917 6918 cdb = (struct scsi_read_capacity *)ctsio->cdb; 6919 6920 lba = scsi_4btoul(cdb->addr); 6921 if (((cdb->pmi & SRC_PMI) == 0) 6922 && (lba != 0)) { 6923 ctl_set_invalid_field(/*ctsio*/ ctsio, 6924 /*sks_valid*/ 1, 6925 /*command*/ 1, 6926 /*field*/ 2, 6927 /*bit_valid*/ 0, 6928 /*bit*/ 0); 6929 ctl_done((union ctl_io *)ctsio); 6930 return (CTL_RETVAL_COMPLETE); 6931 } 6932 6933 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6934 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 6935 ctsio->kern_data_len = sizeof(*data); 6936 ctsio->kern_total_len = sizeof(*data); 6937 ctsio->kern_rel_offset = 0; 6938 ctsio->kern_sg_entries = 0; 6939 6940 /* 6941 * If the maximum LBA is greater than 0xfffffffe, the user must 6942 * issue a SERVICE ACTION IN (16) command, with the read capacity 6943 * serivce action set. 6944 */ 6945 if (lun->be_lun->maxlba > 0xfffffffe) 6946 scsi_ulto4b(0xffffffff, data->addr); 6947 else 6948 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 6949 6950 /* 6951 * XXX KDM this may not be 512 bytes... 
6952 */ 6953 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6954 6955 ctl_set_success(ctsio); 6956 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6957 ctsio->be_move_done = ctl_config_move_done; 6958 ctl_datamove((union ctl_io *)ctsio); 6959 return (CTL_RETVAL_COMPLETE); 6960 } 6961 6962 int 6963 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 6964 { 6965 struct ctl_lun *lun = CTL_LUN(ctsio); 6966 struct scsi_read_capacity_16 *cdb; 6967 struct scsi_read_capacity_data_long *data; 6968 uint64_t lba; 6969 uint32_t alloc_len; 6970 6971 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 6972 6973 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 6974 6975 alloc_len = scsi_4btoul(cdb->alloc_len); 6976 lba = scsi_8btou64(cdb->addr); 6977 6978 if ((cdb->reladr & SRC16_PMI) 6979 && (lba != 0)) { 6980 ctl_set_invalid_field(/*ctsio*/ ctsio, 6981 /*sks_valid*/ 1, 6982 /*command*/ 1, 6983 /*field*/ 2, 6984 /*bit_valid*/ 0, 6985 /*bit*/ 0); 6986 ctl_done((union ctl_io *)ctsio); 6987 return (CTL_RETVAL_COMPLETE); 6988 } 6989 6990 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6991 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 6992 ctsio->kern_rel_offset = 0; 6993 ctsio->kern_sg_entries = 0; 6994 ctsio->kern_data_len = min(sizeof(*data), alloc_len); 6995 ctsio->kern_total_len = ctsio->kern_data_len; 6996 6997 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 6998 /* XXX KDM this may not be 512 bytes... */ 6999 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7000 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 7001 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 7002 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 7003 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 7004 7005 ctl_set_success(ctsio); 7006 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7007 ctsio->be_move_done = ctl_config_move_done; 7008 ctl_datamove((union ctl_io *)ctsio); 7009 return (CTL_RETVAL_COMPLETE); 7010 } 7011 7012 int 7013 ctl_get_lba_status(struct ctl_scsiio *ctsio) 7014 { 7015 struct ctl_lun *lun = CTL_LUN(ctsio); 7016 struct scsi_get_lba_status *cdb; 7017 struct scsi_get_lba_status_data *data; 7018 struct ctl_lba_len_flags *lbalen; 7019 uint64_t lba; 7020 uint32_t alloc_len, total_len; 7021 int retval; 7022 7023 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 7024 7025 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 7026 lba = scsi_8btou64(cdb->addr); 7027 alloc_len = scsi_4btoul(cdb->alloc_len); 7028 7029 if (lba > lun->be_lun->maxlba) { 7030 ctl_set_lba_out_of_range(ctsio, lba); 7031 ctl_done((union ctl_io *)ctsio); 7032 return (CTL_RETVAL_COMPLETE); 7033 } 7034 7035 total_len = sizeof(*data) + sizeof(data->descr[0]); 7036 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7037 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 7038 ctsio->kern_rel_offset = 0; 7039 ctsio->kern_sg_entries = 0; 7040 ctsio->kern_data_len = min(total_len, alloc_len); 7041 ctsio->kern_total_len = ctsio->kern_data_len; 7042 7043 /* Fill dummy data in case backend can't tell anything. */ 7044 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 7045 scsi_u64to8b(lba, data->descr[0].addr); 7046 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 7047 data->descr[0].length); 7048 data->descr[0].status = 0; /* Mapped or unknown. 
*/ 7049 7050 ctl_set_success(ctsio); 7051 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7052 ctsio->be_move_done = ctl_config_move_done; 7053 7054 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 7055 lbalen->lba = lba; 7056 lbalen->len = total_len; 7057 lbalen->flags = 0; 7058 retval = lun->backend->config_read((union ctl_io *)ctsio); 7059 return (retval); 7060 } 7061 7062 int 7063 ctl_read_defect(struct ctl_scsiio *ctsio) 7064 { 7065 struct scsi_read_defect_data_10 *ccb10; 7066 struct scsi_read_defect_data_12 *ccb12; 7067 struct scsi_read_defect_data_hdr_10 *data10; 7068 struct scsi_read_defect_data_hdr_12 *data12; 7069 uint32_t alloc_len, data_len; 7070 uint8_t format; 7071 7072 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7073 7074 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7075 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7076 format = ccb10->format; 7077 alloc_len = scsi_2btoul(ccb10->alloc_length); 7078 data_len = sizeof(*data10); 7079 } else { 7080 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7081 format = ccb12->format; 7082 alloc_len = scsi_4btoul(ccb12->alloc_length); 7083 data_len = sizeof(*data12); 7084 } 7085 if (alloc_len == 0) { 7086 ctl_set_success(ctsio); 7087 ctl_done((union ctl_io *)ctsio); 7088 return (CTL_RETVAL_COMPLETE); 7089 } 7090 7091 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7092 ctsio->kern_rel_offset = 0; 7093 ctsio->kern_sg_entries = 0; 7094 ctsio->kern_data_len = min(data_len, alloc_len); 7095 ctsio->kern_total_len = ctsio->kern_data_len; 7096 7097 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7098 data10 = (struct scsi_read_defect_data_hdr_10 *) 7099 ctsio->kern_data_ptr; 7100 data10->format = format; 7101 scsi_ulto2b(0, data10->length); 7102 } else { 7103 data12 = (struct scsi_read_defect_data_hdr_12 *) 7104 ctsio->kern_data_ptr; 7105 data12->format = format; 7106 scsi_ulto2b(0, data12->generation); 7107 scsi_ulto4b(0, data12->length); 7108 } 7109 7110 ctl_set_success(ctsio); 7111 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7112 ctsio->be_move_done = ctl_config_move_done; 7113 ctl_datamove((union ctl_io *)ctsio); 7114 return (CTL_RETVAL_COMPLETE); 7115 } 7116 7117 int 7118 ctl_report_ident_info(struct ctl_scsiio *ctsio) 7119 { 7120 struct ctl_lun *lun = CTL_LUN(ctsio); 7121 struct scsi_report_ident_info *cdb; 7122 struct scsi_report_ident_info_data *rii_ptr; 7123 struct scsi_report_ident_info_descr *riid_ptr; 7124 const char *oii, *otii; 7125 int retval, alloc_len, total_len = 0, len = 0; 7126 7127 CTL_DEBUG_PRINT(("ctl_report_ident_info\n")); 7128 7129 cdb = (struct scsi_report_ident_info *)ctsio->cdb; 7130 retval = CTL_RETVAL_COMPLETE; 7131 7132 total_len = sizeof(struct scsi_report_ident_info_data); 7133 switch (cdb->type) { 7134 case RII_LUII: 7135 oii = dnvlist_get_string(lun->be_lun->options, 7136 "ident_info", NULL); 7137 if (oii) 7138 len = strlen(oii); /* Approximately */ 7139 break; 7140 case RII_LUTII: 7141 otii = dnvlist_get_string(lun->be_lun->options, 7142 "text_ident_info", NULL); 7143 if (otii) 7144 len = strlen(otii) + 1; /* NULL-terminated */ 7145 break; 7146 case RII_IIS: 7147 len = 2 * sizeof(struct scsi_report_ident_info_descr); 7148 break; 7149 default: 7150 ctl_set_invalid_field(/*ctsio*/ ctsio, 7151 /*sks_valid*/ 1, 7152 /*command*/ 1, 7153 /*field*/ 11, 7154 /*bit_valid*/ 1, 7155 /*bit*/ 2); 7156 ctl_done((union ctl_io *)ctsio); 7157 return(retval); 7158 } 7159 total_len += len; 7160 alloc_len = scsi_4btoul(cdb->length); 7161 7162 ctsio->kern_data_ptr = 
malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7163 ctsio->kern_sg_entries = 0; 7164 ctsio->kern_rel_offset = 0; 7165 ctsio->kern_data_len = min(total_len, alloc_len); 7166 ctsio->kern_total_len = ctsio->kern_data_len; 7167 7168 rii_ptr = (struct scsi_report_ident_info_data *)ctsio->kern_data_ptr; 7169 switch (cdb->type) { 7170 case RII_LUII: 7171 if (oii) { 7172 if (oii[0] == '0' && oii[1] == 'x') 7173 len = hex2bin(oii, (uint8_t *)(rii_ptr + 1), len); 7174 else 7175 strncpy((uint8_t *)(rii_ptr + 1), oii, len); 7176 } 7177 break; 7178 case RII_LUTII: 7179 if (otii) 7180 strlcpy((uint8_t *)(rii_ptr + 1), otii, len); 7181 break; 7182 case RII_IIS: 7183 riid_ptr = (struct scsi_report_ident_info_descr *)(rii_ptr + 1); 7184 riid_ptr->type = RII_LUII; 7185 scsi_ulto2b(0xffff, riid_ptr->length); 7186 riid_ptr++; 7187 riid_ptr->type = RII_LUTII; 7188 scsi_ulto2b(0xffff, riid_ptr->length); 7189 } 7190 scsi_ulto2b(len, rii_ptr->length); 7191 7192 ctl_set_success(ctsio); 7193 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7194 ctsio->be_move_done = ctl_config_move_done; 7195 ctl_datamove((union ctl_io *)ctsio); 7196 return(retval); 7197 } 7198 7199 int 7200 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7201 { 7202 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7203 struct ctl_lun *lun = CTL_LUN(ctsio); 7204 struct scsi_maintenance_in *cdb; 7205 int retval; 7206 int alloc_len, ext, total_len = 0, g, pc, pg, ts, os; 7207 int num_ha_groups, num_target_ports, shared_group; 7208 struct ctl_port *port; 7209 struct scsi_target_group_data *rtg_ptr; 7210 struct scsi_target_group_data_extended *rtg_ext_ptr; 7211 struct scsi_target_port_group_descriptor *tpg_desc; 7212 7213 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7214 7215 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7216 retval = CTL_RETVAL_COMPLETE; 7217 7218 switch (cdb->byte2 & STG_PDF_MASK) { 7219 case STG_PDF_LENGTH: 7220 ext = 0; 7221 break; 7222 case STG_PDF_EXTENDED: 7223 ext = 1; 7224 break; 7225 default: 7226 ctl_set_invalid_field(/*ctsio*/ ctsio, 7227 /*sks_valid*/ 1, 7228 /*command*/ 1, 7229 /*field*/ 2, 7230 /*bit_valid*/ 1, 7231 /*bit*/ 5); 7232 ctl_done((union ctl_io *)ctsio); 7233 return(retval); 7234 } 7235 7236 num_target_ports = 0; 7237 shared_group = (softc->is_single != 0); 7238 mtx_lock(&softc->ctl_lock); 7239 STAILQ_FOREACH(port, &softc->port_list, links) { 7240 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7241 continue; 7242 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7243 continue; 7244 num_target_ports++; 7245 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7246 shared_group = 1; 7247 } 7248 mtx_unlock(&softc->ctl_lock); 7249 num_ha_groups = (softc->is_single) ? 
0 : NUM_HA_SHELVES; 7250 7251 if (ext) 7252 total_len = sizeof(struct scsi_target_group_data_extended); 7253 else 7254 total_len = sizeof(struct scsi_target_group_data); 7255 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7256 (shared_group + num_ha_groups) + 7257 sizeof(struct scsi_target_port_descriptor) * num_target_ports; 7258 7259 alloc_len = scsi_4btoul(cdb->length); 7260 7261 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7262 ctsio->kern_sg_entries = 0; 7263 ctsio->kern_rel_offset = 0; 7264 ctsio->kern_data_len = min(total_len, alloc_len); 7265 ctsio->kern_total_len = ctsio->kern_data_len; 7266 7267 if (ext) { 7268 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7269 ctsio->kern_data_ptr; 7270 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7271 rtg_ext_ptr->format_type = 0x10; 7272 rtg_ext_ptr->implicit_transition_time = 0; 7273 tpg_desc = &rtg_ext_ptr->groups[0]; 7274 } else { 7275 rtg_ptr = (struct scsi_target_group_data *) 7276 ctsio->kern_data_ptr; 7277 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7278 tpg_desc = &rtg_ptr->groups[0]; 7279 } 7280 7281 mtx_lock(&softc->ctl_lock); 7282 pg = softc->port_min / softc->port_cnt; 7283 if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { 7284 /* Some shelf is known to be primary. */ 7285 if (softc->ha_link == CTL_HA_LINK_OFFLINE) 7286 os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7287 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) 7288 os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7289 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) 7290 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 7291 else 7292 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7293 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7294 ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7295 } else { 7296 ts = os; 7297 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7298 } 7299 } else { 7300 /* No known primary shelf. */ 7301 if (softc->ha_link == CTL_HA_LINK_OFFLINE) { 7302 ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7303 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7304 } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { 7305 ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7306 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7307 } else { 7308 ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7309 } 7310 } 7311 if (shared_group) { 7312 tpg_desc->pref_state = ts; 7313 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7314 TPG_U_SUP | TPG_T_SUP; 7315 scsi_ulto2b(1, tpg_desc->target_port_group); 7316 tpg_desc->status = TPG_IMPLICIT; 7317 pc = 0; 7318 STAILQ_FOREACH(port, &softc->port_list, links) { 7319 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7320 continue; 7321 if (!softc->is_single && 7322 (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) 7323 continue; 7324 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7325 continue; 7326 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7327 relative_target_port_identifier); 7328 pc++; 7329 } 7330 tpg_desc->target_port_count = pc; 7331 tpg_desc = (struct scsi_target_port_group_descriptor *) 7332 &tpg_desc->descriptors[pc]; 7333 } 7334 for (g = 0; g < num_ha_groups; g++) { 7335 tpg_desc->pref_state = (g == pg) ? 
ts : os; 7336 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7337 TPG_U_SUP | TPG_T_SUP; 7338 scsi_ulto2b(2 + g, tpg_desc->target_port_group); 7339 tpg_desc->status = TPG_IMPLICIT; 7340 pc = 0; 7341 STAILQ_FOREACH(port, &softc->port_list, links) { 7342 if (port->targ_port < g * softc->port_cnt || 7343 port->targ_port >= (g + 1) * softc->port_cnt) 7344 continue; 7345 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7346 continue; 7347 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7348 continue; 7349 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7350 continue; 7351 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7352 relative_target_port_identifier); 7353 pc++; 7354 } 7355 tpg_desc->target_port_count = pc; 7356 tpg_desc = (struct scsi_target_port_group_descriptor *) 7357 &tpg_desc->descriptors[pc]; 7358 } 7359 mtx_unlock(&softc->ctl_lock); 7360 7361 ctl_set_success(ctsio); 7362 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7363 ctsio->be_move_done = ctl_config_move_done; 7364 ctl_datamove((union ctl_io *)ctsio); 7365 return(retval); 7366 } 7367 7368 int 7369 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7370 { 7371 struct ctl_lun *lun = CTL_LUN(ctsio); 7372 struct scsi_report_supported_opcodes *cdb; 7373 const struct ctl_cmd_entry *entry, *sentry; 7374 struct scsi_report_supported_opcodes_all *all; 7375 struct scsi_report_supported_opcodes_descr *descr; 7376 struct scsi_report_supported_opcodes_one *one; 7377 int retval; 7378 int alloc_len, total_len; 7379 int opcode, service_action, i, j, num; 7380 7381 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7382 7383 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7384 retval = CTL_RETVAL_COMPLETE; 7385 7386 opcode = cdb->requested_opcode; 7387 service_action = scsi_2btoul(cdb->requested_service_action); 7388 switch (cdb->options & RSO_OPTIONS_MASK) { 7389 case RSO_OPTIONS_ALL: 7390 num = 0; 7391 for (i = 0; i < 256; i++) { 7392 entry = &ctl_cmd_table[i]; 7393 if (entry->flags & CTL_CMD_FLAG_SA5) { 7394 for (j = 0; j < 32; j++) { 7395 sentry = &((const struct ctl_cmd_entry *) 7396 entry->execute)[j]; 7397 if (ctl_cmd_applicable( 7398 lun->be_lun->lun_type, sentry)) 7399 num++; 7400 } 7401 } else { 7402 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7403 entry)) 7404 num++; 7405 } 7406 } 7407 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7408 num * sizeof(struct scsi_report_supported_opcodes_descr); 7409 break; 7410 case RSO_OPTIONS_OC: 7411 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7412 ctl_set_invalid_field(/*ctsio*/ ctsio, 7413 /*sks_valid*/ 1, 7414 /*command*/ 1, 7415 /*field*/ 2, 7416 /*bit_valid*/ 1, 7417 /*bit*/ 2); 7418 ctl_done((union ctl_io *)ctsio); 7419 return (CTL_RETVAL_COMPLETE); 7420 } 7421 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7422 break; 7423 case RSO_OPTIONS_OC_SA: 7424 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7425 service_action >= 32) { 7426 ctl_set_invalid_field(/*ctsio*/ ctsio, 7427 /*sks_valid*/ 1, 7428 /*command*/ 1, 7429 /*field*/ 2, 7430 /*bit_valid*/ 1, 7431 /*bit*/ 2); 7432 ctl_done((union ctl_io *)ctsio); 7433 return (CTL_RETVAL_COMPLETE); 7434 } 7435 /* FALLTHROUGH */ 7436 case RSO_OPTIONS_OC_ASA: 7437 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7438 break; 7439 default: 7440 ctl_set_invalid_field(/*ctsio*/ ctsio, 7441 /*sks_valid*/ 1, 7442 /*command*/ 1, 7443 /*field*/ 2, 7444 /*bit_valid*/ 1, 7445 /*bit*/ 2); 7446 ctl_done((union ctl_io *)ctsio); 7447 return 
(CTL_RETVAL_COMPLETE); 7448 } 7449 7450 alloc_len = scsi_4btoul(cdb->length); 7451 7452 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7453 ctsio->kern_sg_entries = 0; 7454 ctsio->kern_rel_offset = 0; 7455 ctsio->kern_data_len = min(total_len, alloc_len); 7456 ctsio->kern_total_len = ctsio->kern_data_len; 7457 7458 switch (cdb->options & RSO_OPTIONS_MASK) { 7459 case RSO_OPTIONS_ALL: 7460 all = (struct scsi_report_supported_opcodes_all *) 7461 ctsio->kern_data_ptr; 7462 num = 0; 7463 for (i = 0; i < 256; i++) { 7464 entry = &ctl_cmd_table[i]; 7465 if (entry->flags & CTL_CMD_FLAG_SA5) { 7466 for (j = 0; j < 32; j++) { 7467 sentry = &((const struct ctl_cmd_entry *) 7468 entry->execute)[j]; 7469 if (!ctl_cmd_applicable( 7470 lun->be_lun->lun_type, sentry)) 7471 continue; 7472 descr = &all->descr[num++]; 7473 descr->opcode = i; 7474 scsi_ulto2b(j, descr->service_action); 7475 descr->flags = RSO_SERVACTV; 7476 scsi_ulto2b(sentry->length, 7477 descr->cdb_length); 7478 } 7479 } else { 7480 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7481 entry)) 7482 continue; 7483 descr = &all->descr[num++]; 7484 descr->opcode = i; 7485 scsi_ulto2b(0, descr->service_action); 7486 descr->flags = 0; 7487 scsi_ulto2b(entry->length, descr->cdb_length); 7488 } 7489 } 7490 scsi_ulto4b( 7491 num * sizeof(struct scsi_report_supported_opcodes_descr), 7492 all->length); 7493 break; 7494 case RSO_OPTIONS_OC: 7495 one = (struct scsi_report_supported_opcodes_one *) 7496 ctsio->kern_data_ptr; 7497 entry = &ctl_cmd_table[opcode]; 7498 goto fill_one; 7499 case RSO_OPTIONS_OC_SA: 7500 one = (struct scsi_report_supported_opcodes_one *) 7501 ctsio->kern_data_ptr; 7502 entry = &ctl_cmd_table[opcode]; 7503 entry = &((const struct ctl_cmd_entry *) 7504 entry->execute)[service_action]; 7505 fill_one: 7506 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7507 one->support = 3; 7508 scsi_ulto2b(entry->length, one->cdb_length); 7509 one->cdb_usage[0] = opcode; 7510 memcpy(&one->cdb_usage[1], entry->usage, 7511 entry->length - 1); 7512 } else 7513 one->support = 1; 7514 break; 7515 case RSO_OPTIONS_OC_ASA: 7516 one = (struct scsi_report_supported_opcodes_one *) 7517 ctsio->kern_data_ptr; 7518 entry = &ctl_cmd_table[opcode]; 7519 if (entry->flags & CTL_CMD_FLAG_SA5) { 7520 entry = &((const struct ctl_cmd_entry *) 7521 entry->execute)[service_action]; 7522 } else if (service_action != 0) { 7523 one->support = 1; 7524 break; 7525 } 7526 goto fill_one; 7527 } 7528 7529 ctl_set_success(ctsio); 7530 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7531 ctsio->be_move_done = ctl_config_move_done; 7532 ctl_datamove((union ctl_io *)ctsio); 7533 return(retval); 7534 } 7535 7536 int 7537 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7538 { 7539 struct scsi_report_supported_tmf *cdb; 7540 struct scsi_report_supported_tmf_ext_data *data; 7541 int retval; 7542 int alloc_len, total_len; 7543 7544 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7545 7546 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7547 7548 retval = CTL_RETVAL_COMPLETE; 7549 7550 if (cdb->options & RST_REPD) 7551 total_len = sizeof(struct scsi_report_supported_tmf_ext_data); 7552 else 7553 total_len = sizeof(struct scsi_report_supported_tmf_data); 7554 alloc_len = scsi_4btoul(cdb->length); 7555 7556 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7557 ctsio->kern_sg_entries = 0; 7558 ctsio->kern_rel_offset = 0; 7559 ctsio->kern_data_len = min(total_len, alloc_len); 7560 ctsio->kern_total_len = ctsio->kern_data_len; 7561 7562 
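	/* Both response formats share their leading flag bytes, so the
	 * buffer is filled via the extended layout and kern_data_len
	 * clips the reply to the size implied by the REPD bit. */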
data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr; 7563 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | 7564 RST_TRS; 7565 data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; 7566 data->length = total_len - 4; 7567 7568 ctl_set_success(ctsio); 7569 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7570 ctsio->be_move_done = ctl_config_move_done; 7571 ctl_datamove((union ctl_io *)ctsio); 7572 return (retval); 7573 } 7574 7575 int 7576 ctl_report_timestamp(struct ctl_scsiio *ctsio) 7577 { 7578 struct scsi_report_timestamp *cdb; 7579 struct scsi_report_timestamp_data *data; 7580 struct timeval tv; 7581 int64_t timestamp; 7582 int retval; 7583 int alloc_len, total_len; 7584 7585 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7586 7587 cdb = (struct scsi_report_timestamp *)ctsio->cdb; 7588 7589 retval = CTL_RETVAL_COMPLETE; 7590 7591 total_len = sizeof(struct scsi_report_timestamp_data); 7592 alloc_len = scsi_4btoul(cdb->length); 7593 7594 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7595 ctsio->kern_sg_entries = 0; 7596 ctsio->kern_rel_offset = 0; 7597 ctsio->kern_data_len = min(total_len, alloc_len); 7598 ctsio->kern_total_len = ctsio->kern_data_len; 7599 7600 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7601 scsi_ulto2b(sizeof(*data) - 2, data->length); 7602 data->origin = RTS_ORIG_OUTSIDE; 7603 getmicrotime(&tv); 7604 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7605 scsi_ulto4b(timestamp >> 16, data->timestamp); 7606 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7607 7608 ctl_set_success(ctsio); 7609 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7610 ctsio->be_move_done = ctl_config_move_done; 7611 ctl_datamove((union ctl_io *)ctsio); 7612 return (retval); 7613 } 7614 7615 int 7616 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7617 { 7618 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7619 struct ctl_lun *lun = CTL_LUN(ctsio); 7620 struct scsi_per_res_in *cdb; 7621 int alloc_len, total_len = 0; 7622 /* struct scsi_per_res_in_rsrv in_data; */ 7623 uint64_t key; 7624 7625 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7626 7627 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7628 7629 alloc_len = scsi_2btoul(cdb->length); 7630 7631 retry: 7632 mtx_lock(&lun->lun_lock); 7633 switch (cdb->action) { 7634 case SPRI_RK: /* read keys */ 7635 total_len = sizeof(struct scsi_per_res_in_keys) + 7636 lun->pr_key_count * 7637 sizeof(struct scsi_per_res_key); 7638 break; 7639 case SPRI_RR: /* read reservation */ 7640 if (lun->flags & CTL_LUN_PR_RESERVED) 7641 total_len = sizeof(struct scsi_per_res_in_rsrv); 7642 else 7643 total_len = sizeof(struct scsi_per_res_in_header); 7644 break; 7645 case SPRI_RC: /* report capabilities */ 7646 total_len = sizeof(struct scsi_per_res_cap); 7647 break; 7648 case SPRI_RS: /* read full status */ 7649 total_len = sizeof(struct scsi_per_res_in_header) + 7650 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7651 lun->pr_key_count; 7652 break; 7653 default: 7654 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7655 } 7656 mtx_unlock(&lun->lun_lock); 7657 7658 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7659 ctsio->kern_rel_offset = 0; 7660 ctsio->kern_sg_entries = 0; 7661 ctsio->kern_data_len = min(total_len, alloc_len); 7662 ctsio->kern_total_len = ctsio->kern_data_len; 7663 7664 mtx_lock(&lun->lun_lock); 7665 switch (cdb->action) { 7666 case SPRI_RK: { // read keys 7667 struct scsi_per_res_in_keys *res_keys; 7668 int i, key_count; 7669 7670 
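		/*
		 * Build the READ KEYS payload: the PR generation counter and
		 * the list length go in the header, followed by one 8-byte
		 * reservation key per registered initiator.
		 */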
res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7671 7672 /* 7673 * We had to drop the lock to allocate our buffer, which 7674 * leaves time for someone to come in with another 7675 * persistent reservation. (That is unlikely, though, 7676 * since this should be the only persistent reservation 7677 * command active right now.) 7678 */ 7679 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7680 (lun->pr_key_count * 7681 sizeof(struct scsi_per_res_key)))){ 7682 mtx_unlock(&lun->lun_lock); 7683 free(ctsio->kern_data_ptr, M_CTL); 7684 printf("%s: reservation length changed, retrying\n", 7685 __func__); 7686 goto retry; 7687 } 7688 7689 scsi_ulto4b(lun->pr_generation, res_keys->header.generation); 7690 7691 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7692 lun->pr_key_count, res_keys->header.length); 7693 7694 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { 7695 if ((key = ctl_get_prkey(lun, i)) == 0) 7696 continue; 7697 7698 /* 7699 * We used lun->pr_key_count to calculate the 7700 * size to allocate. If it turns out the number of 7701 * initiators with the registered flag set is 7702 * larger than that (i.e. they haven't been kept in 7703 * sync), we've got a problem. 7704 */ 7705 if (key_count >= lun->pr_key_count) { 7706 key_count++; 7707 continue; 7708 } 7709 scsi_u64to8b(key, res_keys->keys[key_count].key); 7710 key_count++; 7711 } 7712 break; 7713 } 7714 case SPRI_RR: { // read reservation 7715 struct scsi_per_res_in_rsrv *res; 7716 int tmp_len, header_only; 7717 7718 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7719 7720 scsi_ulto4b(lun->pr_generation, res->header.generation); 7721 7722 if (lun->flags & CTL_LUN_PR_RESERVED) 7723 { 7724 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7725 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7726 res->header.length); 7727 header_only = 0; 7728 } else { 7729 tmp_len = sizeof(struct scsi_per_res_in_header); 7730 scsi_ulto4b(0, res->header.length); 7731 header_only = 1; 7732 } 7733 7734 /* 7735 * We had to drop the lock to allocate our buffer, which 7736 * leaves time for someone to come in with another 7737 * persistent reservation. (That is unlikely, though, 7738 * since this should be the only persistent reservation 7739 * command active right now.) 7740 */ 7741 if (tmp_len != total_len) { 7742 mtx_unlock(&lun->lun_lock); 7743 free(ctsio->kern_data_ptr, M_CTL); 7744 printf("%s: reservation status changed, retrying\n", 7745 __func__); 7746 goto retry; 7747 } 7748 7749 /* 7750 * No reservation held, so we're done. 7751 */ 7752 if (header_only != 0) 7753 break; 7754 7755 /* 7756 * If the registration is an All Registrants type, the key 7757 * is 0, since it doesn't really matter. 
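		 * Any registrant is a reservation holder in that case, so
		 * there is no single key to report.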
7758 */ 7759 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7760 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7761 res->data.reservation); 7762 } 7763 res->data.scopetype = lun->pr_res_type; 7764 break; 7765 } 7766 case SPRI_RC: //report capabilities 7767 { 7768 struct scsi_per_res_cap *res_cap; 7769 uint16_t type_mask; 7770 7771 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7772 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7773 res_cap->flags1 = SPRI_CRH; 7774 res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5; 7775 type_mask = SPRI_TM_WR_EX_AR | 7776 SPRI_TM_EX_AC_RO | 7777 SPRI_TM_WR_EX_RO | 7778 SPRI_TM_EX_AC | 7779 SPRI_TM_WR_EX | 7780 SPRI_TM_EX_AC_AR; 7781 scsi_ulto2b(type_mask, res_cap->type_mask); 7782 break; 7783 } 7784 case SPRI_RS: { // read full status 7785 struct scsi_per_res_in_full *res_status; 7786 struct scsi_per_res_in_full_desc *res_desc; 7787 struct ctl_port *port; 7788 int i, len; 7789 7790 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7791 7792 /* 7793 * We had to drop the lock to allocate our buffer, which 7794 * leaves time for someone to come in with another 7795 * persistent reservation. (That is unlikely, though, 7796 * since this should be the only persistent reservation 7797 * command active right now.) 7798 */ 7799 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7800 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7801 lun->pr_key_count)){ 7802 mtx_unlock(&lun->lun_lock); 7803 free(ctsio->kern_data_ptr, M_CTL); 7804 printf("%s: reservation length changed, retrying\n", 7805 __func__); 7806 goto retry; 7807 } 7808 7809 scsi_ulto4b(lun->pr_generation, res_status->header.generation); 7810 7811 res_desc = &res_status->desc[0]; 7812 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7813 if ((key = ctl_get_prkey(lun, i)) == 0) 7814 continue; 7815 7816 scsi_u64to8b(key, res_desc->res_key.key); 7817 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7818 (lun->pr_res_idx == i || 7819 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7820 res_desc->flags = SPRI_FULL_R_HOLDER; 7821 res_desc->scopetype = lun->pr_res_type; 7822 } 7823 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7824 res_desc->rel_trgt_port_id); 7825 len = 0; 7826 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; 7827 if (port != NULL) 7828 len = ctl_create_iid(port, 7829 i % CTL_MAX_INIT_PER_PORT, 7830 res_desc->transport_id); 7831 scsi_ulto4b(len, res_desc->additional_length); 7832 res_desc = (struct scsi_per_res_in_full_desc *) 7833 &res_desc->transport_id[len]; 7834 } 7835 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7836 res_status->header.length); 7837 break; 7838 } 7839 default: 7840 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7841 } 7842 mtx_unlock(&lun->lun_lock); 7843 7844 ctl_set_success(ctsio); 7845 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7846 ctsio->be_move_done = ctl_config_move_done; 7847 ctl_datamove((union ctl_io *)ctsio); 7848 return (CTL_RETVAL_COMPLETE); 7849 } 7850 7851 /* 7852 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7853 * it should return. 
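 *
 * Roughly three situations are handled below: a zero service action
 * reservation key (only meaningful against an all-registrants reservation,
 * where it unregisters everyone else), a preempt while no reservation or an
 * all-registrants reservation is held (which only drops the registrations
 * matching sa_res_key), and a preempt while a conventional reservation is
 * held (which either moves the reservation to the preempting initiator when
 * sa_res_key names the holder, or again only drops the matching
 * registrations).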
7854 */ 7855 static int 7856 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7857 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7858 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7859 struct scsi_per_res_out_parms* param) 7860 { 7861 union ctl_ha_msg persis_io; 7862 int i; 7863 7864 mtx_lock(&lun->lun_lock); 7865 if (sa_res_key == 0) { 7866 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7867 /* validate scope and type */ 7868 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7869 SPR_LU_SCOPE) { 7870 mtx_unlock(&lun->lun_lock); 7871 ctl_set_invalid_field(/*ctsio*/ ctsio, 7872 /*sks_valid*/ 1, 7873 /*command*/ 1, 7874 /*field*/ 2, 7875 /*bit_valid*/ 1, 7876 /*bit*/ 4); 7877 ctl_done((union ctl_io *)ctsio); 7878 return (1); 7879 } 7880 7881 if (type>8 || type==2 || type==4 || type==0) { 7882 mtx_unlock(&lun->lun_lock); 7883 ctl_set_invalid_field(/*ctsio*/ ctsio, 7884 /*sks_valid*/ 1, 7885 /*command*/ 1, 7886 /*field*/ 2, 7887 /*bit_valid*/ 1, 7888 /*bit*/ 0); 7889 ctl_done((union ctl_io *)ctsio); 7890 return (1); 7891 } 7892 7893 /* 7894 * Unregister everybody else and build UA for 7895 * them 7896 */ 7897 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7898 if (i == residx || ctl_get_prkey(lun, i) == 0) 7899 continue; 7900 7901 ctl_clr_prkey(lun, i); 7902 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7903 } 7904 lun->pr_key_count = 1; 7905 lun->pr_res_type = type; 7906 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 7907 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 7908 lun->pr_res_idx = residx; 7909 lun->pr_generation++; 7910 mtx_unlock(&lun->lun_lock); 7911 7912 /* send msg to other side */ 7913 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7914 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7915 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7916 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7917 persis_io.pr.pr_info.res_type = type; 7918 memcpy(persis_io.pr.pr_info.sa_res_key, 7919 param->serv_act_res_key, 7920 sizeof(param->serv_act_res_key)); 7921 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7922 sizeof(persis_io.pr), M_WAITOK); 7923 } else { 7924 /* not all registrants */ 7925 mtx_unlock(&lun->lun_lock); 7926 free(ctsio->kern_data_ptr, M_CTL); 7927 ctl_set_invalid_field(ctsio, 7928 /*sks_valid*/ 1, 7929 /*command*/ 0, 7930 /*field*/ 8, 7931 /*bit_valid*/ 0, 7932 /*bit*/ 0); 7933 ctl_done((union ctl_io *)ctsio); 7934 return (1); 7935 } 7936 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7937 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7938 int found = 0; 7939 7940 if (res_key == sa_res_key) { 7941 /* special case */ 7942 /* 7943 * The spec implies this is not good but doesn't 7944 * say what to do. There are two choices either 7945 * generate a res conflict or check condition 7946 * with illegal field in parameter data. Since 7947 * that is what is done when the sa_res_key is 7948 * zero I'll take that approach since this has 7949 * to do with the sa_res_key. 
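			 * The code below therefore returns ILLEGAL REQUEST
			 * with "invalid field in parameter list", pointing at
			 * the SERVICE ACTION RESERVATION KEY field (byte 8 of
			 * the parameter data).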
7950 */ 7951 mtx_unlock(&lun->lun_lock); 7952 free(ctsio->kern_data_ptr, M_CTL); 7953 ctl_set_invalid_field(ctsio, 7954 /*sks_valid*/ 1, 7955 /*command*/ 0, 7956 /*field*/ 8, 7957 /*bit_valid*/ 0, 7958 /*bit*/ 0); 7959 ctl_done((union ctl_io *)ctsio); 7960 return (1); 7961 } 7962 7963 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7964 if (ctl_get_prkey(lun, i) != sa_res_key) 7965 continue; 7966 7967 found = 1; 7968 ctl_clr_prkey(lun, i); 7969 lun->pr_key_count--; 7970 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7971 } 7972 if (!found) { 7973 mtx_unlock(&lun->lun_lock); 7974 free(ctsio->kern_data_ptr, M_CTL); 7975 ctl_set_reservation_conflict(ctsio); 7976 ctl_done((union ctl_io *)ctsio); 7977 return (CTL_RETVAL_COMPLETE); 7978 } 7979 lun->pr_generation++; 7980 mtx_unlock(&lun->lun_lock); 7981 7982 /* send msg to other side */ 7983 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7984 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7985 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7986 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7987 persis_io.pr.pr_info.res_type = type; 7988 memcpy(persis_io.pr.pr_info.sa_res_key, 7989 param->serv_act_res_key, 7990 sizeof(param->serv_act_res_key)); 7991 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7992 sizeof(persis_io.pr), M_WAITOK); 7993 } else { 7994 /* Reserved but not all registrants */ 7995 /* sa_res_key is res holder */ 7996 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7997 /* validate scope and type */ 7998 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7999 SPR_LU_SCOPE) { 8000 mtx_unlock(&lun->lun_lock); 8001 ctl_set_invalid_field(/*ctsio*/ ctsio, 8002 /*sks_valid*/ 1, 8003 /*command*/ 1, 8004 /*field*/ 2, 8005 /*bit_valid*/ 1, 8006 /*bit*/ 4); 8007 ctl_done((union ctl_io *)ctsio); 8008 return (1); 8009 } 8010 8011 if (type>8 || type==2 || type==4 || type==0) { 8012 mtx_unlock(&lun->lun_lock); 8013 ctl_set_invalid_field(/*ctsio*/ ctsio, 8014 /*sks_valid*/ 1, 8015 /*command*/ 1, 8016 /*field*/ 2, 8017 /*bit_valid*/ 1, 8018 /*bit*/ 0); 8019 ctl_done((union ctl_io *)ctsio); 8020 return (1); 8021 } 8022 8023 /* 8024 * Do the following: 8025 * if sa_res_key != res_key remove all 8026 * registrants w/sa_res_key and generate UA 8027 * for these registrants(Registrations 8028 * Preempted) if it wasn't an exclusive 8029 * reservation generate UA(Reservations 8030 * Preempted) for all other registered nexuses 8031 * if the type has changed. Establish the new 8032 * reservation and holder. If res_key and 8033 * sa_res_key are the same do the above 8034 * except don't unregister the res holder. 
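			 * In short: drop every other registration matching
			 * sa_res_key and send those initiators a Registrations
			 * Preempted unit attention; if the reservation type
			 * changes away from a registrants-only type, notify
			 * the remaining registrants as well; then make the
			 * preempting initiator (or all registrants, for AR
			 * types) the reservation holder.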
8035 */ 8036 8037 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8038 if (i == residx || ctl_get_prkey(lun, i) == 0) 8039 continue; 8040 8041 if (sa_res_key == ctl_get_prkey(lun, i)) { 8042 ctl_clr_prkey(lun, i); 8043 lun->pr_key_count--; 8044 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8045 } else if (type != lun->pr_res_type && 8046 (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8047 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8048 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8049 } 8050 } 8051 lun->pr_res_type = type; 8052 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8053 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8054 lun->pr_res_idx = residx; 8055 else 8056 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8057 lun->pr_generation++; 8058 mtx_unlock(&lun->lun_lock); 8059 8060 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8061 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8062 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8063 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8064 persis_io.pr.pr_info.res_type = type; 8065 memcpy(persis_io.pr.pr_info.sa_res_key, 8066 param->serv_act_res_key, 8067 sizeof(param->serv_act_res_key)); 8068 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8069 sizeof(persis_io.pr), M_WAITOK); 8070 } else { 8071 /* 8072 * sa_res_key is not the res holder just 8073 * remove registrants 8074 */ 8075 int found=0; 8076 8077 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8078 if (sa_res_key != ctl_get_prkey(lun, i)) 8079 continue; 8080 8081 found = 1; 8082 ctl_clr_prkey(lun, i); 8083 lun->pr_key_count--; 8084 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8085 } 8086 8087 if (!found) { 8088 mtx_unlock(&lun->lun_lock); 8089 free(ctsio->kern_data_ptr, M_CTL); 8090 ctl_set_reservation_conflict(ctsio); 8091 ctl_done((union ctl_io *)ctsio); 8092 return (1); 8093 } 8094 lun->pr_generation++; 8095 mtx_unlock(&lun->lun_lock); 8096 8097 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8098 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8099 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8100 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8101 persis_io.pr.pr_info.res_type = type; 8102 memcpy(persis_io.pr.pr_info.sa_res_key, 8103 param->serv_act_res_key, 8104 sizeof(param->serv_act_res_key)); 8105 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8106 sizeof(persis_io.pr), M_WAITOK); 8107 } 8108 } 8109 return (0); 8110 } 8111 8112 static void 8113 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8114 { 8115 uint64_t sa_res_key; 8116 int i; 8117 8118 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8119 8120 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8121 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8122 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8123 if (sa_res_key == 0) { 8124 /* 8125 * Unregister everybody else and build UA for 8126 * them 8127 */ 8128 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8129 if (i == msg->pr.pr_info.residx || 8130 ctl_get_prkey(lun, i) == 0) 8131 continue; 8132 8133 ctl_clr_prkey(lun, i); 8134 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8135 } 8136 8137 lun->pr_key_count = 1; 8138 lun->pr_res_type = msg->pr.pr_info.res_type; 8139 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8140 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8141 lun->pr_res_idx = msg->pr.pr_info.residx; 8142 } else { 8143 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8144 if (sa_res_key == ctl_get_prkey(lun, i)) 8145 continue; 8146 8147 ctl_clr_prkey(lun, i); 8148 lun->pr_key_count--; 8149 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8150 } 8151 } 8152 } else { 8153 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8154 if (i == 
msg->pr.pr_info.residx || 8155 ctl_get_prkey(lun, i) == 0) 8156 continue; 8157 8158 if (sa_res_key == ctl_get_prkey(lun, i)) { 8159 ctl_clr_prkey(lun, i); 8160 lun->pr_key_count--; 8161 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8162 } else if (msg->pr.pr_info.res_type != lun->pr_res_type 8163 && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8164 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8165 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8166 } 8167 } 8168 lun->pr_res_type = msg->pr.pr_info.res_type; 8169 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8170 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8171 lun->pr_res_idx = msg->pr.pr_info.residx; 8172 else 8173 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8174 } 8175 lun->pr_generation++; 8176 8177 } 8178 8179 int 8180 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8181 { 8182 struct ctl_softc *softc = CTL_SOFTC(ctsio); 8183 struct ctl_lun *lun = CTL_LUN(ctsio); 8184 int retval; 8185 u_int32_t param_len; 8186 struct scsi_per_res_out *cdb; 8187 struct scsi_per_res_out_parms* param; 8188 uint32_t residx; 8189 uint64_t res_key, sa_res_key, key; 8190 uint8_t type; 8191 union ctl_ha_msg persis_io; 8192 int i; 8193 8194 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8195 8196 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8197 retval = CTL_RETVAL_COMPLETE; 8198 8199 /* 8200 * We only support whole-LUN scope. The scope & type are ignored for 8201 * register, register and ignore existing key and clear. 8202 * We sometimes ignore scope and type on preempts too!! 8203 * Verify reservation type here as well. 8204 */ 8205 type = cdb->scope_type & SPR_TYPE_MASK; 8206 if ((cdb->action == SPRO_RESERVE) 8207 || (cdb->action == SPRO_RELEASE)) { 8208 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8209 ctl_set_invalid_field(/*ctsio*/ ctsio, 8210 /*sks_valid*/ 1, 8211 /*command*/ 1, 8212 /*field*/ 2, 8213 /*bit_valid*/ 1, 8214 /*bit*/ 4); 8215 ctl_done((union ctl_io *)ctsio); 8216 return (CTL_RETVAL_COMPLETE); 8217 } 8218 8219 if (type>8 || type==2 || type==4 || type==0) { 8220 ctl_set_invalid_field(/*ctsio*/ ctsio, 8221 /*sks_valid*/ 1, 8222 /*command*/ 1, 8223 /*field*/ 2, 8224 /*bit_valid*/ 1, 8225 /*bit*/ 0); 8226 ctl_done((union ctl_io *)ctsio); 8227 return (CTL_RETVAL_COMPLETE); 8228 } 8229 } 8230 8231 param_len = scsi_4btoul(cdb->length); 8232 8233 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8234 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8235 ctsio->kern_data_len = param_len; 8236 ctsio->kern_total_len = param_len; 8237 ctsio->kern_rel_offset = 0; 8238 ctsio->kern_sg_entries = 0; 8239 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8240 ctsio->be_move_done = ctl_config_move_done; 8241 ctl_datamove((union ctl_io *)ctsio); 8242 8243 return (CTL_RETVAL_COMPLETE); 8244 } 8245 8246 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8247 8248 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8249 res_key = scsi_8btou64(param->res_key.key); 8250 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8251 8252 /* 8253 * Validate the reservation key here except for SPRO_REG_IGNO 8254 * This must be done for all other service actions 8255 */ 8256 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8257 mtx_lock(&lun->lun_lock); 8258 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8259 if (res_key != key) { 8260 /* 8261 * The current key passed in doesn't match 8262 * the one the initiator previously 8263 * registered. 
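				 * SPC requires the command to be terminated
				 * with RESERVATION CONFLICT status in that
				 * case, which is what happens below.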
8264 */ 8265 mtx_unlock(&lun->lun_lock); 8266 free(ctsio->kern_data_ptr, M_CTL); 8267 ctl_set_reservation_conflict(ctsio); 8268 ctl_done((union ctl_io *)ctsio); 8269 return (CTL_RETVAL_COMPLETE); 8270 } 8271 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8272 /* 8273 * We are not registered 8274 */ 8275 mtx_unlock(&lun->lun_lock); 8276 free(ctsio->kern_data_ptr, M_CTL); 8277 ctl_set_reservation_conflict(ctsio); 8278 ctl_done((union ctl_io *)ctsio); 8279 return (CTL_RETVAL_COMPLETE); 8280 } else if (res_key != 0) { 8281 /* 8282 * We are not registered and trying to register but 8283 * the register key isn't zero. 8284 */ 8285 mtx_unlock(&lun->lun_lock); 8286 free(ctsio->kern_data_ptr, M_CTL); 8287 ctl_set_reservation_conflict(ctsio); 8288 ctl_done((union ctl_io *)ctsio); 8289 return (CTL_RETVAL_COMPLETE); 8290 } 8291 mtx_unlock(&lun->lun_lock); 8292 } 8293 8294 switch (cdb->action & SPRO_ACTION_MASK) { 8295 case SPRO_REGISTER: 8296 case SPRO_REG_IGNO: { 8297 /* 8298 * We don't support any of these options, as we report in 8299 * the read capabilities request (see 8300 * ctl_persistent_reserve_in(), above). 8301 */ 8302 if ((param->flags & SPR_SPEC_I_PT) 8303 || (param->flags & SPR_ALL_TG_PT) 8304 || (param->flags & SPR_APTPL)) { 8305 int bit_ptr; 8306 8307 if (param->flags & SPR_APTPL) 8308 bit_ptr = 0; 8309 else if (param->flags & SPR_ALL_TG_PT) 8310 bit_ptr = 2; 8311 else /* SPR_SPEC_I_PT */ 8312 bit_ptr = 3; 8313 8314 free(ctsio->kern_data_ptr, M_CTL); 8315 ctl_set_invalid_field(ctsio, 8316 /*sks_valid*/ 1, 8317 /*command*/ 0, 8318 /*field*/ 20, 8319 /*bit_valid*/ 1, 8320 /*bit*/ bit_ptr); 8321 ctl_done((union ctl_io *)ctsio); 8322 return (CTL_RETVAL_COMPLETE); 8323 } 8324 8325 mtx_lock(&lun->lun_lock); 8326 8327 /* 8328 * The initiator wants to clear the 8329 * key/unregister. 8330 */ 8331 if (sa_res_key == 0) { 8332 if ((res_key == 0 8333 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8334 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8335 && ctl_get_prkey(lun, residx) == 0)) { 8336 mtx_unlock(&lun->lun_lock); 8337 goto done; 8338 } 8339 8340 ctl_clr_prkey(lun, residx); 8341 lun->pr_key_count--; 8342 8343 if (residx == lun->pr_res_idx) { 8344 lun->flags &= ~CTL_LUN_PR_RESERVED; 8345 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8346 8347 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8348 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8349 lun->pr_key_count) { 8350 /* 8351 * If the reservation is a registrants 8352 * only type we need to generate a UA 8353 * for other registered inits. The 8354 * sense code should be RESERVATIONS 8355 * RELEASED 8356 */ 8357 8358 for (i = softc->init_min; i < softc->init_max; i++){ 8359 if (ctl_get_prkey(lun, i) == 0) 8360 continue; 8361 ctl_est_ua(lun, i, 8362 CTL_UA_RES_RELEASE); 8363 } 8364 } 8365 lun->pr_res_type = 0; 8366 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8367 if (lun->pr_key_count==0) { 8368 lun->flags &= ~CTL_LUN_PR_RESERVED; 8369 lun->pr_res_type = 0; 8370 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8371 } 8372 } 8373 lun->pr_generation++; 8374 mtx_unlock(&lun->lun_lock); 8375 8376 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8377 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8378 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8379 persis_io.pr.pr_info.residx = residx; 8380 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8381 sizeof(persis_io.pr), M_WAITOK); 8382 } else /* sa_res_key != 0 */ { 8383 /* 8384 * If we aren't registered currently then increment 8385 * the key count and set the registered flag. 
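			 * A stored key of zero is what "not registered" means
			 * here, so pr_key_count is only bumped in that case
			 * before the new key is written.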
8386 */ 8387 ctl_alloc_prkey(lun, residx); 8388 if (ctl_get_prkey(lun, residx) == 0) 8389 lun->pr_key_count++; 8390 ctl_set_prkey(lun, residx, sa_res_key); 8391 lun->pr_generation++; 8392 mtx_unlock(&lun->lun_lock); 8393 8394 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8395 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8396 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8397 persis_io.pr.pr_info.residx = residx; 8398 memcpy(persis_io.pr.pr_info.sa_res_key, 8399 param->serv_act_res_key, 8400 sizeof(param->serv_act_res_key)); 8401 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8402 sizeof(persis_io.pr), M_WAITOK); 8403 } 8404 8405 break; 8406 } 8407 case SPRO_RESERVE: 8408 mtx_lock(&lun->lun_lock); 8409 if (lun->flags & CTL_LUN_PR_RESERVED) { 8410 /* 8411 * if this isn't the reservation holder and it's 8412 * not a "all registrants" type or if the type is 8413 * different then we have a conflict 8414 */ 8415 if ((lun->pr_res_idx != residx 8416 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8417 || lun->pr_res_type != type) { 8418 mtx_unlock(&lun->lun_lock); 8419 free(ctsio->kern_data_ptr, M_CTL); 8420 ctl_set_reservation_conflict(ctsio); 8421 ctl_done((union ctl_io *)ctsio); 8422 return (CTL_RETVAL_COMPLETE); 8423 } 8424 mtx_unlock(&lun->lun_lock); 8425 } else /* create a reservation */ { 8426 /* 8427 * If it's not an "all registrants" type record 8428 * reservation holder 8429 */ 8430 if (type != SPR_TYPE_WR_EX_AR 8431 && type != SPR_TYPE_EX_AC_AR) 8432 lun->pr_res_idx = residx; /* Res holder */ 8433 else 8434 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8435 8436 lun->flags |= CTL_LUN_PR_RESERVED; 8437 lun->pr_res_type = type; 8438 8439 mtx_unlock(&lun->lun_lock); 8440 8441 /* send msg to other side */ 8442 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8443 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8444 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8445 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8446 persis_io.pr.pr_info.res_type = type; 8447 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8448 sizeof(persis_io.pr), M_WAITOK); 8449 } 8450 break; 8451 8452 case SPRO_RELEASE: 8453 mtx_lock(&lun->lun_lock); 8454 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8455 /* No reservation exists return good status */ 8456 mtx_unlock(&lun->lun_lock); 8457 goto done; 8458 } 8459 /* 8460 * Is this nexus a reservation holder? 8461 */ 8462 if (lun->pr_res_idx != residx 8463 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8464 /* 8465 * not a res holder return good status but 8466 * do nothing 8467 */ 8468 mtx_unlock(&lun->lun_lock); 8469 goto done; 8470 } 8471 8472 if (lun->pr_res_type != type) { 8473 mtx_unlock(&lun->lun_lock); 8474 free(ctsio->kern_data_ptr, M_CTL); 8475 ctl_set_illegal_pr_release(ctsio); 8476 ctl_done((union ctl_io *)ctsio); 8477 return (CTL_RETVAL_COMPLETE); 8478 } 8479 8480 /* okay to release */ 8481 lun->flags &= ~CTL_LUN_PR_RESERVED; 8482 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8483 lun->pr_res_type = 0; 8484 8485 /* 8486 * If this isn't an exclusive access reservation and NUAR 8487 * is not set, generate UA for all other registrants. 
8488 */ 8489 if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX && 8490 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8491 for (i = softc->init_min; i < softc->init_max; i++) { 8492 if (i == residx || ctl_get_prkey(lun, i) == 0) 8493 continue; 8494 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8495 } 8496 } 8497 mtx_unlock(&lun->lun_lock); 8498 8499 /* Send msg to other side */ 8500 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8501 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8502 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8503 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8504 sizeof(persis_io.pr), M_WAITOK); 8505 break; 8506 8507 case SPRO_CLEAR: 8508 /* send msg to other side */ 8509 8510 mtx_lock(&lun->lun_lock); 8511 lun->flags &= ~CTL_LUN_PR_RESERVED; 8512 lun->pr_res_type = 0; 8513 lun->pr_key_count = 0; 8514 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8515 8516 ctl_clr_prkey(lun, residx); 8517 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8518 if (ctl_get_prkey(lun, i) != 0) { 8519 ctl_clr_prkey(lun, i); 8520 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8521 } 8522 lun->pr_generation++; 8523 mtx_unlock(&lun->lun_lock); 8524 8525 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8526 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8527 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8528 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8529 sizeof(persis_io.pr), M_WAITOK); 8530 break; 8531 8532 case SPRO_PREEMPT: 8533 case SPRO_PRE_ABO: { 8534 int nretval; 8535 8536 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8537 residx, ctsio, cdb, param); 8538 if (nretval != 0) 8539 return (CTL_RETVAL_COMPLETE); 8540 break; 8541 } 8542 default: 8543 panic("%s: Invalid PR type %#x", __func__, cdb->action); 8544 } 8545 8546 done: 8547 free(ctsio->kern_data_ptr, M_CTL); 8548 ctl_set_success(ctsio); 8549 ctl_done((union ctl_io *)ctsio); 8550 8551 return (retval); 8552 } 8553 8554 /* 8555 * This routine is for handling a message from the other SC pertaining to 8556 * persistent reserve out. All the error checking will have been done 8557 * so only performing the action need be done here to keep the two 8558 * in sync. 8559 */ 8560 static void 8561 ctl_hndl_per_res_out_on_other_sc(union ctl_io *io) 8562 { 8563 struct ctl_softc *softc = CTL_SOFTC(io); 8564 union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg; 8565 struct ctl_lun *lun; 8566 int i; 8567 uint32_t residx, targ_lun; 8568 8569 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8570 mtx_lock(&softc->ctl_lock); 8571 if (targ_lun >= ctl_max_luns || 8572 (lun = softc->ctl_luns[targ_lun]) == NULL) { 8573 mtx_unlock(&softc->ctl_lock); 8574 return; 8575 } 8576 mtx_lock(&lun->lun_lock); 8577 mtx_unlock(&softc->ctl_lock); 8578 if (lun->flags & CTL_LUN_DISABLED) { 8579 mtx_unlock(&lun->lun_lock); 8580 return; 8581 } 8582 residx = ctl_get_initindex(&msg->hdr.nexus); 8583 switch(msg->pr.pr_info.action) { 8584 case CTL_PR_REG_KEY: 8585 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8586 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8587 lun->pr_key_count++; 8588 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8589 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8590 lun->pr_generation++; 8591 break; 8592 8593 case CTL_PR_UNREG_KEY: 8594 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8595 lun->pr_key_count--; 8596 8597 /* XXX Need to see if the reservation has been released */ 8598 /* if so do we need to generate UA? 
*/ 8599 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8600 lun->flags &= ~CTL_LUN_PR_RESERVED; 8601 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8602 8603 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8604 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8605 lun->pr_key_count) { 8606 /* 8607 * If the reservation is a registrants 8608 * only type we need to generate a UA 8609 * for other registered inits. The 8610 * sense code should be RESERVATIONS 8611 * RELEASED 8612 */ 8613 8614 for (i = softc->init_min; i < softc->init_max; i++) { 8615 if (ctl_get_prkey(lun, i) == 0) 8616 continue; 8617 8618 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8619 } 8620 } 8621 lun->pr_res_type = 0; 8622 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8623 if (lun->pr_key_count==0) { 8624 lun->flags &= ~CTL_LUN_PR_RESERVED; 8625 lun->pr_res_type = 0; 8626 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8627 } 8628 } 8629 lun->pr_generation++; 8630 break; 8631 8632 case CTL_PR_RESERVE: 8633 lun->flags |= CTL_LUN_PR_RESERVED; 8634 lun->pr_res_type = msg->pr.pr_info.res_type; 8635 lun->pr_res_idx = msg->pr.pr_info.residx; 8636 8637 break; 8638 8639 case CTL_PR_RELEASE: 8640 /* 8641 * If this isn't an exclusive access reservation and NUAR 8642 * is not set, generate UA for all other registrants. 8643 */ 8644 if (lun->pr_res_type != SPR_TYPE_EX_AC && 8645 lun->pr_res_type != SPR_TYPE_WR_EX && 8646 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8647 for (i = softc->init_min; i < softc->init_max; i++) { 8648 if (i == residx || ctl_get_prkey(lun, i) == 0) 8649 continue; 8650 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8651 } 8652 } 8653 8654 lun->flags &= ~CTL_LUN_PR_RESERVED; 8655 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8656 lun->pr_res_type = 0; 8657 break; 8658 8659 case CTL_PR_PREEMPT: 8660 ctl_pro_preempt_other(lun, msg); 8661 break; 8662 case CTL_PR_CLEAR: 8663 lun->flags &= ~CTL_LUN_PR_RESERVED; 8664 lun->pr_res_type = 0; 8665 lun->pr_key_count = 0; 8666 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8667 8668 for (i=0; i < CTL_MAX_INITIATORS; i++) { 8669 if (ctl_get_prkey(lun, i) == 0) 8670 continue; 8671 ctl_clr_prkey(lun, i); 8672 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8673 } 8674 lun->pr_generation++; 8675 break; 8676 } 8677 8678 mtx_unlock(&lun->lun_lock); 8679 } 8680 8681 int 8682 ctl_read_write(struct ctl_scsiio *ctsio) 8683 { 8684 struct ctl_lun *lun = CTL_LUN(ctsio); 8685 struct ctl_lba_len_flags *lbalen; 8686 uint64_t lba; 8687 uint32_t num_blocks; 8688 int flags, retval; 8689 int isread; 8690 8691 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8692 8693 flags = 0; 8694 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8695 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8696 switch (ctsio->cdb[0]) { 8697 case READ_6: 8698 case WRITE_6: { 8699 struct scsi_rw_6 *cdb; 8700 8701 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8702 8703 lba = scsi_3btoul(cdb->addr); 8704 /* only 5 bits are valid in the most significant address byte */ 8705 lba &= 0x1fffff; 8706 num_blocks = cdb->length; 8707 /* 8708 * This is correct according to SBC-2. 
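		 * A TRANSFER LENGTH of zero in the 6-byte CDB means 256
		 * blocks, not zero blocks.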
8709 */ 8710 if (num_blocks == 0) 8711 num_blocks = 256; 8712 break; 8713 } 8714 case READ_10: 8715 case WRITE_10: { 8716 struct scsi_rw_10 *cdb; 8717 8718 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8719 if (cdb->byte2 & SRW10_FUA) 8720 flags |= CTL_LLF_FUA; 8721 if (cdb->byte2 & SRW10_DPO) 8722 flags |= CTL_LLF_DPO; 8723 lba = scsi_4btoul(cdb->addr); 8724 num_blocks = scsi_2btoul(cdb->length); 8725 break; 8726 } 8727 case WRITE_VERIFY_10: { 8728 struct scsi_write_verify_10 *cdb; 8729 8730 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8731 flags |= CTL_LLF_FUA; 8732 if (cdb->byte2 & SWV_DPO) 8733 flags |= CTL_LLF_DPO; 8734 lba = scsi_4btoul(cdb->addr); 8735 num_blocks = scsi_2btoul(cdb->length); 8736 break; 8737 } 8738 case READ_12: 8739 case WRITE_12: { 8740 struct scsi_rw_12 *cdb; 8741 8742 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8743 if (cdb->byte2 & SRW12_FUA) 8744 flags |= CTL_LLF_FUA; 8745 if (cdb->byte2 & SRW12_DPO) 8746 flags |= CTL_LLF_DPO; 8747 lba = scsi_4btoul(cdb->addr); 8748 num_blocks = scsi_4btoul(cdb->length); 8749 break; 8750 } 8751 case WRITE_VERIFY_12: { 8752 struct scsi_write_verify_12 *cdb; 8753 8754 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8755 flags |= CTL_LLF_FUA; 8756 if (cdb->byte2 & SWV_DPO) 8757 flags |= CTL_LLF_DPO; 8758 lba = scsi_4btoul(cdb->addr); 8759 num_blocks = scsi_4btoul(cdb->length); 8760 break; 8761 } 8762 case READ_16: 8763 case WRITE_16: { 8764 struct scsi_rw_16 *cdb; 8765 8766 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8767 if (cdb->byte2 & SRW12_FUA) 8768 flags |= CTL_LLF_FUA; 8769 if (cdb->byte2 & SRW12_DPO) 8770 flags |= CTL_LLF_DPO; 8771 lba = scsi_8btou64(cdb->addr); 8772 num_blocks = scsi_4btoul(cdb->length); 8773 break; 8774 } 8775 case WRITE_ATOMIC_16: { 8776 struct scsi_write_atomic_16 *cdb; 8777 8778 if (lun->be_lun->atomicblock == 0) { 8779 ctl_set_invalid_opcode(ctsio); 8780 ctl_done((union ctl_io *)ctsio); 8781 return (CTL_RETVAL_COMPLETE); 8782 } 8783 8784 cdb = (struct scsi_write_atomic_16 *)ctsio->cdb; 8785 if (cdb->byte2 & SRW12_FUA) 8786 flags |= CTL_LLF_FUA; 8787 if (cdb->byte2 & SRW12_DPO) 8788 flags |= CTL_LLF_DPO; 8789 lba = scsi_8btou64(cdb->addr); 8790 num_blocks = scsi_2btoul(cdb->length); 8791 if (num_blocks > lun->be_lun->atomicblock) { 8792 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8793 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8794 /*bit*/ 0); 8795 ctl_done((union ctl_io *)ctsio); 8796 return (CTL_RETVAL_COMPLETE); 8797 } 8798 break; 8799 } 8800 case WRITE_VERIFY_16: { 8801 struct scsi_write_verify_16 *cdb; 8802 8803 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8804 flags |= CTL_LLF_FUA; 8805 if (cdb->byte2 & SWV_DPO) 8806 flags |= CTL_LLF_DPO; 8807 lba = scsi_8btou64(cdb->addr); 8808 num_blocks = scsi_4btoul(cdb->length); 8809 break; 8810 } 8811 default: 8812 /* 8813 * We got a command we don't support. This shouldn't 8814 * happen, commands should be filtered out above us. 8815 */ 8816 ctl_set_invalid_opcode(ctsio); 8817 ctl_done((union ctl_io *)ctsio); 8818 8819 return (CTL_RETVAL_COMPLETE); 8820 break; /* NOTREACHED */ 8821 } 8822 8823 /* 8824 * The first check is to make sure we're in bounds, the second 8825 * check is to catch wrap-around problems. If the lba + num blocks 8826 * is less than the lba, then we've wrapped around and the block 8827 * range is invalid anyway. 
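	 * For example, lba == UINT64_MAX with num_blocks == 2 wraps
	 * lba + num_blocks around to 1, which the second test catches.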
8828 */ 8829 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8830 || ((lba + num_blocks) < lba)) { 8831 ctl_set_lba_out_of_range(ctsio, 8832 MAX(lba, lun->be_lun->maxlba + 1)); 8833 ctl_done((union ctl_io *)ctsio); 8834 return (CTL_RETVAL_COMPLETE); 8835 } 8836 8837 /* 8838 * According to SBC-3, a transfer length of 0 is not an error. 8839 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8840 * translates to 256 blocks for those commands. 8841 */ 8842 if (num_blocks == 0) { 8843 ctl_set_success(ctsio); 8844 ctl_done((union ctl_io *)ctsio); 8845 return (CTL_RETVAL_COMPLETE); 8846 } 8847 8848 /* Set FUA and/or DPO if caches are disabled. */ 8849 if (isread) { 8850 if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0) 8851 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8852 } else { 8853 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8854 flags |= CTL_LLF_FUA; 8855 } 8856 8857 lbalen = (struct ctl_lba_len_flags *) 8858 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8859 lbalen->lba = lba; 8860 lbalen->len = num_blocks; 8861 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8862 8863 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8864 ctsio->kern_rel_offset = 0; 8865 8866 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8867 8868 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8869 return (retval); 8870 } 8871 8872 static int 8873 ctl_cnw_cont(union ctl_io *io) 8874 { 8875 struct ctl_lun *lun = CTL_LUN(io); 8876 struct ctl_scsiio *ctsio; 8877 struct ctl_lba_len_flags *lbalen; 8878 int retval; 8879 8880 ctsio = &io->scsiio; 8881 ctsio->io_hdr.status = CTL_STATUS_NONE; 8882 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8883 lbalen = (struct ctl_lba_len_flags *) 8884 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8885 lbalen->flags &= ~CTL_LLF_COMPARE; 8886 lbalen->flags |= CTL_LLF_WRITE; 8887 8888 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8889 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8890 return (retval); 8891 } 8892 8893 int 8894 ctl_cnw(struct ctl_scsiio *ctsio) 8895 { 8896 struct ctl_lun *lun = CTL_LUN(ctsio); 8897 struct ctl_lba_len_flags *lbalen; 8898 uint64_t lba; 8899 uint32_t num_blocks; 8900 int flags, retval; 8901 8902 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8903 8904 flags = 0; 8905 switch (ctsio->cdb[0]) { 8906 case COMPARE_AND_WRITE: { 8907 struct scsi_compare_and_write *cdb; 8908 8909 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8910 if (cdb->byte2 & SRW10_FUA) 8911 flags |= CTL_LLF_FUA; 8912 if (cdb->byte2 & SRW10_DPO) 8913 flags |= CTL_LLF_DPO; 8914 lba = scsi_8btou64(cdb->addr); 8915 num_blocks = cdb->length; 8916 break; 8917 } 8918 default: 8919 /* 8920 * We got a command we don't support. This shouldn't 8921 * happen, commands should be filtered out above us. 8922 */ 8923 ctl_set_invalid_opcode(ctsio); 8924 ctl_done((union ctl_io *)ctsio); 8925 8926 return (CTL_RETVAL_COMPLETE); 8927 break; /* NOTREACHED */ 8928 } 8929 8930 /* 8931 * The first check is to make sure we're in bounds, the second 8932 * check is to catch wrap-around problems. If the lba + num blocks 8933 * is less than the lba, then we've wrapped around and the block 8934 * range is invalid anyway. 
8935 */ 8936 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8937 || ((lba + num_blocks) < lba)) { 8938 ctl_set_lba_out_of_range(ctsio, 8939 MAX(lba, lun->be_lun->maxlba + 1)); 8940 ctl_done((union ctl_io *)ctsio); 8941 return (CTL_RETVAL_COMPLETE); 8942 } 8943 8944 /* 8945 * According to SBC-3, a transfer length of 0 is not an error. 8946 */ 8947 if (num_blocks == 0) { 8948 ctl_set_success(ctsio); 8949 ctl_done((union ctl_io *)ctsio); 8950 return (CTL_RETVAL_COMPLETE); 8951 } 8952 8953 /* Set FUA if write cache is disabled. */ 8954 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8955 flags |= CTL_LLF_FUA; 8956 8957 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 8958 ctsio->kern_rel_offset = 0; 8959 8960 /* 8961 * Set the IO_CONT flag, so that if this I/O gets passed to 8962 * ctl_data_submit_done(), it'll get passed back to 8963 * ctl_ctl_cnw_cont() for further processing. 8964 */ 8965 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 8966 ctsio->io_cont = ctl_cnw_cont; 8967 8968 lbalen = (struct ctl_lba_len_flags *) 8969 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8970 lbalen->lba = lba; 8971 lbalen->len = num_blocks; 8972 lbalen->flags = CTL_LLF_COMPARE | flags; 8973 8974 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 8975 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8976 return (retval); 8977 } 8978 8979 int 8980 ctl_verify(struct ctl_scsiio *ctsio) 8981 { 8982 struct ctl_lun *lun = CTL_LUN(ctsio); 8983 struct ctl_lba_len_flags *lbalen; 8984 uint64_t lba; 8985 uint32_t num_blocks; 8986 int bytchk, flags; 8987 int retval; 8988 8989 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 8990 8991 bytchk = 0; 8992 flags = CTL_LLF_FUA; 8993 switch (ctsio->cdb[0]) { 8994 case VERIFY_10: { 8995 struct scsi_verify_10 *cdb; 8996 8997 cdb = (struct scsi_verify_10 *)ctsio->cdb; 8998 if (cdb->byte2 & SVFY_BYTCHK) 8999 bytchk = 1; 9000 if (cdb->byte2 & SVFY_DPO) 9001 flags |= CTL_LLF_DPO; 9002 lba = scsi_4btoul(cdb->addr); 9003 num_blocks = scsi_2btoul(cdb->length); 9004 break; 9005 } 9006 case VERIFY_12: { 9007 struct scsi_verify_12 *cdb; 9008 9009 cdb = (struct scsi_verify_12 *)ctsio->cdb; 9010 if (cdb->byte2 & SVFY_BYTCHK) 9011 bytchk = 1; 9012 if (cdb->byte2 & SVFY_DPO) 9013 flags |= CTL_LLF_DPO; 9014 lba = scsi_4btoul(cdb->addr); 9015 num_blocks = scsi_4btoul(cdb->length); 9016 break; 9017 } 9018 case VERIFY_16: { 9019 struct scsi_rw_16 *cdb; 9020 9021 cdb = (struct scsi_rw_16 *)ctsio->cdb; 9022 if (cdb->byte2 & SVFY_BYTCHK) 9023 bytchk = 1; 9024 if (cdb->byte2 & SVFY_DPO) 9025 flags |= CTL_LLF_DPO; 9026 lba = scsi_8btou64(cdb->addr); 9027 num_blocks = scsi_4btoul(cdb->length); 9028 break; 9029 } 9030 default: 9031 /* 9032 * We got a command we don't support. This shouldn't 9033 * happen, commands should be filtered out above us. 9034 */ 9035 ctl_set_invalid_opcode(ctsio); 9036 ctl_done((union ctl_io *)ctsio); 9037 return (CTL_RETVAL_COMPLETE); 9038 } 9039 9040 /* 9041 * The first check is to make sure we're in bounds, the second 9042 * check is to catch wrap-around problems. If the lba + num blocks 9043 * is less than the lba, then we've wrapped around and the block 9044 * range is invalid anyway. 9045 */ 9046 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9047 || ((lba + num_blocks) < lba)) { 9048 ctl_set_lba_out_of_range(ctsio, 9049 MAX(lba, lun->be_lun->maxlba + 1)); 9050 ctl_done((union ctl_io *)ctsio); 9051 return (CTL_RETVAL_COMPLETE); 9052 } 9053 9054 /* 9055 * According to SBC-3, a transfer length of 0 is not an error. 
9056 */ 9057 if (num_blocks == 0) { 9058 ctl_set_success(ctsio); 9059 ctl_done((union ctl_io *)ctsio); 9060 return (CTL_RETVAL_COMPLETE); 9061 } 9062 9063 lbalen = (struct ctl_lba_len_flags *) 9064 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9065 lbalen->lba = lba; 9066 lbalen->len = num_blocks; 9067 if (bytchk) { 9068 lbalen->flags = CTL_LLF_COMPARE | flags; 9069 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9070 } else { 9071 lbalen->flags = CTL_LLF_VERIFY | flags; 9072 ctsio->kern_total_len = 0; 9073 } 9074 ctsio->kern_rel_offset = 0; 9075 9076 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 9077 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9078 return (retval); 9079 } 9080 9081 int 9082 ctl_report_luns(struct ctl_scsiio *ctsio) 9083 { 9084 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9085 struct ctl_port *port = CTL_PORT(ctsio); 9086 struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio); 9087 struct scsi_report_luns *cdb; 9088 struct scsi_report_luns_data *lun_data; 9089 int num_filled, num_luns, num_port_luns, retval; 9090 uint32_t alloc_len, lun_datalen; 9091 uint32_t initidx, targ_lun_id, lun_id; 9092 9093 retval = CTL_RETVAL_COMPLETE; 9094 cdb = (struct scsi_report_luns *)ctsio->cdb; 9095 9096 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9097 9098 num_luns = 0; 9099 num_port_luns = port->lun_map ? port->lun_map_size : ctl_max_luns; 9100 mtx_lock(&softc->ctl_lock); 9101 for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) { 9102 if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX) 9103 num_luns++; 9104 } 9105 mtx_unlock(&softc->ctl_lock); 9106 9107 switch (cdb->select_report) { 9108 case RPL_REPORT_DEFAULT: 9109 case RPL_REPORT_ALL: 9110 case RPL_REPORT_NONSUBSID: 9111 break; 9112 case RPL_REPORT_WELLKNOWN: 9113 case RPL_REPORT_ADMIN: 9114 case RPL_REPORT_CONGLOM: 9115 num_luns = 0; 9116 break; 9117 default: 9118 ctl_set_invalid_field(ctsio, 9119 /*sks_valid*/ 1, 9120 /*command*/ 1, 9121 /*field*/ 2, 9122 /*bit_valid*/ 0, 9123 /*bit*/ 0); 9124 ctl_done((union ctl_io *)ctsio); 9125 return (retval); 9126 break; /* NOTREACHED */ 9127 } 9128 9129 alloc_len = scsi_4btoul(cdb->length); 9130 /* 9131 * The initiator has to allocate at least 16 bytes for this request, 9132 * so he can at least get the header and the first LUN. Otherwise 9133 * we reject the request (per SPC-3 rev 14, section 6.21). 
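	 * That minimum is 16 bytes: the 8-byte report header plus one
	 * 8-byte LUN entry.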
9134 */ 9135 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9136 sizeof(struct scsi_report_luns_lundata))) { 9137 ctl_set_invalid_field(ctsio, 9138 /*sks_valid*/ 1, 9139 /*command*/ 1, 9140 /*field*/ 6, 9141 /*bit_valid*/ 0, 9142 /*bit*/ 0); 9143 ctl_done((union ctl_io *)ctsio); 9144 return (retval); 9145 } 9146 9147 lun_datalen = sizeof(*lun_data) + 9148 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9149 9150 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9151 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9152 ctsio->kern_sg_entries = 0; 9153 9154 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9155 9156 mtx_lock(&softc->ctl_lock); 9157 for (targ_lun_id = 0, num_filled = 0; 9158 targ_lun_id < num_port_luns && num_filled < num_luns; 9159 targ_lun_id++) { 9160 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 9161 if (lun_id == UINT32_MAX) 9162 continue; 9163 lun = softc->ctl_luns[lun_id]; 9164 if (lun == NULL) 9165 continue; 9166 9167 be64enc(lun_data->luns[num_filled++].lundata, 9168 ctl_encode_lun(targ_lun_id)); 9169 9170 /* 9171 * According to SPC-3, rev 14 section 6.21: 9172 * 9173 * "The execution of a REPORT LUNS command to any valid and 9174 * installed logical unit shall clear the REPORTED LUNS DATA 9175 * HAS CHANGED unit attention condition for all logical 9176 * units of that target with respect to the requesting 9177 * initiator. A valid and installed logical unit is one 9178 * having a PERIPHERAL QUALIFIER of 000b in the standard 9179 * INQUIRY data (see 6.4.2)." 9180 * 9181 * If request_lun is NULL, the LUN this report luns command 9182 * was issued to is either disabled or doesn't exist. In that 9183 * case, we shouldn't clear any pending lun change unit 9184 * attention. 9185 */ 9186 if (request_lun != NULL) { 9187 mtx_lock(&lun->lun_lock); 9188 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 9189 mtx_unlock(&lun->lun_lock); 9190 } 9191 } 9192 mtx_unlock(&softc->ctl_lock); 9193 9194 /* 9195 * It's quite possible that we've returned fewer LUNs than we allocated 9196 * space for. Trim it. 9197 */ 9198 lun_datalen = sizeof(*lun_data) + 9199 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9200 ctsio->kern_rel_offset = 0; 9201 ctsio->kern_sg_entries = 0; 9202 ctsio->kern_data_len = min(lun_datalen, alloc_len); 9203 ctsio->kern_total_len = ctsio->kern_data_len; 9204 9205 /* 9206 * We set this to the actual data length, regardless of how much 9207 * space we actually have to return results. If the user looks at 9208 * this value, he'll know whether or not he allocated enough space 9209 * and reissue the command if necessary. We don't support well 9210 * known logical units, so if the user asks for that, return none. 9211 */ 9212 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9213 9214 /* 9215 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9216 * this request. 
9217 */ 9218 ctl_set_success(ctsio); 9219 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9220 ctsio->be_move_done = ctl_config_move_done; 9221 ctl_datamove((union ctl_io *)ctsio); 9222 return (retval); 9223 } 9224 9225 int 9226 ctl_request_sense(struct ctl_scsiio *ctsio) 9227 { 9228 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9229 struct ctl_lun *lun = CTL_LUN(ctsio); 9230 struct scsi_request_sense *cdb; 9231 struct scsi_sense_data *sense_ptr, *ps; 9232 uint32_t initidx; 9233 int have_error; 9234 u_int sense_len = SSD_FULL_SIZE; 9235 scsi_sense_data_type sense_format; 9236 ctl_ua_type ua_type; 9237 uint8_t asc = 0, ascq = 0; 9238 9239 cdb = (struct scsi_request_sense *)ctsio->cdb; 9240 9241 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9242 9243 /* 9244 * Determine which sense format the user wants. 9245 */ 9246 if (cdb->byte2 & SRS_DESC) 9247 sense_format = SSD_TYPE_DESC; 9248 else 9249 sense_format = SSD_TYPE_FIXED; 9250 9251 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9252 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9253 ctsio->kern_sg_entries = 0; 9254 ctsio->kern_rel_offset = 0; 9255 9256 /* 9257 * struct scsi_sense_data, which is currently set to 256 bytes, is 9258 * larger than the largest allowed value for the length field in the 9259 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9260 */ 9261 ctsio->kern_data_len = cdb->length; 9262 ctsio->kern_total_len = cdb->length; 9263 9264 /* 9265 * If we don't have a LUN, we don't have any pending sense. 9266 */ 9267 if (lun == NULL || 9268 ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 9269 softc->ha_link < CTL_HA_LINK_UNKNOWN)) { 9270 /* "Logical unit not supported" */ 9271 ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format, 9272 /*current_error*/ 1, 9273 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 9274 /*asc*/ 0x25, 9275 /*ascq*/ 0x00, 9276 SSD_ELEM_NONE); 9277 goto send; 9278 } 9279 9280 have_error = 0; 9281 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9282 /* 9283 * Check for pending sense, and then for pending unit attentions. 9284 * Pending sense gets returned first, then pending unit attentions. 9285 */ 9286 mtx_lock(&lun->lun_lock); 9287 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; 9288 if (ps != NULL) 9289 ps += initidx % CTL_MAX_INIT_PER_PORT; 9290 if (ps != NULL && ps->error_code != 0) { 9291 scsi_sense_data_type stored_format; 9292 9293 /* 9294 * Check to see which sense format was used for the stored 9295 * sense data. 9296 */ 9297 stored_format = scsi_sense_type(ps); 9298 9299 /* 9300 * If the user requested a different sense format than the 9301 * one we stored, then we need to convert it to the other 9302 * format. If we're going from descriptor to fixed format 9303 * sense data, we may lose things in translation, depending 9304 * on what options were used. 9305 * 9306 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9307 * for some reason we'll just copy it out as-is. 
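	 * ctl_sense_to_desc() and ctl_sense_to_fixed() below handle the two
	 * defined formats; anything else is copied out unchanged.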
9308 */ 9309 if ((stored_format == SSD_TYPE_FIXED) 9310 && (sense_format == SSD_TYPE_DESC)) 9311 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9312 ps, (struct scsi_sense_data_desc *)sense_ptr); 9313 else if ((stored_format == SSD_TYPE_DESC) 9314 && (sense_format == SSD_TYPE_FIXED)) 9315 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9316 ps, (struct scsi_sense_data_fixed *)sense_ptr); 9317 else 9318 memcpy(sense_ptr, ps, sizeof(*sense_ptr)); 9319 9320 ps->error_code = 0; 9321 have_error = 1; 9322 } else { 9323 ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len, 9324 sense_format); 9325 if (ua_type != CTL_UA_NONE) 9326 have_error = 1; 9327 } 9328 if (have_error == 0) { 9329 /* 9330 * Report informational exception if have one and allowed. 9331 */ 9332 if (lun->MODE_IE.mrie != SIEP_MRIE_NO) { 9333 asc = lun->ie_asc; 9334 ascq = lun->ie_ascq; 9335 } 9336 ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format, 9337 /*current_error*/ 1, 9338 /*sense_key*/ SSD_KEY_NO_SENSE, 9339 /*asc*/ asc, 9340 /*ascq*/ ascq, 9341 SSD_ELEM_NONE); 9342 } 9343 mtx_unlock(&lun->lun_lock); 9344 9345 send: 9346 /* 9347 * We report the SCSI status as OK, since the status of the command 9348 * itself is OK. We're reporting sense as parameter data. 9349 */ 9350 ctl_set_success(ctsio); 9351 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9352 ctsio->be_move_done = ctl_config_move_done; 9353 ctl_datamove((union ctl_io *)ctsio); 9354 return (CTL_RETVAL_COMPLETE); 9355 } 9356 9357 int 9358 ctl_tur(struct ctl_scsiio *ctsio) 9359 { 9360 9361 CTL_DEBUG_PRINT(("ctl_tur\n")); 9362 9363 ctl_set_success(ctsio); 9364 ctl_done((union ctl_io *)ctsio); 9365 9366 return (CTL_RETVAL_COMPLETE); 9367 } 9368 9369 /* 9370 * SCSI VPD page 0x00, the Supported VPD Pages page. 9371 */ 9372 static int 9373 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9374 { 9375 struct ctl_lun *lun = CTL_LUN(ctsio); 9376 struct scsi_vpd_supported_pages *pages; 9377 int sup_page_size; 9378 int p; 9379 9380 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9381 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9382 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9383 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9384 ctsio->kern_rel_offset = 0; 9385 ctsio->kern_sg_entries = 0; 9386 ctsio->kern_data_len = min(sup_page_size, alloc_len); 9387 ctsio->kern_total_len = ctsio->kern_data_len; 9388 9389 /* 9390 * The control device is always connected. The disk device, on the 9391 * other hand, may not be online all the time. Need to change this 9392 * to figure out whether the disk device is actually online or not. 
9393 */ 9394 if (lun != NULL) 9395 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9396 lun->be_lun->lun_type; 9397 else 9398 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9399 9400 p = 0; 9401 /* Supported VPD pages */ 9402 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9403 /* Serial Number */ 9404 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9405 /* Device Identification */ 9406 pages->page_list[p++] = SVPD_DEVICE_ID; 9407 /* Extended INQUIRY Data */ 9408 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9409 /* Mode Page Policy */ 9410 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9411 /* SCSI Ports */ 9412 pages->page_list[p++] = SVPD_SCSI_PORTS; 9413 /* Third-party Copy */ 9414 pages->page_list[p++] = SVPD_SCSI_TPC; 9415 /* SCSI Feature Sets */ 9416 pages->page_list[p++] = SVPD_SCSI_SFS; 9417 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9418 /* Block limits */ 9419 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9420 /* Block Device Characteristics */ 9421 pages->page_list[p++] = SVPD_BDC; 9422 /* Logical Block Provisioning */ 9423 pages->page_list[p++] = SVPD_LBP; 9424 } 9425 pages->length = p; 9426 9427 ctl_set_success(ctsio); 9428 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9429 ctsio->be_move_done = ctl_config_move_done; 9430 ctl_datamove((union ctl_io *)ctsio); 9431 return (CTL_RETVAL_COMPLETE); 9432 } 9433 9434 /* 9435 * SCSI VPD page 0x80, the Unit Serial Number page. 9436 */ 9437 static int 9438 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9439 { 9440 struct ctl_lun *lun = CTL_LUN(ctsio); 9441 struct scsi_vpd_unit_serial_number *sn_ptr; 9442 int data_len; 9443 9444 data_len = 4 + CTL_SN_LEN; 9445 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9446 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9447 ctsio->kern_rel_offset = 0; 9448 ctsio->kern_sg_entries = 0; 9449 ctsio->kern_data_len = min(data_len, alloc_len); 9450 ctsio->kern_total_len = ctsio->kern_data_len; 9451 9452 /* 9453 * The control device is always connected. The disk device, on the 9454 * other hand, may not be online all the time. Need to change this 9455 * to figure out whether the disk device is actually online or not. 9456 */ 9457 if (lun != NULL) 9458 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9459 lun->be_lun->lun_type; 9460 else 9461 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9462 9463 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9464 sn_ptr->length = CTL_SN_LEN; 9465 /* 9466 * If we don't have a LUN, we just leave the serial number as 9467 * all spaces. 9468 */ 9469 if (lun != NULL) { 9470 strncpy((char *)sn_ptr->serial_num, 9471 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9472 } else 9473 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9474 9475 ctl_set_success(ctsio); 9476 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9477 ctsio->be_move_done = ctl_config_move_done; 9478 ctl_datamove((union ctl_io *)ctsio); 9479 return (CTL_RETVAL_COMPLETE); 9480 } 9481 9482 /* 9483 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 
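 * It advertises the task attribute, LUICLR, volatile cache and RTD
 * capabilities that are filled in below.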
9484 */ 9485 static int 9486 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9487 { 9488 struct ctl_lun *lun = CTL_LUN(ctsio); 9489 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9490 int data_len; 9491 9492 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9493 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9494 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9495 ctsio->kern_sg_entries = 0; 9496 ctsio->kern_rel_offset = 0; 9497 ctsio->kern_data_len = min(data_len, alloc_len); 9498 ctsio->kern_total_len = ctsio->kern_data_len; 9499 9500 /* 9501 * The control device is always connected. The disk device, on the 9502 * other hand, may not be online all the time. 9503 */ 9504 if (lun != NULL) 9505 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9506 lun->be_lun->lun_type; 9507 else 9508 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9509 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9510 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9511 /* 9512 * We support head of queue, ordered and simple tags. 9513 */ 9514 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9515 /* 9516 * Volatile cache supported. 9517 */ 9518 eid_ptr->flags3 = SVPD_EID_V_SUP; 9519 9520 /* 9521 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9522 * attention for a particular IT nexus on all LUNs once we report 9523 * it to that nexus once. This bit is required as of SPC-4. 9524 */ 9525 eid_ptr->flags4 = SVPD_EID_LUICLR; 9526 9527 /* 9528 * We support revert to defaults (RTD) bit in MODE SELECT. 9529 */ 9530 eid_ptr->flags5 = SVPD_EID_RTD_SUP; 9531 9532 /* 9533 * XXX KDM in order to correctly answer this, we would need 9534 * information from the SIM to determine how much sense data it 9535 * can send. So this would really be a path inquiry field, most 9536 * likely. This can be set to a maximum of 252 according to SPC-4, 9537 * but the hardware may or may not be able to support that much. 9538 * 0 just means that the maximum sense data length is not reported. 9539 */ 9540 eid_ptr->max_sense_length = 0; 9541 9542 ctl_set_success(ctsio); 9543 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9544 ctsio->be_move_done = ctl_config_move_done; 9545 ctl_datamove((union ctl_io *)ctsio); 9546 return (CTL_RETVAL_COMPLETE); 9547 } 9548 9549 static int 9550 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9551 { 9552 struct ctl_lun *lun = CTL_LUN(ctsio); 9553 struct scsi_vpd_mode_page_policy *mpp_ptr; 9554 int data_len; 9555 9556 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9557 sizeof(struct scsi_vpd_mode_page_policy_descr); 9558 9559 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9560 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9561 ctsio->kern_rel_offset = 0; 9562 ctsio->kern_sg_entries = 0; 9563 ctsio->kern_data_len = min(data_len, alloc_len); 9564 ctsio->kern_total_len = ctsio->kern_data_len; 9565 9566 /* 9567 * The control device is always connected. The disk device, on the 9568 * other hand, may not be online all the time. 
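 *
 * The single descriptor filled in below is the wildcard form: policy
 * page code 0x3f with subpage code 0xff covers every mode page and
 * subpage, and the SHARED policy declares that one copy of each page is
 * shared by all I_T nexuses rather than kept per initiator.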
9569 */ 9570 if (lun != NULL) 9571 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9572 lun->be_lun->lun_type; 9573 else 9574 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9575 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9576 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9577 mpp_ptr->descr[0].page_code = 0x3f; 9578 mpp_ptr->descr[0].subpage_code = 0xff; 9579 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9580 9581 ctl_set_success(ctsio); 9582 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9583 ctsio->be_move_done = ctl_config_move_done; 9584 ctl_datamove((union ctl_io *)ctsio); 9585 return (CTL_RETVAL_COMPLETE); 9586 } 9587 9588 /* 9589 * SCSI VPD page 0x83, the Device Identification page. 9590 */ 9591 static int 9592 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9593 { 9594 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9595 struct ctl_port *port = CTL_PORT(ctsio); 9596 struct ctl_lun *lun = CTL_LUN(ctsio); 9597 struct scsi_vpd_device_id *devid_ptr; 9598 struct scsi_vpd_id_descriptor *desc; 9599 int data_len, g; 9600 uint8_t proto; 9601 9602 data_len = sizeof(struct scsi_vpd_device_id) + 9603 sizeof(struct scsi_vpd_id_descriptor) + 9604 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9605 sizeof(struct scsi_vpd_id_descriptor) + 9606 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9607 if (lun && lun->lun_devid) 9608 data_len += lun->lun_devid->len; 9609 if (port && port->port_devid) 9610 data_len += port->port_devid->len; 9611 if (port && port->target_devid) 9612 data_len += port->target_devid->len; 9613 9614 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9615 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9616 ctsio->kern_sg_entries = 0; 9617 ctsio->kern_rel_offset = 0; 9618 ctsio->kern_sg_entries = 0; 9619 ctsio->kern_data_len = min(data_len, alloc_len); 9620 ctsio->kern_total_len = ctsio->kern_data_len; 9621 9622 /* 9623 * The control device is always connected. The disk device, on the 9624 * other hand, may not be online all the time. 9625 */ 9626 if (lun != NULL) 9627 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9628 lun->be_lun->lun_type; 9629 else 9630 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9631 devid_ptr->page_code = SVPD_DEVICE_ID; 9632 scsi_ulto2b(data_len - 4, devid_ptr->length); 9633 9634 if (port && port->port_type == CTL_PORT_FC) 9635 proto = SCSI_PROTO_FC << 4; 9636 else if (port && port->port_type == CTL_PORT_SAS) 9637 proto = SCSI_PROTO_SAS << 4; 9638 else if (port && port->port_type == CTL_PORT_ISCSI) 9639 proto = SCSI_PROTO_ISCSI << 4; 9640 else 9641 proto = SCSI_PROTO_SPI << 4; 9642 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9643 9644 /* 9645 * We're using a LUN association here. i.e., this device ID is a 9646 * per-LUN identifier. 9647 */ 9648 if (lun && lun->lun_devid) { 9649 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9650 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9651 lun->lun_devid->len); 9652 } 9653 9654 /* 9655 * This is for the WWPN which is a port association. 
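 *
 * (Taken together, the page built here carries, in order: the LUN
 * designators stored in lun->lun_devid, this port designator, a
 * Relative Target Port descriptor, a Target Port Group descriptor and,
 * when the frontend registered one, a target designator.)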
9656 */ 9657 if (port && port->port_devid) { 9658 memcpy(desc, port->port_devid->data, port->port_devid->len); 9659 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9660 port->port_devid->len); 9661 } 9662 9663 /* 9664 * This is for the Relative Target Port(type 4h) identifier 9665 */ 9666 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9667 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9668 SVPD_ID_TYPE_RELTARG; 9669 desc->length = 4; 9670 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9671 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9672 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9673 9674 /* 9675 * This is for the Target Port Group(type 5h) identifier 9676 */ 9677 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9678 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9679 SVPD_ID_TYPE_TPORTGRP; 9680 desc->length = 4; 9681 if (softc->is_single || 9682 (port && port->status & CTL_PORT_STATUS_HA_SHARED)) 9683 g = 1; 9684 else 9685 g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt; 9686 scsi_ulto2b(g, &desc->identifier[2]); 9687 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9688 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9689 9690 /* 9691 * This is for the Target identifier 9692 */ 9693 if (port && port->target_devid) { 9694 memcpy(desc, port->target_devid->data, port->target_devid->len); 9695 } 9696 9697 ctl_set_success(ctsio); 9698 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9699 ctsio->be_move_done = ctl_config_move_done; 9700 ctl_datamove((union ctl_io *)ctsio); 9701 return (CTL_RETVAL_COMPLETE); 9702 } 9703 9704 static int 9705 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9706 { 9707 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9708 struct ctl_lun *lun = CTL_LUN(ctsio); 9709 struct scsi_vpd_scsi_ports *sp; 9710 struct scsi_vpd_port_designation *pd; 9711 struct scsi_vpd_port_designation_cont *pdc; 9712 struct ctl_port *port; 9713 int data_len, num_target_ports, iid_len, id_len; 9714 9715 num_target_ports = 0; 9716 iid_len = 0; 9717 id_len = 0; 9718 mtx_lock(&softc->ctl_lock); 9719 STAILQ_FOREACH(port, &softc->port_list, links) { 9720 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9721 continue; 9722 if (lun != NULL && 9723 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9724 continue; 9725 num_target_ports++; 9726 if (port->init_devid) 9727 iid_len += port->init_devid->len; 9728 if (port->port_devid) 9729 id_len += port->port_devid->len; 9730 } 9731 mtx_unlock(&softc->ctl_lock); 9732 9733 data_len = sizeof(struct scsi_vpd_scsi_ports) + 9734 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9735 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9736 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9737 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9738 ctsio->kern_sg_entries = 0; 9739 ctsio->kern_rel_offset = 0; 9740 ctsio->kern_sg_entries = 0; 9741 ctsio->kern_data_len = min(data_len, alloc_len); 9742 ctsio->kern_total_len = ctsio->kern_data_len; 9743 9744 /* 9745 * The control device is always connected. The disk device, on the 9746 * other hand, may not be online all the time. Need to change this 9747 * to figure out whether the disk device is actually online or not. 
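 *
 * Note the two passes over softc->port_list: the first, above, only
 * sizes the buffer, while the second, below, emits one port designation
 * plus continuation descriptor for every online port through which this
 * LUN is visible.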
9748 */ 9749 if (lun != NULL) 9750 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 9751 lun->be_lun->lun_type; 9752 else 9753 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9754 9755 sp->page_code = SVPD_SCSI_PORTS; 9756 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9757 sp->page_length); 9758 pd = &sp->design[0]; 9759 9760 mtx_lock(&softc->ctl_lock); 9761 STAILQ_FOREACH(port, &softc->port_list, links) { 9762 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9763 continue; 9764 if (lun != NULL && 9765 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9766 continue; 9767 scsi_ulto2b(port->targ_port, pd->relative_port_id); 9768 if (port->init_devid) { 9769 iid_len = port->init_devid->len; 9770 memcpy(pd->initiator_transportid, 9771 port->init_devid->data, port->init_devid->len); 9772 } else 9773 iid_len = 0; 9774 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9775 pdc = (struct scsi_vpd_port_designation_cont *) 9776 (&pd->initiator_transportid[iid_len]); 9777 if (port->port_devid) { 9778 id_len = port->port_devid->len; 9779 memcpy(pdc->target_port_descriptors, 9780 port->port_devid->data, port->port_devid->len); 9781 } else 9782 id_len = 0; 9783 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9784 pd = (struct scsi_vpd_port_designation *) 9785 ((uint8_t *)pdc->target_port_descriptors + id_len); 9786 } 9787 mtx_unlock(&softc->ctl_lock); 9788 9789 ctl_set_success(ctsio); 9790 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9791 ctsio->be_move_done = ctl_config_move_done; 9792 ctl_datamove((union ctl_io *)ctsio); 9793 return (CTL_RETVAL_COMPLETE); 9794 } 9795 9796 static int 9797 ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len) 9798 { 9799 struct ctl_lun *lun = CTL_LUN(ctsio); 9800 struct scsi_vpd_sfs *sfs_ptr; 9801 int sfs_page_size, n; 9802 9803 sfs_page_size = sizeof(*sfs_ptr) + 5 * 2; 9804 ctsio->kern_data_ptr = malloc(sfs_page_size, M_CTL, M_WAITOK | M_ZERO); 9805 sfs_ptr = (struct scsi_vpd_sfs *)ctsio->kern_data_ptr; 9806 ctsio->kern_sg_entries = 0; 9807 ctsio->kern_rel_offset = 0; 9808 ctsio->kern_sg_entries = 0; 9809 ctsio->kern_data_len = min(sfs_page_size, alloc_len); 9810 ctsio->kern_total_len = ctsio->kern_data_len; 9811 9812 /* 9813 * The control device is always connected. The disk device, on the 9814 * other hand, may not be online all the time. Need to change this 9815 * to figure out whether the disk device is actually online or not. 
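 *
 * Each feature set is reported as a two byte code, so the page length
 * written below, 4 + 2 * n, covers the four reserved bytes that sit
 * between the header and the code list plus the codes actually emitted.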
9816 */ 9817 if (lun != NULL) 9818 sfs_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9819 lun->be_lun->lun_type; 9820 else 9821 sfs_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9822 9823 sfs_ptr->page_code = SVPD_SCSI_SFS; 9824 n = 0; 9825 /* Discovery 2016 */ 9826 scsi_ulto2b(0x0001, &sfs_ptr->codes[2 * n++]); 9827 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9828 /* SBC Base 2016 */ 9829 scsi_ulto2b(0x0101, &sfs_ptr->codes[2 * n++]); 9830 /* SBC Base 2010 */ 9831 scsi_ulto2b(0x0102, &sfs_ptr->codes[2 * n++]); 9832 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9833 /* Basic Provisioning 2016 */ 9834 scsi_ulto2b(0x0103, &sfs_ptr->codes[2 * n++]); 9835 } 9836 /* Drive Maintenance 2016 */ 9837 //scsi_ulto2b(0x0104, &sfs_ptr->codes[2 * n++]); 9838 } 9839 scsi_ulto2b(4 + 2 * n, sfs_ptr->page_length); 9840 9841 ctl_set_success(ctsio); 9842 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9843 ctsio->be_move_done = ctl_config_move_done; 9844 ctl_datamove((union ctl_io *)ctsio); 9845 return (CTL_RETVAL_COMPLETE); 9846 } 9847 9848 static int 9849 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9850 { 9851 struct ctl_lun *lun = CTL_LUN(ctsio); 9852 struct scsi_vpd_block_limits *bl_ptr; 9853 const char *val; 9854 uint64_t ival; 9855 9856 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9857 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 9858 ctsio->kern_sg_entries = 0; 9859 ctsio->kern_rel_offset = 0; 9860 ctsio->kern_sg_entries = 0; 9861 ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len); 9862 ctsio->kern_total_len = ctsio->kern_data_len; 9863 9864 /* 9865 * The control device is always connected. The disk device, on the 9866 * other hand, may not be online all the time. Need to change this 9867 * to figure out whether the disk device is actually online or not. 
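 *
 * One detail worth noting: the WSNZ flag set at the end of this
 * function means that if the LUN is larger than the advertised maximum
 * WRITE SAME length, a WRITE SAME with a transfer length of zero
 * ("to the end of the medium") cannot be honoured, so initiators are
 * told not to send one.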
9868 */ 9869 if (lun != NULL) 9870 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9871 lun->be_lun->lun_type; 9872 else 9873 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9874 9875 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9876 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 9877 bl_ptr->max_cmp_write_len = 0xff; 9878 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9879 if (lun != NULL) { 9880 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 9881 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9882 ival = 0xffffffff; 9883 val = dnvlist_get_string(lun->be_lun->options, 9884 "unmap_max_lba", NULL); 9885 if (val != NULL) 9886 ctl_expand_number(val, &ival); 9887 scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt); 9888 ival = 0xffffffff; 9889 val = dnvlist_get_string(lun->be_lun->options, 9890 "unmap_max_descr", NULL); 9891 if (val != NULL) 9892 ctl_expand_number(val, &ival); 9893 scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt); 9894 if (lun->be_lun->ublockexp != 0) { 9895 scsi_ulto4b((1 << lun->be_lun->ublockexp), 9896 bl_ptr->opt_unmap_grain); 9897 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 9898 bl_ptr->unmap_grain_align); 9899 } 9900 } 9901 scsi_ulto4b(lun->be_lun->atomicblock, 9902 bl_ptr->max_atomic_transfer_length); 9903 scsi_ulto4b(0, bl_ptr->atomic_alignment); 9904 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 9905 scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary); 9906 scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size); 9907 ival = UINT64_MAX; 9908 val = dnvlist_get_string(lun->be_lun->options, 9909 "write_same_max_lba", NULL); 9910 if (val != NULL) 9911 ctl_expand_number(val, &ival); 9912 scsi_u64to8b(ival, bl_ptr->max_write_same_length); 9913 if (lun->be_lun->maxlba + 1 > ival) 9914 bl_ptr->flags |= SVPD_BL_WSNZ; 9915 } 9916 9917 ctl_set_success(ctsio); 9918 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9919 ctsio->be_move_done = ctl_config_move_done; 9920 ctl_datamove((union ctl_io *)ctsio); 9921 return (CTL_RETVAL_COMPLETE); 9922 } 9923 9924 static int 9925 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 9926 { 9927 struct ctl_lun *lun = CTL_LUN(ctsio); 9928 struct scsi_vpd_block_device_characteristics *bdc_ptr; 9929 const char *value; 9930 u_int i; 9931 9932 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 9933 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 9934 ctsio->kern_sg_entries = 0; 9935 ctsio->kern_rel_offset = 0; 9936 ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len); 9937 ctsio->kern_total_len = ctsio->kern_data_len; 9938 9939 /* 9940 * The control device is always connected. The disk device, on the 9941 * other hand, may not be online all the time. Need to change this 9942 * to figure out whether the disk device is actually online or not. 
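 *
 * The "rpm" and "formfactor" LUN options, when present, feed the fields
 * below: the rotation rate is reported as given (0001h conventionally
 * meaning a non rotating, solid state medium) and the form factor value
 * is folded into the low nibble of the combined WABEREQ/WACEREQ/form
 * factor byte.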
9943 */ 9944 if (lun != NULL) 9945 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9946 lun->be_lun->lun_type; 9947 else 9948 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9949 bdc_ptr->page_code = SVPD_BDC; 9950 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 9951 if (lun != NULL && 9952 (value = dnvlist_get_string(lun->be_lun->options, "rpm", NULL)) != NULL) 9953 i = strtol(value, NULL, 0); 9954 else 9955 i = CTL_DEFAULT_ROTATION_RATE; 9956 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 9957 if (lun != NULL && 9958 (value = dnvlist_get_string(lun->be_lun->options, "formfactor", NULL)) != NULL) 9959 i = strtol(value, NULL, 0); 9960 else 9961 i = 0; 9962 bdc_ptr->wab_wac_ff = (i & 0x0f); 9963 bdc_ptr->flags = SVPD_RBWZ | SVPD_FUAB | SVPD_VBULS; 9964 9965 ctl_set_success(ctsio); 9966 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9967 ctsio->be_move_done = ctl_config_move_done; 9968 ctl_datamove((union ctl_io *)ctsio); 9969 return (CTL_RETVAL_COMPLETE); 9970 } 9971 9972 static int 9973 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 9974 { 9975 struct ctl_lun *lun = CTL_LUN(ctsio); 9976 struct scsi_vpd_logical_block_prov *lbp_ptr; 9977 const char *value; 9978 9979 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 9980 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 9981 ctsio->kern_sg_entries = 0; 9982 ctsio->kern_rel_offset = 0; 9983 ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len); 9984 ctsio->kern_total_len = ctsio->kern_data_len; 9985 9986 /* 9987 * The control device is always connected. The disk device, on the 9988 * other hand, may not be online all the time. Need to change this 9989 * to figure out whether the disk device is actually online or not. 9990 */ 9991 if (lun != NULL) 9992 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9993 lun->be_lun->lun_type; 9994 else 9995 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9996 9997 lbp_ptr->page_code = SVPD_LBP; 9998 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 9999 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 10000 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 10001 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 10002 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 10003 value = dnvlist_get_string(lun->be_lun->options, 10004 "provisioning_type", NULL); 10005 if (value != NULL) { 10006 if (strcmp(value, "resource") == 0) 10007 lbp_ptr->prov_type = SVPD_LBP_RESOURCE; 10008 else if (strcmp(value, "thin") == 0) 10009 lbp_ptr->prov_type = SVPD_LBP_THIN; 10010 } else 10011 lbp_ptr->prov_type = SVPD_LBP_THIN; 10012 } 10013 10014 ctl_set_success(ctsio); 10015 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10016 ctsio->be_move_done = ctl_config_move_done; 10017 ctl_datamove((union ctl_io *)ctsio); 10018 return (CTL_RETVAL_COMPLETE); 10019 } 10020 10021 /* 10022 * INQUIRY with the EVPD bit set. 
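 *
 * The dispatcher below routes purely on the PAGE CODE byte of the CDB.
 * The Block Limits, Block Device Characteristics and Logical Block
 * Provisioning pages are only served for direct access LUNs; any other
 * page code is answered with ILLEGAL REQUEST / invalid field in CDB.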
10023 */ 10024 static int 10025 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 10026 { 10027 struct ctl_lun *lun = CTL_LUN(ctsio); 10028 struct scsi_inquiry *cdb; 10029 int alloc_len, retval; 10030 10031 cdb = (struct scsi_inquiry *)ctsio->cdb; 10032 alloc_len = scsi_2btoul(cdb->length); 10033 10034 switch (cdb->page_code) { 10035 case SVPD_SUPPORTED_PAGES: 10036 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 10037 break; 10038 case SVPD_UNIT_SERIAL_NUMBER: 10039 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 10040 break; 10041 case SVPD_DEVICE_ID: 10042 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 10043 break; 10044 case SVPD_EXTENDED_INQUIRY_DATA: 10045 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 10046 break; 10047 case SVPD_MODE_PAGE_POLICY: 10048 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 10049 break; 10050 case SVPD_SCSI_PORTS: 10051 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 10052 break; 10053 case SVPD_SCSI_TPC: 10054 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 10055 break; 10056 case SVPD_SCSI_SFS: 10057 retval = ctl_inquiry_evpd_sfs(ctsio, alloc_len); 10058 break; 10059 case SVPD_BLOCK_LIMITS: 10060 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10061 goto err; 10062 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 10063 break; 10064 case SVPD_BDC: 10065 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10066 goto err; 10067 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 10068 break; 10069 case SVPD_LBP: 10070 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10071 goto err; 10072 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 10073 break; 10074 default: 10075 err: 10076 ctl_set_invalid_field(ctsio, 10077 /*sks_valid*/ 1, 10078 /*command*/ 1, 10079 /*field*/ 2, 10080 /*bit_valid*/ 0, 10081 /*bit*/ 0); 10082 ctl_done((union ctl_io *)ctsio); 10083 retval = CTL_RETVAL_COMPLETE; 10084 break; 10085 } 10086 10087 return (retval); 10088 } 10089 10090 /* 10091 * Standard INQUIRY data. 10092 */ 10093 static int 10094 ctl_inquiry_std(struct ctl_scsiio *ctsio) 10095 { 10096 struct ctl_softc *softc = CTL_SOFTC(ctsio); 10097 struct ctl_port *port = CTL_PORT(ctsio); 10098 struct ctl_lun *lun = CTL_LUN(ctsio); 10099 struct scsi_inquiry_data *inq_ptr; 10100 struct scsi_inquiry *cdb; 10101 const char *val; 10102 uint32_t alloc_len, data_len; 10103 ctl_port_type port_type; 10104 10105 port_type = port->port_type; 10106 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 10107 port_type = CTL_PORT_SCSI; 10108 10109 cdb = (struct scsi_inquiry *)ctsio->cdb; 10110 alloc_len = scsi_2btoul(cdb->length); 10111 10112 /* 10113 * We malloc the full inquiry data size here and fill it 10114 * in. If the user only asks for less, we'll give him 10115 * that much. 
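 *
 * This is the same pattern the VPD handlers above use: data_len is the
 * size of what we build, alloc_len is the ALLOCATION LENGTH from the
 * CDB, and kern_data_len = min(data_len, alloc_len) is what actually
 * gets moved back to the initiator.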
10116 */ 10117 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); 10118 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10119 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 10120 ctsio->kern_sg_entries = 0; 10121 ctsio->kern_rel_offset = 0; 10122 ctsio->kern_data_len = min(data_len, alloc_len); 10123 ctsio->kern_total_len = ctsio->kern_data_len; 10124 10125 if (lun != NULL) { 10126 if ((lun->flags & CTL_LUN_PRIMARY_SC) || 10127 softc->ha_link >= CTL_HA_LINK_UNKNOWN) { 10128 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10129 lun->be_lun->lun_type; 10130 } else { 10131 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | 10132 lun->be_lun->lun_type; 10133 } 10134 if (lun->flags & CTL_LUN_REMOVABLE) 10135 inq_ptr->dev_qual2 |= SID_RMB; 10136 } else 10137 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 10138 10139 /* RMB in byte 2 is 0 */ 10140 inq_ptr->version = SCSI_REV_SPC5; 10141 10142 /* 10143 * According to SAM-3, even if a device only supports a single 10144 * level of LUN addressing, it should still set the HISUP bit: 10145 * 10146 * 4.9.1 Logical unit numbers overview 10147 * 10148 * All logical unit number formats described in this standard are 10149 * hierarchical in structure even when only a single level in that 10150 * hierarchy is used. The HISUP bit shall be set to one in the 10151 * standard INQUIRY data (see SPC-2) when any logical unit number 10152 * format described in this standard is used. Non-hierarchical 10153 * formats are outside the scope of this standard. 10154 * 10155 * Therefore we set the HiSup bit here. 10156 * 10157 * The response format is 2, per SPC-3. 10158 */ 10159 inq_ptr->response_format = SID_HiSup | 2; 10160 10161 inq_ptr->additional_length = data_len - 10162 (offsetof(struct scsi_inquiry_data, additional_length) + 1); 10163 CTL_DEBUG_PRINT(("additional_length = %d\n", 10164 inq_ptr->additional_length)); 10165 10166 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; 10167 if (port_type == CTL_PORT_SCSI) 10168 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10169 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10170 inq_ptr->flags = SID_CmdQue; 10171 if (port_type == CTL_PORT_SCSI) 10172 inq_ptr->flags |= SID_WBus16 | SID_Sync; 10173 10174 /* 10175 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10176 * We have 8 bytes for the vendor name, and 16 bytes for the device 10177 * name and 4 bytes for the revision. 
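 *
 * The idiom below relies on that: when a value comes from the LUN
 * options, the field is first filled with spaces and then overwritten
 * with strncpy() bounded by min(field size, strlen(val)), so the result
 * is space padded rather than NUL terminated, as INQUIRY ASCII fields
 * are expected to be.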
10178 */ 10179 if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, 10180 "vendor", NULL)) == NULL) { 10181 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10182 } else { 10183 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10184 strncpy(inq_ptr->vendor, val, 10185 min(sizeof(inq_ptr->vendor), strlen(val))); 10186 } 10187 if (lun == NULL) { 10188 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10189 sizeof(inq_ptr->product)); 10190 } else if ((val = dnvlist_get_string(lun->be_lun->options, "product", 10191 NULL)) == NULL) { 10192 switch (lun->be_lun->lun_type) { 10193 case T_DIRECT: 10194 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10195 sizeof(inq_ptr->product)); 10196 break; 10197 case T_PROCESSOR: 10198 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10199 sizeof(inq_ptr->product)); 10200 break; 10201 case T_CDROM: 10202 strncpy(inq_ptr->product, CTL_CDROM_PRODUCT, 10203 sizeof(inq_ptr->product)); 10204 break; 10205 default: 10206 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 10207 sizeof(inq_ptr->product)); 10208 break; 10209 } 10210 } else { 10211 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10212 strncpy(inq_ptr->product, val, 10213 min(sizeof(inq_ptr->product), strlen(val))); 10214 } 10215 10216 /* 10217 * XXX make this a macro somewhere so it automatically gets 10218 * incremented when we make changes. 10219 */ 10220 if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, 10221 "revision", NULL)) == NULL) { 10222 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10223 } else { 10224 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10225 strncpy(inq_ptr->revision, val, 10226 min(sizeof(inq_ptr->revision), strlen(val))); 10227 } 10228 10229 /* 10230 * For parallel SCSI, we support double transition and single 10231 * transition clocking. We also support QAS (Quick Arbitration 10232 * and Selection) and Information Unit transfers on both the 10233 * control and array devices. 
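 *
 * The version descriptor slots filled in below use the SPC code points:
 * SAM-6 and SPC-5 with no version claimed, one transport descriptor
 * chosen by port type, and a command set descriptor (SBC-4 or MMC-6)
 * chosen by LUN type.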
10234 */ 10235 if (port_type == CTL_PORT_SCSI) 10236 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10237 SID_SPI_IUS; 10238 10239 /* SAM-6 (no version claimed) */ 10240 scsi_ulto2b(0x00C0, inq_ptr->version1); 10241 /* SPC-5 (no version claimed) */ 10242 scsi_ulto2b(0x05C0, inq_ptr->version2); 10243 if (port_type == CTL_PORT_FC) { 10244 /* FCP-2 ANSI INCITS.350:2003 */ 10245 scsi_ulto2b(0x0917, inq_ptr->version3); 10246 } else if (port_type == CTL_PORT_SCSI) { 10247 /* SPI-4 ANSI INCITS.362:200x */ 10248 scsi_ulto2b(0x0B56, inq_ptr->version3); 10249 } else if (port_type == CTL_PORT_ISCSI) { 10250 /* iSCSI (no version claimed) */ 10251 scsi_ulto2b(0x0960, inq_ptr->version3); 10252 } else if (port_type == CTL_PORT_SAS) { 10253 /* SAS (no version claimed) */ 10254 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10255 } else if (port_type == CTL_PORT_UMASS) { 10256 /* USB Mass Storage Class Bulk-Only Transport, Revision 1.0 */ 10257 scsi_ulto2b(0x1730, inq_ptr->version3); 10258 } 10259 10260 if (lun == NULL) { 10261 /* SBC-4 (no version claimed) */ 10262 scsi_ulto2b(0x0600, inq_ptr->version4); 10263 } else { 10264 switch (lun->be_lun->lun_type) { 10265 case T_DIRECT: 10266 /* SBC-4 (no version claimed) */ 10267 scsi_ulto2b(0x0600, inq_ptr->version4); 10268 break; 10269 case T_PROCESSOR: 10270 break; 10271 case T_CDROM: 10272 /* MMC-6 (no version claimed) */ 10273 scsi_ulto2b(0x04E0, inq_ptr->version4); 10274 break; 10275 default: 10276 break; 10277 } 10278 } 10279 10280 ctl_set_success(ctsio); 10281 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10282 ctsio->be_move_done = ctl_config_move_done; 10283 ctl_datamove((union ctl_io *)ctsio); 10284 return (CTL_RETVAL_COMPLETE); 10285 } 10286 10287 int 10288 ctl_inquiry(struct ctl_scsiio *ctsio) 10289 { 10290 struct scsi_inquiry *cdb; 10291 int retval; 10292 10293 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10294 10295 cdb = (struct scsi_inquiry *)ctsio->cdb; 10296 if (cdb->byte2 & SI_EVPD) 10297 retval = ctl_inquiry_evpd(ctsio); 10298 else if (cdb->page_code == 0) 10299 retval = ctl_inquiry_std(ctsio); 10300 else { 10301 ctl_set_invalid_field(ctsio, 10302 /*sks_valid*/ 1, 10303 /*command*/ 1, 10304 /*field*/ 2, 10305 /*bit_valid*/ 0, 10306 /*bit*/ 0); 10307 ctl_done((union ctl_io *)ctsio); 10308 return (CTL_RETVAL_COMPLETE); 10309 } 10310 10311 return (retval); 10312 } 10313 10314 int 10315 ctl_get_config(struct ctl_scsiio *ctsio) 10316 { 10317 struct ctl_lun *lun = CTL_LUN(ctsio); 10318 struct scsi_get_config_header *hdr; 10319 struct scsi_get_config_feature *feature; 10320 struct scsi_get_config *cdb; 10321 uint32_t alloc_len, data_len; 10322 int rt, starting; 10323 10324 cdb = (struct scsi_get_config *)ctsio->cdb; 10325 rt = (cdb->rt & SGC_RT_MASK); 10326 starting = scsi_2btoul(cdb->starting_feature); 10327 alloc_len = scsi_2btoul(cdb->length); 10328 10329 data_len = sizeof(struct scsi_get_config_header) + 10330 sizeof(struct scsi_get_config_feature) + 8 + 10331 sizeof(struct scsi_get_config_feature) + 8 + 10332 sizeof(struct scsi_get_config_feature) + 4 + 10333 sizeof(struct scsi_get_config_feature) + 4 + 10334 sizeof(struct scsi_get_config_feature) + 8 + 10335 sizeof(struct scsi_get_config_feature) + 10336 sizeof(struct scsi_get_config_feature) + 4 + 10337 sizeof(struct scsi_get_config_feature) + 4 + 10338 sizeof(struct scsi_get_config_feature) + 4 + 10339 sizeof(struct scsi_get_config_feature) + 4 + 10340 sizeof(struct scsi_get_config_feature) + 4 + 10341 sizeof(struct scsi_get_config_feature) + 4; 10342 ctsio->kern_data_ptr = malloc(data_len, M_CTL, 
M_WAITOK | M_ZERO); 10343 ctsio->kern_sg_entries = 0; 10344 ctsio->kern_rel_offset = 0; 10345 10346 hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr; 10347 if (lun->flags & CTL_LUN_NO_MEDIA) 10348 scsi_ulto2b(0x0000, hdr->current_profile); 10349 else 10350 scsi_ulto2b(0x0010, hdr->current_profile); 10351 feature = (struct scsi_get_config_feature *)(hdr + 1); 10352 10353 if (starting > 0x003b) 10354 goto done; 10355 if (starting > 0x003a) 10356 goto f3b; 10357 if (starting > 0x002b) 10358 goto f3a; 10359 if (starting > 0x002a) 10360 goto f2b; 10361 if (starting > 0x001f) 10362 goto f2a; 10363 if (starting > 0x001e) 10364 goto f1f; 10365 if (starting > 0x001d) 10366 goto f1e; 10367 if (starting > 0x0010) 10368 goto f1d; 10369 if (starting > 0x0003) 10370 goto f10; 10371 if (starting > 0x0002) 10372 goto f3; 10373 if (starting > 0x0001) 10374 goto f2; 10375 if (starting > 0x0000) 10376 goto f1; 10377 10378 /* Profile List */ 10379 scsi_ulto2b(0x0000, feature->feature_code); 10380 feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT; 10381 feature->add_length = 8; 10382 scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */ 10383 feature->feature_data[2] = 0x00; 10384 scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */ 10385 feature->feature_data[6] = 0x01; 10386 feature = (struct scsi_get_config_feature *) 10387 &feature->feature_data[feature->add_length]; 10388 10389 f1: /* Core */ 10390 scsi_ulto2b(0x0001, feature->feature_code); 10391 feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10392 feature->add_length = 8; 10393 scsi_ulto4b(0x00000000, &feature->feature_data[0]); 10394 feature->feature_data[4] = 0x03; 10395 feature = (struct scsi_get_config_feature *) 10396 &feature->feature_data[feature->add_length]; 10397 10398 f2: /* Morphing */ 10399 scsi_ulto2b(0x0002, feature->feature_code); 10400 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10401 feature->add_length = 4; 10402 feature->feature_data[0] = 0x02; 10403 feature = (struct scsi_get_config_feature *) 10404 &feature->feature_data[feature->add_length]; 10405 10406 f3: /* Removable Medium */ 10407 scsi_ulto2b(0x0003, feature->feature_code); 10408 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10409 feature->add_length = 4; 10410 feature->feature_data[0] = 0x39; 10411 feature = (struct scsi_get_config_feature *) 10412 &feature->feature_data[feature->add_length]; 10413 10414 if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA)) 10415 goto done; 10416 10417 f10: /* Random Read */ 10418 scsi_ulto2b(0x0010, feature->feature_code); 10419 feature->flags = 0x00; 10420 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10421 feature->flags |= SGC_F_CURRENT; 10422 feature->add_length = 8; 10423 scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]); 10424 scsi_ulto2b(1, &feature->feature_data[4]); 10425 feature->feature_data[6] = 0x00; 10426 feature = (struct scsi_get_config_feature *) 10427 &feature->feature_data[feature->add_length]; 10428 10429 f1d: /* Multi-Read */ 10430 scsi_ulto2b(0x001D, feature->feature_code); 10431 feature->flags = 0x00; 10432 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10433 feature->flags |= SGC_F_CURRENT; 10434 feature->add_length = 0; 10435 feature = (struct scsi_get_config_feature *) 10436 &feature->feature_data[feature->add_length]; 10437 10438 f1e: /* CD Read */ 10439 scsi_ulto2b(0x001E, feature->feature_code); 10440 feature->flags = 0x00; 10441 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10442 feature->flags |= SGC_F_CURRENT; 10443 feature->add_length = 
4; 10444 feature->feature_data[0] = 0x00; 10445 feature = (struct scsi_get_config_feature *) 10446 &feature->feature_data[feature->add_length]; 10447 10448 f1f: /* DVD Read */ 10449 scsi_ulto2b(0x001F, feature->feature_code); 10450 feature->flags = 0x08; 10451 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10452 feature->flags |= SGC_F_CURRENT; 10453 feature->add_length = 4; 10454 feature->feature_data[0] = 0x01; 10455 feature->feature_data[2] = 0x03; 10456 feature = (struct scsi_get_config_feature *) 10457 &feature->feature_data[feature->add_length]; 10458 10459 f2a: /* DVD+RW */ 10460 scsi_ulto2b(0x002A, feature->feature_code); 10461 feature->flags = 0x04; 10462 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10463 feature->flags |= SGC_F_CURRENT; 10464 feature->add_length = 4; 10465 feature->feature_data[0] = 0x00; 10466 feature->feature_data[1] = 0x00; 10467 feature = (struct scsi_get_config_feature *) 10468 &feature->feature_data[feature->add_length]; 10469 10470 f2b: /* DVD+R */ 10471 scsi_ulto2b(0x002B, feature->feature_code); 10472 feature->flags = 0x00; 10473 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10474 feature->flags |= SGC_F_CURRENT; 10475 feature->add_length = 4; 10476 feature->feature_data[0] = 0x00; 10477 feature = (struct scsi_get_config_feature *) 10478 &feature->feature_data[feature->add_length]; 10479 10480 f3a: /* DVD+RW Dual Layer */ 10481 scsi_ulto2b(0x003A, feature->feature_code); 10482 feature->flags = 0x00; 10483 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10484 feature->flags |= SGC_F_CURRENT; 10485 feature->add_length = 4; 10486 feature->feature_data[0] = 0x00; 10487 feature->feature_data[1] = 0x00; 10488 feature = (struct scsi_get_config_feature *) 10489 &feature->feature_data[feature->add_length]; 10490 10491 f3b: /* DVD+R Dual Layer */ 10492 scsi_ulto2b(0x003B, feature->feature_code); 10493 feature->flags = 0x00; 10494 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10495 feature->flags |= SGC_F_CURRENT; 10496 feature->add_length = 4; 10497 feature->feature_data[0] = 0x00; 10498 feature = (struct scsi_get_config_feature *) 10499 &feature->feature_data[feature->add_length]; 10500 10501 done: 10502 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10503 if (rt == SGC_RT_SPECIFIC && data_len > 4) { 10504 feature = (struct scsi_get_config_feature *)(hdr + 1); 10505 if (scsi_2btoul(feature->feature_code) == starting) 10506 feature = (struct scsi_get_config_feature *) 10507 &feature->feature_data[feature->add_length]; 10508 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10509 } 10510 scsi_ulto4b(data_len - 4, hdr->data_length); 10511 ctsio->kern_data_len = min(data_len, alloc_len); 10512 ctsio->kern_total_len = ctsio->kern_data_len; 10513 10514 ctl_set_success(ctsio); 10515 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10516 ctsio->be_move_done = ctl_config_move_done; 10517 ctl_datamove((union ctl_io *)ctsio); 10518 return (CTL_RETVAL_COMPLETE); 10519 } 10520 10521 int 10522 ctl_get_event_status(struct ctl_scsiio *ctsio) 10523 { 10524 struct scsi_get_event_status_header *hdr; 10525 struct scsi_get_event_status *cdb; 10526 uint32_t alloc_len, data_len; 10527 10528 cdb = (struct scsi_get_event_status *)ctsio->cdb; 10529 if ((cdb->byte2 & SGESN_POLLED) == 0) { 10530 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 10531 /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 10532 ctl_done((union ctl_io *)ctsio); 10533 return (CTL_RETVAL_COMPLETE); 10534 } 10535 alloc_len = scsi_2btoul(cdb->length); 10536 10537 data_len = sizeof(struct scsi_get_event_status_header); 10538 
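/*
 * What goes back is deliberately minimal: a bare header with a zero
 * descriptor length and the NEA (no event available) class set, which
 * is the standard "nothing to report" answer to a polled GET EVENT
 * STATUS NOTIFICATION.
 */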
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10539 ctsio->kern_sg_entries = 0; 10540 ctsio->kern_rel_offset = 0; 10541 ctsio->kern_data_len = min(data_len, alloc_len); 10542 ctsio->kern_total_len = ctsio->kern_data_len; 10543 10544 hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr; 10545 scsi_ulto2b(0, hdr->descr_length); 10546 hdr->nea_class = SGESN_NEA; 10547 hdr->supported_class = 0; 10548 10549 ctl_set_success(ctsio); 10550 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10551 ctsio->be_move_done = ctl_config_move_done; 10552 ctl_datamove((union ctl_io *)ctsio); 10553 return (CTL_RETVAL_COMPLETE); 10554 } 10555 10556 int 10557 ctl_mechanism_status(struct ctl_scsiio *ctsio) 10558 { 10559 struct scsi_mechanism_status_header *hdr; 10560 struct scsi_mechanism_status *cdb; 10561 uint32_t alloc_len, data_len; 10562 10563 cdb = (struct scsi_mechanism_status *)ctsio->cdb; 10564 alloc_len = scsi_2btoul(cdb->length); 10565 10566 data_len = sizeof(struct scsi_mechanism_status_header); 10567 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10568 ctsio->kern_sg_entries = 0; 10569 ctsio->kern_rel_offset = 0; 10570 ctsio->kern_data_len = min(data_len, alloc_len); 10571 ctsio->kern_total_len = ctsio->kern_data_len; 10572 10573 hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr; 10574 hdr->state1 = 0x00; 10575 hdr->state2 = 0xe0; 10576 scsi_ulto3b(0, hdr->lba); 10577 hdr->slots_num = 0; 10578 scsi_ulto2b(0, hdr->slots_length); 10579 10580 ctl_set_success(ctsio); 10581 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10582 ctsio->be_move_done = ctl_config_move_done; 10583 ctl_datamove((union ctl_io *)ctsio); 10584 return (CTL_RETVAL_COMPLETE); 10585 } 10586 10587 static void 10588 ctl_ultomsf(uint32_t lba, uint8_t *buf) 10589 { 10590 10591 lba += 150; 10592 buf[0] = 0; 10593 buf[1] = bin2bcd((lba / 75) / 60); 10594 buf[2] = bin2bcd((lba / 75) % 60); 10595 buf[3] = bin2bcd(lba % 75); 10596 } 10597 10598 int 10599 ctl_read_toc(struct ctl_scsiio *ctsio) 10600 { 10601 struct ctl_lun *lun = CTL_LUN(ctsio); 10602 struct scsi_read_toc_hdr *hdr; 10603 struct scsi_read_toc_type01_descr *descr; 10604 struct scsi_read_toc *cdb; 10605 uint32_t alloc_len, data_len; 10606 int format, msf; 10607 10608 cdb = (struct scsi_read_toc *)ctsio->cdb; 10609 msf = (cdb->byte2 & CD_MSF) != 0; 10610 format = cdb->format; 10611 alloc_len = scsi_2btoul(cdb->data_len); 10612 10613 data_len = sizeof(struct scsi_read_toc_hdr); 10614 if (format == 0) 10615 data_len += 2 * sizeof(struct scsi_read_toc_type01_descr); 10616 else 10617 data_len += sizeof(struct scsi_read_toc_type01_descr); 10618 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10619 ctsio->kern_sg_entries = 0; 10620 ctsio->kern_rel_offset = 0; 10621 ctsio->kern_data_len = min(data_len, alloc_len); 10622 ctsio->kern_total_len = ctsio->kern_data_len; 10623 10624 hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr; 10625 if (format == 0) { 10626 scsi_ulto2b(0x12, hdr->data_length); 10627 hdr->first = 1; 10628 hdr->last = 1; 10629 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10630 descr->addr_ctl = 0x14; 10631 descr->track_number = 1; 10632 if (msf) 10633 ctl_ultomsf(0, descr->track_start); 10634 else 10635 scsi_ulto4b(0, descr->track_start); 10636 descr++; 10637 descr->addr_ctl = 0x14; 10638 descr->track_number = 0xaa; 10639 if (msf) 10640 ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start); 10641 else 10642 scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start); 10643 } else { 10644 
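/*
 * Non-zero formats are all answered the same way: a 0x0a byte header
 * with first/last set to 1 and a single descriptor for track 1 starting
 * at LBA 0. With MSF addressing that start reads as 00:02:00, since
 * ctl_ultomsf() adds the 150 frame (two second) lead in before the
 * minute/second/frame split.
 */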
scsi_ulto2b(0x0a, hdr->data_length); 10645 hdr->first = 1; 10646 hdr->last = 1; 10647 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10648 descr->addr_ctl = 0x14; 10649 descr->track_number = 1; 10650 if (msf) 10651 ctl_ultomsf(0, descr->track_start); 10652 else 10653 scsi_ulto4b(0, descr->track_start); 10654 } 10655 10656 ctl_set_success(ctsio); 10657 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10658 ctsio->be_move_done = ctl_config_move_done; 10659 ctl_datamove((union ctl_io *)ctsio); 10660 return (CTL_RETVAL_COMPLETE); 10661 } 10662 10663 /* 10664 * For known CDB types, parse the LBA and length. 10665 */ 10666 static int 10667 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10668 { 10669 10670 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 10671 ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); 10672 10673 switch (io->scsiio.cdb[0]) { 10674 case COMPARE_AND_WRITE: { 10675 struct scsi_compare_and_write *cdb; 10676 10677 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10678 10679 *lba = scsi_8btou64(cdb->addr); 10680 *len = cdb->length; 10681 break; 10682 } 10683 case READ_6: 10684 case WRITE_6: { 10685 struct scsi_rw_6 *cdb; 10686 10687 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10688 10689 *lba = scsi_3btoul(cdb->addr); 10690 /* only 5 bits are valid in the most significant address byte */ 10691 *lba &= 0x1fffff; 10692 *len = cdb->length; 10693 break; 10694 } 10695 case READ_10: 10696 case WRITE_10: { 10697 struct scsi_rw_10 *cdb; 10698 10699 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10700 10701 *lba = scsi_4btoul(cdb->addr); 10702 *len = scsi_2btoul(cdb->length); 10703 break; 10704 } 10705 case WRITE_VERIFY_10: { 10706 struct scsi_write_verify_10 *cdb; 10707 10708 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10709 10710 *lba = scsi_4btoul(cdb->addr); 10711 *len = scsi_2btoul(cdb->length); 10712 break; 10713 } 10714 case READ_12: 10715 case WRITE_12: { 10716 struct scsi_rw_12 *cdb; 10717 10718 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10719 10720 *lba = scsi_4btoul(cdb->addr); 10721 *len = scsi_4btoul(cdb->length); 10722 break; 10723 } 10724 case WRITE_VERIFY_12: { 10725 struct scsi_write_verify_12 *cdb; 10726 10727 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10728 10729 *lba = scsi_4btoul(cdb->addr); 10730 *len = scsi_4btoul(cdb->length); 10731 break; 10732 } 10733 case READ_16: 10734 case WRITE_16: { 10735 struct scsi_rw_16 *cdb; 10736 10737 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10738 10739 *lba = scsi_8btou64(cdb->addr); 10740 *len = scsi_4btoul(cdb->length); 10741 break; 10742 } 10743 case WRITE_ATOMIC_16: { 10744 struct scsi_write_atomic_16 *cdb; 10745 10746 cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb; 10747 10748 *lba = scsi_8btou64(cdb->addr); 10749 *len = scsi_2btoul(cdb->length); 10750 break; 10751 } 10752 case WRITE_VERIFY_16: { 10753 struct scsi_write_verify_16 *cdb; 10754 10755 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10756 10757 *lba = scsi_8btou64(cdb->addr); 10758 *len = scsi_4btoul(cdb->length); 10759 break; 10760 } 10761 case WRITE_SAME_10: { 10762 struct scsi_write_same_10 *cdb; 10763 10764 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10765 10766 *lba = scsi_4btoul(cdb->addr); 10767 *len = scsi_2btoul(cdb->length); 10768 break; 10769 } 10770 case WRITE_SAME_16: { 10771 struct scsi_write_same_16 *cdb; 10772 10773 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10774 10775 *lba = scsi_8btou64(cdb->addr); 10776 *len = scsi_4btoul(cdb->length); 10777 break; 10778 } 10779 case 
VERIFY_10: { 10780 struct scsi_verify_10 *cdb; 10781 10782 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10783 10784 *lba = scsi_4btoul(cdb->addr); 10785 *len = scsi_2btoul(cdb->length); 10786 break; 10787 } 10788 case VERIFY_12: { 10789 struct scsi_verify_12 *cdb; 10790 10791 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10792 10793 *lba = scsi_4btoul(cdb->addr); 10794 *len = scsi_4btoul(cdb->length); 10795 break; 10796 } 10797 case VERIFY_16: { 10798 struct scsi_verify_16 *cdb; 10799 10800 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10801 10802 *lba = scsi_8btou64(cdb->addr); 10803 *len = scsi_4btoul(cdb->length); 10804 break; 10805 } 10806 case UNMAP: { 10807 *lba = 0; 10808 *len = UINT64_MAX; 10809 break; 10810 } 10811 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10812 struct scsi_get_lba_status *cdb; 10813 10814 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 10815 *lba = scsi_8btou64(cdb->addr); 10816 *len = UINT32_MAX; 10817 break; 10818 } 10819 default: 10820 *lba = 0; 10821 *len = UINT64_MAX; 10822 return (1); 10823 } 10824 10825 return (0); 10826 } 10827 10828 static ctl_action 10829 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, 10830 bool seq) 10831 { 10832 uint64_t endlba1, endlba2; 10833 10834 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10835 endlba2 = lba2 + len2 - 1; 10836 10837 if ((endlba1 < lba2) || (endlba2 < lba1)) 10838 return (CTL_ACTION_PASS); 10839 else 10840 return (CTL_ACTION_BLOCK); 10841 } 10842 10843 static int 10844 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10845 { 10846 struct ctl_ptr_len_flags *ptrlen; 10847 struct scsi_unmap_desc *buf, *end, *range; 10848 uint64_t lba; 10849 uint32_t len; 10850 10851 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 10852 ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); 10853 10854 /* If not UNMAP -- go other way. */ 10855 if (io->scsiio.cdb[0] != UNMAP) 10856 return (CTL_ACTION_SKIP); 10857 10858 /* If UNMAP without data -- block and wait for data. */ 10859 ptrlen = (struct ctl_ptr_len_flags *) 10860 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10861 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10862 ptrlen->ptr == NULL) 10863 return (CTL_ACTION_BLOCK); 10864 10865 /* UNMAP with data -- check for collision. 
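 * Two half open ranges [lba, lba + len) and [lba2, lba2 + len2) overlap
 * exactly when lba < lba2 + len2 and lba + len > lba2; that is the test
 * applied to every descriptor in the UNMAP parameter data below.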
*/ 10866 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10867 end = buf + ptrlen->len / sizeof(*buf); 10868 for (range = buf; range < end; range++) { 10869 lba = scsi_8btou64(range->lba); 10870 len = scsi_4btoul(range->length); 10871 if ((lba < lba2 + len2) && (lba + len > lba2)) 10872 return (CTL_ACTION_BLOCK); 10873 } 10874 return (CTL_ACTION_PASS); 10875 } 10876 10877 static ctl_action 10878 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10879 { 10880 uint64_t lba1, lba2; 10881 uint64_t len1, len2; 10882 int retval; 10883 10884 retval = ctl_get_lba_len(io2, &lba2, &len2); 10885 KASSERT(retval == 0, ("ctl_get_lba_len() error")); 10886 10887 retval = ctl_extent_check_unmap(io1, lba2, len2); 10888 if (retval != CTL_ACTION_SKIP) 10889 return (retval); 10890 10891 retval = ctl_get_lba_len(io1, &lba1, &len1); 10892 KASSERT(retval == 0, ("ctl_get_lba_len() error")); 10893 10894 if (seq && (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)) 10895 seq = FALSE; 10896 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 10897 } 10898 10899 static ctl_action 10900 ctl_seq_check(union ctl_io *io1, union ctl_io *io2) 10901 { 10902 uint64_t lba1, lba2; 10903 uint64_t len1, len2; 10904 int retval __diagused; 10905 10906 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10907 return (CTL_ACTION_PASS); 10908 retval = ctl_get_lba_len(io1, &lba1, &len1); 10909 KASSERT(retval == 0, ("ctl_get_lba_len() error")); 10910 retval = ctl_get_lba_len(io2, &lba2, &len2); 10911 KASSERT(retval == 0, ("ctl_get_lba_len() error")); 10912 10913 if (lba1 + len1 == lba2) 10914 return (CTL_ACTION_BLOCK); 10915 return (CTL_ACTION_PASS); 10916 } 10917 10918 static ctl_action 10919 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 10920 const uint8_t *serialize_row, union ctl_io *ooa_io) 10921 { 10922 10923 /* 10924 * The initiator attempted multiple untagged commands at the same 10925 * time. Can't do that. 10926 */ 10927 if (__predict_false(pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10928 && __predict_false(ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10929 && ((pending_io->io_hdr.nexus.targ_port == 10930 ooa_io->io_hdr.nexus.targ_port) 10931 && (pending_io->io_hdr.nexus.initid == 10932 ooa_io->io_hdr.nexus.initid)) 10933 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10934 CTL_FLAG_STATUS_SENT)) == 0)) 10935 return (CTL_ACTION_OVERLAP); 10936 10937 /* 10938 * The initiator attempted to send multiple tagged commands with 10939 * the same ID. (It's fine if different initiators have the same 10940 * tag ID.) 10941 * 10942 * Even if all of those conditions are true, we don't kill the I/O 10943 * if the command ahead of us has been aborted. We won't end up 10944 * sending it to the FETD, and it's perfectly legal to resend a 10945 * command with the same tag number as long as the previous 10946 * instance of this tag number has been aborted somehow. 10947 */ 10948 if (__predict_true(pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10949 && __predict_true(ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10950 && __predict_false(pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 10951 && ((pending_io->io_hdr.nexus.targ_port == 10952 ooa_io->io_hdr.nexus.targ_port) 10953 && (pending_io->io_hdr.nexus.initid == 10954 ooa_io->io_hdr.nexus.initid)) 10955 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10956 CTL_FLAG_STATUS_SENT)) == 0)) 10957 return (CTL_ACTION_OVERLAP_TAG); 10958 10959 /* 10960 * If we get a head of queue tag, SAM-3 says that we should 10961 * immediately execute it. 
10962 * 10963 * What happens if this command would normally block for some other 10964 * reason? e.g. a request sense with a head of queue tag 10965 * immediately after a write. Normally that would block, but this 10966 * will result in its getting executed immediately... 10967 * 10968 * We currently return "pass" instead of "skip", so we'll end up 10969 * going through the rest of the queue to check for overlapped tags. 10970 * 10971 * XXX KDM check for other types of blockage first?? 10972 */ 10973 if (__predict_false(pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 10974 return (CTL_ACTION_PASS); 10975 10976 /* 10977 * Simple tags get blocked until all head of queue and ordered tags 10978 * ahead of them have completed. I'm lumping untagged commands in 10979 * with simple tags here. XXX KDM is that the right thing to do? 10980 */ 10981 if (__predict_false(ooa_io->scsiio.tag_type == CTL_TAG_ORDERED) || 10982 __predict_false(ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 10983 return (CTL_ACTION_BLOCK); 10984 10985 /* Unsupported command in OOA queue. */ 10986 if (__predict_false(ooa_io->scsiio.seridx == CTL_SERIDX_INVLD)) 10987 return (CTL_ACTION_PASS); 10988 10989 switch (serialize_row[ooa_io->scsiio.seridx]) { 10990 case CTL_SER_SEQ: 10991 if (lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) 10992 return (ctl_seq_check(ooa_io, pending_io)); 10993 /* FALLTHROUGH */ 10994 case CTL_SER_PASS: 10995 return (CTL_ACTION_PASS); 10996 case CTL_SER_EXTENTOPT: 10997 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) == 10998 SCP_QUEUE_ALG_UNRESTRICTED) 10999 return (CTL_ACTION_PASS); 11000 /* FALLTHROUGH */ 11001 case CTL_SER_EXTENT: 11002 return (ctl_extent_check(ooa_io, pending_io, 11003 (lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 11004 case CTL_SER_BLOCKOPT: 11005 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) == 11006 SCP_QUEUE_ALG_UNRESTRICTED) 11007 return (CTL_ACTION_PASS); 11008 /* FALLTHROUGH */ 11009 case CTL_SER_BLOCK: 11010 return (CTL_ACTION_BLOCK); 11011 default: 11012 __assert_unreachable(); 11013 } 11014 } 11015 11016 /* 11017 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 11018 * Assumptions: 11019 * - pending_io is generally either incoming, or on the blocked queue 11020 * - starting I/O is the I/O we want to start the check with. 11021 */ 11022 static ctl_action 11023 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 11024 union ctl_io **starting_io) 11025 { 11026 union ctl_io *ooa_io = *starting_io; 11027 const uint8_t *serialize_row; 11028 ctl_action action; 11029 11030 mtx_assert(&lun->lun_lock, MA_OWNED); 11031 11032 /* 11033 * Aborted commands are not going to be executed and may even 11034 * not report completion, so we don't care about their order. 11035 * Let them complete ASAP to clean the OOA queue. 11036 */ 11037 if (__predict_false(pending_io->io_hdr.flags & CTL_FLAG_ABORT)) 11038 return (CTL_ACTION_SKIP); 11039 11040 /* 11041 * Ordered tags have to block until all items ahead of them have 11042 * completed. If we get called with an ordered tag, we always 11043 * block, if something else is ahead of us in the queue. 11044 */ 11045 if ((pending_io->scsiio.tag_type == CTL_TAG_ORDERED) && 11046 (ooa_io != NULL)) 11047 return (CTL_ACTION_BLOCK); 11048 11049 serialize_row = ctl_serialize_table[pending_io->scsiio.seridx]; 11050 11051 /* 11052 * Run back along the OOA queue, starting with the current 11053 * blocked I/O and going through every I/O before it on the 11054 * queue. 
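 * (The walk below follows the ooa_links list from this I/O toward
 * commands that arrived earlier, stopping at the first one that forces
 * anything other than PASS.)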
If starting_io is NULL, we'll just end up returning 11055 * CTL_ACTION_PASS. 11056 */ 11057 for (; ooa_io != NULL; 11058 ooa_io = (union ctl_io *)LIST_NEXT(&ooa_io->io_hdr, ooa_links)) { 11059 action = ctl_check_for_blockage(lun, pending_io, serialize_row, 11060 ooa_io); 11061 if (action != CTL_ACTION_PASS) { 11062 *starting_io = ooa_io; 11063 return (action); 11064 } 11065 } 11066 11067 *starting_io = NULL; 11068 return (CTL_ACTION_PASS); 11069 } 11070 11071 /* 11072 * Try to unblock the specified I/O. 11073 * 11074 * skip parameter allows explicitly skip present blocker of the I/O, 11075 * starting from the previous one on OOA queue. It can be used when 11076 * we know for sure that the blocker I/O does no longer count. 11077 */ 11078 static void 11079 ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io, bool skip) 11080 { 11081 struct ctl_softc *softc = lun->ctl_softc; 11082 union ctl_io *bio, *obio; 11083 const struct ctl_cmd_entry *entry; 11084 union ctl_ha_msg msg_info; 11085 ctl_action action; 11086 11087 mtx_assert(&lun->lun_lock, MA_OWNED); 11088 11089 if (io->io_hdr.blocker == NULL) 11090 return; 11091 11092 obio = bio = io->io_hdr.blocker; 11093 if (skip) 11094 bio = (union ctl_io *)LIST_NEXT(&bio->io_hdr, ooa_links); 11095 action = ctl_check_ooa(lun, io, &bio); 11096 if (action == CTL_ACTION_BLOCK) { 11097 /* Still blocked, but may be by different I/O now. */ 11098 if (bio != obio) { 11099 TAILQ_REMOVE(&obio->io_hdr.blocked_queue, 11100 &io->io_hdr, blocked_links); 11101 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, 11102 &io->io_hdr, blocked_links); 11103 io->io_hdr.blocker = bio; 11104 } 11105 return; 11106 } 11107 11108 /* No longer blocked, one way or another. */ 11109 TAILQ_REMOVE(&obio->io_hdr.blocked_queue, &io->io_hdr, blocked_links); 11110 io->io_hdr.blocker = NULL; 11111 11112 switch (action) { 11113 case CTL_ACTION_PASS: 11114 case CTL_ACTION_SKIP: 11115 11116 /* Serializing commands from the other SC retire there. */ 11117 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) && 11118 (softc->ha_mode != CTL_HA_MODE_XFER)) { 11119 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11120 msg_info.hdr.original_sc = io->io_hdr.remote_io; 11121 msg_info.hdr.serializing_sc = io; 11122 msg_info.hdr.msg_type = CTL_MSG_R2R; 11123 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11124 sizeof(msg_info.hdr), M_NOWAIT); 11125 break; 11126 } 11127 11128 /* 11129 * Check this I/O for LUN state changes that may have happened 11130 * while this command was blocked. The LUN state may have been 11131 * changed by a command ahead of us in the queue. 11132 */ 11133 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 11134 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { 11135 ctl_done(io); 11136 break; 11137 } 11138 11139 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11140 ctl_enqueue_rtr(io); 11141 break; 11142 default: 11143 __assert_unreachable(); 11144 case CTL_ACTION_OVERLAP: 11145 ctl_set_overlapped_cmd(&io->scsiio); 11146 goto error; 11147 case CTL_ACTION_OVERLAP_TAG: 11148 ctl_set_overlapped_tag(&io->scsiio, 11149 io->scsiio.tag_num & 0xff); 11150 error: 11151 /* Serializing commands from the other SC are done here. 
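 * In that case the sense data built just above is carried back to the
 * peer in a CTL_MSG_BAD_JUJU message and the local copy of the I/O is
 * freed here rather than completed through ctl_done().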
*/ 11152 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) && 11153 (softc->ha_mode != CTL_HA_MODE_XFER)) { 11154 ctl_try_unblock_others(lun, io, TRUE); 11155 LIST_REMOVE(&io->io_hdr, ooa_links); 11156 11157 ctl_copy_sense_data_back(io, &msg_info); 11158 msg_info.hdr.original_sc = io->io_hdr.remote_io; 11159 msg_info.hdr.serializing_sc = NULL; 11160 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 11161 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11162 sizeof(msg_info.scsi), M_WAITOK); 11163 ctl_free_io(io); 11164 break; 11165 } 11166 11167 ctl_done(io); 11168 break; 11169 } 11170 } 11171 11172 /* 11173 * Try to unblock I/Os blocked by the specified I/O. 11174 * 11175 * skip parameter allows explicitly skip the specified I/O as blocker, 11176 * starting from the previous one on the OOA queue. It can be used when 11177 * we know for sure that the specified I/O does no longer count (done). 11178 * It has to be still on OOA queue though so that we know where to start. 11179 */ 11180 static void 11181 ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *bio, bool skip) 11182 { 11183 union ctl_io *io, *next_io; 11184 11185 mtx_assert(&lun->lun_lock, MA_OWNED); 11186 11187 for (io = (union ctl_io *)TAILQ_FIRST(&bio->io_hdr.blocked_queue); 11188 io != NULL; io = next_io) { 11189 next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, blocked_links); 11190 11191 KASSERT(io->io_hdr.blocker != NULL, 11192 ("I/O %p on blocked list without blocker", io)); 11193 ctl_try_unblock_io(lun, io, skip); 11194 } 11195 KASSERT(!skip || TAILQ_EMPTY(&bio->io_hdr.blocked_queue), 11196 ("blocked_queue is not empty after skipping %p", bio)); 11197 } 11198 11199 /* 11200 * This routine (with one exception) checks LUN flags that can be set by 11201 * commands ahead of us in the OOA queue. These flags have to be checked 11202 * when a command initially comes in, and when we pull a command off the 11203 * blocked queue and are preparing to execute it. The reason we have to 11204 * check these flags for commands on the blocked queue is that the LUN 11205 * state may have been changed by a command ahead of us while we're on the 11206 * blocked queue. 11207 * 11208 * Ordering is somewhat important with these checks, so please pay 11209 * careful attention to the placement of any new checks. 11210 */ 11211 static int 11212 ctl_scsiio_lun_check(struct ctl_lun *lun, 11213 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 11214 { 11215 struct ctl_softc *softc = lun->ctl_softc; 11216 int retval; 11217 uint32_t residx; 11218 11219 retval = 0; 11220 11221 mtx_assert(&lun->lun_lock, MA_OWNED); 11222 11223 /* 11224 * If this shelf is a secondary shelf controller, we may have to 11225 * reject some commands disallowed by HA mode and link state. 
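	 * In outline: if the HA link is offline the LUN is reported as
	 * unavailable, if the peer is not primary either the LUN is reported
	 * as in transition, and in active/standby mode most commands get
	 * LUN standby status; see the checks just below.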
11226 */ 11227 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11228 if (softc->ha_link == CTL_HA_LINK_OFFLINE && 11229 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11230 ctl_set_lun_unavail(ctsio); 11231 retval = 1; 11232 goto bailout; 11233 } 11234 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && 11235 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11236 ctl_set_lun_transit(ctsio); 11237 retval = 1; 11238 goto bailout; 11239 } 11240 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && 11241 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { 11242 ctl_set_lun_standby(ctsio); 11243 retval = 1; 11244 goto bailout; 11245 } 11246 11247 /* The rest of checks are only done on executing side */ 11248 if (softc->ha_mode == CTL_HA_MODE_XFER) 11249 goto bailout; 11250 } 11251 11252 if (entry->pattern & CTL_LUN_PAT_WRITE) { 11253 if (lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { 11254 ctl_set_hw_write_protected(ctsio); 11255 retval = 1; 11256 goto bailout; 11257 } 11258 if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) { 11259 ctl_set_sense(ctsio, /*current_error*/ 1, 11260 /*sense_key*/ SSD_KEY_DATA_PROTECT, 11261 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 11262 retval = 1; 11263 goto bailout; 11264 } 11265 } 11266 11267 /* 11268 * Check for a reservation conflict. If this command isn't allowed 11269 * even on reserved LUNs, and if this initiator isn't the one who 11270 * reserved us, reject the command with a reservation conflict. 11271 */ 11272 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11273 if ((lun->flags & CTL_LUN_RESERVED) 11274 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 11275 if (lun->res_idx != residx) { 11276 ctl_set_reservation_conflict(ctsio); 11277 retval = 1; 11278 goto bailout; 11279 } 11280 } 11281 11282 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 11283 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 11284 /* No reservation or command is allowed. */; 11285 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 11286 (lun->pr_res_type == SPR_TYPE_WR_EX || 11287 lun->pr_res_type == SPR_TYPE_WR_EX_RO || 11288 lun->pr_res_type == SPR_TYPE_WR_EX_AR)) { 11289 /* The command is allowed for Write Exclusive resv. */; 11290 } else { 11291 /* 11292 * if we aren't registered or it's a res holder type 11293 * reservation and this isn't the res holder then set a 11294 * conflict. 
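		 * (Reservation type values below 4 are the single-holder
		 * types, Write Exclusive and Exclusive Access; for those only
		 * the holder itself avoids the conflict.)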
11295 */ 11296 if (ctl_get_prkey(lun, residx) == 0 || 11297 (residx != lun->pr_res_idx && lun->pr_res_type < 4)) { 11298 ctl_set_reservation_conflict(ctsio); 11299 retval = 1; 11300 goto bailout; 11301 } 11302 } 11303 11304 if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) { 11305 if (lun->flags & CTL_LUN_EJECTED) 11306 ctl_set_lun_ejected(ctsio); 11307 else if (lun->flags & CTL_LUN_NO_MEDIA) { 11308 if (lun->flags & CTL_LUN_REMOVABLE) 11309 ctl_set_lun_no_media(ctsio); 11310 else 11311 ctl_set_lun_int_reqd(ctsio); 11312 } else if (lun->flags & CTL_LUN_STOPPED) 11313 ctl_set_lun_stopped(ctsio); 11314 else 11315 goto bailout; 11316 retval = 1; 11317 goto bailout; 11318 } 11319 11320 bailout: 11321 return (retval); 11322 } 11323 11324 static void 11325 ctl_failover_io(union ctl_io *io, int have_lock) 11326 { 11327 ctl_set_busy(&io->scsiio); 11328 ctl_done(io); 11329 } 11330 11331 static void 11332 ctl_failover_lun(union ctl_io *rio) 11333 { 11334 struct ctl_softc *softc = CTL_SOFTC(rio); 11335 struct ctl_lun *lun; 11336 struct ctl_io_hdr *io, *next_io; 11337 uint32_t targ_lun; 11338 11339 targ_lun = rio->io_hdr.nexus.targ_mapped_lun; 11340 CTL_DEBUG_PRINT(("FAILOVER for lun %u\n", targ_lun)); 11341 11342 /* Find and lock the LUN. */ 11343 mtx_lock(&softc->ctl_lock); 11344 if (targ_lun > ctl_max_luns || 11345 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11346 mtx_unlock(&softc->ctl_lock); 11347 return; 11348 } 11349 mtx_lock(&lun->lun_lock); 11350 mtx_unlock(&softc->ctl_lock); 11351 if (lun->flags & CTL_LUN_DISABLED) { 11352 mtx_unlock(&lun->lun_lock); 11353 return; 11354 } 11355 11356 if (softc->ha_mode == CTL_HA_MODE_XFER) { 11357 LIST_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11358 /* We are master */ 11359 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11360 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11361 io->flags |= CTL_FLAG_ABORT | 11362 CTL_FLAG_FAILOVER; 11363 ctl_try_unblock_io(lun, 11364 (union ctl_io *)io, FALSE); 11365 } else { /* This can be only due to DATAMOVE */ 11366 io->msg_type = CTL_MSG_DATAMOVE_DONE; 11367 io->flags &= ~CTL_FLAG_DMA_INPROG; 11368 io->flags |= CTL_FLAG_IO_ACTIVE; 11369 io->port_status = 31340; 11370 ctl_enqueue_isc((union ctl_io *)io); 11371 } 11372 } else 11373 /* We are slave */ 11374 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11375 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11376 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11377 io->flags |= CTL_FLAG_FAILOVER; 11378 } else { 11379 ctl_set_busy(&((union ctl_io *)io)-> 11380 scsiio); 11381 ctl_done((union ctl_io *)io); 11382 } 11383 } 11384 } 11385 } else { /* SERIALIZE modes */ 11386 LIST_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11387 /* We are master */ 11388 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11389 if (io->blocker != NULL) { 11390 TAILQ_REMOVE(&io->blocker->io_hdr.blocked_queue, 11391 io, blocked_links); 11392 io->blocker = NULL; 11393 } 11394 ctl_try_unblock_others(lun, (union ctl_io *)io, 11395 TRUE); 11396 LIST_REMOVE(io, ooa_links); 11397 ctl_free_io((union ctl_io *)io); 11398 } else 11399 /* We are slave */ 11400 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11401 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11402 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { 11403 ctl_set_busy(&((union ctl_io *)io)-> 11404 scsiio); 11405 ctl_done((union ctl_io *)io); 11406 } 11407 } 11408 } 11409 } 11410 mtx_unlock(&lun->lun_lock); 11411 } 11412 11413 static void 11414 ctl_scsiio_precheck(struct ctl_scsiio *ctsio) 11415 { 11416 struct ctl_softc *softc = CTL_SOFTC(ctsio); 11417 struct ctl_lun *lun; 11418 
	const struct ctl_cmd_entry *entry;
	union ctl_io *bio;
	uint32_t initidx, targ_lun;

	lun = NULL;
	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	if (targ_lun < ctl_max_luns)
		lun = softc->ctl_luns[targ_lun];
	if (lun) {
		/*
		 * If the LUN is invalid, pretend that it doesn't exist.
		 * It will go away as soon as all pending I/O has been
		 * completed.
		 */
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_DISABLED) {
			mtx_unlock(&lun->lun_lock);
			lun = NULL;
		}
	}
	CTL_LUN(ctsio) = lun;
	if (lun) {
		CTL_BACKEND_LUN(ctsio) = lun->be_lun;

		/*
		 * Every I/O goes into the OOA queue for a particular LUN,
		 * and stays there until completion.
		 */
#ifdef CTL_TIME_IO
		if (LIST_EMPTY(&lun->ooa_queue))
			lun->idle_time += getsbinuptime() - lun->last_busy;
#endif
		LIST_INSERT_HEAD(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
	}

	/* Get command entry and return error if it is unsupported. */
	entry = ctl_validate_command(ctsio);
	if (entry == NULL) {
		if (lun)
			mtx_unlock(&lun->lun_lock);
		return;
	}

	ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
	ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;

	/*
	 * Check to see whether we can send this command to LUNs that don't
	 * exist. This should pretty much only be the case for inquiry
	 * and request sense. Further checks, below, really require having
	 * a LUN, so we can't really check the command anymore. Just put
	 * it on the rtr queue.
	 */
	if (lun == NULL) {
		if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) {
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
			return;
		}

		ctl_set_unsupported_lun(ctsio);
		ctl_done((union ctl_io *)ctsio);
		CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
		return;
	} else {
		/*
		 * Make sure we support this particular command on this LUN.
		 * e.g., we don't support writes to the control LUN.
		 */
		if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
			mtx_unlock(&lun->lun_lock);
			ctl_set_invalid_opcode(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return;
		}
	}

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);

	/*
	 * If we've got a request sense, it'll clear the contingent
	 * allegiance condition. Otherwise, if we have a CA condition for
	 * this initiator, clear it, because it sent down a command other
	 * than request sense.
	 */
	if (ctsio->cdb[0] != REQUEST_SENSE) {
		struct scsi_sense_data *ps;

		ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT];
		if (ps != NULL)
			ps[initidx % CTL_MAX_INIT_PER_PORT].error_code = 0;
	}

	/*
	 * If the command has this flag set, it handles its own unit
	 * attention reporting, so we shouldn't do anything. Otherwise we
	 * check for any pending unit attentions, and send them back to the
	 * initiator. We only do this when a command initially comes in,
	 * not when we pull it off the blocked queue.
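	 * When ctl_build_ua() below finds a pending UA for this initiator,
	 * it is reported as autosense and the command completes with
	 * CHECK CONDITION status before any further processing.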
11517 * 11518 * According to SAM-3, section 5.3.2, the order that things get 11519 * presented back to the host is basically unit attentions caused 11520 * by some sort of reset event, busy status, reservation conflicts 11521 * or task set full, and finally any other status. 11522 * 11523 * One issue here is that some of the unit attentions we report 11524 * don't fall into the "reset" category (e.g. "reported luns data 11525 * has changed"). So reporting it here, before the reservation 11526 * check, may be technically wrong. I guess the only thing to do 11527 * would be to check for and report the reset events here, and then 11528 * check for the other unit attention types after we check for a 11529 * reservation conflict. 11530 * 11531 * XXX KDM need to fix this 11532 */ 11533 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11534 ctl_ua_type ua_type; 11535 u_int sense_len = 0; 11536 11537 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, 11538 &sense_len, SSD_TYPE_NONE); 11539 if (ua_type != CTL_UA_NONE) { 11540 mtx_unlock(&lun->lun_lock); 11541 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11542 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11543 ctsio->sense_len = sense_len; 11544 ctl_done((union ctl_io *)ctsio); 11545 return; 11546 } 11547 } 11548 11549 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 11550 mtx_unlock(&lun->lun_lock); 11551 ctl_done((union ctl_io *)ctsio); 11552 return; 11553 } 11554 11555 /* 11556 * XXX CHD this is where we want to send IO to other side if 11557 * this LUN is secondary on this SC. We will need to make a copy 11558 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 11559 * the copy we send as FROM_OTHER. 11560 * We also need to stuff the address of the original IO so we can 11561 * find it easily. Something similar will need be done on the other 11562 * side so when we are done we can find the copy. 
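	 * The code below does this with a CTL_MSG_SERIALIZE message: the
	 * original_sc field carries the address of the local I/O, and the
	 * local copy is flagged CTL_FLAG_SENT_2OTHER_SC while it waits for
	 * the peer.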
11563 */ 11564 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 11565 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && 11566 (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { 11567 union ctl_ha_msg msg_info; 11568 int isc_retval; 11569 11570 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11571 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11572 mtx_unlock(&lun->lun_lock); 11573 11574 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11575 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11576 msg_info.hdr.serializing_sc = NULL; 11577 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11578 msg_info.scsi.tag_num = ctsio->tag_num; 11579 msg_info.scsi.tag_type = ctsio->tag_type; 11580 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11581 msg_info.scsi.cdb_len = ctsio->cdb_len; 11582 msg_info.scsi.priority = ctsio->priority; 11583 11584 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11585 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), 11586 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { 11587 ctl_set_busy(ctsio); 11588 ctl_done((union ctl_io *)ctsio); 11589 return; 11590 } 11591 return; 11592 } 11593 11594 bio = (union ctl_io *)LIST_NEXT(&ctsio->io_hdr, ooa_links); 11595 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) { 11596 case CTL_ACTION_PASS: 11597 case CTL_ACTION_SKIP: 11598 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11599 mtx_unlock(&lun->lun_lock); 11600 ctl_enqueue_rtr((union ctl_io *)ctsio); 11601 break; 11602 case CTL_ACTION_BLOCK: 11603 ctsio->io_hdr.blocker = bio; 11604 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, 11605 blocked_links); 11606 mtx_unlock(&lun->lun_lock); 11607 break; 11608 case CTL_ACTION_OVERLAP: 11609 mtx_unlock(&lun->lun_lock); 11610 ctl_set_overlapped_cmd(ctsio); 11611 ctl_done((union ctl_io *)ctsio); 11612 break; 11613 case CTL_ACTION_OVERLAP_TAG: 11614 mtx_unlock(&lun->lun_lock); 11615 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11616 ctl_done((union ctl_io *)ctsio); 11617 break; 11618 default: 11619 __assert_unreachable(); 11620 } 11621 } 11622 11623 const struct ctl_cmd_entry * 11624 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11625 { 11626 const struct ctl_cmd_entry *entry; 11627 int service_action; 11628 11629 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11630 if (sa) 11631 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11632 if (entry->flags & CTL_CMD_FLAG_SA5) { 11633 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11634 entry = &((const struct ctl_cmd_entry *) 11635 entry->execute)[service_action]; 11636 } 11637 return (entry); 11638 } 11639 11640 const struct ctl_cmd_entry * 11641 ctl_validate_command(struct ctl_scsiio *ctsio) 11642 { 11643 const struct ctl_cmd_entry *entry; 11644 int i, sa; 11645 uint8_t diff; 11646 11647 entry = ctl_get_cmd_entry(ctsio, &sa); 11648 ctsio->seridx = entry->seridx; 11649 if (entry->execute == NULL) { 11650 if (sa) 11651 ctl_set_invalid_field(ctsio, 11652 /*sks_valid*/ 1, 11653 /*command*/ 1, 11654 /*field*/ 1, 11655 /*bit_valid*/ 1, 11656 /*bit*/ 4); 11657 else 11658 ctl_set_invalid_opcode(ctsio); 11659 ctl_done((union ctl_io *)ctsio); 11660 return (NULL); 11661 } 11662 KASSERT(entry->length > 0, 11663 ("Not defined length for command 0x%02x/0x%02x", 11664 ctsio->cdb[0], ctsio->cdb[1])); 11665 for (i = 1; i < entry->length; i++) { 11666 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11667 if (diff == 0) 11668 continue; 11669 ctl_set_invalid_field(ctsio, 11670 /*sks_valid*/ 1, 11671 /*command*/ 1, 11672 /*field*/ i, 11673 /*bit_valid*/ 1, 11674 /*bit*/ fls(diff) - 1); 11675 
ctl_done((union ctl_io *)ctsio); 11676 return (NULL); 11677 } 11678 return (entry); 11679 } 11680 11681 static int 11682 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11683 { 11684 11685 switch (lun_type) { 11686 case T_DIRECT: 11687 if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0) 11688 return (0); 11689 break; 11690 case T_PROCESSOR: 11691 if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) 11692 return (0); 11693 break; 11694 case T_CDROM: 11695 if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0) 11696 return (0); 11697 break; 11698 default: 11699 return (0); 11700 } 11701 return (1); 11702 } 11703 11704 static int 11705 ctl_scsiio(struct ctl_scsiio *ctsio) 11706 { 11707 int retval; 11708 const struct ctl_cmd_entry *entry; 11709 11710 retval = CTL_RETVAL_COMPLETE; 11711 11712 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11713 11714 entry = ctl_get_cmd_entry(ctsio, NULL); 11715 11716 /* 11717 * If this I/O has been aborted, just send it straight to 11718 * ctl_done() without executing it. 11719 */ 11720 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11721 ctl_done((union ctl_io *)ctsio); 11722 goto bailout; 11723 } 11724 11725 /* 11726 * All the checks should have been handled by ctl_scsiio_precheck(). 11727 * We should be clear now to just execute the I/O. 11728 */ 11729 retval = entry->execute(ctsio); 11730 11731 bailout: 11732 return (retval); 11733 } 11734 11735 static int 11736 ctl_target_reset(union ctl_io *io) 11737 { 11738 struct ctl_softc *softc = CTL_SOFTC(io); 11739 struct ctl_port *port = CTL_PORT(io); 11740 struct ctl_lun *lun; 11741 uint32_t initidx; 11742 ctl_ua_type ua_type; 11743 11744 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11745 union ctl_ha_msg msg_info; 11746 11747 msg_info.hdr.nexus = io->io_hdr.nexus; 11748 msg_info.task.task_action = io->taskio.task_action; 11749 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11750 msg_info.hdr.original_sc = NULL; 11751 msg_info.hdr.serializing_sc = NULL; 11752 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11753 sizeof(msg_info.task), M_WAITOK); 11754 } 11755 11756 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11757 if (io->taskio.task_action == CTL_TASK_TARGET_RESET) 11758 ua_type = CTL_UA_TARG_RESET; 11759 else 11760 ua_type = CTL_UA_BUS_RESET; 11761 mtx_lock(&softc->ctl_lock); 11762 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11763 if (port != NULL && 11764 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 11765 continue; 11766 ctl_do_lun_reset(lun, initidx, ua_type); 11767 } 11768 mtx_unlock(&softc->ctl_lock); 11769 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11770 return (0); 11771 } 11772 11773 /* 11774 * The LUN should always be set. The I/O is optional, and is used to 11775 * distinguish between I/Os sent by this initiator, and by other 11776 * initiators. We set unit attention for initiators other than this one. 11777 * SAM-3 is vague on this point. It does say that a unit attention should 11778 * be established for other initiators when a LUN is reset (see section 11779 * 5.7.3), but it doesn't specifically say that the unit attention should 11780 * be established for this particular initiator when a LUN is reset. 
Here 11781 * is the relevant text, from SAM-3 rev 8: 11782 * 11783 * 5.7.2 When a SCSI initiator port aborts its own tasks 11784 * 11785 * When a SCSI initiator port causes its own task(s) to be aborted, no 11786 * notification that the task(s) have been aborted shall be returned to 11787 * the SCSI initiator port other than the completion response for the 11788 * command or task management function action that caused the task(s) to 11789 * be aborted and notification(s) associated with related effects of the 11790 * action (e.g., a reset unit attention condition). 11791 * 11792 * XXX KDM for now, we're setting unit attention for all initiators. 11793 */ 11794 static void 11795 ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type) 11796 { 11797 struct ctl_io_hdr *xioh; 11798 int i; 11799 11800 mtx_lock(&lun->lun_lock); 11801 /* Abort tasks. */ 11802 LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { 11803 xioh->flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; 11804 ctl_try_unblock_io(lun, (union ctl_io *)xioh, FALSE); 11805 } 11806 /* Clear CA. */ 11807 for (i = 0; i < ctl_max_ports; i++) { 11808 free(lun->pending_sense[i], M_CTL); 11809 lun->pending_sense[i] = NULL; 11810 } 11811 /* Clear reservation. */ 11812 lun->flags &= ~CTL_LUN_RESERVED; 11813 /* Clear prevent media removal. */ 11814 if (lun->prevent) { 11815 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11816 ctl_clear_mask(lun->prevent, i); 11817 lun->prevent_count = 0; 11818 } 11819 /* Clear TPC status */ 11820 ctl_tpc_lun_clear(lun, -1); 11821 /* Establish UA. */ 11822 #if 0 11823 ctl_est_ua_all(lun, initidx, ua_type); 11824 #else 11825 ctl_est_ua_all(lun, -1, ua_type); 11826 #endif 11827 mtx_unlock(&lun->lun_lock); 11828 } 11829 11830 static int 11831 ctl_lun_reset(union ctl_io *io) 11832 { 11833 struct ctl_softc *softc = CTL_SOFTC(io); 11834 struct ctl_lun *lun; 11835 uint32_t targ_lun, initidx; 11836 11837 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11838 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11839 mtx_lock(&softc->ctl_lock); 11840 if (targ_lun >= ctl_max_luns || 11841 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11842 mtx_unlock(&softc->ctl_lock); 11843 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11844 return (1); 11845 } 11846 ctl_do_lun_reset(lun, initidx, CTL_UA_LUN_RESET); 11847 mtx_unlock(&softc->ctl_lock); 11848 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11849 11850 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { 11851 union ctl_ha_msg msg_info; 11852 11853 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11854 msg_info.hdr.nexus = io->io_hdr.nexus; 11855 msg_info.task.task_action = CTL_TASK_LUN_RESET; 11856 msg_info.hdr.original_sc = NULL; 11857 msg_info.hdr.serializing_sc = NULL; 11858 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11859 sizeof(msg_info.task), M_WAITOK); 11860 } 11861 return (0); 11862 } 11863 11864 static void 11865 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 11866 int other_sc) 11867 { 11868 struct ctl_io_hdr *xioh; 11869 11870 mtx_assert(&lun->lun_lock, MA_OWNED); 11871 11872 /* 11873 * Run through the OOA queue and attempt to find the given I/O. 11874 * The target port, initiator ID, tag type and tag number have to 11875 * match the values that we got from the initiator. If we have an 11876 * untagged command to abort, simply abort the first untagged command 11877 * we come to. We only allow one untagged command at a time of course. 
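	 * A targ_port or init_id of UINT32_MAX is treated as a wildcard
	 * below, so passing both aborts every I/O on the queue (this is what
	 * ctl_abort_task_set() does for CLEAR TASK SET).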
11878 */ 11879 LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { 11880 union ctl_io *xio = (union ctl_io *)xioh; 11881 if ((targ_port == UINT32_MAX || 11882 targ_port == xioh->nexus.targ_port) && 11883 (init_id == UINT32_MAX || 11884 init_id == xioh->nexus.initid)) { 11885 if (targ_port != xioh->nexus.targ_port || 11886 init_id != xioh->nexus.initid) 11887 xioh->flags |= CTL_FLAG_ABORT_STATUS; 11888 xioh->flags |= CTL_FLAG_ABORT; 11889 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11890 union ctl_ha_msg msg_info; 11891 11892 msg_info.hdr.nexus = xioh->nexus; 11893 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11894 msg_info.task.tag_num = xio->scsiio.tag_num; 11895 msg_info.task.tag_type = xio->scsiio.tag_type; 11896 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11897 msg_info.hdr.original_sc = NULL; 11898 msg_info.hdr.serializing_sc = NULL; 11899 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11900 sizeof(msg_info.task), M_NOWAIT); 11901 } 11902 ctl_try_unblock_io(lun, xio, FALSE); 11903 } 11904 } 11905 } 11906 11907 static int 11908 ctl_abort_task_set(union ctl_io *io) 11909 { 11910 struct ctl_softc *softc = CTL_SOFTC(io); 11911 struct ctl_lun *lun; 11912 uint32_t targ_lun; 11913 11914 /* 11915 * Look up the LUN. 11916 */ 11917 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11918 mtx_lock(&softc->ctl_lock); 11919 if (targ_lun >= ctl_max_luns || 11920 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11921 mtx_unlock(&softc->ctl_lock); 11922 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11923 return (1); 11924 } 11925 11926 mtx_lock(&lun->lun_lock); 11927 mtx_unlock(&softc->ctl_lock); 11928 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 11929 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11930 io->io_hdr.nexus.initid, 11931 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11932 } else { /* CTL_TASK_CLEAR_TASK_SET */ 11933 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 11934 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11935 } 11936 mtx_unlock(&lun->lun_lock); 11937 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11938 return (0); 11939 } 11940 11941 static void 11942 ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, 11943 ctl_ua_type ua_type) 11944 { 11945 struct ctl_lun *lun; 11946 struct scsi_sense_data *ps; 11947 uint32_t p, i; 11948 11949 p = initidx / CTL_MAX_INIT_PER_PORT; 11950 i = initidx % CTL_MAX_INIT_PER_PORT; 11951 mtx_lock(&softc->ctl_lock); 11952 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11953 mtx_lock(&lun->lun_lock); 11954 /* Abort tasks. */ 11955 ctl_abort_tasks_lun(lun, p, i, 1); 11956 /* Clear CA. */ 11957 ps = lun->pending_sense[p]; 11958 if (ps != NULL) 11959 ps[i].error_code = 0; 11960 /* Clear reservation. */ 11961 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) 11962 lun->flags &= ~CTL_LUN_RESERVED; 11963 /* Clear prevent media removal. */ 11964 if (lun->prevent && ctl_is_set(lun->prevent, initidx)) { 11965 ctl_clear_mask(lun->prevent, initidx); 11966 lun->prevent_count--; 11967 } 11968 /* Clear TPC status */ 11969 ctl_tpc_lun_clear(lun, initidx); 11970 /* Establish UA. 
*/ 11971 ctl_est_ua(lun, initidx, ua_type); 11972 mtx_unlock(&lun->lun_lock); 11973 } 11974 mtx_unlock(&softc->ctl_lock); 11975 } 11976 11977 static int 11978 ctl_i_t_nexus_reset(union ctl_io *io) 11979 { 11980 struct ctl_softc *softc = CTL_SOFTC(io); 11981 uint32_t initidx; 11982 11983 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11984 union ctl_ha_msg msg_info; 11985 11986 msg_info.hdr.nexus = io->io_hdr.nexus; 11987 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; 11988 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11989 msg_info.hdr.original_sc = NULL; 11990 msg_info.hdr.serializing_sc = NULL; 11991 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11992 sizeof(msg_info.task), M_WAITOK); 11993 } 11994 11995 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11996 ctl_i_t_nexus_loss(softc, initidx, CTL_UA_I_T_NEXUS_LOSS); 11997 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11998 return (0); 11999 } 12000 12001 static int 12002 ctl_abort_task(union ctl_io *io) 12003 { 12004 struct ctl_softc *softc = CTL_SOFTC(io); 12005 struct ctl_io_hdr *xioh; 12006 struct ctl_lun *lun; 12007 uint32_t targ_lun; 12008 12009 /* 12010 * Look up the LUN. 12011 */ 12012 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12013 mtx_lock(&softc->ctl_lock); 12014 if (targ_lun >= ctl_max_luns || 12015 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12016 mtx_unlock(&softc->ctl_lock); 12017 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12018 return (1); 12019 } 12020 12021 mtx_lock(&lun->lun_lock); 12022 mtx_unlock(&softc->ctl_lock); 12023 /* 12024 * Run through the OOA queue and attempt to find the given I/O. 12025 * The target port, initiator ID, tag type and tag number have to 12026 * match the values that we got from the initiator. If we have an 12027 * untagged command to abort, simply abort the first untagged command 12028 * we come to. We only allow one untagged command at a time of course. 12029 */ 12030 LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { 12031 union ctl_io *xio = (union ctl_io *)xioh; 12032 if ((xioh->nexus.targ_port != io->io_hdr.nexus.targ_port) 12033 || (xioh->nexus.initid != io->io_hdr.nexus.initid) 12034 || (xioh->flags & CTL_FLAG_ABORT)) 12035 continue; 12036 12037 /* 12038 * If the abort says that the task is untagged, the 12039 * task in the queue must be untagged. Otherwise, 12040 * we just check to see whether the tag numbers 12041 * match. This is because the QLogic firmware 12042 * doesn't pass back the tag type in an abort 12043 * request. 12044 */ 12045 #if 0 12046 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 12047 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 12048 || (xio->scsiio.tag_num == io->taskio.tag_num)) { 12049 #else 12050 /* 12051 * XXX KDM we've got problems with FC, because it 12052 * doesn't send down a tag type with aborts. So we 12053 * can only really go by the tag number... 12054 * This may cause problems with parallel SCSI. 12055 * Need to figure that out!! 
		 */
		if (xio->scsiio.tag_num == io->taskio.tag_num) {
#endif
			xioh->flags |= CTL_FLAG_ABORT;
			if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 &&
			    !(lun->flags & CTL_LUN_PRIMARY_SC)) {
				union ctl_ha_msg msg_info;

				msg_info.hdr.nexus = io->io_hdr.nexus;
				msg_info.task.task_action = CTL_TASK_ABORT_TASK;
				msg_info.task.tag_num = io->taskio.tag_num;
				msg_info.task.tag_type = io->taskio.tag_type;
				msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
				msg_info.hdr.original_sc = NULL;
				msg_info.hdr.serializing_sc = NULL;
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
				    sizeof(msg_info.task), M_NOWAIT);
			}
			ctl_try_unblock_io(lun, xio, FALSE);
		}
	}
	mtx_unlock(&lun->lun_lock);
	io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
	return (0);
}

static int
ctl_query_task(union ctl_io *io, int task_set)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_io_hdr *xioh;
	struct ctl_lun *lun;
	int found = 0;
	uint32_t targ_lun;

	targ_lun = io->io_hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
		return (1);
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) {
		union ctl_io *xio = (union ctl_io *)xioh;
		if ((xioh->nexus.targ_port != io->io_hdr.nexus.targ_port)
		 || (xioh->nexus.initid != io->io_hdr.nexus.initid)
		 || (xioh->flags & CTL_FLAG_ABORT))
			continue;

		if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) {
			found = 1;
			break;
		}
	}
	mtx_unlock(&lun->lun_lock);
	if (found)
		io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
	else
		io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
	return (0);
}

static int
ctl_query_async_event(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_lun *lun;
	ctl_ua_type ua;
	uint32_t targ_lun, initidx;

	targ_lun = io->io_hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
		return (1);
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	initidx = ctl_get_initindex(&io->io_hdr.nexus);
	ua = ctl_build_qae(lun, initidx, io->taskio.task_resp);
	mtx_unlock(&lun->lun_lock);
	if (ua != CTL_UA_NONE)
		io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
	else
		io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
	return (0);
}

static void
ctl_run_task(union ctl_io *io)
{
	int retval = 1;

	CTL_DEBUG_PRINT(("ctl_run_task\n"));
	KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
	    ("ctl_run_task: Unexpected io_type %d\n", io->io_hdr.io_type));
	io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED;
	bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp));
	switch (io->taskio.task_action) {
	case CTL_TASK_ABORT_TASK:
		retval = ctl_abort_task(io);
		break;
	case CTL_TASK_ABORT_TASK_SET:
	case
CTL_TASK_CLEAR_TASK_SET: 12165 retval = ctl_abort_task_set(io); 12166 break; 12167 case CTL_TASK_CLEAR_ACA: 12168 break; 12169 case CTL_TASK_I_T_NEXUS_RESET: 12170 retval = ctl_i_t_nexus_reset(io); 12171 break; 12172 case CTL_TASK_LUN_RESET: 12173 retval = ctl_lun_reset(io); 12174 break; 12175 case CTL_TASK_TARGET_RESET: 12176 case CTL_TASK_BUS_RESET: 12177 retval = ctl_target_reset(io); 12178 break; 12179 case CTL_TASK_PORT_LOGIN: 12180 break; 12181 case CTL_TASK_PORT_LOGOUT: 12182 break; 12183 case CTL_TASK_QUERY_TASK: 12184 retval = ctl_query_task(io, 0); 12185 break; 12186 case CTL_TASK_QUERY_TASK_SET: 12187 retval = ctl_query_task(io, 1); 12188 break; 12189 case CTL_TASK_QUERY_ASYNC_EVENT: 12190 retval = ctl_query_async_event(io); 12191 break; 12192 default: 12193 printf("%s: got unknown task management event %d\n", 12194 __func__, io->taskio.task_action); 12195 break; 12196 } 12197 if (retval == 0) 12198 io->io_hdr.status = CTL_SUCCESS; 12199 else 12200 io->io_hdr.status = CTL_ERROR; 12201 ctl_done(io); 12202 } 12203 12204 /* 12205 * For HA operation. Handle commands that come in from the other 12206 * controller. 12207 */ 12208 static void 12209 ctl_handle_isc(union ctl_io *io) 12210 { 12211 struct ctl_softc *softc = CTL_SOFTC(io); 12212 struct ctl_lun *lun; 12213 const struct ctl_cmd_entry *entry; 12214 uint32_t targ_lun; 12215 12216 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12217 switch (io->io_hdr.msg_type) { 12218 case CTL_MSG_SERIALIZE: 12219 ctl_serialize_other_sc_cmd(&io->scsiio); 12220 break; 12221 case CTL_MSG_R2R: /* Only used in SER_ONLY mode. */ 12222 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 12223 if (targ_lun >= ctl_max_luns || 12224 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12225 ctl_done(io); 12226 break; 12227 } 12228 mtx_lock(&lun->lun_lock); 12229 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { 12230 mtx_unlock(&lun->lun_lock); 12231 ctl_done(io); 12232 break; 12233 } 12234 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12235 mtx_unlock(&lun->lun_lock); 12236 ctl_enqueue_rtr(io); 12237 break; 12238 case CTL_MSG_FINISH_IO: 12239 if (softc->ha_mode == CTL_HA_MODE_XFER) { 12240 ctl_done(io); 12241 break; 12242 } 12243 if (targ_lun >= ctl_max_luns || 12244 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12245 ctl_free_io(io); 12246 break; 12247 } 12248 mtx_lock(&lun->lun_lock); 12249 ctl_try_unblock_others(lun, io, TRUE); 12250 LIST_REMOVE(&io->io_hdr, ooa_links); 12251 mtx_unlock(&lun->lun_lock); 12252 ctl_free_io(io); 12253 break; 12254 case CTL_MSG_PERS_ACTION: 12255 ctl_hndl_per_res_out_on_other_sc(io); 12256 ctl_free_io(io); 12257 break; 12258 case CTL_MSG_BAD_JUJU: 12259 ctl_done(io); 12260 break; 12261 case CTL_MSG_DATAMOVE: /* Only used in XFER mode */ 12262 ctl_datamove_remote(io); 12263 break; 12264 case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */ 12265 ctl_datamove_done(io, false); 12266 break; 12267 case CTL_MSG_FAILOVER: 12268 ctl_failover_lun(io); 12269 ctl_free_io(io); 12270 break; 12271 default: 12272 printf("%s: Invalid message type %d\n", 12273 __func__, io->io_hdr.msg_type); 12274 ctl_free_io(io); 12275 break; 12276 } 12277 12278 } 12279 12280 /* 12281 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12282 * there is no match. 
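 * This is used by ctl_inject_error() below to decide whether an injected
 * error descriptor applies to the command being completed.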
12283 */ 12284 static ctl_lun_error_pattern 12285 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12286 { 12287 const struct ctl_cmd_entry *entry; 12288 ctl_lun_error_pattern filtered_pattern, pattern; 12289 12290 pattern = desc->error_pattern; 12291 12292 /* 12293 * XXX KDM we need more data passed into this function to match a 12294 * custom pattern, and we actually need to implement custom pattern 12295 * matching. 12296 */ 12297 if (pattern & CTL_LUN_PAT_CMD) 12298 return (CTL_LUN_PAT_CMD); 12299 12300 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12301 return (CTL_LUN_PAT_ANY); 12302 12303 entry = ctl_get_cmd_entry(ctsio, NULL); 12304 12305 filtered_pattern = entry->pattern & pattern; 12306 12307 /* 12308 * If the user requested specific flags in the pattern (e.g. 12309 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12310 * flags. 12311 * 12312 * If the user did not specify any flags, it doesn't matter whether 12313 * or not the command supports the flags. 12314 */ 12315 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12316 (pattern & ~CTL_LUN_PAT_MASK)) 12317 return (CTL_LUN_PAT_NONE); 12318 12319 /* 12320 * If the user asked for a range check, see if the requested LBA 12321 * range overlaps with this command's LBA range. 12322 */ 12323 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12324 uint64_t lba1; 12325 uint64_t len1; 12326 ctl_action action; 12327 int retval; 12328 12329 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12330 if (retval != 0) 12331 return (CTL_LUN_PAT_NONE); 12332 12333 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12334 desc->lba_range.len, FALSE); 12335 /* 12336 * A "pass" means that the LBA ranges don't overlap, so 12337 * this doesn't match the user's range criteria. 12338 */ 12339 if (action == CTL_ACTION_PASS) 12340 return (CTL_LUN_PAT_NONE); 12341 } 12342 12343 return (filtered_pattern); 12344 } 12345 12346 static void 12347 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12348 { 12349 struct ctl_error_desc *desc, *desc2; 12350 12351 mtx_assert(&lun->lun_lock, MA_OWNED); 12352 12353 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12354 ctl_lun_error_pattern pattern; 12355 /* 12356 * Check to see whether this particular command matches 12357 * the pattern in the descriptor. 12358 */ 12359 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12360 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12361 continue; 12362 12363 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12364 case CTL_LUN_INJ_ABORTED: 12365 ctl_set_aborted(&io->scsiio); 12366 break; 12367 case CTL_LUN_INJ_MEDIUM_ERR: 12368 ctl_set_medium_error(&io->scsiio, 12369 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != 12370 CTL_FLAG_DATA_OUT); 12371 break; 12372 case CTL_LUN_INJ_UA: 12373 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12374 * OCCURRED */ 12375 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12376 break; 12377 case CTL_LUN_INJ_CUSTOM: 12378 /* 12379 * We're assuming the user knows what he is doing. 12380 * Just copy the sense information without doing 12381 * checks. 
12382 */ 12383 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12384 MIN(sizeof(desc->custom_sense), 12385 sizeof(io->scsiio.sense_data))); 12386 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12387 io->scsiio.sense_len = SSD_FULL_SIZE; 12388 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12389 break; 12390 case CTL_LUN_INJ_NONE: 12391 default: 12392 /* 12393 * If this is an error injection type we don't know 12394 * about, clear the continuous flag (if it is set) 12395 * so it will get deleted below. 12396 */ 12397 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12398 break; 12399 } 12400 /* 12401 * By default, each error injection action is a one-shot 12402 */ 12403 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12404 continue; 12405 12406 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12407 12408 free(desc, M_CTL); 12409 } 12410 } 12411 12412 #ifdef CTL_IO_DELAY 12413 static void 12414 ctl_datamove_timer_wakeup(void *arg) 12415 { 12416 union ctl_io *io; 12417 12418 io = (union ctl_io *)arg; 12419 12420 ctl_datamove(io); 12421 } 12422 #endif /* CTL_IO_DELAY */ 12423 12424 static void 12425 ctl_datamove_done_process(union ctl_io *io) 12426 { 12427 #ifdef CTL_TIME_IO 12428 struct bintime cur_bt; 12429 #endif 12430 12431 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 12432 ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); 12433 12434 #ifdef CTL_TIME_IO 12435 getbinuptime(&cur_bt); 12436 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 12437 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 12438 #endif 12439 io->io_hdr.num_dmas++; 12440 12441 if ((io->io_hdr.port_status != 0) && 12442 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 12443 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 12444 ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, 12445 /*retry_count*/ io->io_hdr.port_status); 12446 } else if (io->scsiio.kern_data_resid != 0 && 12447 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && 12448 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 12449 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 12450 ctl_set_invalid_field_ciu(&io->scsiio); 12451 } else if (ctl_debug & CTL_DEBUG_CDB_DATA) 12452 ctl_data_print(io); 12453 } 12454 12455 void 12456 ctl_datamove_done(union ctl_io *io, bool samethr) 12457 { 12458 12459 ctl_datamove_done_process(io); 12460 io->scsiio.be_move_done(io, samethr); 12461 } 12462 12463 void 12464 ctl_datamove(union ctl_io *io) 12465 { 12466 void (*fe_datamove)(union ctl_io *io); 12467 12468 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12469 12470 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12471 12472 /* No data transferred yet. Frontend must update this when done. 
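	 * kern_data_resid starts out equal to kern_data_len and counts down
	 * as the frontend actually moves data; ctl_datamove_done_process()
	 * checks whatever remains for write-direction transfers.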
*/ 12473 io->scsiio.kern_data_resid = io->scsiio.kern_data_len; 12474 12475 #ifdef CTL_TIME_IO 12476 getbinuptime(&io->io_hdr.dma_start_bt); 12477 #endif /* CTL_TIME_IO */ 12478 12479 #ifdef CTL_IO_DELAY 12480 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12481 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12482 } else { 12483 struct ctl_lun *lun; 12484 12485 lun = CTL_LUN(io); 12486 if ((lun != NULL) 12487 && (lun->delay_info.datamove_delay > 0)) { 12488 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 12489 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12490 callout_reset(&io->io_hdr.delay_callout, 12491 lun->delay_info.datamove_delay * hz, 12492 ctl_datamove_timer_wakeup, io); 12493 if (lun->delay_info.datamove_type == 12494 CTL_DELAY_TYPE_ONESHOT) 12495 lun->delay_info.datamove_delay = 0; 12496 return; 12497 } 12498 } 12499 #endif 12500 12501 /* 12502 * This command has been aborted. Set the port status, so we fail 12503 * the data move. 12504 */ 12505 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12506 printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", 12507 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12508 io->io_hdr.nexus.targ_port, 12509 io->io_hdr.nexus.targ_lun); 12510 io->io_hdr.port_status = 31337; 12511 ctl_datamove_done_process(io); 12512 io->scsiio.be_move_done(io, true); 12513 return; 12514 } 12515 12516 /* Don't confuse frontend with zero length data move. */ 12517 if (io->scsiio.kern_data_len == 0) { 12518 ctl_datamove_done_process(io); 12519 io->scsiio.be_move_done(io, true); 12520 return; 12521 } 12522 12523 fe_datamove = CTL_PORT(io)->fe_datamove; 12524 fe_datamove(io); 12525 } 12526 12527 static void 12528 ctl_send_datamove_done(union ctl_io *io, int have_lock) 12529 { 12530 union ctl_ha_msg msg; 12531 #ifdef CTL_TIME_IO 12532 struct bintime cur_bt; 12533 #endif 12534 12535 memset(&msg, 0, sizeof(msg)); 12536 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12537 msg.hdr.original_sc = io; 12538 msg.hdr.serializing_sc = io->io_hdr.remote_io; 12539 msg.hdr.nexus = io->io_hdr.nexus; 12540 msg.hdr.status = io->io_hdr.status; 12541 msg.scsi.kern_data_resid = io->scsiio.kern_data_resid; 12542 msg.scsi.tag_num = io->scsiio.tag_num; 12543 msg.scsi.tag_type = io->scsiio.tag_type; 12544 msg.scsi.scsi_status = io->scsiio.scsi_status; 12545 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12546 io->scsiio.sense_len); 12547 msg.scsi.sense_len = io->scsiio.sense_len; 12548 msg.scsi.port_status = io->io_hdr.port_status; 12549 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12550 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12551 ctl_failover_io(io, /*have_lock*/ have_lock); 12552 return; 12553 } 12554 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12555 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12556 msg.scsi.sense_len, M_WAITOK); 12557 12558 #ifdef CTL_TIME_IO 12559 getbinuptime(&cur_bt); 12560 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 12561 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 12562 #endif 12563 io->io_hdr.num_dmas++; 12564 } 12565 12566 /* 12567 * The DMA to the remote side is done, now we need to tell the other side 12568 * we're done so it can continue with its data movement. 
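 * In outline: free the data transfer request and the temporary local S/G
 * buffers, then use ctl_send_datamove_done() to send a CTL_MSG_DATAMOVE_DONE
 * message carrying status and any sense data back to the peer.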
 */
static void
ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
{
	union ctl_io *io;
	uint32_t i;

	io = rq->context;

	if (rq->ret != CTL_HA_STATUS_SUCCESS) {
		printf("%s: ISC DMA write failed with error %d\n", __func__,
		    rq->ret);
		ctl_set_internal_failure(&io->scsiio,
		    /*sks_valid*/ 1,
		    /*retry_count*/ rq->ret);
	}

	ctl_dt_req_free(rq);

	for (i = 0; i < io->scsiio.kern_sg_entries; i++)
		free(CTL_LSGLT(io)[i].addr, M_CTL);
	free(CTL_RSGL(io), M_CTL);
	CTL_RSGL(io) = NULL;
	CTL_LSGL(io) = NULL;

	/*
	 * The data is in local and remote memory, so now we need to send
	 * status (good or bad) back to the other side.
	 */
	ctl_send_datamove_done(io, /*have_lock*/ 0);
}

/*
 * We've moved the data from the host/controller into local memory. Now we
 * need to push it over to the remote controller's memory.
 */
static int
ctl_datamove_remote_dm_write_cb(union ctl_io *io, bool samethr)
{
	int retval;

	retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE,
	    ctl_datamove_remote_write_cb);
	return (retval);
}

static void
ctl_datamove_remote_write(union ctl_io *io)
{
	int retval;
	void (*fe_datamove)(union ctl_io *io);

	/*
	 * - Get the data from the host/HBA into local memory.
	 * - DMA memory from the local controller to the remote controller.
	 * - Send status back to the remote controller.
	 */

	retval = ctl_datamove_remote_sgl_setup(io);
	if (retval != 0)
		return;

	/* Switch the pointer over so the FETD knows what to do */
	io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io);

	/*
	 * Use a custom move done callback, since we need to send completion
	 * back to the other controller, not to the backend on this side.
	 */
	io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;

	fe_datamove = CTL_PORT(io)->fe_datamove;
	fe_datamove(io);
}

static int
ctl_datamove_remote_dm_read_cb(union ctl_io *io, bool samethr)
{
	uint32_t i;

	for (i = 0; i < io->scsiio.kern_sg_entries; i++)
		free(CTL_LSGLT(io)[i].addr, M_CTL);
	free(CTL_RSGL(io), M_CTL);
	CTL_RSGL(io) = NULL;
	CTL_LSGL(io) = NULL;

	/*
	 * The read is done, now we need to send status (good or bad) back
	 * to the other side.
	 */
	ctl_send_datamove_done(io, /*have_lock*/ 0);

	return (0);
}

static void
ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
{
	union ctl_io *io;
	void (*fe_datamove)(union ctl_io *io);

	io = rq->context;

	if (rq->ret != CTL_HA_STATUS_SUCCESS) {
		printf("%s: ISC DMA read failed with error %d\n", __func__,
		    rq->ret);
		ctl_set_internal_failure(&io->scsiio,
		    /*sks_valid*/ 1,
		    /*retry_count*/ rq->ret);
	}

	ctl_dt_req_free(rq);

	/* Switch the pointer over so the FETD knows what to do */
	io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io);

	/*
	 * Use a custom move done callback, since we need to send completion
	 * back to the other controller, not to the backend on this side.
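	 * The read-side sequence mirrors the write side above: the FETD
	 * moves the data into the local buffers, then the custom callback
	 * reports completion to the peer rather than to a local backend.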
12688 */ 12689 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12690 12691 /* XXX KDM add checks like the ones in ctl_datamove? */ 12692 12693 fe_datamove = CTL_PORT(io)->fe_datamove; 12694 fe_datamove(io); 12695 } 12696 12697 static int 12698 ctl_datamove_remote_sgl_setup(union ctl_io *io) 12699 { 12700 struct ctl_sg_entry *local_sglist; 12701 uint32_t len_to_go; 12702 int retval; 12703 int i; 12704 12705 retval = 0; 12706 local_sglist = CTL_LSGL(io); 12707 len_to_go = io->scsiio.kern_data_len; 12708 12709 /* 12710 * The difficult thing here is that the size of the various 12711 * S/G segments may be different than the size from the 12712 * remote controller. That'll make it harder when DMAing 12713 * the data back to the other side. 12714 */ 12715 for (i = 0; len_to_go > 0; i++) { 12716 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12717 local_sglist[i].addr = 12718 malloc(local_sglist[i].len, M_CTL, M_WAITOK); 12719 12720 len_to_go -= local_sglist[i].len; 12721 } 12722 /* 12723 * Reset the number of S/G entries accordingly. The original 12724 * number of S/G entries is available in rem_sg_entries. 12725 */ 12726 io->scsiio.kern_sg_entries = i; 12727 12728 return (retval); 12729 } 12730 12731 static int 12732 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12733 ctl_ha_dt_cb callback) 12734 { 12735 struct ctl_ha_dt_req *rq; 12736 struct ctl_sg_entry *remote_sglist, *local_sglist; 12737 uint32_t local_used, remote_used, total_used; 12738 int i, j, isc_ret; 12739 12740 rq = ctl_dt_req_alloc(); 12741 12742 /* 12743 * If we failed to allocate the request, and if the DMA didn't fail 12744 * anyway, set busy status. This is just a resource allocation 12745 * failure. 12746 */ 12747 if ((rq == NULL) 12748 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12749 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) 12750 ctl_set_busy(&io->scsiio); 12751 12752 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12753 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { 12754 if (rq != NULL) 12755 ctl_dt_req_free(rq); 12756 12757 /* 12758 * The data move failed. We need to return status back 12759 * to the other controller. No point in trying to DMA 12760 * data to the remote controller. 12761 */ 12762 12763 ctl_send_datamove_done(io, /*have_lock*/ 0); 12764 12765 return (1); 12766 } 12767 12768 local_sglist = CTL_LSGL(io); 12769 remote_sglist = CTL_RSGL(io); 12770 local_used = 0; 12771 remote_used = 0; 12772 total_used = 0; 12773 12774 /* 12775 * Pull/push the data over the wire from/to the other controller. 12776 * This takes into account the possibility that the local and 12777 * remote sglists may not be identical in terms of the size of 12778 * the elements and the number of elements. 12779 * 12780 * One fundamental assumption here is that the length allocated for 12781 * both the local and remote sglists is identical. Otherwise, we've 12782 * essentially got a coding error of some sort. 12783 */ 12784 isc_ret = CTL_HA_STATUS_SUCCESS; 12785 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12786 uint32_t cur_len; 12787 uint8_t *tmp_ptr; 12788 12789 rq->command = command; 12790 rq->context = io; 12791 12792 /* 12793 * Both pointers should be aligned. But it is possible 12794 * that the allocation length is not. They should both 12795 * also have enough slack left over at the end, though, 12796 * to round up to the next 8 byte boundary. 
12797 */ 12798 cur_len = MIN(local_sglist[i].len - local_used, 12799 remote_sglist[j].len - remote_used); 12800 rq->size = cur_len; 12801 12802 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12803 tmp_ptr += local_used; 12804 12805 #if 0 12806 /* Use physical addresses when talking to ISC hardware */ 12807 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12808 /* XXX KDM use busdma */ 12809 rq->local = vtophys(tmp_ptr); 12810 } else 12811 rq->local = tmp_ptr; 12812 #else 12813 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12814 ("HA does not support BUS_ADDR")); 12815 rq->local = tmp_ptr; 12816 #endif 12817 12818 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12819 tmp_ptr += remote_used; 12820 rq->remote = tmp_ptr; 12821 12822 rq->callback = NULL; 12823 12824 local_used += cur_len; 12825 if (local_used >= local_sglist[i].len) { 12826 i++; 12827 local_used = 0; 12828 } 12829 12830 remote_used += cur_len; 12831 if (remote_used >= remote_sglist[j].len) { 12832 j++; 12833 remote_used = 0; 12834 } 12835 total_used += cur_len; 12836 12837 if (total_used >= io->scsiio.kern_data_len) 12838 rq->callback = callback; 12839 12840 isc_ret = ctl_dt_single(rq); 12841 if (isc_ret > CTL_HA_STATUS_SUCCESS) 12842 break; 12843 } 12844 if (isc_ret != CTL_HA_STATUS_WAIT) { 12845 rq->ret = isc_ret; 12846 callback(rq); 12847 } 12848 12849 return (0); 12850 } 12851 12852 static void 12853 ctl_datamove_remote_read(union ctl_io *io) 12854 { 12855 int retval; 12856 uint32_t i; 12857 12858 /* 12859 * This will send an error to the other controller in the case of a 12860 * failure. 12861 */ 12862 retval = ctl_datamove_remote_sgl_setup(io); 12863 if (retval != 0) 12864 return; 12865 12866 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12867 ctl_datamove_remote_read_cb); 12868 if (retval != 0) { 12869 /* 12870 * Make sure we free memory if there was an error.. The 12871 * ctl_datamove_remote_xfer() function will send the 12872 * datamove done message, or call the callback with an 12873 * error if there is a problem. 12874 */ 12875 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12876 free(CTL_LSGLT(io)[i].addr, M_CTL); 12877 free(CTL_RSGL(io), M_CTL); 12878 CTL_RSGL(io) = NULL; 12879 CTL_LSGL(io) = NULL; 12880 } 12881 } 12882 12883 /* 12884 * Process a datamove request from the other controller. This is used for 12885 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12886 * first. Once that is complete, the data gets DMAed into the remote 12887 * controller's memory. For reads, we DMA from the remote controller's 12888 * memory into our memory first, and then move it out to the FETD. 12889 */ 12890 static void 12891 ctl_datamove_remote(union ctl_io *io) 12892 { 12893 12894 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12895 12896 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12897 ctl_failover_io(io, /*have_lock*/ 0); 12898 return; 12899 } 12900 12901 /* 12902 * Note that we look for an aborted I/O here, but don't do some of 12903 * the other checks that ctl_datamove() normally does. 12904 * We don't need to run the datamove delay code, since that should 12905 * have been done if need be on the other controller. 
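	 * (The port_status values 31338/31339 set below appear to be
	 * arbitrary sentinel codes that mark where the failure was detected,
	 * not defined SCSI or HA status values.)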
12906 */ 12907 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12908 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, 12909 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12910 io->io_hdr.nexus.targ_port, 12911 io->io_hdr.nexus.targ_lun); 12912 io->io_hdr.port_status = 31338; 12913 ctl_send_datamove_done(io, /*have_lock*/ 0); 12914 return; 12915 } 12916 12917 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) 12918 ctl_datamove_remote_write(io); 12919 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 12920 ctl_datamove_remote_read(io); 12921 else { 12922 io->io_hdr.port_status = 31339; 12923 ctl_send_datamove_done(io, /*have_lock*/ 0); 12924 } 12925 } 12926 12927 static void 12928 ctl_process_done(union ctl_io *io) 12929 { 12930 struct ctl_softc *softc = CTL_SOFTC(io); 12931 struct ctl_port *port = CTL_PORT(io); 12932 struct ctl_lun *lun = CTL_LUN(io); 12933 void (*fe_done)(union ctl_io *io); 12934 union ctl_ha_msg msg; 12935 12936 CTL_DEBUG_PRINT(("ctl_process_done\n")); 12937 fe_done = port->fe_done; 12938 12939 #ifdef CTL_TIME_IO 12940 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12941 char str[256]; 12942 char path_str[64]; 12943 struct sbuf sb; 12944 12945 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12946 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12947 12948 sbuf_cat(&sb, path_str); 12949 switch (io->io_hdr.io_type) { 12950 case CTL_IO_SCSI: 12951 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12952 sbuf_printf(&sb, "\n"); 12953 sbuf_cat(&sb, path_str); 12954 sbuf_printf(&sb, "Tag: 0x%04x/%d, Prio: %d\n", 12955 io->scsiio.tag_num, io->scsiio.tag_type, 12956 io->scsiio.priority); 12957 break; 12958 case CTL_IO_TASK: 12959 sbuf_printf(&sb, "Task Action: %d Tag: 0x%04x/%d\n", 12960 io->taskio.task_action, 12961 io->taskio.tag_num, io->taskio.tag_type); 12962 break; 12963 default: 12964 panic("%s: Invalid CTL I/O type %d\n", 12965 __func__, io->io_hdr.io_type); 12966 } 12967 sbuf_cat(&sb, path_str); 12968 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 12969 (intmax_t)time_uptime - io->io_hdr.start_time); 12970 sbuf_finish(&sb); 12971 printf("%s", sbuf_data(&sb)); 12972 } 12973 #endif /* CTL_TIME_IO */ 12974 12975 switch (io->io_hdr.io_type) { 12976 case CTL_IO_SCSI: 12977 break; 12978 case CTL_IO_TASK: 12979 if (ctl_debug & CTL_DEBUG_INFO) 12980 ctl_io_error_print(io, NULL); 12981 fe_done(io); 12982 return; 12983 default: 12984 panic("%s: Invalid CTL I/O type %d\n", 12985 __func__, io->io_hdr.io_type); 12986 } 12987 12988 if (lun == NULL) { 12989 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 12990 io->io_hdr.nexus.targ_mapped_lun)); 12991 goto bailout; 12992 } 12993 12994 mtx_lock(&lun->lun_lock); 12995 12996 /* 12997 * Check to see if we have any informational exception and status 12998 * of this command can be modified to report it in form of either 12999 * RECOVERED ERROR or NO SENSE, depending on MRIE mode page field. 
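	 * Roughly: if MRIE asks for recovered errors (conditionally on PER,
	 * or unconditionally) or for the no-sense variant, the stored
	 * ASC/ASCQ is attached to this command's completion once, and
	 * ie_reported keeps it from being repeated.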
	 */
	if (lun->ie_reported == 0 && lun->ie_asc != 0 &&
	    io->io_hdr.status == CTL_SUCCESS &&
	    (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) {
		uint8_t mrie = lun->MODE_IE.mrie;
		uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) ||
		    (lun->MODE_VER.byte3 & SMS_VER_PER));
		if (((mrie == SIEP_MRIE_REC_COND && per) ||
		    mrie == SIEP_MRIE_REC_UNCOND ||
		    mrie == SIEP_MRIE_NO_SENSE) &&
		    (ctl_get_cmd_entry(&io->scsiio, NULL)->flags &
		    CTL_CMD_FLAG_NO_SENSE) == 0) {
			ctl_set_sense(&io->scsiio,
			    /*current_error*/ 1,
			    /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ?
			     SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR,
			    /*asc*/ lun->ie_asc,
			    /*ascq*/ lun->ie_ascq,
			    SSD_ELEM_NONE);
			lun->ie_reported = 1;
		}
	} else if (lun->ie_reported < 0)
		lun->ie_reported = 0;

	/*
	 * Check to see if we have any errors to inject here.  We only
	 * inject errors for commands that don't already have errors set.
	 */
	if (!STAILQ_EMPTY(&lun->error_list) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) &&
	    ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0))
		ctl_inject_error(lun, io);

	/*
	 * XXX KDM how do we treat commands that aren't completed
	 * successfully?
	 *
	 * XXX KDM should we also track I/O latency?
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
	    io->io_hdr.io_type == CTL_IO_SCSI) {
		int type;
#ifdef CTL_TIME_IO
		struct bintime bt;

		getbinuptime(&bt);
		bintime_sub(&bt, &io->io_hdr.start_bt);
#endif
		if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_IN)
			type = CTL_STATS_READ;
		else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_OUT)
			type = CTL_STATS_WRITE;
		else
			type = CTL_STATS_NO_IO;

		lun->stats.bytes[type] += io->scsiio.kern_total_len;
		lun->stats.operations[type]++;
		lun->stats.dmas[type] += io->io_hdr.num_dmas;
#ifdef CTL_TIME_IO
		bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt);
		bintime_add(&lun->stats.time[type], &bt);
#endif

		mtx_lock(&port->port_lock);
		port->stats.bytes[type] += io->scsiio.kern_total_len;
		port->stats.operations[type]++;
		port->stats.dmas[type] += io->io_hdr.num_dmas;
#ifdef CTL_TIME_IO
		bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt);
		bintime_add(&port->stats.time[type], &bt);
#endif
		mtx_unlock(&port->port_lock);
	}

	/*
	 * Run through the blocked queue of this I/O and see if anything
	 * can be unblocked, now that this I/O is done and will be removed.
	 * We need to do it before removal to have OOA position to start.
	 */
	ctl_try_unblock_others(lun, io, TRUE);

	/*
	 * Remove this from the OOA queue.
	 */
	LIST_REMOVE(&io->io_hdr, ooa_links);
#ifdef CTL_TIME_IO
	if (LIST_EMPTY(&lun->ooa_queue))
		lun->last_busy = getsbinuptime();
#endif

	/*
	 * If the LUN has been invalidated, free it if there is nothing
	 * left on its OOA queue.
	 */
	if ((lun->flags & CTL_LUN_INVALID)
	 && LIST_EMPTY(&lun->ooa_queue)) {
		mtx_unlock(&lun->lun_lock);
		ctl_free_lun(lun);
	} else
		mtx_unlock(&lun->lun_lock);

bailout:

	/*
	 * If this command has been aborted, make sure we set the status
	 * properly.  The FETD is responsible for freeing the I/O and doing
	 * whatever it needs to do to clean up its state.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT)
		ctl_set_task_aborted(&io->scsiio);

	/*
	 * If enabled, print command error status.
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS &&
	    (ctl_debug & CTL_DEBUG_INFO) != 0)
		ctl_io_error_print(io, NULL);

	/*
	 * Tell the FETD or the other shelf controller we're done with this
	 * command.  Note that only SCSI commands get to this point.  Task
	 * management commands are completed above.
	 */
	if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
	    (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.serializing_sc = io->io_hdr.remote_io;
		msg.hdr.nexus = io->io_hdr.nexus;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
		    M_WAITOK);
	}

	fe_done(io);
}

/*
 * Front end should call this if it doesn't do autosense.  When the request
 * sense comes back in from the initiator, we'll dequeue this and send it.
 */
int
ctl_queue_sense(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_port *port = CTL_PORT(io);
	struct ctl_lun *lun;
	struct scsi_sense_data *ps;
	uint32_t initidx, p, targ_lun;

	CTL_DEBUG_PRINT(("ctl_queue_sense\n"));

	targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	/*
	 * LUN lookup will likely move to the ctl_work_thread() once we
	 * have our new queueing infrastructure (that doesn't put things on
	 * a per-LUN queue initially).  That is so that we can handle
	 * things like an INQUIRY to a LUN that we don't have enabled.  We
	 * can't deal with that right now.
	 * If we don't have a LUN for this, just toss the sense information.
	 */
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		goto bailout;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);

	initidx = ctl_get_initindex(&io->io_hdr.nexus);
	p = initidx / CTL_MAX_INIT_PER_PORT;
	if (lun->pending_sense[p] == NULL) {
		lun->pending_sense[p] = malloc(sizeof(*ps) * CTL_MAX_INIT_PER_PORT,
		    M_CTL, M_NOWAIT | M_ZERO);
	}
	if ((ps = lun->pending_sense[p]) != NULL) {
		ps += initidx % CTL_MAX_INIT_PER_PORT;
		memset(ps, 0, sizeof(*ps));
		memcpy(ps, &io->scsiio.sense_data, io->scsiio.sense_len);
	}
	mtx_unlock(&lun->lun_lock);

bailout:
	ctl_free_io(io);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Primary command inlet from frontend ports.  All SCSI and task I/O
 * requests must go through this function.
 */
int
ctl_queue(union ctl_io *io)
{
	struct ctl_port *port = CTL_PORT(io);

	CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));

#ifdef CTL_TIME_IO
	io->io_hdr.start_time = time_uptime;
	getbinuptime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */

	/* Map FE-specific LUN ID into global one. */
	io->io_hdr.nexus.targ_mapped_lun =
	    ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
	case CTL_IO_TASK:
		if (ctl_debug & CTL_DEBUG_CDB)
			ctl_io_print(io);
		ctl_enqueue_incoming(io);
		break;
	default:
		printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
		return (EINVAL);
	}

	return (CTL_RETVAL_COMPLETE);
}

int
ctl_run(union ctl_io *io)
{
	struct ctl_port *port = CTL_PORT(io);

	CTL_DEBUG_PRINT(("ctl_run cdb[0]=%02X\n", io->scsiio.cdb[0]));

#ifdef CTL_TIME_IO
	io->io_hdr.start_time = time_uptime;
	getbinuptime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */

	/* Map FE-specific LUN ID into global one. */
	io->io_hdr.nexus.targ_mapped_lun =
	    ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		if (ctl_debug & CTL_DEBUG_CDB)
			ctl_io_print(io);
		ctl_scsiio_precheck(&io->scsiio);
		break;
	case CTL_IO_TASK:
		if (ctl_debug & CTL_DEBUG_CDB)
			ctl_io_print(io);
		ctl_run_task(io);
		break;
	default:
		printf("ctl_run: unknown I/O type %d\n", io->io_hdr.io_type);
		return (EINVAL);
	}

	return (CTL_RETVAL_COMPLETE);
}
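
/*
 * Illustrative sketch only (not part of the driver): a frontend port
 * typically allocates a ctl_io from its pool, fills in the nexus and the
 * CDB, and hands the request to ctl_queue() (or ctl_run() to bypass the
 * worker threads).  The identifiers initiator_id, lun_id, tag, cdb and
 * cdb_len below are hypothetical placeholders; real frontends derive them
 * from the transport.
 *
 *	union ctl_io *io;
 *
 *	io = ctl_alloc_io(port->ctl_pool_ref);
 *	ctl_zero_io(io);
 *	io->io_hdr.io_type = CTL_IO_SCSI;
 *	io->io_hdr.nexus.initid = initiator_id;
 *	io->io_hdr.nexus.targ_port = port->targ_port;
 *	io->io_hdr.nexus.targ_lun = lun_id;
 *	io->scsiio.tag_num = tag;
 *	io->scsiio.tag_type = CTL_TAG_SIMPLE;
 *	io->scsiio.cdb_len = cdb_len;
 *	memcpy(io->scsiio.cdb, cdb, cdb_len);
 *	ctl_queue(io);
 */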
#ifdef CTL_IO_DELAY
static void
ctl_done_timer_wakeup(void *arg)
{
	union ctl_io *io;

	io = (union ctl_io *)arg;
	ctl_done(io);
}
#endif /* CTL_IO_DELAY */

void
ctl_serseq_done(union ctl_io *io)
{
	struct ctl_lun *lun = CTL_LUN(io);

	/* This is racy, but should not be a problem. */
	if (!TAILQ_EMPTY(&io->io_hdr.blocked_queue)) {
		mtx_lock(&lun->lun_lock);
		io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
		ctl_try_unblock_others(lun, io, FALSE);
		mtx_unlock(&lun->lun_lock);
	} else
		io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
}

void
ctl_done(union ctl_io *io)
{

	/*
	 * Enable this to catch duplicate completion issues.
	 */
#if 0
	if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
		printf("%s: type %d msg %d cdb %x iptl: "
		    "%u:%u:%u tag 0x%04x "
		    "flag %#x status %x\n",
		    __func__,
		    io->io_hdr.io_type,
		    io->io_hdr.msg_type,
		    io->scsiio.cdb[0],
		    io->io_hdr.nexus.initid,
		    io->io_hdr.nexus.targ_port,
		    io->io_hdr.nexus.targ_lun,
		    (io->io_hdr.io_type == CTL_IO_TASK) ?
		    io->taskio.tag_num : io->scsiio.tag_num,
		    io->io_hdr.flags,
		    io->io_hdr.status);
	} else
		io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
#endif

	/*
	 * This is an internal copy of an I/O, and should not go through
	 * the normal done processing logic.
	 */
	if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
		return;

#ifdef CTL_IO_DELAY
	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
	} else {
		struct ctl_lun *lun = CTL_LUN(io);

		if ((lun != NULL)
		 && (lun->delay_info.done_delay > 0)) {
			callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
			callout_reset(&io->io_hdr.delay_callout,
			    lun->delay_info.done_delay * hz,
			    ctl_done_timer_wakeup, io);
			if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
				lun->delay_info.done_delay = 0;
			return;
		}
	}
#endif /* CTL_IO_DELAY */

	ctl_enqueue_done(io);
}

static void
ctl_work_thread(void *arg)
{
	struct ctl_thread *thr = (struct ctl_thread *)arg;
	struct ctl_softc *softc = thr->ctl_softc;
	union ctl_io *io;
	int retval;

	CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
	thread_lock(curthread);
	sched_prio(curthread, PUSER - 1);
	thread_unlock(curthread);

	while (!softc->shutdown) {
		/*
		 * We handle the queues in this order:
		 * - ISC
		 * - done queue (to free up resources, unblock other commands)
		 * - incoming queue
		 * - RtR queue
		 *
		 * If all of those queues are empty, we drop the queue lock
		 * and sleep until new work arrives.
		 */
		mtx_lock(&thr->queue_lock);
		io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
			mtx_unlock(&thr->queue_lock);
			ctl_handle_isc(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->done_queue, links);
			/* clear any blocked commands, call fe_done */
			mtx_unlock(&thr->queue_lock);
			ctl_process_done(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
			mtx_unlock(&thr->queue_lock);
			if (io->io_hdr.io_type == CTL_IO_TASK)
				ctl_run_task(io);
			else
				ctl_scsiio_precheck(&io->scsiio);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
			mtx_unlock(&thr->queue_lock);
			retval = ctl_scsiio(&io->scsiio);
			if (retval != CTL_RETVAL_COMPLETE)
				CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
			continue;
		}

		/* Sleep until we have something to do. */
		mtx_sleep(thr, &thr->queue_lock, PDROP, "-", 0);
	}
	thr->thread = NULL;
	kthread_exit();
}

static void
ctl_thresh_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_lun *lun;
	struct ctl_logical_block_provisioning_page *page;
	const char *attr;
	union ctl_ha_msg msg;
	uint64_t thres, val;
	int i, e, set;

	CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));
	thread_lock(curthread);
	sched_prio(curthread, PUSER - 1);
	thread_unlock(curthread);

	while (!softc->shutdown) {
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			if ((lun->flags & CTL_LUN_DISABLED) ||
			    (lun->flags & CTL_LUN_NO_MEDIA) ||
			    lun->backend->lun_attr == NULL)
				continue;
			if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
			    softc->ha_mode == CTL_HA_MODE_XFER)
				continue;
			if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0)
				continue;
			e = 0;
			page = &lun->MODE_LBP;
			for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
				if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
					continue;
				thres = scsi_4btoul(page->descr[i].count);
				thres <<= CTL_LBP_EXPONENT;
				switch (page->descr[i].resource) {
				case 0x01:
					attr = "blocksavail";
					break;
				case 0x02:
					attr = "blocksused";
					break;
				case 0xf1:
					attr = "poolblocksavail";
					break;
				case 0xf2:
					attr = "poolblocksused";
					break;
				default:
					continue;
				}
				mtx_unlock(&softc->ctl_lock); // XXX
				val = lun->backend->lun_attr(lun->be_lun, attr);
				mtx_lock(&softc->ctl_lock);
				if (val == UINT64_MAX)
					continue;
				if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
				    == SLBPPD_ARMING_INC)
					e = (val >= thres);
				else
					e = (val <= thres);
				if (e)
					break;
			}
			mtx_lock(&lun->lun_lock);
			if (e) {
				scsi_u64to8b((uint8_t *)&page->descr[i] -
				    (uint8_t *)page, lun->ua_tpt_info);
				if (lun->lasttpt == 0 ||
				    time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
					lun->lasttpt = time_uptime;
					ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
					set = 1;
				} else
					set = 0;
			} else {
				lun->lasttpt = 0;
				ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
				set = -1;
			}
			mtx_unlock(&lun->lun_lock);
			if (set != 0 &&
			    lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
				/* Send msg to other side. */
				bzero(&msg.ua, sizeof(msg.ua));
				msg.hdr.msg_type = CTL_MSG_UA;
				msg.hdr.nexus.initid = -1;
				msg.hdr.nexus.targ_port = -1;
				msg.hdr.nexus.targ_lun = lun->lun;
				msg.hdr.nexus.targ_mapped_lun = lun->lun;
				msg.ua.ua_all = 1;
				msg.ua.ua_set = (set > 0);
				msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
				memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8);
				mtx_unlock(&softc->ctl_lock); // XXX
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
				    sizeof(msg.ua), M_WAITOK);
				mtx_lock(&softc->ctl_lock);
			}
		}
		mtx_sleep(&softc->thresh_thread, &softc->ctl_lock,
		    PDROP, "-", CTL_LBP_PERIOD * hz);
	}
	softc->thresh_thread = NULL;
	kthread_exit();
}

/*
 * Queue newly arrived I/O to a worker thread.  Incoming requests are
 * hashed by initiator (target port and initiator ID), so commands from a
 * given initiator are always handled by the same thread and stay ordered.
 */
static void
ctl_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;
	u_int idx;

	idx = (io->io_hdr.nexus.targ_port * 127 +
	    io->io_hdr.nexus.initid) % worker_threads;
	thr = &softc->threads[idx];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * The RtR, done and ISC queues below are hashed by mapped LUN instead,
 * keeping all further processing for a given LUN on a single thread.
 */
static void
ctl_enqueue_rtr(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_done(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_isc(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * vim: ts=8
 */