/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
 * Copyright (c) 2017 Jakub Wojciech Klama <jceel@FreeBSD.org>
 * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id$
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ctype.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>
#include <vm/uma.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_cd.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
 */
const static struct scsi_da_rw_recovery_page rw_er_page_default = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/SMS_RWER_LBPERE,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_rw_recovery_page rw_er_page_changeable = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/SMS_RWER_PER,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/SMS_RWER_LBPERE,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_format_page format_page_default = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
			       CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ SFP_HSEC,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_format_page format_page_changeable = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {0, 0},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ 0,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_default = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ CTL_DEFAULT_HEADS,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ SRDP_RPL_DISABLED,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
			   CTL_DEFAULT_ROTATION_RATE & 0xff},
	/*reserved2*/ {0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ 0,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ 0,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {0, 0},
	/*reserved2*/ {0, 0}
};

const static struct scsi_da_verify_recovery_page verify_er_page_default = {
	/*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
	/*byte3*/0,
	/*read_retry_count*/0,
	/*reserved*/{ 0, 0, 0, 0, 0, 0 },
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_verify_recovery_page verify_er_page_changeable = {
	/*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
	/*byte3*/SMS_VER_PER,
	/*read_retry_count*/0,
	/*reserved*/{ 0, 0, 0, 0, 0, 0 },
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_caching_page caching_page_default = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_DISC | SCP_WCE,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0xff, 0xff},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0xff, 0xff},
	/*max_pf_ceiling*/ {0xff, 0xff},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_caching_page caching_page_changeable = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_WCE | SCP_RCD,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0, 0},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0, 0},
	/*max_pf_ceiling*/ {0, 0},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_control_page control_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/0,
	/*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
	/*eca_and_aen*/0,
	/*flags4*/SCP_TAS,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

const static struct scsi_control_page control_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/SCP_DSENSE,
	/*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR,
	/*eca_and_aen*/SCP_SWP,
	/*flags4*/0,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

#define CTL_CEM_LEN	(sizeof(struct scsi_control_ext_page) - 4)

const static struct scsi_control_ext_page control_ext_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
	/*subpage_code*/0x01,
	/*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
	/*flags*/0,
	/*prio*/0,
	/*max_sense*/0
};

const static struct scsi_control_ext_page control_ext_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
	/*subpage_code*/0x01,
	/*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
	/*flags*/0,
	/*prio*/0,
	/*max_sense*/0xff
};

const static struct scsi_info_exceptions_page ie_page_default = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/SIEP_FLAGS_EWASC,
	/*mrie*/SIEP_MRIE_NO,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 1}
};

const static struct scsi_info_exceptions_page ie_page_changeable = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST |
	    SIEP_FLAGS_LOGERR,
	/*mrie*/0x0f,
	/*interval_timer*/{0xff, 0xff, 0xff, 0xff},
	/*report_count*/{0xff, 0xff, 0xff, 0xff}
};

#define CTL_LBPM_LEN	(sizeof(struct ctl_logical_block_provisioning_page) - 4)

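/*
 * Logical Block Provisioning mode subpage (subpage 0x02 of the
 * Informational Exceptions page).  The default instance below reports four
 * provisioning threshold descriptors; in the changeable mask only the
 * SITUA flag is marked as modifiable.
 */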
const static struct ctl_logical_block_provisioning_page lbp_page_default =
{{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0x01,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0x02,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf1,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf2,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/SLBPP_SITUA,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

const static struct scsi_cddvd_capabilities_page cddvd_page_default = {
	/*page_code*/SMS_CDDVD_CAPS_PAGE,
	/*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
	/*caps1*/0x3f,
	/*caps2*/0x00,
	/*caps3*/0xf0,
	/*caps4*/0x00,
	/*caps5*/0x29,
	/*caps6*/0x00,
	/*obsolete*/{0, 0},
	/*nvol_levels*/{0, 0},
	/*buffer_size*/{8, 0},
	/*obsolete2*/{0, 0},
	/*reserved*/0,
	/*digital*/0,
	/*obsolete3*/0,
	/*copy_management*/0,
	/*reserved2*/0,
	/*rotation_control*/0,
	/*cur_write_speed*/0,
	/*num_speed_descr*/0,
};

const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = {
	/*page_code*/SMS_CDDVD_CAPS_PAGE,
	/*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
	/*caps1*/0,
	/*caps2*/0,
	/*caps3*/0,
	/*caps4*/0,
	/*caps5*/0,
	/*caps6*/0,
	/*obsolete*/{0, 0},
	/*nvol_levels*/{0, 0},
	/*buffer_size*/{0, 0},
	/*obsolete2*/{0, 0},
	/*reserved*/0,
	/*digital*/0,
	/*obsolete3*/0,
	/*copy_management*/0,
	/*reserved2*/0,
	/*rotation_control*/0,
	/*cur_write_speed*/0,
	/*num_speed_descr*/0,
};

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "CAM Target Layer");
static int worker_threads = -1;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int ctl_debug = CTL_DEBUG_NONE;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
    &ctl_debug, 0, "Enabled debug flags");
static int ctl_lun_map_size = 1024;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN,
    &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)");
#ifdef CTL_TIME_IO
static int ctl_time_io_secs = CTL_TIME_IO_DEFAULT_SECS;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, time_io_secs, CTLFLAG_RWTUN,
    &ctl_time_io_secs, 0, "Log requests taking more seconds");
#endif

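/*
 * Because the knobs above and below are declared with CTLFLAG_RDTUN or
 * CTLFLAG_RWTUN, they can also be preset as loader(8) tunables, e.g. in
 * loader.conf (illustrative values):
 *	kern.cam.ctl.worker_threads="4"
 *	kern.cam.ctl.debug="1"
 */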
/*
 * Maximum number of LUNs we support.  MUST be a power of 2.
 */
#define	CTL_DEFAULT_MAX_LUNS	1024
static int ctl_max_luns = CTL_DEFAULT_MAX_LUNS;
TUNABLE_INT("kern.cam.ctl.max_luns", &ctl_max_luns);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_luns, CTLFLAG_RDTUN,
    &ctl_max_luns, CTL_DEFAULT_MAX_LUNS, "Maximum number of LUNs");

/*
 * Maximum number of ports registered at one time.
 */
#define	CTL_DEFAULT_MAX_PORTS	256
static int ctl_max_ports = CTL_DEFAULT_MAX_PORTS;
TUNABLE_INT("kern.cam.ctl.max_ports", &ctl_max_ports);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_ports, CTLFLAG_RDTUN,
    &ctl_max_ports, CTL_DEFAULT_MAX_PORTS, "Maximum number of ports");

/*
 * Maximum number of initiators we support.
 */
#define	CTL_MAX_INITIATORS	(CTL_MAX_INIT_PER_PORT * ctl_max_ports)

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), SCSI Feature Sets (0x92),
 * Block limits (0xB0), Block Device Characteristics (0xB1) and
 * Logical Block Provisioning (0xB2)
 */
#define	SCSI_EVPD_NUM_SUPPORTED_PAGES	11

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
static int ctl_init(void);
static int ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			       struct ctl_ooa *ooa_hdr,
			       struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
static int ctl_enable_lun(struct ctl_lun *lun);
static int ctl_disable_lun(struct ctl_lun *lun);
static int ctl_free_lun(struct ctl_lun *lun);

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
				  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
				       int alloc_len);
static int ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
					 int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
    bool seq);
static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
    union ctl_io *pending_io, union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io **starting_io);
static void ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io,
    bool skip);
static void ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *io,
    bool skip);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
				const struct ctl_cmd_entry *entry,
				struct ctl_scsiio *ctsio);
static void ctl_failover_lun(union ctl_io *io);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
			       struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_target_reset(union ctl_io *io);
static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx,
			     ctl_ua_type ua_type);
static int ctl_lun_reset(union ctl_io *io);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_query_task(union ctl_io *io, int task_set);
static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx,
			       ctl_ua_type ua_type);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static int ctl_query_async_event(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
				    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static void ctl_process_done(union ctl_io *io);
static void ctl_thresh_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);
static const struct ctl_cmd_entry *
    ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
    ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
    const struct ctl_cmd_entry *entry);
static int ctl_ha_init(void);
static int ctl_ha_shutdown(void);

static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};

MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

static struct ctl_frontend ha_frontend =
{
	.name = "ha",
	.init = ctl_ha_init,
	.shutdown = ctl_ha_shutdown,
};

static int
ctl_ha_init(void)
{
	struct ctl_softc *softc = control_softc;

	if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
			    &softc->othersc_pool) != 0)
		return (ENOMEM);
	if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
		ctl_pool_free(softc->othersc_pool);
		return (EIO);
	}
	if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
	    != CTL_HA_STATUS_SUCCESS) {
		ctl_ha_msg_destroy(softc);
		ctl_pool_free(softc->othersc_pool);
		return (EIO);
	}
	return (0);
};

static int
ctl_ha_shutdown(void)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_port *port;

	ctl_ha_msg_shutdown(softc);
	if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS)
		return (EIO);
	if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS)
		return (EIO);
	ctl_pool_free(softc->othersc_pool);
	while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) {
		ctl_port_deregister(port);
		free(port->port_name, M_CTL);
		free(port, M_CTL);
	}
	return (0);
};

/*
 * Forward the current DATAMOVE request to the peer HA node, splitting the
 * S/G list across as many CTL_MSG_DATAMOVE messages as needed.
 */
static void
ctl_ha_datamove(union ctl_io *io)
{
	struct ctl_lun *lun = CTL_LUN(io);
	struct ctl_sg_entry *sgl;
	union ctl_ha_msg msg;
	uint32_t sg_entries_sent;
	int do_sg_copy, i, j;

	memset(&msg.dt, 0, sizeof(msg.dt));
	msg.hdr.msg_type = CTL_MSG_DATAMOVE;
	msg.hdr.original_sc = io->io_hdr.remote_io;
	msg.hdr.serializing_sc = io;
	msg.hdr.nexus = io->io_hdr.nexus;
	msg.hdr.status = io->io_hdr.status;
	msg.dt.flags = io->io_hdr.flags;

	/*
	 * We convert everything into a S/G list here.  We can't
	 * pass by reference, only by value between controllers.
	 * So we can't pass a pointer to the S/G list, only as many
	 * S/G entries as we can fit in here.  If it's possible for
	 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
	 * then we need to break this up into multiple transfers.
	 */
	if (io->scsiio.kern_sg_entries == 0) {
		msg.dt.kern_sg_entries = 1;
#if 0
		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
		} else {
			/* XXX KDM use busdma here! */
			msg.dt.sg_list[0].addr =
			    (void *)vtophys(io->scsiio.kern_data_ptr);
		}
#else
		KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
		    ("HA does not support BUS_ADDR"));
		msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
#endif
		msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
		do_sg_copy = 0;
	} else {
		msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
		do_sg_copy = 1;
	}

	msg.dt.kern_data_len = io->scsiio.kern_data_len;
	msg.dt.kern_total_len = io->scsiio.kern_total_len;
	msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
	msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
	msg.dt.sg_sequence = 0;

	/*
	 * Loop until we've sent all of the S/G entries.  On the
	 * other end, we'll recompose these S/G entries into one
	 * contiguous list before processing.
	 */
	for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries;
	    msg.dt.sg_sequence++) {
		msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) /
		    sizeof(msg.dt.sg_list[0])),
		    msg.dt.kern_sg_entries - sg_entries_sent);
		if (do_sg_copy != 0) {
			sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
			for (i = sg_entries_sent, j = 0;
			     i < msg.dt.cur_sg_entries; i++, j++) {
#if 0
				if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
					msg.dt.sg_list[j].addr = sgl[i].addr;
				} else {
					/* XXX KDM use busdma here! */
					msg.dt.sg_list[j].addr =
					    (void *)vtophys(sgl[i].addr);
				}
#else
				KASSERT((io->io_hdr.flags &
				    CTL_FLAG_BUS_ADDR) == 0,
				    ("HA does not support BUS_ADDR"));
				msg.dt.sg_list[j].addr = sgl[i].addr;
#endif
				msg.dt.sg_list[j].len = sgl[i].len;
			}
		}

		sg_entries_sent += msg.dt.cur_sg_entries;
		msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries);
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.dt) - sizeof(msg.dt.sg_list) +
		    sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries,
		    M_WAITOK) > CTL_HA_STATUS_SUCCESS) {
			io->io_hdr.port_status = 31341;
			io->scsiio.be_move_done(io);
			return;
		}
		msg.dt.sent_sg_entries = sg_entries_sent;
	}

	/*
	 * Officially hand the request over from us to the peer.
	 * If a failover has just happened, we must return an error.
	 * If it happens just after the handover, it is not our problem.
	 */
	if (lun)
		mtx_lock(&lun->lun_lock);
	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
		if (lun)
			mtx_unlock(&lun->lun_lock);
		io->io_hdr.port_status = 31342;
		io->scsiio.be_move_done(io);
		return;
	}
	io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
	io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
	if (lun)
		mtx_unlock(&lun->lun_lock);
}

static void
ctl_ha_done(union ctl_io *io)
{
	union ctl_ha_msg msg;

	if (io->io_hdr.io_type == CTL_IO_SCSI) {
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.original_sc = io->io_hdr.remote_io;
		msg.hdr.nexus = io->io_hdr.nexus;
		msg.hdr.status = io->io_hdr.status;
		msg.scsi.scsi_status = io->scsiio.scsi_status;
		msg.scsi.tag_num = io->scsiio.tag_num;
		msg.scsi.tag_type = io->scsiio.tag_type;
		msg.scsi.sense_len = io->scsiio.sense_len;
		memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
		    io->scsiio.sense_len);
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
		    msg.scsi.sense_len, M_WAITOK);
	}
	ctl_free_io(io);
}

static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
			    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	    msg_info->scsi.sense_len);
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
				union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

void
ctl_isc_announce_lun(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_ha_msg *msg;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;

	if (softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	mtx_lock(&lun->lun_lock);
	i = sizeof(msg->lun);
	if (lun->lun_devid)
		i += lun->lun_devid->len;
	i += sizeof(pr_key) * lun->pr_key_count;
alloc:
	mtx_unlock(&lun->lun_lock);
	msg = malloc(i, M_CTL, M_WAITOK);
	mtx_lock(&lun->lun_lock);
	k = sizeof(msg->lun);
	if (lun->lun_devid)
		k += lun->lun_devid->len;
	k += sizeof(pr_key) * lun->pr_key_count;
	if (i < k) {
		free(msg, M_CTL);
		i = k;
		goto alloc;
	}
	bzero(&msg->lun, sizeof(msg->lun));
	msg->hdr.msg_type = CTL_MSG_LUN_SYNC;
	msg->hdr.nexus.targ_lun = lun->lun;
	msg->hdr.nexus.targ_mapped_lun = lun->lun;
	msg->lun.flags = lun->flags;
	msg->lun.pr_generation = lun->pr_generation;
	msg->lun.pr_res_idx = lun->pr_res_idx;
	msg->lun.pr_res_type = lun->pr_res_type;
	msg->lun.pr_key_count = lun->pr_key_count;
	i = 0;
	if (lun->lun_devid) {
		msg->lun.lun_devid_len = lun->lun_devid->len;
		memcpy(&msg->lun.data[i], lun->lun_devid->data,
		    msg->lun.lun_devid_len);
		i += msg->lun.lun_devid_len;
	}
	for (k = 0; k < CTL_MAX_INITIATORS; k++) {
		if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0)
			continue;
		pr_key.pr_iid = k;
		memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key));
		i += sizeof(pr_key);
	}
	mtx_unlock(&lun->lun_lock);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
	    M_WAITOK);
	free(msg, M_CTL);

	if (lun->flags & CTL_LUN_PRIMARY_SC) {
		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
			ctl_isc_announce_mode(lun, -1,
			    lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
			    lun->mode_pages.index[i].subpage);
		}
	}
}

void
ctl_isc_announce_port(struct ctl_port *port)
{
	struct ctl_softc *softc = port->ctl_softc;
	union ctl_ha_msg *msg;
	int i;

	if (port->targ_port < softc->port_min ||
	    port->targ_port >= softc->port_max ||
	    softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	i = sizeof(msg->port) + strlen(port->port_name) + 1;
	if (port->lun_map)
		i += port->lun_map_size * sizeof(uint32_t);
	if (port->port_devid)
		i += port->port_devid->len;
	if (port->target_devid)
		i += port->target_devid->len;
	if (port->init_devid)
		i += port->init_devid->len;
	msg = malloc(i, M_CTL, M_WAITOK);
	bzero(&msg->port, sizeof(msg->port));
	msg->hdr.msg_type = CTL_MSG_PORT_SYNC;
	msg->hdr.nexus.targ_port = port->targ_port;
	msg->port.port_type = port->port_type;
	msg->port.physical_port = port->physical_port;
	msg->port.virtual_port = port->virtual_port;
	msg->port.status = port->status;
	i = 0;
	msg->port.name_len = sprintf(&msg->port.data[i],
	    "%d:%s", softc->ha_id, port->port_name) + 1;
	i += msg->port.name_len;
	if (port->lun_map) {
		msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t);
		memcpy(&msg->port.data[i], port->lun_map,
		    msg->port.lun_map_len);
		i += msg->port.lun_map_len;
	}
	if (port->port_devid) {
		msg->port.port_devid_len = port->port_devid->len;
		memcpy(&msg->port.data[i], port->port_devid->data,
		    msg->port.port_devid_len);
		i += msg->port.port_devid_len;
	}
	if (port->target_devid) {
		msg->port.target_devid_len = port->target_devid->len;
		memcpy(&msg->port.data[i], port->target_devid->data,
		    msg->port.target_devid_len);
		i += msg->port.target_devid_len;
	}
	if (port->init_devid) {
		msg->port.init_devid_len = port->init_devid->len;
		memcpy(&msg->port.data[i], port->init_devid->data,
		    msg->port.init_devid_len);
		i += msg->port.init_devid_len;
	}
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
	    M_WAITOK);
	free(msg, M_CTL);
}

void
ctl_isc_announce_iid(struct ctl_port *port, int iid)
{
	struct ctl_softc *softc = port->ctl_softc;
	union ctl_ha_msg *msg;
	int i, l;

	if (port->targ_port < softc->port_min ||
	    port->targ_port >= softc->port_max ||
	    softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	mtx_lock(&softc->ctl_lock);
	i = sizeof(msg->iid);
	l = 0;
	if (port->wwpn_iid[iid].name)
		l = strlen(port->wwpn_iid[iid].name) + 1;
	i += l;
	msg = malloc(i, M_CTL, M_NOWAIT);
	if (msg == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	bzero(&msg->iid, sizeof(msg->iid));
	msg->hdr.msg_type = CTL_MSG_IID_SYNC;
	msg->hdr.nexus.targ_port = port->targ_port;
	msg->hdr.nexus.initid = iid;
	msg->iid.in_use = port->wwpn_iid[iid].in_use;
	msg->iid.name_len = l;
	msg->iid.wwpn = port->wwpn_iid[iid].wwpn;
	if (port->wwpn_iid[iid].name)
		strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l);
	mtx_unlock(&softc->ctl_lock);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT);
	free(msg, M_CTL);
}

void
ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx,
    uint8_t page, uint8_t subpage)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_ha_msg msg;
	u_int i;

	if (softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
		    page && lun->mode_pages.index[i].subpage == subpage)
			break;
	}
	if (i == CTL_NUM_MODE_PAGES)
		return;

	/* Don't try to replicate pages not present on this device. */
	if (lun->mode_pages.index[i].page_data == NULL)
		return;

	bzero(&msg.mode, sizeof(msg.mode));
	msg.hdr.msg_type = CTL_MSG_MODE_SYNC;
	msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT;
	msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT;
	msg.hdr.nexus.targ_lun = lun->lun;
	msg.hdr.nexus.targ_mapped_lun = lun->lun;
	msg.mode.page_code = page;
	msg.mode.subpage = subpage;
	msg.mode.page_len = lun->mode_pages.index[i].page_len;
	memcpy(msg.mode.data, lun->mode_pages.index[i].page_data,
	    msg.mode.page_len);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode),
	    M_WAITOK);
}

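/*
 * The HA link has come up: send our identity and limits to the peer for
 * validation, then resynchronize all port, initiator and LUN state.
 */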
static void
ctl_isc_ha_link_up(struct ctl_softc *softc)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	union ctl_ha_msg msg;
	int i;

	/* Announce this node's parameters to the peer for validation. */
	msg.login.msg_type = CTL_MSG_LOGIN;
	msg.login.version = CTL_HA_VERSION;
	msg.login.ha_mode = softc->ha_mode;
	msg.login.ha_id = softc->ha_id;
	msg.login.max_luns = ctl_max_luns;
	msg.login.max_ports = ctl_max_ports;
	msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT;
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login),
	    M_WAITOK);

	STAILQ_FOREACH(port, &softc->port_list, links) {
		ctl_isc_announce_port(port);
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use)
				ctl_isc_announce_iid(port, i);
		}
	}
	STAILQ_FOREACH(lun, &softc->lun_list, links)
		ctl_isc_announce_lun(lun);
}

static void
ctl_isc_ha_link_down(struct ctl_softc *softc)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	union ctl_io *io;
	int i;

	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) {
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
		}
		mtx_unlock(&lun->lun_lock);

		mtx_unlock(&softc->ctl_lock);
		io = ctl_alloc_io(softc->othersc_pool);
		mtx_lock(&softc->ctl_lock);
		ctl_zero_io(io);
		io->io_hdr.msg_type = CTL_MSG_FAILOVER;
		io->io_hdr.nexus.targ_mapped_lun = lun->lun;
		ctl_enqueue_isc(io);
	}

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port->targ_port >= softc->port_min &&
		    port->targ_port < softc->port_max)
			continue;
		port->status &= ~CTL_PORT_STATUS_ONLINE;
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			port->wwpn_iid[i].in_use = 0;
			free(port->wwpn_iid[i].name, M_CTL);
			port->wwpn_iid[i].name = NULL;
		}
	}
	mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);

	mtx_lock(&softc->ctl_lock);
	if (msg->hdr.nexus.targ_mapped_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set)
		memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8);
	if (msg->ua.ua_all) {
		if (msg->ua.ua_set)
			ctl_est_ua_all(lun, iid, msg->ua.ua_type);
		else
			ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
	} else {
		if (msg->ua.ua_set)
			ctl_est_ua(lun, iid, msg->ua.ua_type);
		else
			ctl_clr_ua(lun, iid, msg->ua.ua_type);
	}
	mtx_unlock(&lun->lun_lock);
}

static void
ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;
	ctl_lun_flags oflags;
	uint32_t targ_lun;

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
	if (msg->lun.lun_devid_len != i || (i > 0 &&
	     memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
		mtx_unlock(&lun->lun_lock);
		printf("%s: Received conflicting HA LUN %d\n",
		    __func__, targ_lun);
		return;
	} else {
		/* Record whether peer is primary. */
		oflags = lun->flags;
		if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_DISABLED) == 0)
			lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
		else
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
		if (oflags != lun->flags)
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);

		/* If peer is primary and we are not -- use data. */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
		    (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
			lun->pr_generation = msg->lun.pr_generation;
			lun->pr_res_idx = msg->lun.pr_res_idx;
			lun->pr_res_type = msg->lun.pr_res_type;
			lun->pr_key_count = msg->lun.pr_key_count;
			for (k = 0; k < CTL_MAX_INITIATORS; k++)
				ctl_clr_prkey(lun, k);
			for (k = 0; k < msg->lun.pr_key_count; k++) {
				memcpy(&pr_key, &msg->lun.data[i],
				    sizeof(pr_key));
				ctl_alloc_prkey(lun, pr_key.pr_iid);
				ctl_set_prkey(lun, pr_key.pr_iid,
				    pr_key.pr_key);
				i += sizeof(pr_key);
			}
		}

		mtx_unlock(&lun->lun_lock);
		CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
		    __func__, targ_lun,
		    (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
		    "primary" : "secondary"));

		/* If we are primary but peer doesn't know -- notify. */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
			ctl_isc_announce_lun(lun);
	}
}

static void
ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	int i, new;

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		CTL_DEBUG_PRINT(("%s: New port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 1;
		port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO);
		port->frontend = &ha_frontend;
		port->targ_port = msg->hdr.nexus.targ_port;
		port->fe_datamove = ctl_ha_datamove;
		port->fe_done = ctl_ha_done;
	} else if (port->frontend == &ha_frontend) {
		CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 0;
	} else {
		printf("%s: Received conflicting HA port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	port->port_type = msg->port.port_type;
	port->physical_port = msg->port.physical_port;
	port->virtual_port = msg->port.virtual_port;
	port->status = msg->port.status;
	i = 0;
	free(port->port_name, M_CTL);
	port->port_name = strndup(&msg->port.data[i], msg->port.name_len,
	    M_CTL);
	i += msg->port.name_len;
	if (msg->port.lun_map_len != 0) {
		if (port->lun_map == NULL ||
		    port->lun_map_size * sizeof(uint32_t) <
		    msg->port.lun_map_len) {
			port->lun_map_size = 0;
			free(port->lun_map, M_CTL);
			port->lun_map = malloc(msg->port.lun_map_len,
			    M_CTL, M_WAITOK);
		}
		memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len);
		port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t);
		i += msg->port.lun_map_len;
	} else {
		port->lun_map_size = 0;
		free(port->lun_map, M_CTL);
		port->lun_map = NULL;
	}
	if (msg->port.port_devid_len != 0) {
		if (port->port_devid == NULL ||
		    port->port_devid->len < msg->port.port_devid_len) {
			free(port->port_devid, M_CTL);
			port->port_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.port_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->port_devid->data, &msg->port.data[i],
		    msg->port.port_devid_len);
		port->port_devid->len = msg->port.port_devid_len;
		i += msg->port.port_devid_len;
	} else {
		free(port->port_devid, M_CTL);
		port->port_devid = NULL;
	}
	if (msg->port.target_devid_len != 0) {
		if (port->target_devid == NULL ||
		    port->target_devid->len < msg->port.target_devid_len) {
			free(port->target_devid, M_CTL);
			port->target_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.target_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->target_devid->data, &msg->port.data[i],
		    msg->port.target_devid_len);
		port->target_devid->len = msg->port.target_devid_len;
		i += msg->port.target_devid_len;
	} else {
		free(port->target_devid, M_CTL);
		port->target_devid = NULL;
	}
	if (msg->port.init_devid_len != 0) {
		if (port->init_devid == NULL ||
		    port->init_devid->len < msg->port.init_devid_len) {
			free(port->init_devid, M_CTL);
			port->init_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.init_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->init_devid->data, &msg->port.data[i],
		    msg->port.init_devid_len);
		port->init_devid->len = msg->port.init_devid_len;
		i += msg->port.init_devid_len;
	} else {
		free(port->init_devid, M_CTL);
		port->init_devid = NULL;
	}
	if (new) {
		if (ctl_port_register(port) != 0) {
			printf("%s: ctl_port_register() failed with error\n",
			    __func__);
		}
	}
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
			continue;
		mtx_lock(&lun->lun_lock);
		ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
		mtx_unlock(&lun->lun_lock);
	}
	mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	int iid;

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		printf("%s: Received IID for unknown port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	iid = msg->hdr.nexus.initid;
	if (port->wwpn_iid[iid].in_use != 0 &&
	    msg->iid.in_use == 0)
		ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON);
	port->wwpn_iid[iid].in_use = msg->iid.in_use;
	port->wwpn_iid[iid].wwpn = msg->iid.wwpn;
	free(port->wwpn_iid[iid].name, M_CTL);
	if (msg->iid.name_len) {
		port->wwpn_iid[iid].name = strndup(&msg->iid.data[0],
		    msg->iid.name_len, M_CTL);
	} else
		port->wwpn_iid[iid].name = NULL;
}

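/*
 * Peer login message: verify that both HA nodes run compatible versions,
 * modes and limits, and abort the HA link if they do not.
 */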
static void
ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{

	if (msg->login.version != CTL_HA_VERSION) {
		printf("CTL HA peers have different versions %d != %d\n",
		    msg->login.version, CTL_HA_VERSION);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.ha_mode != softc->ha_mode) {
		printf("CTL HA peers have different ha_mode %d != %d\n",
		    msg->login.ha_mode, softc->ha_mode);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.ha_id == softc->ha_id) {
		printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.max_luns != ctl_max_luns ||
	    msg->login.max_ports != ctl_max_ports ||
	    msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) {
		printf("CTL HA peers have different limits\n");
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
}

static void
ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	u_int i;
	uint32_t initidx, targ_lun;

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
		    msg->mode.page_code &&
		    lun->mode_pages.index[i].subpage == msg->mode.subpage)
			break;
	}
	if (i == CTL_NUM_MODE_PAGES) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	memcpy(lun->mode_pages.index[i].page_data, msg->mode.data,
	    lun->mode_pages.index[i].page_len);
	initidx = ctl_get_initindex(&msg->hdr.nexus);
	if (initidx != -1)
		ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
	mtx_unlock(&lun->lun_lock);
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
	struct ctl_softc *softc = control_softc;
	union ctl_io *io;
	struct ctl_prio *presio;
	ctl_ha_status isc_status;

	CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event));
	if (event == CTL_HA_EVT_MSG_RECV) {
		union ctl_ha_msg *msg, msgbuf;

		if (param > sizeof(msgbuf))
			msg = malloc(param, M_CTL, M_WAITOK);
		else
			msg = &msgbuf;
		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param,
		    M_WAITOK);
		if (isc_status != CTL_HA_STATUS_SUCCESS) {
			printf("%s: Error receiving message: %d\n",
			    __func__, isc_status);
			if (msg != &msgbuf)
				free(msg, M_CTL);
			return;
		}

		CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->hdr.msg_type));
		switch (msg->hdr.msg_type) {
		case CTL_MSG_SERIALIZE:
			io = ctl_alloc_io(softc->othersc_pool);
			ctl_zero_io(io);
			// populate ctsio from msg
			io->io_hdr.io_type = CTL_IO_SCSI;
			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
			io->io_hdr.remote_io = msg->hdr.original_sc;
			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
			    CTL_FLAG_IO_ACTIVE;
			/*
			 * If we're in serialization-only mode, we don't
			 * want to go through full done processing.  Thus
			 * the COPY flag.
			 *
			 * XXX KDM add another flag that is more specific.
1456 */ 1457 if (softc->ha_mode != CTL_HA_MODE_XFER) 1458 io->io_hdr.flags |= CTL_FLAG_INT_COPY; 1459 io->io_hdr.nexus = msg->hdr.nexus; 1460 io->scsiio.priority = msg->scsi.priority; 1461 io->scsiio.tag_num = msg->scsi.tag_num; 1462 io->scsiio.tag_type = msg->scsi.tag_type; 1463 #ifdef CTL_TIME_IO 1464 io->io_hdr.start_time = time_uptime; 1465 getbinuptime(&io->io_hdr.start_bt); 1466 #endif /* CTL_TIME_IO */ 1467 io->scsiio.cdb_len = msg->scsi.cdb_len; 1468 memcpy(io->scsiio.cdb, msg->scsi.cdb, 1469 CTL_MAX_CDBLEN); 1470 if (softc->ha_mode == CTL_HA_MODE_XFER) { 1471 const struct ctl_cmd_entry *entry; 1472 1473 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 1474 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 1475 io->io_hdr.flags |= 1476 entry->flags & CTL_FLAG_DATA_MASK; 1477 } 1478 ctl_enqueue_isc(io); 1479 break; 1480 1481 /* Performed on the Originating SC, XFER mode only */ 1482 case CTL_MSG_DATAMOVE: { 1483 struct ctl_sg_entry *sgl; 1484 int i, j; 1485 1486 io = msg->hdr.original_sc; 1487 if (io == NULL) { 1488 printf("%s: original_sc == NULL!\n", __func__); 1489 /* XXX KDM do something here */ 1490 break; 1491 } 1492 io->io_hdr.msg_type = CTL_MSG_DATAMOVE; 1493 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1494 /* 1495 * Keep track of this, we need to send it back over 1496 * when the datamove is complete. 1497 */ 1498 io->io_hdr.remote_io = msg->hdr.serializing_sc; 1499 if (msg->hdr.status == CTL_SUCCESS) 1500 io->io_hdr.status = msg->hdr.status; 1501 1502 if (msg->dt.sg_sequence == 0) { 1503 #ifdef CTL_TIME_IO 1504 getbinuptime(&io->io_hdr.dma_start_bt); 1505 #endif 1506 i = msg->dt.kern_sg_entries + 1507 msg->dt.kern_data_len / 1508 CTL_HA_DATAMOVE_SEGMENT + 1; 1509 sgl = malloc(sizeof(*sgl) * i, M_CTL, 1510 M_WAITOK | M_ZERO); 1511 CTL_RSGL(io) = sgl; 1512 CTL_LSGL(io) = &sgl[msg->dt.kern_sg_entries]; 1513 1514 io->scsiio.kern_data_ptr = (uint8_t *)sgl; 1515 1516 io->scsiio.kern_sg_entries = 1517 msg->dt.kern_sg_entries; 1518 io->scsiio.rem_sg_entries = 1519 msg->dt.kern_sg_entries; 1520 io->scsiio.kern_data_len = 1521 msg->dt.kern_data_len; 1522 io->scsiio.kern_total_len = 1523 msg->dt.kern_total_len; 1524 io->scsiio.kern_data_resid = 1525 msg->dt.kern_data_resid; 1526 io->scsiio.kern_rel_offset = 1527 msg->dt.kern_rel_offset; 1528 io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR; 1529 io->io_hdr.flags |= msg->dt.flags & 1530 CTL_FLAG_BUS_ADDR; 1531 } else 1532 sgl = (struct ctl_sg_entry *) 1533 io->scsiio.kern_data_ptr; 1534 1535 for (i = msg->dt.sent_sg_entries, j = 0; 1536 i < (msg->dt.sent_sg_entries + 1537 msg->dt.cur_sg_entries); i++, j++) { 1538 sgl[i].addr = msg->dt.sg_list[j].addr; 1539 sgl[i].len = msg->dt.sg_list[j].len; 1540 } 1541 1542 /* 1543 * If this is the last piece of the I/O, we've got 1544 * the full S/G list. Queue processing in the thread. 1545 * Otherwise wait for the next piece. 1546 */ 1547 if (msg->dt.sg_last != 0) 1548 ctl_enqueue_isc(io); 1549 break; 1550 } 1551 /* Performed on the Serializing (primary) SC, XFER mode only */ 1552 case CTL_MSG_DATAMOVE_DONE: { 1553 if (msg->hdr.serializing_sc == NULL) { 1554 printf("%s: serializing_sc == NULL!\n", 1555 __func__); 1556 /* XXX KDM now what? */ 1557 break; 1558 } 1559 /* 1560 * We grab the sense information here in case 1561 * there was a failure, so we can return status 1562 * back to the initiator. 
			 */
			io = msg->hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.port_status = msg->scsi.port_status;
			io->scsiio.kern_data_resid = msg->scsi.kern_data_resid;
			if (msg->hdr.status != CTL_STATUS_NONE) {
				io->io_hdr.status = msg->hdr.status;
				io->scsiio.scsi_status = msg->scsi.scsi_status;
				io->scsiio.sense_len = msg->scsi.sense_len;
				memcpy(&io->scsiio.sense_data,
				    &msg->scsi.sense_data,
				    msg->scsi.sense_len);
				if (msg->hdr.status == CTL_SUCCESS)
					io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
			}
			ctl_enqueue_isc(io);
			break;
		}

		/* Performed on the Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n",
				    __func__);
				break;
			}
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.remote_io = msg->hdr.serializing_sc;
			ctl_enqueue_isc(io);
			break;

		/*
		 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode.
		 */
		case CTL_MSG_FINISH_IO:
			if (softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(softc, msg);
			else
				ctl_isc_handler_finish_ser_only(softc, msg);
			break;

		/* Performed on the Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				    __func__);
				break;
			}
			ctl_copy_sense_data(msg, io);
			/*
			 * The I/O should already have been cleaned up on the
			 * other SC, so clear this flag so we won't send a
			 * message back to finish the I/O there.
1624 */ 1625 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 1626 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1627 1628 /* io = msg->hdr.serializing_sc; */ 1629 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 1630 ctl_enqueue_isc(io); 1631 break; 1632 1633 /* Handle resets sent from the other side */ 1634 case CTL_MSG_MANAGE_TASKS: { 1635 struct ctl_taskio *taskio; 1636 taskio = (struct ctl_taskio *)ctl_alloc_io( 1637 softc->othersc_pool); 1638 ctl_zero_io((union ctl_io *)taskio); 1639 taskio->io_hdr.io_type = CTL_IO_TASK; 1640 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1641 taskio->io_hdr.nexus = msg->hdr.nexus; 1642 taskio->task_action = msg->task.task_action; 1643 taskio->tag_num = msg->task.tag_num; 1644 taskio->tag_type = msg->task.tag_type; 1645 #ifdef CTL_TIME_IO 1646 taskio->io_hdr.start_time = time_uptime; 1647 getbinuptime(&taskio->io_hdr.start_bt); 1648 #endif /* CTL_TIME_IO */ 1649 ctl_run_task((union ctl_io *)taskio); 1650 break; 1651 } 1652 /* Persistent Reserve action which needs attention */ 1653 case CTL_MSG_PERS_ACTION: 1654 presio = (struct ctl_prio *)ctl_alloc_io( 1655 softc->othersc_pool); 1656 ctl_zero_io((union ctl_io *)presio); 1657 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; 1658 presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1659 presio->io_hdr.nexus = msg->hdr.nexus; 1660 presio->pr_msg = msg->pr; 1661 ctl_enqueue_isc((union ctl_io *)presio); 1662 break; 1663 case CTL_MSG_UA: 1664 ctl_isc_ua(softc, msg, param); 1665 break; 1666 case CTL_MSG_PORT_SYNC: 1667 ctl_isc_port_sync(softc, msg, param); 1668 break; 1669 case CTL_MSG_LUN_SYNC: 1670 ctl_isc_lun_sync(softc, msg, param); 1671 break; 1672 case CTL_MSG_IID_SYNC: 1673 ctl_isc_iid_sync(softc, msg, param); 1674 break; 1675 case CTL_MSG_LOGIN: 1676 ctl_isc_login(softc, msg, param); 1677 break; 1678 case CTL_MSG_MODE_SYNC: 1679 ctl_isc_mode_sync(softc, msg, param); 1680 break; 1681 default: 1682 printf("Received HA message of unknown type %d\n", 1683 msg->hdr.msg_type); 1684 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1685 break; 1686 } 1687 if (msg != &msgbuf) 1688 free(msg, M_CTL); 1689 } else if (event == CTL_HA_EVT_LINK_CHANGE) { 1690 printf("CTL: HA link status changed from %d to %d\n", 1691 softc->ha_link, param); 1692 if (param == softc->ha_link) 1693 return; 1694 if (softc->ha_link == CTL_HA_LINK_ONLINE) { 1695 softc->ha_link = param; 1696 ctl_isc_ha_link_down(softc); 1697 } else { 1698 softc->ha_link = param; 1699 if (softc->ha_link == CTL_HA_LINK_ONLINE) 1700 ctl_isc_ha_link_up(softc); 1701 } 1702 return; 1703 } else { 1704 printf("ctl_isc_event_handler: Unknown event %d\n", event); 1705 return; 1706 } 1707 } 1708 1709 static void 1710 ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) 1711 { 1712 1713 memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data, 1714 src->scsi.sense_len); 1715 dest->scsiio.scsi_status = src->scsi.scsi_status; 1716 dest->scsiio.sense_len = src->scsi.sense_len; 1717 dest->io_hdr.status = src->hdr.status; 1718 } 1719 1720 static void 1721 ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest) 1722 { 1723 1724 memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data, 1725 src->scsiio.sense_len); 1726 dest->scsi.scsi_status = src->scsiio.scsi_status; 1727 dest->scsi.sense_len = src->scsiio.sense_len; 1728 dest->hdr.status = src->io_hdr.status; 1729 } 1730 1731 void 1732 ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1733 { 1734 struct ctl_softc *softc = lun->ctl_softc; 1735 ctl_ua_type *pu; 1736 1737 if (initidx < softc->init_min || initidx >= 
softc->init_max) 1738 return; 1739 mtx_assert(&lun->lun_lock, MA_OWNED); 1740 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1741 if (pu == NULL) 1742 return; 1743 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; 1744 } 1745 1746 void 1747 ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua) 1748 { 1749 int i; 1750 1751 mtx_assert(&lun->lun_lock, MA_OWNED); 1752 if (lun->pending_ua[port] == NULL) 1753 return; 1754 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1755 if (port * CTL_MAX_INIT_PER_PORT + i == except) 1756 continue; 1757 lun->pending_ua[port][i] |= ua; 1758 } 1759 } 1760 1761 void 1762 ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1763 { 1764 struct ctl_softc *softc = lun->ctl_softc; 1765 int i; 1766 1767 mtx_assert(&lun->lun_lock, MA_OWNED); 1768 for (i = softc->port_min; i < softc->port_max; i++) 1769 ctl_est_ua_port(lun, i, except, ua); 1770 } 1771 1772 void 1773 ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1774 { 1775 struct ctl_softc *softc = lun->ctl_softc; 1776 ctl_ua_type *pu; 1777 1778 if (initidx < softc->init_min || initidx >= softc->init_max) 1779 return; 1780 mtx_assert(&lun->lun_lock, MA_OWNED); 1781 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1782 if (pu == NULL) 1783 return; 1784 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; 1785 } 1786 1787 void 1788 ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1789 { 1790 struct ctl_softc *softc = lun->ctl_softc; 1791 int i, j; 1792 1793 mtx_assert(&lun->lun_lock, MA_OWNED); 1794 for (i = softc->port_min; i < softc->port_max; i++) { 1795 if (lun->pending_ua[i] == NULL) 1796 continue; 1797 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1798 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1799 continue; 1800 lun->pending_ua[i][j] &= ~ua; 1801 } 1802 } 1803 } 1804 1805 void 1806 ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, 1807 ctl_ua_type ua_type) 1808 { 1809 struct ctl_lun *lun; 1810 1811 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); 1812 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 1813 mtx_lock(&lun->lun_lock); 1814 ctl_clr_ua(lun, initidx, ua_type); 1815 mtx_unlock(&lun->lun_lock); 1816 } 1817 } 1818 1819 static int 1820 ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS) 1821 { 1822 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1823 struct ctl_lun *lun; 1824 struct ctl_lun_req ireq; 1825 int error, value; 1826 1827 value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 
0 : 1; 1828 error = sysctl_handle_int(oidp, &value, 0, req); 1829 if ((error != 0) || (req->newptr == NULL)) 1830 return (error); 1831 1832 mtx_lock(&softc->ctl_lock); 1833 if (value == 0) 1834 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1835 else 1836 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1837 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1838 mtx_unlock(&softc->ctl_lock); 1839 bzero(&ireq, sizeof(ireq)); 1840 ireq.reqtype = CTL_LUNREQ_MODIFY; 1841 ireq.reqdata.modify.lun_id = lun->lun; 1842 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, 1843 curthread); 1844 if (ireq.status != CTL_LUN_OK) { 1845 printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", 1846 __func__, ireq.status, ireq.error_str); 1847 } 1848 mtx_lock(&softc->ctl_lock); 1849 } 1850 mtx_unlock(&softc->ctl_lock); 1851 return (0); 1852 } 1853 1854 static int 1855 ctl_init(void) 1856 { 1857 struct make_dev_args args; 1858 struct ctl_softc *softc; 1859 int i, error; 1860 1861 softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1862 M_WAITOK | M_ZERO); 1863 1864 make_dev_args_init(&args); 1865 args.mda_devsw = &ctl_cdevsw; 1866 args.mda_uid = UID_ROOT; 1867 args.mda_gid = GID_OPERATOR; 1868 args.mda_mode = 0600; 1869 args.mda_si_drv1 = softc; 1870 args.mda_si_drv2 = NULL; 1871 error = make_dev_s(&args, &softc->dev, "cam/ctl"); 1872 if (error != 0) { 1873 free(softc, M_DEVBUF); 1874 control_softc = NULL; 1875 return (error); 1876 } 1877 1878 sysctl_ctx_init(&softc->sysctl_ctx); 1879 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1880 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1881 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Target Layer"); 1882 1883 if (softc->sysctl_tree == NULL) { 1884 printf("%s: unable to allocate sysctl tree\n", __func__); 1885 destroy_dev(softc->dev); 1886 free(softc, M_DEVBUF); 1887 control_softc = NULL; 1888 return (ENOMEM); 1889 } 1890 1891 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1892 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1893 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1894 softc->flags = 0; 1895 1896 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1897 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, 1898 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); 1899 1900 if (ctl_max_luns <= 0 || powerof2(ctl_max_luns) == 0) { 1901 printf("Bad value %d for kern.cam.ctl.max_luns, must be a power of two, using %d\n", 1902 ctl_max_luns, CTL_DEFAULT_MAX_LUNS); 1903 ctl_max_luns = CTL_DEFAULT_MAX_LUNS; 1904 } 1905 softc->ctl_luns = malloc(sizeof(struct ctl_lun *) * ctl_max_luns, 1906 M_DEVBUF, M_WAITOK | M_ZERO); 1907 softc->ctl_lun_mask = malloc(sizeof(uint32_t) * 1908 ((ctl_max_luns + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); 1909 if (ctl_max_ports <= 0 || powerof2(ctl_max_ports) == 0) { 1910 printf("Bad value %d for kern.cam.ctl.max_ports, must be a power of two, using %d\n", 1911 ctl_max_ports, CTL_DEFAULT_MAX_PORTS); 1912 ctl_max_ports = CTL_DEFAULT_MAX_PORTS; 1913 } 1914 softc->ctl_port_mask = malloc(sizeof(uint32_t) * 1915 ((ctl_max_ports + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); 1916 softc->ctl_ports = malloc(sizeof(struct ctl_port *) * ctl_max_ports, 1917 M_DEVBUF, M_WAITOK | M_ZERO); 1918 1919 /* 1920 * In Copan's HA scheme, the "master" and "slave" roles are 1921 * figured out through the slot the controller is in. Although it 1922 * is an active/active system, someone has to be in charge. 
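 * The ha_id tunable below selects this head's slice of the port and
 * initiator index space: head N owns ports [(N - 1) * port_cnt,
 * N * port_cnt), and the initiator range is derived from that; an
 * ha_id of 0 (or one out of range) means a single-head configuration.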
1923 */ 1924 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1925 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1926 "HA head ID (0 - no HA)"); 1927 if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) { 1928 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1929 softc->is_single = 1; 1930 softc->port_cnt = ctl_max_ports; 1931 softc->port_min = 0; 1932 } else { 1933 softc->port_cnt = ctl_max_ports / NUM_HA_SHELVES; 1934 softc->port_min = (softc->ha_id - 1) * softc->port_cnt; 1935 } 1936 softc->port_max = softc->port_min + softc->port_cnt; 1937 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; 1938 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; 1939 1940 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1941 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, 1942 "HA link state (0 - offline, 1 - unknown, 2 - online)"); 1943 1944 STAILQ_INIT(&softc->lun_list); 1945 STAILQ_INIT(&softc->fe_list); 1946 STAILQ_INIT(&softc->port_list); 1947 STAILQ_INIT(&softc->be_list); 1948 ctl_tpc_init(softc); 1949 1950 if (worker_threads <= 0) 1951 worker_threads = max(1, mp_ncpus / 4); 1952 if (worker_threads > CTL_MAX_THREADS) 1953 worker_threads = CTL_MAX_THREADS; 1954 1955 for (i = 0; i < worker_threads; i++) { 1956 struct ctl_thread *thr = &softc->threads[i]; 1957 1958 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); 1959 thr->ctl_softc = softc; 1960 STAILQ_INIT(&thr->incoming_queue); 1961 STAILQ_INIT(&thr->rtr_queue); 1962 STAILQ_INIT(&thr->done_queue); 1963 STAILQ_INIT(&thr->isc_queue); 1964 1965 error = kproc_kthread_add(ctl_work_thread, thr, 1966 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); 1967 if (error != 0) { 1968 printf("error creating CTL work thread!\n"); 1969 return (error); 1970 } 1971 } 1972 error = kproc_kthread_add(ctl_thresh_thread, softc, 1973 &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh"); 1974 if (error != 0) { 1975 printf("error creating CTL threshold thread!\n"); 1976 return (error); 1977 } 1978 1979 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), 1980 OID_AUTO, "ha_role", 1981 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, 1982 softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); 1983 1984 if (softc->is_single == 0) { 1985 if (ctl_frontend_register(&ha_frontend) != 0) 1986 softc->is_single = 1; 1987 } 1988 return (0); 1989 } 1990 1991 static int 1992 ctl_shutdown(void) 1993 { 1994 struct ctl_softc *softc = control_softc; 1995 int i; 1996 1997 if (softc->is_single == 0) 1998 ctl_frontend_deregister(&ha_frontend); 1999 2000 destroy_dev(softc->dev); 2001 2002 /* Shutdown CTL threads. 
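 * Each worker exits once softc->shutdown is set; wake it and poll
 * until it clears its thread pointer, then tear down its queue lock.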
*/ 2003 softc->shutdown = 1; 2004 for (i = 0; i < worker_threads; i++) { 2005 struct ctl_thread *thr = &softc->threads[i]; 2006 while (thr->thread != NULL) { 2007 wakeup(thr); 2008 if (thr->thread != NULL) 2009 pause("CTL thr shutdown", 1); 2010 } 2011 mtx_destroy(&thr->queue_lock); 2012 } 2013 while (softc->thresh_thread != NULL) { 2014 wakeup(softc->thresh_thread); 2015 if (softc->thresh_thread != NULL) 2016 pause("CTL thr shutdown", 1); 2017 } 2018 2019 ctl_tpc_shutdown(softc); 2020 uma_zdestroy(softc->io_zone); 2021 mtx_destroy(&softc->ctl_lock); 2022 2023 free(softc->ctl_luns, M_DEVBUF); 2024 free(softc->ctl_lun_mask, M_DEVBUF); 2025 free(softc->ctl_port_mask, M_DEVBUF); 2026 free(softc->ctl_ports, M_DEVBUF); 2027 2028 sysctl_ctx_free(&softc->sysctl_ctx); 2029 2030 free(softc, M_DEVBUF); 2031 control_softc = NULL; 2032 return (0); 2033 } 2034 2035 static int 2036 ctl_module_event_handler(module_t mod, int what, void *arg) 2037 { 2038 2039 switch (what) { 2040 case MOD_LOAD: 2041 return (ctl_init()); 2042 case MOD_UNLOAD: 2043 return (ctl_shutdown()); 2044 default: 2045 return (EOPNOTSUPP); 2046 } 2047 } 2048 2049 /* 2050 * XXX KDM should we do some access checks here? Bump a reference count to 2051 * prevent a CTL module from being unloaded while someone has it open? 2052 */ 2053 static int 2054 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 2055 { 2056 return (0); 2057 } 2058 2059 static int 2060 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 2061 { 2062 return (0); 2063 } 2064 2065 /* 2066 * Remove an initiator by port number and initiator ID. 2067 * Returns 0 for success, -1 for failure. 2068 */ 2069 int 2070 ctl_remove_initiator(struct ctl_port *port, int iid) 2071 { 2072 struct ctl_softc *softc = port->ctl_softc; 2073 int last; 2074 2075 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2076 2077 if (iid > CTL_MAX_INIT_PER_PORT) { 2078 printf("%s: initiator ID %u > maximum %u!\n", 2079 __func__, iid, CTL_MAX_INIT_PER_PORT); 2080 return (-1); 2081 } 2082 2083 mtx_lock(&softc->ctl_lock); 2084 last = (--port->wwpn_iid[iid].in_use == 0); 2085 port->wwpn_iid[iid].last_use = time_uptime; 2086 mtx_unlock(&softc->ctl_lock); 2087 if (last) 2088 ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); 2089 ctl_isc_announce_iid(port, iid); 2090 2091 return (0); 2092 } 2093 2094 /* 2095 * Add an initiator to the initiator map. 2096 * Returns iid for success, < 0 for failure.
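 * If iid is negative, an ID is chosen automatically: first by matching
 * an existing WWPN or name, then by taking a completely unused slot,
 * and finally by recycling the least recently used inactive slot.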
2097 */ 2098 int 2099 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 2100 { 2101 struct ctl_softc *softc = port->ctl_softc; 2102 time_t best_time; 2103 int i, best; 2104 2105 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2106 2107 if (iid >= CTL_MAX_INIT_PER_PORT) { 2108 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 2109 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 2110 free(name, M_CTL); 2111 return (-1); 2112 } 2113 2114 mtx_lock(&softc->ctl_lock); 2115 2116 if (iid < 0 && (wwpn != 0 || name != NULL)) { 2117 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2118 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 2119 iid = i; 2120 break; 2121 } 2122 if (name != NULL && port->wwpn_iid[i].name != NULL && 2123 strcmp(name, port->wwpn_iid[i].name) == 0) { 2124 iid = i; 2125 break; 2126 } 2127 } 2128 } 2129 2130 if (iid < 0) { 2131 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2132 if (port->wwpn_iid[i].in_use == 0 && 2133 port->wwpn_iid[i].wwpn == 0 && 2134 port->wwpn_iid[i].name == NULL) { 2135 iid = i; 2136 break; 2137 } 2138 } 2139 } 2140 2141 if (iid < 0) { 2142 best = -1; 2143 best_time = INT32_MAX; 2144 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2145 if (port->wwpn_iid[i].in_use == 0) { 2146 if (port->wwpn_iid[i].last_use < best_time) { 2147 best = i; 2148 best_time = port->wwpn_iid[i].last_use; 2149 } 2150 } 2151 } 2152 iid = best; 2153 } 2154 2155 if (iid < 0) { 2156 mtx_unlock(&softc->ctl_lock); 2157 free(name, M_CTL); 2158 return (-2); 2159 } 2160 2161 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 2162 /* 2163 * This is not an error yet. 2164 */ 2165 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 2166 #if 0 2167 printf("%s: port %d iid %u WWPN %#jx arrived" 2168 " again\n", __func__, port->targ_port, 2169 iid, (uintmax_t)wwpn); 2170 #endif 2171 goto take; 2172 } 2173 if (name != NULL && port->wwpn_iid[iid].name != NULL && 2174 strcmp(name, port->wwpn_iid[iid].name) == 0) { 2175 #if 0 2176 printf("%s: port %d iid %u name '%s' arrived" 2177 " again\n", __func__, port->targ_port, 2178 iid, name); 2179 #endif 2180 goto take; 2181 } 2182 2183 /* 2184 * This is an error, but what do we do about it? The 2185 * driver is telling us we have a new WWPN for this 2186 * initiator ID, so we pretty much need to use it. 
2187 */ 2188 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 2189 " but WWPN %#jx '%s' is still at that address\n", 2190 __func__, port->targ_port, iid, wwpn, name, 2191 (uintmax_t)port->wwpn_iid[iid].wwpn, 2192 port->wwpn_iid[iid].name); 2193 } 2194 take: 2195 free(port->wwpn_iid[iid].name, M_CTL); 2196 port->wwpn_iid[iid].name = name; 2197 port->wwpn_iid[iid].wwpn = wwpn; 2198 port->wwpn_iid[iid].in_use++; 2199 mtx_unlock(&softc->ctl_lock); 2200 ctl_isc_announce_iid(port, iid); 2201 2202 return (iid); 2203 } 2204 2205 static int 2206 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 2207 { 2208 int len; 2209 2210 switch (port->port_type) { 2211 case CTL_PORT_FC: 2212 { 2213 struct scsi_transportid_fcp *id = 2214 (struct scsi_transportid_fcp *)buf; 2215 if (port->wwpn_iid[iid].wwpn == 0) 2216 return (0); 2217 memset(id, 0, sizeof(*id)); 2218 id->format_protocol = SCSI_PROTO_FC; 2219 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 2220 return (sizeof(*id)); 2221 } 2222 case CTL_PORT_ISCSI: 2223 { 2224 struct scsi_transportid_iscsi_port *id = 2225 (struct scsi_transportid_iscsi_port *)buf; 2226 if (port->wwpn_iid[iid].name == NULL) 2227 return (0); 2228 memset(id, 0, 256); 2229 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 2230 SCSI_PROTO_ISCSI; 2231 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 2232 len = roundup2(min(len, 252), 4); 2233 scsi_ulto2b(len, id->additional_length); 2234 return (sizeof(*id) + len); 2235 } 2236 case CTL_PORT_SAS: 2237 { 2238 struct scsi_transportid_sas *id = 2239 (struct scsi_transportid_sas *)buf; 2240 if (port->wwpn_iid[iid].wwpn == 0) 2241 return (0); 2242 memset(id, 0, sizeof(*id)); 2243 id->format_protocol = SCSI_PROTO_SAS; 2244 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 2245 return (sizeof(*id)); 2246 } 2247 default: 2248 { 2249 struct scsi_transportid_spi *id = 2250 (struct scsi_transportid_spi *)buf; 2251 memset(id, 0, sizeof(*id)); 2252 id->format_protocol = SCSI_PROTO_SPI; 2253 scsi_ulto2b(iid, id->scsi_addr); 2254 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 2255 return (sizeof(*id)); 2256 } 2257 } 2258 } 2259 2260 /* 2261 * Serialize a command that went down the "wrong" side, and so was sent to 2262 * this controller for execution. The logic is a little different than the 2263 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 2264 * sent back to the other side, but in the success case, we execute the 2265 * command on this side (XFER mode) or tell the other side to execute it 2266 * (SER_ONLY mode). 2267 */ 2268 static void 2269 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 2270 { 2271 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2272 struct ctl_port *port = CTL_PORT(ctsio); 2273 union ctl_ha_msg msg_info; 2274 struct ctl_lun *lun; 2275 const struct ctl_cmd_entry *entry; 2276 union ctl_io *bio; 2277 uint32_t targ_lun; 2278 2279 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 2280 2281 /* Make sure that we know about this port. */ 2282 if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) { 2283 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2284 /*retry_count*/ 1); 2285 goto badjuju; 2286 } 2287 2288 /* Make sure that we know about this LUN. */ 2289 mtx_lock(&softc->ctl_lock); 2290 if (targ_lun >= ctl_max_luns || 2291 (lun = softc->ctl_luns[targ_lun]) == NULL) { 2292 mtx_unlock(&softc->ctl_lock); 2293 2294 /* 2295 * The other node would not send this request to us unless 2296 * received announce that we are primary node for this LUN. 
* If this LUN does not exist now, it is probably the result of 2298 * a race, so respond to the initiator in the most opaque way. 2299 */ 2300 ctl_set_busy(ctsio); 2301 goto badjuju; 2302 } 2303 mtx_lock(&lun->lun_lock); 2304 mtx_unlock(&softc->ctl_lock); 2305 2306 /* 2307 * If the LUN is invalid, pretend that it doesn't exist. 2308 * It will go away as soon as all pending I/Os have completed. 2309 */ 2310 if (lun->flags & CTL_LUN_DISABLED) { 2311 mtx_unlock(&lun->lun_lock); 2312 ctl_set_busy(ctsio); 2313 goto badjuju; 2314 } 2315 2316 entry = ctl_get_cmd_entry(ctsio, NULL); 2317 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 2318 mtx_unlock(&lun->lun_lock); 2319 goto badjuju; 2320 } 2321 2322 CTL_LUN(ctsio) = lun; 2323 CTL_BACKEND_LUN(ctsio) = lun->be_lun; 2324 2325 /* 2326 * Every I/O goes into the OOA queue for a 2327 * particular LUN, and stays there until completion. 2328 */ 2329 #ifdef CTL_TIME_IO 2330 if (TAILQ_EMPTY(&lun->ooa_queue)) 2331 lun->idle_time += getsbinuptime() - lun->last_busy; 2332 #endif 2333 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2334 2335 bio = (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links); 2336 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) { 2337 case CTL_ACTION_BLOCK: 2338 ctsio->io_hdr.blocker = bio; 2339 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, 2340 blocked_links); 2341 mtx_unlock(&lun->lun_lock); 2342 break; 2343 case CTL_ACTION_PASS: 2344 case CTL_ACTION_SKIP: 2345 if (softc->ha_mode == CTL_HA_MODE_XFER) { 2346 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 2347 ctl_enqueue_rtr((union ctl_io *)ctsio); 2348 mtx_unlock(&lun->lun_lock); 2349 } else { 2350 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 2351 mtx_unlock(&lun->lun_lock); 2352 2353 /* send msg back to other side */ 2354 msg_info.hdr.original_sc = ctsio->io_hdr.remote_io; 2355 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 2356 msg_info.hdr.msg_type = CTL_MSG_R2R; 2357 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2358 sizeof(msg_info.hdr), M_WAITOK); 2359 } 2360 break; 2361 case CTL_ACTION_OVERLAP: 2362 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2363 mtx_unlock(&lun->lun_lock); 2364 ctl_set_overlapped_cmd(ctsio); 2365 goto badjuju; 2366 case CTL_ACTION_OVERLAP_TAG: 2367 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2368 mtx_unlock(&lun->lun_lock); 2369 ctl_set_overlapped_tag(ctsio, ctsio->tag_num); 2370 goto badjuju; 2371 case CTL_ACTION_ERROR: 2372 default: 2373 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2374 mtx_unlock(&lun->lun_lock); 2375 2376 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2377 /*retry_count*/ 0); 2378 badjuju: 2379 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 2380 msg_info.hdr.original_sc = ctsio->io_hdr.remote_io; 2381 msg_info.hdr.serializing_sc = NULL; 2382 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 2383 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2384 sizeof(msg_info.scsi), M_WAITOK); 2385 ctl_free_io((union ctl_io *)ctsio); 2386 break; 2387 } 2388 } 2389 2390 /* 2391 * Fill in the OOA (Order Of Arrival) dump entries for a single LUN.
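 * Called with the CTL lock held from the CTL_GET_OOA ioctl handler; the
 * LUN lock is taken while the OOA queue is walked, and entries beyond
 * ooa_hdr->alloc_num are only counted, not copied.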
2392 */ 2393 static void 2394 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 2395 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 2396 { 2397 union ctl_io *io; 2398 2399 mtx_lock(&lun->lun_lock); 2400 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); 2401 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2402 ooa_links)) { 2403 struct ctl_ooa_entry *entry; 2404 2405 /* 2406 * If we've got more than we can fit, just count the 2407 * remaining entries. 2408 */ 2409 if (*cur_fill_num >= ooa_hdr->alloc_num) 2410 continue; 2411 2412 entry = &kern_entries[*cur_fill_num]; 2413 2414 entry->tag_num = io->scsiio.tag_num; 2415 entry->lun_num = lun->lun; 2416 #ifdef CTL_TIME_IO 2417 entry->start_bt = io->io_hdr.start_bt; 2418 #endif 2419 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 2420 entry->cdb_len = io->scsiio.cdb_len; 2421 if (io->io_hdr.blocker != NULL) 2422 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 2423 2424 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 2425 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 2426 2427 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2428 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2429 2430 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2431 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2432 2433 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2434 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2435 2436 if (io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) 2437 entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_QUEUED; 2438 2439 if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) 2440 entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_SENT; 2441 } 2442 mtx_unlock(&lun->lun_lock); 2443 } 2444 2445 /* 2446 * Escape characters that are illegal or not recommended in XML. 2447 */ 2448 int 2449 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 2450 { 2451 char *end = str + size; 2452 int retval; 2453 2454 retval = 0; 2455 2456 for (; *str && str < end; str++) { 2457 switch (*str) { 2458 case '&': 2459 retval = sbuf_printf(sb, "&amp;"); 2460 break; 2461 case '>': 2462 retval = sbuf_printf(sb, "&gt;"); 2463 break; 2464 case '<': 2465 retval = sbuf_printf(sb, "&lt;"); 2466 break; 2467 default: 2468 retval = sbuf_putc(sb, *str); 2469 break; 2470 } 2471 2472 if (retval != 0) 2473 break; 2474 } 2475 2476 return (retval); 2477 } 2478 2479 static void 2480 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2481 { 2482 struct scsi_vpd_id_descriptor *desc; 2483 int i; 2484 2485 if (id == NULL || id->len < 4) 2486 return; 2487 desc = (struct scsi_vpd_id_descriptor *)id->data; 2488 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2489 case SVPD_ID_TYPE_T10: 2490 sbuf_printf(sb, "t10."); 2491 break; 2492 case SVPD_ID_TYPE_EUI64: 2493 sbuf_printf(sb, "eui."); 2494 break; 2495 case SVPD_ID_TYPE_NAA: 2496 sbuf_printf(sb, "naa."); 2497 break; 2498 case SVPD_ID_TYPE_SCSI_NAME: 2499 break; 2500 } 2501 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2502 case SVPD_ID_CODESET_BINARY: 2503 for (i = 0; i < desc->length; i++) 2504 sbuf_printf(sb, "%02x", desc->identifier[i]); 2505 break; 2506 case SVPD_ID_CODESET_ASCII: 2507 sbuf_printf(sb, "%.*s", (int)desc->length, 2508 (char *)desc->identifier); 2509 break; 2510 case SVPD_ID_CODESET_UTF8: 2511 sbuf_printf(sb, "%s", (char *)desc->identifier); 2512 break; 2513 } 2514 } 2515 2516 static int 2517 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2518 struct thread *td) 2519 { 2520 struct ctl_softc *softc = dev->si_drv1; 2521 struct ctl_port *port; 2522 struct ctl_lun *lun; 2523 int retval; 2524 2525 retval = 0;
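/*
 * Dispatch on the ioctl command; frontend- and backend-specific requests
 * (e.g. CTL_LUN_REQ, CTL_PORT_REQ, CTL_ISCSI) are forwarded to the
 * matching driver's ioctl handler further down in this switch.
 */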
2526 2527 switch (cmd) { 2528 case CTL_IO: 2529 retval = ctl_ioctl_io(dev, cmd, addr, flag, td); 2530 break; 2531 case CTL_ENABLE_PORT: 2532 case CTL_DISABLE_PORT: 2533 case CTL_SET_PORT_WWNS: { 2534 struct ctl_port *port; 2535 struct ctl_port_entry *entry; 2536 2537 entry = (struct ctl_port_entry *)addr; 2538 2539 mtx_lock(&softc->ctl_lock); 2540 STAILQ_FOREACH(port, &softc->port_list, links) { 2541 int action, done; 2542 2543 if (port->targ_port < softc->port_min || 2544 port->targ_port >= softc->port_max) 2545 continue; 2546 2547 action = 0; 2548 done = 0; 2549 if ((entry->port_type == CTL_PORT_NONE) 2550 && (entry->targ_port == port->targ_port)) { 2551 /* 2552 * If the user only wants to enable or 2553 * disable or set WWNs on a specific port, 2554 * do the operation and we're done. 2555 */ 2556 action = 1; 2557 done = 1; 2558 } else if (entry->port_type & port->port_type) { 2559 /* 2560 * Compare the user's type mask with the 2561 * particular frontend type to see if we 2562 * have a match. 2563 */ 2564 action = 1; 2565 done = 0; 2566 2567 /* 2568 * Make sure the user isn't trying to set 2569 * WWNs on multiple ports at the same time. 2570 */ 2571 if (cmd == CTL_SET_PORT_WWNS) { 2572 printf("%s: Can't set WWNs on " 2573 "multiple ports\n", __func__); 2574 retval = EINVAL; 2575 break; 2576 } 2577 } 2578 if (action == 0) 2579 continue; 2580 2581 /* 2582 * XXX KDM we have to drop the lock here, because 2583 * the online/offline operations can potentially 2584 * block. We need to reference count the frontends 2585 * so they can't go away, 2586 */ 2587 if (cmd == CTL_ENABLE_PORT) { 2588 mtx_unlock(&softc->ctl_lock); 2589 ctl_port_online(port); 2590 mtx_lock(&softc->ctl_lock); 2591 } else if (cmd == CTL_DISABLE_PORT) { 2592 mtx_unlock(&softc->ctl_lock); 2593 ctl_port_offline(port); 2594 mtx_lock(&softc->ctl_lock); 2595 } else if (cmd == CTL_SET_PORT_WWNS) { 2596 ctl_port_set_wwns(port, 2597 (entry->flags & CTL_PORT_WWNN_VALID) ? 2598 1 : 0, entry->wwnn, 2599 (entry->flags & CTL_PORT_WWPN_VALID) ? 
2600 1 : 0, entry->wwpn); 2601 } 2602 if (done != 0) 2603 break; 2604 } 2605 mtx_unlock(&softc->ctl_lock); 2606 break; 2607 } 2608 case CTL_GET_OOA: { 2609 struct ctl_ooa *ooa_hdr; 2610 struct ctl_ooa_entry *entries; 2611 uint32_t cur_fill_num; 2612 2613 ooa_hdr = (struct ctl_ooa *)addr; 2614 2615 if ((ooa_hdr->alloc_len == 0) 2616 || (ooa_hdr->alloc_num == 0)) { 2617 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2618 "must be non-zero\n", __func__, 2619 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2620 retval = EINVAL; 2621 break; 2622 } 2623 2624 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2625 sizeof(struct ctl_ooa_entry))) { 2626 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2627 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2628 __func__, ooa_hdr->alloc_len, 2629 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2630 retval = EINVAL; 2631 break; 2632 } 2633 2634 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2635 if (entries == NULL) { 2636 printf("%s: could not allocate %d bytes for OOA " 2637 "dump\n", __func__, ooa_hdr->alloc_len); 2638 retval = ENOMEM; 2639 break; 2640 } 2641 2642 mtx_lock(&softc->ctl_lock); 2643 if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 && 2644 (ooa_hdr->lun_num >= ctl_max_luns || 2645 softc->ctl_luns[ooa_hdr->lun_num] == NULL)) { 2646 mtx_unlock(&softc->ctl_lock); 2647 free(entries, M_CTL); 2648 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2649 __func__, (uintmax_t)ooa_hdr->lun_num); 2650 retval = EINVAL; 2651 break; 2652 } 2653 2654 cur_fill_num = 0; 2655 2656 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2657 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2658 ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2659 ooa_hdr, entries); 2660 } 2661 } else { 2662 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2663 ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, 2664 entries); 2665 } 2666 mtx_unlock(&softc->ctl_lock); 2667 2668 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2669 ooa_hdr->fill_len = ooa_hdr->fill_num * 2670 sizeof(struct ctl_ooa_entry); 2671 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2672 if (retval != 0) { 2673 printf("%s: error copying out %d bytes for OOA dump\n", 2674 __func__, ooa_hdr->fill_len); 2675 } 2676 2677 getbinuptime(&ooa_hdr->cur_bt); 2678 2679 if (cur_fill_num > ooa_hdr->alloc_num) { 2680 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2681 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2682 } else { 2683 ooa_hdr->dropped_num = 0; 2684 ooa_hdr->status = CTL_OOA_OK; 2685 } 2686 2687 free(entries, M_CTL); 2688 break; 2689 } 2690 case CTL_DELAY_IO: { 2691 struct ctl_io_delay_info *delay_info; 2692 2693 delay_info = (struct ctl_io_delay_info *)addr; 2694 2695 #ifdef CTL_IO_DELAY 2696 mtx_lock(&softc->ctl_lock); 2697 if (delay_info->lun_id >= ctl_max_luns || 2698 (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) { 2699 mtx_unlock(&softc->ctl_lock); 2700 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2701 break; 2702 } 2703 mtx_lock(&lun->lun_lock); 2704 mtx_unlock(&softc->ctl_lock); 2705 delay_info->status = CTL_DELAY_STATUS_OK; 2706 switch (delay_info->delay_type) { 2707 case CTL_DELAY_TYPE_CONT: 2708 case CTL_DELAY_TYPE_ONESHOT: 2709 break; 2710 default: 2711 delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE; 2712 break; 2713 } 2714 switch (delay_info->delay_loc) { 2715 case CTL_DELAY_LOC_DATAMOVE: 2716 lun->delay_info.datamove_type = delay_info->delay_type; 2717 lun->delay_info.datamove_delay = delay_info->delay_secs; 2718 break; 2719 case CTL_DELAY_LOC_DONE: 2720 
lun->delay_info.done_type = delay_info->delay_type; 2721 lun->delay_info.done_delay = delay_info->delay_secs; 2722 break; 2723 default: 2724 delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; 2725 break; 2726 } 2727 mtx_unlock(&lun->lun_lock); 2728 #else 2729 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2730 #endif /* CTL_IO_DELAY */ 2731 break; 2732 } 2733 case CTL_ERROR_INJECT: { 2734 struct ctl_error_desc *err_desc, *new_err_desc; 2735 2736 err_desc = (struct ctl_error_desc *)addr; 2737 2738 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2739 M_WAITOK | M_ZERO); 2740 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2741 2742 mtx_lock(&softc->ctl_lock); 2743 if (err_desc->lun_id >= ctl_max_luns || 2744 (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) { 2745 mtx_unlock(&softc->ctl_lock); 2746 free(new_err_desc, M_CTL); 2747 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2748 __func__, (uintmax_t)err_desc->lun_id); 2749 retval = EINVAL; 2750 break; 2751 } 2752 mtx_lock(&lun->lun_lock); 2753 mtx_unlock(&softc->ctl_lock); 2754 2755 /* 2756 * We could do some checking here to verify the validity 2757 * of the request, but given the complexity of error 2758 * injection requests, the checking logic would be fairly 2759 * complex. 2760 * 2761 * For now, if the request is invalid, it just won't get 2762 * executed and might get deleted. 2763 */ 2764 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2765 2766 /* 2767 * XXX KDM check to make sure the serial number is unique, 2768 * in case we somehow manage to wrap. That shouldn't 2769 * happen for a very long time, but it's the right thing to 2770 * do. 2771 */ 2772 new_err_desc->serial = lun->error_serial; 2773 err_desc->serial = lun->error_serial; 2774 lun->error_serial++; 2775 2776 mtx_unlock(&lun->lun_lock); 2777 break; 2778 } 2779 case CTL_ERROR_INJECT_DELETE: { 2780 struct ctl_error_desc *delete_desc, *desc, *desc2; 2781 int delete_done; 2782 2783 delete_desc = (struct ctl_error_desc *)addr; 2784 delete_done = 0; 2785 2786 mtx_lock(&softc->ctl_lock); 2787 if (delete_desc->lun_id >= ctl_max_luns || 2788 (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) { 2789 mtx_unlock(&softc->ctl_lock); 2790 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2791 __func__, (uintmax_t)delete_desc->lun_id); 2792 retval = EINVAL; 2793 break; 2794 } 2795 mtx_lock(&lun->lun_lock); 2796 mtx_unlock(&softc->ctl_lock); 2797 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2798 if (desc->serial != delete_desc->serial) 2799 continue; 2800 2801 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2802 links); 2803 free(desc, M_CTL); 2804 delete_done = 1; 2805 } 2806 mtx_unlock(&lun->lun_lock); 2807 if (delete_done == 0) { 2808 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2809 "error serial %ju on LUN %u\n", __func__, 2810 delete_desc->serial, delete_desc->lun_id); 2811 retval = EINVAL; 2812 break; 2813 } 2814 break; 2815 } 2816 case CTL_DUMP_STRUCTS: { 2817 int j, k; 2818 struct ctl_port *port; 2819 struct ctl_frontend *fe; 2820 2821 mtx_lock(&softc->ctl_lock); 2822 printf("CTL Persistent Reservation information start:\n"); 2823 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2824 mtx_lock(&lun->lun_lock); 2825 if ((lun->flags & CTL_LUN_DISABLED) != 0) { 2826 mtx_unlock(&lun->lun_lock); 2827 continue; 2828 } 2829 2830 for (j = 0; j < ctl_max_ports; j++) { 2831 if (lun->pr_keys[j] == NULL) 2832 continue; 2833 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2834 if (lun->pr_keys[j][k] == 0) 2835 continue; 2836 
printf(" LUN %ju port %d iid %d key " 2837 "%#jx\n", lun->lun, j, k, 2838 (uintmax_t)lun->pr_keys[j][k]); 2839 } 2840 } 2841 mtx_unlock(&lun->lun_lock); 2842 } 2843 printf("CTL Persistent Reservation information end\n"); 2844 printf("CTL Ports:\n"); 2845 STAILQ_FOREACH(port, &softc->port_list, links) { 2846 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2847 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2848 port->frontend->name, port->port_type, 2849 port->physical_port, port->virtual_port, 2850 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2851 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2852 if (port->wwpn_iid[j].in_use == 0 && 2853 port->wwpn_iid[j].wwpn == 0 && 2854 port->wwpn_iid[j].name == NULL) 2855 continue; 2856 2857 printf(" iid %u use %d WWPN %#jx '%s'\n", 2858 j, port->wwpn_iid[j].in_use, 2859 (uintmax_t)port->wwpn_iid[j].wwpn, 2860 port->wwpn_iid[j].name); 2861 } 2862 } 2863 printf("CTL Port information end\n"); 2864 mtx_unlock(&softc->ctl_lock); 2865 /* 2866 * XXX KDM calling this without a lock. We'd likely want 2867 * to drop the lock before calling the frontend's dump 2868 * routine anyway. 2869 */ 2870 printf("CTL Frontends:\n"); 2871 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2872 printf(" Frontend '%s'\n", fe->name); 2873 if (fe->fe_dump != NULL) 2874 fe->fe_dump(); 2875 } 2876 printf("CTL Frontend information end\n"); 2877 break; 2878 } 2879 case CTL_LUN_REQ: { 2880 struct ctl_lun_req *lun_req; 2881 struct ctl_backend_driver *backend; 2882 void *packed; 2883 nvlist_t *tmp_args_nvl; 2884 size_t packed_len; 2885 2886 lun_req = (struct ctl_lun_req *)addr; 2887 tmp_args_nvl = lun_req->args_nvl; 2888 2889 backend = ctl_backend_find(lun_req->backend); 2890 if (backend == NULL) { 2891 lun_req->status = CTL_LUN_ERROR; 2892 snprintf(lun_req->error_str, 2893 sizeof(lun_req->error_str), 2894 "Backend \"%s\" not found.", 2895 lun_req->backend); 2896 break; 2897 } 2898 2899 if (lun_req->args != NULL) { 2900 packed = malloc(lun_req->args_len, M_CTL, M_WAITOK); 2901 if (copyin(lun_req->args, packed, lun_req->args_len) != 0) { 2902 free(packed, M_CTL); 2903 lun_req->status = CTL_LUN_ERROR; 2904 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 2905 "Cannot copyin args."); 2906 break; 2907 } 2908 lun_req->args_nvl = nvlist_unpack(packed, 2909 lun_req->args_len, 0); 2910 free(packed, M_CTL); 2911 2912 if (lun_req->args_nvl == NULL) { 2913 lun_req->status = CTL_LUN_ERROR; 2914 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 2915 "Cannot unpack args nvlist."); 2916 break; 2917 } 2918 } else 2919 lun_req->args_nvl = nvlist_create(0); 2920 2921 retval = backend->ioctl(dev, cmd, addr, flag, td); 2922 nvlist_destroy(lun_req->args_nvl); 2923 lun_req->args_nvl = tmp_args_nvl; 2924 2925 if (lun_req->result_nvl != NULL) { 2926 if (lun_req->result != NULL) { 2927 packed = nvlist_pack(lun_req->result_nvl, 2928 &packed_len); 2929 if (packed == NULL) { 2930 lun_req->status = CTL_LUN_ERROR; 2931 snprintf(lun_req->error_str, 2932 sizeof(lun_req->error_str), 2933 "Cannot pack result nvlist."); 2934 break; 2935 } 2936 2937 if (packed_len > lun_req->result_len) { 2938 lun_req->status = CTL_LUN_ERROR; 2939 snprintf(lun_req->error_str, 2940 sizeof(lun_req->error_str), 2941 "Result nvlist too large."); 2942 free(packed, M_NVLIST); 2943 break; 2944 } 2945 2946 if (copyout(packed, lun_req->result, packed_len)) { 2947 lun_req->status = CTL_LUN_ERROR; 2948 snprintf(lun_req->error_str, 2949 sizeof(lun_req->error_str), 2950 "Cannot copyout() the result."); 2951 
free(packed, M_NVLIST); 2952 break; 2953 } 2954 2955 lun_req->result_len = packed_len; 2956 free(packed, M_NVLIST); 2957 } 2958 2959 nvlist_destroy(lun_req->result_nvl); 2960 } 2961 break; 2962 } 2963 case CTL_LUN_LIST: { 2964 struct sbuf *sb; 2965 struct ctl_lun_list *list; 2966 const char *name, *value; 2967 void *cookie; 2968 int type; 2969 2970 list = (struct ctl_lun_list *)addr; 2971 2972 /* 2973 * Allocate a fixed length sbuf here, based on the length 2974 * of the user's buffer. We could allocate an auto-extending 2975 * buffer, and then tell the user how much larger our 2976 * amount of data is than his buffer, but that presents 2977 * some problems: 2978 * 2979 * 1. The sbuf(9) routines use a blocking malloc, and so 2980 * we can't hold a lock while calling them with an 2981 * auto-extending buffer. 2982 * 2983 * 2. There is not currently a LUN reference counting 2984 * mechanism, outside of outstanding transactions on 2985 * the LUN's OOA queue. So a LUN could go away on us 2986 * while we're getting the LUN number, backend-specific 2987 * information, etc. Thus, given the way things 2988 * currently work, we need to hold the CTL lock while 2989 * grabbing LUN information. 2990 * 2991 * So, from the user's standpoint, the best thing to do is 2992 * allocate what he thinks is a reasonable buffer length, 2993 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 2994 * double the buffer length and try again. (And repeat 2995 * that until he succeeds.) 2996 */ 2997 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 2998 if (sb == NULL) { 2999 list->status = CTL_LUN_LIST_ERROR; 3000 snprintf(list->error_str, sizeof(list->error_str), 3001 "Unable to allocate %d bytes for LUN list", 3002 list->alloc_len); 3003 break; 3004 } 3005 3006 sbuf_printf(sb, "<ctllunlist>\n"); 3007 3008 mtx_lock(&softc->ctl_lock); 3009 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3010 mtx_lock(&lun->lun_lock); 3011 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3012 (uintmax_t)lun->lun); 3013 3014 /* 3015 * Bail out as soon as we see that we've overfilled 3016 * the buffer. 3017 */ 3018 if (retval != 0) 3019 break; 3020 3021 retval = sbuf_printf(sb, "\t<backend_type>%s" 3022 "</backend_type>\n", 3023 (lun->backend == NULL) ? "none" : 3024 lun->backend->name); 3025 3026 if (retval != 0) 3027 break; 3028 3029 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3030 lun->be_lun->lun_type); 3031 3032 if (retval != 0) 3033 break; 3034 3035 if (lun->backend == NULL) { 3036 retval = sbuf_printf(sb, "</lun>\n"); 3037 if (retval != 0) 3038 break; 3039 continue; 3040 } 3041 3042 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3043 (lun->be_lun->maxlba > 0) ? 
3044 lun->be_lun->maxlba + 1 : 0); 3045 3046 if (retval != 0) 3047 break; 3048 3049 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3050 lun->be_lun->blocksize); 3051 3052 if (retval != 0) 3053 break; 3054 3055 retval = sbuf_printf(sb, "\t<serial_number>"); 3056 3057 if (retval != 0) 3058 break; 3059 3060 retval = ctl_sbuf_printf_esc(sb, 3061 lun->be_lun->serial_num, 3062 sizeof(lun->be_lun->serial_num)); 3063 3064 if (retval != 0) 3065 break; 3066 3067 retval = sbuf_printf(sb, "</serial_number>\n"); 3068 3069 if (retval != 0) 3070 break; 3071 3072 retval = sbuf_printf(sb, "\t<device_id>"); 3073 3074 if (retval != 0) 3075 break; 3076 3077 retval = ctl_sbuf_printf_esc(sb, 3078 lun->be_lun->device_id, 3079 sizeof(lun->be_lun->device_id)); 3080 3081 if (retval != 0) 3082 break; 3083 3084 retval = sbuf_printf(sb, "</device_id>\n"); 3085 3086 if (retval != 0) 3087 break; 3088 3089 if (lun->backend->lun_info != NULL) { 3090 retval = lun->backend->lun_info(lun->be_lun, sb); 3091 if (retval != 0) 3092 break; 3093 } 3094 3095 cookie = NULL; 3096 while ((name = nvlist_next(lun->be_lun->options, &type, 3097 &cookie)) != NULL) { 3098 sbuf_printf(sb, "\t<%s>", name); 3099 3100 if (type == NV_TYPE_STRING) { 3101 value = dnvlist_get_string( 3102 lun->be_lun->options, name, NULL); 3103 if (value != NULL) 3104 sbuf_printf(sb, "%s", value); 3105 } 3106 3107 sbuf_printf(sb, "</%s>\n", name); 3108 } 3109 3110 retval = sbuf_printf(sb, "</lun>\n"); 3111 3112 if (retval != 0) 3113 break; 3114 mtx_unlock(&lun->lun_lock); 3115 } 3116 if (lun != NULL) 3117 mtx_unlock(&lun->lun_lock); 3118 mtx_unlock(&softc->ctl_lock); 3119 3120 if ((retval != 0) 3121 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3122 retval = 0; 3123 sbuf_delete(sb); 3124 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3125 snprintf(list->error_str, sizeof(list->error_str), 3126 "Out of space, %d bytes is too small", 3127 list->alloc_len); 3128 break; 3129 } 3130 3131 sbuf_finish(sb); 3132 3133 retval = copyout(sbuf_data(sb), list->lun_xml, 3134 sbuf_len(sb) + 1); 3135 3136 list->fill_len = sbuf_len(sb) + 1; 3137 list->status = CTL_LUN_LIST_OK; 3138 sbuf_delete(sb); 3139 break; 3140 } 3141 case CTL_ISCSI: { 3142 struct ctl_iscsi *ci; 3143 struct ctl_frontend *fe; 3144 3145 ci = (struct ctl_iscsi *)addr; 3146 3147 fe = ctl_frontend_find("iscsi"); 3148 if (fe == NULL) { 3149 ci->status = CTL_ISCSI_ERROR; 3150 snprintf(ci->error_str, sizeof(ci->error_str), 3151 "Frontend \"iscsi\" not found."); 3152 break; 3153 } 3154 3155 retval = fe->ioctl(dev, cmd, addr, flag, td); 3156 break; 3157 } 3158 case CTL_PORT_REQ: { 3159 struct ctl_req *req; 3160 struct ctl_frontend *fe; 3161 void *packed; 3162 nvlist_t *tmp_args_nvl; 3163 size_t packed_len; 3164 3165 req = (struct ctl_req *)addr; 3166 tmp_args_nvl = req->args_nvl; 3167 3168 fe = ctl_frontend_find(req->driver); 3169 if (fe == NULL) { 3170 req->status = CTL_LUN_ERROR; 3171 snprintf(req->error_str, sizeof(req->error_str), 3172 "Frontend \"%s\" not found.", req->driver); 3173 break; 3174 } 3175 3176 if (req->args != NULL) { 3177 packed = malloc(req->args_len, M_CTL, M_WAITOK); 3178 if (copyin(req->args, packed, req->args_len) != 0) { 3179 free(packed, M_CTL); 3180 req->status = CTL_LUN_ERROR; 3181 snprintf(req->error_str, sizeof(req->error_str), 3182 "Cannot copyin args."); 3183 break; 3184 } 3185 req->args_nvl = nvlist_unpack(packed, 3186 req->args_len, 0); 3187 free(packed, M_CTL); 3188 3189 if (req->args_nvl == NULL) { 3190 req->status = CTL_LUN_ERROR; 3191 snprintf(req->error_str, 
sizeof(req->error_str), 3192 "Cannot unpack args nvlist."); 3193 break; 3194 } 3195 } else 3196 req->args_nvl = nvlist_create(0); 3197 3198 if (fe->ioctl) 3199 retval = fe->ioctl(dev, cmd, addr, flag, td); 3200 else 3201 retval = ENODEV; 3202 3203 nvlist_destroy(req->args_nvl); 3204 req->args_nvl = tmp_args_nvl; 3205 3206 if (req->result_nvl != NULL) { 3207 if (req->result != NULL) { 3208 packed = nvlist_pack(req->result_nvl, 3209 &packed_len); 3210 if (packed == NULL) { 3211 req->status = CTL_LUN_ERROR; 3212 snprintf(req->error_str, 3213 sizeof(req->error_str), 3214 "Cannot pack result nvlist."); 3215 break; 3216 } 3217 3218 if (packed_len > req->result_len) { 3219 req->status = CTL_LUN_ERROR; 3220 snprintf(req->error_str, 3221 sizeof(req->error_str), 3222 "Result nvlist too large."); 3223 free(packed, M_NVLIST); 3224 break; 3225 } 3226 3227 if (copyout(packed, req->result, packed_len)) { 3228 req->status = CTL_LUN_ERROR; 3229 snprintf(req->error_str, 3230 sizeof(req->error_str), 3231 "Cannot copyout() the result."); 3232 free(packed, M_NVLIST); 3233 break; 3234 } 3235 3236 req->result_len = packed_len; 3237 free(packed, M_NVLIST); 3238 } 3239 3240 nvlist_destroy(req->result_nvl); 3241 } 3242 break; 3243 } 3244 case CTL_PORT_LIST: { 3245 struct sbuf *sb; 3246 struct ctl_port *port; 3247 struct ctl_lun_list *list; 3248 const char *name, *value; 3249 void *cookie; 3250 int j, type; 3251 uint32_t plun; 3252 3253 list = (struct ctl_lun_list *)addr; 3254 3255 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3256 if (sb == NULL) { 3257 list->status = CTL_LUN_LIST_ERROR; 3258 snprintf(list->error_str, sizeof(list->error_str), 3259 "Unable to allocate %d bytes for LUN list", 3260 list->alloc_len); 3261 break; 3262 } 3263 3264 sbuf_printf(sb, "<ctlportlist>\n"); 3265 3266 mtx_lock(&softc->ctl_lock); 3267 STAILQ_FOREACH(port, &softc->port_list, links) { 3268 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3269 (uintmax_t)port->targ_port); 3270 3271 /* 3272 * Bail out as soon as we see that we've overfilled 3273 * the buffer. 3274 */ 3275 if (retval != 0) 3276 break; 3277 3278 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3279 "</frontend_type>\n", port->frontend->name); 3280 if (retval != 0) 3281 break; 3282 3283 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3284 port->port_type); 3285 if (retval != 0) 3286 break; 3287 3288 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3289 (port->status & CTL_PORT_STATUS_ONLINE) ? 
"YES" : "NO"); 3290 if (retval != 0) 3291 break; 3292 3293 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3294 port->port_name); 3295 if (retval != 0) 3296 break; 3297 3298 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3299 port->physical_port); 3300 if (retval != 0) 3301 break; 3302 3303 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3304 port->virtual_port); 3305 if (retval != 0) 3306 break; 3307 3308 if (port->target_devid != NULL) { 3309 sbuf_printf(sb, "\t<target>"); 3310 ctl_id_sbuf(port->target_devid, sb); 3311 sbuf_printf(sb, "</target>\n"); 3312 } 3313 3314 if (port->port_devid != NULL) { 3315 sbuf_printf(sb, "\t<port>"); 3316 ctl_id_sbuf(port->port_devid, sb); 3317 sbuf_printf(sb, "</port>\n"); 3318 } 3319 3320 if (port->port_info != NULL) { 3321 retval = port->port_info(port->onoff_arg, sb); 3322 if (retval != 0) 3323 break; 3324 } 3325 3326 cookie = NULL; 3327 while ((name = nvlist_next(port->options, &type, 3328 &cookie)) != NULL) { 3329 sbuf_printf(sb, "\t<%s>", name); 3330 3331 if (type == NV_TYPE_STRING) { 3332 value = dnvlist_get_string(port->options, 3333 name, NULL); 3334 if (value != NULL) 3335 sbuf_printf(sb, "%s", value); 3336 } 3337 3338 sbuf_printf(sb, "</%s>\n", name); 3339 } 3340 3341 if (port->lun_map != NULL) { 3342 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 3343 for (j = 0; j < port->lun_map_size; j++) { 3344 plun = ctl_lun_map_from_port(port, j); 3345 if (plun == UINT32_MAX) 3346 continue; 3347 sbuf_printf(sb, 3348 "\t<lun id=\"%u\">%u</lun>\n", 3349 j, plun); 3350 } 3351 } 3352 3353 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3354 if (port->wwpn_iid[j].in_use == 0 || 3355 (port->wwpn_iid[j].wwpn == 0 && 3356 port->wwpn_iid[j].name == NULL)) 3357 continue; 3358 3359 if (port->wwpn_iid[j].name != NULL) 3360 retval = sbuf_printf(sb, 3361 "\t<initiator id=\"%u\">%s</initiator>\n", 3362 j, port->wwpn_iid[j].name); 3363 else 3364 retval = sbuf_printf(sb, 3365 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3366 j, port->wwpn_iid[j].wwpn); 3367 if (retval != 0) 3368 break; 3369 } 3370 if (retval != 0) 3371 break; 3372 3373 retval = sbuf_printf(sb, "</targ_port>\n"); 3374 if (retval != 0) 3375 break; 3376 } 3377 mtx_unlock(&softc->ctl_lock); 3378 3379 if ((retval != 0) 3380 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3381 retval = 0; 3382 sbuf_delete(sb); 3383 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3384 snprintf(list->error_str, sizeof(list->error_str), 3385 "Out of space, %d bytes is too small", 3386 list->alloc_len); 3387 break; 3388 } 3389 3390 sbuf_finish(sb); 3391 3392 retval = copyout(sbuf_data(sb), list->lun_xml, 3393 sbuf_len(sb) + 1); 3394 3395 list->fill_len = sbuf_len(sb) + 1; 3396 list->status = CTL_LUN_LIST_OK; 3397 sbuf_delete(sb); 3398 break; 3399 } 3400 case CTL_LUN_MAP: { 3401 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3402 struct ctl_port *port; 3403 3404 mtx_lock(&softc->ctl_lock); 3405 if (lm->port < softc->port_min || 3406 lm->port >= softc->port_max || 3407 (port = softc->ctl_ports[lm->port]) == NULL) { 3408 mtx_unlock(&softc->ctl_lock); 3409 return (ENXIO); 3410 } 3411 if (port->status & CTL_PORT_STATUS_ONLINE) { 3412 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3413 if (ctl_lun_map_to_port(port, lun->lun) == 3414 UINT32_MAX) 3415 continue; 3416 mtx_lock(&lun->lun_lock); 3417 ctl_est_ua_port(lun, lm->port, -1, 3418 CTL_UA_LUN_CHANGE); 3419 mtx_unlock(&lun->lun_lock); 3420 } 3421 } 3422 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3423 if 
(lm->plun != UINT32_MAX) { 3424 if (lm->lun == UINT32_MAX) 3425 retval = ctl_lun_map_unset(port, lm->plun); 3426 else if (lm->lun < ctl_max_luns && 3427 softc->ctl_luns[lm->lun] != NULL) 3428 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3429 else 3430 return (ENXIO); 3431 } else { 3432 if (lm->lun == UINT32_MAX) 3433 retval = ctl_lun_map_deinit(port); 3434 else 3435 retval = ctl_lun_map_init(port); 3436 } 3437 if (port->status & CTL_PORT_STATUS_ONLINE) 3438 ctl_isc_announce_port(port); 3439 break; 3440 } 3441 case CTL_GET_LUN_STATS: { 3442 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3443 int i; 3444 3445 /* 3446 * XXX KDM no locking here. If the LUN list changes, 3447 * things can blow up. 3448 */ 3449 i = 0; 3450 stats->status = CTL_SS_OK; 3451 stats->fill_len = 0; 3452 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3453 if (lun->lun < stats->first_item) 3454 continue; 3455 if (stats->fill_len + sizeof(lun->stats) > 3456 stats->alloc_len) { 3457 stats->status = CTL_SS_NEED_MORE_SPACE; 3458 break; 3459 } 3460 retval = copyout(&lun->stats, &stats->stats[i++], 3461 sizeof(lun->stats)); 3462 if (retval != 0) 3463 break; 3464 stats->fill_len += sizeof(lun->stats); 3465 } 3466 stats->num_items = softc->num_luns; 3467 stats->flags = CTL_STATS_FLAG_NONE; 3468 #ifdef CTL_TIME_IO 3469 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3470 #endif 3471 getnanouptime(&stats->timestamp); 3472 break; 3473 } 3474 case CTL_GET_PORT_STATS: { 3475 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3476 int i; 3477 3478 /* 3479 * XXX KDM no locking here. If the LUN list changes, 3480 * things can blow up. 3481 */ 3482 i = 0; 3483 stats->status = CTL_SS_OK; 3484 stats->fill_len = 0; 3485 STAILQ_FOREACH(port, &softc->port_list, links) { 3486 if (port->targ_port < stats->first_item) 3487 continue; 3488 if (stats->fill_len + sizeof(port->stats) > 3489 stats->alloc_len) { 3490 stats->status = CTL_SS_NEED_MORE_SPACE; 3491 break; 3492 } 3493 retval = copyout(&port->stats, &stats->stats[i++], 3494 sizeof(port->stats)); 3495 if (retval != 0) 3496 break; 3497 stats->fill_len += sizeof(port->stats); 3498 } 3499 stats->num_items = softc->num_ports; 3500 stats->flags = CTL_STATS_FLAG_NONE; 3501 #ifdef CTL_TIME_IO 3502 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3503 #endif 3504 getnanouptime(&stats->timestamp); 3505 break; 3506 } 3507 default: { 3508 /* XXX KDM should we fix this? */ 3509 #if 0 3510 struct ctl_backend_driver *backend; 3511 unsigned int type; 3512 int found; 3513 3514 found = 0; 3515 3516 /* 3517 * We encode the backend type as the ioctl type for backend 3518 * ioctls. So parse it out here, and then search for a 3519 * backend of this type. 
3520 */ 3521 type = _IOC_TYPE(cmd); 3522 3523 STAILQ_FOREACH(backend, &softc->be_list, links) { 3524 if (backend->type == type) { 3525 found = 1; 3526 break; 3527 } 3528 } 3529 if (found == 0) { 3530 printf("ctl: unknown ioctl command %#lx or backend " 3531 "%d\n", cmd, type); 3532 retval = EINVAL; 3533 break; 3534 } 3535 retval = backend->ioctl(dev, cmd, addr, flag, td); 3536 #endif 3537 retval = ENOTTY; 3538 break; 3539 } 3540 } 3541 return (retval); 3542 } 3543 3544 uint32_t 3545 ctl_get_initindex(struct ctl_nexus *nexus) 3546 { 3547 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3548 } 3549 3550 int 3551 ctl_lun_map_init(struct ctl_port *port) 3552 { 3553 struct ctl_softc *softc = port->ctl_softc; 3554 struct ctl_lun *lun; 3555 int size = ctl_lun_map_size; 3556 uint32_t i; 3557 3558 if (port->lun_map == NULL || port->lun_map_size < size) { 3559 port->lun_map_size = 0; 3560 free(port->lun_map, M_CTL); 3561 port->lun_map = malloc(size * sizeof(uint32_t), 3562 M_CTL, M_NOWAIT); 3563 } 3564 if (port->lun_map == NULL) 3565 return (ENOMEM); 3566 for (i = 0; i < size; i++) 3567 port->lun_map[i] = UINT32_MAX; 3568 port->lun_map_size = size; 3569 if (port->status & CTL_PORT_STATUS_ONLINE) { 3570 if (port->lun_disable != NULL) { 3571 STAILQ_FOREACH(lun, &softc->lun_list, links) 3572 port->lun_disable(port->targ_lun_arg, lun->lun); 3573 } 3574 ctl_isc_announce_port(port); 3575 } 3576 return (0); 3577 } 3578 3579 int 3580 ctl_lun_map_deinit(struct ctl_port *port) 3581 { 3582 struct ctl_softc *softc = port->ctl_softc; 3583 struct ctl_lun *lun; 3584 3585 if (port->lun_map == NULL) 3586 return (0); 3587 port->lun_map_size = 0; 3588 free(port->lun_map, M_CTL); 3589 port->lun_map = NULL; 3590 if (port->status & CTL_PORT_STATUS_ONLINE) { 3591 if (port->lun_enable != NULL) { 3592 STAILQ_FOREACH(lun, &softc->lun_list, links) 3593 port->lun_enable(port->targ_lun_arg, lun->lun); 3594 } 3595 ctl_isc_announce_port(port); 3596 } 3597 return (0); 3598 } 3599 3600 int 3601 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3602 { 3603 int status; 3604 uint32_t old; 3605 3606 if (port->lun_map == NULL) { 3607 status = ctl_lun_map_init(port); 3608 if (status != 0) 3609 return (status); 3610 } 3611 if (plun >= port->lun_map_size) 3612 return (EINVAL); 3613 old = port->lun_map[plun]; 3614 port->lun_map[plun] = glun; 3615 if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) { 3616 if (port->lun_enable != NULL) 3617 port->lun_enable(port->targ_lun_arg, plun); 3618 ctl_isc_announce_port(port); 3619 } 3620 return (0); 3621 } 3622 3623 int 3624 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3625 { 3626 uint32_t old; 3627 3628 if (port->lun_map == NULL || plun >= port->lun_map_size) 3629 return (0); 3630 old = port->lun_map[plun]; 3631 port->lun_map[plun] = UINT32_MAX; 3632 if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) { 3633 if (port->lun_disable != NULL) 3634 port->lun_disable(port->targ_lun_arg, plun); 3635 ctl_isc_announce_port(port); 3636 } 3637 return (0); 3638 } 3639 3640 uint32_t 3641 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3642 { 3643 3644 if (port == NULL) 3645 return (UINT32_MAX); 3646 if (port->lun_map == NULL) 3647 return (lun_id); 3648 if (lun_id > port->lun_map_size) 3649 return (UINT32_MAX); 3650 return (port->lun_map[lun_id]); 3651 } 3652 3653 uint32_t 3654 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3655 { 3656 uint32_t i; 3657 3658 if (port == NULL) 3659 return (UINT32_MAX); 3660 if 
(port->lun_map == NULL) 3661 return (lun_id); 3662 for (i = 0; i < port->lun_map_size; i++) { 3663 if (port->lun_map[i] == lun_id) 3664 return (i); 3665 } 3666 return (UINT32_MAX); 3667 } 3668 3669 uint32_t 3670 ctl_decode_lun(uint64_t encoded) 3671 { 3672 uint8_t lun[8]; 3673 uint32_t result = 0xffffffff; 3674 3675 be64enc(lun, encoded); 3676 switch (lun[0] & RPL_LUNDATA_ATYP_MASK) { 3677 case RPL_LUNDATA_ATYP_PERIPH: 3678 if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 && 3679 lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0) 3680 result = lun[1]; 3681 break; 3682 case RPL_LUNDATA_ATYP_FLAT: 3683 if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 && 3684 lun[6] == 0 && lun[7] == 0) 3685 result = ((lun[0] & 0x3f) << 8) + lun[1]; 3686 break; 3687 case RPL_LUNDATA_ATYP_EXTLUN: 3688 switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) { 3689 case 0x02: 3690 switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) { 3691 case 0x00: 3692 result = lun[1]; 3693 break; 3694 case 0x10: 3695 result = (lun[1] << 16) + (lun[2] << 8) + 3696 lun[3]; 3697 break; 3698 case 0x20: 3699 if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0) 3700 result = (lun[2] << 24) + 3701 (lun[3] << 16) + (lun[4] << 8) + 3702 lun[5]; 3703 break; 3704 } 3705 break; 3706 case RPL_LUNDATA_EXT_EAM_NOT_SPEC: 3707 result = 0xffffffff; 3708 break; 3709 } 3710 break; 3711 } 3712 return (result); 3713 } 3714 3715 uint64_t 3716 ctl_encode_lun(uint32_t decoded) 3717 { 3718 uint64_t l = decoded; 3719 3720 if (l <= 0xff) 3721 return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48)); 3722 if (l <= 0x3fff) 3723 return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48)); 3724 if (l <= 0xffffff) 3725 return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) | 3726 (l << 32)); 3727 return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16)); 3728 } 3729 3730 int 3731 ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) 3732 { 3733 int i; 3734 3735 for (i = first; i < last; i++) { 3736 if ((mask[i / 32] & (1 << (i % 32))) == 0) 3737 return (i); 3738 } 3739 return (-1); 3740 } 3741 3742 int 3743 ctl_set_mask(uint32_t *mask, uint32_t bit) 3744 { 3745 uint32_t chunk, piece; 3746 3747 chunk = bit >> 5; 3748 piece = bit % (sizeof(uint32_t) * 8); 3749 3750 if ((mask[chunk] & (1 << piece)) != 0) 3751 return (-1); 3752 else 3753 mask[chunk] |= (1 << piece); 3754 3755 return (0); 3756 } 3757 3758 int 3759 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3760 { 3761 uint32_t chunk, piece; 3762 3763 chunk = bit >> 5; 3764 piece = bit % (sizeof(uint32_t) * 8); 3765 3766 if ((mask[chunk] & (1 << piece)) == 0) 3767 return (-1); 3768 else 3769 mask[chunk] &= ~(1 << piece); 3770 3771 return (0); 3772 } 3773 3774 int 3775 ctl_is_set(uint32_t *mask, uint32_t bit) 3776 { 3777 uint32_t chunk, piece; 3778 3779 chunk = bit >> 5; 3780 piece = bit % (sizeof(uint32_t) * 8); 3781 3782 if ((mask[chunk] & (1 << piece)) == 0) 3783 return (0); 3784 else 3785 return (1); 3786 } 3787 3788 static uint64_t 3789 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3790 { 3791 uint64_t *t; 3792 3793 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3794 if (t == NULL) 3795 return (0); 3796 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3797 } 3798 3799 static void 3800 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3801 { 3802 uint64_t *t; 3803 3804 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3805 if (t == NULL) 3806 return; 3807 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3808 } 3809 3810 static void 3811 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3812 
{ 3813 uint64_t *p; 3814 u_int i; 3815 3816 i = residx/CTL_MAX_INIT_PER_PORT; 3817 if (lun->pr_keys[i] != NULL) 3818 return; 3819 mtx_unlock(&lun->lun_lock); 3820 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3821 M_WAITOK | M_ZERO); 3822 mtx_lock(&lun->lun_lock); 3823 if (lun->pr_keys[i] == NULL) 3824 lun->pr_keys[i] = p; 3825 else 3826 free(p, M_CTL); 3827 } 3828 3829 static void 3830 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3831 { 3832 uint64_t *t; 3833 3834 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3835 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3836 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3837 } 3838 3839 /* 3840 * ctl_softc, pool_name, total_ctl_io are passed in. 3841 * npool is passed out. 3842 */ 3843 int 3844 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3845 uint32_t total_ctl_io, void **npool) 3846 { 3847 struct ctl_io_pool *pool; 3848 3849 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3850 M_NOWAIT | M_ZERO); 3851 if (pool == NULL) 3852 return (ENOMEM); 3853 3854 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3855 pool->ctl_softc = ctl_softc; 3856 #ifdef IO_POOLS 3857 pool->zone = uma_zsecond_create(pool->name, NULL, 3858 NULL, NULL, NULL, ctl_softc->io_zone); 3859 /* uma_prealloc(pool->zone, total_ctl_io); */ 3860 #else 3861 pool->zone = ctl_softc->io_zone; 3862 #endif 3863 3864 *npool = pool; 3865 return (0); 3866 } 3867 3868 void 3869 ctl_pool_free(struct ctl_io_pool *pool) 3870 { 3871 3872 if (pool == NULL) 3873 return; 3874 3875 #ifdef IO_POOLS 3876 uma_zdestroy(pool->zone); 3877 #endif 3878 free(pool, M_CTL); 3879 } 3880 3881 union ctl_io * 3882 ctl_alloc_io(void *pool_ref) 3883 { 3884 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3885 union ctl_io *io; 3886 3887 io = uma_zalloc(pool->zone, M_WAITOK); 3888 if (io != NULL) { 3889 io->io_hdr.pool = pool_ref; 3890 CTL_SOFTC(io) = pool->ctl_softc; 3891 TAILQ_INIT(&io->io_hdr.blocked_queue); 3892 } 3893 return (io); 3894 } 3895 3896 union ctl_io * 3897 ctl_alloc_io_nowait(void *pool_ref) 3898 { 3899 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3900 union ctl_io *io; 3901 3902 io = uma_zalloc(pool->zone, M_NOWAIT); 3903 if (io != NULL) { 3904 io->io_hdr.pool = pool_ref; 3905 CTL_SOFTC(io) = pool->ctl_softc; 3906 TAILQ_INIT(&io->io_hdr.blocked_queue); 3907 } 3908 return (io); 3909 } 3910 3911 void 3912 ctl_free_io(union ctl_io *io) 3913 { 3914 struct ctl_io_pool *pool; 3915 3916 if (io == NULL) 3917 return; 3918 3919 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3920 uma_zfree(pool->zone, io); 3921 } 3922 3923 void 3924 ctl_zero_io(union ctl_io *io) 3925 { 3926 struct ctl_io_pool *pool; 3927 3928 if (io == NULL) 3929 return; 3930 3931 /* 3932 * May need to preserve linked list pointers at some point too. 
3933 */ 3934 pool = io->io_hdr.pool; 3935 memset(io, 0, sizeof(*io)); 3936 io->io_hdr.pool = pool; 3937 CTL_SOFTC(io) = pool->ctl_softc; 3938 TAILQ_INIT(&io->io_hdr.blocked_queue); 3939 } 3940 3941 int 3942 ctl_expand_number(const char *buf, uint64_t *num) 3943 { 3944 char *endptr; 3945 uint64_t number; 3946 unsigned shift; 3947 3948 number = strtoq(buf, &endptr, 0); 3949 3950 switch (tolower((unsigned char)*endptr)) { 3951 case 'e': 3952 shift = 60; 3953 break; 3954 case 'p': 3955 shift = 50; 3956 break; 3957 case 't': 3958 shift = 40; 3959 break; 3960 case 'g': 3961 shift = 30; 3962 break; 3963 case 'm': 3964 shift = 20; 3965 break; 3966 case 'k': 3967 shift = 10; 3968 break; 3969 case 'b': 3970 case '\0': /* No unit. */ 3971 *num = number; 3972 return (0); 3973 default: 3974 /* Unrecognized unit. */ 3975 return (-1); 3976 } 3977 3978 if ((number << shift) >> shift != number) { 3979 /* Overflow */ 3980 return (-1); 3981 } 3982 *num = number << shift; 3983 return (0); 3984 } 3985 3986 /* 3987 * This routine could be used in the future to load default and/or saved 3988 * mode page parameters for a particular LUN. 3989 */ 3990 static int 3991 ctl_init_page_index(struct ctl_lun *lun) 3992 { 3993 int i, page_code; 3994 struct ctl_page_index *page_index; 3995 const char *value; 3996 uint64_t ival; 3997 3998 memcpy(&lun->mode_pages.index, page_index_template, 3999 sizeof(page_index_template)); 4000 4001 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 4002 page_index = &lun->mode_pages.index[i]; 4003 if (lun->be_lun->lun_type == T_DIRECT && 4004 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 4005 continue; 4006 if (lun->be_lun->lun_type == T_PROCESSOR && 4007 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 4008 continue; 4009 if (lun->be_lun->lun_type == T_CDROM && 4010 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 4011 continue; 4012 4013 page_code = page_index->page_code & SMPH_PC_MASK; 4014 switch (page_code) { 4015 case SMS_RW_ERROR_RECOVERY_PAGE: { 4016 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4017 ("subpage %#x for page %#x is incorrect!", 4018 page_index->subpage, page_code)); 4019 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 4020 &rw_er_page_default, 4021 sizeof(rw_er_page_default)); 4022 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 4023 &rw_er_page_changeable, 4024 sizeof(rw_er_page_changeable)); 4025 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 4026 &rw_er_page_default, 4027 sizeof(rw_er_page_default)); 4028 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 4029 &rw_er_page_default, 4030 sizeof(rw_er_page_default)); 4031 page_index->page_data = 4032 (uint8_t *)lun->mode_pages.rw_er_page; 4033 break; 4034 } 4035 case SMS_FORMAT_DEVICE_PAGE: { 4036 struct scsi_format_page *format_page; 4037 4038 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4039 ("subpage %#x for page %#x is incorrect!", 4040 page_index->subpage, page_code)); 4041 4042 /* 4043 * Sectors per track are set above. Bytes per 4044 * sector need to be set here on a per-LUN basis.
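 *
 * A quick illustration (blocksize values invented here, not taken from
 * the original comment): scsi_ulto2b() stores the value big-endian, so a
 * LUN with a 4096-byte blocksize ends up with bytes_per_sector =
 * {0x10, 0x00}, while a 512-byte LUN would get {0x02, 0x00}.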
4045 */ 4046 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 4047 &format_page_default, 4048 sizeof(format_page_default)); 4049 memcpy(&lun->mode_pages.format_page[ 4050 CTL_PAGE_CHANGEABLE], &format_page_changeable, 4051 sizeof(format_page_changeable)); 4052 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 4053 &format_page_default, 4054 sizeof(format_page_default)); 4055 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 4056 &format_page_default, 4057 sizeof(format_page_default)); 4058 4059 format_page = &lun->mode_pages.format_page[ 4060 CTL_PAGE_CURRENT]; 4061 scsi_ulto2b(lun->be_lun->blocksize, 4062 format_page->bytes_per_sector); 4063 4064 format_page = &lun->mode_pages.format_page[ 4065 CTL_PAGE_DEFAULT]; 4066 scsi_ulto2b(lun->be_lun->blocksize, 4067 format_page->bytes_per_sector); 4068 4069 format_page = &lun->mode_pages.format_page[ 4070 CTL_PAGE_SAVED]; 4071 scsi_ulto2b(lun->be_lun->blocksize, 4072 format_page->bytes_per_sector); 4073 4074 page_index->page_data = 4075 (uint8_t *)lun->mode_pages.format_page; 4076 break; 4077 } 4078 case SMS_RIGID_DISK_PAGE: { 4079 struct scsi_rigid_disk_page *rigid_disk_page; 4080 uint32_t sectors_per_cylinder; 4081 uint64_t cylinders; 4082 #ifndef __XSCALE__ 4083 int shift; 4084 #endif /* !__XSCALE__ */ 4085 4086 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4087 ("subpage %#x for page %#x is incorrect!", 4088 page_index->subpage, page_code)); 4089 4090 /* 4091 * Rotation rate and sectors per track are set 4092 * above. We calculate the cylinders here based on 4093 * capacity. Due to the number of heads and 4094 * sectors per track we're using, smaller arrays 4095 * may turn out to have 0 cylinders. Linux and 4096 * FreeBSD don't pay attention to these mode pages 4097 * to figure out capacity, but Solaris does. It 4098 * seems to deal with 0 cylinders just fine, and 4099 * works out a fake geometry based on the capacity. 4100 */ 4101 memcpy(&lun->mode_pages.rigid_disk_page[ 4102 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 4103 sizeof(rigid_disk_page_default)); 4104 memcpy(&lun->mode_pages.rigid_disk_page[ 4105 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 4106 sizeof(rigid_disk_page_changeable)); 4107 4108 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 4109 CTL_DEFAULT_HEADS; 4110 4111 /* 4112 * The divide method here will be more accurate, 4113 * probably, but results in floating point being 4114 * used in the kernel on i386 (__udivdi3()). On the 4115 * XScale, though, __udivdi3() is implemented in 4116 * software. 4117 * 4118 * The shift method for cylinder calculation is 4119 * accurate if sectors_per_cylinder is a power of 4120 * 2. Otherwise it might be slightly off -- you 4121 * might have a bit of a truncation problem. 4122 */ 4123 #ifdef __XSCALE__ 4124 cylinders = (lun->be_lun->maxlba + 1) / 4125 sectors_per_cylinder; 4126 #else 4127 for (shift = 31; shift > 0; shift--) { 4128 if (sectors_per_cylinder & (1 << shift)) 4129 break; 4130 } 4131 cylinders = (lun->be_lun->maxlba + 1) >> shift; 4132 #endif 4133 4134 /* 4135 * We've basically got 3 bytes, or 24 bits for the 4136 * cylinder size in the mode page. If we're over, 4137 * just round down to 2^24. 
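 *
 * Worked example (numbers invented for illustration): if
 * sectors_per_cylinder were 0x8200, the loop below finds the highest set
 * bit at position 15, so shift = 15 and cylinders becomes
 * (maxlba + 1) >> 15, effectively a divide by 0x8000 instead of 0x8200,
 * about 1.6% high.  The further sectors_per_cylinder is from a power of
 * 2, the larger that error becomes.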
4138 */ 4139 if (cylinders > 0xffffff) 4140 cylinders = 0xffffff; 4141 4142 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4143 CTL_PAGE_DEFAULT]; 4144 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4145 4146 if ((value = dnvlist_get_string(lun->be_lun->options, 4147 "rpm", NULL)) != NULL) { 4148 scsi_ulto2b(strtol(value, NULL, 0), 4149 rigid_disk_page->rotation_rate); 4150 } 4151 4152 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 4153 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4154 sizeof(rigid_disk_page_default)); 4155 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 4156 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4157 sizeof(rigid_disk_page_default)); 4158 4159 page_index->page_data = 4160 (uint8_t *)lun->mode_pages.rigid_disk_page; 4161 break; 4162 } 4163 case SMS_VERIFY_ERROR_RECOVERY_PAGE: { 4164 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4165 ("subpage %#x for page %#x is incorrect!", 4166 page_index->subpage, page_code)); 4167 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT], 4168 &verify_er_page_default, 4169 sizeof(verify_er_page_default)); 4170 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE], 4171 &verify_er_page_changeable, 4172 sizeof(verify_er_page_changeable)); 4173 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT], 4174 &verify_er_page_default, 4175 sizeof(verify_er_page_default)); 4176 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED], 4177 &verify_er_page_default, 4178 sizeof(verify_er_page_default)); 4179 page_index->page_data = 4180 (uint8_t *)lun->mode_pages.verify_er_page; 4181 break; 4182 } 4183 case SMS_CACHING_PAGE: { 4184 struct scsi_caching_page *caching_page; 4185 4186 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4187 ("subpage %#x for page %#x is incorrect!", 4188 page_index->subpage, page_code)); 4189 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 4190 &caching_page_default, 4191 sizeof(caching_page_default)); 4192 memcpy(&lun->mode_pages.caching_page[ 4193 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 4194 sizeof(caching_page_changeable)); 4195 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4196 &caching_page_default, 4197 sizeof(caching_page_default)); 4198 caching_page = &lun->mode_pages.caching_page[ 4199 CTL_PAGE_SAVED]; 4200 value = dnvlist_get_string(lun->be_lun->options, 4201 "writecache", NULL); 4202 if (value != NULL && strcmp(value, "off") == 0) 4203 caching_page->flags1 &= ~SCP_WCE; 4204 value = dnvlist_get_string(lun->be_lun->options, 4205 "readcache", NULL); 4206 if (value != NULL && strcmp(value, "off") == 0) 4207 caching_page->flags1 |= SCP_RCD; 4208 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 4209 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4210 sizeof(caching_page_default)); 4211 page_index->page_data = 4212 (uint8_t *)lun->mode_pages.caching_page; 4213 break; 4214 } 4215 case SMS_CONTROL_MODE_PAGE: { 4216 switch (page_index->subpage) { 4217 case SMS_SUBPAGE_PAGE_0: { 4218 struct scsi_control_page *control_page; 4219 4220 memcpy(&lun->mode_pages.control_page[ 4221 CTL_PAGE_DEFAULT], 4222 &control_page_default, 4223 sizeof(control_page_default)); 4224 memcpy(&lun->mode_pages.control_page[ 4225 CTL_PAGE_CHANGEABLE], 4226 &control_page_changeable, 4227 sizeof(control_page_changeable)); 4228 memcpy(&lun->mode_pages.control_page[ 4229 CTL_PAGE_SAVED], 4230 &control_page_default, 4231 sizeof(control_page_default)); 4232 control_page = &lun->mode_pages.control_page[ 4233 CTL_PAGE_SAVED]; 4234 value = 
dnvlist_get_string(lun->be_lun->options, 4235 "reordering", NULL); 4236 if (value != NULL && 4237 strcmp(value, "unrestricted") == 0) { 4238 control_page->queue_flags &= 4239 ~SCP_QUEUE_ALG_MASK; 4240 control_page->queue_flags |= 4241 SCP_QUEUE_ALG_UNRESTRICTED; 4242 } 4243 memcpy(&lun->mode_pages.control_page[ 4244 CTL_PAGE_CURRENT], 4245 &lun->mode_pages.control_page[ 4246 CTL_PAGE_SAVED], 4247 sizeof(control_page_default)); 4248 page_index->page_data = 4249 (uint8_t *)lun->mode_pages.control_page; 4250 break; 4251 } 4252 case 0x01: 4253 memcpy(&lun->mode_pages.control_ext_page[ 4254 CTL_PAGE_DEFAULT], 4255 &control_ext_page_default, 4256 sizeof(control_ext_page_default)); 4257 memcpy(&lun->mode_pages.control_ext_page[ 4258 CTL_PAGE_CHANGEABLE], 4259 &control_ext_page_changeable, 4260 sizeof(control_ext_page_changeable)); 4261 memcpy(&lun->mode_pages.control_ext_page[ 4262 CTL_PAGE_SAVED], 4263 &control_ext_page_default, 4264 sizeof(control_ext_page_default)); 4265 memcpy(&lun->mode_pages.control_ext_page[ 4266 CTL_PAGE_CURRENT], 4267 &lun->mode_pages.control_ext_page[ 4268 CTL_PAGE_SAVED], 4269 sizeof(control_ext_page_default)); 4270 page_index->page_data = 4271 (uint8_t *)lun->mode_pages.control_ext_page; 4272 break; 4273 default: 4274 panic("subpage %#x for page %#x is incorrect!", 4275 page_index->subpage, page_code); 4276 } 4277 break; 4278 } 4279 case SMS_INFO_EXCEPTIONS_PAGE: { 4280 switch (page_index->subpage) { 4281 case SMS_SUBPAGE_PAGE_0: 4282 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 4283 &ie_page_default, 4284 sizeof(ie_page_default)); 4285 memcpy(&lun->mode_pages.ie_page[ 4286 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 4287 sizeof(ie_page_changeable)); 4288 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 4289 &ie_page_default, 4290 sizeof(ie_page_default)); 4291 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 4292 &ie_page_default, 4293 sizeof(ie_page_default)); 4294 page_index->page_data = 4295 (uint8_t *)lun->mode_pages.ie_page; 4296 break; 4297 case 0x02: { 4298 struct ctl_logical_block_provisioning_page *page; 4299 4300 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 4301 &lbp_page_default, 4302 sizeof(lbp_page_default)); 4303 memcpy(&lun->mode_pages.lbp_page[ 4304 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 4305 sizeof(lbp_page_changeable)); 4306 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4307 &lbp_page_default, 4308 sizeof(lbp_page_default)); 4309 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 4310 value = dnvlist_get_string(lun->be_lun->options, 4311 "avail-threshold", NULL); 4312 if (value != NULL && 4313 ctl_expand_number(value, &ival) == 0) { 4314 page->descr[0].flags |= SLBPPD_ENABLED | 4315 SLBPPD_ARMING_DEC; 4316 if (lun->be_lun->blocksize) 4317 ival /= lun->be_lun->blocksize; 4318 else 4319 ival /= 512; 4320 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4321 page->descr[0].count); 4322 } 4323 value = dnvlist_get_string(lun->be_lun->options, 4324 "used-threshold", NULL); 4325 if (value != NULL && 4326 ctl_expand_number(value, &ival) == 0) { 4327 page->descr[1].flags |= SLBPPD_ENABLED | 4328 SLBPPD_ARMING_INC; 4329 if (lun->be_lun->blocksize) 4330 ival /= lun->be_lun->blocksize; 4331 else 4332 ival /= 512; 4333 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4334 page->descr[1].count); 4335 } 4336 value = dnvlist_get_string(lun->be_lun->options, 4337 "pool-avail-threshold", NULL); 4338 if (value != NULL && 4339 ctl_expand_number(value, &ival) == 0) { 4340 page->descr[2].flags |= SLBPPD_ENABLED | 4341 SLBPPD_ARMING_DEC; 4342 if (lun->be_lun->blocksize) 
4343 ival /= lun->be_lun->blocksize; 4344 else 4345 ival /= 512; 4346 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4347 page->descr[2].count); 4348 } 4349 value = dnvlist_get_string(lun->be_lun->options, 4350 "pool-used-threshold", NULL); 4351 if (value != NULL && 4352 ctl_expand_number(value, &ival) == 0) { 4353 page->descr[3].flags |= SLBPPD_ENABLED | 4354 SLBPPD_ARMING_INC; 4355 if (lun->be_lun->blocksize) 4356 ival /= lun->be_lun->blocksize; 4357 else 4358 ival /= 512; 4359 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4360 page->descr[3].count); 4361 } 4362 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 4363 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4364 sizeof(lbp_page_default)); 4365 page_index->page_data = 4366 (uint8_t *)lun->mode_pages.lbp_page; 4367 break; 4368 } 4369 default: 4370 panic("subpage %#x for page %#x is incorrect!", 4371 page_index->subpage, page_code); 4372 } 4373 break; 4374 } 4375 case SMS_CDDVD_CAPS_PAGE:{ 4376 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4377 ("subpage %#x for page %#x is incorrect!", 4378 page_index->subpage, page_code)); 4379 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT], 4380 &cddvd_page_default, 4381 sizeof(cddvd_page_default)); 4382 memcpy(&lun->mode_pages.cddvd_page[ 4383 CTL_PAGE_CHANGEABLE], &cddvd_page_changeable, 4384 sizeof(cddvd_page_changeable)); 4385 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4386 &cddvd_page_default, 4387 sizeof(cddvd_page_default)); 4388 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT], 4389 &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4390 sizeof(cddvd_page_default)); 4391 page_index->page_data = 4392 (uint8_t *)lun->mode_pages.cddvd_page; 4393 break; 4394 } 4395 default: 4396 panic("invalid page code value %#x", page_code); 4397 } 4398 } 4399 4400 return (CTL_RETVAL_COMPLETE); 4401 } 4402 4403 static int 4404 ctl_init_log_page_index(struct ctl_lun *lun) 4405 { 4406 struct ctl_page_index *page_index; 4407 int i, j, k, prev; 4408 4409 memcpy(&lun->log_pages.index, log_page_index_template, 4410 sizeof(log_page_index_template)); 4411 4412 prev = -1; 4413 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 4414 page_index = &lun->log_pages.index[i]; 4415 if (lun->be_lun->lun_type == T_DIRECT && 4416 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 4417 continue; 4418 if (lun->be_lun->lun_type == T_PROCESSOR && 4419 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 4420 continue; 4421 if (lun->be_lun->lun_type == T_CDROM && 4422 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 4423 continue; 4424 4425 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 4426 lun->backend->lun_attr == NULL) 4427 continue; 4428 4429 if (page_index->page_code != prev) { 4430 lun->log_pages.pages_page[j] = page_index->page_code; 4431 prev = page_index->page_code; 4432 j++; 4433 } 4434 lun->log_pages.subpages_page[k*2] = page_index->page_code; 4435 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 4436 k++; 4437 } 4438 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 4439 lun->log_pages.index[0].page_len = j; 4440 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 4441 lun->log_pages.index[1].page_len = k * 2; 4442 lun->log_pages.index[2].page_data = (uint8_t *)&lun->log_pages.temp_page; 4443 lun->log_pages.index[2].page_len = sizeof(lun->log_pages.temp_page); 4444 lun->log_pages.index[3].page_data = &lun->log_pages.lbp_page[0]; 4445 lun->log_pages.index[3].page_len = 12*CTL_NUM_LBP_PARAMS; 4446 lun->log_pages.index[4].page_data = (uint8_t 
*)&lun->log_pages.stat_page; 4447 lun->log_pages.index[4].page_len = sizeof(lun->log_pages.stat_page); 4448 lun->log_pages.index[5].page_data = (uint8_t *)&lun->log_pages.ie_page; 4449 lun->log_pages.index[5].page_len = sizeof(lun->log_pages.ie_page); 4450 4451 return (CTL_RETVAL_COMPLETE); 4452 } 4453 4454 static int 4455 hex2bin(const char *str, uint8_t *buf, int buf_size) 4456 { 4457 int i; 4458 u_char c; 4459 4460 memset(buf, 0, buf_size); 4461 while (isspace(str[0])) 4462 str++; 4463 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) 4464 str += 2; 4465 buf_size *= 2; 4466 for (i = 0; str[i] != 0 && i < buf_size; i++) { 4467 while (str[i] == '-') /* Skip dashes in UUIDs. */ 4468 str++; 4469 c = str[i]; 4470 if (isdigit(c)) 4471 c -= '0'; 4472 else if (isalpha(c)) 4473 c -= isupper(c) ? 'A' - 10 : 'a' - 10; 4474 else 4475 break; 4476 if (c >= 16) 4477 break; 4478 if ((i & 1) == 0) 4479 buf[i / 2] |= (c << 4); 4480 else 4481 buf[i / 2] |= c; 4482 } 4483 return ((i + 1) / 2); 4484 } 4485 4486 /* 4487 * Add LUN. 4488 * 4489 * Returns 0 for success, non-zero (errno) for failure. 4490 */ 4491 int 4492 ctl_add_lun(struct ctl_be_lun *be_lun) 4493 { 4494 struct ctl_softc *ctl_softc = control_softc; 4495 struct ctl_lun *nlun, *lun; 4496 struct scsi_vpd_id_descriptor *desc; 4497 struct scsi_vpd_id_t10 *t10id; 4498 const char *eui, *naa, *scsiname, *uuid, *vendor, *value; 4499 int lun_number; 4500 int devidlen, idlen1, idlen2 = 0, len; 4501 4502 /* 4503 * We support only Direct Access, CD-ROM or Processor LUN types. 4504 */ 4505 switch (be_lun->lun_type) { 4506 case T_DIRECT: 4507 case T_PROCESSOR: 4508 case T_CDROM: 4509 break; 4510 case T_SEQUENTIAL: 4511 case T_CHANGER: 4512 default: 4513 return (EINVAL); 4514 } 4515 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK | M_ZERO); 4516 4517 lun->pending_sense = malloc(sizeof(struct scsi_sense_data *) * 4518 ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); 4519 lun->pending_ua = malloc(sizeof(ctl_ua_type *) * ctl_max_ports, 4520 M_DEVBUF, M_WAITOK | M_ZERO); 4521 lun->pr_keys = malloc(sizeof(uint64_t *) * ctl_max_ports, 4522 M_DEVBUF, M_WAITOK | M_ZERO); 4523 4524 /* Generate LUN ID. 
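 *
 * What follows builds the device identification (VPD page 0x83)
 * descriptor list for this LUN: a T10 vendor ID descriptor is always
 * emitted, and the optional "scsiname", "eui", "naa" and "uuid" backend
 * options add SCSI name string, EUI-64, NAA and UUID designators.  As a
 * hypothetical example (identifier value made up for illustration), an
 * option such as naa=0x600a0b800029e187 is hex-decoded by hex2bin()
 * above into an 8-byte NAA designator.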
*/ 4525 devidlen = max(CTL_DEVID_MIN_LEN, 4526 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4527 idlen1 = sizeof(*t10id) + devidlen; 4528 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4529 scsiname = dnvlist_get_string(be_lun->options, "scsiname", NULL); 4530 if (scsiname != NULL) { 4531 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4532 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4533 } 4534 eui = dnvlist_get_string(be_lun->options, "eui", NULL); 4535 if (eui != NULL) { 4536 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4537 } 4538 naa = dnvlist_get_string(be_lun->options, "naa", NULL); 4539 if (naa != NULL) { 4540 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4541 } 4542 uuid = dnvlist_get_string(be_lun->options, "uuid", NULL); 4543 if (uuid != NULL) { 4544 len += sizeof(struct scsi_vpd_id_descriptor) + 18; 4545 } 4546 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4547 M_CTL, M_WAITOK | M_ZERO); 4548 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4549 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4550 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4551 desc->length = idlen1; 4552 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4553 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4554 if ((vendor = dnvlist_get_string(be_lun->options, "vendor", NULL)) == NULL) { 4555 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4556 } else { 4557 strncpy(t10id->vendor, vendor, 4558 min(sizeof(t10id->vendor), strlen(vendor))); 4559 } 4560 strncpy((char *)t10id->vendor_spec_id, 4561 (char *)be_lun->device_id, devidlen); 4562 if (scsiname != NULL) { 4563 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4564 desc->length); 4565 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4566 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4567 SVPD_ID_TYPE_SCSI_NAME; 4568 desc->length = idlen2; 4569 strlcpy(desc->identifier, scsiname, idlen2); 4570 } 4571 if (eui != NULL) { 4572 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4573 desc->length); 4574 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4575 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4576 SVPD_ID_TYPE_EUI64; 4577 desc->length = hex2bin(eui, desc->identifier, 16); 4578 desc->length = desc->length > 12 ? 16 : 4579 (desc->length > 8 ? 12 : 8); 4580 len -= 16 - desc->length; 4581 } 4582 if (naa != NULL) { 4583 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4584 desc->length); 4585 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4586 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4587 SVPD_ID_TYPE_NAA; 4588 desc->length = hex2bin(naa, desc->identifier, 16); 4589 desc->length = desc->length > 8 ? 16 : 8; 4590 len -= 16 - desc->length; 4591 } 4592 if (uuid != NULL) { 4593 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4594 desc->length); 4595 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4596 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4597 SVPD_ID_TYPE_UUID; 4598 desc->identifier[0] = 0x10; 4599 hex2bin(uuid, &desc->identifier[2], 16); 4600 desc->length = 18; 4601 } 4602 lun->lun_devid->len = len; 4603 4604 mtx_lock(&ctl_softc->ctl_lock); 4605 /* 4606 * See if the caller requested a particular LUN number. If so, see 4607 * if it is available. Otherwise, allocate the first available LUN. 
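 *
 * For illustration (occupancy invented): with LUNs 0, 1 and 3 already in
 * use, ctl_lun_mask[0] is 0x0b, so ctl_ffz() below returns 2 and
 * ctl_set_mask() then marks bit 2 as allocated.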
4608 */ 4609 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4610 if ((be_lun->req_lun_id > (ctl_max_luns - 1)) 4611 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4612 mtx_unlock(&ctl_softc->ctl_lock); 4613 if (be_lun->req_lun_id > (ctl_max_luns - 1)) { 4614 printf("ctl: requested LUN ID %d is higher " 4615 "than ctl_max_luns - 1 (%d)\n", 4616 be_lun->req_lun_id, ctl_max_luns - 1); 4617 } else { 4618 /* 4619 * XXX KDM return an error, or just assign 4620 * another LUN ID in this case?? 4621 */ 4622 printf("ctl: requested LUN ID %d is already " 4623 "in use\n", be_lun->req_lun_id); 4624 } 4625 fail: 4626 free(lun->lun_devid, M_CTL); 4627 free(lun, M_CTL); 4628 return (ENOSPC); 4629 } 4630 lun_number = be_lun->req_lun_id; 4631 } else { 4632 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, ctl_max_luns); 4633 if (lun_number == -1) { 4634 mtx_unlock(&ctl_softc->ctl_lock); 4635 printf("ctl: can't allocate LUN, out of LUNs\n"); 4636 goto fail; 4637 } 4638 } 4639 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4640 mtx_unlock(&ctl_softc->ctl_lock); 4641 4642 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4643 lun->lun = lun_number; 4644 lun->be_lun = be_lun; 4645 /* 4646 * The processor LUN is always enabled. Disk LUNs come on line 4647 * disabled, and must be enabled by the backend. 4648 */ 4649 lun->flags |= CTL_LUN_DISABLED; 4650 lun->backend = be_lun->be; 4651 be_lun->ctl_lun = lun; 4652 be_lun->lun_id = lun_number; 4653 if (be_lun->flags & CTL_LUN_FLAG_EJECTED) 4654 lun->flags |= CTL_LUN_EJECTED; 4655 if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) 4656 lun->flags |= CTL_LUN_NO_MEDIA; 4657 if (be_lun->flags & CTL_LUN_FLAG_STOPPED) 4658 lun->flags |= CTL_LUN_STOPPED; 4659 4660 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4661 lun->flags |= CTL_LUN_PRIMARY_SC; 4662 4663 value = dnvlist_get_string(be_lun->options, "removable", NULL); 4664 if (value != NULL) { 4665 if (strcmp(value, "on") == 0) 4666 lun->flags |= CTL_LUN_REMOVABLE; 4667 } else if (be_lun->lun_type == T_CDROM) 4668 lun->flags |= CTL_LUN_REMOVABLE; 4669 4670 lun->ctl_softc = ctl_softc; 4671 #ifdef CTL_TIME_IO 4672 lun->last_busy = getsbinuptime(); 4673 #endif 4674 TAILQ_INIT(&lun->ooa_queue); 4675 STAILQ_INIT(&lun->error_list); 4676 lun->ie_reported = 1; 4677 callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0); 4678 ctl_tpc_lun_init(lun); 4679 if (lun->flags & CTL_LUN_REMOVABLE) { 4680 lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4, 4681 M_CTL, M_WAITOK); 4682 } 4683 4684 /* 4685 * Initialize the mode and log page index. 4686 */ 4687 ctl_init_page_index(lun); 4688 ctl_init_log_page_index(lun); 4689 4690 /* Setup statistics gathering */ 4691 lun->stats.item = lun_number; 4692 4693 /* 4694 * Now, before we insert this lun on the lun list, set the lun 4695 * inventory changed UA for all other luns. 4696 */ 4697 mtx_lock(&ctl_softc->ctl_lock); 4698 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4699 mtx_lock(&nlun->lun_lock); 4700 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4701 mtx_unlock(&nlun->lun_lock); 4702 } 4703 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4704 ctl_softc->ctl_luns[lun_number] = lun; 4705 ctl_softc->num_luns++; 4706 mtx_unlock(&ctl_softc->ctl_lock); 4707 4708 /* 4709 * We successfully added the LUN, attempt to enable it. 
4710 */ 4711 if (ctl_enable_lun(lun) != 0) { 4712 printf("%s: ctl_enable_lun() failed!\n", __func__); 4713 mtx_lock(&ctl_softc->ctl_lock); 4714 STAILQ_REMOVE(&ctl_softc->lun_list, lun, ctl_lun, links); 4715 ctl_clear_mask(ctl_softc->ctl_lun_mask, lun_number); 4716 ctl_softc->ctl_luns[lun_number] = NULL; 4717 ctl_softc->num_luns--; 4718 mtx_unlock(&ctl_softc->ctl_lock); 4719 free(lun->lun_devid, M_CTL); 4720 free(lun, M_CTL); 4721 return (EIO); 4722 } 4723 4724 return (0); 4725 } 4726 4727 /* 4728 * Free LUN that has no active requests. 4729 */ 4730 static int 4731 ctl_free_lun(struct ctl_lun *lun) 4732 { 4733 struct ctl_softc *softc = lun->ctl_softc; 4734 struct ctl_lun *nlun; 4735 int i; 4736 4737 KASSERT(TAILQ_EMPTY(&lun->ooa_queue), 4738 ("Freeing a LUN %p with outstanding I/O!\n", lun)); 4739 4740 mtx_lock(&softc->ctl_lock); 4741 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4742 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4743 softc->ctl_luns[lun->lun] = NULL; 4744 softc->num_luns--; 4745 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4746 mtx_lock(&nlun->lun_lock); 4747 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4748 mtx_unlock(&nlun->lun_lock); 4749 } 4750 mtx_unlock(&softc->ctl_lock); 4751 4752 /* 4753 * Tell the backend to free resources, if this LUN has a backend. 4754 */ 4755 lun->be_lun->lun_shutdown(lun->be_lun); 4756 4757 lun->ie_reportcnt = UINT32_MAX; 4758 callout_drain(&lun->ie_callout); 4759 ctl_tpc_lun_shutdown(lun); 4760 mtx_destroy(&lun->lun_lock); 4761 free(lun->lun_devid, M_CTL); 4762 for (i = 0; i < ctl_max_ports; i++) 4763 free(lun->pending_ua[i], M_CTL); 4764 free(lun->pending_ua, M_DEVBUF); 4765 for (i = 0; i < ctl_max_ports; i++) 4766 free(lun->pr_keys[i], M_CTL); 4767 free(lun->pr_keys, M_DEVBUF); 4768 free(lun->write_buffer, M_CTL); 4769 free(lun->prevent, M_CTL); 4770 free(lun, M_CTL); 4771 4772 return (0); 4773 } 4774 4775 static int 4776 ctl_enable_lun(struct ctl_lun *lun) 4777 { 4778 struct ctl_softc *softc; 4779 struct ctl_port *port, *nport; 4780 int retval; 4781 4782 softc = lun->ctl_softc; 4783 4784 mtx_lock(&softc->ctl_lock); 4785 mtx_lock(&lun->lun_lock); 4786 KASSERT((lun->flags & CTL_LUN_DISABLED) != 0, 4787 ("%s: LUN not disabled", __func__)); 4788 lun->flags &= ~CTL_LUN_DISABLED; 4789 mtx_unlock(&lun->lun_lock); 4790 4791 STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) { 4792 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4793 port->lun_map != NULL || port->lun_enable == NULL) 4794 continue; 4795 4796 /* 4797 * Drop the lock while we call the FETD's enable routine. 4798 * This can lead to a callback into CTL (at least in the 4799 * case of the internal initiator frontend. 
4800 */ 4801 mtx_unlock(&softc->ctl_lock); 4802 retval = port->lun_enable(port->targ_lun_arg, lun->lun); 4803 mtx_lock(&softc->ctl_lock); 4804 if (retval != 0) { 4805 printf("%s: FETD %s port %d returned error " 4806 "%d for lun_enable on lun %jd\n", 4807 __func__, port->port_name, port->targ_port, 4808 retval, (intmax_t)lun->lun); 4809 } 4810 } 4811 4812 mtx_unlock(&softc->ctl_lock); 4813 ctl_isc_announce_lun(lun); 4814 4815 return (0); 4816 } 4817 4818 static int 4819 ctl_disable_lun(struct ctl_lun *lun) 4820 { 4821 struct ctl_softc *softc; 4822 struct ctl_port *port; 4823 int retval; 4824 4825 softc = lun->ctl_softc; 4826 4827 mtx_lock(&softc->ctl_lock); 4828 mtx_lock(&lun->lun_lock); 4829 KASSERT((lun->flags & CTL_LUN_DISABLED) == 0, 4830 ("%s: LUN not enabled", __func__)); 4831 lun->flags |= CTL_LUN_DISABLED; 4832 mtx_unlock(&lun->lun_lock); 4833 4834 STAILQ_FOREACH(port, &softc->port_list, links) { 4835 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4836 port->lun_map != NULL || port->lun_disable == NULL) 4837 continue; 4838 4839 /* 4840 * Drop the lock before we call the frontend's disable 4841 * routine, to avoid lock order reversals. 4842 * 4843 * XXX KDM what happens if the frontend list changes while 4844 * we're traversing it? It's unlikely, but should be handled. 4845 */ 4846 mtx_unlock(&softc->ctl_lock); 4847 retval = port->lun_disable(port->targ_lun_arg, lun->lun); 4848 mtx_lock(&softc->ctl_lock); 4849 if (retval != 0) { 4850 printf("%s: FETD %s port %d returned error " 4851 "%d for lun_disable on lun %jd\n", 4852 __func__, port->port_name, port->targ_port, 4853 retval, (intmax_t)lun->lun); 4854 } 4855 } 4856 4857 mtx_unlock(&softc->ctl_lock); 4858 ctl_isc_announce_lun(lun); 4859 4860 return (0); 4861 } 4862 4863 int 4864 ctl_start_lun(struct ctl_be_lun *be_lun) 4865 { 4866 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4867 4868 mtx_lock(&lun->lun_lock); 4869 lun->flags &= ~CTL_LUN_STOPPED; 4870 mtx_unlock(&lun->lun_lock); 4871 return (0); 4872 } 4873 4874 int 4875 ctl_stop_lun(struct ctl_be_lun *be_lun) 4876 { 4877 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4878 4879 mtx_lock(&lun->lun_lock); 4880 lun->flags |= CTL_LUN_STOPPED; 4881 mtx_unlock(&lun->lun_lock); 4882 return (0); 4883 } 4884 4885 int 4886 ctl_lun_no_media(struct ctl_be_lun *be_lun) 4887 { 4888 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4889 4890 mtx_lock(&lun->lun_lock); 4891 lun->flags |= CTL_LUN_NO_MEDIA; 4892 mtx_unlock(&lun->lun_lock); 4893 return (0); 4894 } 4895 4896 int 4897 ctl_lun_has_media(struct ctl_be_lun *be_lun) 4898 { 4899 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4900 union ctl_ha_msg msg; 4901 4902 mtx_lock(&lun->lun_lock); 4903 lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED); 4904 if (lun->flags & CTL_LUN_REMOVABLE) 4905 ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE); 4906 mtx_unlock(&lun->lun_lock); 4907 if ((lun->flags & CTL_LUN_REMOVABLE) && 4908 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 4909 bzero(&msg.ua, sizeof(msg.ua)); 4910 msg.hdr.msg_type = CTL_MSG_UA; 4911 msg.hdr.nexus.initid = -1; 4912 msg.hdr.nexus.targ_port = -1; 4913 msg.hdr.nexus.targ_lun = lun->lun; 4914 msg.hdr.nexus.targ_mapped_lun = lun->lun; 4915 msg.ua.ua_all = 1; 4916 msg.ua.ua_set = 1; 4917 msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE; 4918 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 4919 M_WAITOK); 4920 } 4921 return (0); 4922 } 4923 4924 int 4925 ctl_lun_ejected(struct ctl_be_lun *be_lun) 4926 { 4927 struct ctl_lun *lun = (struct ctl_lun 
*)be_lun->ctl_lun; 4928 4929 mtx_lock(&lun->lun_lock); 4930 lun->flags |= CTL_LUN_EJECTED; 4931 mtx_unlock(&lun->lun_lock); 4932 return (0); 4933 } 4934 4935 int 4936 ctl_lun_primary(struct ctl_be_lun *be_lun) 4937 { 4938 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4939 4940 mtx_lock(&lun->lun_lock); 4941 lun->flags |= CTL_LUN_PRIMARY_SC; 4942 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4943 mtx_unlock(&lun->lun_lock); 4944 ctl_isc_announce_lun(lun); 4945 return (0); 4946 } 4947 4948 int 4949 ctl_lun_secondary(struct ctl_be_lun *be_lun) 4950 { 4951 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4952 4953 mtx_lock(&lun->lun_lock); 4954 lun->flags &= ~CTL_LUN_PRIMARY_SC; 4955 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4956 mtx_unlock(&lun->lun_lock); 4957 ctl_isc_announce_lun(lun); 4958 return (0); 4959 } 4960 4961 /* 4962 * Remove LUN. If there are active requests, wait for completion. 4963 * 4964 * Returns 0 for success, non-zero (errno) for failure. 4965 * Completion is reported to the backend via the lun_shutdown() method. 4966 */ 4967 int 4968 ctl_remove_lun(struct ctl_be_lun *be_lun) 4969 { 4970 struct ctl_lun *lun; 4971 4972 lun = (struct ctl_lun *)be_lun->ctl_lun; 4973 4974 ctl_disable_lun(lun); 4975 4976 mtx_lock(&lun->lun_lock); 4977 lun->flags |= CTL_LUN_INVALID; 4978 4979 /* 4980 * If there is nothing in the OOA queue, go ahead and free the LUN. 4981 * If we have something in the OOA queue, we'll free it when the 4982 * last I/O completes. 4983 */ 4984 if (TAILQ_EMPTY(&lun->ooa_queue)) { 4985 mtx_unlock(&lun->lun_lock); 4986 ctl_free_lun(lun); 4987 } else 4988 mtx_unlock(&lun->lun_lock); 4989 4990 return (0); 4991 } 4992 4993 void 4994 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 4995 { 4996 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4997 union ctl_ha_msg msg; 4998 4999 mtx_lock(&lun->lun_lock); 5000 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE); 5001 mtx_unlock(&lun->lun_lock); 5002 if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 5003 /* Send msg to other side. */ 5004 bzero(&msg.ua, sizeof(msg.ua)); 5005 msg.hdr.msg_type = CTL_MSG_UA; 5006 msg.hdr.nexus.initid = -1; 5007 msg.hdr.nexus.targ_port = -1; 5008 msg.hdr.nexus.targ_lun = lun->lun; 5009 msg.hdr.nexus.targ_mapped_lun = lun->lun; 5010 msg.ua.ua_all = 1; 5011 msg.ua.ua_set = 1; 5012 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE; 5013 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 5014 M_WAITOK); 5015 } 5016 } 5017 5018 /* 5019 * Backend "memory move is complete" callback for requests that never 5020 * make it down to say RAIDCore's configuration code.
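 *
 * Roughly, as a sketch of the usual flow (see e.g. ctl_format() and
 * ctl_write_buffer() below): a config command handler allocates a data
 * buffer, sets be_move_done to this function and calls ctl_datamove();
 * once the frontend has moved the data, this callback either finishes
 * the command (freeing the buffer on error/abort or after a read toward
 * the initiator) or re-enters ctl_scsiio() so the handler can parse the
 * data it just received.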
5021 */ 5022 int 5023 ctl_config_move_done(union ctl_io *io) 5024 { 5025 int retval; 5026 5027 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 5028 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 5029 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 5030 5031 if ((io->io_hdr.port_status != 0) && 5032 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5033 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5034 ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, 5035 /*retry_count*/ io->io_hdr.port_status); 5036 } else if (io->scsiio.kern_data_resid != 0 && 5037 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && 5038 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5039 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5040 ctl_set_invalid_field_ciu(&io->scsiio); 5041 } 5042 5043 if (ctl_debug & CTL_DEBUG_CDB_DATA) 5044 ctl_data_print(io); 5045 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 5046 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5047 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 5048 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 5049 /* 5050 * XXX KDM just assuming a single pointer here, and not a 5051 * S/G list. If we start using S/G lists for config data, 5052 * we'll need to know how to clean them up here as well. 5053 */ 5054 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5055 free(io->scsiio.kern_data_ptr, M_CTL); 5056 ctl_done(io); 5057 retval = CTL_RETVAL_COMPLETE; 5058 } else { 5059 /* 5060 * XXX KDM now we need to continue data movement. Some 5061 * options: 5062 * - call ctl_scsiio() again? We don't do this for data 5063 * writes, because for those at least we know ahead of 5064 * time where the write will go and how long it is. For 5065 * config writes, though, that information is largely 5066 * contained within the write itself, thus we need to 5067 * parse out the data again. 5068 * 5069 * - Call some other function once the data is in? 5070 */ 5071 5072 /* 5073 * XXX KDM call ctl_scsiio() again for now, and check flag 5074 * bits to see whether we're allocated or not. 5075 */ 5076 retval = ctl_scsiio(&io->scsiio); 5077 } 5078 return (retval); 5079 } 5080 5081 /* 5082 * This gets called by a backend driver when it is done with a 5083 * data_submit method. 5084 */ 5085 void 5086 ctl_data_submit_done(union ctl_io *io) 5087 { 5088 /* 5089 * If the IO_CONT flag is set, we need to call the supplied 5090 * function to continue processing the I/O, instead of completing 5091 * the I/O just yet. 5092 * 5093 * If there is an error, though, we don't want to keep processing. 5094 * Instead, just send status back to the initiator. 5095 */ 5096 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5097 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5098 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5099 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5100 io->scsiio.io_cont(io); 5101 return; 5102 } 5103 ctl_done(io); 5104 } 5105 5106 /* 5107 * This gets called by a backend driver when it is done with a 5108 * configuration write. 5109 */ 5110 void 5111 ctl_config_write_done(union ctl_io *io) 5112 { 5113 uint8_t *buf; 5114 5115 /* 5116 * If the IO_CONT flag is set, we need to call the supplied 5117 * function to continue processing the I/O, instead of completing 5118 * the I/O just yet. 5119 * 5120 * If there is an error, though, we don't want to keep processing. 5121 * Instead, just send status back to the initiator. 
*/ 5123 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5124 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5125 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5126 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5127 io->scsiio.io_cont(io); 5128 return; 5129 } 5130 /* 5131 * Since a configuration write can be done for commands that actually 5132 * have data allocated, like write buffer, and commands that have 5133 * no data, like start/stop unit, we need to check here. 5134 */ 5135 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5136 buf = io->scsiio.kern_data_ptr; 5137 else 5138 buf = NULL; 5139 ctl_done(io); 5140 if (buf) 5141 free(buf, M_CTL); 5142 } 5143 5144 void 5145 ctl_config_read_done(union ctl_io *io) 5146 { 5147 uint8_t *buf; 5148 5149 /* 5150 * If there is some error -- we are done, skip data transfer. 5151 */ 5152 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 5153 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5154 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 5155 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5156 buf = io->scsiio.kern_data_ptr; 5157 else 5158 buf = NULL; 5159 ctl_done(io); 5160 if (buf) 5161 free(buf, M_CTL); 5162 return; 5163 } 5164 5165 /* 5166 * If the IO_CONT flag is set, we need to call the supplied 5167 * function to continue processing the I/O, instead of completing 5168 * the I/O just yet. 5169 */ 5170 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 5171 io->scsiio.io_cont(io); 5172 return; 5173 } 5174 5175 ctl_datamove(io); 5176 } 5177 5178 /* 5179 * SCSI release command. 5180 */ 5181 int 5182 ctl_scsi_release(struct ctl_scsiio *ctsio) 5183 { 5184 struct ctl_lun *lun = CTL_LUN(ctsio); 5185 uint32_t residx; 5186 5187 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 5188 5189 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5190 5191 /* 5192 * XXX KDM right now, we only support LUN reservation. We don't 5193 * support 3rd party reservations, or extent reservations, which 5194 * might actually need the parameter list. If we've gotten this 5195 * far, we've got a LUN reservation. Anything else got kicked out 5196 * above. So, according to SPC, ignore the length. 5197 */ 5198 5199 mtx_lock(&lun->lun_lock); 5200 5201 /* 5202 * According to SPC, it is not an error for an initiator to attempt 5203 * to release a reservation on a LUN that isn't reserved, or that 5204 * is reserved by another initiator. The reservation can only be 5205 * released, though, by the initiator who made it or by one of 5206 * several reset type events. 5207 */ 5208 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 5209 lun->flags &= ~CTL_LUN_RESERVED; 5210 5211 mtx_unlock(&lun->lun_lock); 5212 5213 ctl_set_success(ctsio); 5214 ctl_done((union ctl_io *)ctsio); 5215 return (CTL_RETVAL_COMPLETE); 5216 } 5217 5218 int 5219 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5220 { 5221 struct ctl_lun *lun = CTL_LUN(ctsio); 5222 uint32_t residx; 5223 5224 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5225 5226 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5227 5228 /* 5229 * XXX KDM right now, we only support LUN reservation. We don't 5230 * support 3rd party reservations, or extent reservations, which 5231 * might actually need the parameter list. If we've gotten this 5232 * far, we've got a LUN reservation. Anything else got kicked out 5233 * above. So, according to SPC, ignore the length.
5234 */ 5235 5236 mtx_lock(&lun->lun_lock); 5237 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 5238 ctl_set_reservation_conflict(ctsio); 5239 goto bailout; 5240 } 5241 5242 /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. */ 5243 if (lun->flags & CTL_LUN_PR_RESERVED) { 5244 ctl_set_success(ctsio); 5245 goto bailout; 5246 } 5247 5248 lun->flags |= CTL_LUN_RESERVED; 5249 lun->res_idx = residx; 5250 ctl_set_success(ctsio); 5251 5252 bailout: 5253 mtx_unlock(&lun->lun_lock); 5254 ctl_done((union ctl_io *)ctsio); 5255 return (CTL_RETVAL_COMPLETE); 5256 } 5257 5258 int 5259 ctl_start_stop(struct ctl_scsiio *ctsio) 5260 { 5261 struct ctl_lun *lun = CTL_LUN(ctsio); 5262 struct scsi_start_stop_unit *cdb; 5263 int retval; 5264 5265 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5266 5267 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5268 5269 if ((cdb->how & SSS_PC_MASK) == 0) { 5270 if ((lun->flags & CTL_LUN_PR_RESERVED) && 5271 (cdb->how & SSS_START) == 0) { 5272 uint32_t residx; 5273 5274 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5275 if (ctl_get_prkey(lun, residx) == 0 || 5276 (lun->pr_res_idx != residx && lun->pr_res_type < 4)) { 5277 ctl_set_reservation_conflict(ctsio); 5278 ctl_done((union ctl_io *)ctsio); 5279 return (CTL_RETVAL_COMPLETE); 5280 } 5281 } 5282 5283 if ((cdb->how & SSS_LOEJ) && 5284 (lun->flags & CTL_LUN_REMOVABLE) == 0) { 5285 ctl_set_invalid_field(ctsio, 5286 /*sks_valid*/ 1, 5287 /*command*/ 1, 5288 /*field*/ 4, 5289 /*bit_valid*/ 1, 5290 /*bit*/ 1); 5291 ctl_done((union ctl_io *)ctsio); 5292 return (CTL_RETVAL_COMPLETE); 5293 } 5294 5295 if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) && 5296 lun->prevent_count > 0) { 5297 /* "Medium removal prevented" */ 5298 ctl_set_sense(ctsio, /*current_error*/ 1, 5299 /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ? 5300 SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST, 5301 /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE); 5302 ctl_done((union ctl_io *)ctsio); 5303 return (CTL_RETVAL_COMPLETE); 5304 } 5305 } 5306 5307 retval = lun->backend->config_write((union ctl_io *)ctsio); 5308 return (retval); 5309 } 5310 5311 int 5312 ctl_prevent_allow(struct ctl_scsiio *ctsio) 5313 { 5314 struct ctl_lun *lun = CTL_LUN(ctsio); 5315 struct scsi_prevent *cdb; 5316 int retval; 5317 uint32_t initidx; 5318 5319 CTL_DEBUG_PRINT(("ctl_prevent_allow\n")); 5320 5321 cdb = (struct scsi_prevent *)ctsio->cdb; 5322 5323 if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) { 5324 ctl_set_invalid_opcode(ctsio); 5325 ctl_done((union ctl_io *)ctsio); 5326 return (CTL_RETVAL_COMPLETE); 5327 } 5328 5329 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5330 mtx_lock(&lun->lun_lock); 5331 if ((cdb->how & PR_PREVENT) && 5332 ctl_is_set(lun->prevent, initidx) == 0) { 5333 ctl_set_mask(lun->prevent, initidx); 5334 lun->prevent_count++; 5335 } else if ((cdb->how & PR_PREVENT) == 0 && 5336 ctl_is_set(lun->prevent, initidx)) { 5337 ctl_clear_mask(lun->prevent, initidx); 5338 lun->prevent_count--; 5339 } 5340 mtx_unlock(&lun->lun_lock); 5341 retval = lun->backend->config_write((union ctl_io *)ctsio); 5342 return (retval); 5343 } 5344 5345 /* 5346 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5347 * we don't really do anything with the LBA and length fields if the user 5348 * passes them in. Instead we'll just flush out the cache for the entire 5349 * LUN. 
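 *
 * For example (illustrative), SYNCHRONIZE CACHE(10) with an LBA of 0 and
 * a block count of 0 is accepted and still flushes the whole LUN; the
 * only way to fail the range check below is to name blocks beyond the
 * end of the device.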
5350 */ 5351 int 5352 ctl_sync_cache(struct ctl_scsiio *ctsio) 5353 { 5354 struct ctl_lun *lun = CTL_LUN(ctsio); 5355 struct ctl_lba_len_flags *lbalen; 5356 uint64_t starting_lba; 5357 uint32_t block_count; 5358 int retval; 5359 uint8_t byte2; 5360 5361 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5362 5363 retval = 0; 5364 5365 switch (ctsio->cdb[0]) { 5366 case SYNCHRONIZE_CACHE: { 5367 struct scsi_sync_cache *cdb; 5368 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5369 5370 starting_lba = scsi_4btoul(cdb->begin_lba); 5371 block_count = scsi_2btoul(cdb->lb_count); 5372 byte2 = cdb->byte2; 5373 break; 5374 } 5375 case SYNCHRONIZE_CACHE_16: { 5376 struct scsi_sync_cache_16 *cdb; 5377 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5378 5379 starting_lba = scsi_8btou64(cdb->begin_lba); 5380 block_count = scsi_4btoul(cdb->lb_count); 5381 byte2 = cdb->byte2; 5382 break; 5383 } 5384 default: 5385 ctl_set_invalid_opcode(ctsio); 5386 ctl_done((union ctl_io *)ctsio); 5387 goto bailout; 5388 break; /* NOTREACHED */ 5389 } 5390 5391 /* 5392 * We check the LBA and length, but don't do anything with them. 5393 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5394 * get flushed. This check will just help satisfy anyone who wants 5395 * to see an error for an out of range LBA. 5396 */ 5397 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5398 ctl_set_lba_out_of_range(ctsio, 5399 MAX(starting_lba, lun->be_lun->maxlba + 1)); 5400 ctl_done((union ctl_io *)ctsio); 5401 goto bailout; 5402 } 5403 5404 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5405 lbalen->lba = starting_lba; 5406 lbalen->len = block_count; 5407 lbalen->flags = byte2; 5408 retval = lun->backend->config_write((union ctl_io *)ctsio); 5409 5410 bailout: 5411 return (retval); 5412 } 5413 5414 int 5415 ctl_format(struct ctl_scsiio *ctsio) 5416 { 5417 struct scsi_format *cdb; 5418 int length, defect_list_len; 5419 5420 CTL_DEBUG_PRINT(("ctl_format\n")); 5421 5422 cdb = (struct scsi_format *)ctsio->cdb; 5423 5424 length = 0; 5425 if (cdb->byte2 & SF_FMTDATA) { 5426 if (cdb->byte2 & SF_LONGLIST) 5427 length = sizeof(struct scsi_format_header_long); 5428 else 5429 length = sizeof(struct scsi_format_header_short); 5430 } 5431 5432 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5433 && (length > 0)) { 5434 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5435 ctsio->kern_data_len = length; 5436 ctsio->kern_total_len = length; 5437 ctsio->kern_rel_offset = 0; 5438 ctsio->kern_sg_entries = 0; 5439 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5440 ctsio->be_move_done = ctl_config_move_done; 5441 ctl_datamove((union ctl_io *)ctsio); 5442 5443 return (CTL_RETVAL_COMPLETE); 5444 } 5445 5446 defect_list_len = 0; 5447 5448 if (cdb->byte2 & SF_FMTDATA) { 5449 if (cdb->byte2 & SF_LONGLIST) { 5450 struct scsi_format_header_long *header; 5451 5452 header = (struct scsi_format_header_long *) 5453 ctsio->kern_data_ptr; 5454 5455 defect_list_len = scsi_4btoul(header->defect_list_len); 5456 if (defect_list_len != 0) { 5457 ctl_set_invalid_field(ctsio, 5458 /*sks_valid*/ 1, 5459 /*command*/ 0, 5460 /*field*/ 2, 5461 /*bit_valid*/ 0, 5462 /*bit*/ 0); 5463 goto bailout; 5464 } 5465 } else { 5466 struct scsi_format_header_short *header; 5467 5468 header = (struct scsi_format_header_short *) 5469 ctsio->kern_data_ptr; 5470 5471 defect_list_len = scsi_2btoul(header->defect_list_len); 5472 if (defect_list_len != 0) { 5473 ctl_set_invalid_field(ctsio, 5474 /*sks_valid*/ 1, 5475 /*command*/ 0, 5476 
/*field*/ 2, 5477 /*bit_valid*/ 0, 5478 /*bit*/ 0); 5479 goto bailout; 5480 } 5481 } 5482 } 5483 5484 ctl_set_success(ctsio); 5485 bailout: 5486 5487 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5488 free(ctsio->kern_data_ptr, M_CTL); 5489 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5490 } 5491 5492 ctl_done((union ctl_io *)ctsio); 5493 return (CTL_RETVAL_COMPLETE); 5494 } 5495 5496 int 5497 ctl_read_buffer(struct ctl_scsiio *ctsio) 5498 { 5499 struct ctl_lun *lun = CTL_LUN(ctsio); 5500 uint64_t buffer_offset; 5501 uint32_t len; 5502 uint8_t byte2; 5503 static uint8_t descr[4]; 5504 static uint8_t echo_descr[4] = { 0 }; 5505 5506 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5507 5508 switch (ctsio->cdb[0]) { 5509 case READ_BUFFER: { 5510 struct scsi_read_buffer *cdb; 5511 5512 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5513 buffer_offset = scsi_3btoul(cdb->offset); 5514 len = scsi_3btoul(cdb->length); 5515 byte2 = cdb->byte2; 5516 break; 5517 } 5518 case READ_BUFFER_16: { 5519 struct scsi_read_buffer_16 *cdb; 5520 5521 cdb = (struct scsi_read_buffer_16 *)ctsio->cdb; 5522 buffer_offset = scsi_8btou64(cdb->offset); 5523 len = scsi_4btoul(cdb->length); 5524 byte2 = cdb->byte2; 5525 break; 5526 } 5527 default: /* This shouldn't happen. */ 5528 ctl_set_invalid_opcode(ctsio); 5529 ctl_done((union ctl_io *)ctsio); 5530 return (CTL_RETVAL_COMPLETE); 5531 } 5532 5533 if (buffer_offset > CTL_WRITE_BUFFER_SIZE || 5534 buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5535 ctl_set_invalid_field(ctsio, 5536 /*sks_valid*/ 1, 5537 /*command*/ 1, 5538 /*field*/ 6, 5539 /*bit_valid*/ 0, 5540 /*bit*/ 0); 5541 ctl_done((union ctl_io *)ctsio); 5542 return (CTL_RETVAL_COMPLETE); 5543 } 5544 5545 if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5546 descr[0] = 0; 5547 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5548 ctsio->kern_data_ptr = descr; 5549 len = min(len, sizeof(descr)); 5550 } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5551 ctsio->kern_data_ptr = echo_descr; 5552 len = min(len, sizeof(echo_descr)); 5553 } else { 5554 if (lun->write_buffer == NULL) { 5555 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5556 M_CTL, M_WAITOK); 5557 } 5558 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5559 } 5560 ctsio->kern_data_len = len; 5561 ctsio->kern_total_len = len; 5562 ctsio->kern_rel_offset = 0; 5563 ctsio->kern_sg_entries = 0; 5564 ctl_set_success(ctsio); 5565 ctsio->be_move_done = ctl_config_move_done; 5566 ctl_datamove((union ctl_io *)ctsio); 5567 return (CTL_RETVAL_COMPLETE); 5568 } 5569 5570 int 5571 ctl_write_buffer(struct ctl_scsiio *ctsio) 5572 { 5573 struct ctl_lun *lun = CTL_LUN(ctsio); 5574 struct scsi_write_buffer *cdb; 5575 int buffer_offset, len; 5576 5577 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5578 5579 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5580 5581 len = scsi_3btoul(cdb->length); 5582 buffer_offset = scsi_3btoul(cdb->offset); 5583 5584 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5585 ctl_set_invalid_field(ctsio, 5586 /*sks_valid*/ 1, 5587 /*command*/ 1, 5588 /*field*/ 6, 5589 /*bit_valid*/ 0, 5590 /*bit*/ 0); 5591 ctl_done((union ctl_io *)ctsio); 5592 return (CTL_RETVAL_COMPLETE); 5593 } 5594 5595 /* 5596 * If we've got a kernel request that hasn't been malloced yet, 5597 * malloc it and tell the caller the data buffer is here. 
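 *
 * Put differently (a sketch of the flow, not spelled out in the original
 * comment): this handler runs twice.  On the first pass
 * CTL_FLAG_ALLOCATED is clear, so we point kern_data_ptr into the
 * per-LUN write buffer and start the data transfer; when
 * ctl_config_move_done() re-enters us the flag is set, the data already
 * sits in lun->write_buffer, and all that is left is to report success.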
5598 */ 5599 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5600 if (lun->write_buffer == NULL) { 5601 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5602 M_CTL, M_WAITOK); 5603 } 5604 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5605 ctsio->kern_data_len = len; 5606 ctsio->kern_total_len = len; 5607 ctsio->kern_rel_offset = 0; 5608 ctsio->kern_sg_entries = 0; 5609 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5610 ctsio->be_move_done = ctl_config_move_done; 5611 ctl_datamove((union ctl_io *)ctsio); 5612 5613 return (CTL_RETVAL_COMPLETE); 5614 } 5615 5616 ctl_set_success(ctsio); 5617 ctl_done((union ctl_io *)ctsio); 5618 return (CTL_RETVAL_COMPLETE); 5619 } 5620 5621 static int 5622 ctl_write_same_cont(union ctl_io *io) 5623 { 5624 struct ctl_lun *lun = CTL_LUN(io); 5625 struct ctl_scsiio *ctsio; 5626 struct ctl_lba_len_flags *lbalen; 5627 int retval; 5628 5629 ctsio = &io->scsiio; 5630 ctsio->io_hdr.status = CTL_STATUS_NONE; 5631 lbalen = (struct ctl_lba_len_flags *) 5632 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5633 lbalen->lba += lbalen->len; 5634 if ((lun->be_lun->maxlba + 1) - lbalen->lba <= UINT32_MAX) { 5635 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 5636 lbalen->len = (lun->be_lun->maxlba + 1) - lbalen->lba; 5637 } 5638 5639 CTL_DEBUG_PRINT(("ctl_write_same_cont: calling config_write()\n")); 5640 retval = lun->backend->config_write((union ctl_io *)ctsio); 5641 return (retval); 5642 } 5643 5644 int 5645 ctl_write_same(struct ctl_scsiio *ctsio) 5646 { 5647 struct ctl_lun *lun = CTL_LUN(ctsio); 5648 struct ctl_lba_len_flags *lbalen; 5649 const char *val; 5650 uint64_t lba, ival; 5651 uint32_t num_blocks; 5652 int len, retval; 5653 uint8_t byte2; 5654 5655 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5656 5657 switch (ctsio->cdb[0]) { 5658 case WRITE_SAME_10: { 5659 struct scsi_write_same_10 *cdb; 5660 5661 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5662 5663 lba = scsi_4btoul(cdb->addr); 5664 num_blocks = scsi_2btoul(cdb->length); 5665 byte2 = cdb->byte2; 5666 break; 5667 } 5668 case WRITE_SAME_16: { 5669 struct scsi_write_same_16 *cdb; 5670 5671 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5672 5673 lba = scsi_8btou64(cdb->addr); 5674 num_blocks = scsi_4btoul(cdb->length); 5675 byte2 = cdb->byte2; 5676 break; 5677 } 5678 default: 5679 /* 5680 * We got a command we don't support. This shouldn't 5681 * happen, commands should be filtered out above us. 5682 */ 5683 ctl_set_invalid_opcode(ctsio); 5684 ctl_done((union ctl_io *)ctsio); 5685 5686 return (CTL_RETVAL_COMPLETE); 5687 break; /* NOTREACHED */ 5688 } 5689 5690 /* ANCHOR flag can be used only together with UNMAP */ 5691 if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) { 5692 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5693 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5694 ctl_done((union ctl_io *)ctsio); 5695 return (CTL_RETVAL_COMPLETE); 5696 } 5697 5698 /* 5699 * The first check is to make sure we're in bounds, the second 5700 * check is to catch wrap-around problems. If the lba + num blocks 5701 * is less than the lba, then we've wrapped around and the block 5702 * range is invalid anyway. 
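 *
 * Wrap-around example (made-up numbers): lba = 0xfffffffffffffff0 with
 * num_blocks = 0x20 sums to 0x10 after 64-bit wrap, which is smaller
 * than lba, so the second comparison rejects it even though the first
 * one alone typically would not.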
5703 */ 5704 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5705 || ((lba + num_blocks) < lba)) { 5706 ctl_set_lba_out_of_range(ctsio, 5707 MAX(lba, lun->be_lun->maxlba + 1)); 5708 ctl_done((union ctl_io *)ctsio); 5709 return (CTL_RETVAL_COMPLETE); 5710 } 5711 5712 /* Zero number of blocks means "to the last logical block" */ 5713 if (num_blocks == 0) { 5714 ival = UINT64_MAX; 5715 val = dnvlist_get_string(lun->be_lun->options, 5716 "write_same_max_lba", NULL); 5717 if (val != NULL) 5718 ctl_expand_number(val, &ival); 5719 if ((lun->be_lun->maxlba + 1) - lba > ival) { 5720 ctl_set_invalid_field(ctsio, 5721 /*sks_valid*/ 1, /*command*/ 1, 5722 /*field*/ ctsio->cdb[0] == WRITE_SAME_10 ? 7 : 10, 5723 /*bit_valid*/ 0, /*bit*/ 0); 5724 ctl_done((union ctl_io *)ctsio); 5725 return (CTL_RETVAL_COMPLETE); 5726 } 5727 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5728 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 5729 ctsio->io_cont = ctl_write_same_cont; 5730 num_blocks = 1 << 31; 5731 } else 5732 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5733 } 5734 5735 len = lun->be_lun->blocksize; 5736 5737 /* 5738 * If we've got a kernel request that hasn't been malloced yet, 5739 * malloc it and tell the caller the data buffer is here. 5740 */ 5741 if ((byte2 & SWS_NDOB) == 0 && 5742 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5743 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5744 ctsio->kern_data_len = len; 5745 ctsio->kern_total_len = len; 5746 ctsio->kern_rel_offset = 0; 5747 ctsio->kern_sg_entries = 0; 5748 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5749 ctsio->be_move_done = ctl_config_move_done; 5750 ctl_datamove((union ctl_io *)ctsio); 5751 5752 return (CTL_RETVAL_COMPLETE); 5753 } 5754 5755 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5756 lbalen->lba = lba; 5757 lbalen->len = num_blocks; 5758 lbalen->flags = byte2; 5759 retval = lun->backend->config_write((union ctl_io *)ctsio); 5760 5761 return (retval); 5762 } 5763 5764 int 5765 ctl_unmap(struct ctl_scsiio *ctsio) 5766 { 5767 struct ctl_lun *lun = CTL_LUN(ctsio); 5768 struct scsi_unmap *cdb; 5769 struct ctl_ptr_len_flags *ptrlen; 5770 struct scsi_unmap_header *hdr; 5771 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5772 uint64_t lba; 5773 uint32_t num_blocks; 5774 int len, retval; 5775 uint8_t byte2; 5776 5777 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5778 5779 cdb = (struct scsi_unmap *)ctsio->cdb; 5780 len = scsi_2btoul(cdb->length); 5781 byte2 = cdb->byte2; 5782 5783 /* 5784 * If we've got a kernel request that hasn't been malloced yet, 5785 * malloc it and tell the caller the data buffer is here. 
5786 */ 5787 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5788 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5789 ctsio->kern_data_len = len; 5790 ctsio->kern_total_len = len; 5791 ctsio->kern_rel_offset = 0; 5792 ctsio->kern_sg_entries = 0; 5793 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5794 ctsio->be_move_done = ctl_config_move_done; 5795 ctl_datamove((union ctl_io *)ctsio); 5796 5797 return (CTL_RETVAL_COMPLETE); 5798 } 5799 5800 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5801 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5802 if (len < sizeof (*hdr) || 5803 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5804 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5805 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5806 ctl_set_invalid_field(ctsio, 5807 /*sks_valid*/ 0, 5808 /*command*/ 0, 5809 /*field*/ 0, 5810 /*bit_valid*/ 0, 5811 /*bit*/ 0); 5812 goto done; 5813 } 5814 len = scsi_2btoul(hdr->desc_length); 5815 buf = (struct scsi_unmap_desc *)(hdr + 1); 5816 end = buf + len / sizeof(*buf); 5817 5818 endnz = buf; 5819 for (range = buf; range < end; range++) { 5820 lba = scsi_8btou64(range->lba); 5821 num_blocks = scsi_4btoul(range->length); 5822 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5823 || ((lba + num_blocks) < lba)) { 5824 ctl_set_lba_out_of_range(ctsio, 5825 MAX(lba, lun->be_lun->maxlba + 1)); 5826 ctl_done((union ctl_io *)ctsio); 5827 return (CTL_RETVAL_COMPLETE); 5828 } 5829 if (num_blocks != 0) 5830 endnz = range + 1; 5831 } 5832 5833 /* 5834 * Block backend can not handle zero last range. 5835 * Filter it out and return if there is nothing left. 5836 */ 5837 len = (uint8_t *)endnz - (uint8_t *)buf; 5838 if (len == 0) { 5839 ctl_set_success(ctsio); 5840 goto done; 5841 } 5842 5843 mtx_lock(&lun->lun_lock); 5844 ptrlen = (struct ctl_ptr_len_flags *) 5845 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5846 ptrlen->ptr = (void *)buf; 5847 ptrlen->len = len; 5848 ptrlen->flags = byte2; 5849 ctl_try_unblock_others(lun, (union ctl_io *)ctsio, FALSE); 5850 mtx_unlock(&lun->lun_lock); 5851 5852 retval = lun->backend->config_write((union ctl_io *)ctsio); 5853 return (retval); 5854 5855 done: 5856 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5857 free(ctsio->kern_data_ptr, M_CTL); 5858 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5859 } 5860 ctl_done((union ctl_io *)ctsio); 5861 return (CTL_RETVAL_COMPLETE); 5862 } 5863 5864 int 5865 ctl_default_page_handler(struct ctl_scsiio *ctsio, 5866 struct ctl_page_index *page_index, uint8_t *page_ptr) 5867 { 5868 struct ctl_lun *lun = CTL_LUN(ctsio); 5869 uint8_t *current_cp; 5870 int set_ua; 5871 uint32_t initidx; 5872 5873 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5874 set_ua = 0; 5875 5876 current_cp = (page_index->page_data + (page_index->page_len * 5877 CTL_PAGE_CURRENT)); 5878 5879 mtx_lock(&lun->lun_lock); 5880 if (memcmp(current_cp, page_ptr, page_index->page_len)) { 5881 memcpy(current_cp, page_ptr, page_index->page_len); 5882 set_ua = 1; 5883 } 5884 if (set_ua != 0) 5885 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5886 mtx_unlock(&lun->lun_lock); 5887 if (set_ua) { 5888 ctl_isc_announce_mode(lun, 5889 ctl_get_initindex(&ctsio->io_hdr.nexus), 5890 page_index->page_code, page_index->subpage); 5891 } 5892 return (CTL_RETVAL_COMPLETE); 5893 } 5894 5895 static void 5896 ctl_ie_timer(void *arg) 5897 { 5898 struct ctl_lun *lun = arg; 5899 uint64_t t; 5900 5901 if (lun->ie_asc == 0) 5902 return; 5903 5904 if (lun->MODE_IE.mrie == SIEP_MRIE_UA) 5905 
ctl_est_ua_all(lun, -1, CTL_UA_IE); 5906 else 5907 lun->ie_reported = 0; 5908 5909 if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) { 5910 lun->ie_reportcnt++; 5911 t = scsi_4btoul(lun->MODE_IE.interval_timer); 5912 if (t == 0 || t == UINT32_MAX) 5913 t = 3000; /* 5 min */ 5914 callout_schedule(&lun->ie_callout, t * hz / 10); 5915 } 5916 } 5917 5918 int 5919 ctl_ie_page_handler(struct ctl_scsiio *ctsio, 5920 struct ctl_page_index *page_index, uint8_t *page_ptr) 5921 { 5922 struct ctl_lun *lun = CTL_LUN(ctsio); 5923 struct scsi_info_exceptions_page *pg; 5924 uint64_t t; 5925 5926 (void)ctl_default_page_handler(ctsio, page_index, page_ptr); 5927 5928 pg = (struct scsi_info_exceptions_page *)page_ptr; 5929 mtx_lock(&lun->lun_lock); 5930 if (pg->info_flags & SIEP_FLAGS_TEST) { 5931 lun->ie_asc = 0x5d; 5932 lun->ie_ascq = 0xff; 5933 if (pg->mrie == SIEP_MRIE_UA) { 5934 ctl_est_ua_all(lun, -1, CTL_UA_IE); 5935 lun->ie_reported = 1; 5936 } else { 5937 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 5938 lun->ie_reported = -1; 5939 } 5940 lun->ie_reportcnt = 1; 5941 if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) { 5942 lun->ie_reportcnt++; 5943 t = scsi_4btoul(pg->interval_timer); 5944 if (t == 0 || t == UINT32_MAX) 5945 t = 3000; /* 5 min */ 5946 callout_reset(&lun->ie_callout, t * hz / 10, 5947 ctl_ie_timer, lun); 5948 } 5949 } else { 5950 lun->ie_asc = 0; 5951 lun->ie_ascq = 0; 5952 lun->ie_reported = 1; 5953 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 5954 lun->ie_reportcnt = UINT32_MAX; 5955 callout_stop(&lun->ie_callout); 5956 } 5957 mtx_unlock(&lun->lun_lock); 5958 return (CTL_RETVAL_COMPLETE); 5959 } 5960 5961 static int 5962 ctl_do_mode_select(union ctl_io *io) 5963 { 5964 struct ctl_lun *lun = CTL_LUN(io); 5965 struct scsi_mode_page_header *page_header; 5966 struct ctl_page_index *page_index; 5967 struct ctl_scsiio *ctsio; 5968 int page_len, page_len_offset, page_len_size; 5969 union ctl_modepage_info *modepage_info; 5970 uint16_t *len_left, *len_used; 5971 int retval, i; 5972 5973 ctsio = &io->scsiio; 5974 page_index = NULL; 5975 page_len = 0; 5976 5977 modepage_info = (union ctl_modepage_info *) 5978 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 5979 len_left = &modepage_info->header.len_left; 5980 len_used = &modepage_info->header.len_used; 5981 5982 do_next_page: 5983 5984 page_header = (struct scsi_mode_page_header *) 5985 (ctsio->kern_data_ptr + *len_used); 5986 5987 if (*len_left == 0) { 5988 free(ctsio->kern_data_ptr, M_CTL); 5989 ctl_set_success(ctsio); 5990 ctl_done((union ctl_io *)ctsio); 5991 return (CTL_RETVAL_COMPLETE); 5992 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 5993 free(ctsio->kern_data_ptr, M_CTL); 5994 ctl_set_param_len_error(ctsio); 5995 ctl_done((union ctl_io *)ctsio); 5996 return (CTL_RETVAL_COMPLETE); 5997 5998 } else if ((page_header->page_code & SMPH_SPF) 5999 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6000 free(ctsio->kern_data_ptr, M_CTL); 6001 ctl_set_param_len_error(ctsio); 6002 ctl_done((union ctl_io *)ctsio); 6003 return (CTL_RETVAL_COMPLETE); 6004 } 6005 6006 /* 6007 * XXX KDM should we do something with the block descriptor? 
6008 */ 6009 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6010 page_index = &lun->mode_pages.index[i]; 6011 if (lun->be_lun->lun_type == T_DIRECT && 6012 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6013 continue; 6014 if (lun->be_lun->lun_type == T_PROCESSOR && 6015 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6016 continue; 6017 if (lun->be_lun->lun_type == T_CDROM && 6018 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6019 continue; 6020 6021 if ((page_index->page_code & SMPH_PC_MASK) != 6022 (page_header->page_code & SMPH_PC_MASK)) 6023 continue; 6024 6025 /* 6026 * If neither page has a subpage code, then we've got a 6027 * match. 6028 */ 6029 if (((page_index->page_code & SMPH_SPF) == 0) 6030 && ((page_header->page_code & SMPH_SPF) == 0)) { 6031 page_len = page_header->page_length; 6032 break; 6033 } 6034 6035 /* 6036 * If both pages have subpages, then the subpage numbers 6037 * have to match. 6038 */ 6039 if ((page_index->page_code & SMPH_SPF) 6040 && (page_header->page_code & SMPH_SPF)) { 6041 struct scsi_mode_page_header_sp *sph; 6042 6043 sph = (struct scsi_mode_page_header_sp *)page_header; 6044 if (page_index->subpage == sph->subpage) { 6045 page_len = scsi_2btoul(sph->page_length); 6046 break; 6047 } 6048 } 6049 } 6050 6051 /* 6052 * If we couldn't find the page, or if we don't have a mode select 6053 * handler for it, send back an error to the user. 6054 */ 6055 if ((i >= CTL_NUM_MODE_PAGES) 6056 || (page_index->select_handler == NULL)) { 6057 ctl_set_invalid_field(ctsio, 6058 /*sks_valid*/ 1, 6059 /*command*/ 0, 6060 /*field*/ *len_used, 6061 /*bit_valid*/ 0, 6062 /*bit*/ 0); 6063 free(ctsio->kern_data_ptr, M_CTL); 6064 ctl_done((union ctl_io *)ctsio); 6065 return (CTL_RETVAL_COMPLETE); 6066 } 6067 6068 if (page_index->page_code & SMPH_SPF) { 6069 page_len_offset = 2; 6070 page_len_size = 2; 6071 } else { 6072 page_len_size = 1; 6073 page_len_offset = 1; 6074 } 6075 6076 /* 6077 * If the length the initiator gives us isn't the one we specify in 6078 * the mode page header, or if they didn't specify enough data in 6079 * the CDB to avoid truncating this page, kick out the request. 6080 */ 6081 if (page_len != page_index->page_len - page_len_offset - page_len_size) { 6082 ctl_set_invalid_field(ctsio, 6083 /*sks_valid*/ 1, 6084 /*command*/ 0, 6085 /*field*/ *len_used + page_len_offset, 6086 /*bit_valid*/ 0, 6087 /*bit*/ 0); 6088 free(ctsio->kern_data_ptr, M_CTL); 6089 ctl_done((union ctl_io *)ctsio); 6090 return (CTL_RETVAL_COMPLETE); 6091 } 6092 if (*len_left < page_index->page_len) { 6093 free(ctsio->kern_data_ptr, M_CTL); 6094 ctl_set_param_len_error(ctsio); 6095 ctl_done((union ctl_io *)ctsio); 6096 return (CTL_RETVAL_COMPLETE); 6097 } 6098 6099 /* 6100 * Run through the mode page, checking to make sure that the bits 6101 * the user changed are actually legal for him to change. 6102 */ 6103 for (i = 0; i < page_index->page_len; i++) { 6104 uint8_t *user_byte, *change_mask, *current_byte; 6105 int bad_bit; 6106 int j; 6107 6108 user_byte = (uint8_t *)page_header + i; 6109 change_mask = page_index->page_data + 6110 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6111 current_byte = page_index->page_data + 6112 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6113 6114 /* 6115 * Check to see whether the user set any bits in this byte 6116 * that he is not allowed to set. 6117 */ 6118 if ((*user_byte & ~(*change_mask)) == 6119 (*current_byte & ~(*change_mask))) 6120 continue; 6121 6122 /* 6123 * Go through bit by bit to determine which one is illegal. 
		 */
		bad_bit = 0;
		for (j = 7; j >= 0; j--) {
			if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
			    (((1 << j) & ~(*change_mask)) & *current_byte)) {
				bad_bit = j;
				break;
			}
		}
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used + i,
				      /*bit_valid*/ 1,
				      /*bit*/ bad_bit);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Decrement these before we call the page handler, since we may
	 * end up getting called back one way or another before the handler
	 * returns to this context.
	 */
	*len_left -= page_index->page_len;
	*len_used += page_index->page_len;

	retval = page_index->select_handler(ctsio, page_index,
					    (uint8_t *)page_header);

	/*
	 * If the page handler returns CTL_RETVAL_QUEUED, then we need to
	 * wait until this queued command completes to finish processing
	 * the mode page. If it returns anything other than
	 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
	 * already set the sense information, freed the data pointer, and
	 * completed the io for us.
	 */
	if (retval != CTL_RETVAL_COMPLETE)
		goto bailout_no_done;

	/*
	 * If the initiator sent us more than one page, parse the next one.
	 */
	if (*len_left > 0)
		goto do_next_page;

	ctl_set_success(ctsio);
	free(ctsio->kern_data_ptr, M_CTL);
	ctl_done((union ctl_io *)ctsio);

bailout_no_done:

	return (CTL_RETVAL_COMPLETE);

}

int
ctl_mode_select(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	union ctl_modepage_info *modepage_info;
	int bd_len, i, header_size, param_len, rtd;
	uint32_t initidx;

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	switch (ctsio->cdb[0]) {
	case MODE_SELECT_6: {
		struct scsi_mode_select_6 *cdb;

		cdb = (struct scsi_mode_select_6 *)ctsio->cdb;

		rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0;
		param_len = cdb->length;
		header_size = sizeof(struct scsi_mode_header_6);
		break;
	}
	case MODE_SELECT_10: {
		struct scsi_mode_select_10 *cdb;

		cdb = (struct scsi_mode_select_10 *)ctsio->cdb;

		rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0;
		param_len = scsi_2btoul(cdb->length);
		header_size = sizeof(struct scsi_mode_header_10);
		break;
	}
	default:
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	if (rtd) {
		if (param_len != 0) {
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
			    /*command*/ 1, /*field*/ 0,
			    /*bit_valid*/ 0, /*bit*/ 0);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		/* Revert to defaults. */
		ctl_init_page_index(lun);
		mtx_lock(&lun->lun_lock);
		ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
		mtx_unlock(&lun->lun_lock);
		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
			ctl_isc_announce_mode(lun, -1,
			    lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
			    lun->mode_pages.index[i].subpage);
		}
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * From SPC-3:
	 * "A parameter list length of zero indicates that the Data-Out Buffer
	 * shall be empty.
This condition shall not be considered as an error." 6246 */ 6247 if (param_len == 0) { 6248 ctl_set_success(ctsio); 6249 ctl_done((union ctl_io *)ctsio); 6250 return (CTL_RETVAL_COMPLETE); 6251 } 6252 6253 /* 6254 * Since we'll hit this the first time through, prior to 6255 * allocation, we don't need to free a data buffer here. 6256 */ 6257 if (param_len < header_size) { 6258 ctl_set_param_len_error(ctsio); 6259 ctl_done((union ctl_io *)ctsio); 6260 return (CTL_RETVAL_COMPLETE); 6261 } 6262 6263 /* 6264 * Allocate the data buffer and grab the user's data. In theory, 6265 * we shouldn't have to sanity check the parameter list length here 6266 * because the maximum size is 64K. We should be able to malloc 6267 * that much without too many problems. 6268 */ 6269 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6270 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6271 ctsio->kern_data_len = param_len; 6272 ctsio->kern_total_len = param_len; 6273 ctsio->kern_rel_offset = 0; 6274 ctsio->kern_sg_entries = 0; 6275 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6276 ctsio->be_move_done = ctl_config_move_done; 6277 ctl_datamove((union ctl_io *)ctsio); 6278 6279 return (CTL_RETVAL_COMPLETE); 6280 } 6281 6282 switch (ctsio->cdb[0]) { 6283 case MODE_SELECT_6: { 6284 struct scsi_mode_header_6 *mh6; 6285 6286 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6287 bd_len = mh6->blk_desc_len; 6288 break; 6289 } 6290 case MODE_SELECT_10: { 6291 struct scsi_mode_header_10 *mh10; 6292 6293 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6294 bd_len = scsi_2btoul(mh10->blk_desc_len); 6295 break; 6296 } 6297 default: 6298 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6299 } 6300 6301 if (param_len < (header_size + bd_len)) { 6302 free(ctsio->kern_data_ptr, M_CTL); 6303 ctl_set_param_len_error(ctsio); 6304 ctl_done((union ctl_io *)ctsio); 6305 return (CTL_RETVAL_COMPLETE); 6306 } 6307 6308 /* 6309 * Set the IO_CONT flag, so that if this I/O gets passed to 6310 * ctl_config_write_done(), it'll get passed back to 6311 * ctl_do_mode_select() for further processing, or completion if 6312 * we're all done. 
6313 */ 6314 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6315 ctsio->io_cont = ctl_do_mode_select; 6316 6317 modepage_info = (union ctl_modepage_info *) 6318 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6319 memset(modepage_info, 0, sizeof(*modepage_info)); 6320 modepage_info->header.len_left = param_len - header_size - bd_len; 6321 modepage_info->header.len_used = header_size + bd_len; 6322 6323 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6324 } 6325 6326 int 6327 ctl_mode_sense(struct ctl_scsiio *ctsio) 6328 { 6329 struct ctl_lun *lun = CTL_LUN(ctsio); 6330 int pc, page_code, llba, subpage; 6331 int alloc_len, page_len, header_len, bd_len, total_len; 6332 void *block_desc; 6333 struct ctl_page_index *page_index; 6334 6335 llba = 0; 6336 6337 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6338 6339 switch (ctsio->cdb[0]) { 6340 case MODE_SENSE_6: { 6341 struct scsi_mode_sense_6 *cdb; 6342 6343 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6344 6345 header_len = sizeof(struct scsi_mode_hdr_6); 6346 if (cdb->byte2 & SMS_DBD) 6347 bd_len = 0; 6348 else 6349 bd_len = sizeof(struct scsi_mode_block_descr); 6350 header_len += bd_len; 6351 6352 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6353 page_code = cdb->page & SMS_PAGE_CODE; 6354 subpage = cdb->subpage; 6355 alloc_len = cdb->length; 6356 break; 6357 } 6358 case MODE_SENSE_10: { 6359 struct scsi_mode_sense_10 *cdb; 6360 6361 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6362 6363 header_len = sizeof(struct scsi_mode_hdr_10); 6364 if (cdb->byte2 & SMS_DBD) { 6365 bd_len = 0; 6366 } else if (lun->be_lun->lun_type == T_DIRECT) { 6367 if (cdb->byte2 & SMS10_LLBAA) { 6368 llba = 1; 6369 bd_len = sizeof(struct scsi_mode_block_descr_dlong); 6370 } else 6371 bd_len = sizeof(struct scsi_mode_block_descr_dshort); 6372 } else 6373 bd_len = sizeof(struct scsi_mode_block_descr); 6374 header_len += bd_len; 6375 6376 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6377 page_code = cdb->page & SMS_PAGE_CODE; 6378 subpage = cdb->subpage; 6379 alloc_len = scsi_2btoul(cdb->length); 6380 break; 6381 } 6382 default: 6383 ctl_set_invalid_opcode(ctsio); 6384 ctl_done((union ctl_io *)ctsio); 6385 return (CTL_RETVAL_COMPLETE); 6386 break; /* NOTREACHED */ 6387 } 6388 6389 /* 6390 * We have to make a first pass through to calculate the size of 6391 * the pages that match the user's query. Then we allocate enough 6392 * memory to hold it, and actually copy the data into the buffer. 6393 */ 6394 switch (page_code) { 6395 case SMS_ALL_PAGES_PAGE: { 6396 u_int i; 6397 6398 page_len = 0; 6399 6400 /* 6401 * At the moment, values other than 0 and 0xff here are 6402 * reserved according to SPC-3. 
6403 */ 6404 if ((subpage != SMS_SUBPAGE_PAGE_0) 6405 && (subpage != SMS_SUBPAGE_ALL)) { 6406 ctl_set_invalid_field(ctsio, 6407 /*sks_valid*/ 1, 6408 /*command*/ 1, 6409 /*field*/ 3, 6410 /*bit_valid*/ 0, 6411 /*bit*/ 0); 6412 ctl_done((union ctl_io *)ctsio); 6413 return (CTL_RETVAL_COMPLETE); 6414 } 6415 6416 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6417 page_index = &lun->mode_pages.index[i]; 6418 6419 /* Make sure the page is supported for this dev type */ 6420 if (lun->be_lun->lun_type == T_DIRECT && 6421 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6422 continue; 6423 if (lun->be_lun->lun_type == T_PROCESSOR && 6424 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6425 continue; 6426 if (lun->be_lun->lun_type == T_CDROM && 6427 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6428 continue; 6429 6430 /* 6431 * We don't use this subpage if the user didn't 6432 * request all subpages. 6433 */ 6434 if ((page_index->subpage != 0) 6435 && (subpage == SMS_SUBPAGE_PAGE_0)) 6436 continue; 6437 6438 page_len += page_index->page_len; 6439 } 6440 break; 6441 } 6442 default: { 6443 u_int i; 6444 6445 page_len = 0; 6446 6447 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6448 page_index = &lun->mode_pages.index[i]; 6449 6450 /* Make sure the page is supported for this dev type */ 6451 if (lun->be_lun->lun_type == T_DIRECT && 6452 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6453 continue; 6454 if (lun->be_lun->lun_type == T_PROCESSOR && 6455 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6456 continue; 6457 if (lun->be_lun->lun_type == T_CDROM && 6458 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6459 continue; 6460 6461 /* Look for the right page code */ 6462 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6463 continue; 6464 6465 /* Look for the right subpage or the subpage wildcard*/ 6466 if ((page_index->subpage != subpage) 6467 && (subpage != SMS_SUBPAGE_ALL)) 6468 continue; 6469 6470 page_len += page_index->page_len; 6471 } 6472 6473 if (page_len == 0) { 6474 ctl_set_invalid_field(ctsio, 6475 /*sks_valid*/ 1, 6476 /*command*/ 1, 6477 /*field*/ 2, 6478 /*bit_valid*/ 1, 6479 /*bit*/ 5); 6480 ctl_done((union ctl_io *)ctsio); 6481 return (CTL_RETVAL_COMPLETE); 6482 } 6483 break; 6484 } 6485 } 6486 6487 total_len = header_len + page_len; 6488 6489 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6490 ctsio->kern_sg_entries = 0; 6491 ctsio->kern_rel_offset = 0; 6492 ctsio->kern_data_len = min(total_len, alloc_len); 6493 ctsio->kern_total_len = ctsio->kern_data_len; 6494 6495 switch (ctsio->cdb[0]) { 6496 case MODE_SENSE_6: { 6497 struct scsi_mode_hdr_6 *header; 6498 6499 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6500 6501 header->datalen = MIN(total_len - 1, 254); 6502 if (lun->be_lun->lun_type == T_DIRECT) { 6503 header->dev_specific = 0x10; /* DPOFUA */ 6504 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6505 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6506 header->dev_specific |= 0x80; /* WP */ 6507 } 6508 header->block_descr_len = bd_len; 6509 block_desc = &header[1]; 6510 break; 6511 } 6512 case MODE_SENSE_10: { 6513 struct scsi_mode_hdr_10 *header; 6514 int datalen; 6515 6516 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6517 6518 datalen = MIN(total_len - 2, 65533); 6519 scsi_ulto2b(datalen, header->datalen); 6520 if (lun->be_lun->lun_type == T_DIRECT) { 6521 header->dev_specific = 0x10; /* DPOFUA */ 6522 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6523 (lun->MODE_CTRL.eca_and_aen & 
SCP_SWP) != 0) 6524 header->dev_specific |= 0x80; /* WP */ 6525 } 6526 if (llba) 6527 header->flags |= SMH_LONGLBA; 6528 scsi_ulto2b(bd_len, header->block_descr_len); 6529 block_desc = &header[1]; 6530 break; 6531 } 6532 default: 6533 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6534 } 6535 6536 /* 6537 * If we've got a disk, use its blocksize in the block 6538 * descriptor. Otherwise, just set it to 0. 6539 */ 6540 if (bd_len > 0) { 6541 if (lun->be_lun->lun_type == T_DIRECT) { 6542 if (llba) { 6543 struct scsi_mode_block_descr_dlong *bd = block_desc; 6544 if (lun->be_lun->maxlba != 0) 6545 scsi_u64to8b(lun->be_lun->maxlba + 1, 6546 bd->num_blocks); 6547 scsi_ulto4b(lun->be_lun->blocksize, 6548 bd->block_len); 6549 } else { 6550 struct scsi_mode_block_descr_dshort *bd = block_desc; 6551 if (lun->be_lun->maxlba != 0) 6552 scsi_ulto4b(MIN(lun->be_lun->maxlba+1, 6553 UINT32_MAX), bd->num_blocks); 6554 scsi_ulto3b(lun->be_lun->blocksize, 6555 bd->block_len); 6556 } 6557 } else { 6558 struct scsi_mode_block_descr *bd = block_desc; 6559 scsi_ulto3b(0, bd->block_len); 6560 } 6561 } 6562 6563 switch (page_code) { 6564 case SMS_ALL_PAGES_PAGE: { 6565 int i, data_used; 6566 6567 data_used = header_len; 6568 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6569 struct ctl_page_index *page_index; 6570 6571 page_index = &lun->mode_pages.index[i]; 6572 if (lun->be_lun->lun_type == T_DIRECT && 6573 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6574 continue; 6575 if (lun->be_lun->lun_type == T_PROCESSOR && 6576 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6577 continue; 6578 if (lun->be_lun->lun_type == T_CDROM && 6579 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6580 continue; 6581 6582 /* 6583 * We don't use this subpage if the user didn't 6584 * request all subpages. We already checked (above) 6585 * to make sure the user only specified a subpage 6586 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6587 */ 6588 if ((page_index->subpage != 0) 6589 && (subpage == SMS_SUBPAGE_PAGE_0)) 6590 continue; 6591 6592 /* 6593 * Call the handler, if it exists, to update the 6594 * page to the latest values. 6595 */ 6596 if (page_index->sense_handler != NULL) 6597 page_index->sense_handler(ctsio, page_index,pc); 6598 6599 memcpy(ctsio->kern_data_ptr + data_used, 6600 page_index->page_data + 6601 (page_index->page_len * pc), 6602 page_index->page_len); 6603 data_used += page_index->page_len; 6604 } 6605 break; 6606 } 6607 default: { 6608 int i, data_used; 6609 6610 data_used = header_len; 6611 6612 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6613 struct ctl_page_index *page_index; 6614 6615 page_index = &lun->mode_pages.index[i]; 6616 6617 /* Look for the right page code */ 6618 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6619 continue; 6620 6621 /* Look for the right subpage or the subpage wildcard*/ 6622 if ((page_index->subpage != subpage) 6623 && (subpage != SMS_SUBPAGE_ALL)) 6624 continue; 6625 6626 /* Make sure the page is supported for this dev type */ 6627 if (lun->be_lun->lun_type == T_DIRECT && 6628 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6629 continue; 6630 if (lun->be_lun->lun_type == T_PROCESSOR && 6631 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6632 continue; 6633 if (lun->be_lun->lun_type == T_CDROM && 6634 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6635 continue; 6636 6637 /* 6638 * Call the handler, if it exists, to update the 6639 * page to the latest values. 
6640 */ 6641 if (page_index->sense_handler != NULL) 6642 page_index->sense_handler(ctsio, page_index,pc); 6643 6644 memcpy(ctsio->kern_data_ptr + data_used, 6645 page_index->page_data + 6646 (page_index->page_len * pc), 6647 page_index->page_len); 6648 data_used += page_index->page_len; 6649 } 6650 break; 6651 } 6652 } 6653 6654 ctl_set_success(ctsio); 6655 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6656 ctsio->be_move_done = ctl_config_move_done; 6657 ctl_datamove((union ctl_io *)ctsio); 6658 return (CTL_RETVAL_COMPLETE); 6659 } 6660 6661 int 6662 ctl_temp_log_sense_handler(struct ctl_scsiio *ctsio, 6663 struct ctl_page_index *page_index, 6664 int pc) 6665 { 6666 struct ctl_lun *lun = CTL_LUN(ctsio); 6667 struct scsi_log_temperature *data; 6668 const char *value; 6669 6670 data = (struct scsi_log_temperature *)page_index->page_data; 6671 6672 scsi_ulto2b(SLP_TEMPERATURE, data->hdr.param_code); 6673 data->hdr.param_control = SLP_LBIN; 6674 data->hdr.param_len = sizeof(struct scsi_log_temperature) - 6675 sizeof(struct scsi_log_param_header); 6676 if ((value = dnvlist_get_string(lun->be_lun->options, "temperature", 6677 NULL)) != NULL) 6678 data->temperature = strtol(value, NULL, 0); 6679 else 6680 data->temperature = 0xff; 6681 data++; 6682 6683 scsi_ulto2b(SLP_REFTEMPERATURE, data->hdr.param_code); 6684 data->hdr.param_control = SLP_LBIN; 6685 data->hdr.param_len = sizeof(struct scsi_log_temperature) - 6686 sizeof(struct scsi_log_param_header); 6687 if ((value = dnvlist_get_string(lun->be_lun->options, "reftemperature", 6688 NULL)) != NULL) 6689 data->temperature = strtol(value, NULL, 0); 6690 else 6691 data->temperature = 0xff; 6692 return (0); 6693 } 6694 6695 int 6696 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6697 struct ctl_page_index *page_index, 6698 int pc) 6699 { 6700 struct ctl_lun *lun = CTL_LUN(ctsio); 6701 struct scsi_log_param_header *phdr; 6702 uint8_t *data; 6703 uint64_t val; 6704 6705 data = page_index->page_data; 6706 6707 if (lun->backend->lun_attr != NULL && 6708 (val = lun->backend->lun_attr(lun->be_lun, "blocksavail")) 6709 != UINT64_MAX) { 6710 phdr = (struct scsi_log_param_header *)data; 6711 scsi_ulto2b(0x0001, phdr->param_code); 6712 phdr->param_control = SLP_LBIN | SLP_LP; 6713 phdr->param_len = 8; 6714 data = (uint8_t *)(phdr + 1); 6715 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6716 data[4] = 0x02; /* per-pool */ 6717 data += phdr->param_len; 6718 } 6719 6720 if (lun->backend->lun_attr != NULL && 6721 (val = lun->backend->lun_attr(lun->be_lun, "blocksused")) 6722 != UINT64_MAX) { 6723 phdr = (struct scsi_log_param_header *)data; 6724 scsi_ulto2b(0x0002, phdr->param_code); 6725 phdr->param_control = SLP_LBIN | SLP_LP; 6726 phdr->param_len = 8; 6727 data = (uint8_t *)(phdr + 1); 6728 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6729 data[4] = 0x01; /* per-LUN */ 6730 data += phdr->param_len; 6731 } 6732 6733 if (lun->backend->lun_attr != NULL && 6734 (val = lun->backend->lun_attr(lun->be_lun, "poolblocksavail")) 6735 != UINT64_MAX) { 6736 phdr = (struct scsi_log_param_header *)data; 6737 scsi_ulto2b(0x00f1, phdr->param_code); 6738 phdr->param_control = SLP_LBIN | SLP_LP; 6739 phdr->param_len = 8; 6740 data = (uint8_t *)(phdr + 1); 6741 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6742 data[4] = 0x02; /* per-pool */ 6743 data += phdr->param_len; 6744 } 6745 6746 if (lun->backend->lun_attr != NULL && 6747 (val = lun->backend->lun_attr(lun->be_lun, "poolblocksused")) 6748 != UINT64_MAX) { 6749 phdr = (struct scsi_log_param_header *)data; 6750 
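		/*
		 * Used blocks of the backing pool; encoded the same way as
		 * the available/used counters emitted above, reported with
		 * per-pool scope.
		 */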
		scsi_ulto2b(0x00f2, phdr->param_code);
		phdr->param_control = SLP_LBIN | SLP_LP;
		phdr->param_len = 8;
		data = (uint8_t *)(phdr + 1);
		scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
		data[4] = 0x02; /* per-pool */
		data += phdr->param_len;
	}

	page_index->page_len = data - page_index->page_data;
	return (0);
}

int
ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio,
			  struct ctl_page_index *page_index,
			  int pc)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct stat_page *data;
	struct bintime *t;

	data = (struct stat_page *)page_index->page_data;

	scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code);
	data->sap.hdr.param_control = SLP_LBIN;
	data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) -
	    sizeof(struct scsi_log_param_header);
	scsi_u64to8b(lun->stats.operations[CTL_STATS_READ],
	    data->sap.read_num);
	scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE],
	    data->sap.write_num);
	if (lun->be_lun->blocksize > 0) {
		scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] /
		    lun->be_lun->blocksize, data->sap.recvieved_lba);
		scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] /
		    lun->be_lun->blocksize, data->sap.transmitted_lba);
	}
	t = &lun->stats.time[CTL_STATS_READ];
	scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000),
	    data->sap.read_int);
	t = &lun->stats.time[CTL_STATS_WRITE];
	scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000),
	    data->sap.write_int);
	scsi_u64to8b(0, data->sap.weighted_num);
	scsi_u64to8b(0, data->sap.weighted_int);
	scsi_ulto2b(SLP_IT, data->it.hdr.param_code);
	data->it.hdr.param_control = SLP_LBIN;
	data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) -
	    sizeof(struct scsi_log_param_header);
#ifdef CTL_TIME_IO
	scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int);
#endif
	scsi_ulto2b(SLP_TI, data->ti.hdr.param_code);
	data->ti.hdr.param_control = SLP_LBIN;
	data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) -
	    sizeof(struct scsi_log_param_header);
	scsi_ulto4b(3, data->ti.exponent);
	scsi_ulto4b(1, data->ti.integer);
	return (0);
}

int
ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio,
			 struct ctl_page_index *page_index,
			 int pc)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_log_informational_exceptions *data;
	const char *value;

	data = (struct scsi_log_informational_exceptions *)page_index->page_data;

	scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code);
	data->hdr.param_control = SLP_LBIN;
	data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) -
	    sizeof(struct scsi_log_param_header);
	data->ie_asc = lun->ie_asc;
	data->ie_ascq = lun->ie_ascq;
	if ((value = dnvlist_get_string(lun->be_lun->options, "temperature",
	    NULL)) != NULL)
		data->temperature = strtol(value, NULL, 0);
	else
		data->temperature = 0xff;
	return (0);
}

int
ctl_log_sense(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	int i, pc, page_code, subpage;
	int alloc_len, total_len;
	struct ctl_page_index *page_index;
	struct scsi_log_sense *cdb;
	struct scsi_log_header *header;

	CTL_DEBUG_PRINT(("ctl_log_sense\n"));

	cdb = (struct scsi_log_sense *)ctsio->cdb;
	pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6;
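	/*
	 * Note: the page control (PC) value decoded above is passed through
	 * to the page's sense handler, but the log page handlers implemented
	 * in this file do not distinguish between current and cumulative
	 * values, so both requests return the same data.
	 */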
6851 page_code = cdb->page & SLS_PAGE_CODE; 6852 subpage = cdb->subpage; 6853 alloc_len = scsi_2btoul(cdb->length); 6854 6855 page_index = NULL; 6856 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 6857 page_index = &lun->log_pages.index[i]; 6858 6859 /* Look for the right page code */ 6860 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 6861 continue; 6862 6863 /* Look for the right subpage or the subpage wildcard*/ 6864 if (page_index->subpage != subpage) 6865 continue; 6866 6867 break; 6868 } 6869 if (i >= CTL_NUM_LOG_PAGES) { 6870 ctl_set_invalid_field(ctsio, 6871 /*sks_valid*/ 1, 6872 /*command*/ 1, 6873 /*field*/ 2, 6874 /*bit_valid*/ 0, 6875 /*bit*/ 0); 6876 ctl_done((union ctl_io *)ctsio); 6877 return (CTL_RETVAL_COMPLETE); 6878 } 6879 6880 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 6881 6882 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6883 ctsio->kern_sg_entries = 0; 6884 ctsio->kern_rel_offset = 0; 6885 ctsio->kern_data_len = min(total_len, alloc_len); 6886 ctsio->kern_total_len = ctsio->kern_data_len; 6887 6888 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 6889 header->page = page_index->page_code; 6890 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING) 6891 header->page |= SL_DS; 6892 if (page_index->subpage) { 6893 header->page |= SL_SPF; 6894 header->subpage = page_index->subpage; 6895 } 6896 scsi_ulto2b(page_index->page_len, header->datalen); 6897 6898 /* 6899 * Call the handler, if it exists, to update the 6900 * page to the latest values. 6901 */ 6902 if (page_index->sense_handler != NULL) 6903 page_index->sense_handler(ctsio, page_index, pc); 6904 6905 memcpy(header + 1, page_index->page_data, page_index->page_len); 6906 6907 ctl_set_success(ctsio); 6908 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6909 ctsio->be_move_done = ctl_config_move_done; 6910 ctl_datamove((union ctl_io *)ctsio); 6911 return (CTL_RETVAL_COMPLETE); 6912 } 6913 6914 int 6915 ctl_read_capacity(struct ctl_scsiio *ctsio) 6916 { 6917 struct ctl_lun *lun = CTL_LUN(ctsio); 6918 struct scsi_read_capacity *cdb; 6919 struct scsi_read_capacity_data *data; 6920 uint32_t lba; 6921 6922 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 6923 6924 cdb = (struct scsi_read_capacity *)ctsio->cdb; 6925 6926 lba = scsi_4btoul(cdb->addr); 6927 if (((cdb->pmi & SRC_PMI) == 0) 6928 && (lba != 0)) { 6929 ctl_set_invalid_field(/*ctsio*/ ctsio, 6930 /*sks_valid*/ 1, 6931 /*command*/ 1, 6932 /*field*/ 2, 6933 /*bit_valid*/ 0, 6934 /*bit*/ 0); 6935 ctl_done((union ctl_io *)ctsio); 6936 return (CTL_RETVAL_COMPLETE); 6937 } 6938 6939 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6940 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 6941 ctsio->kern_data_len = sizeof(*data); 6942 ctsio->kern_total_len = sizeof(*data); 6943 ctsio->kern_rel_offset = 0; 6944 ctsio->kern_sg_entries = 0; 6945 6946 /* 6947 * If the maximum LBA is greater than 0xfffffffe, the user must 6948 * issue a SERVICE ACTION IN (16) command, with the read capacity 6949 * serivce action set. 6950 */ 6951 if (lun->be_lun->maxlba > 0xfffffffe) 6952 scsi_ulto4b(0xffffffff, data->addr); 6953 else 6954 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 6955 6956 /* 6957 * XXX KDM this may not be 512 bytes... 
6958 */ 6959 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6960 6961 ctl_set_success(ctsio); 6962 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6963 ctsio->be_move_done = ctl_config_move_done; 6964 ctl_datamove((union ctl_io *)ctsio); 6965 return (CTL_RETVAL_COMPLETE); 6966 } 6967 6968 int 6969 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 6970 { 6971 struct ctl_lun *lun = CTL_LUN(ctsio); 6972 struct scsi_read_capacity_16 *cdb; 6973 struct scsi_read_capacity_data_long *data; 6974 uint64_t lba; 6975 uint32_t alloc_len; 6976 6977 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 6978 6979 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 6980 6981 alloc_len = scsi_4btoul(cdb->alloc_len); 6982 lba = scsi_8btou64(cdb->addr); 6983 6984 if ((cdb->reladr & SRC16_PMI) 6985 && (lba != 0)) { 6986 ctl_set_invalid_field(/*ctsio*/ ctsio, 6987 /*sks_valid*/ 1, 6988 /*command*/ 1, 6989 /*field*/ 2, 6990 /*bit_valid*/ 0, 6991 /*bit*/ 0); 6992 ctl_done((union ctl_io *)ctsio); 6993 return (CTL_RETVAL_COMPLETE); 6994 } 6995 6996 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6997 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 6998 ctsio->kern_rel_offset = 0; 6999 ctsio->kern_sg_entries = 0; 7000 ctsio->kern_data_len = min(sizeof(*data), alloc_len); 7001 ctsio->kern_total_len = ctsio->kern_data_len; 7002 7003 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 7004 /* XXX KDM this may not be 512 bytes... */ 7005 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7006 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 7007 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 7008 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 7009 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 7010 7011 ctl_set_success(ctsio); 7012 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7013 ctsio->be_move_done = ctl_config_move_done; 7014 ctl_datamove((union ctl_io *)ctsio); 7015 return (CTL_RETVAL_COMPLETE); 7016 } 7017 7018 int 7019 ctl_get_lba_status(struct ctl_scsiio *ctsio) 7020 { 7021 struct ctl_lun *lun = CTL_LUN(ctsio); 7022 struct scsi_get_lba_status *cdb; 7023 struct scsi_get_lba_status_data *data; 7024 struct ctl_lba_len_flags *lbalen; 7025 uint64_t lba; 7026 uint32_t alloc_len, total_len; 7027 int retval; 7028 7029 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 7030 7031 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 7032 lba = scsi_8btou64(cdb->addr); 7033 alloc_len = scsi_4btoul(cdb->alloc_len); 7034 7035 if (lba > lun->be_lun->maxlba) { 7036 ctl_set_lba_out_of_range(ctsio, lba); 7037 ctl_done((union ctl_io *)ctsio); 7038 return (CTL_RETVAL_COMPLETE); 7039 } 7040 7041 total_len = sizeof(*data) + sizeof(data->descr[0]); 7042 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7043 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 7044 ctsio->kern_rel_offset = 0; 7045 ctsio->kern_sg_entries = 0; 7046 ctsio->kern_data_len = min(total_len, alloc_len); 7047 ctsio->kern_total_len = ctsio->kern_data_len; 7048 7049 /* Fill dummy data in case backend can't tell anything. */ 7050 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 7051 scsi_u64to8b(lba, data->descr[0].addr); 7052 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 7053 data->descr[0].length); 7054 data->descr[0].status = 0; /* Mapped or unknown. 
*/ 7055 7056 ctl_set_success(ctsio); 7057 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7058 ctsio->be_move_done = ctl_config_move_done; 7059 7060 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 7061 lbalen->lba = lba; 7062 lbalen->len = total_len; 7063 lbalen->flags = 0; 7064 retval = lun->backend->config_read((union ctl_io *)ctsio); 7065 return (retval); 7066 } 7067 7068 int 7069 ctl_read_defect(struct ctl_scsiio *ctsio) 7070 { 7071 struct scsi_read_defect_data_10 *ccb10; 7072 struct scsi_read_defect_data_12 *ccb12; 7073 struct scsi_read_defect_data_hdr_10 *data10; 7074 struct scsi_read_defect_data_hdr_12 *data12; 7075 uint32_t alloc_len, data_len; 7076 uint8_t format; 7077 7078 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7079 7080 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7081 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7082 format = ccb10->format; 7083 alloc_len = scsi_2btoul(ccb10->alloc_length); 7084 data_len = sizeof(*data10); 7085 } else { 7086 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7087 format = ccb12->format; 7088 alloc_len = scsi_4btoul(ccb12->alloc_length); 7089 data_len = sizeof(*data12); 7090 } 7091 if (alloc_len == 0) { 7092 ctl_set_success(ctsio); 7093 ctl_done((union ctl_io *)ctsio); 7094 return (CTL_RETVAL_COMPLETE); 7095 } 7096 7097 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7098 ctsio->kern_rel_offset = 0; 7099 ctsio->kern_sg_entries = 0; 7100 ctsio->kern_data_len = min(data_len, alloc_len); 7101 ctsio->kern_total_len = ctsio->kern_data_len; 7102 7103 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7104 data10 = (struct scsi_read_defect_data_hdr_10 *) 7105 ctsio->kern_data_ptr; 7106 data10->format = format; 7107 scsi_ulto2b(0, data10->length); 7108 } else { 7109 data12 = (struct scsi_read_defect_data_hdr_12 *) 7110 ctsio->kern_data_ptr; 7111 data12->format = format; 7112 scsi_ulto2b(0, data12->generation); 7113 scsi_ulto4b(0, data12->length); 7114 } 7115 7116 ctl_set_success(ctsio); 7117 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7118 ctsio->be_move_done = ctl_config_move_done; 7119 ctl_datamove((union ctl_io *)ctsio); 7120 return (CTL_RETVAL_COMPLETE); 7121 } 7122 7123 int 7124 ctl_report_ident_info(struct ctl_scsiio *ctsio) 7125 { 7126 struct ctl_lun *lun = CTL_LUN(ctsio); 7127 struct scsi_report_ident_info *cdb; 7128 struct scsi_report_ident_info_data *rii_ptr; 7129 struct scsi_report_ident_info_descr *riid_ptr; 7130 const char *oii, *otii; 7131 int retval, alloc_len, total_len = 0, len = 0; 7132 7133 CTL_DEBUG_PRINT(("ctl_report_ident_info\n")); 7134 7135 cdb = (struct scsi_report_ident_info *)ctsio->cdb; 7136 retval = CTL_RETVAL_COMPLETE; 7137 7138 total_len = sizeof(struct scsi_report_ident_info_data); 7139 switch (cdb->type) { 7140 case RII_LUII: 7141 oii = dnvlist_get_string(lun->be_lun->options, 7142 "ident_info", NULL); 7143 if (oii) 7144 len = strlen(oii); /* Approximately */ 7145 break; 7146 case RII_LUTII: 7147 otii = dnvlist_get_string(lun->be_lun->options, 7148 "text_ident_info", NULL); 7149 if (otii) 7150 len = strlen(otii) + 1; /* NULL-terminated */ 7151 break; 7152 case RII_IIS: 7153 len = 2 * sizeof(struct scsi_report_ident_info_descr); 7154 break; 7155 default: 7156 ctl_set_invalid_field(/*ctsio*/ ctsio, 7157 /*sks_valid*/ 1, 7158 /*command*/ 1, 7159 /*field*/ 11, 7160 /*bit_valid*/ 1, 7161 /*bit*/ 2); 7162 ctl_done((union ctl_io *)ctsio); 7163 return(retval); 7164 } 7165 total_len += len; 7166 alloc_len = scsi_4btoul(cdb->length); 7167 7168 ctsio->kern_data_ptr = 
malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7169 ctsio->kern_sg_entries = 0; 7170 ctsio->kern_rel_offset = 0; 7171 ctsio->kern_data_len = min(total_len, alloc_len); 7172 ctsio->kern_total_len = ctsio->kern_data_len; 7173 7174 rii_ptr = (struct scsi_report_ident_info_data *)ctsio->kern_data_ptr; 7175 switch (cdb->type) { 7176 case RII_LUII: 7177 if (oii) { 7178 if (oii[0] == '0' && oii[1] == 'x') 7179 len = hex2bin(oii, (uint8_t *)(rii_ptr + 1), len); 7180 else 7181 strncpy((uint8_t *)(rii_ptr + 1), oii, len); 7182 } 7183 break; 7184 case RII_LUTII: 7185 if (otii) 7186 strlcpy((uint8_t *)(rii_ptr + 1), otii, len); 7187 break; 7188 case RII_IIS: 7189 riid_ptr = (struct scsi_report_ident_info_descr *)(rii_ptr + 1); 7190 riid_ptr->type = RII_LUII; 7191 scsi_ulto2b(0xffff, riid_ptr->length); 7192 riid_ptr++; 7193 riid_ptr->type = RII_LUTII; 7194 scsi_ulto2b(0xffff, riid_ptr->length); 7195 } 7196 scsi_ulto2b(len, rii_ptr->length); 7197 7198 ctl_set_success(ctsio); 7199 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7200 ctsio->be_move_done = ctl_config_move_done; 7201 ctl_datamove((union ctl_io *)ctsio); 7202 return(retval); 7203 } 7204 7205 int 7206 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7207 { 7208 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7209 struct ctl_lun *lun = CTL_LUN(ctsio); 7210 struct scsi_maintenance_in *cdb; 7211 int retval; 7212 int alloc_len, ext, total_len = 0, g, pc, pg, ts, os; 7213 int num_ha_groups, num_target_ports, shared_group; 7214 struct ctl_port *port; 7215 struct scsi_target_group_data *rtg_ptr; 7216 struct scsi_target_group_data_extended *rtg_ext_ptr; 7217 struct scsi_target_port_group_descriptor *tpg_desc; 7218 7219 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7220 7221 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7222 retval = CTL_RETVAL_COMPLETE; 7223 7224 switch (cdb->byte2 & STG_PDF_MASK) { 7225 case STG_PDF_LENGTH: 7226 ext = 0; 7227 break; 7228 case STG_PDF_EXTENDED: 7229 ext = 1; 7230 break; 7231 default: 7232 ctl_set_invalid_field(/*ctsio*/ ctsio, 7233 /*sks_valid*/ 1, 7234 /*command*/ 1, 7235 /*field*/ 2, 7236 /*bit_valid*/ 1, 7237 /*bit*/ 5); 7238 ctl_done((union ctl_io *)ctsio); 7239 return(retval); 7240 } 7241 7242 num_target_ports = 0; 7243 shared_group = (softc->is_single != 0); 7244 mtx_lock(&softc->ctl_lock); 7245 STAILQ_FOREACH(port, &softc->port_list, links) { 7246 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7247 continue; 7248 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7249 continue; 7250 num_target_ports++; 7251 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7252 shared_group = 1; 7253 } 7254 mtx_unlock(&softc->ctl_lock); 7255 num_ha_groups = (softc->is_single) ? 
0 : NUM_HA_SHELVES; 7256 7257 if (ext) 7258 total_len = sizeof(struct scsi_target_group_data_extended); 7259 else 7260 total_len = sizeof(struct scsi_target_group_data); 7261 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7262 (shared_group + num_ha_groups) + 7263 sizeof(struct scsi_target_port_descriptor) * num_target_ports; 7264 7265 alloc_len = scsi_4btoul(cdb->length); 7266 7267 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7268 ctsio->kern_sg_entries = 0; 7269 ctsio->kern_rel_offset = 0; 7270 ctsio->kern_data_len = min(total_len, alloc_len); 7271 ctsio->kern_total_len = ctsio->kern_data_len; 7272 7273 if (ext) { 7274 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7275 ctsio->kern_data_ptr; 7276 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7277 rtg_ext_ptr->format_type = 0x10; 7278 rtg_ext_ptr->implicit_transition_time = 0; 7279 tpg_desc = &rtg_ext_ptr->groups[0]; 7280 } else { 7281 rtg_ptr = (struct scsi_target_group_data *) 7282 ctsio->kern_data_ptr; 7283 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7284 tpg_desc = &rtg_ptr->groups[0]; 7285 } 7286 7287 mtx_lock(&softc->ctl_lock); 7288 pg = softc->port_min / softc->port_cnt; 7289 if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { 7290 /* Some shelf is known to be primary. */ 7291 if (softc->ha_link == CTL_HA_LINK_OFFLINE) 7292 os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7293 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) 7294 os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7295 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) 7296 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 7297 else 7298 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7299 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7300 ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7301 } else { 7302 ts = os; 7303 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7304 } 7305 } else { 7306 /* No known primary shelf. */ 7307 if (softc->ha_link == CTL_HA_LINK_OFFLINE) { 7308 ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7309 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7310 } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { 7311 ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7312 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7313 } else { 7314 ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7315 } 7316 } 7317 if (shared_group) { 7318 tpg_desc->pref_state = ts; 7319 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7320 TPG_U_SUP | TPG_T_SUP; 7321 scsi_ulto2b(1, tpg_desc->target_port_group); 7322 tpg_desc->status = TPG_IMPLICIT; 7323 pc = 0; 7324 STAILQ_FOREACH(port, &softc->port_list, links) { 7325 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7326 continue; 7327 if (!softc->is_single && 7328 (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) 7329 continue; 7330 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7331 continue; 7332 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7333 relative_target_port_identifier); 7334 pc++; 7335 } 7336 tpg_desc->target_port_count = pc; 7337 tpg_desc = (struct scsi_target_port_group_descriptor *) 7338 &tpg_desc->descriptors[pc]; 7339 } 7340 for (g = 0; g < num_ha_groups; g++) { 7341 tpg_desc->pref_state = (g == pg) ? 
ts : os; 7342 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7343 TPG_U_SUP | TPG_T_SUP; 7344 scsi_ulto2b(2 + g, tpg_desc->target_port_group); 7345 tpg_desc->status = TPG_IMPLICIT; 7346 pc = 0; 7347 STAILQ_FOREACH(port, &softc->port_list, links) { 7348 if (port->targ_port < g * softc->port_cnt || 7349 port->targ_port >= (g + 1) * softc->port_cnt) 7350 continue; 7351 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7352 continue; 7353 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7354 continue; 7355 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7356 continue; 7357 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7358 relative_target_port_identifier); 7359 pc++; 7360 } 7361 tpg_desc->target_port_count = pc; 7362 tpg_desc = (struct scsi_target_port_group_descriptor *) 7363 &tpg_desc->descriptors[pc]; 7364 } 7365 mtx_unlock(&softc->ctl_lock); 7366 7367 ctl_set_success(ctsio); 7368 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7369 ctsio->be_move_done = ctl_config_move_done; 7370 ctl_datamove((union ctl_io *)ctsio); 7371 return(retval); 7372 } 7373 7374 int 7375 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7376 { 7377 struct ctl_lun *lun = CTL_LUN(ctsio); 7378 struct scsi_report_supported_opcodes *cdb; 7379 const struct ctl_cmd_entry *entry, *sentry; 7380 struct scsi_report_supported_opcodes_all *all; 7381 struct scsi_report_supported_opcodes_descr *descr; 7382 struct scsi_report_supported_opcodes_one *one; 7383 int retval; 7384 int alloc_len, total_len; 7385 int opcode, service_action, i, j, num; 7386 7387 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7388 7389 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7390 retval = CTL_RETVAL_COMPLETE; 7391 7392 opcode = cdb->requested_opcode; 7393 service_action = scsi_2btoul(cdb->requested_service_action); 7394 switch (cdb->options & RSO_OPTIONS_MASK) { 7395 case RSO_OPTIONS_ALL: 7396 num = 0; 7397 for (i = 0; i < 256; i++) { 7398 entry = &ctl_cmd_table[i]; 7399 if (entry->flags & CTL_CMD_FLAG_SA5) { 7400 for (j = 0; j < 32; j++) { 7401 sentry = &((const struct ctl_cmd_entry *) 7402 entry->execute)[j]; 7403 if (ctl_cmd_applicable( 7404 lun->be_lun->lun_type, sentry)) 7405 num++; 7406 } 7407 } else { 7408 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7409 entry)) 7410 num++; 7411 } 7412 } 7413 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7414 num * sizeof(struct scsi_report_supported_opcodes_descr); 7415 break; 7416 case RSO_OPTIONS_OC: 7417 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7418 ctl_set_invalid_field(/*ctsio*/ ctsio, 7419 /*sks_valid*/ 1, 7420 /*command*/ 1, 7421 /*field*/ 2, 7422 /*bit_valid*/ 1, 7423 /*bit*/ 2); 7424 ctl_done((union ctl_io *)ctsio); 7425 return (CTL_RETVAL_COMPLETE); 7426 } 7427 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7428 break; 7429 case RSO_OPTIONS_OC_SA: 7430 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7431 service_action >= 32) { 7432 ctl_set_invalid_field(/*ctsio*/ ctsio, 7433 /*sks_valid*/ 1, 7434 /*command*/ 1, 7435 /*field*/ 2, 7436 /*bit_valid*/ 1, 7437 /*bit*/ 2); 7438 ctl_done((union ctl_io *)ctsio); 7439 return (CTL_RETVAL_COMPLETE); 7440 } 7441 /* FALLTHROUGH */ 7442 case RSO_OPTIONS_OC_ASA: 7443 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7444 break; 7445 default: 7446 ctl_set_invalid_field(/*ctsio*/ ctsio, 7447 /*sks_valid*/ 1, 7448 /*command*/ 1, 7449 /*field*/ 2, 7450 /*bit_valid*/ 1, 7451 /*bit*/ 2); 7452 ctl_done((union ctl_io *)ctsio); 7453 return 
(CTL_RETVAL_COMPLETE); 7454 } 7455 7456 alloc_len = scsi_4btoul(cdb->length); 7457 7458 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7459 ctsio->kern_sg_entries = 0; 7460 ctsio->kern_rel_offset = 0; 7461 ctsio->kern_data_len = min(total_len, alloc_len); 7462 ctsio->kern_total_len = ctsio->kern_data_len; 7463 7464 switch (cdb->options & RSO_OPTIONS_MASK) { 7465 case RSO_OPTIONS_ALL: 7466 all = (struct scsi_report_supported_opcodes_all *) 7467 ctsio->kern_data_ptr; 7468 num = 0; 7469 for (i = 0; i < 256; i++) { 7470 entry = &ctl_cmd_table[i]; 7471 if (entry->flags & CTL_CMD_FLAG_SA5) { 7472 for (j = 0; j < 32; j++) { 7473 sentry = &((const struct ctl_cmd_entry *) 7474 entry->execute)[j]; 7475 if (!ctl_cmd_applicable( 7476 lun->be_lun->lun_type, sentry)) 7477 continue; 7478 descr = &all->descr[num++]; 7479 descr->opcode = i; 7480 scsi_ulto2b(j, descr->service_action); 7481 descr->flags = RSO_SERVACTV; 7482 scsi_ulto2b(sentry->length, 7483 descr->cdb_length); 7484 } 7485 } else { 7486 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7487 entry)) 7488 continue; 7489 descr = &all->descr[num++]; 7490 descr->opcode = i; 7491 scsi_ulto2b(0, descr->service_action); 7492 descr->flags = 0; 7493 scsi_ulto2b(entry->length, descr->cdb_length); 7494 } 7495 } 7496 scsi_ulto4b( 7497 num * sizeof(struct scsi_report_supported_opcodes_descr), 7498 all->length); 7499 break; 7500 case RSO_OPTIONS_OC: 7501 one = (struct scsi_report_supported_opcodes_one *) 7502 ctsio->kern_data_ptr; 7503 entry = &ctl_cmd_table[opcode]; 7504 goto fill_one; 7505 case RSO_OPTIONS_OC_SA: 7506 one = (struct scsi_report_supported_opcodes_one *) 7507 ctsio->kern_data_ptr; 7508 entry = &ctl_cmd_table[opcode]; 7509 entry = &((const struct ctl_cmd_entry *) 7510 entry->execute)[service_action]; 7511 fill_one: 7512 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7513 one->support = 3; 7514 scsi_ulto2b(entry->length, one->cdb_length); 7515 one->cdb_usage[0] = opcode; 7516 memcpy(&one->cdb_usage[1], entry->usage, 7517 entry->length - 1); 7518 } else 7519 one->support = 1; 7520 break; 7521 case RSO_OPTIONS_OC_ASA: 7522 one = (struct scsi_report_supported_opcodes_one *) 7523 ctsio->kern_data_ptr; 7524 entry = &ctl_cmd_table[opcode]; 7525 if (entry->flags & CTL_CMD_FLAG_SA5) { 7526 entry = &((const struct ctl_cmd_entry *) 7527 entry->execute)[service_action]; 7528 } else if (service_action != 0) { 7529 one->support = 1; 7530 break; 7531 } 7532 goto fill_one; 7533 } 7534 7535 ctl_set_success(ctsio); 7536 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7537 ctsio->be_move_done = ctl_config_move_done; 7538 ctl_datamove((union ctl_io *)ctsio); 7539 return(retval); 7540 } 7541 7542 int 7543 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7544 { 7545 struct scsi_report_supported_tmf *cdb; 7546 struct scsi_report_supported_tmf_ext_data *data; 7547 int retval; 7548 int alloc_len, total_len; 7549 7550 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7551 7552 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7553 7554 retval = CTL_RETVAL_COMPLETE; 7555 7556 if (cdb->options & RST_REPD) 7557 total_len = sizeof(struct scsi_report_supported_tmf_ext_data); 7558 else 7559 total_len = sizeof(struct scsi_report_supported_tmf_data); 7560 alloc_len = scsi_4btoul(cdb->length); 7561 7562 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7563 ctsio->kern_sg_entries = 0; 7564 ctsio->kern_rel_offset = 0; 7565 ctsio->kern_data_len = min(total_len, alloc_len); 7566 ctsio->kern_total_len = ctsio->kern_data_len; 7567 7568 
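	/*
	 * The reply is filled in through the extended structure in either
	 * case; the REPD bit above only changes total_len, i.e. how much of
	 * the zero-initialized buffer is returned to the initiator.
	 */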
data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr; 7569 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | 7570 RST_TRS; 7571 data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; 7572 data->length = total_len - 4; 7573 7574 ctl_set_success(ctsio); 7575 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7576 ctsio->be_move_done = ctl_config_move_done; 7577 ctl_datamove((union ctl_io *)ctsio); 7578 return (retval); 7579 } 7580 7581 int 7582 ctl_report_timestamp(struct ctl_scsiio *ctsio) 7583 { 7584 struct scsi_report_timestamp *cdb; 7585 struct scsi_report_timestamp_data *data; 7586 struct timeval tv; 7587 int64_t timestamp; 7588 int retval; 7589 int alloc_len, total_len; 7590 7591 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7592 7593 cdb = (struct scsi_report_timestamp *)ctsio->cdb; 7594 7595 retval = CTL_RETVAL_COMPLETE; 7596 7597 total_len = sizeof(struct scsi_report_timestamp_data); 7598 alloc_len = scsi_4btoul(cdb->length); 7599 7600 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7601 ctsio->kern_sg_entries = 0; 7602 ctsio->kern_rel_offset = 0; 7603 ctsio->kern_data_len = min(total_len, alloc_len); 7604 ctsio->kern_total_len = ctsio->kern_data_len; 7605 7606 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7607 scsi_ulto2b(sizeof(*data) - 2, data->length); 7608 data->origin = RTS_ORIG_OUTSIDE; 7609 getmicrotime(&tv); 7610 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7611 scsi_ulto4b(timestamp >> 16, data->timestamp); 7612 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7613 7614 ctl_set_success(ctsio); 7615 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7616 ctsio->be_move_done = ctl_config_move_done; 7617 ctl_datamove((union ctl_io *)ctsio); 7618 return (retval); 7619 } 7620 7621 int 7622 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7623 { 7624 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7625 struct ctl_lun *lun = CTL_LUN(ctsio); 7626 struct scsi_per_res_in *cdb; 7627 int alloc_len, total_len = 0; 7628 /* struct scsi_per_res_in_rsrv in_data; */ 7629 uint64_t key; 7630 7631 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7632 7633 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7634 7635 alloc_len = scsi_2btoul(cdb->length); 7636 7637 retry: 7638 mtx_lock(&lun->lun_lock); 7639 switch (cdb->action) { 7640 case SPRI_RK: /* read keys */ 7641 total_len = sizeof(struct scsi_per_res_in_keys) + 7642 lun->pr_key_count * 7643 sizeof(struct scsi_per_res_key); 7644 break; 7645 case SPRI_RR: /* read reservation */ 7646 if (lun->flags & CTL_LUN_PR_RESERVED) 7647 total_len = sizeof(struct scsi_per_res_in_rsrv); 7648 else 7649 total_len = sizeof(struct scsi_per_res_in_header); 7650 break; 7651 case SPRI_RC: /* report capabilities */ 7652 total_len = sizeof(struct scsi_per_res_cap); 7653 break; 7654 case SPRI_RS: /* read full status */ 7655 total_len = sizeof(struct scsi_per_res_in_header) + 7656 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7657 lun->pr_key_count; 7658 break; 7659 default: 7660 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7661 } 7662 mtx_unlock(&lun->lun_lock); 7663 7664 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7665 ctsio->kern_rel_offset = 0; 7666 ctsio->kern_sg_entries = 0; 7667 ctsio->kern_data_len = min(total_len, alloc_len); 7668 ctsio->kern_total_len = ctsio->kern_data_len; 7669 7670 mtx_lock(&lun->lun_lock); 7671 switch (cdb->action) { 7672 case SPRI_RK: { // read keys 7673 struct scsi_per_res_in_keys *res_keys; 7674 int i, key_count; 7675 7676 
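		/*
		 * READ KEYS parameter data, as built below, is an 8 byte
		 * header followed by one 8 byte key per registration:
		 *
		 *	offset 0: PRGENERATION      (4 bytes)
		 *	offset 4: ADDITIONAL LENGTH (4 bytes, 8 * pr_key_count)
		 *	offset 8: reservation keys  (8 bytes each)
		 *
		 * so with, say, three registered keys the additional length
		 * reported is 24 and the total_len computed earlier is 32.
		 */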
res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7677 7678 /* 7679 * We had to drop the lock to allocate our buffer, which 7680 * leaves time for someone to come in with another 7681 * persistent reservation. (That is unlikely, though, 7682 * since this should be the only persistent reservation 7683 * command active right now.) 7684 */ 7685 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7686 (lun->pr_key_count * 7687 sizeof(struct scsi_per_res_key)))){ 7688 mtx_unlock(&lun->lun_lock); 7689 free(ctsio->kern_data_ptr, M_CTL); 7690 printf("%s: reservation length changed, retrying\n", 7691 __func__); 7692 goto retry; 7693 } 7694 7695 scsi_ulto4b(lun->pr_generation, res_keys->header.generation); 7696 7697 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7698 lun->pr_key_count, res_keys->header.length); 7699 7700 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { 7701 if ((key = ctl_get_prkey(lun, i)) == 0) 7702 continue; 7703 7704 /* 7705 * We used lun->pr_key_count to calculate the 7706 * size to allocate. If it turns out the number of 7707 * initiators with the registered flag set is 7708 * larger than that (i.e. they haven't been kept in 7709 * sync), we've got a problem. 7710 */ 7711 if (key_count >= lun->pr_key_count) { 7712 key_count++; 7713 continue; 7714 } 7715 scsi_u64to8b(key, res_keys->keys[key_count].key); 7716 key_count++; 7717 } 7718 break; 7719 } 7720 case SPRI_RR: { // read reservation 7721 struct scsi_per_res_in_rsrv *res; 7722 int tmp_len, header_only; 7723 7724 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7725 7726 scsi_ulto4b(lun->pr_generation, res->header.generation); 7727 7728 if (lun->flags & CTL_LUN_PR_RESERVED) 7729 { 7730 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7731 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7732 res->header.length); 7733 header_only = 0; 7734 } else { 7735 tmp_len = sizeof(struct scsi_per_res_in_header); 7736 scsi_ulto4b(0, res->header.length); 7737 header_only = 1; 7738 } 7739 7740 /* 7741 * We had to drop the lock to allocate our buffer, which 7742 * leaves time for someone to come in with another 7743 * persistent reservation. (That is unlikely, though, 7744 * since this should be the only persistent reservation 7745 * command active right now.) 7746 */ 7747 if (tmp_len != total_len) { 7748 mtx_unlock(&lun->lun_lock); 7749 free(ctsio->kern_data_ptr, M_CTL); 7750 printf("%s: reservation status changed, retrying\n", 7751 __func__); 7752 goto retry; 7753 } 7754 7755 /* 7756 * No reservation held, so we're done. 7757 */ 7758 if (header_only != 0) 7759 break; 7760 7761 /* 7762 * If the registration is an All Registrants type, the key 7763 * is 0, since it doesn't really matter. 
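	 * SPC-3 requires the reservation key to be reported as zero for an
	 * all registrants reservation, and the buffer was allocated with
	 * M_ZERO, so skipping the copy below is sufficient.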
7764 */ 7765 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7766 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7767 res->data.reservation); 7768 } 7769 res->data.scopetype = lun->pr_res_type; 7770 break; 7771 } 7772 case SPRI_RC: //report capabilities 7773 { 7774 struct scsi_per_res_cap *res_cap; 7775 uint16_t type_mask; 7776 7777 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7778 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7779 res_cap->flags1 = SPRI_CRH; 7780 res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5; 7781 type_mask = SPRI_TM_WR_EX_AR | 7782 SPRI_TM_EX_AC_RO | 7783 SPRI_TM_WR_EX_RO | 7784 SPRI_TM_EX_AC | 7785 SPRI_TM_WR_EX | 7786 SPRI_TM_EX_AC_AR; 7787 scsi_ulto2b(type_mask, res_cap->type_mask); 7788 break; 7789 } 7790 case SPRI_RS: { // read full status 7791 struct scsi_per_res_in_full *res_status; 7792 struct scsi_per_res_in_full_desc *res_desc; 7793 struct ctl_port *port; 7794 int i, len; 7795 7796 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7797 7798 /* 7799 * We had to drop the lock to allocate our buffer, which 7800 * leaves time for someone to come in with another 7801 * persistent reservation. (That is unlikely, though, 7802 * since this should be the only persistent reservation 7803 * command active right now.) 7804 */ 7805 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7806 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7807 lun->pr_key_count)){ 7808 mtx_unlock(&lun->lun_lock); 7809 free(ctsio->kern_data_ptr, M_CTL); 7810 printf("%s: reservation length changed, retrying\n", 7811 __func__); 7812 goto retry; 7813 } 7814 7815 scsi_ulto4b(lun->pr_generation, res_status->header.generation); 7816 7817 res_desc = &res_status->desc[0]; 7818 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7819 if ((key = ctl_get_prkey(lun, i)) == 0) 7820 continue; 7821 7822 scsi_u64to8b(key, res_desc->res_key.key); 7823 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7824 (lun->pr_res_idx == i || 7825 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7826 res_desc->flags = SPRI_FULL_R_HOLDER; 7827 res_desc->scopetype = lun->pr_res_type; 7828 } 7829 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7830 res_desc->rel_trgt_port_id); 7831 len = 0; 7832 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; 7833 if (port != NULL) 7834 len = ctl_create_iid(port, 7835 i % CTL_MAX_INIT_PER_PORT, 7836 res_desc->transport_id); 7837 scsi_ulto4b(len, res_desc->additional_length); 7838 res_desc = (struct scsi_per_res_in_full_desc *) 7839 &res_desc->transport_id[len]; 7840 } 7841 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7842 res_status->header.length); 7843 break; 7844 } 7845 default: 7846 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7847 } 7848 mtx_unlock(&lun->lun_lock); 7849 7850 ctl_set_success(ctsio); 7851 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7852 ctsio->be_move_done = ctl_config_move_done; 7853 ctl_datamove((union ctl_io *)ctsio); 7854 return (CTL_RETVAL_COMPLETE); 7855 } 7856 7857 /* 7858 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7859 * it should return. 
7860 */ 7861 static int 7862 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7863 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7864 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7865 struct scsi_per_res_out_parms* param) 7866 { 7867 union ctl_ha_msg persis_io; 7868 int i; 7869 7870 mtx_lock(&lun->lun_lock); 7871 if (sa_res_key == 0) { 7872 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7873 /* validate scope and type */ 7874 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7875 SPR_LU_SCOPE) { 7876 mtx_unlock(&lun->lun_lock); 7877 ctl_set_invalid_field(/*ctsio*/ ctsio, 7878 /*sks_valid*/ 1, 7879 /*command*/ 1, 7880 /*field*/ 2, 7881 /*bit_valid*/ 1, 7882 /*bit*/ 4); 7883 ctl_done((union ctl_io *)ctsio); 7884 return (1); 7885 } 7886 7887 if (type>8 || type==2 || type==4 || type==0) { 7888 mtx_unlock(&lun->lun_lock); 7889 ctl_set_invalid_field(/*ctsio*/ ctsio, 7890 /*sks_valid*/ 1, 7891 /*command*/ 1, 7892 /*field*/ 2, 7893 /*bit_valid*/ 1, 7894 /*bit*/ 0); 7895 ctl_done((union ctl_io *)ctsio); 7896 return (1); 7897 } 7898 7899 /* 7900 * Unregister everybody else and build UA for 7901 * them 7902 */ 7903 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7904 if (i == residx || ctl_get_prkey(lun, i) == 0) 7905 continue; 7906 7907 ctl_clr_prkey(lun, i); 7908 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7909 } 7910 lun->pr_key_count = 1; 7911 lun->pr_res_type = type; 7912 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 7913 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 7914 lun->pr_res_idx = residx; 7915 lun->pr_generation++; 7916 mtx_unlock(&lun->lun_lock); 7917 7918 /* send msg to other side */ 7919 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7920 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7921 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7922 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7923 persis_io.pr.pr_info.res_type = type; 7924 memcpy(persis_io.pr.pr_info.sa_res_key, 7925 param->serv_act_res_key, 7926 sizeof(param->serv_act_res_key)); 7927 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7928 sizeof(persis_io.pr), M_WAITOK); 7929 } else { 7930 /* not all registrants */ 7931 mtx_unlock(&lun->lun_lock); 7932 free(ctsio->kern_data_ptr, M_CTL); 7933 ctl_set_invalid_field(ctsio, 7934 /*sks_valid*/ 1, 7935 /*command*/ 0, 7936 /*field*/ 8, 7937 /*bit_valid*/ 0, 7938 /*bit*/ 0); 7939 ctl_done((union ctl_io *)ctsio); 7940 return (1); 7941 } 7942 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7943 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7944 int found = 0; 7945 7946 if (res_key == sa_res_key) { 7947 /* special case */ 7948 /* 7949 * The spec implies this is not good but doesn't 7950 * say what to do. There are two choices either 7951 * generate a res conflict or check condition 7952 * with illegal field in parameter data. Since 7953 * that is what is done when the sa_res_key is 7954 * zero I'll take that approach since this has 7955 * to do with the sa_res_key. 
7956 */ 7957 mtx_unlock(&lun->lun_lock); 7958 free(ctsio->kern_data_ptr, M_CTL); 7959 ctl_set_invalid_field(ctsio, 7960 /*sks_valid*/ 1, 7961 /*command*/ 0, 7962 /*field*/ 8, 7963 /*bit_valid*/ 0, 7964 /*bit*/ 0); 7965 ctl_done((union ctl_io *)ctsio); 7966 return (1); 7967 } 7968 7969 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7970 if (ctl_get_prkey(lun, i) != sa_res_key) 7971 continue; 7972 7973 found = 1; 7974 ctl_clr_prkey(lun, i); 7975 lun->pr_key_count--; 7976 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7977 } 7978 if (!found) { 7979 mtx_unlock(&lun->lun_lock); 7980 free(ctsio->kern_data_ptr, M_CTL); 7981 ctl_set_reservation_conflict(ctsio); 7982 ctl_done((union ctl_io *)ctsio); 7983 return (CTL_RETVAL_COMPLETE); 7984 } 7985 lun->pr_generation++; 7986 mtx_unlock(&lun->lun_lock); 7987 7988 /* send msg to other side */ 7989 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7990 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7991 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7992 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7993 persis_io.pr.pr_info.res_type = type; 7994 memcpy(persis_io.pr.pr_info.sa_res_key, 7995 param->serv_act_res_key, 7996 sizeof(param->serv_act_res_key)); 7997 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7998 sizeof(persis_io.pr), M_WAITOK); 7999 } else { 8000 /* Reserved but not all registrants */ 8001 /* sa_res_key is res holder */ 8002 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 8003 /* validate scope and type */ 8004 if ((cdb->scope_type & SPR_SCOPE_MASK) != 8005 SPR_LU_SCOPE) { 8006 mtx_unlock(&lun->lun_lock); 8007 ctl_set_invalid_field(/*ctsio*/ ctsio, 8008 /*sks_valid*/ 1, 8009 /*command*/ 1, 8010 /*field*/ 2, 8011 /*bit_valid*/ 1, 8012 /*bit*/ 4); 8013 ctl_done((union ctl_io *)ctsio); 8014 return (1); 8015 } 8016 8017 if (type>8 || type==2 || type==4 || type==0) { 8018 mtx_unlock(&lun->lun_lock); 8019 ctl_set_invalid_field(/*ctsio*/ ctsio, 8020 /*sks_valid*/ 1, 8021 /*command*/ 1, 8022 /*field*/ 2, 8023 /*bit_valid*/ 1, 8024 /*bit*/ 0); 8025 ctl_done((union ctl_io *)ctsio); 8026 return (1); 8027 } 8028 8029 /* 8030 * Do the following: 8031 * if sa_res_key != res_key remove all 8032 * registrants w/sa_res_key and generate UA 8033 * for these registrants(Registrations 8034 * Preempted) if it wasn't an exclusive 8035 * reservation generate UA(Reservations 8036 * Preempted) for all other registered nexuses 8037 * if the type has changed. Establish the new 8038 * reservation and holder. If res_key and 8039 * sa_res_key are the same do the above 8040 * except don't unregister the res holder. 
8041 */ 8042 8043 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8044 if (i == residx || ctl_get_prkey(lun, i) == 0) 8045 continue; 8046 8047 if (sa_res_key == ctl_get_prkey(lun, i)) { 8048 ctl_clr_prkey(lun, i); 8049 lun->pr_key_count--; 8050 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8051 } else if (type != lun->pr_res_type && 8052 (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8053 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8054 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8055 } 8056 } 8057 lun->pr_res_type = type; 8058 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8059 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8060 lun->pr_res_idx = residx; 8061 else 8062 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8063 lun->pr_generation++; 8064 mtx_unlock(&lun->lun_lock); 8065 8066 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8067 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8068 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8069 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8070 persis_io.pr.pr_info.res_type = type; 8071 memcpy(persis_io.pr.pr_info.sa_res_key, 8072 param->serv_act_res_key, 8073 sizeof(param->serv_act_res_key)); 8074 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8075 sizeof(persis_io.pr), M_WAITOK); 8076 } else { 8077 /* 8078 * sa_res_key is not the res holder just 8079 * remove registrants 8080 */ 8081 int found=0; 8082 8083 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8084 if (sa_res_key != ctl_get_prkey(lun, i)) 8085 continue; 8086 8087 found = 1; 8088 ctl_clr_prkey(lun, i); 8089 lun->pr_key_count--; 8090 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8091 } 8092 8093 if (!found) { 8094 mtx_unlock(&lun->lun_lock); 8095 free(ctsio->kern_data_ptr, M_CTL); 8096 ctl_set_reservation_conflict(ctsio); 8097 ctl_done((union ctl_io *)ctsio); 8098 return (1); 8099 } 8100 lun->pr_generation++; 8101 mtx_unlock(&lun->lun_lock); 8102 8103 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8104 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8105 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8106 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8107 persis_io.pr.pr_info.res_type = type; 8108 memcpy(persis_io.pr.pr_info.sa_res_key, 8109 param->serv_act_res_key, 8110 sizeof(param->serv_act_res_key)); 8111 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8112 sizeof(persis_io.pr), M_WAITOK); 8113 } 8114 } 8115 return (0); 8116 } 8117 8118 static void 8119 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8120 { 8121 uint64_t sa_res_key; 8122 int i; 8123 8124 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8125 8126 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8127 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8128 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8129 if (sa_res_key == 0) { 8130 /* 8131 * Unregister everybody else and build UA for 8132 * them 8133 */ 8134 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8135 if (i == msg->pr.pr_info.residx || 8136 ctl_get_prkey(lun, i) == 0) 8137 continue; 8138 8139 ctl_clr_prkey(lun, i); 8140 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8141 } 8142 8143 lun->pr_key_count = 1; 8144 lun->pr_res_type = msg->pr.pr_info.res_type; 8145 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8146 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8147 lun->pr_res_idx = msg->pr.pr_info.residx; 8148 } else { 8149 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8150 if (sa_res_key == ctl_get_prkey(lun, i)) 8151 continue; 8152 8153 ctl_clr_prkey(lun, i); 8154 lun->pr_key_count--; 8155 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8156 } 8157 } 8158 } else { 8159 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8160 if (i == 
msg->pr.pr_info.residx || 8161 ctl_get_prkey(lun, i) == 0) 8162 continue; 8163 8164 if (sa_res_key == ctl_get_prkey(lun, i)) { 8165 ctl_clr_prkey(lun, i); 8166 lun->pr_key_count--; 8167 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8168 } else if (msg->pr.pr_info.res_type != lun->pr_res_type 8169 && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8170 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8171 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8172 } 8173 } 8174 lun->pr_res_type = msg->pr.pr_info.res_type; 8175 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8176 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8177 lun->pr_res_idx = msg->pr.pr_info.residx; 8178 else 8179 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8180 } 8181 lun->pr_generation++; 8182 8183 } 8184 8185 int 8186 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8187 { 8188 struct ctl_softc *softc = CTL_SOFTC(ctsio); 8189 struct ctl_lun *lun = CTL_LUN(ctsio); 8190 int retval; 8191 u_int32_t param_len; 8192 struct scsi_per_res_out *cdb; 8193 struct scsi_per_res_out_parms* param; 8194 uint32_t residx; 8195 uint64_t res_key, sa_res_key, key; 8196 uint8_t type; 8197 union ctl_ha_msg persis_io; 8198 int i; 8199 8200 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8201 8202 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8203 retval = CTL_RETVAL_COMPLETE; 8204 8205 /* 8206 * We only support whole-LUN scope. The scope & type are ignored for 8207 * register, register and ignore existing key and clear. 8208 * We sometimes ignore scope and type on preempts too!! 8209 * Verify reservation type here as well. 8210 */ 8211 type = cdb->scope_type & SPR_TYPE_MASK; 8212 if ((cdb->action == SPRO_RESERVE) 8213 || (cdb->action == SPRO_RELEASE)) { 8214 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8215 ctl_set_invalid_field(/*ctsio*/ ctsio, 8216 /*sks_valid*/ 1, 8217 /*command*/ 1, 8218 /*field*/ 2, 8219 /*bit_valid*/ 1, 8220 /*bit*/ 4); 8221 ctl_done((union ctl_io *)ctsio); 8222 return (CTL_RETVAL_COMPLETE); 8223 } 8224 8225 if (type>8 || type==2 || type==4 || type==0) { 8226 ctl_set_invalid_field(/*ctsio*/ ctsio, 8227 /*sks_valid*/ 1, 8228 /*command*/ 1, 8229 /*field*/ 2, 8230 /*bit_valid*/ 1, 8231 /*bit*/ 0); 8232 ctl_done((union ctl_io *)ctsio); 8233 return (CTL_RETVAL_COMPLETE); 8234 } 8235 } 8236 8237 param_len = scsi_4btoul(cdb->length); 8238 8239 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8240 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8241 ctsio->kern_data_len = param_len; 8242 ctsio->kern_total_len = param_len; 8243 ctsio->kern_rel_offset = 0; 8244 ctsio->kern_sg_entries = 0; 8245 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8246 ctsio->be_move_done = ctl_config_move_done; 8247 ctl_datamove((union ctl_io *)ctsio); 8248 8249 return (CTL_RETVAL_COMPLETE); 8250 } 8251 8252 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8253 8254 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8255 res_key = scsi_8btou64(param->res_key.key); 8256 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8257 8258 /* 8259 * Validate the reservation key here except for SPRO_REG_IGNO 8260 * This must be done for all other service actions 8261 */ 8262 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8263 mtx_lock(&lun->lun_lock); 8264 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8265 if (res_key != key) { 8266 /* 8267 * The current key passed in doesn't match 8268 * the one the initiator previously 8269 * registered. 
8270 */ 8271 mtx_unlock(&lun->lun_lock); 8272 free(ctsio->kern_data_ptr, M_CTL); 8273 ctl_set_reservation_conflict(ctsio); 8274 ctl_done((union ctl_io *)ctsio); 8275 return (CTL_RETVAL_COMPLETE); 8276 } 8277 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8278 /* 8279 * We are not registered 8280 */ 8281 mtx_unlock(&lun->lun_lock); 8282 free(ctsio->kern_data_ptr, M_CTL); 8283 ctl_set_reservation_conflict(ctsio); 8284 ctl_done((union ctl_io *)ctsio); 8285 return (CTL_RETVAL_COMPLETE); 8286 } else if (res_key != 0) { 8287 /* 8288 * We are not registered and trying to register but 8289 * the register key isn't zero. 8290 */ 8291 mtx_unlock(&lun->lun_lock); 8292 free(ctsio->kern_data_ptr, M_CTL); 8293 ctl_set_reservation_conflict(ctsio); 8294 ctl_done((union ctl_io *)ctsio); 8295 return (CTL_RETVAL_COMPLETE); 8296 } 8297 mtx_unlock(&lun->lun_lock); 8298 } 8299 8300 switch (cdb->action & SPRO_ACTION_MASK) { 8301 case SPRO_REGISTER: 8302 case SPRO_REG_IGNO: { 8303 /* 8304 * We don't support any of these options, as we report in 8305 * the read capabilities request (see 8306 * ctl_persistent_reserve_in(), above). 8307 */ 8308 if ((param->flags & SPR_SPEC_I_PT) 8309 || (param->flags & SPR_ALL_TG_PT) 8310 || (param->flags & SPR_APTPL)) { 8311 int bit_ptr; 8312 8313 if (param->flags & SPR_APTPL) 8314 bit_ptr = 0; 8315 else if (param->flags & SPR_ALL_TG_PT) 8316 bit_ptr = 2; 8317 else /* SPR_SPEC_I_PT */ 8318 bit_ptr = 3; 8319 8320 free(ctsio->kern_data_ptr, M_CTL); 8321 ctl_set_invalid_field(ctsio, 8322 /*sks_valid*/ 1, 8323 /*command*/ 0, 8324 /*field*/ 20, 8325 /*bit_valid*/ 1, 8326 /*bit*/ bit_ptr); 8327 ctl_done((union ctl_io *)ctsio); 8328 return (CTL_RETVAL_COMPLETE); 8329 } 8330 8331 mtx_lock(&lun->lun_lock); 8332 8333 /* 8334 * The initiator wants to clear the 8335 * key/unregister. 8336 */ 8337 if (sa_res_key == 0) { 8338 if ((res_key == 0 8339 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8340 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8341 && ctl_get_prkey(lun, residx) == 0)) { 8342 mtx_unlock(&lun->lun_lock); 8343 goto done; 8344 } 8345 8346 ctl_clr_prkey(lun, residx); 8347 lun->pr_key_count--; 8348 8349 if (residx == lun->pr_res_idx) { 8350 lun->flags &= ~CTL_LUN_PR_RESERVED; 8351 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8352 8353 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8354 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8355 lun->pr_key_count) { 8356 /* 8357 * If the reservation is a registrants 8358 * only type we need to generate a UA 8359 * for other registered inits. The 8360 * sense code should be RESERVATIONS 8361 * RELEASED 8362 */ 8363 8364 for (i = softc->init_min; i < softc->init_max; i++){ 8365 if (ctl_get_prkey(lun, i) == 0) 8366 continue; 8367 ctl_est_ua(lun, i, 8368 CTL_UA_RES_RELEASE); 8369 } 8370 } 8371 lun->pr_res_type = 0; 8372 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8373 if (lun->pr_key_count==0) { 8374 lun->flags &= ~CTL_LUN_PR_RESERVED; 8375 lun->pr_res_type = 0; 8376 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8377 } 8378 } 8379 lun->pr_generation++; 8380 mtx_unlock(&lun->lun_lock); 8381 8382 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8383 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8384 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8385 persis_io.pr.pr_info.residx = residx; 8386 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8387 sizeof(persis_io.pr), M_WAITOK); 8388 } else /* sa_res_key != 0 */ { 8389 /* 8390 * If we aren't registered currently then increment 8391 * the key count and set the registered flag. 
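	 * ctl_alloc_prkey() only ensures that key storage exists for this
	 * initiator's port; a key value of zero is what marks a nexus as
	 * unregistered, so a nexus that is already registered simply has
	 * its key replaced without disturbing pr_key_count.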
8392 */ 8393 ctl_alloc_prkey(lun, residx); 8394 if (ctl_get_prkey(lun, residx) == 0) 8395 lun->pr_key_count++; 8396 ctl_set_prkey(lun, residx, sa_res_key); 8397 lun->pr_generation++; 8398 mtx_unlock(&lun->lun_lock); 8399 8400 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8401 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8402 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8403 persis_io.pr.pr_info.residx = residx; 8404 memcpy(persis_io.pr.pr_info.sa_res_key, 8405 param->serv_act_res_key, 8406 sizeof(param->serv_act_res_key)); 8407 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8408 sizeof(persis_io.pr), M_WAITOK); 8409 } 8410 8411 break; 8412 } 8413 case SPRO_RESERVE: 8414 mtx_lock(&lun->lun_lock); 8415 if (lun->flags & CTL_LUN_PR_RESERVED) { 8416 /* 8417 * if this isn't the reservation holder and it's 8418 * not a "all registrants" type or if the type is 8419 * different then we have a conflict 8420 */ 8421 if ((lun->pr_res_idx != residx 8422 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8423 || lun->pr_res_type != type) { 8424 mtx_unlock(&lun->lun_lock); 8425 free(ctsio->kern_data_ptr, M_CTL); 8426 ctl_set_reservation_conflict(ctsio); 8427 ctl_done((union ctl_io *)ctsio); 8428 return (CTL_RETVAL_COMPLETE); 8429 } 8430 mtx_unlock(&lun->lun_lock); 8431 } else /* create a reservation */ { 8432 /* 8433 * If it's not an "all registrants" type record 8434 * reservation holder 8435 */ 8436 if (type != SPR_TYPE_WR_EX_AR 8437 && type != SPR_TYPE_EX_AC_AR) 8438 lun->pr_res_idx = residx; /* Res holder */ 8439 else 8440 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8441 8442 lun->flags |= CTL_LUN_PR_RESERVED; 8443 lun->pr_res_type = type; 8444 8445 mtx_unlock(&lun->lun_lock); 8446 8447 /* send msg to other side */ 8448 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8449 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8450 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8451 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8452 persis_io.pr.pr_info.res_type = type; 8453 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8454 sizeof(persis_io.pr), M_WAITOK); 8455 } 8456 break; 8457 8458 case SPRO_RELEASE: 8459 mtx_lock(&lun->lun_lock); 8460 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8461 /* No reservation exists return good status */ 8462 mtx_unlock(&lun->lun_lock); 8463 goto done; 8464 } 8465 /* 8466 * Is this nexus a reservation holder? 8467 */ 8468 if (lun->pr_res_idx != residx 8469 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8470 /* 8471 * not a res holder return good status but 8472 * do nothing 8473 */ 8474 mtx_unlock(&lun->lun_lock); 8475 goto done; 8476 } 8477 8478 if (lun->pr_res_type != type) { 8479 mtx_unlock(&lun->lun_lock); 8480 free(ctsio->kern_data_ptr, M_CTL); 8481 ctl_set_illegal_pr_release(ctsio); 8482 ctl_done((union ctl_io *)ctsio); 8483 return (CTL_RETVAL_COMPLETE); 8484 } 8485 8486 /* okay to release */ 8487 lun->flags &= ~CTL_LUN_PR_RESERVED; 8488 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8489 lun->pr_res_type = 0; 8490 8491 /* 8492 * If this isn't an exclusive access reservation and NUAR 8493 * is not set, generate UA for all other registrants. 
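	 * SPC-3 only calls for the RESERVATIONS RELEASED unit attention when
	 * a registrants only or all registrants reservation is released, and
	 * the NUAR (no unit attention on release) bit in the Control mode
	 * page suppresses it entirely.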
8494 */ 8495 if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX && 8496 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8497 for (i = softc->init_min; i < softc->init_max; i++) { 8498 if (i == residx || ctl_get_prkey(lun, i) == 0) 8499 continue; 8500 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8501 } 8502 } 8503 mtx_unlock(&lun->lun_lock); 8504 8505 /* Send msg to other side */ 8506 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8507 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8508 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8509 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8510 sizeof(persis_io.pr), M_WAITOK); 8511 break; 8512 8513 case SPRO_CLEAR: 8514 /* send msg to other side */ 8515 8516 mtx_lock(&lun->lun_lock); 8517 lun->flags &= ~CTL_LUN_PR_RESERVED; 8518 lun->pr_res_type = 0; 8519 lun->pr_key_count = 0; 8520 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8521 8522 ctl_clr_prkey(lun, residx); 8523 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8524 if (ctl_get_prkey(lun, i) != 0) { 8525 ctl_clr_prkey(lun, i); 8526 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8527 } 8528 lun->pr_generation++; 8529 mtx_unlock(&lun->lun_lock); 8530 8531 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8532 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8533 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8534 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8535 sizeof(persis_io.pr), M_WAITOK); 8536 break; 8537 8538 case SPRO_PREEMPT: 8539 case SPRO_PRE_ABO: { 8540 int nretval; 8541 8542 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8543 residx, ctsio, cdb, param); 8544 if (nretval != 0) 8545 return (CTL_RETVAL_COMPLETE); 8546 break; 8547 } 8548 default: 8549 panic("%s: Invalid PR type %#x", __func__, cdb->action); 8550 } 8551 8552 done: 8553 free(ctsio->kern_data_ptr, M_CTL); 8554 ctl_set_success(ctsio); 8555 ctl_done((union ctl_io *)ctsio); 8556 8557 return (retval); 8558 } 8559 8560 /* 8561 * This routine is for handling a message from the other SC pertaining to 8562 * persistent reserve out. All the error checking will have been done 8563 * so only perorming the action need be done here to keep the two 8564 * in sync. 8565 */ 8566 static void 8567 ctl_hndl_per_res_out_on_other_sc(union ctl_io *io) 8568 { 8569 struct ctl_softc *softc = CTL_SOFTC(io); 8570 union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg; 8571 struct ctl_lun *lun; 8572 int i; 8573 uint32_t residx, targ_lun; 8574 8575 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8576 mtx_lock(&softc->ctl_lock); 8577 if (targ_lun >= ctl_max_luns || 8578 (lun = softc->ctl_luns[targ_lun]) == NULL) { 8579 mtx_unlock(&softc->ctl_lock); 8580 return; 8581 } 8582 mtx_lock(&lun->lun_lock); 8583 mtx_unlock(&softc->ctl_lock); 8584 if (lun->flags & CTL_LUN_DISABLED) { 8585 mtx_unlock(&lun->lun_lock); 8586 return; 8587 } 8588 residx = ctl_get_initindex(&msg->hdr.nexus); 8589 switch(msg->pr.pr_info.action) { 8590 case CTL_PR_REG_KEY: 8591 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8592 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8593 lun->pr_key_count++; 8594 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8595 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8596 lun->pr_generation++; 8597 break; 8598 8599 case CTL_PR_UNREG_KEY: 8600 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8601 lun->pr_key_count--; 8602 8603 /* XXX Need to see if the reservation has been released */ 8604 /* if so do we need to generate UA? 
*/ 8605 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8606 lun->flags &= ~CTL_LUN_PR_RESERVED; 8607 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8608 8609 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8610 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8611 lun->pr_key_count) { 8612 /* 8613 * If the reservation is a registrants 8614 * only type we need to generate a UA 8615 * for other registered inits. The 8616 * sense code should be RESERVATIONS 8617 * RELEASED 8618 */ 8619 8620 for (i = softc->init_min; i < softc->init_max; i++) { 8621 if (ctl_get_prkey(lun, i) == 0) 8622 continue; 8623 8624 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8625 } 8626 } 8627 lun->pr_res_type = 0; 8628 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8629 if (lun->pr_key_count==0) { 8630 lun->flags &= ~CTL_LUN_PR_RESERVED; 8631 lun->pr_res_type = 0; 8632 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8633 } 8634 } 8635 lun->pr_generation++; 8636 break; 8637 8638 case CTL_PR_RESERVE: 8639 lun->flags |= CTL_LUN_PR_RESERVED; 8640 lun->pr_res_type = msg->pr.pr_info.res_type; 8641 lun->pr_res_idx = msg->pr.pr_info.residx; 8642 8643 break; 8644 8645 case CTL_PR_RELEASE: 8646 /* 8647 * If this isn't an exclusive access reservation and NUAR 8648 * is not set, generate UA for all other registrants. 8649 */ 8650 if (lun->pr_res_type != SPR_TYPE_EX_AC && 8651 lun->pr_res_type != SPR_TYPE_WR_EX && 8652 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8653 for (i = softc->init_min; i < softc->init_max; i++) { 8654 if (i == residx || ctl_get_prkey(lun, i) == 0) 8655 continue; 8656 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8657 } 8658 } 8659 8660 lun->flags &= ~CTL_LUN_PR_RESERVED; 8661 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8662 lun->pr_res_type = 0; 8663 break; 8664 8665 case CTL_PR_PREEMPT: 8666 ctl_pro_preempt_other(lun, msg); 8667 break; 8668 case CTL_PR_CLEAR: 8669 lun->flags &= ~CTL_LUN_PR_RESERVED; 8670 lun->pr_res_type = 0; 8671 lun->pr_key_count = 0; 8672 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8673 8674 for (i=0; i < CTL_MAX_INITIATORS; i++) { 8675 if (ctl_get_prkey(lun, i) == 0) 8676 continue; 8677 ctl_clr_prkey(lun, i); 8678 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8679 } 8680 lun->pr_generation++; 8681 break; 8682 } 8683 8684 mtx_unlock(&lun->lun_lock); 8685 } 8686 8687 int 8688 ctl_read_write(struct ctl_scsiio *ctsio) 8689 { 8690 struct ctl_lun *lun = CTL_LUN(ctsio); 8691 struct ctl_lba_len_flags *lbalen; 8692 uint64_t lba; 8693 uint32_t num_blocks; 8694 int flags, retval; 8695 int isread; 8696 8697 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8698 8699 flags = 0; 8700 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8701 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8702 switch (ctsio->cdb[0]) { 8703 case READ_6: 8704 case WRITE_6: { 8705 struct scsi_rw_6 *cdb; 8706 8707 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8708 8709 lba = scsi_3btoul(cdb->addr); 8710 /* only 5 bits are valid in the most significant address byte */ 8711 lba &= 0x1fffff; 8712 num_blocks = cdb->length; 8713 /* 8714 * This is correct according to SBC-2. 
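	 * A TRANSFER LENGTH of zero in a READ(6) or WRITE(6) CDB means 256
	 * blocks, so a length byte of 0x00 moves 256 blocks while 0x01 moves
	 * a single block; the 10/12/16 byte CDBs do not have this quirk.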
8715 */ 8716 if (num_blocks == 0) 8717 num_blocks = 256; 8718 break; 8719 } 8720 case READ_10: 8721 case WRITE_10: { 8722 struct scsi_rw_10 *cdb; 8723 8724 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8725 if (cdb->byte2 & SRW10_FUA) 8726 flags |= CTL_LLF_FUA; 8727 if (cdb->byte2 & SRW10_DPO) 8728 flags |= CTL_LLF_DPO; 8729 lba = scsi_4btoul(cdb->addr); 8730 num_blocks = scsi_2btoul(cdb->length); 8731 break; 8732 } 8733 case WRITE_VERIFY_10: { 8734 struct scsi_write_verify_10 *cdb; 8735 8736 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8737 flags |= CTL_LLF_FUA; 8738 if (cdb->byte2 & SWV_DPO) 8739 flags |= CTL_LLF_DPO; 8740 lba = scsi_4btoul(cdb->addr); 8741 num_blocks = scsi_2btoul(cdb->length); 8742 break; 8743 } 8744 case READ_12: 8745 case WRITE_12: { 8746 struct scsi_rw_12 *cdb; 8747 8748 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8749 if (cdb->byte2 & SRW12_FUA) 8750 flags |= CTL_LLF_FUA; 8751 if (cdb->byte2 & SRW12_DPO) 8752 flags |= CTL_LLF_DPO; 8753 lba = scsi_4btoul(cdb->addr); 8754 num_blocks = scsi_4btoul(cdb->length); 8755 break; 8756 } 8757 case WRITE_VERIFY_12: { 8758 struct scsi_write_verify_12 *cdb; 8759 8760 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8761 flags |= CTL_LLF_FUA; 8762 if (cdb->byte2 & SWV_DPO) 8763 flags |= CTL_LLF_DPO; 8764 lba = scsi_4btoul(cdb->addr); 8765 num_blocks = scsi_4btoul(cdb->length); 8766 break; 8767 } 8768 case READ_16: 8769 case WRITE_16: { 8770 struct scsi_rw_16 *cdb; 8771 8772 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8773 if (cdb->byte2 & SRW12_FUA) 8774 flags |= CTL_LLF_FUA; 8775 if (cdb->byte2 & SRW12_DPO) 8776 flags |= CTL_LLF_DPO; 8777 lba = scsi_8btou64(cdb->addr); 8778 num_blocks = scsi_4btoul(cdb->length); 8779 break; 8780 } 8781 case WRITE_ATOMIC_16: { 8782 struct scsi_write_atomic_16 *cdb; 8783 8784 if (lun->be_lun->atomicblock == 0) { 8785 ctl_set_invalid_opcode(ctsio); 8786 ctl_done((union ctl_io *)ctsio); 8787 return (CTL_RETVAL_COMPLETE); 8788 } 8789 8790 cdb = (struct scsi_write_atomic_16 *)ctsio->cdb; 8791 if (cdb->byte2 & SRW12_FUA) 8792 flags |= CTL_LLF_FUA; 8793 if (cdb->byte2 & SRW12_DPO) 8794 flags |= CTL_LLF_DPO; 8795 lba = scsi_8btou64(cdb->addr); 8796 num_blocks = scsi_2btoul(cdb->length); 8797 if (num_blocks > lun->be_lun->atomicblock) { 8798 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8799 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8800 /*bit*/ 0); 8801 ctl_done((union ctl_io *)ctsio); 8802 return (CTL_RETVAL_COMPLETE); 8803 } 8804 break; 8805 } 8806 case WRITE_VERIFY_16: { 8807 struct scsi_write_verify_16 *cdb; 8808 8809 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8810 flags |= CTL_LLF_FUA; 8811 if (cdb->byte2 & SWV_DPO) 8812 flags |= CTL_LLF_DPO; 8813 lba = scsi_8btou64(cdb->addr); 8814 num_blocks = scsi_4btoul(cdb->length); 8815 break; 8816 } 8817 default: 8818 /* 8819 * We got a command we don't support. This shouldn't 8820 * happen, commands should be filtered out above us. 8821 */ 8822 ctl_set_invalid_opcode(ctsio); 8823 ctl_done((union ctl_io *)ctsio); 8824 8825 return (CTL_RETVAL_COMPLETE); 8826 break; /* NOTREACHED */ 8827 } 8828 8829 /* 8830 * The first check is to make sure we're in bounds, the second 8831 * check is to catch wrap-around problems. If the lba + num blocks 8832 * is less than the lba, then we've wrapped around and the block 8833 * range is invalid anyway. 
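	 * For example, with maxlba == 999 a request for 10 blocks at LBA 990
	 * is accepted (990 + 10 == maxlba + 1) while 10 blocks at LBA 995 is
	 * rejected; an LBA near UINT64_MAX with a nonzero length overflows
	 * the 64 bit sum and is caught by the second test.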
8834 */ 8835 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8836 || ((lba + num_blocks) < lba)) { 8837 ctl_set_lba_out_of_range(ctsio, 8838 MAX(lba, lun->be_lun->maxlba + 1)); 8839 ctl_done((union ctl_io *)ctsio); 8840 return (CTL_RETVAL_COMPLETE); 8841 } 8842 8843 /* 8844 * According to SBC-3, a transfer length of 0 is not an error. 8845 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8846 * translates to 256 blocks for those commands. 8847 */ 8848 if (num_blocks == 0) { 8849 ctl_set_success(ctsio); 8850 ctl_done((union ctl_io *)ctsio); 8851 return (CTL_RETVAL_COMPLETE); 8852 } 8853 8854 /* Set FUA and/or DPO if caches are disabled. */ 8855 if (isread) { 8856 if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0) 8857 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8858 } else { 8859 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8860 flags |= CTL_LLF_FUA; 8861 } 8862 8863 lbalen = (struct ctl_lba_len_flags *) 8864 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8865 lbalen->lba = lba; 8866 lbalen->len = num_blocks; 8867 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8868 8869 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8870 ctsio->kern_rel_offset = 0; 8871 8872 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8873 8874 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8875 return (retval); 8876 } 8877 8878 static int 8879 ctl_cnw_cont(union ctl_io *io) 8880 { 8881 struct ctl_lun *lun = CTL_LUN(io); 8882 struct ctl_scsiio *ctsio; 8883 struct ctl_lba_len_flags *lbalen; 8884 int retval; 8885 8886 ctsio = &io->scsiio; 8887 ctsio->io_hdr.status = CTL_STATUS_NONE; 8888 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8889 lbalen = (struct ctl_lba_len_flags *) 8890 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8891 lbalen->flags &= ~CTL_LLF_COMPARE; 8892 lbalen->flags |= CTL_LLF_WRITE; 8893 8894 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8895 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8896 return (retval); 8897 } 8898 8899 int 8900 ctl_cnw(struct ctl_scsiio *ctsio) 8901 { 8902 struct ctl_lun *lun = CTL_LUN(ctsio); 8903 struct ctl_lba_len_flags *lbalen; 8904 uint64_t lba; 8905 uint32_t num_blocks; 8906 int flags, retval; 8907 8908 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8909 8910 flags = 0; 8911 switch (ctsio->cdb[0]) { 8912 case COMPARE_AND_WRITE: { 8913 struct scsi_compare_and_write *cdb; 8914 8915 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8916 if (cdb->byte2 & SRW10_FUA) 8917 flags |= CTL_LLF_FUA; 8918 if (cdb->byte2 & SRW10_DPO) 8919 flags |= CTL_LLF_DPO; 8920 lba = scsi_8btou64(cdb->addr); 8921 num_blocks = cdb->length; 8922 break; 8923 } 8924 default: 8925 /* 8926 * We got a command we don't support. This shouldn't 8927 * happen, commands should be filtered out above us. 8928 */ 8929 ctl_set_invalid_opcode(ctsio); 8930 ctl_done((union ctl_io *)ctsio); 8931 8932 return (CTL_RETVAL_COMPLETE); 8933 break; /* NOTREACHED */ 8934 } 8935 8936 /* 8937 * The first check is to make sure we're in bounds, the second 8938 * check is to catch wrap-around problems. If the lba + num blocks 8939 * is less than the lba, then we've wrapped around and the block 8940 * range is invalid anyway. 
8941 */ 8942 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8943 || ((lba + num_blocks) < lba)) { 8944 ctl_set_lba_out_of_range(ctsio, 8945 MAX(lba, lun->be_lun->maxlba + 1)); 8946 ctl_done((union ctl_io *)ctsio); 8947 return (CTL_RETVAL_COMPLETE); 8948 } 8949 8950 /* 8951 * According to SBC-3, a transfer length of 0 is not an error. 8952 */ 8953 if (num_blocks == 0) { 8954 ctl_set_success(ctsio); 8955 ctl_done((union ctl_io *)ctsio); 8956 return (CTL_RETVAL_COMPLETE); 8957 } 8958 8959 /* Set FUA if write cache is disabled. */ 8960 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8961 flags |= CTL_LLF_FUA; 8962 8963 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 8964 ctsio->kern_rel_offset = 0; 8965 8966 /* 8967 * Set the IO_CONT flag, so that if this I/O gets passed to 8968 * ctl_data_submit_done(), it'll get passed back to 8969 * ctl_ctl_cnw_cont() for further processing. 8970 */ 8971 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 8972 ctsio->io_cont = ctl_cnw_cont; 8973 8974 lbalen = (struct ctl_lba_len_flags *) 8975 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8976 lbalen->lba = lba; 8977 lbalen->len = num_blocks; 8978 lbalen->flags = CTL_LLF_COMPARE | flags; 8979 8980 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 8981 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8982 return (retval); 8983 } 8984 8985 int 8986 ctl_verify(struct ctl_scsiio *ctsio) 8987 { 8988 struct ctl_lun *lun = CTL_LUN(ctsio); 8989 struct ctl_lba_len_flags *lbalen; 8990 uint64_t lba; 8991 uint32_t num_blocks; 8992 int bytchk, flags; 8993 int retval; 8994 8995 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 8996 8997 bytchk = 0; 8998 flags = CTL_LLF_FUA; 8999 switch (ctsio->cdb[0]) { 9000 case VERIFY_10: { 9001 struct scsi_verify_10 *cdb; 9002 9003 cdb = (struct scsi_verify_10 *)ctsio->cdb; 9004 if (cdb->byte2 & SVFY_BYTCHK) 9005 bytchk = 1; 9006 if (cdb->byte2 & SVFY_DPO) 9007 flags |= CTL_LLF_DPO; 9008 lba = scsi_4btoul(cdb->addr); 9009 num_blocks = scsi_2btoul(cdb->length); 9010 break; 9011 } 9012 case VERIFY_12: { 9013 struct scsi_verify_12 *cdb; 9014 9015 cdb = (struct scsi_verify_12 *)ctsio->cdb; 9016 if (cdb->byte2 & SVFY_BYTCHK) 9017 bytchk = 1; 9018 if (cdb->byte2 & SVFY_DPO) 9019 flags |= CTL_LLF_DPO; 9020 lba = scsi_4btoul(cdb->addr); 9021 num_blocks = scsi_4btoul(cdb->length); 9022 break; 9023 } 9024 case VERIFY_16: { 9025 struct scsi_rw_16 *cdb; 9026 9027 cdb = (struct scsi_rw_16 *)ctsio->cdb; 9028 if (cdb->byte2 & SVFY_BYTCHK) 9029 bytchk = 1; 9030 if (cdb->byte2 & SVFY_DPO) 9031 flags |= CTL_LLF_DPO; 9032 lba = scsi_8btou64(cdb->addr); 9033 num_blocks = scsi_4btoul(cdb->length); 9034 break; 9035 } 9036 default: 9037 /* 9038 * We got a command we don't support. This shouldn't 9039 * happen, commands should be filtered out above us. 9040 */ 9041 ctl_set_invalid_opcode(ctsio); 9042 ctl_done((union ctl_io *)ctsio); 9043 return (CTL_RETVAL_COMPLETE); 9044 } 9045 9046 /* 9047 * The first check is to make sure we're in bounds, the second 9048 * check is to catch wrap-around problems. If the lba + num blocks 9049 * is less than the lba, then we've wrapped around and the block 9050 * range is invalid anyway. 9051 */ 9052 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9053 || ((lba + num_blocks) < lba)) { 9054 ctl_set_lba_out_of_range(ctsio, 9055 MAX(lba, lun->be_lun->maxlba + 1)); 9056 ctl_done((union ctl_io *)ctsio); 9057 return (CTL_RETVAL_COMPLETE); 9058 } 9059 9060 /* 9061 * According to SBC-3, a transfer length of 0 is not an error. 
9062 */ 9063 if (num_blocks == 0) { 9064 ctl_set_success(ctsio); 9065 ctl_done((union ctl_io *)ctsio); 9066 return (CTL_RETVAL_COMPLETE); 9067 } 9068 9069 lbalen = (struct ctl_lba_len_flags *) 9070 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9071 lbalen->lba = lba; 9072 lbalen->len = num_blocks; 9073 if (bytchk) { 9074 lbalen->flags = CTL_LLF_COMPARE | flags; 9075 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9076 } else { 9077 lbalen->flags = CTL_LLF_VERIFY | flags; 9078 ctsio->kern_total_len = 0; 9079 } 9080 ctsio->kern_rel_offset = 0; 9081 9082 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 9083 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9084 return (retval); 9085 } 9086 9087 int 9088 ctl_report_luns(struct ctl_scsiio *ctsio) 9089 { 9090 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9091 struct ctl_port *port = CTL_PORT(ctsio); 9092 struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio); 9093 struct scsi_report_luns *cdb; 9094 struct scsi_report_luns_data *lun_data; 9095 int num_filled, num_luns, num_port_luns, retval; 9096 uint32_t alloc_len, lun_datalen; 9097 uint32_t initidx, targ_lun_id, lun_id; 9098 9099 retval = CTL_RETVAL_COMPLETE; 9100 cdb = (struct scsi_report_luns *)ctsio->cdb; 9101 9102 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9103 9104 num_luns = 0; 9105 num_port_luns = port->lun_map ? port->lun_map_size : ctl_max_luns; 9106 mtx_lock(&softc->ctl_lock); 9107 for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) { 9108 if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX) 9109 num_luns++; 9110 } 9111 mtx_unlock(&softc->ctl_lock); 9112 9113 switch (cdb->select_report) { 9114 case RPL_REPORT_DEFAULT: 9115 case RPL_REPORT_ALL: 9116 case RPL_REPORT_NONSUBSID: 9117 break; 9118 case RPL_REPORT_WELLKNOWN: 9119 case RPL_REPORT_ADMIN: 9120 case RPL_REPORT_CONGLOM: 9121 num_luns = 0; 9122 break; 9123 default: 9124 ctl_set_invalid_field(ctsio, 9125 /*sks_valid*/ 1, 9126 /*command*/ 1, 9127 /*field*/ 2, 9128 /*bit_valid*/ 0, 9129 /*bit*/ 0); 9130 ctl_done((union ctl_io *)ctsio); 9131 return (retval); 9132 break; /* NOTREACHED */ 9133 } 9134 9135 alloc_len = scsi_4btoul(cdb->length); 9136 /* 9137 * The initiator has to allocate at least 16 bytes for this request, 9138 * so he can at least get the header and the first LUN. Otherwise 9139 * we reject the request (per SPC-3 rev 14, section 6.21). 
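	 * That is the 8 byte REPORT LUNS header plus a single 8 byte LUN
	 * entry, which is what the sizeof() sum in the check below adds up
	 * to.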
9140 */ 9141 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9142 sizeof(struct scsi_report_luns_lundata))) { 9143 ctl_set_invalid_field(ctsio, 9144 /*sks_valid*/ 1, 9145 /*command*/ 1, 9146 /*field*/ 6, 9147 /*bit_valid*/ 0, 9148 /*bit*/ 0); 9149 ctl_done((union ctl_io *)ctsio); 9150 return (retval); 9151 } 9152 9153 lun_datalen = sizeof(*lun_data) + 9154 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9155 9156 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9157 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9158 ctsio->kern_sg_entries = 0; 9159 9160 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9161 9162 mtx_lock(&softc->ctl_lock); 9163 for (targ_lun_id = 0, num_filled = 0; 9164 targ_lun_id < num_port_luns && num_filled < num_luns; 9165 targ_lun_id++) { 9166 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 9167 if (lun_id == UINT32_MAX) 9168 continue; 9169 lun = softc->ctl_luns[lun_id]; 9170 if (lun == NULL) 9171 continue; 9172 9173 be64enc(lun_data->luns[num_filled++].lundata, 9174 ctl_encode_lun(targ_lun_id)); 9175 9176 /* 9177 * According to SPC-3, rev 14 section 6.21: 9178 * 9179 * "The execution of a REPORT LUNS command to any valid and 9180 * installed logical unit shall clear the REPORTED LUNS DATA 9181 * HAS CHANGED unit attention condition for all logical 9182 * units of that target with respect to the requesting 9183 * initiator. A valid and installed logical unit is one 9184 * having a PERIPHERAL QUALIFIER of 000b in the standard 9185 * INQUIRY data (see 6.4.2)." 9186 * 9187 * If request_lun is NULL, the LUN this report luns command 9188 * was issued to is either disabled or doesn't exist. In that 9189 * case, we shouldn't clear any pending lun change unit 9190 * attention. 9191 */ 9192 if (request_lun != NULL) { 9193 mtx_lock(&lun->lun_lock); 9194 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 9195 mtx_unlock(&lun->lun_lock); 9196 } 9197 } 9198 mtx_unlock(&softc->ctl_lock); 9199 9200 /* 9201 * It's quite possible that we've returned fewer LUNs than we allocated 9202 * space for. Trim it. 9203 */ 9204 lun_datalen = sizeof(*lun_data) + 9205 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9206 ctsio->kern_rel_offset = 0; 9207 ctsio->kern_sg_entries = 0; 9208 ctsio->kern_data_len = min(lun_datalen, alloc_len); 9209 ctsio->kern_total_len = ctsio->kern_data_len; 9210 9211 /* 9212 * We set this to the actual data length, regardless of how much 9213 * space we actually have to return results. If the user looks at 9214 * this value, he'll know whether or not he allocated enough space 9215 * and reissue the command if necessary. We don't support well 9216 * known logical units, so if the user asks for that, return none. 9217 */ 9218 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9219 9220 /* 9221 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9222 * this request. 
9223 */ 9224 ctl_set_success(ctsio); 9225 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9226 ctsio->be_move_done = ctl_config_move_done; 9227 ctl_datamove((union ctl_io *)ctsio); 9228 return (retval); 9229 } 9230 9231 int 9232 ctl_request_sense(struct ctl_scsiio *ctsio) 9233 { 9234 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9235 struct ctl_lun *lun = CTL_LUN(ctsio); 9236 struct scsi_request_sense *cdb; 9237 struct scsi_sense_data *sense_ptr, *ps; 9238 uint32_t initidx; 9239 int have_error; 9240 u_int sense_len = SSD_FULL_SIZE; 9241 scsi_sense_data_type sense_format; 9242 ctl_ua_type ua_type; 9243 uint8_t asc = 0, ascq = 0; 9244 9245 cdb = (struct scsi_request_sense *)ctsio->cdb; 9246 9247 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9248 9249 /* 9250 * Determine which sense format the user wants. 9251 */ 9252 if (cdb->byte2 & SRS_DESC) 9253 sense_format = SSD_TYPE_DESC; 9254 else 9255 sense_format = SSD_TYPE_FIXED; 9256 9257 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9258 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9259 ctsio->kern_sg_entries = 0; 9260 ctsio->kern_rel_offset = 0; 9261 9262 /* 9263 * struct scsi_sense_data, which is currently set to 256 bytes, is 9264 * larger than the largest allowed value for the length field in the 9265 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9266 */ 9267 ctsio->kern_data_len = cdb->length; 9268 ctsio->kern_total_len = cdb->length; 9269 9270 /* 9271 * If we don't have a LUN, we don't have any pending sense. 9272 */ 9273 if (lun == NULL || 9274 ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 9275 softc->ha_link < CTL_HA_LINK_UNKNOWN)) { 9276 /* "Logical unit not supported" */ 9277 ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format, 9278 /*current_error*/ 1, 9279 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 9280 /*asc*/ 0x25, 9281 /*ascq*/ 0x00, 9282 SSD_ELEM_NONE); 9283 goto send; 9284 } 9285 9286 have_error = 0; 9287 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9288 /* 9289 * Check for pending sense, and then for pending unit attentions. 9290 * Pending sense gets returned first, then pending unit attentions. 9291 */ 9292 mtx_lock(&lun->lun_lock); 9293 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; 9294 if (ps != NULL) 9295 ps += initidx % CTL_MAX_INIT_PER_PORT; 9296 if (ps != NULL && ps->error_code != 0) { 9297 scsi_sense_data_type stored_format; 9298 9299 /* 9300 * Check to see which sense format was used for the stored 9301 * sense data. 9302 */ 9303 stored_format = scsi_sense_type(ps); 9304 9305 /* 9306 * If the user requested a different sense format than the 9307 * one we stored, then we need to convert it to the other 9308 * format. If we're going from descriptor to fixed format 9309 * sense data, we may lose things in translation, depending 9310 * on what options were used. 9311 * 9312 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9313 * for some reason we'll just copy it out as-is. 
9314 */ 9315 if ((stored_format == SSD_TYPE_FIXED) 9316 && (sense_format == SSD_TYPE_DESC)) 9317 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9318 ps, (struct scsi_sense_data_desc *)sense_ptr); 9319 else if ((stored_format == SSD_TYPE_DESC) 9320 && (sense_format == SSD_TYPE_FIXED)) 9321 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9322 ps, (struct scsi_sense_data_fixed *)sense_ptr); 9323 else 9324 memcpy(sense_ptr, ps, sizeof(*sense_ptr)); 9325 9326 ps->error_code = 0; 9327 have_error = 1; 9328 } else { 9329 ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len, 9330 sense_format); 9331 if (ua_type != CTL_UA_NONE) 9332 have_error = 1; 9333 } 9334 if (have_error == 0) { 9335 /* 9336 * Report informational exception if have one and allowed. 9337 */ 9338 if (lun->MODE_IE.mrie != SIEP_MRIE_NO) { 9339 asc = lun->ie_asc; 9340 ascq = lun->ie_ascq; 9341 } 9342 ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format, 9343 /*current_error*/ 1, 9344 /*sense_key*/ SSD_KEY_NO_SENSE, 9345 /*asc*/ asc, 9346 /*ascq*/ ascq, 9347 SSD_ELEM_NONE); 9348 } 9349 mtx_unlock(&lun->lun_lock); 9350 9351 send: 9352 /* 9353 * We report the SCSI status as OK, since the status of the command 9354 * itself is OK. We're reporting sense as parameter data. 9355 */ 9356 ctl_set_success(ctsio); 9357 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9358 ctsio->be_move_done = ctl_config_move_done; 9359 ctl_datamove((union ctl_io *)ctsio); 9360 return (CTL_RETVAL_COMPLETE); 9361 } 9362 9363 int 9364 ctl_tur(struct ctl_scsiio *ctsio) 9365 { 9366 9367 CTL_DEBUG_PRINT(("ctl_tur\n")); 9368 9369 ctl_set_success(ctsio); 9370 ctl_done((union ctl_io *)ctsio); 9371 9372 return (CTL_RETVAL_COMPLETE); 9373 } 9374 9375 /* 9376 * SCSI VPD page 0x00, the Supported VPD Pages page. 9377 */ 9378 static int 9379 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9380 { 9381 struct ctl_lun *lun = CTL_LUN(ctsio); 9382 struct scsi_vpd_supported_pages *pages; 9383 int sup_page_size; 9384 int p; 9385 9386 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9387 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9388 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9389 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9390 ctsio->kern_rel_offset = 0; 9391 ctsio->kern_sg_entries = 0; 9392 ctsio->kern_data_len = min(sup_page_size, alloc_len); 9393 ctsio->kern_total_len = ctsio->kern_data_len; 9394 9395 /* 9396 * The control device is always connected. The disk device, on the 9397 * other hand, may not be online all the time. Need to change this 9398 * to figure out whether the disk device is actually online or not. 
9399 */ 9400 if (lun != NULL) 9401 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9402 lun->be_lun->lun_type; 9403 else 9404 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9405 9406 p = 0; 9407 /* Supported VPD pages */ 9408 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9409 /* Serial Number */ 9410 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9411 /* Device Identification */ 9412 pages->page_list[p++] = SVPD_DEVICE_ID; 9413 /* Extended INQUIRY Data */ 9414 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9415 /* Mode Page Policy */ 9416 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9417 /* SCSI Ports */ 9418 pages->page_list[p++] = SVPD_SCSI_PORTS; 9419 /* Third-party Copy */ 9420 pages->page_list[p++] = SVPD_SCSI_TPC; 9421 /* SCSI Feature Sets */ 9422 pages->page_list[p++] = SVPD_SCSI_SFS; 9423 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9424 /* Block limits */ 9425 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9426 /* Block Device Characteristics */ 9427 pages->page_list[p++] = SVPD_BDC; 9428 /* Logical Block Provisioning */ 9429 pages->page_list[p++] = SVPD_LBP; 9430 } 9431 pages->length = p; 9432 9433 ctl_set_success(ctsio); 9434 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9435 ctsio->be_move_done = ctl_config_move_done; 9436 ctl_datamove((union ctl_io *)ctsio); 9437 return (CTL_RETVAL_COMPLETE); 9438 } 9439 9440 /* 9441 * SCSI VPD page 0x80, the Unit Serial Number page. 9442 */ 9443 static int 9444 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9445 { 9446 struct ctl_lun *lun = CTL_LUN(ctsio); 9447 struct scsi_vpd_unit_serial_number *sn_ptr; 9448 int data_len; 9449 9450 data_len = 4 + CTL_SN_LEN; 9451 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9452 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9453 ctsio->kern_rel_offset = 0; 9454 ctsio->kern_sg_entries = 0; 9455 ctsio->kern_data_len = min(data_len, alloc_len); 9456 ctsio->kern_total_len = ctsio->kern_data_len; 9457 9458 /* 9459 * The control device is always connected. The disk device, on the 9460 * other hand, may not be online all the time. Need to change this 9461 * to figure out whether the disk device is actually online or not. 9462 */ 9463 if (lun != NULL) 9464 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9465 lun->be_lun->lun_type; 9466 else 9467 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9468 9469 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9470 sn_ptr->length = CTL_SN_LEN; 9471 /* 9472 * If we don't have a LUN, we just leave the serial number as 9473 * all spaces. 9474 */ 9475 if (lun != NULL) { 9476 strncpy((char *)sn_ptr->serial_num, 9477 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9478 } else 9479 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9480 9481 ctl_set_success(ctsio); 9482 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9483 ctsio->be_move_done = ctl_config_move_done; 9484 ctl_datamove((union ctl_io *)ctsio); 9485 return (CTL_RETVAL_COMPLETE); 9486 } 9487 9488 /* 9489 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 
9490 */ 9491 static int 9492 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9493 { 9494 struct ctl_lun *lun = CTL_LUN(ctsio); 9495 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9496 int data_len; 9497 9498 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9499 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9500 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9501 ctsio->kern_sg_entries = 0; 9502 ctsio->kern_rel_offset = 0; 9503 ctsio->kern_data_len = min(data_len, alloc_len); 9504 ctsio->kern_total_len = ctsio->kern_data_len; 9505 9506 /* 9507 * The control device is always connected. The disk device, on the 9508 * other hand, may not be online all the time. 9509 */ 9510 if (lun != NULL) 9511 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9512 lun->be_lun->lun_type; 9513 else 9514 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9515 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9516 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9517 /* 9518 * We support head of queue, ordered and simple tags. 9519 */ 9520 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9521 /* 9522 * Volatile cache supported. 9523 */ 9524 eid_ptr->flags3 = SVPD_EID_V_SUP; 9525 9526 /* 9527 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9528 * attention for a particular IT nexus on all LUNs once we report 9529 * it to that nexus once. This bit is required as of SPC-4. 9530 */ 9531 eid_ptr->flags4 = SVPD_EID_LUICLR; 9532 9533 /* 9534 * We support revert to defaults (RTD) bit in MODE SELECT. 9535 */ 9536 eid_ptr->flags5 = SVPD_EID_RTD_SUP; 9537 9538 /* 9539 * XXX KDM in order to correctly answer this, we would need 9540 * information from the SIM to determine how much sense data it 9541 * can send. So this would really be a path inquiry field, most 9542 * likely. This can be set to a maximum of 252 according to SPC-4, 9543 * but the hardware may or may not be able to support that much. 9544 * 0 just means that the maximum sense data length is not reported. 9545 */ 9546 eid_ptr->max_sense_length = 0; 9547 9548 ctl_set_success(ctsio); 9549 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9550 ctsio->be_move_done = ctl_config_move_done; 9551 ctl_datamove((union ctl_io *)ctsio); 9552 return (CTL_RETVAL_COMPLETE); 9553 } 9554 9555 static int 9556 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9557 { 9558 struct ctl_lun *lun = CTL_LUN(ctsio); 9559 struct scsi_vpd_mode_page_policy *mpp_ptr; 9560 int data_len; 9561 9562 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9563 sizeof(struct scsi_vpd_mode_page_policy_descr); 9564 9565 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9566 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9567 ctsio->kern_rel_offset = 0; 9568 ctsio->kern_sg_entries = 0; 9569 ctsio->kern_data_len = min(data_len, alloc_len); 9570 ctsio->kern_total_len = ctsio->kern_data_len; 9571 9572 /* 9573 * The control device is always connected. The disk device, on the 9574 * other hand, may not be online all the time. 
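 *
 * The single policy descriptor below uses the wildcard page code 0x3f
 * and subpage code 0xff, declaring the SHARED policy for every mode
 * page: mode page settings are not maintained separately per I_T nexus.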
9575 */ 9576 if (lun != NULL) 9577 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9578 lun->be_lun->lun_type; 9579 else 9580 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9581 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9582 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9583 mpp_ptr->descr[0].page_code = 0x3f; 9584 mpp_ptr->descr[0].subpage_code = 0xff; 9585 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9586 9587 ctl_set_success(ctsio); 9588 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9589 ctsio->be_move_done = ctl_config_move_done; 9590 ctl_datamove((union ctl_io *)ctsio); 9591 return (CTL_RETVAL_COMPLETE); 9592 } 9593 9594 /* 9595 * SCSI VPD page 0x83, the Device Identification page. 9596 */ 9597 static int 9598 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9599 { 9600 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9601 struct ctl_port *port = CTL_PORT(ctsio); 9602 struct ctl_lun *lun = CTL_LUN(ctsio); 9603 struct scsi_vpd_device_id *devid_ptr; 9604 struct scsi_vpd_id_descriptor *desc; 9605 int data_len, g; 9606 uint8_t proto; 9607 9608 data_len = sizeof(struct scsi_vpd_device_id) + 9609 sizeof(struct scsi_vpd_id_descriptor) + 9610 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9611 sizeof(struct scsi_vpd_id_descriptor) + 9612 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9613 if (lun && lun->lun_devid) 9614 data_len += lun->lun_devid->len; 9615 if (port && port->port_devid) 9616 data_len += port->port_devid->len; 9617 if (port && port->target_devid) 9618 data_len += port->target_devid->len; 9619 9620 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9621 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9622 ctsio->kern_sg_entries = 0; 9623 ctsio->kern_rel_offset = 0; 9624 ctsio->kern_sg_entries = 0; 9625 ctsio->kern_data_len = min(data_len, alloc_len); 9626 ctsio->kern_total_len = ctsio->kern_data_len; 9627 9628 /* 9629 * The control device is always connected. The disk device, on the 9630 * other hand, may not be online all the time. 9631 */ 9632 if (lun != NULL) 9633 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9634 lun->be_lun->lun_type; 9635 else 9636 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9637 devid_ptr->page_code = SVPD_DEVICE_ID; 9638 scsi_ulto2b(data_len - 4, devid_ptr->length); 9639 9640 if (port && port->port_type == CTL_PORT_FC) 9641 proto = SCSI_PROTO_FC << 4; 9642 else if (port && port->port_type == CTL_PORT_SAS) 9643 proto = SCSI_PROTO_SAS << 4; 9644 else if (port && port->port_type == CTL_PORT_ISCSI) 9645 proto = SCSI_PROTO_ISCSI << 4; 9646 else 9647 proto = SCSI_PROTO_SPI << 4; 9648 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9649 9650 /* 9651 * We're using a LUN association here. i.e., this device ID is a 9652 * per-LUN identifier. 9653 */ 9654 if (lun && lun->lun_devid) { 9655 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9656 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9657 lun->lun_devid->len); 9658 } 9659 9660 /* 9661 * This is for the WWPN which is a port association. 
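 *
 * The identifier itself was preformatted when the port registered
 * (port->port_devid), so it is copied verbatim and not interpreted here.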
9662 */ 9663 if (port && port->port_devid) { 9664 memcpy(desc, port->port_devid->data, port->port_devid->len); 9665 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9666 port->port_devid->len); 9667 } 9668 9669 /* 9670 * This is for the Relative Target Port(type 4h) identifier 9671 */ 9672 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9673 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9674 SVPD_ID_TYPE_RELTARG; 9675 desc->length = 4; 9676 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9677 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9678 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9679 9680 /* 9681 * This is for the Target Port Group(type 5h) identifier 9682 */ 9683 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9684 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9685 SVPD_ID_TYPE_TPORTGRP; 9686 desc->length = 4; 9687 if (softc->is_single || 9688 (port && port->status & CTL_PORT_STATUS_HA_SHARED)) 9689 g = 1; 9690 else 9691 g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt; 9692 scsi_ulto2b(g, &desc->identifier[2]); 9693 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9694 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9695 9696 /* 9697 * This is for the Target identifier 9698 */ 9699 if (port && port->target_devid) { 9700 memcpy(desc, port->target_devid->data, port->target_devid->len); 9701 } 9702 9703 ctl_set_success(ctsio); 9704 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9705 ctsio->be_move_done = ctl_config_move_done; 9706 ctl_datamove((union ctl_io *)ctsio); 9707 return (CTL_RETVAL_COMPLETE); 9708 } 9709 9710 static int 9711 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9712 { 9713 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9714 struct ctl_lun *lun = CTL_LUN(ctsio); 9715 struct scsi_vpd_scsi_ports *sp; 9716 struct scsi_vpd_port_designation *pd; 9717 struct scsi_vpd_port_designation_cont *pdc; 9718 struct ctl_port *port; 9719 int data_len, num_target_ports, iid_len, id_len; 9720 9721 num_target_ports = 0; 9722 iid_len = 0; 9723 id_len = 0; 9724 mtx_lock(&softc->ctl_lock); 9725 STAILQ_FOREACH(port, &softc->port_list, links) { 9726 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9727 continue; 9728 if (lun != NULL && 9729 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9730 continue; 9731 num_target_ports++; 9732 if (port->init_devid) 9733 iid_len += port->init_devid->len; 9734 if (port->port_devid) 9735 id_len += port->port_devid->len; 9736 } 9737 mtx_unlock(&softc->ctl_lock); 9738 9739 data_len = sizeof(struct scsi_vpd_scsi_ports) + 9740 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9741 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9742 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9743 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9744 ctsio->kern_sg_entries = 0; 9745 ctsio->kern_rel_offset = 0; 9746 ctsio->kern_sg_entries = 0; 9747 ctsio->kern_data_len = min(data_len, alloc_len); 9748 ctsio->kern_total_len = ctsio->kern_data_len; 9749 9750 /* 9751 * The control device is always connected. The disk device, on the 9752 * other hand, may not be online all the time. Need to change this 9753 * to figure out whether the disk device is actually online or not. 
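 *
 * One designation descriptor is emitted below for every online port
 * through which this LUN is visible, carrying the relative port id plus
 * whatever initiator transport id (init_devid) and target port
 * descriptors (port_devid) were registered for that port.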
9754 */ 9755 if (lun != NULL) 9756 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 9757 lun->be_lun->lun_type; 9758 else 9759 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9760 9761 sp->page_code = SVPD_SCSI_PORTS; 9762 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9763 sp->page_length); 9764 pd = &sp->design[0]; 9765 9766 mtx_lock(&softc->ctl_lock); 9767 STAILQ_FOREACH(port, &softc->port_list, links) { 9768 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9769 continue; 9770 if (lun != NULL && 9771 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9772 continue; 9773 scsi_ulto2b(port->targ_port, pd->relative_port_id); 9774 if (port->init_devid) { 9775 iid_len = port->init_devid->len; 9776 memcpy(pd->initiator_transportid, 9777 port->init_devid->data, port->init_devid->len); 9778 } else 9779 iid_len = 0; 9780 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9781 pdc = (struct scsi_vpd_port_designation_cont *) 9782 (&pd->initiator_transportid[iid_len]); 9783 if (port->port_devid) { 9784 id_len = port->port_devid->len; 9785 memcpy(pdc->target_port_descriptors, 9786 port->port_devid->data, port->port_devid->len); 9787 } else 9788 id_len = 0; 9789 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9790 pd = (struct scsi_vpd_port_designation *) 9791 ((uint8_t *)pdc->target_port_descriptors + id_len); 9792 } 9793 mtx_unlock(&softc->ctl_lock); 9794 9795 ctl_set_success(ctsio); 9796 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9797 ctsio->be_move_done = ctl_config_move_done; 9798 ctl_datamove((union ctl_io *)ctsio); 9799 return (CTL_RETVAL_COMPLETE); 9800 } 9801 9802 static int 9803 ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len) 9804 { 9805 struct ctl_lun *lun = CTL_LUN(ctsio); 9806 struct scsi_vpd_sfs *sfs_ptr; 9807 int sfs_page_size, n; 9808 9809 sfs_page_size = sizeof(*sfs_ptr) + 5 * 2; 9810 ctsio->kern_data_ptr = malloc(sfs_page_size, M_CTL, M_WAITOK | M_ZERO); 9811 sfs_ptr = (struct scsi_vpd_sfs *)ctsio->kern_data_ptr; 9812 ctsio->kern_sg_entries = 0; 9813 ctsio->kern_rel_offset = 0; 9814 ctsio->kern_sg_entries = 0; 9815 ctsio->kern_data_len = min(sfs_page_size, alloc_len); 9816 ctsio->kern_total_len = ctsio->kern_data_len; 9817 9818 /* 9819 * The control device is always connected. The disk device, on the 9820 * other hand, may not be online all the time. Need to change this 9821 * to figure out whether the disk device is actually online or not. 
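 *
 * The buffer is sized for up to five 2-byte feature set codes; n below
 * counts how many are actually emitted and the page length is set to
 * 4 + 2 * n.  For example, a disk LUN with UNMAP enabled reports
 * Discovery 2016, SBC Base 2016, SBC Base 2010 and Basic Provisioning
 * 2016, so n == 4 and the page length is 12.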
9822 */ 9823 if (lun != NULL) 9824 sfs_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9825 lun->be_lun->lun_type; 9826 else 9827 sfs_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9828 9829 sfs_ptr->page_code = SVPD_SCSI_SFS; 9830 n = 0; 9831 /* Discovery 2016 */ 9832 scsi_ulto2b(0x0001, &sfs_ptr->codes[2 * n++]); 9833 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9834 /* SBC Base 2016 */ 9835 scsi_ulto2b(0x0101, &sfs_ptr->codes[2 * n++]); 9836 /* SBC Base 2010 */ 9837 scsi_ulto2b(0x0102, &sfs_ptr->codes[2 * n++]); 9838 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9839 /* Basic Provisioning 2016 */ 9840 scsi_ulto2b(0x0103, &sfs_ptr->codes[2 * n++]); 9841 } 9842 /* Drive Maintenance 2016 */ 9843 //scsi_ulto2b(0x0104, &sfs_ptr->codes[2 * n++]); 9844 } 9845 scsi_ulto2b(4 + 2 * n, sfs_ptr->page_length); 9846 9847 ctl_set_success(ctsio); 9848 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9849 ctsio->be_move_done = ctl_config_move_done; 9850 ctl_datamove((union ctl_io *)ctsio); 9851 return (CTL_RETVAL_COMPLETE); 9852 } 9853 9854 static int 9855 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9856 { 9857 struct ctl_lun *lun = CTL_LUN(ctsio); 9858 struct scsi_vpd_block_limits *bl_ptr; 9859 const char *val; 9860 uint64_t ival; 9861 9862 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9863 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 9864 ctsio->kern_sg_entries = 0; 9865 ctsio->kern_rel_offset = 0; 9866 ctsio->kern_sg_entries = 0; 9867 ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len); 9868 ctsio->kern_total_len = ctsio->kern_data_len; 9869 9870 /* 9871 * The control device is always connected. The disk device, on the 9872 * other hand, may not be online all the time. Need to change this 9873 * to figure out whether the disk device is actually online or not. 
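 *
 * Several of the limits reported below are tunable per LUN: the
 * "unmap_max_lba", "unmap_max_descr" and "write_same_max_lba" options
 * override the defaults, and the WSNZ flag is set whenever the LUN is
 * larger than the maximum WRITE SAME length, so that a transfer length
 * of zero ("rest of the medium") cannot be requested.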
9874 */ 9875 if (lun != NULL) 9876 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9877 lun->be_lun->lun_type; 9878 else 9879 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9880 9881 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9882 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 9883 bl_ptr->max_cmp_write_len = 0xff; 9884 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9885 if (lun != NULL) { 9886 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 9887 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9888 ival = 0xffffffff; 9889 val = dnvlist_get_string(lun->be_lun->options, 9890 "unmap_max_lba", NULL); 9891 if (val != NULL) 9892 ctl_expand_number(val, &ival); 9893 scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt); 9894 ival = 0xffffffff; 9895 val = dnvlist_get_string(lun->be_lun->options, 9896 "unmap_max_descr", NULL); 9897 if (val != NULL) 9898 ctl_expand_number(val, &ival); 9899 scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt); 9900 if (lun->be_lun->ublockexp != 0) { 9901 scsi_ulto4b((1 << lun->be_lun->ublockexp), 9902 bl_ptr->opt_unmap_grain); 9903 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 9904 bl_ptr->unmap_grain_align); 9905 } 9906 } 9907 scsi_ulto4b(lun->be_lun->atomicblock, 9908 bl_ptr->max_atomic_transfer_length); 9909 scsi_ulto4b(0, bl_ptr->atomic_alignment); 9910 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 9911 scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary); 9912 scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size); 9913 ival = UINT64_MAX; 9914 val = dnvlist_get_string(lun->be_lun->options, 9915 "write_same_max_lba", NULL); 9916 if (val != NULL) 9917 ctl_expand_number(val, &ival); 9918 scsi_u64to8b(ival, bl_ptr->max_write_same_length); 9919 if (lun->be_lun->maxlba + 1 > ival) 9920 bl_ptr->flags |= SVPD_BL_WSNZ; 9921 } 9922 9923 ctl_set_success(ctsio); 9924 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9925 ctsio->be_move_done = ctl_config_move_done; 9926 ctl_datamove((union ctl_io *)ctsio); 9927 return (CTL_RETVAL_COMPLETE); 9928 } 9929 9930 static int 9931 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 9932 { 9933 struct ctl_lun *lun = CTL_LUN(ctsio); 9934 struct scsi_vpd_block_device_characteristics *bdc_ptr; 9935 const char *value; 9936 u_int i; 9937 9938 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 9939 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 9940 ctsio->kern_sg_entries = 0; 9941 ctsio->kern_rel_offset = 0; 9942 ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len); 9943 ctsio->kern_total_len = ctsio->kern_data_len; 9944 9945 /* 9946 * The control device is always connected. The disk device, on the 9947 * other hand, may not be online all the time. Need to change this 9948 * to figure out whether the disk device is actually online or not. 
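 *
 * The medium rotation rate and form factor come from the per-LUN "rpm"
 * and "formfactor" options when present, with the rotation rate
 * defaulting to CTL_DEFAULT_ROTATION_RATE; per SBC, an "rpm" value of 1
 * reports a non-rotating (solid state) medium.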
9949 */ 9950 if (lun != NULL) 9951 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9952 lun->be_lun->lun_type; 9953 else 9954 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9955 bdc_ptr->page_code = SVPD_BDC; 9956 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 9957 if (lun != NULL && 9958 (value = dnvlist_get_string(lun->be_lun->options, "rpm", NULL)) != NULL) 9959 i = strtol(value, NULL, 0); 9960 else 9961 i = CTL_DEFAULT_ROTATION_RATE; 9962 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 9963 if (lun != NULL && 9964 (value = dnvlist_get_string(lun->be_lun->options, "formfactor", NULL)) != NULL) 9965 i = strtol(value, NULL, 0); 9966 else 9967 i = 0; 9968 bdc_ptr->wab_wac_ff = (i & 0x0f); 9969 bdc_ptr->flags = SVPD_RBWZ | SVPD_FUAB | SVPD_VBULS; 9970 9971 ctl_set_success(ctsio); 9972 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9973 ctsio->be_move_done = ctl_config_move_done; 9974 ctl_datamove((union ctl_io *)ctsio); 9975 return (CTL_RETVAL_COMPLETE); 9976 } 9977 9978 static int 9979 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 9980 { 9981 struct ctl_lun *lun = CTL_LUN(ctsio); 9982 struct scsi_vpd_logical_block_prov *lbp_ptr; 9983 const char *value; 9984 9985 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 9986 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 9987 ctsio->kern_sg_entries = 0; 9988 ctsio->kern_rel_offset = 0; 9989 ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len); 9990 ctsio->kern_total_len = ctsio->kern_data_len; 9991 9992 /* 9993 * The control device is always connected. The disk device, on the 9994 * other hand, may not be online all the time. Need to change this 9995 * to figure out whether the disk device is actually online or not. 9996 */ 9997 if (lun != NULL) 9998 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9999 lun->be_lun->lun_type; 10000 else 10001 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10002 10003 lbp_ptr->page_code = SVPD_LBP; 10004 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 10005 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 10006 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 10007 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 10008 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 10009 value = dnvlist_get_string(lun->be_lun->options, 10010 "provisioning_type", NULL); 10011 if (value != NULL) { 10012 if (strcmp(value, "resource") == 0) 10013 lbp_ptr->prov_type = SVPD_LBP_RESOURCE; 10014 else if (strcmp(value, "thin") == 0) 10015 lbp_ptr->prov_type = SVPD_LBP_THIN; 10016 } else 10017 lbp_ptr->prov_type = SVPD_LBP_THIN; 10018 } 10019 10020 ctl_set_success(ctsio); 10021 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10022 ctsio->be_move_done = ctl_config_move_done; 10023 ctl_datamove((union ctl_io *)ctsio); 10024 return (CTL_RETVAL_COMPLETE); 10025 } 10026 10027 /* 10028 * INQUIRY with the EVPD bit set. 
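 *
 * Dispatches on the PAGE CODE byte of the CDB.  Pages that only make
 * sense for direct access LUNs (Block Limits, Block Device
 * Characteristics and Logical Block Provisioning) are rejected for other
 * device types, and any unimplemented page code gets ILLEGAL REQUEST /
 * invalid field in CDB.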
10029 */ 10030 static int 10031 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 10032 { 10033 struct ctl_lun *lun = CTL_LUN(ctsio); 10034 struct scsi_inquiry *cdb; 10035 int alloc_len, retval; 10036 10037 cdb = (struct scsi_inquiry *)ctsio->cdb; 10038 alloc_len = scsi_2btoul(cdb->length); 10039 10040 switch (cdb->page_code) { 10041 case SVPD_SUPPORTED_PAGES: 10042 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 10043 break; 10044 case SVPD_UNIT_SERIAL_NUMBER: 10045 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 10046 break; 10047 case SVPD_DEVICE_ID: 10048 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 10049 break; 10050 case SVPD_EXTENDED_INQUIRY_DATA: 10051 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 10052 break; 10053 case SVPD_MODE_PAGE_POLICY: 10054 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 10055 break; 10056 case SVPD_SCSI_PORTS: 10057 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 10058 break; 10059 case SVPD_SCSI_TPC: 10060 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 10061 break; 10062 case SVPD_SCSI_SFS: 10063 retval = ctl_inquiry_evpd_sfs(ctsio, alloc_len); 10064 break; 10065 case SVPD_BLOCK_LIMITS: 10066 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10067 goto err; 10068 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 10069 break; 10070 case SVPD_BDC: 10071 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10072 goto err; 10073 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 10074 break; 10075 case SVPD_LBP: 10076 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10077 goto err; 10078 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 10079 break; 10080 default: 10081 err: 10082 ctl_set_invalid_field(ctsio, 10083 /*sks_valid*/ 1, 10084 /*command*/ 1, 10085 /*field*/ 2, 10086 /*bit_valid*/ 0, 10087 /*bit*/ 0); 10088 ctl_done((union ctl_io *)ctsio); 10089 retval = CTL_RETVAL_COMPLETE; 10090 break; 10091 } 10092 10093 return (retval); 10094 } 10095 10096 /* 10097 * Standard INQUIRY data. 10098 */ 10099 static int 10100 ctl_inquiry_std(struct ctl_scsiio *ctsio) 10101 { 10102 struct ctl_softc *softc = CTL_SOFTC(ctsio); 10103 struct ctl_port *port = CTL_PORT(ctsio); 10104 struct ctl_lun *lun = CTL_LUN(ctsio); 10105 struct scsi_inquiry_data *inq_ptr; 10106 struct scsi_inquiry *cdb; 10107 const char *val; 10108 uint32_t alloc_len, data_len; 10109 ctl_port_type port_type; 10110 10111 port_type = port->port_type; 10112 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 10113 port_type = CTL_PORT_SCSI; 10114 10115 cdb = (struct scsi_inquiry *)ctsio->cdb; 10116 alloc_len = scsi_2btoul(cdb->length); 10117 10118 /* 10119 * We malloc the full inquiry data size here and fill it 10120 * in. If the user only asks for less, we'll give him 10121 * that much. 
10122 */ 10123 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); 10124 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10125 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 10126 ctsio->kern_sg_entries = 0; 10127 ctsio->kern_rel_offset = 0; 10128 ctsio->kern_data_len = min(data_len, alloc_len); 10129 ctsio->kern_total_len = ctsio->kern_data_len; 10130 10131 if (lun != NULL) { 10132 if ((lun->flags & CTL_LUN_PRIMARY_SC) || 10133 softc->ha_link >= CTL_HA_LINK_UNKNOWN) { 10134 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10135 lun->be_lun->lun_type; 10136 } else { 10137 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | 10138 lun->be_lun->lun_type; 10139 } 10140 if (lun->flags & CTL_LUN_REMOVABLE) 10141 inq_ptr->dev_qual2 |= SID_RMB; 10142 } else 10143 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 10144 10145 /* RMB in byte 2 is 0 */ 10146 inq_ptr->version = SCSI_REV_SPC5; 10147 10148 /* 10149 * According to SAM-3, even if a device only supports a single 10150 * level of LUN addressing, it should still set the HISUP bit: 10151 * 10152 * 4.9.1 Logical unit numbers overview 10153 * 10154 * All logical unit number formats described in this standard are 10155 * hierarchical in structure even when only a single level in that 10156 * hierarchy is used. The HISUP bit shall be set to one in the 10157 * standard INQUIRY data (see SPC-2) when any logical unit number 10158 * format described in this standard is used. Non-hierarchical 10159 * formats are outside the scope of this standard. 10160 * 10161 * Therefore we set the HiSup bit here. 10162 * 10163 * The response format is 2, per SPC-3. 10164 */ 10165 inq_ptr->response_format = SID_HiSup | 2; 10166 10167 inq_ptr->additional_length = data_len - 10168 (offsetof(struct scsi_inquiry_data, additional_length) + 1); 10169 CTL_DEBUG_PRINT(("additional_length = %d\n", 10170 inq_ptr->additional_length)); 10171 10172 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; 10173 if (port_type == CTL_PORT_SCSI) 10174 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10175 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10176 inq_ptr->flags = SID_CmdQue; 10177 if (port_type == CTL_PORT_SCSI) 10178 inq_ptr->flags |= SID_WBus16 | SID_Sync; 10179 10180 /* 10181 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10182 * We have 8 bytes for the vendor name, and 16 bytes for the device 10183 * name and 4 bytes for the revision. 
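 *
 * When a "vendor", "product" or "revision" option is supplied, the field
 * is first filled with spaces and the option text is then copied over it
 * without a terminating NUL, so shorter values stay space padded as the
 * standard requires.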
10184 */ 10185 if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, 10186 "vendor", NULL)) == NULL) { 10187 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10188 } else { 10189 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10190 strncpy(inq_ptr->vendor, val, 10191 min(sizeof(inq_ptr->vendor), strlen(val))); 10192 } 10193 if (lun == NULL) { 10194 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10195 sizeof(inq_ptr->product)); 10196 } else if ((val = dnvlist_get_string(lun->be_lun->options, "product", 10197 NULL)) == NULL) { 10198 switch (lun->be_lun->lun_type) { 10199 case T_DIRECT: 10200 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10201 sizeof(inq_ptr->product)); 10202 break; 10203 case T_PROCESSOR: 10204 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10205 sizeof(inq_ptr->product)); 10206 break; 10207 case T_CDROM: 10208 strncpy(inq_ptr->product, CTL_CDROM_PRODUCT, 10209 sizeof(inq_ptr->product)); 10210 break; 10211 default: 10212 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 10213 sizeof(inq_ptr->product)); 10214 break; 10215 } 10216 } else { 10217 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10218 strncpy(inq_ptr->product, val, 10219 min(sizeof(inq_ptr->product), strlen(val))); 10220 } 10221 10222 /* 10223 * XXX make this a macro somewhere so it automatically gets 10224 * incremented when we make changes. 10225 */ 10226 if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, 10227 "revision", NULL)) == NULL) { 10228 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10229 } else { 10230 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10231 strncpy(inq_ptr->revision, val, 10232 min(sizeof(inq_ptr->revision), strlen(val))); 10233 } 10234 10235 /* 10236 * For parallel SCSI, we support double transition and single 10237 * transition clocking. We also support QAS (Quick Arbitration 10238 * and Selection) and Information Unit transfers on both the 10239 * control and array devices. 
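 *
 * The version descriptors that follow claim SAM-6 and SPC-5, a transport
 * standard matching the port type (FCP-2, SPI-4, iSCSI, SAS or USB mass
 * storage), and SBC-4 or MMC-6 depending on the LUN type (processor LUNs
 * claim no command set standard).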
10240 */ 10241 if (port_type == CTL_PORT_SCSI) 10242 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10243 SID_SPI_IUS; 10244 10245 /* SAM-6 (no version claimed) */ 10246 scsi_ulto2b(0x00C0, inq_ptr->version1); 10247 /* SPC-5 (no version claimed) */ 10248 scsi_ulto2b(0x05C0, inq_ptr->version2); 10249 if (port_type == CTL_PORT_FC) { 10250 /* FCP-2 ANSI INCITS.350:2003 */ 10251 scsi_ulto2b(0x0917, inq_ptr->version3); 10252 } else if (port_type == CTL_PORT_SCSI) { 10253 /* SPI-4 ANSI INCITS.362:200x */ 10254 scsi_ulto2b(0x0B56, inq_ptr->version3); 10255 } else if (port_type == CTL_PORT_ISCSI) { 10256 /* iSCSI (no version claimed) */ 10257 scsi_ulto2b(0x0960, inq_ptr->version3); 10258 } else if (port_type == CTL_PORT_SAS) { 10259 /* SAS (no version claimed) */ 10260 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10261 } else if (port_type == CTL_PORT_UMASS) { 10262 /* USB Mass Storage Class Bulk-Only Transport, Revision 1.0 */ 10263 scsi_ulto2b(0x1730, inq_ptr->version3); 10264 } 10265 10266 if (lun == NULL) { 10267 /* SBC-4 (no version claimed) */ 10268 scsi_ulto2b(0x0600, inq_ptr->version4); 10269 } else { 10270 switch (lun->be_lun->lun_type) { 10271 case T_DIRECT: 10272 /* SBC-4 (no version claimed) */ 10273 scsi_ulto2b(0x0600, inq_ptr->version4); 10274 break; 10275 case T_PROCESSOR: 10276 break; 10277 case T_CDROM: 10278 /* MMC-6 (no version claimed) */ 10279 scsi_ulto2b(0x04E0, inq_ptr->version4); 10280 break; 10281 default: 10282 break; 10283 } 10284 } 10285 10286 ctl_set_success(ctsio); 10287 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10288 ctsio->be_move_done = ctl_config_move_done; 10289 ctl_datamove((union ctl_io *)ctsio); 10290 return (CTL_RETVAL_COMPLETE); 10291 } 10292 10293 int 10294 ctl_inquiry(struct ctl_scsiio *ctsio) 10295 { 10296 struct scsi_inquiry *cdb; 10297 int retval; 10298 10299 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10300 10301 cdb = (struct scsi_inquiry *)ctsio->cdb; 10302 if (cdb->byte2 & SI_EVPD) 10303 retval = ctl_inquiry_evpd(ctsio); 10304 else if (cdb->page_code == 0) 10305 retval = ctl_inquiry_std(ctsio); 10306 else { 10307 ctl_set_invalid_field(ctsio, 10308 /*sks_valid*/ 1, 10309 /*command*/ 1, 10310 /*field*/ 2, 10311 /*bit_valid*/ 0, 10312 /*bit*/ 0); 10313 ctl_done((union ctl_io *)ctsio); 10314 return (CTL_RETVAL_COMPLETE); 10315 } 10316 10317 return (retval); 10318 } 10319 10320 int 10321 ctl_get_config(struct ctl_scsiio *ctsio) 10322 { 10323 struct ctl_lun *lun = CTL_LUN(ctsio); 10324 struct scsi_get_config_header *hdr; 10325 struct scsi_get_config_feature *feature; 10326 struct scsi_get_config *cdb; 10327 uint32_t alloc_len, data_len; 10328 int rt, starting; 10329 10330 cdb = (struct scsi_get_config *)ctsio->cdb; 10331 rt = (cdb->rt & SGC_RT_MASK); 10332 starting = scsi_2btoul(cdb->starting_feature); 10333 alloc_len = scsi_2btoul(cdb->length); 10334 10335 data_len = sizeof(struct scsi_get_config_header) + 10336 sizeof(struct scsi_get_config_feature) + 8 + 10337 sizeof(struct scsi_get_config_feature) + 8 + 10338 sizeof(struct scsi_get_config_feature) + 4 + 10339 sizeof(struct scsi_get_config_feature) + 4 + 10340 sizeof(struct scsi_get_config_feature) + 8 + 10341 sizeof(struct scsi_get_config_feature) + 10342 sizeof(struct scsi_get_config_feature) + 4 + 10343 sizeof(struct scsi_get_config_feature) + 4 + 10344 sizeof(struct scsi_get_config_feature) + 4 + 10345 sizeof(struct scsi_get_config_feature) + 4 + 10346 sizeof(struct scsi_get_config_feature) + 4 + 10347 sizeof(struct scsi_get_config_feature) + 4; 10348 ctsio->kern_data_ptr = malloc(data_len, M_CTL, 
M_WAITOK | M_ZERO); 10349 ctsio->kern_sg_entries = 0; 10350 ctsio->kern_rel_offset = 0; 10351 10352 hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr; 10353 if (lun->flags & CTL_LUN_NO_MEDIA) 10354 scsi_ulto2b(0x0000, hdr->current_profile); 10355 else 10356 scsi_ulto2b(0x0010, hdr->current_profile); 10357 feature = (struct scsi_get_config_feature *)(hdr + 1); 10358 10359 if (starting > 0x003b) 10360 goto done; 10361 if (starting > 0x003a) 10362 goto f3b; 10363 if (starting > 0x002b) 10364 goto f3a; 10365 if (starting > 0x002a) 10366 goto f2b; 10367 if (starting > 0x001f) 10368 goto f2a; 10369 if (starting > 0x001e) 10370 goto f1f; 10371 if (starting > 0x001d) 10372 goto f1e; 10373 if (starting > 0x0010) 10374 goto f1d; 10375 if (starting > 0x0003) 10376 goto f10; 10377 if (starting > 0x0002) 10378 goto f3; 10379 if (starting > 0x0001) 10380 goto f2; 10381 if (starting > 0x0000) 10382 goto f1; 10383 10384 /* Profile List */ 10385 scsi_ulto2b(0x0000, feature->feature_code); 10386 feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT; 10387 feature->add_length = 8; 10388 scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */ 10389 feature->feature_data[2] = 0x00; 10390 scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */ 10391 feature->feature_data[6] = 0x01; 10392 feature = (struct scsi_get_config_feature *) 10393 &feature->feature_data[feature->add_length]; 10394 10395 f1: /* Core */ 10396 scsi_ulto2b(0x0001, feature->feature_code); 10397 feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10398 feature->add_length = 8; 10399 scsi_ulto4b(0x00000000, &feature->feature_data[0]); 10400 feature->feature_data[4] = 0x03; 10401 feature = (struct scsi_get_config_feature *) 10402 &feature->feature_data[feature->add_length]; 10403 10404 f2: /* Morphing */ 10405 scsi_ulto2b(0x0002, feature->feature_code); 10406 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10407 feature->add_length = 4; 10408 feature->feature_data[0] = 0x02; 10409 feature = (struct scsi_get_config_feature *) 10410 &feature->feature_data[feature->add_length]; 10411 10412 f3: /* Removable Medium */ 10413 scsi_ulto2b(0x0003, feature->feature_code); 10414 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10415 feature->add_length = 4; 10416 feature->feature_data[0] = 0x39; 10417 feature = (struct scsi_get_config_feature *) 10418 &feature->feature_data[feature->add_length]; 10419 10420 if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA)) 10421 goto done; 10422 10423 f10: /* Random Read */ 10424 scsi_ulto2b(0x0010, feature->feature_code); 10425 feature->flags = 0x00; 10426 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10427 feature->flags |= SGC_F_CURRENT; 10428 feature->add_length = 8; 10429 scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]); 10430 scsi_ulto2b(1, &feature->feature_data[4]); 10431 feature->feature_data[6] = 0x00; 10432 feature = (struct scsi_get_config_feature *) 10433 &feature->feature_data[feature->add_length]; 10434 10435 f1d: /* Multi-Read */ 10436 scsi_ulto2b(0x001D, feature->feature_code); 10437 feature->flags = 0x00; 10438 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10439 feature->flags |= SGC_F_CURRENT; 10440 feature->add_length = 0; 10441 feature = (struct scsi_get_config_feature *) 10442 &feature->feature_data[feature->add_length]; 10443 10444 f1e: /* CD Read */ 10445 scsi_ulto2b(0x001E, feature->feature_code); 10446 feature->flags = 0x00; 10447 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10448 feature->flags |= SGC_F_CURRENT; 10449 feature->add_length = 
4; 10450 feature->feature_data[0] = 0x00; 10451 feature = (struct scsi_get_config_feature *) 10452 &feature->feature_data[feature->add_length]; 10453 10454 f1f: /* DVD Read */ 10455 scsi_ulto2b(0x001F, feature->feature_code); 10456 feature->flags = 0x08; 10457 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10458 feature->flags |= SGC_F_CURRENT; 10459 feature->add_length = 4; 10460 feature->feature_data[0] = 0x01; 10461 feature->feature_data[2] = 0x03; 10462 feature = (struct scsi_get_config_feature *) 10463 &feature->feature_data[feature->add_length]; 10464 10465 f2a: /* DVD+RW */ 10466 scsi_ulto2b(0x002A, feature->feature_code); 10467 feature->flags = 0x04; 10468 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10469 feature->flags |= SGC_F_CURRENT; 10470 feature->add_length = 4; 10471 feature->feature_data[0] = 0x00; 10472 feature->feature_data[1] = 0x00; 10473 feature = (struct scsi_get_config_feature *) 10474 &feature->feature_data[feature->add_length]; 10475 10476 f2b: /* DVD+R */ 10477 scsi_ulto2b(0x002B, feature->feature_code); 10478 feature->flags = 0x00; 10479 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10480 feature->flags |= SGC_F_CURRENT; 10481 feature->add_length = 4; 10482 feature->feature_data[0] = 0x00; 10483 feature = (struct scsi_get_config_feature *) 10484 &feature->feature_data[feature->add_length]; 10485 10486 f3a: /* DVD+RW Dual Layer */ 10487 scsi_ulto2b(0x003A, feature->feature_code); 10488 feature->flags = 0x00; 10489 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10490 feature->flags |= SGC_F_CURRENT; 10491 feature->add_length = 4; 10492 feature->feature_data[0] = 0x00; 10493 feature->feature_data[1] = 0x00; 10494 feature = (struct scsi_get_config_feature *) 10495 &feature->feature_data[feature->add_length]; 10496 10497 f3b: /* DVD+R Dual Layer */ 10498 scsi_ulto2b(0x003B, feature->feature_code); 10499 feature->flags = 0x00; 10500 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10501 feature->flags |= SGC_F_CURRENT; 10502 feature->add_length = 4; 10503 feature->feature_data[0] = 0x00; 10504 feature = (struct scsi_get_config_feature *) 10505 &feature->feature_data[feature->add_length]; 10506 10507 done: 10508 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10509 if (rt == SGC_RT_SPECIFIC && data_len > 4) { 10510 feature = (struct scsi_get_config_feature *)(hdr + 1); 10511 if (scsi_2btoul(feature->feature_code) == starting) 10512 feature = (struct scsi_get_config_feature *) 10513 &feature->feature_data[feature->add_length]; 10514 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10515 } 10516 scsi_ulto4b(data_len - 4, hdr->data_length); 10517 ctsio->kern_data_len = min(data_len, alloc_len); 10518 ctsio->kern_total_len = ctsio->kern_data_len; 10519 10520 ctl_set_success(ctsio); 10521 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10522 ctsio->be_move_done = ctl_config_move_done; 10523 ctl_datamove((union ctl_io *)ctsio); 10524 return (CTL_RETVAL_COMPLETE); 10525 } 10526 10527 int 10528 ctl_get_event_status(struct ctl_scsiio *ctsio) 10529 { 10530 struct scsi_get_event_status_header *hdr; 10531 struct scsi_get_event_status *cdb; 10532 uint32_t alloc_len, data_len; 10533 10534 cdb = (struct scsi_get_event_status *)ctsio->cdb; 10535 if ((cdb->byte2 & SGESN_POLLED) == 0) { 10536 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 10537 /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 10538 ctl_done((union ctl_io *)ctsio); 10539 return (CTL_RETVAL_COMPLETE); 10540 } 10541 alloc_len = scsi_2btoul(cdb->length); 10542 10543 data_len = sizeof(struct scsi_get_event_status_header); 10544 
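/*
 * Only the header is returned: the descriptor length is zero and the NEA
 * (No Event Available) bit is set, since no event notification classes
 * are supported.
 */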
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10545 ctsio->kern_sg_entries = 0; 10546 ctsio->kern_rel_offset = 0; 10547 ctsio->kern_data_len = min(data_len, alloc_len); 10548 ctsio->kern_total_len = ctsio->kern_data_len; 10549 10550 hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr; 10551 scsi_ulto2b(0, hdr->descr_length); 10552 hdr->nea_class = SGESN_NEA; 10553 hdr->supported_class = 0; 10554 10555 ctl_set_success(ctsio); 10556 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10557 ctsio->be_move_done = ctl_config_move_done; 10558 ctl_datamove((union ctl_io *)ctsio); 10559 return (CTL_RETVAL_COMPLETE); 10560 } 10561 10562 int 10563 ctl_mechanism_status(struct ctl_scsiio *ctsio) 10564 { 10565 struct scsi_mechanism_status_header *hdr; 10566 struct scsi_mechanism_status *cdb; 10567 uint32_t alloc_len, data_len; 10568 10569 cdb = (struct scsi_mechanism_status *)ctsio->cdb; 10570 alloc_len = scsi_2btoul(cdb->length); 10571 10572 data_len = sizeof(struct scsi_mechanism_status_header); 10573 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10574 ctsio->kern_sg_entries = 0; 10575 ctsio->kern_rel_offset = 0; 10576 ctsio->kern_data_len = min(data_len, alloc_len); 10577 ctsio->kern_total_len = ctsio->kern_data_len; 10578 10579 hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr; 10580 hdr->state1 = 0x00; 10581 hdr->state2 = 0xe0; 10582 scsi_ulto3b(0, hdr->lba); 10583 hdr->slots_num = 0; 10584 scsi_ulto2b(0, hdr->slots_length); 10585 10586 ctl_set_success(ctsio); 10587 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10588 ctsio->be_move_done = ctl_config_move_done; 10589 ctl_datamove((union ctl_io *)ctsio); 10590 return (CTL_RETVAL_COMPLETE); 10591 } 10592 10593 static void 10594 ctl_ultomsf(uint32_t lba, uint8_t *buf) 10595 { 10596 10597 lba += 150; 10598 buf[0] = 0; 10599 buf[1] = bin2bcd((lba / 75) / 60); 10600 buf[2] = bin2bcd((lba / 75) % 60); 10601 buf[3] = bin2bcd(lba % 75); 10602 } 10603 10604 int 10605 ctl_read_toc(struct ctl_scsiio *ctsio) 10606 { 10607 struct ctl_lun *lun = CTL_LUN(ctsio); 10608 struct scsi_read_toc_hdr *hdr; 10609 struct scsi_read_toc_type01_descr *descr; 10610 struct scsi_read_toc *cdb; 10611 uint32_t alloc_len, data_len; 10612 int format, msf; 10613 10614 cdb = (struct scsi_read_toc *)ctsio->cdb; 10615 msf = (cdb->byte2 & CD_MSF) != 0; 10616 format = cdb->format; 10617 alloc_len = scsi_2btoul(cdb->data_len); 10618 10619 data_len = sizeof(struct scsi_read_toc_hdr); 10620 if (format == 0) 10621 data_len += 2 * sizeof(struct scsi_read_toc_type01_descr); 10622 else 10623 data_len += sizeof(struct scsi_read_toc_type01_descr); 10624 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10625 ctsio->kern_sg_entries = 0; 10626 ctsio->kern_rel_offset = 0; 10627 ctsio->kern_data_len = min(data_len, alloc_len); 10628 ctsio->kern_total_len = ctsio->kern_data_len; 10629 10630 hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr; 10631 if (format == 0) { 10632 scsi_ulto2b(0x12, hdr->data_length); 10633 hdr->first = 1; 10634 hdr->last = 1; 10635 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10636 descr->addr_ctl = 0x14; 10637 descr->track_number = 1; 10638 if (msf) 10639 ctl_ultomsf(0, descr->track_start); 10640 else 10641 scsi_ulto4b(0, descr->track_start); 10642 descr++; 10643 descr->addr_ctl = 0x14; 10644 descr->track_number = 0xaa; 10645 if (msf) 10646 ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start); 10647 else 10648 scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start); 10649 } else { 10650 
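/*
 * For the remaining formats (e.g. session info) a single descriptor
 * giving the start of the first track is returned.  When MSF addressing
 * is requested, LBA 0 converts to 00:02:00, reflecting the 150-frame
 * (two second) offset added by ctl_ultomsf().
 */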
scsi_ulto2b(0x0a, hdr->data_length); 10651 hdr->first = 1; 10652 hdr->last = 1; 10653 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10654 descr->addr_ctl = 0x14; 10655 descr->track_number = 1; 10656 if (msf) 10657 ctl_ultomsf(0, descr->track_start); 10658 else 10659 scsi_ulto4b(0, descr->track_start); 10660 } 10661 10662 ctl_set_success(ctsio); 10663 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10664 ctsio->be_move_done = ctl_config_move_done; 10665 ctl_datamove((union ctl_io *)ctsio); 10666 return (CTL_RETVAL_COMPLETE); 10667 } 10668 10669 /* 10670 * For known CDB types, parse the LBA and length. 10671 */ 10672 static int 10673 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10674 { 10675 if (io->io_hdr.io_type != CTL_IO_SCSI) 10676 return (1); 10677 10678 switch (io->scsiio.cdb[0]) { 10679 case COMPARE_AND_WRITE: { 10680 struct scsi_compare_and_write *cdb; 10681 10682 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10683 10684 *lba = scsi_8btou64(cdb->addr); 10685 *len = cdb->length; 10686 break; 10687 } 10688 case READ_6: 10689 case WRITE_6: { 10690 struct scsi_rw_6 *cdb; 10691 10692 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10693 10694 *lba = scsi_3btoul(cdb->addr); 10695 /* only 5 bits are valid in the most significant address byte */ 10696 *lba &= 0x1fffff; 10697 *len = cdb->length; 10698 break; 10699 } 10700 case READ_10: 10701 case WRITE_10: { 10702 struct scsi_rw_10 *cdb; 10703 10704 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10705 10706 *lba = scsi_4btoul(cdb->addr); 10707 *len = scsi_2btoul(cdb->length); 10708 break; 10709 } 10710 case WRITE_VERIFY_10: { 10711 struct scsi_write_verify_10 *cdb; 10712 10713 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10714 10715 *lba = scsi_4btoul(cdb->addr); 10716 *len = scsi_2btoul(cdb->length); 10717 break; 10718 } 10719 case READ_12: 10720 case WRITE_12: { 10721 struct scsi_rw_12 *cdb; 10722 10723 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10724 10725 *lba = scsi_4btoul(cdb->addr); 10726 *len = scsi_4btoul(cdb->length); 10727 break; 10728 } 10729 case WRITE_VERIFY_12: { 10730 struct scsi_write_verify_12 *cdb; 10731 10732 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10733 10734 *lba = scsi_4btoul(cdb->addr); 10735 *len = scsi_4btoul(cdb->length); 10736 break; 10737 } 10738 case READ_16: 10739 case WRITE_16: { 10740 struct scsi_rw_16 *cdb; 10741 10742 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10743 10744 *lba = scsi_8btou64(cdb->addr); 10745 *len = scsi_4btoul(cdb->length); 10746 break; 10747 } 10748 case WRITE_ATOMIC_16: { 10749 struct scsi_write_atomic_16 *cdb; 10750 10751 cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb; 10752 10753 *lba = scsi_8btou64(cdb->addr); 10754 *len = scsi_2btoul(cdb->length); 10755 break; 10756 } 10757 case WRITE_VERIFY_16: { 10758 struct scsi_write_verify_16 *cdb; 10759 10760 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10761 10762 *lba = scsi_8btou64(cdb->addr); 10763 *len = scsi_4btoul(cdb->length); 10764 break; 10765 } 10766 case WRITE_SAME_10: { 10767 struct scsi_write_same_10 *cdb; 10768 10769 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10770 10771 *lba = scsi_4btoul(cdb->addr); 10772 *len = scsi_2btoul(cdb->length); 10773 break; 10774 } 10775 case WRITE_SAME_16: { 10776 struct scsi_write_same_16 *cdb; 10777 10778 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10779 10780 *lba = scsi_8btou64(cdb->addr); 10781 *len = scsi_4btoul(cdb->length); 10782 break; 10783 } 10784 case VERIFY_10: { 10785 struct scsi_verify_10 *cdb; 10786 10787 cdb = 
(struct scsi_verify_10 *)io->scsiio.cdb; 10788 10789 *lba = scsi_4btoul(cdb->addr); 10790 *len = scsi_2btoul(cdb->length); 10791 break; 10792 } 10793 case VERIFY_12: { 10794 struct scsi_verify_12 *cdb; 10795 10796 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10797 10798 *lba = scsi_4btoul(cdb->addr); 10799 *len = scsi_4btoul(cdb->length); 10800 break; 10801 } 10802 case VERIFY_16: { 10803 struct scsi_verify_16 *cdb; 10804 10805 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10806 10807 *lba = scsi_8btou64(cdb->addr); 10808 *len = scsi_4btoul(cdb->length); 10809 break; 10810 } 10811 case UNMAP: { 10812 *lba = 0; 10813 *len = UINT64_MAX; 10814 break; 10815 } 10816 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10817 struct scsi_get_lba_status *cdb; 10818 10819 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 10820 *lba = scsi_8btou64(cdb->addr); 10821 *len = UINT32_MAX; 10822 break; 10823 } 10824 default: 10825 return (1); 10826 break; /* NOTREACHED */ 10827 } 10828 10829 return (0); 10830 } 10831 10832 static ctl_action 10833 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, 10834 bool seq) 10835 { 10836 uint64_t endlba1, endlba2; 10837 10838 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10839 endlba2 = lba2 + len2 - 1; 10840 10841 if ((endlba1 < lba2) || (endlba2 < lba1)) 10842 return (CTL_ACTION_PASS); 10843 else 10844 return (CTL_ACTION_BLOCK); 10845 } 10846 10847 static int 10848 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10849 { 10850 struct ctl_ptr_len_flags *ptrlen; 10851 struct scsi_unmap_desc *buf, *end, *range; 10852 uint64_t lba; 10853 uint32_t len; 10854 10855 /* If not UNMAP -- go other way. */ 10856 if (io->io_hdr.io_type != CTL_IO_SCSI || 10857 io->scsiio.cdb[0] != UNMAP) 10858 return (CTL_ACTION_ERROR); 10859 10860 /* If UNMAP without data -- block and wait for data. */ 10861 ptrlen = (struct ctl_ptr_len_flags *) 10862 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10863 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10864 ptrlen->ptr == NULL) 10865 return (CTL_ACTION_BLOCK); 10866 10867 /* UNMAP with data -- check for collision. 
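 *
 * Each descriptor [lba, lba + len) is compared against the extent
 * [lba2, lba2 + len2); they collide exactly when lba < lba2 + len2 and
 * lba + len > lba2.  For example, a descriptor for LBAs 100-199 blocks
 * a pending I/O at lba2 150, len2 8, since 100 < 158 and 200 > 150.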
*/ 10868 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10869 end = buf + ptrlen->len / sizeof(*buf); 10870 for (range = buf; range < end; range++) { 10871 lba = scsi_8btou64(range->lba); 10872 len = scsi_4btoul(range->length); 10873 if ((lba < lba2 + len2) && (lba + len > lba2)) 10874 return (CTL_ACTION_BLOCK); 10875 } 10876 return (CTL_ACTION_PASS); 10877 } 10878 10879 static ctl_action 10880 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10881 { 10882 uint64_t lba1, lba2; 10883 uint64_t len1, len2; 10884 int retval; 10885 10886 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10887 return (CTL_ACTION_ERROR); 10888 10889 retval = ctl_extent_check_unmap(io1, lba2, len2); 10890 if (retval != CTL_ACTION_ERROR) 10891 return (retval); 10892 10893 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10894 return (CTL_ACTION_ERROR); 10895 10896 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10897 seq = FALSE; 10898 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 10899 } 10900 10901 static ctl_action 10902 ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) 10903 { 10904 uint64_t lba1, lba2; 10905 uint64_t len1, len2; 10906 10907 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10908 return (CTL_ACTION_PASS); 10909 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10910 return (CTL_ACTION_ERROR); 10911 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10912 return (CTL_ACTION_ERROR); 10913 10914 if (lba1 + len1 == lba2) 10915 return (CTL_ACTION_BLOCK); 10916 return (CTL_ACTION_PASS); 10917 } 10918 10919 static ctl_action 10920 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 10921 union ctl_io *ooa_io) 10922 { 10923 const struct ctl_cmd_entry *pending_entry, *ooa_entry; 10924 const ctl_serialize_action *serialize_row; 10925 10926 /* 10927 * Aborted commands are not going to be executed and may even 10928 * not report completion, so we don't care about their order. 10929 * Let them complete ASAP to clean the OOA queue. 10930 */ 10931 if (pending_io->io_hdr.flags & CTL_FLAG_ABORT) 10932 return (CTL_ACTION_SKIP); 10933 10934 /* 10935 * The initiator attempted multiple untagged commands at the same 10936 * time. Can't do that. 10937 */ 10938 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10939 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10940 && ((pending_io->io_hdr.nexus.targ_port == 10941 ooa_io->io_hdr.nexus.targ_port) 10942 && (pending_io->io_hdr.nexus.initid == 10943 ooa_io->io_hdr.nexus.initid)) 10944 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10945 CTL_FLAG_STATUS_SENT)) == 0)) 10946 return (CTL_ACTION_OVERLAP); 10947 10948 /* 10949 * The initiator attempted to send multiple tagged commands with 10950 * the same ID. (It's fine if different initiators have the same 10951 * tag ID.) 10952 * 10953 * Even if all of those conditions are true, we don't kill the I/O 10954 * if the command ahead of us has been aborted. We won't end up 10955 * sending it to the FETD, and it's perfectly legal to resend a 10956 * command with the same tag number as long as the previous 10957 * instance of this tag number has been aborted somehow. 
10958 */ 10959 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10960 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10961 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 10962 && ((pending_io->io_hdr.nexus.targ_port == 10963 ooa_io->io_hdr.nexus.targ_port) 10964 && (pending_io->io_hdr.nexus.initid == 10965 ooa_io->io_hdr.nexus.initid)) 10966 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10967 CTL_FLAG_STATUS_SENT)) == 0)) 10968 return (CTL_ACTION_OVERLAP_TAG); 10969 10970 /* 10971 * If we get a head of queue tag, SAM-3 says that we should 10972 * immediately execute it. 10973 * 10974 * What happens if this command would normally block for some other 10975 * reason? e.g. a request sense with a head of queue tag 10976 * immediately after a write. Normally that would block, but this 10977 * will result in its getting executed immediately... 10978 * 10979 * We currently return "pass" instead of "skip", so we'll end up 10980 * going through the rest of the queue to check for overlapped tags. 10981 * 10982 * XXX KDM check for other types of blockage first?? 10983 */ 10984 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10985 return (CTL_ACTION_PASS); 10986 10987 /* 10988 * Ordered tags have to block until all items ahead of them 10989 * have completed. If we get called with an ordered tag, we always 10990 * block, if something else is ahead of us in the queue. 10991 */ 10992 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 10993 return (CTL_ACTION_BLOCK); 10994 10995 /* 10996 * Simple tags get blocked until all head of queue and ordered tags 10997 * ahead of them have completed. I'm lumping untagged commands in 10998 * with simple tags here. XXX KDM is that the right thing to do? 10999 */ 11000 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 11001 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 11002 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 11003 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 11004 return (CTL_ACTION_BLOCK); 11005 11006 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); 11007 KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT, 11008 ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p", 11009 __func__, pending_entry->seridx, pending_io->scsiio.cdb[0], 11010 pending_io->scsiio.cdb[1], pending_io)); 11011 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); 11012 if (ooa_entry->seridx == CTL_SERIDX_INVLD) 11013 return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */ 11014 KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT, 11015 ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p", 11016 __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0], 11017 ooa_io->scsiio.cdb[1], ooa_io)); 11018 11019 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 11020 11021 switch (serialize_row[pending_entry->seridx]) { 11022 case CTL_SER_BLOCK: 11023 return (CTL_ACTION_BLOCK); 11024 case CTL_SER_EXTENT: 11025 return (ctl_extent_check(ooa_io, pending_io, 11026 (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 11027 case CTL_SER_EXTENTOPT: 11028 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != 11029 SCP_QUEUE_ALG_UNRESTRICTED) 11030 return (ctl_extent_check(ooa_io, pending_io, 11031 (lun->be_lun && 11032 lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 11033 return (CTL_ACTION_PASS); 11034 case CTL_SER_EXTENTSEQ: 11035 if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) 11036 return (ctl_extent_check_seq(ooa_io, pending_io)); 11037 return (CTL_ACTION_PASS); 11038 case CTL_SER_PASS: 11039 return 
(CTL_ACTION_PASS); 11040 case CTL_SER_BLOCKOPT: 11041 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != 11042 SCP_QUEUE_ALG_UNRESTRICTED) 11043 return (CTL_ACTION_BLOCK); 11044 return (CTL_ACTION_PASS); 11045 case CTL_SER_SKIP: 11046 return (CTL_ACTION_SKIP); 11047 default: 11048 panic("%s: Invalid serialization value %d for %d => %d", 11049 __func__, serialize_row[pending_entry->seridx], 11050 pending_entry->seridx, ooa_entry->seridx); 11051 } 11052 11053 return (CTL_ACTION_ERROR); 11054 } 11055 11056 /* 11057 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 11058 * Assumptions: 11059 * - pending_io is generally either incoming, or on the blocked queue 11060 * - starting I/O is the I/O we want to start the check with. 11061 */ 11062 static ctl_action 11063 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 11064 union ctl_io **starting_io) 11065 { 11066 union ctl_io *ooa_io; 11067 ctl_action action; 11068 11069 mtx_assert(&lun->lun_lock, MA_OWNED); 11070 11071 /* 11072 * Run back along the OOA queue, starting with the current 11073 * blocked I/O and going through every I/O before it on the 11074 * queue. If starting_io is NULL, we'll just end up returning 11075 * CTL_ACTION_PASS. 11076 */ 11077 for (ooa_io = *starting_io; ooa_io != NULL; 11078 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 11079 ooa_links)){ 11080 action = ctl_check_for_blockage(lun, pending_io, ooa_io); 11081 if (action != CTL_ACTION_PASS) { 11082 *starting_io = ooa_io; 11083 return (action); 11084 } 11085 } 11086 11087 *starting_io = NULL; 11088 return (CTL_ACTION_PASS); 11089 } 11090 11091 /* 11092 * Try to unblock the specified I/O. 11093 * 11094 * skip parameter allows explicitly skip present blocker of the I/O, 11095 * starting from the previous one on OOA queue. It can be used when 11096 * we know for sure that the blocker I/O does no longer count. 11097 */ 11098 static void 11099 ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io, bool skip) 11100 { 11101 struct ctl_softc *softc = lun->ctl_softc; 11102 union ctl_io *bio, *obio; 11103 const struct ctl_cmd_entry *entry; 11104 union ctl_ha_msg msg_info; 11105 ctl_action action; 11106 11107 mtx_assert(&lun->lun_lock, MA_OWNED); 11108 11109 if (io->io_hdr.blocker == NULL) 11110 return; 11111 11112 obio = bio = io->io_hdr.blocker; 11113 if (skip) 11114 bio = (union ctl_io *)TAILQ_PREV(&bio->io_hdr, ctl_ooaq, 11115 ooa_links); 11116 action = ctl_check_ooa(lun, io, &bio); 11117 if (action == CTL_ACTION_BLOCK) { 11118 /* Still blocked, but may be by different I/O now. */ 11119 if (bio != obio) { 11120 TAILQ_REMOVE(&obio->io_hdr.blocked_queue, 11121 &io->io_hdr, blocked_links); 11122 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, 11123 &io->io_hdr, blocked_links); 11124 io->io_hdr.blocker = bio; 11125 } 11126 return; 11127 } 11128 11129 /* No longer blocked, one way or another. */ 11130 TAILQ_REMOVE(&obio->io_hdr.blocked_queue, &io->io_hdr, blocked_links); 11131 io->io_hdr.blocker = NULL; 11132 11133 switch (action) { 11134 case CTL_ACTION_OVERLAP: 11135 ctl_set_overlapped_cmd(&io->scsiio); 11136 goto error; 11137 case CTL_ACTION_OVERLAP_TAG: 11138 ctl_set_overlapped_tag(&io->scsiio, 11139 io->scsiio.tag_num & 0xff); 11140 goto error; 11141 case CTL_ACTION_PASS: 11142 case CTL_ACTION_SKIP: 11143 11144 /* Serializing commands from the other SC retire there. 
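 * That is, in non-XFER HA modes a command that was shadowed here only
 * for serialization purposes is not executed locally; once it is no
 * longer blocked it is handed back to the peer controller with a
 * CTL_MSG_R2R message.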
*/ 11145 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) && 11146 (softc->ha_mode != CTL_HA_MODE_XFER)) { 11147 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11148 msg_info.hdr.original_sc = io->io_hdr.remote_io; 11149 msg_info.hdr.serializing_sc = io; 11150 msg_info.hdr.msg_type = CTL_MSG_R2R; 11151 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11152 sizeof(msg_info.hdr), M_NOWAIT); 11153 break; 11154 } 11155 11156 /* 11157 * Check this I/O for LUN state changes that may have happened 11158 * while this command was blocked. The LUN state may have been 11159 * changed by a command ahead of us in the queue. 11160 */ 11161 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 11162 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { 11163 ctl_done(io); 11164 break; 11165 } 11166 11167 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11168 ctl_enqueue_rtr(io); 11169 break; 11170 case CTL_ACTION_ERROR: 11171 default: 11172 ctl_set_internal_failure(&io->scsiio, 11173 /*sks_valid*/ 0, 11174 /*retry_count*/ 0); 11175 11176 error: 11177 /* Serializing commands from the other SC are done here. */ 11178 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) && 11179 (softc->ha_mode != CTL_HA_MODE_XFER)) { 11180 ctl_try_unblock_others(lun, io, TRUE); 11181 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 11182 11183 ctl_copy_sense_data_back(io, &msg_info); 11184 msg_info.hdr.original_sc = io->io_hdr.remote_io; 11185 msg_info.hdr.serializing_sc = NULL; 11186 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 11187 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11188 sizeof(msg_info.scsi), M_WAITOK); 11189 ctl_free_io(io); 11190 break; 11191 } 11192 11193 ctl_done(io); 11194 break; 11195 } 11196 } 11197 11198 /* 11199 * Try to unblock I/Os blocked by the specified I/O. 11200 * 11201 * skip parameter allows explicitly skip the specified I/O as blocker, 11202 * starting from the previous one on the OOA queue. It can be used when 11203 * we know for sure that the specified I/O does no longer count (done). 11204 * It has to be still on OOA queue though so that we know where to start. 11205 */ 11206 static void 11207 ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *bio, bool skip) 11208 { 11209 union ctl_io *io, *next_io; 11210 11211 mtx_assert(&lun->lun_lock, MA_OWNED); 11212 11213 for (io = (union ctl_io *)TAILQ_FIRST(&bio->io_hdr.blocked_queue); 11214 io != NULL; io = next_io) { 11215 next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, blocked_links); 11216 11217 KASSERT(io->io_hdr.blocker != NULL, 11218 ("I/O %p on blocked list without blocker", io)); 11219 ctl_try_unblock_io(lun, io, skip); 11220 } 11221 KASSERT(!skip || TAILQ_EMPTY(&bio->io_hdr.blocked_queue), 11222 ("blocked_queue is not empty after skipping %p", bio)); 11223 } 11224 11225 /* 11226 * This routine (with one exception) checks LUN flags that can be set by 11227 * commands ahead of us in the OOA queue. These flags have to be checked 11228 * when a command initially comes in, and when we pull a command off the 11229 * blocked queue and are preparing to execute it. The reason we have to 11230 * check these flags for commands on the blocked queue is that the LUN 11231 * state may have been changed by a command ahead of us while we're on the 11232 * blocked queue. 11233 * 11234 * Ordering is somewhat important with these checks, so please pay 11235 * careful attention to the placement of any new checks. 
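 *
 * In order, the checks below cover: HA availability on a secondary
 * controller (unavailable, transitioning or standby LUNs), write
 * protection (a read-only backend or the SWP bit in the Control mode
 * page), SPC-2 RESERVE/RELEASE reservations, persistent reservations,
 * and finally the media state (ejected, no media or stopped).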
11236 */ 11237 static int 11238 ctl_scsiio_lun_check(struct ctl_lun *lun, 11239 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 11240 { 11241 struct ctl_softc *softc = lun->ctl_softc; 11242 int retval; 11243 uint32_t residx; 11244 11245 retval = 0; 11246 11247 mtx_assert(&lun->lun_lock, MA_OWNED); 11248 11249 /* 11250 * If this shelf is a secondary shelf controller, we may have to 11251 * reject some commands disallowed by HA mode and link state. 11252 */ 11253 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11254 if (softc->ha_link == CTL_HA_LINK_OFFLINE && 11255 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11256 ctl_set_lun_unavail(ctsio); 11257 retval = 1; 11258 goto bailout; 11259 } 11260 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && 11261 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11262 ctl_set_lun_transit(ctsio); 11263 retval = 1; 11264 goto bailout; 11265 } 11266 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && 11267 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { 11268 ctl_set_lun_standby(ctsio); 11269 retval = 1; 11270 goto bailout; 11271 } 11272 11273 /* The rest of checks are only done on executing side */ 11274 if (softc->ha_mode == CTL_HA_MODE_XFER) 11275 goto bailout; 11276 } 11277 11278 if (entry->pattern & CTL_LUN_PAT_WRITE) { 11279 if (lun->be_lun && 11280 lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { 11281 ctl_set_hw_write_protected(ctsio); 11282 retval = 1; 11283 goto bailout; 11284 } 11285 if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) { 11286 ctl_set_sense(ctsio, /*current_error*/ 1, 11287 /*sense_key*/ SSD_KEY_DATA_PROTECT, 11288 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 11289 retval = 1; 11290 goto bailout; 11291 } 11292 } 11293 11294 /* 11295 * Check for a reservation conflict. If this command isn't allowed 11296 * even on reserved LUNs, and if this initiator isn't the one who 11297 * reserved us, reject the command with a reservation conflict. 11298 */ 11299 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11300 if ((lun->flags & CTL_LUN_RESERVED) 11301 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 11302 if (lun->res_idx != residx) { 11303 ctl_set_reservation_conflict(ctsio); 11304 retval = 1; 11305 goto bailout; 11306 } 11307 } 11308 11309 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 11310 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 11311 /* No reservation or command is allowed. */; 11312 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 11313 (lun->pr_res_type == SPR_TYPE_WR_EX || 11314 lun->pr_res_type == SPR_TYPE_WR_EX_RO || 11315 lun->pr_res_type == SPR_TYPE_WR_EX_AR)) { 11316 /* The command is allowed for Write Exclusive resv. */; 11317 } else { 11318 /* 11319 * if we aren't registered or it's a res holder type 11320 * reservation and this isn't the res holder then set a 11321 * conflict. 
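 * (The "pr_res_type < 4" test below relies on the SPC type codes:
 * values below SPR_TYPE_WR_EX_RO are the single-holder Write Exclusive
 * and Exclusive Access types, so any initiator other than pr_res_idx
 * conflicts; the registrants-only and all-registrants variants only
 * require the registration checked via ctl_get_prkey().)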
11322 */ 11323 if (ctl_get_prkey(lun, residx) == 0 || 11324 (residx != lun->pr_res_idx && lun->pr_res_type < 4)) { 11325 ctl_set_reservation_conflict(ctsio); 11326 retval = 1; 11327 goto bailout; 11328 } 11329 } 11330 11331 if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) { 11332 if (lun->flags & CTL_LUN_EJECTED) 11333 ctl_set_lun_ejected(ctsio); 11334 else if (lun->flags & CTL_LUN_NO_MEDIA) { 11335 if (lun->flags & CTL_LUN_REMOVABLE) 11336 ctl_set_lun_no_media(ctsio); 11337 else 11338 ctl_set_lun_int_reqd(ctsio); 11339 } else if (lun->flags & CTL_LUN_STOPPED) 11340 ctl_set_lun_stopped(ctsio); 11341 else 11342 goto bailout; 11343 retval = 1; 11344 goto bailout; 11345 } 11346 11347 bailout: 11348 return (retval); 11349 } 11350 11351 static void 11352 ctl_failover_io(union ctl_io *io, int have_lock) 11353 { 11354 ctl_set_busy(&io->scsiio); 11355 ctl_done(io); 11356 } 11357 11358 static void 11359 ctl_failover_lun(union ctl_io *rio) 11360 { 11361 struct ctl_softc *softc = CTL_SOFTC(rio); 11362 struct ctl_lun *lun; 11363 struct ctl_io_hdr *io, *next_io; 11364 uint32_t targ_lun; 11365 11366 targ_lun = rio->io_hdr.nexus.targ_mapped_lun; 11367 CTL_DEBUG_PRINT(("FAILOVER for lun %u\n", targ_lun)); 11368 11369 /* Find and lock the LUN. */ 11370 mtx_lock(&softc->ctl_lock); 11371 if (targ_lun > ctl_max_luns || 11372 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11373 mtx_unlock(&softc->ctl_lock); 11374 return; 11375 } 11376 mtx_lock(&lun->lun_lock); 11377 mtx_unlock(&softc->ctl_lock); 11378 if (lun->flags & CTL_LUN_DISABLED) { 11379 mtx_unlock(&lun->lun_lock); 11380 return; 11381 } 11382 11383 if (softc->ha_mode == CTL_HA_MODE_XFER) { 11384 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11385 /* We are master */ 11386 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11387 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11388 io->flags |= CTL_FLAG_ABORT; 11389 io->flags |= CTL_FLAG_FAILOVER; 11390 ctl_try_unblock_io(lun, 11391 (union ctl_io *)io, FALSE); 11392 } else { /* This can be only due to DATAMOVE */ 11393 io->msg_type = CTL_MSG_DATAMOVE_DONE; 11394 io->flags &= ~CTL_FLAG_DMA_INPROG; 11395 io->flags |= CTL_FLAG_IO_ACTIVE; 11396 io->port_status = 31340; 11397 ctl_enqueue_isc((union ctl_io *)io); 11398 } 11399 } else 11400 /* We are slave */ 11401 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11402 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11403 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11404 io->flags |= CTL_FLAG_FAILOVER; 11405 } else { 11406 ctl_set_busy(&((union ctl_io *)io)-> 11407 scsiio); 11408 ctl_done((union ctl_io *)io); 11409 } 11410 } 11411 } 11412 } else { /* SERIALIZE modes */ 11413 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11414 /* We are master */ 11415 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11416 if (io->blocker != NULL) { 11417 TAILQ_REMOVE(&io->blocker->io_hdr.blocked_queue, 11418 io, blocked_links); 11419 io->blocker = NULL; 11420 } 11421 ctl_try_unblock_others(lun, (union ctl_io *)io, 11422 TRUE); 11423 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11424 ctl_free_io((union ctl_io *)io); 11425 } else 11426 /* We are slave */ 11427 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11428 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11429 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { 11430 ctl_set_busy(&((union ctl_io *)io)-> 11431 scsiio); 11432 ctl_done((union ctl_io *)io); 11433 } 11434 } 11435 } 11436 } 11437 mtx_unlock(&lun->lun_lock); 11438 } 11439 11440 static int 11441 ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) 11442 { 11443 struct ctl_lun *lun; 
11444 const struct ctl_cmd_entry *entry; 11445 union ctl_io *bio; 11446 uint32_t initidx, targ_lun; 11447 int retval = 0; 11448 11449 lun = NULL; 11450 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 11451 if (targ_lun < ctl_max_luns) 11452 lun = softc->ctl_luns[targ_lun]; 11453 if (lun) { 11454 /* 11455 * If the LUN is invalid, pretend that it doesn't exist. 11456 * It will go away as soon as all pending I/O has been 11457 * completed. 11458 */ 11459 mtx_lock(&lun->lun_lock); 11460 if (lun->flags & CTL_LUN_DISABLED) { 11461 mtx_unlock(&lun->lun_lock); 11462 lun = NULL; 11463 } 11464 } 11465 CTL_LUN(ctsio) = lun; 11466 if (lun) { 11467 CTL_BACKEND_LUN(ctsio) = lun->be_lun; 11468 11469 /* 11470 * Every I/O goes into the OOA queue for a particular LUN, 11471 * and stays there until completion. 11472 */ 11473 #ifdef CTL_TIME_IO 11474 if (TAILQ_EMPTY(&lun->ooa_queue)) 11475 lun->idle_time += getsbinuptime() - lun->last_busy; 11476 #endif 11477 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 11478 } 11479 11480 /* Get command entry and return error if it is unsuppotyed. */ 11481 entry = ctl_validate_command(ctsio); 11482 if (entry == NULL) { 11483 if (lun) 11484 mtx_unlock(&lun->lun_lock); 11485 return (retval); 11486 } 11487 11488 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 11489 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 11490 11491 /* 11492 * Check to see whether we can send this command to LUNs that don't 11493 * exist. This should pretty much only be the case for inquiry 11494 * and request sense. Further checks, below, really require having 11495 * a LUN, so we can't really check the command anymore. Just put 11496 * it on the rtr queue. 11497 */ 11498 if (lun == NULL) { 11499 if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) { 11500 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11501 ctl_enqueue_rtr((union ctl_io *)ctsio); 11502 return (retval); 11503 } 11504 11505 ctl_set_unsupported_lun(ctsio); 11506 ctl_done((union ctl_io *)ctsio); 11507 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); 11508 return (retval); 11509 } else { 11510 /* 11511 * Make sure we support this particular command on this LUN. 11512 * e.g., we don't support writes to the control LUN. 11513 */ 11514 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 11515 mtx_unlock(&lun->lun_lock); 11516 ctl_set_invalid_opcode(ctsio); 11517 ctl_done((union ctl_io *)ctsio); 11518 return (retval); 11519 } 11520 } 11521 11522 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11523 11524 /* 11525 * If we've got a request sense, it'll clear the contingent 11526 * allegiance condition. Otherwise, if we have a CA condition for 11527 * this initiator, clear it, because it sent down a command other 11528 * than request sense. 11529 */ 11530 if (ctsio->cdb[0] != REQUEST_SENSE) { 11531 struct scsi_sense_data *ps; 11532 11533 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; 11534 if (ps != NULL) 11535 ps[initidx % CTL_MAX_INIT_PER_PORT].error_code = 0; 11536 } 11537 11538 /* 11539 * If the command has this flag set, it handles its own unit 11540 * attention reporting, we shouldn't do anything. Otherwise we 11541 * check for any pending unit attentions, and send them back to the 11542 * initiator. We only do this when a command initially comes in, 11543 * not when we pull it off the blocked queue. 
11544 * 11545 * According to SAM-3, section 5.3.2, the order that things get 11546 * presented back to the host is basically unit attentions caused 11547 * by some sort of reset event, busy status, reservation conflicts 11548 * or task set full, and finally any other status. 11549 * 11550 * One issue here is that some of the unit attentions we report 11551 * don't fall into the "reset" category (e.g. "reported luns data 11552 * has changed"). So reporting it here, before the reservation 11553 * check, may be technically wrong. I guess the only thing to do 11554 * would be to check for and report the reset events here, and then 11555 * check for the other unit attention types after we check for a 11556 * reservation conflict. 11557 * 11558 * XXX KDM need to fix this 11559 */ 11560 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11561 ctl_ua_type ua_type; 11562 u_int sense_len = 0; 11563 11564 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, 11565 &sense_len, SSD_TYPE_NONE); 11566 if (ua_type != CTL_UA_NONE) { 11567 mtx_unlock(&lun->lun_lock); 11568 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11569 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11570 ctsio->sense_len = sense_len; 11571 ctl_done((union ctl_io *)ctsio); 11572 return (retval); 11573 } 11574 } 11575 11576 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 11577 mtx_unlock(&lun->lun_lock); 11578 ctl_done((union ctl_io *)ctsio); 11579 return (retval); 11580 } 11581 11582 /* 11583 * XXX CHD this is where we want to send IO to other side if 11584 * this LUN is secondary on this SC. We will need to make a copy 11585 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 11586 * the copy we send as FROM_OTHER. 11587 * We also need to stuff the address of the original IO so we can 11588 * find it easily. Something similar will need be done on the other 11589 * side so when we are done we can find the copy. 
11590 */ 11591 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 11592 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && 11593 (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { 11594 union ctl_ha_msg msg_info; 11595 int isc_retval; 11596 11597 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11598 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11599 mtx_unlock(&lun->lun_lock); 11600 11601 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11602 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11603 msg_info.hdr.serializing_sc = NULL; 11604 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11605 msg_info.scsi.tag_num = ctsio->tag_num; 11606 msg_info.scsi.tag_type = ctsio->tag_type; 11607 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11608 msg_info.scsi.cdb_len = ctsio->cdb_len; 11609 msg_info.scsi.priority = ctsio->priority; 11610 11611 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11612 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), 11613 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { 11614 ctl_set_busy(ctsio); 11615 ctl_done((union ctl_io *)ctsio); 11616 return (retval); 11617 } 11618 return (retval); 11619 } 11620 11621 bio = (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links); 11622 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) { 11623 case CTL_ACTION_BLOCK: 11624 ctsio->io_hdr.blocker = bio; 11625 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, 11626 blocked_links); 11627 mtx_unlock(&lun->lun_lock); 11628 return (retval); 11629 case CTL_ACTION_PASS: 11630 case CTL_ACTION_SKIP: 11631 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11632 mtx_unlock(&lun->lun_lock); 11633 ctl_enqueue_rtr((union ctl_io *)ctsio); 11634 break; 11635 case CTL_ACTION_OVERLAP: 11636 mtx_unlock(&lun->lun_lock); 11637 ctl_set_overlapped_cmd(ctsio); 11638 ctl_done((union ctl_io *)ctsio); 11639 break; 11640 case CTL_ACTION_OVERLAP_TAG: 11641 mtx_unlock(&lun->lun_lock); 11642 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11643 ctl_done((union ctl_io *)ctsio); 11644 break; 11645 case CTL_ACTION_ERROR: 11646 default: 11647 mtx_unlock(&lun->lun_lock); 11648 ctl_set_internal_failure(ctsio, 11649 /*sks_valid*/ 0, 11650 /*retry_count*/ 0); 11651 ctl_done((union ctl_io *)ctsio); 11652 break; 11653 } 11654 return (retval); 11655 } 11656 11657 const struct ctl_cmd_entry * 11658 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11659 { 11660 const struct ctl_cmd_entry *entry; 11661 int service_action; 11662 11663 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11664 if (sa) 11665 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11666 if (entry->flags & CTL_CMD_FLAG_SA5) { 11667 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11668 entry = &((const struct ctl_cmd_entry *) 11669 entry->execute)[service_action]; 11670 } 11671 return (entry); 11672 } 11673 11674 const struct ctl_cmd_entry * 11675 ctl_validate_command(struct ctl_scsiio *ctsio) 11676 { 11677 const struct ctl_cmd_entry *entry; 11678 int i, sa; 11679 uint8_t diff; 11680 11681 entry = ctl_get_cmd_entry(ctsio, &sa); 11682 if (entry->execute == NULL) { 11683 if (sa) 11684 ctl_set_invalid_field(ctsio, 11685 /*sks_valid*/ 1, 11686 /*command*/ 1, 11687 /*field*/ 1, 11688 /*bit_valid*/ 1, 11689 /*bit*/ 4); 11690 else 11691 ctl_set_invalid_opcode(ctsio); 11692 ctl_done((union ctl_io *)ctsio); 11693 return (NULL); 11694 } 11695 KASSERT(entry->length > 0, 11696 ("Not defined length for command 0x%02x/0x%02x", 11697 ctsio->cdb[0], ctsio->cdb[1])); 11698 for (i = 1; i < entry->length; i++) { 11699 diff = ctsio->cdb[i] & ~entry->usage[i - 
1]; 11700 if (diff == 0) 11701 continue; 11702 ctl_set_invalid_field(ctsio, 11703 /*sks_valid*/ 1, 11704 /*command*/ 1, 11705 /*field*/ i, 11706 /*bit_valid*/ 1, 11707 /*bit*/ fls(diff) - 1); 11708 ctl_done((union ctl_io *)ctsio); 11709 return (NULL); 11710 } 11711 return (entry); 11712 } 11713 11714 static int 11715 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11716 { 11717 11718 switch (lun_type) { 11719 case T_DIRECT: 11720 if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0) 11721 return (0); 11722 break; 11723 case T_PROCESSOR: 11724 if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) 11725 return (0); 11726 break; 11727 case T_CDROM: 11728 if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0) 11729 return (0); 11730 break; 11731 default: 11732 return (0); 11733 } 11734 return (1); 11735 } 11736 11737 static int 11738 ctl_scsiio(struct ctl_scsiio *ctsio) 11739 { 11740 int retval; 11741 const struct ctl_cmd_entry *entry; 11742 11743 retval = CTL_RETVAL_COMPLETE; 11744 11745 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11746 11747 entry = ctl_get_cmd_entry(ctsio, NULL); 11748 11749 /* 11750 * If this I/O has been aborted, just send it straight to 11751 * ctl_done() without executing it. 11752 */ 11753 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11754 ctl_done((union ctl_io *)ctsio); 11755 goto bailout; 11756 } 11757 11758 /* 11759 * All the checks should have been handled by ctl_scsiio_precheck(). 11760 * We should be clear now to just execute the I/O. 11761 */ 11762 retval = entry->execute(ctsio); 11763 11764 bailout: 11765 return (retval); 11766 } 11767 11768 static int 11769 ctl_target_reset(union ctl_io *io) 11770 { 11771 struct ctl_softc *softc = CTL_SOFTC(io); 11772 struct ctl_port *port = CTL_PORT(io); 11773 struct ctl_lun *lun; 11774 uint32_t initidx; 11775 ctl_ua_type ua_type; 11776 11777 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11778 union ctl_ha_msg msg_info; 11779 11780 msg_info.hdr.nexus = io->io_hdr.nexus; 11781 msg_info.task.task_action = io->taskio.task_action; 11782 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11783 msg_info.hdr.original_sc = NULL; 11784 msg_info.hdr.serializing_sc = NULL; 11785 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11786 sizeof(msg_info.task), M_WAITOK); 11787 } 11788 11789 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11790 if (io->taskio.task_action == CTL_TASK_TARGET_RESET) 11791 ua_type = CTL_UA_TARG_RESET; 11792 else 11793 ua_type = CTL_UA_BUS_RESET; 11794 mtx_lock(&softc->ctl_lock); 11795 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11796 if (port != NULL && 11797 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 11798 continue; 11799 ctl_do_lun_reset(lun, initidx, ua_type); 11800 } 11801 mtx_unlock(&softc->ctl_lock); 11802 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11803 return (0); 11804 } 11805 11806 /* 11807 * The LUN should always be set. The I/O is optional, and is used to 11808 * distinguish between I/Os sent by this initiator, and by other 11809 * initiators. We set unit attention for initiators other than this one. 11810 * SAM-3 is vague on this point. It does say that a unit attention should 11811 * be established for other initiators when a LUN is reset (see section 11812 * 5.7.3), but it doesn't specifically say that the unit attention should 11813 * be established for this particular initiator when a LUN is reset. 
Here 11814 * is the relevant text, from SAM-3 rev 8: 11815 * 11816 * 5.7.2 When a SCSI initiator port aborts its own tasks 11817 * 11818 * When a SCSI initiator port causes its own task(s) to be aborted, no 11819 * notification that the task(s) have been aborted shall be returned to 11820 * the SCSI initiator port other than the completion response for the 11821 * command or task management function action that caused the task(s) to 11822 * be aborted and notification(s) associated with related effects of the 11823 * action (e.g., a reset unit attention condition). 11824 * 11825 * XXX KDM for now, we're setting unit attention for all initiators. 11826 */ 11827 static void 11828 ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type) 11829 { 11830 union ctl_io *xio; 11831 int i; 11832 11833 mtx_lock(&lun->lun_lock); 11834 /* Abort tasks. */ 11835 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11836 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11837 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; 11838 ctl_try_unblock_io(lun, xio, FALSE); 11839 } 11840 /* Clear CA. */ 11841 for (i = 0; i < ctl_max_ports; i++) { 11842 free(lun->pending_sense[i], M_CTL); 11843 lun->pending_sense[i] = NULL; 11844 } 11845 /* Clear reservation. */ 11846 lun->flags &= ~CTL_LUN_RESERVED; 11847 /* Clear prevent media removal. */ 11848 if (lun->prevent) { 11849 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11850 ctl_clear_mask(lun->prevent, i); 11851 lun->prevent_count = 0; 11852 } 11853 /* Clear TPC status */ 11854 ctl_tpc_lun_clear(lun, -1); 11855 /* Establish UA. */ 11856 #if 0 11857 ctl_est_ua_all(lun, initidx, ua_type); 11858 #else 11859 ctl_est_ua_all(lun, -1, ua_type); 11860 #endif 11861 mtx_unlock(&lun->lun_lock); 11862 } 11863 11864 static int 11865 ctl_lun_reset(union ctl_io *io) 11866 { 11867 struct ctl_softc *softc = CTL_SOFTC(io); 11868 struct ctl_lun *lun; 11869 uint32_t targ_lun, initidx; 11870 11871 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11872 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11873 mtx_lock(&softc->ctl_lock); 11874 if (targ_lun >= ctl_max_luns || 11875 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11876 mtx_unlock(&softc->ctl_lock); 11877 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11878 return (1); 11879 } 11880 ctl_do_lun_reset(lun, initidx, CTL_UA_LUN_RESET); 11881 mtx_unlock(&softc->ctl_lock); 11882 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11883 11884 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { 11885 union ctl_ha_msg msg_info; 11886 11887 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11888 msg_info.hdr.nexus = io->io_hdr.nexus; 11889 msg_info.task.task_action = CTL_TASK_LUN_RESET; 11890 msg_info.hdr.original_sc = NULL; 11891 msg_info.hdr.serializing_sc = NULL; 11892 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11893 sizeof(msg_info.task), M_WAITOK); 11894 } 11895 return (0); 11896 } 11897 11898 static void 11899 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 11900 int other_sc) 11901 { 11902 union ctl_io *xio; 11903 11904 mtx_assert(&lun->lun_lock, MA_OWNED); 11905 11906 /* 11907 * Run through the OOA queue and attempt to find the given I/O. 11908 * The target port, initiator ID, tag type and tag number have to 11909 * match the values that we got from the initiator. If we have an 11910 * untagged command to abort, simply abort the first untagged command 11911 * we come to. We only allow one untagged command at a time of course. 
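 * Passing UINT32_MAX for targ_port or init_id acts as a wildcard,
 * which is how CLEAR TASK SET and I_T nexus loss reuse this loop to
 * abort everything on the LUN; commands belonging to a nexus other
 * than the requesting one are additionally flagged
 * CTL_FLAG_ABORT_STATUS, since their owners still expect a status for
 * them.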
11912 */ 11913 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11914 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11915 if ((targ_port == UINT32_MAX || 11916 targ_port == xio->io_hdr.nexus.targ_port) && 11917 (init_id == UINT32_MAX || 11918 init_id == xio->io_hdr.nexus.initid)) { 11919 if (targ_port != xio->io_hdr.nexus.targ_port || 11920 init_id != xio->io_hdr.nexus.initid) 11921 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; 11922 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11923 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11924 union ctl_ha_msg msg_info; 11925 11926 msg_info.hdr.nexus = xio->io_hdr.nexus; 11927 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11928 msg_info.task.tag_num = xio->scsiio.tag_num; 11929 msg_info.task.tag_type = xio->scsiio.tag_type; 11930 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11931 msg_info.hdr.original_sc = NULL; 11932 msg_info.hdr.serializing_sc = NULL; 11933 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11934 sizeof(msg_info.task), M_NOWAIT); 11935 } 11936 ctl_try_unblock_io(lun, xio, FALSE); 11937 } 11938 } 11939 } 11940 11941 static int 11942 ctl_abort_task_set(union ctl_io *io) 11943 { 11944 struct ctl_softc *softc = CTL_SOFTC(io); 11945 struct ctl_lun *lun; 11946 uint32_t targ_lun; 11947 11948 /* 11949 * Look up the LUN. 11950 */ 11951 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11952 mtx_lock(&softc->ctl_lock); 11953 if (targ_lun >= ctl_max_luns || 11954 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11955 mtx_unlock(&softc->ctl_lock); 11956 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11957 return (1); 11958 } 11959 11960 mtx_lock(&lun->lun_lock); 11961 mtx_unlock(&softc->ctl_lock); 11962 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 11963 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11964 io->io_hdr.nexus.initid, 11965 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11966 } else { /* CTL_TASK_CLEAR_TASK_SET */ 11967 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 11968 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11969 } 11970 mtx_unlock(&lun->lun_lock); 11971 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11972 return (0); 11973 } 11974 11975 static void 11976 ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, 11977 ctl_ua_type ua_type) 11978 { 11979 struct ctl_lun *lun; 11980 struct scsi_sense_data *ps; 11981 uint32_t p, i; 11982 11983 p = initidx / CTL_MAX_INIT_PER_PORT; 11984 i = initidx % CTL_MAX_INIT_PER_PORT; 11985 mtx_lock(&softc->ctl_lock); 11986 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11987 mtx_lock(&lun->lun_lock); 11988 /* Abort tasks. */ 11989 ctl_abort_tasks_lun(lun, p, i, 1); 11990 /* Clear CA. */ 11991 ps = lun->pending_sense[p]; 11992 if (ps != NULL) 11993 ps[i].error_code = 0; 11994 /* Clear reservation. */ 11995 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) 11996 lun->flags &= ~CTL_LUN_RESERVED; 11997 /* Clear prevent media removal. */ 11998 if (lun->prevent && ctl_is_set(lun->prevent, initidx)) { 11999 ctl_clear_mask(lun->prevent, initidx); 12000 lun->prevent_count--; 12001 } 12002 /* Clear TPC status */ 12003 ctl_tpc_lun_clear(lun, initidx); 12004 /* Establish UA. 
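 * This one is only for the initiator whose nexus was lost; LUN and
 * target resets use ctl_est_ua_all() instead so that every initiator
 * sees the unit attention.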
*/ 12005 ctl_est_ua(lun, initidx, ua_type); 12006 mtx_unlock(&lun->lun_lock); 12007 } 12008 mtx_unlock(&softc->ctl_lock); 12009 } 12010 12011 static int 12012 ctl_i_t_nexus_reset(union ctl_io *io) 12013 { 12014 struct ctl_softc *softc = CTL_SOFTC(io); 12015 uint32_t initidx; 12016 12017 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12018 union ctl_ha_msg msg_info; 12019 12020 msg_info.hdr.nexus = io->io_hdr.nexus; 12021 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; 12022 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12023 msg_info.hdr.original_sc = NULL; 12024 msg_info.hdr.serializing_sc = NULL; 12025 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 12026 sizeof(msg_info.task), M_WAITOK); 12027 } 12028 12029 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12030 ctl_i_t_nexus_loss(softc, initidx, CTL_UA_I_T_NEXUS_LOSS); 12031 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12032 return (0); 12033 } 12034 12035 static int 12036 ctl_abort_task(union ctl_io *io) 12037 { 12038 struct ctl_softc *softc = CTL_SOFTC(io); 12039 union ctl_io *xio; 12040 struct ctl_lun *lun; 12041 uint32_t targ_lun; 12042 12043 /* 12044 * Look up the LUN. 12045 */ 12046 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12047 mtx_lock(&softc->ctl_lock); 12048 if (targ_lun >= ctl_max_luns || 12049 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12050 mtx_unlock(&softc->ctl_lock); 12051 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12052 return (1); 12053 } 12054 12055 mtx_lock(&lun->lun_lock); 12056 mtx_unlock(&softc->ctl_lock); 12057 /* 12058 * Run through the OOA queue and attempt to find the given I/O. 12059 * The target port, initiator ID, tag type and tag number have to 12060 * match the values that we got from the initiator. If we have an 12061 * untagged command to abort, simply abort the first untagged command 12062 * we come to. We only allow one untagged command at a time of course. 12063 */ 12064 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12065 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12066 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 12067 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 12068 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 12069 continue; 12070 12071 /* 12072 * If the abort says that the task is untagged, the 12073 * task in the queue must be untagged. Otherwise, 12074 * we just check to see whether the tag numbers 12075 * match. This is because the QLogic firmware 12076 * doesn't pass back the tag type in an abort 12077 * request. 12078 */ 12079 #if 0 12080 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 12081 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 12082 || (xio->scsiio.tag_num == io->taskio.tag_num)) { 12083 #else 12084 /* 12085 * XXX KDM we've got problems with FC, because it 12086 * doesn't send down a tag type with aborts. So we 12087 * can only really go by the tag number... 12088 * This may cause problems with parallel SCSI. 12089 * Need to figure that out!! 
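 * (The disabled block above keeps the stricter matching that also
 * accepts an untagged abort for an untagged command; it could be
 * revived if frontends ever start passing the tag type down.)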
12090 */ 12091 if (xio->scsiio.tag_num == io->taskio.tag_num) { 12092 #endif 12093 xio->io_hdr.flags |= CTL_FLAG_ABORT; 12094 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 12095 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 12096 union ctl_ha_msg msg_info; 12097 12098 msg_info.hdr.nexus = io->io_hdr.nexus; 12099 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 12100 msg_info.task.tag_num = io->taskio.tag_num; 12101 msg_info.task.tag_type = io->taskio.tag_type; 12102 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12103 msg_info.hdr.original_sc = NULL; 12104 msg_info.hdr.serializing_sc = NULL; 12105 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 12106 sizeof(msg_info.task), M_NOWAIT); 12107 } 12108 ctl_try_unblock_io(lun, xio, FALSE); 12109 } 12110 } 12111 mtx_unlock(&lun->lun_lock); 12112 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12113 return (0); 12114 } 12115 12116 static int 12117 ctl_query_task(union ctl_io *io, int task_set) 12118 { 12119 struct ctl_softc *softc = CTL_SOFTC(io); 12120 union ctl_io *xio; 12121 struct ctl_lun *lun; 12122 int found = 0; 12123 uint32_t targ_lun; 12124 12125 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12126 mtx_lock(&softc->ctl_lock); 12127 if (targ_lun >= ctl_max_luns || 12128 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12129 mtx_unlock(&softc->ctl_lock); 12130 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12131 return (1); 12132 } 12133 mtx_lock(&lun->lun_lock); 12134 mtx_unlock(&softc->ctl_lock); 12135 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12136 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12137 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 12138 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 12139 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 12140 continue; 12141 12142 if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) { 12143 found = 1; 12144 break; 12145 } 12146 } 12147 mtx_unlock(&lun->lun_lock); 12148 if (found) 12149 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12150 else 12151 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12152 return (0); 12153 } 12154 12155 static int 12156 ctl_query_async_event(union ctl_io *io) 12157 { 12158 struct ctl_softc *softc = CTL_SOFTC(io); 12159 struct ctl_lun *lun; 12160 ctl_ua_type ua; 12161 uint32_t targ_lun, initidx; 12162 12163 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12164 mtx_lock(&softc->ctl_lock); 12165 if (targ_lun >= ctl_max_luns || 12166 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12167 mtx_unlock(&softc->ctl_lock); 12168 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12169 return (1); 12170 } 12171 mtx_lock(&lun->lun_lock); 12172 mtx_unlock(&softc->ctl_lock); 12173 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12174 ua = ctl_build_qae(lun, initidx, io->taskio.task_resp); 12175 mtx_unlock(&lun->lun_lock); 12176 if (ua != CTL_UA_NONE) 12177 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12178 else 12179 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12180 return (0); 12181 } 12182 12183 static void 12184 ctl_run_task(union ctl_io *io) 12185 { 12186 int retval = 1; 12187 12188 CTL_DEBUG_PRINT(("ctl_run_task\n")); 12189 KASSERT(io->io_hdr.io_type == CTL_IO_TASK, 12190 ("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type)); 12191 io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED; 12192 bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp)); 12193 switch (io->taskio.task_action) { 12194 case CTL_TASK_ABORT_TASK: 12195 retval = ctl_abort_task(io); 12196 
break; 12197 case CTL_TASK_ABORT_TASK_SET: 12198 case CTL_TASK_CLEAR_TASK_SET: 12199 retval = ctl_abort_task_set(io); 12200 break; 12201 case CTL_TASK_CLEAR_ACA: 12202 break; 12203 case CTL_TASK_I_T_NEXUS_RESET: 12204 retval = ctl_i_t_nexus_reset(io); 12205 break; 12206 case CTL_TASK_LUN_RESET: 12207 retval = ctl_lun_reset(io); 12208 break; 12209 case CTL_TASK_TARGET_RESET: 12210 case CTL_TASK_BUS_RESET: 12211 retval = ctl_target_reset(io); 12212 break; 12213 case CTL_TASK_PORT_LOGIN: 12214 break; 12215 case CTL_TASK_PORT_LOGOUT: 12216 break; 12217 case CTL_TASK_QUERY_TASK: 12218 retval = ctl_query_task(io, 0); 12219 break; 12220 case CTL_TASK_QUERY_TASK_SET: 12221 retval = ctl_query_task(io, 1); 12222 break; 12223 case CTL_TASK_QUERY_ASYNC_EVENT: 12224 retval = ctl_query_async_event(io); 12225 break; 12226 default: 12227 printf("%s: got unknown task management event %d\n", 12228 __func__, io->taskio.task_action); 12229 break; 12230 } 12231 if (retval == 0) 12232 io->io_hdr.status = CTL_SUCCESS; 12233 else 12234 io->io_hdr.status = CTL_ERROR; 12235 ctl_done(io); 12236 } 12237 12238 /* 12239 * For HA operation. Handle commands that come in from the other 12240 * controller. 12241 */ 12242 static void 12243 ctl_handle_isc(union ctl_io *io) 12244 { 12245 struct ctl_softc *softc = CTL_SOFTC(io); 12246 struct ctl_lun *lun; 12247 const struct ctl_cmd_entry *entry; 12248 uint32_t targ_lun; 12249 12250 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12251 switch (io->io_hdr.msg_type) { 12252 case CTL_MSG_SERIALIZE: 12253 ctl_serialize_other_sc_cmd(&io->scsiio); 12254 break; 12255 case CTL_MSG_R2R: /* Only used in SER_ONLY mode. */ 12256 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 12257 if (targ_lun >= ctl_max_luns || 12258 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12259 ctl_done(io); 12260 break; 12261 } 12262 mtx_lock(&lun->lun_lock); 12263 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { 12264 mtx_unlock(&lun->lun_lock); 12265 ctl_done(io); 12266 break; 12267 } 12268 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12269 mtx_unlock(&lun->lun_lock); 12270 ctl_enqueue_rtr(io); 12271 break; 12272 case CTL_MSG_FINISH_IO: 12273 if (softc->ha_mode == CTL_HA_MODE_XFER) { 12274 ctl_done(io); 12275 break; 12276 } 12277 if (targ_lun >= ctl_max_luns || 12278 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12279 ctl_free_io(io); 12280 break; 12281 } 12282 mtx_lock(&lun->lun_lock); 12283 ctl_try_unblock_others(lun, io, TRUE); 12284 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 12285 mtx_unlock(&lun->lun_lock); 12286 ctl_free_io(io); 12287 break; 12288 case CTL_MSG_PERS_ACTION: 12289 ctl_hndl_per_res_out_on_other_sc(io); 12290 ctl_free_io(io); 12291 break; 12292 case CTL_MSG_BAD_JUJU: 12293 ctl_done(io); 12294 break; 12295 case CTL_MSG_DATAMOVE: /* Only used in XFER mode */ 12296 ctl_datamove_remote(io); 12297 break; 12298 case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */ 12299 io->scsiio.be_move_done(io); 12300 break; 12301 case CTL_MSG_FAILOVER: 12302 ctl_failover_lun(io); 12303 ctl_free_io(io); 12304 break; 12305 default: 12306 printf("%s: Invalid message type %d\n", 12307 __func__, io->io_hdr.msg_type); 12308 ctl_free_io(io); 12309 break; 12310 } 12311 12312 } 12313 12314 /* 12315 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12316 * there is no match. 
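 *
 * CTL_LUN_PAT_CMD and CTL_LUN_PAT_ANY descriptors match immediately;
 * otherwise the descriptor's flags are intersected with the command's
 * own pattern from the command table, and an optional LBA range check
 * is applied on top of that.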
12317 */ 12318 static ctl_lun_error_pattern 12319 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12320 { 12321 const struct ctl_cmd_entry *entry; 12322 ctl_lun_error_pattern filtered_pattern, pattern; 12323 12324 pattern = desc->error_pattern; 12325 12326 /* 12327 * XXX KDM we need more data passed into this function to match a 12328 * custom pattern, and we actually need to implement custom pattern 12329 * matching. 12330 */ 12331 if (pattern & CTL_LUN_PAT_CMD) 12332 return (CTL_LUN_PAT_CMD); 12333 12334 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12335 return (CTL_LUN_PAT_ANY); 12336 12337 entry = ctl_get_cmd_entry(ctsio, NULL); 12338 12339 filtered_pattern = entry->pattern & pattern; 12340 12341 /* 12342 * If the user requested specific flags in the pattern (e.g. 12343 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12344 * flags. 12345 * 12346 * If the user did not specify any flags, it doesn't matter whether 12347 * or not the command supports the flags. 12348 */ 12349 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12350 (pattern & ~CTL_LUN_PAT_MASK)) 12351 return (CTL_LUN_PAT_NONE); 12352 12353 /* 12354 * If the user asked for a range check, see if the requested LBA 12355 * range overlaps with this command's LBA range. 12356 */ 12357 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12358 uint64_t lba1; 12359 uint64_t len1; 12360 ctl_action action; 12361 int retval; 12362 12363 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12364 if (retval != 0) 12365 return (CTL_LUN_PAT_NONE); 12366 12367 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12368 desc->lba_range.len, FALSE); 12369 /* 12370 * A "pass" means that the LBA ranges don't overlap, so 12371 * this doesn't match the user's range criteria. 12372 */ 12373 if (action == CTL_ACTION_PASS) 12374 return (CTL_LUN_PAT_NONE); 12375 } 12376 12377 return (filtered_pattern); 12378 } 12379 12380 static void 12381 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12382 { 12383 struct ctl_error_desc *desc, *desc2; 12384 12385 mtx_assert(&lun->lun_lock, MA_OWNED); 12386 12387 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12388 ctl_lun_error_pattern pattern; 12389 /* 12390 * Check to see whether this particular command matches 12391 * the pattern in the descriptor. 12392 */ 12393 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12394 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12395 continue; 12396 12397 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12398 case CTL_LUN_INJ_ABORTED: 12399 ctl_set_aborted(&io->scsiio); 12400 break; 12401 case CTL_LUN_INJ_MEDIUM_ERR: 12402 ctl_set_medium_error(&io->scsiio, 12403 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != 12404 CTL_FLAG_DATA_OUT); 12405 break; 12406 case CTL_LUN_INJ_UA: 12407 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12408 * OCCURRED */ 12409 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12410 break; 12411 case CTL_LUN_INJ_CUSTOM: 12412 /* 12413 * We're assuming the user knows what he is doing. 12414 * Just copy the sense information without doing 12415 * checks. 
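 * The copy is still clamped to the smaller of the two sense buffers,
 * so a malformed descriptor cannot overrun io->scsiio.sense_data.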
12416 */ 12417 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12418 MIN(sizeof(desc->custom_sense), 12419 sizeof(io->scsiio.sense_data))); 12420 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12421 io->scsiio.sense_len = SSD_FULL_SIZE; 12422 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12423 break; 12424 case CTL_LUN_INJ_NONE: 12425 default: 12426 /* 12427 * If this is an error injection type we don't know 12428 * about, clear the continuous flag (if it is set) 12429 * so it will get deleted below. 12430 */ 12431 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12432 break; 12433 } 12434 /* 12435 * By default, each error injection action is a one-shot 12436 */ 12437 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12438 continue; 12439 12440 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12441 12442 free(desc, M_CTL); 12443 } 12444 } 12445 12446 #ifdef CTL_IO_DELAY 12447 static void 12448 ctl_datamove_timer_wakeup(void *arg) 12449 { 12450 union ctl_io *io; 12451 12452 io = (union ctl_io *)arg; 12453 12454 ctl_datamove(io); 12455 } 12456 #endif /* CTL_IO_DELAY */ 12457 12458 void 12459 ctl_datamove(union ctl_io *io) 12460 { 12461 void (*fe_datamove)(union ctl_io *io); 12462 12463 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12464 12465 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12466 12467 /* No data transferred yet. Frontend must update this when done. */ 12468 io->scsiio.kern_data_resid = io->scsiio.kern_data_len; 12469 12470 #ifdef CTL_TIME_IO 12471 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12472 char str[256]; 12473 char path_str[64]; 12474 struct sbuf sb; 12475 12476 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12477 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12478 12479 sbuf_cat(&sb, path_str); 12480 switch (io->io_hdr.io_type) { 12481 case CTL_IO_SCSI: 12482 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12483 sbuf_printf(&sb, "\n"); 12484 sbuf_cat(&sb, path_str); 12485 sbuf_printf(&sb, "Tag: 0x%04x/%d, Prio: %d\n", 12486 io->scsiio.tag_num, io->scsiio.tag_type, 12487 io->scsiio.priority); 12488 break; 12489 case CTL_IO_TASK: 12490 sbuf_printf(&sb, "Task Action: %d Tag: 0x%04x/%d\n", 12491 io->taskio.task_action, 12492 io->taskio.tag_num, io->taskio.tag_type); 12493 break; 12494 default: 12495 panic("%s: Invalid CTL I/O type %d\n", 12496 __func__, io->io_hdr.io_type); 12497 } 12498 sbuf_cat(&sb, path_str); 12499 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12500 (intmax_t)time_uptime - io->io_hdr.start_time); 12501 sbuf_finish(&sb); 12502 printf("%s", sbuf_data(&sb)); 12503 } 12504 #endif /* CTL_TIME_IO */ 12505 12506 #ifdef CTL_IO_DELAY 12507 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12508 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12509 } else { 12510 struct ctl_lun *lun; 12511 12512 lun = CTL_LUN(io); 12513 if ((lun != NULL) 12514 && (lun->delay_info.datamove_delay > 0)) { 12515 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 12516 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12517 callout_reset(&io->io_hdr.delay_callout, 12518 lun->delay_info.datamove_delay * hz, 12519 ctl_datamove_timer_wakeup, io); 12520 if (lun->delay_info.datamove_type == 12521 CTL_DELAY_TYPE_ONESHOT) 12522 lun->delay_info.datamove_delay = 0; 12523 return; 12524 } 12525 } 12526 #endif 12527 12528 /* 12529 * This command has been aborted. Set the port status, so we fail 12530 * the data move. 
12531 */ 12532 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12533 printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", 12534 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12535 io->io_hdr.nexus.targ_port, 12536 io->io_hdr.nexus.targ_lun); 12537 io->io_hdr.port_status = 31337; 12538 /* 12539 * Note that the backend, in this case, will get the 12540 * callback in its context. In other cases it may get 12541 * called in the frontend's interrupt thread context. 12542 */ 12543 io->scsiio.be_move_done(io); 12544 return; 12545 } 12546 12547 /* Don't confuse frontend with zero length data move. */ 12548 if (io->scsiio.kern_data_len == 0) { 12549 io->scsiio.be_move_done(io); 12550 return; 12551 } 12552 12553 fe_datamove = CTL_PORT(io)->fe_datamove; 12554 fe_datamove(io); 12555 } 12556 12557 static void 12558 ctl_send_datamove_done(union ctl_io *io, int have_lock) 12559 { 12560 union ctl_ha_msg msg; 12561 #ifdef CTL_TIME_IO 12562 struct bintime cur_bt; 12563 #endif 12564 12565 memset(&msg, 0, sizeof(msg)); 12566 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12567 msg.hdr.original_sc = io; 12568 msg.hdr.serializing_sc = io->io_hdr.remote_io; 12569 msg.hdr.nexus = io->io_hdr.nexus; 12570 msg.hdr.status = io->io_hdr.status; 12571 msg.scsi.kern_data_resid = io->scsiio.kern_data_resid; 12572 msg.scsi.tag_num = io->scsiio.tag_num; 12573 msg.scsi.tag_type = io->scsiio.tag_type; 12574 msg.scsi.scsi_status = io->scsiio.scsi_status; 12575 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12576 io->scsiio.sense_len); 12577 msg.scsi.sense_len = io->scsiio.sense_len; 12578 msg.scsi.port_status = io->io_hdr.port_status; 12579 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12580 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12581 ctl_failover_io(io, /*have_lock*/ have_lock); 12582 return; 12583 } 12584 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12585 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12586 msg.scsi.sense_len, M_WAITOK); 12587 12588 #ifdef CTL_TIME_IO 12589 getbinuptime(&cur_bt); 12590 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 12591 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 12592 #endif 12593 io->io_hdr.num_dmas++; 12594 } 12595 12596 /* 12597 * The DMA to the remote side is done, now we need to tell the other side 12598 * we're done so it can continue with its data movement. 12599 */ 12600 static void 12601 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12602 { 12603 union ctl_io *io; 12604 uint32_t i; 12605 12606 io = rq->context; 12607 12608 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12609 printf("%s: ISC DMA write failed with error %d", __func__, 12610 rq->ret); 12611 ctl_set_internal_failure(&io->scsiio, 12612 /*sks_valid*/ 1, 12613 /*retry_count*/ rq->ret); 12614 } 12615 12616 ctl_dt_req_free(rq); 12617 12618 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12619 free(CTL_LSGLT(io)[i].addr, M_CTL); 12620 free(CTL_RSGL(io), M_CTL); 12621 CTL_RSGL(io) = NULL; 12622 CTL_LSGL(io) = NULL; 12623 12624 /* 12625 * The data is in local and remote memory, so now we need to send 12626 * status (good or back) back to the other side. 12627 */ 12628 ctl_send_datamove_done(io, /*have_lock*/ 0); 12629 } 12630 12631 /* 12632 * We've moved the data from the host/controller into local memory. Now we 12633 * need to push it over to the remote controller's memory. 
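 * For the DATA_OUT (write) path that works out to:
 *
 *   1. ctl_datamove_remote_write() builds a local S/G list and points
 *      kern_data_ptr at it;
 *   2. the frontend's fe_datamove() fills that memory from the host;
 *   3. the be_move_done hook below hands the buffers to
 *      ctl_datamove_remote_xfer() with CTL_HA_DT_CMD_WRITE;
 *   4. ctl_datamove_remote_write_cb() then frees the buffers and
 *      reports status to the peer via ctl_send_datamove_done().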
12634 */ 12635 static int 12636 ctl_datamove_remote_dm_write_cb(union ctl_io *io) 12637 { 12638 int retval; 12639 12640 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 12641 ctl_datamove_remote_write_cb); 12642 return (retval); 12643 } 12644 12645 static void 12646 ctl_datamove_remote_write(union ctl_io *io) 12647 { 12648 int retval; 12649 void (*fe_datamove)(union ctl_io *io); 12650 12651 /* 12652 * - Get the data from the host/HBA into local memory. 12653 * - DMA memory from the local controller to the remote controller. 12654 * - Send status back to the remote controller. 12655 */ 12656 12657 retval = ctl_datamove_remote_sgl_setup(io); 12658 if (retval != 0) 12659 return; 12660 12661 /* Switch the pointer over so the FETD knows what to do */ 12662 io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io); 12663 12664 /* 12665 * Use a custom move done callback, since we need to send completion 12666 * back to the other controller, not to the backend on this side. 12667 */ 12668 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12669 12670 fe_datamove = CTL_PORT(io)->fe_datamove; 12671 fe_datamove(io); 12672 } 12673 12674 static int 12675 ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12676 { 12677 uint32_t i; 12678 12679 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12680 free(CTL_LSGLT(io)[i].addr, M_CTL); 12681 free(CTL_RSGL(io), M_CTL); 12682 CTL_RSGL(io) = NULL; 12683 CTL_LSGL(io) = NULL; 12684 12685 /* 12686 * The read is done, now we need to send status (good or bad) back 12687 * to the other side. 12688 */ 12689 ctl_send_datamove_done(io, /*have_lock*/ 0); 12690 12691 return (0); 12692 } 12693 12694 static void 12695 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12696 { 12697 union ctl_io *io; 12698 void (*fe_datamove)(union ctl_io *io); 12699 12700 io = rq->context; 12701 12702 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12703 printf("%s: ISC DMA read failed with error %d\n", __func__, 12704 rq->ret); 12705 ctl_set_internal_failure(&io->scsiio, 12706 /*sks_valid*/ 1, 12707 /*retry_count*/ rq->ret); 12708 } 12709 12710 ctl_dt_req_free(rq); 12711 12712 /* Switch the pointer over so the FETD knows what to do */ 12713 io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io); 12714 12715 /* 12716 * Use a custom move done callback, since we need to send completion 12717 * back to the other controller, not to the backend on this side. 12718 */ 12719 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12720 12721 /* XXX KDM add checks like the ones in ctl_datamove? */ 12722 12723 fe_datamove = CTL_PORT(io)->fe_datamove; 12724 fe_datamove(io); 12725 } 12726 12727 static int 12728 ctl_datamove_remote_sgl_setup(union ctl_io *io) 12729 { 12730 struct ctl_sg_entry *local_sglist; 12731 uint32_t len_to_go; 12732 int retval; 12733 int i; 12734 12735 retval = 0; 12736 local_sglist = CTL_LSGL(io); 12737 len_to_go = io->scsiio.kern_data_len; 12738 12739 /* 12740 * The difficult thing here is that the size of the various 12741 * S/G segments may be different than the size from the 12742 * remote controller. That'll make it harder when DMAing 12743 * the data back to the other side. 12744 */ 12745 for (i = 0; len_to_go > 0; i++) { 12746 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12747 local_sglist[i].addr = 12748 malloc(local_sglist[i].len, M_CTL, M_WAITOK); 12749 12750 len_to_go -= local_sglist[i].len; 12751 } 12752 /* 12753 * Reset the number of S/G entries accordingly. The original 12754 * number of S/G entries is available in rem_sg_entries. 
12755 */ 12756 io->scsiio.kern_sg_entries = i; 12757 12758 return (retval); 12759 } 12760 12761 static int 12762 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12763 ctl_ha_dt_cb callback) 12764 { 12765 struct ctl_ha_dt_req *rq; 12766 struct ctl_sg_entry *remote_sglist, *local_sglist; 12767 uint32_t local_used, remote_used, total_used; 12768 int i, j, isc_ret; 12769 12770 rq = ctl_dt_req_alloc(); 12771 12772 /* 12773 * If we failed to allocate the request, and if the DMA didn't fail 12774 * anyway, set busy status. This is just a resource allocation 12775 * failure. 12776 */ 12777 if ((rq == NULL) 12778 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12779 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) 12780 ctl_set_busy(&io->scsiio); 12781 12782 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12783 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { 12784 if (rq != NULL) 12785 ctl_dt_req_free(rq); 12786 12787 /* 12788 * The data move failed. We need to return status back 12789 * to the other controller. No point in trying to DMA 12790 * data to the remote controller. 12791 */ 12792 12793 ctl_send_datamove_done(io, /*have_lock*/ 0); 12794 12795 return (1); 12796 } 12797 12798 local_sglist = CTL_LSGL(io); 12799 remote_sglist = CTL_RSGL(io); 12800 local_used = 0; 12801 remote_used = 0; 12802 total_used = 0; 12803 12804 /* 12805 * Pull/push the data over the wire from/to the other controller. 12806 * This takes into account the possibility that the local and 12807 * remote sglists may not be identical in terms of the size of 12808 * the elements and the number of elements. 12809 * 12810 * One fundamental assumption here is that the length allocated for 12811 * both the local and remote sglists is identical. Otherwise, we've 12812 * essentially got a coding error of some sort. 12813 */ 12814 isc_ret = CTL_HA_STATUS_SUCCESS; 12815 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12816 uint32_t cur_len; 12817 uint8_t *tmp_ptr; 12818 12819 rq->command = command; 12820 rq->context = io; 12821 12822 /* 12823 * Both pointers should be aligned. But it is possible 12824 * that the allocation length is not. They should both 12825 * also have enough slack left over at the end, though, 12826 * to round up to the next 8 byte boundary. 
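 * Each pass below transfers MIN(local remaining, remote remaining)
 * bytes and advances whichever list entry it exhausts, so the two
 * lists are walked in lock step even when their segment sizes differ;
 * the completion callback is attached only to the request that carries
 * the final chunk.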
12827 */ 12828 cur_len = MIN(local_sglist[i].len - local_used, 12829 remote_sglist[j].len - remote_used); 12830 rq->size = cur_len; 12831 12832 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12833 tmp_ptr += local_used; 12834 12835 #if 0 12836 /* Use physical addresses when talking to ISC hardware */ 12837 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12838 /* XXX KDM use busdma */ 12839 rq->local = vtophys(tmp_ptr); 12840 } else 12841 rq->local = tmp_ptr; 12842 #else 12843 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12844 ("HA does not support BUS_ADDR")); 12845 rq->local = tmp_ptr; 12846 #endif 12847 12848 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12849 tmp_ptr += remote_used; 12850 rq->remote = tmp_ptr; 12851 12852 rq->callback = NULL; 12853 12854 local_used += cur_len; 12855 if (local_used >= local_sglist[i].len) { 12856 i++; 12857 local_used = 0; 12858 } 12859 12860 remote_used += cur_len; 12861 if (remote_used >= remote_sglist[j].len) { 12862 j++; 12863 remote_used = 0; 12864 } 12865 total_used += cur_len; 12866 12867 if (total_used >= io->scsiio.kern_data_len) 12868 rq->callback = callback; 12869 12870 isc_ret = ctl_dt_single(rq); 12871 if (isc_ret > CTL_HA_STATUS_SUCCESS) 12872 break; 12873 } 12874 if (isc_ret != CTL_HA_STATUS_WAIT) { 12875 rq->ret = isc_ret; 12876 callback(rq); 12877 } 12878 12879 return (0); 12880 } 12881 12882 static void 12883 ctl_datamove_remote_read(union ctl_io *io) 12884 { 12885 int retval; 12886 uint32_t i; 12887 12888 /* 12889 * This will send an error to the other controller in the case of a 12890 * failure. 12891 */ 12892 retval = ctl_datamove_remote_sgl_setup(io); 12893 if (retval != 0) 12894 return; 12895 12896 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12897 ctl_datamove_remote_read_cb); 12898 if (retval != 0) { 12899 /* 12900 * Make sure we free memory if there was an error.. The 12901 * ctl_datamove_remote_xfer() function will send the 12902 * datamove done message, or call the callback with an 12903 * error if there is a problem. 12904 */ 12905 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12906 free(CTL_LSGLT(io)[i].addr, M_CTL); 12907 free(CTL_RSGL(io), M_CTL); 12908 CTL_RSGL(io) = NULL; 12909 CTL_LSGL(io) = NULL; 12910 } 12911 } 12912 12913 /* 12914 * Process a datamove request from the other controller. This is used for 12915 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12916 * first. Once that is complete, the data gets DMAed into the remote 12917 * controller's memory. For reads, we DMA from the remote controller's 12918 * memory into our memory first, and then move it out to the FETD. 12919 */ 12920 static void 12921 ctl_datamove_remote(union ctl_io *io) 12922 { 12923 12924 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12925 12926 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12927 ctl_failover_io(io, /*have_lock*/ 0); 12928 return; 12929 } 12930 12931 /* 12932 * Note that we look for an aborted I/O here, but don't do some of 12933 * the other checks that ctl_datamove() normally does. 12934 * We don't need to run the datamove delay code, since that should 12935 * have been done if need be on the other controller. 
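 * The port_status values 31338 and 31339 used below are, like 31337 in
 * ctl_datamove(), only distinctive markers: "aborted" and "unexpected
 * data direction" respectively.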
12936 */ 12937 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12938 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, 12939 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12940 io->io_hdr.nexus.targ_port, 12941 io->io_hdr.nexus.targ_lun); 12942 io->io_hdr.port_status = 31338; 12943 ctl_send_datamove_done(io, /*have_lock*/ 0); 12944 return; 12945 } 12946 12947 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) 12948 ctl_datamove_remote_write(io); 12949 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 12950 ctl_datamove_remote_read(io); 12951 else { 12952 io->io_hdr.port_status = 31339; 12953 ctl_send_datamove_done(io, /*have_lock*/ 0); 12954 } 12955 } 12956 12957 static void 12958 ctl_process_done(union ctl_io *io) 12959 { 12960 struct ctl_softc *softc = CTL_SOFTC(io); 12961 struct ctl_port *port = CTL_PORT(io); 12962 struct ctl_lun *lun = CTL_LUN(io); 12963 void (*fe_done)(union ctl_io *io); 12964 union ctl_ha_msg msg; 12965 12966 CTL_DEBUG_PRINT(("ctl_process_done\n")); 12967 fe_done = port->fe_done; 12968 12969 #ifdef CTL_TIME_IO 12970 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12971 char str[256]; 12972 char path_str[64]; 12973 struct sbuf sb; 12974 12975 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12976 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12977 12978 sbuf_cat(&sb, path_str); 12979 switch (io->io_hdr.io_type) { 12980 case CTL_IO_SCSI: 12981 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12982 sbuf_printf(&sb, "\n"); 12983 sbuf_cat(&sb, path_str); 12984 sbuf_printf(&sb, "Tag: 0x%04x/%d, Prio: %d\n", 12985 io->scsiio.tag_num, io->scsiio.tag_type, 12986 io->scsiio.priority); 12987 break; 12988 case CTL_IO_TASK: 12989 sbuf_printf(&sb, "Task Action: %d Tag: 0x%04x/%d\n", 12990 io->taskio.task_action, 12991 io->taskio.tag_num, io->taskio.tag_type); 12992 break; 12993 default: 12994 panic("%s: Invalid CTL I/O type %d\n", 12995 __func__, io->io_hdr.io_type); 12996 } 12997 sbuf_cat(&sb, path_str); 12998 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 12999 (intmax_t)time_uptime - io->io_hdr.start_time); 13000 sbuf_finish(&sb); 13001 printf("%s", sbuf_data(&sb)); 13002 } 13003 #endif /* CTL_TIME_IO */ 13004 13005 switch (io->io_hdr.io_type) { 13006 case CTL_IO_SCSI: 13007 break; 13008 case CTL_IO_TASK: 13009 if (ctl_debug & CTL_DEBUG_INFO) 13010 ctl_io_error_print(io, NULL); 13011 fe_done(io); 13012 return; 13013 default: 13014 panic("%s: Invalid CTL I/O type %d\n", 13015 __func__, io->io_hdr.io_type); 13016 } 13017 13018 if (lun == NULL) { 13019 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 13020 io->io_hdr.nexus.targ_mapped_lun)); 13021 goto bailout; 13022 } 13023 13024 mtx_lock(&lun->lun_lock); 13025 13026 /* 13027 * Check to see if we have any informational exception and status 13028 * of this command can be modified to report it in form of either 13029 * RECOVERED ERROR or NO SENSE, depending on MRIE mode page field. 
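 * That is: when the Informational Exceptions mode page selects MRIE
 * "recovered condition" (with PER set in the error recovery pages),
 * "unconditional", or "no sense", a successful command that has not
 * already returned status gets its sense data rewritten to carry the
 * stored ASC/ASCQ, and the exception is marked as reported so it is
 * only delivered once.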
	if (lun->ie_reported == 0 && lun->ie_asc != 0 &&
	    io->io_hdr.status == CTL_SUCCESS &&
	    (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) {
		uint8_t mrie = lun->MODE_IE.mrie;
		uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) ||
		    (lun->MODE_VER.byte3 & SMS_VER_PER));
		if (((mrie == SIEP_MRIE_REC_COND && per) ||
		     mrie == SIEP_MRIE_REC_UNCOND ||
		     mrie == SIEP_MRIE_NO_SENSE) &&
		    (ctl_get_cmd_entry(&io->scsiio, NULL)->flags &
		     CTL_CMD_FLAG_NO_SENSE) == 0) {
			ctl_set_sense(&io->scsiio,
			    /*current_error*/ 1,
			    /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ?
			        SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR,
			    /*asc*/ lun->ie_asc,
			    /*ascq*/ lun->ie_ascq,
			    SSD_ELEM_NONE);
			lun->ie_reported = 1;
		}
	} else if (lun->ie_reported < 0)
		lun->ie_reported = 0;

	/*
	 * Check to see if we have any errors to inject here.  We only
	 * inject errors for commands that don't already have errors set.
	 */
	if (!STAILQ_EMPTY(&lun->error_list) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) &&
	    ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0))
		ctl_inject_error(lun, io);

	/*
	 * XXX KDM how do we treat commands that aren't completed
	 * successfully?
	 *
	 * XXX KDM should we also track I/O latency?
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
	    io->io_hdr.io_type == CTL_IO_SCSI) {
		int type;
#ifdef CTL_TIME_IO
		struct bintime bt;

		getbinuptime(&bt);
		bintime_sub(&bt, &io->io_hdr.start_bt);
#endif
		if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_IN)
			type = CTL_STATS_READ;
		else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_OUT)
			type = CTL_STATS_WRITE;
		else
			type = CTL_STATS_NO_IO;

		lun->stats.bytes[type] += io->scsiio.kern_total_len;
		lun->stats.operations[type]++;
		lun->stats.dmas[type] += io->io_hdr.num_dmas;
#ifdef CTL_TIME_IO
		bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt);
		bintime_add(&lun->stats.time[type], &bt);
#endif

		mtx_lock(&port->port_lock);
		port->stats.bytes[type] += io->scsiio.kern_total_len;
		port->stats.operations[type]++;
		port->stats.dmas[type] += io->io_hdr.num_dmas;
#ifdef CTL_TIME_IO
		bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt);
		bintime_add(&port->stats.time[type], &bt);
#endif
		mtx_unlock(&port->port_lock);
	}

	/*
	 * Run through the blocked queue of this I/O and see if anything
	 * can be unblocked, now that this I/O is done and will be removed.
	 * We need to do this before the removal, so we still have an OOA
	 * position to start the scan from.
	 */
	ctl_try_unblock_others(lun, io, TRUE);

	/*
	 * Remove this from the OOA queue.
	 */
	TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
#ifdef CTL_TIME_IO
	if (TAILQ_EMPTY(&lun->ooa_queue))
		lun->last_busy = getsbinuptime();
#endif

	/*
	 * If the LUN has been invalidated, free it if there is nothing
	 * left on its OOA queue.
	 */
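	/*
	 * CTL_LUN_INVALID is set when the LUN is being deleted (see
	 * ctl_invalidate_lun()); the actual teardown is deferred until the
	 * last outstanding command drains from the OOA queue, at which
	 * point the completing command frees the LUN here.
	 */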
	if ((lun->flags & CTL_LUN_INVALID)
	 && TAILQ_EMPTY(&lun->ooa_queue)) {
		mtx_unlock(&lun->lun_lock);
		ctl_free_lun(lun);
	} else
		mtx_unlock(&lun->lun_lock);

bailout:

	/*
	 * If this command has been aborted, make sure we set the status
	 * properly.  The FETD is responsible for freeing the I/O and doing
	 * whatever it needs to do to clean up its state.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT)
		ctl_set_task_aborted(&io->scsiio);

	/*
	 * If enabled, print command error status.
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS &&
	    (ctl_debug & CTL_DEBUG_INFO) != 0)
		ctl_io_error_print(io, NULL);

	/*
	 * Tell the FETD or the other shelf controller we're done with this
	 * command.  Note that only SCSI commands get to this point.  Task
	 * management commands are completed above.
	 */
	if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
	    (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.serializing_sc = io->io_hdr.remote_io;
		msg.hdr.nexus = io->io_hdr.nexus;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
		    M_WAITOK);
	}

	fe_done(io);
}

/*
 * The frontend should call this if it doesn't do autosense.  When a
 * REQUEST SENSE later comes in from the initiator, we'll dequeue this and
 * send it.
 */
int
ctl_queue_sense(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_port *port = CTL_PORT(io);
	struct ctl_lun *lun;
	struct scsi_sense_data *ps;
	uint32_t initidx, p, targ_lun;

	CTL_DEBUG_PRINT(("ctl_queue_sense\n"));

	targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	/*
	 * LUN lookup will likely move to the ctl_work_thread() once we
	 * have our new queueing infrastructure (that doesn't put things on
	 * a per-LUN queue initially).  That is so that we can handle
	 * things like an INQUIRY to a LUN that we don't have enabled.  We
	 * can't deal with that right now.
	 * If we don't have a LUN for this, just toss the sense information.
	 */
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		goto bailout;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);

	initidx = ctl_get_initindex(&io->io_hdr.nexus);
	p = initidx / CTL_MAX_INIT_PER_PORT;
	if (lun->pending_sense[p] == NULL) {
		lun->pending_sense[p] = malloc(sizeof(*ps) * CTL_MAX_INIT_PER_PORT,
		    M_CTL, M_NOWAIT | M_ZERO);
	}
	if ((ps = lun->pending_sense[p]) != NULL) {
		ps += initidx % CTL_MAX_INIT_PER_PORT;
		memset(ps, 0, sizeof(*ps));
		memcpy(ps, &io->scsiio.sense_data, io->scsiio.sense_len);
	}
	mtx_unlock(&lun->lun_lock);

bailout:
	ctl_free_io(io);
	return (CTL_RETVAL_COMPLETE);
}
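
/*
 * Note on the storage used above: lun->pending_sense is a per-port array
 * of per-initiator sense buffers, allocated lazily the first time sense
 * data is queued through a given port.  ctl_get_initindex() packs the port
 * and initiator into a single index, so initidx / CTL_MAX_INIT_PER_PORT
 * selects the port slot and initidx % CTL_MAX_INIT_PER_PORT the
 * initiator's entry within it.  The stored sense is handed back when the
 * initiator later issues REQUEST SENSE.
 */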

/*
 * Primary command inlet from frontend ports.  All SCSI and task I/O
 * requests must go through this function.
 */
int
ctl_queue(union ctl_io *io)
{
	struct ctl_port *port = CTL_PORT(io);

	CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));

#ifdef CTL_TIME_IO
	io->io_hdr.start_time = time_uptime;
	getbinuptime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */

	/* Map FE-specific LUN ID into global one. */
	io->io_hdr.nexus.targ_mapped_lun =
	    ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
	case CTL_IO_TASK:
		if (ctl_debug & CTL_DEBUG_CDB)
			ctl_io_print(io);
		ctl_enqueue_incoming(io);
		break;
	default:
		printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
		return (EINVAL);
	}

	return (CTL_RETVAL_COMPLETE);
}
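
#if 0
/*
 * Illustrative sketch only (not compiled): roughly how a frontend port
 * driver feeds a SCSI command into ctl_queue().  The helper name is
 * hypothetical, and the exact fields a real frontend fills in (and how it
 * allocates the I/O) vary by port driver; see the existing frontends under
 * sys/cam/ctl/ for the authoritative examples.
 */
static void
example_fe_submit(struct ctl_port *port, uint32_t initid, uint32_t lun,
    uint8_t *cdb, uint8_t cdb_len, uint32_t tag)
{
	union ctl_io *io;

	io = ctl_alloc_io(port->ctl_pool_ref);
	ctl_zero_io(io);

	io->io_hdr.io_type = CTL_IO_SCSI;
	io->io_hdr.nexus.initid = initid;
	io->io_hdr.nexus.targ_port = port->targ_port;
	io->io_hdr.nexus.targ_lun = lun;	/* port-local LUN ID */
	io->scsiio.tag_num = tag;
	io->scsiio.tag_type = CTL_TAG_SIMPLE;
	io->scsiio.cdb_len = cdb_len;
	memcpy(io->scsiio.cdb, cdb, cdb_len);

	/* Completion is delivered later through port->fe_done(). */
	ctl_queue(io);
}
#endif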

#ifdef CTL_IO_DELAY
static void
ctl_done_timer_wakeup(void *arg)
{
	union ctl_io *io;

	io = (union ctl_io *)arg;
	ctl_done(io);
}
#endif /* CTL_IO_DELAY */

void
ctl_serseq_done(union ctl_io *io)
{
	struct ctl_lun *lun = CTL_LUN(io);

	if (lun->be_lun == NULL ||
	    lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF)
		return;
	mtx_lock(&lun->lun_lock);
	io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
	ctl_try_unblock_others(lun, io, FALSE);
	mtx_unlock(&lun->lun_lock);
}

void
ctl_done(union ctl_io *io)
{

	/*
	 * Enable this to catch duplicate completion issues.
	 */
#if 0
	if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
		printf("%s: type %d msg %d cdb %x iptl: "
		       "%u:%u:%u tag 0x%04x "
		       "flag %#x status %x\n",
		       __func__,
		       io->io_hdr.io_type,
		       io->io_hdr.msg_type,
		       io->scsiio.cdb[0],
		       io->io_hdr.nexus.initid,
		       io->io_hdr.nexus.targ_port,
		       io->io_hdr.nexus.targ_lun,
		       (io->io_hdr.io_type == CTL_IO_TASK) ?
		       io->taskio.tag_num : io->scsiio.tag_num,
		       io->io_hdr.flags,
		       io->io_hdr.status);
	} else
		io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
#endif

	/*
	 * This is an internal copy of an I/O, and should not go through
	 * the normal done processing logic.
	 */
	if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
		return;

#ifdef CTL_IO_DELAY
	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
	} else {
		struct ctl_lun *lun = CTL_LUN(io);

		if ((lun != NULL)
		 && (lun->delay_info.done_delay > 0)) {
			callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
			callout_reset(&io->io_hdr.delay_callout,
			    lun->delay_info.done_delay * hz,
			    ctl_done_timer_wakeup, io);
			if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
				lun->delay_info.done_delay = 0;
			return;
		}
	}
#endif /* CTL_IO_DELAY */

	ctl_enqueue_done(io);
}

static void
ctl_work_thread(void *arg)
{
	struct ctl_thread *thr = (struct ctl_thread *)arg;
	struct ctl_softc *softc = thr->ctl_softc;
	union ctl_io *io;
	int retval;

	CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
	thread_lock(curthread);
	sched_prio(curthread, PUSER - 1);
	thread_unlock(curthread);

	while (!softc->shutdown) {
		/*
		 * We handle the queues in this order:
		 * - ISC
		 * - done queue (to free up resources, unblock other commands)
		 * - incoming queue
		 * - RtR queue
		 *
		 * If those queues are empty, we break out of the loop and
		 * go to sleep.
		 */
		mtx_lock(&thr->queue_lock);
		io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
			mtx_unlock(&thr->queue_lock);
			ctl_handle_isc(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->done_queue, links);
			/* clear any blocked commands, call fe_done */
			mtx_unlock(&thr->queue_lock);
			ctl_process_done(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
			mtx_unlock(&thr->queue_lock);
			if (io->io_hdr.io_type == CTL_IO_TASK)
				ctl_run_task(io);
			else
				ctl_scsiio_precheck(softc, &io->scsiio);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
			mtx_unlock(&thr->queue_lock);
			retval = ctl_scsiio(&io->scsiio);
			if (retval != CTL_RETVAL_COMPLETE)
				CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
			continue;
		}

		/* Sleep until we have something to do. */
		mtx_sleep(thr, &thr->queue_lock, PDROP, "-", 0);
	}
	thr->thread = NULL;
	kthread_exit();
}

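/*
 * Background thread that polls logical block provisioning thresholds.
 * For every LUN with armed LBP threshold descriptors it queries the
 * backend's block usage attributes and, depending on the arming type,
 * establishes or clears a thin provisioning threshold unit attention
 * (CTL_UA_THIN_PROV_THRES); in HA XFER mode the other controller is
 * notified of the change as well.
 */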
static void
ctl_thresh_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_lun *lun;
	struct ctl_logical_block_provisioning_page *page;
	const char *attr;
	union ctl_ha_msg msg;
	uint64_t thres, val;
	int i, e, set;

	CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));
	thread_lock(curthread);
	sched_prio(curthread, PUSER - 1);
	thread_unlock(curthread);

	while (!softc->shutdown) {
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			if ((lun->flags & CTL_LUN_DISABLED) ||
			    (lun->flags & CTL_LUN_NO_MEDIA) ||
			    lun->backend->lun_attr == NULL)
				continue;
			if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
			    softc->ha_mode == CTL_HA_MODE_XFER)
				continue;
			if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0)
				continue;
			e = 0;
			page = &lun->MODE_LBP;
			for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
				if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
					continue;
				thres = scsi_4btoul(page->descr[i].count);
				thres <<= CTL_LBP_EXPONENT;
				switch (page->descr[i].resource) {
				case 0x01:
					attr = "blocksavail";
					break;
				case 0x02:
					attr = "blocksused";
					break;
				case 0xf1:
					attr = "poolblocksavail";
					break;
				case 0xf2:
					attr = "poolblocksused";
					break;
				default:
					continue;
				}
				mtx_unlock(&softc->ctl_lock); // XXX
				val = lun->backend->lun_attr(lun->be_lun, attr);
				mtx_lock(&softc->ctl_lock);
				if (val == UINT64_MAX)
					continue;
				if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
				    == SLBPPD_ARMING_INC)
					e = (val >= thres);
				else
					e = (val <= thres);
				if (e)
					break;
			}
			mtx_lock(&lun->lun_lock);
			if (e) {
				scsi_u64to8b((uint8_t *)&page->descr[i] -
				    (uint8_t *)page, lun->ua_tpt_info);
				if (lun->lasttpt == 0 ||
				    time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
					lun->lasttpt = time_uptime;
					ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
					set = 1;
				} else
					set = 0;
			} else {
				lun->lasttpt = 0;
				ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
				set = -1;
			}
			mtx_unlock(&lun->lun_lock);
			if (set != 0 &&
			    lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
				/* Send msg to other side. */
				bzero(&msg.ua, sizeof(msg.ua));
				msg.hdr.msg_type = CTL_MSG_UA;
				msg.hdr.nexus.initid = -1;
				msg.hdr.nexus.targ_port = -1;
				msg.hdr.nexus.targ_lun = lun->lun;
				msg.hdr.nexus.targ_mapped_lun = lun->lun;
				msg.ua.ua_all = 1;
				msg.ua.ua_set = (set > 0);
				msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
				memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8);
				mtx_unlock(&softc->ctl_lock); // XXX
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
				    sizeof(msg.ua), M_WAITOK);
				mtx_lock(&softc->ctl_lock);
			}
		}
		mtx_sleep(&softc->thresh_thread, &softc->ctl_lock,
		    PDROP, "-", CTL_LBP_PERIOD * hz);
	}
	softc->thresh_thread = NULL;
	kthread_exit();
}

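/*
 * Queue insertion helpers.  New commands are spread across the worker
 * threads by a simple hash of the nexus (target port and initiator),
 * while the RtR, done and ISC queues hash on the mapped LUN, so all later
 * processing for a given LUN lands on the same thread.
 */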
static void
ctl_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;
	u_int idx;

	idx = (io->io_hdr.nexus.targ_port * 127 +
	       io->io_hdr.nexus.initid) % worker_threads;
	thr = &softc->threads[idx];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_rtr(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_done(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_isc(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * vim: ts=8
 */