1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2003-2009 Silicon Graphics International Corp. 5 * Copyright (c) 2012 The FreeBSD Foundation 6 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org> 7 * Copyright (c) 2017 Jakub Wojciech Klama <jceel@FreeBSD.org> 8 * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org> 9 * All rights reserved. 10 * 11 * Portions of this software were developed by Edward Tomasz Napierala 12 * under sponsorship from the FreeBSD Foundation. 13 * 14 * Redistribution and use in source and binary forms, with or without 15 * modification, are permitted provided that the following conditions 16 * are met: 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions, and the following disclaimer, 19 * without modification. 20 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 21 * substantially similar to the "NO WARRANTY" disclaimer below 22 * ("Disclaimer") and any redistribution must be conditioned upon 23 * including a substantially similar Disclaimer requirement for further 24 * binary redistribution. 25 * 26 * NO WARRANTY 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGES. 38 * 39 * $Id$ 40 */ 41 /* 42 * CAM Target Layer, a SCSI device emulation subsystem. 43 * 44 * Author: Ken Merry <ken@FreeBSD.org> 45 */ 46 47 #include <sys/cdefs.h> 48 __FBSDID("$FreeBSD$"); 49 50 #include <sys/param.h> 51 #include <sys/systm.h> 52 #include <sys/ctype.h> 53 #include <sys/kernel.h> 54 #include <sys/types.h> 55 #include <sys/kthread.h> 56 #include <sys/bio.h> 57 #include <sys/fcntl.h> 58 #include <sys/lock.h> 59 #include <sys/module.h> 60 #include <sys/mutex.h> 61 #include <sys/condvar.h> 62 #include <sys/malloc.h> 63 #include <sys/conf.h> 64 #include <sys/ioccom.h> 65 #include <sys/queue.h> 66 #include <sys/sbuf.h> 67 #include <sys/smp.h> 68 #include <sys/endian.h> 69 #include <sys/proc.h> 70 #include <sys/sched.h> 71 #include <sys/sysctl.h> 72 #include <sys/nv.h> 73 #include <sys/dnv.h> 74 #include <vm/uma.h> 75 76 #include <cam/cam.h> 77 #include <cam/scsi/scsi_all.h> 78 #include <cam/scsi/scsi_cd.h> 79 #include <cam/scsi/scsi_da.h> 80 #include <cam/ctl/ctl_io.h> 81 #include <cam/ctl/ctl.h> 82 #include <cam/ctl/ctl_frontend.h> 83 #include <cam/ctl/ctl_util.h> 84 #include <cam/ctl/ctl_backend.h> 85 #include <cam/ctl/ctl_ioctl.h> 86 #include <cam/ctl/ctl_ha.h> 87 #include <cam/ctl/ctl_private.h> 88 #include <cam/ctl/ctl_debug.h> 89 #include <cam/ctl/ctl_scsi_all.h> 90 #include <cam/ctl/ctl_error.h> 91 92 struct ctl_softc *control_softc = NULL; 93 94 /* 95 * Template mode pages. 96 */ 97 98 /* 99 * Note that these are default values only. The actual values will be 100 * filled in when the user does a mode sense. 
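 * Each page is defined as a default/changeable pair; the "changeable" copy
 * effectively serves as a mask of the fields that a MODE SELECT is allowed
 * to modify.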
101 */ 102 const static struct scsi_da_rw_recovery_page rw_er_page_default = { 103 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 104 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 105 /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, 106 /*read_retry_count*/0, 107 /*correction_span*/0, 108 /*head_offset_count*/0, 109 /*data_strobe_offset_cnt*/0, 110 /*byte8*/SMS_RWER_LBPERE, 111 /*write_retry_count*/0, 112 /*reserved2*/0, 113 /*recovery_time_limit*/{0, 0}, 114 }; 115 116 const static struct scsi_da_rw_recovery_page rw_er_page_changeable = { 117 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 118 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 119 /*byte3*/SMS_RWER_PER, 120 /*read_retry_count*/0, 121 /*correction_span*/0, 122 /*head_offset_count*/0, 123 /*data_strobe_offset_cnt*/0, 124 /*byte8*/SMS_RWER_LBPERE, 125 /*write_retry_count*/0, 126 /*reserved2*/0, 127 /*recovery_time_limit*/{0, 0}, 128 }; 129 130 const static struct scsi_format_page format_page_default = { 131 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 132 /*page_length*/sizeof(struct scsi_format_page) - 2, 133 /*tracks_per_zone*/ {0, 0}, 134 /*alt_sectors_per_zone*/ {0, 0}, 135 /*alt_tracks_per_zone*/ {0, 0}, 136 /*alt_tracks_per_lun*/ {0, 0}, 137 /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, 138 CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, 139 /*bytes_per_sector*/ {0, 0}, 140 /*interleave*/ {0, 0}, 141 /*track_skew*/ {0, 0}, 142 /*cylinder_skew*/ {0, 0}, 143 /*flags*/ SFP_HSEC, 144 /*reserved*/ {0, 0, 0} 145 }; 146 147 const static struct scsi_format_page format_page_changeable = { 148 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 149 /*page_length*/sizeof(struct scsi_format_page) - 2, 150 /*tracks_per_zone*/ {0, 0}, 151 /*alt_sectors_per_zone*/ {0, 0}, 152 /*alt_tracks_per_zone*/ {0, 0}, 153 /*alt_tracks_per_lun*/ {0, 0}, 154 /*sectors_per_track*/ {0, 0}, 155 /*bytes_per_sector*/ {0, 0}, 156 /*interleave*/ {0, 0}, 157 /*track_skew*/ {0, 0}, 158 /*cylinder_skew*/ {0, 0}, 159 /*flags*/ 0, 160 /*reserved*/ {0, 0, 0} 161 }; 162 163 const static struct scsi_rigid_disk_page rigid_disk_page_default = { 164 /*page_code*/SMS_RIGID_DISK_PAGE, 165 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 166 /*cylinders*/ {0, 0, 0}, 167 /*heads*/ CTL_DEFAULT_HEADS, 168 /*start_write_precomp*/ {0, 0, 0}, 169 /*start_reduced_current*/ {0, 0, 0}, 170 /*step_rate*/ {0, 0}, 171 /*landing_zone_cylinder*/ {0, 0, 0}, 172 /*rpl*/ SRDP_RPL_DISABLED, 173 /*rotational_offset*/ 0, 174 /*reserved1*/ 0, 175 /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, 176 CTL_DEFAULT_ROTATION_RATE & 0xff}, 177 /*reserved2*/ {0, 0} 178 }; 179 180 const static struct scsi_rigid_disk_page rigid_disk_page_changeable = { 181 /*page_code*/SMS_RIGID_DISK_PAGE, 182 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 183 /*cylinders*/ {0, 0, 0}, 184 /*heads*/ 0, 185 /*start_write_precomp*/ {0, 0, 0}, 186 /*start_reduced_current*/ {0, 0, 0}, 187 /*step_rate*/ {0, 0}, 188 /*landing_zone_cylinder*/ {0, 0, 0}, 189 /*rpl*/ 0, 190 /*rotational_offset*/ 0, 191 /*reserved1*/ 0, 192 /*rotation_rate*/ {0, 0}, 193 /*reserved2*/ {0, 0} 194 }; 195 196 const static struct scsi_da_verify_recovery_page verify_er_page_default = { 197 /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, 198 /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, 199 /*byte3*/0, 200 /*read_retry_count*/0, 201 /*reserved*/{ 0, 0, 0, 0, 0, 0 }, 202 /*recovery_time_limit*/{0, 0}, 203 }; 204 205 const static struct scsi_da_verify_recovery_page verify_er_page_changeable = { 206 
/*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, 207 /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, 208 /*byte3*/SMS_VER_PER, 209 /*read_retry_count*/0, 210 /*reserved*/{ 0, 0, 0, 0, 0, 0 }, 211 /*recovery_time_limit*/{0, 0}, 212 }; 213 214 const static struct scsi_caching_page caching_page_default = { 215 /*page_code*/SMS_CACHING_PAGE, 216 /*page_length*/sizeof(struct scsi_caching_page) - 2, 217 /*flags1*/ SCP_DISC | SCP_WCE, 218 /*ret_priority*/ 0, 219 /*disable_pf_transfer_len*/ {0xff, 0xff}, 220 /*min_prefetch*/ {0, 0}, 221 /*max_prefetch*/ {0xff, 0xff}, 222 /*max_pf_ceiling*/ {0xff, 0xff}, 223 /*flags2*/ 0, 224 /*cache_segments*/ 0, 225 /*cache_seg_size*/ {0, 0}, 226 /*reserved*/ 0, 227 /*non_cache_seg_size*/ {0, 0, 0} 228 }; 229 230 const static struct scsi_caching_page caching_page_changeable = { 231 /*page_code*/SMS_CACHING_PAGE, 232 /*page_length*/sizeof(struct scsi_caching_page) - 2, 233 /*flags1*/ SCP_WCE | SCP_RCD, 234 /*ret_priority*/ 0, 235 /*disable_pf_transfer_len*/ {0, 0}, 236 /*min_prefetch*/ {0, 0}, 237 /*max_prefetch*/ {0, 0}, 238 /*max_pf_ceiling*/ {0, 0}, 239 /*flags2*/ 0, 240 /*cache_segments*/ 0, 241 /*cache_seg_size*/ {0, 0}, 242 /*reserved*/ 0, 243 /*non_cache_seg_size*/ {0, 0, 0} 244 }; 245 246 const static struct scsi_control_page control_page_default = { 247 /*page_code*/SMS_CONTROL_MODE_PAGE, 248 /*page_length*/sizeof(struct scsi_control_page) - 2, 249 /*rlec*/0, 250 /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, 251 /*eca_and_aen*/0, 252 /*flags4*/SCP_TAS, 253 /*aen_holdoff_period*/{0, 0}, 254 /*busy_timeout_period*/{0, 0}, 255 /*extended_selftest_completion_time*/{0, 0} 256 }; 257 258 const static struct scsi_control_page control_page_changeable = { 259 /*page_code*/SMS_CONTROL_MODE_PAGE, 260 /*page_length*/sizeof(struct scsi_control_page) - 2, 261 /*rlec*/SCP_DSENSE, 262 /*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR, 263 /*eca_and_aen*/SCP_SWP, 264 /*flags4*/0, 265 /*aen_holdoff_period*/{0, 0}, 266 /*busy_timeout_period*/{0, 0}, 267 /*extended_selftest_completion_time*/{0, 0} 268 }; 269 270 #define CTL_CEM_LEN (sizeof(struct scsi_control_ext_page) - 4) 271 272 const static struct scsi_control_ext_page control_ext_page_default = { 273 /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, 274 /*subpage_code*/0x01, 275 /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, 276 /*flags*/0, 277 /*prio*/0, 278 /*max_sense*/0 279 }; 280 281 const static struct scsi_control_ext_page control_ext_page_changeable = { 282 /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, 283 /*subpage_code*/0x01, 284 /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, 285 /*flags*/0, 286 /*prio*/0, 287 /*max_sense*/0xff 288 }; 289 290 const static struct scsi_info_exceptions_page ie_page_default = { 291 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 292 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 293 /*info_flags*/SIEP_FLAGS_EWASC, 294 /*mrie*/SIEP_MRIE_NO, 295 /*interval_timer*/{0, 0, 0, 0}, 296 /*report_count*/{0, 0, 0, 1} 297 }; 298 299 const static struct scsi_info_exceptions_page ie_page_changeable = { 300 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 301 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 302 /*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST | 303 SIEP_FLAGS_LOGERR, 304 /*mrie*/0x0f, 305 /*interval_timer*/{0xff, 0xff, 0xff, 0xff}, 306 /*report_count*/{0xff, 0xff, 0xff, 0xff} 307 }; 308 309 #define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4) 310 311 const static struct ctl_logical_block_provisioning_page lbp_page_default = 
{{ 312 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 313 /*subpage_code*/0x02, 314 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 315 /*flags*/0, 316 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 317 /*descr*/{}}, 318 {{/*flags*/0, 319 /*resource*/0x01, 320 /*reserved*/{0, 0}, 321 /*count*/{0, 0, 0, 0}}, 322 {/*flags*/0, 323 /*resource*/0x02, 324 /*reserved*/{0, 0}, 325 /*count*/{0, 0, 0, 0}}, 326 {/*flags*/0, 327 /*resource*/0xf1, 328 /*reserved*/{0, 0}, 329 /*count*/{0, 0, 0, 0}}, 330 {/*flags*/0, 331 /*resource*/0xf2, 332 /*reserved*/{0, 0}, 333 /*count*/{0, 0, 0, 0}} 334 } 335 }; 336 337 const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{ 338 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 339 /*subpage_code*/0x02, 340 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 341 /*flags*/SLBPP_SITUA, 342 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 343 /*descr*/{}}, 344 {{/*flags*/0, 345 /*resource*/0, 346 /*reserved*/{0, 0}, 347 /*count*/{0, 0, 0, 0}}, 348 {/*flags*/0, 349 /*resource*/0, 350 /*reserved*/{0, 0}, 351 /*count*/{0, 0, 0, 0}}, 352 {/*flags*/0, 353 /*resource*/0, 354 /*reserved*/{0, 0}, 355 /*count*/{0, 0, 0, 0}}, 356 {/*flags*/0, 357 /*resource*/0, 358 /*reserved*/{0, 0}, 359 /*count*/{0, 0, 0, 0}} 360 } 361 }; 362 363 const static struct scsi_cddvd_capabilities_page cddvd_page_default = { 364 /*page_code*/SMS_CDDVD_CAPS_PAGE, 365 /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, 366 /*caps1*/0x3f, 367 /*caps2*/0x00, 368 /*caps3*/0xf0, 369 /*caps4*/0x00, 370 /*caps5*/0x29, 371 /*caps6*/0x00, 372 /*obsolete*/{0, 0}, 373 /*nvol_levels*/{0, 0}, 374 /*buffer_size*/{8, 0}, 375 /*obsolete2*/{0, 0}, 376 /*reserved*/0, 377 /*digital*/0, 378 /*obsolete3*/0, 379 /*copy_management*/0, 380 /*reserved2*/0, 381 /*rotation_control*/0, 382 /*cur_write_speed*/0, 383 /*num_speed_descr*/0, 384 }; 385 386 const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = { 387 /*page_code*/SMS_CDDVD_CAPS_PAGE, 388 /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, 389 /*caps1*/0, 390 /*caps2*/0, 391 /*caps3*/0, 392 /*caps4*/0, 393 /*caps5*/0, 394 /*caps6*/0, 395 /*obsolete*/{0, 0}, 396 /*nvol_levels*/{0, 0}, 397 /*buffer_size*/{0, 0}, 398 /*obsolete2*/{0, 0}, 399 /*reserved*/0, 400 /*digital*/0, 401 /*obsolete3*/0, 402 /*copy_management*/0, 403 /*reserved2*/0, 404 /*rotation_control*/0, 405 /*cur_write_speed*/0, 406 /*num_speed_descr*/0, 407 }; 408 409 SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 410 "CAM Target Layer"); 411 static int worker_threads = -1; 412 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, 413 &worker_threads, 1, "Number of worker threads"); 414 static int ctl_debug = CTL_DEBUG_NONE; 415 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN, 416 &ctl_debug, 0, "Enabled debug flags"); 417 static int ctl_lun_map_size = 1024; 418 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN, 419 &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)"); 420 #ifdef CTL_TIME_IO 421 static int ctl_time_io_secs = CTL_TIME_IO_DEFAULT_SECS; 422 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, time_io_secs, CTLFLAG_RWTUN, 423 &ctl_time_io_secs, 0, "Log requests taking more seconds"); 424 #endif 425 426 /* 427 * Maximum number of LUNs we support. MUST be a power of 2. 
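 * The limit may be changed with the kern.cam.ctl.max_luns loader tunable
 * (see below); ctl_init() falls back to this default if the tunable is not
 * a positive power of 2.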
 */
#define	CTL_DEFAULT_MAX_LUNS	1024
static int ctl_max_luns = CTL_DEFAULT_MAX_LUNS;
TUNABLE_INT("kern.cam.ctl.max_luns", &ctl_max_luns);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_luns, CTLFLAG_RDTUN,
    &ctl_max_luns, CTL_DEFAULT_MAX_LUNS, "Maximum number of LUNs");

/*
 * Maximum number of ports registered at one time.
 */
#define	CTL_DEFAULT_MAX_PORTS	256
static int ctl_max_ports = CTL_DEFAULT_MAX_PORTS;
TUNABLE_INT("kern.cam.ctl.max_ports", &ctl_max_ports);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_ports, CTLFLAG_RDTUN,
    &ctl_max_ports, CTL_DEFAULT_MAX_PORTS, "Maximum number of ports");

/*
 * Maximum number of initiators we support.
 */
#define	CTL_MAX_INITIATORS	(CTL_MAX_INIT_PER_PORT * ctl_max_ports)

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), SCSI Feature Sets (0x92),
 * Block limits (0xB0), Block Device Characteristics (0xB1) and
 * Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES	11

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
static int ctl_init(void);
static int ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			      struct ctl_ooa *ooa_hdr,
			      struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
static int ctl_enable_lun(struct ctl_lun *lun);
static int ctl_disable_lun(struct ctl_lun *lun);
static int ctl_free_lun(struct ctl_lun *lun);

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
				  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
				       int alloc_len);
static int ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
					 int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int
ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len); 501 static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, 502 bool seq); 503 static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2); 504 static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, 505 union ctl_io *pending_io, union ctl_io *ooa_io); 506 static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 507 union ctl_io **starting_io); 508 static void ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io, 509 bool skip); 510 static void ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *io, 511 bool skip); 512 static int ctl_scsiio_lun_check(struct ctl_lun *lun, 513 const struct ctl_cmd_entry *entry, 514 struct ctl_scsiio *ctsio); 515 static void ctl_failover_lun(union ctl_io *io); 516 static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, 517 struct ctl_scsiio *ctsio); 518 static int ctl_scsiio(struct ctl_scsiio *ctsio); 519 520 static int ctl_target_reset(union ctl_io *io); 521 static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, 522 ctl_ua_type ua_type); 523 static int ctl_lun_reset(union ctl_io *io); 524 static int ctl_abort_task(union ctl_io *io); 525 static int ctl_abort_task_set(union ctl_io *io); 526 static int ctl_query_task(union ctl_io *io, int task_set); 527 static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, 528 ctl_ua_type ua_type); 529 static int ctl_i_t_nexus_reset(union ctl_io *io); 530 static int ctl_query_async_event(union ctl_io *io); 531 static void ctl_run_task(union ctl_io *io); 532 #ifdef CTL_IO_DELAY 533 static void ctl_datamove_timer_wakeup(void *arg); 534 static void ctl_done_timer_wakeup(void *arg); 535 #endif /* CTL_IO_DELAY */ 536 537 static void ctl_send_datamove_done(union ctl_io *io, int have_lock); 538 static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq); 539 static int ctl_datamove_remote_dm_write_cb(union ctl_io *io); 540 static void ctl_datamove_remote_write(union ctl_io *io); 541 static int ctl_datamove_remote_dm_read_cb(union ctl_io *io); 542 static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq); 543 static int ctl_datamove_remote_sgl_setup(union ctl_io *io); 544 static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 545 ctl_ha_dt_cb callback); 546 static void ctl_datamove_remote_read(union ctl_io *io); 547 static void ctl_datamove_remote(union ctl_io *io); 548 static void ctl_process_done(union ctl_io *io); 549 static void ctl_thresh_thread(void *arg); 550 static void ctl_work_thread(void *arg); 551 static void ctl_enqueue_incoming(union ctl_io *io); 552 static void ctl_enqueue_rtr(union ctl_io *io); 553 static void ctl_enqueue_done(union ctl_io *io); 554 static void ctl_enqueue_isc(union ctl_io *io); 555 static const struct ctl_cmd_entry * 556 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); 557 static const struct ctl_cmd_entry * 558 ctl_validate_command(struct ctl_scsiio *ctsio); 559 static int ctl_cmd_applicable(uint8_t lun_type, 560 const struct ctl_cmd_entry *entry); 561 static int ctl_ha_init(void); 562 static int ctl_ha_shutdown(void); 563 564 static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx); 565 static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx); 566 static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx); 567 static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key); 568 569 /* 570 * Load the serialization table. 
This isn't very pretty, but is probably 571 * the easiest way to do it. 572 */ 573 #include "ctl_ser_table.c" 574 575 /* 576 * We only need to define open, close and ioctl routines for this driver. 577 */ 578 static struct cdevsw ctl_cdevsw = { 579 .d_version = D_VERSION, 580 .d_flags = 0, 581 .d_open = ctl_open, 582 .d_close = ctl_close, 583 .d_ioctl = ctl_ioctl, 584 .d_name = "ctl", 585 }; 586 587 MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL"); 588 589 static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *); 590 591 static moduledata_t ctl_moduledata = { 592 "ctl", 593 ctl_module_event_handler, 594 NULL 595 }; 596 597 DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); 598 MODULE_VERSION(ctl, 1); 599 600 static struct ctl_frontend ha_frontend = 601 { 602 .name = "ha", 603 .init = ctl_ha_init, 604 .shutdown = ctl_ha_shutdown, 605 }; 606 607 static int 608 ctl_ha_init(void) 609 { 610 struct ctl_softc *softc = control_softc; 611 612 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, 613 &softc->othersc_pool) != 0) 614 return (ENOMEM); 615 if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) { 616 ctl_pool_free(softc->othersc_pool); 617 return (EIO); 618 } 619 if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler) 620 != CTL_HA_STATUS_SUCCESS) { 621 ctl_ha_msg_destroy(softc); 622 ctl_pool_free(softc->othersc_pool); 623 return (EIO); 624 } 625 return (0); 626 }; 627 628 static int 629 ctl_ha_shutdown(void) 630 { 631 struct ctl_softc *softc = control_softc; 632 struct ctl_port *port; 633 634 ctl_ha_msg_shutdown(softc); 635 if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS) 636 return (EIO); 637 if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS) 638 return (EIO); 639 ctl_pool_free(softc->othersc_pool); 640 while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) { 641 ctl_port_deregister(port); 642 free(port->port_name, M_CTL); 643 free(port, M_CTL); 644 } 645 return (0); 646 }; 647 648 static void 649 ctl_ha_datamove(union ctl_io *io) 650 { 651 struct ctl_lun *lun = CTL_LUN(io); 652 struct ctl_sg_entry *sgl; 653 union ctl_ha_msg msg; 654 uint32_t sg_entries_sent; 655 int do_sg_copy, i, j; 656 657 memset(&msg.dt, 0, sizeof(msg.dt)); 658 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 659 msg.hdr.original_sc = io->io_hdr.remote_io; 660 msg.hdr.serializing_sc = io; 661 msg.hdr.nexus = io->io_hdr.nexus; 662 msg.hdr.status = io->io_hdr.status; 663 msg.dt.flags = io->io_hdr.flags; 664 665 /* 666 * We convert everything into a S/G list here. We can't 667 * pass by reference, only by value between controllers. 668 * So we can't pass a pointer to the S/G list, only as many 669 * S/G entries as we can fit in here. If it's possible for 670 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 671 * then we need to break this up into multiple transfers. 672 */ 673 if (io->scsiio.kern_sg_entries == 0) { 674 msg.dt.kern_sg_entries = 1; 675 #if 0 676 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 677 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; 678 } else { 679 /* XXX KDM use busdma here! 
			 */
			msg.dt.sg_list[0].addr =
				(void *)vtophys(io->scsiio.kern_data_ptr);
		}
#else
		KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
		    ("HA does not support BUS_ADDR"));
		msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
#endif
		msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
		do_sg_copy = 0;
	} else {
		msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
		do_sg_copy = 1;
	}

	msg.dt.kern_data_len = io->scsiio.kern_data_len;
	msg.dt.kern_total_len = io->scsiio.kern_total_len;
	msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
	msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
	msg.dt.sg_sequence = 0;

	/*
	 * Loop until we've sent all of the S/G entries.  On the
	 * other end, we'll recompose these S/G entries into one
	 * contiguous list before processing.
	 */
	for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries;
	    msg.dt.sg_sequence++) {
		msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) /
		    sizeof(msg.dt.sg_list[0])),
		    msg.dt.kern_sg_entries - sg_entries_sent);
		if (do_sg_copy != 0) {
			sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
			for (i = sg_entries_sent, j = 0;
			     i < msg.dt.cur_sg_entries; i++, j++) {
#if 0
				if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
					msg.dt.sg_list[j].addr = sgl[i].addr;
				} else {
					/* XXX KDM use busdma here! */
					msg.dt.sg_list[j].addr =
					    (void *)vtophys(sgl[i].addr);
				}
#else
				KASSERT((io->io_hdr.flags &
				    CTL_FLAG_BUS_ADDR) == 0,
				    ("HA does not support BUS_ADDR"));
				msg.dt.sg_list[j].addr = sgl[i].addr;
#endif
				msg.dt.sg_list[j].len = sgl[i].len;
			}
		}

		sg_entries_sent += msg.dt.cur_sg_entries;
		msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries);
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.dt) - sizeof(msg.dt.sg_list) +
		    sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries,
		    M_WAITOK) > CTL_HA_STATUS_SUCCESS) {
			io->io_hdr.port_status = 31341;
			io->scsiio.be_move_done(io);
			return;
		}
		msg.dt.sent_sg_entries = sg_entries_sent;
	}

	/*
	 * Officially hand the request over from us to the peer.
	 * If a failover has just happened, we must return an error.
	 * If it happens just after this, it is not our problem.
750 */ 751 if (lun) 752 mtx_lock(&lun->lun_lock); 753 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 754 if (lun) 755 mtx_unlock(&lun->lun_lock); 756 io->io_hdr.port_status = 31342; 757 io->scsiio.be_move_done(io); 758 return; 759 } 760 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 761 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 762 if (lun) 763 mtx_unlock(&lun->lun_lock); 764 } 765 766 static void 767 ctl_ha_done(union ctl_io *io) 768 { 769 union ctl_ha_msg msg; 770 771 if (io->io_hdr.io_type == CTL_IO_SCSI) { 772 memset(&msg, 0, sizeof(msg)); 773 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 774 msg.hdr.original_sc = io->io_hdr.remote_io; 775 msg.hdr.nexus = io->io_hdr.nexus; 776 msg.hdr.status = io->io_hdr.status; 777 msg.scsi.scsi_status = io->scsiio.scsi_status; 778 msg.scsi.tag_num = io->scsiio.tag_num; 779 msg.scsi.tag_type = io->scsiio.tag_type; 780 msg.scsi.sense_len = io->scsiio.sense_len; 781 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 782 io->scsiio.sense_len); 783 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 784 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 785 msg.scsi.sense_len, M_WAITOK); 786 } 787 ctl_free_io(io); 788 } 789 790 static void 791 ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, 792 union ctl_ha_msg *msg_info) 793 { 794 struct ctl_scsiio *ctsio; 795 796 if (msg_info->hdr.original_sc == NULL) { 797 printf("%s: original_sc == NULL!\n", __func__); 798 /* XXX KDM now what? */ 799 return; 800 } 801 802 ctsio = &msg_info->hdr.original_sc->scsiio; 803 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 804 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 805 ctsio->io_hdr.status = msg_info->hdr.status; 806 ctsio->scsi_status = msg_info->scsi.scsi_status; 807 ctsio->sense_len = msg_info->scsi.sense_len; 808 memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, 809 msg_info->scsi.sense_len); 810 ctl_enqueue_isc((union ctl_io *)ctsio); 811 } 812 813 static void 814 ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, 815 union ctl_ha_msg *msg_info) 816 { 817 struct ctl_scsiio *ctsio; 818 819 if (msg_info->hdr.serializing_sc == NULL) { 820 printf("%s: serializing_sc == NULL!\n", __func__); 821 /* XXX KDM now what? 
*/ 822 return; 823 } 824 825 ctsio = &msg_info->hdr.serializing_sc->scsiio; 826 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 827 ctl_enqueue_isc((union ctl_io *)ctsio); 828 } 829 830 void 831 ctl_isc_announce_lun(struct ctl_lun *lun) 832 { 833 struct ctl_softc *softc = lun->ctl_softc; 834 union ctl_ha_msg *msg; 835 struct ctl_ha_msg_lun_pr_key pr_key; 836 int i, k; 837 838 if (softc->ha_link != CTL_HA_LINK_ONLINE) 839 return; 840 mtx_lock(&lun->lun_lock); 841 i = sizeof(msg->lun); 842 if (lun->lun_devid) 843 i += lun->lun_devid->len; 844 i += sizeof(pr_key) * lun->pr_key_count; 845 alloc: 846 mtx_unlock(&lun->lun_lock); 847 msg = malloc(i, M_CTL, M_WAITOK); 848 mtx_lock(&lun->lun_lock); 849 k = sizeof(msg->lun); 850 if (lun->lun_devid) 851 k += lun->lun_devid->len; 852 k += sizeof(pr_key) * lun->pr_key_count; 853 if (i < k) { 854 free(msg, M_CTL); 855 i = k; 856 goto alloc; 857 } 858 bzero(&msg->lun, sizeof(msg->lun)); 859 msg->hdr.msg_type = CTL_MSG_LUN_SYNC; 860 msg->hdr.nexus.targ_lun = lun->lun; 861 msg->hdr.nexus.targ_mapped_lun = lun->lun; 862 msg->lun.flags = lun->flags; 863 msg->lun.pr_generation = lun->pr_generation; 864 msg->lun.pr_res_idx = lun->pr_res_idx; 865 msg->lun.pr_res_type = lun->pr_res_type; 866 msg->lun.pr_key_count = lun->pr_key_count; 867 i = 0; 868 if (lun->lun_devid) { 869 msg->lun.lun_devid_len = lun->lun_devid->len; 870 memcpy(&msg->lun.data[i], lun->lun_devid->data, 871 msg->lun.lun_devid_len); 872 i += msg->lun.lun_devid_len; 873 } 874 for (k = 0; k < CTL_MAX_INITIATORS; k++) { 875 if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0) 876 continue; 877 pr_key.pr_iid = k; 878 memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key)); 879 i += sizeof(pr_key); 880 } 881 mtx_unlock(&lun->lun_lock); 882 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 883 M_WAITOK); 884 free(msg, M_CTL); 885 886 if (lun->flags & CTL_LUN_PRIMARY_SC) { 887 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 888 ctl_isc_announce_mode(lun, -1, 889 lun->mode_pages.index[i].page_code & SMPH_PC_MASK, 890 lun->mode_pages.index[i].subpage); 891 } 892 } 893 } 894 895 void 896 ctl_isc_announce_port(struct ctl_port *port) 897 { 898 struct ctl_softc *softc = port->ctl_softc; 899 union ctl_ha_msg *msg; 900 int i; 901 902 if (port->targ_port < softc->port_min || 903 port->targ_port >= softc->port_max || 904 softc->ha_link != CTL_HA_LINK_ONLINE) 905 return; 906 i = sizeof(msg->port) + strlen(port->port_name) + 1; 907 if (port->lun_map) 908 i += port->lun_map_size * sizeof(uint32_t); 909 if (port->port_devid) 910 i += port->port_devid->len; 911 if (port->target_devid) 912 i += port->target_devid->len; 913 if (port->init_devid) 914 i += port->init_devid->len; 915 msg = malloc(i, M_CTL, M_WAITOK); 916 bzero(&msg->port, sizeof(msg->port)); 917 msg->hdr.msg_type = CTL_MSG_PORT_SYNC; 918 msg->hdr.nexus.targ_port = port->targ_port; 919 msg->port.port_type = port->port_type; 920 msg->port.physical_port = port->physical_port; 921 msg->port.virtual_port = port->virtual_port; 922 msg->port.status = port->status; 923 i = 0; 924 msg->port.name_len = sprintf(&msg->port.data[i], 925 "%d:%s", softc->ha_id, port->port_name) + 1; 926 i += msg->port.name_len; 927 if (port->lun_map) { 928 msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t); 929 memcpy(&msg->port.data[i], port->lun_map, 930 msg->port.lun_map_len); 931 i += msg->port.lun_map_len; 932 } 933 if (port->port_devid) { 934 msg->port.port_devid_len = port->port_devid->len; 935 memcpy(&msg->port.data[i], port->port_devid->data, 936 
msg->port.port_devid_len); 937 i += msg->port.port_devid_len; 938 } 939 if (port->target_devid) { 940 msg->port.target_devid_len = port->target_devid->len; 941 memcpy(&msg->port.data[i], port->target_devid->data, 942 msg->port.target_devid_len); 943 i += msg->port.target_devid_len; 944 } 945 if (port->init_devid) { 946 msg->port.init_devid_len = port->init_devid->len; 947 memcpy(&msg->port.data[i], port->init_devid->data, 948 msg->port.init_devid_len); 949 i += msg->port.init_devid_len; 950 } 951 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 952 M_WAITOK); 953 free(msg, M_CTL); 954 } 955 956 void 957 ctl_isc_announce_iid(struct ctl_port *port, int iid) 958 { 959 struct ctl_softc *softc = port->ctl_softc; 960 union ctl_ha_msg *msg; 961 int i, l; 962 963 if (port->targ_port < softc->port_min || 964 port->targ_port >= softc->port_max || 965 softc->ha_link != CTL_HA_LINK_ONLINE) 966 return; 967 mtx_lock(&softc->ctl_lock); 968 i = sizeof(msg->iid); 969 l = 0; 970 if (port->wwpn_iid[iid].name) 971 l = strlen(port->wwpn_iid[iid].name) + 1; 972 i += l; 973 msg = malloc(i, M_CTL, M_NOWAIT); 974 if (msg == NULL) { 975 mtx_unlock(&softc->ctl_lock); 976 return; 977 } 978 bzero(&msg->iid, sizeof(msg->iid)); 979 msg->hdr.msg_type = CTL_MSG_IID_SYNC; 980 msg->hdr.nexus.targ_port = port->targ_port; 981 msg->hdr.nexus.initid = iid; 982 msg->iid.in_use = port->wwpn_iid[iid].in_use; 983 msg->iid.name_len = l; 984 msg->iid.wwpn = port->wwpn_iid[iid].wwpn; 985 if (port->wwpn_iid[iid].name) 986 strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l); 987 mtx_unlock(&softc->ctl_lock); 988 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT); 989 free(msg, M_CTL); 990 } 991 992 void 993 ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx, 994 uint8_t page, uint8_t subpage) 995 { 996 struct ctl_softc *softc = lun->ctl_softc; 997 union ctl_ha_msg msg; 998 u_int i; 999 1000 if (softc->ha_link != CTL_HA_LINK_ONLINE) 1001 return; 1002 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 1003 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == 1004 page && lun->mode_pages.index[i].subpage == subpage) 1005 break; 1006 } 1007 if (i == CTL_NUM_MODE_PAGES) 1008 return; 1009 1010 /* Don't try to replicate pages not present on this device. */ 1011 if (lun->mode_pages.index[i].page_data == NULL) 1012 return; 1013 1014 bzero(&msg.mode, sizeof(msg.mode)); 1015 msg.hdr.msg_type = CTL_MSG_MODE_SYNC; 1016 msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT; 1017 msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT; 1018 msg.hdr.nexus.targ_lun = lun->lun; 1019 msg.hdr.nexus.targ_mapped_lun = lun->lun; 1020 msg.mode.page_code = page; 1021 msg.mode.subpage = subpage; 1022 msg.mode.page_len = lun->mode_pages.index[i].page_len; 1023 memcpy(msg.mode.data, lun->mode_pages.index[i].page_data, 1024 msg.mode.page_len); 1025 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode), 1026 M_WAITOK); 1027 } 1028 1029 static void 1030 ctl_isc_ha_link_up(struct ctl_softc *softc) 1031 { 1032 struct ctl_port *port; 1033 struct ctl_lun *lun; 1034 union ctl_ha_msg msg; 1035 int i; 1036 1037 /* Announce this node parameters to peer for validation. 
*/ 1038 msg.login.msg_type = CTL_MSG_LOGIN; 1039 msg.login.version = CTL_HA_VERSION; 1040 msg.login.ha_mode = softc->ha_mode; 1041 msg.login.ha_id = softc->ha_id; 1042 msg.login.max_luns = ctl_max_luns; 1043 msg.login.max_ports = ctl_max_ports; 1044 msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT; 1045 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login), 1046 M_WAITOK); 1047 1048 STAILQ_FOREACH(port, &softc->port_list, links) { 1049 ctl_isc_announce_port(port); 1050 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1051 if (port->wwpn_iid[i].in_use) 1052 ctl_isc_announce_iid(port, i); 1053 } 1054 } 1055 STAILQ_FOREACH(lun, &softc->lun_list, links) 1056 ctl_isc_announce_lun(lun); 1057 } 1058 1059 static void 1060 ctl_isc_ha_link_down(struct ctl_softc *softc) 1061 { 1062 struct ctl_port *port; 1063 struct ctl_lun *lun; 1064 union ctl_io *io; 1065 int i; 1066 1067 mtx_lock(&softc->ctl_lock); 1068 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1069 mtx_lock(&lun->lun_lock); 1070 if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) { 1071 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; 1072 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 1073 } 1074 mtx_unlock(&lun->lun_lock); 1075 1076 mtx_unlock(&softc->ctl_lock); 1077 io = ctl_alloc_io(softc->othersc_pool); 1078 mtx_lock(&softc->ctl_lock); 1079 ctl_zero_io(io); 1080 io->io_hdr.msg_type = CTL_MSG_FAILOVER; 1081 io->io_hdr.nexus.targ_mapped_lun = lun->lun; 1082 ctl_enqueue_isc(io); 1083 } 1084 1085 STAILQ_FOREACH(port, &softc->port_list, links) { 1086 if (port->targ_port >= softc->port_min && 1087 port->targ_port < softc->port_max) 1088 continue; 1089 port->status &= ~CTL_PORT_STATUS_ONLINE; 1090 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1091 port->wwpn_iid[i].in_use = 0; 1092 free(port->wwpn_iid[i].name, M_CTL); 1093 port->wwpn_iid[i].name = NULL; 1094 } 1095 } 1096 mtx_unlock(&softc->ctl_lock); 1097 } 1098 1099 static void 1100 ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1101 { 1102 struct ctl_lun *lun; 1103 uint32_t iid = ctl_get_initindex(&msg->hdr.nexus); 1104 1105 mtx_lock(&softc->ctl_lock); 1106 if (msg->hdr.nexus.targ_mapped_lun >= ctl_max_luns || 1107 (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) { 1108 mtx_unlock(&softc->ctl_lock); 1109 return; 1110 } 1111 mtx_lock(&lun->lun_lock); 1112 mtx_unlock(&softc->ctl_lock); 1113 if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set) 1114 memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8); 1115 if (msg->ua.ua_all) { 1116 if (msg->ua.ua_set) 1117 ctl_est_ua_all(lun, iid, msg->ua.ua_type); 1118 else 1119 ctl_clr_ua_all(lun, iid, msg->ua.ua_type); 1120 } else { 1121 if (msg->ua.ua_set) 1122 ctl_est_ua(lun, iid, msg->ua.ua_type); 1123 else 1124 ctl_clr_ua(lun, iid, msg->ua.ua_type); 1125 } 1126 mtx_unlock(&lun->lun_lock); 1127 } 1128 1129 static void 1130 ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1131 { 1132 struct ctl_lun *lun; 1133 struct ctl_ha_msg_lun_pr_key pr_key; 1134 int i, k; 1135 ctl_lun_flags oflags; 1136 uint32_t targ_lun; 1137 1138 targ_lun = msg->hdr.nexus.targ_mapped_lun; 1139 mtx_lock(&softc->ctl_lock); 1140 if (targ_lun >= ctl_max_luns || 1141 (lun = softc->ctl_luns[targ_lun]) == NULL) { 1142 mtx_unlock(&softc->ctl_lock); 1143 return; 1144 } 1145 mtx_lock(&lun->lun_lock); 1146 mtx_unlock(&softc->ctl_lock); 1147 if (lun->flags & CTL_LUN_DISABLED) { 1148 mtx_unlock(&lun->lun_lock); 1149 return; 1150 } 1151 i = (lun->lun_devid != NULL) ? 
lun->lun_devid->len : 0; 1152 if (msg->lun.lun_devid_len != i || (i > 0 && 1153 memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) { 1154 mtx_unlock(&lun->lun_lock); 1155 printf("%s: Received conflicting HA LUN %d\n", 1156 __func__, targ_lun); 1157 return; 1158 } else { 1159 /* Record whether peer is primary. */ 1160 oflags = lun->flags; 1161 if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) && 1162 (msg->lun.flags & CTL_LUN_DISABLED) == 0) 1163 lun->flags |= CTL_LUN_PEER_SC_PRIMARY; 1164 else 1165 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; 1166 if (oflags != lun->flags) 1167 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 1168 1169 /* If peer is primary and we are not -- use data */ 1170 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 1171 (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) { 1172 lun->pr_generation = msg->lun.pr_generation; 1173 lun->pr_res_idx = msg->lun.pr_res_idx; 1174 lun->pr_res_type = msg->lun.pr_res_type; 1175 lun->pr_key_count = msg->lun.pr_key_count; 1176 for (k = 0; k < CTL_MAX_INITIATORS; k++) 1177 ctl_clr_prkey(lun, k); 1178 for (k = 0; k < msg->lun.pr_key_count; k++) { 1179 memcpy(&pr_key, &msg->lun.data[i], 1180 sizeof(pr_key)); 1181 ctl_alloc_prkey(lun, pr_key.pr_iid); 1182 ctl_set_prkey(lun, pr_key.pr_iid, 1183 pr_key.pr_key); 1184 i += sizeof(pr_key); 1185 } 1186 } 1187 1188 mtx_unlock(&lun->lun_lock); 1189 CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n", 1190 __func__, targ_lun, 1191 (msg->lun.flags & CTL_LUN_PRIMARY_SC) ? 1192 "primary" : "secondary")); 1193 1194 /* If we are primary but peer doesn't know -- notify */ 1195 if ((lun->flags & CTL_LUN_PRIMARY_SC) && 1196 (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0) 1197 ctl_isc_announce_lun(lun); 1198 } 1199 } 1200 1201 static void 1202 ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1203 { 1204 struct ctl_port *port; 1205 struct ctl_lun *lun; 1206 int i, new; 1207 1208 port = softc->ctl_ports[msg->hdr.nexus.targ_port]; 1209 if (port == NULL) { 1210 CTL_DEBUG_PRINT(("%s: New port %d\n", __func__, 1211 msg->hdr.nexus.targ_port)); 1212 new = 1; 1213 port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO); 1214 port->frontend = &ha_frontend; 1215 port->targ_port = msg->hdr.nexus.targ_port; 1216 port->fe_datamove = ctl_ha_datamove; 1217 port->fe_done = ctl_ha_done; 1218 } else if (port->frontend == &ha_frontend) { 1219 CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__, 1220 msg->hdr.nexus.targ_port)); 1221 new = 0; 1222 } else { 1223 printf("%s: Received conflicting HA port %d\n", 1224 __func__, msg->hdr.nexus.targ_port); 1225 return; 1226 } 1227 port->port_type = msg->port.port_type; 1228 port->physical_port = msg->port.physical_port; 1229 port->virtual_port = msg->port.virtual_port; 1230 port->status = msg->port.status; 1231 i = 0; 1232 free(port->port_name, M_CTL); 1233 port->port_name = strndup(&msg->port.data[i], msg->port.name_len, 1234 M_CTL); 1235 i += msg->port.name_len; 1236 if (msg->port.lun_map_len != 0) { 1237 if (port->lun_map == NULL || 1238 port->lun_map_size * sizeof(uint32_t) < 1239 msg->port.lun_map_len) { 1240 port->lun_map_size = 0; 1241 free(port->lun_map, M_CTL); 1242 port->lun_map = malloc(msg->port.lun_map_len, 1243 M_CTL, M_WAITOK); 1244 } 1245 memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len); 1246 port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t); 1247 i += msg->port.lun_map_len; 1248 } else { 1249 port->lun_map_size = 0; 1250 free(port->lun_map, M_CTL); 1251 port->lun_map = NULL; 1252 } 1253 if (msg->port.port_devid_len != 0) { 1254 if 
(port->port_devid == NULL || 1255 port->port_devid->len < msg->port.port_devid_len) { 1256 free(port->port_devid, M_CTL); 1257 port->port_devid = malloc(sizeof(struct ctl_devid) + 1258 msg->port.port_devid_len, M_CTL, M_WAITOK); 1259 } 1260 memcpy(port->port_devid->data, &msg->port.data[i], 1261 msg->port.port_devid_len); 1262 port->port_devid->len = msg->port.port_devid_len; 1263 i += msg->port.port_devid_len; 1264 } else { 1265 free(port->port_devid, M_CTL); 1266 port->port_devid = NULL; 1267 } 1268 if (msg->port.target_devid_len != 0) { 1269 if (port->target_devid == NULL || 1270 port->target_devid->len < msg->port.target_devid_len) { 1271 free(port->target_devid, M_CTL); 1272 port->target_devid = malloc(sizeof(struct ctl_devid) + 1273 msg->port.target_devid_len, M_CTL, M_WAITOK); 1274 } 1275 memcpy(port->target_devid->data, &msg->port.data[i], 1276 msg->port.target_devid_len); 1277 port->target_devid->len = msg->port.target_devid_len; 1278 i += msg->port.target_devid_len; 1279 } else { 1280 free(port->target_devid, M_CTL); 1281 port->target_devid = NULL; 1282 } 1283 if (msg->port.init_devid_len != 0) { 1284 if (port->init_devid == NULL || 1285 port->init_devid->len < msg->port.init_devid_len) { 1286 free(port->init_devid, M_CTL); 1287 port->init_devid = malloc(sizeof(struct ctl_devid) + 1288 msg->port.init_devid_len, M_CTL, M_WAITOK); 1289 } 1290 memcpy(port->init_devid->data, &msg->port.data[i], 1291 msg->port.init_devid_len); 1292 port->init_devid->len = msg->port.init_devid_len; 1293 i += msg->port.init_devid_len; 1294 } else { 1295 free(port->init_devid, M_CTL); 1296 port->init_devid = NULL; 1297 } 1298 if (new) { 1299 if (ctl_port_register(port) != 0) { 1300 printf("%s: ctl_port_register() failed with error\n", 1301 __func__); 1302 } 1303 } 1304 mtx_lock(&softc->ctl_lock); 1305 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1306 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 1307 continue; 1308 mtx_lock(&lun->lun_lock); 1309 ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE); 1310 mtx_unlock(&lun->lun_lock); 1311 } 1312 mtx_unlock(&softc->ctl_lock); 1313 } 1314 1315 static void 1316 ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1317 { 1318 struct ctl_port *port; 1319 int iid; 1320 1321 port = softc->ctl_ports[msg->hdr.nexus.targ_port]; 1322 if (port == NULL) { 1323 printf("%s: Received IID for unknown port %d\n", 1324 __func__, msg->hdr.nexus.targ_port); 1325 return; 1326 } 1327 iid = msg->hdr.nexus.initid; 1328 if (port->wwpn_iid[iid].in_use != 0 && 1329 msg->iid.in_use == 0) 1330 ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); 1331 port->wwpn_iid[iid].in_use = msg->iid.in_use; 1332 port->wwpn_iid[iid].wwpn = msg->iid.wwpn; 1333 free(port->wwpn_iid[iid].name, M_CTL); 1334 if (msg->iid.name_len) { 1335 port->wwpn_iid[iid].name = strndup(&msg->iid.data[0], 1336 msg->iid.name_len, M_CTL); 1337 } else 1338 port->wwpn_iid[iid].name = NULL; 1339 } 1340 1341 static void 1342 ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1343 { 1344 1345 if (msg->login.version != CTL_HA_VERSION) { 1346 printf("CTL HA peers have different versions %d != %d\n", 1347 msg->login.version, CTL_HA_VERSION); 1348 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1349 return; 1350 } 1351 if (msg->login.ha_mode != softc->ha_mode) { 1352 printf("CTL HA peers have different ha_mode %d != %d\n", 1353 msg->login.ha_mode, softc->ha_mode); 1354 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1355 return; 1356 } 1357 if (msg->login.ha_id == softc->ha_id) { 1358 printf("CTL HA peers have same 
ha_id %d\n", msg->login.ha_id); 1359 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1360 return; 1361 } 1362 if (msg->login.max_luns != ctl_max_luns || 1363 msg->login.max_ports != ctl_max_ports || 1364 msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) { 1365 printf("CTL HA peers have different limits\n"); 1366 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1367 return; 1368 } 1369 } 1370 1371 static void 1372 ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 1373 { 1374 struct ctl_lun *lun; 1375 u_int i; 1376 uint32_t initidx, targ_lun; 1377 1378 targ_lun = msg->hdr.nexus.targ_mapped_lun; 1379 mtx_lock(&softc->ctl_lock); 1380 if (targ_lun >= ctl_max_luns || 1381 (lun = softc->ctl_luns[targ_lun]) == NULL) { 1382 mtx_unlock(&softc->ctl_lock); 1383 return; 1384 } 1385 mtx_lock(&lun->lun_lock); 1386 mtx_unlock(&softc->ctl_lock); 1387 if (lun->flags & CTL_LUN_DISABLED) { 1388 mtx_unlock(&lun->lun_lock); 1389 return; 1390 } 1391 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 1392 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == 1393 msg->mode.page_code && 1394 lun->mode_pages.index[i].subpage == msg->mode.subpage) 1395 break; 1396 } 1397 if (i == CTL_NUM_MODE_PAGES) { 1398 mtx_unlock(&lun->lun_lock); 1399 return; 1400 } 1401 memcpy(lun->mode_pages.index[i].page_data, msg->mode.data, 1402 lun->mode_pages.index[i].page_len); 1403 initidx = ctl_get_initindex(&msg->hdr.nexus); 1404 if (initidx != -1) 1405 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 1406 mtx_unlock(&lun->lun_lock); 1407 } 1408 1409 /* 1410 * ISC (Inter Shelf Communication) event handler. Events from the HA 1411 * subsystem come in here. 1412 */ 1413 static void 1414 ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) 1415 { 1416 struct ctl_softc *softc = control_softc; 1417 union ctl_io *io; 1418 struct ctl_prio *presio; 1419 ctl_ha_status isc_status; 1420 1421 CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event)); 1422 if (event == CTL_HA_EVT_MSG_RECV) { 1423 union ctl_ha_msg *msg, msgbuf; 1424 1425 if (param > sizeof(msgbuf)) 1426 msg = malloc(param, M_CTL, M_WAITOK); 1427 else 1428 msg = &msgbuf; 1429 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param, 1430 M_WAITOK); 1431 if (isc_status != CTL_HA_STATUS_SUCCESS) { 1432 printf("%s: Error receiving message: %d\n", 1433 __func__, isc_status); 1434 if (msg != &msgbuf) 1435 free(msg, M_CTL); 1436 return; 1437 } 1438 1439 CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->hdr.msg_type)); 1440 switch (msg->hdr.msg_type) { 1441 case CTL_MSG_SERIALIZE: 1442 io = ctl_alloc_io(softc->othersc_pool); 1443 ctl_zero_io(io); 1444 // populate ctsio from msg 1445 io->io_hdr.io_type = CTL_IO_SCSI; 1446 io->io_hdr.msg_type = CTL_MSG_SERIALIZE; 1447 io->io_hdr.remote_io = msg->hdr.original_sc; 1448 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | 1449 CTL_FLAG_IO_ACTIVE; 1450 /* 1451 * If we're in serialization-only mode, we don't 1452 * want to go through full done processing. Thus 1453 * the COPY flag. 1454 * 1455 * XXX KDM add another flag that is more specific. 
1456 */ 1457 if (softc->ha_mode != CTL_HA_MODE_XFER) 1458 io->io_hdr.flags |= CTL_FLAG_INT_COPY; 1459 io->io_hdr.nexus = msg->hdr.nexus; 1460 io->scsiio.tag_num = msg->scsi.tag_num; 1461 io->scsiio.tag_type = msg->scsi.tag_type; 1462 #ifdef CTL_TIME_IO 1463 io->io_hdr.start_time = time_uptime; 1464 getbinuptime(&io->io_hdr.start_bt); 1465 #endif /* CTL_TIME_IO */ 1466 io->scsiio.cdb_len = msg->scsi.cdb_len; 1467 memcpy(io->scsiio.cdb, msg->scsi.cdb, 1468 CTL_MAX_CDBLEN); 1469 if (softc->ha_mode == CTL_HA_MODE_XFER) { 1470 const struct ctl_cmd_entry *entry; 1471 1472 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 1473 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 1474 io->io_hdr.flags |= 1475 entry->flags & CTL_FLAG_DATA_MASK; 1476 } 1477 ctl_enqueue_isc(io); 1478 break; 1479 1480 /* Performed on the Originating SC, XFER mode only */ 1481 case CTL_MSG_DATAMOVE: { 1482 struct ctl_sg_entry *sgl; 1483 int i, j; 1484 1485 io = msg->hdr.original_sc; 1486 if (io == NULL) { 1487 printf("%s: original_sc == NULL!\n", __func__); 1488 /* XXX KDM do something here */ 1489 break; 1490 } 1491 io->io_hdr.msg_type = CTL_MSG_DATAMOVE; 1492 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1493 /* 1494 * Keep track of this, we need to send it back over 1495 * when the datamove is complete. 1496 */ 1497 io->io_hdr.remote_io = msg->hdr.serializing_sc; 1498 if (msg->hdr.status == CTL_SUCCESS) 1499 io->io_hdr.status = msg->hdr.status; 1500 1501 if (msg->dt.sg_sequence == 0) { 1502 #ifdef CTL_TIME_IO 1503 getbinuptime(&io->io_hdr.dma_start_bt); 1504 #endif 1505 i = msg->dt.kern_sg_entries + 1506 msg->dt.kern_data_len / 1507 CTL_HA_DATAMOVE_SEGMENT + 1; 1508 sgl = malloc(sizeof(*sgl) * i, M_CTL, 1509 M_WAITOK | M_ZERO); 1510 CTL_RSGL(io) = sgl; 1511 CTL_LSGL(io) = &sgl[msg->dt.kern_sg_entries]; 1512 1513 io->scsiio.kern_data_ptr = (uint8_t *)sgl; 1514 1515 io->scsiio.kern_sg_entries = 1516 msg->dt.kern_sg_entries; 1517 io->scsiio.rem_sg_entries = 1518 msg->dt.kern_sg_entries; 1519 io->scsiio.kern_data_len = 1520 msg->dt.kern_data_len; 1521 io->scsiio.kern_total_len = 1522 msg->dt.kern_total_len; 1523 io->scsiio.kern_data_resid = 1524 msg->dt.kern_data_resid; 1525 io->scsiio.kern_rel_offset = 1526 msg->dt.kern_rel_offset; 1527 io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR; 1528 io->io_hdr.flags |= msg->dt.flags & 1529 CTL_FLAG_BUS_ADDR; 1530 } else 1531 sgl = (struct ctl_sg_entry *) 1532 io->scsiio.kern_data_ptr; 1533 1534 for (i = msg->dt.sent_sg_entries, j = 0; 1535 i < (msg->dt.sent_sg_entries + 1536 msg->dt.cur_sg_entries); i++, j++) { 1537 sgl[i].addr = msg->dt.sg_list[j].addr; 1538 sgl[i].len = msg->dt.sg_list[j].len; 1539 } 1540 1541 /* 1542 * If this is the last piece of the I/O, we've got 1543 * the full S/G list. Queue processing in the thread. 1544 * Otherwise wait for the next piece. 1545 */ 1546 if (msg->dt.sg_last != 0) 1547 ctl_enqueue_isc(io); 1548 break; 1549 } 1550 /* Performed on the Serializing (primary) SC, XFER mode only */ 1551 case CTL_MSG_DATAMOVE_DONE: { 1552 if (msg->hdr.serializing_sc == NULL) { 1553 printf("%s: serializing_sc == NULL!\n", 1554 __func__); 1555 /* XXX KDM now what? */ 1556 break; 1557 } 1558 /* 1559 * We grab the sense information here in case 1560 * there was a failure, so we can return status 1561 * back to the initiator. 
			 */
			io = msg->hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.port_status = msg->scsi.port_status;
			io->scsiio.kern_data_resid = msg->scsi.kern_data_resid;
			if (msg->hdr.status != CTL_STATUS_NONE) {
				io->io_hdr.status = msg->hdr.status;
				io->scsiio.scsi_status = msg->scsi.scsi_status;
				io->scsiio.sense_len = msg->scsi.sense_len;
				memcpy(&io->scsiio.sense_data,
				    &msg->scsi.sense_data,
				    msg->scsi.sense_len);
				if (msg->hdr.status == CTL_SUCCESS)
					io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
			}
			ctl_enqueue_isc(io);
			break;
		}

		/* Performed on the Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n",
				    __func__);
				break;
			}
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.remote_io = msg->hdr.serializing_sc;
			ctl_enqueue_isc(io);
			break;

		/*
		 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode.
		 */
		case CTL_MSG_FINISH_IO:
			if (softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(softc, msg);
			else
				ctl_isc_handler_finish_ser_only(softc, msg);
			break;

		/* Performed on the Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				    __func__);
				break;
			}
			ctl_copy_sense_data(msg, io);
			/*
			 * The I/O should already have been cleaned up on the
			 * other SC, so clear this flag so we won't send a
			 * message back to finish the I/O there.
1623 */ 1624 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 1625 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1626 1627 /* io = msg->hdr.serializing_sc; */ 1628 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 1629 ctl_enqueue_isc(io); 1630 break; 1631 1632 /* Handle resets sent from the other side */ 1633 case CTL_MSG_MANAGE_TASKS: { 1634 struct ctl_taskio *taskio; 1635 taskio = (struct ctl_taskio *)ctl_alloc_io( 1636 softc->othersc_pool); 1637 ctl_zero_io((union ctl_io *)taskio); 1638 taskio->io_hdr.io_type = CTL_IO_TASK; 1639 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1640 taskio->io_hdr.nexus = msg->hdr.nexus; 1641 taskio->task_action = msg->task.task_action; 1642 taskio->tag_num = msg->task.tag_num; 1643 taskio->tag_type = msg->task.tag_type; 1644 #ifdef CTL_TIME_IO 1645 taskio->io_hdr.start_time = time_uptime; 1646 getbinuptime(&taskio->io_hdr.start_bt); 1647 #endif /* CTL_TIME_IO */ 1648 ctl_run_task((union ctl_io *)taskio); 1649 break; 1650 } 1651 /* Persistent Reserve action which needs attention */ 1652 case CTL_MSG_PERS_ACTION: 1653 presio = (struct ctl_prio *)ctl_alloc_io( 1654 softc->othersc_pool); 1655 ctl_zero_io((union ctl_io *)presio); 1656 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; 1657 presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1658 presio->io_hdr.nexus = msg->hdr.nexus; 1659 presio->pr_msg = msg->pr; 1660 ctl_enqueue_isc((union ctl_io *)presio); 1661 break; 1662 case CTL_MSG_UA: 1663 ctl_isc_ua(softc, msg, param); 1664 break; 1665 case CTL_MSG_PORT_SYNC: 1666 ctl_isc_port_sync(softc, msg, param); 1667 break; 1668 case CTL_MSG_LUN_SYNC: 1669 ctl_isc_lun_sync(softc, msg, param); 1670 break; 1671 case CTL_MSG_IID_SYNC: 1672 ctl_isc_iid_sync(softc, msg, param); 1673 break; 1674 case CTL_MSG_LOGIN: 1675 ctl_isc_login(softc, msg, param); 1676 break; 1677 case CTL_MSG_MODE_SYNC: 1678 ctl_isc_mode_sync(softc, msg, param); 1679 break; 1680 default: 1681 printf("Received HA message of unknown type %d\n", 1682 msg->hdr.msg_type); 1683 ctl_ha_msg_abort(CTL_HA_CHAN_CTL); 1684 break; 1685 } 1686 if (msg != &msgbuf) 1687 free(msg, M_CTL); 1688 } else if (event == CTL_HA_EVT_LINK_CHANGE) { 1689 printf("CTL: HA link status changed from %d to %d\n", 1690 softc->ha_link, param); 1691 if (param == softc->ha_link) 1692 return; 1693 if (softc->ha_link == CTL_HA_LINK_ONLINE) { 1694 softc->ha_link = param; 1695 ctl_isc_ha_link_down(softc); 1696 } else { 1697 softc->ha_link = param; 1698 if (softc->ha_link == CTL_HA_LINK_ONLINE) 1699 ctl_isc_ha_link_up(softc); 1700 } 1701 return; 1702 } else { 1703 printf("ctl_isc_event_handler: Unknown event %d\n", event); 1704 return; 1705 } 1706 } 1707 1708 static void 1709 ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) 1710 { 1711 1712 memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data, 1713 src->scsi.sense_len); 1714 dest->scsiio.scsi_status = src->scsi.scsi_status; 1715 dest->scsiio.sense_len = src->scsi.sense_len; 1716 dest->io_hdr.status = src->hdr.status; 1717 } 1718 1719 static void 1720 ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest) 1721 { 1722 1723 memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data, 1724 src->scsiio.sense_len); 1725 dest->scsi.scsi_status = src->scsiio.scsi_status; 1726 dest->scsi.sense_len = src->scsiio.sense_len; 1727 dest->hdr.status = src->io_hdr.status; 1728 } 1729 1730 void 1731 ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1732 { 1733 struct ctl_softc *softc = lun->ctl_softc; 1734 ctl_ua_type *pu; 1735 1736 if (initidx < softc->init_min || initidx >= 
softc->init_max) 1737 return; 1738 mtx_assert(&lun->lun_lock, MA_OWNED); 1739 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1740 if (pu == NULL) 1741 return; 1742 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; 1743 } 1744 1745 void 1746 ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua) 1747 { 1748 int i; 1749 1750 mtx_assert(&lun->lun_lock, MA_OWNED); 1751 if (lun->pending_ua[port] == NULL) 1752 return; 1753 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1754 if (port * CTL_MAX_INIT_PER_PORT + i == except) 1755 continue; 1756 lun->pending_ua[port][i] |= ua; 1757 } 1758 } 1759 1760 void 1761 ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1762 { 1763 struct ctl_softc *softc = lun->ctl_softc; 1764 int i; 1765 1766 mtx_assert(&lun->lun_lock, MA_OWNED); 1767 for (i = softc->port_min; i < softc->port_max; i++) 1768 ctl_est_ua_port(lun, i, except, ua); 1769 } 1770 1771 void 1772 ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1773 { 1774 struct ctl_softc *softc = lun->ctl_softc; 1775 ctl_ua_type *pu; 1776 1777 if (initidx < softc->init_min || initidx >= softc->init_max) 1778 return; 1779 mtx_assert(&lun->lun_lock, MA_OWNED); 1780 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1781 if (pu == NULL) 1782 return; 1783 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; 1784 } 1785 1786 void 1787 ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1788 { 1789 struct ctl_softc *softc = lun->ctl_softc; 1790 int i, j; 1791 1792 mtx_assert(&lun->lun_lock, MA_OWNED); 1793 for (i = softc->port_min; i < softc->port_max; i++) { 1794 if (lun->pending_ua[i] == NULL) 1795 continue; 1796 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1797 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1798 continue; 1799 lun->pending_ua[i][j] &= ~ua; 1800 } 1801 } 1802 } 1803 1804 void 1805 ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, 1806 ctl_ua_type ua_type) 1807 { 1808 struct ctl_lun *lun; 1809 1810 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); 1811 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 1812 mtx_lock(&lun->lun_lock); 1813 ctl_clr_ua(lun, initidx, ua_type); 1814 mtx_unlock(&lun->lun_lock); 1815 } 1816 } 1817 1818 static int 1819 ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS) 1820 { 1821 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1822 struct ctl_lun *lun; 1823 struct ctl_lun_req ireq; 1824 int error, value; 1825 1826 value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 
0 : 1; 1827 error = sysctl_handle_int(oidp, &value, 0, req); 1828 if ((error != 0) || (req->newptr == NULL)) 1829 return (error); 1830 1831 mtx_lock(&softc->ctl_lock); 1832 if (value == 0) 1833 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1834 else 1835 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1836 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1837 mtx_unlock(&softc->ctl_lock); 1838 bzero(&ireq, sizeof(ireq)); 1839 ireq.reqtype = CTL_LUNREQ_MODIFY; 1840 ireq.reqdata.modify.lun_id = lun->lun; 1841 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, 1842 curthread); 1843 if (ireq.status != CTL_LUN_OK) { 1844 printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", 1845 __func__, ireq.status, ireq.error_str); 1846 } 1847 mtx_lock(&softc->ctl_lock); 1848 } 1849 mtx_unlock(&softc->ctl_lock); 1850 return (0); 1851 } 1852 1853 static int 1854 ctl_init(void) 1855 { 1856 struct make_dev_args args; 1857 struct ctl_softc *softc; 1858 int i, error; 1859 1860 softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1861 M_WAITOK | M_ZERO); 1862 1863 make_dev_args_init(&args); 1864 args.mda_devsw = &ctl_cdevsw; 1865 args.mda_uid = UID_ROOT; 1866 args.mda_gid = GID_OPERATOR; 1867 args.mda_mode = 0600; 1868 args.mda_si_drv1 = softc; 1869 args.mda_si_drv2 = NULL; 1870 error = make_dev_s(&args, &softc->dev, "cam/ctl"); 1871 if (error != 0) { 1872 free(softc, M_DEVBUF); 1873 control_softc = NULL; 1874 return (error); 1875 } 1876 1877 sysctl_ctx_init(&softc->sysctl_ctx); 1878 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1879 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1880 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Target Layer"); 1881 1882 if (softc->sysctl_tree == NULL) { 1883 printf("%s: unable to allocate sysctl tree\n", __func__); 1884 destroy_dev(softc->dev); 1885 free(softc, M_DEVBUF); 1886 control_softc = NULL; 1887 return (ENOMEM); 1888 } 1889 1890 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1891 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1892 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1893 softc->flags = 0; 1894 1895 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1896 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, 1897 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); 1898 1899 if (ctl_max_luns <= 0 || powerof2(ctl_max_luns) == 0) { 1900 printf("Bad value %d for kern.cam.ctl.max_luns, must be a power of two, using %d\n", 1901 ctl_max_luns, CTL_DEFAULT_MAX_LUNS); 1902 ctl_max_luns = CTL_DEFAULT_MAX_LUNS; 1903 } 1904 softc->ctl_luns = malloc(sizeof(struct ctl_lun *) * ctl_max_luns, 1905 M_DEVBUF, M_WAITOK | M_ZERO); 1906 softc->ctl_lun_mask = malloc(sizeof(uint32_t) * 1907 ((ctl_max_luns + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); 1908 if (ctl_max_ports <= 0 || powerof2(ctl_max_ports) == 0) { 1909 printf("Bad value %d for kern.cam.ctl.max_ports, must be a power of two, using %d\n", 1910 ctl_max_ports, CTL_DEFAULT_MAX_PORTS); 1911 ctl_max_ports = CTL_DEFAULT_MAX_PORTS; 1912 } 1913 softc->ctl_port_mask = malloc(sizeof(uint32_t) * 1914 ((ctl_max_ports + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); 1915 softc->ctl_ports = malloc(sizeof(struct ctl_port *) * ctl_max_ports, 1916 M_DEVBUF, M_WAITOK | M_ZERO); 1917 1918 /* 1919 * In Copan's HA scheme, the "master" and "slave" roles are 1920 * figured out through the slot the controller is in. Although it 1921 * is an active/active system, someone has to be in charge. 
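 * Here that choice is driven by the kern.cam.ctl.ha_id tunable exposed
 * just below: a value of 0 (or anything above NUM_HA_SHELVES) selects
 * single-head operation and claims all ports, while heads 1..NUM_HA_SHELVES
 * each take an equal slice of the port and initiator index space.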
1922 */ 1923 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1924 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1925 "HA head ID (0 - no HA)"); 1926 if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) { 1927 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1928 softc->is_single = 1; 1929 softc->port_cnt = ctl_max_ports; 1930 softc->port_min = 0; 1931 } else { 1932 softc->port_cnt = ctl_max_ports / NUM_HA_SHELVES; 1933 softc->port_min = (softc->ha_id - 1) * softc->port_cnt; 1934 } 1935 softc->port_max = softc->port_min + softc->port_cnt; 1936 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; 1937 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; 1938 1939 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1940 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, 1941 "HA link state (0 - offline, 1 - unknown, 2 - online)"); 1942 1943 STAILQ_INIT(&softc->lun_list); 1944 STAILQ_INIT(&softc->fe_list); 1945 STAILQ_INIT(&softc->port_list); 1946 STAILQ_INIT(&softc->be_list); 1947 ctl_tpc_init(softc); 1948 1949 if (worker_threads <= 0) 1950 worker_threads = max(1, mp_ncpus / 4); 1951 if (worker_threads > CTL_MAX_THREADS) 1952 worker_threads = CTL_MAX_THREADS; 1953 1954 for (i = 0; i < worker_threads; i++) { 1955 struct ctl_thread *thr = &softc->threads[i]; 1956 1957 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); 1958 thr->ctl_softc = softc; 1959 STAILQ_INIT(&thr->incoming_queue); 1960 STAILQ_INIT(&thr->rtr_queue); 1961 STAILQ_INIT(&thr->done_queue); 1962 STAILQ_INIT(&thr->isc_queue); 1963 1964 error = kproc_kthread_add(ctl_work_thread, thr, 1965 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); 1966 if (error != 0) { 1967 printf("error creating CTL work thread!\n"); 1968 return (error); 1969 } 1970 } 1971 error = kproc_kthread_add(ctl_thresh_thread, softc, 1972 &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh"); 1973 if (error != 0) { 1974 printf("error creating CTL threshold thread!\n"); 1975 return (error); 1976 } 1977 1978 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), 1979 OID_AUTO, "ha_role", 1980 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, 1981 softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); 1982 1983 if (softc->is_single == 0) { 1984 if (ctl_frontend_register(&ha_frontend) != 0) 1985 softc->is_single = 1; 1986 } 1987 return (0); 1988 } 1989 1990 static int 1991 ctl_shutdown(void) 1992 { 1993 struct ctl_softc *softc = control_softc; 1994 int i; 1995 1996 if (softc->is_single == 0) 1997 ctl_frontend_deregister(&ha_frontend); 1998 1999 destroy_dev(softc->dev); 2000 2001 /* Shutdown CTL threads. 
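 Setting softc->shutdown and waking each thread lets the worker and threshold threads notice the flag and exit; the loops below then poll with pause() until each thread pointer has been cleared.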
*/ 2002 softc->shutdown = 1; 2003 for (i = 0; i < worker_threads; i++) { 2004 struct ctl_thread *thr = &softc->threads[i]; 2005 while (thr->thread != NULL) { 2006 wakeup(thr); 2007 if (thr->thread != NULL) 2008 pause("CTL thr shutdown", 1); 2009 } 2010 mtx_destroy(&thr->queue_lock); 2011 } 2012 while (softc->thresh_thread != NULL) { 2013 wakeup(softc->thresh_thread); 2014 if (softc->thresh_thread != NULL) 2015 pause("CTL thr shutdown", 1); 2016 } 2017 2018 ctl_tpc_shutdown(softc); 2019 uma_zdestroy(softc->io_zone); 2020 mtx_destroy(&softc->ctl_lock); 2021 2022 free(softc->ctl_luns, M_DEVBUF); 2023 free(softc->ctl_lun_mask, M_DEVBUF); 2024 free(softc->ctl_port_mask, M_DEVBUF); 2025 free(softc->ctl_ports, M_DEVBUF); 2026 2027 sysctl_ctx_free(&softc->sysctl_ctx); 2028 2029 free(softc, M_DEVBUF); 2030 control_softc = NULL; 2031 return (0); 2032 } 2033 2034 static int 2035 ctl_module_event_handler(module_t mod, int what, void *arg) 2036 { 2037 2038 switch (what) { 2039 case MOD_LOAD: 2040 return (ctl_init()); 2041 case MOD_UNLOAD: 2042 return (ctl_shutdown()); 2043 default: 2044 return (EOPNOTSUPP); 2045 } 2046 } 2047 2048 /* 2049 * XXX KDM should we do some access checks here? Bump a reference count to 2050 * prevent a CTL module from being unloaded while someone has it open? 2051 */ 2052 static int 2053 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 2054 { 2055 return (0); 2056 } 2057 2058 static int 2059 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 2060 { 2061 return (0); 2062 } 2063 2064 /* 2065 * Remove an initiator by port number and initiator ID. 2066 * Returns 0 for success, -1 for failure. 2067 */ 2068 int 2069 ctl_remove_initiator(struct ctl_port *port, int iid) 2070 { 2071 struct ctl_softc *softc = port->ctl_softc; 2072 int last; 2073 2074 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2075 2076 if (iid >= CTL_MAX_INIT_PER_PORT) { 2077 printf("%s: initiator ID %u >= maximum %u!\n", 2078 __func__, iid, CTL_MAX_INIT_PER_PORT); 2079 return (-1); 2080 } 2081 2082 mtx_lock(&softc->ctl_lock); 2083 last = (--port->wwpn_iid[iid].in_use == 0); 2084 port->wwpn_iid[iid].last_use = time_uptime; 2085 mtx_unlock(&softc->ctl_lock); 2086 if (last) 2087 ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); 2088 ctl_isc_announce_iid(port, iid); 2089 2090 return (0); 2091 } 2092 2093 /* 2094 * Add an initiator to the initiator map. 2095 * Returns iid for success, < 0 for failure.
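 * If the caller passes a negative iid, one is chosen here: preferably an
 * existing entry with a matching WWPN or name, then a completely unused
 * slot, and finally the least recently used slot that is no longer in use.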
2096 */ 2097 int 2098 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 2099 { 2100 struct ctl_softc *softc = port->ctl_softc; 2101 time_t best_time; 2102 int i, best; 2103 2104 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2105 2106 if (iid >= CTL_MAX_INIT_PER_PORT) { 2107 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 2108 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 2109 free(name, M_CTL); 2110 return (-1); 2111 } 2112 2113 mtx_lock(&softc->ctl_lock); 2114 2115 if (iid < 0 && (wwpn != 0 || name != NULL)) { 2116 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2117 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 2118 iid = i; 2119 break; 2120 } 2121 if (name != NULL && port->wwpn_iid[i].name != NULL && 2122 strcmp(name, port->wwpn_iid[i].name) == 0) { 2123 iid = i; 2124 break; 2125 } 2126 } 2127 } 2128 2129 if (iid < 0) { 2130 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2131 if (port->wwpn_iid[i].in_use == 0 && 2132 port->wwpn_iid[i].wwpn == 0 && 2133 port->wwpn_iid[i].name == NULL) { 2134 iid = i; 2135 break; 2136 } 2137 } 2138 } 2139 2140 if (iid < 0) { 2141 best = -1; 2142 best_time = INT32_MAX; 2143 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2144 if (port->wwpn_iid[i].in_use == 0) { 2145 if (port->wwpn_iid[i].last_use < best_time) { 2146 best = i; 2147 best_time = port->wwpn_iid[i].last_use; 2148 } 2149 } 2150 } 2151 iid = best; 2152 } 2153 2154 if (iid < 0) { 2155 mtx_unlock(&softc->ctl_lock); 2156 free(name, M_CTL); 2157 return (-2); 2158 } 2159 2160 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 2161 /* 2162 * This is not an error yet. 2163 */ 2164 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 2165 #if 0 2166 printf("%s: port %d iid %u WWPN %#jx arrived" 2167 " again\n", __func__, port->targ_port, 2168 iid, (uintmax_t)wwpn); 2169 #endif 2170 goto take; 2171 } 2172 if (name != NULL && port->wwpn_iid[iid].name != NULL && 2173 strcmp(name, port->wwpn_iid[iid].name) == 0) { 2174 #if 0 2175 printf("%s: port %d iid %u name '%s' arrived" 2176 " again\n", __func__, port->targ_port, 2177 iid, name); 2178 #endif 2179 goto take; 2180 } 2181 2182 /* 2183 * This is an error, but what do we do about it? The 2184 * driver is telling us we have a new WWPN for this 2185 * initiator ID, so we pretty much need to use it. 
2186 */ 2187 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 2188 " but WWPN %#jx '%s' is still at that address\n", 2189 __func__, port->targ_port, iid, wwpn, name, 2190 (uintmax_t)port->wwpn_iid[iid].wwpn, 2191 port->wwpn_iid[iid].name); 2192 } 2193 take: 2194 free(port->wwpn_iid[iid].name, M_CTL); 2195 port->wwpn_iid[iid].name = name; 2196 port->wwpn_iid[iid].wwpn = wwpn; 2197 port->wwpn_iid[iid].in_use++; 2198 mtx_unlock(&softc->ctl_lock); 2199 ctl_isc_announce_iid(port, iid); 2200 2201 return (iid); 2202 } 2203 2204 static int 2205 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 2206 { 2207 int len; 2208 2209 switch (port->port_type) { 2210 case CTL_PORT_FC: 2211 { 2212 struct scsi_transportid_fcp *id = 2213 (struct scsi_transportid_fcp *)buf; 2214 if (port->wwpn_iid[iid].wwpn == 0) 2215 return (0); 2216 memset(id, 0, sizeof(*id)); 2217 id->format_protocol = SCSI_PROTO_FC; 2218 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 2219 return (sizeof(*id)); 2220 } 2221 case CTL_PORT_ISCSI: 2222 { 2223 struct scsi_transportid_iscsi_port *id = 2224 (struct scsi_transportid_iscsi_port *)buf; 2225 if (port->wwpn_iid[iid].name == NULL) 2226 return (0); 2227 memset(id, 0, 256); 2228 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 2229 SCSI_PROTO_ISCSI; 2230 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 2231 len = roundup2(min(len, 252), 4); 2232 scsi_ulto2b(len, id->additional_length); 2233 return (sizeof(*id) + len); 2234 } 2235 case CTL_PORT_SAS: 2236 { 2237 struct scsi_transportid_sas *id = 2238 (struct scsi_transportid_sas *)buf; 2239 if (port->wwpn_iid[iid].wwpn == 0) 2240 return (0); 2241 memset(id, 0, sizeof(*id)); 2242 id->format_protocol = SCSI_PROTO_SAS; 2243 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 2244 return (sizeof(*id)); 2245 } 2246 default: 2247 { 2248 struct scsi_transportid_spi *id = 2249 (struct scsi_transportid_spi *)buf; 2250 memset(id, 0, sizeof(*id)); 2251 id->format_protocol = SCSI_PROTO_SPI; 2252 scsi_ulto2b(iid, id->scsi_addr); 2253 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 2254 return (sizeof(*id)); 2255 } 2256 } 2257 } 2258 2259 /* 2260 * Serialize a command that went down the "wrong" side, and so was sent to 2261 * this controller for execution. The logic is a little different from the 2262 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 2263 * sent back to the other side, but in the success case, we execute the 2264 * command on this side (XFER mode) or tell the other side to execute it 2265 * (SER_ONLY mode). 2266 */ 2267 static void 2268 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 2269 { 2270 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2271 struct ctl_port *port = CTL_PORT(ctsio); 2272 union ctl_ha_msg msg_info; 2273 struct ctl_lun *lun; 2274 const struct ctl_cmd_entry *entry; 2275 union ctl_io *bio; 2276 uint32_t targ_lun; 2277 2278 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 2279 2280 /* Make sure that we know about this port. */ 2281 if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) { 2282 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2283 /*retry_count*/ 1); 2284 goto badjuju; 2285 } 2286 2287 /* Make sure that we know about this LUN. */ 2288 mtx_lock(&softc->ctl_lock); 2289 if (targ_lun >= ctl_max_luns || 2290 (lun = softc->ctl_luns[targ_lun]) == NULL) { 2291 mtx_unlock(&softc->ctl_lock); 2292 2293 /* 2294 * The other node would not send this request to us unless it had 2295 * received an announcement that we are the primary node for this LUN.
2296 * If this LUN does not exist now, it is probably the result of 2297 * a race, so respond to the initiator in the most opaque way. 2298 */ 2299 ctl_set_busy(ctsio); 2300 goto badjuju; 2301 } 2302 mtx_lock(&lun->lun_lock); 2303 mtx_unlock(&softc->ctl_lock); 2304 2305 /* 2306 * If the LUN is invalid, pretend that it doesn't exist. 2307 * It will go away as soon as all pending I/Os have completed. 2308 */ 2309 if (lun->flags & CTL_LUN_DISABLED) { 2310 mtx_unlock(&lun->lun_lock); 2311 ctl_set_busy(ctsio); 2312 goto badjuju; 2313 } 2314 2315 entry = ctl_get_cmd_entry(ctsio, NULL); 2316 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 2317 mtx_unlock(&lun->lun_lock); 2318 goto badjuju; 2319 } 2320 2321 CTL_LUN(ctsio) = lun; 2322 CTL_BACKEND_LUN(ctsio) = lun->be_lun; 2323 2324 /* 2325 * Every I/O goes into the OOA queue for a 2326 * particular LUN, and stays there until completion. 2327 */ 2328 #ifdef CTL_TIME_IO 2329 if (TAILQ_EMPTY(&lun->ooa_queue)) 2330 lun->idle_time += getsbinuptime() - lun->last_busy; 2331 #endif 2332 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2333 2334 bio = (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links); 2335 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) { 2336 case CTL_ACTION_BLOCK: 2337 ctsio->io_hdr.blocker = bio; 2338 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, 2339 blocked_links); 2340 mtx_unlock(&lun->lun_lock); 2341 break; 2342 case CTL_ACTION_PASS: 2343 case CTL_ACTION_SKIP: 2344 if (softc->ha_mode == CTL_HA_MODE_XFER) { 2345 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 2346 ctl_enqueue_rtr((union ctl_io *)ctsio); 2347 mtx_unlock(&lun->lun_lock); 2348 } else { 2349 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 2350 mtx_unlock(&lun->lun_lock); 2351 2352 /* send msg back to other side */ 2353 msg_info.hdr.original_sc = ctsio->io_hdr.remote_io; 2354 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 2355 msg_info.hdr.msg_type = CTL_MSG_R2R; 2356 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2357 sizeof(msg_info.hdr), M_WAITOK); 2358 } 2359 break; 2360 case CTL_ACTION_OVERLAP: 2361 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2362 mtx_unlock(&lun->lun_lock); 2363 ctl_set_overlapped_cmd(ctsio); 2364 goto badjuju; 2365 case CTL_ACTION_OVERLAP_TAG: 2366 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2367 mtx_unlock(&lun->lun_lock); 2368 ctl_set_overlapped_tag(ctsio, ctsio->tag_num); 2369 goto badjuju; 2370 case CTL_ACTION_ERROR: 2371 default: 2372 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2373 mtx_unlock(&lun->lun_lock); 2374 2375 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2376 /*retry_count*/ 0); 2377 badjuju: 2378 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 2379 msg_info.hdr.original_sc = ctsio->io_hdr.remote_io; 2380 msg_info.hdr.serializing_sc = NULL; 2381 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 2382 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2383 sizeof(msg_info.scsi), M_WAITOK); 2384 ctl_free_io((union ctl_io *)ctsio); 2385 break; 2386 } 2387 } 2388 2389 /* 2390 * Fill kern_entries with the commands currently on the LUN's OOA (Order Of Arrival) queue; entries past alloc_num are only counted.
2391 */ 2392 static void 2393 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 2394 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 2395 { 2396 union ctl_io *io; 2397 2398 mtx_lock(&lun->lun_lock); 2399 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); 2400 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2401 ooa_links)) { 2402 struct ctl_ooa_entry *entry; 2403 2404 /* 2405 * If we've got more than we can fit, just count the 2406 * remaining entries. 2407 */ 2408 if (*cur_fill_num >= ooa_hdr->alloc_num) 2409 continue; 2410 2411 entry = &kern_entries[*cur_fill_num]; 2412 2413 entry->tag_num = io->scsiio.tag_num; 2414 entry->lun_num = lun->lun; 2415 #ifdef CTL_TIME_IO 2416 entry->start_bt = io->io_hdr.start_bt; 2417 #endif 2418 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 2419 entry->cdb_len = io->scsiio.cdb_len; 2420 if (io->io_hdr.blocker != NULL) 2421 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 2422 2423 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 2424 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 2425 2426 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2427 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2428 2429 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2430 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2431 2432 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2433 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2434 2435 if (io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) 2436 entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_QUEUED; 2437 2438 if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) 2439 entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_SENT; 2440 } 2441 mtx_unlock(&lun->lun_lock); 2442 } 2443 2444 /* 2445 * Escape characters that are illegal or not recommended in XML. 2446 */ 2447 int 2448 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 2449 { 2450 char *end = str + size; 2451 int retval; 2452 2453 retval = 0; 2454 2455 for (; *str && str < end; str++) { 2456 switch (*str) { 2457 case '&': 2458 retval = sbuf_printf(sb, "&amp;"); 2459 break; 2460 case '>': 2461 retval = sbuf_printf(sb, "&gt;"); 2462 break; 2463 case '<': 2464 retval = sbuf_printf(sb, "&lt;"); 2465 break; 2466 default: 2467 retval = sbuf_putc(sb, *str); 2468 break; 2469 } 2470 2471 if (retval != 0) 2472 break; 2473 } 2474 2475 return (retval); 2476 } 2477 2478 static void 2479 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2480 { 2481 struct scsi_vpd_id_descriptor *desc; 2482 int i; 2483 2484 if (id == NULL || id->len < 4) 2485 return; 2486 desc = (struct scsi_vpd_id_descriptor *)id->data; 2487 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2488 case SVPD_ID_TYPE_T10: 2489 sbuf_printf(sb, "t10."); 2490 break; 2491 case SVPD_ID_TYPE_EUI64: 2492 sbuf_printf(sb, "eui."); 2493 break; 2494 case SVPD_ID_TYPE_NAA: 2495 sbuf_printf(sb, "naa."); 2496 break; 2497 case SVPD_ID_TYPE_SCSI_NAME: 2498 break; 2499 } 2500 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2501 case SVPD_ID_CODESET_BINARY: 2502 for (i = 0; i < desc->length; i++) 2503 sbuf_printf(sb, "%02x", desc->identifier[i]); 2504 break; 2505 case SVPD_ID_CODESET_ASCII: 2506 sbuf_printf(sb, "%.*s", (int)desc->length, 2507 (char *)desc->identifier); 2508 break; 2509 case SVPD_ID_CODESET_UTF8: 2510 sbuf_printf(sb, "%s", (char *)desc->identifier); 2511 break; 2512 } 2513 } 2514 2515 static int 2516 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2517 struct thread *td) 2518 { 2519 struct ctl_softc *softc = dev->si_drv1; 2520 struct ctl_port *port; 2521 struct ctl_lun *lun; 2522 int retval; 2523 2524 retval = 0;
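/* Dispatch the /dev/cam/ctl character device ioctls; this switch is the control path used by userland tools such as ctladm(8). */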
2525 2526 switch (cmd) { 2527 case CTL_IO: 2528 retval = ctl_ioctl_io(dev, cmd, addr, flag, td); 2529 break; 2530 case CTL_ENABLE_PORT: 2531 case CTL_DISABLE_PORT: 2532 case CTL_SET_PORT_WWNS: { 2533 struct ctl_port *port; 2534 struct ctl_port_entry *entry; 2535 2536 entry = (struct ctl_port_entry *)addr; 2537 2538 mtx_lock(&softc->ctl_lock); 2539 STAILQ_FOREACH(port, &softc->port_list, links) { 2540 int action, done; 2541 2542 if (port->targ_port < softc->port_min || 2543 port->targ_port >= softc->port_max) 2544 continue; 2545 2546 action = 0; 2547 done = 0; 2548 if ((entry->port_type == CTL_PORT_NONE) 2549 && (entry->targ_port == port->targ_port)) { 2550 /* 2551 * If the user only wants to enable or 2552 * disable or set WWNs on a specific port, 2553 * do the operation and we're done. 2554 */ 2555 action = 1; 2556 done = 1; 2557 } else if (entry->port_type & port->port_type) { 2558 /* 2559 * Compare the user's type mask with the 2560 * particular frontend type to see if we 2561 * have a match. 2562 */ 2563 action = 1; 2564 done = 0; 2565 2566 /* 2567 * Make sure the user isn't trying to set 2568 * WWNs on multiple ports at the same time. 2569 */ 2570 if (cmd == CTL_SET_PORT_WWNS) { 2571 printf("%s: Can't set WWNs on " 2572 "multiple ports\n", __func__); 2573 retval = EINVAL; 2574 break; 2575 } 2576 } 2577 if (action == 0) 2578 continue; 2579 2580 /* 2581 * XXX KDM we have to drop the lock here, because 2582 * the online/offline operations can potentially 2583 * block. We need to reference count the frontends 2584 * so they can't go away, 2585 */ 2586 if (cmd == CTL_ENABLE_PORT) { 2587 mtx_unlock(&softc->ctl_lock); 2588 ctl_port_online(port); 2589 mtx_lock(&softc->ctl_lock); 2590 } else if (cmd == CTL_DISABLE_PORT) { 2591 mtx_unlock(&softc->ctl_lock); 2592 ctl_port_offline(port); 2593 mtx_lock(&softc->ctl_lock); 2594 } else if (cmd == CTL_SET_PORT_WWNS) { 2595 ctl_port_set_wwns(port, 2596 (entry->flags & CTL_PORT_WWNN_VALID) ? 2597 1 : 0, entry->wwnn, 2598 (entry->flags & CTL_PORT_WWPN_VALID) ? 
2599 1 : 0, entry->wwpn); 2600 } 2601 if (done != 0) 2602 break; 2603 } 2604 mtx_unlock(&softc->ctl_lock); 2605 break; 2606 } 2607 case CTL_GET_OOA: { 2608 struct ctl_ooa *ooa_hdr; 2609 struct ctl_ooa_entry *entries; 2610 uint32_t cur_fill_num; 2611 2612 ooa_hdr = (struct ctl_ooa *)addr; 2613 2614 if ((ooa_hdr->alloc_len == 0) 2615 || (ooa_hdr->alloc_num == 0)) { 2616 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2617 "must be non-zero\n", __func__, 2618 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2619 retval = EINVAL; 2620 break; 2621 } 2622 2623 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2624 sizeof(struct ctl_ooa_entry))) { 2625 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2626 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2627 __func__, ooa_hdr->alloc_len, 2628 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2629 retval = EINVAL; 2630 break; 2631 } 2632 2633 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2634 if (entries == NULL) { 2635 printf("%s: could not allocate %d bytes for OOA " 2636 "dump\n", __func__, ooa_hdr->alloc_len); 2637 retval = ENOMEM; 2638 break; 2639 } 2640 2641 mtx_lock(&softc->ctl_lock); 2642 if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 && 2643 (ooa_hdr->lun_num >= ctl_max_luns || 2644 softc->ctl_luns[ooa_hdr->lun_num] == NULL)) { 2645 mtx_unlock(&softc->ctl_lock); 2646 free(entries, M_CTL); 2647 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2648 __func__, (uintmax_t)ooa_hdr->lun_num); 2649 retval = EINVAL; 2650 break; 2651 } 2652 2653 cur_fill_num = 0; 2654 2655 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2656 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2657 ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2658 ooa_hdr, entries); 2659 } 2660 } else { 2661 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2662 ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, 2663 entries); 2664 } 2665 mtx_unlock(&softc->ctl_lock); 2666 2667 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2668 ooa_hdr->fill_len = ooa_hdr->fill_num * 2669 sizeof(struct ctl_ooa_entry); 2670 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2671 if (retval != 0) { 2672 printf("%s: error copying out %d bytes for OOA dump\n", 2673 __func__, ooa_hdr->fill_len); 2674 } 2675 2676 getbinuptime(&ooa_hdr->cur_bt); 2677 2678 if (cur_fill_num > ooa_hdr->alloc_num) { 2679 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2680 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2681 } else { 2682 ooa_hdr->dropped_num = 0; 2683 ooa_hdr->status = CTL_OOA_OK; 2684 } 2685 2686 free(entries, M_CTL); 2687 break; 2688 } 2689 case CTL_DELAY_IO: { 2690 struct ctl_io_delay_info *delay_info; 2691 2692 delay_info = (struct ctl_io_delay_info *)addr; 2693 2694 #ifdef CTL_IO_DELAY 2695 mtx_lock(&softc->ctl_lock); 2696 if (delay_info->lun_id >= ctl_max_luns || 2697 (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) { 2698 mtx_unlock(&softc->ctl_lock); 2699 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2700 break; 2701 } 2702 mtx_lock(&lun->lun_lock); 2703 mtx_unlock(&softc->ctl_lock); 2704 delay_info->status = CTL_DELAY_STATUS_OK; 2705 switch (delay_info->delay_type) { 2706 case CTL_DELAY_TYPE_CONT: 2707 case CTL_DELAY_TYPE_ONESHOT: 2708 break; 2709 default: 2710 delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE; 2711 break; 2712 } 2713 switch (delay_info->delay_loc) { 2714 case CTL_DELAY_LOC_DATAMOVE: 2715 lun->delay_info.datamove_type = delay_info->delay_type; 2716 lun->delay_info.datamove_delay = delay_info->delay_secs; 2717 break; 2718 case CTL_DELAY_LOC_DONE: 2719 
lun->delay_info.done_type = delay_info->delay_type; 2720 lun->delay_info.done_delay = delay_info->delay_secs; 2721 break; 2722 default: 2723 delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; 2724 break; 2725 } 2726 mtx_unlock(&lun->lun_lock); 2727 #else 2728 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2729 #endif /* CTL_IO_DELAY */ 2730 break; 2731 } 2732 case CTL_ERROR_INJECT: { 2733 struct ctl_error_desc *err_desc, *new_err_desc; 2734 2735 err_desc = (struct ctl_error_desc *)addr; 2736 2737 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2738 M_WAITOK | M_ZERO); 2739 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2740 2741 mtx_lock(&softc->ctl_lock); 2742 if (err_desc->lun_id >= ctl_max_luns || 2743 (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) { 2744 mtx_unlock(&softc->ctl_lock); 2745 free(new_err_desc, M_CTL); 2746 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2747 __func__, (uintmax_t)err_desc->lun_id); 2748 retval = EINVAL; 2749 break; 2750 } 2751 mtx_lock(&lun->lun_lock); 2752 mtx_unlock(&softc->ctl_lock); 2753 2754 /* 2755 * We could do some checking here to verify the validity 2756 * of the request, but given the complexity of error 2757 * injection requests, the checking logic would be fairly 2758 * complex. 2759 * 2760 * For now, if the request is invalid, it just won't get 2761 * executed and might get deleted. 2762 */ 2763 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2764 2765 /* 2766 * XXX KDM check to make sure the serial number is unique, 2767 * in case we somehow manage to wrap. That shouldn't 2768 * happen for a very long time, but it's the right thing to 2769 * do. 2770 */ 2771 new_err_desc->serial = lun->error_serial; 2772 err_desc->serial = lun->error_serial; 2773 lun->error_serial++; 2774 2775 mtx_unlock(&lun->lun_lock); 2776 break; 2777 } 2778 case CTL_ERROR_INJECT_DELETE: { 2779 struct ctl_error_desc *delete_desc, *desc, *desc2; 2780 int delete_done; 2781 2782 delete_desc = (struct ctl_error_desc *)addr; 2783 delete_done = 0; 2784 2785 mtx_lock(&softc->ctl_lock); 2786 if (delete_desc->lun_id >= ctl_max_luns || 2787 (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) { 2788 mtx_unlock(&softc->ctl_lock); 2789 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2790 __func__, (uintmax_t)delete_desc->lun_id); 2791 retval = EINVAL; 2792 break; 2793 } 2794 mtx_lock(&lun->lun_lock); 2795 mtx_unlock(&softc->ctl_lock); 2796 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2797 if (desc->serial != delete_desc->serial) 2798 continue; 2799 2800 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2801 links); 2802 free(desc, M_CTL); 2803 delete_done = 1; 2804 } 2805 mtx_unlock(&lun->lun_lock); 2806 if (delete_done == 0) { 2807 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2808 "error serial %ju on LUN %u\n", __func__, 2809 delete_desc->serial, delete_desc->lun_id); 2810 retval = EINVAL; 2811 break; 2812 } 2813 break; 2814 } 2815 case CTL_DUMP_STRUCTS: { 2816 int j, k; 2817 struct ctl_port *port; 2818 struct ctl_frontend *fe; 2819 2820 mtx_lock(&softc->ctl_lock); 2821 printf("CTL Persistent Reservation information start:\n"); 2822 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2823 mtx_lock(&lun->lun_lock); 2824 if ((lun->flags & CTL_LUN_DISABLED) != 0) { 2825 mtx_unlock(&lun->lun_lock); 2826 continue; 2827 } 2828 2829 for (j = 0; j < ctl_max_ports; j++) { 2830 if (lun->pr_keys[j] == NULL) 2831 continue; 2832 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2833 if (lun->pr_keys[j][k] == 0) 2834 continue; 2835 
printf(" LUN %ju port %d iid %d key " 2836 "%#jx\n", lun->lun, j, k, 2837 (uintmax_t)lun->pr_keys[j][k]); 2838 } 2839 } 2840 mtx_unlock(&lun->lun_lock); 2841 } 2842 printf("CTL Persistent Reservation information end\n"); 2843 printf("CTL Ports:\n"); 2844 STAILQ_FOREACH(port, &softc->port_list, links) { 2845 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2846 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2847 port->frontend->name, port->port_type, 2848 port->physical_port, port->virtual_port, 2849 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2850 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2851 if (port->wwpn_iid[j].in_use == 0 && 2852 port->wwpn_iid[j].wwpn == 0 && 2853 port->wwpn_iid[j].name == NULL) 2854 continue; 2855 2856 printf(" iid %u use %d WWPN %#jx '%s'\n", 2857 j, port->wwpn_iid[j].in_use, 2858 (uintmax_t)port->wwpn_iid[j].wwpn, 2859 port->wwpn_iid[j].name); 2860 } 2861 } 2862 printf("CTL Port information end\n"); 2863 mtx_unlock(&softc->ctl_lock); 2864 /* 2865 * XXX KDM calling this without a lock. We'd likely want 2866 * to drop the lock before calling the frontend's dump 2867 * routine anyway. 2868 */ 2869 printf("CTL Frontends:\n"); 2870 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2871 printf(" Frontend '%s'\n", fe->name); 2872 if (fe->fe_dump != NULL) 2873 fe->fe_dump(); 2874 } 2875 printf("CTL Frontend information end\n"); 2876 break; 2877 } 2878 case CTL_LUN_REQ: { 2879 struct ctl_lun_req *lun_req; 2880 struct ctl_backend_driver *backend; 2881 void *packed; 2882 nvlist_t *tmp_args_nvl; 2883 size_t packed_len; 2884 2885 lun_req = (struct ctl_lun_req *)addr; 2886 tmp_args_nvl = lun_req->args_nvl; 2887 2888 backend = ctl_backend_find(lun_req->backend); 2889 if (backend == NULL) { 2890 lun_req->status = CTL_LUN_ERROR; 2891 snprintf(lun_req->error_str, 2892 sizeof(lun_req->error_str), 2893 "Backend \"%s\" not found.", 2894 lun_req->backend); 2895 break; 2896 } 2897 2898 if (lun_req->args != NULL) { 2899 packed = malloc(lun_req->args_len, M_CTL, M_WAITOK); 2900 if (copyin(lun_req->args, packed, lun_req->args_len) != 0) { 2901 free(packed, M_CTL); 2902 lun_req->status = CTL_LUN_ERROR; 2903 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 2904 "Cannot copyin args."); 2905 break; 2906 } 2907 lun_req->args_nvl = nvlist_unpack(packed, 2908 lun_req->args_len, 0); 2909 free(packed, M_CTL); 2910 2911 if (lun_req->args_nvl == NULL) { 2912 lun_req->status = CTL_LUN_ERROR; 2913 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 2914 "Cannot unpack args nvlist."); 2915 break; 2916 } 2917 } else 2918 lun_req->args_nvl = nvlist_create(0); 2919 2920 retval = backend->ioctl(dev, cmd, addr, flag, td); 2921 nvlist_destroy(lun_req->args_nvl); 2922 lun_req->args_nvl = tmp_args_nvl; 2923 2924 if (lun_req->result_nvl != NULL) { 2925 if (lun_req->result != NULL) { 2926 packed = nvlist_pack(lun_req->result_nvl, 2927 &packed_len); 2928 if (packed == NULL) { 2929 lun_req->status = CTL_LUN_ERROR; 2930 snprintf(lun_req->error_str, 2931 sizeof(lun_req->error_str), 2932 "Cannot pack result nvlist."); 2933 break; 2934 } 2935 2936 if (packed_len > lun_req->result_len) { 2937 lun_req->status = CTL_LUN_ERROR; 2938 snprintf(lun_req->error_str, 2939 sizeof(lun_req->error_str), 2940 "Result nvlist too large."); 2941 free(packed, M_NVLIST); 2942 break; 2943 } 2944 2945 if (copyout(packed, lun_req->result, packed_len)) { 2946 lun_req->status = CTL_LUN_ERROR; 2947 snprintf(lun_req->error_str, 2948 sizeof(lun_req->error_str), 2949 "Cannot copyout() the result."); 2950 
free(packed, M_NVLIST); 2951 break; 2952 } 2953 2954 lun_req->result_len = packed_len; 2955 free(packed, M_NVLIST); 2956 } 2957 2958 nvlist_destroy(lun_req->result_nvl); 2959 } 2960 break; 2961 } 2962 case CTL_LUN_LIST: { 2963 struct sbuf *sb; 2964 struct ctl_lun_list *list; 2965 const char *name, *value; 2966 void *cookie; 2967 int type; 2968 2969 list = (struct ctl_lun_list *)addr; 2970 2971 /* 2972 * Allocate a fixed length sbuf here, based on the length 2973 * of the user's buffer. We could allocate an auto-extending 2974 * buffer, and then tell the user how much larger our 2975 * amount of data is than his buffer, but that presents 2976 * some problems: 2977 * 2978 * 1. The sbuf(9) routines use a blocking malloc, and so 2979 * we can't hold a lock while calling them with an 2980 * auto-extending buffer. 2981 * 2982 * 2. There is not currently a LUN reference counting 2983 * mechanism, outside of outstanding transactions on 2984 * the LUN's OOA queue. So a LUN could go away on us 2985 * while we're getting the LUN number, backend-specific 2986 * information, etc. Thus, given the way things 2987 * currently work, we need to hold the CTL lock while 2988 * grabbing LUN information. 2989 * 2990 * So, from the user's standpoint, the best thing to do is 2991 * allocate what he thinks is a reasonable buffer length, 2992 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 2993 * double the buffer length and try again. (And repeat 2994 * that until he succeeds.) 2995 */ 2996 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 2997 if (sb == NULL) { 2998 list->status = CTL_LUN_LIST_ERROR; 2999 snprintf(list->error_str, sizeof(list->error_str), 3000 "Unable to allocate %d bytes for LUN list", 3001 list->alloc_len); 3002 break; 3003 } 3004 3005 sbuf_printf(sb, "<ctllunlist>\n"); 3006 3007 mtx_lock(&softc->ctl_lock); 3008 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3009 mtx_lock(&lun->lun_lock); 3010 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3011 (uintmax_t)lun->lun); 3012 3013 /* 3014 * Bail out as soon as we see that we've overfilled 3015 * the buffer. 3016 */ 3017 if (retval != 0) 3018 break; 3019 3020 retval = sbuf_printf(sb, "\t<backend_type>%s" 3021 "</backend_type>\n", 3022 (lun->backend == NULL) ? "none" : 3023 lun->backend->name); 3024 3025 if (retval != 0) 3026 break; 3027 3028 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3029 lun->be_lun->lun_type); 3030 3031 if (retval != 0) 3032 break; 3033 3034 if (lun->backend == NULL) { 3035 retval = sbuf_printf(sb, "</lun>\n"); 3036 if (retval != 0) 3037 break; 3038 continue; 3039 } 3040 3041 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3042 (lun->be_lun->maxlba > 0) ? 
3043 lun->be_lun->maxlba + 1 : 0); 3044 3045 if (retval != 0) 3046 break; 3047 3048 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3049 lun->be_lun->blocksize); 3050 3051 if (retval != 0) 3052 break; 3053 3054 retval = sbuf_printf(sb, "\t<serial_number>"); 3055 3056 if (retval != 0) 3057 break; 3058 3059 retval = ctl_sbuf_printf_esc(sb, 3060 lun->be_lun->serial_num, 3061 sizeof(lun->be_lun->serial_num)); 3062 3063 if (retval != 0) 3064 break; 3065 3066 retval = sbuf_printf(sb, "</serial_number>\n"); 3067 3068 if (retval != 0) 3069 break; 3070 3071 retval = sbuf_printf(sb, "\t<device_id>"); 3072 3073 if (retval != 0) 3074 break; 3075 3076 retval = ctl_sbuf_printf_esc(sb, 3077 lun->be_lun->device_id, 3078 sizeof(lun->be_lun->device_id)); 3079 3080 if (retval != 0) 3081 break; 3082 3083 retval = sbuf_printf(sb, "</device_id>\n"); 3084 3085 if (retval != 0) 3086 break; 3087 3088 if (lun->backend->lun_info != NULL) { 3089 retval = lun->backend->lun_info(lun->be_lun, sb); 3090 if (retval != 0) 3091 break; 3092 } 3093 3094 cookie = NULL; 3095 while ((name = nvlist_next(lun->be_lun->options, &type, 3096 &cookie)) != NULL) { 3097 sbuf_printf(sb, "\t<%s>", name); 3098 3099 if (type == NV_TYPE_STRING) { 3100 value = dnvlist_get_string( 3101 lun->be_lun->options, name, NULL); 3102 if (value != NULL) 3103 sbuf_printf(sb, "%s", value); 3104 } 3105 3106 sbuf_printf(sb, "</%s>\n", name); 3107 } 3108 3109 retval = sbuf_printf(sb, "</lun>\n"); 3110 3111 if (retval != 0) 3112 break; 3113 mtx_unlock(&lun->lun_lock); 3114 } 3115 if (lun != NULL) 3116 mtx_unlock(&lun->lun_lock); 3117 mtx_unlock(&softc->ctl_lock); 3118 3119 if ((retval != 0) 3120 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3121 retval = 0; 3122 sbuf_delete(sb); 3123 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3124 snprintf(list->error_str, sizeof(list->error_str), 3125 "Out of space, %d bytes is too small", 3126 list->alloc_len); 3127 break; 3128 } 3129 3130 sbuf_finish(sb); 3131 3132 retval = copyout(sbuf_data(sb), list->lun_xml, 3133 sbuf_len(sb) + 1); 3134 3135 list->fill_len = sbuf_len(sb) + 1; 3136 list->status = CTL_LUN_LIST_OK; 3137 sbuf_delete(sb); 3138 break; 3139 } 3140 case CTL_ISCSI: { 3141 struct ctl_iscsi *ci; 3142 struct ctl_frontend *fe; 3143 3144 ci = (struct ctl_iscsi *)addr; 3145 3146 fe = ctl_frontend_find("iscsi"); 3147 if (fe == NULL) { 3148 ci->status = CTL_ISCSI_ERROR; 3149 snprintf(ci->error_str, sizeof(ci->error_str), 3150 "Frontend \"iscsi\" not found."); 3151 break; 3152 } 3153 3154 retval = fe->ioctl(dev, cmd, addr, flag, td); 3155 break; 3156 } 3157 case CTL_PORT_REQ: { 3158 struct ctl_req *req; 3159 struct ctl_frontend *fe; 3160 void *packed; 3161 nvlist_t *tmp_args_nvl; 3162 size_t packed_len; 3163 3164 req = (struct ctl_req *)addr; 3165 tmp_args_nvl = req->args_nvl; 3166 3167 fe = ctl_frontend_find(req->driver); 3168 if (fe == NULL) { 3169 req->status = CTL_LUN_ERROR; 3170 snprintf(req->error_str, sizeof(req->error_str), 3171 "Frontend \"%s\" not found.", req->driver); 3172 break; 3173 } 3174 3175 if (req->args != NULL) { 3176 packed = malloc(req->args_len, M_CTL, M_WAITOK); 3177 if (copyin(req->args, packed, req->args_len) != 0) { 3178 free(packed, M_CTL); 3179 req->status = CTL_LUN_ERROR; 3180 snprintf(req->error_str, sizeof(req->error_str), 3181 "Cannot copyin args."); 3182 break; 3183 } 3184 req->args_nvl = nvlist_unpack(packed, 3185 req->args_len, 0); 3186 free(packed, M_CTL); 3187 3188 if (req->args_nvl == NULL) { 3189 req->status = CTL_LUN_ERROR; 3190 snprintf(req->error_str, 
sizeof(req->error_str), 3191 "Cannot unpack args nvlist."); 3192 break; 3193 } 3194 } else 3195 req->args_nvl = nvlist_create(0); 3196 3197 if (fe->ioctl) 3198 retval = fe->ioctl(dev, cmd, addr, flag, td); 3199 else 3200 retval = ENODEV; 3201 3202 nvlist_destroy(req->args_nvl); 3203 req->args_nvl = tmp_args_nvl; 3204 3205 if (req->result_nvl != NULL) { 3206 if (req->result != NULL) { 3207 packed = nvlist_pack(req->result_nvl, 3208 &packed_len); 3209 if (packed == NULL) { 3210 req->status = CTL_LUN_ERROR; 3211 snprintf(req->error_str, 3212 sizeof(req->error_str), 3213 "Cannot pack result nvlist."); 3214 break; 3215 } 3216 3217 if (packed_len > req->result_len) { 3218 req->status = CTL_LUN_ERROR; 3219 snprintf(req->error_str, 3220 sizeof(req->error_str), 3221 "Result nvlist too large."); 3222 free(packed, M_NVLIST); 3223 break; 3224 } 3225 3226 if (copyout(packed, req->result, packed_len)) { 3227 req->status = CTL_LUN_ERROR; 3228 snprintf(req->error_str, 3229 sizeof(req->error_str), 3230 "Cannot copyout() the result."); 3231 free(packed, M_NVLIST); 3232 break; 3233 } 3234 3235 req->result_len = packed_len; 3236 free(packed, M_NVLIST); 3237 } 3238 3239 nvlist_destroy(req->result_nvl); 3240 } 3241 break; 3242 } 3243 case CTL_PORT_LIST: { 3244 struct sbuf *sb; 3245 struct ctl_port *port; 3246 struct ctl_lun_list *list; 3247 const char *name, *value; 3248 void *cookie; 3249 int j, type; 3250 uint32_t plun; 3251 3252 list = (struct ctl_lun_list *)addr; 3253 3254 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3255 if (sb == NULL) { 3256 list->status = CTL_LUN_LIST_ERROR; 3257 snprintf(list->error_str, sizeof(list->error_str), 3258 "Unable to allocate %d bytes for LUN list", 3259 list->alloc_len); 3260 break; 3261 } 3262 3263 sbuf_printf(sb, "<ctlportlist>\n"); 3264 3265 mtx_lock(&softc->ctl_lock); 3266 STAILQ_FOREACH(port, &softc->port_list, links) { 3267 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3268 (uintmax_t)port->targ_port); 3269 3270 /* 3271 * Bail out as soon as we see that we've overfilled 3272 * the buffer. 3273 */ 3274 if (retval != 0) 3275 break; 3276 3277 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3278 "</frontend_type>\n", port->frontend->name); 3279 if (retval != 0) 3280 break; 3281 3282 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3283 port->port_type); 3284 if (retval != 0) 3285 break; 3286 3287 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3288 (port->status & CTL_PORT_STATUS_ONLINE) ? 
"YES" : "NO"); 3289 if (retval != 0) 3290 break; 3291 3292 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3293 port->port_name); 3294 if (retval != 0) 3295 break; 3296 3297 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3298 port->physical_port); 3299 if (retval != 0) 3300 break; 3301 3302 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3303 port->virtual_port); 3304 if (retval != 0) 3305 break; 3306 3307 if (port->target_devid != NULL) { 3308 sbuf_printf(sb, "\t<target>"); 3309 ctl_id_sbuf(port->target_devid, sb); 3310 sbuf_printf(sb, "</target>\n"); 3311 } 3312 3313 if (port->port_devid != NULL) { 3314 sbuf_printf(sb, "\t<port>"); 3315 ctl_id_sbuf(port->port_devid, sb); 3316 sbuf_printf(sb, "</port>\n"); 3317 } 3318 3319 if (port->port_info != NULL) { 3320 retval = port->port_info(port->onoff_arg, sb); 3321 if (retval != 0) 3322 break; 3323 } 3324 3325 cookie = NULL; 3326 while ((name = nvlist_next(port->options, &type, 3327 &cookie)) != NULL) { 3328 sbuf_printf(sb, "\t<%s>", name); 3329 3330 if (type == NV_TYPE_STRING) { 3331 value = dnvlist_get_string(port->options, 3332 name, NULL); 3333 if (value != NULL) 3334 sbuf_printf(sb, "%s", value); 3335 } 3336 3337 sbuf_printf(sb, "</%s>\n", name); 3338 } 3339 3340 if (port->lun_map != NULL) { 3341 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 3342 for (j = 0; j < port->lun_map_size; j++) { 3343 plun = ctl_lun_map_from_port(port, j); 3344 if (plun == UINT32_MAX) 3345 continue; 3346 sbuf_printf(sb, 3347 "\t<lun id=\"%u\">%u</lun>\n", 3348 j, plun); 3349 } 3350 } 3351 3352 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3353 if (port->wwpn_iid[j].in_use == 0 || 3354 (port->wwpn_iid[j].wwpn == 0 && 3355 port->wwpn_iid[j].name == NULL)) 3356 continue; 3357 3358 if (port->wwpn_iid[j].name != NULL) 3359 retval = sbuf_printf(sb, 3360 "\t<initiator id=\"%u\">%s</initiator>\n", 3361 j, port->wwpn_iid[j].name); 3362 else 3363 retval = sbuf_printf(sb, 3364 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3365 j, port->wwpn_iid[j].wwpn); 3366 if (retval != 0) 3367 break; 3368 } 3369 if (retval != 0) 3370 break; 3371 3372 retval = sbuf_printf(sb, "</targ_port>\n"); 3373 if (retval != 0) 3374 break; 3375 } 3376 mtx_unlock(&softc->ctl_lock); 3377 3378 if ((retval != 0) 3379 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3380 retval = 0; 3381 sbuf_delete(sb); 3382 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3383 snprintf(list->error_str, sizeof(list->error_str), 3384 "Out of space, %d bytes is too small", 3385 list->alloc_len); 3386 break; 3387 } 3388 3389 sbuf_finish(sb); 3390 3391 retval = copyout(sbuf_data(sb), list->lun_xml, 3392 sbuf_len(sb) + 1); 3393 3394 list->fill_len = sbuf_len(sb) + 1; 3395 list->status = CTL_LUN_LIST_OK; 3396 sbuf_delete(sb); 3397 break; 3398 } 3399 case CTL_LUN_MAP: { 3400 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3401 struct ctl_port *port; 3402 3403 mtx_lock(&softc->ctl_lock); 3404 if (lm->port < softc->port_min || 3405 lm->port >= softc->port_max || 3406 (port = softc->ctl_ports[lm->port]) == NULL) { 3407 mtx_unlock(&softc->ctl_lock); 3408 return (ENXIO); 3409 } 3410 if (port->status & CTL_PORT_STATUS_ONLINE) { 3411 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3412 if (ctl_lun_map_to_port(port, lun->lun) == 3413 UINT32_MAX) 3414 continue; 3415 mtx_lock(&lun->lun_lock); 3416 ctl_est_ua_port(lun, lm->port, -1, 3417 CTL_UA_LUN_CHANGE); 3418 mtx_unlock(&lun->lun_lock); 3419 } 3420 } 3421 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3422 if 
(lm->plun != UINT32_MAX) { 3423 if (lm->lun == UINT32_MAX) 3424 retval = ctl_lun_map_unset(port, lm->plun); 3425 else if (lm->lun < ctl_max_luns && 3426 softc->ctl_luns[lm->lun] != NULL) 3427 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3428 else 3429 return (ENXIO); 3430 } else { 3431 if (lm->lun == UINT32_MAX) 3432 retval = ctl_lun_map_deinit(port); 3433 else 3434 retval = ctl_lun_map_init(port); 3435 } 3436 if (port->status & CTL_PORT_STATUS_ONLINE) 3437 ctl_isc_announce_port(port); 3438 break; 3439 } 3440 case CTL_GET_LUN_STATS: { 3441 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3442 int i; 3443 3444 /* 3445 * XXX KDM no locking here. If the LUN list changes, 3446 * things can blow up. 3447 */ 3448 i = 0; 3449 stats->status = CTL_SS_OK; 3450 stats->fill_len = 0; 3451 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3452 if (lun->lun < stats->first_item) 3453 continue; 3454 if (stats->fill_len + sizeof(lun->stats) > 3455 stats->alloc_len) { 3456 stats->status = CTL_SS_NEED_MORE_SPACE; 3457 break; 3458 } 3459 retval = copyout(&lun->stats, &stats->stats[i++], 3460 sizeof(lun->stats)); 3461 if (retval != 0) 3462 break; 3463 stats->fill_len += sizeof(lun->stats); 3464 } 3465 stats->num_items = softc->num_luns; 3466 stats->flags = CTL_STATS_FLAG_NONE; 3467 #ifdef CTL_TIME_IO 3468 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3469 #endif 3470 getnanouptime(&stats->timestamp); 3471 break; 3472 } 3473 case CTL_GET_PORT_STATS: { 3474 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3475 int i; 3476 3477 /* 3478 * XXX KDM no locking here. If the LUN list changes, 3479 * things can blow up. 3480 */ 3481 i = 0; 3482 stats->status = CTL_SS_OK; 3483 stats->fill_len = 0; 3484 STAILQ_FOREACH(port, &softc->port_list, links) { 3485 if (port->targ_port < stats->first_item) 3486 continue; 3487 if (stats->fill_len + sizeof(port->stats) > 3488 stats->alloc_len) { 3489 stats->status = CTL_SS_NEED_MORE_SPACE; 3490 break; 3491 } 3492 retval = copyout(&port->stats, &stats->stats[i++], 3493 sizeof(port->stats)); 3494 if (retval != 0) 3495 break; 3496 stats->fill_len += sizeof(port->stats); 3497 } 3498 stats->num_items = softc->num_ports; 3499 stats->flags = CTL_STATS_FLAG_NONE; 3500 #ifdef CTL_TIME_IO 3501 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3502 #endif 3503 getnanouptime(&stats->timestamp); 3504 break; 3505 } 3506 default: { 3507 /* XXX KDM should we fix this? */ 3508 #if 0 3509 struct ctl_backend_driver *backend; 3510 unsigned int type; 3511 int found; 3512 3513 found = 0; 3514 3515 /* 3516 * We encode the backend type as the ioctl type for backend 3517 * ioctls. So parse it out here, and then search for a 3518 * backend of this type. 
3519 */ 3520 type = _IOC_TYPE(cmd); 3521 3522 STAILQ_FOREACH(backend, &softc->be_list, links) { 3523 if (backend->type == type) { 3524 found = 1; 3525 break; 3526 } 3527 } 3528 if (found == 0) { 3529 printf("ctl: unknown ioctl command %#lx or backend " 3530 "%d\n", cmd, type); 3531 retval = EINVAL; 3532 break; 3533 } 3534 retval = backend->ioctl(dev, cmd, addr, flag, td); 3535 #endif 3536 retval = ENOTTY; 3537 break; 3538 } 3539 } 3540 return (retval); 3541 } 3542 3543 uint32_t 3544 ctl_get_initindex(struct ctl_nexus *nexus) 3545 { 3546 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3547 } 3548 3549 int 3550 ctl_lun_map_init(struct ctl_port *port) 3551 { 3552 struct ctl_softc *softc = port->ctl_softc; 3553 struct ctl_lun *lun; 3554 int size = ctl_lun_map_size; 3555 uint32_t i; 3556 3557 if (port->lun_map == NULL || port->lun_map_size < size) { 3558 port->lun_map_size = 0; 3559 free(port->lun_map, M_CTL); 3560 port->lun_map = malloc(size * sizeof(uint32_t), 3561 M_CTL, M_NOWAIT); 3562 } 3563 if (port->lun_map == NULL) 3564 return (ENOMEM); 3565 for (i = 0; i < size; i++) 3566 port->lun_map[i] = UINT32_MAX; 3567 port->lun_map_size = size; 3568 if (port->status & CTL_PORT_STATUS_ONLINE) { 3569 if (port->lun_disable != NULL) { 3570 STAILQ_FOREACH(lun, &softc->lun_list, links) 3571 port->lun_disable(port->targ_lun_arg, lun->lun); 3572 } 3573 ctl_isc_announce_port(port); 3574 } 3575 return (0); 3576 } 3577 3578 int 3579 ctl_lun_map_deinit(struct ctl_port *port) 3580 { 3581 struct ctl_softc *softc = port->ctl_softc; 3582 struct ctl_lun *lun; 3583 3584 if (port->lun_map == NULL) 3585 return (0); 3586 port->lun_map_size = 0; 3587 free(port->lun_map, M_CTL); 3588 port->lun_map = NULL; 3589 if (port->status & CTL_PORT_STATUS_ONLINE) { 3590 if (port->lun_enable != NULL) { 3591 STAILQ_FOREACH(lun, &softc->lun_list, links) 3592 port->lun_enable(port->targ_lun_arg, lun->lun); 3593 } 3594 ctl_isc_announce_port(port); 3595 } 3596 return (0); 3597 } 3598 3599 int 3600 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3601 { 3602 int status; 3603 uint32_t old; 3604 3605 if (port->lun_map == NULL) { 3606 status = ctl_lun_map_init(port); 3607 if (status != 0) 3608 return (status); 3609 } 3610 if (plun >= port->lun_map_size) 3611 return (EINVAL); 3612 old = port->lun_map[plun]; 3613 port->lun_map[plun] = glun; 3614 if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) { 3615 if (port->lun_enable != NULL) 3616 port->lun_enable(port->targ_lun_arg, plun); 3617 ctl_isc_announce_port(port); 3618 } 3619 return (0); 3620 } 3621 3622 int 3623 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3624 { 3625 uint32_t old; 3626 3627 if (port->lun_map == NULL || plun >= port->lun_map_size) 3628 return (0); 3629 old = port->lun_map[plun]; 3630 port->lun_map[plun] = UINT32_MAX; 3631 if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) { 3632 if (port->lun_disable != NULL) 3633 port->lun_disable(port->targ_lun_arg, plun); 3634 ctl_isc_announce_port(port); 3635 } 3636 return (0); 3637 } 3638 3639 uint32_t 3640 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3641 { 3642 3643 if (port == NULL) 3644 return (UINT32_MAX); 3645 if (port->lun_map == NULL) 3646 return (lun_id); 3647 if (lun_id > port->lun_map_size) 3648 return (UINT32_MAX); 3649 return (port->lun_map[lun_id]); 3650 } 3651 3652 uint32_t 3653 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3654 { 3655 uint32_t i; 3656 3657 if (port == NULL) 3658 return (UINT32_MAX); 3659 if 
(port->lun_map == NULL) 3660 return (lun_id); 3661 for (i = 0; i < port->lun_map_size; i++) { 3662 if (port->lun_map[i] == lun_id) 3663 return (i); 3664 } 3665 return (UINT32_MAX); 3666 } 3667 3668 uint32_t 3669 ctl_decode_lun(uint64_t encoded) 3670 { 3671 uint8_t lun[8]; 3672 uint32_t result = 0xffffffff; 3673 3674 be64enc(lun, encoded); 3675 switch (lun[0] & RPL_LUNDATA_ATYP_MASK) { 3676 case RPL_LUNDATA_ATYP_PERIPH: 3677 if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 && 3678 lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0) 3679 result = lun[1]; 3680 break; 3681 case RPL_LUNDATA_ATYP_FLAT: 3682 if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 && 3683 lun[6] == 0 && lun[7] == 0) 3684 result = ((lun[0] & 0x3f) << 8) + lun[1]; 3685 break; 3686 case RPL_LUNDATA_ATYP_EXTLUN: 3687 switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) { 3688 case 0x02: 3689 switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) { 3690 case 0x00: 3691 result = lun[1]; 3692 break; 3693 case 0x10: 3694 result = (lun[1] << 16) + (lun[2] << 8) + 3695 lun[3]; 3696 break; 3697 case 0x20: 3698 if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0) 3699 result = (lun[2] << 24) + 3700 (lun[3] << 16) + (lun[4] << 8) + 3701 lun[5]; 3702 break; 3703 } 3704 break; 3705 case RPL_LUNDATA_EXT_EAM_NOT_SPEC: 3706 result = 0xffffffff; 3707 break; 3708 } 3709 break; 3710 } 3711 return (result); 3712 } 3713 3714 uint64_t 3715 ctl_encode_lun(uint32_t decoded) 3716 { 3717 uint64_t l = decoded; 3718 3719 if (l <= 0xff) 3720 return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48)); 3721 if (l <= 0x3fff) 3722 return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48)); 3723 if (l <= 0xffffff) 3724 return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) | 3725 (l << 32)); 3726 return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16)); 3727 } 3728 3729 int 3730 ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) 3731 { 3732 int i; 3733 3734 for (i = first; i < last; i++) { 3735 if ((mask[i / 32] & (1 << (i % 32))) == 0) 3736 return (i); 3737 } 3738 return (-1); 3739 } 3740 3741 int 3742 ctl_set_mask(uint32_t *mask, uint32_t bit) 3743 { 3744 uint32_t chunk, piece; 3745 3746 chunk = bit >> 5; 3747 piece = bit % (sizeof(uint32_t) * 8); 3748 3749 if ((mask[chunk] & (1 << piece)) != 0) 3750 return (-1); 3751 else 3752 mask[chunk] |= (1 << piece); 3753 3754 return (0); 3755 } 3756 3757 int 3758 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3759 { 3760 uint32_t chunk, piece; 3761 3762 chunk = bit >> 5; 3763 piece = bit % (sizeof(uint32_t) * 8); 3764 3765 if ((mask[chunk] & (1 << piece)) == 0) 3766 return (-1); 3767 else 3768 mask[chunk] &= ~(1 << piece); 3769 3770 return (0); 3771 } 3772 3773 int 3774 ctl_is_set(uint32_t *mask, uint32_t bit) 3775 { 3776 uint32_t chunk, piece; 3777 3778 chunk = bit >> 5; 3779 piece = bit % (sizeof(uint32_t) * 8); 3780 3781 if ((mask[chunk] & (1 << piece)) == 0) 3782 return (0); 3783 else 3784 return (1); 3785 } 3786 3787 static uint64_t 3788 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3789 { 3790 uint64_t *t; 3791 3792 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3793 if (t == NULL) 3794 return (0); 3795 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3796 } 3797 3798 static void 3799 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3800 { 3801 uint64_t *t; 3802 3803 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3804 if (t == NULL) 3805 return; 3806 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3807 } 3808 3809 static void 3810 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3811 
{ 3812 uint64_t *p; 3813 u_int i; 3814 3815 i = residx/CTL_MAX_INIT_PER_PORT; 3816 if (lun->pr_keys[i] != NULL) 3817 return; 3818 mtx_unlock(&lun->lun_lock); 3819 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3820 M_WAITOK | M_ZERO); 3821 mtx_lock(&lun->lun_lock); 3822 if (lun->pr_keys[i] == NULL) 3823 lun->pr_keys[i] = p; 3824 else 3825 free(p, M_CTL); 3826 } 3827 3828 static void 3829 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3830 { 3831 uint64_t *t; 3832 3833 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3834 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3835 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3836 } 3837 3838 /* 3839 * ctl_softc, pool_name, total_ctl_io are passed in. 3840 * npool is passed out. 3841 */ 3842 int 3843 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3844 uint32_t total_ctl_io, void **npool) 3845 { 3846 struct ctl_io_pool *pool; 3847 3848 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3849 M_NOWAIT | M_ZERO); 3850 if (pool == NULL) 3851 return (ENOMEM); 3852 3853 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3854 pool->ctl_softc = ctl_softc; 3855 #ifdef IO_POOLS 3856 pool->zone = uma_zsecond_create(pool->name, NULL, 3857 NULL, NULL, NULL, ctl_softc->io_zone); 3858 /* uma_prealloc(pool->zone, total_ctl_io); */ 3859 #else 3860 pool->zone = ctl_softc->io_zone; 3861 #endif 3862 3863 *npool = pool; 3864 return (0); 3865 } 3866 3867 void 3868 ctl_pool_free(struct ctl_io_pool *pool) 3869 { 3870 3871 if (pool == NULL) 3872 return; 3873 3874 #ifdef IO_POOLS 3875 uma_zdestroy(pool->zone); 3876 #endif 3877 free(pool, M_CTL); 3878 } 3879 3880 union ctl_io * 3881 ctl_alloc_io(void *pool_ref) 3882 { 3883 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3884 union ctl_io *io; 3885 3886 io = uma_zalloc(pool->zone, M_WAITOK); 3887 if (io != NULL) { 3888 io->io_hdr.pool = pool_ref; 3889 CTL_SOFTC(io) = pool->ctl_softc; 3890 TAILQ_INIT(&io->io_hdr.blocked_queue); 3891 } 3892 return (io); 3893 } 3894 3895 union ctl_io * 3896 ctl_alloc_io_nowait(void *pool_ref) 3897 { 3898 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3899 union ctl_io *io; 3900 3901 io = uma_zalloc(pool->zone, M_NOWAIT); 3902 if (io != NULL) { 3903 io->io_hdr.pool = pool_ref; 3904 CTL_SOFTC(io) = pool->ctl_softc; 3905 TAILQ_INIT(&io->io_hdr.blocked_queue); 3906 } 3907 return (io); 3908 } 3909 3910 void 3911 ctl_free_io(union ctl_io *io) 3912 { 3913 struct ctl_io_pool *pool; 3914 3915 if (io == NULL) 3916 return; 3917 3918 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3919 uma_zfree(pool->zone, io); 3920 } 3921 3922 void 3923 ctl_zero_io(union ctl_io *io) 3924 { 3925 struct ctl_io_pool *pool; 3926 3927 if (io == NULL) 3928 return; 3929 3930 /* 3931 * May need to preserve linked list pointers at some point too. 
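	 * For now the fields that do have to survive the memset() are
	 * restored by hand below: the pool pointer is saved and put back,
	 * the softc pointer is re-derived from the pool, and the blocked
	 * queue is re-initialized to an empty list.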
3932 */ 3933 pool = io->io_hdr.pool; 3934 memset(io, 0, sizeof(*io)); 3935 io->io_hdr.pool = pool; 3936 CTL_SOFTC(io) = pool->ctl_softc; 3937 TAILQ_INIT(&io->io_hdr.blocked_queue); 3938 } 3939 3940 int 3941 ctl_expand_number(const char *buf, uint64_t *num) 3942 { 3943 char *endptr; 3944 uint64_t number; 3945 unsigned shift; 3946 3947 number = strtoq(buf, &endptr, 0); 3948 3949 switch (tolower((unsigned char)*endptr)) { 3950 case 'e': 3951 shift = 60; 3952 break; 3953 case 'p': 3954 shift = 50; 3955 break; 3956 case 't': 3957 shift = 40; 3958 break; 3959 case 'g': 3960 shift = 30; 3961 break; 3962 case 'm': 3963 shift = 20; 3964 break; 3965 case 'k': 3966 shift = 10; 3967 break; 3968 case 'b': 3969 case '\0': /* No unit. */ 3970 *num = number; 3971 return (0); 3972 default: 3973 /* Unrecognized unit. */ 3974 return (-1); 3975 } 3976 3977 if ((number << shift) >> shift != number) { 3978 /* Overflow */ 3979 return (-1); 3980 } 3981 *num = number << shift; 3982 return (0); 3983 } 3984 3985 /* 3986 * This routine could be used in the future to load default and/or saved 3987 * mode page parameters for a particuar lun. 3988 */ 3989 static int 3990 ctl_init_page_index(struct ctl_lun *lun) 3991 { 3992 int i, page_code; 3993 struct ctl_page_index *page_index; 3994 const char *value; 3995 uint64_t ival; 3996 3997 memcpy(&lun->mode_pages.index, page_index_template, 3998 sizeof(page_index_template)); 3999 4000 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 4001 page_index = &lun->mode_pages.index[i]; 4002 if (lun->be_lun->lun_type == T_DIRECT && 4003 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 4004 continue; 4005 if (lun->be_lun->lun_type == T_PROCESSOR && 4006 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 4007 continue; 4008 if (lun->be_lun->lun_type == T_CDROM && 4009 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 4010 continue; 4011 4012 page_code = page_index->page_code & SMPH_PC_MASK; 4013 switch (page_code) { 4014 case SMS_RW_ERROR_RECOVERY_PAGE: { 4015 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4016 ("subpage %#x for page %#x is incorrect!", 4017 page_index->subpage, page_code)); 4018 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 4019 &rw_er_page_default, 4020 sizeof(rw_er_page_default)); 4021 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 4022 &rw_er_page_changeable, 4023 sizeof(rw_er_page_changeable)); 4024 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 4025 &rw_er_page_default, 4026 sizeof(rw_er_page_default)); 4027 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 4028 &rw_er_page_default, 4029 sizeof(rw_er_page_default)); 4030 page_index->page_data = 4031 (uint8_t *)lun->mode_pages.rw_er_page; 4032 break; 4033 } 4034 case SMS_FORMAT_DEVICE_PAGE: { 4035 struct scsi_format_page *format_page; 4036 4037 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4038 ("subpage %#x for page %#x is incorrect!", 4039 page_index->subpage, page_code)); 4040 4041 /* 4042 * Sectors per track are set above. Bytes per 4043 * sector need to be set here on a per-LUN basis. 
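			 * For example, a backing store using 4096-byte
			 * blocks ends up with scsi_ulto2b(4096, ...)
			 * storing the big-endian pair {0x10, 0x00} in the
			 * current, default and saved copies below.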
4044 */ 4045 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 4046 &format_page_default, 4047 sizeof(format_page_default)); 4048 memcpy(&lun->mode_pages.format_page[ 4049 CTL_PAGE_CHANGEABLE], &format_page_changeable, 4050 sizeof(format_page_changeable)); 4051 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 4052 &format_page_default, 4053 sizeof(format_page_default)); 4054 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 4055 &format_page_default, 4056 sizeof(format_page_default)); 4057 4058 format_page = &lun->mode_pages.format_page[ 4059 CTL_PAGE_CURRENT]; 4060 scsi_ulto2b(lun->be_lun->blocksize, 4061 format_page->bytes_per_sector); 4062 4063 format_page = &lun->mode_pages.format_page[ 4064 CTL_PAGE_DEFAULT]; 4065 scsi_ulto2b(lun->be_lun->blocksize, 4066 format_page->bytes_per_sector); 4067 4068 format_page = &lun->mode_pages.format_page[ 4069 CTL_PAGE_SAVED]; 4070 scsi_ulto2b(lun->be_lun->blocksize, 4071 format_page->bytes_per_sector); 4072 4073 page_index->page_data = 4074 (uint8_t *)lun->mode_pages.format_page; 4075 break; 4076 } 4077 case SMS_RIGID_DISK_PAGE: { 4078 struct scsi_rigid_disk_page *rigid_disk_page; 4079 uint32_t sectors_per_cylinder; 4080 uint64_t cylinders; 4081 #ifndef __XSCALE__ 4082 int shift; 4083 #endif /* !__XSCALE__ */ 4084 4085 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4086 ("subpage %#x for page %#x is incorrect!", 4087 page_index->subpage, page_code)); 4088 4089 /* 4090 * Rotation rate and sectors per track are set 4091 * above. We calculate the cylinders here based on 4092 * capacity. Due to the number of heads and 4093 * sectors per track we're using, smaller arrays 4094 * may turn out to have 0 cylinders. Linux and 4095 * FreeBSD don't pay attention to these mode pages 4096 * to figure out capacity, but Solaris does. It 4097 * seems to deal with 0 cylinders just fine, and 4098 * works out a fake geometry based on the capacity. 4099 */ 4100 memcpy(&lun->mode_pages.rigid_disk_page[ 4101 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 4102 sizeof(rigid_disk_page_default)); 4103 memcpy(&lun->mode_pages.rigid_disk_page[ 4104 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 4105 sizeof(rigid_disk_page_changeable)); 4106 4107 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 4108 CTL_DEFAULT_HEADS; 4109 4110 /* 4111 * The divide method here will be more accurate, 4112 * probably, but results in floating point being 4113 * used in the kernel on i386 (__udivdi3()). On the 4114 * XScale, though, __udivdi3() is implemented in 4115 * software. 4116 * 4117 * The shift method for cylinder calculation is 4118 * accurate if sectors_per_cylinder is a power of 4119 * 2. Otherwise it might be slightly off -- you 4120 * might have a bit of a truncation problem. 4121 */ 4122 #ifdef __XSCALE__ 4123 cylinders = (lun->be_lun->maxlba + 1) / 4124 sectors_per_cylinder; 4125 #else 4126 for (shift = 31; shift > 0; shift--) { 4127 if (sectors_per_cylinder & (1 << shift)) 4128 break; 4129 } 4130 cylinders = (lun->be_lun->maxlba + 1) >> shift; 4131 #endif 4132 4133 /* 4134 * We've basically got 3 bytes, or 24 bits for the 4135 * cylinder size in the mode page. If we're over, 4136 * just round down to 2^24. 
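			 * Roughly: the shift loop below picks the highest
			 * set bit of sectors_per_cylinder, so a power-of-two
			 * value (e.g. 0x8000 -> shift 15) gives the exact
			 * quotient, while something like 0x9000 still shifts
			 * by 15 and over-counts cylinders, by up to a factor
			 * of two in the worst case.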
4137 */ 4138 if (cylinders > 0xffffff) 4139 cylinders = 0xffffff; 4140 4141 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4142 CTL_PAGE_DEFAULT]; 4143 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4144 4145 if ((value = dnvlist_get_string(lun->be_lun->options, 4146 "rpm", NULL)) != NULL) { 4147 scsi_ulto2b(strtol(value, NULL, 0), 4148 rigid_disk_page->rotation_rate); 4149 } 4150 4151 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 4152 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4153 sizeof(rigid_disk_page_default)); 4154 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 4155 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4156 sizeof(rigid_disk_page_default)); 4157 4158 page_index->page_data = 4159 (uint8_t *)lun->mode_pages.rigid_disk_page; 4160 break; 4161 } 4162 case SMS_VERIFY_ERROR_RECOVERY_PAGE: { 4163 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4164 ("subpage %#x for page %#x is incorrect!", 4165 page_index->subpage, page_code)); 4166 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT], 4167 &verify_er_page_default, 4168 sizeof(verify_er_page_default)); 4169 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE], 4170 &verify_er_page_changeable, 4171 sizeof(verify_er_page_changeable)); 4172 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT], 4173 &verify_er_page_default, 4174 sizeof(verify_er_page_default)); 4175 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED], 4176 &verify_er_page_default, 4177 sizeof(verify_er_page_default)); 4178 page_index->page_data = 4179 (uint8_t *)lun->mode_pages.verify_er_page; 4180 break; 4181 } 4182 case SMS_CACHING_PAGE: { 4183 struct scsi_caching_page *caching_page; 4184 4185 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4186 ("subpage %#x for page %#x is incorrect!", 4187 page_index->subpage, page_code)); 4188 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 4189 &caching_page_default, 4190 sizeof(caching_page_default)); 4191 memcpy(&lun->mode_pages.caching_page[ 4192 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 4193 sizeof(caching_page_changeable)); 4194 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4195 &caching_page_default, 4196 sizeof(caching_page_default)); 4197 caching_page = &lun->mode_pages.caching_page[ 4198 CTL_PAGE_SAVED]; 4199 value = dnvlist_get_string(lun->be_lun->options, 4200 "writecache", NULL); 4201 if (value != NULL && strcmp(value, "off") == 0) 4202 caching_page->flags1 &= ~SCP_WCE; 4203 value = dnvlist_get_string(lun->be_lun->options, 4204 "readcache", NULL); 4205 if (value != NULL && strcmp(value, "off") == 0) 4206 caching_page->flags1 |= SCP_RCD; 4207 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 4208 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4209 sizeof(caching_page_default)); 4210 page_index->page_data = 4211 (uint8_t *)lun->mode_pages.caching_page; 4212 break; 4213 } 4214 case SMS_CONTROL_MODE_PAGE: { 4215 switch (page_index->subpage) { 4216 case SMS_SUBPAGE_PAGE_0: { 4217 struct scsi_control_page *control_page; 4218 4219 memcpy(&lun->mode_pages.control_page[ 4220 CTL_PAGE_DEFAULT], 4221 &control_page_default, 4222 sizeof(control_page_default)); 4223 memcpy(&lun->mode_pages.control_page[ 4224 CTL_PAGE_CHANGEABLE], 4225 &control_page_changeable, 4226 sizeof(control_page_changeable)); 4227 memcpy(&lun->mode_pages.control_page[ 4228 CTL_PAGE_SAVED], 4229 &control_page_default, 4230 sizeof(control_page_default)); 4231 control_page = &lun->mode_pages.control_page[ 4232 CTL_PAGE_SAVED]; 4233 value = 
dnvlist_get_string(lun->be_lun->options, 4234 "reordering", NULL); 4235 if (value != NULL && 4236 strcmp(value, "unrestricted") == 0) { 4237 control_page->queue_flags &= 4238 ~SCP_QUEUE_ALG_MASK; 4239 control_page->queue_flags |= 4240 SCP_QUEUE_ALG_UNRESTRICTED; 4241 } 4242 memcpy(&lun->mode_pages.control_page[ 4243 CTL_PAGE_CURRENT], 4244 &lun->mode_pages.control_page[ 4245 CTL_PAGE_SAVED], 4246 sizeof(control_page_default)); 4247 page_index->page_data = 4248 (uint8_t *)lun->mode_pages.control_page; 4249 break; 4250 } 4251 case 0x01: 4252 memcpy(&lun->mode_pages.control_ext_page[ 4253 CTL_PAGE_DEFAULT], 4254 &control_ext_page_default, 4255 sizeof(control_ext_page_default)); 4256 memcpy(&lun->mode_pages.control_ext_page[ 4257 CTL_PAGE_CHANGEABLE], 4258 &control_ext_page_changeable, 4259 sizeof(control_ext_page_changeable)); 4260 memcpy(&lun->mode_pages.control_ext_page[ 4261 CTL_PAGE_SAVED], 4262 &control_ext_page_default, 4263 sizeof(control_ext_page_default)); 4264 memcpy(&lun->mode_pages.control_ext_page[ 4265 CTL_PAGE_CURRENT], 4266 &lun->mode_pages.control_ext_page[ 4267 CTL_PAGE_SAVED], 4268 sizeof(control_ext_page_default)); 4269 page_index->page_data = 4270 (uint8_t *)lun->mode_pages.control_ext_page; 4271 break; 4272 default: 4273 panic("subpage %#x for page %#x is incorrect!", 4274 page_index->subpage, page_code); 4275 } 4276 break; 4277 } 4278 case SMS_INFO_EXCEPTIONS_PAGE: { 4279 switch (page_index->subpage) { 4280 case SMS_SUBPAGE_PAGE_0: 4281 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 4282 &ie_page_default, 4283 sizeof(ie_page_default)); 4284 memcpy(&lun->mode_pages.ie_page[ 4285 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 4286 sizeof(ie_page_changeable)); 4287 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 4288 &ie_page_default, 4289 sizeof(ie_page_default)); 4290 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 4291 &ie_page_default, 4292 sizeof(ie_page_default)); 4293 page_index->page_data = 4294 (uint8_t *)lun->mode_pages.ie_page; 4295 break; 4296 case 0x02: { 4297 struct ctl_logical_block_provisioning_page *page; 4298 4299 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 4300 &lbp_page_default, 4301 sizeof(lbp_page_default)); 4302 memcpy(&lun->mode_pages.lbp_page[ 4303 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 4304 sizeof(lbp_page_changeable)); 4305 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4306 &lbp_page_default, 4307 sizeof(lbp_page_default)); 4308 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 4309 value = dnvlist_get_string(lun->be_lun->options, 4310 "avail-threshold", NULL); 4311 if (value != NULL && 4312 ctl_expand_number(value, &ival) == 0) { 4313 page->descr[0].flags |= SLBPPD_ENABLED | 4314 SLBPPD_ARMING_DEC; 4315 if (lun->be_lun->blocksize) 4316 ival /= lun->be_lun->blocksize; 4317 else 4318 ival /= 512; 4319 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4320 page->descr[0].count); 4321 } 4322 value = dnvlist_get_string(lun->be_lun->options, 4323 "used-threshold", NULL); 4324 if (value != NULL && 4325 ctl_expand_number(value, &ival) == 0) { 4326 page->descr[1].flags |= SLBPPD_ENABLED | 4327 SLBPPD_ARMING_INC; 4328 if (lun->be_lun->blocksize) 4329 ival /= lun->be_lun->blocksize; 4330 else 4331 ival /= 512; 4332 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4333 page->descr[1].count); 4334 } 4335 value = dnvlist_get_string(lun->be_lun->options, 4336 "pool-avail-threshold", NULL); 4337 if (value != NULL && 4338 ctl_expand_number(value, &ival) == 0) { 4339 page->descr[2].flags |= SLBPPD_ENABLED | 4340 SLBPPD_ARMING_DEC; 4341 if (lun->be_lun->blocksize) 
4342 ival /= lun->be_lun->blocksize; 4343 else 4344 ival /= 512; 4345 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4346 page->descr[2].count); 4347 } 4348 value = dnvlist_get_string(lun->be_lun->options, 4349 "pool-used-threshold", NULL); 4350 if (value != NULL && 4351 ctl_expand_number(value, &ival) == 0) { 4352 page->descr[3].flags |= SLBPPD_ENABLED | 4353 SLBPPD_ARMING_INC; 4354 if (lun->be_lun->blocksize) 4355 ival /= lun->be_lun->blocksize; 4356 else 4357 ival /= 512; 4358 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4359 page->descr[3].count); 4360 } 4361 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 4362 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4363 sizeof(lbp_page_default)); 4364 page_index->page_data = 4365 (uint8_t *)lun->mode_pages.lbp_page; 4366 break; 4367 } 4368 default: 4369 panic("subpage %#x for page %#x is incorrect!", 4370 page_index->subpage, page_code); 4371 } 4372 break; 4373 } 4374 case SMS_CDDVD_CAPS_PAGE:{ 4375 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4376 ("subpage %#x for page %#x is incorrect!", 4377 page_index->subpage, page_code)); 4378 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT], 4379 &cddvd_page_default, 4380 sizeof(cddvd_page_default)); 4381 memcpy(&lun->mode_pages.cddvd_page[ 4382 CTL_PAGE_CHANGEABLE], &cddvd_page_changeable, 4383 sizeof(cddvd_page_changeable)); 4384 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4385 &cddvd_page_default, 4386 sizeof(cddvd_page_default)); 4387 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT], 4388 &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4389 sizeof(cddvd_page_default)); 4390 page_index->page_data = 4391 (uint8_t *)lun->mode_pages.cddvd_page; 4392 break; 4393 } 4394 default: 4395 panic("invalid page code value %#x", page_code); 4396 } 4397 } 4398 4399 return (CTL_RETVAL_COMPLETE); 4400 } 4401 4402 static int 4403 ctl_init_log_page_index(struct ctl_lun *lun) 4404 { 4405 struct ctl_page_index *page_index; 4406 int i, j, k, prev; 4407 4408 memcpy(&lun->log_pages.index, log_page_index_template, 4409 sizeof(log_page_index_template)); 4410 4411 prev = -1; 4412 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 4413 page_index = &lun->log_pages.index[i]; 4414 if (lun->be_lun->lun_type == T_DIRECT && 4415 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 4416 continue; 4417 if (lun->be_lun->lun_type == T_PROCESSOR && 4418 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 4419 continue; 4420 if (lun->be_lun->lun_type == T_CDROM && 4421 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 4422 continue; 4423 4424 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 4425 lun->backend->lun_attr == NULL) 4426 continue; 4427 4428 if (page_index->page_code != prev) { 4429 lun->log_pages.pages_page[j] = page_index->page_code; 4430 prev = page_index->page_code; 4431 j++; 4432 } 4433 lun->log_pages.subpages_page[k*2] = page_index->page_code; 4434 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 4435 k++; 4436 } 4437 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 4438 lun->log_pages.index[0].page_len = j; 4439 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 4440 lun->log_pages.index[1].page_len = k * 2; 4441 lun->log_pages.index[2].page_data = (uint8_t *)&lun->log_pages.temp_page; 4442 lun->log_pages.index[2].page_len = sizeof(lun->log_pages.temp_page); 4443 lun->log_pages.index[3].page_data = &lun->log_pages.lbp_page[0]; 4444 lun->log_pages.index[3].page_len = 12*CTL_NUM_LBP_PARAMS; 4445 lun->log_pages.index[4].page_data = (uint8_t 
*)&lun->log_pages.stat_page; 4446 lun->log_pages.index[4].page_len = sizeof(lun->log_pages.stat_page); 4447 lun->log_pages.index[5].page_data = (uint8_t *)&lun->log_pages.ie_page; 4448 lun->log_pages.index[5].page_len = sizeof(lun->log_pages.ie_page); 4449 4450 return (CTL_RETVAL_COMPLETE); 4451 } 4452 4453 static int 4454 hex2bin(const char *str, uint8_t *buf, int buf_size) 4455 { 4456 int i; 4457 u_char c; 4458 4459 memset(buf, 0, buf_size); 4460 while (isspace(str[0])) 4461 str++; 4462 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) 4463 str += 2; 4464 buf_size *= 2; 4465 for (i = 0; str[i] != 0 && i < buf_size; i++) { 4466 while (str[i] == '-') /* Skip dashes in UUIDs. */ 4467 str++; 4468 c = str[i]; 4469 if (isdigit(c)) 4470 c -= '0'; 4471 else if (isalpha(c)) 4472 c -= isupper(c) ? 'A' - 10 : 'a' - 10; 4473 else 4474 break; 4475 if (c >= 16) 4476 break; 4477 if ((i & 1) == 0) 4478 buf[i / 2] |= (c << 4); 4479 else 4480 buf[i / 2] |= c; 4481 } 4482 return ((i + 1) / 2); 4483 } 4484 4485 /* 4486 * Add LUN. 4487 * 4488 * Returns 0 for success, non-zero (errno) for failure. 4489 */ 4490 int 4491 ctl_add_lun(struct ctl_be_lun *be_lun) 4492 { 4493 struct ctl_softc *ctl_softc = control_softc; 4494 struct ctl_lun *nlun, *lun; 4495 struct scsi_vpd_id_descriptor *desc; 4496 struct scsi_vpd_id_t10 *t10id; 4497 const char *eui, *naa, *scsiname, *uuid, *vendor, *value; 4498 int lun_number; 4499 int devidlen, idlen1, idlen2 = 0, len; 4500 4501 /* 4502 * We support only Direct Access, CD-ROM or Processor LUN types. 4503 */ 4504 switch (be_lun->lun_type) { 4505 case T_DIRECT: 4506 case T_PROCESSOR: 4507 case T_CDROM: 4508 break; 4509 case T_SEQUENTIAL: 4510 case T_CHANGER: 4511 default: 4512 return (EINVAL); 4513 } 4514 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK | M_ZERO); 4515 4516 lun->pending_sense = malloc(sizeof(struct scsi_sense_data *) * 4517 ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); 4518 lun->pending_ua = malloc(sizeof(ctl_ua_type *) * ctl_max_ports, 4519 M_DEVBUF, M_WAITOK | M_ZERO); 4520 lun->pr_keys = malloc(sizeof(uint64_t *) * ctl_max_ports, 4521 M_DEVBUF, M_WAITOK | M_ZERO); 4522 4523 /* Generate LUN ID. 
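	 * The identifiers assembled here are what the LUN later reports in
	 * its Device Identification (0x83) VPD page: a mandatory T10
	 * vendor ID descriptor built from the "vendor" option (or
	 * CTL_VENDOR) plus the backend device ID, and optional SCSI name
	 * string, EUI-64, NAA and UUID designators taken from the
	 * "scsiname", "eui", "naa" and "uuid" LUN options when present.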
*/ 4524 devidlen = max(CTL_DEVID_MIN_LEN, 4525 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4526 idlen1 = sizeof(*t10id) + devidlen; 4527 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4528 scsiname = dnvlist_get_string(be_lun->options, "scsiname", NULL); 4529 if (scsiname != NULL) { 4530 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4531 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4532 } 4533 eui = dnvlist_get_string(be_lun->options, "eui", NULL); 4534 if (eui != NULL) { 4535 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4536 } 4537 naa = dnvlist_get_string(be_lun->options, "naa", NULL); 4538 if (naa != NULL) { 4539 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4540 } 4541 uuid = dnvlist_get_string(be_lun->options, "uuid", NULL); 4542 if (uuid != NULL) { 4543 len += sizeof(struct scsi_vpd_id_descriptor) + 18; 4544 } 4545 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4546 M_CTL, M_WAITOK | M_ZERO); 4547 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4548 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4549 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4550 desc->length = idlen1; 4551 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4552 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4553 if ((vendor = dnvlist_get_string(be_lun->options, "vendor", NULL)) == NULL) { 4554 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4555 } else { 4556 strncpy(t10id->vendor, vendor, 4557 min(sizeof(t10id->vendor), strlen(vendor))); 4558 } 4559 strncpy((char *)t10id->vendor_spec_id, 4560 (char *)be_lun->device_id, devidlen); 4561 if (scsiname != NULL) { 4562 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4563 desc->length); 4564 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4565 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4566 SVPD_ID_TYPE_SCSI_NAME; 4567 desc->length = idlen2; 4568 strlcpy(desc->identifier, scsiname, idlen2); 4569 } 4570 if (eui != NULL) { 4571 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4572 desc->length); 4573 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4574 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4575 SVPD_ID_TYPE_EUI64; 4576 desc->length = hex2bin(eui, desc->identifier, 16); 4577 desc->length = desc->length > 12 ? 16 : 4578 (desc->length > 8 ? 12 : 8); 4579 len -= 16 - desc->length; 4580 } 4581 if (naa != NULL) { 4582 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4583 desc->length); 4584 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4585 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4586 SVPD_ID_TYPE_NAA; 4587 desc->length = hex2bin(naa, desc->identifier, 16); 4588 desc->length = desc->length > 8 ? 16 : 8; 4589 len -= 16 - desc->length; 4590 } 4591 if (uuid != NULL) { 4592 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4593 desc->length); 4594 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4595 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4596 SVPD_ID_TYPE_UUID; 4597 desc->identifier[0] = 0x10; 4598 hex2bin(uuid, &desc->identifier[2], 16); 4599 desc->length = 18; 4600 } 4601 lun->lun_devid->len = len; 4602 4603 mtx_lock(&ctl_softc->ctl_lock); 4604 /* 4605 * See if the caller requested a particular LUN number. If so, see 4606 * if it is available. Otherwise, allocate the first available LUN. 
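	 * Either way the decision is recorded in the ctl_lun_mask bitmap
	 * while ctl_lock is held: a requested ID must be below
	 * ctl_max_luns and not already set in the mask, and otherwise
	 * ctl_ffz() hands back the first clear bit.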
4607 */ 4608 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4609 if ((be_lun->req_lun_id > (ctl_max_luns - 1)) 4610 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4611 mtx_unlock(&ctl_softc->ctl_lock); 4612 if (be_lun->req_lun_id > (ctl_max_luns - 1)) { 4613 printf("ctl: requested LUN ID %d is higher " 4614 "than ctl_max_luns - 1 (%d)\n", 4615 be_lun->req_lun_id, ctl_max_luns - 1); 4616 } else { 4617 /* 4618 * XXX KDM return an error, or just assign 4619 * another LUN ID in this case?? 4620 */ 4621 printf("ctl: requested LUN ID %d is already " 4622 "in use\n", be_lun->req_lun_id); 4623 } 4624 fail: 4625 free(lun->lun_devid, M_CTL); 4626 free(lun, M_CTL); 4627 return (ENOSPC); 4628 } 4629 lun_number = be_lun->req_lun_id; 4630 } else { 4631 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, ctl_max_luns); 4632 if (lun_number == -1) { 4633 mtx_unlock(&ctl_softc->ctl_lock); 4634 printf("ctl: can't allocate LUN, out of LUNs\n"); 4635 goto fail; 4636 } 4637 } 4638 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4639 mtx_unlock(&ctl_softc->ctl_lock); 4640 4641 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4642 lun->lun = lun_number; 4643 lun->be_lun = be_lun; 4644 /* 4645 * The processor LUN is always enabled. Disk LUNs come on line 4646 * disabled, and must be enabled by the backend. 4647 */ 4648 lun->flags |= CTL_LUN_DISABLED; 4649 lun->backend = be_lun->be; 4650 be_lun->ctl_lun = lun; 4651 be_lun->lun_id = lun_number; 4652 if (be_lun->flags & CTL_LUN_FLAG_EJECTED) 4653 lun->flags |= CTL_LUN_EJECTED; 4654 if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) 4655 lun->flags |= CTL_LUN_NO_MEDIA; 4656 if (be_lun->flags & CTL_LUN_FLAG_STOPPED) 4657 lun->flags |= CTL_LUN_STOPPED; 4658 4659 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4660 lun->flags |= CTL_LUN_PRIMARY_SC; 4661 4662 value = dnvlist_get_string(be_lun->options, "removable", NULL); 4663 if (value != NULL) { 4664 if (strcmp(value, "on") == 0) 4665 lun->flags |= CTL_LUN_REMOVABLE; 4666 } else if (be_lun->lun_type == T_CDROM) 4667 lun->flags |= CTL_LUN_REMOVABLE; 4668 4669 lun->ctl_softc = ctl_softc; 4670 #ifdef CTL_TIME_IO 4671 lun->last_busy = getsbinuptime(); 4672 #endif 4673 TAILQ_INIT(&lun->ooa_queue); 4674 STAILQ_INIT(&lun->error_list); 4675 lun->ie_reported = 1; 4676 callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0); 4677 ctl_tpc_lun_init(lun); 4678 if (lun->flags & CTL_LUN_REMOVABLE) { 4679 lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4, 4680 M_CTL, M_WAITOK); 4681 } 4682 4683 /* 4684 * Initialize the mode and log page index. 4685 */ 4686 ctl_init_page_index(lun); 4687 ctl_init_log_page_index(lun); 4688 4689 /* Setup statistics gathering */ 4690 lun->stats.item = lun_number; 4691 4692 /* 4693 * Now, before we insert this lun on the lun list, set the lun 4694 * inventory changed UA for all other luns. 4695 */ 4696 mtx_lock(&ctl_softc->ctl_lock); 4697 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4698 mtx_lock(&nlun->lun_lock); 4699 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4700 mtx_unlock(&nlun->lun_lock); 4701 } 4702 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4703 ctl_softc->ctl_luns[lun_number] = lun; 4704 ctl_softc->num_luns++; 4705 mtx_unlock(&ctl_softc->ctl_lock); 4706 4707 /* 4708 * We successfully added the LUN, attempt to enable it. 
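	 * If the enable fails we undo the insertion below: the LUN is
	 * unlinked from the list, its bit is cleared in ctl_lun_mask and
	 * the memory is freed, so the caller only sees EIO.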
4709 */ 4710 if (ctl_enable_lun(lun) != 0) { 4711 printf("%s: ctl_enable_lun() failed!\n", __func__); 4712 mtx_lock(&ctl_softc->ctl_lock); 4713 STAILQ_REMOVE(&ctl_softc->lun_list, lun, ctl_lun, links); 4714 ctl_clear_mask(ctl_softc->ctl_lun_mask, lun_number); 4715 ctl_softc->ctl_luns[lun_number] = NULL; 4716 ctl_softc->num_luns--; 4717 mtx_unlock(&ctl_softc->ctl_lock); 4718 free(lun->lun_devid, M_CTL); 4719 free(lun, M_CTL); 4720 return (EIO); 4721 } 4722 4723 return (0); 4724 } 4725 4726 /* 4727 * Free LUN that has no active requests. 4728 */ 4729 static int 4730 ctl_free_lun(struct ctl_lun *lun) 4731 { 4732 struct ctl_softc *softc = lun->ctl_softc; 4733 struct ctl_lun *nlun; 4734 int i; 4735 4736 KASSERT(TAILQ_EMPTY(&lun->ooa_queue), 4737 ("Freeing a LUN %p with outstanding I/O!\n", lun)); 4738 4739 mtx_lock(&softc->ctl_lock); 4740 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4741 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4742 softc->ctl_luns[lun->lun] = NULL; 4743 softc->num_luns--; 4744 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4745 mtx_lock(&nlun->lun_lock); 4746 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4747 mtx_unlock(&nlun->lun_lock); 4748 } 4749 mtx_unlock(&softc->ctl_lock); 4750 4751 /* 4752 * Tell the backend to free resources, if this LUN has a backend. 4753 */ 4754 lun->be_lun->lun_shutdown(lun->be_lun); 4755 4756 lun->ie_reportcnt = UINT32_MAX; 4757 callout_drain(&lun->ie_callout); 4758 ctl_tpc_lun_shutdown(lun); 4759 mtx_destroy(&lun->lun_lock); 4760 free(lun->lun_devid, M_CTL); 4761 for (i = 0; i < ctl_max_ports; i++) 4762 free(lun->pending_ua[i], M_CTL); 4763 free(lun->pending_ua, M_DEVBUF); 4764 for (i = 0; i < ctl_max_ports; i++) 4765 free(lun->pr_keys[i], M_CTL); 4766 free(lun->pr_keys, M_DEVBUF); 4767 free(lun->write_buffer, M_CTL); 4768 free(lun->prevent, M_CTL); 4769 free(lun, M_CTL); 4770 4771 return (0); 4772 } 4773 4774 static int 4775 ctl_enable_lun(struct ctl_lun *lun) 4776 { 4777 struct ctl_softc *softc; 4778 struct ctl_port *port, *nport; 4779 int retval; 4780 4781 softc = lun->ctl_softc; 4782 4783 mtx_lock(&softc->ctl_lock); 4784 mtx_lock(&lun->lun_lock); 4785 KASSERT((lun->flags & CTL_LUN_DISABLED) != 0, 4786 ("%s: LUN not disabled", __func__)); 4787 lun->flags &= ~CTL_LUN_DISABLED; 4788 mtx_unlock(&lun->lun_lock); 4789 4790 STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) { 4791 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4792 port->lun_map != NULL || port->lun_enable == NULL) 4793 continue; 4794 4795 /* 4796 * Drop the lock while we call the FETD's enable routine. 4797 * This can lead to a callback into CTL (at least in the 4798 * case of the internal initiator frontend. 
4799 */ 4800 mtx_unlock(&softc->ctl_lock); 4801 retval = port->lun_enable(port->targ_lun_arg, lun->lun); 4802 mtx_lock(&softc->ctl_lock); 4803 if (retval != 0) { 4804 printf("%s: FETD %s port %d returned error " 4805 "%d for lun_enable on lun %jd\n", 4806 __func__, port->port_name, port->targ_port, 4807 retval, (intmax_t)lun->lun); 4808 } 4809 } 4810 4811 mtx_unlock(&softc->ctl_lock); 4812 ctl_isc_announce_lun(lun); 4813 4814 return (0); 4815 } 4816 4817 static int 4818 ctl_disable_lun(struct ctl_lun *lun) 4819 { 4820 struct ctl_softc *softc; 4821 struct ctl_port *port; 4822 int retval; 4823 4824 softc = lun->ctl_softc; 4825 4826 mtx_lock(&softc->ctl_lock); 4827 mtx_lock(&lun->lun_lock); 4828 KASSERT((lun->flags & CTL_LUN_DISABLED) == 0, 4829 ("%s: LUN not enabled", __func__)); 4830 lun->flags |= CTL_LUN_DISABLED; 4831 mtx_unlock(&lun->lun_lock); 4832 4833 STAILQ_FOREACH(port, &softc->port_list, links) { 4834 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4835 port->lun_map != NULL || port->lun_disable == NULL) 4836 continue; 4837 4838 /* 4839 * Drop the lock before we call the frontend's disable 4840 * routine, to avoid lock order reversals. 4841 * 4842 * XXX KDM what happens if the frontend list changes while 4843 * we're traversing it? It's unlikely, but should be handled. 4844 */ 4845 mtx_unlock(&softc->ctl_lock); 4846 retval = port->lun_disable(port->targ_lun_arg, lun->lun); 4847 mtx_lock(&softc->ctl_lock); 4848 if (retval != 0) { 4849 printf("%s: FETD %s port %d returned error " 4850 "%d for lun_disable on lun %jd\n", 4851 __func__, port->port_name, port->targ_port, 4852 retval, (intmax_t)lun->lun); 4853 } 4854 } 4855 4856 mtx_unlock(&softc->ctl_lock); 4857 ctl_isc_announce_lun(lun); 4858 4859 return (0); 4860 } 4861 4862 int 4863 ctl_start_lun(struct ctl_be_lun *be_lun) 4864 { 4865 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4866 4867 mtx_lock(&lun->lun_lock); 4868 lun->flags &= ~CTL_LUN_STOPPED; 4869 mtx_unlock(&lun->lun_lock); 4870 return (0); 4871 } 4872 4873 int 4874 ctl_stop_lun(struct ctl_be_lun *be_lun) 4875 { 4876 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4877 4878 mtx_lock(&lun->lun_lock); 4879 lun->flags |= CTL_LUN_STOPPED; 4880 mtx_unlock(&lun->lun_lock); 4881 return (0); 4882 } 4883 4884 int 4885 ctl_lun_no_media(struct ctl_be_lun *be_lun) 4886 { 4887 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4888 4889 mtx_lock(&lun->lun_lock); 4890 lun->flags |= CTL_LUN_NO_MEDIA; 4891 mtx_unlock(&lun->lun_lock); 4892 return (0); 4893 } 4894 4895 int 4896 ctl_lun_has_media(struct ctl_be_lun *be_lun) 4897 { 4898 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4899 union ctl_ha_msg msg; 4900 4901 mtx_lock(&lun->lun_lock); 4902 lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED); 4903 if (lun->flags & CTL_LUN_REMOVABLE) 4904 ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE); 4905 mtx_unlock(&lun->lun_lock); 4906 if ((lun->flags & CTL_LUN_REMOVABLE) && 4907 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 4908 bzero(&msg.ua, sizeof(msg.ua)); 4909 msg.hdr.msg_type = CTL_MSG_UA; 4910 msg.hdr.nexus.initid = -1; 4911 msg.hdr.nexus.targ_port = -1; 4912 msg.hdr.nexus.targ_lun = lun->lun; 4913 msg.hdr.nexus.targ_mapped_lun = lun->lun; 4914 msg.ua.ua_all = 1; 4915 msg.ua.ua_set = 1; 4916 msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE; 4917 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 4918 M_WAITOK); 4919 } 4920 return (0); 4921 } 4922 4923 int 4924 ctl_lun_ejected(struct ctl_be_lun *be_lun) 4925 { 4926 struct ctl_lun *lun = (struct ctl_lun 
*)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_EJECTED;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

int
ctl_lun_primary(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_PRIMARY_SC;
	ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
	mtx_unlock(&lun->lun_lock);
	ctl_isc_announce_lun(lun);
	return (0);
}

int
ctl_lun_secondary(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags &= ~CTL_LUN_PRIMARY_SC;
	ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
	mtx_unlock(&lun->lun_lock);
	ctl_isc_announce_lun(lun);
	return (0);
}

/*
 * Remove LUN.  If there are outstanding requests, the LUN is freed once
 * the last of them completes.
 *
 * Returns 0 for success, non-zero (errno) for failure.
 * Completion is reported to the backend via the lun_shutdown() method.
 */
int
ctl_remove_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun;

	lun = (struct ctl_lun *)be_lun->ctl_lun;

	ctl_disable_lun(lun);

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_INVALID;

	/*
	 * If there is nothing in the OOA queue, go ahead and free the LUN.
	 * If we have something in the OOA queue, we'll free it when the
	 * last I/O completes.
	 */
	if (TAILQ_EMPTY(&lun->ooa_queue)) {
		mtx_unlock(&lun->lun_lock);
		ctl_free_lun(lun);
	} else
		mtx_unlock(&lun->lun_lock);

	return (0);
}

void
ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
	union ctl_ha_msg msg;

	mtx_lock(&lun->lun_lock);
	ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE);
	mtx_unlock(&lun->lun_lock);
	if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
		/* Send msg to other side. */
		bzero(&msg.ua, sizeof(msg.ua));
		msg.hdr.msg_type = CTL_MSG_UA;
		msg.hdr.nexus.initid = -1;
		msg.hdr.nexus.targ_port = -1;
		msg.hdr.nexus.targ_lun = lun->lun;
		msg.hdr.nexus.targ_mapped_lun = lun->lun;
		msg.ua.ua_all = 1;
		msg.ua.ua_set = 1;
		msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
		    M_WAITOK);
	}
}

/*
 * Backend "memory move is complete" callback for requests that never
 * make it down to, say, RAIDCore's configuration code.
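 * Roughly: a transfer error or a short data-out transfer gets sense data
 * set here; data-in commands and aborted or failed commands have any
 * buffer we allocated freed and the I/O completed; anything else is a
 * config write whose data just arrived, so ctl_scsiio() is called again
 * to let the command parse it.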
5020 */ 5021 int 5022 ctl_config_move_done(union ctl_io *io) 5023 { 5024 int retval; 5025 5026 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 5027 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 5028 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 5029 5030 if ((io->io_hdr.port_status != 0) && 5031 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5032 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5033 ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, 5034 /*retry_count*/ io->io_hdr.port_status); 5035 } else if (io->scsiio.kern_data_resid != 0 && 5036 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && 5037 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5038 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5039 ctl_set_invalid_field_ciu(&io->scsiio); 5040 } 5041 5042 if (ctl_debug & CTL_DEBUG_CDB_DATA) 5043 ctl_data_print(io); 5044 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 5045 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5046 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 5047 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 5048 /* 5049 * XXX KDM just assuming a single pointer here, and not a 5050 * S/G list. If we start using S/G lists for config data, 5051 * we'll need to know how to clean them up here as well. 5052 */ 5053 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5054 free(io->scsiio.kern_data_ptr, M_CTL); 5055 ctl_done(io); 5056 retval = CTL_RETVAL_COMPLETE; 5057 } else { 5058 /* 5059 * XXX KDM now we need to continue data movement. Some 5060 * options: 5061 * - call ctl_scsiio() again? We don't do this for data 5062 * writes, because for those at least we know ahead of 5063 * time where the write will go and how long it is. For 5064 * config writes, though, that information is largely 5065 * contained within the write itself, thus we need to 5066 * parse out the data again. 5067 * 5068 * - Call some other function once the data is in? 5069 */ 5070 5071 /* 5072 * XXX KDM call ctl_scsiio() again for now, and check flag 5073 * bits to see whether we're allocated or not. 5074 */ 5075 retval = ctl_scsiio(&io->scsiio); 5076 } 5077 return (retval); 5078 } 5079 5080 /* 5081 * This gets called by a backend driver when it is done with a 5082 * data_submit method. 5083 */ 5084 void 5085 ctl_data_submit_done(union ctl_io *io) 5086 { 5087 /* 5088 * If the IO_CONT flag is set, we need to call the supplied 5089 * function to continue processing the I/O, instead of completing 5090 * the I/O just yet. 5091 * 5092 * If there is an error, though, we don't want to keep processing. 5093 * Instead, just send status back to the initiator. 5094 */ 5095 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5096 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5097 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5098 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5099 io->scsiio.io_cont(io); 5100 return; 5101 } 5102 ctl_done(io); 5103 } 5104 5105 /* 5106 * This gets called by a backend driver when it is done with a 5107 * configuration write. 5108 */ 5109 void 5110 ctl_config_write_done(union ctl_io *io) 5111 { 5112 uint8_t *buf; 5113 5114 /* 5115 * If the IO_CONT flag is set, we need to call the supplied 5116 * function to continue processing the I/O, instead of completing 5117 * the I/O just yet. 5118 * 5119 * If there is an error, though, we don't want to keep processing. 5120 * Instead, just send status back to the initiator. 
5121 */ 5122 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5123 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5124 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5125 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5126 io->scsiio.io_cont(io); 5127 return; 5128 } 5129 /* 5130 * Since a configuration write can be done for commands that actually 5131 * have data allocated, like write buffer, and commands that have 5132 * no data, like start/stop unit, we need to check here. 5133 */ 5134 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5135 buf = io->scsiio.kern_data_ptr; 5136 else 5137 buf = NULL; 5138 ctl_done(io); 5139 if (buf) 5140 free(buf, M_CTL); 5141 } 5142 5143 void 5144 ctl_config_read_done(union ctl_io *io) 5145 { 5146 uint8_t *buf; 5147 5148 /* 5149 * If there is some error -- we are done, skip data transfer. 5150 */ 5151 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 5152 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5153 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 5154 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5155 buf = io->scsiio.kern_data_ptr; 5156 else 5157 buf = NULL; 5158 ctl_done(io); 5159 if (buf) 5160 free(buf, M_CTL); 5161 return; 5162 } 5163 5164 /* 5165 * If the IO_CONT flag is set, we need to call the supplied 5166 * function to continue processing the I/O, instead of completing 5167 * the I/O just yet. 5168 */ 5169 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 5170 io->scsiio.io_cont(io); 5171 return; 5172 } 5173 5174 ctl_datamove(io); 5175 } 5176 5177 /* 5178 * SCSI release command. 5179 */ 5180 int 5181 ctl_scsi_release(struct ctl_scsiio *ctsio) 5182 { 5183 struct ctl_lun *lun = CTL_LUN(ctsio); 5184 uint32_t residx; 5185 5186 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 5187 5188 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5189 5190 /* 5191 * XXX KDM right now, we only support LUN reservation. We don't 5192 * support 3rd party reservations, or extent reservations, which 5193 * might actually need the parameter list. If we've gotten this 5194 * far, we've got a LUN reservation. Anything else got kicked out 5195 * above. So, according to SPC, ignore the length. 5196 */ 5197 5198 mtx_lock(&lun->lun_lock); 5199 5200 /* 5201 * According to SPC, it is not an error for an intiator to attempt 5202 * to release a reservation on a LUN that isn't reserved, or that 5203 * is reserved by another initiator. The reservation can only be 5204 * released, though, by the initiator who made it or by one of 5205 * several reset type events. 5206 */ 5207 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 5208 lun->flags &= ~CTL_LUN_RESERVED; 5209 5210 mtx_unlock(&lun->lun_lock); 5211 5212 ctl_set_success(ctsio); 5213 ctl_done((union ctl_io *)ctsio); 5214 return (CTL_RETVAL_COMPLETE); 5215 } 5216 5217 int 5218 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5219 { 5220 struct ctl_lun *lun = CTL_LUN(ctsio); 5221 uint32_t residx; 5222 5223 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5224 5225 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5226 5227 /* 5228 * XXX KDM right now, we only support LUN reservation. We don't 5229 * support 3rd party reservations, or extent reservations, which 5230 * might actually need the parameter list. If we've gotten this 5231 * far, we've got a LUN reservation. Anything else got kicked out 5232 * above. So, according to SPC, ignore the length. 
5233 */ 5234 5235 mtx_lock(&lun->lun_lock); 5236 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 5237 ctl_set_reservation_conflict(ctsio); 5238 goto bailout; 5239 } 5240 5241 /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. */ 5242 if (lun->flags & CTL_LUN_PR_RESERVED) { 5243 ctl_set_success(ctsio); 5244 goto bailout; 5245 } 5246 5247 lun->flags |= CTL_LUN_RESERVED; 5248 lun->res_idx = residx; 5249 ctl_set_success(ctsio); 5250 5251 bailout: 5252 mtx_unlock(&lun->lun_lock); 5253 ctl_done((union ctl_io *)ctsio); 5254 return (CTL_RETVAL_COMPLETE); 5255 } 5256 5257 int 5258 ctl_start_stop(struct ctl_scsiio *ctsio) 5259 { 5260 struct ctl_lun *lun = CTL_LUN(ctsio); 5261 struct scsi_start_stop_unit *cdb; 5262 int retval; 5263 5264 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5265 5266 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5267 5268 if ((cdb->how & SSS_PC_MASK) == 0) { 5269 if ((lun->flags & CTL_LUN_PR_RESERVED) && 5270 (cdb->how & SSS_START) == 0) { 5271 uint32_t residx; 5272 5273 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5274 if (ctl_get_prkey(lun, residx) == 0 || 5275 (lun->pr_res_idx != residx && lun->pr_res_type < 4)) { 5276 ctl_set_reservation_conflict(ctsio); 5277 ctl_done((union ctl_io *)ctsio); 5278 return (CTL_RETVAL_COMPLETE); 5279 } 5280 } 5281 5282 if ((cdb->how & SSS_LOEJ) && 5283 (lun->flags & CTL_LUN_REMOVABLE) == 0) { 5284 ctl_set_invalid_field(ctsio, 5285 /*sks_valid*/ 1, 5286 /*command*/ 1, 5287 /*field*/ 4, 5288 /*bit_valid*/ 1, 5289 /*bit*/ 1); 5290 ctl_done((union ctl_io *)ctsio); 5291 return (CTL_RETVAL_COMPLETE); 5292 } 5293 5294 if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) && 5295 lun->prevent_count > 0) { 5296 /* "Medium removal prevented" */ 5297 ctl_set_sense(ctsio, /*current_error*/ 1, 5298 /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ? 5299 SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST, 5300 /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE); 5301 ctl_done((union ctl_io *)ctsio); 5302 return (CTL_RETVAL_COMPLETE); 5303 } 5304 } 5305 5306 retval = lun->backend->config_write((union ctl_io *)ctsio); 5307 return (retval); 5308 } 5309 5310 int 5311 ctl_prevent_allow(struct ctl_scsiio *ctsio) 5312 { 5313 struct ctl_lun *lun = CTL_LUN(ctsio); 5314 struct scsi_prevent *cdb; 5315 int retval; 5316 uint32_t initidx; 5317 5318 CTL_DEBUG_PRINT(("ctl_prevent_allow\n")); 5319 5320 cdb = (struct scsi_prevent *)ctsio->cdb; 5321 5322 if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) { 5323 ctl_set_invalid_opcode(ctsio); 5324 ctl_done((union ctl_io *)ctsio); 5325 return (CTL_RETVAL_COMPLETE); 5326 } 5327 5328 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5329 mtx_lock(&lun->lun_lock); 5330 if ((cdb->how & PR_PREVENT) && 5331 ctl_is_set(lun->prevent, initidx) == 0) { 5332 ctl_set_mask(lun->prevent, initidx); 5333 lun->prevent_count++; 5334 } else if ((cdb->how & PR_PREVENT) == 0 && 5335 ctl_is_set(lun->prevent, initidx)) { 5336 ctl_clear_mask(lun->prevent, initidx); 5337 lun->prevent_count--; 5338 } 5339 mtx_unlock(&lun->lun_lock); 5340 retval = lun->backend->config_write((union ctl_io *)ctsio); 5341 return (retval); 5342 } 5343 5344 /* 5345 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5346 * we don't really do anything with the LBA and length fields if the user 5347 * passes them in. Instead we'll just flush out the cache for the entire 5348 * LUN. 
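 * The LBA/length pair is only sanity checked: e.g. on a 2048-block LUN
 * (maxlba 2047) a request for 8 blocks at LBA 2040 is accepted, while 9
 * blocks at LBA 2040 fails with LBA OUT OF RANGE; either way a
 * successful command flushes the whole LUN.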
5349 */ 5350 int 5351 ctl_sync_cache(struct ctl_scsiio *ctsio) 5352 { 5353 struct ctl_lun *lun = CTL_LUN(ctsio); 5354 struct ctl_lba_len_flags *lbalen; 5355 uint64_t starting_lba; 5356 uint32_t block_count; 5357 int retval; 5358 uint8_t byte2; 5359 5360 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5361 5362 retval = 0; 5363 5364 switch (ctsio->cdb[0]) { 5365 case SYNCHRONIZE_CACHE: { 5366 struct scsi_sync_cache *cdb; 5367 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5368 5369 starting_lba = scsi_4btoul(cdb->begin_lba); 5370 block_count = scsi_2btoul(cdb->lb_count); 5371 byte2 = cdb->byte2; 5372 break; 5373 } 5374 case SYNCHRONIZE_CACHE_16: { 5375 struct scsi_sync_cache_16 *cdb; 5376 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5377 5378 starting_lba = scsi_8btou64(cdb->begin_lba); 5379 block_count = scsi_4btoul(cdb->lb_count); 5380 byte2 = cdb->byte2; 5381 break; 5382 } 5383 default: 5384 ctl_set_invalid_opcode(ctsio); 5385 ctl_done((union ctl_io *)ctsio); 5386 goto bailout; 5387 break; /* NOTREACHED */ 5388 } 5389 5390 /* 5391 * We check the LBA and length, but don't do anything with them. 5392 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5393 * get flushed. This check will just help satisfy anyone who wants 5394 * to see an error for an out of range LBA. 5395 */ 5396 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5397 ctl_set_lba_out_of_range(ctsio, 5398 MAX(starting_lba, lun->be_lun->maxlba + 1)); 5399 ctl_done((union ctl_io *)ctsio); 5400 goto bailout; 5401 } 5402 5403 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5404 lbalen->lba = starting_lba; 5405 lbalen->len = block_count; 5406 lbalen->flags = byte2; 5407 retval = lun->backend->config_write((union ctl_io *)ctsio); 5408 5409 bailout: 5410 return (retval); 5411 } 5412 5413 int 5414 ctl_format(struct ctl_scsiio *ctsio) 5415 { 5416 struct scsi_format *cdb; 5417 int length, defect_list_len; 5418 5419 CTL_DEBUG_PRINT(("ctl_format\n")); 5420 5421 cdb = (struct scsi_format *)ctsio->cdb; 5422 5423 length = 0; 5424 if (cdb->byte2 & SF_FMTDATA) { 5425 if (cdb->byte2 & SF_LONGLIST) 5426 length = sizeof(struct scsi_format_header_long); 5427 else 5428 length = sizeof(struct scsi_format_header_short); 5429 } 5430 5431 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5432 && (length > 0)) { 5433 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5434 ctsio->kern_data_len = length; 5435 ctsio->kern_total_len = length; 5436 ctsio->kern_rel_offset = 0; 5437 ctsio->kern_sg_entries = 0; 5438 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5439 ctsio->be_move_done = ctl_config_move_done; 5440 ctl_datamove((union ctl_io *)ctsio); 5441 5442 return (CTL_RETVAL_COMPLETE); 5443 } 5444 5445 defect_list_len = 0; 5446 5447 if (cdb->byte2 & SF_FMTDATA) { 5448 if (cdb->byte2 & SF_LONGLIST) { 5449 struct scsi_format_header_long *header; 5450 5451 header = (struct scsi_format_header_long *) 5452 ctsio->kern_data_ptr; 5453 5454 defect_list_len = scsi_4btoul(header->defect_list_len); 5455 if (defect_list_len != 0) { 5456 ctl_set_invalid_field(ctsio, 5457 /*sks_valid*/ 1, 5458 /*command*/ 0, 5459 /*field*/ 2, 5460 /*bit_valid*/ 0, 5461 /*bit*/ 0); 5462 goto bailout; 5463 } 5464 } else { 5465 struct scsi_format_header_short *header; 5466 5467 header = (struct scsi_format_header_short *) 5468 ctsio->kern_data_ptr; 5469 5470 defect_list_len = scsi_2btoul(header->defect_list_len); 5471 if (defect_list_len != 0) { 5472 ctl_set_invalid_field(ctsio, 5473 /*sks_valid*/ 1, 5474 /*command*/ 0, 5475 
/*field*/ 2, 5476 /*bit_valid*/ 0, 5477 /*bit*/ 0); 5478 goto bailout; 5479 } 5480 } 5481 } 5482 5483 ctl_set_success(ctsio); 5484 bailout: 5485 5486 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5487 free(ctsio->kern_data_ptr, M_CTL); 5488 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5489 } 5490 5491 ctl_done((union ctl_io *)ctsio); 5492 return (CTL_RETVAL_COMPLETE); 5493 } 5494 5495 int 5496 ctl_read_buffer(struct ctl_scsiio *ctsio) 5497 { 5498 struct ctl_lun *lun = CTL_LUN(ctsio); 5499 uint64_t buffer_offset; 5500 uint32_t len; 5501 uint8_t byte2; 5502 static uint8_t descr[4]; 5503 static uint8_t echo_descr[4] = { 0 }; 5504 5505 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5506 5507 switch (ctsio->cdb[0]) { 5508 case READ_BUFFER: { 5509 struct scsi_read_buffer *cdb; 5510 5511 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5512 buffer_offset = scsi_3btoul(cdb->offset); 5513 len = scsi_3btoul(cdb->length); 5514 byte2 = cdb->byte2; 5515 break; 5516 } 5517 case READ_BUFFER_16: { 5518 struct scsi_read_buffer_16 *cdb; 5519 5520 cdb = (struct scsi_read_buffer_16 *)ctsio->cdb; 5521 buffer_offset = scsi_8btou64(cdb->offset); 5522 len = scsi_4btoul(cdb->length); 5523 byte2 = cdb->byte2; 5524 break; 5525 } 5526 default: /* This shouldn't happen. */ 5527 ctl_set_invalid_opcode(ctsio); 5528 ctl_done((union ctl_io *)ctsio); 5529 return (CTL_RETVAL_COMPLETE); 5530 } 5531 5532 if (buffer_offset > CTL_WRITE_BUFFER_SIZE || 5533 buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5534 ctl_set_invalid_field(ctsio, 5535 /*sks_valid*/ 1, 5536 /*command*/ 1, 5537 /*field*/ 6, 5538 /*bit_valid*/ 0, 5539 /*bit*/ 0); 5540 ctl_done((union ctl_io *)ctsio); 5541 return (CTL_RETVAL_COMPLETE); 5542 } 5543 5544 if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5545 descr[0] = 0; 5546 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5547 ctsio->kern_data_ptr = descr; 5548 len = min(len, sizeof(descr)); 5549 } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5550 ctsio->kern_data_ptr = echo_descr; 5551 len = min(len, sizeof(echo_descr)); 5552 } else { 5553 if (lun->write_buffer == NULL) { 5554 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5555 M_CTL, M_WAITOK); 5556 } 5557 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5558 } 5559 ctsio->kern_data_len = len; 5560 ctsio->kern_total_len = len; 5561 ctsio->kern_rel_offset = 0; 5562 ctsio->kern_sg_entries = 0; 5563 ctl_set_success(ctsio); 5564 ctsio->be_move_done = ctl_config_move_done; 5565 ctl_datamove((union ctl_io *)ctsio); 5566 return (CTL_RETVAL_COMPLETE); 5567 } 5568 5569 int 5570 ctl_write_buffer(struct ctl_scsiio *ctsio) 5571 { 5572 struct ctl_lun *lun = CTL_LUN(ctsio); 5573 struct scsi_write_buffer *cdb; 5574 int buffer_offset, len; 5575 5576 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5577 5578 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5579 5580 len = scsi_3btoul(cdb->length); 5581 buffer_offset = scsi_3btoul(cdb->offset); 5582 5583 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5584 ctl_set_invalid_field(ctsio, 5585 /*sks_valid*/ 1, 5586 /*command*/ 1, 5587 /*field*/ 6, 5588 /*bit_valid*/ 0, 5589 /*bit*/ 0); 5590 ctl_done((union ctl_io *)ctsio); 5591 return (CTL_RETVAL_COMPLETE); 5592 } 5593 5594 /* 5595 * If we've got a kernel request that hasn't been malloced yet, 5596 * malloc it and tell the caller the data buffer is here. 
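	 * This is the usual two-pass pattern for config commands with a
	 * data-out phase: the first pass sets up kern_data_ptr (here a
	 * window into lun->write_buffer) and starts the datamove, and
	 * once the initiator's data has arrived ctl_config_move_done()
	 * re-dispatches the command so the second pass below can simply
	 * report success.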
5597 */ 5598 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5599 if (lun->write_buffer == NULL) { 5600 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5601 M_CTL, M_WAITOK); 5602 } 5603 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5604 ctsio->kern_data_len = len; 5605 ctsio->kern_total_len = len; 5606 ctsio->kern_rel_offset = 0; 5607 ctsio->kern_sg_entries = 0; 5608 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5609 ctsio->be_move_done = ctl_config_move_done; 5610 ctl_datamove((union ctl_io *)ctsio); 5611 5612 return (CTL_RETVAL_COMPLETE); 5613 } 5614 5615 ctl_set_success(ctsio); 5616 ctl_done((union ctl_io *)ctsio); 5617 return (CTL_RETVAL_COMPLETE); 5618 } 5619 5620 static int 5621 ctl_write_same_cont(union ctl_io *io) 5622 { 5623 struct ctl_lun *lun = CTL_LUN(io); 5624 struct ctl_scsiio *ctsio; 5625 struct ctl_lba_len_flags *lbalen; 5626 int retval; 5627 5628 ctsio = &io->scsiio; 5629 ctsio->io_hdr.status = CTL_STATUS_NONE; 5630 lbalen = (struct ctl_lba_len_flags *) 5631 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5632 lbalen->lba += lbalen->len; 5633 if ((lun->be_lun->maxlba + 1) - lbalen->lba <= UINT32_MAX) { 5634 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 5635 lbalen->len = (lun->be_lun->maxlba + 1) - lbalen->lba; 5636 } 5637 5638 CTL_DEBUG_PRINT(("ctl_write_same_cont: calling config_write()\n")); 5639 retval = lun->backend->config_write((union ctl_io *)ctsio); 5640 return (retval); 5641 } 5642 5643 int 5644 ctl_write_same(struct ctl_scsiio *ctsio) 5645 { 5646 struct ctl_lun *lun = CTL_LUN(ctsio); 5647 struct ctl_lba_len_flags *lbalen; 5648 const char *val; 5649 uint64_t lba, ival; 5650 uint32_t num_blocks; 5651 int len, retval; 5652 uint8_t byte2; 5653 5654 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5655 5656 switch (ctsio->cdb[0]) { 5657 case WRITE_SAME_10: { 5658 struct scsi_write_same_10 *cdb; 5659 5660 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5661 5662 lba = scsi_4btoul(cdb->addr); 5663 num_blocks = scsi_2btoul(cdb->length); 5664 byte2 = cdb->byte2; 5665 break; 5666 } 5667 case WRITE_SAME_16: { 5668 struct scsi_write_same_16 *cdb; 5669 5670 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5671 5672 lba = scsi_8btou64(cdb->addr); 5673 num_blocks = scsi_4btoul(cdb->length); 5674 byte2 = cdb->byte2; 5675 break; 5676 } 5677 default: 5678 /* 5679 * We got a command we don't support. This shouldn't 5680 * happen, commands should be filtered out above us. 5681 */ 5682 ctl_set_invalid_opcode(ctsio); 5683 ctl_done((union ctl_io *)ctsio); 5684 5685 return (CTL_RETVAL_COMPLETE); 5686 break; /* NOTREACHED */ 5687 } 5688 5689 /* ANCHOR flag can be used only together with UNMAP */ 5690 if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) { 5691 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5692 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5693 ctl_done((union ctl_io *)ctsio); 5694 return (CTL_RETVAL_COMPLETE); 5695 } 5696 5697 /* 5698 * The first check is to make sure we're in bounds, the second 5699 * check is to catch wrap-around problems. If the lba + num blocks 5700 * is less than the lba, then we've wrapped around and the block 5701 * range is invalid anyway. 
5702 */ 5703 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5704 || ((lba + num_blocks) < lba)) { 5705 ctl_set_lba_out_of_range(ctsio, 5706 MAX(lba, lun->be_lun->maxlba + 1)); 5707 ctl_done((union ctl_io *)ctsio); 5708 return (CTL_RETVAL_COMPLETE); 5709 } 5710 5711 /* Zero number of blocks means "to the last logical block" */ 5712 if (num_blocks == 0) { 5713 ival = UINT64_MAX; 5714 val = dnvlist_get_string(lun->be_lun->options, 5715 "write_same_max_lba", NULL); 5716 if (val != NULL) 5717 ctl_expand_number(val, &ival); 5718 if ((lun->be_lun->maxlba + 1) - lba > ival) { 5719 ctl_set_invalid_field(ctsio, 5720 /*sks_valid*/ 1, /*command*/ 1, 5721 /*field*/ ctsio->cdb[0] == WRITE_SAME_10 ? 7 : 10, 5722 /*bit_valid*/ 0, /*bit*/ 0); 5723 ctl_done((union ctl_io *)ctsio); 5724 return (CTL_RETVAL_COMPLETE); 5725 } 5726 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5727 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 5728 ctsio->io_cont = ctl_write_same_cont; 5729 num_blocks = 1 << 31; 5730 } else 5731 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5732 } 5733 5734 len = lun->be_lun->blocksize; 5735 5736 /* 5737 * If we've got a kernel request that hasn't been malloced yet, 5738 * malloc it and tell the caller the data buffer is here. 5739 */ 5740 if ((byte2 & SWS_NDOB) == 0 && 5741 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5742 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5743 ctsio->kern_data_len = len; 5744 ctsio->kern_total_len = len; 5745 ctsio->kern_rel_offset = 0; 5746 ctsio->kern_sg_entries = 0; 5747 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5748 ctsio->be_move_done = ctl_config_move_done; 5749 ctl_datamove((union ctl_io *)ctsio); 5750 5751 return (CTL_RETVAL_COMPLETE); 5752 } 5753 5754 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5755 lbalen->lba = lba; 5756 lbalen->len = num_blocks; 5757 lbalen->flags = byte2; 5758 retval = lun->backend->config_write((union ctl_io *)ctsio); 5759 5760 return (retval); 5761 } 5762 5763 int 5764 ctl_unmap(struct ctl_scsiio *ctsio) 5765 { 5766 struct ctl_lun *lun = CTL_LUN(ctsio); 5767 struct scsi_unmap *cdb; 5768 struct ctl_ptr_len_flags *ptrlen; 5769 struct scsi_unmap_header *hdr; 5770 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5771 uint64_t lba; 5772 uint32_t num_blocks; 5773 int len, retval; 5774 uint8_t byte2; 5775 5776 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5777 5778 cdb = (struct scsi_unmap *)ctsio->cdb; 5779 len = scsi_2btoul(cdb->length); 5780 byte2 = cdb->byte2; 5781 5782 /* 5783 * If we've got a kernel request that hasn't been malloced yet, 5784 * malloc it and tell the caller the data buffer is here. 
5785 */ 5786 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5787 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5788 ctsio->kern_data_len = len; 5789 ctsio->kern_total_len = len; 5790 ctsio->kern_rel_offset = 0; 5791 ctsio->kern_sg_entries = 0; 5792 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5793 ctsio->be_move_done = ctl_config_move_done; 5794 ctl_datamove((union ctl_io *)ctsio); 5795 5796 return (CTL_RETVAL_COMPLETE); 5797 } 5798 5799 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5800 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5801 if (len < sizeof (*hdr) || 5802 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5803 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5804 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5805 ctl_set_invalid_field(ctsio, 5806 /*sks_valid*/ 0, 5807 /*command*/ 0, 5808 /*field*/ 0, 5809 /*bit_valid*/ 0, 5810 /*bit*/ 0); 5811 goto done; 5812 } 5813 len = scsi_2btoul(hdr->desc_length); 5814 buf = (struct scsi_unmap_desc *)(hdr + 1); 5815 end = buf + len / sizeof(*buf); 5816 5817 endnz = buf; 5818 for (range = buf; range < end; range++) { 5819 lba = scsi_8btou64(range->lba); 5820 num_blocks = scsi_4btoul(range->length); 5821 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5822 || ((lba + num_blocks) < lba)) { 5823 ctl_set_lba_out_of_range(ctsio, 5824 MAX(lba, lun->be_lun->maxlba + 1)); 5825 ctl_done((union ctl_io *)ctsio); 5826 return (CTL_RETVAL_COMPLETE); 5827 } 5828 if (num_blocks != 0) 5829 endnz = range + 1; 5830 } 5831 5832 /* 5833 * Block backend can not handle zero last range. 5834 * Filter it out and return if there is nothing left. 5835 */ 5836 len = (uint8_t *)endnz - (uint8_t *)buf; 5837 if (len == 0) { 5838 ctl_set_success(ctsio); 5839 goto done; 5840 } 5841 5842 mtx_lock(&lun->lun_lock); 5843 ptrlen = (struct ctl_ptr_len_flags *) 5844 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5845 ptrlen->ptr = (void *)buf; 5846 ptrlen->len = len; 5847 ptrlen->flags = byte2; 5848 ctl_try_unblock_others(lun, (union ctl_io *)ctsio, FALSE); 5849 mtx_unlock(&lun->lun_lock); 5850 5851 retval = lun->backend->config_write((union ctl_io *)ctsio); 5852 return (retval); 5853 5854 done: 5855 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5856 free(ctsio->kern_data_ptr, M_CTL); 5857 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5858 } 5859 ctl_done((union ctl_io *)ctsio); 5860 return (CTL_RETVAL_COMPLETE); 5861 } 5862 5863 int 5864 ctl_default_page_handler(struct ctl_scsiio *ctsio, 5865 struct ctl_page_index *page_index, uint8_t *page_ptr) 5866 { 5867 struct ctl_lun *lun = CTL_LUN(ctsio); 5868 uint8_t *current_cp; 5869 int set_ua; 5870 uint32_t initidx; 5871 5872 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5873 set_ua = 0; 5874 5875 current_cp = (page_index->page_data + (page_index->page_len * 5876 CTL_PAGE_CURRENT)); 5877 5878 mtx_lock(&lun->lun_lock); 5879 if (memcmp(current_cp, page_ptr, page_index->page_len)) { 5880 memcpy(current_cp, page_ptr, page_index->page_len); 5881 set_ua = 1; 5882 } 5883 if (set_ua != 0) 5884 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5885 mtx_unlock(&lun->lun_lock); 5886 if (set_ua) { 5887 ctl_isc_announce_mode(lun, 5888 ctl_get_initindex(&ctsio->io_hdr.nexus), 5889 page_index->page_code, page_index->subpage); 5890 } 5891 return (CTL_RETVAL_COMPLETE); 5892 } 5893 5894 static void 5895 ctl_ie_timer(void *arg) 5896 { 5897 struct ctl_lun *lun = arg; 5898 uint64_t t; 5899 5900 if (lun->ie_asc == 0) 5901 return; 5902 5903 if (lun->MODE_IE.mrie == SIEP_MRIE_UA) 5904 
ctl_est_ua_all(lun, -1, CTL_UA_IE); 5905 else 5906 lun->ie_reported = 0; 5907 5908 if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) { 5909 lun->ie_reportcnt++; 5910 t = scsi_4btoul(lun->MODE_IE.interval_timer); 5911 if (t == 0 || t == UINT32_MAX) 5912 t = 3000; /* 5 min */ 5913 callout_schedule(&lun->ie_callout, t * hz / 10); 5914 } 5915 } 5916 5917 int 5918 ctl_ie_page_handler(struct ctl_scsiio *ctsio, 5919 struct ctl_page_index *page_index, uint8_t *page_ptr) 5920 { 5921 struct ctl_lun *lun = CTL_LUN(ctsio); 5922 struct scsi_info_exceptions_page *pg; 5923 uint64_t t; 5924 5925 (void)ctl_default_page_handler(ctsio, page_index, page_ptr); 5926 5927 pg = (struct scsi_info_exceptions_page *)page_ptr; 5928 mtx_lock(&lun->lun_lock); 5929 if (pg->info_flags & SIEP_FLAGS_TEST) { 5930 lun->ie_asc = 0x5d; 5931 lun->ie_ascq = 0xff; 5932 if (pg->mrie == SIEP_MRIE_UA) { 5933 ctl_est_ua_all(lun, -1, CTL_UA_IE); 5934 lun->ie_reported = 1; 5935 } else { 5936 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 5937 lun->ie_reported = -1; 5938 } 5939 lun->ie_reportcnt = 1; 5940 if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) { 5941 lun->ie_reportcnt++; 5942 t = scsi_4btoul(pg->interval_timer); 5943 if (t == 0 || t == UINT32_MAX) 5944 t = 3000; /* 5 min */ 5945 callout_reset(&lun->ie_callout, t * hz / 10, 5946 ctl_ie_timer, lun); 5947 } 5948 } else { 5949 lun->ie_asc = 0; 5950 lun->ie_ascq = 0; 5951 lun->ie_reported = 1; 5952 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 5953 lun->ie_reportcnt = UINT32_MAX; 5954 callout_stop(&lun->ie_callout); 5955 } 5956 mtx_unlock(&lun->lun_lock); 5957 return (CTL_RETVAL_COMPLETE); 5958 } 5959 5960 static int 5961 ctl_do_mode_select(union ctl_io *io) 5962 { 5963 struct ctl_lun *lun = CTL_LUN(io); 5964 struct scsi_mode_page_header *page_header; 5965 struct ctl_page_index *page_index; 5966 struct ctl_scsiio *ctsio; 5967 int page_len, page_len_offset, page_len_size; 5968 union ctl_modepage_info *modepage_info; 5969 uint16_t *len_left, *len_used; 5970 int retval, i; 5971 5972 ctsio = &io->scsiio; 5973 page_index = NULL; 5974 page_len = 0; 5975 5976 modepage_info = (union ctl_modepage_info *) 5977 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 5978 len_left = &modepage_info->header.len_left; 5979 len_used = &modepage_info->header.len_used; 5980 5981 do_next_page: 5982 5983 page_header = (struct scsi_mode_page_header *) 5984 (ctsio->kern_data_ptr + *len_used); 5985 5986 if (*len_left == 0) { 5987 free(ctsio->kern_data_ptr, M_CTL); 5988 ctl_set_success(ctsio); 5989 ctl_done((union ctl_io *)ctsio); 5990 return (CTL_RETVAL_COMPLETE); 5991 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 5992 free(ctsio->kern_data_ptr, M_CTL); 5993 ctl_set_param_len_error(ctsio); 5994 ctl_done((union ctl_io *)ctsio); 5995 return (CTL_RETVAL_COMPLETE); 5996 5997 } else if ((page_header->page_code & SMPH_SPF) 5998 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 5999 free(ctsio->kern_data_ptr, M_CTL); 6000 ctl_set_param_len_error(ctsio); 6001 ctl_done((union ctl_io *)ctsio); 6002 return (CTL_RETVAL_COMPLETE); 6003 } 6004 6005 /* 6006 * XXX KDM should we do something with the block descriptor? 
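* The block descriptor bytes were already accounted for in len_used by ctl_mode_select(), so they are simply skipped here.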
6007 */ 6008 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6009 page_index = &lun->mode_pages.index[i]; 6010 if (lun->be_lun->lun_type == T_DIRECT && 6011 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6012 continue; 6013 if (lun->be_lun->lun_type == T_PROCESSOR && 6014 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6015 continue; 6016 if (lun->be_lun->lun_type == T_CDROM && 6017 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6018 continue; 6019 6020 if ((page_index->page_code & SMPH_PC_MASK) != 6021 (page_header->page_code & SMPH_PC_MASK)) 6022 continue; 6023 6024 /* 6025 * If neither page has a subpage code, then we've got a 6026 * match. 6027 */ 6028 if (((page_index->page_code & SMPH_SPF) == 0) 6029 && ((page_header->page_code & SMPH_SPF) == 0)) { 6030 page_len = page_header->page_length; 6031 break; 6032 } 6033 6034 /* 6035 * If both pages have subpages, then the subpage numbers 6036 * have to match. 6037 */ 6038 if ((page_index->page_code & SMPH_SPF) 6039 && (page_header->page_code & SMPH_SPF)) { 6040 struct scsi_mode_page_header_sp *sph; 6041 6042 sph = (struct scsi_mode_page_header_sp *)page_header; 6043 if (page_index->subpage == sph->subpage) { 6044 page_len = scsi_2btoul(sph->page_length); 6045 break; 6046 } 6047 } 6048 } 6049 6050 /* 6051 * If we couldn't find the page, or if we don't have a mode select 6052 * handler for it, send back an error to the user. 6053 */ 6054 if ((i >= CTL_NUM_MODE_PAGES) 6055 || (page_index->select_handler == NULL)) { 6056 ctl_set_invalid_field(ctsio, 6057 /*sks_valid*/ 1, 6058 /*command*/ 0, 6059 /*field*/ *len_used, 6060 /*bit_valid*/ 0, 6061 /*bit*/ 0); 6062 free(ctsio->kern_data_ptr, M_CTL); 6063 ctl_done((union ctl_io *)ctsio); 6064 return (CTL_RETVAL_COMPLETE); 6065 } 6066 6067 if (page_index->page_code & SMPH_SPF) { 6068 page_len_offset = 2; 6069 page_len_size = 2; 6070 } else { 6071 page_len_size = 1; 6072 page_len_offset = 1; 6073 } 6074 6075 /* 6076 * If the length the initiator gives us isn't the one we specify in 6077 * the mode page header, or if they didn't specify enough data in 6078 * the CDB to avoid truncating this page, kick out the request. 6079 */ 6080 if (page_len != page_index->page_len - page_len_offset - page_len_size) { 6081 ctl_set_invalid_field(ctsio, 6082 /*sks_valid*/ 1, 6083 /*command*/ 0, 6084 /*field*/ *len_used + page_len_offset, 6085 /*bit_valid*/ 0, 6086 /*bit*/ 0); 6087 free(ctsio->kern_data_ptr, M_CTL); 6088 ctl_done((union ctl_io *)ctsio); 6089 return (CTL_RETVAL_COMPLETE); 6090 } 6091 if (*len_left < page_index->page_len) { 6092 free(ctsio->kern_data_ptr, M_CTL); 6093 ctl_set_param_len_error(ctsio); 6094 ctl_done((union ctl_io *)ctsio); 6095 return (CTL_RETVAL_COMPLETE); 6096 } 6097 6098 /* 6099 * Run through the mode page, checking to make sure that the bits 6100 * the user changed are actually legal for him to change. 6101 */ 6102 for (i = 0; i < page_index->page_len; i++) { 6103 uint8_t *user_byte, *change_mask, *current_byte; 6104 int bad_bit; 6105 int j; 6106 6107 user_byte = (uint8_t *)page_header + i; 6108 change_mask = page_index->page_data + 6109 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6110 current_byte = page_index->page_data + 6111 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6112 6113 /* 6114 * Check to see whether the user set any bits in this byte 6115 * that he is not allowed to set. 6116 */ 6117 if ((*user_byte & ~(*change_mask)) == 6118 (*current_byte & ~(*change_mask))) 6119 continue; 6120 6121 /* 6122 * Go through bit by bit to determine which one is illegal. 
6123 */ 6124 bad_bit = 0; 6125 for (j = 7; j >= 0; j--) { 6126 if ((((1 << i) & ~(*change_mask)) & *user_byte) != 6127 (((1 << i) & ~(*change_mask)) & *current_byte)) { 6128 bad_bit = i; 6129 break; 6130 } 6131 } 6132 ctl_set_invalid_field(ctsio, 6133 /*sks_valid*/ 1, 6134 /*command*/ 0, 6135 /*field*/ *len_used + i, 6136 /*bit_valid*/ 1, 6137 /*bit*/ bad_bit); 6138 free(ctsio->kern_data_ptr, M_CTL); 6139 ctl_done((union ctl_io *)ctsio); 6140 return (CTL_RETVAL_COMPLETE); 6141 } 6142 6143 /* 6144 * Decrement these before we call the page handler, since we may 6145 * end up getting called back one way or another before the handler 6146 * returns to this context. 6147 */ 6148 *len_left -= page_index->page_len; 6149 *len_used += page_index->page_len; 6150 6151 retval = page_index->select_handler(ctsio, page_index, 6152 (uint8_t *)page_header); 6153 6154 /* 6155 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 6156 * wait until this queued command completes to finish processing 6157 * the mode page. If it returns anything other than 6158 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 6159 * already set the sense information, freed the data pointer, and 6160 * completed the io for us. 6161 */ 6162 if (retval != CTL_RETVAL_COMPLETE) 6163 goto bailout_no_done; 6164 6165 /* 6166 * If the initiator sent us more than one page, parse the next one. 6167 */ 6168 if (*len_left > 0) 6169 goto do_next_page; 6170 6171 ctl_set_success(ctsio); 6172 free(ctsio->kern_data_ptr, M_CTL); 6173 ctl_done((union ctl_io *)ctsio); 6174 6175 bailout_no_done: 6176 6177 return (CTL_RETVAL_COMPLETE); 6178 6179 } 6180 6181 int 6182 ctl_mode_select(struct ctl_scsiio *ctsio) 6183 { 6184 struct ctl_lun *lun = CTL_LUN(ctsio); 6185 union ctl_modepage_info *modepage_info; 6186 int bd_len, i, header_size, param_len, rtd; 6187 uint32_t initidx; 6188 6189 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 6190 switch (ctsio->cdb[0]) { 6191 case MODE_SELECT_6: { 6192 struct scsi_mode_select_6 *cdb; 6193 6194 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6195 6196 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6197 param_len = cdb->length; 6198 header_size = sizeof(struct scsi_mode_header_6); 6199 break; 6200 } 6201 case MODE_SELECT_10: { 6202 struct scsi_mode_select_10 *cdb; 6203 6204 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6205 6206 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6207 param_len = scsi_2btoul(cdb->length); 6208 header_size = sizeof(struct scsi_mode_header_10); 6209 break; 6210 } 6211 default: 6212 ctl_set_invalid_opcode(ctsio); 6213 ctl_done((union ctl_io *)ctsio); 6214 return (CTL_RETVAL_COMPLETE); 6215 } 6216 6217 if (rtd) { 6218 if (param_len != 0) { 6219 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, 6220 /*command*/ 1, /*field*/ 0, 6221 /*bit_valid*/ 0, /*bit*/ 0); 6222 ctl_done((union ctl_io *)ctsio); 6223 return (CTL_RETVAL_COMPLETE); 6224 } 6225 6226 /* Revert to defaults. */ 6227 ctl_init_page_index(lun); 6228 mtx_lock(&lun->lun_lock); 6229 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 6230 mtx_unlock(&lun->lun_lock); 6231 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6232 ctl_isc_announce_mode(lun, -1, 6233 lun->mode_pages.index[i].page_code & SMPH_PC_MASK, 6234 lun->mode_pages.index[i].subpage); 6235 } 6236 ctl_set_success(ctsio); 6237 ctl_done((union ctl_io *)ctsio); 6238 return (CTL_RETVAL_COMPLETE); 6239 } 6240 6241 /* 6242 * From SPC-3: 6243 * "A parameter list length of zero indicates that the Data-Out Buffer 6244 * shall be empty. 
This condition shall not be considered as an error." 6245 */ 6246 if (param_len == 0) { 6247 ctl_set_success(ctsio); 6248 ctl_done((union ctl_io *)ctsio); 6249 return (CTL_RETVAL_COMPLETE); 6250 } 6251 6252 /* 6253 * Since we'll hit this the first time through, prior to 6254 * allocation, we don't need to free a data buffer here. 6255 */ 6256 if (param_len < header_size) { 6257 ctl_set_param_len_error(ctsio); 6258 ctl_done((union ctl_io *)ctsio); 6259 return (CTL_RETVAL_COMPLETE); 6260 } 6261 6262 /* 6263 * Allocate the data buffer and grab the user's data. In theory, 6264 * we shouldn't have to sanity check the parameter list length here 6265 * because the maximum size is 64K. We should be able to malloc 6266 * that much without too many problems. 6267 */ 6268 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6269 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6270 ctsio->kern_data_len = param_len; 6271 ctsio->kern_total_len = param_len; 6272 ctsio->kern_rel_offset = 0; 6273 ctsio->kern_sg_entries = 0; 6274 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6275 ctsio->be_move_done = ctl_config_move_done; 6276 ctl_datamove((union ctl_io *)ctsio); 6277 6278 return (CTL_RETVAL_COMPLETE); 6279 } 6280 6281 switch (ctsio->cdb[0]) { 6282 case MODE_SELECT_6: { 6283 struct scsi_mode_header_6 *mh6; 6284 6285 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6286 bd_len = mh6->blk_desc_len; 6287 break; 6288 } 6289 case MODE_SELECT_10: { 6290 struct scsi_mode_header_10 *mh10; 6291 6292 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6293 bd_len = scsi_2btoul(mh10->blk_desc_len); 6294 break; 6295 } 6296 default: 6297 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6298 } 6299 6300 if (param_len < (header_size + bd_len)) { 6301 free(ctsio->kern_data_ptr, M_CTL); 6302 ctl_set_param_len_error(ctsio); 6303 ctl_done((union ctl_io *)ctsio); 6304 return (CTL_RETVAL_COMPLETE); 6305 } 6306 6307 /* 6308 * Set the IO_CONT flag, so that if this I/O gets passed to 6309 * ctl_config_write_done(), it'll get passed back to 6310 * ctl_do_mode_select() for further processing, or completion if 6311 * we're all done. 
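* The parameter list parsed here consists of the mode parameter header (6- or 10-byte variant), bd_len bytes of block descriptor(s), and then one or more mode pages, each led by a page header. ctl_do_mode_select() walks those pages one at a time using the len_left/len_used counters kept in the IO's modepage_info.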
6312 */ 6313 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6314 ctsio->io_cont = ctl_do_mode_select; 6315 6316 modepage_info = (union ctl_modepage_info *) 6317 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6318 memset(modepage_info, 0, sizeof(*modepage_info)); 6319 modepage_info->header.len_left = param_len - header_size - bd_len; 6320 modepage_info->header.len_used = header_size + bd_len; 6321 6322 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6323 } 6324 6325 int 6326 ctl_mode_sense(struct ctl_scsiio *ctsio) 6327 { 6328 struct ctl_lun *lun = CTL_LUN(ctsio); 6329 int pc, page_code, llba, subpage; 6330 int alloc_len, page_len, header_len, bd_len, total_len; 6331 void *block_desc; 6332 struct ctl_page_index *page_index; 6333 6334 llba = 0; 6335 6336 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6337 6338 switch (ctsio->cdb[0]) { 6339 case MODE_SENSE_6: { 6340 struct scsi_mode_sense_6 *cdb; 6341 6342 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6343 6344 header_len = sizeof(struct scsi_mode_hdr_6); 6345 if (cdb->byte2 & SMS_DBD) 6346 bd_len = 0; 6347 else 6348 bd_len = sizeof(struct scsi_mode_block_descr); 6349 header_len += bd_len; 6350 6351 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6352 page_code = cdb->page & SMS_PAGE_CODE; 6353 subpage = cdb->subpage; 6354 alloc_len = cdb->length; 6355 break; 6356 } 6357 case MODE_SENSE_10: { 6358 struct scsi_mode_sense_10 *cdb; 6359 6360 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6361 6362 header_len = sizeof(struct scsi_mode_hdr_10); 6363 if (cdb->byte2 & SMS_DBD) { 6364 bd_len = 0; 6365 } else if (lun->be_lun->lun_type == T_DIRECT) { 6366 if (cdb->byte2 & SMS10_LLBAA) { 6367 llba = 1; 6368 bd_len = sizeof(struct scsi_mode_block_descr_dlong); 6369 } else 6370 bd_len = sizeof(struct scsi_mode_block_descr_dshort); 6371 } else 6372 bd_len = sizeof(struct scsi_mode_block_descr); 6373 header_len += bd_len; 6374 6375 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6376 page_code = cdb->page & SMS_PAGE_CODE; 6377 subpage = cdb->subpage; 6378 alloc_len = scsi_2btoul(cdb->length); 6379 break; 6380 } 6381 default: 6382 ctl_set_invalid_opcode(ctsio); 6383 ctl_done((union ctl_io *)ctsio); 6384 return (CTL_RETVAL_COMPLETE); 6385 break; /* NOTREACHED */ 6386 } 6387 6388 /* 6389 * We have to make a first pass through to calculate the size of 6390 * the pages that match the user's query. Then we allocate enough 6391 * memory to hold it, and actually copy the data into the buffer. 6392 */ 6393 switch (page_code) { 6394 case SMS_ALL_PAGES_PAGE: { 6395 u_int i; 6396 6397 page_len = 0; 6398 6399 /* 6400 * At the moment, values other than 0 and 0xff here are 6401 * reserved according to SPC-3. 
6402 */ 6403 if ((subpage != SMS_SUBPAGE_PAGE_0) 6404 && (subpage != SMS_SUBPAGE_ALL)) { 6405 ctl_set_invalid_field(ctsio, 6406 /*sks_valid*/ 1, 6407 /*command*/ 1, 6408 /*field*/ 3, 6409 /*bit_valid*/ 0, 6410 /*bit*/ 0); 6411 ctl_done((union ctl_io *)ctsio); 6412 return (CTL_RETVAL_COMPLETE); 6413 } 6414 6415 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6416 page_index = &lun->mode_pages.index[i]; 6417 6418 /* Make sure the page is supported for this dev type */ 6419 if (lun->be_lun->lun_type == T_DIRECT && 6420 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6421 continue; 6422 if (lun->be_lun->lun_type == T_PROCESSOR && 6423 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6424 continue; 6425 if (lun->be_lun->lun_type == T_CDROM && 6426 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6427 continue; 6428 6429 /* 6430 * We don't use this subpage if the user didn't 6431 * request all subpages. 6432 */ 6433 if ((page_index->subpage != 0) 6434 && (subpage == SMS_SUBPAGE_PAGE_0)) 6435 continue; 6436 6437 page_len += page_index->page_len; 6438 } 6439 break; 6440 } 6441 default: { 6442 u_int i; 6443 6444 page_len = 0; 6445 6446 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6447 page_index = &lun->mode_pages.index[i]; 6448 6449 /* Make sure the page is supported for this dev type */ 6450 if (lun->be_lun->lun_type == T_DIRECT && 6451 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6452 continue; 6453 if (lun->be_lun->lun_type == T_PROCESSOR && 6454 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6455 continue; 6456 if (lun->be_lun->lun_type == T_CDROM && 6457 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6458 continue; 6459 6460 /* Look for the right page code */ 6461 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6462 continue; 6463 6464 /* Look for the right subpage or the subpage wildcard*/ 6465 if ((page_index->subpage != subpage) 6466 && (subpage != SMS_SUBPAGE_ALL)) 6467 continue; 6468 6469 page_len += page_index->page_len; 6470 } 6471 6472 if (page_len == 0) { 6473 ctl_set_invalid_field(ctsio, 6474 /*sks_valid*/ 1, 6475 /*command*/ 1, 6476 /*field*/ 2, 6477 /*bit_valid*/ 1, 6478 /*bit*/ 5); 6479 ctl_done((union ctl_io *)ctsio); 6480 return (CTL_RETVAL_COMPLETE); 6481 } 6482 break; 6483 } 6484 } 6485 6486 total_len = header_len + page_len; 6487 6488 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6489 ctsio->kern_sg_entries = 0; 6490 ctsio->kern_rel_offset = 0; 6491 ctsio->kern_data_len = min(total_len, alloc_len); 6492 ctsio->kern_total_len = ctsio->kern_data_len; 6493 6494 switch (ctsio->cdb[0]) { 6495 case MODE_SENSE_6: { 6496 struct scsi_mode_hdr_6 *header; 6497 6498 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6499 6500 header->datalen = MIN(total_len - 1, 254); 6501 if (lun->be_lun->lun_type == T_DIRECT) { 6502 header->dev_specific = 0x10; /* DPOFUA */ 6503 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6504 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6505 header->dev_specific |= 0x80; /* WP */ 6506 } 6507 header->block_descr_len = bd_len; 6508 block_desc = &header[1]; 6509 break; 6510 } 6511 case MODE_SENSE_10: { 6512 struct scsi_mode_hdr_10 *header; 6513 int datalen; 6514 6515 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6516 6517 datalen = MIN(total_len - 2, 65533); 6518 scsi_ulto2b(datalen, header->datalen); 6519 if (lun->be_lun->lun_type == T_DIRECT) { 6520 header->dev_specific = 0x10; /* DPOFUA */ 6521 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6522 (lun->MODE_CTRL.eca_and_aen & 
SCP_SWP) != 0) 6523 header->dev_specific |= 0x80; /* WP */ 6524 } 6525 if (llba) 6526 header->flags |= SMH_LONGLBA; 6527 scsi_ulto2b(bd_len, header->block_descr_len); 6528 block_desc = &header[1]; 6529 break; 6530 } 6531 default: 6532 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6533 } 6534 6535 /* 6536 * If we've got a disk, use its blocksize in the block 6537 * descriptor. Otherwise, just set it to 0. 6538 */ 6539 if (bd_len > 0) { 6540 if (lun->be_lun->lun_type == T_DIRECT) { 6541 if (llba) { 6542 struct scsi_mode_block_descr_dlong *bd = block_desc; 6543 if (lun->be_lun->maxlba != 0) 6544 scsi_u64to8b(lun->be_lun->maxlba + 1, 6545 bd->num_blocks); 6546 scsi_ulto4b(lun->be_lun->blocksize, 6547 bd->block_len); 6548 } else { 6549 struct scsi_mode_block_descr_dshort *bd = block_desc; 6550 if (lun->be_lun->maxlba != 0) 6551 scsi_ulto4b(MIN(lun->be_lun->maxlba+1, 6552 UINT32_MAX), bd->num_blocks); 6553 scsi_ulto3b(lun->be_lun->blocksize, 6554 bd->block_len); 6555 } 6556 } else { 6557 struct scsi_mode_block_descr *bd = block_desc; 6558 scsi_ulto3b(0, bd->block_len); 6559 } 6560 } 6561 6562 switch (page_code) { 6563 case SMS_ALL_PAGES_PAGE: { 6564 int i, data_used; 6565 6566 data_used = header_len; 6567 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6568 struct ctl_page_index *page_index; 6569 6570 page_index = &lun->mode_pages.index[i]; 6571 if (lun->be_lun->lun_type == T_DIRECT && 6572 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6573 continue; 6574 if (lun->be_lun->lun_type == T_PROCESSOR && 6575 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6576 continue; 6577 if (lun->be_lun->lun_type == T_CDROM && 6578 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6579 continue; 6580 6581 /* 6582 * We don't use this subpage if the user didn't 6583 * request all subpages. We already checked (above) 6584 * to make sure the user only specified a subpage 6585 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6586 */ 6587 if ((page_index->subpage != 0) 6588 && (subpage == SMS_SUBPAGE_PAGE_0)) 6589 continue; 6590 6591 /* 6592 * Call the handler, if it exists, to update the 6593 * page to the latest values. 6594 */ 6595 if (page_index->sense_handler != NULL) 6596 page_index->sense_handler(ctsio, page_index,pc); 6597 6598 memcpy(ctsio->kern_data_ptr + data_used, 6599 page_index->page_data + 6600 (page_index->page_len * pc), 6601 page_index->page_len); 6602 data_used += page_index->page_len; 6603 } 6604 break; 6605 } 6606 default: { 6607 int i, data_used; 6608 6609 data_used = header_len; 6610 6611 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6612 struct ctl_page_index *page_index; 6613 6614 page_index = &lun->mode_pages.index[i]; 6615 6616 /* Look for the right page code */ 6617 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6618 continue; 6619 6620 /* Look for the right subpage or the subpage wildcard*/ 6621 if ((page_index->subpage != subpage) 6622 && (subpage != SMS_SUBPAGE_ALL)) 6623 continue; 6624 6625 /* Make sure the page is supported for this dev type */ 6626 if (lun->be_lun->lun_type == T_DIRECT && 6627 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6628 continue; 6629 if (lun->be_lun->lun_type == T_PROCESSOR && 6630 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6631 continue; 6632 if (lun->be_lun->lun_type == T_CDROM && 6633 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6634 continue; 6635 6636 /* 6637 * Call the handler, if it exists, to update the 6638 * page to the latest values. 
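* (Handlers are optional; pages without one are static and are copied as-is from page_data.)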
6639 */ 6640 if (page_index->sense_handler != NULL) 6641 page_index->sense_handler(ctsio, page_index,pc); 6642 6643 memcpy(ctsio->kern_data_ptr + data_used, 6644 page_index->page_data + 6645 (page_index->page_len * pc), 6646 page_index->page_len); 6647 data_used += page_index->page_len; 6648 } 6649 break; 6650 } 6651 } 6652 6653 ctl_set_success(ctsio); 6654 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6655 ctsio->be_move_done = ctl_config_move_done; 6656 ctl_datamove((union ctl_io *)ctsio); 6657 return (CTL_RETVAL_COMPLETE); 6658 } 6659 6660 int 6661 ctl_temp_log_sense_handler(struct ctl_scsiio *ctsio, 6662 struct ctl_page_index *page_index, 6663 int pc) 6664 { 6665 struct ctl_lun *lun = CTL_LUN(ctsio); 6666 struct scsi_log_temperature *data; 6667 const char *value; 6668 6669 data = (struct scsi_log_temperature *)page_index->page_data; 6670 6671 scsi_ulto2b(SLP_TEMPERATURE, data->hdr.param_code); 6672 data->hdr.param_control = SLP_LBIN; 6673 data->hdr.param_len = sizeof(struct scsi_log_temperature) - 6674 sizeof(struct scsi_log_param_header); 6675 if ((value = dnvlist_get_string(lun->be_lun->options, "temperature", 6676 NULL)) != NULL) 6677 data->temperature = strtol(value, NULL, 0); 6678 else 6679 data->temperature = 0xff; 6680 data++; 6681 6682 scsi_ulto2b(SLP_REFTEMPERATURE, data->hdr.param_code); 6683 data->hdr.param_control = SLP_LBIN; 6684 data->hdr.param_len = sizeof(struct scsi_log_temperature) - 6685 sizeof(struct scsi_log_param_header); 6686 if ((value = dnvlist_get_string(lun->be_lun->options, "reftemperature", 6687 NULL)) != NULL) 6688 data->temperature = strtol(value, NULL, 0); 6689 else 6690 data->temperature = 0xff; 6691 return (0); 6692 } 6693 6694 int 6695 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6696 struct ctl_page_index *page_index, 6697 int pc) 6698 { 6699 struct ctl_lun *lun = CTL_LUN(ctsio); 6700 struct scsi_log_param_header *phdr; 6701 uint8_t *data; 6702 uint64_t val; 6703 6704 data = page_index->page_data; 6705 6706 if (lun->backend->lun_attr != NULL && 6707 (val = lun->backend->lun_attr(lun->be_lun, "blocksavail")) 6708 != UINT64_MAX) { 6709 phdr = (struct scsi_log_param_header *)data; 6710 scsi_ulto2b(0x0001, phdr->param_code); 6711 phdr->param_control = SLP_LBIN | SLP_LP; 6712 phdr->param_len = 8; 6713 data = (uint8_t *)(phdr + 1); 6714 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6715 data[4] = 0x02; /* per-pool */ 6716 data += phdr->param_len; 6717 } 6718 6719 if (lun->backend->lun_attr != NULL && 6720 (val = lun->backend->lun_attr(lun->be_lun, "blocksused")) 6721 != UINT64_MAX) { 6722 phdr = (struct scsi_log_param_header *)data; 6723 scsi_ulto2b(0x0002, phdr->param_code); 6724 phdr->param_control = SLP_LBIN | SLP_LP; 6725 phdr->param_len = 8; 6726 data = (uint8_t *)(phdr + 1); 6727 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6728 data[4] = 0x01; /* per-LUN */ 6729 data += phdr->param_len; 6730 } 6731 6732 if (lun->backend->lun_attr != NULL && 6733 (val = lun->backend->lun_attr(lun->be_lun, "poolblocksavail")) 6734 != UINT64_MAX) { 6735 phdr = (struct scsi_log_param_header *)data; 6736 scsi_ulto2b(0x00f1, phdr->param_code); 6737 phdr->param_control = SLP_LBIN | SLP_LP; 6738 phdr->param_len = 8; 6739 data = (uint8_t *)(phdr + 1); 6740 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6741 data[4] = 0x02; /* per-pool */ 6742 data += phdr->param_len; 6743 } 6744 6745 if (lun->backend->lun_attr != NULL && 6746 (val = lun->backend->lun_attr(lun->be_lun, "poolblocksused")) 6747 != UINT64_MAX) { 6748 phdr = (struct scsi_log_param_header *)data; 6749 
scsi_ulto2b(0x00f2, phdr->param_code); 6750 phdr->param_control = SLP_LBIN | SLP_LP; 6751 phdr->param_len = 8; 6752 data = (uint8_t *)(phdr + 1); 6753 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6754 data[4] = 0x02; /* per-pool */ 6755 data += phdr->param_len; 6756 } 6757 6758 page_index->page_len = data - page_index->page_data; 6759 return (0); 6760 } 6761 6762 int 6763 ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, 6764 struct ctl_page_index *page_index, 6765 int pc) 6766 { 6767 struct ctl_lun *lun = CTL_LUN(ctsio); 6768 struct stat_page *data; 6769 struct bintime *t; 6770 6771 data = (struct stat_page *)page_index->page_data; 6772 6773 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); 6774 data->sap.hdr.param_control = SLP_LBIN; 6775 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - 6776 sizeof(struct scsi_log_param_header); 6777 scsi_u64to8b(lun->stats.operations[CTL_STATS_READ], 6778 data->sap.read_num); 6779 scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE], 6780 data->sap.write_num); 6781 if (lun->be_lun->blocksize > 0) { 6782 scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] / 6783 lun->be_lun->blocksize, data->sap.recvieved_lba); 6784 scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] / 6785 lun->be_lun->blocksize, data->sap.transmitted_lba); 6786 } 6787 t = &lun->stats.time[CTL_STATS_READ]; 6788 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), 6789 data->sap.read_int); 6790 t = &lun->stats.time[CTL_STATS_WRITE]; 6791 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), 6792 data->sap.write_int); 6793 scsi_u64to8b(0, data->sap.weighted_num); 6794 scsi_u64to8b(0, data->sap.weighted_int); 6795 scsi_ulto2b(SLP_IT, data->it.hdr.param_code); 6796 data->it.hdr.param_control = SLP_LBIN; 6797 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - 6798 sizeof(struct scsi_log_param_header); 6799 #ifdef CTL_TIME_IO 6800 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); 6801 #endif 6802 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); 6803 data->ti.hdr.param_control = SLP_LBIN; 6804 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - 6805 sizeof(struct scsi_log_param_header); 6806 scsi_ulto4b(3, data->ti.exponent); 6807 scsi_ulto4b(1, data->ti.integer); 6808 return (0); 6809 } 6810 6811 int 6812 ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio, 6813 struct ctl_page_index *page_index, 6814 int pc) 6815 { 6816 struct ctl_lun *lun = CTL_LUN(ctsio); 6817 struct scsi_log_informational_exceptions *data; 6818 const char *value; 6819 6820 data = (struct scsi_log_informational_exceptions *)page_index->page_data; 6821 6822 scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code); 6823 data->hdr.param_control = SLP_LBIN; 6824 data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) - 6825 sizeof(struct scsi_log_param_header); 6826 data->ie_asc = lun->ie_asc; 6827 data->ie_ascq = lun->ie_ascq; 6828 if ((value = dnvlist_get_string(lun->be_lun->options, "temperature", 6829 NULL)) != NULL) 6830 data->temperature = strtol(value, NULL, 0); 6831 else 6832 data->temperature = 0xff; 6833 return (0); 6834 } 6835 6836 int 6837 ctl_log_sense(struct ctl_scsiio *ctsio) 6838 { 6839 struct ctl_lun *lun = CTL_LUN(ctsio); 6840 int i, pc, page_code, subpage; 6841 int alloc_len, total_len; 6842 struct ctl_page_index *page_index; 6843 struct scsi_log_sense *cdb; 6844 struct scsi_log_header *header; 6845 6846 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 6847 6848 cdb = (struct scsi_log_sense *)ctsio->cdb; 6849 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6;
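/* The upper two bits of the page byte are the page control (PC); the remaining bits are the page code itself. */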
page_code = cdb->page & SLS_PAGE_CODE; 6851 subpage = cdb->subpage; 6852 alloc_len = scsi_2btoul(cdb->length); 6853 6854 page_index = NULL; 6855 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 6856 page_index = &lun->log_pages.index[i]; 6857 6858 /* Look for the right page code */ 6859 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 6860 continue; 6861 6862 /* Look for the right subpage or the subpage wildcard*/ 6863 if (page_index->subpage != subpage) 6864 continue; 6865 6866 break; 6867 } 6868 if (i >= CTL_NUM_LOG_PAGES) { 6869 ctl_set_invalid_field(ctsio, 6870 /*sks_valid*/ 1, 6871 /*command*/ 1, 6872 /*field*/ 2, 6873 /*bit_valid*/ 0, 6874 /*bit*/ 0); 6875 ctl_done((union ctl_io *)ctsio); 6876 return (CTL_RETVAL_COMPLETE); 6877 } 6878 6879 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 6880 6881 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6882 ctsio->kern_sg_entries = 0; 6883 ctsio->kern_rel_offset = 0; 6884 ctsio->kern_data_len = min(total_len, alloc_len); 6885 ctsio->kern_total_len = ctsio->kern_data_len; 6886 6887 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 6888 header->page = page_index->page_code; 6889 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING) 6890 header->page |= SL_DS; 6891 if (page_index->subpage) { 6892 header->page |= SL_SPF; 6893 header->subpage = page_index->subpage; 6894 } 6895 scsi_ulto2b(page_index->page_len, header->datalen); 6896 6897 /* 6898 * Call the handler, if it exists, to update the 6899 * page to the latest values. 6900 */ 6901 if (page_index->sense_handler != NULL) 6902 page_index->sense_handler(ctsio, page_index, pc); 6903 6904 memcpy(header + 1, page_index->page_data, page_index->page_len); 6905 6906 ctl_set_success(ctsio); 6907 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6908 ctsio->be_move_done = ctl_config_move_done; 6909 ctl_datamove((union ctl_io *)ctsio); 6910 return (CTL_RETVAL_COMPLETE); 6911 } 6912 6913 int 6914 ctl_read_capacity(struct ctl_scsiio *ctsio) 6915 { 6916 struct ctl_lun *lun = CTL_LUN(ctsio); 6917 struct scsi_read_capacity *cdb; 6918 struct scsi_read_capacity_data *data; 6919 uint32_t lba; 6920 6921 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 6922 6923 cdb = (struct scsi_read_capacity *)ctsio->cdb; 6924 6925 lba = scsi_4btoul(cdb->addr); 6926 if (((cdb->pmi & SRC_PMI) == 0) 6927 && (lba != 0)) { 6928 ctl_set_invalid_field(/*ctsio*/ ctsio, 6929 /*sks_valid*/ 1, 6930 /*command*/ 1, 6931 /*field*/ 2, 6932 /*bit_valid*/ 0, 6933 /*bit*/ 0); 6934 ctl_done((union ctl_io *)ctsio); 6935 return (CTL_RETVAL_COMPLETE); 6936 } 6937 6938 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6939 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 6940 ctsio->kern_data_len = sizeof(*data); 6941 ctsio->kern_total_len = sizeof(*data); 6942 ctsio->kern_rel_offset = 0; 6943 ctsio->kern_sg_entries = 0; 6944 6945 /* 6946 * If the maximum LBA is greater than 0xfffffffe, the user must 6947 * issue a SERVICE ACTION IN (16) command, with the read capacity 6948 * service action set. 6949 */ 6950 if (lun->be_lun->maxlba > 0xfffffffe) 6951 scsi_ulto4b(0xffffffff, data->addr); 6952 else 6953 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 6954 6955 /* 6956 * XXX KDM this may not be 512 bytes...
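* (The block length reported below is the backend's configured block size, which need not be 512.)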
6957 */ 6958 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6959 6960 ctl_set_success(ctsio); 6961 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6962 ctsio->be_move_done = ctl_config_move_done; 6963 ctl_datamove((union ctl_io *)ctsio); 6964 return (CTL_RETVAL_COMPLETE); 6965 } 6966 6967 int 6968 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 6969 { 6970 struct ctl_lun *lun = CTL_LUN(ctsio); 6971 struct scsi_read_capacity_16 *cdb; 6972 struct scsi_read_capacity_data_long *data; 6973 uint64_t lba; 6974 uint32_t alloc_len; 6975 6976 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 6977 6978 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 6979 6980 alloc_len = scsi_4btoul(cdb->alloc_len); 6981 lba = scsi_8btou64(cdb->addr); 6982 6983 if ((cdb->reladr & SRC16_PMI) 6984 && (lba != 0)) { 6985 ctl_set_invalid_field(/*ctsio*/ ctsio, 6986 /*sks_valid*/ 1, 6987 /*command*/ 1, 6988 /*field*/ 2, 6989 /*bit_valid*/ 0, 6990 /*bit*/ 0); 6991 ctl_done((union ctl_io *)ctsio); 6992 return (CTL_RETVAL_COMPLETE); 6993 } 6994 6995 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6996 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 6997 ctsio->kern_rel_offset = 0; 6998 ctsio->kern_sg_entries = 0; 6999 ctsio->kern_data_len = min(sizeof(*data), alloc_len); 7000 ctsio->kern_total_len = ctsio->kern_data_len; 7001 7002 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 7003 /* XXX KDM this may not be 512 bytes... */ 7004 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7005 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 7006 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 7007 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 7008 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 7009 7010 ctl_set_success(ctsio); 7011 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7012 ctsio->be_move_done = ctl_config_move_done; 7013 ctl_datamove((union ctl_io *)ctsio); 7014 return (CTL_RETVAL_COMPLETE); 7015 } 7016 7017 int 7018 ctl_get_lba_status(struct ctl_scsiio *ctsio) 7019 { 7020 struct ctl_lun *lun = CTL_LUN(ctsio); 7021 struct scsi_get_lba_status *cdb; 7022 struct scsi_get_lba_status_data *data; 7023 struct ctl_lba_len_flags *lbalen; 7024 uint64_t lba; 7025 uint32_t alloc_len, total_len; 7026 int retval; 7027 7028 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 7029 7030 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 7031 lba = scsi_8btou64(cdb->addr); 7032 alloc_len = scsi_4btoul(cdb->alloc_len); 7033 7034 if (lba > lun->be_lun->maxlba) { 7035 ctl_set_lba_out_of_range(ctsio, lba); 7036 ctl_done((union ctl_io *)ctsio); 7037 return (CTL_RETVAL_COMPLETE); 7038 } 7039 7040 total_len = sizeof(*data) + sizeof(data->descr[0]); 7041 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7042 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 7043 ctsio->kern_rel_offset = 0; 7044 ctsio->kern_sg_entries = 0; 7045 ctsio->kern_data_len = min(total_len, alloc_len); 7046 ctsio->kern_total_len = ctsio->kern_data_len; 7047 7048 /* Fill dummy data in case backend can't tell anything. */ 7049 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 7050 scsi_u64to8b(lba, data->descr[0].addr); 7051 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 7052 data->descr[0].length); 7053 data->descr[0].status = 0; /* Mapped or unknown. 
*/ 7054 7055 ctl_set_success(ctsio); 7056 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7057 ctsio->be_move_done = ctl_config_move_done; 7058 7059 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 7060 lbalen->lba = lba; 7061 lbalen->len = total_len; 7062 lbalen->flags = 0; 7063 retval = lun->backend->config_read((union ctl_io *)ctsio); 7064 return (retval); 7065 } 7066 7067 int 7068 ctl_read_defect(struct ctl_scsiio *ctsio) 7069 { 7070 struct scsi_read_defect_data_10 *ccb10; 7071 struct scsi_read_defect_data_12 *ccb12; 7072 struct scsi_read_defect_data_hdr_10 *data10; 7073 struct scsi_read_defect_data_hdr_12 *data12; 7074 uint32_t alloc_len, data_len; 7075 uint8_t format; 7076 7077 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7078 7079 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7080 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7081 format = ccb10->format; 7082 alloc_len = scsi_2btoul(ccb10->alloc_length); 7083 data_len = sizeof(*data10); 7084 } else { 7085 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7086 format = ccb12->format; 7087 alloc_len = scsi_4btoul(ccb12->alloc_length); 7088 data_len = sizeof(*data12); 7089 } 7090 if (alloc_len == 0) { 7091 ctl_set_success(ctsio); 7092 ctl_done((union ctl_io *)ctsio); 7093 return (CTL_RETVAL_COMPLETE); 7094 } 7095 7096 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7097 ctsio->kern_rel_offset = 0; 7098 ctsio->kern_sg_entries = 0; 7099 ctsio->kern_data_len = min(data_len, alloc_len); 7100 ctsio->kern_total_len = ctsio->kern_data_len; 7101 7102 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7103 data10 = (struct scsi_read_defect_data_hdr_10 *) 7104 ctsio->kern_data_ptr; 7105 data10->format = format; 7106 scsi_ulto2b(0, data10->length); 7107 } else { 7108 data12 = (struct scsi_read_defect_data_hdr_12 *) 7109 ctsio->kern_data_ptr; 7110 data12->format = format; 7111 scsi_ulto2b(0, data12->generation); 7112 scsi_ulto4b(0, data12->length); 7113 } 7114 7115 ctl_set_success(ctsio); 7116 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7117 ctsio->be_move_done = ctl_config_move_done; 7118 ctl_datamove((union ctl_io *)ctsio); 7119 return (CTL_RETVAL_COMPLETE); 7120 } 7121 7122 int 7123 ctl_report_ident_info(struct ctl_scsiio *ctsio) 7124 { 7125 struct ctl_lun *lun = CTL_LUN(ctsio); 7126 struct scsi_report_ident_info *cdb; 7127 struct scsi_report_ident_info_data *rii_ptr; 7128 struct scsi_report_ident_info_descr *riid_ptr; 7129 const char *oii, *otii; 7130 int retval, alloc_len, total_len = 0, len = 0; 7131 7132 CTL_DEBUG_PRINT(("ctl_report_ident_info\n")); 7133 7134 cdb = (struct scsi_report_ident_info *)ctsio->cdb; 7135 retval = CTL_RETVAL_COMPLETE; 7136 7137 total_len = sizeof(struct scsi_report_ident_info_data); 7138 switch (cdb->type) { 7139 case RII_LUII: 7140 oii = dnvlist_get_string(lun->be_lun->options, 7141 "ident_info", NULL); 7142 if (oii) 7143 len = strlen(oii); /* Approximately */ 7144 break; 7145 case RII_LUTII: 7146 otii = dnvlist_get_string(lun->be_lun->options, 7147 "text_ident_info", NULL); 7148 if (otii) 7149 len = strlen(otii) + 1; /* NULL-terminated */ 7150 break; 7151 case RII_IIS: 7152 len = 2 * sizeof(struct scsi_report_ident_info_descr); 7153 break; 7154 default: 7155 ctl_set_invalid_field(/*ctsio*/ ctsio, 7156 /*sks_valid*/ 1, 7157 /*command*/ 1, 7158 /*field*/ 11, 7159 /*bit_valid*/ 1, 7160 /*bit*/ 2); 7161 ctl_done((union ctl_io *)ctsio); 7162 return(retval); 7163 } 7164 total_len += len; 7165 alloc_len = scsi_4btoul(cdb->length); 7166 7167 ctsio->kern_data_ptr = 
malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7168 ctsio->kern_sg_entries = 0; 7169 ctsio->kern_rel_offset = 0; 7170 ctsio->kern_data_len = min(total_len, alloc_len); 7171 ctsio->kern_total_len = ctsio->kern_data_len; 7172 7173 rii_ptr = (struct scsi_report_ident_info_data *)ctsio->kern_data_ptr; 7174 switch (cdb->type) { 7175 case RII_LUII: 7176 if (oii) { 7177 if (oii[0] == '0' && oii[1] == 'x') 7178 len = hex2bin(oii, (uint8_t *)(rii_ptr + 1), len); 7179 else 7180 strncpy((uint8_t *)(rii_ptr + 1), oii, len); 7181 } 7182 break; 7183 case RII_LUTII: 7184 if (otii) 7185 strlcpy((uint8_t *)(rii_ptr + 1), otii, len); 7186 break; 7187 case RII_IIS: 7188 riid_ptr = (struct scsi_report_ident_info_descr *)(rii_ptr + 1); 7189 riid_ptr->type = RII_LUII; 7190 scsi_ulto2b(0xffff, riid_ptr->length); 7191 riid_ptr++; 7192 riid_ptr->type = RII_LUTII; 7193 scsi_ulto2b(0xffff, riid_ptr->length); 7194 } 7195 scsi_ulto2b(len, rii_ptr->length); 7196 7197 ctl_set_success(ctsio); 7198 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7199 ctsio->be_move_done = ctl_config_move_done; 7200 ctl_datamove((union ctl_io *)ctsio); 7201 return(retval); 7202 } 7203 7204 int 7205 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7206 { 7207 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7208 struct ctl_lun *lun = CTL_LUN(ctsio); 7209 struct scsi_maintenance_in *cdb; 7210 int retval; 7211 int alloc_len, ext, total_len = 0, g, pc, pg, ts, os; 7212 int num_ha_groups, num_target_ports, shared_group; 7213 struct ctl_port *port; 7214 struct scsi_target_group_data *rtg_ptr; 7215 struct scsi_target_group_data_extended *rtg_ext_ptr; 7216 struct scsi_target_port_group_descriptor *tpg_desc; 7217 7218 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7219 7220 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7221 retval = CTL_RETVAL_COMPLETE; 7222 7223 switch (cdb->byte2 & STG_PDF_MASK) { 7224 case STG_PDF_LENGTH: 7225 ext = 0; 7226 break; 7227 case STG_PDF_EXTENDED: 7228 ext = 1; 7229 break; 7230 default: 7231 ctl_set_invalid_field(/*ctsio*/ ctsio, 7232 /*sks_valid*/ 1, 7233 /*command*/ 1, 7234 /*field*/ 2, 7235 /*bit_valid*/ 1, 7236 /*bit*/ 5); 7237 ctl_done((union ctl_io *)ctsio); 7238 return(retval); 7239 } 7240 7241 num_target_ports = 0; 7242 shared_group = (softc->is_single != 0); 7243 mtx_lock(&softc->ctl_lock); 7244 STAILQ_FOREACH(port, &softc->port_list, links) { 7245 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7246 continue; 7247 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7248 continue; 7249 num_target_ports++; 7250 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7251 shared_group = 1; 7252 } 7253 mtx_unlock(&softc->ctl_lock); 7254 num_ha_groups = (softc->is_single) ? 
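/* A single (non-HA) node reports no per-shelf target port groups. */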
0 : NUM_HA_SHELVES; 7255 7256 if (ext) 7257 total_len = sizeof(struct scsi_target_group_data_extended); 7258 else 7259 total_len = sizeof(struct scsi_target_group_data); 7260 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7261 (shared_group + num_ha_groups) + 7262 sizeof(struct scsi_target_port_descriptor) * num_target_ports; 7263 7264 alloc_len = scsi_4btoul(cdb->length); 7265 7266 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7267 ctsio->kern_sg_entries = 0; 7268 ctsio->kern_rel_offset = 0; 7269 ctsio->kern_data_len = min(total_len, alloc_len); 7270 ctsio->kern_total_len = ctsio->kern_data_len; 7271 7272 if (ext) { 7273 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7274 ctsio->kern_data_ptr; 7275 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7276 rtg_ext_ptr->format_type = 0x10; 7277 rtg_ext_ptr->implicit_transition_time = 0; 7278 tpg_desc = &rtg_ext_ptr->groups[0]; 7279 } else { 7280 rtg_ptr = (struct scsi_target_group_data *) 7281 ctsio->kern_data_ptr; 7282 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7283 tpg_desc = &rtg_ptr->groups[0]; 7284 } 7285 7286 mtx_lock(&softc->ctl_lock); 7287 pg = softc->port_min / softc->port_cnt; 7288 if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { 7289 /* Some shelf is known to be primary. */ 7290 if (softc->ha_link == CTL_HA_LINK_OFFLINE) 7291 os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7292 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) 7293 os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7294 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) 7295 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 7296 else 7297 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7298 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7299 ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7300 } else { 7301 ts = os; 7302 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7303 } 7304 } else { 7305 /* No known primary shelf. */ 7306 if (softc->ha_link == CTL_HA_LINK_OFFLINE) { 7307 ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7308 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7309 } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { 7310 ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7311 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7312 } else { 7313 ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7314 } 7315 } 7316 if (shared_group) { 7317 tpg_desc->pref_state = ts; 7318 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7319 TPG_U_SUP | TPG_T_SUP; 7320 scsi_ulto2b(1, tpg_desc->target_port_group); 7321 tpg_desc->status = TPG_IMPLICIT; 7322 pc = 0; 7323 STAILQ_FOREACH(port, &softc->port_list, links) { 7324 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7325 continue; 7326 if (!softc->is_single && 7327 (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) 7328 continue; 7329 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7330 continue; 7331 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7332 relative_target_port_identifier); 7333 pc++; 7334 } 7335 tpg_desc->target_port_count = pc; 7336 tpg_desc = (struct scsi_target_port_group_descriptor *) 7337 &tpg_desc->descriptors[pc]; 7338 } 7339 for (g = 0; g < num_ha_groups; g++) { 7340 tpg_desc->pref_state = (g == pg) ? 
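/* The local shelf's group gets the target state computed above; the peer shelf's group gets the other state. */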
ts : os; 7341 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7342 TPG_U_SUP | TPG_T_SUP; 7343 scsi_ulto2b(2 + g, tpg_desc->target_port_group); 7344 tpg_desc->status = TPG_IMPLICIT; 7345 pc = 0; 7346 STAILQ_FOREACH(port, &softc->port_list, links) { 7347 if (port->targ_port < g * softc->port_cnt || 7348 port->targ_port >= (g + 1) * softc->port_cnt) 7349 continue; 7350 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7351 continue; 7352 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7353 continue; 7354 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7355 continue; 7356 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7357 relative_target_port_identifier); 7358 pc++; 7359 } 7360 tpg_desc->target_port_count = pc; 7361 tpg_desc = (struct scsi_target_port_group_descriptor *) 7362 &tpg_desc->descriptors[pc]; 7363 } 7364 mtx_unlock(&softc->ctl_lock); 7365 7366 ctl_set_success(ctsio); 7367 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7368 ctsio->be_move_done = ctl_config_move_done; 7369 ctl_datamove((union ctl_io *)ctsio); 7370 return(retval); 7371 } 7372 7373 int 7374 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7375 { 7376 struct ctl_lun *lun = CTL_LUN(ctsio); 7377 struct scsi_report_supported_opcodes *cdb; 7378 const struct ctl_cmd_entry *entry, *sentry; 7379 struct scsi_report_supported_opcodes_all *all; 7380 struct scsi_report_supported_opcodes_descr *descr; 7381 struct scsi_report_supported_opcodes_one *one; 7382 int retval; 7383 int alloc_len, total_len; 7384 int opcode, service_action, i, j, num; 7385 7386 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7387 7388 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7389 retval = CTL_RETVAL_COMPLETE; 7390 7391 opcode = cdb->requested_opcode; 7392 service_action = scsi_2btoul(cdb->requested_service_action); 7393 switch (cdb->options & RSO_OPTIONS_MASK) { 7394 case RSO_OPTIONS_ALL: 7395 num = 0; 7396 for (i = 0; i < 256; i++) { 7397 entry = &ctl_cmd_table[i]; 7398 if (entry->flags & CTL_CMD_FLAG_SA5) { 7399 for (j = 0; j < 32; j++) { 7400 sentry = &((const struct ctl_cmd_entry *) 7401 entry->execute)[j]; 7402 if (ctl_cmd_applicable( 7403 lun->be_lun->lun_type, sentry)) 7404 num++; 7405 } 7406 } else { 7407 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7408 entry)) 7409 num++; 7410 } 7411 } 7412 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7413 num * sizeof(struct scsi_report_supported_opcodes_descr); 7414 break; 7415 case RSO_OPTIONS_OC: 7416 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7417 ctl_set_invalid_field(/*ctsio*/ ctsio, 7418 /*sks_valid*/ 1, 7419 /*command*/ 1, 7420 /*field*/ 2, 7421 /*bit_valid*/ 1, 7422 /*bit*/ 2); 7423 ctl_done((union ctl_io *)ctsio); 7424 return (CTL_RETVAL_COMPLETE); 7425 } 7426 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7427 break; 7428 case RSO_OPTIONS_OC_SA: 7429 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7430 service_action >= 32) { 7431 ctl_set_invalid_field(/*ctsio*/ ctsio, 7432 /*sks_valid*/ 1, 7433 /*command*/ 1, 7434 /*field*/ 2, 7435 /*bit_valid*/ 1, 7436 /*bit*/ 2); 7437 ctl_done((union ctl_io *)ctsio); 7438 return (CTL_RETVAL_COMPLETE); 7439 } 7440 /* FALLTHROUGH */ 7441 case RSO_OPTIONS_OC_ASA: 7442 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7443 break; 7444 default: 7445 ctl_set_invalid_field(/*ctsio*/ ctsio, 7446 /*sks_valid*/ 1, 7447 /*command*/ 1, 7448 /*field*/ 2, 7449 /*bit_valid*/ 1, 7450 /*bit*/ 2); 7451 ctl_done((union ctl_io *)ctsio); 7452 return 
(CTL_RETVAL_COMPLETE); 7453 } 7454 7455 alloc_len = scsi_4btoul(cdb->length); 7456 7457 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7458 ctsio->kern_sg_entries = 0; 7459 ctsio->kern_rel_offset = 0; 7460 ctsio->kern_data_len = min(total_len, alloc_len); 7461 ctsio->kern_total_len = ctsio->kern_data_len; 7462 7463 switch (cdb->options & RSO_OPTIONS_MASK) { 7464 case RSO_OPTIONS_ALL: 7465 all = (struct scsi_report_supported_opcodes_all *) 7466 ctsio->kern_data_ptr; 7467 num = 0; 7468 for (i = 0; i < 256; i++) { 7469 entry = &ctl_cmd_table[i]; 7470 if (entry->flags & CTL_CMD_FLAG_SA5) { 7471 for (j = 0; j < 32; j++) { 7472 sentry = &((const struct ctl_cmd_entry *) 7473 entry->execute)[j]; 7474 if (!ctl_cmd_applicable( 7475 lun->be_lun->lun_type, sentry)) 7476 continue; 7477 descr = &all->descr[num++]; 7478 descr->opcode = i; 7479 scsi_ulto2b(j, descr->service_action); 7480 descr->flags = RSO_SERVACTV; 7481 scsi_ulto2b(sentry->length, 7482 descr->cdb_length); 7483 } 7484 } else { 7485 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7486 entry)) 7487 continue; 7488 descr = &all->descr[num++]; 7489 descr->opcode = i; 7490 scsi_ulto2b(0, descr->service_action); 7491 descr->flags = 0; 7492 scsi_ulto2b(entry->length, descr->cdb_length); 7493 } 7494 } 7495 scsi_ulto4b( 7496 num * sizeof(struct scsi_report_supported_opcodes_descr), 7497 all->length); 7498 break; 7499 case RSO_OPTIONS_OC: 7500 one = (struct scsi_report_supported_opcodes_one *) 7501 ctsio->kern_data_ptr; 7502 entry = &ctl_cmd_table[opcode]; 7503 goto fill_one; 7504 case RSO_OPTIONS_OC_SA: 7505 one = (struct scsi_report_supported_opcodes_one *) 7506 ctsio->kern_data_ptr; 7507 entry = &ctl_cmd_table[opcode]; 7508 entry = &((const struct ctl_cmd_entry *) 7509 entry->execute)[service_action]; 7510 fill_one: 7511 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7512 one->support = 3; 7513 scsi_ulto2b(entry->length, one->cdb_length); 7514 one->cdb_usage[0] = opcode; 7515 memcpy(&one->cdb_usage[1], entry->usage, 7516 entry->length - 1); 7517 } else 7518 one->support = 1; 7519 break; 7520 case RSO_OPTIONS_OC_ASA: 7521 one = (struct scsi_report_supported_opcodes_one *) 7522 ctsio->kern_data_ptr; 7523 entry = &ctl_cmd_table[opcode]; 7524 if (entry->flags & CTL_CMD_FLAG_SA5) { 7525 entry = &((const struct ctl_cmd_entry *) 7526 entry->execute)[service_action]; 7527 } else if (service_action != 0) { 7528 one->support = 1; 7529 break; 7530 } 7531 goto fill_one; 7532 } 7533 7534 ctl_set_success(ctsio); 7535 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7536 ctsio->be_move_done = ctl_config_move_done; 7537 ctl_datamove((union ctl_io *)ctsio); 7538 return(retval); 7539 } 7540 7541 int 7542 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7543 { 7544 struct scsi_report_supported_tmf *cdb; 7545 struct scsi_report_supported_tmf_ext_data *data; 7546 int retval; 7547 int alloc_len, total_len; 7548 7549 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7550 7551 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7552 7553 retval = CTL_RETVAL_COMPLETE; 7554 7555 if (cdb->options & RST_REPD) 7556 total_len = sizeof(struct scsi_report_supported_tmf_ext_data); 7557 else 7558 total_len = sizeof(struct scsi_report_supported_tmf_data); 7559 alloc_len = scsi_4btoul(cdb->length); 7560 7561 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7562 ctsio->kern_sg_entries = 0; 7563 ctsio->kern_rel_offset = 0; 7564 ctsio->kern_data_len = min(total_len, alloc_len); 7565 ctsio->kern_total_len = ctsio->kern_data_len; 7566 7567 
data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr; 7568 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | 7569 RST_TRS; 7570 data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; 7571 data->length = total_len - 4; 7572 7573 ctl_set_success(ctsio); 7574 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7575 ctsio->be_move_done = ctl_config_move_done; 7576 ctl_datamove((union ctl_io *)ctsio); 7577 return (retval); 7578 } 7579 7580 int 7581 ctl_report_timestamp(struct ctl_scsiio *ctsio) 7582 { 7583 struct scsi_report_timestamp *cdb; 7584 struct scsi_report_timestamp_data *data; 7585 struct timeval tv; 7586 int64_t timestamp; 7587 int retval; 7588 int alloc_len, total_len; 7589 7590 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7591 7592 cdb = (struct scsi_report_timestamp *)ctsio->cdb; 7593 7594 retval = CTL_RETVAL_COMPLETE; 7595 7596 total_len = sizeof(struct scsi_report_timestamp_data); 7597 alloc_len = scsi_4btoul(cdb->length); 7598 7599 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7600 ctsio->kern_sg_entries = 0; 7601 ctsio->kern_rel_offset = 0; 7602 ctsio->kern_data_len = min(total_len, alloc_len); 7603 ctsio->kern_total_len = ctsio->kern_data_len; 7604 7605 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7606 scsi_ulto2b(sizeof(*data) - 2, data->length); 7607 data->origin = RTS_ORIG_OUTSIDE; 7608 getmicrotime(&tv); 7609 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7610 scsi_ulto4b(timestamp >> 16, data->timestamp); 7611 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7612 7613 ctl_set_success(ctsio); 7614 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7615 ctsio->be_move_done = ctl_config_move_done; 7616 ctl_datamove((union ctl_io *)ctsio); 7617 return (retval); 7618 } 7619 7620 int 7621 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7622 { 7623 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7624 struct ctl_lun *lun = CTL_LUN(ctsio); 7625 struct scsi_per_res_in *cdb; 7626 int alloc_len, total_len = 0; 7627 /* struct scsi_per_res_in_rsrv in_data; */ 7628 uint64_t key; 7629 7630 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7631 7632 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7633 7634 alloc_len = scsi_2btoul(cdb->length); 7635 7636 retry: 7637 mtx_lock(&lun->lun_lock); 7638 switch (cdb->action) { 7639 case SPRI_RK: /* read keys */ 7640 total_len = sizeof(struct scsi_per_res_in_keys) + 7641 lun->pr_key_count * 7642 sizeof(struct scsi_per_res_key); 7643 break; 7644 case SPRI_RR: /* read reservation */ 7645 if (lun->flags & CTL_LUN_PR_RESERVED) 7646 total_len = sizeof(struct scsi_per_res_in_rsrv); 7647 else 7648 total_len = sizeof(struct scsi_per_res_in_header); 7649 break; 7650 case SPRI_RC: /* report capabilities */ 7651 total_len = sizeof(struct scsi_per_res_cap); 7652 break; 7653 case SPRI_RS: /* read full status */ 7654 total_len = sizeof(struct scsi_per_res_in_header) + 7655 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7656 lun->pr_key_count; 7657 break; 7658 default: 7659 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7660 } 7661 mtx_unlock(&lun->lun_lock); 7662 7663 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7664 ctsio->kern_rel_offset = 0; 7665 ctsio->kern_sg_entries = 0; 7666 ctsio->kern_data_len = min(total_len, alloc_len); 7667 ctsio->kern_total_len = ctsio->kern_data_len; 7668 7669 mtx_lock(&lun->lun_lock); 7670 switch (cdb->action) { 7671 case SPRI_RK: { // read keys 7672 struct scsi_per_res_in_keys *res_keys; 7673 int i, key_count; 7674 7675 
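		/*
		 * READ KEYS: return the PR generation count and the
		 * reservation key of every currently registered I_T nexus.
		 */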
res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7676 7677 /* 7678 * We had to drop the lock to allocate our buffer, which 7679 * leaves time for someone to come in with another 7680 * persistent reservation. (That is unlikely, though, 7681 * since this should be the only persistent reservation 7682 * command active right now.) 7683 */ 7684 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7685 (lun->pr_key_count * 7686 sizeof(struct scsi_per_res_key)))){ 7687 mtx_unlock(&lun->lun_lock); 7688 free(ctsio->kern_data_ptr, M_CTL); 7689 printf("%s: reservation length changed, retrying\n", 7690 __func__); 7691 goto retry; 7692 } 7693 7694 scsi_ulto4b(lun->pr_generation, res_keys->header.generation); 7695 7696 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7697 lun->pr_key_count, res_keys->header.length); 7698 7699 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { 7700 if ((key = ctl_get_prkey(lun, i)) == 0) 7701 continue; 7702 7703 /* 7704 * We used lun->pr_key_count to calculate the 7705 * size to allocate. If it turns out the number of 7706 * initiators with the registered flag set is 7707 * larger than that (i.e. they haven't been kept in 7708 * sync), we've got a problem. 7709 */ 7710 if (key_count >= lun->pr_key_count) { 7711 key_count++; 7712 continue; 7713 } 7714 scsi_u64to8b(key, res_keys->keys[key_count].key); 7715 key_count++; 7716 } 7717 break; 7718 } 7719 case SPRI_RR: { // read reservation 7720 struct scsi_per_res_in_rsrv *res; 7721 int tmp_len, header_only; 7722 7723 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7724 7725 scsi_ulto4b(lun->pr_generation, res->header.generation); 7726 7727 if (lun->flags & CTL_LUN_PR_RESERVED) 7728 { 7729 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7730 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7731 res->header.length); 7732 header_only = 0; 7733 } else { 7734 tmp_len = sizeof(struct scsi_per_res_in_header); 7735 scsi_ulto4b(0, res->header.length); 7736 header_only = 1; 7737 } 7738 7739 /* 7740 * We had to drop the lock to allocate our buffer, which 7741 * leaves time for someone to come in with another 7742 * persistent reservation. (That is unlikely, though, 7743 * since this should be the only persistent reservation 7744 * command active right now.) 7745 */ 7746 if (tmp_len != total_len) { 7747 mtx_unlock(&lun->lun_lock); 7748 free(ctsio->kern_data_ptr, M_CTL); 7749 printf("%s: reservation status changed, retrying\n", 7750 __func__); 7751 goto retry; 7752 } 7753 7754 /* 7755 * No reservation held, so we're done. 7756 */ 7757 if (header_only != 0) 7758 break; 7759 7760 /* 7761 * If the registration is an All Registrants type, the key 7762 * is 0, since it doesn't really matter. 
7763 */ 7764 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7765 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7766 res->data.reservation); 7767 } 7768 res->data.scopetype = lun->pr_res_type; 7769 break; 7770 } 7771 case SPRI_RC: //report capabilities 7772 { 7773 struct scsi_per_res_cap *res_cap; 7774 uint16_t type_mask; 7775 7776 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7777 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7778 res_cap->flags1 = SPRI_CRH; 7779 res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5; 7780 type_mask = SPRI_TM_WR_EX_AR | 7781 SPRI_TM_EX_AC_RO | 7782 SPRI_TM_WR_EX_RO | 7783 SPRI_TM_EX_AC | 7784 SPRI_TM_WR_EX | 7785 SPRI_TM_EX_AC_AR; 7786 scsi_ulto2b(type_mask, res_cap->type_mask); 7787 break; 7788 } 7789 case SPRI_RS: { // read full status 7790 struct scsi_per_res_in_full *res_status; 7791 struct scsi_per_res_in_full_desc *res_desc; 7792 struct ctl_port *port; 7793 int i, len; 7794 7795 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7796 7797 /* 7798 * We had to drop the lock to allocate our buffer, which 7799 * leaves time for someone to come in with another 7800 * persistent reservation. (That is unlikely, though, 7801 * since this should be the only persistent reservation 7802 * command active right now.) 7803 */ 7804 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7805 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7806 lun->pr_key_count)){ 7807 mtx_unlock(&lun->lun_lock); 7808 free(ctsio->kern_data_ptr, M_CTL); 7809 printf("%s: reservation length changed, retrying\n", 7810 __func__); 7811 goto retry; 7812 } 7813 7814 scsi_ulto4b(lun->pr_generation, res_status->header.generation); 7815 7816 res_desc = &res_status->desc[0]; 7817 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7818 if ((key = ctl_get_prkey(lun, i)) == 0) 7819 continue; 7820 7821 scsi_u64to8b(key, res_desc->res_key.key); 7822 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7823 (lun->pr_res_idx == i || 7824 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7825 res_desc->flags = SPRI_FULL_R_HOLDER; 7826 res_desc->scopetype = lun->pr_res_type; 7827 } 7828 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7829 res_desc->rel_trgt_port_id); 7830 len = 0; 7831 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; 7832 if (port != NULL) 7833 len = ctl_create_iid(port, 7834 i % CTL_MAX_INIT_PER_PORT, 7835 res_desc->transport_id); 7836 scsi_ulto4b(len, res_desc->additional_length); 7837 res_desc = (struct scsi_per_res_in_full_desc *) 7838 &res_desc->transport_id[len]; 7839 } 7840 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7841 res_status->header.length); 7842 break; 7843 } 7844 default: 7845 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7846 } 7847 mtx_unlock(&lun->lun_lock); 7848 7849 ctl_set_success(ctsio); 7850 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7851 ctsio->be_move_done = ctl_config_move_done; 7852 ctl_datamove((union ctl_io *)ctsio); 7853 return (CTL_RETVAL_COMPLETE); 7854 } 7855 7856 /* 7857 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7858 * it should return. 
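 * When it returns non-zero the ctsio has already been terminated here
 * (sense data set and ctl_done() called), so the caller must not touch
 * it again.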
7859 */ 7860 static int 7861 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7862 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7863 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7864 struct scsi_per_res_out_parms* param) 7865 { 7866 union ctl_ha_msg persis_io; 7867 int i; 7868 7869 mtx_lock(&lun->lun_lock); 7870 if (sa_res_key == 0) { 7871 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7872 /* validate scope and type */ 7873 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7874 SPR_LU_SCOPE) { 7875 mtx_unlock(&lun->lun_lock); 7876 ctl_set_invalid_field(/*ctsio*/ ctsio, 7877 /*sks_valid*/ 1, 7878 /*command*/ 1, 7879 /*field*/ 2, 7880 /*bit_valid*/ 1, 7881 /*bit*/ 4); 7882 ctl_done((union ctl_io *)ctsio); 7883 return (1); 7884 } 7885 7886 if (type>8 || type==2 || type==4 || type==0) { 7887 mtx_unlock(&lun->lun_lock); 7888 ctl_set_invalid_field(/*ctsio*/ ctsio, 7889 /*sks_valid*/ 1, 7890 /*command*/ 1, 7891 /*field*/ 2, 7892 /*bit_valid*/ 1, 7893 /*bit*/ 0); 7894 ctl_done((union ctl_io *)ctsio); 7895 return (1); 7896 } 7897 7898 /* 7899 * Unregister everybody else and build UA for 7900 * them 7901 */ 7902 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7903 if (i == residx || ctl_get_prkey(lun, i) == 0) 7904 continue; 7905 7906 ctl_clr_prkey(lun, i); 7907 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7908 } 7909 lun->pr_key_count = 1; 7910 lun->pr_res_type = type; 7911 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 7912 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 7913 lun->pr_res_idx = residx; 7914 lun->pr_generation++; 7915 mtx_unlock(&lun->lun_lock); 7916 7917 /* send msg to other side */ 7918 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7919 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7920 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7921 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7922 persis_io.pr.pr_info.res_type = type; 7923 memcpy(persis_io.pr.pr_info.sa_res_key, 7924 param->serv_act_res_key, 7925 sizeof(param->serv_act_res_key)); 7926 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7927 sizeof(persis_io.pr), M_WAITOK); 7928 } else { 7929 /* not all registrants */ 7930 mtx_unlock(&lun->lun_lock); 7931 free(ctsio->kern_data_ptr, M_CTL); 7932 ctl_set_invalid_field(ctsio, 7933 /*sks_valid*/ 1, 7934 /*command*/ 0, 7935 /*field*/ 8, 7936 /*bit_valid*/ 0, 7937 /*bit*/ 0); 7938 ctl_done((union ctl_io *)ctsio); 7939 return (1); 7940 } 7941 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7942 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7943 int found = 0; 7944 7945 if (res_key == sa_res_key) { 7946 /* special case */ 7947 /* 7948 * The spec implies this is not good but doesn't 7949 * say what to do. There are two choices either 7950 * generate a res conflict or check condition 7951 * with illegal field in parameter data. Since 7952 * that is what is done when the sa_res_key is 7953 * zero I'll take that approach since this has 7954 * to do with the sa_res_key. 
7955 */ 7956 mtx_unlock(&lun->lun_lock); 7957 free(ctsio->kern_data_ptr, M_CTL); 7958 ctl_set_invalid_field(ctsio, 7959 /*sks_valid*/ 1, 7960 /*command*/ 0, 7961 /*field*/ 8, 7962 /*bit_valid*/ 0, 7963 /*bit*/ 0); 7964 ctl_done((union ctl_io *)ctsio); 7965 return (1); 7966 } 7967 7968 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7969 if (ctl_get_prkey(lun, i) != sa_res_key) 7970 continue; 7971 7972 found = 1; 7973 ctl_clr_prkey(lun, i); 7974 lun->pr_key_count--; 7975 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7976 } 7977 if (!found) { 7978 mtx_unlock(&lun->lun_lock); 7979 free(ctsio->kern_data_ptr, M_CTL); 7980 ctl_set_reservation_conflict(ctsio); 7981 ctl_done((union ctl_io *)ctsio); 7982 return (CTL_RETVAL_COMPLETE); 7983 } 7984 lun->pr_generation++; 7985 mtx_unlock(&lun->lun_lock); 7986 7987 /* send msg to other side */ 7988 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7989 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7990 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7991 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7992 persis_io.pr.pr_info.res_type = type; 7993 memcpy(persis_io.pr.pr_info.sa_res_key, 7994 param->serv_act_res_key, 7995 sizeof(param->serv_act_res_key)); 7996 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7997 sizeof(persis_io.pr), M_WAITOK); 7998 } else { 7999 /* Reserved but not all registrants */ 8000 /* sa_res_key is res holder */ 8001 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 8002 /* validate scope and type */ 8003 if ((cdb->scope_type & SPR_SCOPE_MASK) != 8004 SPR_LU_SCOPE) { 8005 mtx_unlock(&lun->lun_lock); 8006 ctl_set_invalid_field(/*ctsio*/ ctsio, 8007 /*sks_valid*/ 1, 8008 /*command*/ 1, 8009 /*field*/ 2, 8010 /*bit_valid*/ 1, 8011 /*bit*/ 4); 8012 ctl_done((union ctl_io *)ctsio); 8013 return (1); 8014 } 8015 8016 if (type>8 || type==2 || type==4 || type==0) { 8017 mtx_unlock(&lun->lun_lock); 8018 ctl_set_invalid_field(/*ctsio*/ ctsio, 8019 /*sks_valid*/ 1, 8020 /*command*/ 1, 8021 /*field*/ 2, 8022 /*bit_valid*/ 1, 8023 /*bit*/ 0); 8024 ctl_done((union ctl_io *)ctsio); 8025 return (1); 8026 } 8027 8028 /* 8029 * Do the following: 8030 * if sa_res_key != res_key remove all 8031 * registrants w/sa_res_key and generate UA 8032 * for these registrants(Registrations 8033 * Preempted) if it wasn't an exclusive 8034 * reservation generate UA(Reservations 8035 * Preempted) for all other registered nexuses 8036 * if the type has changed. Establish the new 8037 * reservation and holder. If res_key and 8038 * sa_res_key are the same do the above 8039 * except don't unregister the res holder. 
8040 */ 8041 8042 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8043 if (i == residx || ctl_get_prkey(lun, i) == 0) 8044 continue; 8045 8046 if (sa_res_key == ctl_get_prkey(lun, i)) { 8047 ctl_clr_prkey(lun, i); 8048 lun->pr_key_count--; 8049 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8050 } else if (type != lun->pr_res_type && 8051 (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8052 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8053 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8054 } 8055 } 8056 lun->pr_res_type = type; 8057 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8058 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8059 lun->pr_res_idx = residx; 8060 else 8061 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8062 lun->pr_generation++; 8063 mtx_unlock(&lun->lun_lock); 8064 8065 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8066 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8067 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8068 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8069 persis_io.pr.pr_info.res_type = type; 8070 memcpy(persis_io.pr.pr_info.sa_res_key, 8071 param->serv_act_res_key, 8072 sizeof(param->serv_act_res_key)); 8073 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8074 sizeof(persis_io.pr), M_WAITOK); 8075 } else { 8076 /* 8077 * sa_res_key is not the res holder just 8078 * remove registrants 8079 */ 8080 int found=0; 8081 8082 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8083 if (sa_res_key != ctl_get_prkey(lun, i)) 8084 continue; 8085 8086 found = 1; 8087 ctl_clr_prkey(lun, i); 8088 lun->pr_key_count--; 8089 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8090 } 8091 8092 if (!found) { 8093 mtx_unlock(&lun->lun_lock); 8094 free(ctsio->kern_data_ptr, M_CTL); 8095 ctl_set_reservation_conflict(ctsio); 8096 ctl_done((union ctl_io *)ctsio); 8097 return (1); 8098 } 8099 lun->pr_generation++; 8100 mtx_unlock(&lun->lun_lock); 8101 8102 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8103 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8104 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8105 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8106 persis_io.pr.pr_info.res_type = type; 8107 memcpy(persis_io.pr.pr_info.sa_res_key, 8108 param->serv_act_res_key, 8109 sizeof(param->serv_act_res_key)); 8110 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8111 sizeof(persis_io.pr), M_WAITOK); 8112 } 8113 } 8114 return (0); 8115 } 8116 8117 static void 8118 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8119 { 8120 uint64_t sa_res_key; 8121 int i; 8122 8123 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8124 8125 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8126 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8127 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8128 if (sa_res_key == 0) { 8129 /* 8130 * Unregister everybody else and build UA for 8131 * them 8132 */ 8133 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8134 if (i == msg->pr.pr_info.residx || 8135 ctl_get_prkey(lun, i) == 0) 8136 continue; 8137 8138 ctl_clr_prkey(lun, i); 8139 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8140 } 8141 8142 lun->pr_key_count = 1; 8143 lun->pr_res_type = msg->pr.pr_info.res_type; 8144 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8145 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8146 lun->pr_res_idx = msg->pr.pr_info.residx; 8147 } else { 8148 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8149 if (sa_res_key == ctl_get_prkey(lun, i)) 8150 continue; 8151 8152 ctl_clr_prkey(lun, i); 8153 lun->pr_key_count--; 8154 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8155 } 8156 } 8157 } else { 8158 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8159 if (i == 
msg->pr.pr_info.residx || 8160 ctl_get_prkey(lun, i) == 0) 8161 continue; 8162 8163 if (sa_res_key == ctl_get_prkey(lun, i)) { 8164 ctl_clr_prkey(lun, i); 8165 lun->pr_key_count--; 8166 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8167 } else if (msg->pr.pr_info.res_type != lun->pr_res_type 8168 && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8169 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8170 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8171 } 8172 } 8173 lun->pr_res_type = msg->pr.pr_info.res_type; 8174 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8175 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8176 lun->pr_res_idx = msg->pr.pr_info.residx; 8177 else 8178 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8179 } 8180 lun->pr_generation++; 8181 8182 } 8183 8184 int 8185 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8186 { 8187 struct ctl_softc *softc = CTL_SOFTC(ctsio); 8188 struct ctl_lun *lun = CTL_LUN(ctsio); 8189 int retval; 8190 u_int32_t param_len; 8191 struct scsi_per_res_out *cdb; 8192 struct scsi_per_res_out_parms* param; 8193 uint32_t residx; 8194 uint64_t res_key, sa_res_key, key; 8195 uint8_t type; 8196 union ctl_ha_msg persis_io; 8197 int i; 8198 8199 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8200 8201 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8202 retval = CTL_RETVAL_COMPLETE; 8203 8204 /* 8205 * We only support whole-LUN scope. The scope & type are ignored for 8206 * register, register and ignore existing key and clear. 8207 * We sometimes ignore scope and type on preempts too!! 8208 * Verify reservation type here as well. 8209 */ 8210 type = cdb->scope_type & SPR_TYPE_MASK; 8211 if ((cdb->action == SPRO_RESERVE) 8212 || (cdb->action == SPRO_RELEASE)) { 8213 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8214 ctl_set_invalid_field(/*ctsio*/ ctsio, 8215 /*sks_valid*/ 1, 8216 /*command*/ 1, 8217 /*field*/ 2, 8218 /*bit_valid*/ 1, 8219 /*bit*/ 4); 8220 ctl_done((union ctl_io *)ctsio); 8221 return (CTL_RETVAL_COMPLETE); 8222 } 8223 8224 if (type>8 || type==2 || type==4 || type==0) { 8225 ctl_set_invalid_field(/*ctsio*/ ctsio, 8226 /*sks_valid*/ 1, 8227 /*command*/ 1, 8228 /*field*/ 2, 8229 /*bit_valid*/ 1, 8230 /*bit*/ 0); 8231 ctl_done((union ctl_io *)ctsio); 8232 return (CTL_RETVAL_COMPLETE); 8233 } 8234 } 8235 8236 param_len = scsi_4btoul(cdb->length); 8237 8238 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8239 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8240 ctsio->kern_data_len = param_len; 8241 ctsio->kern_total_len = param_len; 8242 ctsio->kern_rel_offset = 0; 8243 ctsio->kern_sg_entries = 0; 8244 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8245 ctsio->be_move_done = ctl_config_move_done; 8246 ctl_datamove((union ctl_io *)ctsio); 8247 8248 return (CTL_RETVAL_COMPLETE); 8249 } 8250 8251 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8252 8253 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8254 res_key = scsi_8btou64(param->res_key.key); 8255 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8256 8257 /* 8258 * Validate the reservation key here except for SPRO_REG_IGNO 8259 * This must be done for all other service actions 8260 */ 8261 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8262 mtx_lock(&lun->lun_lock); 8263 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8264 if (res_key != key) { 8265 /* 8266 * The current key passed in doesn't match 8267 * the one the initiator previously 8268 * registered. 
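				 * Per SPC-4, a mismatched reservation key
				 * results in RESERVATION CONFLICT status,
				 * which is what we return below.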
8269 */ 8270 mtx_unlock(&lun->lun_lock); 8271 free(ctsio->kern_data_ptr, M_CTL); 8272 ctl_set_reservation_conflict(ctsio); 8273 ctl_done((union ctl_io *)ctsio); 8274 return (CTL_RETVAL_COMPLETE); 8275 } 8276 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8277 /* 8278 * We are not registered 8279 */ 8280 mtx_unlock(&lun->lun_lock); 8281 free(ctsio->kern_data_ptr, M_CTL); 8282 ctl_set_reservation_conflict(ctsio); 8283 ctl_done((union ctl_io *)ctsio); 8284 return (CTL_RETVAL_COMPLETE); 8285 } else if (res_key != 0) { 8286 /* 8287 * We are not registered and trying to register but 8288 * the register key isn't zero. 8289 */ 8290 mtx_unlock(&lun->lun_lock); 8291 free(ctsio->kern_data_ptr, M_CTL); 8292 ctl_set_reservation_conflict(ctsio); 8293 ctl_done((union ctl_io *)ctsio); 8294 return (CTL_RETVAL_COMPLETE); 8295 } 8296 mtx_unlock(&lun->lun_lock); 8297 } 8298 8299 switch (cdb->action & SPRO_ACTION_MASK) { 8300 case SPRO_REGISTER: 8301 case SPRO_REG_IGNO: { 8302 /* 8303 * We don't support any of these options, as we report in 8304 * the read capabilities request (see 8305 * ctl_persistent_reserve_in(), above). 8306 */ 8307 if ((param->flags & SPR_SPEC_I_PT) 8308 || (param->flags & SPR_ALL_TG_PT) 8309 || (param->flags & SPR_APTPL)) { 8310 int bit_ptr; 8311 8312 if (param->flags & SPR_APTPL) 8313 bit_ptr = 0; 8314 else if (param->flags & SPR_ALL_TG_PT) 8315 bit_ptr = 2; 8316 else /* SPR_SPEC_I_PT */ 8317 bit_ptr = 3; 8318 8319 free(ctsio->kern_data_ptr, M_CTL); 8320 ctl_set_invalid_field(ctsio, 8321 /*sks_valid*/ 1, 8322 /*command*/ 0, 8323 /*field*/ 20, 8324 /*bit_valid*/ 1, 8325 /*bit*/ bit_ptr); 8326 ctl_done((union ctl_io *)ctsio); 8327 return (CTL_RETVAL_COMPLETE); 8328 } 8329 8330 mtx_lock(&lun->lun_lock); 8331 8332 /* 8333 * The initiator wants to clear the 8334 * key/unregister. 8335 */ 8336 if (sa_res_key == 0) { 8337 if ((res_key == 0 8338 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8339 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8340 && ctl_get_prkey(lun, residx) == 0)) { 8341 mtx_unlock(&lun->lun_lock); 8342 goto done; 8343 } 8344 8345 ctl_clr_prkey(lun, residx); 8346 lun->pr_key_count--; 8347 8348 if (residx == lun->pr_res_idx) { 8349 lun->flags &= ~CTL_LUN_PR_RESERVED; 8350 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8351 8352 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8353 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8354 lun->pr_key_count) { 8355 /* 8356 * If the reservation is a registrants 8357 * only type we need to generate a UA 8358 * for other registered inits. The 8359 * sense code should be RESERVATIONS 8360 * RELEASED 8361 */ 8362 8363 for (i = softc->init_min; i < softc->init_max; i++){ 8364 if (ctl_get_prkey(lun, i) == 0) 8365 continue; 8366 ctl_est_ua(lun, i, 8367 CTL_UA_RES_RELEASE); 8368 } 8369 } 8370 lun->pr_res_type = 0; 8371 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8372 if (lun->pr_key_count==0) { 8373 lun->flags &= ~CTL_LUN_PR_RESERVED; 8374 lun->pr_res_type = 0; 8375 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8376 } 8377 } 8378 lun->pr_generation++; 8379 mtx_unlock(&lun->lun_lock); 8380 8381 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8382 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8383 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8384 persis_io.pr.pr_info.residx = residx; 8385 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8386 sizeof(persis_io.pr), M_WAITOK); 8387 } else /* sa_res_key != 0 */ { 8388 /* 8389 * If we aren't registered currently then increment 8390 * the key count and set the registered flag. 
8391 */ 8392 ctl_alloc_prkey(lun, residx); 8393 if (ctl_get_prkey(lun, residx) == 0) 8394 lun->pr_key_count++; 8395 ctl_set_prkey(lun, residx, sa_res_key); 8396 lun->pr_generation++; 8397 mtx_unlock(&lun->lun_lock); 8398 8399 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8400 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8401 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8402 persis_io.pr.pr_info.residx = residx; 8403 memcpy(persis_io.pr.pr_info.sa_res_key, 8404 param->serv_act_res_key, 8405 sizeof(param->serv_act_res_key)); 8406 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8407 sizeof(persis_io.pr), M_WAITOK); 8408 } 8409 8410 break; 8411 } 8412 case SPRO_RESERVE: 8413 mtx_lock(&lun->lun_lock); 8414 if (lun->flags & CTL_LUN_PR_RESERVED) { 8415 /* 8416 * if this isn't the reservation holder and it's 8417 * not a "all registrants" type or if the type is 8418 * different then we have a conflict 8419 */ 8420 if ((lun->pr_res_idx != residx 8421 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8422 || lun->pr_res_type != type) { 8423 mtx_unlock(&lun->lun_lock); 8424 free(ctsio->kern_data_ptr, M_CTL); 8425 ctl_set_reservation_conflict(ctsio); 8426 ctl_done((union ctl_io *)ctsio); 8427 return (CTL_RETVAL_COMPLETE); 8428 } 8429 mtx_unlock(&lun->lun_lock); 8430 } else /* create a reservation */ { 8431 /* 8432 * If it's not an "all registrants" type record 8433 * reservation holder 8434 */ 8435 if (type != SPR_TYPE_WR_EX_AR 8436 && type != SPR_TYPE_EX_AC_AR) 8437 lun->pr_res_idx = residx; /* Res holder */ 8438 else 8439 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8440 8441 lun->flags |= CTL_LUN_PR_RESERVED; 8442 lun->pr_res_type = type; 8443 8444 mtx_unlock(&lun->lun_lock); 8445 8446 /* send msg to other side */ 8447 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8448 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8449 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8450 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8451 persis_io.pr.pr_info.res_type = type; 8452 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8453 sizeof(persis_io.pr), M_WAITOK); 8454 } 8455 break; 8456 8457 case SPRO_RELEASE: 8458 mtx_lock(&lun->lun_lock); 8459 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8460 /* No reservation exists return good status */ 8461 mtx_unlock(&lun->lun_lock); 8462 goto done; 8463 } 8464 /* 8465 * Is this nexus a reservation holder? 8466 */ 8467 if (lun->pr_res_idx != residx 8468 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8469 /* 8470 * not a res holder return good status but 8471 * do nothing 8472 */ 8473 mtx_unlock(&lun->lun_lock); 8474 goto done; 8475 } 8476 8477 if (lun->pr_res_type != type) { 8478 mtx_unlock(&lun->lun_lock); 8479 free(ctsio->kern_data_ptr, M_CTL); 8480 ctl_set_illegal_pr_release(ctsio); 8481 ctl_done((union ctl_io *)ctsio); 8482 return (CTL_RETVAL_COMPLETE); 8483 } 8484 8485 /* okay to release */ 8486 lun->flags &= ~CTL_LUN_PR_RESERVED; 8487 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8488 lun->pr_res_type = 0; 8489 8490 /* 8491 * If this isn't an exclusive access reservation and NUAR 8492 * is not set, generate UA for all other registrants. 
8493 */ 8494 if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX && 8495 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8496 for (i = softc->init_min; i < softc->init_max; i++) { 8497 if (i == residx || ctl_get_prkey(lun, i) == 0) 8498 continue; 8499 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8500 } 8501 } 8502 mtx_unlock(&lun->lun_lock); 8503 8504 /* Send msg to other side */ 8505 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8506 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8507 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8508 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8509 sizeof(persis_io.pr), M_WAITOK); 8510 break; 8511 8512 case SPRO_CLEAR: 8513 /* send msg to other side */ 8514 8515 mtx_lock(&lun->lun_lock); 8516 lun->flags &= ~CTL_LUN_PR_RESERVED; 8517 lun->pr_res_type = 0; 8518 lun->pr_key_count = 0; 8519 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8520 8521 ctl_clr_prkey(lun, residx); 8522 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8523 if (ctl_get_prkey(lun, i) != 0) { 8524 ctl_clr_prkey(lun, i); 8525 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8526 } 8527 lun->pr_generation++; 8528 mtx_unlock(&lun->lun_lock); 8529 8530 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8531 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8532 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8533 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8534 sizeof(persis_io.pr), M_WAITOK); 8535 break; 8536 8537 case SPRO_PREEMPT: 8538 case SPRO_PRE_ABO: { 8539 int nretval; 8540 8541 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8542 residx, ctsio, cdb, param); 8543 if (nretval != 0) 8544 return (CTL_RETVAL_COMPLETE); 8545 break; 8546 } 8547 default: 8548 panic("%s: Invalid PR type %#x", __func__, cdb->action); 8549 } 8550 8551 done: 8552 free(ctsio->kern_data_ptr, M_CTL); 8553 ctl_set_success(ctsio); 8554 ctl_done((union ctl_io *)ctsio); 8555 8556 return (retval); 8557 } 8558 8559 /* 8560 * This routine is for handling a message from the other SC pertaining to 8561 * persistent reserve out. All the error checking will have been done 8562 * so only perorming the action need be done here to keep the two 8563 * in sync. 8564 */ 8565 static void 8566 ctl_hndl_per_res_out_on_other_sc(union ctl_io *io) 8567 { 8568 struct ctl_softc *softc = CTL_SOFTC(io); 8569 union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg; 8570 struct ctl_lun *lun; 8571 int i; 8572 uint32_t residx, targ_lun; 8573 8574 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8575 mtx_lock(&softc->ctl_lock); 8576 if (targ_lun >= ctl_max_luns || 8577 (lun = softc->ctl_luns[targ_lun]) == NULL) { 8578 mtx_unlock(&softc->ctl_lock); 8579 return; 8580 } 8581 mtx_lock(&lun->lun_lock); 8582 mtx_unlock(&softc->ctl_lock); 8583 if (lun->flags & CTL_LUN_DISABLED) { 8584 mtx_unlock(&lun->lun_lock); 8585 return; 8586 } 8587 residx = ctl_get_initindex(&msg->hdr.nexus); 8588 switch(msg->pr.pr_info.action) { 8589 case CTL_PR_REG_KEY: 8590 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8591 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8592 lun->pr_key_count++; 8593 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8594 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8595 lun->pr_generation++; 8596 break; 8597 8598 case CTL_PR_UNREG_KEY: 8599 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8600 lun->pr_key_count--; 8601 8602 /* XXX Need to see if the reservation has been released */ 8603 /* if so do we need to generate UA? 
*/ 8604 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8605 lun->flags &= ~CTL_LUN_PR_RESERVED; 8606 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8607 8608 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8609 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8610 lun->pr_key_count) { 8611 /* 8612 * If the reservation is a registrants 8613 * only type we need to generate a UA 8614 * for other registered inits. The 8615 * sense code should be RESERVATIONS 8616 * RELEASED 8617 */ 8618 8619 for (i = softc->init_min; i < softc->init_max; i++) { 8620 if (ctl_get_prkey(lun, i) == 0) 8621 continue; 8622 8623 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8624 } 8625 } 8626 lun->pr_res_type = 0; 8627 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8628 if (lun->pr_key_count==0) { 8629 lun->flags &= ~CTL_LUN_PR_RESERVED; 8630 lun->pr_res_type = 0; 8631 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8632 } 8633 } 8634 lun->pr_generation++; 8635 break; 8636 8637 case CTL_PR_RESERVE: 8638 lun->flags |= CTL_LUN_PR_RESERVED; 8639 lun->pr_res_type = msg->pr.pr_info.res_type; 8640 lun->pr_res_idx = msg->pr.pr_info.residx; 8641 8642 break; 8643 8644 case CTL_PR_RELEASE: 8645 /* 8646 * If this isn't an exclusive access reservation and NUAR 8647 * is not set, generate UA for all other registrants. 8648 */ 8649 if (lun->pr_res_type != SPR_TYPE_EX_AC && 8650 lun->pr_res_type != SPR_TYPE_WR_EX && 8651 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8652 for (i = softc->init_min; i < softc->init_max; i++) { 8653 if (i == residx || ctl_get_prkey(lun, i) == 0) 8654 continue; 8655 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8656 } 8657 } 8658 8659 lun->flags &= ~CTL_LUN_PR_RESERVED; 8660 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8661 lun->pr_res_type = 0; 8662 break; 8663 8664 case CTL_PR_PREEMPT: 8665 ctl_pro_preempt_other(lun, msg); 8666 break; 8667 case CTL_PR_CLEAR: 8668 lun->flags &= ~CTL_LUN_PR_RESERVED; 8669 lun->pr_res_type = 0; 8670 lun->pr_key_count = 0; 8671 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8672 8673 for (i=0; i < CTL_MAX_INITIATORS; i++) { 8674 if (ctl_get_prkey(lun, i) == 0) 8675 continue; 8676 ctl_clr_prkey(lun, i); 8677 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8678 } 8679 lun->pr_generation++; 8680 break; 8681 } 8682 8683 mtx_unlock(&lun->lun_lock); 8684 } 8685 8686 int 8687 ctl_read_write(struct ctl_scsiio *ctsio) 8688 { 8689 struct ctl_lun *lun = CTL_LUN(ctsio); 8690 struct ctl_lba_len_flags *lbalen; 8691 uint64_t lba; 8692 uint32_t num_blocks; 8693 int flags, retval; 8694 int isread; 8695 8696 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8697 8698 flags = 0; 8699 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8700 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8701 switch (ctsio->cdb[0]) { 8702 case READ_6: 8703 case WRITE_6: { 8704 struct scsi_rw_6 *cdb; 8705 8706 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8707 8708 lba = scsi_3btoul(cdb->addr); 8709 /* only 5 bits are valid in the most significant address byte */ 8710 lba &= 0x1fffff; 8711 num_blocks = cdb->length; 8712 /* 8713 * This is correct according to SBC-2. 
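		 * For example, a READ(6) with a TRANSFER LENGTH byte of 0
		 * requests 256 blocks, while a value of 1 requests a single
		 * block.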
8714 */ 8715 if (num_blocks == 0) 8716 num_blocks = 256; 8717 break; 8718 } 8719 case READ_10: 8720 case WRITE_10: { 8721 struct scsi_rw_10 *cdb; 8722 8723 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8724 if (cdb->byte2 & SRW10_FUA) 8725 flags |= CTL_LLF_FUA; 8726 if (cdb->byte2 & SRW10_DPO) 8727 flags |= CTL_LLF_DPO; 8728 lba = scsi_4btoul(cdb->addr); 8729 num_blocks = scsi_2btoul(cdb->length); 8730 break; 8731 } 8732 case WRITE_VERIFY_10: { 8733 struct scsi_write_verify_10 *cdb; 8734 8735 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8736 flags |= CTL_LLF_FUA; 8737 if (cdb->byte2 & SWV_DPO) 8738 flags |= CTL_LLF_DPO; 8739 lba = scsi_4btoul(cdb->addr); 8740 num_blocks = scsi_2btoul(cdb->length); 8741 break; 8742 } 8743 case READ_12: 8744 case WRITE_12: { 8745 struct scsi_rw_12 *cdb; 8746 8747 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8748 if (cdb->byte2 & SRW12_FUA) 8749 flags |= CTL_LLF_FUA; 8750 if (cdb->byte2 & SRW12_DPO) 8751 flags |= CTL_LLF_DPO; 8752 lba = scsi_4btoul(cdb->addr); 8753 num_blocks = scsi_4btoul(cdb->length); 8754 break; 8755 } 8756 case WRITE_VERIFY_12: { 8757 struct scsi_write_verify_12 *cdb; 8758 8759 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8760 flags |= CTL_LLF_FUA; 8761 if (cdb->byte2 & SWV_DPO) 8762 flags |= CTL_LLF_DPO; 8763 lba = scsi_4btoul(cdb->addr); 8764 num_blocks = scsi_4btoul(cdb->length); 8765 break; 8766 } 8767 case READ_16: 8768 case WRITE_16: { 8769 struct scsi_rw_16 *cdb; 8770 8771 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8772 if (cdb->byte2 & SRW12_FUA) 8773 flags |= CTL_LLF_FUA; 8774 if (cdb->byte2 & SRW12_DPO) 8775 flags |= CTL_LLF_DPO; 8776 lba = scsi_8btou64(cdb->addr); 8777 num_blocks = scsi_4btoul(cdb->length); 8778 break; 8779 } 8780 case WRITE_ATOMIC_16: { 8781 struct scsi_write_atomic_16 *cdb; 8782 8783 if (lun->be_lun->atomicblock == 0) { 8784 ctl_set_invalid_opcode(ctsio); 8785 ctl_done((union ctl_io *)ctsio); 8786 return (CTL_RETVAL_COMPLETE); 8787 } 8788 8789 cdb = (struct scsi_write_atomic_16 *)ctsio->cdb; 8790 if (cdb->byte2 & SRW12_FUA) 8791 flags |= CTL_LLF_FUA; 8792 if (cdb->byte2 & SRW12_DPO) 8793 flags |= CTL_LLF_DPO; 8794 lba = scsi_8btou64(cdb->addr); 8795 num_blocks = scsi_2btoul(cdb->length); 8796 if (num_blocks > lun->be_lun->atomicblock) { 8797 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8798 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8799 /*bit*/ 0); 8800 ctl_done((union ctl_io *)ctsio); 8801 return (CTL_RETVAL_COMPLETE); 8802 } 8803 break; 8804 } 8805 case WRITE_VERIFY_16: { 8806 struct scsi_write_verify_16 *cdb; 8807 8808 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8809 flags |= CTL_LLF_FUA; 8810 if (cdb->byte2 & SWV_DPO) 8811 flags |= CTL_LLF_DPO; 8812 lba = scsi_8btou64(cdb->addr); 8813 num_blocks = scsi_4btoul(cdb->length); 8814 break; 8815 } 8816 default: 8817 /* 8818 * We got a command we don't support. This shouldn't 8819 * happen, commands should be filtered out above us. 8820 */ 8821 ctl_set_invalid_opcode(ctsio); 8822 ctl_done((union ctl_io *)ctsio); 8823 8824 return (CTL_RETVAL_COMPLETE); 8825 break; /* NOTREACHED */ 8826 } 8827 8828 /* 8829 * The first check is to make sure we're in bounds, the second 8830 * check is to catch wrap-around problems. If the lba + num blocks 8831 * is less than the lba, then we've wrapped around and the block 8832 * range is invalid anyway. 
8833 */ 8834 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8835 || ((lba + num_blocks) < lba)) { 8836 ctl_set_lba_out_of_range(ctsio, 8837 MAX(lba, lun->be_lun->maxlba + 1)); 8838 ctl_done((union ctl_io *)ctsio); 8839 return (CTL_RETVAL_COMPLETE); 8840 } 8841 8842 /* 8843 * According to SBC-3, a transfer length of 0 is not an error. 8844 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8845 * translates to 256 blocks for those commands. 8846 */ 8847 if (num_blocks == 0) { 8848 ctl_set_success(ctsio); 8849 ctl_done((union ctl_io *)ctsio); 8850 return (CTL_RETVAL_COMPLETE); 8851 } 8852 8853 /* Set FUA and/or DPO if caches are disabled. */ 8854 if (isread) { 8855 if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0) 8856 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8857 } else { 8858 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8859 flags |= CTL_LLF_FUA; 8860 } 8861 8862 lbalen = (struct ctl_lba_len_flags *) 8863 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8864 lbalen->lba = lba; 8865 lbalen->len = num_blocks; 8866 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8867 8868 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8869 ctsio->kern_rel_offset = 0; 8870 8871 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8872 8873 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8874 return (retval); 8875 } 8876 8877 static int 8878 ctl_cnw_cont(union ctl_io *io) 8879 { 8880 struct ctl_lun *lun = CTL_LUN(io); 8881 struct ctl_scsiio *ctsio; 8882 struct ctl_lba_len_flags *lbalen; 8883 int retval; 8884 8885 ctsio = &io->scsiio; 8886 ctsio->io_hdr.status = CTL_STATUS_NONE; 8887 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8888 lbalen = (struct ctl_lba_len_flags *) 8889 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8890 lbalen->flags &= ~CTL_LLF_COMPARE; 8891 lbalen->flags |= CTL_LLF_WRITE; 8892 8893 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8894 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8895 return (retval); 8896 } 8897 8898 int 8899 ctl_cnw(struct ctl_scsiio *ctsio) 8900 { 8901 struct ctl_lun *lun = CTL_LUN(ctsio); 8902 struct ctl_lba_len_flags *lbalen; 8903 uint64_t lba; 8904 uint32_t num_blocks; 8905 int flags, retval; 8906 8907 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8908 8909 flags = 0; 8910 switch (ctsio->cdb[0]) { 8911 case COMPARE_AND_WRITE: { 8912 struct scsi_compare_and_write *cdb; 8913 8914 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8915 if (cdb->byte2 & SRW10_FUA) 8916 flags |= CTL_LLF_FUA; 8917 if (cdb->byte2 & SRW10_DPO) 8918 flags |= CTL_LLF_DPO; 8919 lba = scsi_8btou64(cdb->addr); 8920 num_blocks = cdb->length; 8921 break; 8922 } 8923 default: 8924 /* 8925 * We got a command we don't support. This shouldn't 8926 * happen, commands should be filtered out above us. 8927 */ 8928 ctl_set_invalid_opcode(ctsio); 8929 ctl_done((union ctl_io *)ctsio); 8930 8931 return (CTL_RETVAL_COMPLETE); 8932 break; /* NOTREACHED */ 8933 } 8934 8935 /* 8936 * The first check is to make sure we're in bounds, the second 8937 * check is to catch wrap-around problems. If the lba + num blocks 8938 * is less than the lba, then we've wrapped around and the block 8939 * range is invalid anyway. 
8940 */ 8941 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8942 || ((lba + num_blocks) < lba)) { 8943 ctl_set_lba_out_of_range(ctsio, 8944 MAX(lba, lun->be_lun->maxlba + 1)); 8945 ctl_done((union ctl_io *)ctsio); 8946 return (CTL_RETVAL_COMPLETE); 8947 } 8948 8949 /* 8950 * According to SBC-3, a transfer length of 0 is not an error. 8951 */ 8952 if (num_blocks == 0) { 8953 ctl_set_success(ctsio); 8954 ctl_done((union ctl_io *)ctsio); 8955 return (CTL_RETVAL_COMPLETE); 8956 } 8957 8958 /* Set FUA if write cache is disabled. */ 8959 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8960 flags |= CTL_LLF_FUA; 8961 8962 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 8963 ctsio->kern_rel_offset = 0; 8964 8965 /* 8966 * Set the IO_CONT flag, so that if this I/O gets passed to 8967 * ctl_data_submit_done(), it'll get passed back to 8968 * ctl_ctl_cnw_cont() for further processing. 8969 */ 8970 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 8971 ctsio->io_cont = ctl_cnw_cont; 8972 8973 lbalen = (struct ctl_lba_len_flags *) 8974 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8975 lbalen->lba = lba; 8976 lbalen->len = num_blocks; 8977 lbalen->flags = CTL_LLF_COMPARE | flags; 8978 8979 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 8980 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8981 return (retval); 8982 } 8983 8984 int 8985 ctl_verify(struct ctl_scsiio *ctsio) 8986 { 8987 struct ctl_lun *lun = CTL_LUN(ctsio); 8988 struct ctl_lba_len_flags *lbalen; 8989 uint64_t lba; 8990 uint32_t num_blocks; 8991 int bytchk, flags; 8992 int retval; 8993 8994 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 8995 8996 bytchk = 0; 8997 flags = CTL_LLF_FUA; 8998 switch (ctsio->cdb[0]) { 8999 case VERIFY_10: { 9000 struct scsi_verify_10 *cdb; 9001 9002 cdb = (struct scsi_verify_10 *)ctsio->cdb; 9003 if (cdb->byte2 & SVFY_BYTCHK) 9004 bytchk = 1; 9005 if (cdb->byte2 & SVFY_DPO) 9006 flags |= CTL_LLF_DPO; 9007 lba = scsi_4btoul(cdb->addr); 9008 num_blocks = scsi_2btoul(cdb->length); 9009 break; 9010 } 9011 case VERIFY_12: { 9012 struct scsi_verify_12 *cdb; 9013 9014 cdb = (struct scsi_verify_12 *)ctsio->cdb; 9015 if (cdb->byte2 & SVFY_BYTCHK) 9016 bytchk = 1; 9017 if (cdb->byte2 & SVFY_DPO) 9018 flags |= CTL_LLF_DPO; 9019 lba = scsi_4btoul(cdb->addr); 9020 num_blocks = scsi_4btoul(cdb->length); 9021 break; 9022 } 9023 case VERIFY_16: { 9024 struct scsi_rw_16 *cdb; 9025 9026 cdb = (struct scsi_rw_16 *)ctsio->cdb; 9027 if (cdb->byte2 & SVFY_BYTCHK) 9028 bytchk = 1; 9029 if (cdb->byte2 & SVFY_DPO) 9030 flags |= CTL_LLF_DPO; 9031 lba = scsi_8btou64(cdb->addr); 9032 num_blocks = scsi_4btoul(cdb->length); 9033 break; 9034 } 9035 default: 9036 /* 9037 * We got a command we don't support. This shouldn't 9038 * happen, commands should be filtered out above us. 9039 */ 9040 ctl_set_invalid_opcode(ctsio); 9041 ctl_done((union ctl_io *)ctsio); 9042 return (CTL_RETVAL_COMPLETE); 9043 } 9044 9045 /* 9046 * The first check is to make sure we're in bounds, the second 9047 * check is to catch wrap-around problems. If the lba + num blocks 9048 * is less than the lba, then we've wrapped around and the block 9049 * range is invalid anyway. 9050 */ 9051 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9052 || ((lba + num_blocks) < lba)) { 9053 ctl_set_lba_out_of_range(ctsio, 9054 MAX(lba, lun->be_lun->maxlba + 1)); 9055 ctl_done((union ctl_io *)ctsio); 9056 return (CTL_RETVAL_COMPLETE); 9057 } 9058 9059 /* 9060 * According to SBC-3, a transfer length of 0 is not an error. 
9061 */ 9062 if (num_blocks == 0) { 9063 ctl_set_success(ctsio); 9064 ctl_done((union ctl_io *)ctsio); 9065 return (CTL_RETVAL_COMPLETE); 9066 } 9067 9068 lbalen = (struct ctl_lba_len_flags *) 9069 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9070 lbalen->lba = lba; 9071 lbalen->len = num_blocks; 9072 if (bytchk) { 9073 lbalen->flags = CTL_LLF_COMPARE | flags; 9074 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9075 } else { 9076 lbalen->flags = CTL_LLF_VERIFY | flags; 9077 ctsio->kern_total_len = 0; 9078 } 9079 ctsio->kern_rel_offset = 0; 9080 9081 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 9082 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9083 return (retval); 9084 } 9085 9086 int 9087 ctl_report_luns(struct ctl_scsiio *ctsio) 9088 { 9089 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9090 struct ctl_port *port = CTL_PORT(ctsio); 9091 struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio); 9092 struct scsi_report_luns *cdb; 9093 struct scsi_report_luns_data *lun_data; 9094 int num_filled, num_luns, num_port_luns, retval; 9095 uint32_t alloc_len, lun_datalen; 9096 uint32_t initidx, targ_lun_id, lun_id; 9097 9098 retval = CTL_RETVAL_COMPLETE; 9099 cdb = (struct scsi_report_luns *)ctsio->cdb; 9100 9101 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9102 9103 num_luns = 0; 9104 num_port_luns = port->lun_map ? port->lun_map_size : ctl_max_luns; 9105 mtx_lock(&softc->ctl_lock); 9106 for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) { 9107 if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX) 9108 num_luns++; 9109 } 9110 mtx_unlock(&softc->ctl_lock); 9111 9112 switch (cdb->select_report) { 9113 case RPL_REPORT_DEFAULT: 9114 case RPL_REPORT_ALL: 9115 case RPL_REPORT_NONSUBSID: 9116 break; 9117 case RPL_REPORT_WELLKNOWN: 9118 case RPL_REPORT_ADMIN: 9119 case RPL_REPORT_CONGLOM: 9120 num_luns = 0; 9121 break; 9122 default: 9123 ctl_set_invalid_field(ctsio, 9124 /*sks_valid*/ 1, 9125 /*command*/ 1, 9126 /*field*/ 2, 9127 /*bit_valid*/ 0, 9128 /*bit*/ 0); 9129 ctl_done((union ctl_io *)ctsio); 9130 return (retval); 9131 break; /* NOTREACHED */ 9132 } 9133 9134 alloc_len = scsi_4btoul(cdb->length); 9135 /* 9136 * The initiator has to allocate at least 16 bytes for this request, 9137 * so he can at least get the header and the first LUN. Otherwise 9138 * we reject the request (per SPC-3 rev 14, section 6.21). 
9139 */ 9140 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9141 sizeof(struct scsi_report_luns_lundata))) { 9142 ctl_set_invalid_field(ctsio, 9143 /*sks_valid*/ 1, 9144 /*command*/ 1, 9145 /*field*/ 6, 9146 /*bit_valid*/ 0, 9147 /*bit*/ 0); 9148 ctl_done((union ctl_io *)ctsio); 9149 return (retval); 9150 } 9151 9152 lun_datalen = sizeof(*lun_data) + 9153 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9154 9155 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9156 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9157 ctsio->kern_sg_entries = 0; 9158 9159 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9160 9161 mtx_lock(&softc->ctl_lock); 9162 for (targ_lun_id = 0, num_filled = 0; 9163 targ_lun_id < num_port_luns && num_filled < num_luns; 9164 targ_lun_id++) { 9165 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 9166 if (lun_id == UINT32_MAX) 9167 continue; 9168 lun = softc->ctl_luns[lun_id]; 9169 if (lun == NULL) 9170 continue; 9171 9172 be64enc(lun_data->luns[num_filled++].lundata, 9173 ctl_encode_lun(targ_lun_id)); 9174 9175 /* 9176 * According to SPC-3, rev 14 section 6.21: 9177 * 9178 * "The execution of a REPORT LUNS command to any valid and 9179 * installed logical unit shall clear the REPORTED LUNS DATA 9180 * HAS CHANGED unit attention condition for all logical 9181 * units of that target with respect to the requesting 9182 * initiator. A valid and installed logical unit is one 9183 * having a PERIPHERAL QUALIFIER of 000b in the standard 9184 * INQUIRY data (see 6.4.2)." 9185 * 9186 * If request_lun is NULL, the LUN this report luns command 9187 * was issued to is either disabled or doesn't exist. In that 9188 * case, we shouldn't clear any pending lun change unit 9189 * attention. 9190 */ 9191 if (request_lun != NULL) { 9192 mtx_lock(&lun->lun_lock); 9193 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 9194 mtx_unlock(&lun->lun_lock); 9195 } 9196 } 9197 mtx_unlock(&softc->ctl_lock); 9198 9199 /* 9200 * It's quite possible that we've returned fewer LUNs than we allocated 9201 * space for. Trim it. 9202 */ 9203 lun_datalen = sizeof(*lun_data) + 9204 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9205 ctsio->kern_rel_offset = 0; 9206 ctsio->kern_sg_entries = 0; 9207 ctsio->kern_data_len = min(lun_datalen, alloc_len); 9208 ctsio->kern_total_len = ctsio->kern_data_len; 9209 9210 /* 9211 * We set this to the actual data length, regardless of how much 9212 * space we actually have to return results. If the user looks at 9213 * this value, he'll know whether or not he allocated enough space 9214 * and reissue the command if necessary. We don't support well 9215 * known logical units, so if the user asks for that, return none. 9216 */ 9217 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9218 9219 /* 9220 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9221 * this request. 
9222 */ 9223 ctl_set_success(ctsio); 9224 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9225 ctsio->be_move_done = ctl_config_move_done; 9226 ctl_datamove((union ctl_io *)ctsio); 9227 return (retval); 9228 } 9229 9230 int 9231 ctl_request_sense(struct ctl_scsiio *ctsio) 9232 { 9233 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9234 struct ctl_lun *lun = CTL_LUN(ctsio); 9235 struct scsi_request_sense *cdb; 9236 struct scsi_sense_data *sense_ptr, *ps; 9237 uint32_t initidx; 9238 int have_error; 9239 u_int sense_len = SSD_FULL_SIZE; 9240 scsi_sense_data_type sense_format; 9241 ctl_ua_type ua_type; 9242 uint8_t asc = 0, ascq = 0; 9243 9244 cdb = (struct scsi_request_sense *)ctsio->cdb; 9245 9246 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9247 9248 /* 9249 * Determine which sense format the user wants. 9250 */ 9251 if (cdb->byte2 & SRS_DESC) 9252 sense_format = SSD_TYPE_DESC; 9253 else 9254 sense_format = SSD_TYPE_FIXED; 9255 9256 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9257 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9258 ctsio->kern_sg_entries = 0; 9259 ctsio->kern_rel_offset = 0; 9260 9261 /* 9262 * struct scsi_sense_data, which is currently set to 256 bytes, is 9263 * larger than the largest allowed value for the length field in the 9264 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9265 */ 9266 ctsio->kern_data_len = cdb->length; 9267 ctsio->kern_total_len = cdb->length; 9268 9269 /* 9270 * If we don't have a LUN, we don't have any pending sense. 9271 */ 9272 if (lun == NULL || 9273 ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 9274 softc->ha_link < CTL_HA_LINK_UNKNOWN)) { 9275 /* "Logical unit not supported" */ 9276 ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format, 9277 /*current_error*/ 1, 9278 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 9279 /*asc*/ 0x25, 9280 /*ascq*/ 0x00, 9281 SSD_ELEM_NONE); 9282 goto send; 9283 } 9284 9285 have_error = 0; 9286 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9287 /* 9288 * Check for pending sense, and then for pending unit attentions. 9289 * Pending sense gets returned first, then pending unit attentions. 9290 */ 9291 mtx_lock(&lun->lun_lock); 9292 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; 9293 if (ps != NULL) 9294 ps += initidx % CTL_MAX_INIT_PER_PORT; 9295 if (ps != NULL && ps->error_code != 0) { 9296 scsi_sense_data_type stored_format; 9297 9298 /* 9299 * Check to see which sense format was used for the stored 9300 * sense data. 9301 */ 9302 stored_format = scsi_sense_type(ps); 9303 9304 /* 9305 * If the user requested a different sense format than the 9306 * one we stored, then we need to convert it to the other 9307 * format. If we're going from descriptor to fixed format 9308 * sense data, we may lose things in translation, depending 9309 * on what options were used. 9310 * 9311 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9312 * for some reason we'll just copy it out as-is. 
9313 */ 9314 if ((stored_format == SSD_TYPE_FIXED) 9315 && (sense_format == SSD_TYPE_DESC)) 9316 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9317 ps, (struct scsi_sense_data_desc *)sense_ptr); 9318 else if ((stored_format == SSD_TYPE_DESC) 9319 && (sense_format == SSD_TYPE_FIXED)) 9320 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9321 ps, (struct scsi_sense_data_fixed *)sense_ptr); 9322 else 9323 memcpy(sense_ptr, ps, sizeof(*sense_ptr)); 9324 9325 ps->error_code = 0; 9326 have_error = 1; 9327 } else { 9328 ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len, 9329 sense_format); 9330 if (ua_type != CTL_UA_NONE) 9331 have_error = 1; 9332 } 9333 if (have_error == 0) { 9334 /* 9335 * Report informational exception if have one and allowed. 9336 */ 9337 if (lun->MODE_IE.mrie != SIEP_MRIE_NO) { 9338 asc = lun->ie_asc; 9339 ascq = lun->ie_ascq; 9340 } 9341 ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format, 9342 /*current_error*/ 1, 9343 /*sense_key*/ SSD_KEY_NO_SENSE, 9344 /*asc*/ asc, 9345 /*ascq*/ ascq, 9346 SSD_ELEM_NONE); 9347 } 9348 mtx_unlock(&lun->lun_lock); 9349 9350 send: 9351 /* 9352 * We report the SCSI status as OK, since the status of the command 9353 * itself is OK. We're reporting sense as parameter data. 9354 */ 9355 ctl_set_success(ctsio); 9356 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9357 ctsio->be_move_done = ctl_config_move_done; 9358 ctl_datamove((union ctl_io *)ctsio); 9359 return (CTL_RETVAL_COMPLETE); 9360 } 9361 9362 int 9363 ctl_tur(struct ctl_scsiio *ctsio) 9364 { 9365 9366 CTL_DEBUG_PRINT(("ctl_tur\n")); 9367 9368 ctl_set_success(ctsio); 9369 ctl_done((union ctl_io *)ctsio); 9370 9371 return (CTL_RETVAL_COMPLETE); 9372 } 9373 9374 /* 9375 * SCSI VPD page 0x00, the Supported VPD Pages page. 9376 */ 9377 static int 9378 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9379 { 9380 struct ctl_lun *lun = CTL_LUN(ctsio); 9381 struct scsi_vpd_supported_pages *pages; 9382 int sup_page_size; 9383 int p; 9384 9385 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9386 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9387 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9388 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9389 ctsio->kern_rel_offset = 0; 9390 ctsio->kern_sg_entries = 0; 9391 ctsio->kern_data_len = min(sup_page_size, alloc_len); 9392 ctsio->kern_total_len = ctsio->kern_data_len; 9393 9394 /* 9395 * The control device is always connected. The disk device, on the 9396 * other hand, may not be online all the time. Need to change this 9397 * to figure out whether the disk device is actually online or not. 
9398 */ 9399 if (lun != NULL) 9400 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9401 lun->be_lun->lun_type; 9402 else 9403 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9404 9405 p = 0; 9406 /* Supported VPD pages */ 9407 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9408 /* Serial Number */ 9409 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9410 /* Device Identification */ 9411 pages->page_list[p++] = SVPD_DEVICE_ID; 9412 /* Extended INQUIRY Data */ 9413 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9414 /* Mode Page Policy */ 9415 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9416 /* SCSI Ports */ 9417 pages->page_list[p++] = SVPD_SCSI_PORTS; 9418 /* Third-party Copy */ 9419 pages->page_list[p++] = SVPD_SCSI_TPC; 9420 /* SCSI Feature Sets */ 9421 pages->page_list[p++] = SVPD_SCSI_SFS; 9422 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9423 /* Block limits */ 9424 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9425 /* Block Device Characteristics */ 9426 pages->page_list[p++] = SVPD_BDC; 9427 /* Logical Block Provisioning */ 9428 pages->page_list[p++] = SVPD_LBP; 9429 } 9430 pages->length = p; 9431 9432 ctl_set_success(ctsio); 9433 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9434 ctsio->be_move_done = ctl_config_move_done; 9435 ctl_datamove((union ctl_io *)ctsio); 9436 return (CTL_RETVAL_COMPLETE); 9437 } 9438 9439 /* 9440 * SCSI VPD page 0x80, the Unit Serial Number page. 9441 */ 9442 static int 9443 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9444 { 9445 struct ctl_lun *lun = CTL_LUN(ctsio); 9446 struct scsi_vpd_unit_serial_number *sn_ptr; 9447 int data_len; 9448 9449 data_len = 4 + CTL_SN_LEN; 9450 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9451 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9452 ctsio->kern_rel_offset = 0; 9453 ctsio->kern_sg_entries = 0; 9454 ctsio->kern_data_len = min(data_len, alloc_len); 9455 ctsio->kern_total_len = ctsio->kern_data_len; 9456 9457 /* 9458 * The control device is always connected. The disk device, on the 9459 * other hand, may not be online all the time. Need to change this 9460 * to figure out whether the disk device is actually online or not. 9461 */ 9462 if (lun != NULL) 9463 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9464 lun->be_lun->lun_type; 9465 else 9466 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9467 9468 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9469 sn_ptr->length = CTL_SN_LEN; 9470 /* 9471 * If we don't have a LUN, we just leave the serial number as 9472 * all spaces. 9473 */ 9474 if (lun != NULL) { 9475 strncpy((char *)sn_ptr->serial_num, 9476 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9477 } else 9478 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9479 9480 ctl_set_success(ctsio); 9481 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9482 ctsio->be_move_done = ctl_config_move_done; 9483 ctl_datamove((union ctl_io *)ctsio); 9484 return (CTL_RETVAL_COMPLETE); 9485 } 9486 9487 /* 9488 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 
 */
static int
ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_vpd_extended_inquiry_data *eid_ptr;
	int data_len;

	data_len = sizeof(struct scsi_vpd_extended_inquiry_data);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA;
	scsi_ulto2b(data_len - 4, eid_ptr->page_length);
	/*
	 * We support head of queue, ordered and simple tags.
	 */
	eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP;
	/*
	 * Volatile cache supported.
	 */
	eid_ptr->flags3 = SVPD_EID_V_SUP;

	/*
	 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit
	 * attention for a particular IT nexus on all LUNs once we report
	 * it to that nexus once.  This bit is required as of SPC-4.
	 */
	eid_ptr->flags4 = SVPD_EID_LUICLR;

	/*
	 * We support revert to defaults (RTD) bit in MODE SELECT.
	 */
	eid_ptr->flags5 = SVPD_EID_RTD_SUP;

	/*
	 * XXX KDM in order to correctly answer this, we would need
	 * information from the SIM to determine how much sense data it
	 * can send.  So this would really be a path inquiry field, most
	 * likely.  This can be set to a maximum of 252 according to SPC-4,
	 * but the hardware may or may not be able to support that much.
	 * 0 just means that the maximum sense data length is not reported.
	 */
	eid_ptr->max_sense_length = 0;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

static int
ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_vpd_mode_page_policy *mpp_ptr;
	int data_len;

	data_len = sizeof(struct scsi_vpd_mode_page_policy) +
	    sizeof(struct scsi_vpd_mode_page_policy_descr);

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY;
	scsi_ulto2b(data_len - 4, mpp_ptr->page_length);
	mpp_ptr->descr[0].page_code = 0x3f;
	mpp_ptr->descr[0].subpage_code = 0xff;
	mpp_ptr->descr[0].policy = SVPD_MPP_SHARED;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * SCSI VPD page 0x83, the Device Identification page.
 */
static int
ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct ctl_port *port = CTL_PORT(ctsio);
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_vpd_device_id *devid_ptr;
	struct scsi_vpd_id_descriptor *desc;
	int data_len, g;
	uint8_t proto;

	data_len = sizeof(struct scsi_vpd_device_id) +
	    sizeof(struct scsi_vpd_id_descriptor) +
	    sizeof(struct scsi_vpd_id_rel_trgt_port_id) +
	    sizeof(struct scsi_vpd_id_descriptor) +
	    sizeof(struct scsi_vpd_id_trgt_port_grp_id);
	if (lun && lun->lun_devid)
		data_len += lun->lun_devid->len;
	if (port && port->port_devid)
		data_len += port->port_devid->len;
	if (port && port->target_devid)
		data_len += port->target_devid->len;

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	devid_ptr->page_code = SVPD_DEVICE_ID;
	scsi_ulto2b(data_len - 4, devid_ptr->length);

	if (port && port->port_type == CTL_PORT_FC)
		proto = SCSI_PROTO_FC << 4;
	else if (port && port->port_type == CTL_PORT_SAS)
		proto = SCSI_PROTO_SAS << 4;
	else if (port && port->port_type == CTL_PORT_ISCSI)
		proto = SCSI_PROTO_ISCSI << 4;
	else
		proto = SCSI_PROTO_SPI << 4;
	desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;

	/*
	 * We're using a LUN association here.  i.e., this device ID is a
	 * per-LUN identifier.
	 */
	if (lun && lun->lun_devid) {
		memcpy(desc, lun->lun_devid->data, lun->lun_devid->len);
		desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
		    lun->lun_devid->len);
	}

	/*
	 * This is for the WWPN which is a port association.
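	 * The designator comes from the frontend port and is copied through
	 * unchanged; its length was already accounted for in data_len above.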
	 */
	if (port && port->port_devid) {
		memcpy(desc, port->port_devid->data, port->port_devid->len);
		desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
		    port->port_devid->len);
	}

	/*
	 * This is for the Relative Target Port (type 4h) identifier
	 */
	desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
	desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
	    SVPD_ID_TYPE_RELTARG;
	desc->length = 4;
	scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]);
	desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
	    sizeof(struct scsi_vpd_id_rel_trgt_port_id));

	/*
	 * This is for the Target Port Group (type 5h) identifier
	 */
	desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
	desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
	    SVPD_ID_TYPE_TPORTGRP;
	desc->length = 4;
	if (softc->is_single ||
	    (port && port->status & CTL_PORT_STATUS_HA_SHARED))
		g = 1;
	else
		g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt;
	scsi_ulto2b(g, &desc->identifier[2]);
	desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
	    sizeof(struct scsi_vpd_id_trgt_port_grp_id));

	/*
	 * This is for the Target identifier
	 */
	if (port && port->target_devid) {
		memcpy(desc, port->target_devid->data, port->target_devid->len);
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

static int
ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_vpd_scsi_ports *sp;
	struct scsi_vpd_port_designation *pd;
	struct scsi_vpd_port_designation_cont *pdc;
	struct ctl_port *port;
	int data_len, num_target_ports, iid_len, id_len;

	num_target_ports = 0;
	iid_len = 0;
	id_len = 0;
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(port, &softc->port_list, links) {
		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
			continue;
		if (lun != NULL &&
		    ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
			continue;
		num_target_ports++;
		if (port->init_devid)
			iid_len += port->init_devid->len;
		if (port->port_devid)
			id_len += port->port_devid->len;
	}
	mtx_unlock(&softc->ctl_lock);

	data_len = sizeof(struct scsi_vpd_scsi_ports) +
	    num_target_ports * (sizeof(struct scsi_vpd_port_designation) +
	    sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len;
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
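	 * Two passes are made over the port list: the first one above only
	 * sizes the buffer, the second one below fills in the designations.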
	 */
	if (lun != NULL)
		sp->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	sp->page_code = SVPD_SCSI_PORTS;
	scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports),
	    sp->page_length);
	pd = &sp->design[0];

	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(port, &softc->port_list, links) {
		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
			continue;
		if (lun != NULL &&
		    ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
			continue;
		scsi_ulto2b(port->targ_port, pd->relative_port_id);
		if (port->init_devid) {
			iid_len = port->init_devid->len;
			memcpy(pd->initiator_transportid,
			    port->init_devid->data, port->init_devid->len);
		} else
			iid_len = 0;
		scsi_ulto2b(iid_len, pd->initiator_transportid_length);
		pdc = (struct scsi_vpd_port_designation_cont *)
		    (&pd->initiator_transportid[iid_len]);
		if (port->port_devid) {
			id_len = port->port_devid->len;
			memcpy(pdc->target_port_descriptors,
			    port->port_devid->data, port->port_devid->len);
		} else
			id_len = 0;
		scsi_ulto2b(id_len, pdc->target_port_descriptors_length);
		pd = (struct scsi_vpd_port_designation *)
		    ((uint8_t *)pdc->target_port_descriptors + id_len);
	}
	mtx_unlock(&softc->ctl_lock);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

static int
ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_vpd_sfs *sfs_ptr;
	int sfs_page_size, n;

	sfs_page_size = sizeof(*sfs_ptr) + 5 * 2;
	ctsio->kern_data_ptr = malloc(sfs_page_size, M_CTL, M_WAITOK | M_ZERO);
	sfs_ptr = (struct scsi_vpd_sfs *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_len = min(sfs_page_size, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		sfs_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		sfs_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	sfs_ptr->page_code = SVPD_SCSI_SFS;
	n = 0;
	/* Discovery 2016 */
	scsi_ulto2b(0x0001, &sfs_ptr->codes[2 * n++]);
	if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) {
		/* SBC Base 2016 */
		scsi_ulto2b(0x0101, &sfs_ptr->codes[2 * n++]);
		/* SBC Base 2010 */
		scsi_ulto2b(0x0102, &sfs_ptr->codes[2 * n++]);
		if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
			/* Basic Provisioning 2016 */
			scsi_ulto2b(0x0103, &sfs_ptr->codes[2 * n++]);
		}
		/* Drive Maintenance 2016 */
		//scsi_ulto2b(0x0104, &sfs_ptr->codes[2 * n++]);
	}
	scsi_ulto2b(4 + 2 * n, sfs_ptr->page_length);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

static int
ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_vpd_block_limits *bl_ptr;
	const char *val;
	uint64_t ival;

	ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO);
	bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
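	 * The UNMAP and WRITE SAME limits reported below may be overridden
	 * per LUN with the "unmap_max_lba", "unmap_max_descr" and
	 * "write_same_max_lba" backend options.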
	 */
	if (lun != NULL)
		bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	bl_ptr->page_code = SVPD_BLOCK_LIMITS;
	scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length);
	bl_ptr->max_cmp_write_len = 0xff;
	scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len);
	if (lun != NULL) {
		scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len);
		if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
			ival = 0xffffffff;
			val = dnvlist_get_string(lun->be_lun->options,
			    "unmap_max_lba", NULL);
			if (val != NULL)
				ctl_expand_number(val, &ival);
			scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt);
			ival = 0xffffffff;
			val = dnvlist_get_string(lun->be_lun->options,
			    "unmap_max_descr", NULL);
			if (val != NULL)
				ctl_expand_number(val, &ival);
			scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt);
			if (lun->be_lun->ublockexp != 0) {
				scsi_ulto4b((1 << lun->be_lun->ublockexp),
				    bl_ptr->opt_unmap_grain);
				scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff,
				    bl_ptr->unmap_grain_align);
			}
		}
		scsi_ulto4b(lun->be_lun->atomicblock,
		    bl_ptr->max_atomic_transfer_length);
		scsi_ulto4b(0, bl_ptr->atomic_alignment);
		scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity);
		scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary);
		scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size);
		ival = UINT64_MAX;
		val = dnvlist_get_string(lun->be_lun->options,
		    "write_same_max_lba", NULL);
		if (val != NULL)
			ctl_expand_number(val, &ival);
		scsi_u64to8b(ival, bl_ptr->max_write_same_length);
		if (lun->be_lun->maxlba + 1 > ival)
			bl_ptr->flags |= SVPD_BL_WSNZ;
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

static int
ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_vpd_block_device_characteristics *bdc_ptr;
	const char *value;
	u_int i;

	ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO);
	bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
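	 * The rotation rate and form factor reported below come from the
	 * "rpm" and "formfactor" LUN options when present, otherwise the
	 * defaults are used.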
	 */
	if (lun != NULL)
		bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	bdc_ptr->page_code = SVPD_BDC;
	scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length);
	if (lun != NULL &&
	    (value = dnvlist_get_string(lun->be_lun->options, "rpm", NULL)) != NULL)
		i = strtol(value, NULL, 0);
	else
		i = CTL_DEFAULT_ROTATION_RATE;
	scsi_ulto2b(i, bdc_ptr->medium_rotation_rate);
	if (lun != NULL &&
	    (value = dnvlist_get_string(lun->be_lun->options, "formfactor", NULL)) != NULL)
		i = strtol(value, NULL, 0);
	else
		i = 0;
	bdc_ptr->wab_wac_ff = (i & 0x0f);
	bdc_ptr->flags = SVPD_RBWZ | SVPD_FUAB | SVPD_VBULS;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

static int
ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_vpd_logical_block_prov *lbp_ptr;
	const char *value;

	ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO);
	lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	lbp_ptr->page_code = SVPD_LBP;
	scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length);
	lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT;
	if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
		lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 |
		    SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP;
		value = dnvlist_get_string(lun->be_lun->options,
		    "provisioning_type", NULL);
		if (value != NULL) {
			if (strcmp(value, "resource") == 0)
				lbp_ptr->prov_type = SVPD_LBP_RESOURCE;
			else if (strcmp(value, "thin") == 0)
				lbp_ptr->prov_type = SVPD_LBP_THIN;
		} else
			lbp_ptr->prov_type = SVPD_LBP_THIN;
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * INQUIRY with the EVPD bit set.
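 * Dispatch on the requested VPD page code; pages that only make sense
 * for direct access LUNs are rejected for other device types.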
 */
static int
ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_inquiry *cdb;
	int alloc_len, retval;

	cdb = (struct scsi_inquiry *)ctsio->cdb;
	alloc_len = scsi_2btoul(cdb->length);

	switch (cdb->page_code) {
	case SVPD_SUPPORTED_PAGES:
		retval = ctl_inquiry_evpd_supported(ctsio, alloc_len);
		break;
	case SVPD_UNIT_SERIAL_NUMBER:
		retval = ctl_inquiry_evpd_serial(ctsio, alloc_len);
		break;
	case SVPD_DEVICE_ID:
		retval = ctl_inquiry_evpd_devid(ctsio, alloc_len);
		break;
	case SVPD_EXTENDED_INQUIRY_DATA:
		retval = ctl_inquiry_evpd_eid(ctsio, alloc_len);
		break;
	case SVPD_MODE_PAGE_POLICY:
		retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len);
		break;
	case SVPD_SCSI_PORTS:
		retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len);
		break;
	case SVPD_SCSI_TPC:
		retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len);
		break;
	case SVPD_SCSI_SFS:
		retval = ctl_inquiry_evpd_sfs(ctsio, alloc_len);
		break;
	case SVPD_BLOCK_LIMITS:
		if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
			goto err;
		retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len);
		break;
	case SVPD_BDC:
		if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
			goto err;
		retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len);
		break;
	case SVPD_LBP:
		if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
			goto err;
		retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len);
		break;
	default:
err:
		ctl_set_invalid_field(ctsio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ 2,
		    /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

/*
 * Standard INQUIRY data.
 */
static int
ctl_inquiry_std(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct ctl_port *port = CTL_PORT(ctsio);
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_inquiry_data *inq_ptr;
	struct scsi_inquiry *cdb;
	const char *val;
	uint32_t alloc_len, data_len;
	ctl_port_type port_type;

	port_type = port->port_type;
	if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
		port_type = CTL_PORT_SCSI;

	cdb = (struct scsi_inquiry *)ctsio->cdb;
	alloc_len = scsi_2btoul(cdb->length);

	/*
	 * We malloc the full inquiry data size here and fill it
	 * in.  If the user only asks for less, we'll give him
	 * that much.
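	 * (The buffer covers the fields up to vendor_specific1; the length
	 * actually transferred is clamped to the allocation length below.)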
	 */
	data_len = offsetof(struct scsi_inquiry_data, vendor_specific1);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	if (lun != NULL) {
		if ((lun->flags & CTL_LUN_PRIMARY_SC) ||
		    softc->ha_link >= CTL_HA_LINK_UNKNOWN) {
			inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
			    lun->be_lun->lun_type;
		} else {
			inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) |
			    lun->be_lun->lun_type;
		}
		if (lun->flags & CTL_LUN_REMOVABLE)
			inq_ptr->dev_qual2 |= SID_RMB;
	} else
		inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;

	/* RMB in byte 2 is 0 */
	inq_ptr->version = SCSI_REV_SPC5;

	/*
	 * According to SAM-3, even if a device only supports a single
	 * level of LUN addressing, it should still set the HISUP bit:
	 *
	 * 4.9.1 Logical unit numbers overview
	 *
	 * All logical unit number formats described in this standard are
	 * hierarchical in structure even when only a single level in that
	 * hierarchy is used.  The HISUP bit shall be set to one in the
	 * standard INQUIRY data (see SPC-2) when any logical unit number
	 * format described in this standard is used.  Non-hierarchical
	 * formats are outside the scope of this standard.
	 *
	 * Therefore we set the HiSup bit here.
	 *
	 * The response format is 2, per SPC-3.
	 */
	inq_ptr->response_format = SID_HiSup | 2;

	inq_ptr->additional_length = data_len -
	    (offsetof(struct scsi_inquiry_data, additional_length) + 1);
	CTL_DEBUG_PRINT(("additional_length = %d\n",
	    inq_ptr->additional_length));

	inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT;
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->spc2_flags = SPC2_SID_ADDR16;
	inq_ptr->spc2_flags |= SPC2_SID_MultiP;
	inq_ptr->flags = SID_CmdQue;
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->flags |= SID_WBus16 | SID_Sync;

	/*
	 * Per SPC-3, unused bytes in ASCII strings are filled with spaces.
	 * We have 8 bytes for the vendor name, and 16 bytes for the device
	 * name and 4 bytes for the revision.
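	 * When an override is supplied, the field is space padded first and
	 * then overwritten with at most the field width, so it is
	 * deliberately not NUL terminated.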
	 */
	if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options,
	    "vendor", NULL)) == NULL) {
		strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
	} else {
		memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor));
		strncpy(inq_ptr->vendor, val,
		    min(sizeof(inq_ptr->vendor), strlen(val)));
	}
	if (lun == NULL) {
		strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
		    sizeof(inq_ptr->product));
	} else if ((val = dnvlist_get_string(lun->be_lun->options, "product",
	    NULL)) == NULL) {
		switch (lun->be_lun->lun_type) {
		case T_DIRECT:
			strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		case T_PROCESSOR:
			strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		case T_CDROM:
			strncpy(inq_ptr->product, CTL_CDROM_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		default:
			strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		}
	} else {
		memset(inq_ptr->product, ' ', sizeof(inq_ptr->product));
		strncpy(inq_ptr->product, val,
		    min(sizeof(inq_ptr->product), strlen(val)));
	}

	/*
	 * XXX make this a macro somewhere so it automatically gets
	 * incremented when we make changes.
	 */
	if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options,
	    "revision", NULL)) == NULL) {
		strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
	} else {
		memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision));
		strncpy(inq_ptr->revision, val,
		    min(sizeof(inq_ptr->revision), strlen(val)));
	}

	/*
	 * For parallel SCSI, we support double transition and single
	 * transition clocking.  We also support QAS (Quick Arbitration
	 * and Selection) and Information Unit transfers on both the
	 * control and array devices.
	 */
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS |
		    SID_SPI_IUS;

	/* SAM-6 (no version claimed) */
	scsi_ulto2b(0x00C0, inq_ptr->version1);
	/* SPC-5 (no version claimed) */
	scsi_ulto2b(0x05C0, inq_ptr->version2);
	if (port_type == CTL_PORT_FC) {
		/* FCP-2 ANSI INCITS.350:2003 */
		scsi_ulto2b(0x0917, inq_ptr->version3);
	} else if (port_type == CTL_PORT_SCSI) {
		/* SPI-4 ANSI INCITS.362:200x */
		scsi_ulto2b(0x0B56, inq_ptr->version3);
	} else if (port_type == CTL_PORT_ISCSI) {
		/* iSCSI (no version claimed) */
		scsi_ulto2b(0x0960, inq_ptr->version3);
	} else if (port_type == CTL_PORT_SAS) {
		/* SAS (no version claimed) */
		scsi_ulto2b(0x0BE0, inq_ptr->version3);
	} else if (port_type == CTL_PORT_UMASS) {
		/* USB Mass Storage Class Bulk-Only Transport, Revision 1.0 */
		scsi_ulto2b(0x1730, inq_ptr->version3);
	}

	if (lun == NULL) {
		/* SBC-4 (no version claimed) */
		scsi_ulto2b(0x0600, inq_ptr->version4);
	} else {
		switch (lun->be_lun->lun_type) {
		case T_DIRECT:
			/* SBC-4 (no version claimed) */
			scsi_ulto2b(0x0600, inq_ptr->version4);
			break;
		case T_PROCESSOR:
			break;
		case T_CDROM:
			/* MMC-6 (no version claimed) */
			scsi_ulto2b(0x04E0, inq_ptr->version4);
			break;
		default:
			break;
		}
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

int
ctl_inquiry(struct ctl_scsiio *ctsio)
{
	struct scsi_inquiry *cdb;
	int retval;

	CTL_DEBUG_PRINT(("ctl_inquiry\n"));

	cdb = (struct scsi_inquiry *)ctsio->cdb;
	if (cdb->byte2 & SI_EVPD)
		retval = ctl_inquiry_evpd(ctsio);
	else if (cdb->page_code == 0)
		retval = ctl_inquiry_std(ctsio);
	else {
		ctl_set_invalid_field(ctsio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ 2,
		    /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	return (retval);
}

int
ctl_get_config(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_get_config_header *hdr;
	struct scsi_get_config_feature *feature;
	struct scsi_get_config *cdb;
	uint32_t alloc_len, data_len;
	int rt, starting;

	cdb = (struct scsi_get_config *)ctsio->cdb;
	rt = (cdb->rt & SGC_RT_MASK);
	starting = scsi_2btoul(cdb->starting_feature);
	alloc_len = scsi_2btoul(cdb->length);

	data_len = sizeof(struct scsi_get_config_header) +
	    sizeof(struct scsi_get_config_feature) + 8 +
	    sizeof(struct scsi_get_config_feature) + 8 +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 8 +
	    sizeof(struct scsi_get_config_feature) +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 4;
	ctsio->kern_data_ptr = malloc(data_len, M_CTL,
	    M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;

	hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr;
	if (lun->flags & CTL_LUN_NO_MEDIA)
		scsi_ulto2b(0x0000, hdr->current_profile);
	else
		scsi_ulto2b(0x0010, hdr->current_profile);
	feature = (struct scsi_get_config_feature *)(hdr + 1);

	if (starting > 0x003b)
		goto done;
	if (starting > 0x003a)
		goto f3b;
	if (starting > 0x002b)
		goto f3a;
	if (starting > 0x002a)
		goto f2b;
	if (starting > 0x001f)
		goto f2a;
	if (starting > 0x001e)
		goto f1f;
	if (starting > 0x001d)
		goto f1e;
	if (starting > 0x0010)
		goto f1d;
	if (starting > 0x0003)
		goto f10;
	if (starting > 0x0002)
		goto f3;
	if (starting > 0x0001)
		goto f2;
	if (starting > 0x0000)
		goto f1;

	/* Profile List */
	scsi_ulto2b(0x0000, feature->feature_code);
	feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT;
	feature->add_length = 8;
	scsi_ulto2b(0x0008, &feature->feature_data[0]);	/* CD-ROM */
	feature->feature_data[2] = 0x00;
	scsi_ulto2b(0x0010, &feature->feature_data[4]);	/* DVD-ROM */
	feature->feature_data[6] = 0x01;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f1:	/* Core */
	scsi_ulto2b(0x0001, feature->feature_code);
	feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT;
	feature->add_length = 8;
	scsi_ulto4b(0x00000000, &feature->feature_data[0]);
	feature->feature_data[4] = 0x03;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f2:	/* Morphing */
	scsi_ulto2b(0x0002, feature->feature_code);
	feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x02;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f3:	/* Removable Medium */
	scsi_ulto2b(0x0003, feature->feature_code);
	feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x39;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

	if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA))
		goto done;

f10:	/* Random Read */
	scsi_ulto2b(0x0010, feature->feature_code);
	feature->flags = 0x00;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 8;
	scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]);
	scsi_ulto2b(1, &feature->feature_data[4]);
	feature->feature_data[6] = 0x00;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f1d:	/* Multi-Read */
	scsi_ulto2b(0x001D, feature->feature_code);
	feature->flags = 0x00;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 0;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f1e:	/* CD Read */
	scsi_ulto2b(0x001E, feature->feature_code);
	feature->flags = 0x00;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x00;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f1f:	/* DVD Read */
	scsi_ulto2b(0x001F, feature->feature_code);
	feature->flags = 0x08;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x01;
	feature->feature_data[2] = 0x03;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f2a:	/* DVD+RW */
	scsi_ulto2b(0x002A, feature->feature_code);
	feature->flags = 0x04;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x00;
	feature->feature_data[1] = 0x00;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f2b:	/* DVD+R */
	scsi_ulto2b(0x002B, feature->feature_code);
	feature->flags = 0x00;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x00;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f3a:	/* DVD+RW Dual Layer */
	scsi_ulto2b(0x003A, feature->feature_code);
	feature->flags = 0x00;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x00;
	feature->feature_data[1] = 0x00;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f3b:	/* DVD+R Dual Layer */
	scsi_ulto2b(0x003B, feature->feature_code);
	feature->flags = 0x00;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x00;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

done:
	data_len = (uint8_t *)feature - (uint8_t *)hdr;
	if (rt == SGC_RT_SPECIFIC && data_len > 4) {
		feature = (struct scsi_get_config_feature *)(hdr + 1);
		if (scsi_2btoul(feature->feature_code) == starting)
			feature = (struct scsi_get_config_feature *)
			    &feature->feature_data[feature->add_length];
		data_len = (uint8_t *)feature - (uint8_t *)hdr;
	}
	scsi_ulto4b(data_len - 4, hdr->data_length);
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

int
ctl_get_event_status(struct ctl_scsiio *ctsio)
{
	struct scsi_get_event_status_header *hdr;
	struct scsi_get_event_status *cdb;
	uint32_t alloc_len, data_len;

	cdb = (struct scsi_get_event_status *)ctsio->cdb;
	if ((cdb->byte2 & SGESN_POLLED) == 0) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}
	alloc_len = scsi_2btoul(cdb->length);

	data_len = sizeof(struct scsi_get_event_status_header);
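	/*
	 * No event descriptors are reported, only the header with the
	 * "No Event Available" class, so the header alone is allocated.
	 */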
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr;
	scsi_ulto2b(0, hdr->descr_length);
	hdr->nea_class = SGESN_NEA;
	hdr->supported_class = 0;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

int
ctl_mechanism_status(struct ctl_scsiio *ctsio)
{
	struct scsi_mechanism_status_header *hdr;
	struct scsi_mechanism_status *cdb;
	uint32_t alloc_len, data_len;

	cdb = (struct scsi_mechanism_status *)ctsio->cdb;
	alloc_len = scsi_2btoul(cdb->length);

	data_len = sizeof(struct scsi_mechanism_status_header);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr;
	hdr->state1 = 0x00;
	hdr->state2 = 0xe0;
	scsi_ulto3b(0, hdr->lba);
	hdr->slots_num = 0;
	scsi_ulto2b(0, hdr->slots_length);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

static void
ctl_ultomsf(uint32_t lba, uint8_t *buf)
{

	lba += 150;
	buf[0] = 0;
	buf[1] = bin2bcd((lba / 75) / 60);
	buf[2] = bin2bcd((lba / 75) % 60);
	buf[3] = bin2bcd(lba % 75);
}

int
ctl_read_toc(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_read_toc_hdr *hdr;
	struct scsi_read_toc_type01_descr *descr;
	struct scsi_read_toc *cdb;
	uint32_t alloc_len, data_len;
	int format, msf;

	cdb = (struct scsi_read_toc *)ctsio->cdb;
	msf = (cdb->byte2 & CD_MSF) != 0;
	format = cdb->format;
	alloc_len = scsi_2btoul(cdb->data_len);

	data_len = sizeof(struct scsi_read_toc_hdr);
	if (format == 0)
		data_len += 2 * sizeof(struct scsi_read_toc_type01_descr);
	else
		data_len += sizeof(struct scsi_read_toc_type01_descr);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr;
	if (format == 0) {
		scsi_ulto2b(0x12, hdr->data_length);
		hdr->first = 1;
		hdr->last = 1;
		descr = (struct scsi_read_toc_type01_descr *)(hdr + 1);
		descr->addr_ctl = 0x14;
		descr->track_number = 1;
		if (msf)
			ctl_ultomsf(0, descr->track_start);
		else
			scsi_ulto4b(0, descr->track_start);
		descr++;
		descr->addr_ctl = 0x14;
		descr->track_number = 0xaa;
		if (msf)
			ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start);
		else
			scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start);
	} else {
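		/*
		 * Non-zero formats get a single descriptor describing the
		 * start of the only track.
		 */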
		scsi_ulto2b(0x0a, hdr->data_length);
		hdr->first = 1;
		hdr->last = 1;
		descr = (struct scsi_read_toc_type01_descr *)(hdr + 1);
		descr->addr_ctl = 0x14;
		descr->track_number = 1;
		if (msf)
			ctl_ultomsf(0, descr->track_start);
		else
			scsi_ulto4b(0, descr->track_start);
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * For known CDB types, parse the LBA and length.
 */
static int
ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len)
{
	if (io->io_hdr.io_type != CTL_IO_SCSI)
		return (1);

	switch (io->scsiio.cdb[0]) {
	case COMPARE_AND_WRITE: {
		struct scsi_compare_and_write *cdb;

		cdb = (struct scsi_compare_and_write *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = cdb->length;
		break;
	}
	case READ_6:
	case WRITE_6: {
		struct scsi_rw_6 *cdb;

		cdb = (struct scsi_rw_6 *)io->scsiio.cdb;

		*lba = scsi_3btoul(cdb->addr);
		/* only 5 bits are valid in the most significant address byte */
		*lba &= 0x1fffff;
		*len = cdb->length;
		break;
	}
	case READ_10:
	case WRITE_10: {
		struct scsi_rw_10 *cdb;

		cdb = (struct scsi_rw_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_10: {
		struct scsi_write_verify_10 *cdb;

		cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case READ_12:
	case WRITE_12: {
		struct scsi_rw_12 *cdb;

		cdb = (struct scsi_rw_12 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_12: {
		struct scsi_write_verify_12 *cdb;

		cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case READ_16:
	case WRITE_16: {
		struct scsi_rw_16 *cdb;

		cdb = (struct scsi_rw_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_ATOMIC_16: {
		struct scsi_write_atomic_16 *cdb;

		cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_16: {
		struct scsi_write_verify_16 *cdb;

		cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_SAME_10: {
		struct scsi_write_same_10 *cdb;

		cdb = (struct scsi_write_same_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_SAME_16: {
		struct scsi_write_same_16 *cdb;

		cdb = (struct scsi_write_same_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_10: {
		struct scsi_verify_10 *cdb;

		cdb = (struct scsi_verify_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case VERIFY_12: {
		struct scsi_verify_12 *cdb;

		cdb = (struct scsi_verify_12 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_16: {
		struct scsi_verify_16 *cdb;

		cdb = (struct scsi_verify_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case UNMAP: {
		*lba = 0;
		*len = UINT64_MAX;
		break;
	}
	case SERVICE_ACTION_IN: {	/* GET LBA STATUS */
		struct scsi_get_lba_status *cdb;

		cdb = (struct scsi_get_lba_status *)io->scsiio.cdb;
		*lba = scsi_8btou64(cdb->addr);
		*len = UINT32_MAX;
		break;
	}
	default:
		return (1);
		break; /* NOTREACHED */
	}

	return (0);
}

static ctl_action
ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2,
    bool seq)
{
	uint64_t endlba1, endlba2;

	endlba1 = lba1 + len1 - (seq ? 0 : 1);
	endlba2 = lba2 + len2 - 1;

	if ((endlba1 < lba2) || (endlba2 < lba1))
		return (CTL_ACTION_PASS);
	else
		return (CTL_ACTION_BLOCK);
}

static int
ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2)
{
	struct ctl_ptr_len_flags *ptrlen;
	struct scsi_unmap_desc *buf, *end, *range;
	uint64_t lba;
	uint32_t len;

	/* If not UNMAP -- go other way. */
	if (io->io_hdr.io_type != CTL_IO_SCSI ||
	    io->scsiio.cdb[0] != UNMAP)
		return (CTL_ACTION_ERROR);

	/* If UNMAP without data -- block and wait for data. */
	ptrlen = (struct ctl_ptr_len_flags *)
	    &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 ||
	    ptrlen->ptr == NULL)
		return (CTL_ACTION_BLOCK);

	/* UNMAP with data -- check for collision.
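	 * Walk every descriptor in the UNMAP parameter list and block if any
	 * of the ranges overlaps the extent [lba2, lba2 + len2).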
	 */
	buf = (struct scsi_unmap_desc *)ptrlen->ptr;
	end = buf + ptrlen->len / sizeof(*buf);
	for (range = buf; range < end; range++) {
		lba = scsi_8btou64(range->lba);
		len = scsi_4btoul(range->length);
		if ((lba < lba2 + len2) && (lba + len > lba2))
			return (CTL_ACTION_BLOCK);
	}
	return (CTL_ACTION_PASS);
}

static ctl_action
ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq)
{
	uint64_t lba1, lba2;
	uint64_t len1, len2;
	int retval;

	if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
		return (CTL_ACTION_ERROR);

	retval = ctl_extent_check_unmap(io1, lba2, len2);
	if (retval != CTL_ACTION_ERROR)
		return (retval);

	if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
		return (CTL_ACTION_ERROR);

	if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)
		seq = FALSE;
	return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq));
}

static ctl_action
ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2)
{
	uint64_t lba1, lba2;
	uint64_t len1, len2;

	if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)
		return (CTL_ACTION_PASS);
	if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
		return (CTL_ACTION_ERROR);
	if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
		return (CTL_ACTION_ERROR);

	if (lba1 + len1 == lba2)
		return (CTL_ACTION_BLOCK);
	return (CTL_ACTION_PASS);
}

static ctl_action
ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io *ooa_io)
{
	const struct ctl_cmd_entry *pending_entry, *ooa_entry;
	const ctl_serialize_action *serialize_row;

	/*
	 * Aborted commands are not going to be executed and may even
	 * not report completion, so we don't care about their order.
	 * Let them complete ASAP to clean the OOA queue.
	 */
	if (pending_io->io_hdr.flags & CTL_FLAG_ABORT)
		return (CTL_ACTION_SKIP);

	/*
	 * The initiator attempted multiple untagged commands at the same
	 * time.  Can't do that.
	 */
	if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	 && ((pending_io->io_hdr.nexus.targ_port ==
	      ooa_io->io_hdr.nexus.targ_port)
	  && (pending_io->io_hdr.nexus.initid ==
	      ooa_io->io_hdr.nexus.initid))
	 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
	      CTL_FLAG_STATUS_SENT)) == 0))
		return (CTL_ACTION_OVERLAP);

	/*
	 * The initiator attempted to send multiple tagged commands with
	 * the same ID.  (It's fine if different initiators have the same
	 * tag ID.)
	 *
	 * Even if all of those conditions are true, we don't kill the I/O
	 * if the command ahead of us has been aborted.  We won't end up
	 * sending it to the FETD, and it's perfectly legal to resend a
	 * command with the same tag number as long as the previous
	 * instance of this tag number has been aborted somehow.
	 */
	if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
	 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
	 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num)
	 && ((pending_io->io_hdr.nexus.targ_port ==
	      ooa_io->io_hdr.nexus.targ_port)
	  && (pending_io->io_hdr.nexus.initid ==
	      ooa_io->io_hdr.nexus.initid))
	 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
	      CTL_FLAG_STATUS_SENT)) == 0))
		return (CTL_ACTION_OVERLAP_TAG);

	/*
	 * If we get a head of queue tag, SAM-3 says that we should
	 * immediately execute it.
	 *
	 * What happens if this command would normally block for some other
	 * reason?  e.g. a request sense with a head of queue tag
	 * immediately after a write.  Normally that would block, but this
	 * will result in its getting executed immediately...
	 *
	 * We currently return "pass" instead of "skip", so we'll end up
	 * going through the rest of the queue to check for overlapped tags.
	 *
	 * XXX KDM check for other types of blockage first??
	 */
	if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
		return (CTL_ACTION_PASS);

	/*
	 * Ordered tags have to block until all items ahead of them
	 * have completed.  If we get called with an ordered tag, we always
	 * block, if something else is ahead of us in the queue.
	 */
	if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED)
		return (CTL_ACTION_BLOCK);

	/*
	 * Simple tags get blocked until all head of queue and ordered tags
	 * ahead of them have completed.  I'm lumping untagged commands in
	 * with simple tags here.  XXX KDM is that the right thing to do?
	 */
	if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	  || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE))
	 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
	  || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
		return (CTL_ACTION_BLOCK);

	pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL);
	KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT,
	    ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p",
	     __func__, pending_entry->seridx, pending_io->scsiio.cdb[0],
	     pending_io->scsiio.cdb[1], pending_io));
	ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL);
	if (ooa_entry->seridx == CTL_SERIDX_INVLD)
		return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */
	KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT,
	    ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p",
	     __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0],
	     ooa_io->scsiio.cdb[1], ooa_io));

	serialize_row = ctl_serialize_table[ooa_entry->seridx];

	switch (serialize_row[pending_entry->seridx]) {
	case CTL_SER_BLOCK:
		return (CTL_ACTION_BLOCK);
	case CTL_SER_EXTENT:
		return (ctl_extent_check(ooa_io, pending_io,
		    (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
	case CTL_SER_EXTENTOPT:
		if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) !=
		    SCP_QUEUE_ALG_UNRESTRICTED)
			return (ctl_extent_check(ooa_io, pending_io,
			    (lun->be_lun &&
			    lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
		return (CTL_ACTION_PASS);
	case CTL_SER_EXTENTSEQ:
		if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF)
			return (ctl_extent_check_seq(ooa_io, pending_io));
		return (CTL_ACTION_PASS);
	case CTL_SER_PASS:
		return (CTL_ACTION_PASS);
	case CTL_SER_BLOCKOPT:
		if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) !=
		    SCP_QUEUE_ALG_UNRESTRICTED)
			return (CTL_ACTION_BLOCK);
		return (CTL_ACTION_PASS);
	case CTL_SER_SKIP:
		return (CTL_ACTION_SKIP);
	default:
		panic("%s: Invalid serialization value %d for %d => %d",
		    __func__, serialize_row[pending_entry->seridx],
		    pending_entry->seridx, ooa_entry->seridx);
	}

	return (CTL_ACTION_ERROR);
}

/*
 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue.
 * Assumptions:
 * - pending_io is generally either incoming, or on the blocked queue
 * - starting I/O is the I/O we want to start the check with.
 */
static ctl_action
ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io **starting_io)
{
	union ctl_io *ooa_io;
	ctl_action action;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run back along the OOA queue, starting with the current
	 * blocked I/O and going through every I/O before it on the
	 * queue.  If starting_io is NULL, we'll just end up returning
	 * CTL_ACTION_PASS.
	 */
	for (ooa_io = *starting_io; ooa_io != NULL;
	     ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq,
	     ooa_links)) {
		action = ctl_check_for_blockage(lun, pending_io, ooa_io);
		if (action != CTL_ACTION_PASS) {
			*starting_io = ooa_io;
			return (action);
		}
	}

	*starting_io = NULL;
	return (CTL_ACTION_PASS);
}

/*
 * Try to unblock the specified I/O.
 *
 * The skip parameter allows explicitly skipping the present blocker of the
 * I/O, starting from the previous one on the OOA queue.  It can be used when
 * we know for sure that the blocker I/O no longer counts.
 */
static void
ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io, bool skip)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_io *bio, *obio;
	const struct ctl_cmd_entry *entry;
	union ctl_ha_msg msg_info;
	ctl_action action;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	if (io->io_hdr.blocker == NULL)
		return;

	obio = bio = io->io_hdr.blocker;
	if (skip)
		bio = (union ctl_io *)TAILQ_PREV(&bio->io_hdr, ctl_ooaq,
		    ooa_links);
	action = ctl_check_ooa(lun, io, &bio);
	if (action == CTL_ACTION_BLOCK) {
		/* Still blocked, but may be by different I/O now. */
		if (bio != obio) {
			TAILQ_REMOVE(&obio->io_hdr.blocked_queue,
			    &io->io_hdr, blocked_links);
			TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue,
			    &io->io_hdr, blocked_links);
			io->io_hdr.blocker = bio;
		}
		return;
	}

	/* No longer blocked, one way or another. */
	TAILQ_REMOVE(&obio->io_hdr.blocked_queue, &io->io_hdr, blocked_links);
	io->io_hdr.blocker = NULL;

	switch (action) {
	case CTL_ACTION_OVERLAP:
		ctl_set_overlapped_cmd(&io->scsiio);
		goto error;
	case CTL_ACTION_OVERLAP_TAG:
		ctl_set_overlapped_tag(&io->scsiio,
		    io->scsiio.tag_num & 0xff);
		goto error;
	case CTL_ACTION_PASS:
	case CTL_ACTION_SKIP:

		/* Serializing commands from the other SC retire there.
		 */
		if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) &&
		    (softc->ha_mode != CTL_HA_MODE_XFER)) {
			io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
			msg_info.hdr.original_sc = io->io_hdr.remote_io;
			msg_info.hdr.serializing_sc = io;
			msg_info.hdr.msg_type = CTL_MSG_R2R;
			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info.hdr), M_NOWAIT);
			break;
		}

		/*
		 * Check this I/O for LUN state changes that may have happened
		 * while this command was blocked.  The LUN state may have been
		 * changed by a command ahead of us in the queue.
		 */
		entry = ctl_get_cmd_entry(&io->scsiio, NULL);
		if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) {
			ctl_done(io);
			break;
		}

		io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
		ctl_enqueue_rtr(io);
		break;
	case CTL_ACTION_ERROR:
	default:
		ctl_set_internal_failure(&io->scsiio,
		    /*sks_valid*/ 0,
		    /*retry_count*/ 0);

error:
		/* Serializing commands from the other SC are done here. */
		if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) &&
		    (softc->ha_mode != CTL_HA_MODE_XFER)) {
			ctl_try_unblock_others(lun, io, TRUE);
			TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);

			ctl_copy_sense_data_back(io, &msg_info);
			msg_info.hdr.original_sc = io->io_hdr.remote_io;
			msg_info.hdr.serializing_sc = NULL;
			msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info.scsi), M_WAITOK);
			ctl_free_io(io);
			break;
		}

		ctl_done(io);
		break;
	}
}

/*
 * Try to unblock I/Os blocked by the specified I/O.
 *
 * The skip parameter allows explicitly skipping the specified I/O as a
 * blocker, starting from the previous one on the OOA queue.  It can be used
 * when we know for sure that the specified I/O no longer counts (it is done).
 * It still has to be on the OOA queue though, so that we know where to start.
 */
static void
ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *bio, bool skip)
{
	union ctl_io *io, *next_io;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	for (io = (union ctl_io *)TAILQ_FIRST(&bio->io_hdr.blocked_queue);
	     io != NULL; io = next_io) {
		next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, blocked_links);

		KASSERT(io->io_hdr.blocker != NULL,
		    ("I/O %p on blocked list without blocker", io));
		ctl_try_unblock_io(lun, io, skip);
	}
	KASSERT(!skip || TAILQ_EMPTY(&bio->io_hdr.blocked_queue),
	    ("blocked_queue is not empty after skipping %p", bio));
}

/*
 * This routine (with one exception) checks LUN flags that can be set by
 * commands ahead of us in the OOA queue.  These flags have to be checked
 * when a command initially comes in, and when we pull a command off the
 * blocked queue and are preparing to execute it.  The reason we have to
 * check these flags for commands on the blocked queue is that the LUN
 * state may have been changed by a command ahead of us while we're on the
 * blocked queue.
 *
 * Ordering is somewhat important with these checks, so please pay
 * careful attention to the placement of any new checks.
11235 */ 11236 static int 11237 ctl_scsiio_lun_check(struct ctl_lun *lun, 11238 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 11239 { 11240 struct ctl_softc *softc = lun->ctl_softc; 11241 int retval; 11242 uint32_t residx; 11243 11244 retval = 0; 11245 11246 mtx_assert(&lun->lun_lock, MA_OWNED); 11247 11248 /* 11249 * If this shelf is a secondary shelf controller, we may have to 11250 * reject some commands disallowed by HA mode and link state. 11251 */ 11252 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11253 if (softc->ha_link == CTL_HA_LINK_OFFLINE && 11254 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11255 ctl_set_lun_unavail(ctsio); 11256 retval = 1; 11257 goto bailout; 11258 } 11259 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && 11260 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11261 ctl_set_lun_transit(ctsio); 11262 retval = 1; 11263 goto bailout; 11264 } 11265 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && 11266 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { 11267 ctl_set_lun_standby(ctsio); 11268 retval = 1; 11269 goto bailout; 11270 } 11271 11272 /* The rest of checks are only done on executing side */ 11273 if (softc->ha_mode == CTL_HA_MODE_XFER) 11274 goto bailout; 11275 } 11276 11277 if (entry->pattern & CTL_LUN_PAT_WRITE) { 11278 if (lun->be_lun && 11279 lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { 11280 ctl_set_hw_write_protected(ctsio); 11281 retval = 1; 11282 goto bailout; 11283 } 11284 if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) { 11285 ctl_set_sense(ctsio, /*current_error*/ 1, 11286 /*sense_key*/ SSD_KEY_DATA_PROTECT, 11287 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 11288 retval = 1; 11289 goto bailout; 11290 } 11291 } 11292 11293 /* 11294 * Check for a reservation conflict. If this command isn't allowed 11295 * even on reserved LUNs, and if this initiator isn't the one who 11296 * reserved us, reject the command with a reservation conflict. 11297 */ 11298 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11299 if ((lun->flags & CTL_LUN_RESERVED) 11300 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 11301 if (lun->res_idx != residx) { 11302 ctl_set_reservation_conflict(ctsio); 11303 retval = 1; 11304 goto bailout; 11305 } 11306 } 11307 11308 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 11309 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 11310 /* No reservation or command is allowed. */; 11311 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 11312 (lun->pr_res_type == SPR_TYPE_WR_EX || 11313 lun->pr_res_type == SPR_TYPE_WR_EX_RO || 11314 lun->pr_res_type == SPR_TYPE_WR_EX_AR)) { 11315 /* The command is allowed for Write Exclusive resv. */; 11316 } else { 11317 /* 11318 * if we aren't registered or it's a res holder type 11319 * reservation and this isn't the res holder then set a 11320 * conflict. 
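 * (The pr_res_type < 4 test below matches the single-holder reservation
 * types, Write Exclusive and Exclusive Access; for the registrants-only
 * and all-registrants variants a valid registration is sufficient.)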
11321 */ 11322 if (ctl_get_prkey(lun, residx) == 0 || 11323 (residx != lun->pr_res_idx && lun->pr_res_type < 4)) { 11324 ctl_set_reservation_conflict(ctsio); 11325 retval = 1; 11326 goto bailout; 11327 } 11328 } 11329 11330 if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) { 11331 if (lun->flags & CTL_LUN_EJECTED) 11332 ctl_set_lun_ejected(ctsio); 11333 else if (lun->flags & CTL_LUN_NO_MEDIA) { 11334 if (lun->flags & CTL_LUN_REMOVABLE) 11335 ctl_set_lun_no_media(ctsio); 11336 else 11337 ctl_set_lun_int_reqd(ctsio); 11338 } else if (lun->flags & CTL_LUN_STOPPED) 11339 ctl_set_lun_stopped(ctsio); 11340 else 11341 goto bailout; 11342 retval = 1; 11343 goto bailout; 11344 } 11345 11346 bailout: 11347 return (retval); 11348 } 11349 11350 static void 11351 ctl_failover_io(union ctl_io *io, int have_lock) 11352 { 11353 ctl_set_busy(&io->scsiio); 11354 ctl_done(io); 11355 } 11356 11357 static void 11358 ctl_failover_lun(union ctl_io *rio) 11359 { 11360 struct ctl_softc *softc = CTL_SOFTC(rio); 11361 struct ctl_lun *lun; 11362 struct ctl_io_hdr *io, *next_io; 11363 uint32_t targ_lun; 11364 11365 targ_lun = rio->io_hdr.nexus.targ_mapped_lun; 11366 CTL_DEBUG_PRINT(("FAILOVER for lun %u\n", targ_lun)); 11367 11368 /* Find and lock the LUN. */ 11369 mtx_lock(&softc->ctl_lock); 11370 if (targ_lun > ctl_max_luns || 11371 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11372 mtx_unlock(&softc->ctl_lock); 11373 return; 11374 } 11375 mtx_lock(&lun->lun_lock); 11376 mtx_unlock(&softc->ctl_lock); 11377 if (lun->flags & CTL_LUN_DISABLED) { 11378 mtx_unlock(&lun->lun_lock); 11379 return; 11380 } 11381 11382 if (softc->ha_mode == CTL_HA_MODE_XFER) { 11383 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11384 /* We are master */ 11385 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11386 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11387 io->flags |= CTL_FLAG_ABORT; 11388 io->flags |= CTL_FLAG_FAILOVER; 11389 ctl_try_unblock_io(lun, 11390 (union ctl_io *)io, FALSE); 11391 } else { /* This can be only due to DATAMOVE */ 11392 io->msg_type = CTL_MSG_DATAMOVE_DONE; 11393 io->flags &= ~CTL_FLAG_DMA_INPROG; 11394 io->flags |= CTL_FLAG_IO_ACTIVE; 11395 io->port_status = 31340; 11396 ctl_enqueue_isc((union ctl_io *)io); 11397 } 11398 } else 11399 /* We are slave */ 11400 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11401 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11402 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11403 io->flags |= CTL_FLAG_FAILOVER; 11404 } else { 11405 ctl_set_busy(&((union ctl_io *)io)-> 11406 scsiio); 11407 ctl_done((union ctl_io *)io); 11408 } 11409 } 11410 } 11411 } else { /* SERIALIZE modes */ 11412 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11413 /* We are master */ 11414 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11415 if (io->blocker != NULL) { 11416 TAILQ_REMOVE(&io->blocker->io_hdr.blocked_queue, 11417 io, blocked_links); 11418 io->blocker = NULL; 11419 } 11420 ctl_try_unblock_others(lun, (union ctl_io *)io, 11421 TRUE); 11422 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11423 ctl_free_io((union ctl_io *)io); 11424 } else 11425 /* We are slave */ 11426 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11427 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11428 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { 11429 ctl_set_busy(&((union ctl_io *)io)-> 11430 scsiio); 11431 ctl_done((union ctl_io *)io); 11432 } 11433 } 11434 } 11435 } 11436 mtx_unlock(&lun->lun_lock); 11437 } 11438 11439 static int 11440 ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) 11441 { 11442 struct ctl_lun *lun; 
11443 const struct ctl_cmd_entry *entry; 11444 union ctl_io *bio; 11445 uint32_t initidx, targ_lun; 11446 int retval = 0; 11447 11448 lun = NULL; 11449 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 11450 if (targ_lun < ctl_max_luns) 11451 lun = softc->ctl_luns[targ_lun]; 11452 if (lun) { 11453 /* 11454 * If the LUN is invalid, pretend that it doesn't exist. 11455 * It will go away as soon as all pending I/O has been 11456 * completed. 11457 */ 11458 mtx_lock(&lun->lun_lock); 11459 if (lun->flags & CTL_LUN_DISABLED) { 11460 mtx_unlock(&lun->lun_lock); 11461 lun = NULL; 11462 } 11463 } 11464 CTL_LUN(ctsio) = lun; 11465 if (lun) { 11466 CTL_BACKEND_LUN(ctsio) = lun->be_lun; 11467 11468 /* 11469 * Every I/O goes into the OOA queue for a particular LUN, 11470 * and stays there until completion. 11471 */ 11472 #ifdef CTL_TIME_IO 11473 if (TAILQ_EMPTY(&lun->ooa_queue)) 11474 lun->idle_time += getsbinuptime() - lun->last_busy; 11475 #endif 11476 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 11477 } 11478 11479 /* Get command entry and return error if it is unsuppotyed. */ 11480 entry = ctl_validate_command(ctsio); 11481 if (entry == NULL) { 11482 if (lun) 11483 mtx_unlock(&lun->lun_lock); 11484 return (retval); 11485 } 11486 11487 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 11488 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 11489 11490 /* 11491 * Check to see whether we can send this command to LUNs that don't 11492 * exist. This should pretty much only be the case for inquiry 11493 * and request sense. Further checks, below, really require having 11494 * a LUN, so we can't really check the command anymore. Just put 11495 * it on the rtr queue. 11496 */ 11497 if (lun == NULL) { 11498 if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) { 11499 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11500 ctl_enqueue_rtr((union ctl_io *)ctsio); 11501 return (retval); 11502 } 11503 11504 ctl_set_unsupported_lun(ctsio); 11505 ctl_done((union ctl_io *)ctsio); 11506 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); 11507 return (retval); 11508 } else { 11509 /* 11510 * Make sure we support this particular command on this LUN. 11511 * e.g., we don't support writes to the control LUN. 11512 */ 11513 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 11514 mtx_unlock(&lun->lun_lock); 11515 ctl_set_invalid_opcode(ctsio); 11516 ctl_done((union ctl_io *)ctsio); 11517 return (retval); 11518 } 11519 } 11520 11521 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11522 11523 /* 11524 * If we've got a request sense, it'll clear the contingent 11525 * allegiance condition. Otherwise, if we have a CA condition for 11526 * this initiator, clear it, because it sent down a command other 11527 * than request sense. 11528 */ 11529 if (ctsio->cdb[0] != REQUEST_SENSE) { 11530 struct scsi_sense_data *ps; 11531 11532 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; 11533 if (ps != NULL) 11534 ps[initidx % CTL_MAX_INIT_PER_PORT].error_code = 0; 11535 } 11536 11537 /* 11538 * If the command has this flag set, it handles its own unit 11539 * attention reporting, we shouldn't do anything. Otherwise we 11540 * check for any pending unit attentions, and send them back to the 11541 * initiator. We only do this when a command initially comes in, 11542 * not when we pull it off the blocked queue. 
11543 * 11544 * According to SAM-3, section 5.3.2, the order that things get 11545 * presented back to the host is basically unit attentions caused 11546 * by some sort of reset event, busy status, reservation conflicts 11547 * or task set full, and finally any other status. 11548 * 11549 * One issue here is that some of the unit attentions we report 11550 * don't fall into the "reset" category (e.g. "reported luns data 11551 * has changed"). So reporting it here, before the reservation 11552 * check, may be technically wrong. I guess the only thing to do 11553 * would be to check for and report the reset events here, and then 11554 * check for the other unit attention types after we check for a 11555 * reservation conflict. 11556 * 11557 * XXX KDM need to fix this 11558 */ 11559 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11560 ctl_ua_type ua_type; 11561 u_int sense_len = 0; 11562 11563 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, 11564 &sense_len, SSD_TYPE_NONE); 11565 if (ua_type != CTL_UA_NONE) { 11566 mtx_unlock(&lun->lun_lock); 11567 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11568 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11569 ctsio->sense_len = sense_len; 11570 ctl_done((union ctl_io *)ctsio); 11571 return (retval); 11572 } 11573 } 11574 11575 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 11576 mtx_unlock(&lun->lun_lock); 11577 ctl_done((union ctl_io *)ctsio); 11578 return (retval); 11579 } 11580 11581 /* 11582 * XXX CHD this is where we want to send IO to other side if 11583 * this LUN is secondary on this SC. We will need to make a copy 11584 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 11585 * the copy we send as FROM_OTHER. 11586 * We also need to stuff the address of the original IO so we can 11587 * find it easily. Something similar will need be done on the other 11588 * side so when we are done we can find the copy. 
11589 */ 11590 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 11591 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && 11592 (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { 11593 union ctl_ha_msg msg_info; 11594 int isc_retval; 11595 11596 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11597 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11598 mtx_unlock(&lun->lun_lock); 11599 11600 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11601 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11602 msg_info.hdr.serializing_sc = NULL; 11603 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11604 msg_info.scsi.tag_num = ctsio->tag_num; 11605 msg_info.scsi.tag_type = ctsio->tag_type; 11606 msg_info.scsi.cdb_len = ctsio->cdb_len; 11607 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11608 11609 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11610 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), 11611 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { 11612 ctl_set_busy(ctsio); 11613 ctl_done((union ctl_io *)ctsio); 11614 return (retval); 11615 } 11616 return (retval); 11617 } 11618 11619 bio = (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links); 11620 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) { 11621 case CTL_ACTION_BLOCK: 11622 ctsio->io_hdr.blocker = bio; 11623 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, 11624 blocked_links); 11625 mtx_unlock(&lun->lun_lock); 11626 return (retval); 11627 case CTL_ACTION_PASS: 11628 case CTL_ACTION_SKIP: 11629 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11630 mtx_unlock(&lun->lun_lock); 11631 ctl_enqueue_rtr((union ctl_io *)ctsio); 11632 break; 11633 case CTL_ACTION_OVERLAP: 11634 mtx_unlock(&lun->lun_lock); 11635 ctl_set_overlapped_cmd(ctsio); 11636 ctl_done((union ctl_io *)ctsio); 11637 break; 11638 case CTL_ACTION_OVERLAP_TAG: 11639 mtx_unlock(&lun->lun_lock); 11640 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11641 ctl_done((union ctl_io *)ctsio); 11642 break; 11643 case CTL_ACTION_ERROR: 11644 default: 11645 mtx_unlock(&lun->lun_lock); 11646 ctl_set_internal_failure(ctsio, 11647 /*sks_valid*/ 0, 11648 /*retry_count*/ 0); 11649 ctl_done((union ctl_io *)ctsio); 11650 break; 11651 } 11652 return (retval); 11653 } 11654 11655 const struct ctl_cmd_entry * 11656 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11657 { 11658 const struct ctl_cmd_entry *entry; 11659 int service_action; 11660 11661 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11662 if (sa) 11663 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11664 if (entry->flags & CTL_CMD_FLAG_SA5) { 11665 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11666 entry = &((const struct ctl_cmd_entry *) 11667 entry->execute)[service_action]; 11668 } 11669 return (entry); 11670 } 11671 11672 const struct ctl_cmd_entry * 11673 ctl_validate_command(struct ctl_scsiio *ctsio) 11674 { 11675 const struct ctl_cmd_entry *entry; 11676 int i, sa; 11677 uint8_t diff; 11678 11679 entry = ctl_get_cmd_entry(ctsio, &sa); 11680 if (entry->execute == NULL) { 11681 if (sa) 11682 ctl_set_invalid_field(ctsio, 11683 /*sks_valid*/ 1, 11684 /*command*/ 1, 11685 /*field*/ 1, 11686 /*bit_valid*/ 1, 11687 /*bit*/ 4); 11688 else 11689 ctl_set_invalid_opcode(ctsio); 11690 ctl_done((union ctl_io *)ctsio); 11691 return (NULL); 11692 } 11693 KASSERT(entry->length > 0, 11694 ("Not defined length for command 0x%02x/0x%02x", 11695 ctsio->cdb[0], ctsio->cdb[1])); 11696 for (i = 1; i < entry->length; i++) { 11697 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11698 if (diff == 0) 11699 continue; 11700 
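		/*
		 * A nonzero diff means the CDB sets bits that this command's
		 * usage mask does not allow; report the highest offending bit
		 * of CDB byte i in the sense-key-specific data below.
		 */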
ctl_set_invalid_field(ctsio, 11701 /*sks_valid*/ 1, 11702 /*command*/ 1, 11703 /*field*/ i, 11704 /*bit_valid*/ 1, 11705 /*bit*/ fls(diff) - 1); 11706 ctl_done((union ctl_io *)ctsio); 11707 return (NULL); 11708 } 11709 return (entry); 11710 } 11711 11712 static int 11713 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11714 { 11715 11716 switch (lun_type) { 11717 case T_DIRECT: 11718 if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0) 11719 return (0); 11720 break; 11721 case T_PROCESSOR: 11722 if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) 11723 return (0); 11724 break; 11725 case T_CDROM: 11726 if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0) 11727 return (0); 11728 break; 11729 default: 11730 return (0); 11731 } 11732 return (1); 11733 } 11734 11735 static int 11736 ctl_scsiio(struct ctl_scsiio *ctsio) 11737 { 11738 int retval; 11739 const struct ctl_cmd_entry *entry; 11740 11741 retval = CTL_RETVAL_COMPLETE; 11742 11743 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11744 11745 entry = ctl_get_cmd_entry(ctsio, NULL); 11746 11747 /* 11748 * If this I/O has been aborted, just send it straight to 11749 * ctl_done() without executing it. 11750 */ 11751 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11752 ctl_done((union ctl_io *)ctsio); 11753 goto bailout; 11754 } 11755 11756 /* 11757 * All the checks should have been handled by ctl_scsiio_precheck(). 11758 * We should be clear now to just execute the I/O. 11759 */ 11760 retval = entry->execute(ctsio); 11761 11762 bailout: 11763 return (retval); 11764 } 11765 11766 static int 11767 ctl_target_reset(union ctl_io *io) 11768 { 11769 struct ctl_softc *softc = CTL_SOFTC(io); 11770 struct ctl_port *port = CTL_PORT(io); 11771 struct ctl_lun *lun; 11772 uint32_t initidx; 11773 ctl_ua_type ua_type; 11774 11775 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11776 union ctl_ha_msg msg_info; 11777 11778 msg_info.hdr.nexus = io->io_hdr.nexus; 11779 msg_info.task.task_action = io->taskio.task_action; 11780 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11781 msg_info.hdr.original_sc = NULL; 11782 msg_info.hdr.serializing_sc = NULL; 11783 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11784 sizeof(msg_info.task), M_WAITOK); 11785 } 11786 11787 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11788 if (io->taskio.task_action == CTL_TASK_TARGET_RESET) 11789 ua_type = CTL_UA_TARG_RESET; 11790 else 11791 ua_type = CTL_UA_BUS_RESET; 11792 mtx_lock(&softc->ctl_lock); 11793 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11794 if (port != NULL && 11795 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 11796 continue; 11797 ctl_do_lun_reset(lun, initidx, ua_type); 11798 } 11799 mtx_unlock(&softc->ctl_lock); 11800 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11801 return (0); 11802 } 11803 11804 /* 11805 * The LUN should always be set. The I/O is optional, and is used to 11806 * distinguish between I/Os sent by this initiator, and by other 11807 * initiators. We set unit attention for initiators other than this one. 11808 * SAM-3 is vague on this point. It does say that a unit attention should 11809 * be established for other initiators when a LUN is reset (see section 11810 * 5.7.3), but it doesn't specifically say that the unit attention should 11811 * be established for this particular initiator when a LUN is reset. 
Here 11812 * is the relevant text, from SAM-3 rev 8: 11813 * 11814 * 5.7.2 When a SCSI initiator port aborts its own tasks 11815 * 11816 * When a SCSI initiator port causes its own task(s) to be aborted, no 11817 * notification that the task(s) have been aborted shall be returned to 11818 * the SCSI initiator port other than the completion response for the 11819 * command or task management function action that caused the task(s) to 11820 * be aborted and notification(s) associated with related effects of the 11821 * action (e.g., a reset unit attention condition). 11822 * 11823 * XXX KDM for now, we're setting unit attention for all initiators. 11824 */ 11825 static void 11826 ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type) 11827 { 11828 union ctl_io *xio; 11829 int i; 11830 11831 mtx_lock(&lun->lun_lock); 11832 /* Abort tasks. */ 11833 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11834 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11835 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; 11836 ctl_try_unblock_io(lun, xio, FALSE); 11837 } 11838 /* Clear CA. */ 11839 for (i = 0; i < ctl_max_ports; i++) { 11840 free(lun->pending_sense[i], M_CTL); 11841 lun->pending_sense[i] = NULL; 11842 } 11843 /* Clear reservation. */ 11844 lun->flags &= ~CTL_LUN_RESERVED; 11845 /* Clear prevent media removal. */ 11846 if (lun->prevent) { 11847 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11848 ctl_clear_mask(lun->prevent, i); 11849 lun->prevent_count = 0; 11850 } 11851 /* Clear TPC status */ 11852 ctl_tpc_lun_clear(lun, -1); 11853 /* Establish UA. */ 11854 #if 0 11855 ctl_est_ua_all(lun, initidx, ua_type); 11856 #else 11857 ctl_est_ua_all(lun, -1, ua_type); 11858 #endif 11859 mtx_unlock(&lun->lun_lock); 11860 } 11861 11862 static int 11863 ctl_lun_reset(union ctl_io *io) 11864 { 11865 struct ctl_softc *softc = CTL_SOFTC(io); 11866 struct ctl_lun *lun; 11867 uint32_t targ_lun, initidx; 11868 11869 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11870 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11871 mtx_lock(&softc->ctl_lock); 11872 if (targ_lun >= ctl_max_luns || 11873 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11874 mtx_unlock(&softc->ctl_lock); 11875 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11876 return (1); 11877 } 11878 ctl_do_lun_reset(lun, initidx, CTL_UA_LUN_RESET); 11879 mtx_unlock(&softc->ctl_lock); 11880 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11881 11882 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { 11883 union ctl_ha_msg msg_info; 11884 11885 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11886 msg_info.hdr.nexus = io->io_hdr.nexus; 11887 msg_info.task.task_action = CTL_TASK_LUN_RESET; 11888 msg_info.hdr.original_sc = NULL; 11889 msg_info.hdr.serializing_sc = NULL; 11890 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11891 sizeof(msg_info.task), M_WAITOK); 11892 } 11893 return (0); 11894 } 11895 11896 static void 11897 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 11898 int other_sc) 11899 { 11900 union ctl_io *xio; 11901 11902 mtx_assert(&lun->lun_lock, MA_OWNED); 11903 11904 /* 11905 * Run through the OOA queue and attempt to find the given I/O. 11906 * The target port, initiator ID, tag type and tag number have to 11907 * match the values that we got from the initiator. If we have an 11908 * untagged command to abort, simply abort the first untagged command 11909 * we come to. We only allow one untagged command at a time of course. 
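 * A targ_port or init_id of UINT32_MAX acts as a wildcard here, which is
 * how the CLEAR TASK SET path reuses this routine for every command on
 * the LUN.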
11910 */ 11911 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11912 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11913 if ((targ_port == UINT32_MAX || 11914 targ_port == xio->io_hdr.nexus.targ_port) && 11915 (init_id == UINT32_MAX || 11916 init_id == xio->io_hdr.nexus.initid)) { 11917 if (targ_port != xio->io_hdr.nexus.targ_port || 11918 init_id != xio->io_hdr.nexus.initid) 11919 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; 11920 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11921 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11922 union ctl_ha_msg msg_info; 11923 11924 msg_info.hdr.nexus = xio->io_hdr.nexus; 11925 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11926 msg_info.task.tag_num = xio->scsiio.tag_num; 11927 msg_info.task.tag_type = xio->scsiio.tag_type; 11928 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11929 msg_info.hdr.original_sc = NULL; 11930 msg_info.hdr.serializing_sc = NULL; 11931 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11932 sizeof(msg_info.task), M_NOWAIT); 11933 } 11934 ctl_try_unblock_io(lun, xio, FALSE); 11935 } 11936 } 11937 } 11938 11939 static int 11940 ctl_abort_task_set(union ctl_io *io) 11941 { 11942 struct ctl_softc *softc = CTL_SOFTC(io); 11943 struct ctl_lun *lun; 11944 uint32_t targ_lun; 11945 11946 /* 11947 * Look up the LUN. 11948 */ 11949 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11950 mtx_lock(&softc->ctl_lock); 11951 if (targ_lun >= ctl_max_luns || 11952 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11953 mtx_unlock(&softc->ctl_lock); 11954 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11955 return (1); 11956 } 11957 11958 mtx_lock(&lun->lun_lock); 11959 mtx_unlock(&softc->ctl_lock); 11960 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 11961 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11962 io->io_hdr.nexus.initid, 11963 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11964 } else { /* CTL_TASK_CLEAR_TASK_SET */ 11965 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 11966 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11967 } 11968 mtx_unlock(&lun->lun_lock); 11969 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11970 return (0); 11971 } 11972 11973 static void 11974 ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, 11975 ctl_ua_type ua_type) 11976 { 11977 struct ctl_lun *lun; 11978 struct scsi_sense_data *ps; 11979 uint32_t p, i; 11980 11981 p = initidx / CTL_MAX_INIT_PER_PORT; 11982 i = initidx % CTL_MAX_INIT_PER_PORT; 11983 mtx_lock(&softc->ctl_lock); 11984 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11985 mtx_lock(&lun->lun_lock); 11986 /* Abort tasks. */ 11987 ctl_abort_tasks_lun(lun, p, i, 1); 11988 /* Clear CA. */ 11989 ps = lun->pending_sense[p]; 11990 if (ps != NULL) 11991 ps[i].error_code = 0; 11992 /* Clear reservation. */ 11993 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) 11994 lun->flags &= ~CTL_LUN_RESERVED; 11995 /* Clear prevent media removal. */ 11996 if (lun->prevent && ctl_is_set(lun->prevent, initidx)) { 11997 ctl_clear_mask(lun->prevent, initidx); 11998 lun->prevent_count--; 11999 } 12000 /* Clear TPC status */ 12001 ctl_tpc_lun_clear(lun, initidx); 12002 /* Establish UA. 
*/ 12003 ctl_est_ua(lun, initidx, ua_type); 12004 mtx_unlock(&lun->lun_lock); 12005 } 12006 mtx_unlock(&softc->ctl_lock); 12007 } 12008 12009 static int 12010 ctl_i_t_nexus_reset(union ctl_io *io) 12011 { 12012 struct ctl_softc *softc = CTL_SOFTC(io); 12013 uint32_t initidx; 12014 12015 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12016 union ctl_ha_msg msg_info; 12017 12018 msg_info.hdr.nexus = io->io_hdr.nexus; 12019 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; 12020 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12021 msg_info.hdr.original_sc = NULL; 12022 msg_info.hdr.serializing_sc = NULL; 12023 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 12024 sizeof(msg_info.task), M_WAITOK); 12025 } 12026 12027 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12028 ctl_i_t_nexus_loss(softc, initidx, CTL_UA_I_T_NEXUS_LOSS); 12029 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12030 return (0); 12031 } 12032 12033 static int 12034 ctl_abort_task(union ctl_io *io) 12035 { 12036 struct ctl_softc *softc = CTL_SOFTC(io); 12037 union ctl_io *xio; 12038 struct ctl_lun *lun; 12039 uint32_t targ_lun; 12040 12041 /* 12042 * Look up the LUN. 12043 */ 12044 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12045 mtx_lock(&softc->ctl_lock); 12046 if (targ_lun >= ctl_max_luns || 12047 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12048 mtx_unlock(&softc->ctl_lock); 12049 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12050 return (1); 12051 } 12052 12053 mtx_lock(&lun->lun_lock); 12054 mtx_unlock(&softc->ctl_lock); 12055 /* 12056 * Run through the OOA queue and attempt to find the given I/O. 12057 * The target port, initiator ID, tag type and tag number have to 12058 * match the values that we got from the initiator. If we have an 12059 * untagged command to abort, simply abort the first untagged command 12060 * we come to. We only allow one untagged command at a time of course. 12061 */ 12062 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12063 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12064 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 12065 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 12066 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 12067 continue; 12068 12069 /* 12070 * If the abort says that the task is untagged, the 12071 * task in the queue must be untagged. Otherwise, 12072 * we just check to see whether the tag numbers 12073 * match. This is because the QLogic firmware 12074 * doesn't pass back the tag type in an abort 12075 * request. 12076 */ 12077 #if 0 12078 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 12079 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 12080 || (xio->scsiio.tag_num == io->taskio.tag_num)) { 12081 #else 12082 /* 12083 * XXX KDM we've got problems with FC, because it 12084 * doesn't send down a tag type with aborts. So we 12085 * can only really go by the tag number... 12086 * This may cause problems with parallel SCSI. 12087 * Need to figure that out!! 
12088 */ 12089 if (xio->scsiio.tag_num == io->taskio.tag_num) { 12090 #endif 12091 xio->io_hdr.flags |= CTL_FLAG_ABORT; 12092 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 12093 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 12094 union ctl_ha_msg msg_info; 12095 12096 msg_info.hdr.nexus = io->io_hdr.nexus; 12097 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 12098 msg_info.task.tag_num = io->taskio.tag_num; 12099 msg_info.task.tag_type = io->taskio.tag_type; 12100 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12101 msg_info.hdr.original_sc = NULL; 12102 msg_info.hdr.serializing_sc = NULL; 12103 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 12104 sizeof(msg_info.task), M_NOWAIT); 12105 } 12106 ctl_try_unblock_io(lun, xio, FALSE); 12107 } 12108 } 12109 mtx_unlock(&lun->lun_lock); 12110 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12111 return (0); 12112 } 12113 12114 static int 12115 ctl_query_task(union ctl_io *io, int task_set) 12116 { 12117 struct ctl_softc *softc = CTL_SOFTC(io); 12118 union ctl_io *xio; 12119 struct ctl_lun *lun; 12120 int found = 0; 12121 uint32_t targ_lun; 12122 12123 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12124 mtx_lock(&softc->ctl_lock); 12125 if (targ_lun >= ctl_max_luns || 12126 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12127 mtx_unlock(&softc->ctl_lock); 12128 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12129 return (1); 12130 } 12131 mtx_lock(&lun->lun_lock); 12132 mtx_unlock(&softc->ctl_lock); 12133 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12134 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12135 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 12136 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 12137 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 12138 continue; 12139 12140 if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) { 12141 found = 1; 12142 break; 12143 } 12144 } 12145 mtx_unlock(&lun->lun_lock); 12146 if (found) 12147 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12148 else 12149 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12150 return (0); 12151 } 12152 12153 static int 12154 ctl_query_async_event(union ctl_io *io) 12155 { 12156 struct ctl_softc *softc = CTL_SOFTC(io); 12157 struct ctl_lun *lun; 12158 ctl_ua_type ua; 12159 uint32_t targ_lun, initidx; 12160 12161 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12162 mtx_lock(&softc->ctl_lock); 12163 if (targ_lun >= ctl_max_luns || 12164 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12165 mtx_unlock(&softc->ctl_lock); 12166 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12167 return (1); 12168 } 12169 mtx_lock(&lun->lun_lock); 12170 mtx_unlock(&softc->ctl_lock); 12171 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12172 ua = ctl_build_qae(lun, initidx, io->taskio.task_resp); 12173 mtx_unlock(&lun->lun_lock); 12174 if (ua != CTL_UA_NONE) 12175 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12176 else 12177 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12178 return (0); 12179 } 12180 12181 static void 12182 ctl_run_task(union ctl_io *io) 12183 { 12184 int retval = 1; 12185 12186 CTL_DEBUG_PRINT(("ctl_run_task\n")); 12187 KASSERT(io->io_hdr.io_type == CTL_IO_TASK, 12188 ("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type)); 12189 io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED; 12190 bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp)); 12191 switch (io->taskio.task_action) { 12192 case CTL_TASK_ABORT_TASK: 12193 retval = ctl_abort_task(io); 12194 
break; 12195 case CTL_TASK_ABORT_TASK_SET: 12196 case CTL_TASK_CLEAR_TASK_SET: 12197 retval = ctl_abort_task_set(io); 12198 break; 12199 case CTL_TASK_CLEAR_ACA: 12200 break; 12201 case CTL_TASK_I_T_NEXUS_RESET: 12202 retval = ctl_i_t_nexus_reset(io); 12203 break; 12204 case CTL_TASK_LUN_RESET: 12205 retval = ctl_lun_reset(io); 12206 break; 12207 case CTL_TASK_TARGET_RESET: 12208 case CTL_TASK_BUS_RESET: 12209 retval = ctl_target_reset(io); 12210 break; 12211 case CTL_TASK_PORT_LOGIN: 12212 break; 12213 case CTL_TASK_PORT_LOGOUT: 12214 break; 12215 case CTL_TASK_QUERY_TASK: 12216 retval = ctl_query_task(io, 0); 12217 break; 12218 case CTL_TASK_QUERY_TASK_SET: 12219 retval = ctl_query_task(io, 1); 12220 break; 12221 case CTL_TASK_QUERY_ASYNC_EVENT: 12222 retval = ctl_query_async_event(io); 12223 break; 12224 default: 12225 printf("%s: got unknown task management event %d\n", 12226 __func__, io->taskio.task_action); 12227 break; 12228 } 12229 if (retval == 0) 12230 io->io_hdr.status = CTL_SUCCESS; 12231 else 12232 io->io_hdr.status = CTL_ERROR; 12233 ctl_done(io); 12234 } 12235 12236 /* 12237 * For HA operation. Handle commands that come in from the other 12238 * controller. 12239 */ 12240 static void 12241 ctl_handle_isc(union ctl_io *io) 12242 { 12243 struct ctl_softc *softc = CTL_SOFTC(io); 12244 struct ctl_lun *lun; 12245 const struct ctl_cmd_entry *entry; 12246 uint32_t targ_lun; 12247 12248 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12249 switch (io->io_hdr.msg_type) { 12250 case CTL_MSG_SERIALIZE: 12251 ctl_serialize_other_sc_cmd(&io->scsiio); 12252 break; 12253 case CTL_MSG_R2R: /* Only used in SER_ONLY mode. */ 12254 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 12255 if (targ_lun >= ctl_max_luns || 12256 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12257 ctl_done(io); 12258 break; 12259 } 12260 mtx_lock(&lun->lun_lock); 12261 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { 12262 mtx_unlock(&lun->lun_lock); 12263 ctl_done(io); 12264 break; 12265 } 12266 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12267 mtx_unlock(&lun->lun_lock); 12268 ctl_enqueue_rtr(io); 12269 break; 12270 case CTL_MSG_FINISH_IO: 12271 if (softc->ha_mode == CTL_HA_MODE_XFER) { 12272 ctl_done(io); 12273 break; 12274 } 12275 if (targ_lun >= ctl_max_luns || 12276 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12277 ctl_free_io(io); 12278 break; 12279 } 12280 mtx_lock(&lun->lun_lock); 12281 ctl_try_unblock_others(lun, io, TRUE); 12282 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 12283 mtx_unlock(&lun->lun_lock); 12284 ctl_free_io(io); 12285 break; 12286 case CTL_MSG_PERS_ACTION: 12287 ctl_hndl_per_res_out_on_other_sc(io); 12288 ctl_free_io(io); 12289 break; 12290 case CTL_MSG_BAD_JUJU: 12291 ctl_done(io); 12292 break; 12293 case CTL_MSG_DATAMOVE: /* Only used in XFER mode */ 12294 ctl_datamove_remote(io); 12295 break; 12296 case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */ 12297 io->scsiio.be_move_done(io); 12298 break; 12299 case CTL_MSG_FAILOVER: 12300 ctl_failover_lun(io); 12301 ctl_free_io(io); 12302 break; 12303 default: 12304 printf("%s: Invalid message type %d\n", 12305 __func__, io->io_hdr.msg_type); 12306 ctl_free_io(io); 12307 break; 12308 } 12309 12310 } 12311 12312 /* 12313 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12314 * there is no match. 
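 * For example, a descriptor whose pattern asks for
 * CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE matches only write-class commands
 * whose LBA range overlaps desc->lba_range, while a plain CTL_LUN_PAT_ANY
 * matches every command.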
12315 */ 12316 static ctl_lun_error_pattern 12317 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12318 { 12319 const struct ctl_cmd_entry *entry; 12320 ctl_lun_error_pattern filtered_pattern, pattern; 12321 12322 pattern = desc->error_pattern; 12323 12324 /* 12325 * XXX KDM we need more data passed into this function to match a 12326 * custom pattern, and we actually need to implement custom pattern 12327 * matching. 12328 */ 12329 if (pattern & CTL_LUN_PAT_CMD) 12330 return (CTL_LUN_PAT_CMD); 12331 12332 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12333 return (CTL_LUN_PAT_ANY); 12334 12335 entry = ctl_get_cmd_entry(ctsio, NULL); 12336 12337 filtered_pattern = entry->pattern & pattern; 12338 12339 /* 12340 * If the user requested specific flags in the pattern (e.g. 12341 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12342 * flags. 12343 * 12344 * If the user did not specify any flags, it doesn't matter whether 12345 * or not the command supports the flags. 12346 */ 12347 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12348 (pattern & ~CTL_LUN_PAT_MASK)) 12349 return (CTL_LUN_PAT_NONE); 12350 12351 /* 12352 * If the user asked for a range check, see if the requested LBA 12353 * range overlaps with this command's LBA range. 12354 */ 12355 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12356 uint64_t lba1; 12357 uint64_t len1; 12358 ctl_action action; 12359 int retval; 12360 12361 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12362 if (retval != 0) 12363 return (CTL_LUN_PAT_NONE); 12364 12365 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12366 desc->lba_range.len, FALSE); 12367 /* 12368 * A "pass" means that the LBA ranges don't overlap, so 12369 * this doesn't match the user's range criteria. 12370 */ 12371 if (action == CTL_ACTION_PASS) 12372 return (CTL_LUN_PAT_NONE); 12373 } 12374 12375 return (filtered_pattern); 12376 } 12377 12378 static void 12379 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12380 { 12381 struct ctl_error_desc *desc, *desc2; 12382 12383 mtx_assert(&lun->lun_lock, MA_OWNED); 12384 12385 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12386 ctl_lun_error_pattern pattern; 12387 /* 12388 * Check to see whether this particular command matches 12389 * the pattern in the descriptor. 12390 */ 12391 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12392 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12393 continue; 12394 12395 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12396 case CTL_LUN_INJ_ABORTED: 12397 ctl_set_aborted(&io->scsiio); 12398 break; 12399 case CTL_LUN_INJ_MEDIUM_ERR: 12400 ctl_set_medium_error(&io->scsiio, 12401 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != 12402 CTL_FLAG_DATA_OUT); 12403 break; 12404 case CTL_LUN_INJ_UA: 12405 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12406 * OCCURRED */ 12407 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12408 break; 12409 case CTL_LUN_INJ_CUSTOM: 12410 /* 12411 * We're assuming the user knows what he is doing. 12412 * Just copy the sense information without doing 12413 * checks. 
12414 */ 12415 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12416 MIN(sizeof(desc->custom_sense), 12417 sizeof(io->scsiio.sense_data))); 12418 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12419 io->scsiio.sense_len = SSD_FULL_SIZE; 12420 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12421 break; 12422 case CTL_LUN_INJ_NONE: 12423 default: 12424 /* 12425 * If this is an error injection type we don't know 12426 * about, clear the continuous flag (if it is set) 12427 * so it will get deleted below. 12428 */ 12429 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12430 break; 12431 } 12432 /* 12433 * By default, each error injection action is a one-shot 12434 */ 12435 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12436 continue; 12437 12438 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12439 12440 free(desc, M_CTL); 12441 } 12442 } 12443 12444 #ifdef CTL_IO_DELAY 12445 static void 12446 ctl_datamove_timer_wakeup(void *arg) 12447 { 12448 union ctl_io *io; 12449 12450 io = (union ctl_io *)arg; 12451 12452 ctl_datamove(io); 12453 } 12454 #endif /* CTL_IO_DELAY */ 12455 12456 void 12457 ctl_datamove(union ctl_io *io) 12458 { 12459 void (*fe_datamove)(union ctl_io *io); 12460 12461 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12462 12463 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12464 12465 /* No data transferred yet. Frontend must update this when done. */ 12466 io->scsiio.kern_data_resid = io->scsiio.kern_data_len; 12467 12468 #ifdef CTL_TIME_IO 12469 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12470 char str[256]; 12471 char path_str[64]; 12472 struct sbuf sb; 12473 12474 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12475 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12476 12477 sbuf_cat(&sb, path_str); 12478 switch (io->io_hdr.io_type) { 12479 case CTL_IO_SCSI: 12480 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12481 sbuf_printf(&sb, "\n"); 12482 sbuf_cat(&sb, path_str); 12483 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12484 io->scsiio.tag_num, io->scsiio.tag_type); 12485 break; 12486 case CTL_IO_TASK: 12487 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12488 "Tag Type: %d\n", io->taskio.task_action, 12489 io->taskio.tag_num, io->taskio.tag_type); 12490 break; 12491 default: 12492 panic("%s: Invalid CTL I/O type %d\n", 12493 __func__, io->io_hdr.io_type); 12494 } 12495 sbuf_cat(&sb, path_str); 12496 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12497 (intmax_t)time_uptime - io->io_hdr.start_time); 12498 sbuf_finish(&sb); 12499 printf("%s", sbuf_data(&sb)); 12500 } 12501 #endif /* CTL_TIME_IO */ 12502 12503 #ifdef CTL_IO_DELAY 12504 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12505 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12506 } else { 12507 struct ctl_lun *lun; 12508 12509 lun = CTL_LUN(io); 12510 if ((lun != NULL) 12511 && (lun->delay_info.datamove_delay > 0)) { 12512 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 12513 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12514 callout_reset(&io->io_hdr.delay_callout, 12515 lun->delay_info.datamove_delay * hz, 12516 ctl_datamove_timer_wakeup, io); 12517 if (lun->delay_info.datamove_type == 12518 CTL_DELAY_TYPE_ONESHOT) 12519 lun->delay_info.datamove_delay = 0; 12520 return; 12521 } 12522 } 12523 #endif 12524 12525 /* 12526 * This command has been aborted. Set the port status, so we fail 12527 * the data move. 
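 * (The 31337 stored below appears to be nothing more than a recognizable
 * marker; what matters to the frontend is that port_status is nonzero,
 * which marks the transfer as failed.)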
12528 */ 12529 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12530 printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", 12531 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12532 io->io_hdr.nexus.targ_port, 12533 io->io_hdr.nexus.targ_lun); 12534 io->io_hdr.port_status = 31337; 12535 /* 12536 * Note that the backend, in this case, will get the 12537 * callback in its context. In other cases it may get 12538 * called in the frontend's interrupt thread context. 12539 */ 12540 io->scsiio.be_move_done(io); 12541 return; 12542 } 12543 12544 /* Don't confuse frontend with zero length data move. */ 12545 if (io->scsiio.kern_data_len == 0) { 12546 io->scsiio.be_move_done(io); 12547 return; 12548 } 12549 12550 fe_datamove = CTL_PORT(io)->fe_datamove; 12551 fe_datamove(io); 12552 } 12553 12554 static void 12555 ctl_send_datamove_done(union ctl_io *io, int have_lock) 12556 { 12557 union ctl_ha_msg msg; 12558 #ifdef CTL_TIME_IO 12559 struct bintime cur_bt; 12560 #endif 12561 12562 memset(&msg, 0, sizeof(msg)); 12563 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12564 msg.hdr.original_sc = io; 12565 msg.hdr.serializing_sc = io->io_hdr.remote_io; 12566 msg.hdr.nexus = io->io_hdr.nexus; 12567 msg.hdr.status = io->io_hdr.status; 12568 msg.scsi.kern_data_resid = io->scsiio.kern_data_resid; 12569 msg.scsi.tag_num = io->scsiio.tag_num; 12570 msg.scsi.tag_type = io->scsiio.tag_type; 12571 msg.scsi.scsi_status = io->scsiio.scsi_status; 12572 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12573 io->scsiio.sense_len); 12574 msg.scsi.sense_len = io->scsiio.sense_len; 12575 msg.scsi.port_status = io->io_hdr.port_status; 12576 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12577 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12578 ctl_failover_io(io, /*have_lock*/ have_lock); 12579 return; 12580 } 12581 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12582 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12583 msg.scsi.sense_len, M_WAITOK); 12584 12585 #ifdef CTL_TIME_IO 12586 getbinuptime(&cur_bt); 12587 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 12588 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 12589 #endif 12590 io->io_hdr.num_dmas++; 12591 } 12592 12593 /* 12594 * The DMA to the remote side is done, now we need to tell the other side 12595 * we're done so it can continue with its data movement. 12596 */ 12597 static void 12598 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12599 { 12600 union ctl_io *io; 12601 uint32_t i; 12602 12603 io = rq->context; 12604 12605 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12606 printf("%s: ISC DMA write failed with error %d", __func__, 12607 rq->ret); 12608 ctl_set_internal_failure(&io->scsiio, 12609 /*sks_valid*/ 1, 12610 /*retry_count*/ rq->ret); 12611 } 12612 12613 ctl_dt_req_free(rq); 12614 12615 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12616 free(CTL_LSGLT(io)[i].addr, M_CTL); 12617 free(CTL_RSGL(io), M_CTL); 12618 CTL_RSGL(io) = NULL; 12619 CTL_LSGL(io) = NULL; 12620 12621 /* 12622 * The data is in local and remote memory, so now we need to send 12623 * status (good or back) back to the other side. 12624 */ 12625 ctl_send_datamove_done(io, /*have_lock*/ 0); 12626 } 12627 12628 /* 12629 * We've moved the data from the host/controller into local memory. Now we 12630 * need to push it over to the remote controller's memory. 
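 *
 * Write path as implemented below: the frontend's fe_datamove() fills the
 * local S/G list and then calls this be_move_done hook, which hands the
 * buffers to ctl_datamove_remote_xfer(CTL_HA_DT_CMD_WRITE); when that HA
 * transfer completes, ctl_datamove_remote_write_cb() frees the S/G lists
 * and reports status via ctl_send_datamove_done().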
12631 */ 12632 static int 12633 ctl_datamove_remote_dm_write_cb(union ctl_io *io) 12634 { 12635 int retval; 12636 12637 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 12638 ctl_datamove_remote_write_cb); 12639 return (retval); 12640 } 12641 12642 static void 12643 ctl_datamove_remote_write(union ctl_io *io) 12644 { 12645 int retval; 12646 void (*fe_datamove)(union ctl_io *io); 12647 12648 /* 12649 * - Get the data from the host/HBA into local memory. 12650 * - DMA memory from the local controller to the remote controller. 12651 * - Send status back to the remote controller. 12652 */ 12653 12654 retval = ctl_datamove_remote_sgl_setup(io); 12655 if (retval != 0) 12656 return; 12657 12658 /* Switch the pointer over so the FETD knows what to do */ 12659 io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io); 12660 12661 /* 12662 * Use a custom move done callback, since we need to send completion 12663 * back to the other controller, not to the backend on this side. 12664 */ 12665 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12666 12667 fe_datamove = CTL_PORT(io)->fe_datamove; 12668 fe_datamove(io); 12669 } 12670 12671 static int 12672 ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12673 { 12674 uint32_t i; 12675 12676 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12677 free(CTL_LSGLT(io)[i].addr, M_CTL); 12678 free(CTL_RSGL(io), M_CTL); 12679 CTL_RSGL(io) = NULL; 12680 CTL_LSGL(io) = NULL; 12681 12682 /* 12683 * The read is done, now we need to send status (good or bad) back 12684 * to the other side. 12685 */ 12686 ctl_send_datamove_done(io, /*have_lock*/ 0); 12687 12688 return (0); 12689 } 12690 12691 static void 12692 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12693 { 12694 union ctl_io *io; 12695 void (*fe_datamove)(union ctl_io *io); 12696 12697 io = rq->context; 12698 12699 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12700 printf("%s: ISC DMA read failed with error %d\n", __func__, 12701 rq->ret); 12702 ctl_set_internal_failure(&io->scsiio, 12703 /*sks_valid*/ 1, 12704 /*retry_count*/ rq->ret); 12705 } 12706 12707 ctl_dt_req_free(rq); 12708 12709 /* Switch the pointer over so the FETD knows what to do */ 12710 io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io); 12711 12712 /* 12713 * Use a custom move done callback, since we need to send completion 12714 * back to the other controller, not to the backend on this side. 12715 */ 12716 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12717 12718 /* XXX KDM add checks like the ones in ctl_datamove? */ 12719 12720 fe_datamove = CTL_PORT(io)->fe_datamove; 12721 fe_datamove(io); 12722 } 12723 12724 static int 12725 ctl_datamove_remote_sgl_setup(union ctl_io *io) 12726 { 12727 struct ctl_sg_entry *local_sglist; 12728 uint32_t len_to_go; 12729 int retval; 12730 int i; 12731 12732 retval = 0; 12733 local_sglist = CTL_LSGL(io); 12734 len_to_go = io->scsiio.kern_data_len; 12735 12736 /* 12737 * The difficult thing here is that the size of the various 12738 * S/G segments may be different than the size from the 12739 * remote controller. That'll make it harder when DMAing 12740 * the data back to the other side. 12741 */ 12742 for (i = 0; len_to_go > 0; i++) { 12743 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12744 local_sglist[i].addr = 12745 malloc(local_sglist[i].len, M_CTL, M_WAITOK); 12746 12747 len_to_go -= local_sglist[i].len; 12748 } 12749 /* 12750 * Reset the number of S/G entries accordingly. The original 12751 * number of S/G entries is available in rem_sg_entries. 
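 *
 * For example (assuming CTL_HA_DATAMOVE_SEGMENT were 128KB), a 192KB
 * kern_data_len would have been carved into two local segments of 128KB
 * and 64KB above, regardless of how the remote side split the same
 * 192KB.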
12752 */ 12753 io->scsiio.kern_sg_entries = i; 12754 12755 return (retval); 12756 } 12757 12758 static int 12759 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12760 ctl_ha_dt_cb callback) 12761 { 12762 struct ctl_ha_dt_req *rq; 12763 struct ctl_sg_entry *remote_sglist, *local_sglist; 12764 uint32_t local_used, remote_used, total_used; 12765 int i, j, isc_ret; 12766 12767 rq = ctl_dt_req_alloc(); 12768 12769 /* 12770 * If we failed to allocate the request, and if the DMA didn't fail 12771 * anyway, set busy status. This is just a resource allocation 12772 * failure. 12773 */ 12774 if ((rq == NULL) 12775 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12776 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) 12777 ctl_set_busy(&io->scsiio); 12778 12779 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12780 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { 12781 if (rq != NULL) 12782 ctl_dt_req_free(rq); 12783 12784 /* 12785 * The data move failed. We need to return status back 12786 * to the other controller. No point in trying to DMA 12787 * data to the remote controller. 12788 */ 12789 12790 ctl_send_datamove_done(io, /*have_lock*/ 0); 12791 12792 return (1); 12793 } 12794 12795 local_sglist = CTL_LSGL(io); 12796 remote_sglist = CTL_RSGL(io); 12797 local_used = 0; 12798 remote_used = 0; 12799 total_used = 0; 12800 12801 /* 12802 * Pull/push the data over the wire from/to the other controller. 12803 * This takes into account the possibility that the local and 12804 * remote sglists may not be identical in terms of the size of 12805 * the elements and the number of elements. 12806 * 12807 * One fundamental assumption here is that the length allocated for 12808 * both the local and remote sglists is identical. Otherwise, we've 12809 * essentially got a coding error of some sort. 12810 */ 12811 isc_ret = CTL_HA_STATUS_SUCCESS; 12812 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12813 uint32_t cur_len; 12814 uint8_t *tmp_ptr; 12815 12816 rq->command = command; 12817 rq->context = io; 12818 12819 /* 12820 * Both pointers should be aligned. But it is possible 12821 * that the allocation length is not. They should both 12822 * also have enough slack left over at the end, though, 12823 * to round up to the next 8 byte boundary. 
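 *
 * For example, local segments of {128K, 64K} against remote segments of
 * {64K, 64K, 64K} (sizes purely illustrative) would yield three 64K
 * transfers: j advances after every chunk, while i advances only once
 * the full 128K local segment has been consumed.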
12824 */ 12825 cur_len = MIN(local_sglist[i].len - local_used, 12826 remote_sglist[j].len - remote_used); 12827 rq->size = cur_len; 12828 12829 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12830 tmp_ptr += local_used; 12831 12832 #if 0 12833 /* Use physical addresses when talking to ISC hardware */ 12834 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12835 /* XXX KDM use busdma */ 12836 rq->local = vtophys(tmp_ptr); 12837 } else 12838 rq->local = tmp_ptr; 12839 #else 12840 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12841 ("HA does not support BUS_ADDR")); 12842 rq->local = tmp_ptr; 12843 #endif 12844 12845 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12846 tmp_ptr += remote_used; 12847 rq->remote = tmp_ptr; 12848 12849 rq->callback = NULL; 12850 12851 local_used += cur_len; 12852 if (local_used >= local_sglist[i].len) { 12853 i++; 12854 local_used = 0; 12855 } 12856 12857 remote_used += cur_len; 12858 if (remote_used >= remote_sglist[j].len) { 12859 j++; 12860 remote_used = 0; 12861 } 12862 total_used += cur_len; 12863 12864 if (total_used >= io->scsiio.kern_data_len) 12865 rq->callback = callback; 12866 12867 isc_ret = ctl_dt_single(rq); 12868 if (isc_ret > CTL_HA_STATUS_SUCCESS) 12869 break; 12870 } 12871 if (isc_ret != CTL_HA_STATUS_WAIT) { 12872 rq->ret = isc_ret; 12873 callback(rq); 12874 } 12875 12876 return (0); 12877 } 12878 12879 static void 12880 ctl_datamove_remote_read(union ctl_io *io) 12881 { 12882 int retval; 12883 uint32_t i; 12884 12885 /* 12886 * This will send an error to the other controller in the case of a 12887 * failure. 12888 */ 12889 retval = ctl_datamove_remote_sgl_setup(io); 12890 if (retval != 0) 12891 return; 12892 12893 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12894 ctl_datamove_remote_read_cb); 12895 if (retval != 0) { 12896 /* 12897 * Make sure we free memory if there was an error.. The 12898 * ctl_datamove_remote_xfer() function will send the 12899 * datamove done message, or call the callback with an 12900 * error if there is a problem. 12901 */ 12902 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12903 free(CTL_LSGLT(io)[i].addr, M_CTL); 12904 free(CTL_RSGL(io), M_CTL); 12905 CTL_RSGL(io) = NULL; 12906 CTL_LSGL(io) = NULL; 12907 } 12908 } 12909 12910 /* 12911 * Process a datamove request from the other controller. This is used for 12912 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12913 * first. Once that is complete, the data gets DMAed into the remote 12914 * controller's memory. For reads, we DMA from the remote controller's 12915 * memory into our memory first, and then move it out to the FETD. 12916 */ 12917 static void 12918 ctl_datamove_remote(union ctl_io *io) 12919 { 12920 12921 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12922 12923 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12924 ctl_failover_io(io, /*have_lock*/ 0); 12925 return; 12926 } 12927 12928 /* 12929 * Note that we look for an aborted I/O here, but don't do some of 12930 * the other checks that ctl_datamove() normally does. 12931 * We don't need to run the datamove delay code, since that should 12932 * have been done if need be on the other controller. 
12933 */ 12934 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12935 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, 12936 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12937 io->io_hdr.nexus.targ_port, 12938 io->io_hdr.nexus.targ_lun); 12939 io->io_hdr.port_status = 31338; 12940 ctl_send_datamove_done(io, /*have_lock*/ 0); 12941 return; 12942 } 12943 12944 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) 12945 ctl_datamove_remote_write(io); 12946 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 12947 ctl_datamove_remote_read(io); 12948 else { 12949 io->io_hdr.port_status = 31339; 12950 ctl_send_datamove_done(io, /*have_lock*/ 0); 12951 } 12952 } 12953 12954 static void 12955 ctl_process_done(union ctl_io *io) 12956 { 12957 struct ctl_softc *softc = CTL_SOFTC(io); 12958 struct ctl_port *port = CTL_PORT(io); 12959 struct ctl_lun *lun = CTL_LUN(io); 12960 void (*fe_done)(union ctl_io *io); 12961 union ctl_ha_msg msg; 12962 12963 CTL_DEBUG_PRINT(("ctl_process_done\n")); 12964 fe_done = port->fe_done; 12965 12966 #ifdef CTL_TIME_IO 12967 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12968 char str[256]; 12969 char path_str[64]; 12970 struct sbuf sb; 12971 12972 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12973 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12974 12975 sbuf_cat(&sb, path_str); 12976 switch (io->io_hdr.io_type) { 12977 case CTL_IO_SCSI: 12978 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12979 sbuf_printf(&sb, "\n"); 12980 sbuf_cat(&sb, path_str); 12981 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12982 io->scsiio.tag_num, io->scsiio.tag_type); 12983 break; 12984 case CTL_IO_TASK: 12985 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12986 "Tag Type: %d\n", io->taskio.task_action, 12987 io->taskio.tag_num, io->taskio.tag_type); 12988 break; 12989 default: 12990 panic("%s: Invalid CTL I/O type %d\n", 12991 __func__, io->io_hdr.io_type); 12992 } 12993 sbuf_cat(&sb, path_str); 12994 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 12995 (intmax_t)time_uptime - io->io_hdr.start_time); 12996 sbuf_finish(&sb); 12997 printf("%s", sbuf_data(&sb)); 12998 } 12999 #endif /* CTL_TIME_IO */ 13000 13001 switch (io->io_hdr.io_type) { 13002 case CTL_IO_SCSI: 13003 break; 13004 case CTL_IO_TASK: 13005 if (ctl_debug & CTL_DEBUG_INFO) 13006 ctl_io_error_print(io, NULL); 13007 fe_done(io); 13008 return; 13009 default: 13010 panic("%s: Invalid CTL I/O type %d\n", 13011 __func__, io->io_hdr.io_type); 13012 } 13013 13014 if (lun == NULL) { 13015 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 13016 io->io_hdr.nexus.targ_mapped_lun)); 13017 goto bailout; 13018 } 13019 13020 mtx_lock(&lun->lun_lock); 13021 13022 /* 13023 * Check to see if we have any informational exception and status 13024 * of this command can be modified to report it in form of either 13025 * RECOVERED ERROR or NO SENSE, depending on MRIE mode page field. 
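 * (The PER bits consulted below come from the read-write and verify error
 * recovery mode pages; they gate the "conditionally report recovered
 * errors" MRIE setting, while the unconditional and no-sense settings
 * report regardless of PER.)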
	if (lun->ie_reported == 0 && lun->ie_asc != 0 &&
	    io->io_hdr.status == CTL_SUCCESS &&
	    (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) {
		uint8_t mrie = lun->MODE_IE.mrie;
		uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) ||
		    (lun->MODE_VER.byte3 & SMS_VER_PER));
		if (((mrie == SIEP_MRIE_REC_COND && per) ||
		     mrie == SIEP_MRIE_REC_UNCOND ||
		     mrie == SIEP_MRIE_NO_SENSE) &&
		    (ctl_get_cmd_entry(&io->scsiio, NULL)->flags &
		     CTL_CMD_FLAG_NO_SENSE) == 0) {
			ctl_set_sense(&io->scsiio,
			    /*current_error*/ 1,
			    /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ?
			     SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR,
			    /*asc*/ lun->ie_asc,
			    /*ascq*/ lun->ie_ascq,
			    SSD_ELEM_NONE);
			lun->ie_reported = 1;
		}
	} else if (lun->ie_reported < 0)
		lun->ie_reported = 0;

	/*
	 * Check to see if we have any errors to inject here.  We only
	 * inject errors for commands that don't already have errors set.
	 */
	if (!STAILQ_EMPTY(&lun->error_list) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) &&
	    ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0))
		ctl_inject_error(lun, io);

	/*
	 * XXX KDM how do we treat commands that aren't completed
	 * successfully?
	 *
	 * XXX KDM should we also track I/O latency?
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
	    io->io_hdr.io_type == CTL_IO_SCSI) {
		int type;
#ifdef CTL_TIME_IO
		struct bintime bt;

		getbinuptime(&bt);
		bintime_sub(&bt, &io->io_hdr.start_bt);
#endif
		if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_IN)
			type = CTL_STATS_READ;
		else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_OUT)
			type = CTL_STATS_WRITE;
		else
			type = CTL_STATS_NO_IO;

		lun->stats.bytes[type] += io->scsiio.kern_total_len;
		lun->stats.operations[type]++;
		lun->stats.dmas[type] += io->io_hdr.num_dmas;
#ifdef CTL_TIME_IO
		bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt);
		bintime_add(&lun->stats.time[type], &bt);
#endif

		mtx_lock(&port->port_lock);
		port->stats.bytes[type] += io->scsiio.kern_total_len;
		port->stats.operations[type]++;
		port->stats.dmas[type] += io->io_hdr.num_dmas;
#ifdef CTL_TIME_IO
		bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt);
		bintime_add(&port->stats.time[type], &bt);
#endif
		mtx_unlock(&port->port_lock);
	}
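	/*
	 * The per-LUN and per-port counters updated above back the
	 * statistics reported by userland tools such as ctlstat(8); note
	 * that they are only accumulated for successfully completed SCSI
	 * I/O.
	 */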
	/*
	 * Run through the blocked queue of this I/O and see if anything
	 * can be unblocked, now that this I/O is done and will be removed.
	 * We need to do it before removal to have OOA position to start.
	 */
	ctl_try_unblock_others(lun, io, TRUE);

	/*
	 * Remove this from the OOA queue.
	 */
	TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
#ifdef CTL_TIME_IO
	if (TAILQ_EMPTY(&lun->ooa_queue))
		lun->last_busy = getsbinuptime();
#endif

	/*
	 * If the LUN has been invalidated, free it if there is nothing
	 * left on its OOA queue.
	 */
	if ((lun->flags & CTL_LUN_INVALID)
	 && TAILQ_EMPTY(&lun->ooa_queue)) {
		mtx_unlock(&lun->lun_lock);
		ctl_free_lun(lun);
	} else
		mtx_unlock(&lun->lun_lock);

bailout:

	/*
	 * If this command has been aborted, make sure we set the status
	 * properly.  The FETD is responsible for freeing the I/O and doing
	 * whatever it needs to do to clean up its state.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT)
		ctl_set_task_aborted(&io->scsiio);

	/*
	 * If enabled, print command error status.
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS &&
	    (ctl_debug & CTL_DEBUG_INFO) != 0)
		ctl_io_error_print(io, NULL);

	/*
	 * Tell the FETD or the other shelf controller we're done with this
	 * command.  Note that only SCSI commands get to this point.  Task
	 * management commands are completed above.
	 */
	if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
	    (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.serializing_sc = io->io_hdr.remote_io;
		msg.hdr.nexus = io->io_hdr.nexus;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
		    M_WAITOK);
	}

	fe_done(io);
}

/*
 * Front end should call this if it doesn't do autosense.  When the request
 * sense comes back in from the initiator, we'll dequeue this and send it.
 */
int
ctl_queue_sense(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_port *port = CTL_PORT(io);
	struct ctl_lun *lun;
	struct scsi_sense_data *ps;
	uint32_t initidx, p, targ_lun;

	CTL_DEBUG_PRINT(("ctl_queue_sense\n"));

	targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	/*
	 * LUN lookup will likely move to the ctl_work_thread() once we
	 * have our new queueing infrastructure (that doesn't put things on
	 * a per-LUN queue initially).  That is so that we can handle
	 * things like an INQUIRY to a LUN that we don't have enabled.  We
	 * can't deal with that right now.
	 * If we don't have a LUN for this, just toss the sense information.
	 */
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		goto bailout;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);

	initidx = ctl_get_initindex(&io->io_hdr.nexus);
	p = initidx / CTL_MAX_INIT_PER_PORT;
	if (lun->pending_sense[p] == NULL) {
		lun->pending_sense[p] = malloc(sizeof(*ps) * CTL_MAX_INIT_PER_PORT,
		    M_CTL, M_NOWAIT | M_ZERO);
	}
	if ((ps = lun->pending_sense[p]) != NULL) {
		ps += initidx % CTL_MAX_INIT_PER_PORT;
		memset(ps, 0, sizeof(*ps));
		memcpy(ps, &io->scsiio.sense_data, io->scsiio.sense_len);
	}
	mtx_unlock(&lun->lun_lock);

bailout:
	ctl_free_io(io);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Primary command inlet from frontend ports.  All SCSI and task I/O
 * requests must go through this function.
 */
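/*
 * A minimal sketch of how a frontend port driver typically feeds a SCSI
 * command into CTL (the angle-bracketed values and exact field choices
 * are illustrative only; details vary by frontend):
 *
 *	union ctl_io *io;
 *
 *	io = ctl_alloc_io(port->ctl_pool_ref);
 *	ctl_zero_io(io);
 *	io->io_hdr.io_type = CTL_IO_SCSI;
 *	io->io_hdr.nexus.targ_port = port->targ_port;
 *	io->io_hdr.nexus.initid = <initiator index>;
 *	io->io_hdr.nexus.targ_lun = <port-relative LUN>;
 *	io->scsiio.tag_num = <task tag>;
 *	io->scsiio.tag_type = CTL_TAG_SIMPLE;
 *	io->scsiio.cdb_len = <CDB length>;
 *	memcpy(io->scsiio.cdb, <CDB bytes>, io->scsiio.cdb_len);
 *	if (ctl_queue(io) != CTL_RETVAL_COMPLETE)
 *		ctl_free_io(io);
 */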
int
ctl_queue(union ctl_io *io)
{
	struct ctl_port *port = CTL_PORT(io);

	CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));

#ifdef CTL_TIME_IO
	io->io_hdr.start_time = time_uptime;
	getbinuptime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */

	/* Map FE-specific LUN ID into global one. */
	io->io_hdr.nexus.targ_mapped_lun =
	    ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
	case CTL_IO_TASK:
		if (ctl_debug & CTL_DEBUG_CDB)
			ctl_io_print(io);
		ctl_enqueue_incoming(io);
		break;
	default:
		printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
		return (EINVAL);
	}

	return (CTL_RETVAL_COMPLETE);
}

#ifdef CTL_IO_DELAY
static void
ctl_done_timer_wakeup(void *arg)
{
	union ctl_io *io;

	io = (union ctl_io *)arg;
	ctl_done(io);
}
#endif /* CTL_IO_DELAY */

void
ctl_serseq_done(union ctl_io *io)
{
	struct ctl_lun *lun = CTL_LUN(io);

	if (lun->be_lun == NULL ||
	    lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF)
		return;
	mtx_lock(&lun->lun_lock);
	io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
	ctl_try_unblock_others(lun, io, FALSE);
	mtx_unlock(&lun->lun_lock);
}
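/*
 * Common completion entry point: everything that finishes a command
 * eventually lands in ctl_done() and is handed to a worker thread via
 * ctl_enqueue_done().  With CTL_IO_DELAY compiled in, completion (like
 * datamove) can additionally be delayed per LUN for testing; those
 * delays are normally configured from userland, e.g. with ctladm(8).
 */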
void
ctl_done(union ctl_io *io)
{

	/*
	 * Enable this to catch duplicate completion issues.
	 */
#if 0
	if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
		printf("%s: type %d msg %d cdb %x iptl: "
		    "%u:%u:%u tag 0x%04x "
		    "flag %#x status %x\n",
		    __func__,
		    io->io_hdr.io_type,
		    io->io_hdr.msg_type,
		    io->scsiio.cdb[0],
		    io->io_hdr.nexus.initid,
		    io->io_hdr.nexus.targ_port,
		    io->io_hdr.nexus.targ_lun,
		    (io->io_hdr.io_type == CTL_IO_TASK) ?
		    io->taskio.tag_num : io->scsiio.tag_num,
		    io->io_hdr.flags,
		    io->io_hdr.status);
	} else
		io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
#endif

	/*
	 * This is an internal copy of an I/O, and should not go through
	 * the normal done processing logic.
	 */
	if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
		return;

#ifdef CTL_IO_DELAY
	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
	} else {
		struct ctl_lun *lun = CTL_LUN(io);

		if ((lun != NULL)
		 && (lun->delay_info.done_delay > 0)) {
			callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
			callout_reset(&io->io_hdr.delay_callout,
			    lun->delay_info.done_delay * hz,
			    ctl_done_timer_wakeup, io);
			if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
				lun->delay_info.done_delay = 0;
			return;
		}
	}
#endif /* CTL_IO_DELAY */

	ctl_enqueue_done(io);
}

static void
ctl_work_thread(void *arg)
{
	struct ctl_thread *thr = (struct ctl_thread *)arg;
	struct ctl_softc *softc = thr->ctl_softc;
	union ctl_io *io;
	int retval;

	CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
	thread_lock(curthread);
	sched_prio(curthread, PUSER - 1);
	thread_unlock(curthread);

	while (!softc->shutdown) {
		/*
		 * We handle the queues in this order:
		 * - ISC
		 * - done queue (to free up resources, unblock other commands)
		 * - incoming queue
		 * - RtR queue
		 *
		 * If all of those queues are empty, we fall through and
		 * sleep until there is more work to do.
		 */
		mtx_lock(&thr->queue_lock);
		io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
			mtx_unlock(&thr->queue_lock);
			ctl_handle_isc(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->done_queue, links);
			/* clear any blocked commands, call fe_done */
			mtx_unlock(&thr->queue_lock);
			ctl_process_done(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
			mtx_unlock(&thr->queue_lock);
			if (io->io_hdr.io_type == CTL_IO_TASK)
				ctl_run_task(io);
			else
				ctl_scsiio_precheck(softc, &io->scsiio);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
			mtx_unlock(&thr->queue_lock);
			retval = ctl_scsiio(&io->scsiio);
			if (retval != CTL_RETVAL_COMPLETE)
				CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
			continue;
		}

		/* Sleep until we have something to do. */
		mtx_sleep(thr, &thr->queue_lock, PDROP, "-", 0);
	}
	thr->thread = NULL;
	kthread_exit();
}
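/*
 * Periodically scan all LUNs for logical block provisioning threshold
 * crossings.  As a worked example of the math below: a descriptor with a
 * count of 100 and a CTL_LBP_EXPONENT of, say, 11 arms at
 * 100 << 11 = 204800 blocks; with "increasing" arming the unit attention
 * is raised once the backend attribute reaches or exceeds that value,
 * with "decreasing" arming once it drops to or below it.
 */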
static void
ctl_thresh_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_lun *lun;
	struct ctl_logical_block_provisioning_page *page;
	const char *attr;
	union ctl_ha_msg msg;
	uint64_t thres, val;
	int i, e, set;

	CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));
	thread_lock(curthread);
	sched_prio(curthread, PUSER - 1);
	thread_unlock(curthread);

	while (!softc->shutdown) {
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			if ((lun->flags & CTL_LUN_DISABLED) ||
			    (lun->flags & CTL_LUN_NO_MEDIA) ||
			    lun->backend->lun_attr == NULL)
				continue;
			if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
			    softc->ha_mode == CTL_HA_MODE_XFER)
				continue;
			if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0)
				continue;
			e = 0;
			page = &lun->MODE_LBP;
			for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
				if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
					continue;
				thres = scsi_4btoul(page->descr[i].count);
				thres <<= CTL_LBP_EXPONENT;
				switch (page->descr[i].resource) {
				case 0x01:
					attr = "blocksavail";
					break;
				case 0x02:
					attr = "blocksused";
					break;
				case 0xf1:
					attr = "poolblocksavail";
					break;
				case 0xf2:
					attr = "poolblocksused";
					break;
				default:
					continue;
				}
				mtx_unlock(&softc->ctl_lock); // XXX
				val = lun->backend->lun_attr(lun->be_lun, attr);
				mtx_lock(&softc->ctl_lock);
				if (val == UINT64_MAX)
					continue;
				if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
				    == SLBPPD_ARMING_INC)
					e = (val >= thres);
				else
					e = (val <= thres);
				if (e)
					break;
			}
			mtx_lock(&lun->lun_lock);
			if (e) {
				scsi_u64to8b((uint8_t *)&page->descr[i] -
				    (uint8_t *)page, lun->ua_tpt_info);
				if (lun->lasttpt == 0 ||
				    time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
					lun->lasttpt = time_uptime;
					ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
					set = 1;
				} else
					set = 0;
			} else {
				lun->lasttpt = 0;
				ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
				set = -1;
			}
			mtx_unlock(&lun->lun_lock);
			if (set != 0 &&
			    lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
				/* Send msg to other side. */
				bzero(&msg.ua, sizeof(msg.ua));
				msg.hdr.msg_type = CTL_MSG_UA;
				msg.hdr.nexus.initid = -1;
				msg.hdr.nexus.targ_port = -1;
				msg.hdr.nexus.targ_lun = lun->lun;
				msg.hdr.nexus.targ_mapped_lun = lun->lun;
				msg.ua.ua_all = 1;
				msg.ua.ua_set = (set > 0);
				msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
				memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8);
				mtx_unlock(&softc->ctl_lock); // XXX
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
				    sizeof(msg.ua), M_WAITOK);
				mtx_lock(&softc->ctl_lock);
			}
		}
		mtx_sleep(&softc->thresh_thread, &softc->ctl_lock,
		    PDROP, "-", CTL_LBP_PERIOD * hz);
	}
	softc->thresh_thread = NULL;
	kthread_exit();
}
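/*
 * The enqueue helpers below pick a worker thread by hashing: incoming
 * I/O is spread by (target port, initiator) so different initiators land
 * on different threads, while RtR, done and ISC work is hashed on the
 * mapped LUN so that all processing for a given LUN stays on one thread
 * and keeps its ordering.
 */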
static void
ctl_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;
	u_int idx;

	idx = (io->io_hdr.nexus.targ_port * 127 +
	    io->io_hdr.nexus.initid) % worker_threads;
	thr = &softc->threads[idx];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_rtr(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_done(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_isc(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * vim: ts=8
 */