/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id$
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#define _CTL_C

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ctype.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
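 *
 * Each page is defined in two flavors: a "default" copy used to seed the
 * values reported by MODE SENSE, and a "changeable" copy whose non-zero
 * fields mark what an initiator may modify via MODE SELECT.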
 */
const static struct copan_debugconf_subpage debugconf_page_default = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
	DBGCNF_VERSION,			/* page_version */
	{CTL_TIME_IO_DEFAULT_SECS>>8,
	 CTL_TIME_IO_DEFAULT_SECS>>0},	/* ctl_time_io_secs */
};

const static struct copan_debugconf_subpage debugconf_page_changeable = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
	0,				/* page_version */
	{0xff,0xff},			/* ctl_time_io_secs */
};

const static struct scsi_da_rw_recovery_page rw_er_page_default = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/SMS_RWER_LBPERE,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_rw_recovery_page rw_er_page_changeable = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/0,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/0,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_format_page format_page_default = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
			       CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ SFP_HSEC,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_format_page format_page_changeable = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {0, 0},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ 0,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_default = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ CTL_DEFAULT_HEADS,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ SRDP_RPL_DISABLED,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
			   CTL_DEFAULT_ROTATION_RATE & 0xff},
	/*reserved2*/ {0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ 0,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ 0,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {0, 0},
	/*reserved2*/ {0, 0}
};

const static struct scsi_caching_page caching_page_default = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_DISC | SCP_WCE,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0xff, 0xff},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0xff, 0xff},
	/*max_pf_ceiling*/ {0xff, 0xff},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_caching_page caching_page_changeable = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_WCE | SCP_RCD,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0, 0},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0, 0},
	/*max_pf_ceiling*/ {0, 0},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_control_page control_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/0,
	/*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
	/*eca_and_aen*/0,
	/*flags4*/SCP_TAS,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

const static struct scsi_control_page control_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/SCP_DSENSE,
	/*queue_flags*/SCP_QUEUE_ALG_MASK,
	/*eca_and_aen*/SCP_SWP,
	/*flags4*/0,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

#define CTL_CEM_LEN	(sizeof(struct scsi_control_ext_page) - 4)

const static struct scsi_control_ext_page control_ext_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
	/*subpage_code*/0x01,
	/*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
	/*flags*/0,
	/*prio*/0,
	/*max_sense*/0
};

const static struct scsi_control_ext_page control_ext_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
	/*subpage_code*/0x01,
	/*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
	/*flags*/0,
	/*prio*/0,
	/*max_sense*/0
};

const static struct scsi_info_exceptions_page ie_page_default = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/SIEP_FLAGS_DEXCPT,
	/*mrie*/0,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 0}
};

const static struct scsi_info_exceptions_page ie_page_changeable = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/0,
	/*mrie*/0,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 0}
};

#define CTL_LBPM_LEN	(sizeof(struct ctl_logical_block_provisioning_page) - 4)

const static struct ctl_logical_block_provisioning_page lbp_page_default = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0x01,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0x02,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf1,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf2,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int ctl_debug = CTL_DEBUG_NONE;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
    &ctl_debug, 0, "Enabled debug flags");

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0),
 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES	10

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			      struct ctl_ooa *ooa_hdr,
			      struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
			 struct ctl_be_lun *be_lun);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr);

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
				  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
				       int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
					 int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
				   bool seq);
static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
					 union ctl_io *pending_io,
					 union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
				union ctl_io *starting_io);
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
				const struct ctl_cmd_entry *entry,
				struct ctl_scsiio *ctsio);
static void ctl_failover_lun(struct ctl_lun *lun);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
			       struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
			    ctl_ua_type ua_type);
static int ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io,
			    ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_query_task(union ctl_io *io, int task_set);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static int ctl_query_async_event(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
				    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static int ctl_process_done(union ctl_io *io);
static void ctl_lun_thread(void *arg);
static void ctl_thresh_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);
static const struct ctl_cmd_entry *
    ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
    ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
			      const struct ctl_cmd_entry *entry);

static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};


MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

static struct ctl_frontend ha_frontend =
{
	.name = "ha",
};

static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
			    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	ctsio->sense_residual = msg_info->scsi.sense_residual;
	ctsio->residual = msg_info->scsi.residual;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	       msg_info->scsi.sense_len);
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
				union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

void
ctl_isc_announce_lun(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_ha_msg *msg;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;

	if (softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	mtx_lock(&lun->lun_lock);
	i = sizeof(msg->lun);
	if (lun->lun_devid)
		i += lun->lun_devid->len;
	i += sizeof(pr_key) * lun->pr_key_count;
alloc:
	mtx_unlock(&lun->lun_lock);
	msg = malloc(i, M_CTL, M_WAITOK);
	mtx_lock(&lun->lun_lock);
	k = sizeof(msg->lun);
	if (lun->lun_devid)
		k += lun->lun_devid->len;
	k += sizeof(pr_key) * lun->pr_key_count;
	if (i < k) {
		free(msg, M_CTL);
		i = k;
		goto alloc;
	}
	bzero(&msg->lun, sizeof(msg->lun));
	msg->hdr.msg_type = CTL_MSG_LUN_SYNC;
	msg->hdr.nexus.targ_lun = lun->lun;
	msg->hdr.nexus.targ_mapped_lun = lun->lun;
	msg->lun.flags = lun->flags;
	msg->lun.pr_generation = lun->PRGeneration;
	msg->lun.pr_res_idx = lun->pr_res_idx;
	msg->lun.pr_res_type = lun->res_type;
	msg->lun.pr_key_count = lun->pr_key_count;
	i = 0;
	if (lun->lun_devid) {
		msg->lun.lun_devid_len = lun->lun_devid->len;
		memcpy(&msg->lun.data[i], lun->lun_devid->data,
		    msg->lun.lun_devid_len);
		i += msg->lun.lun_devid_len;
	}
	for (k = 0; k < CTL_MAX_INITIATORS; k++) {
		if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0)
			continue;
		pr_key.pr_iid = k;
		memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key));
		i += sizeof(pr_key);
	}
	mtx_unlock(&lun->lun_lock);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
	    M_WAITOK);
	free(msg, M_CTL);
}

void
ctl_isc_announce_port(struct ctl_port *port)
{
	struct ctl_softc *softc = control_softc;
	union ctl_ha_msg *msg;
	int i;

	if (port->targ_port < softc->port_min ||
	    port->targ_port >= softc->port_max ||
	    softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	i = sizeof(msg->port) + strlen(port->port_name) + 1;
	if (port->lun_map)
		i += sizeof(uint32_t) * CTL_MAX_LUNS;
	if (port->port_devid)
		i += port->port_devid->len;
	if (port->target_devid)
		i += port->target_devid->len;
	if (port->init_devid)
		i += port->init_devid->len;
	msg = malloc(i, M_CTL, M_WAITOK);
	bzero(&msg->port, sizeof(msg->port));
	msg->hdr.msg_type = CTL_MSG_PORT_SYNC;
	msg->hdr.nexus.targ_port = port->targ_port;
	msg->port.port_type = port->port_type;
	msg->port.physical_port = port->physical_port;
	msg->port.virtual_port = port->virtual_port;
	msg->port.status = port->status;
	i = 0;
	msg->port.name_len = sprintf(&msg->port.data[i],
	    "%d:%s", softc->ha_id, port->port_name) + 1;
	i += msg->port.name_len;
	if (port->lun_map) {
		msg->port.lun_map_len = sizeof(uint32_t) * CTL_MAX_LUNS;
		memcpy(&msg->port.data[i], port->lun_map,
		    msg->port.lun_map_len);
		i += msg->port.lun_map_len;
	}
	if (port->port_devid) {
		msg->port.port_devid_len = port->port_devid->len;
		memcpy(&msg->port.data[i], port->port_devid->data,
		    msg->port.port_devid_len);
		i += msg->port.port_devid_len;
	}
	if (port->target_devid) {
		msg->port.target_devid_len = port->target_devid->len;
		memcpy(&msg->port.data[i], port->target_devid->data,
		    msg->port.target_devid_len);
		i += msg->port.target_devid_len;
	}
	if (port->init_devid) {
		msg->port.init_devid_len = port->init_devid->len;
		memcpy(&msg->port.data[i], port->init_devid->data,
		    msg->port.init_devid_len);
		i += msg->port.init_devid_len;
	}
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
	    M_WAITOK);
	free(msg, M_CTL);
}

void
ctl_isc_announce_iid(struct ctl_port *port, int iid)
{
	struct ctl_softc *softc = control_softc;
	union ctl_ha_msg *msg;
	int i, l;

	if (port->targ_port < softc->port_min ||
	    port->targ_port >= softc->port_max ||
	    softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	mtx_lock(&softc->ctl_lock);
	i = sizeof(msg->iid);
	l = 0;
	if (port->wwpn_iid[iid].name)
		l = strlen(port->wwpn_iid[iid].name) + 1;
	i += l;
	msg = malloc(i, M_CTL, M_NOWAIT);
	if (msg == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	bzero(&msg->iid, sizeof(msg->iid));
	msg->hdr.msg_type = CTL_MSG_IID_SYNC;
	msg->hdr.nexus.targ_port = port->targ_port;
	msg->hdr.nexus.initid = iid;
	msg->iid.in_use = port->wwpn_iid[iid].in_use;
	msg->iid.name_len = l;
	msg->iid.wwpn = port->wwpn_iid[iid].wwpn;
	if (port->wwpn_iid[iid].name)
		strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l);
	mtx_unlock(&softc->ctl_lock);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT);
	free(msg, M_CTL);
}

static void
ctl_isc_ha_link_up(struct ctl_softc *softc)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	int i;

	STAILQ_FOREACH(port, &softc->port_list, links) {
		ctl_isc_announce_port(port);
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use)
				ctl_isc_announce_iid(port, i);
		}
	}
	STAILQ_FOREACH(lun, &softc->lun_list, links)
		ctl_isc_announce_lun(lun);
}

static void
ctl_isc_ha_link_down(struct ctl_softc *softc)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	union ctl_io *io;
	int i;

	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) {
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
		}
		mtx_unlock(&lun->lun_lock);

		mtx_unlock(&softc->ctl_lock);
		io = ctl_alloc_io(softc->othersc_pool);
		mtx_lock(&softc->ctl_lock);
		ctl_zero_io(io);
		io->io_hdr.msg_type = CTL_MSG_FAILOVER;
		io->io_hdr.nexus.targ_mapped_lun = lun->lun;
		ctl_enqueue_isc(io);
	}

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port->targ_port >= softc->port_min &&
		    port->targ_port < softc->port_max)
			continue;
		port->status &= ~CTL_PORT_STATUS_ONLINE;
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			port->wwpn_iid[i].in_use = 0;
			free(port->wwpn_iid[i].name, M_CTL);
			port->wwpn_iid[i].name = NULL;
		}
	}
	mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);

	mtx_lock(&softc->ctl_lock);
	if (msg->hdr.nexus.targ_lun < CTL_MAX_LUNS &&
	    (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) != NULL) {
		mtx_lock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES &&
		    msg->ua.ua_set)
			memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8);
		if (msg->ua.ua_all) {
			if (msg->ua.ua_set)
				ctl_est_ua_all(lun, iid, msg->ua.ua_type);
			else
				ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
		} else {
			if (msg->ua.ua_set)
				ctl_est_ua(lun, iid, msg->ua.ua_type);
			else
				ctl_clr_ua(lun, iid, msg->ua.ua_type);
		}
		mtx_unlock(&lun->lun_lock);
	} else
		mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;
	ctl_lun_flags oflags;
	uint32_t targ_lun;

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if ((targ_lun >= CTL_MAX_LUNS) ||
	    ((lun = softc->ctl_luns[targ_lun]) == NULL)) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
	if (msg->lun.lun_devid_len != i || (i > 0 &&
	    memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
		mtx_unlock(&lun->lun_lock);
		printf("%s: Received conflicting HA LUN %d\n",
		    __func__, msg->hdr.nexus.targ_lun);
		return;
	} else {
		/* Record whether peer is primary. */
		oflags = lun->flags;
		if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_DISABLED) == 0)
			lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
		else
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
		if (oflags != lun->flags)
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);

		/* If peer is primary and we are not -- use data */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
		    (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
			lun->PRGeneration = msg->lun.pr_generation;
			lun->pr_res_idx = msg->lun.pr_res_idx;
			lun->res_type = msg->lun.pr_res_type;
			lun->pr_key_count = msg->lun.pr_key_count;
			for (k = 0; k < CTL_MAX_INITIATORS; k++)
				ctl_clr_prkey(lun, k);
			for (k = 0; k < msg->lun.pr_key_count; k++) {
				memcpy(&pr_key, &msg->lun.data[i],
				    sizeof(pr_key));
				ctl_alloc_prkey(lun, pr_key.pr_iid);
				ctl_set_prkey(lun, pr_key.pr_iid,
				    pr_key.pr_key);
				i += sizeof(pr_key);
			}
		}

		mtx_unlock(&lun->lun_lock);
		CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
		    __func__, msg->hdr.nexus.targ_lun,
		    (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
		    "primary" : "secondary"));

		/* If we are primary but peer doesn't know -- notify */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
			ctl_isc_announce_lun(lun);
	}
}

static void
ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	int i, new;

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		CTL_DEBUG_PRINT(("%s: New port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 1;
		port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO);
		port->frontend = &ha_frontend;
		port->targ_port = msg->hdr.nexus.targ_port;
	} else if (port->frontend == &ha_frontend) {
		CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 0;
	} else {
		printf("%s: Received conflicting HA port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	port->port_type = msg->port.port_type;
	port->physical_port = msg->port.physical_port;
	port->virtual_port = msg->port.virtual_port;
	port->status = msg->port.status;
	i = 0;
	free(port->port_name, M_CTL);
	port->port_name = strndup(&msg->port.data[i], msg->port.name_len,
	    M_CTL);
	i += msg->port.name_len;
	if (msg->port.lun_map_len != 0) {
		if (port->lun_map == NULL)
			port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS,
			    M_CTL, M_WAITOK);
		memcpy(port->lun_map, &msg->port.data[i],
		    sizeof(uint32_t) * CTL_MAX_LUNS);
		i += msg->port.lun_map_len;
	} else {
		free(port->lun_map, M_CTL);
		port->lun_map = NULL;
	}
	if (msg->port.port_devid_len != 0) {
		if (port->port_devid == NULL ||
		    port->port_devid->len != msg->port.port_devid_len) {
			free(port->port_devid, M_CTL);
			port->port_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.port_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->port_devid->data, &msg->port.data[i],
		    msg->port.port_devid_len);
		port->port_devid->len = msg->port.port_devid_len;
		i += msg->port.port_devid_len;
	} else {
		free(port->port_devid, M_CTL);
		port->port_devid = NULL;
	}
	if (msg->port.target_devid_len != 0) {
		if (port->target_devid == NULL ||
		    port->target_devid->len != msg->port.target_devid_len) {
			free(port->target_devid, M_CTL);
			port->target_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.target_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->target_devid->data, &msg->port.data[i],
		    msg->port.target_devid_len);
		port->target_devid->len = msg->port.target_devid_len;
		i += msg->port.target_devid_len;
	} else {
		free(port->target_devid, M_CTL);
		port->target_devid = NULL;
	}
	if (msg->port.init_devid_len != 0) {
		if (port->init_devid == NULL ||
		    port->init_devid->len != msg->port.init_devid_len) {
			free(port->init_devid, M_CTL);
			port->init_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.init_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->init_devid->data, &msg->port.data[i],
		    msg->port.init_devid_len);
		port->init_devid->len = msg->port.init_devid_len;
		i += msg->port.init_devid_len;
	} else {
		free(port->init_devid, M_CTL);
		port->init_devid = NULL;
	}
	if (new) {
		if (ctl_port_register(port) != 0) {
			printf("%s: ctl_port_register() failed with error\n",
			    __func__);
		}
	}
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
			continue;
		mtx_lock(&lun->lun_lock);
		ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
		mtx_unlock(&lun->lun_lock);
	}
	mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	int iid;

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		printf("%s: Received IID for unknown port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	iid = msg->hdr.nexus.initid;
	port->wwpn_iid[iid].in_use = msg->iid.in_use;
	port->wwpn_iid[iid].wwpn = msg->iid.wwpn;
	free(port->wwpn_iid[iid].name, M_CTL);
	if (msg->iid.name_len) {
		port->wwpn_iid[iid].name = strndup(&msg->iid.data[0],
		    msg->iid.name_len, M_CTL);
	} else
		port->wwpn_iid[iid].name = NULL;
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
	struct ctl_softc *softc;
	union ctl_io *io;
	struct ctl_prio *presio;
	ctl_ha_status isc_status;

	softc = control_softc;
	CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event));
	if (event == CTL_HA_EVT_MSG_RECV) {
		union ctl_ha_msg *msg, msgbuf;

		if (param > sizeof(msgbuf))
			msg = malloc(param, M_CTL, M_WAITOK);
		else
			msg = &msgbuf;
		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param,
		    M_WAITOK);
		if (isc_status != CTL_HA_STATUS_SUCCESS) {
			printf("%s: Error receiving message: %d\n",
			    __func__, isc_status);
			if (msg != &msgbuf)
				free(msg, M_CTL);
			return;
		}

		CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type));
		switch (msg->hdr.msg_type) {
		case CTL_MSG_SERIALIZE:
			io = ctl_alloc_io(softc->othersc_pool);
			ctl_zero_io(io);
			// populate ctsio from msg
			io->io_hdr.io_type = CTL_IO_SCSI;
			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
			io->io_hdr.original_sc = msg->hdr.original_sc;
			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
					    CTL_FLAG_IO_ACTIVE;
			/*
			 * If we're in serialization-only mode, we don't
			 * want to go through full done processing.  Thus
			 * the COPY flag.
			 *
			 * XXX KDM add another flag that is more specific.
			 */
			if (softc->ha_mode != CTL_HA_MODE_XFER)
				io->io_hdr.flags |= CTL_FLAG_INT_COPY;
			io->io_hdr.nexus = msg->hdr.nexus;
#if 0
			printf("port %u, iid %u, lun %u\n",
			       io->io_hdr.nexus.targ_port,
			       io->io_hdr.nexus.initid,
			       io->io_hdr.nexus.targ_lun);
#endif
			io->scsiio.tag_num = msg->scsi.tag_num;
			io->scsiio.tag_type = msg->scsi.tag_type;
#ifdef CTL_TIME_IO
			io->io_hdr.start_time = time_uptime;
			getbintime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
			io->scsiio.cdb_len = msg->scsi.cdb_len;
			memcpy(io->scsiio.cdb, msg->scsi.cdb,
			       CTL_MAX_CDBLEN);
			if (softc->ha_mode == CTL_HA_MODE_XFER) {
				const struct ctl_cmd_entry *entry;

				entry = ctl_get_cmd_entry(&io->scsiio, NULL);
				io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
				io->io_hdr.flags |=
				    entry->flags & CTL_FLAG_DATA_MASK;
			}
			ctl_enqueue_isc(io);
			break;

		/* Performed on the Originating SC, XFER mode only */
		case CTL_MSG_DATAMOVE: {
			struct ctl_sg_entry *sgl;
			int i, j;

			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n", __func__);
				/* XXX KDM do something here */
				break;
			}
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			/*
			 * Keep track of this, we need to send it back over
			 * when the datamove is complete.
			 */
			io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
			if (msg->hdr.status == CTL_SUCCESS)
				io->io_hdr.status = msg->hdr.status;

			if (msg->dt.sg_sequence == 0) {
				i = msg->dt.kern_sg_entries +
				    msg->dt.kern_data_len /
				    CTL_HA_DATAMOVE_SEGMENT + 1;
				sgl = malloc(sizeof(*sgl) * i, M_CTL,
				    M_WAITOK | M_ZERO);
				io->io_hdr.remote_sglist = sgl;
				io->io_hdr.local_sglist =
				    &sgl[msg->dt.kern_sg_entries];

				io->scsiio.kern_data_ptr = (uint8_t *)sgl;

				io->scsiio.kern_sg_entries =
				    msg->dt.kern_sg_entries;
				io->scsiio.rem_sg_entries =
				    msg->dt.kern_sg_entries;
				io->scsiio.kern_data_len =
				    msg->dt.kern_data_len;
				io->scsiio.kern_total_len =
				    msg->dt.kern_total_len;
				io->scsiio.kern_data_resid =
				    msg->dt.kern_data_resid;
				io->scsiio.kern_rel_offset =
				    msg->dt.kern_rel_offset;
				io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
				io->io_hdr.flags |= msg->dt.flags &
				    CTL_FLAG_BUS_ADDR;
			} else
				sgl = (struct ctl_sg_entry *)
				    io->scsiio.kern_data_ptr;

			for (i = msg->dt.sent_sg_entries, j = 0;
			     i < (msg->dt.sent_sg_entries +
			     msg->dt.cur_sg_entries); i++, j++) {
				sgl[i].addr = msg->dt.sg_list[j].addr;
				sgl[i].len = msg->dt.sg_list[j].len;

#if 0
				printf("%s: DATAMOVE: %p,%lu j=%d, i=%d\n",
				    __func__, sgl[i].addr, sgl[i].len, j, i);
#endif
			}

			/*
			 * If this is the last piece of the I/O, we've got
			 * the full S/G list.  Queue processing in the thread.
			 * Otherwise wait for the next piece.
			 */
			if (msg->dt.sg_last != 0)
				ctl_enqueue_isc(io);
			break;
		}
		/* Performed on the Serializing (primary) SC, XFER mode only */
		case CTL_MSG_DATAMOVE_DONE: {
			if (msg->hdr.serializing_sc == NULL) {
				printf("%s: serializing_sc == NULL!\n",
				    __func__);
				/* XXX KDM now what? */
				break;
			}
			/*
			 * We grab the sense information here in case
			 * there was a failure, so we can return status
			 * back to the initiator.
			 */
			io = msg->hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.port_status = msg->scsi.fetd_status;
			io->scsiio.residual = msg->scsi.residual;
			if (msg->hdr.status != CTL_STATUS_NONE) {
				io->io_hdr.status = msg->hdr.status;
				io->scsiio.scsi_status = msg->scsi.scsi_status;
				io->scsiio.sense_len = msg->scsi.sense_len;
				io->scsiio.sense_residual =
				    msg->scsi.sense_residual;
				memcpy(&io->scsiio.sense_data,
				    &msg->scsi.sense_data,
				    msg->scsi.sense_len);
				if (msg->hdr.status == CTL_SUCCESS)
					io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
			}
			ctl_enqueue_isc(io);
			break;
		}

		/* Performed on the Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n",
				    __func__);
				break;
			}
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
			ctl_enqueue_isc(io);
			break;

		/*
		 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode.
		 */
		case CTL_MSG_FINISH_IO:
			if (softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(softc, msg);
			else
				ctl_isc_handler_finish_ser_only(softc, msg);
			break;

		/* Performed on the Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				    __func__);
				break;
			}
			ctl_copy_sense_data(msg, io);
			/*
			 * IO should have already been cleaned up on other
			 * SC so clear this flag so we won't send a message
			 * back to finish the IO there.
			 */
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;

			/* io = msg->hdr.serializing_sc; */
			io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
			ctl_enqueue_isc(io);
			break;

		/* Handle resets sent from the other side */
		case CTL_MSG_MANAGE_TASKS: {
			struct ctl_taskio *taskio;
			taskio = (struct ctl_taskio *)ctl_alloc_io(
			    softc->othersc_pool);
			ctl_zero_io((union ctl_io *)taskio);
			taskio->io_hdr.io_type = CTL_IO_TASK;
			taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			taskio->io_hdr.nexus = msg->hdr.nexus;
			taskio->task_action = msg->task.task_action;
			taskio->tag_num = msg->task.tag_num;
			taskio->tag_type = msg->task.tag_type;
#ifdef CTL_TIME_IO
			taskio->io_hdr.start_time = time_uptime;
			getbintime(&taskio->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
			ctl_run_task((union ctl_io *)taskio);
			break;
		}
		/* Persistent Reserve action which needs attention */
		case CTL_MSG_PERS_ACTION:
			presio = (struct ctl_prio *)ctl_alloc_io(
			    softc->othersc_pool);
			ctl_zero_io((union ctl_io *)presio);
			presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
			presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			presio->io_hdr.nexus = msg->hdr.nexus;
			presio->pr_msg = msg->pr;
			ctl_enqueue_isc((union ctl_io *)presio);
			break;
		case CTL_MSG_UA:
			ctl_isc_ua(softc, msg, param);
			break;
		case CTL_MSG_PORT_SYNC:
			ctl_isc_port_sync(softc, msg, param);
			break;
		case CTL_MSG_LUN_SYNC:
			ctl_isc_lun_sync(softc, msg, param);
			break;
		case CTL_MSG_IID_SYNC:
			ctl_isc_iid_sync(softc, msg, param);
			break;
		default:
			printf("Received HA message of unknown type %d\n",
			    msg->hdr.msg_type);
			break;
		}
		if (msg != &msgbuf)
			free(msg, M_CTL);
	} else if (event == CTL_HA_EVT_LINK_CHANGE) {
		printf("CTL: HA link status changed from %d to %d\n",
		    softc->ha_link, param);
		if (param == softc->ha_link)
			return;
		if (softc->ha_link == CTL_HA_LINK_ONLINE) {
			softc->ha_link = param;
			ctl_isc_ha_link_down(softc);
		} else {
			softc->ha_link = param;
			if (softc->ha_link == CTL_HA_LINK_ONLINE)
				ctl_isc_ha_link_up(softc);
		}
		return;
	} else {
		printf("ctl_isc_event_handler: Unknown event %d\n", event);
		return;
	}
}

static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{

	memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data,
	    src->scsi.sense_len);
	dest->scsiio.scsi_status = src->scsi.scsi_status;
	dest->scsiio.sense_len = src->scsi.sense_len;
	dest->io_hdr.status = src->hdr.status;
}

static void
ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest)
{

	memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data,
	    src->scsiio.sense_len);
	dest->scsi.scsi_status = src->scsiio.scsi_status;
	dest->scsi.sense_len = src->scsiio.sense_len;
	dest->hdr.status = src->io_hdr.status;
}

void
ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	ctl_ua_type *pu;

	if (initidx < softc->init_min || initidx >= softc->init_max)
		return;
	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
}

void
ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua)
{
	int i;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	if (lun->pending_ua[port] == NULL)
		return;
	for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
		if (port * CTL_MAX_INIT_PER_PORT + i == except)
			continue;
		lun->pending_ua[port][i] |= ua;
	}
}

void
ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	int i;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = softc->port_min; i < softc->port_max; i++)
		ctl_est_ua_port(lun, i, except, ua);
}

void
ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	ctl_ua_type *pu;

	if (initidx < softc->init_min || initidx >= softc->init_max)
		return;
	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua;
}

void
ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	int i, j;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = softc->port_min; i < softc->port_max; i++) {
		if (lun->pending_ua[i] == NULL)
			continue;
		for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
			if (i * CTL_MAX_INIT_PER_PORT + j == except)
				continue;
			lun->pending_ua[i][j] &= ~ua;
		}
	}
}

void
ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
    ctl_ua_type ua_type)
{
	struct ctl_lun *lun;

	mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
	STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		ctl_clr_ua(lun, initidx, ua_type);
		mtx_unlock(&lun->lun_lock);
	}
}

static int
ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg1;
	struct ctl_lun *lun;
	struct ctl_lun_req ireq;
	int error, value;

	value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	mtx_lock(&softc->ctl_lock);
	if (value == 0)
		softc->flags |= CTL_FLAG_ACTIVE_SHELF;
	else
		softc->flags &= ~CTL_FLAG_ACTIVE_SHELF;
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_unlock(&softc->ctl_lock);
		bzero(&ireq, sizeof(ireq));
		ireq.reqtype = CTL_LUNREQ_MODIFY;
		ireq.reqdata.modify.lun_id = lun->lun;
		lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0,
		    curthread);
		if (ireq.status != CTL_LUN_OK) {
			printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n",
			    __func__, ireq.status, ireq.error_str);
		}
		mtx_lock(&softc->ctl_lock);
	}
	mtx_unlock(&softc->ctl_lock);
	return (0);
}

static int
ctl_init(void)
{
	struct ctl_softc *softc;
	void *other_pool;
	int i, error, retval;

	retval = 0;
	control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	softc = control_softc;

	softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600,
	    "cam/ctl");

	softc->dev->si_drv1 = softc;

	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
	    CTLFLAG_RD, 0, "CAM Target Layer");

	if (softc->sysctl_tree == NULL) {
		printf("%s: unable to allocate sysctl tree\n", __func__);
		destroy_dev(softc->dev);
		free(control_softc, M_DEVBUF);
		control_softc = NULL;
		return (ENOMEM);
	}

	mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
	softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	softc->open_count = 0;

	/*
	 * Default to actually sending a SYNCHRONIZE CACHE command down to
	 * the drive.
	 */
	softc->flags = CTL_FLAG_REAL_SYNC;

	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0,
	    "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)");

	/*
	 * In Copan's HA scheme, the "master" and "slave" roles are
	 * figured out through the slot the controller is in.  Although it
	 * is an active/active system, someone has to be in charge.
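	 *
	 * The ha_id tunable below selects this head's slot: 0 (or a value
	 * above NUM_TARGET_PORT_GROUPS) disables HA, otherwise each head
	 * owns a contiguous range of target port and initiator indices
	 * derived from its ID.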
	 */
	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0,
	    "HA head ID (0 - no HA)");
	if (softc->ha_id == 0 || softc->ha_id > NUM_TARGET_PORT_GROUPS) {
		softc->flags |= CTL_FLAG_ACTIVE_SHELF;
		softc->is_single = 1;
		softc->port_cnt = CTL_MAX_PORTS;
		softc->port_min = 0;
	} else {
		softc->port_cnt = CTL_MAX_PORTS / NUM_TARGET_PORT_GROUPS;
		softc->port_min = (softc->ha_id - 1) * softc->port_cnt;
	}
	softc->port_max = softc->port_min + softc->port_cnt;
	softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT;
	softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT;

	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0,
	    "HA link state (0 - offline, 1 - unknown, 2 - online)");

	STAILQ_INIT(&softc->lun_list);
	STAILQ_INIT(&softc->pending_lun_queue);
	STAILQ_INIT(&softc->fe_list);
	STAILQ_INIT(&softc->port_list);
	STAILQ_INIT(&softc->be_list);
	ctl_tpc_init(softc);

	if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
	                    &other_pool) != 0)
	{
		printf("ctl: can't allocate %d entry other SC pool, "
		       "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
		return (ENOMEM);
	}
	softc->othersc_pool = other_pool;

	if (worker_threads <= 0)
		worker_threads = max(1, mp_ncpus / 4);
	if (worker_threads > CTL_MAX_THREADS)
		worker_threads = CTL_MAX_THREADS;

	for (i = 0; i < worker_threads; i++) {
		struct ctl_thread *thr = &softc->threads[i];

		mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
		thr->ctl_softc = softc;
		STAILQ_INIT(&thr->incoming_queue);
		STAILQ_INIT(&thr->rtr_queue);
		STAILQ_INIT(&thr->done_queue);
		STAILQ_INIT(&thr->isc_queue);

		error = kproc_kthread_add(ctl_work_thread, thr,
		    &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
		if (error != 0) {
			printf("error creating CTL work thread!\n");
			ctl_pool_free(other_pool);
			return (error);
		}
	}
	error = kproc_kthread_add(ctl_lun_thread, softc,
	    &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
	if (error != 0) {
		printf("error creating CTL lun thread!\n");
		ctl_pool_free(other_pool);
		return (error);
	}
	error = kproc_kthread_add(ctl_thresh_thread, softc,
	    &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh");
	if (error != 0) {
		printf("error creating CTL threshold thread!\n");
		ctl_pool_free(other_pool);
		return (error);
	}

	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN,
	    softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head");

	if (softc->is_single == 0) {
		ctl_frontend_register(&ha_frontend);
		if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
			printf("ctl_init: ctl_ha_msg_init failed.\n");
			softc->is_single = 1;
		} else
		if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
		    != CTL_HA_STATUS_SUCCESS) {
			printf("ctl_init: ctl_ha_msg_register failed.\n");
			softc->is_single = 1;
		}
	}
	return (0);
}

void
ctl_shutdown(void)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun, *next_lun;

	softc = (struct ctl_softc *)control_softc;

	if (softc->is_single == 0) {
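		/*
		 * Tear down HA messaging and detach the "ha" frontend
		 * before freeing the remaining resources.
		 */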
		ctl_ha_msg_shutdown(softc);
		if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL)
		    != CTL_HA_STATUS_SUCCESS)
			printf("%s: ctl_ha_msg_deregister failed.\n", __func__);
		if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS)
			printf("%s: ctl_ha_msg_destroy failed.\n", __func__);
		ctl_frontend_deregister(&ha_frontend);
	}

	mtx_lock(&softc->ctl_lock);

	/*
	 * Free up each LUN.
	 */
	for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun) {
		next_lun = STAILQ_NEXT(lun, links);
		ctl_free_lun(lun);
	}

	mtx_unlock(&softc->ctl_lock);

#if 0
	ctl_shutdown_thread(softc->work_thread);
	mtx_destroy(&softc->queue_lock);
#endif

	ctl_tpc_shutdown(softc);
	uma_zdestroy(softc->io_zone);
	mtx_destroy(&softc->ctl_lock);

	destroy_dev(softc->dev);

	sysctl_ctx_free(&softc->sysctl_ctx);

	free(control_softc, M_DEVBUF);
	control_softc = NULL;
}

static int
ctl_module_event_handler(module_t mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		return (ctl_init());
	case MOD_UNLOAD:
		return (EBUSY);
	default:
		return (EOPNOTSUPP);
	}
}

/*
 * XXX KDM should we do some access checks here?  Bump a reference count to
 * prevent a CTL module from being unloaded while someone has it open?
 */
static int
ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

static int
ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

/*
 * Remove an initiator by port number and initiator ID.
 * Returns 0 for success, -1 for failure.
 */
int
ctl_remove_initiator(struct ctl_port *port, int iid)
{
	struct ctl_softc *softc = control_softc;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	if (iid > CTL_MAX_INIT_PER_PORT) {
		printf("%s: initiator ID %u > maximum %u!\n",
		    __func__, iid, CTL_MAX_INIT_PER_PORT);
		return (-1);
	}

	mtx_lock(&softc->ctl_lock);
	port->wwpn_iid[iid].in_use--;
	port->wwpn_iid[iid].last_use = time_uptime;
	mtx_unlock(&softc->ctl_lock);
	ctl_isc_announce_iid(port, iid);

	return (0);
}

/*
 * Add an initiator to the initiator map.
 * Returns iid for success, < 0 for failure.
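 * If iid is negative, an existing entry matching the WWPN or name is
 * reused when possible; otherwise a free slot is picked, preferring the
 * least recently used one.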
 */
int
ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name)
{
	struct ctl_softc *softc = control_softc;
	time_t best_time;
	int i, best;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	if (iid >= CTL_MAX_INIT_PER_PORT) {
		printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n",
		    __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT);
		free(name, M_CTL);
		return (-1);
	}

	mtx_lock(&softc->ctl_lock);

	if (iid < 0 && (wwpn != 0 || name != NULL)) {
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) {
				iid = i;
				break;
			}
			if (name != NULL && port->wwpn_iid[i].name != NULL &&
			    strcmp(name, port->wwpn_iid[i].name) == 0) {
				iid = i;
				break;
			}
		}
	}

	if (iid < 0) {
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use == 0 &&
			    port->wwpn_iid[i].wwpn == 0 &&
			    port->wwpn_iid[i].name == NULL) {
				iid = i;
				break;
			}
		}
	}

	if (iid < 0) {
		best = -1;
		best_time = INT32_MAX;
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use == 0) {
				if (port->wwpn_iid[i].last_use < best_time) {
					best = i;
					best_time = port->wwpn_iid[i].last_use;
				}
			}
		}
		iid = best;
	}

	if (iid < 0) {
		mtx_unlock(&softc->ctl_lock);
		free(name, M_CTL);
		return (-2);
	}

	if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) {
		/*
		 * This is not an error yet.
		 */
		if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) {
#if 0
			printf("%s: port %d iid %u WWPN %#jx arrived"
			    " again\n", __func__, port->targ_port,
			    iid, (uintmax_t)wwpn);
#endif
			goto take;
		}
		if (name != NULL && port->wwpn_iid[iid].name != NULL &&
		    strcmp(name, port->wwpn_iid[iid].name) == 0) {
#if 0
			printf("%s: port %d iid %u name '%s' arrived"
			    " again\n", __func__, port->targ_port,
			    iid, name);
#endif
			goto take;
		}

		/*
		 * This is an error, but what do we do about it?  The
		 * driver is telling us we have a new WWPN for this
		 * initiator ID, so we pretty much need to use it.
		 */
		printf("%s: port %d iid %u WWPN %#jx '%s' arrived,"
		    " but WWPN %#jx '%s' is still at that address\n",
		    __func__, port->targ_port, iid, wwpn, name,
		    (uintmax_t)port->wwpn_iid[iid].wwpn,
		    port->wwpn_iid[iid].name);

		/*
		 * XXX KDM clear have_ca and ua_pending on each LUN for
		 * this initiator.
		 */
	}
take:
	free(port->wwpn_iid[iid].name, M_CTL);
	port->wwpn_iid[iid].name = name;
	port->wwpn_iid[iid].wwpn = wwpn;
	port->wwpn_iid[iid].in_use++;
	mtx_unlock(&softc->ctl_lock);
	ctl_isc_announce_iid(port, iid);

	return (iid);
}

static int
ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
{
	int len;

	switch (port->port_type) {
	case CTL_PORT_FC:
	{
		struct scsi_transportid_fcp *id =
		    (struct scsi_transportid_fcp *)buf;
		if (port->wwpn_iid[iid].wwpn == 0)
			return (0);
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_FC;
		scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name);
		return (sizeof(*id));
	}
	case CTL_PORT_ISCSI:
	{
		struct scsi_transportid_iscsi_port *id =
		    (struct scsi_transportid_iscsi_port *)buf;
		if (port->wwpn_iid[iid].name == NULL)
			return (0);
		memset(id, 0, 256);
		id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT |
		    SCSI_PROTO_ISCSI;
		len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1;
		len = roundup2(min(len, 252), 4);
		scsi_ulto2b(len, id->additional_length);
		return (sizeof(*id) + len);
	}
	case CTL_PORT_SAS:
	{
		struct scsi_transportid_sas *id =
		    (struct scsi_transportid_sas *)buf;
		if (port->wwpn_iid[iid].wwpn == 0)
			return (0);
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_SAS;
		scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address);
		return (sizeof(*id));
	}
	default:
	{
		struct scsi_transportid_spi *id =
		    (struct scsi_transportid_spi *)buf;
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_SPI;
		scsi_ulto2b(iid, id->scsi_addr);
		scsi_ulto2b(port->targ_port, id->rel_trgt_port_id);
		return (sizeof(*id));
	}
	}
}

/*
 * Serialize a command that went down the "wrong" side, and so was sent to
 * this controller for execution.  The logic is a little different than the
 * standard case in ctl_scsiio_precheck().  Errors in this case need to get
 * sent back to the other side, but in the success case, we execute the
 * command on this side (XFER mode) or tell the other side to execute it
 * (SER_ONLY mode).
 */
static int
ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc;
	union ctl_ha_msg msg_info;
	struct ctl_lun *lun;
	const struct ctl_cmd_entry *entry;
	int retval = 0;
	uint32_t targ_lun;

	softc = control_softc;

	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if ((targ_lun < CTL_MAX_LUNS) &&
	    ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
		mtx_lock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		/*
		 * If the LUN is invalid, pretend that it doesn't exist.
		 * It will go away as soon as all pending I/O has been
		 * completed.
		 */
		if (lun->flags & CTL_LUN_DISABLED) {
			mtx_unlock(&lun->lun_lock);
			lun = NULL;
		}
	} else {
		mtx_unlock(&softc->ctl_lock);
		lun = NULL;
	}
	if (lun == NULL) {
		/*
		 * The other node would not send this request to us unless
		 * it had received an announcement that we are the primary
		 * node for this LUN.  If this LUN does not exist now, it is
		 * probably the result of a race, so respond to the initiator
		 * in the most opaque way.
		 */
		ctl_set_busy(ctsio);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		return (1);
	}

	entry = ctl_get_cmd_entry(ctsio, NULL);
	if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
		mtx_unlock(&lun->lun_lock);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		return (1);
	}

	ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
	ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = lun->be_lun;

	/*
	 * Every I/O goes into the OOA queue for a
	 * particular LUN, and stays there until completion.
	 */
#ifdef CTL_TIME_IO
	if (TAILQ_EMPTY(&lun->ooa_queue))
		lun->idle_time += getsbinuptime() - lun->last_busy;
#endif
	TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);

	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
		(union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
		 ooa_links))) {
	case CTL_ACTION_BLOCK:
		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
				  blocked_links);
		mtx_unlock(&lun->lun_lock);
		break;
	case CTL_ACTION_PASS:
	case CTL_ACTION_SKIP:
		if (softc->ha_mode == CTL_HA_MODE_XFER) {
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
			mtx_unlock(&lun->lun_lock);
		} else {
			ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
			mtx_unlock(&lun->lun_lock);

			/* send msg back to other side */
			msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
			msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
			msg_info.hdr.msg_type = CTL_MSG_R2R;
			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info.hdr), M_WAITOK);
		}
		break;
	case CTL_ACTION_OVERLAP:
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		retval = 1;

		ctl_set_overlapped_cmd(ctsio);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		break;
	case CTL_ACTION_OVERLAP_TAG:
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		retval = 1;
		ctl_set_overlapped_tag(ctsio, ctsio->tag_num);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		break;
	case CTL_ACTION_ERROR:
	default:
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		retval = 1;

		ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
					 /*retry_count*/ 0);
		ctl_copy_sense_data_back((union ctl_io *)ctsio,
&msg_info); 2009 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 2010 msg_info.hdr.serializing_sc = NULL; 2011 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 2012 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2013 sizeof(msg_info.scsi), M_WAITOK); 2014 break; 2015 } 2016 return (retval); 2017 } 2018 2019 /* 2020 * Returns 0 for success, errno for failure. 2021 */ 2022 static int 2023 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 2024 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 2025 { 2026 union ctl_io *io; 2027 int retval; 2028 2029 retval = 0; 2030 2031 mtx_lock(&lun->lun_lock); 2032 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); 2033 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2034 ooa_links)) { 2035 struct ctl_ooa_entry *entry; 2036 2037 /* 2038 * If we've got more than we can fit, just count the 2039 * remaining entries. 2040 */ 2041 if (*cur_fill_num >= ooa_hdr->alloc_num) 2042 continue; 2043 2044 entry = &kern_entries[*cur_fill_num]; 2045 2046 entry->tag_num = io->scsiio.tag_num; 2047 entry->lun_num = lun->lun; 2048 #ifdef CTL_TIME_IO 2049 entry->start_bt = io->io_hdr.start_bt; 2050 #endif 2051 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 2052 entry->cdb_len = io->scsiio.cdb_len; 2053 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 2054 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 2055 2056 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 2057 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 2058 2059 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2060 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2061 2062 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2063 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2064 2065 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2066 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2067 } 2068 mtx_unlock(&lun->lun_lock); 2069 2070 return (retval); 2071 } 2072 2073 static void * 2074 ctl_copyin_alloc(void *user_addr, int len, char *error_str, 2075 size_t error_str_len) 2076 { 2077 void *kptr; 2078 2079 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 2080 2081 if (copyin(user_addr, kptr, len) != 0) { 2082 snprintf(error_str, error_str_len, "Error copying %d bytes " 2083 "from user address %p to kernel address %p", len, 2084 user_addr, kptr); 2085 free(kptr, M_CTL); 2086 return (NULL); 2087 } 2088 2089 return (kptr); 2090 } 2091 2092 static void 2093 ctl_free_args(int num_args, struct ctl_be_arg *args) 2094 { 2095 int i; 2096 2097 if (args == NULL) 2098 return; 2099 2100 for (i = 0; i < num_args; i++) { 2101 free(args[i].kname, M_CTL); 2102 free(args[i].kvalue, M_CTL); 2103 } 2104 2105 free(args, M_CTL); 2106 } 2107 2108 static struct ctl_be_arg * 2109 ctl_copyin_args(int num_args, struct ctl_be_arg *uargs, 2110 char *error_str, size_t error_str_len) 2111 { 2112 struct ctl_be_arg *args; 2113 int i; 2114 2115 args = ctl_copyin_alloc(uargs, num_args * sizeof(*args), 2116 error_str, error_str_len); 2117 2118 if (args == NULL) 2119 goto bailout; 2120 2121 for (i = 0; i < num_args; i++) { 2122 args[i].kname = NULL; 2123 args[i].kvalue = NULL; 2124 } 2125 2126 for (i = 0; i < num_args; i++) { 2127 uint8_t *tmpptr; 2128 2129 args[i].kname = ctl_copyin_alloc(args[i].name, 2130 args[i].namelen, error_str, error_str_len); 2131 if (args[i].kname == NULL) 2132 goto bailout; 2133 2134 if (args[i].kname[args[i].namelen - 1] != '\0') { 2135 snprintf(error_str, error_str_len, "Argument %d " 2136 "name is not NUL-terminated", i); 2137 goto bailout; 2138 } 2139 2140 if (args[i].flags & CTL_BEARG_RD) { 2141 tmpptr = 
ctl_copyin_alloc(args[i].value, 2142 args[i].vallen, error_str, error_str_len); 2143 if (tmpptr == NULL) 2144 goto bailout; 2145 if ((args[i].flags & CTL_BEARG_ASCII) 2146 && (tmpptr[args[i].vallen - 1] != '\0')) { 2147 snprintf(error_str, error_str_len, "Argument " 2148 "%d value is not NUL-terminated", i); 2149 goto bailout; 2150 } 2151 args[i].kvalue = tmpptr; 2152 } else { 2153 args[i].kvalue = malloc(args[i].vallen, 2154 M_CTL, M_WAITOK | M_ZERO); 2155 } 2156 } 2157 2158 return (args); 2159 bailout: 2160 2161 ctl_free_args(num_args, args); 2162 2163 return (NULL); 2164 } 2165 2166 static void 2167 ctl_copyout_args(int num_args, struct ctl_be_arg *args) 2168 { 2169 int i; 2170 2171 for (i = 0; i < num_args; i++) { 2172 if (args[i].flags & CTL_BEARG_WR) 2173 copyout(args[i].kvalue, args[i].value, args[i].vallen); 2174 } 2175 } 2176 2177 /* 2178 * Escape characters that are illegal or not recommended in XML. 2179 */ 2180 int 2181 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 2182 { 2183 char *end = str + size; 2184 int retval; 2185 2186 retval = 0; 2187 2188 for (; *str && str < end; str++) { 2189 switch (*str) { 2190 case '&': 2191 retval = sbuf_printf(sb, "&amp;"); 2192 break; 2193 case '>': 2194 retval = sbuf_printf(sb, "&gt;"); 2195 break; 2196 case '<': 2197 retval = sbuf_printf(sb, "&lt;"); 2198 break; 2199 default: 2200 retval = sbuf_putc(sb, *str); 2201 break; 2202 } 2203 2204 if (retval != 0) 2205 break; 2206 2207 } 2208 2209 return (retval); 2210 } 2211 2212 static void 2213 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2214 { 2215 struct scsi_vpd_id_descriptor *desc; 2216 int i; 2217 2218 if (id == NULL || id->len < 4) 2219 return; 2220 desc = (struct scsi_vpd_id_descriptor *)id->data; 2221 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2222 case SVPD_ID_TYPE_T10: 2223 sbuf_printf(sb, "t10."); 2224 break; 2225 case SVPD_ID_TYPE_EUI64: 2226 sbuf_printf(sb, "eui."); 2227 break; 2228 case SVPD_ID_TYPE_NAA: 2229 sbuf_printf(sb, "naa."); 2230 break; 2231 case SVPD_ID_TYPE_SCSI_NAME: 2232 break; 2233 } 2234 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2235 case SVPD_ID_CODESET_BINARY: 2236 for (i = 0; i < desc->length; i++) 2237 sbuf_printf(sb, "%02x", desc->identifier[i]); 2238 break; 2239 case SVPD_ID_CODESET_ASCII: 2240 sbuf_printf(sb, "%.*s", (int)desc->length, 2241 (char *)desc->identifier); 2242 break; 2243 case SVPD_ID_CODESET_UTF8: 2244 sbuf_printf(sb, "%s", (char *)desc->identifier); 2245 break; 2246 } 2247 } 2248 2249 static int 2250 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2251 struct thread *td) 2252 { 2253 struct ctl_softc *softc; 2254 struct ctl_lun *lun; 2255 int retval; 2256 2257 softc = control_softc; 2258 2259 retval = 0; 2260 2261 switch (cmd) { 2262 case CTL_IO: 2263 retval = ctl_ioctl_io(dev, cmd, addr, flag, td); 2264 break; 2265 case CTL_ENABLE_PORT: 2266 case CTL_DISABLE_PORT: 2267 case CTL_SET_PORT_WWNS: { 2268 struct ctl_port *port; 2269 struct ctl_port_entry *entry; 2270 2271 entry = (struct ctl_port_entry *)addr; 2272 2273 mtx_lock(&softc->ctl_lock); 2274 STAILQ_FOREACH(port, &softc->port_list, links) { 2275 int action, done; 2276 2277 if (port->targ_port < softc->port_min || 2278 port->targ_port >= softc->port_max) 2279 continue; 2280 2281 action = 0; 2282 done = 0; 2283 if ((entry->port_type == CTL_PORT_NONE) 2284 && (entry->targ_port == port->targ_port)) { 2285 /* 2286 * If the user only wants to enable or 2287 * disable or set WWNs on a specific port, 2288 * do the operation and we're done.
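 *
 * (Illustration of ctl_sbuf_printf_esc() above, with a made-up serial
 * number: the input string "Vendor <A&B>" comes out in the XML lists below
 * as "Vendor &lt;A&amp;B&gt;", which keeps the ctllunlist/ctlportlist
 * output well formed.)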
2289 */ 2290 action = 1; 2291 done = 1; 2292 } else if (entry->port_type & port->port_type) { 2293 /* 2294 * Compare the user's type mask with the 2295 * particular frontend type to see if we 2296 * have a match. 2297 */ 2298 action = 1; 2299 done = 0; 2300 2301 /* 2302 * Make sure the user isn't trying to set 2303 * WWNs on multiple ports at the same time. 2304 */ 2305 if (cmd == CTL_SET_PORT_WWNS) { 2306 printf("%s: Can't set WWNs on " 2307 "multiple ports\n", __func__); 2308 retval = EINVAL; 2309 break; 2310 } 2311 } 2312 if (action == 0) 2313 continue; 2314 2315 /* 2316 * XXX KDM we have to drop the lock here, because 2317 * the online/offline operations can potentially 2318 * block. We need to reference count the frontends 2319 * so they can't go away, 2320 */ 2321 if (cmd == CTL_ENABLE_PORT) { 2322 mtx_unlock(&softc->ctl_lock); 2323 ctl_port_online(port); 2324 mtx_lock(&softc->ctl_lock); 2325 } else if (cmd == CTL_DISABLE_PORT) { 2326 mtx_unlock(&softc->ctl_lock); 2327 ctl_port_offline(port); 2328 mtx_lock(&softc->ctl_lock); 2329 } else if (cmd == CTL_SET_PORT_WWNS) { 2330 ctl_port_set_wwns(port, 2331 (entry->flags & CTL_PORT_WWNN_VALID) ? 2332 1 : 0, entry->wwnn, 2333 (entry->flags & CTL_PORT_WWPN_VALID) ? 2334 1 : 0, entry->wwpn); 2335 } 2336 if (done != 0) 2337 break; 2338 } 2339 mtx_unlock(&softc->ctl_lock); 2340 break; 2341 } 2342 case CTL_GET_PORT_LIST: { 2343 struct ctl_port *port; 2344 struct ctl_port_list *list; 2345 int i; 2346 2347 list = (struct ctl_port_list *)addr; 2348 2349 if (list->alloc_len != (list->alloc_num * 2350 sizeof(struct ctl_port_entry))) { 2351 printf("%s: CTL_GET_PORT_LIST: alloc_len %u != " 2352 "alloc_num %u * sizeof(struct ctl_port_entry) " 2353 "%zu\n", __func__, list->alloc_len, 2354 list->alloc_num, sizeof(struct ctl_port_entry)); 2355 retval = EINVAL; 2356 break; 2357 } 2358 list->fill_len = 0; 2359 list->fill_num = 0; 2360 list->dropped_num = 0; 2361 i = 0; 2362 mtx_lock(&softc->ctl_lock); 2363 STAILQ_FOREACH(port, &softc->port_list, links) { 2364 struct ctl_port_entry entry, *list_entry; 2365 2366 if (list->fill_num >= list->alloc_num) { 2367 list->dropped_num++; 2368 continue; 2369 } 2370 2371 entry.port_type = port->port_type; 2372 strlcpy(entry.port_name, port->port_name, 2373 sizeof(entry.port_name)); 2374 entry.targ_port = port->targ_port; 2375 entry.physical_port = port->physical_port; 2376 entry.virtual_port = port->virtual_port; 2377 entry.wwnn = port->wwnn; 2378 entry.wwpn = port->wwpn; 2379 if (port->status & CTL_PORT_STATUS_ONLINE) 2380 entry.online = 1; 2381 else 2382 entry.online = 0; 2383 2384 list_entry = &list->entries[i]; 2385 2386 retval = copyout(&entry, list_entry, sizeof(entry)); 2387 if (retval != 0) { 2388 printf("%s: CTL_GET_PORT_LIST: copyout " 2389 "returned %d\n", __func__, retval); 2390 break; 2391 } 2392 i++; 2393 list->fill_num++; 2394 list->fill_len += sizeof(entry); 2395 } 2396 mtx_unlock(&softc->ctl_lock); 2397 2398 /* 2399 * If this is non-zero, we had a copyout fault, so there's 2400 * probably no point in attempting to set the status inside 2401 * the structure. 
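 *
 * For illustration, a hypothetical userland caller of CTL_GET_PORT_LIST
 * (roughly what ctladm(8) does; error handling omitted) has to size
 * alloc_len to exactly alloc_num * sizeof(struct ctl_port_entry), since
 * that is what the check at the top of this case enforces:
 */
#if 0
/* Userland sketch; needs the usual headers plus <cam/ctl/ctl_ioctl.h>. */
static void
example_list_ports(int ctl_fd)
{
	struct ctl_port_entry entries[32];
	struct ctl_port_list list;
	u_int i;

	memset(&list, 0, sizeof(list));
	list.alloc_num = nitems(entries);
	list.alloc_len = list.alloc_num * sizeof(entries[0]);
	list.entries = entries;
	if (ioctl(ctl_fd, CTL_GET_PORT_LIST, &list) != 0 ||
	    list.status != CTL_PORT_LIST_OK)
		return;		/* NEED_MORE_SPACE => retry with more room */
	for (i = 0; i < list.fill_num; i++)
		printf("port %d: %s\n", entries[i].targ_port,
		    entries[i].port_name);
}
#endif
/*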
2402 */ 2403 if (retval != 0) 2404 break; 2405 2406 if (list->dropped_num > 0) 2407 list->status = CTL_PORT_LIST_NEED_MORE_SPACE; 2408 else 2409 list->status = CTL_PORT_LIST_OK; 2410 break; 2411 } 2412 case CTL_DUMP_OOA: { 2413 union ctl_io *io; 2414 char printbuf[128]; 2415 struct sbuf sb; 2416 2417 mtx_lock(&softc->ctl_lock); 2418 printf("Dumping OOA queues:\n"); 2419 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2420 mtx_lock(&lun->lun_lock); 2421 for (io = (union ctl_io *)TAILQ_FIRST( 2422 &lun->ooa_queue); io != NULL; 2423 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2424 ooa_links)) { 2425 sbuf_new(&sb, printbuf, sizeof(printbuf), 2426 SBUF_FIXEDLEN); 2427 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ", 2428 (intmax_t)lun->lun, 2429 io->scsiio.tag_num, 2430 (io->io_hdr.flags & 2431 CTL_FLAG_BLOCKED) ? "" : " BLOCKED", 2432 (io->io_hdr.flags & 2433 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 2434 (io->io_hdr.flags & 2435 CTL_FLAG_ABORT) ? " ABORT" : "", 2436 (io->io_hdr.flags & 2437 CTL_FLAG_IS_WAS_ON_RTR) ? " RTR" : ""); 2438 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 2439 sbuf_finish(&sb); 2440 printf("%s\n", sbuf_data(&sb)); 2441 } 2442 mtx_unlock(&lun->lun_lock); 2443 } 2444 printf("OOA queues dump done\n"); 2445 mtx_unlock(&softc->ctl_lock); 2446 break; 2447 } 2448 case CTL_GET_OOA: { 2449 struct ctl_ooa *ooa_hdr; 2450 struct ctl_ooa_entry *entries; 2451 uint32_t cur_fill_num; 2452 2453 ooa_hdr = (struct ctl_ooa *)addr; 2454 2455 if ((ooa_hdr->alloc_len == 0) 2456 || (ooa_hdr->alloc_num == 0)) { 2457 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2458 "must be non-zero\n", __func__, 2459 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2460 retval = EINVAL; 2461 break; 2462 } 2463 2464 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2465 sizeof(struct ctl_ooa_entry))) { 2466 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2467 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2468 __func__, ooa_hdr->alloc_len, 2469 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2470 retval = EINVAL; 2471 break; 2472 } 2473 2474 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2475 if (entries == NULL) { 2476 printf("%s: could not allocate %d bytes for OOA " 2477 "dump\n", __func__, ooa_hdr->alloc_len); 2478 retval = ENOMEM; 2479 break; 2480 } 2481 2482 mtx_lock(&softc->ctl_lock); 2483 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0) 2484 && ((ooa_hdr->lun_num >= CTL_MAX_LUNS) 2485 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) { 2486 mtx_unlock(&softc->ctl_lock); 2487 free(entries, M_CTL); 2488 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2489 __func__, (uintmax_t)ooa_hdr->lun_num); 2490 retval = EINVAL; 2491 break; 2492 } 2493 2494 cur_fill_num = 0; 2495 2496 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2497 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2498 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2499 ooa_hdr, entries); 2500 if (retval != 0) 2501 break; 2502 } 2503 if (retval != 0) { 2504 mtx_unlock(&softc->ctl_lock); 2505 free(entries, M_CTL); 2506 break; 2507 } 2508 } else { 2509 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2510 2511 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr, 2512 entries); 2513 } 2514 mtx_unlock(&softc->ctl_lock); 2515 2516 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2517 ooa_hdr->fill_len = ooa_hdr->fill_num * 2518 sizeof(struct ctl_ooa_entry); 2519 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2520 if (retval != 0) { 2521 printf("%s: error copying out %d bytes for OOA dump\n", 2522 __func__, 
ooa_hdr->fill_len); 2523 } 2524 2525 getbintime(&ooa_hdr->cur_bt); 2526 2527 if (cur_fill_num > ooa_hdr->alloc_num) { 2528 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2529 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2530 } else { 2531 ooa_hdr->dropped_num = 0; 2532 ooa_hdr->status = CTL_OOA_OK; 2533 } 2534 2535 free(entries, M_CTL); 2536 break; 2537 } 2538 case CTL_CHECK_OOA: { 2539 union ctl_io *io; 2540 struct ctl_ooa_info *ooa_info; 2541 2542 2543 ooa_info = (struct ctl_ooa_info *)addr; 2544 2545 if (ooa_info->lun_id >= CTL_MAX_LUNS) { 2546 ooa_info->status = CTL_OOA_INVALID_LUN; 2547 break; 2548 } 2549 mtx_lock(&softc->ctl_lock); 2550 lun = softc->ctl_luns[ooa_info->lun_id]; 2551 if (lun == NULL) { 2552 mtx_unlock(&softc->ctl_lock); 2553 ooa_info->status = CTL_OOA_INVALID_LUN; 2554 break; 2555 } 2556 mtx_lock(&lun->lun_lock); 2557 mtx_unlock(&softc->ctl_lock); 2558 ooa_info->num_entries = 0; 2559 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 2560 io != NULL; io = (union ctl_io *)TAILQ_NEXT( 2561 &io->io_hdr, ooa_links)) { 2562 ooa_info->num_entries++; 2563 } 2564 mtx_unlock(&lun->lun_lock); 2565 2566 ooa_info->status = CTL_OOA_SUCCESS; 2567 2568 break; 2569 } 2570 case CTL_DELAY_IO: { 2571 struct ctl_io_delay_info *delay_info; 2572 2573 delay_info = (struct ctl_io_delay_info *)addr; 2574 2575 #ifdef CTL_IO_DELAY 2576 mtx_lock(&softc->ctl_lock); 2577 2578 if ((delay_info->lun_id >= CTL_MAX_LUNS) 2579 || (softc->ctl_luns[delay_info->lun_id] == NULL)) { 2580 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2581 } else { 2582 lun = softc->ctl_luns[delay_info->lun_id]; 2583 mtx_lock(&lun->lun_lock); 2584 2585 delay_info->status = CTL_DELAY_STATUS_OK; 2586 2587 switch (delay_info->delay_type) { 2588 case CTL_DELAY_TYPE_CONT: 2589 break; 2590 case CTL_DELAY_TYPE_ONESHOT: 2591 break; 2592 default: 2593 delay_info->status = 2594 CTL_DELAY_STATUS_INVALID_TYPE; 2595 break; 2596 } 2597 2598 switch (delay_info->delay_loc) { 2599 case CTL_DELAY_LOC_DATAMOVE: 2600 lun->delay_info.datamove_type = 2601 delay_info->delay_type; 2602 lun->delay_info.datamove_delay = 2603 delay_info->delay_secs; 2604 break; 2605 case CTL_DELAY_LOC_DONE: 2606 lun->delay_info.done_type = 2607 delay_info->delay_type; 2608 lun->delay_info.done_delay = 2609 delay_info->delay_secs; 2610 break; 2611 default: 2612 delay_info->status = 2613 CTL_DELAY_STATUS_INVALID_LOC; 2614 break; 2615 } 2616 mtx_unlock(&lun->lun_lock); 2617 } 2618 2619 mtx_unlock(&softc->ctl_lock); 2620 #else 2621 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2622 #endif /* CTL_IO_DELAY */ 2623 break; 2624 } 2625 case CTL_REALSYNC_SET: { 2626 int *syncstate; 2627 2628 syncstate = (int *)addr; 2629 2630 mtx_lock(&softc->ctl_lock); 2631 switch (*syncstate) { 2632 case 0: 2633 softc->flags &= ~CTL_FLAG_REAL_SYNC; 2634 break; 2635 case 1: 2636 softc->flags |= CTL_FLAG_REAL_SYNC; 2637 break; 2638 default: 2639 retval = EINVAL; 2640 break; 2641 } 2642 mtx_unlock(&softc->ctl_lock); 2643 break; 2644 } 2645 case CTL_REALSYNC_GET: { 2646 int *syncstate; 2647 2648 syncstate = (int*)addr; 2649 2650 mtx_lock(&softc->ctl_lock); 2651 if (softc->flags & CTL_FLAG_REAL_SYNC) 2652 *syncstate = 1; 2653 else 2654 *syncstate = 0; 2655 mtx_unlock(&softc->ctl_lock); 2656 2657 break; 2658 } 2659 case CTL_SETSYNC: 2660 case CTL_GETSYNC: { 2661 struct ctl_sync_info *sync_info; 2662 2663 sync_info = (struct ctl_sync_info *)addr; 2664 2665 mtx_lock(&softc->ctl_lock); 2666 lun = softc->ctl_luns[sync_info->lun_id]; 2667 if (lun == NULL) { 2668 
mtx_unlock(&softc->ctl_lock); 2669 sync_info->status = CTL_GS_SYNC_NO_LUN; 2670 break; 2671 } 2672 /* 2673 * Get or set the sync interval. We're not bounds checking 2674 * in the set case, hopefully the user won't do something 2675 * silly. 2676 */ 2677 mtx_lock(&lun->lun_lock); 2678 mtx_unlock(&softc->ctl_lock); 2679 if (cmd == CTL_GETSYNC) 2680 sync_info->sync_interval = lun->sync_interval; 2681 else 2682 lun->sync_interval = sync_info->sync_interval; 2683 mtx_unlock(&lun->lun_lock); 2684 2685 sync_info->status = CTL_GS_SYNC_OK; 2686 2687 break; 2688 } 2689 case CTL_GETSTATS: { 2690 struct ctl_stats *stats; 2691 int i; 2692 2693 stats = (struct ctl_stats *)addr; 2694 2695 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) > 2696 stats->alloc_len) { 2697 stats->status = CTL_SS_NEED_MORE_SPACE; 2698 stats->num_luns = softc->num_luns; 2699 break; 2700 } 2701 /* 2702 * XXX KDM no locking here. If the LUN list changes, 2703 * things can blow up. 2704 */ 2705 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; 2706 i++, lun = STAILQ_NEXT(lun, links)) { 2707 retval = copyout(&lun->stats, &stats->lun_stats[i], 2708 sizeof(lun->stats)); 2709 if (retval != 0) 2710 break; 2711 } 2712 stats->num_luns = softc->num_luns; 2713 stats->fill_len = sizeof(struct ctl_lun_io_stats) * 2714 softc->num_luns; 2715 stats->status = CTL_SS_OK; 2716 #ifdef CTL_TIME_IO 2717 stats->flags = CTL_STATS_FLAG_TIME_VALID; 2718 #else 2719 stats->flags = CTL_STATS_FLAG_NONE; 2720 #endif 2721 getnanouptime(&stats->timestamp); 2722 break; 2723 } 2724 case CTL_ERROR_INJECT: { 2725 struct ctl_error_desc *err_desc, *new_err_desc; 2726 2727 err_desc = (struct ctl_error_desc *)addr; 2728 2729 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2730 M_WAITOK | M_ZERO); 2731 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2732 2733 mtx_lock(&softc->ctl_lock); 2734 lun = softc->ctl_luns[err_desc->lun_id]; 2735 if (lun == NULL) { 2736 mtx_unlock(&softc->ctl_lock); 2737 free(new_err_desc, M_CTL); 2738 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2739 __func__, (uintmax_t)err_desc->lun_id); 2740 retval = EINVAL; 2741 break; 2742 } 2743 mtx_lock(&lun->lun_lock); 2744 mtx_unlock(&softc->ctl_lock); 2745 2746 /* 2747 * We could do some checking here to verify the validity 2748 * of the request, but given the complexity of error 2749 * injection requests, the checking logic would be fairly 2750 * complex. 2751 * 2752 * For now, if the request is invalid, it just won't get 2753 * executed and might get deleted. 2754 */ 2755 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2756 2757 /* 2758 * XXX KDM check to make sure the serial number is unique, 2759 * in case we somehow manage to wrap. That shouldn't 2760 * happen for a very long time, but it's the right thing to 2761 * do. 
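 *
 * For illustration, a hypothetical userland user of this interface
 * (assuming the descriptor is copied back out to the caller, which is what
 * the serial assignment below is for): the serial number is the handle
 * later passed to CTL_ERROR_INJECT_DELETE to remove exactly this entry:
 */
#if 0
static void
example_inject_and_remove(int ctl_fd, uint32_t lun_id)
{
	struct ctl_error_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.lun_id = lun_id;
	/* ... fill in the kind of error to inject (fields omitted) ... */
	if (ioctl(ctl_fd, CTL_ERROR_INJECT, &desc) != 0)
		return;
	/* desc.serial now identifies the descriptor queued above. */
	(void)ioctl(ctl_fd, CTL_ERROR_INJECT_DELETE, &desc);
}
#endif
/*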
2762 */ 2763 new_err_desc->serial = lun->error_serial; 2764 err_desc->serial = lun->error_serial; 2765 lun->error_serial++; 2766 2767 mtx_unlock(&lun->lun_lock); 2768 break; 2769 } 2770 case CTL_ERROR_INJECT_DELETE: { 2771 struct ctl_error_desc *delete_desc, *desc, *desc2; 2772 int delete_done; 2773 2774 delete_desc = (struct ctl_error_desc *)addr; 2775 delete_done = 0; 2776 2777 mtx_lock(&softc->ctl_lock); 2778 lun = softc->ctl_luns[delete_desc->lun_id]; 2779 if (lun == NULL) { 2780 mtx_unlock(&softc->ctl_lock); 2781 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2782 __func__, (uintmax_t)delete_desc->lun_id); 2783 retval = EINVAL; 2784 break; 2785 } 2786 mtx_lock(&lun->lun_lock); 2787 mtx_unlock(&softc->ctl_lock); 2788 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2789 if (desc->serial != delete_desc->serial) 2790 continue; 2791 2792 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2793 links); 2794 free(desc, M_CTL); 2795 delete_done = 1; 2796 } 2797 mtx_unlock(&lun->lun_lock); 2798 if (delete_done == 0) { 2799 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2800 "error serial %ju on LUN %u\n", __func__, 2801 delete_desc->serial, delete_desc->lun_id); 2802 retval = EINVAL; 2803 break; 2804 } 2805 break; 2806 } 2807 case CTL_DUMP_STRUCTS: { 2808 int i, j, k; 2809 struct ctl_port *port; 2810 struct ctl_frontend *fe; 2811 2812 mtx_lock(&softc->ctl_lock); 2813 printf("CTL Persistent Reservation information start:\n"); 2814 for (i = 0; i < CTL_MAX_LUNS; i++) { 2815 lun = softc->ctl_luns[i]; 2816 2817 if ((lun == NULL) 2818 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 2819 continue; 2820 2821 for (j = 0; j < CTL_MAX_PORTS; j++) { 2822 if (lun->pr_keys[j] == NULL) 2823 continue; 2824 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2825 if (lun->pr_keys[j][k] == 0) 2826 continue; 2827 printf(" LUN %d port %d iid %d key " 2828 "%#jx\n", i, j, k, 2829 (uintmax_t)lun->pr_keys[j][k]); 2830 } 2831 } 2832 } 2833 printf("CTL Persistent Reservation information end\n"); 2834 printf("CTL Ports:\n"); 2835 STAILQ_FOREACH(port, &softc->port_list, links) { 2836 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2837 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2838 port->frontend->name, port->port_type, 2839 port->physical_port, port->virtual_port, 2840 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2841 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2842 if (port->wwpn_iid[j].in_use == 0 && 2843 port->wwpn_iid[j].wwpn == 0 && 2844 port->wwpn_iid[j].name == NULL) 2845 continue; 2846 2847 printf(" iid %u use %d WWPN %#jx '%s'\n", 2848 j, port->wwpn_iid[j].in_use, 2849 (uintmax_t)port->wwpn_iid[j].wwpn, 2850 port->wwpn_iid[j].name); 2851 } 2852 } 2853 printf("CTL Port information end\n"); 2854 mtx_unlock(&softc->ctl_lock); 2855 /* 2856 * XXX KDM calling this without a lock. We'd likely want 2857 * to drop the lock before calling the frontend's dump 2858 * routine anyway. 
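 *
 * For illustration (hypothetical frontend, not an in-tree one): fe_dump is
 * an optional, argument-less hook, so taking part in this dump only
 * requires something along these lines in the ctl_frontend definition:
 */
#if 0
static void
examplefe_dump(void)
{
	printf("examplefe: sessions=..., outstanding I/O=...\n");
}

static struct ctl_frontend examplefe_frontend = {
	.name = "examplefe",
	.fe_dump = examplefe_dump,
};
#endif
/*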
2859 */ 2860 printf("CTL Frontends:\n"); 2861 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2862 printf(" Frontend '%s'\n", fe->name); 2863 if (fe->fe_dump != NULL) 2864 fe->fe_dump(); 2865 } 2866 printf("CTL Frontend information end\n"); 2867 break; 2868 } 2869 case CTL_LUN_REQ: { 2870 struct ctl_lun_req *lun_req; 2871 struct ctl_backend_driver *backend; 2872 2873 lun_req = (struct ctl_lun_req *)addr; 2874 2875 backend = ctl_backend_find(lun_req->backend); 2876 if (backend == NULL) { 2877 lun_req->status = CTL_LUN_ERROR; 2878 snprintf(lun_req->error_str, 2879 sizeof(lun_req->error_str), 2880 "Backend \"%s\" not found.", 2881 lun_req->backend); 2882 break; 2883 } 2884 if (lun_req->num_be_args > 0) { 2885 lun_req->kern_be_args = ctl_copyin_args( 2886 lun_req->num_be_args, 2887 lun_req->be_args, 2888 lun_req->error_str, 2889 sizeof(lun_req->error_str)); 2890 if (lun_req->kern_be_args == NULL) { 2891 lun_req->status = CTL_LUN_ERROR; 2892 break; 2893 } 2894 } 2895 2896 retval = backend->ioctl(dev, cmd, addr, flag, td); 2897 2898 if (lun_req->num_be_args > 0) { 2899 ctl_copyout_args(lun_req->num_be_args, 2900 lun_req->kern_be_args); 2901 ctl_free_args(lun_req->num_be_args, 2902 lun_req->kern_be_args); 2903 } 2904 break; 2905 } 2906 case CTL_LUN_LIST: { 2907 struct sbuf *sb; 2908 struct ctl_lun_list *list; 2909 struct ctl_option *opt; 2910 2911 list = (struct ctl_lun_list *)addr; 2912 2913 /* 2914 * Allocate a fixed length sbuf here, based on the length 2915 * of the user's buffer. We could allocate an auto-extending 2916 * buffer, and then tell the user how much larger our 2917 * amount of data is than his buffer, but that presents 2918 * some problems: 2919 * 2920 * 1. The sbuf(9) routines use a blocking malloc, and so 2921 * we can't hold a lock while calling them with an 2922 * auto-extending buffer. 2923 * 2924 * 2. There is not currently a LUN reference counting 2925 * mechanism, outside of outstanding transactions on 2926 * the LUN's OOA queue. So a LUN could go away on us 2927 * while we're getting the LUN number, backend-specific 2928 * information, etc. Thus, given the way things 2929 * currently work, we need to hold the CTL lock while 2930 * grabbing LUN information. 2931 * 2932 * So, from the user's standpoint, the best thing to do is 2933 * allocate what he thinks is a reasonable buffer length, 2934 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 2935 * double the buffer length and try again. (And repeat 2936 * that until he succeeds.) 2937 */ 2938 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 2939 if (sb == NULL) { 2940 list->status = CTL_LUN_LIST_ERROR; 2941 snprintf(list->error_str, sizeof(list->error_str), 2942 "Unable to allocate %d bytes for LUN list", 2943 list->alloc_len); 2944 break; 2945 } 2946 2947 sbuf_printf(sb, "<ctllunlist>\n"); 2948 2949 mtx_lock(&softc->ctl_lock); 2950 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2951 mtx_lock(&lun->lun_lock); 2952 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 2953 (uintmax_t)lun->lun); 2954 2955 /* 2956 * Bail out as soon as we see that we've overfilled 2957 * the buffer. 2958 */ 2959 if (retval != 0) 2960 break; 2961 2962 retval = sbuf_printf(sb, "\t<backend_type>%s" 2963 "</backend_type>\n", 2964 (lun->backend == NULL) ? 
"none" : 2965 lun->backend->name); 2966 2967 if (retval != 0) 2968 break; 2969 2970 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 2971 lun->be_lun->lun_type); 2972 2973 if (retval != 0) 2974 break; 2975 2976 if (lun->backend == NULL) { 2977 retval = sbuf_printf(sb, "</lun>\n"); 2978 if (retval != 0) 2979 break; 2980 continue; 2981 } 2982 2983 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 2984 (lun->be_lun->maxlba > 0) ? 2985 lun->be_lun->maxlba + 1 : 0); 2986 2987 if (retval != 0) 2988 break; 2989 2990 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 2991 lun->be_lun->blocksize); 2992 2993 if (retval != 0) 2994 break; 2995 2996 retval = sbuf_printf(sb, "\t<serial_number>"); 2997 2998 if (retval != 0) 2999 break; 3000 3001 retval = ctl_sbuf_printf_esc(sb, 3002 lun->be_lun->serial_num, 3003 sizeof(lun->be_lun->serial_num)); 3004 3005 if (retval != 0) 3006 break; 3007 3008 retval = sbuf_printf(sb, "</serial_number>\n"); 3009 3010 if (retval != 0) 3011 break; 3012 3013 retval = sbuf_printf(sb, "\t<device_id>"); 3014 3015 if (retval != 0) 3016 break; 3017 3018 retval = ctl_sbuf_printf_esc(sb, 3019 lun->be_lun->device_id, 3020 sizeof(lun->be_lun->device_id)); 3021 3022 if (retval != 0) 3023 break; 3024 3025 retval = sbuf_printf(sb, "</device_id>\n"); 3026 3027 if (retval != 0) 3028 break; 3029 3030 if (lun->backend->lun_info != NULL) { 3031 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 3032 if (retval != 0) 3033 break; 3034 } 3035 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 3036 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3037 opt->name, opt->value, opt->name); 3038 if (retval != 0) 3039 break; 3040 } 3041 3042 retval = sbuf_printf(sb, "</lun>\n"); 3043 3044 if (retval != 0) 3045 break; 3046 mtx_unlock(&lun->lun_lock); 3047 } 3048 if (lun != NULL) 3049 mtx_unlock(&lun->lun_lock); 3050 mtx_unlock(&softc->ctl_lock); 3051 3052 if ((retval != 0) 3053 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3054 retval = 0; 3055 sbuf_delete(sb); 3056 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3057 snprintf(list->error_str, sizeof(list->error_str), 3058 "Out of space, %d bytes is too small", 3059 list->alloc_len); 3060 break; 3061 } 3062 3063 sbuf_finish(sb); 3064 3065 retval = copyout(sbuf_data(sb), list->lun_xml, 3066 sbuf_len(sb) + 1); 3067 3068 list->fill_len = sbuf_len(sb) + 1; 3069 list->status = CTL_LUN_LIST_OK; 3070 sbuf_delete(sb); 3071 break; 3072 } 3073 case CTL_ISCSI: { 3074 struct ctl_iscsi *ci; 3075 struct ctl_frontend *fe; 3076 3077 ci = (struct ctl_iscsi *)addr; 3078 3079 fe = ctl_frontend_find("iscsi"); 3080 if (fe == NULL) { 3081 ci->status = CTL_ISCSI_ERROR; 3082 snprintf(ci->error_str, sizeof(ci->error_str), 3083 "Frontend \"iscsi\" not found."); 3084 break; 3085 } 3086 3087 retval = fe->ioctl(dev, cmd, addr, flag, td); 3088 break; 3089 } 3090 case CTL_PORT_REQ: { 3091 struct ctl_req *req; 3092 struct ctl_frontend *fe; 3093 3094 req = (struct ctl_req *)addr; 3095 3096 fe = ctl_frontend_find(req->driver); 3097 if (fe == NULL) { 3098 req->status = CTL_LUN_ERROR; 3099 snprintf(req->error_str, sizeof(req->error_str), 3100 "Frontend \"%s\" not found.", req->driver); 3101 break; 3102 } 3103 if (req->num_args > 0) { 3104 req->kern_args = ctl_copyin_args(req->num_args, 3105 req->args, req->error_str, sizeof(req->error_str)); 3106 if (req->kern_args == NULL) { 3107 req->status = CTL_LUN_ERROR; 3108 break; 3109 } 3110 } 3111 3112 if (fe->ioctl) 3113 retval = fe->ioctl(dev, cmd, addr, flag, td); 3114 else 3115 retval = ENODEV; 3116 3117 if 
(req->num_args > 0) { 3118 ctl_copyout_args(req->num_args, req->kern_args); 3119 ctl_free_args(req->num_args, req->kern_args); 3120 } 3121 break; 3122 } 3123 case CTL_PORT_LIST: { 3124 struct sbuf *sb; 3125 struct ctl_port *port; 3126 struct ctl_lun_list *list; 3127 struct ctl_option *opt; 3128 int j; 3129 uint32_t plun; 3130 3131 list = (struct ctl_lun_list *)addr; 3132 3133 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3134 if (sb == NULL) { 3135 list->status = CTL_LUN_LIST_ERROR; 3136 snprintf(list->error_str, sizeof(list->error_str), 3137 "Unable to allocate %d bytes for LUN list", 3138 list->alloc_len); 3139 break; 3140 } 3141 3142 sbuf_printf(sb, "<ctlportlist>\n"); 3143 3144 mtx_lock(&softc->ctl_lock); 3145 STAILQ_FOREACH(port, &softc->port_list, links) { 3146 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3147 (uintmax_t)port->targ_port); 3148 3149 /* 3150 * Bail out as soon as we see that we've overfilled 3151 * the buffer. 3152 */ 3153 if (retval != 0) 3154 break; 3155 3156 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3157 "</frontend_type>\n", port->frontend->name); 3158 if (retval != 0) 3159 break; 3160 3161 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3162 port->port_type); 3163 if (retval != 0) 3164 break; 3165 3166 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3167 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 3168 if (retval != 0) 3169 break; 3170 3171 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3172 port->port_name); 3173 if (retval != 0) 3174 break; 3175 3176 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3177 port->physical_port); 3178 if (retval != 0) 3179 break; 3180 3181 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3182 port->virtual_port); 3183 if (retval != 0) 3184 break; 3185 3186 if (port->target_devid != NULL) { 3187 sbuf_printf(sb, "\t<target>"); 3188 ctl_id_sbuf(port->target_devid, sb); 3189 sbuf_printf(sb, "</target>\n"); 3190 } 3191 3192 if (port->port_devid != NULL) { 3193 sbuf_printf(sb, "\t<port>"); 3194 ctl_id_sbuf(port->port_devid, sb); 3195 sbuf_printf(sb, "</port>\n"); 3196 } 3197 3198 if (port->port_info != NULL) { 3199 retval = port->port_info(port->onoff_arg, sb); 3200 if (retval != 0) 3201 break; 3202 } 3203 STAILQ_FOREACH(opt, &port->options, links) { 3204 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3205 opt->name, opt->value, opt->name); 3206 if (retval != 0) 3207 break; 3208 } 3209 3210 if (port->lun_map != NULL) { 3211 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 3212 for (j = 0; j < CTL_MAX_LUNS; j++) { 3213 plun = ctl_lun_map_from_port(port, j); 3214 if (plun >= CTL_MAX_LUNS) 3215 continue; 3216 sbuf_printf(sb, 3217 "\t<lun id=\"%u\">%u</lun>\n", 3218 j, plun); 3219 } 3220 } 3221 3222 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3223 if (port->wwpn_iid[j].in_use == 0 || 3224 (port->wwpn_iid[j].wwpn == 0 && 3225 port->wwpn_iid[j].name == NULL)) 3226 continue; 3227 3228 if (port->wwpn_iid[j].name != NULL) 3229 retval = sbuf_printf(sb, 3230 "\t<initiator id=\"%u\">%s</initiator>\n", 3231 j, port->wwpn_iid[j].name); 3232 else 3233 retval = sbuf_printf(sb, 3234 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3235 j, port->wwpn_iid[j].wwpn); 3236 if (retval != 0) 3237 break; 3238 } 3239 if (retval != 0) 3240 break; 3241 3242 retval = sbuf_printf(sb, "</targ_port>\n"); 3243 if (retval != 0) 3244 break; 3245 } 3246 mtx_unlock(&softc->ctl_lock); 3247 3248 if ((retval != 0) 3249 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3250 
retval = 0; 3251 sbuf_delete(sb); 3252 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3253 snprintf(list->error_str, sizeof(list->error_str), 3254 "Out of space, %d bytes is too small", 3255 list->alloc_len); 3256 break; 3257 } 3258 3259 sbuf_finish(sb); 3260 3261 retval = copyout(sbuf_data(sb), list->lun_xml, 3262 sbuf_len(sb) + 1); 3263 3264 list->fill_len = sbuf_len(sb) + 1; 3265 list->status = CTL_LUN_LIST_OK; 3266 sbuf_delete(sb); 3267 break; 3268 } 3269 case CTL_LUN_MAP: { 3270 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3271 struct ctl_port *port; 3272 3273 mtx_lock(&softc->ctl_lock); 3274 if (lm->port < softc->port_min || 3275 lm->port >= softc->port_max || 3276 (port = softc->ctl_ports[lm->port]) == NULL) { 3277 mtx_unlock(&softc->ctl_lock); 3278 return (ENXIO); 3279 } 3280 if (port->status & CTL_PORT_STATUS_ONLINE) { 3281 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3282 if (ctl_lun_map_to_port(port, lun->lun) >= 3283 CTL_MAX_LUNS) 3284 continue; 3285 mtx_lock(&lun->lun_lock); 3286 ctl_est_ua_port(lun, lm->port, -1, 3287 CTL_UA_LUN_CHANGE); 3288 mtx_unlock(&lun->lun_lock); 3289 } 3290 } 3291 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3292 if (lm->plun < CTL_MAX_LUNS) { 3293 if (lm->lun == UINT32_MAX) 3294 retval = ctl_lun_map_unset(port, lm->plun); 3295 else if (lm->lun < CTL_MAX_LUNS && 3296 softc->ctl_luns[lm->lun] != NULL) 3297 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3298 else 3299 return (ENXIO); 3300 } else if (lm->plun == UINT32_MAX) { 3301 if (lm->lun == UINT32_MAX) 3302 retval = ctl_lun_map_deinit(port); 3303 else 3304 retval = ctl_lun_map_init(port); 3305 } else 3306 return (ENXIO); 3307 if (port->status & CTL_PORT_STATUS_ONLINE) 3308 ctl_isc_announce_port(port); 3309 break; 3310 } 3311 default: { 3312 /* XXX KDM should we fix this? */ 3313 #if 0 3314 struct ctl_backend_driver *backend; 3315 unsigned int type; 3316 int found; 3317 3318 found = 0; 3319 3320 /* 3321 * We encode the backend type as the ioctl type for backend 3322 * ioctls. So parse it out here, and then search for a 3323 * backend of this type. 
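 *
 * (Illustration with made-up values: a backend that registered itself with
 * type 'X' would define its private ioctls along the lines of
 * _IOWR('X', 1, struct example_be_args), so that _IOC_TYPE(cmd) below
 * recovers 'X' and the loop can hand the request to that backend.)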
3324 */ 3325 type = _IOC_TYPE(cmd); 3326 3327 STAILQ_FOREACH(backend, &softc->be_list, links) { 3328 if (backend->type == type) { 3329 found = 1; 3330 break; 3331 } 3332 } 3333 if (found == 0) { 3334 printf("ctl: unknown ioctl command %#lx or backend " 3335 "%d\n", cmd, type); 3336 retval = EINVAL; 3337 break; 3338 } 3339 retval = backend->ioctl(dev, cmd, addr, flag, td); 3340 #endif 3341 retval = ENOTTY; 3342 break; 3343 } 3344 } 3345 return (retval); 3346 } 3347 3348 uint32_t 3349 ctl_get_initindex(struct ctl_nexus *nexus) 3350 { 3351 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3352 } 3353 3354 int 3355 ctl_lun_map_init(struct ctl_port *port) 3356 { 3357 struct ctl_softc *softc = control_softc; 3358 struct ctl_lun *lun; 3359 uint32_t i; 3360 3361 if (port->lun_map == NULL) 3362 port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS, 3363 M_CTL, M_NOWAIT); 3364 if (port->lun_map == NULL) 3365 return (ENOMEM); 3366 for (i = 0; i < CTL_MAX_LUNS; i++) 3367 port->lun_map[i] = UINT32_MAX; 3368 if (port->status & CTL_PORT_STATUS_ONLINE) { 3369 if (port->lun_disable != NULL) { 3370 STAILQ_FOREACH(lun, &softc->lun_list, links) 3371 port->lun_disable(port->targ_lun_arg, lun->lun); 3372 } 3373 ctl_isc_announce_port(port); 3374 } 3375 return (0); 3376 } 3377 3378 int 3379 ctl_lun_map_deinit(struct ctl_port *port) 3380 { 3381 struct ctl_softc *softc = control_softc; 3382 struct ctl_lun *lun; 3383 3384 if (port->lun_map == NULL) 3385 return (0); 3386 free(port->lun_map, M_CTL); 3387 port->lun_map = NULL; 3388 if (port->status & CTL_PORT_STATUS_ONLINE) { 3389 if (port->lun_enable != NULL) { 3390 STAILQ_FOREACH(lun, &softc->lun_list, links) 3391 port->lun_enable(port->targ_lun_arg, lun->lun); 3392 } 3393 ctl_isc_announce_port(port); 3394 } 3395 return (0); 3396 } 3397 3398 int 3399 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3400 { 3401 int status; 3402 uint32_t old; 3403 3404 if (port->lun_map == NULL) { 3405 status = ctl_lun_map_init(port); 3406 if (status != 0) 3407 return (status); 3408 } 3409 old = port->lun_map[plun]; 3410 port->lun_map[plun] = glun; 3411 if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS) { 3412 if (port->lun_enable != NULL) 3413 port->lun_enable(port->targ_lun_arg, plun); 3414 ctl_isc_announce_port(port); 3415 } 3416 return (0); 3417 } 3418 3419 int 3420 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3421 { 3422 uint32_t old; 3423 3424 if (port->lun_map == NULL) 3425 return (0); 3426 old = port->lun_map[plun]; 3427 port->lun_map[plun] = UINT32_MAX; 3428 if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS) { 3429 if (port->lun_disable != NULL) 3430 port->lun_disable(port->targ_lun_arg, plun); 3431 ctl_isc_announce_port(port); 3432 } 3433 return (0); 3434 } 3435 3436 uint32_t 3437 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3438 { 3439 3440 if (port == NULL) 3441 return (UINT32_MAX); 3442 if (port->lun_map == NULL || lun_id >= CTL_MAX_LUNS) 3443 return (lun_id); 3444 return (port->lun_map[lun_id]); 3445 } 3446 3447 uint32_t 3448 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3449 { 3450 uint32_t i; 3451 3452 if (port == NULL) 3453 return (UINT32_MAX); 3454 if (port->lun_map == NULL) 3455 return (lun_id); 3456 for (i = 0; i < CTL_MAX_LUNS; i++) { 3457 if (port->lun_map[i] == lun_id) 3458 return (i); 3459 } 3460 return (UINT32_MAX); 3461 } 3462 3463 static struct ctl_port * 3464 ctl_io_port(struct ctl_io_hdr *io_hdr) 3465 { 3466 3467 return 
(control_softc->ctl_ports[io_hdr->nexus.targ_port]); 3468 } 3469 3470 int 3471 ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) 3472 { 3473 int i; 3474 3475 for (i = first; i < last; i++) { 3476 if ((mask[i / 32] & (1 << (i % 32))) == 0) 3477 return (i); 3478 } 3479 return (-1); 3480 } 3481 3482 int 3483 ctl_set_mask(uint32_t *mask, uint32_t bit) 3484 { 3485 uint32_t chunk, piece; 3486 3487 chunk = bit >> 5; 3488 piece = bit % (sizeof(uint32_t) * 8); 3489 3490 if ((mask[chunk] & (1 << piece)) != 0) 3491 return (-1); 3492 else 3493 mask[chunk] |= (1 << piece); 3494 3495 return (0); 3496 } 3497 3498 int 3499 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3500 { 3501 uint32_t chunk, piece; 3502 3503 chunk = bit >> 5; 3504 piece = bit % (sizeof(uint32_t) * 8); 3505 3506 if ((mask[chunk] & (1 << piece)) == 0) 3507 return (-1); 3508 else 3509 mask[chunk] &= ~(1 << piece); 3510 3511 return (0); 3512 } 3513 3514 int 3515 ctl_is_set(uint32_t *mask, uint32_t bit) 3516 { 3517 uint32_t chunk, piece; 3518 3519 chunk = bit >> 5; 3520 piece = bit % (sizeof(uint32_t) * 8); 3521 3522 if ((mask[chunk] & (1 << piece)) == 0) 3523 return (0); 3524 else 3525 return (1); 3526 } 3527 3528 static uint64_t 3529 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3530 { 3531 uint64_t *t; 3532 3533 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3534 if (t == NULL) 3535 return (0); 3536 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3537 } 3538 3539 static void 3540 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3541 { 3542 uint64_t *t; 3543 3544 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3545 if (t == NULL) 3546 return; 3547 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3548 } 3549 3550 static void 3551 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3552 { 3553 uint64_t *p; 3554 u_int i; 3555 3556 i = residx/CTL_MAX_INIT_PER_PORT; 3557 if (lun->pr_keys[i] != NULL) 3558 return; 3559 mtx_unlock(&lun->lun_lock); 3560 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3561 M_WAITOK | M_ZERO); 3562 mtx_lock(&lun->lun_lock); 3563 if (lun->pr_keys[i] == NULL) 3564 lun->pr_keys[i] = p; 3565 else 3566 free(p, M_CTL); 3567 } 3568 3569 static void 3570 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3571 { 3572 uint64_t *t; 3573 3574 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3575 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3576 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3577 } 3578 3579 /* 3580 * ctl_softc, pool_name, total_ctl_io are passed in. 3581 * npool is passed out. 
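 *
 * For illustration (hypothetical frontend code): the usual pattern is to
 * create a pool when a port is registered, allocate a ctl_io from it for
 * each incoming request, and release everything again on shutdown:
 */
#if 0
static int
example_fe_setup(struct ctl_softc *softc)
{
	union ctl_io *io;
	void *pool;

	if (ctl_pool_create(softc, "examplefe", 1024, &pool) != 0)
		return (ENOMEM);
	io = ctl_alloc_io(pool);	/* sleeps; see ctl_alloc_io_nowait() */
	ctl_zero_io(io);
	/* ... fill in io->io_hdr and io->scsiio, then queue it to CTL ... */
	ctl_free_io(io);
	ctl_pool_free((struct ctl_io_pool *)pool);
	return (0);
}
#endif
/*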
3582 */ 3583 int 3584 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3585 uint32_t total_ctl_io, void **npool) 3586 { 3587 #ifdef IO_POOLS 3588 struct ctl_io_pool *pool; 3589 3590 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3591 M_NOWAIT | M_ZERO); 3592 if (pool == NULL) 3593 return (ENOMEM); 3594 3595 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3596 pool->ctl_softc = ctl_softc; 3597 pool->zone = uma_zsecond_create(pool->name, NULL, 3598 NULL, NULL, NULL, ctl_softc->io_zone); 3599 /* uma_prealloc(pool->zone, total_ctl_io); */ 3600 3601 *npool = pool; 3602 #else 3603 *npool = ctl_softc->io_zone; 3604 #endif 3605 return (0); 3606 } 3607 3608 void 3609 ctl_pool_free(struct ctl_io_pool *pool) 3610 { 3611 3612 if (pool == NULL) 3613 return; 3614 3615 #ifdef IO_POOLS 3616 uma_zdestroy(pool->zone); 3617 free(pool, M_CTL); 3618 #endif 3619 } 3620 3621 union ctl_io * 3622 ctl_alloc_io(void *pool_ref) 3623 { 3624 union ctl_io *io; 3625 #ifdef IO_POOLS 3626 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3627 3628 io = uma_zalloc(pool->zone, M_WAITOK); 3629 #else 3630 io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK); 3631 #endif 3632 if (io != NULL) 3633 io->io_hdr.pool = pool_ref; 3634 return (io); 3635 } 3636 3637 union ctl_io * 3638 ctl_alloc_io_nowait(void *pool_ref) 3639 { 3640 union ctl_io *io; 3641 #ifdef IO_POOLS 3642 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3643 3644 io = uma_zalloc(pool->zone, M_NOWAIT); 3645 #else 3646 io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT); 3647 #endif 3648 if (io != NULL) 3649 io->io_hdr.pool = pool_ref; 3650 return (io); 3651 } 3652 3653 void 3654 ctl_free_io(union ctl_io *io) 3655 { 3656 #ifdef IO_POOLS 3657 struct ctl_io_pool *pool; 3658 #endif 3659 3660 if (io == NULL) 3661 return; 3662 3663 #ifdef IO_POOLS 3664 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3665 uma_zfree(pool->zone, io); 3666 #else 3667 uma_zfree((uma_zone_t)io->io_hdr.pool, io); 3668 #endif 3669 } 3670 3671 void 3672 ctl_zero_io(union ctl_io *io) 3673 { 3674 void *pool_ref; 3675 3676 if (io == NULL) 3677 return; 3678 3679 /* 3680 * May need to preserve linked list pointers at some point too. 3681 */ 3682 pool_ref = io->io_hdr.pool; 3683 memset(io, 0, sizeof(*io)); 3684 io->io_hdr.pool = pool_ref; 3685 } 3686 3687 /* 3688 * This routine is currently used for internal copies of ctl_ios that need 3689 * to persist for some reason after we've already returned status to the 3690 * FETD. (Thus the flag set.) 3691 * 3692 * XXX XXX 3693 * Note that this makes a blind copy of all fields in the ctl_io, except 3694 * for the pool reference. This includes any memory that has been 3695 * allocated! That memory will no longer be valid after done has been 3696 * called, so this would be VERY DANGEROUS for command that actually does 3697 * any reads or writes. Right now (11/7/2005), this is only used for immediate 3698 * start and stop commands, which don't transfer any data, so this is not a 3699 * problem. If it is used for anything else, the caller would also need to 3700 * allocate data buffer space and this routine would need to be modified to 3701 * copy the data buffer(s) as well. 3702 */ 3703 void 3704 ctl_copy_io(union ctl_io *src, union ctl_io *dest) 3705 { 3706 void *pool_ref; 3707 3708 if ((src == NULL) 3709 || (dest == NULL)) 3710 return; 3711 3712 /* 3713 * May need to preserve linked list pointers at some point too. 
3714 */ 3715 pool_ref = dest->io_hdr.pool; 3716 3717 memcpy(dest, src, MIN(sizeof(*src), sizeof(*dest))); 3718 3719 dest->io_hdr.pool = pool_ref; 3720 /* 3721 * We need to know that this is an internal copy, and doesn't need 3722 * to get passed back to the FETD that allocated it. 3723 */ 3724 dest->io_hdr.flags |= CTL_FLAG_INT_COPY; 3725 } 3726 3727 int 3728 ctl_expand_number(const char *buf, uint64_t *num) 3729 { 3730 char *endptr; 3731 uint64_t number; 3732 unsigned shift; 3733 3734 number = strtoq(buf, &endptr, 0); 3735 3736 switch (tolower((unsigned char)*endptr)) { 3737 case 'e': 3738 shift = 60; 3739 break; 3740 case 'p': 3741 shift = 50; 3742 break; 3743 case 't': 3744 shift = 40; 3745 break; 3746 case 'g': 3747 shift = 30; 3748 break; 3749 case 'm': 3750 shift = 20; 3751 break; 3752 case 'k': 3753 shift = 10; 3754 break; 3755 case 'b': 3756 case '\0': /* No unit. */ 3757 *num = number; 3758 return (0); 3759 default: 3760 /* Unrecognized unit. */ 3761 return (-1); 3762 } 3763 3764 if ((number << shift) >> shift != number) { 3765 /* Overflow */ 3766 return (-1); 3767 } 3768 *num = number << shift; 3769 return (0); 3770 } 3771 3772 3773 /* 3774 * This routine could be used in the future to load default and/or saved 3775 * mode page parameters for a particuar lun. 3776 */ 3777 static int 3778 ctl_init_page_index(struct ctl_lun *lun) 3779 { 3780 int i; 3781 struct ctl_page_index *page_index; 3782 const char *value; 3783 uint64_t ival; 3784 3785 memcpy(&lun->mode_pages.index, page_index_template, 3786 sizeof(page_index_template)); 3787 3788 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 3789 3790 page_index = &lun->mode_pages.index[i]; 3791 /* 3792 * If this is a disk-only mode page, there's no point in 3793 * setting it up. For some pages, we have to have some 3794 * basic information about the disk in order to calculate the 3795 * mode page data. 3796 */ 3797 if ((lun->be_lun->lun_type != T_DIRECT) 3798 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 3799 continue; 3800 3801 switch (page_index->page_code & SMPH_PC_MASK) { 3802 case SMS_RW_ERROR_RECOVERY_PAGE: { 3803 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3804 panic("subpage is incorrect!"); 3805 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 3806 &rw_er_page_default, 3807 sizeof(rw_er_page_default)); 3808 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 3809 &rw_er_page_changeable, 3810 sizeof(rw_er_page_changeable)); 3811 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 3812 &rw_er_page_default, 3813 sizeof(rw_er_page_default)); 3814 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 3815 &rw_er_page_default, 3816 sizeof(rw_er_page_default)); 3817 page_index->page_data = 3818 (uint8_t *)lun->mode_pages.rw_er_page; 3819 break; 3820 } 3821 case SMS_FORMAT_DEVICE_PAGE: { 3822 struct scsi_format_page *format_page; 3823 3824 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3825 panic("subpage is incorrect!"); 3826 3827 /* 3828 * Sectors per track are set above. Bytes per 3829 * sector need to be set here on a per-LUN basis. 
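 *
 * (An aside on ctl_expand_number() above, since the thin-provisioning
 * threshold options parsed later in this function rely on it; the values
 * here are made up:)
 */
#if 0
	uint64_t v;

	ctl_expand_number("512", &v);	/* v = 512, no unit suffix      */
	ctl_expand_number("4k", &v);	/* v = 4096   (4 << 10)         */
	ctl_expand_number("1M", &v);	/* v = 1048576  (1 << 20)       */
	ctl_expand_number("100q", &v);	/* returns -1: unknown suffix   */
#endif
/*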
3830 */ 3831 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 3832 &format_page_default, 3833 sizeof(format_page_default)); 3834 memcpy(&lun->mode_pages.format_page[ 3835 CTL_PAGE_CHANGEABLE], &format_page_changeable, 3836 sizeof(format_page_changeable)); 3837 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 3838 &format_page_default, 3839 sizeof(format_page_default)); 3840 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 3841 &format_page_default, 3842 sizeof(format_page_default)); 3843 3844 format_page = &lun->mode_pages.format_page[ 3845 CTL_PAGE_CURRENT]; 3846 scsi_ulto2b(lun->be_lun->blocksize, 3847 format_page->bytes_per_sector); 3848 3849 format_page = &lun->mode_pages.format_page[ 3850 CTL_PAGE_DEFAULT]; 3851 scsi_ulto2b(lun->be_lun->blocksize, 3852 format_page->bytes_per_sector); 3853 3854 format_page = &lun->mode_pages.format_page[ 3855 CTL_PAGE_SAVED]; 3856 scsi_ulto2b(lun->be_lun->blocksize, 3857 format_page->bytes_per_sector); 3858 3859 page_index->page_data = 3860 (uint8_t *)lun->mode_pages.format_page; 3861 break; 3862 } 3863 case SMS_RIGID_DISK_PAGE: { 3864 struct scsi_rigid_disk_page *rigid_disk_page; 3865 uint32_t sectors_per_cylinder; 3866 uint64_t cylinders; 3867 #ifndef __XSCALE__ 3868 int shift; 3869 #endif /* !__XSCALE__ */ 3870 3871 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3872 panic("invalid subpage value %d", 3873 page_index->subpage); 3874 3875 /* 3876 * Rotation rate and sectors per track are set 3877 * above. We calculate the cylinders here based on 3878 * capacity. Due to the number of heads and 3879 * sectors per track we're using, smaller arrays 3880 * may turn out to have 0 cylinders. Linux and 3881 * FreeBSD don't pay attention to these mode pages 3882 * to figure out capacity, but Solaris does. It 3883 * seems to deal with 0 cylinders just fine, and 3884 * works out a fake geometry based on the capacity. 3885 */ 3886 memcpy(&lun->mode_pages.rigid_disk_page[ 3887 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 3888 sizeof(rigid_disk_page_default)); 3889 memcpy(&lun->mode_pages.rigid_disk_page[ 3890 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 3891 sizeof(rigid_disk_page_changeable)); 3892 3893 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 3894 CTL_DEFAULT_HEADS; 3895 3896 /* 3897 * The divide method here will be more accurate, 3898 * probably, but results in floating point being 3899 * used in the kernel on i386 (__udivdi3()). On the 3900 * XScale, though, __udivdi3() is implemented in 3901 * software. 3902 * 3903 * The shift method for cylinder calculation is 3904 * accurate if sectors_per_cylinder is a power of 3905 * 2. Otherwise it might be slightly off -- you 3906 * might have a bit of a truncation problem. 3907 */ 3908 #ifdef __XSCALE__ 3909 cylinders = (lun->be_lun->maxlba + 1) / 3910 sectors_per_cylinder; 3911 #else 3912 for (shift = 31; shift > 0; shift--) { 3913 if (sectors_per_cylinder & (1 << shift)) 3914 break; 3915 } 3916 cylinders = (lun->be_lun->maxlba + 1) >> shift; 3917 #endif 3918 3919 /* 3920 * We've basically got 3 bytes, or 24 bits for the 3921 * cylinder size in the mode page. If we're over, 3922 * just round down to 2^24. 
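 *
 * (Worked example, assuming the conventional 255-head, 63-sector
 * translation is what the CTL_DEFAULT_* constants above expand to:
 * sectors_per_cylinder = 255 * 63 = 16065, whose highest set bit is
 * 2^13 = 8192, so the shift path picks shift = 13 and computes
 * cylinders = (maxlba + 1) >> 13; the clamp below then caps the field at
 * 0xffffff = 16,777,215, the largest value a 3-byte field can hold.)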
3923 */ 3924 if (cylinders > 0xffffff) 3925 cylinders = 0xffffff; 3926 3927 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 3928 CTL_PAGE_DEFAULT]; 3929 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 3930 3931 if ((value = ctl_get_opt(&lun->be_lun->options, 3932 "rpm")) != NULL) { 3933 scsi_ulto2b(strtol(value, NULL, 0), 3934 rigid_disk_page->rotation_rate); 3935 } 3936 3937 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 3938 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 3939 sizeof(rigid_disk_page_default)); 3940 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 3941 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 3942 sizeof(rigid_disk_page_default)); 3943 3944 page_index->page_data = 3945 (uint8_t *)lun->mode_pages.rigid_disk_page; 3946 break; 3947 } 3948 case SMS_CACHING_PAGE: { 3949 struct scsi_caching_page *caching_page; 3950 3951 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3952 panic("invalid subpage value %d", 3953 page_index->subpage); 3954 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 3955 &caching_page_default, 3956 sizeof(caching_page_default)); 3957 memcpy(&lun->mode_pages.caching_page[ 3958 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 3959 sizeof(caching_page_changeable)); 3960 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 3961 &caching_page_default, 3962 sizeof(caching_page_default)); 3963 caching_page = &lun->mode_pages.caching_page[ 3964 CTL_PAGE_SAVED]; 3965 value = ctl_get_opt(&lun->be_lun->options, "writecache"); 3966 if (value != NULL && strcmp(value, "off") == 0) 3967 caching_page->flags1 &= ~SCP_WCE; 3968 value = ctl_get_opt(&lun->be_lun->options, "readcache"); 3969 if (value != NULL && strcmp(value, "off") == 0) 3970 caching_page->flags1 |= SCP_RCD; 3971 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 3972 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 3973 sizeof(caching_page_default)); 3974 page_index->page_data = 3975 (uint8_t *)lun->mode_pages.caching_page; 3976 break; 3977 } 3978 case SMS_CONTROL_MODE_PAGE: { 3979 switch (page_index->subpage) { 3980 case SMS_SUBPAGE_PAGE_0: { 3981 struct scsi_control_page *control_page; 3982 3983 memcpy(&lun->mode_pages.control_page[ 3984 CTL_PAGE_DEFAULT], 3985 &control_page_default, 3986 sizeof(control_page_default)); 3987 memcpy(&lun->mode_pages.control_page[ 3988 CTL_PAGE_CHANGEABLE], 3989 &control_page_changeable, 3990 sizeof(control_page_changeable)); 3991 memcpy(&lun->mode_pages.control_page[ 3992 CTL_PAGE_SAVED], 3993 &control_page_default, 3994 sizeof(control_page_default)); 3995 control_page = &lun->mode_pages.control_page[ 3996 CTL_PAGE_SAVED]; 3997 value = ctl_get_opt(&lun->be_lun->options, 3998 "reordering"); 3999 if (value != NULL && 4000 strcmp(value, "unrestricted") == 0) { 4001 control_page->queue_flags &= 4002 ~SCP_QUEUE_ALG_MASK; 4003 control_page->queue_flags |= 4004 SCP_QUEUE_ALG_UNRESTRICTED; 4005 } 4006 memcpy(&lun->mode_pages.control_page[ 4007 CTL_PAGE_CURRENT], 4008 &lun->mode_pages.control_page[ 4009 CTL_PAGE_SAVED], 4010 sizeof(control_page_default)); 4011 page_index->page_data = 4012 (uint8_t *)lun->mode_pages.control_page; 4013 break; 4014 } 4015 case 0x01: 4016 memcpy(&lun->mode_pages.control_ext_page[ 4017 CTL_PAGE_DEFAULT], 4018 &control_ext_page_default, 4019 sizeof(control_ext_page_default)); 4020 memcpy(&lun->mode_pages.control_ext_page[ 4021 CTL_PAGE_CHANGEABLE], 4022 &control_ext_page_changeable, 4023 sizeof(control_ext_page_changeable)); 4024 memcpy(&lun->mode_pages.control_ext_page[ 4025 CTL_PAGE_SAVED], 4026 
&control_ext_page_default, 4027 sizeof(control_ext_page_default)); 4028 memcpy(&lun->mode_pages.control_ext_page[ 4029 CTL_PAGE_CURRENT], 4030 &lun->mode_pages.control_ext_page[ 4031 CTL_PAGE_SAVED], 4032 sizeof(control_ext_page_default)); 4033 page_index->page_data = 4034 (uint8_t *)lun->mode_pages.control_ext_page; 4035 break; 4036 } 4037 break; 4038 } 4039 case SMS_INFO_EXCEPTIONS_PAGE: { 4040 switch (page_index->subpage) { 4041 case SMS_SUBPAGE_PAGE_0: 4042 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 4043 &ie_page_default, 4044 sizeof(ie_page_default)); 4045 memcpy(&lun->mode_pages.ie_page[ 4046 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 4047 sizeof(ie_page_changeable)); 4048 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 4049 &ie_page_default, 4050 sizeof(ie_page_default)); 4051 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 4052 &ie_page_default, 4053 sizeof(ie_page_default)); 4054 page_index->page_data = 4055 (uint8_t *)lun->mode_pages.ie_page; 4056 break; 4057 case 0x02: { 4058 struct ctl_logical_block_provisioning_page *page; 4059 4060 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 4061 &lbp_page_default, 4062 sizeof(lbp_page_default)); 4063 memcpy(&lun->mode_pages.lbp_page[ 4064 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 4065 sizeof(lbp_page_changeable)); 4066 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4067 &lbp_page_default, 4068 sizeof(lbp_page_default)); 4069 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 4070 value = ctl_get_opt(&lun->be_lun->options, 4071 "avail-threshold"); 4072 if (value != NULL && 4073 ctl_expand_number(value, &ival) == 0) { 4074 page->descr[0].flags |= SLBPPD_ENABLED | 4075 SLBPPD_ARMING_DEC; 4076 if (lun->be_lun->blocksize) 4077 ival /= lun->be_lun->blocksize; 4078 else 4079 ival /= 512; 4080 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4081 page->descr[0].count); 4082 } 4083 value = ctl_get_opt(&lun->be_lun->options, 4084 "used-threshold"); 4085 if (value != NULL && 4086 ctl_expand_number(value, &ival) == 0) { 4087 page->descr[1].flags |= SLBPPD_ENABLED | 4088 SLBPPD_ARMING_INC; 4089 if (lun->be_lun->blocksize) 4090 ival /= lun->be_lun->blocksize; 4091 else 4092 ival /= 512; 4093 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4094 page->descr[1].count); 4095 } 4096 value = ctl_get_opt(&lun->be_lun->options, 4097 "pool-avail-threshold"); 4098 if (value != NULL && 4099 ctl_expand_number(value, &ival) == 0) { 4100 page->descr[2].flags |= SLBPPD_ENABLED | 4101 SLBPPD_ARMING_DEC; 4102 if (lun->be_lun->blocksize) 4103 ival /= lun->be_lun->blocksize; 4104 else 4105 ival /= 512; 4106 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4107 page->descr[2].count); 4108 } 4109 value = ctl_get_opt(&lun->be_lun->options, 4110 "pool-used-threshold"); 4111 if (value != NULL && 4112 ctl_expand_number(value, &ival) == 0) { 4113 page->descr[3].flags |= SLBPPD_ENABLED | 4114 SLBPPD_ARMING_INC; 4115 if (lun->be_lun->blocksize) 4116 ival /= lun->be_lun->blocksize; 4117 else 4118 ival /= 512; 4119 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4120 page->descr[3].count); 4121 } 4122 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 4123 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4124 sizeof(lbp_page_default)); 4125 page_index->page_data = 4126 (uint8_t *)lun->mode_pages.lbp_page; 4127 }} 4128 break; 4129 } 4130 case SMS_VENDOR_SPECIFIC_PAGE:{ 4131 switch (page_index->subpage) { 4132 case DBGCNF_SUBPAGE_CODE: { 4133 struct copan_debugconf_subpage *current_page, 4134 *saved_page; 4135 4136 memcpy(&lun->mode_pages.debugconf_subpage[ 4137 CTL_PAGE_CURRENT], 4138 
&debugconf_page_default, 4139 sizeof(debugconf_page_default)); 4140 memcpy(&lun->mode_pages.debugconf_subpage[ 4141 CTL_PAGE_CHANGEABLE], 4142 &debugconf_page_changeable, 4143 sizeof(debugconf_page_changeable)); 4144 memcpy(&lun->mode_pages.debugconf_subpage[ 4145 CTL_PAGE_DEFAULT], 4146 &debugconf_page_default, 4147 sizeof(debugconf_page_default)); 4148 memcpy(&lun->mode_pages.debugconf_subpage[ 4149 CTL_PAGE_SAVED], 4150 &debugconf_page_default, 4151 sizeof(debugconf_page_default)); 4152 page_index->page_data = 4153 (uint8_t *)lun->mode_pages.debugconf_subpage; 4154 4155 current_page = (struct copan_debugconf_subpage *) 4156 (page_index->page_data + 4157 (page_index->page_len * 4158 CTL_PAGE_CURRENT)); 4159 saved_page = (struct copan_debugconf_subpage *) 4160 (page_index->page_data + 4161 (page_index->page_len * 4162 CTL_PAGE_SAVED)); 4163 break; 4164 } 4165 default: 4166 panic("invalid subpage value %d", 4167 page_index->subpage); 4168 break; 4169 } 4170 break; 4171 } 4172 default: 4173 panic("invalid page value %d", 4174 page_index->page_code & SMPH_PC_MASK); 4175 break; 4176 } 4177 } 4178 4179 return (CTL_RETVAL_COMPLETE); 4180 } 4181 4182 static int 4183 ctl_init_log_page_index(struct ctl_lun *lun) 4184 { 4185 struct ctl_page_index *page_index; 4186 int i, j, k, prev; 4187 4188 memcpy(&lun->log_pages.index, log_page_index_template, 4189 sizeof(log_page_index_template)); 4190 4191 prev = -1; 4192 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 4193 4194 page_index = &lun->log_pages.index[i]; 4195 /* 4196 * If this is a disk-only mode page, there's no point in 4197 * setting it up. For some pages, we have to have some 4198 * basic information about the disk in order to calculate the 4199 * mode page data. 4200 */ 4201 if ((lun->be_lun->lun_type != T_DIRECT) 4202 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 4203 continue; 4204 4205 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 4206 lun->backend->lun_attr == NULL) 4207 continue; 4208 4209 if (page_index->page_code != prev) { 4210 lun->log_pages.pages_page[j] = page_index->page_code; 4211 prev = page_index->page_code; 4212 j++; 4213 } 4214 lun->log_pages.subpages_page[k*2] = page_index->page_code; 4215 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 4216 k++; 4217 } 4218 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 4219 lun->log_pages.index[0].page_len = j; 4220 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 4221 lun->log_pages.index[1].page_len = k * 2; 4222 lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0]; 4223 lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS; 4224 lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page; 4225 lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page); 4226 4227 return (CTL_RETVAL_COMPLETE); 4228 } 4229 4230 static int 4231 hex2bin(const char *str, uint8_t *buf, int buf_size) 4232 { 4233 int i; 4234 u_char c; 4235 4236 memset(buf, 0, buf_size); 4237 while (isspace(str[0])) 4238 str++; 4239 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) 4240 str += 2; 4241 buf_size *= 2; 4242 for (i = 0; str[i] != 0 && i < buf_size; i++) { 4243 c = str[i]; 4244 if (isdigit(c)) 4245 c -= '0'; 4246 else if (isalpha(c)) 4247 c -= isupper(c) ? 'A' - 10 : 'a' - 10; 4248 else 4249 break; 4250 if (c >= 16) 4251 break; 4252 if ((i & 1) == 0) 4253 buf[i / 2] |= (c << 4); 4254 else 4255 buf[i / 2] |= c; 4256 } 4257 return ((i + 1) / 2); 4258 } 4259 4260 /* 4261 * LUN allocation. 
4262 * 4263 * Requirements: 4264 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 4265 * wants us to allocate the LUN and he can block. 4266 * - ctl_softc is always set 4267 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 4268 * 4269 * Returns 0 for success, non-zero (errno) for failure. 4270 */ 4271 static int 4272 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4273 struct ctl_be_lun *const be_lun) 4274 { 4275 struct ctl_lun *nlun, *lun; 4276 struct scsi_vpd_id_descriptor *desc; 4277 struct scsi_vpd_id_t10 *t10id; 4278 const char *eui, *naa, *scsiname, *vendor; 4279 int lun_number, i, lun_malloced; 4280 int devidlen, idlen1, idlen2 = 0, len; 4281 4282 if (be_lun == NULL) 4283 return (EINVAL); 4284 4285 /* 4286 * We currently only support Direct Access or Processor LUN types. 4287 */ 4288 switch (be_lun->lun_type) { 4289 case T_DIRECT: 4290 break; 4291 case T_PROCESSOR: 4292 break; 4293 case T_SEQUENTIAL: 4294 case T_CHANGER: 4295 default: 4296 be_lun->lun_config_status(be_lun->be_lun, 4297 CTL_LUN_CONFIG_FAILURE); 4298 break; 4299 } 4300 if (ctl_lun == NULL) { 4301 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4302 lun_malloced = 1; 4303 } else { 4304 lun_malloced = 0; 4305 lun = ctl_lun; 4306 } 4307 4308 memset(lun, 0, sizeof(*lun)); 4309 if (lun_malloced) 4310 lun->flags = CTL_LUN_MALLOCED; 4311 4312 /* Generate LUN ID. */ 4313 devidlen = max(CTL_DEVID_MIN_LEN, 4314 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4315 idlen1 = sizeof(*t10id) + devidlen; 4316 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4317 scsiname = ctl_get_opt(&be_lun->options, "scsiname"); 4318 if (scsiname != NULL) { 4319 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4320 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4321 } 4322 eui = ctl_get_opt(&be_lun->options, "eui"); 4323 if (eui != NULL) { 4324 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4325 } 4326 naa = ctl_get_opt(&be_lun->options, "naa"); 4327 if (naa != NULL) { 4328 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4329 } 4330 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4331 M_CTL, M_WAITOK | M_ZERO); 4332 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4333 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4334 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4335 desc->length = idlen1; 4336 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4337 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4338 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { 4339 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4340 } else { 4341 strncpy(t10id->vendor, vendor, 4342 min(sizeof(t10id->vendor), strlen(vendor))); 4343 } 4344 strncpy((char *)t10id->vendor_spec_id, 4345 (char *)be_lun->device_id, devidlen); 4346 if (scsiname != NULL) { 4347 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4348 desc->length); 4349 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4350 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4351 SVPD_ID_TYPE_SCSI_NAME; 4352 desc->length = idlen2; 4353 strlcpy(desc->identifier, scsiname, idlen2); 4354 } 4355 if (eui != NULL) { 4356 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4357 desc->length); 4358 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4359 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4360 SVPD_ID_TYPE_EUI64; 4361 desc->length = hex2bin(eui, desc->identifier, 16); 4362 desc->length = desc->length > 12 ? 16 : 4363 (desc->length > 8 ? 
12 : 8); 4364 len -= 16 - desc->length; 4365 } 4366 if (naa != NULL) { 4367 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4368 desc->length); 4369 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4370 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4371 SVPD_ID_TYPE_NAA; 4372 desc->length = hex2bin(naa, desc->identifier, 16); 4373 desc->length = desc->length > 8 ? 16 : 8; 4374 len -= 16 - desc->length; 4375 } 4376 lun->lun_devid->len = len; 4377 4378 mtx_lock(&ctl_softc->ctl_lock); 4379 /* 4380 * See if the caller requested a particular LUN number. If so, see 4381 * if it is available. Otherwise, allocate the first available LUN. 4382 */ 4383 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4384 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4385 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4386 mtx_unlock(&ctl_softc->ctl_lock); 4387 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4388 printf("ctl: requested LUN ID %d is higher " 4389 "than CTL_MAX_LUNS - 1 (%d)\n", 4390 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4391 } else { 4392 /* 4393 * XXX KDM return an error, or just assign 4394 * another LUN ID in this case?? 4395 */ 4396 printf("ctl: requested LUN ID %d is already " 4397 "in use\n", be_lun->req_lun_id); 4398 } 4399 if (lun->flags & CTL_LUN_MALLOCED) 4400 free(lun, M_CTL); 4401 be_lun->lun_config_status(be_lun->be_lun, 4402 CTL_LUN_CONFIG_FAILURE); 4403 return (ENOSPC); 4404 } 4405 lun_number = be_lun->req_lun_id; 4406 } else { 4407 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS); 4408 if (lun_number == -1) { 4409 mtx_unlock(&ctl_softc->ctl_lock); 4410 printf("ctl: can't allocate LUN, out of LUNs\n"); 4411 if (lun->flags & CTL_LUN_MALLOCED) 4412 free(lun, M_CTL); 4413 be_lun->lun_config_status(be_lun->be_lun, 4414 CTL_LUN_CONFIG_FAILURE); 4415 return (ENOSPC); 4416 } 4417 } 4418 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4419 4420 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4421 lun->lun = lun_number; 4422 lun->be_lun = be_lun; 4423 /* 4424 * The processor LUN is always enabled. Disk LUNs come on line 4425 * disabled, and must be enabled by the backend. 4426 */ 4427 lun->flags |= CTL_LUN_DISABLED; 4428 lun->backend = be_lun->be; 4429 be_lun->ctl_lun = lun; 4430 be_lun->lun_id = lun_number; 4431 atomic_add_int(&be_lun->be->num_luns, 1); 4432 if (be_lun->flags & CTL_LUN_FLAG_OFFLINE) 4433 lun->flags |= CTL_LUN_OFFLINE; 4434 4435 if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF) 4436 lun->flags |= CTL_LUN_STOPPED; 4437 4438 if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE) 4439 lun->flags |= CTL_LUN_INOPERABLE; 4440 4441 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4442 lun->flags |= CTL_LUN_PRIMARY_SC; 4443 4444 lun->ctl_softc = ctl_softc; 4445 #ifdef CTL_TIME_IO 4446 lun->last_busy = getsbinuptime(); 4447 #endif 4448 TAILQ_INIT(&lun->ooa_queue); 4449 TAILQ_INIT(&lun->blocked_queue); 4450 STAILQ_INIT(&lun->error_list); 4451 ctl_tpc_lun_init(lun); 4452 4453 /* 4454 * Initialize the mode and log page index. 4455 */ 4456 ctl_init_page_index(lun); 4457 ctl_init_log_page_index(lun); 4458 4459 /* 4460 * Now, before we insert this lun on the lun list, set the lun 4461 * inventory changed UA for all other luns. 
4462 */ 4463 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4464 mtx_lock(&nlun->lun_lock); 4465 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4466 mtx_unlock(&nlun->lun_lock); 4467 } 4468 4469 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4470 4471 ctl_softc->ctl_luns[lun_number] = lun; 4472 4473 ctl_softc->num_luns++; 4474 4475 /* Setup statistics gathering */ 4476 lun->stats.device_type = be_lun->lun_type; 4477 lun->stats.lun_number = lun_number; 4478 if (lun->stats.device_type == T_DIRECT) 4479 lun->stats.blocksize = be_lun->blocksize; 4480 else 4481 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4482 for (i = 0;i < CTL_MAX_PORTS;i++) 4483 lun->stats.ports[i].targ_port = i; 4484 4485 mtx_unlock(&ctl_softc->ctl_lock); 4486 4487 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4488 return (0); 4489 } 4490 4491 /* 4492 * Delete a LUN. 4493 * Assumptions: 4494 * - LUN has already been marked invalid and any pending I/O has been taken 4495 * care of. 4496 */ 4497 static int 4498 ctl_free_lun(struct ctl_lun *lun) 4499 { 4500 struct ctl_softc *softc; 4501 struct ctl_lun *nlun; 4502 int i; 4503 4504 softc = lun->ctl_softc; 4505 4506 mtx_assert(&softc->ctl_lock, MA_OWNED); 4507 4508 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4509 4510 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4511 4512 softc->ctl_luns[lun->lun] = NULL; 4513 4514 if (!TAILQ_EMPTY(&lun->ooa_queue)) 4515 panic("Freeing a LUN %p with outstanding I/O!!\n", lun); 4516 4517 softc->num_luns--; 4518 4519 /* 4520 * Tell the backend to free resources, if this LUN has a backend. 4521 */ 4522 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4523 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4524 4525 ctl_tpc_lun_shutdown(lun); 4526 mtx_destroy(&lun->lun_lock); 4527 free(lun->lun_devid, M_CTL); 4528 for (i = 0; i < CTL_MAX_PORTS; i++) 4529 free(lun->pending_ua[i], M_CTL); 4530 for (i = 0; i < CTL_MAX_PORTS; i++) 4531 free(lun->pr_keys[i], M_CTL); 4532 free(lun->write_buffer, M_CTL); 4533 if (lun->flags & CTL_LUN_MALLOCED) 4534 free(lun, M_CTL); 4535 4536 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4537 mtx_lock(&nlun->lun_lock); 4538 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4539 mtx_unlock(&nlun->lun_lock); 4540 } 4541 4542 return (0); 4543 } 4544 4545 static void 4546 ctl_create_lun(struct ctl_be_lun *be_lun) 4547 { 4548 struct ctl_softc *softc; 4549 4550 softc = control_softc; 4551 4552 /* 4553 * ctl_alloc_lun() should handle all potential failure cases. 4554 */ 4555 ctl_alloc_lun(softc, NULL, be_lun); 4556 } 4557 4558 int 4559 ctl_add_lun(struct ctl_be_lun *be_lun) 4560 { 4561 struct ctl_softc *softc = control_softc; 4562 4563 mtx_lock(&softc->ctl_lock); 4564 STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links); 4565 mtx_unlock(&softc->ctl_lock); 4566 wakeup(&softc->pending_lun_queue); 4567 4568 return (0); 4569 } 4570 4571 int 4572 ctl_enable_lun(struct ctl_be_lun *be_lun) 4573 { 4574 struct ctl_softc *softc; 4575 struct ctl_port *port, *nport; 4576 struct ctl_lun *lun; 4577 int retval; 4578 4579 lun = (struct ctl_lun *)be_lun->ctl_lun; 4580 softc = lun->ctl_softc; 4581 4582 mtx_lock(&softc->ctl_lock); 4583 mtx_lock(&lun->lun_lock); 4584 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4585 /* 4586 * eh? Why did we get called if the LUN is already 4587 * enabled? 
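* That is harmless; just drop the locks and report success.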
4588 */ 4589 mtx_unlock(&lun->lun_lock); 4590 mtx_unlock(&softc->ctl_lock); 4591 return (0); 4592 } 4593 lun->flags &= ~CTL_LUN_DISABLED; 4594 mtx_unlock(&lun->lun_lock); 4595 4596 for (port = STAILQ_FIRST(&softc->port_list); port != NULL; port = nport) { 4597 nport = STAILQ_NEXT(port, links); 4598 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4599 port->lun_map != NULL || port->lun_enable == NULL) 4600 continue; 4601 4602 /* 4603 * Drop the lock while we call the FETD's enable routine. 4604 * This can lead to a callback into CTL (at least in the 4605 * case of the internal initiator frontend). 4606 */ 4607 mtx_unlock(&softc->ctl_lock); 4608 retval = port->lun_enable(port->targ_lun_arg, lun->lun); 4609 mtx_lock(&softc->ctl_lock); 4610 if (retval != 0) { 4611 printf("%s: FETD %s port %d returned error " 4612 "%d for lun_enable on lun %jd\n", 4613 __func__, port->port_name, port->targ_port, 4614 retval, (intmax_t)lun->lun); 4615 } 4616 } 4617 4618 mtx_unlock(&softc->ctl_lock); 4619 ctl_isc_announce_lun(lun); 4620 4621 return (0); 4622 } 4623 4624 int 4625 ctl_disable_lun(struct ctl_be_lun *be_lun) 4626 { 4627 struct ctl_softc *softc; 4628 struct ctl_port *port; 4629 struct ctl_lun *lun; 4630 int retval; 4631 4632 lun = (struct ctl_lun *)be_lun->ctl_lun; 4633 softc = lun->ctl_softc; 4634 4635 mtx_lock(&softc->ctl_lock); 4636 mtx_lock(&lun->lun_lock); 4637 if (lun->flags & CTL_LUN_DISABLED) { 4638 mtx_unlock(&lun->lun_lock); 4639 mtx_unlock(&softc->ctl_lock); 4640 return (0); 4641 } 4642 lun->flags |= CTL_LUN_DISABLED; 4643 mtx_unlock(&lun->lun_lock); 4644 4645 STAILQ_FOREACH(port, &softc->port_list, links) { 4646 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4647 port->lun_map != NULL || port->lun_disable == NULL) 4648 continue; 4649 4650 /* 4651 * Drop the lock before we call the frontend's disable 4652 * routine, to avoid lock order reversals. 4653 * 4654 * XXX KDM what happens if the frontend list changes while 4655 * we're traversing it? It's unlikely, but should be handled.
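* (The enable path above at least caches the next list entry before dropping the lock; this loop relies on the port list staying put.)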
4656 */ 4657 mtx_unlock(&softc->ctl_lock); 4658 retval = port->lun_disable(port->targ_lun_arg, lun->lun); 4659 mtx_lock(&softc->ctl_lock); 4660 if (retval != 0) { 4661 printf("%s: FETD %s port %d returned error " 4662 "%d for lun_disable on lun %jd\n", 4663 __func__, port->port_name, port->targ_port, 4664 retval, (intmax_t)lun->lun); 4665 } 4666 } 4667 4668 mtx_unlock(&softc->ctl_lock); 4669 ctl_isc_announce_lun(lun); 4670 4671 return (0); 4672 } 4673 4674 int 4675 ctl_start_lun(struct ctl_be_lun *be_lun) 4676 { 4677 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4678 4679 mtx_lock(&lun->lun_lock); 4680 lun->flags &= ~CTL_LUN_STOPPED; 4681 mtx_unlock(&lun->lun_lock); 4682 return (0); 4683 } 4684 4685 int 4686 ctl_stop_lun(struct ctl_be_lun *be_lun) 4687 { 4688 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4689 4690 mtx_lock(&lun->lun_lock); 4691 lun->flags |= CTL_LUN_STOPPED; 4692 mtx_unlock(&lun->lun_lock); 4693 return (0); 4694 } 4695 4696 int 4697 ctl_lun_offline(struct ctl_be_lun *be_lun) 4698 { 4699 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4700 4701 mtx_lock(&lun->lun_lock); 4702 lun->flags |= CTL_LUN_OFFLINE; 4703 mtx_unlock(&lun->lun_lock); 4704 return (0); 4705 } 4706 4707 int 4708 ctl_lun_online(struct ctl_be_lun *be_lun) 4709 { 4710 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4711 4712 mtx_lock(&lun->lun_lock); 4713 lun->flags &= ~CTL_LUN_OFFLINE; 4714 mtx_unlock(&lun->lun_lock); 4715 return (0); 4716 } 4717 4718 int 4719 ctl_lun_primary(struct ctl_be_lun *be_lun) 4720 { 4721 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4722 4723 mtx_lock(&lun->lun_lock); 4724 lun->flags |= CTL_LUN_PRIMARY_SC; 4725 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4726 mtx_unlock(&lun->lun_lock); 4727 ctl_isc_announce_lun(lun); 4728 return (0); 4729 } 4730 4731 int 4732 ctl_lun_secondary(struct ctl_be_lun *be_lun) 4733 { 4734 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4735 4736 mtx_lock(&lun->lun_lock); 4737 lun->flags &= ~CTL_LUN_PRIMARY_SC; 4738 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4739 mtx_unlock(&lun->lun_lock); 4740 ctl_isc_announce_lun(lun); 4741 return (0); 4742 } 4743 4744 int 4745 ctl_invalidate_lun(struct ctl_be_lun *be_lun) 4746 { 4747 struct ctl_softc *softc; 4748 struct ctl_lun *lun; 4749 4750 lun = (struct ctl_lun *)be_lun->ctl_lun; 4751 softc = lun->ctl_softc; 4752 4753 mtx_lock(&lun->lun_lock); 4754 4755 /* 4756 * The LUN needs to be disabled before it can be marked invalid. 4757 */ 4758 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4759 mtx_unlock(&lun->lun_lock); 4760 return (-1); 4761 } 4762 /* 4763 * Mark the LUN invalid. 4764 */ 4765 lun->flags |= CTL_LUN_INVALID; 4766 4767 /* 4768 * If there is nothing in the OOA queue, go ahead and free the LUN. 4769 * If we have something in the OOA queue, we'll free it when the 4770 * last I/O completes. 
4771 */ 4772 if (TAILQ_EMPTY(&lun->ooa_queue)) { 4773 mtx_unlock(&lun->lun_lock); 4774 mtx_lock(&softc->ctl_lock); 4775 ctl_free_lun(lun); 4776 mtx_unlock(&softc->ctl_lock); 4777 } else 4778 mtx_unlock(&lun->lun_lock); 4779 4780 return (0); 4781 } 4782 4783 int 4784 ctl_lun_inoperable(struct ctl_be_lun *be_lun) 4785 { 4786 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4787 4788 mtx_lock(&lun->lun_lock); 4789 lun->flags |= CTL_LUN_INOPERABLE; 4790 mtx_unlock(&lun->lun_lock); 4791 return (0); 4792 } 4793 4794 int 4795 ctl_lun_operable(struct ctl_be_lun *be_lun) 4796 { 4797 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4798 4799 mtx_lock(&lun->lun_lock); 4800 lun->flags &= ~CTL_LUN_INOPERABLE; 4801 mtx_unlock(&lun->lun_lock); 4802 return (0); 4803 } 4804 4805 void 4806 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 4807 { 4808 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4809 union ctl_ha_msg msg; 4810 4811 mtx_lock(&lun->lun_lock); 4812 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED); 4813 mtx_unlock(&lun->lun_lock); 4814 if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 4815 /* Send msg to other side. */ 4816 bzero(&msg.ua, sizeof(msg.ua)); 4817 msg.hdr.msg_type = CTL_MSG_UA; 4818 msg.hdr.nexus.initid = -1; 4819 msg.hdr.nexus.targ_port = -1; 4820 msg.hdr.nexus.targ_lun = lun->lun; 4821 msg.hdr.nexus.targ_mapped_lun = lun->lun; 4822 msg.ua.ua_all = 1; 4823 msg.ua.ua_set = 1; 4824 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGED; 4825 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 4826 M_WAITOK); 4827 } 4828 } 4829 4830 /* 4831 * Backend "memory move is complete" callback for requests that never 4832 * make it down to say RAIDCore's configuration code. 4833 */ 4834 int 4835 ctl_config_move_done(union ctl_io *io) 4836 { 4837 int retval; 4838 4839 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 4840 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 4841 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 4842 4843 if ((io->io_hdr.port_status != 0) && 4844 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4845 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4846 /* 4847 * For hardware error sense keys, the sense key 4848 * specific value is defined to be a retry count, 4849 * but we use it to pass back an internal FETD 4850 * error code. XXX KDM Hopefully the FETD is only 4851 * using 16 bits for an error code, since that's 4852 * all the space we have in the sks field. 4853 */ 4854 ctl_set_internal_failure(&io->scsiio, 4855 /*sks_valid*/ 1, 4856 /*retry_count*/ 4857 io->io_hdr.port_status); 4858 } 4859 4860 if (ctl_debug & CTL_DEBUG_CDB_DATA) 4861 ctl_data_print(io); 4862 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 4863 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 4864 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 4865 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 4866 /* 4867 * XXX KDM just assuming a single pointer here, and not a 4868 * S/G list. If we start using S/G lists for config data, 4869 * we'll need to know how to clean them up here as well. 4870 */ 4871 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4872 free(io->scsiio.kern_data_ptr, M_CTL); 4873 ctl_done(io); 4874 retval = CTL_RETVAL_COMPLETE; 4875 } else { 4876 /* 4877 * XXX KDM now we need to continue data movement. Some 4878 * options: 4879 * - call ctl_scsiio() again? We don't do this for data 4880 * writes, because for those at least we know ahead of 4881 * time where the write will go and how long it is. 
For 4882 * config writes, though, that information is largely 4883 * contained within the write itself, thus we need to 4884 * parse out the data again. 4885 * 4886 * - Call some other function once the data is in? 4887 */ 4888 4889 /* 4890 * XXX KDM call ctl_scsiio() again for now, and check flag 4891 * bits to see whether we're allocated or not. 4892 */ 4893 retval = ctl_scsiio(&io->scsiio); 4894 } 4895 return (retval); 4896 } 4897 4898 /* 4899 * This gets called by a backend driver when it is done with a 4900 * data_submit method. 4901 */ 4902 void 4903 ctl_data_submit_done(union ctl_io *io) 4904 { 4905 /* 4906 * If the IO_CONT flag is set, we need to call the supplied 4907 * function to continue processing the I/O, instead of completing 4908 * the I/O just yet. 4909 * 4910 * If there is an error, though, we don't want to keep processing. 4911 * Instead, just send status back to the initiator. 4912 */ 4913 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 4914 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 4915 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4916 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4917 io->scsiio.io_cont(io); 4918 return; 4919 } 4920 ctl_done(io); 4921 } 4922 4923 /* 4924 * This gets called by a backend driver when it is done with a 4925 * configuration write. 4926 */ 4927 void 4928 ctl_config_write_done(union ctl_io *io) 4929 { 4930 uint8_t *buf; 4931 4932 /* 4933 * If the IO_CONT flag is set, we need to call the supplied 4934 * function to continue processing the I/O, instead of completing 4935 * the I/O just yet. 4936 * 4937 * If there is an error, though, we don't want to keep processing. 4938 * Instead, just send status back to the initiator. 4939 */ 4940 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 4941 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 4942 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4943 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4944 io->scsiio.io_cont(io); 4945 return; 4946 } 4947 /* 4948 * Since a configuration write can be done for commands that actually 4949 * have data allocated, like write buffer, and commands that have 4950 * no data, like start/stop unit, we need to check here. 4951 */ 4952 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4953 buf = io->scsiio.kern_data_ptr; 4954 else 4955 buf = NULL; 4956 ctl_done(io); 4957 if (buf) 4958 free(buf, M_CTL); 4959 } 4960 4961 void 4962 ctl_config_read_done(union ctl_io *io) 4963 { 4964 uint8_t *buf; 4965 4966 /* 4967 * If there is some error -- we are done, skip data transfer. 4968 */ 4969 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 4970 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 4971 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 4972 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4973 buf = io->scsiio.kern_data_ptr; 4974 else 4975 buf = NULL; 4976 ctl_done(io); 4977 if (buf) 4978 free(buf, M_CTL); 4979 return; 4980 } 4981 4982 /* 4983 * If the IO_CONT flag is set, we need to call the supplied 4984 * function to continue processing the I/O, instead of completing 4985 * the I/O just yet. 4986 */ 4987 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 4988 io->scsiio.io_cont(io); 4989 return; 4990 } 4991 4992 ctl_datamove(io); 4993 } 4994 4995 /* 4996 * SCSI release command. 
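* Releasing a reservation this initiator does not hold is not an error; we only drop the LUN reservation if it was made by the requesting initiator.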
4997 */ 4998 int 4999 ctl_scsi_release(struct ctl_scsiio *ctsio) 5000 { 5001 int length, longid, thirdparty_id, resv_id; 5002 struct ctl_lun *lun; 5003 uint32_t residx; 5004 5005 length = 0; 5006 resv_id = 0; 5007 5008 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 5009 5010 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5011 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5012 5013 switch (ctsio->cdb[0]) { 5014 case RELEASE_10: { 5015 struct scsi_release_10 *cdb; 5016 5017 cdb = (struct scsi_release_10 *)ctsio->cdb; 5018 5019 if (cdb->byte2 & SR10_LONGID) 5020 longid = 1; 5021 else 5022 thirdparty_id = cdb->thirdparty_id; 5023 5024 resv_id = cdb->resv_id; 5025 length = scsi_2btoul(cdb->length); 5026 break; 5027 } 5028 } 5029 5030 5031 /* 5032 * XXX KDM right now, we only support LUN reservation. We don't 5033 * support 3rd party reservations, or extent reservations, which 5034 * might actually need the parameter list. If we've gotten this 5035 * far, we've got a LUN reservation. Anything else got kicked out 5036 * above. So, according to SPC, ignore the length. 5037 */ 5038 length = 0; 5039 5040 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5041 && (length > 0)) { 5042 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5043 ctsio->kern_data_len = length; 5044 ctsio->kern_total_len = length; 5045 ctsio->kern_data_resid = 0; 5046 ctsio->kern_rel_offset = 0; 5047 ctsio->kern_sg_entries = 0; 5048 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5049 ctsio->be_move_done = ctl_config_move_done; 5050 ctl_datamove((union ctl_io *)ctsio); 5051 5052 return (CTL_RETVAL_COMPLETE); 5053 } 5054 5055 if (length > 0) 5056 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5057 5058 mtx_lock(&lun->lun_lock); 5059 5060 /* 5061 * According to SPC, it is not an error for an intiator to attempt 5062 * to release a reservation on a LUN that isn't reserved, or that 5063 * is reserved by another initiator. The reservation can only be 5064 * released, though, by the initiator who made it or by one of 5065 * several reset type events. 5066 */ 5067 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 5068 lun->flags &= ~CTL_LUN_RESERVED; 5069 5070 mtx_unlock(&lun->lun_lock); 5071 5072 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5073 free(ctsio->kern_data_ptr, M_CTL); 5074 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5075 } 5076 5077 ctl_set_success(ctsio); 5078 ctl_done((union ctl_io *)ctsio); 5079 return (CTL_RETVAL_COMPLETE); 5080 } 5081 5082 int 5083 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5084 { 5085 int extent, thirdparty, longid; 5086 int resv_id, length; 5087 uint64_t thirdparty_id; 5088 struct ctl_lun *lun; 5089 uint32_t residx; 5090 5091 extent = 0; 5092 thirdparty = 0; 5093 longid = 0; 5094 resv_id = 0; 5095 length = 0; 5096 thirdparty_id = 0; 5097 5098 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5099 5100 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5101 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5102 5103 switch (ctsio->cdb[0]) { 5104 case RESERVE_10: { 5105 struct scsi_reserve_10 *cdb; 5106 5107 cdb = (struct scsi_reserve_10 *)ctsio->cdb; 5108 5109 if (cdb->byte2 & SR10_LONGID) 5110 longid = 1; 5111 else 5112 thirdparty_id = cdb->thirdparty_id; 5113 5114 resv_id = cdb->resv_id; 5115 length = scsi_2btoul(cdb->length); 5116 break; 5117 } 5118 } 5119 5120 /* 5121 * XXX KDM right now, we only support LUN reservation. We don't 5122 * support 3rd party reservations, or extent reservations, which 5123 * might actually need the parameter list. 
If we've gotten this 5124 * far, we've got a LUN reservation. Anything else got kicked out 5125 * above. So, according to SPC, ignore the length. 5126 */ 5127 length = 0; 5128 5129 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5130 && (length > 0)) { 5131 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5132 ctsio->kern_data_len = length; 5133 ctsio->kern_total_len = length; 5134 ctsio->kern_data_resid = 0; 5135 ctsio->kern_rel_offset = 0; 5136 ctsio->kern_sg_entries = 0; 5137 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5138 ctsio->be_move_done = ctl_config_move_done; 5139 ctl_datamove((union ctl_io *)ctsio); 5140 5141 return (CTL_RETVAL_COMPLETE); 5142 } 5143 5144 if (length > 0) 5145 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5146 5147 mtx_lock(&lun->lun_lock); 5148 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 5149 ctl_set_reservation_conflict(ctsio); 5150 goto bailout; 5151 } 5152 5153 lun->flags |= CTL_LUN_RESERVED; 5154 lun->res_idx = residx; 5155 5156 ctl_set_success(ctsio); 5157 5158 bailout: 5159 mtx_unlock(&lun->lun_lock); 5160 5161 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5162 free(ctsio->kern_data_ptr, M_CTL); 5163 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5164 } 5165 5166 ctl_done((union ctl_io *)ctsio); 5167 return (CTL_RETVAL_COMPLETE); 5168 } 5169 5170 int 5171 ctl_start_stop(struct ctl_scsiio *ctsio) 5172 { 5173 struct scsi_start_stop_unit *cdb; 5174 struct ctl_lun *lun; 5175 int retval; 5176 5177 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5178 5179 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5180 retval = 0; 5181 5182 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5183 5184 /* 5185 * XXX KDM 5186 * We don't support the immediate bit on a stop unit. In order to 5187 * do that, we would need to code up a way to know that a stop is 5188 * pending, and hold off any new commands until it completes, one 5189 * way or another. Then we could accept or reject those commands 5190 * depending on its status. We would almost need to do the reverse 5191 * of what we do below for an immediate start -- return the copy of 5192 * the ctl_io to the FETD with status to send to the host (and to 5193 * free the copy!) and then free the original I/O once the stop 5194 * actually completes. That way, the OOA queue mechanism can work 5195 * to block commands that shouldn't proceed. Another alternative 5196 * would be to put the copy in the queue in place of the original, 5197 * and return the original back to the caller. That could be 5198 * slightly safer.. 5199 */ 5200 if ((cdb->byte2 & SSS_IMMED) 5201 && ((cdb->how & SSS_START) == 0)) { 5202 ctl_set_invalid_field(ctsio, 5203 /*sks_valid*/ 1, 5204 /*command*/ 1, 5205 /*field*/ 1, 5206 /*bit_valid*/ 1, 5207 /*bit*/ 0); 5208 ctl_done((union ctl_io *)ctsio); 5209 return (CTL_RETVAL_COMPLETE); 5210 } 5211 5212 if ((lun->flags & CTL_LUN_PR_RESERVED) 5213 && ((cdb->how & SSS_START)==0)) { 5214 uint32_t residx; 5215 5216 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5217 if (ctl_get_prkey(lun, residx) == 0 5218 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { 5219 5220 ctl_set_reservation_conflict(ctsio); 5221 ctl_done((union ctl_io *)ctsio); 5222 return (CTL_RETVAL_COMPLETE); 5223 } 5224 } 5225 5226 /* 5227 * If there is no backend on this device, we can't start or stop 5228 * it. In theory we shouldn't get any start/stop commands in the 5229 * first place at this level if the LUN doesn't have a backend. 5230 * That should get stopped by the command decode code. 
5231 */ 5232 if (lun->backend == NULL) { 5233 ctl_set_invalid_opcode(ctsio); 5234 ctl_done((union ctl_io *)ctsio); 5235 return (CTL_RETVAL_COMPLETE); 5236 } 5237 5238 /* 5239 * XXX KDM Copan-specific offline behavior. 5240 * Figure out a reasonable way to port this? 5241 */ 5242 #ifdef NEEDTOPORT 5243 mtx_lock(&lun->lun_lock); 5244 5245 if (((cdb->byte2 & SSS_ONOFFLINE) == 0) 5246 && (lun->flags & CTL_LUN_OFFLINE)) { 5247 /* 5248 * If the LUN is offline, and the on/offline bit isn't set, 5249 * reject the start or stop. Otherwise, let it through. 5250 */ 5251 mtx_unlock(&lun->lun_lock); 5252 ctl_set_lun_not_ready(ctsio); 5253 ctl_done((union ctl_io *)ctsio); 5254 } else { 5255 mtx_unlock(&lun->lun_lock); 5256 #endif /* NEEDTOPORT */ 5257 /* 5258 * This could be a start or a stop when we're online, 5259 * or a stop/offline or start/online. A start or stop when 5260 * we're offline is covered in the case above. 5261 */ 5262 /* 5263 * In the non-immediate case, we send the request to 5264 * the backend and return status to the user when 5265 * it is done. 5266 * 5267 * In the immediate case, we allocate a new ctl_io 5268 * to hold a copy of the request, and send that to 5269 * the backend. We then set good status on the 5270 * user's request and return it immediately. 5271 */ 5272 if (cdb->byte2 & SSS_IMMED) { 5273 union ctl_io *new_io; 5274 5275 new_io = ctl_alloc_io(ctsio->io_hdr.pool); 5276 ctl_copy_io((union ctl_io *)ctsio, new_io); 5277 retval = lun->backend->config_write(new_io); 5278 ctl_set_success(ctsio); 5279 ctl_done((union ctl_io *)ctsio); 5280 } else { 5281 retval = lun->backend->config_write( 5282 (union ctl_io *)ctsio); 5283 } 5284 #ifdef NEEDTOPORT 5285 } 5286 #endif 5287 return (retval); 5288 } 5289 5290 /* 5291 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5292 * we don't really do anything with the LBA and length fields if the user 5293 * passes them in. Instead we'll just flush out the cache for the entire 5294 * LUN. 5295 */ 5296 int 5297 ctl_sync_cache(struct ctl_scsiio *ctsio) 5298 { 5299 struct ctl_lun *lun; 5300 struct ctl_softc *softc; 5301 struct ctl_lba_len_flags *lbalen; 5302 uint64_t starting_lba; 5303 uint32_t block_count; 5304 int retval; 5305 uint8_t byte2; 5306 5307 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5308 5309 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5310 softc = lun->ctl_softc; 5311 retval = 0; 5312 5313 switch (ctsio->cdb[0]) { 5314 case SYNCHRONIZE_CACHE: { 5315 struct scsi_sync_cache *cdb; 5316 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5317 5318 starting_lba = scsi_4btoul(cdb->begin_lba); 5319 block_count = scsi_2btoul(cdb->lb_count); 5320 byte2 = cdb->byte2; 5321 break; 5322 } 5323 case SYNCHRONIZE_CACHE_16: { 5324 struct scsi_sync_cache_16 *cdb; 5325 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5326 5327 starting_lba = scsi_8btou64(cdb->begin_lba); 5328 block_count = scsi_4btoul(cdb->lb_count); 5329 byte2 = cdb->byte2; 5330 break; 5331 } 5332 default: 5333 ctl_set_invalid_opcode(ctsio); 5334 ctl_done((union ctl_io *)ctsio); 5335 goto bailout; 5336 break; /* NOTREACHED */ 5337 } 5338 5339 /* 5340 * We check the LBA and length, but don't do anything with them. 5341 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5342 * get flushed. This check will just help satisfy anyone who wants 5343 * to see an error for an out of range LBA. 
5344 */ 5345 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5346 ctl_set_lba_out_of_range(ctsio); 5347 ctl_done((union ctl_io *)ctsio); 5348 goto bailout; 5349 } 5350 5351 /* 5352 * If this LUN has no backend, we can't flush the cache anyway. 5353 */ 5354 if (lun->backend == NULL) { 5355 ctl_set_invalid_opcode(ctsio); 5356 ctl_done((union ctl_io *)ctsio); 5357 goto bailout; 5358 } 5359 5360 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5361 lbalen->lba = starting_lba; 5362 lbalen->len = block_count; 5363 lbalen->flags = byte2; 5364 5365 /* 5366 * Check to see whether we're configured to send the SYNCHRONIZE 5367 * CACHE command directly to the back end. 5368 */ 5369 mtx_lock(&lun->lun_lock); 5370 if ((softc->flags & CTL_FLAG_REAL_SYNC) 5371 && (++(lun->sync_count) >= lun->sync_interval)) { 5372 lun->sync_count = 0; 5373 mtx_unlock(&lun->lun_lock); 5374 retval = lun->backend->config_write((union ctl_io *)ctsio); 5375 } else { 5376 mtx_unlock(&lun->lun_lock); 5377 ctl_set_success(ctsio); 5378 ctl_done((union ctl_io *)ctsio); 5379 } 5380 5381 bailout: 5382 5383 return (retval); 5384 } 5385 5386 int 5387 ctl_format(struct ctl_scsiio *ctsio) 5388 { 5389 struct scsi_format *cdb; 5390 struct ctl_lun *lun; 5391 int length, defect_list_len; 5392 5393 CTL_DEBUG_PRINT(("ctl_format\n")); 5394 5395 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5396 5397 cdb = (struct scsi_format *)ctsio->cdb; 5398 5399 length = 0; 5400 if (cdb->byte2 & SF_FMTDATA) { 5401 if (cdb->byte2 & SF_LONGLIST) 5402 length = sizeof(struct scsi_format_header_long); 5403 else 5404 length = sizeof(struct scsi_format_header_short); 5405 } 5406 5407 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5408 && (length > 0)) { 5409 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5410 ctsio->kern_data_len = length; 5411 ctsio->kern_total_len = length; 5412 ctsio->kern_data_resid = 0; 5413 ctsio->kern_rel_offset = 0; 5414 ctsio->kern_sg_entries = 0; 5415 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5416 ctsio->be_move_done = ctl_config_move_done; 5417 ctl_datamove((union ctl_io *)ctsio); 5418 5419 return (CTL_RETVAL_COMPLETE); 5420 } 5421 5422 defect_list_len = 0; 5423 5424 if (cdb->byte2 & SF_FMTDATA) { 5425 if (cdb->byte2 & SF_LONGLIST) { 5426 struct scsi_format_header_long *header; 5427 5428 header = (struct scsi_format_header_long *) 5429 ctsio->kern_data_ptr; 5430 5431 defect_list_len = scsi_4btoul(header->defect_list_len); 5432 if (defect_list_len != 0) { 5433 ctl_set_invalid_field(ctsio, 5434 /*sks_valid*/ 1, 5435 /*command*/ 0, 5436 /*field*/ 2, 5437 /*bit_valid*/ 0, 5438 /*bit*/ 0); 5439 goto bailout; 5440 } 5441 } else { 5442 struct scsi_format_header_short *header; 5443 5444 header = (struct scsi_format_header_short *) 5445 ctsio->kern_data_ptr; 5446 5447 defect_list_len = scsi_2btoul(header->defect_list_len); 5448 if (defect_list_len != 0) { 5449 ctl_set_invalid_field(ctsio, 5450 /*sks_valid*/ 1, 5451 /*command*/ 0, 5452 /*field*/ 2, 5453 /*bit_valid*/ 0, 5454 /*bit*/ 0); 5455 goto bailout; 5456 } 5457 } 5458 } 5459 5460 /* 5461 * The format command will clear out the "Medium format corrupted" 5462 * status if set by the configuration code. That status is really 5463 * just a way to notify the host that we have lost the media, and 5464 * get them to issue a command that will basically make them think 5465 * they're blowing away the media. 
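* Completing the format therefore just clears CTL_LUN_INOPERABLE below.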
5466 */ 5467 mtx_lock(&lun->lun_lock); 5468 lun->flags &= ~CTL_LUN_INOPERABLE; 5469 mtx_unlock(&lun->lun_lock); 5470 5471 ctl_set_success(ctsio); 5472 bailout: 5473 5474 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5475 free(ctsio->kern_data_ptr, M_CTL); 5476 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5477 } 5478 5479 ctl_done((union ctl_io *)ctsio); 5480 return (CTL_RETVAL_COMPLETE); 5481 } 5482 5483 int 5484 ctl_read_buffer(struct ctl_scsiio *ctsio) 5485 { 5486 struct scsi_read_buffer *cdb; 5487 struct ctl_lun *lun; 5488 int buffer_offset, len; 5489 static uint8_t descr[4]; 5490 static uint8_t echo_descr[4] = { 0 }; 5491 5492 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5493 5494 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5495 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5496 5497 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA && 5498 (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR && 5499 (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) { 5500 ctl_set_invalid_field(ctsio, 5501 /*sks_valid*/ 1, 5502 /*command*/ 1, 5503 /*field*/ 1, 5504 /*bit_valid*/ 1, 5505 /*bit*/ 4); 5506 ctl_done((union ctl_io *)ctsio); 5507 return (CTL_RETVAL_COMPLETE); 5508 } 5509 5510 len = scsi_3btoul(cdb->length); 5511 buffer_offset = scsi_3btoul(cdb->offset); 5512 5513 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5514 ctl_set_invalid_field(ctsio, 5515 /*sks_valid*/ 1, 5516 /*command*/ 1, 5517 /*field*/ 6, 5518 /*bit_valid*/ 0, 5519 /*bit*/ 0); 5520 ctl_done((union ctl_io *)ctsio); 5521 return (CTL_RETVAL_COMPLETE); 5522 } 5523 5524 if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5525 descr[0] = 0; 5526 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5527 ctsio->kern_data_ptr = descr; 5528 len = min(len, sizeof(descr)); 5529 } else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5530 ctsio->kern_data_ptr = echo_descr; 5531 len = min(len, sizeof(echo_descr)); 5532 } else { 5533 if (lun->write_buffer == NULL) { 5534 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5535 M_CTL, M_WAITOK); 5536 } 5537 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5538 } 5539 ctsio->kern_data_len = len; 5540 ctsio->kern_total_len = len; 5541 ctsio->kern_data_resid = 0; 5542 ctsio->kern_rel_offset = 0; 5543 ctsio->kern_sg_entries = 0; 5544 ctl_set_success(ctsio); 5545 ctsio->be_move_done = ctl_config_move_done; 5546 ctl_datamove((union ctl_io *)ctsio); 5547 return (CTL_RETVAL_COMPLETE); 5548 } 5549 5550 int 5551 ctl_write_buffer(struct ctl_scsiio *ctsio) 5552 { 5553 struct scsi_write_buffer *cdb; 5554 struct ctl_lun *lun; 5555 int buffer_offset, len; 5556 5557 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5558 5559 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5560 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5561 5562 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) { 5563 ctl_set_invalid_field(ctsio, 5564 /*sks_valid*/ 1, 5565 /*command*/ 1, 5566 /*field*/ 1, 5567 /*bit_valid*/ 1, 5568 /*bit*/ 4); 5569 ctl_done((union ctl_io *)ctsio); 5570 return (CTL_RETVAL_COMPLETE); 5571 } 5572 5573 len = scsi_3btoul(cdb->length); 5574 buffer_offset = scsi_3btoul(cdb->offset); 5575 5576 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5577 ctl_set_invalid_field(ctsio, 5578 /*sks_valid*/ 1, 5579 /*command*/ 1, 5580 /*field*/ 6, 5581 /*bit_valid*/ 0, 5582 /*bit*/ 0); 5583 ctl_done((union ctl_io *)ctsio); 5584 return (CTL_RETVAL_COMPLETE); 5585 } 5586 5587 /* 5588 * If we've got a kernel request that hasn't been malloced yet, 5589 * malloc it and tell the caller the data buffer is here. 
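* The command is handled in two passes: the first pass allocates lun->write_buffer (if needed) and starts ctl_datamove(); once the data has been transferred, ctl_config_move_done() re-enters ctl_scsiio() and the second pass falls through to report success, leaving the data in place for a later READ BUFFER.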
5590 */ 5591 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5592 if (lun->write_buffer == NULL) { 5593 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5594 M_CTL, M_WAITOK); 5595 } 5596 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5597 ctsio->kern_data_len = len; 5598 ctsio->kern_total_len = len; 5599 ctsio->kern_data_resid = 0; 5600 ctsio->kern_rel_offset = 0; 5601 ctsio->kern_sg_entries = 0; 5602 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5603 ctsio->be_move_done = ctl_config_move_done; 5604 ctl_datamove((union ctl_io *)ctsio); 5605 5606 return (CTL_RETVAL_COMPLETE); 5607 } 5608 5609 ctl_set_success(ctsio); 5610 ctl_done((union ctl_io *)ctsio); 5611 return (CTL_RETVAL_COMPLETE); 5612 } 5613 5614 int 5615 ctl_write_same(struct ctl_scsiio *ctsio) 5616 { 5617 struct ctl_lun *lun; 5618 struct ctl_lba_len_flags *lbalen; 5619 uint64_t lba; 5620 uint32_t num_blocks; 5621 int len, retval; 5622 uint8_t byte2; 5623 5624 retval = CTL_RETVAL_COMPLETE; 5625 5626 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5627 5628 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5629 5630 switch (ctsio->cdb[0]) { 5631 case WRITE_SAME_10: { 5632 struct scsi_write_same_10 *cdb; 5633 5634 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5635 5636 lba = scsi_4btoul(cdb->addr); 5637 num_blocks = scsi_2btoul(cdb->length); 5638 byte2 = cdb->byte2; 5639 break; 5640 } 5641 case WRITE_SAME_16: { 5642 struct scsi_write_same_16 *cdb; 5643 5644 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5645 5646 lba = scsi_8btou64(cdb->addr); 5647 num_blocks = scsi_4btoul(cdb->length); 5648 byte2 = cdb->byte2; 5649 break; 5650 } 5651 default: 5652 /* 5653 * We got a command we don't support. This shouldn't 5654 * happen, commands should be filtered out above us. 5655 */ 5656 ctl_set_invalid_opcode(ctsio); 5657 ctl_done((union ctl_io *)ctsio); 5658 5659 return (CTL_RETVAL_COMPLETE); 5660 break; /* NOTREACHED */ 5661 } 5662 5663 /* NDOB and ANCHOR flags can be used only together with UNMAP */ 5664 if ((byte2 & SWS_UNMAP) == 0 && 5665 (byte2 & (SWS_NDOB | SWS_ANCHOR)) != 0) { 5666 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5667 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5668 ctl_done((union ctl_io *)ctsio); 5669 return (CTL_RETVAL_COMPLETE); 5670 } 5671 5672 /* 5673 * The first check is to make sure we're in bounds, the second 5674 * check is to catch wrap-around problems. If the lba + num blocks 5675 * is less than the lba, then we've wrapped around and the block 5676 * range is invalid anyway. 5677 */ 5678 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5679 || ((lba + num_blocks) < lba)) { 5680 ctl_set_lba_out_of_range(ctsio); 5681 ctl_done((union ctl_io *)ctsio); 5682 return (CTL_RETVAL_COMPLETE); 5683 } 5684 5685 /* Zero number of blocks means "to the last logical block" */ 5686 if (num_blocks == 0) { 5687 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5688 ctl_set_invalid_field(ctsio, 5689 /*sks_valid*/ 0, 5690 /*command*/ 1, 5691 /*field*/ 0, 5692 /*bit_valid*/ 0, 5693 /*bit*/ 0); 5694 ctl_done((union ctl_io *)ctsio); 5695 return (CTL_RETVAL_COMPLETE); 5696 } 5697 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5698 } 5699 5700 len = lun->be_lun->blocksize; 5701 5702 /* 5703 * If we've got a kernel request that hasn't been malloced yet, 5704 * malloc it and tell the caller the data buffer is here. 
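* With the NDOB (no data-out buffer) bit set there is no parameter data to fetch, so we skip the allocation and hand the request straight to the backend.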
5705 */ 5706 if ((byte2 & SWS_NDOB) == 0 && 5707 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5708 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5709 ctsio->kern_data_len = len; 5710 ctsio->kern_total_len = len; 5711 ctsio->kern_data_resid = 0; 5712 ctsio->kern_rel_offset = 0; 5713 ctsio->kern_sg_entries = 0; 5714 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5715 ctsio->be_move_done = ctl_config_move_done; 5716 ctl_datamove((union ctl_io *)ctsio); 5717 5718 return (CTL_RETVAL_COMPLETE); 5719 } 5720 5721 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5722 lbalen->lba = lba; 5723 lbalen->len = num_blocks; 5724 lbalen->flags = byte2; 5725 retval = lun->backend->config_write((union ctl_io *)ctsio); 5726 5727 return (retval); 5728 } 5729 5730 int 5731 ctl_unmap(struct ctl_scsiio *ctsio) 5732 { 5733 struct ctl_lun *lun; 5734 struct scsi_unmap *cdb; 5735 struct ctl_ptr_len_flags *ptrlen; 5736 struct scsi_unmap_header *hdr; 5737 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5738 uint64_t lba; 5739 uint32_t num_blocks; 5740 int len, retval; 5741 uint8_t byte2; 5742 5743 retval = CTL_RETVAL_COMPLETE; 5744 5745 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5746 5747 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5748 cdb = (struct scsi_unmap *)ctsio->cdb; 5749 5750 len = scsi_2btoul(cdb->length); 5751 byte2 = cdb->byte2; 5752 5753 /* 5754 * If we've got a kernel request that hasn't been malloced yet, 5755 * malloc it and tell the caller the data buffer is here. 5756 */ 5757 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5758 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5759 ctsio->kern_data_len = len; 5760 ctsio->kern_total_len = len; 5761 ctsio->kern_data_resid = 0; 5762 ctsio->kern_rel_offset = 0; 5763 ctsio->kern_sg_entries = 0; 5764 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5765 ctsio->be_move_done = ctl_config_move_done; 5766 ctl_datamove((union ctl_io *)ctsio); 5767 5768 return (CTL_RETVAL_COMPLETE); 5769 } 5770 5771 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5772 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5773 if (len < sizeof (*hdr) || 5774 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5775 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5776 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5777 ctl_set_invalid_field(ctsio, 5778 /*sks_valid*/ 0, 5779 /*command*/ 0, 5780 /*field*/ 0, 5781 /*bit_valid*/ 0, 5782 /*bit*/ 0); 5783 goto done; 5784 } 5785 len = scsi_2btoul(hdr->desc_length); 5786 buf = (struct scsi_unmap_desc *)(hdr + 1); 5787 end = buf + len / sizeof(*buf); 5788 5789 endnz = buf; 5790 for (range = buf; range < end; range++) { 5791 lba = scsi_8btou64(range->lba); 5792 num_blocks = scsi_4btoul(range->length); 5793 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5794 || ((lba + num_blocks) < lba)) { 5795 ctl_set_lba_out_of_range(ctsio); 5796 ctl_done((union ctl_io *)ctsio); 5797 return (CTL_RETVAL_COMPLETE); 5798 } 5799 if (num_blocks != 0) 5800 endnz = range + 1; 5801 } 5802 5803 /* 5804 * Block backend cannot handle a zero-length last range. 5805 * Filter it out and return if there is nothing left.
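* (endnz points just past the last descriptor with a non-zero block count, so the subtraction below yields the length of the useful part of the descriptor list.)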
5806 */ 5807 len = (uint8_t *)endnz - (uint8_t *)buf; 5808 if (len == 0) { 5809 ctl_set_success(ctsio); 5810 goto done; 5811 } 5812 5813 mtx_lock(&lun->lun_lock); 5814 ptrlen = (struct ctl_ptr_len_flags *) 5815 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5816 ptrlen->ptr = (void *)buf; 5817 ptrlen->len = len; 5818 ptrlen->flags = byte2; 5819 ctl_check_blocked(lun); 5820 mtx_unlock(&lun->lun_lock); 5821 5822 retval = lun->backend->config_write((union ctl_io *)ctsio); 5823 return (retval); 5824 5825 done: 5826 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5827 free(ctsio->kern_data_ptr, M_CTL); 5828 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5829 } 5830 ctl_done((union ctl_io *)ctsio); 5831 return (CTL_RETVAL_COMPLETE); 5832 } 5833 5834 /* 5835 * Note that this function currently doesn't actually do anything inside 5836 * CTL to enforce things if the DQue bit is turned on. 5837 * 5838 * Also note that this function can't be used in the default case, because 5839 * the DQue bit isn't set in the changeable mask for the control mode page 5840 * anyway. This is just here as an example for how to implement a page 5841 * handler, and a placeholder in case we want to allow the user to turn 5842 * tagged queueing on and off. 5843 * 5844 * The D_SENSE bit handling is functional, however, and will turn 5845 * descriptor sense on and off for a given LUN. 5846 */ 5847 int 5848 ctl_control_page_handler(struct ctl_scsiio *ctsio, 5849 struct ctl_page_index *page_index, uint8_t *page_ptr) 5850 { 5851 struct scsi_control_page *current_cp, *saved_cp, *user_cp; 5852 struct ctl_lun *lun; 5853 int set_ua; 5854 uint32_t initidx; 5855 5856 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5857 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5858 set_ua = 0; 5859 5860 user_cp = (struct scsi_control_page *)page_ptr; 5861 current_cp = (struct scsi_control_page *) 5862 (page_index->page_data + (page_index->page_len * 5863 CTL_PAGE_CURRENT)); 5864 saved_cp = (struct scsi_control_page *) 5865 (page_index->page_data + (page_index->page_len * 5866 CTL_PAGE_SAVED)); 5867 5868 mtx_lock(&lun->lun_lock); 5869 if (((current_cp->rlec & SCP_DSENSE) == 0) 5870 && ((user_cp->rlec & SCP_DSENSE) != 0)) { 5871 /* 5872 * Descriptor sense is currently turned off and the user 5873 * wants to turn it on. 5874 */ 5875 current_cp->rlec |= SCP_DSENSE; 5876 saved_cp->rlec |= SCP_DSENSE; 5877 lun->flags |= CTL_LUN_SENSE_DESC; 5878 set_ua = 1; 5879 } else if (((current_cp->rlec & SCP_DSENSE) != 0) 5880 && ((user_cp->rlec & SCP_DSENSE) == 0)) { 5881 /* 5882 * Descriptor sense is currently turned on, and the user 5883 * wants to turn it off. 
5884 */ 5885 current_cp->rlec &= ~SCP_DSENSE; 5886 saved_cp->rlec &= ~SCP_DSENSE; 5887 lun->flags &= ~CTL_LUN_SENSE_DESC; 5888 set_ua = 1; 5889 } 5890 if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) != 5891 (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) { 5892 current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; 5893 current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; 5894 saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; 5895 saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; 5896 set_ua = 1; 5897 } 5898 if ((current_cp->eca_and_aen & SCP_SWP) != 5899 (user_cp->eca_and_aen & SCP_SWP)) { 5900 current_cp->eca_and_aen &= ~SCP_SWP; 5901 current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; 5902 saved_cp->eca_and_aen &= ~SCP_SWP; 5903 saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; 5904 set_ua = 1; 5905 } 5906 if (set_ua != 0) 5907 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5908 mtx_unlock(&lun->lun_lock); 5909 5910 return (0); 5911 } 5912 5913 int 5914 ctl_caching_sp_handler(struct ctl_scsiio *ctsio, 5915 struct ctl_page_index *page_index, uint8_t *page_ptr) 5916 { 5917 struct scsi_caching_page *current_cp, *saved_cp, *user_cp; 5918 struct ctl_lun *lun; 5919 int set_ua; 5920 uint32_t initidx; 5921 5922 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5923 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5924 set_ua = 0; 5925 5926 user_cp = (struct scsi_caching_page *)page_ptr; 5927 current_cp = (struct scsi_caching_page *) 5928 (page_index->page_data + (page_index->page_len * 5929 CTL_PAGE_CURRENT)); 5930 saved_cp = (struct scsi_caching_page *) 5931 (page_index->page_data + (page_index->page_len * 5932 CTL_PAGE_SAVED)); 5933 5934 mtx_lock(&lun->lun_lock); 5935 if ((current_cp->flags1 & (SCP_WCE | SCP_RCD)) != 5936 (user_cp->flags1 & (SCP_WCE | SCP_RCD))) { 5937 current_cp->flags1 &= ~(SCP_WCE | SCP_RCD); 5938 current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); 5939 saved_cp->flags1 &= ~(SCP_WCE | SCP_RCD); 5940 saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); 5941 set_ua = 1; 5942 } 5943 if (set_ua != 0) 5944 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5945 mtx_unlock(&lun->lun_lock); 5946 5947 return (0); 5948 } 5949 5950 int 5951 ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio, 5952 struct ctl_page_index *page_index, 5953 uint8_t *page_ptr) 5954 { 5955 uint8_t *c; 5956 int i; 5957 5958 c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs; 5959 ctl_time_io_secs = 5960 (c[0] << 8) | 5961 (c[1] << 0) | 5962 0; 5963 CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs)); 5964 printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs); 5965 printf("page data:"); 5966 for (i=0; i<8; i++) 5967 printf(" %.2x",page_ptr[i]); 5968 printf("\n"); 5969 return (0); 5970 } 5971 5972 int 5973 ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio, 5974 struct ctl_page_index *page_index, 5975 int pc) 5976 { 5977 struct copan_debugconf_subpage *page; 5978 5979 page = (struct copan_debugconf_subpage *)page_index->page_data + 5980 (page_index->page_len * pc); 5981 5982 switch (pc) { 5983 case SMS_PAGE_CTRL_CHANGEABLE >> 6: 5984 case SMS_PAGE_CTRL_DEFAULT >> 6: 5985 case SMS_PAGE_CTRL_SAVED >> 6: 5986 /* 5987 * We don't update the changable or default bits for this page. 
5988 */ 5989 break; 5990 case SMS_PAGE_CTRL_CURRENT >> 6: 5991 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8; 5992 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0; 5993 break; 5994 default: 5995 #ifdef NEEDTOPORT 5996 EPRINT(0, "Invalid PC %d!!", pc); 5997 #endif /* NEEDTOPORT */ 5998 break; 5999 } 6000 return (0); 6001 } 6002 6003 6004 static int 6005 ctl_do_mode_select(union ctl_io *io) 6006 { 6007 struct scsi_mode_page_header *page_header; 6008 struct ctl_page_index *page_index; 6009 struct ctl_scsiio *ctsio; 6010 int control_dev, page_len; 6011 int page_len_offset, page_len_size; 6012 union ctl_modepage_info *modepage_info; 6013 struct ctl_lun *lun; 6014 int *len_left, *len_used; 6015 int retval, i; 6016 6017 ctsio = &io->scsiio; 6018 page_index = NULL; 6019 page_len = 0; 6020 retval = CTL_RETVAL_COMPLETE; 6021 6022 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6023 6024 if (lun->be_lun->lun_type != T_DIRECT) 6025 control_dev = 1; 6026 else 6027 control_dev = 0; 6028 6029 modepage_info = (union ctl_modepage_info *) 6030 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6031 len_left = &modepage_info->header.len_left; 6032 len_used = &modepage_info->header.len_used; 6033 6034 do_next_page: 6035 6036 page_header = (struct scsi_mode_page_header *) 6037 (ctsio->kern_data_ptr + *len_used); 6038 6039 if (*len_left == 0) { 6040 free(ctsio->kern_data_ptr, M_CTL); 6041 ctl_set_success(ctsio); 6042 ctl_done((union ctl_io *)ctsio); 6043 return (CTL_RETVAL_COMPLETE); 6044 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 6045 6046 free(ctsio->kern_data_ptr, M_CTL); 6047 ctl_set_param_len_error(ctsio); 6048 ctl_done((union ctl_io *)ctsio); 6049 return (CTL_RETVAL_COMPLETE); 6050 6051 } else if ((page_header->page_code & SMPH_SPF) 6052 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6053 6054 free(ctsio->kern_data_ptr, M_CTL); 6055 ctl_set_param_len_error(ctsio); 6056 ctl_done((union ctl_io *)ctsio); 6057 return (CTL_RETVAL_COMPLETE); 6058 } 6059 6060 6061 /* 6062 * XXX KDM should we do something with the block descriptor? 6063 */ 6064 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6065 6066 if ((control_dev != 0) 6067 && (lun->mode_pages.index[i].page_flags & 6068 CTL_PAGE_FLAG_DISK_ONLY)) 6069 continue; 6070 6071 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 6072 (page_header->page_code & SMPH_PC_MASK)) 6073 continue; 6074 6075 /* 6076 * If neither page has a subpage code, then we've got a 6077 * match. 6078 */ 6079 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0) 6080 && ((page_header->page_code & SMPH_SPF) == 0)) { 6081 page_index = &lun->mode_pages.index[i]; 6082 page_len = page_header->page_length; 6083 break; 6084 } 6085 6086 /* 6087 * If both pages have subpages, then the subpage numbers 6088 * have to match. 6089 */ 6090 if ((lun->mode_pages.index[i].page_code & SMPH_SPF) 6091 && (page_header->page_code & SMPH_SPF)) { 6092 struct scsi_mode_page_header_sp *sph; 6093 6094 sph = (struct scsi_mode_page_header_sp *)page_header; 6095 6096 if (lun->mode_pages.index[i].subpage == 6097 sph->subpage) { 6098 page_index = &lun->mode_pages.index[i]; 6099 page_len = scsi_2btoul(sph->page_length); 6100 break; 6101 } 6102 } 6103 } 6104 6105 /* 6106 * If we couldn't find the page, or if we don't have a mode select 6107 * handler for it, send back an error to the user. 
6108 */ 6109 if ((page_index == NULL) 6110 || (page_index->select_handler == NULL)) { 6111 ctl_set_invalid_field(ctsio, 6112 /*sks_valid*/ 1, 6113 /*command*/ 0, 6114 /*field*/ *len_used, 6115 /*bit_valid*/ 0, 6116 /*bit*/ 0); 6117 free(ctsio->kern_data_ptr, M_CTL); 6118 ctl_done((union ctl_io *)ctsio); 6119 return (CTL_RETVAL_COMPLETE); 6120 } 6121 6122 if (page_index->page_code & SMPH_SPF) { 6123 page_len_offset = 2; 6124 page_len_size = 2; 6125 } else { 6126 page_len_size = 1; 6127 page_len_offset = 1; 6128 } 6129 6130 /* 6131 * If the length the initiator gives us isn't the one we specify in 6132 * the mode page header, or if they didn't specify enough data in 6133 * the CDB to avoid truncating this page, kick out the request. 6134 */ 6135 if ((page_len != (page_index->page_len - page_len_offset - 6136 page_len_size)) 6137 || (*len_left < page_index->page_len)) { 6138 6139 6140 ctl_set_invalid_field(ctsio, 6141 /*sks_valid*/ 1, 6142 /*command*/ 0, 6143 /*field*/ *len_used + page_len_offset, 6144 /*bit_valid*/ 0, 6145 /*bit*/ 0); 6146 free(ctsio->kern_data_ptr, M_CTL); 6147 ctl_done((union ctl_io *)ctsio); 6148 return (CTL_RETVAL_COMPLETE); 6149 } 6150 6151 /* 6152 * Run through the mode page, checking to make sure that the bits 6153 * the user changed are actually legal for him to change. 6154 */ 6155 for (i = 0; i < page_index->page_len; i++) { 6156 uint8_t *user_byte, *change_mask, *current_byte; 6157 int bad_bit; 6158 int j; 6159 6160 user_byte = (uint8_t *)page_header + i; 6161 change_mask = page_index->page_data + 6162 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6163 current_byte = page_index->page_data + 6164 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6165 6166 /* 6167 * Check to see whether the user set any bits in this byte 6168 * that he is not allowed to set. 6169 */ 6170 if ((*user_byte & ~(*change_mask)) == 6171 (*current_byte & ~(*change_mask))) 6172 continue; 6173 6174 /* 6175 * Go through bit by bit to determine which one is illegal. 6176 */ 6177 bad_bit = 0; 6178 for (j = 7; j >= 0; j--) { 6179 if ((((1 << j) & ~(*change_mask)) & *user_byte) != 6180 (((1 << j) & ~(*change_mask)) & *current_byte)) { 6181 bad_bit = j; 6182 break; 6183 } 6184 } 6185 ctl_set_invalid_field(ctsio, 6186 /*sks_valid*/ 1, 6187 /*command*/ 0, 6188 /*field*/ *len_used + i, 6189 /*bit_valid*/ 1, 6190 /*bit*/ bad_bit); 6191 free(ctsio->kern_data_ptr, M_CTL); 6192 ctl_done((union ctl_io *)ctsio); 6193 return (CTL_RETVAL_COMPLETE); 6194 } 6195 6196 /* 6197 * Decrement these before we call the page handler, since we may 6198 * end up getting called back one way or another before the handler 6199 * returns to this context. 6200 */ 6201 *len_left -= page_index->page_len; 6202 *len_used += page_index->page_len; 6203 6204 retval = page_index->select_handler(ctsio, page_index, 6205 (uint8_t *)page_header); 6206 6207 /* 6208 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 6209 * wait until this queued command completes to finish processing 6210 * the mode page. If it returns anything other than 6211 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 6212 * already set the sense information, freed the data pointer, and 6213 * completed the io for us. 6214 */ 6215 if (retval != CTL_RETVAL_COMPLETE) 6216 goto bailout_no_done; 6217 6218 /* 6219 * If the initiator sent us more than one page, parse the next one.
6220 */ 6221 if (*len_left > 0) 6222 goto do_next_page; 6223 6224 ctl_set_success(ctsio); 6225 free(ctsio->kern_data_ptr, M_CTL); 6226 ctl_done((union ctl_io *)ctsio); 6227 6228 bailout_no_done: 6229 6230 return (CTL_RETVAL_COMPLETE); 6231 6232 } 6233 6234 int 6235 ctl_mode_select(struct ctl_scsiio *ctsio) 6236 { 6237 int param_len, pf, sp; 6238 int header_size, bd_len; 6239 int len_left, len_used; 6240 struct ctl_page_index *page_index; 6241 struct ctl_lun *lun; 6242 int control_dev, page_len; 6243 union ctl_modepage_info *modepage_info; 6244 int retval; 6245 6246 pf = 0; 6247 sp = 0; 6248 page_len = 0; 6249 len_used = 0; 6250 len_left = 0; 6251 retval = 0; 6252 bd_len = 0; 6253 page_index = NULL; 6254 6255 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6256 6257 if (lun->be_lun->lun_type != T_DIRECT) 6258 control_dev = 1; 6259 else 6260 control_dev = 0; 6261 6262 switch (ctsio->cdb[0]) { 6263 case MODE_SELECT_6: { 6264 struct scsi_mode_select_6 *cdb; 6265 6266 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6267 6268 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6269 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6270 6271 param_len = cdb->length; 6272 header_size = sizeof(struct scsi_mode_header_6); 6273 break; 6274 } 6275 case MODE_SELECT_10: { 6276 struct scsi_mode_select_10 *cdb; 6277 6278 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6279 6280 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6281 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6282 6283 param_len = scsi_2btoul(cdb->length); 6284 header_size = sizeof(struct scsi_mode_header_10); 6285 break; 6286 } 6287 default: 6288 ctl_set_invalid_opcode(ctsio); 6289 ctl_done((union ctl_io *)ctsio); 6290 return (CTL_RETVAL_COMPLETE); 6291 break; /* NOTREACHED */ 6292 } 6293 6294 /* 6295 * From SPC-3: 6296 * "A parameter list length of zero indicates that the Data-Out Buffer 6297 * shall be empty. This condition shall not be considered as an error." 6298 */ 6299 if (param_len == 0) { 6300 ctl_set_success(ctsio); 6301 ctl_done((union ctl_io *)ctsio); 6302 return (CTL_RETVAL_COMPLETE); 6303 } 6304 6305 /* 6306 * Since we'll hit this the first time through, prior to 6307 * allocation, we don't need to free a data buffer here. 6308 */ 6309 if (param_len < header_size) { 6310 ctl_set_param_len_error(ctsio); 6311 ctl_done((union ctl_io *)ctsio); 6312 return (CTL_RETVAL_COMPLETE); 6313 } 6314 6315 /* 6316 * Allocate the data buffer and grab the user's data. In theory, 6317 * we shouldn't have to sanity check the parameter list length here 6318 * because the maximum size is 64K. We should be able to malloc 6319 * that much without too many problems. 
6320 */ 6321 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6322 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6323 ctsio->kern_data_len = param_len; 6324 ctsio->kern_total_len = param_len; 6325 ctsio->kern_data_resid = 0; 6326 ctsio->kern_rel_offset = 0; 6327 ctsio->kern_sg_entries = 0; 6328 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6329 ctsio->be_move_done = ctl_config_move_done; 6330 ctl_datamove((union ctl_io *)ctsio); 6331 6332 return (CTL_RETVAL_COMPLETE); 6333 } 6334 6335 switch (ctsio->cdb[0]) { 6336 case MODE_SELECT_6: { 6337 struct scsi_mode_header_6 *mh6; 6338 6339 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6340 bd_len = mh6->blk_desc_len; 6341 break; 6342 } 6343 case MODE_SELECT_10: { 6344 struct scsi_mode_header_10 *mh10; 6345 6346 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6347 bd_len = scsi_2btoul(mh10->blk_desc_len); 6348 break; 6349 } 6350 default: 6351 panic("Invalid CDB type %#x", ctsio->cdb[0]); 6352 break; 6353 } 6354 6355 if (param_len < (header_size + bd_len)) { 6356 free(ctsio->kern_data_ptr, M_CTL); 6357 ctl_set_param_len_error(ctsio); 6358 ctl_done((union ctl_io *)ctsio); 6359 return (CTL_RETVAL_COMPLETE); 6360 } 6361 6362 /* 6363 * Set the IO_CONT flag, so that if this I/O gets passed to 6364 * ctl_config_write_done(), it'll get passed back to 6365 * ctl_do_mode_select() for further processing, or completion if 6366 * we're all done. 6367 */ 6368 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6369 ctsio->io_cont = ctl_do_mode_select; 6370 6371 modepage_info = (union ctl_modepage_info *) 6372 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6373 6374 memset(modepage_info, 0, sizeof(*modepage_info)); 6375 6376 len_left = param_len - header_size - bd_len; 6377 len_used = header_size + bd_len; 6378 6379 modepage_info->header.len_left = len_left; 6380 modepage_info->header.len_used = len_used; 6381 6382 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6383 } 6384 6385 int 6386 ctl_mode_sense(struct ctl_scsiio *ctsio) 6387 { 6388 struct ctl_lun *lun; 6389 int pc, page_code, dbd, llba, subpage; 6390 int alloc_len, page_len, header_len, total_len; 6391 struct scsi_mode_block_descr *block_desc; 6392 struct ctl_page_index *page_index; 6393 int control_dev; 6394 6395 dbd = 0; 6396 llba = 0; 6397 block_desc = NULL; 6398 page_index = NULL; 6399 6400 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6401 6402 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6403 6404 if (lun->be_lun->lun_type != T_DIRECT) 6405 control_dev = 1; 6406 else 6407 control_dev = 0; 6408 6409 switch (ctsio->cdb[0]) { 6410 case MODE_SENSE_6: { 6411 struct scsi_mode_sense_6 *cdb; 6412 6413 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6414 6415 header_len = sizeof(struct scsi_mode_hdr_6); 6416 if (cdb->byte2 & SMS_DBD) 6417 dbd = 1; 6418 else 6419 header_len += sizeof(struct scsi_mode_block_descr); 6420 6421 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6422 page_code = cdb->page & SMS_PAGE_CODE; 6423 subpage = cdb->subpage; 6424 alloc_len = cdb->length; 6425 break; 6426 } 6427 case MODE_SENSE_10: { 6428 struct scsi_mode_sense_10 *cdb; 6429 6430 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6431 6432 header_len = sizeof(struct scsi_mode_hdr_10); 6433 6434 if (cdb->byte2 & SMS_DBD) 6435 dbd = 1; 6436 else 6437 header_len += sizeof(struct scsi_mode_block_descr); 6438 if (cdb->byte2 & SMS10_LLBAA) 6439 llba = 1; 6440 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6441 page_code = cdb->page & SMS_PAGE_CODE; 6442 subpage = cdb->subpage; 6443 alloc_len 
= scsi_2btoul(cdb->length); 6444 break; 6445 } 6446 default: 6447 ctl_set_invalid_opcode(ctsio); 6448 ctl_done((union ctl_io *)ctsio); 6449 return (CTL_RETVAL_COMPLETE); 6450 break; /* NOTREACHED */ 6451 } 6452 6453 /* 6454 * We have to make a first pass through to calculate the size of 6455 * the pages that match the user's query. Then we allocate enough 6456 * memory to hold it, and actually copy the data into the buffer. 6457 */ 6458 switch (page_code) { 6459 case SMS_ALL_PAGES_PAGE: { 6460 int i; 6461 6462 page_len = 0; 6463 6464 /* 6465 * At the moment, values other than 0 and 0xff here are 6466 * reserved according to SPC-3. 6467 */ 6468 if ((subpage != SMS_SUBPAGE_PAGE_0) 6469 && (subpage != SMS_SUBPAGE_ALL)) { 6470 ctl_set_invalid_field(ctsio, 6471 /*sks_valid*/ 1, 6472 /*command*/ 1, 6473 /*field*/ 3, 6474 /*bit_valid*/ 0, 6475 /*bit*/ 0); 6476 ctl_done((union ctl_io *)ctsio); 6477 return (CTL_RETVAL_COMPLETE); 6478 } 6479 6480 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6481 if ((control_dev != 0) 6482 && (lun->mode_pages.index[i].page_flags & 6483 CTL_PAGE_FLAG_DISK_ONLY)) 6484 continue; 6485 6486 /* 6487 * We don't use this subpage if the user didn't 6488 * request all subpages. 6489 */ 6490 if ((lun->mode_pages.index[i].subpage != 0) 6491 && (subpage == SMS_SUBPAGE_PAGE_0)) 6492 continue; 6493 6494 #if 0 6495 printf("found page %#x len %d\n", 6496 lun->mode_pages.index[i].page_code & 6497 SMPH_PC_MASK, 6498 lun->mode_pages.index[i].page_len); 6499 #endif 6500 page_len += lun->mode_pages.index[i].page_len; 6501 } 6502 break; 6503 } 6504 default: { 6505 int i; 6506 6507 page_len = 0; 6508 6509 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6510 /* Look for the right page code */ 6511 if ((lun->mode_pages.index[i].page_code & 6512 SMPH_PC_MASK) != page_code) 6513 continue; 6514 6515 /* Look for the right subpage or the subpage wildcard*/ 6516 if ((lun->mode_pages.index[i].subpage != subpage) 6517 && (subpage != SMS_SUBPAGE_ALL)) 6518 continue; 6519 6520 /* Make sure the page is supported for this dev type */ 6521 if ((control_dev != 0) 6522 && (lun->mode_pages.index[i].page_flags & 6523 CTL_PAGE_FLAG_DISK_ONLY)) 6524 continue; 6525 6526 #if 0 6527 printf("found page %#x len %d\n", 6528 lun->mode_pages.index[i].page_code & 6529 SMPH_PC_MASK, 6530 lun->mode_pages.index[i].page_len); 6531 #endif 6532 6533 page_len += lun->mode_pages.index[i].page_len; 6534 } 6535 6536 if (page_len == 0) { 6537 ctl_set_invalid_field(ctsio, 6538 /*sks_valid*/ 1, 6539 /*command*/ 1, 6540 /*field*/ 2, 6541 /*bit_valid*/ 1, 6542 /*bit*/ 5); 6543 ctl_done((union ctl_io *)ctsio); 6544 return (CTL_RETVAL_COMPLETE); 6545 } 6546 break; 6547 } 6548 } 6549 6550 total_len = header_len + page_len; 6551 #if 0 6552 printf("header_len = %d, page_len = %d, total_len = %d\n", 6553 header_len, page_len, total_len); 6554 #endif 6555 6556 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6557 ctsio->kern_sg_entries = 0; 6558 ctsio->kern_data_resid = 0; 6559 ctsio->kern_rel_offset = 0; 6560 if (total_len < alloc_len) { 6561 ctsio->residual = alloc_len - total_len; 6562 ctsio->kern_data_len = total_len; 6563 ctsio->kern_total_len = total_len; 6564 } else { 6565 ctsio->residual = 0; 6566 ctsio->kern_data_len = alloc_len; 6567 ctsio->kern_total_len = alloc_len; 6568 } 6569 6570 switch (ctsio->cdb[0]) { 6571 case MODE_SENSE_6: { 6572 struct scsi_mode_hdr_6 *header; 6573 6574 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6575 6576 header->datalen = MIN(total_len - 1, 254); 6577 if (control_dev == 0) { 
6578 header->dev_specific = 0x10; /* DPOFUA */ 6579 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6580 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6581 .eca_and_aen & SCP_SWP) != 0) 6582 header->dev_specific |= 0x80; /* WP */ 6583 } 6584 if (dbd) 6585 header->block_descr_len = 0; 6586 else 6587 header->block_descr_len = 6588 sizeof(struct scsi_mode_block_descr); 6589 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6590 break; 6591 } 6592 case MODE_SENSE_10: { 6593 struct scsi_mode_hdr_10 *header; 6594 int datalen; 6595 6596 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6597 6598 datalen = MIN(total_len - 2, 65533); 6599 scsi_ulto2b(datalen, header->datalen); 6600 if (control_dev == 0) { 6601 header->dev_specific = 0x10; /* DPOFUA */ 6602 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6603 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6604 .eca_and_aen & SCP_SWP) != 0) 6605 header->dev_specific |= 0x80; /* WP */ 6606 } 6607 if (dbd) 6608 scsi_ulto2b(0, header->block_descr_len); 6609 else 6610 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6611 header->block_descr_len); 6612 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6613 break; 6614 } 6615 default: 6616 panic("invalid CDB type %#x", ctsio->cdb[0]); 6617 break; /* NOTREACHED */ 6618 } 6619 6620 /* 6621 * If we've got a disk, use its blocksize in the block 6622 * descriptor. Otherwise, just set it to 0. 6623 */ 6624 if (dbd == 0) { 6625 if (control_dev == 0) 6626 scsi_ulto3b(lun->be_lun->blocksize, 6627 block_desc->block_len); 6628 else 6629 scsi_ulto3b(0, block_desc->block_len); 6630 } 6631 6632 switch (page_code) { 6633 case SMS_ALL_PAGES_PAGE: { 6634 int i, data_used; 6635 6636 data_used = header_len; 6637 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6638 struct ctl_page_index *page_index; 6639 6640 page_index = &lun->mode_pages.index[i]; 6641 6642 if ((control_dev != 0) 6643 && (page_index->page_flags & 6644 CTL_PAGE_FLAG_DISK_ONLY)) 6645 continue; 6646 6647 /* 6648 * We don't use this subpage if the user didn't 6649 * request all subpages. We already checked (above) 6650 * to make sure the user only specified a subpage 6651 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6652 */ 6653 if ((page_index->subpage != 0) 6654 && (subpage == SMS_SUBPAGE_PAGE_0)) 6655 continue; 6656 6657 /* 6658 * Call the handler, if it exists, to update the 6659 * page to the latest values. 6660 */ 6661 if (page_index->sense_handler != NULL) 6662 page_index->sense_handler(ctsio, page_index,pc); 6663 6664 memcpy(ctsio->kern_data_ptr + data_used, 6665 page_index->page_data + 6666 (page_index->page_len * pc), 6667 page_index->page_len); 6668 data_used += page_index->page_len; 6669 } 6670 break; 6671 } 6672 default: { 6673 int i, data_used; 6674 6675 data_used = header_len; 6676 6677 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6678 struct ctl_page_index *page_index; 6679 6680 page_index = &lun->mode_pages.index[i]; 6681 6682 /* Look for the right page code */ 6683 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6684 continue; 6685 6686 /* Look for the right subpage or the subpage wildcard*/ 6687 if ((page_index->subpage != subpage) 6688 && (subpage != SMS_SUBPAGE_ALL)) 6689 continue; 6690 6691 /* Make sure the page is supported for this dev type */ 6692 if ((control_dev != 0) 6693 && (page_index->page_flags & 6694 CTL_PAGE_FLAG_DISK_ONLY)) 6695 continue; 6696 6697 /* 6698 * Call the handler, if it exists, to update the 6699 * page to the latest values. 
6700 */ 6701 if (page_index->sense_handler != NULL) 6702 page_index->sense_handler(ctsio, page_index,pc); 6703 6704 memcpy(ctsio->kern_data_ptr + data_used, 6705 page_index->page_data + 6706 (page_index->page_len * pc), 6707 page_index->page_len); 6708 data_used += page_index->page_len; 6709 } 6710 break; 6711 } 6712 } 6713 6714 ctl_set_success(ctsio); 6715 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6716 ctsio->be_move_done = ctl_config_move_done; 6717 ctl_datamove((union ctl_io *)ctsio); 6718 return (CTL_RETVAL_COMPLETE); 6719 } 6720 6721 int 6722 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6723 struct ctl_page_index *page_index, 6724 int pc) 6725 { 6726 struct ctl_lun *lun; 6727 struct scsi_log_param_header *phdr; 6728 uint8_t *data; 6729 uint64_t val; 6730 6731 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6732 data = page_index->page_data; 6733 6734 if (lun->backend->lun_attr != NULL && 6735 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) 6736 != UINT64_MAX) { 6737 phdr = (struct scsi_log_param_header *)data; 6738 scsi_ulto2b(0x0001, phdr->param_code); 6739 phdr->param_control = SLP_LBIN | SLP_LP; 6740 phdr->param_len = 8; 6741 data = (uint8_t *)(phdr + 1); 6742 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6743 data[4] = 0x02; /* per-pool */ 6744 data += phdr->param_len; 6745 } 6746 6747 if (lun->backend->lun_attr != NULL && 6748 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) 6749 != UINT64_MAX) { 6750 phdr = (struct scsi_log_param_header *)data; 6751 scsi_ulto2b(0x0002, phdr->param_code); 6752 phdr->param_control = SLP_LBIN | SLP_LP; 6753 phdr->param_len = 8; 6754 data = (uint8_t *)(phdr + 1); 6755 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6756 data[4] = 0x01; /* per-LUN */ 6757 data += phdr->param_len; 6758 } 6759 6760 if (lun->backend->lun_attr != NULL && 6761 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail")) 6762 != UINT64_MAX) { 6763 phdr = (struct scsi_log_param_header *)data; 6764 scsi_ulto2b(0x00f1, phdr->param_code); 6765 phdr->param_control = SLP_LBIN | SLP_LP; 6766 phdr->param_len = 8; 6767 data = (uint8_t *)(phdr + 1); 6768 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6769 data[4] = 0x02; /* per-pool */ 6770 data += phdr->param_len; 6771 } 6772 6773 if (lun->backend->lun_attr != NULL && 6774 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) 6775 != UINT64_MAX) { 6776 phdr = (struct scsi_log_param_header *)data; 6777 scsi_ulto2b(0x00f2, phdr->param_code); 6778 phdr->param_control = SLP_LBIN | SLP_LP; 6779 phdr->param_len = 8; 6780 data = (uint8_t *)(phdr + 1); 6781 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6782 data[4] = 0x02; /* per-pool */ 6783 data += phdr->param_len; 6784 } 6785 6786 page_index->page_len = data - page_index->page_data; 6787 return (0); 6788 } 6789 6790 int 6791 ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, 6792 struct ctl_page_index *page_index, 6793 int pc) 6794 { 6795 struct ctl_lun *lun; 6796 struct stat_page *data; 6797 uint64_t rn, wn, rb, wb; 6798 struct bintime rt, wt; 6799 int i; 6800 6801 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6802 data = (struct stat_page *)page_index->page_data; 6803 6804 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); 6805 data->sap.hdr.param_control = SLP_LBIN; 6806 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - 6807 sizeof(struct scsi_log_param_header); 6808 rn = wn = rb = wb = 0; 6809 bintime_clear(&rt); 6810 bintime_clear(&wt); 6811 for (i = 0; i < 
CTL_MAX_PORTS; i++) { 6812 rn += lun->stats.ports[i].operations[CTL_STATS_READ]; 6813 wn += lun->stats.ports[i].operations[CTL_STATS_WRITE]; 6814 rb += lun->stats.ports[i].bytes[CTL_STATS_READ]; 6815 wb += lun->stats.ports[i].bytes[CTL_STATS_WRITE]; 6816 bintime_add(&rt, &lun->stats.ports[i].time[CTL_STATS_READ]); 6817 bintime_add(&wt, &lun->stats.ports[i].time[CTL_STATS_WRITE]); 6818 } 6819 scsi_u64to8b(rn, data->sap.read_num); 6820 scsi_u64to8b(wn, data->sap.write_num); 6821 if (lun->stats.blocksize > 0) { 6822 scsi_u64to8b(wb / lun->stats.blocksize, 6823 data->sap.recvieved_lba); 6824 scsi_u64to8b(rb / lun->stats.blocksize, 6825 data->sap.transmitted_lba); 6826 } 6827 scsi_u64to8b((uint64_t)rt.sec * 1000 + rt.frac / (UINT64_MAX / 1000), 6828 data->sap.read_int); 6829 scsi_u64to8b((uint64_t)wt.sec * 1000 + wt.frac / (UINT64_MAX / 1000), 6830 data->sap.write_int); 6831 scsi_u64to8b(0, data->sap.weighted_num); 6832 scsi_u64to8b(0, data->sap.weighted_int); 6833 scsi_ulto2b(SLP_IT, data->it.hdr.param_code); 6834 data->it.hdr.param_control = SLP_LBIN; 6835 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - 6836 sizeof(struct scsi_log_param_header); 6837 #ifdef CTL_TIME_IO 6838 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); 6839 #endif 6840 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); 6841 data->ti.hdr.param_control = SLP_LBIN; 6842 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - 6843 sizeof(struct scsi_log_param_header); 6844 scsi_ulto4b(3, data->ti.exponent); 6845 scsi_ulto4b(1, data->ti.integer); 6846 6847 page_index->page_len = sizeof(*data); 6848 return (0); 6849 } 6850 6851 int 6852 ctl_log_sense(struct ctl_scsiio *ctsio) 6853 { 6854 struct ctl_lun *lun; 6855 int i, pc, page_code, subpage; 6856 int alloc_len, total_len; 6857 struct ctl_page_index *page_index; 6858 struct scsi_log_sense *cdb; 6859 struct scsi_log_header *header; 6860 6861 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 6862 6863 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6864 cdb = (struct scsi_log_sense *)ctsio->cdb; 6865 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 6866 page_code = cdb->page & SLS_PAGE_CODE; 6867 subpage = cdb->subpage; 6868 alloc_len = scsi_2btoul(cdb->length); 6869 6870 page_index = NULL; 6871 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 6872 page_index = &lun->log_pages.index[i]; 6873 6874 /* Look for the right page code */ 6875 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 6876 continue; 6877 6878 /* Look for the right subpage or the subpage wildcard*/ 6879 if (page_index->subpage != subpage) 6880 continue; 6881 6882 break; 6883 } 6884 if (i >= CTL_NUM_LOG_PAGES) { 6885 ctl_set_invalid_field(ctsio, 6886 /*sks_valid*/ 1, 6887 /*command*/ 1, 6888 /*field*/ 2, 6889 /*bit_valid*/ 0, 6890 /*bit*/ 0); 6891 ctl_done((union ctl_io *)ctsio); 6892 return (CTL_RETVAL_COMPLETE); 6893 } 6894 6895 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 6896 6897 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6898 ctsio->kern_sg_entries = 0; 6899 ctsio->kern_data_resid = 0; 6900 ctsio->kern_rel_offset = 0; 6901 if (total_len < alloc_len) { 6902 ctsio->residual = alloc_len - total_len; 6903 ctsio->kern_data_len = total_len; 6904 ctsio->kern_total_len = total_len; 6905 } else { 6906 ctsio->residual = 0; 6907 ctsio->kern_data_len = alloc_len; 6908 ctsio->kern_total_len = alloc_len; 6909 } 6910 6911 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 6912 header->page = page_index->page_code; 6913 if
(page_index->subpage) { 6914 header->page |= SL_SPF; 6915 header->subpage = page_index->subpage; 6916 } 6917 scsi_ulto2b(page_index->page_len, header->datalen); 6918 6919 /* 6920 * Call the handler, if it exists, to update the 6921 * page to the latest values. 6922 */ 6923 if (page_index->sense_handler != NULL) 6924 page_index->sense_handler(ctsio, page_index, pc); 6925 6926 memcpy(header + 1, page_index->page_data, page_index->page_len); 6927 6928 ctl_set_success(ctsio); 6929 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6930 ctsio->be_move_done = ctl_config_move_done; 6931 ctl_datamove((union ctl_io *)ctsio); 6932 return (CTL_RETVAL_COMPLETE); 6933 } 6934 6935 int 6936 ctl_read_capacity(struct ctl_scsiio *ctsio) 6937 { 6938 struct scsi_read_capacity *cdb; 6939 struct scsi_read_capacity_data *data; 6940 struct ctl_lun *lun; 6941 uint32_t lba; 6942 6943 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 6944 6945 cdb = (struct scsi_read_capacity *)ctsio->cdb; 6946 6947 lba = scsi_4btoul(cdb->addr); 6948 if (((cdb->pmi & SRC_PMI) == 0) 6949 && (lba != 0)) { 6950 ctl_set_invalid_field(/*ctsio*/ ctsio, 6951 /*sks_valid*/ 1, 6952 /*command*/ 1, 6953 /*field*/ 2, 6954 /*bit_valid*/ 0, 6955 /*bit*/ 0); 6956 ctl_done((union ctl_io *)ctsio); 6957 return (CTL_RETVAL_COMPLETE); 6958 } 6959 6960 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6961 6962 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6963 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 6964 ctsio->residual = 0; 6965 ctsio->kern_data_len = sizeof(*data); 6966 ctsio->kern_total_len = sizeof(*data); 6967 ctsio->kern_data_resid = 0; 6968 ctsio->kern_rel_offset = 0; 6969 ctsio->kern_sg_entries = 0; 6970 6971 /* 6972 * If the maximum LBA is greater than 0xfffffffe, the user must 6973 * issue a SERVICE ACTION IN (16) command, with the read capacity 6974 * serivce action set. 6975 */ 6976 if (lun->be_lun->maxlba > 0xfffffffe) 6977 scsi_ulto4b(0xffffffff, data->addr); 6978 else 6979 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 6980 6981 /* 6982 * XXX KDM this may not be 512 bytes... 
6983 */ 6984 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6985 6986 ctl_set_success(ctsio); 6987 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6988 ctsio->be_move_done = ctl_config_move_done; 6989 ctl_datamove((union ctl_io *)ctsio); 6990 return (CTL_RETVAL_COMPLETE); 6991 } 6992 6993 int 6994 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 6995 { 6996 struct scsi_read_capacity_16 *cdb; 6997 struct scsi_read_capacity_data_long *data; 6998 struct ctl_lun *lun; 6999 uint64_t lba; 7000 uint32_t alloc_len; 7001 7002 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 7003 7004 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 7005 7006 alloc_len = scsi_4btoul(cdb->alloc_len); 7007 lba = scsi_8btou64(cdb->addr); 7008 7009 if ((cdb->reladr & SRC16_PMI) 7010 && (lba != 0)) { 7011 ctl_set_invalid_field(/*ctsio*/ ctsio, 7012 /*sks_valid*/ 1, 7013 /*command*/ 1, 7014 /*field*/ 2, 7015 /*bit_valid*/ 0, 7016 /*bit*/ 0); 7017 ctl_done((union ctl_io *)ctsio); 7018 return (CTL_RETVAL_COMPLETE); 7019 } 7020 7021 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7022 7023 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7024 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 7025 7026 if (sizeof(*data) < alloc_len) { 7027 ctsio->residual = alloc_len - sizeof(*data); 7028 ctsio->kern_data_len = sizeof(*data); 7029 ctsio->kern_total_len = sizeof(*data); 7030 } else { 7031 ctsio->residual = 0; 7032 ctsio->kern_data_len = alloc_len; 7033 ctsio->kern_total_len = alloc_len; 7034 } 7035 ctsio->kern_data_resid = 0; 7036 ctsio->kern_rel_offset = 0; 7037 ctsio->kern_sg_entries = 0; 7038 7039 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 7040 /* XXX KDM this may not be 512 bytes... */ 7041 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7042 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 7043 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 7044 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 7045 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 7046 7047 ctl_set_success(ctsio); 7048 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7049 ctsio->be_move_done = ctl_config_move_done; 7050 ctl_datamove((union ctl_io *)ctsio); 7051 return (CTL_RETVAL_COMPLETE); 7052 } 7053 7054 int 7055 ctl_get_lba_status(struct ctl_scsiio *ctsio) 7056 { 7057 struct scsi_get_lba_status *cdb; 7058 struct scsi_get_lba_status_data *data; 7059 struct ctl_lun *lun; 7060 struct ctl_lba_len_flags *lbalen; 7061 uint64_t lba; 7062 uint32_t alloc_len, total_len; 7063 int retval; 7064 7065 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 7066 7067 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7068 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 7069 lba = scsi_8btou64(cdb->addr); 7070 alloc_len = scsi_4btoul(cdb->alloc_len); 7071 7072 if (lba > lun->be_lun->maxlba) { 7073 ctl_set_lba_out_of_range(ctsio); 7074 ctl_done((union ctl_io *)ctsio); 7075 return (CTL_RETVAL_COMPLETE); 7076 } 7077 7078 total_len = sizeof(*data) + sizeof(data->descr[0]); 7079 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7080 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 7081 7082 if (total_len < alloc_len) { 7083 ctsio->residual = alloc_len - total_len; 7084 ctsio->kern_data_len = total_len; 7085 ctsio->kern_total_len = total_len; 7086 } else { 7087 ctsio->residual = 0; 7088 ctsio->kern_data_len = alloc_len; 7089 ctsio->kern_total_len = alloc_len; 7090 } 7091 ctsio->kern_data_resid = 0; 7092 ctsio->kern_rel_offset = 0; 7093 
ctsio->kern_sg_entries = 0; 7094 7095 /* Fill dummy data in case backend can't tell anything. */ 7096 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 7097 scsi_u64to8b(lba, data->descr[0].addr); 7098 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 7099 data->descr[0].length); 7100 data->descr[0].status = 0; /* Mapped or unknown. */ 7101 7102 ctl_set_success(ctsio); 7103 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7104 ctsio->be_move_done = ctl_config_move_done; 7105 7106 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 7107 lbalen->lba = lba; 7108 lbalen->len = total_len; 7109 lbalen->flags = 0; 7110 retval = lun->backend->config_read((union ctl_io *)ctsio); 7111 return (CTL_RETVAL_COMPLETE); 7112 } 7113 7114 int 7115 ctl_read_defect(struct ctl_scsiio *ctsio) 7116 { 7117 struct scsi_read_defect_data_10 *ccb10; 7118 struct scsi_read_defect_data_12 *ccb12; 7119 struct scsi_read_defect_data_hdr_10 *data10; 7120 struct scsi_read_defect_data_hdr_12 *data12; 7121 uint32_t alloc_len, data_len; 7122 uint8_t format; 7123 7124 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7125 7126 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7127 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7128 format = ccb10->format; 7129 alloc_len = scsi_2btoul(ccb10->alloc_length); 7130 data_len = sizeof(*data10); 7131 } else { 7132 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7133 format = ccb12->format; 7134 alloc_len = scsi_4btoul(ccb12->alloc_length); 7135 data_len = sizeof(*data12); 7136 } 7137 if (alloc_len == 0) { 7138 ctl_set_success(ctsio); 7139 ctl_done((union ctl_io *)ctsio); 7140 return (CTL_RETVAL_COMPLETE); 7141 } 7142 7143 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7144 if (data_len < alloc_len) { 7145 ctsio->residual = alloc_len - data_len; 7146 ctsio->kern_data_len = data_len; 7147 ctsio->kern_total_len = data_len; 7148 } else { 7149 ctsio->residual = 0; 7150 ctsio->kern_data_len = alloc_len; 7151 ctsio->kern_total_len = alloc_len; 7152 } 7153 ctsio->kern_data_resid = 0; 7154 ctsio->kern_rel_offset = 0; 7155 ctsio->kern_sg_entries = 0; 7156 7157 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7158 data10 = (struct scsi_read_defect_data_hdr_10 *) 7159 ctsio->kern_data_ptr; 7160 data10->format = format; 7161 scsi_ulto2b(0, data10->length); 7162 } else { 7163 data12 = (struct scsi_read_defect_data_hdr_12 *) 7164 ctsio->kern_data_ptr; 7165 data12->format = format; 7166 scsi_ulto2b(0, data12->generation); 7167 scsi_ulto4b(0, data12->length); 7168 } 7169 7170 ctl_set_success(ctsio); 7171 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7172 ctsio->be_move_done = ctl_config_move_done; 7173 ctl_datamove((union ctl_io *)ctsio); 7174 return (CTL_RETVAL_COMPLETE); 7175 } 7176 7177 int 7178 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7179 { 7180 struct scsi_maintenance_in *cdb; 7181 int retval; 7182 int alloc_len, ext, total_len = 0, g, pc, pg, gs, os; 7183 int num_target_port_groups, num_target_ports; 7184 struct ctl_lun *lun; 7185 struct ctl_softc *softc; 7186 struct ctl_port *port; 7187 struct scsi_target_group_data *rtg_ptr; 7188 struct scsi_target_group_data_extended *rtg_ext_ptr; 7189 struct scsi_target_port_group_descriptor *tpg_desc; 7190 7191 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7192 7193 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7194 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7195 softc = lun->ctl_softc; 7196 7197 retval = CTL_RETVAL_COMPLETE; 7198 7199 switch (cdb->byte2 & 
STG_PDF_MASK) { 7200 case STG_PDF_LENGTH: 7201 ext = 0; 7202 break; 7203 case STG_PDF_EXTENDED: 7204 ext = 1; 7205 break; 7206 default: 7207 ctl_set_invalid_field(/*ctsio*/ ctsio, 7208 /*sks_valid*/ 1, 7209 /*command*/ 1, 7210 /*field*/ 2, 7211 /*bit_valid*/ 1, 7212 /*bit*/ 5); 7213 ctl_done((union ctl_io *)ctsio); 7214 return(retval); 7215 } 7216 7217 if (softc->is_single) 7218 num_target_port_groups = 1; 7219 else 7220 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 7221 num_target_ports = 0; 7222 mtx_lock(&softc->ctl_lock); 7223 STAILQ_FOREACH(port, &softc->port_list, links) { 7224 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7225 continue; 7226 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 7227 continue; 7228 num_target_ports++; 7229 } 7230 mtx_unlock(&softc->ctl_lock); 7231 7232 if (ext) 7233 total_len = sizeof(struct scsi_target_group_data_extended); 7234 else 7235 total_len = sizeof(struct scsi_target_group_data); 7236 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7237 num_target_port_groups + 7238 sizeof(struct scsi_target_port_descriptor) * num_target_ports; 7239 7240 alloc_len = scsi_4btoul(cdb->length); 7241 7242 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7243 7244 ctsio->kern_sg_entries = 0; 7245 7246 if (total_len < alloc_len) { 7247 ctsio->residual = alloc_len - total_len; 7248 ctsio->kern_data_len = total_len; 7249 ctsio->kern_total_len = total_len; 7250 } else { 7251 ctsio->residual = 0; 7252 ctsio->kern_data_len = alloc_len; 7253 ctsio->kern_total_len = alloc_len; 7254 } 7255 ctsio->kern_data_resid = 0; 7256 ctsio->kern_rel_offset = 0; 7257 7258 if (ext) { 7259 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7260 ctsio->kern_data_ptr; 7261 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7262 rtg_ext_ptr->format_type = 0x10; 7263 rtg_ext_ptr->implicit_transition_time = 0; 7264 tpg_desc = &rtg_ext_ptr->groups[0]; 7265 } else { 7266 rtg_ptr = (struct scsi_target_group_data *) 7267 ctsio->kern_data_ptr; 7268 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7269 tpg_desc = &rtg_ptr->groups[0]; 7270 } 7271 7272 mtx_lock(&softc->ctl_lock); 7273 pg = softc->port_min / softc->port_cnt; 7274 if (softc->ha_link == CTL_HA_LINK_OFFLINE) 7275 gs = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7276 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) 7277 gs = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7278 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) 7279 gs = TPG_ASYMMETRIC_ACCESS_STANDBY; 7280 else 7281 gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7282 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7283 os = gs; 7284 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7285 } else 7286 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7287 for (g = 0; g < num_target_port_groups; g++) { 7288 tpg_desc->pref_state = (g == pg) ? gs : os; 7289 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7290 TPG_U_SUP | TPG_T_SUP; 7291 scsi_ulto2b(g + 1, tpg_desc->target_port_group); 7292 tpg_desc->status = TPG_IMPLICIT; 7293 pc = 0; 7294 STAILQ_FOREACH(port, &softc->port_list, links) { 7295 if (port->targ_port < g * softc->port_cnt || 7296 port->targ_port >= (g + 1) * softc->port_cnt) 7297 continue; 7298 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7299 continue; 7300 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 7301 continue; 7302 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 
7303 relative_target_port_identifier); 7304 pc++; 7305 } 7306 tpg_desc->target_port_count = pc; 7307 tpg_desc = (struct scsi_target_port_group_descriptor *) 7308 &tpg_desc->descriptors[pc]; 7309 } 7310 mtx_unlock(&softc->ctl_lock); 7311 7312 ctl_set_success(ctsio); 7313 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7314 ctsio->be_move_done = ctl_config_move_done; 7315 ctl_datamove((union ctl_io *)ctsio); 7316 return(retval); 7317 } 7318 7319 int 7320 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7321 { 7322 struct ctl_lun *lun; 7323 struct scsi_report_supported_opcodes *cdb; 7324 const struct ctl_cmd_entry *entry, *sentry; 7325 struct scsi_report_supported_opcodes_all *all; 7326 struct scsi_report_supported_opcodes_descr *descr; 7327 struct scsi_report_supported_opcodes_one *one; 7328 int retval; 7329 int alloc_len, total_len; 7330 int opcode, service_action, i, j, num; 7331 7332 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7333 7334 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7335 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7336 7337 retval = CTL_RETVAL_COMPLETE; 7338 7339 opcode = cdb->requested_opcode; 7340 service_action = scsi_2btoul(cdb->requested_service_action); 7341 switch (cdb->options & RSO_OPTIONS_MASK) { 7342 case RSO_OPTIONS_ALL: 7343 num = 0; 7344 for (i = 0; i < 256; i++) { 7345 entry = &ctl_cmd_table[i]; 7346 if (entry->flags & CTL_CMD_FLAG_SA5) { 7347 for (j = 0; j < 32; j++) { 7348 sentry = &((const struct ctl_cmd_entry *) 7349 entry->execute)[j]; 7350 if (ctl_cmd_applicable( 7351 lun->be_lun->lun_type, sentry)) 7352 num++; 7353 } 7354 } else { 7355 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7356 entry)) 7357 num++; 7358 } 7359 } 7360 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7361 num * sizeof(struct scsi_report_supported_opcodes_descr); 7362 break; 7363 case RSO_OPTIONS_OC: 7364 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7365 ctl_set_invalid_field(/*ctsio*/ ctsio, 7366 /*sks_valid*/ 1, 7367 /*command*/ 1, 7368 /*field*/ 2, 7369 /*bit_valid*/ 1, 7370 /*bit*/ 2); 7371 ctl_done((union ctl_io *)ctsio); 7372 return (CTL_RETVAL_COMPLETE); 7373 } 7374 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7375 break; 7376 case RSO_OPTIONS_OC_SA: 7377 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7378 service_action >= 32) { 7379 ctl_set_invalid_field(/*ctsio*/ ctsio, 7380 /*sks_valid*/ 1, 7381 /*command*/ 1, 7382 /*field*/ 2, 7383 /*bit_valid*/ 1, 7384 /*bit*/ 2); 7385 ctl_done((union ctl_io *)ctsio); 7386 return (CTL_RETVAL_COMPLETE); 7387 } 7388 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7389 break; 7390 default: 7391 ctl_set_invalid_field(/*ctsio*/ ctsio, 7392 /*sks_valid*/ 1, 7393 /*command*/ 1, 7394 /*field*/ 2, 7395 /*bit_valid*/ 1, 7396 /*bit*/ 2); 7397 ctl_done((union ctl_io *)ctsio); 7398 return (CTL_RETVAL_COMPLETE); 7399 } 7400 7401 alloc_len = scsi_4btoul(cdb->length); 7402 7403 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7404 7405 ctsio->kern_sg_entries = 0; 7406 7407 if (total_len < alloc_len) { 7408 ctsio->residual = alloc_len - total_len; 7409 ctsio->kern_data_len = total_len; 7410 ctsio->kern_total_len = total_len; 7411 } else { 7412 ctsio->residual = 0; 7413 ctsio->kern_data_len = alloc_len; 7414 ctsio->kern_total_len = alloc_len; 7415 } 7416 ctsio->kern_data_resid = 0; 7417 ctsio->kern_rel_offset = 0; 7418 7419 switch (cdb->options & RSO_OPTIONS_MASK) { 7420 case RSO_OPTIONS_ALL: 7421 all = (struct 
scsi_report_supported_opcodes_all *) 7422 ctsio->kern_data_ptr; 7423 num = 0; 7424 for (i = 0; i < 256; i++) { 7425 entry = &ctl_cmd_table[i]; 7426 if (entry->flags & CTL_CMD_FLAG_SA5) { 7427 for (j = 0; j < 32; j++) { 7428 sentry = &((const struct ctl_cmd_entry *) 7429 entry->execute)[j]; 7430 if (!ctl_cmd_applicable( 7431 lun->be_lun->lun_type, sentry)) 7432 continue; 7433 descr = &all->descr[num++]; 7434 descr->opcode = i; 7435 scsi_ulto2b(j, descr->service_action); 7436 descr->flags = RSO_SERVACTV; 7437 scsi_ulto2b(sentry->length, 7438 descr->cdb_length); 7439 } 7440 } else { 7441 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7442 entry)) 7443 continue; 7444 descr = &all->descr[num++]; 7445 descr->opcode = i; 7446 scsi_ulto2b(0, descr->service_action); 7447 descr->flags = 0; 7448 scsi_ulto2b(entry->length, descr->cdb_length); 7449 } 7450 } 7451 scsi_ulto4b( 7452 num * sizeof(struct scsi_report_supported_opcodes_descr), 7453 all->length); 7454 break; 7455 case RSO_OPTIONS_OC: 7456 one = (struct scsi_report_supported_opcodes_one *) 7457 ctsio->kern_data_ptr; 7458 entry = &ctl_cmd_table[opcode]; 7459 goto fill_one; 7460 case RSO_OPTIONS_OC_SA: 7461 one = (struct scsi_report_supported_opcodes_one *) 7462 ctsio->kern_data_ptr; 7463 entry = &ctl_cmd_table[opcode]; 7464 entry = &((const struct ctl_cmd_entry *) 7465 entry->execute)[service_action]; 7466 fill_one: 7467 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7468 one->support = 3; 7469 scsi_ulto2b(entry->length, one->cdb_length); 7470 one->cdb_usage[0] = opcode; 7471 memcpy(&one->cdb_usage[1], entry->usage, 7472 entry->length - 1); 7473 } else 7474 one->support = 1; 7475 break; 7476 } 7477 7478 ctl_set_success(ctsio); 7479 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7480 ctsio->be_move_done = ctl_config_move_done; 7481 ctl_datamove((union ctl_io *)ctsio); 7482 return(retval); 7483 } 7484 7485 int 7486 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7487 { 7488 struct scsi_report_supported_tmf *cdb; 7489 struct scsi_report_supported_tmf_data *data; 7490 int retval; 7491 int alloc_len, total_len; 7492 7493 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7494 7495 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7496 7497 retval = CTL_RETVAL_COMPLETE; 7498 7499 total_len = sizeof(struct scsi_report_supported_tmf_data); 7500 alloc_len = scsi_4btoul(cdb->length); 7501 7502 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7503 7504 ctsio->kern_sg_entries = 0; 7505 7506 if (total_len < alloc_len) { 7507 ctsio->residual = alloc_len - total_len; 7508 ctsio->kern_data_len = total_len; 7509 ctsio->kern_total_len = total_len; 7510 } else { 7511 ctsio->residual = 0; 7512 ctsio->kern_data_len = alloc_len; 7513 ctsio->kern_total_len = alloc_len; 7514 } 7515 ctsio->kern_data_resid = 0; 7516 ctsio->kern_rel_offset = 0; 7517 7518 data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr; 7519 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | 7520 RST_TRS; 7521 data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; 7522 7523 ctl_set_success(ctsio); 7524 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7525 ctsio->be_move_done = ctl_config_move_done; 7526 ctl_datamove((union ctl_io *)ctsio); 7527 return (retval); 7528 } 7529 7530 int 7531 ctl_report_timestamp(struct ctl_scsiio *ctsio) 7532 { 7533 struct scsi_report_timestamp *cdb; 7534 struct scsi_report_timestamp_data *data; 7535 struct timeval tv; 7536 int64_t timestamp; 7537 int retval; 7538 int alloc_len, total_len; 7539 7540 
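	/*
	 * Illustrative note (not part of the original driver): the REPORT
	 * TIMESTAMP parameter data carries the timestamp as a 6-byte,
	 * big-endian count of milliseconds.  The code below builds that
	 * value from getmicrotime() and stores the upper 32 bits and the
	 * low 16 bits separately.  A hypothetical worked example follows,
	 * guarded out in the same style as the other debug snippets in
	 * this file.
	 */
#if 0
	{
		uint8_t ts[6];
		uint64_t ms = 1000000000000ULL;	/* e.g. tv_sec = 10^9, tv_usec = 0 */

		/* 10^12 == 0xE8D4A51000, so ts[] becomes 00 e8 d4 a5 10 00. */
		scsi_ulto4b(ms >> 16, ts);
		scsi_ulto2b(ms & 0xffff, &ts[4]);
		printf("example timestamp bytes: %02x %02x %02x %02x %02x %02x\n",
		    ts[0], ts[1], ts[2], ts[3], ts[4], ts[5]);
	}
#endif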
CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7541 7542 cdb = (struct scsi_report_timestamp *)ctsio->cdb; 7543 7544 retval = CTL_RETVAL_COMPLETE; 7545 7546 total_len = sizeof(struct scsi_report_timestamp_data); 7547 alloc_len = scsi_4btoul(cdb->length); 7548 7549 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7550 7551 ctsio->kern_sg_entries = 0; 7552 7553 if (total_len < alloc_len) { 7554 ctsio->residual = alloc_len - total_len; 7555 ctsio->kern_data_len = total_len; 7556 ctsio->kern_total_len = total_len; 7557 } else { 7558 ctsio->residual = 0; 7559 ctsio->kern_data_len = alloc_len; 7560 ctsio->kern_total_len = alloc_len; 7561 } 7562 ctsio->kern_data_resid = 0; 7563 ctsio->kern_rel_offset = 0; 7564 7565 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7566 scsi_ulto2b(sizeof(*data) - 2, data->length); 7567 data->origin = RTS_ORIG_OUTSIDE; 7568 getmicrotime(&tv); 7569 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7570 scsi_ulto4b(timestamp >> 16, data->timestamp); 7571 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7572 7573 ctl_set_success(ctsio); 7574 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7575 ctsio->be_move_done = ctl_config_move_done; 7576 ctl_datamove((union ctl_io *)ctsio); 7577 return (retval); 7578 } 7579 7580 int 7581 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7582 { 7583 struct scsi_per_res_in *cdb; 7584 int alloc_len, total_len = 0; 7585 /* struct scsi_per_res_in_rsrv in_data; */ 7586 struct ctl_lun *lun; 7587 struct ctl_softc *softc; 7588 uint64_t key; 7589 7590 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7591 7592 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7593 7594 alloc_len = scsi_2btoul(cdb->length); 7595 7596 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7597 softc = lun->ctl_softc; 7598 7599 retry: 7600 mtx_lock(&lun->lun_lock); 7601 switch (cdb->action) { 7602 case SPRI_RK: /* read keys */ 7603 total_len = sizeof(struct scsi_per_res_in_keys) + 7604 lun->pr_key_count * 7605 sizeof(struct scsi_per_res_key); 7606 break; 7607 case SPRI_RR: /* read reservation */ 7608 if (lun->flags & CTL_LUN_PR_RESERVED) 7609 total_len = sizeof(struct scsi_per_res_in_rsrv); 7610 else 7611 total_len = sizeof(struct scsi_per_res_in_header); 7612 break; 7613 case SPRI_RC: /* report capabilities */ 7614 total_len = sizeof(struct scsi_per_res_cap); 7615 break; 7616 case SPRI_RS: /* read full status */ 7617 total_len = sizeof(struct scsi_per_res_in_header) + 7618 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7619 lun->pr_key_count; 7620 break; 7621 default: 7622 panic("Invalid PR type %x", cdb->action); 7623 } 7624 mtx_unlock(&lun->lun_lock); 7625 7626 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7627 7628 if (total_len < alloc_len) { 7629 ctsio->residual = alloc_len - total_len; 7630 ctsio->kern_data_len = total_len; 7631 ctsio->kern_total_len = total_len; 7632 } else { 7633 ctsio->residual = 0; 7634 ctsio->kern_data_len = alloc_len; 7635 ctsio->kern_total_len = alloc_len; 7636 } 7637 7638 ctsio->kern_data_resid = 0; 7639 ctsio->kern_rel_offset = 0; 7640 ctsio->kern_sg_entries = 0; 7641 7642 mtx_lock(&lun->lun_lock); 7643 switch (cdb->action) { 7644 case SPRI_RK: { // read keys 7645 struct scsi_per_res_in_keys *res_keys; 7646 int i, key_count; 7647 7648 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7649 7650 /* 7651 * We had to drop the lock to allocate our buffer, which 7652 * leaves time for someone to come in with another 7653 * persistent 
reservation. (That is unlikely, though, 7654 * since this should be the only persistent reservation 7655 * command active right now.) 7656 */ 7657 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7658 (lun->pr_key_count * 7659 sizeof(struct scsi_per_res_key)))){ 7660 mtx_unlock(&lun->lun_lock); 7661 free(ctsio->kern_data_ptr, M_CTL); 7662 printf("%s: reservation length changed, retrying\n", 7663 __func__); 7664 goto retry; 7665 } 7666 7667 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); 7668 7669 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7670 lun->pr_key_count, res_keys->header.length); 7671 7672 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { 7673 if ((key = ctl_get_prkey(lun, i)) == 0) 7674 continue; 7675 7676 /* 7677 * We used lun->pr_key_count to calculate the 7678 * size to allocate. If it turns out the number of 7679 * initiators with the registered flag set is 7680 * larger than that (i.e. they haven't been kept in 7681 * sync), we've got a problem. 7682 */ 7683 if (key_count >= lun->pr_key_count) { 7684 #ifdef NEEDTOPORT 7685 csevent_log(CSC_CTL | CSC_SHELF_SW | 7686 CTL_PR_ERROR, 7687 csevent_LogType_Fault, 7688 csevent_AlertLevel_Yellow, 7689 csevent_FRU_ShelfController, 7690 csevent_FRU_Firmware, 7691 csevent_FRU_Unknown, 7692 "registered keys %d >= key " 7693 "count %d", key_count, 7694 lun->pr_key_count); 7695 #endif 7696 key_count++; 7697 continue; 7698 } 7699 scsi_u64to8b(key, res_keys->keys[key_count].key); 7700 key_count++; 7701 } 7702 break; 7703 } 7704 case SPRI_RR: { // read reservation 7705 struct scsi_per_res_in_rsrv *res; 7706 int tmp_len, header_only; 7707 7708 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7709 7710 scsi_ulto4b(lun->PRGeneration, res->header.generation); 7711 7712 if (lun->flags & CTL_LUN_PR_RESERVED) 7713 { 7714 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7715 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7716 res->header.length); 7717 header_only = 0; 7718 } else { 7719 tmp_len = sizeof(struct scsi_per_res_in_header); 7720 scsi_ulto4b(0, res->header.length); 7721 header_only = 1; 7722 } 7723 7724 /* 7725 * We had to drop the lock to allocate our buffer, which 7726 * leaves time for someone to come in with another 7727 * persistent reservation. (That is unlikely, though, 7728 * since this should be the only persistent reservation 7729 * command active right now.) 7730 */ 7731 if (tmp_len != total_len) { 7732 mtx_unlock(&lun->lun_lock); 7733 free(ctsio->kern_data_ptr, M_CTL); 7734 printf("%s: reservation status changed, retrying\n", 7735 __func__); 7736 goto retry; 7737 } 7738 7739 /* 7740 * No reservation held, so we're done. 7741 */ 7742 if (header_only != 0) 7743 break; 7744 7745 /* 7746 * If the registration is an All Registrants type, the key 7747 * is 0, since it doesn't really matter. 
7748 */ 7749 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7750 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7751 res->data.reservation); 7752 } 7753 res->data.scopetype = lun->res_type; 7754 break; 7755 } 7756 case SPRI_RC: //report capabilities 7757 { 7758 struct scsi_per_res_cap *res_cap; 7759 uint16_t type_mask; 7760 7761 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7762 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7763 res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_5; 7764 type_mask = SPRI_TM_WR_EX_AR | 7765 SPRI_TM_EX_AC_RO | 7766 SPRI_TM_WR_EX_RO | 7767 SPRI_TM_EX_AC | 7768 SPRI_TM_WR_EX | 7769 SPRI_TM_EX_AC_AR; 7770 scsi_ulto2b(type_mask, res_cap->type_mask); 7771 break; 7772 } 7773 case SPRI_RS: { // read full status 7774 struct scsi_per_res_in_full *res_status; 7775 struct scsi_per_res_in_full_desc *res_desc; 7776 struct ctl_port *port; 7777 int i, len; 7778 7779 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7780 7781 /* 7782 * We had to drop the lock to allocate our buffer, which 7783 * leaves time for someone to come in with another 7784 * persistent reservation. (That is unlikely, though, 7785 * since this should be the only persistent reservation 7786 * command active right now.) 7787 */ 7788 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7789 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7790 lun->pr_key_count)){ 7791 mtx_unlock(&lun->lun_lock); 7792 free(ctsio->kern_data_ptr, M_CTL); 7793 printf("%s: reservation length changed, retrying\n", 7794 __func__); 7795 goto retry; 7796 } 7797 7798 scsi_ulto4b(lun->PRGeneration, res_status->header.generation); 7799 7800 res_desc = &res_status->desc[0]; 7801 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7802 if ((key = ctl_get_prkey(lun, i)) == 0) 7803 continue; 7804 7805 scsi_u64to8b(key, res_desc->res_key.key); 7806 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7807 (lun->pr_res_idx == i || 7808 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7809 res_desc->flags = SPRI_FULL_R_HOLDER; 7810 res_desc->scopetype = lun->res_type; 7811 } 7812 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7813 res_desc->rel_trgt_port_id); 7814 len = 0; 7815 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; 7816 if (port != NULL) 7817 len = ctl_create_iid(port, 7818 i % CTL_MAX_INIT_PER_PORT, 7819 res_desc->transport_id); 7820 scsi_ulto4b(len, res_desc->additional_length); 7821 res_desc = (struct scsi_per_res_in_full_desc *) 7822 &res_desc->transport_id[len]; 7823 } 7824 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7825 res_status->header.length); 7826 break; 7827 } 7828 default: 7829 /* 7830 * This is a bug, because we just checked for this above, 7831 * and should have returned an error. 7832 */ 7833 panic("Invalid PR type %x", cdb->action); 7834 break; /* NOTREACHED */ 7835 } 7836 mtx_unlock(&lun->lun_lock); 7837 7838 ctl_set_success(ctsio); 7839 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7840 ctsio->be_move_done = ctl_config_move_done; 7841 ctl_datamove((union ctl_io *)ctsio); 7842 return (CTL_RETVAL_COMPLETE); 7843 } 7844 7845 /* 7846 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7847 * it should return. 
7848 */ 7849 static int 7850 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7851 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7852 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7853 struct scsi_per_res_out_parms* param) 7854 { 7855 union ctl_ha_msg persis_io; 7856 int i; 7857 7858 mtx_lock(&lun->lun_lock); 7859 if (sa_res_key == 0) { 7860 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7861 /* validate scope and type */ 7862 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7863 SPR_LU_SCOPE) { 7864 mtx_unlock(&lun->lun_lock); 7865 ctl_set_invalid_field(/*ctsio*/ ctsio, 7866 /*sks_valid*/ 1, 7867 /*command*/ 1, 7868 /*field*/ 2, 7869 /*bit_valid*/ 1, 7870 /*bit*/ 4); 7871 ctl_done((union ctl_io *)ctsio); 7872 return (1); 7873 } 7874 7875 if (type>8 || type==2 || type==4 || type==0) { 7876 mtx_unlock(&lun->lun_lock); 7877 ctl_set_invalid_field(/*ctsio*/ ctsio, 7878 /*sks_valid*/ 1, 7879 /*command*/ 1, 7880 /*field*/ 2, 7881 /*bit_valid*/ 1, 7882 /*bit*/ 0); 7883 ctl_done((union ctl_io *)ctsio); 7884 return (1); 7885 } 7886 7887 /* 7888 * Unregister everybody else and build UA for 7889 * them 7890 */ 7891 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7892 if (i == residx || ctl_get_prkey(lun, i) == 0) 7893 continue; 7894 7895 ctl_clr_prkey(lun, i); 7896 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7897 } 7898 lun->pr_key_count = 1; 7899 lun->res_type = type; 7900 if (lun->res_type != SPR_TYPE_WR_EX_AR 7901 && lun->res_type != SPR_TYPE_EX_AC_AR) 7902 lun->pr_res_idx = residx; 7903 lun->PRGeneration++; 7904 mtx_unlock(&lun->lun_lock); 7905 7906 /* send msg to other side */ 7907 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7908 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7909 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7910 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7911 persis_io.pr.pr_info.res_type = type; 7912 memcpy(persis_io.pr.pr_info.sa_res_key, 7913 param->serv_act_res_key, 7914 sizeof(param->serv_act_res_key)); 7915 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7916 sizeof(persis_io.pr), M_WAITOK); 7917 } else { 7918 /* not all registrants */ 7919 mtx_unlock(&lun->lun_lock); 7920 free(ctsio->kern_data_ptr, M_CTL); 7921 ctl_set_invalid_field(ctsio, 7922 /*sks_valid*/ 1, 7923 /*command*/ 0, 7924 /*field*/ 8, 7925 /*bit_valid*/ 0, 7926 /*bit*/ 0); 7927 ctl_done((union ctl_io *)ctsio); 7928 return (1); 7929 } 7930 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7931 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7932 int found = 0; 7933 7934 if (res_key == sa_res_key) { 7935 /* special case */ 7936 /* 7937 * The spec implies this is not good but doesn't 7938 * say what to do. There are two choices either 7939 * generate a res conflict or check condition 7940 * with illegal field in parameter data. Since 7941 * that is what is done when the sa_res_key is 7942 * zero I'll take that approach since this has 7943 * to do with the sa_res_key. 
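 * (Concretely, that means failing the command with an invalid field
 * in parameter list error pointing at byte 8 of the parameter data,
 * where the SERVICE ACTION RESERVATION KEY starts, as the
 * ctl_set_invalid_field() call below does.)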
7944 */ 7945 mtx_unlock(&lun->lun_lock); 7946 free(ctsio->kern_data_ptr, M_CTL); 7947 ctl_set_invalid_field(ctsio, 7948 /*sks_valid*/ 1, 7949 /*command*/ 0, 7950 /*field*/ 8, 7951 /*bit_valid*/ 0, 7952 /*bit*/ 0); 7953 ctl_done((union ctl_io *)ctsio); 7954 return (1); 7955 } 7956 7957 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7958 if (ctl_get_prkey(lun, i) != sa_res_key) 7959 continue; 7960 7961 found = 1; 7962 ctl_clr_prkey(lun, i); 7963 lun->pr_key_count--; 7964 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7965 } 7966 if (!found) { 7967 mtx_unlock(&lun->lun_lock); 7968 free(ctsio->kern_data_ptr, M_CTL); 7969 ctl_set_reservation_conflict(ctsio); 7970 ctl_done((union ctl_io *)ctsio); 7971 return (CTL_RETVAL_COMPLETE); 7972 } 7973 lun->PRGeneration++; 7974 mtx_unlock(&lun->lun_lock); 7975 7976 /* send msg to other side */ 7977 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7978 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7979 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7980 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7981 persis_io.pr.pr_info.res_type = type; 7982 memcpy(persis_io.pr.pr_info.sa_res_key, 7983 param->serv_act_res_key, 7984 sizeof(param->serv_act_res_key)); 7985 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7986 sizeof(persis_io.pr), M_WAITOK); 7987 } else { 7988 /* Reserved but not all registrants */ 7989 /* sa_res_key is res holder */ 7990 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7991 /* validate scope and type */ 7992 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7993 SPR_LU_SCOPE) { 7994 mtx_unlock(&lun->lun_lock); 7995 ctl_set_invalid_field(/*ctsio*/ ctsio, 7996 /*sks_valid*/ 1, 7997 /*command*/ 1, 7998 /*field*/ 2, 7999 /*bit_valid*/ 1, 8000 /*bit*/ 4); 8001 ctl_done((union ctl_io *)ctsio); 8002 return (1); 8003 } 8004 8005 if (type>8 || type==2 || type==4 || type==0) { 8006 mtx_unlock(&lun->lun_lock); 8007 ctl_set_invalid_field(/*ctsio*/ ctsio, 8008 /*sks_valid*/ 1, 8009 /*command*/ 1, 8010 /*field*/ 2, 8011 /*bit_valid*/ 1, 8012 /*bit*/ 0); 8013 ctl_done((union ctl_io *)ctsio); 8014 return (1); 8015 } 8016 8017 /* 8018 * Do the following: 8019 * if sa_res_key != res_key remove all 8020 * registrants w/sa_res_key and generate UA 8021 * for these registrants(Registrations 8022 * Preempted) if it wasn't an exclusive 8023 * reservation generate UA(Reservations 8024 * Preempted) for all other registered nexuses 8025 * if the type has changed. Establish the new 8026 * reservation and holder. If res_key and 8027 * sa_res_key are the same do the above 8028 * except don't unregister the res holder. 
8029 */ 8030 8031 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8032 if (i == residx || ctl_get_prkey(lun, i) == 0) 8033 continue; 8034 8035 if (sa_res_key == ctl_get_prkey(lun, i)) { 8036 ctl_clr_prkey(lun, i); 8037 lun->pr_key_count--; 8038 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8039 } else if (type != lun->res_type 8040 && (lun->res_type == SPR_TYPE_WR_EX_RO 8041 || lun->res_type ==SPR_TYPE_EX_AC_RO)){ 8042 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8043 } 8044 } 8045 lun->res_type = type; 8046 if (lun->res_type != SPR_TYPE_WR_EX_AR 8047 && lun->res_type != SPR_TYPE_EX_AC_AR) 8048 lun->pr_res_idx = residx; 8049 else 8050 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8051 lun->PRGeneration++; 8052 mtx_unlock(&lun->lun_lock); 8053 8054 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8055 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8056 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8057 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8058 persis_io.pr.pr_info.res_type = type; 8059 memcpy(persis_io.pr.pr_info.sa_res_key, 8060 param->serv_act_res_key, 8061 sizeof(param->serv_act_res_key)); 8062 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8063 sizeof(persis_io.pr), M_WAITOK); 8064 } else { 8065 /* 8066 * sa_res_key is not the res holder just 8067 * remove registrants 8068 */ 8069 int found=0; 8070 8071 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8072 if (sa_res_key != ctl_get_prkey(lun, i)) 8073 continue; 8074 8075 found = 1; 8076 ctl_clr_prkey(lun, i); 8077 lun->pr_key_count--; 8078 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8079 } 8080 8081 if (!found) { 8082 mtx_unlock(&lun->lun_lock); 8083 free(ctsio->kern_data_ptr, M_CTL); 8084 ctl_set_reservation_conflict(ctsio); 8085 ctl_done((union ctl_io *)ctsio); 8086 return (1); 8087 } 8088 lun->PRGeneration++; 8089 mtx_unlock(&lun->lun_lock); 8090 8091 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8092 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8093 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8094 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8095 persis_io.pr.pr_info.res_type = type; 8096 memcpy(persis_io.pr.pr_info.sa_res_key, 8097 param->serv_act_res_key, 8098 sizeof(param->serv_act_res_key)); 8099 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8100 sizeof(persis_io.pr), M_WAITOK); 8101 } 8102 } 8103 return (0); 8104 } 8105 8106 static void 8107 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8108 { 8109 uint64_t sa_res_key; 8110 int i; 8111 8112 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8113 8114 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8115 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8116 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8117 if (sa_res_key == 0) { 8118 /* 8119 * Unregister everybody else and build UA for 8120 * them 8121 */ 8122 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8123 if (i == msg->pr.pr_info.residx || 8124 ctl_get_prkey(lun, i) == 0) 8125 continue; 8126 8127 ctl_clr_prkey(lun, i); 8128 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8129 } 8130 8131 lun->pr_key_count = 1; 8132 lun->res_type = msg->pr.pr_info.res_type; 8133 if (lun->res_type != SPR_TYPE_WR_EX_AR 8134 && lun->res_type != SPR_TYPE_EX_AC_AR) 8135 lun->pr_res_idx = msg->pr.pr_info.residx; 8136 } else { 8137 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8138 if (sa_res_key == ctl_get_prkey(lun, i)) 8139 continue; 8140 8141 ctl_clr_prkey(lun, i); 8142 lun->pr_key_count--; 8143 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8144 } 8145 } 8146 } else { 8147 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8148 if (i == msg->pr.pr_info.residx || 8149 
ctl_get_prkey(lun, i) == 0) 8150 continue; 8151 8152 if (sa_res_key == ctl_get_prkey(lun, i)) { 8153 ctl_clr_prkey(lun, i); 8154 lun->pr_key_count--; 8155 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8156 } else if (msg->pr.pr_info.res_type != lun->res_type 8157 && (lun->res_type == SPR_TYPE_WR_EX_RO 8158 || lun->res_type == SPR_TYPE_EX_AC_RO)) { 8159 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8160 } 8161 } 8162 lun->res_type = msg->pr.pr_info.res_type; 8163 if (lun->res_type != SPR_TYPE_WR_EX_AR 8164 && lun->res_type != SPR_TYPE_EX_AC_AR) 8165 lun->pr_res_idx = msg->pr.pr_info.residx; 8166 else 8167 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8168 } 8169 lun->PRGeneration++; 8170 8171 } 8172 8173 8174 int 8175 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8176 { 8177 int retval; 8178 u_int32_t param_len; 8179 struct scsi_per_res_out *cdb; 8180 struct ctl_lun *lun; 8181 struct scsi_per_res_out_parms* param; 8182 struct ctl_softc *softc; 8183 uint32_t residx; 8184 uint64_t res_key, sa_res_key, key; 8185 uint8_t type; 8186 union ctl_ha_msg persis_io; 8187 int i; 8188 8189 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8190 8191 retval = CTL_RETVAL_COMPLETE; 8192 8193 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8194 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8195 softc = lun->ctl_softc; 8196 8197 /* 8198 * We only support whole-LUN scope. The scope & type are ignored for 8199 * register, register and ignore existing key and clear. 8200 * We sometimes ignore scope and type on preempts too!! 8201 * Verify reservation type here as well. 8202 */ 8203 type = cdb->scope_type & SPR_TYPE_MASK; 8204 if ((cdb->action == SPRO_RESERVE) 8205 || (cdb->action == SPRO_RELEASE)) { 8206 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8207 ctl_set_invalid_field(/*ctsio*/ ctsio, 8208 /*sks_valid*/ 1, 8209 /*command*/ 1, 8210 /*field*/ 2, 8211 /*bit_valid*/ 1, 8212 /*bit*/ 4); 8213 ctl_done((union ctl_io *)ctsio); 8214 return (CTL_RETVAL_COMPLETE); 8215 } 8216 8217 if (type>8 || type==2 || type==4 || type==0) { 8218 ctl_set_invalid_field(/*ctsio*/ ctsio, 8219 /*sks_valid*/ 1, 8220 /*command*/ 1, 8221 /*field*/ 2, 8222 /*bit_valid*/ 1, 8223 /*bit*/ 0); 8224 ctl_done((union ctl_io *)ctsio); 8225 return (CTL_RETVAL_COMPLETE); 8226 } 8227 } 8228 8229 param_len = scsi_4btoul(cdb->length); 8230 8231 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8232 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8233 ctsio->kern_data_len = param_len; 8234 ctsio->kern_total_len = param_len; 8235 ctsio->kern_data_resid = 0; 8236 ctsio->kern_rel_offset = 0; 8237 ctsio->kern_sg_entries = 0; 8238 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8239 ctsio->be_move_done = ctl_config_move_done; 8240 ctl_datamove((union ctl_io *)ctsio); 8241 8242 return (CTL_RETVAL_COMPLETE); 8243 } 8244 8245 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8246 8247 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8248 res_key = scsi_8btou64(param->res_key.key); 8249 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8250 8251 /* 8252 * Validate the reservation key here except for SPRO_REG_IGNO 8253 * This must be done for all other service actions 8254 */ 8255 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8256 mtx_lock(&lun->lun_lock); 8257 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8258 if (res_key != key) { 8259 /* 8260 * The current key passed in doesn't match 8261 * the one the initiator previously 8262 * registered. 
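 * Per SPC a mismatched reservation key is reported as a RESERVATION
 * CONFLICT, which is what ctl_set_reservation_conflict() sets up
 * below.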
8263 */ 8264 mtx_unlock(&lun->lun_lock); 8265 free(ctsio->kern_data_ptr, M_CTL); 8266 ctl_set_reservation_conflict(ctsio); 8267 ctl_done((union ctl_io *)ctsio); 8268 return (CTL_RETVAL_COMPLETE); 8269 } 8270 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8271 /* 8272 * We are not registered 8273 */ 8274 mtx_unlock(&lun->lun_lock); 8275 free(ctsio->kern_data_ptr, M_CTL); 8276 ctl_set_reservation_conflict(ctsio); 8277 ctl_done((union ctl_io *)ctsio); 8278 return (CTL_RETVAL_COMPLETE); 8279 } else if (res_key != 0) { 8280 /* 8281 * We are not registered and trying to register but 8282 * the register key isn't zero. 8283 */ 8284 mtx_unlock(&lun->lun_lock); 8285 free(ctsio->kern_data_ptr, M_CTL); 8286 ctl_set_reservation_conflict(ctsio); 8287 ctl_done((union ctl_io *)ctsio); 8288 return (CTL_RETVAL_COMPLETE); 8289 } 8290 mtx_unlock(&lun->lun_lock); 8291 } 8292 8293 switch (cdb->action & SPRO_ACTION_MASK) { 8294 case SPRO_REGISTER: 8295 case SPRO_REG_IGNO: { 8296 8297 #if 0 8298 printf("Registration received\n"); 8299 #endif 8300 8301 /* 8302 * We don't support any of these options, as we report in 8303 * the read capabilities request (see 8304 * ctl_persistent_reserve_in(), above). 8305 */ 8306 if ((param->flags & SPR_SPEC_I_PT) 8307 || (param->flags & SPR_ALL_TG_PT) 8308 || (param->flags & SPR_APTPL)) { 8309 int bit_ptr; 8310 8311 if (param->flags & SPR_APTPL) 8312 bit_ptr = 0; 8313 else if (param->flags & SPR_ALL_TG_PT) 8314 bit_ptr = 2; 8315 else /* SPR_SPEC_I_PT */ 8316 bit_ptr = 3; 8317 8318 free(ctsio->kern_data_ptr, M_CTL); 8319 ctl_set_invalid_field(ctsio, 8320 /*sks_valid*/ 1, 8321 /*command*/ 0, 8322 /*field*/ 20, 8323 /*bit_valid*/ 1, 8324 /*bit*/ bit_ptr); 8325 ctl_done((union ctl_io *)ctsio); 8326 return (CTL_RETVAL_COMPLETE); 8327 } 8328 8329 mtx_lock(&lun->lun_lock); 8330 8331 /* 8332 * The initiator wants to clear the 8333 * key/unregister. 8334 */ 8335 if (sa_res_key == 0) { 8336 if ((res_key == 0 8337 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8338 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8339 && ctl_get_prkey(lun, residx) == 0)) { 8340 mtx_unlock(&lun->lun_lock); 8341 goto done; 8342 } 8343 8344 ctl_clr_prkey(lun, residx); 8345 lun->pr_key_count--; 8346 8347 if (residx == lun->pr_res_idx) { 8348 lun->flags &= ~CTL_LUN_PR_RESERVED; 8349 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8350 8351 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8352 || lun->res_type == SPR_TYPE_EX_AC_RO) 8353 && lun->pr_key_count) { 8354 /* 8355 * If the reservation is a registrants 8356 * only type we need to generate a UA 8357 * for other registered inits. 
The 8358 * sense code should be RESERVATIONS 8359 * RELEASED 8360 */ 8361 8362 for (i = softc->init_min; i < softc->init_max; i++){ 8363 if (ctl_get_prkey(lun, i) == 0) 8364 continue; 8365 ctl_est_ua(lun, i, 8366 CTL_UA_RES_RELEASE); 8367 } 8368 } 8369 lun->res_type = 0; 8370 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8371 if (lun->pr_key_count==0) { 8372 lun->flags &= ~CTL_LUN_PR_RESERVED; 8373 lun->res_type = 0; 8374 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8375 } 8376 } 8377 lun->PRGeneration++; 8378 mtx_unlock(&lun->lun_lock); 8379 8380 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8381 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8382 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8383 persis_io.pr.pr_info.residx = residx; 8384 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8385 sizeof(persis_io.pr), M_WAITOK); 8386 } else /* sa_res_key != 0 */ { 8387 8388 /* 8389 * If we aren't registered currently then increment 8390 * the key count and set the registered flag. 8391 */ 8392 ctl_alloc_prkey(lun, residx); 8393 if (ctl_get_prkey(lun, residx) == 0) 8394 lun->pr_key_count++; 8395 ctl_set_prkey(lun, residx, sa_res_key); 8396 lun->PRGeneration++; 8397 mtx_unlock(&lun->lun_lock); 8398 8399 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8400 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8401 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8402 persis_io.pr.pr_info.residx = residx; 8403 memcpy(persis_io.pr.pr_info.sa_res_key, 8404 param->serv_act_res_key, 8405 sizeof(param->serv_act_res_key)); 8406 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8407 sizeof(persis_io.pr), M_WAITOK); 8408 } 8409 8410 break; 8411 } 8412 case SPRO_RESERVE: 8413 #if 0 8414 printf("Reserve executed type %d\n", type); 8415 #endif 8416 mtx_lock(&lun->lun_lock); 8417 if (lun->flags & CTL_LUN_PR_RESERVED) { 8418 /* 8419 * if this isn't the reservation holder and it's 8420 * not a "all registrants" type or if the type is 8421 * different then we have a conflict 8422 */ 8423 if ((lun->pr_res_idx != residx 8424 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8425 || lun->res_type != type) { 8426 mtx_unlock(&lun->lun_lock); 8427 free(ctsio->kern_data_ptr, M_CTL); 8428 ctl_set_reservation_conflict(ctsio); 8429 ctl_done((union ctl_io *)ctsio); 8430 return (CTL_RETVAL_COMPLETE); 8431 } 8432 mtx_unlock(&lun->lun_lock); 8433 } else /* create a reservation */ { 8434 /* 8435 * If it's not an "all registrants" type record 8436 * reservation holder 8437 */ 8438 if (type != SPR_TYPE_WR_EX_AR 8439 && type != SPR_TYPE_EX_AC_AR) 8440 lun->pr_res_idx = residx; /* Res holder */ 8441 else 8442 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8443 8444 lun->flags |= CTL_LUN_PR_RESERVED; 8445 lun->res_type = type; 8446 8447 mtx_unlock(&lun->lun_lock); 8448 8449 /* send msg to other side */ 8450 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8451 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8452 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8453 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8454 persis_io.pr.pr_info.res_type = type; 8455 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8456 sizeof(persis_io.pr), M_WAITOK); 8457 } 8458 break; 8459 8460 case SPRO_RELEASE: 8461 mtx_lock(&lun->lun_lock); 8462 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8463 /* No reservation exists return good status */ 8464 mtx_unlock(&lun->lun_lock); 8465 goto done; 8466 } 8467 /* 8468 * Is this nexus a reservation holder? 
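 * A holder releasing with a type that doesn't match the active
 * reservation gets an invalid release error (the res_type check
 * below); only a release from the holder (or from any registrant,
 * for all-registrants types) with a matching type actually releases.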
8469 */ 8470 if (lun->pr_res_idx != residx 8471 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8472 /* 8473 * not a res holder; return good status but 8474 * do nothing 8475 */ 8476 mtx_unlock(&lun->lun_lock); 8477 goto done; 8478 } 8479 8480 if (lun->res_type != type) { 8481 mtx_unlock(&lun->lun_lock); 8482 free(ctsio->kern_data_ptr, M_CTL); 8483 ctl_set_illegal_pr_release(ctsio); 8484 ctl_done((union ctl_io *)ctsio); 8485 return (CTL_RETVAL_COMPLETE); 8486 } 8487 8488 /* okay to release */ 8489 lun->flags &= ~CTL_LUN_PR_RESERVED; 8490 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8491 lun->res_type = 0; 8492 8493 /* 8494 * if this isn't an exclusive access 8495 * res, generate a UA for all other 8496 * registrants. 8497 */ 8498 if (type != SPR_TYPE_EX_AC 8499 && type != SPR_TYPE_WR_EX) { 8500 for (i = softc->init_min; i < softc->init_max; i++) { 8501 if (i == residx || ctl_get_prkey(lun, i) == 0) 8502 continue; 8503 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8504 } 8505 } 8506 mtx_unlock(&lun->lun_lock); 8507 8508 /* Send msg to other side */ 8509 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8510 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8511 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8512 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8513 sizeof(persis_io.pr), M_WAITOK); 8514 break; 8515 8516 case SPRO_CLEAR: 8517 /* send msg to other side */ 8518 8519 mtx_lock(&lun->lun_lock); 8520 lun->flags &= ~CTL_LUN_PR_RESERVED; 8521 lun->res_type = 0; 8522 lun->pr_key_count = 0; 8523 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8524 8525 ctl_clr_prkey(lun, residx); 8526 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8527 if (ctl_get_prkey(lun, i) != 0) { 8528 ctl_clr_prkey(lun, i); 8529 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8530 } 8531 lun->PRGeneration++; 8532 mtx_unlock(&lun->lun_lock); 8533 8534 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8535 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8536 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8537 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8538 sizeof(persis_io.pr), M_WAITOK); 8539 break; 8540 8541 case SPRO_PREEMPT: 8542 case SPRO_PRE_ABO: { 8543 int nretval; 8544 8545 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8546 residx, ctsio, cdb, param); 8547 if (nretval != 0) 8548 return (CTL_RETVAL_COMPLETE); 8549 break; 8550 } 8551 default: 8552 panic("Invalid PR type %x", cdb->action); 8553 } 8554 8555 done: 8556 free(ctsio->kern_data_ptr, M_CTL); 8557 ctl_set_success(ctsio); 8558 ctl_done((union ctl_io *)ctsio); 8559 8560 return (retval); 8561 } 8562 8563 /* 8564 * This routine is for handling a message from the other SC pertaining to 8565 * persistent reserve out. All the error checking will have been done 8566 * so only the action itself needs to be performed here to keep the two 8567 * in sync.
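 *
 * The pr_info.action field selects which update to apply: register or
 * unregister a key, reserve, release, preempt, or clear.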
8568 */ 8569 static void 8570 ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg) 8571 { 8572 struct ctl_lun *lun; 8573 struct ctl_softc *softc; 8574 int i; 8575 uint32_t residx, targ_lun; 8576 8577 softc = control_softc; 8578 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8579 mtx_lock(&softc->ctl_lock); 8580 if ((targ_lun >= CTL_MAX_LUNS) || 8581 ((lun = softc->ctl_luns[targ_lun]) == NULL)) { 8582 mtx_unlock(&softc->ctl_lock); 8583 return; 8584 } 8585 mtx_lock(&lun->lun_lock); 8586 mtx_unlock(&softc->ctl_lock); 8587 if (lun->flags & CTL_LUN_DISABLED) { 8588 mtx_unlock(&lun->lun_lock); 8589 return; 8590 } 8591 residx = ctl_get_initindex(&msg->hdr.nexus); 8592 switch(msg->pr.pr_info.action) { 8593 case CTL_PR_REG_KEY: 8594 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8595 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8596 lun->pr_key_count++; 8597 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8598 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8599 lun->PRGeneration++; 8600 break; 8601 8602 case CTL_PR_UNREG_KEY: 8603 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8604 lun->pr_key_count--; 8605 8606 /* XXX Need to see if the reservation has been released */ 8607 /* if so do we need to generate UA? */ 8608 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8609 lun->flags &= ~CTL_LUN_PR_RESERVED; 8610 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8611 8612 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8613 || lun->res_type == SPR_TYPE_EX_AC_RO) 8614 && lun->pr_key_count) { 8615 /* 8616 * If the reservation is a registrants 8617 * only type we need to generate a UA 8618 * for other registered inits. The 8619 * sense code should be RESERVATIONS 8620 * RELEASED 8621 */ 8622 8623 for (i = softc->init_min; i < softc->init_max; i++) { 8624 if (ctl_get_prkey(lun, i) == 0) 8625 continue; 8626 8627 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8628 } 8629 } 8630 lun->res_type = 0; 8631 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8632 if (lun->pr_key_count==0) { 8633 lun->flags &= ~CTL_LUN_PR_RESERVED; 8634 lun->res_type = 0; 8635 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8636 } 8637 } 8638 lun->PRGeneration++; 8639 break; 8640 8641 case CTL_PR_RESERVE: 8642 lun->flags |= CTL_LUN_PR_RESERVED; 8643 lun->res_type = msg->pr.pr_info.res_type; 8644 lun->pr_res_idx = msg->pr.pr_info.residx; 8645 8646 break; 8647 8648 case CTL_PR_RELEASE: 8649 /* 8650 * if this isn't an exclusive access res generate UA for all 8651 * other registrants. 
8652 */ 8653 if (lun->res_type != SPR_TYPE_EX_AC 8654 && lun->res_type != SPR_TYPE_WR_EX) { 8655 for (i = softc->init_min; i < softc->init_max; i++) 8656 if (i == residx || ctl_get_prkey(lun, i) == 0) 8657 continue; 8658 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8659 } 8660 8661 lun->flags &= ~CTL_LUN_PR_RESERVED; 8662 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8663 lun->res_type = 0; 8664 break; 8665 8666 case CTL_PR_PREEMPT: 8667 ctl_pro_preempt_other(lun, msg); 8668 break; 8669 case CTL_PR_CLEAR: 8670 lun->flags &= ~CTL_LUN_PR_RESERVED; 8671 lun->res_type = 0; 8672 lun->pr_key_count = 0; 8673 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8674 8675 for (i=0; i < CTL_MAX_INITIATORS; i++) { 8676 if (ctl_get_prkey(lun, i) == 0) 8677 continue; 8678 ctl_clr_prkey(lun, i); 8679 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8680 } 8681 lun->PRGeneration++; 8682 break; 8683 } 8684 8685 mtx_unlock(&lun->lun_lock); 8686 } 8687 8688 int 8689 ctl_read_write(struct ctl_scsiio *ctsio) 8690 { 8691 struct ctl_lun *lun; 8692 struct ctl_lba_len_flags *lbalen; 8693 uint64_t lba; 8694 uint32_t num_blocks; 8695 int flags, retval; 8696 int isread; 8697 8698 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8699 8700 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8701 8702 flags = 0; 8703 retval = CTL_RETVAL_COMPLETE; 8704 8705 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8706 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8707 switch (ctsio->cdb[0]) { 8708 case READ_6: 8709 case WRITE_6: { 8710 struct scsi_rw_6 *cdb; 8711 8712 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8713 8714 lba = scsi_3btoul(cdb->addr); 8715 /* only 5 bits are valid in the most significant address byte */ 8716 lba &= 0x1fffff; 8717 num_blocks = cdb->length; 8718 /* 8719 * This is correct according to SBC-2. 
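 * For example, a READ(6) or WRITE(6) transfer length byte of 0
 * requests 256 blocks, while values 1-255 are used as given.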
8720 */ 8721 if (num_blocks == 0) 8722 num_blocks = 256; 8723 break; 8724 } 8725 case READ_10: 8726 case WRITE_10: { 8727 struct scsi_rw_10 *cdb; 8728 8729 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8730 if (cdb->byte2 & SRW10_FUA) 8731 flags |= CTL_LLF_FUA; 8732 if (cdb->byte2 & SRW10_DPO) 8733 flags |= CTL_LLF_DPO; 8734 lba = scsi_4btoul(cdb->addr); 8735 num_blocks = scsi_2btoul(cdb->length); 8736 break; 8737 } 8738 case WRITE_VERIFY_10: { 8739 struct scsi_write_verify_10 *cdb; 8740 8741 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8742 flags |= CTL_LLF_FUA; 8743 if (cdb->byte2 & SWV_DPO) 8744 flags |= CTL_LLF_DPO; 8745 lba = scsi_4btoul(cdb->addr); 8746 num_blocks = scsi_2btoul(cdb->length); 8747 break; 8748 } 8749 case READ_12: 8750 case WRITE_12: { 8751 struct scsi_rw_12 *cdb; 8752 8753 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8754 if (cdb->byte2 & SRW12_FUA) 8755 flags |= CTL_LLF_FUA; 8756 if (cdb->byte2 & SRW12_DPO) 8757 flags |= CTL_LLF_DPO; 8758 lba = scsi_4btoul(cdb->addr); 8759 num_blocks = scsi_4btoul(cdb->length); 8760 break; 8761 } 8762 case WRITE_VERIFY_12: { 8763 struct scsi_write_verify_12 *cdb; 8764 8765 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8766 flags |= CTL_LLF_FUA; 8767 if (cdb->byte2 & SWV_DPO) 8768 flags |= CTL_LLF_DPO; 8769 lba = scsi_4btoul(cdb->addr); 8770 num_blocks = scsi_4btoul(cdb->length); 8771 break; 8772 } 8773 case READ_16: 8774 case WRITE_16: { 8775 struct scsi_rw_16 *cdb; 8776 8777 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8778 if (cdb->byte2 & SRW12_FUA) 8779 flags |= CTL_LLF_FUA; 8780 if (cdb->byte2 & SRW12_DPO) 8781 flags |= CTL_LLF_DPO; 8782 lba = scsi_8btou64(cdb->addr); 8783 num_blocks = scsi_4btoul(cdb->length); 8784 break; 8785 } 8786 case WRITE_ATOMIC_16: { 8787 struct scsi_rw_16 *cdb; 8788 8789 if (lun->be_lun->atomicblock == 0) { 8790 ctl_set_invalid_opcode(ctsio); 8791 ctl_done((union ctl_io *)ctsio); 8792 return (CTL_RETVAL_COMPLETE); 8793 } 8794 8795 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8796 if (cdb->byte2 & SRW12_FUA) 8797 flags |= CTL_LLF_FUA; 8798 if (cdb->byte2 & SRW12_DPO) 8799 flags |= CTL_LLF_DPO; 8800 lba = scsi_8btou64(cdb->addr); 8801 num_blocks = scsi_4btoul(cdb->length); 8802 if (num_blocks > lun->be_lun->atomicblock) { 8803 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8804 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8805 /*bit*/ 0); 8806 ctl_done((union ctl_io *)ctsio); 8807 return (CTL_RETVAL_COMPLETE); 8808 } 8809 break; 8810 } 8811 case WRITE_VERIFY_16: { 8812 struct scsi_write_verify_16 *cdb; 8813 8814 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8815 flags |= CTL_LLF_FUA; 8816 if (cdb->byte2 & SWV_DPO) 8817 flags |= CTL_LLF_DPO; 8818 lba = scsi_8btou64(cdb->addr); 8819 num_blocks = scsi_4btoul(cdb->length); 8820 break; 8821 } 8822 default: 8823 /* 8824 * We got a command we don't support. This shouldn't 8825 * happen, commands should be filtered out above us. 8826 */ 8827 ctl_set_invalid_opcode(ctsio); 8828 ctl_done((union ctl_io *)ctsio); 8829 8830 return (CTL_RETVAL_COMPLETE); 8831 break; /* NOTREACHED */ 8832 } 8833 8834 /* 8835 * The first check is to make sure we're in bounds, the second 8836 * check is to catch wrap-around problems. If the lba + num blocks 8837 * is less than the lba, then we've wrapped around and the block 8838 * range is invalid anyway. 
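 * For example, with maxlba = 999, lba = 990 and num_blocks = 20 fails
 * the first check, while lba = 0xfffffffffffffff0 with num_blocks =
 * 0x20 wraps past zero and is caught by the second check.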
8839 */ 8840 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8841 || ((lba + num_blocks) < lba)) { 8842 ctl_set_lba_out_of_range(ctsio); 8843 ctl_done((union ctl_io *)ctsio); 8844 return (CTL_RETVAL_COMPLETE); 8845 } 8846 8847 /* 8848 * According to SBC-3, a transfer length of 0 is not an error. 8849 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8850 * translates to 256 blocks for those commands. 8851 */ 8852 if (num_blocks == 0) { 8853 ctl_set_success(ctsio); 8854 ctl_done((union ctl_io *)ctsio); 8855 return (CTL_RETVAL_COMPLETE); 8856 } 8857 8858 /* Set FUA and/or DPO if caches are disabled. */ 8859 if (isread) { 8860 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8861 SCP_RCD) != 0) 8862 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8863 } else { 8864 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8865 SCP_WCE) == 0) 8866 flags |= CTL_LLF_FUA; 8867 } 8868 8869 lbalen = (struct ctl_lba_len_flags *) 8870 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8871 lbalen->lba = lba; 8872 lbalen->len = num_blocks; 8873 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8874 8875 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8876 ctsio->kern_rel_offset = 0; 8877 8878 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8879 8880 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8881 8882 return (retval); 8883 } 8884 8885 static int 8886 ctl_cnw_cont(union ctl_io *io) 8887 { 8888 struct ctl_scsiio *ctsio; 8889 struct ctl_lun *lun; 8890 struct ctl_lba_len_flags *lbalen; 8891 int retval; 8892 8893 ctsio = &io->scsiio; 8894 ctsio->io_hdr.status = CTL_STATUS_NONE; 8895 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8896 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8897 lbalen = (struct ctl_lba_len_flags *) 8898 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8899 lbalen->flags &= ~CTL_LLF_COMPARE; 8900 lbalen->flags |= CTL_LLF_WRITE; 8901 8902 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8903 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8904 return (retval); 8905 } 8906 8907 int 8908 ctl_cnw(struct ctl_scsiio *ctsio) 8909 { 8910 struct ctl_lun *lun; 8911 struct ctl_lba_len_flags *lbalen; 8912 uint64_t lba; 8913 uint32_t num_blocks; 8914 int flags, retval; 8915 8916 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8917 8918 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8919 8920 flags = 0; 8921 retval = CTL_RETVAL_COMPLETE; 8922 8923 switch (ctsio->cdb[0]) { 8924 case COMPARE_AND_WRITE: { 8925 struct scsi_compare_and_write *cdb; 8926 8927 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8928 if (cdb->byte2 & SRW10_FUA) 8929 flags |= CTL_LLF_FUA; 8930 if (cdb->byte2 & SRW10_DPO) 8931 flags |= CTL_LLF_DPO; 8932 lba = scsi_8btou64(cdb->addr); 8933 num_blocks = cdb->length; 8934 break; 8935 } 8936 default: 8937 /* 8938 * We got a command we don't support. This shouldn't 8939 * happen, commands should be filtered out above us. 8940 */ 8941 ctl_set_invalid_opcode(ctsio); 8942 ctl_done((union ctl_io *)ctsio); 8943 8944 return (CTL_RETVAL_COMPLETE); 8945 break; /* NOTREACHED */ 8946 } 8947 8948 /* 8949 * The first check is to make sure we're in bounds, the second 8950 * check is to catch wrap-around problems. If the lba + num blocks 8951 * is less than the lba, then we've wrapped around and the block 8952 * range is invalid anyway. 
8953 */ 8954 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8955 || ((lba + num_blocks) < lba)) { 8956 ctl_set_lba_out_of_range(ctsio); 8957 ctl_done((union ctl_io *)ctsio); 8958 return (CTL_RETVAL_COMPLETE); 8959 } 8960 8961 /* 8962 * According to SBC-3, a transfer length of 0 is not an error. 8963 */ 8964 if (num_blocks == 0) { 8965 ctl_set_success(ctsio); 8966 ctl_done((union ctl_io *)ctsio); 8967 return (CTL_RETVAL_COMPLETE); 8968 } 8969 8970 /* Set FUA if write cache is disabled. */ 8971 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8972 SCP_WCE) == 0) 8973 flags |= CTL_LLF_FUA; 8974 8975 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 8976 ctsio->kern_rel_offset = 0; 8977 8978 /* 8979 * Set the IO_CONT flag, so that if this I/O gets passed to 8980 * ctl_data_submit_done(), it'll get passed back to 8981 * ctl_ctl_cnw_cont() for further processing. 8982 */ 8983 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 8984 ctsio->io_cont = ctl_cnw_cont; 8985 8986 lbalen = (struct ctl_lba_len_flags *) 8987 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8988 lbalen->lba = lba; 8989 lbalen->len = num_blocks; 8990 lbalen->flags = CTL_LLF_COMPARE | flags; 8991 8992 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 8993 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8994 return (retval); 8995 } 8996 8997 int 8998 ctl_verify(struct ctl_scsiio *ctsio) 8999 { 9000 struct ctl_lun *lun; 9001 struct ctl_lba_len_flags *lbalen; 9002 uint64_t lba; 9003 uint32_t num_blocks; 9004 int bytchk, flags; 9005 int retval; 9006 9007 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9008 9009 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 9010 9011 bytchk = 0; 9012 flags = CTL_LLF_FUA; 9013 retval = CTL_RETVAL_COMPLETE; 9014 9015 switch (ctsio->cdb[0]) { 9016 case VERIFY_10: { 9017 struct scsi_verify_10 *cdb; 9018 9019 cdb = (struct scsi_verify_10 *)ctsio->cdb; 9020 if (cdb->byte2 & SVFY_BYTCHK) 9021 bytchk = 1; 9022 if (cdb->byte2 & SVFY_DPO) 9023 flags |= CTL_LLF_DPO; 9024 lba = scsi_4btoul(cdb->addr); 9025 num_blocks = scsi_2btoul(cdb->length); 9026 break; 9027 } 9028 case VERIFY_12: { 9029 struct scsi_verify_12 *cdb; 9030 9031 cdb = (struct scsi_verify_12 *)ctsio->cdb; 9032 if (cdb->byte2 & SVFY_BYTCHK) 9033 bytchk = 1; 9034 if (cdb->byte2 & SVFY_DPO) 9035 flags |= CTL_LLF_DPO; 9036 lba = scsi_4btoul(cdb->addr); 9037 num_blocks = scsi_4btoul(cdb->length); 9038 break; 9039 } 9040 case VERIFY_16: { 9041 struct scsi_rw_16 *cdb; 9042 9043 cdb = (struct scsi_rw_16 *)ctsio->cdb; 9044 if (cdb->byte2 & SVFY_BYTCHK) 9045 bytchk = 1; 9046 if (cdb->byte2 & SVFY_DPO) 9047 flags |= CTL_LLF_DPO; 9048 lba = scsi_8btou64(cdb->addr); 9049 num_blocks = scsi_4btoul(cdb->length); 9050 break; 9051 } 9052 default: 9053 /* 9054 * We got a command we don't support. This shouldn't 9055 * happen, commands should be filtered out above us. 9056 */ 9057 ctl_set_invalid_opcode(ctsio); 9058 ctl_done((union ctl_io *)ctsio); 9059 return (CTL_RETVAL_COMPLETE); 9060 } 9061 9062 /* 9063 * The first check is to make sure we're in bounds, the second 9064 * check is to catch wrap-around problems. If the lba + num blocks 9065 * is less than the lba, then we've wrapped around and the block 9066 * range is invalid anyway. 
9067 */ 9068 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9069 || ((lba + num_blocks) < lba)) { 9070 ctl_set_lba_out_of_range(ctsio); 9071 ctl_done((union ctl_io *)ctsio); 9072 return (CTL_RETVAL_COMPLETE); 9073 } 9074 9075 /* 9076 * According to SBC-3, a transfer length of 0 is not an error. 9077 */ 9078 if (num_blocks == 0) { 9079 ctl_set_success(ctsio); 9080 ctl_done((union ctl_io *)ctsio); 9081 return (CTL_RETVAL_COMPLETE); 9082 } 9083 9084 lbalen = (struct ctl_lba_len_flags *) 9085 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9086 lbalen->lba = lba; 9087 lbalen->len = num_blocks; 9088 if (bytchk) { 9089 lbalen->flags = CTL_LLF_COMPARE | flags; 9090 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9091 } else { 9092 lbalen->flags = CTL_LLF_VERIFY | flags; 9093 ctsio->kern_total_len = 0; 9094 } 9095 ctsio->kern_rel_offset = 0; 9096 9097 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 9098 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9099 return (retval); 9100 } 9101 9102 int 9103 ctl_report_luns(struct ctl_scsiio *ctsio) 9104 { 9105 struct ctl_softc *softc = control_softc; 9106 struct scsi_report_luns *cdb; 9107 struct scsi_report_luns_data *lun_data; 9108 struct ctl_lun *lun, *request_lun; 9109 struct ctl_port *port; 9110 int num_luns, retval; 9111 uint32_t alloc_len, lun_datalen; 9112 int num_filled, well_known; 9113 uint32_t initidx, targ_lun_id, lun_id; 9114 9115 retval = CTL_RETVAL_COMPLETE; 9116 well_known = 0; 9117 9118 cdb = (struct scsi_report_luns *)ctsio->cdb; 9119 port = ctl_io_port(&ctsio->io_hdr); 9120 9121 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9122 9123 mtx_lock(&softc->ctl_lock); 9124 num_luns = 0; 9125 for (targ_lun_id = 0; targ_lun_id < CTL_MAX_LUNS; targ_lun_id++) { 9126 if (ctl_lun_map_from_port(port, targ_lun_id) < CTL_MAX_LUNS) 9127 num_luns++; 9128 } 9129 mtx_unlock(&softc->ctl_lock); 9130 9131 switch (cdb->select_report) { 9132 case RPL_REPORT_DEFAULT: 9133 case RPL_REPORT_ALL: 9134 break; 9135 case RPL_REPORT_WELLKNOWN: 9136 well_known = 1; 9137 num_luns = 0; 9138 break; 9139 default: 9140 ctl_set_invalid_field(ctsio, 9141 /*sks_valid*/ 1, 9142 /*command*/ 1, 9143 /*field*/ 2, 9144 /*bit_valid*/ 0, 9145 /*bit*/ 0); 9146 ctl_done((union ctl_io *)ctsio); 9147 return (retval); 9148 break; /* NOTREACHED */ 9149 } 9150 9151 alloc_len = scsi_4btoul(cdb->length); 9152 /* 9153 * The initiator has to allocate at least 16 bytes for this request, 9154 * so he can at least get the header and the first LUN. Otherwise 9155 * we reject the request (per SPC-3 rev 14, section 6.21). 
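 * Those 16 bytes are the 8-byte report header plus a single 8-byte
 * LUN entry.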
9156 */ 9157 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9158 sizeof(struct scsi_report_luns_lundata))) { 9159 ctl_set_invalid_field(ctsio, 9160 /*sks_valid*/ 1, 9161 /*command*/ 1, 9162 /*field*/ 6, 9163 /*bit_valid*/ 0, 9164 /*bit*/ 0); 9165 ctl_done((union ctl_io *)ctsio); 9166 return (retval); 9167 } 9168 9169 request_lun = (struct ctl_lun *) 9170 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9171 9172 lun_datalen = sizeof(*lun_data) + 9173 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9174 9175 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9176 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9177 ctsio->kern_sg_entries = 0; 9178 9179 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9180 9181 mtx_lock(&softc->ctl_lock); 9182 for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) { 9183 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 9184 if (lun_id >= CTL_MAX_LUNS) 9185 continue; 9186 lun = softc->ctl_luns[lun_id]; 9187 if (lun == NULL) 9188 continue; 9189 9190 if (targ_lun_id <= 0xff) { 9191 /* 9192 * Peripheral addressing method, bus number 0. 9193 */ 9194 lun_data->luns[num_filled].lundata[0] = 9195 RPL_LUNDATA_ATYP_PERIPH; 9196 lun_data->luns[num_filled].lundata[1] = targ_lun_id; 9197 num_filled++; 9198 } else if (targ_lun_id <= 0x3fff) { 9199 /* 9200 * Flat addressing method. 9201 */ 9202 lun_data->luns[num_filled].lundata[0] = 9203 RPL_LUNDATA_ATYP_FLAT | (targ_lun_id >> 8); 9204 lun_data->luns[num_filled].lundata[1] = 9205 (targ_lun_id & 0xff); 9206 num_filled++; 9207 } else if (targ_lun_id <= 0xffffff) { 9208 /* 9209 * Extended flat addressing method. 9210 */ 9211 lun_data->luns[num_filled].lundata[0] = 9212 RPL_LUNDATA_ATYP_EXTLUN | 0x12; 9213 scsi_ulto3b(targ_lun_id, 9214 &lun_data->luns[num_filled].lundata[1]); 9215 num_filled++; 9216 } else { 9217 printf("ctl_report_luns: bogus LUN number %jd, " 9218 "skipping\n", (intmax_t)targ_lun_id); 9219 } 9220 /* 9221 * According to SPC-3, rev 14 section 6.21: 9222 * 9223 * "The execution of a REPORT LUNS command to any valid and 9224 * installed logical unit shall clear the REPORTED LUNS DATA 9225 * HAS CHANGED unit attention condition for all logical 9226 * units of that target with respect to the requesting 9227 * initiator. A valid and installed logical unit is one 9228 * having a PERIPHERAL QUALIFIER of 000b in the standard 9229 * INQUIRY data (see 6.4.2)." 9230 * 9231 * If request_lun is NULL, the LUN this report luns command 9232 * was issued to is either disabled or doesn't exist. In that 9233 * case, we shouldn't clear any pending lun change unit 9234 * attention. 9235 */ 9236 if (request_lun != NULL) { 9237 mtx_lock(&lun->lun_lock); 9238 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 9239 mtx_unlock(&lun->lun_lock); 9240 } 9241 } 9242 mtx_unlock(&softc->ctl_lock); 9243 9244 /* 9245 * It's quite possible that we've returned fewer LUNs than we allocated 9246 * space for. Trim it. 
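 * The unused portion of the allocation length is then reported back
 * to the initiator as the residual below.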
9247 */ 9248 lun_datalen = sizeof(*lun_data) + 9249 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9250 9251 if (lun_datalen < alloc_len) { 9252 ctsio->residual = alloc_len - lun_datalen; 9253 ctsio->kern_data_len = lun_datalen; 9254 ctsio->kern_total_len = lun_datalen; 9255 } else { 9256 ctsio->residual = 0; 9257 ctsio->kern_data_len = alloc_len; 9258 ctsio->kern_total_len = alloc_len; 9259 } 9260 ctsio->kern_data_resid = 0; 9261 ctsio->kern_rel_offset = 0; 9262 ctsio->kern_sg_entries = 0; 9263 9264 /* 9265 * We set this to the actual data length, regardless of how much 9266 * space we actually have to return results. If the user looks at 9267 * this value, he'll know whether or not he allocated enough space 9268 * and reissue the command if necessary. We don't support well 9269 * known logical units, so if the user asks for that, return none. 9270 */ 9271 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9272 9273 /* 9274 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9275 * this request. 9276 */ 9277 ctl_set_success(ctsio); 9278 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9279 ctsio->be_move_done = ctl_config_move_done; 9280 ctl_datamove((union ctl_io *)ctsio); 9281 return (retval); 9282 } 9283 9284 int 9285 ctl_request_sense(struct ctl_scsiio *ctsio) 9286 { 9287 struct scsi_request_sense *cdb; 9288 struct scsi_sense_data *sense_ptr; 9289 struct ctl_softc *ctl_softc; 9290 struct ctl_lun *lun; 9291 uint32_t initidx; 9292 int have_error; 9293 scsi_sense_data_type sense_format; 9294 ctl_ua_type ua_type; 9295 9296 cdb = (struct scsi_request_sense *)ctsio->cdb; 9297 9298 ctl_softc = control_softc; 9299 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9300 9301 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9302 9303 /* 9304 * Determine which sense format the user wants. 9305 */ 9306 if (cdb->byte2 & SRS_DESC) 9307 sense_format = SSD_TYPE_DESC; 9308 else 9309 sense_format = SSD_TYPE_FIXED; 9310 9311 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9312 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9313 ctsio->kern_sg_entries = 0; 9314 9315 /* 9316 * struct scsi_sense_data, which is currently set to 256 bytes, is 9317 * larger than the largest allowed value for the length field in the 9318 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9319 */ 9320 ctsio->residual = 0; 9321 ctsio->kern_data_len = cdb->length; 9322 ctsio->kern_total_len = cdb->length; 9323 9324 ctsio->kern_data_resid = 0; 9325 ctsio->kern_rel_offset = 0; 9326 ctsio->kern_sg_entries = 0; 9327 9328 /* 9329 * If we don't have a LUN, we don't have any pending sense. 9330 */ 9331 if (lun == NULL) 9332 goto no_sense; 9333 9334 have_error = 0; 9335 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9336 /* 9337 * Check for pending sense, and then for pending unit attentions. 9338 * Pending sense gets returned first, then pending unit attentions. 9339 */ 9340 mtx_lock(&lun->lun_lock); 9341 #ifdef CTL_WITH_CA 9342 if (ctl_is_set(lun->have_ca, initidx)) { 9343 scsi_sense_data_type stored_format; 9344 9345 /* 9346 * Check to see which sense format was used for the stored 9347 * sense data. 9348 */ 9349 stored_format = scsi_sense_type(&lun->pending_sense[initidx]); 9350 9351 /* 9352 * If the user requested a different sense format than the 9353 * one we stored, then we need to convert it to the other 9354 * format. If we're going from descriptor to fixed format 9355 * sense data, we may lose things in translation, depending 9356 * on what options were used. 
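 * (Descriptor format sense can carry data, such as the 8-byte
 * INFORMATION and COMMAND-SPECIFIC INFORMATION fields, that has no
 * room in the fixed format and may be dropped by the conversion.)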
9357 * 9358 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9359 * for some reason we'll just copy it out as-is. 9360 */ 9361 if ((stored_format == SSD_TYPE_FIXED) 9362 && (sense_format == SSD_TYPE_DESC)) 9363 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9364 &lun->pending_sense[initidx], 9365 (struct scsi_sense_data_desc *)sense_ptr); 9366 else if ((stored_format == SSD_TYPE_DESC) 9367 && (sense_format == SSD_TYPE_FIXED)) 9368 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9369 &lun->pending_sense[initidx], 9370 (struct scsi_sense_data_fixed *)sense_ptr); 9371 else 9372 memcpy(sense_ptr, &lun->pending_sense[initidx], 9373 MIN(sizeof(*sense_ptr), 9374 sizeof(lun->pending_sense[initidx]))); 9375 9376 ctl_clear_mask(lun->have_ca, initidx); 9377 have_error = 1; 9378 } else 9379 #endif 9380 { 9381 ua_type = ctl_build_ua(lun, initidx, sense_ptr, sense_format); 9382 if (ua_type != CTL_UA_NONE) 9383 have_error = 1; 9384 if (ua_type == CTL_UA_LUN_CHANGE) { 9385 mtx_unlock(&lun->lun_lock); 9386 mtx_lock(&ctl_softc->ctl_lock); 9387 ctl_clr_ua_allluns(ctl_softc, initidx, ua_type); 9388 mtx_unlock(&ctl_softc->ctl_lock); 9389 mtx_lock(&lun->lun_lock); 9390 } 9391 9392 } 9393 mtx_unlock(&lun->lun_lock); 9394 9395 /* 9396 * We already have a pending error, return it. 9397 */ 9398 if (have_error != 0) { 9399 /* 9400 * We report the SCSI status as OK, since the status of the 9401 * request sense command itself is OK. 9402 * We report 0 for the sense length, because we aren't doing 9403 * autosense in this case. We're reporting sense as 9404 * parameter data. 9405 */ 9406 ctl_set_success(ctsio); 9407 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9408 ctsio->be_move_done = ctl_config_move_done; 9409 ctl_datamove((union ctl_io *)ctsio); 9410 return (CTL_RETVAL_COMPLETE); 9411 } 9412 9413 no_sense: 9414 9415 /* 9416 * No sense information to report, so we report that everything is 9417 * okay. 9418 */ 9419 ctl_set_sense_data(sense_ptr, 9420 lun, 9421 sense_format, 9422 /*current_error*/ 1, 9423 /*sense_key*/ SSD_KEY_NO_SENSE, 9424 /*asc*/ 0x00, 9425 /*ascq*/ 0x00, 9426 SSD_ELEM_NONE); 9427 9428 /* 9429 * We report 0 for the sense length, because we aren't doing 9430 * autosense in this case. We're reporting sense as parameter data. 9431 */ 9432 ctl_set_success(ctsio); 9433 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9434 ctsio->be_move_done = ctl_config_move_done; 9435 ctl_datamove((union ctl_io *)ctsio); 9436 return (CTL_RETVAL_COMPLETE); 9437 } 9438 9439 int 9440 ctl_tur(struct ctl_scsiio *ctsio) 9441 { 9442 9443 CTL_DEBUG_PRINT(("ctl_tur\n")); 9444 9445 ctl_set_success(ctsio); 9446 ctl_done((union ctl_io *)ctsio); 9447 9448 return (CTL_RETVAL_COMPLETE); 9449 } 9450 9451 /* 9452 * SCSI VPD page 0x00, the Supported VPD Pages page. 
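 *
 * Returns the list of VPD page codes this LUN reports; direct access
 * LUNs additionally advertise the Block Limits, Block Device
 * Characteristics and Logical Block Provisioning pages below.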
9453 */ 9454 static int 9455 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9456 { 9457 struct scsi_vpd_supported_pages *pages; 9458 int sup_page_size; 9459 struct ctl_lun *lun; 9460 int p; 9461 9462 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9463 9464 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9465 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9466 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9467 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9468 ctsio->kern_sg_entries = 0; 9469 9470 if (sup_page_size < alloc_len) { 9471 ctsio->residual = alloc_len - sup_page_size; 9472 ctsio->kern_data_len = sup_page_size; 9473 ctsio->kern_total_len = sup_page_size; 9474 } else { 9475 ctsio->residual = 0; 9476 ctsio->kern_data_len = alloc_len; 9477 ctsio->kern_total_len = alloc_len; 9478 } 9479 ctsio->kern_data_resid = 0; 9480 ctsio->kern_rel_offset = 0; 9481 ctsio->kern_sg_entries = 0; 9482 9483 /* 9484 * The control device is always connected. The disk device, on the 9485 * other hand, may not be online all the time. Need to change this 9486 * to figure out whether the disk device is actually online or not. 9487 */ 9488 if (lun != NULL) 9489 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9490 lun->be_lun->lun_type; 9491 else 9492 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9493 9494 p = 0; 9495 /* Supported VPD pages */ 9496 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9497 /* Serial Number */ 9498 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9499 /* Device Identification */ 9500 pages->page_list[p++] = SVPD_DEVICE_ID; 9501 /* Extended INQUIRY Data */ 9502 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9503 /* Mode Page Policy */ 9504 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9505 /* SCSI Ports */ 9506 pages->page_list[p++] = SVPD_SCSI_PORTS; 9507 /* Third-party Copy */ 9508 pages->page_list[p++] = SVPD_SCSI_TPC; 9509 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9510 /* Block limits */ 9511 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9512 /* Block Device Characteristics */ 9513 pages->page_list[p++] = SVPD_BDC; 9514 /* Logical Block Provisioning */ 9515 pages->page_list[p++] = SVPD_LBP; 9516 } 9517 pages->length = p; 9518 9519 ctl_set_success(ctsio); 9520 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9521 ctsio->be_move_done = ctl_config_move_done; 9522 ctl_datamove((union ctl_io *)ctsio); 9523 return (CTL_RETVAL_COMPLETE); 9524 } 9525 9526 /* 9527 * SCSI VPD page 0x80, the Unit Serial Number page. 9528 */ 9529 static int 9530 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9531 { 9532 struct scsi_vpd_unit_serial_number *sn_ptr; 9533 struct ctl_lun *lun; 9534 int data_len; 9535 9536 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9537 9538 data_len = 4 + CTL_SN_LEN; 9539 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9540 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9541 if (data_len < alloc_len) { 9542 ctsio->residual = alloc_len - data_len; 9543 ctsio->kern_data_len = data_len; 9544 ctsio->kern_total_len = data_len; 9545 } else { 9546 ctsio->residual = 0; 9547 ctsio->kern_data_len = alloc_len; 9548 ctsio->kern_total_len = alloc_len; 9549 } 9550 ctsio->kern_data_resid = 0; 9551 ctsio->kern_rel_offset = 0; 9552 ctsio->kern_sg_entries = 0; 9553 9554 /* 9555 * The control device is always connected. The disk device, on the 9556 * other hand, may not be online all the time. 
Need to change this 9557 * to figure out whether the disk device is actually online or not. 9558 */ 9559 if (lun != NULL) 9560 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9561 lun->be_lun->lun_type; 9562 else 9563 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9564 9565 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9566 sn_ptr->length = CTL_SN_LEN; 9567 /* 9568 * If we don't have a LUN, we just leave the serial number as 9569 * all spaces. 9570 */ 9571 if (lun != NULL) { 9572 strncpy((char *)sn_ptr->serial_num, 9573 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9574 } else 9575 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9576 9577 ctl_set_success(ctsio); 9578 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9579 ctsio->be_move_done = ctl_config_move_done; 9580 ctl_datamove((union ctl_io *)ctsio); 9581 return (CTL_RETVAL_COMPLETE); 9582 } 9583 9584 9585 /* 9586 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 9587 */ 9588 static int 9589 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9590 { 9591 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9592 struct ctl_lun *lun; 9593 int data_len; 9594 9595 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9596 9597 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9598 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9599 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9600 ctsio->kern_sg_entries = 0; 9601 9602 if (data_len < alloc_len) { 9603 ctsio->residual = alloc_len - data_len; 9604 ctsio->kern_data_len = data_len; 9605 ctsio->kern_total_len = data_len; 9606 } else { 9607 ctsio->residual = 0; 9608 ctsio->kern_data_len = alloc_len; 9609 ctsio->kern_total_len = alloc_len; 9610 } 9611 ctsio->kern_data_resid = 0; 9612 ctsio->kern_rel_offset = 0; 9613 ctsio->kern_sg_entries = 0; 9614 9615 /* 9616 * The control device is always connected. The disk device, on the 9617 * other hand, may not be online all the time. 9618 */ 9619 if (lun != NULL) 9620 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9621 lun->be_lun->lun_type; 9622 else 9623 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9624 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9625 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9626 /* 9627 * We support head of queue, ordered and simple tags. 9628 */ 9629 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9630 /* 9631 * Volatile cache supported. 9632 */ 9633 eid_ptr->flags3 = SVPD_EID_V_SUP; 9634 9635 /* 9636 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9637 * attention for a particular IT nexus on all LUNs once we report 9638 * it to that nexus once. This bit is required as of SPC-4. 9639 */ 9640 eid_ptr->flags4 = SVPD_EID_LUICLT; 9641 9642 /* 9643 * XXX KDM in order to correctly answer this, we would need 9644 * information from the SIM to determine how much sense data it 9645 * can send. So this would really be a path inquiry field, most 9646 * likely. This can be set to a maximum of 252 according to SPC-4, 9647 * but the hardware may or may not be able to support that much. 9648 * 0 just means that the maximum sense data length is not reported. 
9649 */ 9650 eid_ptr->max_sense_length = 0; 9651 9652 ctl_set_success(ctsio); 9653 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9654 ctsio->be_move_done = ctl_config_move_done; 9655 ctl_datamove((union ctl_io *)ctsio); 9656 return (CTL_RETVAL_COMPLETE); 9657 } 9658 9659 static int 9660 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9661 { 9662 struct scsi_vpd_mode_page_policy *mpp_ptr; 9663 struct ctl_lun *lun; 9664 int data_len; 9665 9666 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9667 9668 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9669 sizeof(struct scsi_vpd_mode_page_policy_descr); 9670 9671 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9672 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9673 ctsio->kern_sg_entries = 0; 9674 9675 if (data_len < alloc_len) { 9676 ctsio->residual = alloc_len - data_len; 9677 ctsio->kern_data_len = data_len; 9678 ctsio->kern_total_len = data_len; 9679 } else { 9680 ctsio->residual = 0; 9681 ctsio->kern_data_len = alloc_len; 9682 ctsio->kern_total_len = alloc_len; 9683 } 9684 ctsio->kern_data_resid = 0; 9685 ctsio->kern_rel_offset = 0; 9686 ctsio->kern_sg_entries = 0; 9687 9688 /* 9689 * The control device is always connected. The disk device, on the 9690 * other hand, may not be online all the time. 9691 */ 9692 if (lun != NULL) 9693 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9694 lun->be_lun->lun_type; 9695 else 9696 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9697 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9698 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9699 mpp_ptr->descr[0].page_code = 0x3f; 9700 mpp_ptr->descr[0].subpage_code = 0xff; 9701 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9702 9703 ctl_set_success(ctsio); 9704 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9705 ctsio->be_move_done = ctl_config_move_done; 9706 ctl_datamove((union ctl_io *)ctsio); 9707 return (CTL_RETVAL_COMPLETE); 9708 } 9709 9710 /* 9711 * SCSI VPD page 0x83, the Device Identification page. 
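 *
 * The descriptor list is assembled below from the backend-supplied
 * LUN designator (if any), the port and target designators supplied
 * by the frontend, plus Relative Target Port and Target Port Group
 * identifiers generated here.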
9712 */ 9713 static int 9714 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9715 { 9716 struct scsi_vpd_device_id *devid_ptr; 9717 struct scsi_vpd_id_descriptor *desc; 9718 struct ctl_softc *softc; 9719 struct ctl_lun *lun; 9720 struct ctl_port *port; 9721 int data_len; 9722 uint8_t proto; 9723 9724 softc = control_softc; 9725 9726 port = ctl_io_port(&ctsio->io_hdr); 9727 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9728 9729 data_len = sizeof(struct scsi_vpd_device_id) + 9730 sizeof(struct scsi_vpd_id_descriptor) + 9731 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9732 sizeof(struct scsi_vpd_id_descriptor) + 9733 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9734 if (lun && lun->lun_devid) 9735 data_len += lun->lun_devid->len; 9736 if (port && port->port_devid) 9737 data_len += port->port_devid->len; 9738 if (port && port->target_devid) 9739 data_len += port->target_devid->len; 9740 9741 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9742 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9743 ctsio->kern_sg_entries = 0; 9744 9745 if (data_len < alloc_len) { 9746 ctsio->residual = alloc_len - data_len; 9747 ctsio->kern_data_len = data_len; 9748 ctsio->kern_total_len = data_len; 9749 } else { 9750 ctsio->residual = 0; 9751 ctsio->kern_data_len = alloc_len; 9752 ctsio->kern_total_len = alloc_len; 9753 } 9754 ctsio->kern_data_resid = 0; 9755 ctsio->kern_rel_offset = 0; 9756 ctsio->kern_sg_entries = 0; 9757 9758 /* 9759 * The control device is always connected. The disk device, on the 9760 * other hand, may not be online all the time. 9761 */ 9762 if (lun != NULL) 9763 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9764 lun->be_lun->lun_type; 9765 else 9766 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9767 devid_ptr->page_code = SVPD_DEVICE_ID; 9768 scsi_ulto2b(data_len - 4, devid_ptr->length); 9769 9770 if (port && port->port_type == CTL_PORT_FC) 9771 proto = SCSI_PROTO_FC << 4; 9772 else if (port && port->port_type == CTL_PORT_ISCSI) 9773 proto = SCSI_PROTO_ISCSI << 4; 9774 else 9775 proto = SCSI_PROTO_SPI << 4; 9776 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9777 9778 /* 9779 * We're using a LUN association here. i.e., this device ID is a 9780 * per-LUN identifier. 9781 */ 9782 if (lun && lun->lun_devid) { 9783 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9784 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9785 lun->lun_devid->len); 9786 } 9787 9788 /* 9789 * This is for the WWPN which is a port association. 
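 * The designator itself comes from the frontend port driver via
 * port->port_devid and is copied out verbatim.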
9790 */ 9791 if (port && port->port_devid) { 9792 memcpy(desc, port->port_devid->data, port->port_devid->len); 9793 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9794 port->port_devid->len); 9795 } 9796 9797 /* 9798 * This is for the Relative Target Port(type 4h) identifier 9799 */ 9800 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9801 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9802 SVPD_ID_TYPE_RELTARG; 9803 desc->length = 4; 9804 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9805 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9806 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9807 9808 /* 9809 * This is for the Target Port Group(type 5h) identifier 9810 */ 9811 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9812 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9813 SVPD_ID_TYPE_TPORTGRP; 9814 desc->length = 4; 9815 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / softc->port_cnt + 1, 9816 &desc->identifier[2]); 9817 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9818 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9819 9820 /* 9821 * This is for the Target identifier 9822 */ 9823 if (port && port->target_devid) { 9824 memcpy(desc, port->target_devid->data, port->target_devid->len); 9825 } 9826 9827 ctl_set_success(ctsio); 9828 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9829 ctsio->be_move_done = ctl_config_move_done; 9830 ctl_datamove((union ctl_io *)ctsio); 9831 return (CTL_RETVAL_COMPLETE); 9832 } 9833 9834 static int 9835 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9836 { 9837 struct ctl_softc *softc = control_softc; 9838 struct scsi_vpd_scsi_ports *sp; 9839 struct scsi_vpd_port_designation *pd; 9840 struct scsi_vpd_port_designation_cont *pdc; 9841 struct ctl_lun *lun; 9842 struct ctl_port *port; 9843 int data_len, num_target_ports, iid_len, id_len; 9844 9845 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9846 9847 num_target_ports = 0; 9848 iid_len = 0; 9849 id_len = 0; 9850 mtx_lock(&softc->ctl_lock); 9851 STAILQ_FOREACH(port, &softc->port_list, links) { 9852 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9853 continue; 9854 if (lun != NULL && 9855 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9856 continue; 9857 num_target_ports++; 9858 if (port->init_devid) 9859 iid_len += port->init_devid->len; 9860 if (port->port_devid) 9861 id_len += port->port_devid->len; 9862 } 9863 mtx_unlock(&softc->ctl_lock); 9864 9865 data_len = sizeof(struct scsi_vpd_scsi_ports) + 9866 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9867 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9868 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9869 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9870 ctsio->kern_sg_entries = 0; 9871 9872 if (data_len < alloc_len) { 9873 ctsio->residual = alloc_len - data_len; 9874 ctsio->kern_data_len = data_len; 9875 ctsio->kern_total_len = data_len; 9876 } else { 9877 ctsio->residual = 0; 9878 ctsio->kern_data_len = alloc_len; 9879 ctsio->kern_total_len = alloc_len; 9880 } 9881 ctsio->kern_data_resid = 0; 9882 ctsio->kern_rel_offset = 0; 9883 ctsio->kern_sg_entries = 0; 9884 9885 /* 9886 * The control device is always connected. The disk device, on the 9887 * other hand, may not be online all the time. Need to change this 9888 * to figure out whether the disk device is actually online or not. 
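	 * When there is no LUN we report an offline direct access device,
	 * the same convention used by the other VPD page handlers.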
9889 */ 9890 if (lun != NULL) 9891 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 9892 lun->be_lun->lun_type; 9893 else 9894 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9895 9896 sp->page_code = SVPD_SCSI_PORTS; 9897 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9898 sp->page_length); 9899 pd = &sp->design[0]; 9900 9901 mtx_lock(&softc->ctl_lock); 9902 STAILQ_FOREACH(port, &softc->port_list, links) { 9903 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9904 continue; 9905 if (lun != NULL && 9906 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9907 continue; 9908 scsi_ulto2b(port->targ_port, pd->relative_port_id); 9909 if (port->init_devid) { 9910 iid_len = port->init_devid->len; 9911 memcpy(pd->initiator_transportid, 9912 port->init_devid->data, port->init_devid->len); 9913 } else 9914 iid_len = 0; 9915 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9916 pdc = (struct scsi_vpd_port_designation_cont *) 9917 (&pd->initiator_transportid[iid_len]); 9918 if (port->port_devid) { 9919 id_len = port->port_devid->len; 9920 memcpy(pdc->target_port_descriptors, 9921 port->port_devid->data, port->port_devid->len); 9922 } else 9923 id_len = 0; 9924 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9925 pd = (struct scsi_vpd_port_designation *) 9926 ((uint8_t *)pdc->target_port_descriptors + id_len); 9927 } 9928 mtx_unlock(&softc->ctl_lock); 9929 9930 ctl_set_success(ctsio); 9931 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9932 ctsio->be_move_done = ctl_config_move_done; 9933 ctl_datamove((union ctl_io *)ctsio); 9934 return (CTL_RETVAL_COMPLETE); 9935 } 9936 9937 static int 9938 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9939 { 9940 struct scsi_vpd_block_limits *bl_ptr; 9941 struct ctl_lun *lun; 9942 int bs; 9943 9944 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9945 9946 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9947 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 9948 ctsio->kern_sg_entries = 0; 9949 9950 if (sizeof(*bl_ptr) < alloc_len) { 9951 ctsio->residual = alloc_len - sizeof(*bl_ptr); 9952 ctsio->kern_data_len = sizeof(*bl_ptr); 9953 ctsio->kern_total_len = sizeof(*bl_ptr); 9954 } else { 9955 ctsio->residual = 0; 9956 ctsio->kern_data_len = alloc_len; 9957 ctsio->kern_total_len = alloc_len; 9958 } 9959 ctsio->kern_data_resid = 0; 9960 ctsio->kern_rel_offset = 0; 9961 ctsio->kern_sg_entries = 0; 9962 9963 /* 9964 * The control device is always connected. The disk device, on the 9965 * other hand, may not be online all the time. Need to change this 9966 * to figure out whether the disk device is actually online or not. 
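	 *
	 * Most of the limits reported below are effectively unbounded;
	 * the UNMAP-related fields are only filled in when the backend
	 * advertises unmapping support.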
9967 */ 9968 if (lun != NULL) 9969 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9970 lun->be_lun->lun_type; 9971 else 9972 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9973 9974 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9975 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 9976 bl_ptr->max_cmp_write_len = 0xff; 9977 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9978 if (lun != NULL) { 9979 bs = lun->be_lun->blocksize; 9980 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 9981 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9982 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt); 9983 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt); 9984 if (lun->be_lun->ublockexp != 0) { 9985 scsi_ulto4b((1 << lun->be_lun->ublockexp), 9986 bl_ptr->opt_unmap_grain); 9987 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 9988 bl_ptr->unmap_grain_align); 9989 } 9990 } 9991 scsi_ulto4b(lun->be_lun->atomicblock, 9992 bl_ptr->max_atomic_transfer_length); 9993 scsi_ulto4b(0, bl_ptr->atomic_alignment); 9994 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 9995 } 9996 scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length); 9997 9998 ctl_set_success(ctsio); 9999 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10000 ctsio->be_move_done = ctl_config_move_done; 10001 ctl_datamove((union ctl_io *)ctsio); 10002 return (CTL_RETVAL_COMPLETE); 10003 } 10004 10005 static int 10006 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 10007 { 10008 struct scsi_vpd_block_device_characteristics *bdc_ptr; 10009 struct ctl_lun *lun; 10010 const char *value; 10011 u_int i; 10012 10013 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10014 10015 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 10016 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 10017 ctsio->kern_sg_entries = 0; 10018 10019 if (sizeof(*bdc_ptr) < alloc_len) { 10020 ctsio->residual = alloc_len - sizeof(*bdc_ptr); 10021 ctsio->kern_data_len = sizeof(*bdc_ptr); 10022 ctsio->kern_total_len = sizeof(*bdc_ptr); 10023 } else { 10024 ctsio->residual = 0; 10025 ctsio->kern_data_len = alloc_len; 10026 ctsio->kern_total_len = alloc_len; 10027 } 10028 ctsio->kern_data_resid = 0; 10029 ctsio->kern_rel_offset = 0; 10030 ctsio->kern_sg_entries = 0; 10031 10032 /* 10033 * The control device is always connected. The disk device, on the 10034 * other hand, may not be online all the time. Need to change this 10035 * to figure out whether the disk device is actually online or not. 
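	 *
	 * The rotation rate and form factor reported below are taken from
	 * the "rpm" and "formfactor" LUN options when present; otherwise
	 * defaults are used.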
10036 */ 10037 if (lun != NULL) 10038 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10039 lun->be_lun->lun_type; 10040 else 10041 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10042 bdc_ptr->page_code = SVPD_BDC; 10043 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 10044 if (lun != NULL && 10045 (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) 10046 i = strtol(value, NULL, 0); 10047 else 10048 i = CTL_DEFAULT_ROTATION_RATE; 10049 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 10050 if (lun != NULL && 10051 (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL) 10052 i = strtol(value, NULL, 0); 10053 else 10054 i = 0; 10055 bdc_ptr->wab_wac_ff = (i & 0x0f); 10056 bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; 10057 10058 ctl_set_success(ctsio); 10059 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10060 ctsio->be_move_done = ctl_config_move_done; 10061 ctl_datamove((union ctl_io *)ctsio); 10062 return (CTL_RETVAL_COMPLETE); 10063 } 10064 10065 static int 10066 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 10067 { 10068 struct scsi_vpd_logical_block_prov *lbp_ptr; 10069 struct ctl_lun *lun; 10070 10071 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10072 10073 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 10074 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 10075 ctsio->kern_sg_entries = 0; 10076 10077 if (sizeof(*lbp_ptr) < alloc_len) { 10078 ctsio->residual = alloc_len - sizeof(*lbp_ptr); 10079 ctsio->kern_data_len = sizeof(*lbp_ptr); 10080 ctsio->kern_total_len = sizeof(*lbp_ptr); 10081 } else { 10082 ctsio->residual = 0; 10083 ctsio->kern_data_len = alloc_len; 10084 ctsio->kern_total_len = alloc_len; 10085 } 10086 ctsio->kern_data_resid = 0; 10087 ctsio->kern_rel_offset = 0; 10088 ctsio->kern_sg_entries = 0; 10089 10090 /* 10091 * The control device is always connected. The disk device, on the 10092 * other hand, may not be online all the time. Need to change this 10093 * to figure out whether the disk device is actually online or not. 10094 */ 10095 if (lun != NULL) 10096 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10097 lun->be_lun->lun_type; 10098 else 10099 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10100 10101 lbp_ptr->page_code = SVPD_LBP; 10102 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 10103 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 10104 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 10105 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 10106 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 10107 lbp_ptr->prov_type = SVPD_LBP_THIN; 10108 } 10109 10110 ctl_set_success(ctsio); 10111 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10112 ctsio->be_move_done = ctl_config_move_done; 10113 ctl_datamove((union ctl_io *)ctsio); 10114 return (CTL_RETVAL_COMPLETE); 10115 } 10116 10117 /* 10118 * INQUIRY with the EVPD bit set. 
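 *
 * Dispatch on the VPD page code from the CDB.  Pages that only make
 * sense for direct access LUNs are rejected for other LUN types.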
10119 */ 10120 static int 10121 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 10122 { 10123 struct ctl_lun *lun; 10124 struct scsi_inquiry *cdb; 10125 int alloc_len, retval; 10126 10127 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10128 cdb = (struct scsi_inquiry *)ctsio->cdb; 10129 alloc_len = scsi_2btoul(cdb->length); 10130 10131 switch (cdb->page_code) { 10132 case SVPD_SUPPORTED_PAGES: 10133 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 10134 break; 10135 case SVPD_UNIT_SERIAL_NUMBER: 10136 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 10137 break; 10138 case SVPD_DEVICE_ID: 10139 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 10140 break; 10141 case SVPD_EXTENDED_INQUIRY_DATA: 10142 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 10143 break; 10144 case SVPD_MODE_PAGE_POLICY: 10145 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 10146 break; 10147 case SVPD_SCSI_PORTS: 10148 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 10149 break; 10150 case SVPD_SCSI_TPC: 10151 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 10152 break; 10153 case SVPD_BLOCK_LIMITS: 10154 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10155 goto err; 10156 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 10157 break; 10158 case SVPD_BDC: 10159 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10160 goto err; 10161 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 10162 break; 10163 case SVPD_LBP: 10164 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10165 goto err; 10166 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 10167 break; 10168 default: 10169 err: 10170 ctl_set_invalid_field(ctsio, 10171 /*sks_valid*/ 1, 10172 /*command*/ 1, 10173 /*field*/ 2, 10174 /*bit_valid*/ 0, 10175 /*bit*/ 0); 10176 ctl_done((union ctl_io *)ctsio); 10177 retval = CTL_RETVAL_COMPLETE; 10178 break; 10179 } 10180 10181 return (retval); 10182 } 10183 10184 /* 10185 * Standard INQUIRY data. 10186 */ 10187 static int 10188 ctl_inquiry_std(struct ctl_scsiio *ctsio) 10189 { 10190 struct scsi_inquiry_data *inq_ptr; 10191 struct scsi_inquiry *cdb; 10192 struct ctl_softc *softc; 10193 struct ctl_port *port; 10194 struct ctl_lun *lun; 10195 char *val; 10196 uint32_t alloc_len, data_len; 10197 ctl_port_type port_type; 10198 10199 softc = control_softc; 10200 10201 /* 10202 * Figure out whether we're talking to a Fibre Channel port or not. 10203 * We treat the ioctl front end, and any SCSI adapters, as packetized 10204 * SCSI front ends. 10205 */ 10206 port = ctl_io_port(&ctsio->io_hdr); 10207 if (port != NULL) 10208 port_type = port->port_type; 10209 else 10210 port_type = CTL_PORT_SCSI; 10211 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 10212 port_type = CTL_PORT_SCSI; 10213 10214 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10215 cdb = (struct scsi_inquiry *)ctsio->cdb; 10216 alloc_len = scsi_2btoul(cdb->length); 10217 10218 /* 10219 * We malloc the full inquiry data size here and fill it 10220 * in. If the user only asks for less, we'll give him 10221 * that much. 
 */
	data_len = offsetof(struct scsi_inquiry_data, vendor_specific1);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}

	if (lun != NULL) {
		if ((lun->flags & CTL_LUN_PRIMARY_SC) ||
		    softc->ha_link >= CTL_HA_LINK_UNKNOWN) {
			inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
			    lun->be_lun->lun_type;
		} else {
			inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) |
			    lun->be_lun->lun_type;
		}
	} else
		inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;

	/* RMB in byte 2 is 0 */
	inq_ptr->version = SCSI_REV_SPC4;

	/*
	 * According to SAM-3, even if a device only supports a single
	 * level of LUN addressing, it should still set the HISUP bit:
	 *
	 * 4.9.1 Logical unit numbers overview
	 *
	 * All logical unit number formats described in this standard are
	 * hierarchical in structure even when only a single level in that
	 * hierarchy is used.  The HISUP bit shall be set to one in the
	 * standard INQUIRY data (see SPC-2) when any logical unit number
	 * format described in this standard is used.  Non-hierarchical
	 * formats are outside the scope of this standard.
	 *
	 * Therefore we set the HiSup bit here.
	 *
	 * The response format is 2, per SPC-3.
	 */
	inq_ptr->response_format = SID_HiSup | 2;

	inq_ptr->additional_length = data_len -
	    (offsetof(struct scsi_inquiry_data, additional_length) + 1);
	CTL_DEBUG_PRINT(("additional_length = %d\n",
	    inq_ptr->additional_length));

	inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT;
	/* 16 bit addressing */
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->spc2_flags = SPC2_SID_ADDR16;
	/* XXX set the SID_MultiP bit here if we're actually going to
	   respond on multiple ports */
	inq_ptr->spc2_flags |= SPC2_SID_MultiP;

	/* 16 bit data bus, synchronous transfers */
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->flags = SID_WBus16 | SID_Sync;
	/*
	 * XXX KDM do we want to support tagged queueing on the control
	 * device at all?
	 */
	if ((lun == NULL)
	 || (lun->be_lun->lun_type != T_PROCESSOR))
		inq_ptr->flags |= SID_CmdQue;
	/*
	 * Per SPC-3, unused bytes in ASCII strings are filled with spaces.
	 * We have 8 bytes for the vendor name, and 16 bytes for the device
	 * name and 4 bytes for the revision.
10301 */ 10302 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10303 "vendor")) == NULL) { 10304 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10305 } else { 10306 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10307 strncpy(inq_ptr->vendor, val, 10308 min(sizeof(inq_ptr->vendor), strlen(val))); 10309 } 10310 if (lun == NULL) { 10311 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10312 sizeof(inq_ptr->product)); 10313 } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) { 10314 switch (lun->be_lun->lun_type) { 10315 case T_DIRECT: 10316 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10317 sizeof(inq_ptr->product)); 10318 break; 10319 case T_PROCESSOR: 10320 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10321 sizeof(inq_ptr->product)); 10322 break; 10323 default: 10324 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 10325 sizeof(inq_ptr->product)); 10326 break; 10327 } 10328 } else { 10329 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10330 strncpy(inq_ptr->product, val, 10331 min(sizeof(inq_ptr->product), strlen(val))); 10332 } 10333 10334 /* 10335 * XXX make this a macro somewhere so it automatically gets 10336 * incremented when we make changes. 10337 */ 10338 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10339 "revision")) == NULL) { 10340 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10341 } else { 10342 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10343 strncpy(inq_ptr->revision, val, 10344 min(sizeof(inq_ptr->revision), strlen(val))); 10345 } 10346 10347 /* 10348 * For parallel SCSI, we support double transition and single 10349 * transition clocking. We also support QAS (Quick Arbitration 10350 * and Selection) and Information Unit transfers on both the 10351 * control and array devices. 
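	 * These feature bits are only reported for parallel SCSI ports;
	 * for all other port types spi3data is left zero.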
10352 */ 10353 if (port_type == CTL_PORT_SCSI) 10354 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10355 SID_SPI_IUS; 10356 10357 /* SAM-5 (no version claimed) */ 10358 scsi_ulto2b(0x00A0, inq_ptr->version1); 10359 /* SPC-4 (no version claimed) */ 10360 scsi_ulto2b(0x0460, inq_ptr->version2); 10361 if (port_type == CTL_PORT_FC) { 10362 /* FCP-2 ANSI INCITS.350:2003 */ 10363 scsi_ulto2b(0x0917, inq_ptr->version3); 10364 } else if (port_type == CTL_PORT_SCSI) { 10365 /* SPI-4 ANSI INCITS.362:200x */ 10366 scsi_ulto2b(0x0B56, inq_ptr->version3); 10367 } else if (port_type == CTL_PORT_ISCSI) { 10368 /* iSCSI (no version claimed) */ 10369 scsi_ulto2b(0x0960, inq_ptr->version3); 10370 } else if (port_type == CTL_PORT_SAS) { 10371 /* SAS (no version claimed) */ 10372 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10373 } 10374 10375 if (lun == NULL) { 10376 /* SBC-4 (no version claimed) */ 10377 scsi_ulto2b(0x0600, inq_ptr->version4); 10378 } else { 10379 switch (lun->be_lun->lun_type) { 10380 case T_DIRECT: 10381 /* SBC-4 (no version claimed) */ 10382 scsi_ulto2b(0x0600, inq_ptr->version4); 10383 break; 10384 case T_PROCESSOR: 10385 default: 10386 break; 10387 } 10388 } 10389 10390 ctl_set_success(ctsio); 10391 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10392 ctsio->be_move_done = ctl_config_move_done; 10393 ctl_datamove((union ctl_io *)ctsio); 10394 return (CTL_RETVAL_COMPLETE); 10395 } 10396 10397 int 10398 ctl_inquiry(struct ctl_scsiio *ctsio) 10399 { 10400 struct scsi_inquiry *cdb; 10401 int retval; 10402 10403 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10404 10405 cdb = (struct scsi_inquiry *)ctsio->cdb; 10406 if (cdb->byte2 & SI_EVPD) 10407 retval = ctl_inquiry_evpd(ctsio); 10408 else if (cdb->page_code == 0) 10409 retval = ctl_inquiry_std(ctsio); 10410 else { 10411 ctl_set_invalid_field(ctsio, 10412 /*sks_valid*/ 1, 10413 /*command*/ 1, 10414 /*field*/ 2, 10415 /*bit_valid*/ 0, 10416 /*bit*/ 0); 10417 ctl_done((union ctl_io *)ctsio); 10418 return (CTL_RETVAL_COMPLETE); 10419 } 10420 10421 return (retval); 10422 } 10423 10424 /* 10425 * For known CDB types, parse the LBA and length. 
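 * Returns 0 on success and 1 for I/O types or CDBs we do not know how
 * to parse.  Lengths are returned in logical blocks, not bytes.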
10426 */ 10427 static int 10428 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10429 { 10430 if (io->io_hdr.io_type != CTL_IO_SCSI) 10431 return (1); 10432 10433 switch (io->scsiio.cdb[0]) { 10434 case COMPARE_AND_WRITE: { 10435 struct scsi_compare_and_write *cdb; 10436 10437 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10438 10439 *lba = scsi_8btou64(cdb->addr); 10440 *len = cdb->length; 10441 break; 10442 } 10443 case READ_6: 10444 case WRITE_6: { 10445 struct scsi_rw_6 *cdb; 10446 10447 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10448 10449 *lba = scsi_3btoul(cdb->addr); 10450 /* only 5 bits are valid in the most significant address byte */ 10451 *lba &= 0x1fffff; 10452 *len = cdb->length; 10453 break; 10454 } 10455 case READ_10: 10456 case WRITE_10: { 10457 struct scsi_rw_10 *cdb; 10458 10459 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10460 10461 *lba = scsi_4btoul(cdb->addr); 10462 *len = scsi_2btoul(cdb->length); 10463 break; 10464 } 10465 case WRITE_VERIFY_10: { 10466 struct scsi_write_verify_10 *cdb; 10467 10468 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10469 10470 *lba = scsi_4btoul(cdb->addr); 10471 *len = scsi_2btoul(cdb->length); 10472 break; 10473 } 10474 case READ_12: 10475 case WRITE_12: { 10476 struct scsi_rw_12 *cdb; 10477 10478 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10479 10480 *lba = scsi_4btoul(cdb->addr); 10481 *len = scsi_4btoul(cdb->length); 10482 break; 10483 } 10484 case WRITE_VERIFY_12: { 10485 struct scsi_write_verify_12 *cdb; 10486 10487 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10488 10489 *lba = scsi_4btoul(cdb->addr); 10490 *len = scsi_4btoul(cdb->length); 10491 break; 10492 } 10493 case READ_16: 10494 case WRITE_16: 10495 case WRITE_ATOMIC_16: { 10496 struct scsi_rw_16 *cdb; 10497 10498 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10499 10500 *lba = scsi_8btou64(cdb->addr); 10501 *len = scsi_4btoul(cdb->length); 10502 break; 10503 } 10504 case WRITE_VERIFY_16: { 10505 struct scsi_write_verify_16 *cdb; 10506 10507 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10508 10509 *lba = scsi_8btou64(cdb->addr); 10510 *len = scsi_4btoul(cdb->length); 10511 break; 10512 } 10513 case WRITE_SAME_10: { 10514 struct scsi_write_same_10 *cdb; 10515 10516 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10517 10518 *lba = scsi_4btoul(cdb->addr); 10519 *len = scsi_2btoul(cdb->length); 10520 break; 10521 } 10522 case WRITE_SAME_16: { 10523 struct scsi_write_same_16 *cdb; 10524 10525 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10526 10527 *lba = scsi_8btou64(cdb->addr); 10528 *len = scsi_4btoul(cdb->length); 10529 break; 10530 } 10531 case VERIFY_10: { 10532 struct scsi_verify_10 *cdb; 10533 10534 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10535 10536 *lba = scsi_4btoul(cdb->addr); 10537 *len = scsi_2btoul(cdb->length); 10538 break; 10539 } 10540 case VERIFY_12: { 10541 struct scsi_verify_12 *cdb; 10542 10543 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10544 10545 *lba = scsi_4btoul(cdb->addr); 10546 *len = scsi_4btoul(cdb->length); 10547 break; 10548 } 10549 case VERIFY_16: { 10550 struct scsi_verify_16 *cdb; 10551 10552 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10553 10554 *lba = scsi_8btou64(cdb->addr); 10555 *len = scsi_4btoul(cdb->length); 10556 break; 10557 } 10558 case UNMAP: { 10559 *lba = 0; 10560 *len = UINT64_MAX; 10561 break; 10562 } 10563 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10564 struct scsi_get_lba_status *cdb; 10565 10566 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 
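		/*
		 * GET LBA STATUS may describe any range starting at the
		 * given LBA, so use an effectively unbounded length for
		 * serialization purposes.
		 */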
10567 *lba = scsi_8btou64(cdb->addr); 10568 *len = UINT32_MAX; 10569 break; 10570 } 10571 default: 10572 return (1); 10573 break; /* NOTREACHED */ 10574 } 10575 10576 return (0); 10577 } 10578 10579 static ctl_action 10580 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, 10581 bool seq) 10582 { 10583 uint64_t endlba1, endlba2; 10584 10585 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10586 endlba2 = lba2 + len2 - 1; 10587 10588 if ((endlba1 < lba2) || (endlba2 < lba1)) 10589 return (CTL_ACTION_PASS); 10590 else 10591 return (CTL_ACTION_BLOCK); 10592 } 10593 10594 static int 10595 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10596 { 10597 struct ctl_ptr_len_flags *ptrlen; 10598 struct scsi_unmap_desc *buf, *end, *range; 10599 uint64_t lba; 10600 uint32_t len; 10601 10602 /* If not UNMAP -- go other way. */ 10603 if (io->io_hdr.io_type != CTL_IO_SCSI || 10604 io->scsiio.cdb[0] != UNMAP) 10605 return (CTL_ACTION_ERROR); 10606 10607 /* If UNMAP without data -- block and wait for data. */ 10608 ptrlen = (struct ctl_ptr_len_flags *) 10609 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10610 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10611 ptrlen->ptr == NULL) 10612 return (CTL_ACTION_BLOCK); 10613 10614 /* UNMAP with data -- check for collision. */ 10615 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10616 end = buf + ptrlen->len / sizeof(*buf); 10617 for (range = buf; range < end; range++) { 10618 lba = scsi_8btou64(range->lba); 10619 len = scsi_4btoul(range->length); 10620 if ((lba < lba2 + len2) && (lba + len > lba2)) 10621 return (CTL_ACTION_BLOCK); 10622 } 10623 return (CTL_ACTION_PASS); 10624 } 10625 10626 static ctl_action 10627 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10628 { 10629 uint64_t lba1, lba2; 10630 uint64_t len1, len2; 10631 int retval; 10632 10633 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10634 return (CTL_ACTION_ERROR); 10635 10636 retval = ctl_extent_check_unmap(io1, lba2, len2); 10637 if (retval != CTL_ACTION_ERROR) 10638 return (retval); 10639 10640 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10641 return (CTL_ACTION_ERROR); 10642 10643 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10644 seq = FALSE; 10645 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 10646 } 10647 10648 static ctl_action 10649 ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) 10650 { 10651 uint64_t lba1, lba2; 10652 uint64_t len1, len2; 10653 10654 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10655 return (CTL_ACTION_PASS); 10656 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10657 return (CTL_ACTION_ERROR); 10658 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10659 return (CTL_ACTION_ERROR); 10660 10661 if (lba1 + len1 == lba2) 10662 return (CTL_ACTION_BLOCK); 10663 return (CTL_ACTION_PASS); 10664 } 10665 10666 static ctl_action 10667 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 10668 union ctl_io *ooa_io) 10669 { 10670 const struct ctl_cmd_entry *pending_entry, *ooa_entry; 10671 ctl_serialize_action *serialize_row; 10672 10673 /* 10674 * The initiator attempted multiple untagged commands at the same 10675 * time. Can't do that. 
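	 * (The earlier command is ignored here if it has already been
	 * aborted or has already had its status sent back.)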
10676 */ 10677 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10678 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10679 && ((pending_io->io_hdr.nexus.targ_port == 10680 ooa_io->io_hdr.nexus.targ_port) 10681 && (pending_io->io_hdr.nexus.initid == 10682 ooa_io->io_hdr.nexus.initid)) 10683 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10684 CTL_FLAG_STATUS_SENT)) == 0)) 10685 return (CTL_ACTION_OVERLAP); 10686 10687 /* 10688 * The initiator attempted to send multiple tagged commands with 10689 * the same ID. (It's fine if different initiators have the same 10690 * tag ID.) 10691 * 10692 * Even if all of those conditions are true, we don't kill the I/O 10693 * if the command ahead of us has been aborted. We won't end up 10694 * sending it to the FETD, and it's perfectly legal to resend a 10695 * command with the same tag number as long as the previous 10696 * instance of this tag number has been aborted somehow. 10697 */ 10698 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10699 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10700 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 10701 && ((pending_io->io_hdr.nexus.targ_port == 10702 ooa_io->io_hdr.nexus.targ_port) 10703 && (pending_io->io_hdr.nexus.initid == 10704 ooa_io->io_hdr.nexus.initid)) 10705 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10706 CTL_FLAG_STATUS_SENT)) == 0)) 10707 return (CTL_ACTION_OVERLAP_TAG); 10708 10709 /* 10710 * If we get a head of queue tag, SAM-3 says that we should 10711 * immediately execute it. 10712 * 10713 * What happens if this command would normally block for some other 10714 * reason? e.g. a request sense with a head of queue tag 10715 * immediately after a write. Normally that would block, but this 10716 * will result in its getting executed immediately... 10717 * 10718 * We currently return "pass" instead of "skip", so we'll end up 10719 * going through the rest of the queue to check for overlapped tags. 10720 * 10721 * XXX KDM check for other types of blockage first?? 10722 */ 10723 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10724 return (CTL_ACTION_PASS); 10725 10726 /* 10727 * Ordered tags have to block until all items ahead of them 10728 * have completed. If we get called with an ordered tag, we always 10729 * block, if something else is ahead of us in the queue. 10730 */ 10731 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 10732 return (CTL_ACTION_BLOCK); 10733 10734 /* 10735 * Simple tags get blocked until all head of queue and ordered tags 10736 * ahead of them have completed. I'm lumping untagged commands in 10737 * with simple tags here. XXX KDM is that the right thing to do? 
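	 *
	 * Beyond the tag-type rules, the per-opcode serialization table
	 * consulted below decides whether the pending command passes,
	 * blocks, skips, or needs an extent overlap check.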
10738 */ 10739 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10740 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 10741 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10742 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 10743 return (CTL_ACTION_BLOCK); 10744 10745 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); 10746 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); 10747 10748 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 10749 10750 switch (serialize_row[pending_entry->seridx]) { 10751 case CTL_SER_BLOCK: 10752 return (CTL_ACTION_BLOCK); 10753 case CTL_SER_EXTENT: 10754 return (ctl_extent_check(ooa_io, pending_io, 10755 (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10756 case CTL_SER_EXTENTOPT: 10757 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags 10758 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) 10759 return (ctl_extent_check(ooa_io, pending_io, 10760 (lun->be_lun && 10761 lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10762 return (CTL_ACTION_PASS); 10763 case CTL_SER_EXTENTSEQ: 10764 if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) 10765 return (ctl_extent_check_seq(ooa_io, pending_io)); 10766 return (CTL_ACTION_PASS); 10767 case CTL_SER_PASS: 10768 return (CTL_ACTION_PASS); 10769 case CTL_SER_BLOCKOPT: 10770 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags 10771 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) 10772 return (CTL_ACTION_BLOCK); 10773 return (CTL_ACTION_PASS); 10774 case CTL_SER_SKIP: 10775 return (CTL_ACTION_SKIP); 10776 default: 10777 panic("invalid serialization value %d", 10778 serialize_row[pending_entry->seridx]); 10779 } 10780 10781 return (CTL_ACTION_ERROR); 10782 } 10783 10784 /* 10785 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 10786 * Assumptions: 10787 * - pending_io is generally either incoming, or on the blocked queue 10788 * - starting I/O is the I/O we want to start the check with. 10789 */ 10790 static ctl_action 10791 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 10792 union ctl_io *starting_io) 10793 { 10794 union ctl_io *ooa_io; 10795 ctl_action action; 10796 10797 mtx_assert(&lun->lun_lock, MA_OWNED); 10798 10799 /* 10800 * Run back along the OOA queue, starting with the current 10801 * blocked I/O and going through every I/O before it on the 10802 * queue. If starting_io is NULL, we'll just end up returning 10803 * CTL_ACTION_PASS. 10804 */ 10805 for (ooa_io = starting_io; ooa_io != NULL; 10806 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 10807 ooa_links)){ 10808 10809 /* 10810 * This routine just checks to see whether 10811 * cur_blocked is blocked by ooa_io, which is ahead 10812 * of it in the queue. It doesn't queue/dequeue 10813 * cur_blocked. 10814 */ 10815 action = ctl_check_for_blockage(lun, pending_io, ooa_io); 10816 switch (action) { 10817 case CTL_ACTION_BLOCK: 10818 case CTL_ACTION_OVERLAP: 10819 case CTL_ACTION_OVERLAP_TAG: 10820 case CTL_ACTION_SKIP: 10821 case CTL_ACTION_ERROR: 10822 return (action); 10823 break; /* NOTREACHED */ 10824 case CTL_ACTION_PASS: 10825 break; 10826 default: 10827 panic("invalid action %d", action); 10828 break; /* NOTREACHED */ 10829 } 10830 } 10831 10832 return (CTL_ACTION_PASS); 10833 } 10834 10835 /* 10836 * Assumptions: 10837 * - An I/O has just completed, and has been removed from the per-LUN OOA 10838 * queue, so some items on the blocked queue may now be unblocked. 
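 *
 * We walk the blocked queue in order and restart any I/O that no longer
 * conflicts with the commands still ahead of it in the OOA queue.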
10839 */ 10840 static int 10841 ctl_check_blocked(struct ctl_lun *lun) 10842 { 10843 struct ctl_softc *softc = lun->ctl_softc; 10844 union ctl_io *cur_blocked, *next_blocked; 10845 10846 mtx_assert(&lun->lun_lock, MA_OWNED); 10847 10848 /* 10849 * Run forward from the head of the blocked queue, checking each 10850 * entry against the I/Os prior to it on the OOA queue to see if 10851 * there is still any blockage. 10852 * 10853 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 10854 * with our removing a variable on it while it is traversing the 10855 * list. 10856 */ 10857 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 10858 cur_blocked != NULL; cur_blocked = next_blocked) { 10859 union ctl_io *prev_ooa; 10860 ctl_action action; 10861 10862 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 10863 blocked_links); 10864 10865 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 10866 ctl_ooaq, ooa_links); 10867 10868 /* 10869 * If cur_blocked happens to be the first item in the OOA 10870 * queue now, prev_ooa will be NULL, and the action 10871 * returned will just be CTL_ACTION_PASS. 10872 */ 10873 action = ctl_check_ooa(lun, cur_blocked, prev_ooa); 10874 10875 switch (action) { 10876 case CTL_ACTION_BLOCK: 10877 /* Nothing to do here, still blocked */ 10878 break; 10879 case CTL_ACTION_OVERLAP: 10880 case CTL_ACTION_OVERLAP_TAG: 10881 /* 10882 * This shouldn't happen! In theory we've already 10883 * checked this command for overlap... 10884 */ 10885 break; 10886 case CTL_ACTION_PASS: 10887 case CTL_ACTION_SKIP: { 10888 const struct ctl_cmd_entry *entry; 10889 10890 /* 10891 * The skip case shouldn't happen, this transaction 10892 * should have never made it onto the blocked queue. 10893 */ 10894 /* 10895 * This I/O is no longer blocked, we can remove it 10896 * from the blocked queue. Since this is a TAILQ 10897 * (doubly linked list), we can do O(1) removals 10898 * from any place on the list. 10899 */ 10900 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 10901 blocked_links); 10902 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10903 10904 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 10905 (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){ 10906 /* 10907 * Need to send IO back to original side to 10908 * run 10909 */ 10910 union ctl_ha_msg msg_info; 10911 10912 cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 10913 msg_info.hdr.original_sc = 10914 cur_blocked->io_hdr.original_sc; 10915 msg_info.hdr.serializing_sc = cur_blocked; 10916 msg_info.hdr.msg_type = CTL_MSG_R2R; 10917 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 10918 sizeof(msg_info.hdr), M_NOWAIT); 10919 break; 10920 } 10921 entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL); 10922 10923 /* 10924 * Check this I/O for LUN state changes that may 10925 * have happened while this command was blocked. 10926 * The LUN state may have been changed by a command 10927 * ahead of us in the queue, so we need to re-check 10928 * for any states that can be caused by SCSI 10929 * commands. 10930 */ 10931 if (ctl_scsiio_lun_check(lun, entry, 10932 &cur_blocked->scsiio) == 0) { 10933 cur_blocked->io_hdr.flags |= 10934 CTL_FLAG_IS_WAS_ON_RTR; 10935 ctl_enqueue_rtr(cur_blocked); 10936 } else 10937 ctl_done(cur_blocked); 10938 break; 10939 } 10940 default: 10941 /* 10942 * This probably shouldn't happen -- we shouldn't 10943 * get CTL_ACTION_ERROR, or anything else. 
10944 */ 10945 break; 10946 } 10947 } 10948 10949 return (CTL_RETVAL_COMPLETE); 10950 } 10951 10952 /* 10953 * This routine (with one exception) checks LUN flags that can be set by 10954 * commands ahead of us in the OOA queue. These flags have to be checked 10955 * when a command initially comes in, and when we pull a command off the 10956 * blocked queue and are preparing to execute it. The reason we have to 10957 * check these flags for commands on the blocked queue is that the LUN 10958 * state may have been changed by a command ahead of us while we're on the 10959 * blocked queue. 10960 * 10961 * Ordering is somewhat important with these checks, so please pay 10962 * careful attention to the placement of any new checks. 10963 */ 10964 static int 10965 ctl_scsiio_lun_check(struct ctl_lun *lun, 10966 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 10967 { 10968 struct ctl_softc *softc = lun->ctl_softc; 10969 int retval; 10970 uint32_t residx; 10971 10972 retval = 0; 10973 10974 mtx_assert(&lun->lun_lock, MA_OWNED); 10975 10976 /* 10977 * If this shelf is a secondary shelf controller, we may have to 10978 * reject some commands disallowed by HA mode and link state. 10979 */ 10980 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 10981 if (softc->ha_link == CTL_HA_LINK_OFFLINE && 10982 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 10983 ctl_set_lun_unavail(ctsio); 10984 retval = 1; 10985 goto bailout; 10986 } 10987 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && 10988 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 10989 ctl_set_lun_transit(ctsio); 10990 retval = 1; 10991 goto bailout; 10992 } 10993 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && 10994 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { 10995 ctl_set_lun_standby(ctsio); 10996 retval = 1; 10997 goto bailout; 10998 } 10999 11000 /* The rest of checks are only done on executing side */ 11001 if (softc->ha_mode == CTL_HA_MODE_XFER) 11002 goto bailout; 11003 } 11004 11005 if (entry->pattern & CTL_LUN_PAT_WRITE) { 11006 if (lun->be_lun && 11007 lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { 11008 ctl_set_hw_write_protected(ctsio); 11009 retval = 1; 11010 goto bailout; 11011 } 11012 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT] 11013 .eca_and_aen & SCP_SWP) != 0) { 11014 ctl_set_sense(ctsio, /*current_error*/ 1, 11015 /*sense_key*/ SSD_KEY_DATA_PROTECT, 11016 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 11017 retval = 1; 11018 goto bailout; 11019 } 11020 } 11021 11022 /* 11023 * Check for a reservation conflict. If this command isn't allowed 11024 * even on reserved LUNs, and if this initiator isn't the one who 11025 * reserved us, reject the command with a reservation conflict. 11026 */ 11027 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11028 if ((lun->flags & CTL_LUN_RESERVED) 11029 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 11030 if (lun->res_idx != residx) { 11031 ctl_set_reservation_conflict(ctsio); 11032 retval = 1; 11033 goto bailout; 11034 } 11035 } 11036 11037 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 11038 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 11039 /* No reservation or command is allowed. */; 11040 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 11041 (lun->res_type == SPR_TYPE_WR_EX || 11042 lun->res_type == SPR_TYPE_WR_EX_RO || 11043 lun->res_type == SPR_TYPE_WR_EX_AR)) { 11044 /* The command is allowed for Write Exclusive resv. 
*/; 11045 } else { 11046 /* 11047 * if we aren't registered or it's a res holder type 11048 * reservation and this isn't the res holder then set a 11049 * conflict. 11050 */ 11051 if (ctl_get_prkey(lun, residx) == 0 11052 || (residx != lun->pr_res_idx && lun->res_type < 4)) { 11053 ctl_set_reservation_conflict(ctsio); 11054 retval = 1; 11055 goto bailout; 11056 } 11057 } 11058 11059 if ((lun->flags & CTL_LUN_OFFLINE) 11060 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0)) { 11061 ctl_set_lun_not_ready(ctsio); 11062 retval = 1; 11063 goto bailout; 11064 } 11065 11066 if ((lun->flags & CTL_LUN_STOPPED) 11067 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) { 11068 /* "Logical unit not ready, initializing cmd. required" */ 11069 ctl_set_lun_stopped(ctsio); 11070 retval = 1; 11071 goto bailout; 11072 } 11073 11074 if ((lun->flags & CTL_LUN_INOPERABLE) 11075 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) { 11076 /* "Medium format corrupted" */ 11077 ctl_set_medium_format_corrupted(ctsio); 11078 retval = 1; 11079 goto bailout; 11080 } 11081 11082 bailout: 11083 return (retval); 11084 } 11085 11086 static void 11087 ctl_failover_io(union ctl_io *io, int have_lock) 11088 { 11089 ctl_set_busy(&io->scsiio); 11090 ctl_done(io); 11091 } 11092 11093 static void 11094 ctl_failover_lun(struct ctl_lun *lun) 11095 { 11096 struct ctl_softc *softc = lun->ctl_softc; 11097 struct ctl_io_hdr *io, *next_io; 11098 11099 CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", lun->lun)); 11100 if (softc->ha_mode == CTL_HA_MODE_XFER) { 11101 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11102 /* We are master */ 11103 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11104 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11105 io->flags |= CTL_FLAG_ABORT; 11106 io->flags |= CTL_FLAG_FAILOVER; 11107 } else { /* This can be only due to DATAMOVE */ 11108 io->msg_type = CTL_MSG_DATAMOVE_DONE; 11109 io->flags &= ~CTL_FLAG_DMA_INPROG; 11110 io->flags |= CTL_FLAG_IO_ACTIVE; 11111 io->port_status = 31340; 11112 ctl_enqueue_isc((union ctl_io *)io); 11113 } 11114 } 11115 /* We are slave */ 11116 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11117 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11118 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11119 io->flags |= CTL_FLAG_FAILOVER; 11120 } else { 11121 ctl_set_busy(&((union ctl_io *)io)-> 11122 scsiio); 11123 ctl_done((union ctl_io *)io); 11124 } 11125 } 11126 } 11127 } else { /* SERIALIZE modes */ 11128 TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links, 11129 next_io) { 11130 /* We are master */ 11131 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11132 TAILQ_REMOVE(&lun->blocked_queue, io, 11133 blocked_links); 11134 io->flags &= ~CTL_FLAG_BLOCKED; 11135 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11136 ctl_free_io((union ctl_io *)io); 11137 } 11138 } 11139 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11140 /* We are master */ 11141 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11142 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11143 ctl_free_io((union ctl_io *)io); 11144 } 11145 /* We are slave */ 11146 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11147 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11148 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { 11149 ctl_set_busy(&((union ctl_io *)io)-> 11150 scsiio); 11151 ctl_done((union ctl_io *)io); 11152 } 11153 } 11154 } 11155 ctl_check_blocked(lun); 11156 } 11157 } 11158 11159 static int 11160 ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) 11161 { 11162 struct ctl_lun *lun; 11163 const struct ctl_cmd_entry 
	    *entry;
	uint32_t initidx, targ_lun;
	int retval;

	retval = 0;

	lun = NULL;

	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	if ((targ_lun < CTL_MAX_LUNS)
	 && ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
		/*
		 * If the LUN is invalid, pretend that it doesn't exist.
		 * It will go away as soon as all pending I/O has been
		 * completed.
		 */
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_DISABLED) {
			mtx_unlock(&lun->lun_lock);
			lun = NULL;
			ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
			ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
		} else {
			ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
			ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
			    lun->be_lun;

			/*
			 * Every I/O goes into the OOA queue for a
			 * particular LUN, and stays there until completion.
			 */
#ifdef CTL_TIME_IO
			if (TAILQ_EMPTY(&lun->ooa_queue)) {
				lun->idle_time += getsbinuptime() -
				    lun->last_busy;
			}
#endif
			TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr,
			    ooa_links);
		}
	} else {
		ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
		ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
	}

	/* Get command entry and return error if it is unsupported. */
	entry = ctl_validate_command(ctsio);
	if (entry == NULL) {
		if (lun)
			mtx_unlock(&lun->lun_lock);
		return (retval);
	}

	ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
	ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;

	/*
	 * Check to see whether we can send this command to LUNs that don't
	 * exist.  This should pretty much only be the case for inquiry
	 * and request sense.  Further checks, below, really require having
	 * a LUN, so we can't really check the command anymore.  Just put
	 * it on the rtr queue.
	 */
	if (lun == NULL) {
		if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) {
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
			return (retval);
		}

		ctl_set_unsupported_lun(ctsio);
		ctl_done((union ctl_io *)ctsio);
		CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
		return (retval);
	} else {
		/*
		 * Make sure we support this particular command on this LUN.
		 * e.g., we don't support writes to the control LUN.
		 */
		if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
			mtx_unlock(&lun->lun_lock);
			ctl_set_invalid_opcode(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
	}

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);

#ifdef CTL_WITH_CA
	/*
	 * If we've got a request sense, it'll clear the contingent
	 * allegiance condition.  Otherwise, if we have a CA condition for
	 * this initiator, clear it, because it sent down a command other
	 * than request sense.
	 */
	if ((ctsio->cdb[0] != REQUEST_SENSE)
	 && (ctl_is_set(lun->have_ca, initidx)))
		ctl_clear_mask(lun->have_ca, initidx);
#endif

	/*
	 * If the command has this flag set, it handles its own unit
	 * attention reporting, and we shouldn't do anything.  Otherwise we
	 * check for any pending unit attentions and send them back to the
	 * initiator.  We only do this when a command initially comes in,
	 * not when we pull it off the blocked queue.
	 *
	 * According to SAM-3, section 5.3.2, the order that things get
	 * presented back to the host is basically unit attentions caused
	 * by some sort of reset event, busy status, reservation conflicts
	 * or task set full, and finally any other status.
	 *
	 * One issue here is that some of the unit attentions we report
	 * don't fall into the "reset" category (e.g. "reported luns data
	 * has changed").  So reporting it here, before the reservation
	 * check, may be technically wrong.  I guess the only thing to do
	 * would be to check for and report the reset events here, and then
	 * check for the other unit attention types after we check for a
	 * reservation conflict.
	 *
	 * XXX KDM need to fix this
	 */
	if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
		ctl_ua_type ua_type;

		ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data,
		    SSD_TYPE_NONE);
		if (ua_type != CTL_UA_NONE) {
			mtx_unlock(&lun->lun_lock);
			ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
			ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
			ctsio->sense_len = SSD_FULL_SIZE;
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
	}

	if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
		mtx_unlock(&lun->lun_lock);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}

	/*
	 * XXX CHD this is where we want to send IO to the other side if
	 * this LUN is secondary on this SC.  We will need to make a copy
	 * of the IO, flag the IO on this side as SENT_2OTHER and flag
	 * the copy we send as FROM_OTHER.
	 * We also need to stuff the address of the original IO so we can
	 * find it easily.  Something similar will need to be done on the
	 * other side so when we are done we can find the copy.
11316 */ 11317 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 11318 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && 11319 (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { 11320 union ctl_ha_msg msg_info; 11321 int isc_retval; 11322 11323 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11324 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11325 mtx_unlock(&lun->lun_lock); 11326 11327 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11328 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11329 msg_info.hdr.serializing_sc = NULL; 11330 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11331 msg_info.scsi.tag_num = ctsio->tag_num; 11332 msg_info.scsi.tag_type = ctsio->tag_type; 11333 msg_info.scsi.cdb_len = ctsio->cdb_len; 11334 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11335 11336 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11337 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), 11338 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { 11339 ctl_set_busy(ctsio); 11340 ctl_done((union ctl_io *)ctsio); 11341 return (retval); 11342 } 11343 return (retval); 11344 } 11345 11346 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11347 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11348 ctl_ooaq, ooa_links))) { 11349 case CTL_ACTION_BLOCK: 11350 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 11351 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 11352 blocked_links); 11353 mtx_unlock(&lun->lun_lock); 11354 return (retval); 11355 case CTL_ACTION_PASS: 11356 case CTL_ACTION_SKIP: 11357 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11358 mtx_unlock(&lun->lun_lock); 11359 ctl_enqueue_rtr((union ctl_io *)ctsio); 11360 break; 11361 case CTL_ACTION_OVERLAP: 11362 mtx_unlock(&lun->lun_lock); 11363 ctl_set_overlapped_cmd(ctsio); 11364 ctl_done((union ctl_io *)ctsio); 11365 break; 11366 case CTL_ACTION_OVERLAP_TAG: 11367 mtx_unlock(&lun->lun_lock); 11368 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11369 ctl_done((union ctl_io *)ctsio); 11370 break; 11371 case CTL_ACTION_ERROR: 11372 default: 11373 mtx_unlock(&lun->lun_lock); 11374 ctl_set_internal_failure(ctsio, 11375 /*sks_valid*/ 0, 11376 /*retry_count*/ 0); 11377 ctl_done((union ctl_io *)ctsio); 11378 break; 11379 } 11380 return (retval); 11381 } 11382 11383 const struct ctl_cmd_entry * 11384 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11385 { 11386 const struct ctl_cmd_entry *entry; 11387 int service_action; 11388 11389 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11390 if (sa) 11391 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11392 if (entry->flags & CTL_CMD_FLAG_SA5) { 11393 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11394 entry = &((const struct ctl_cmd_entry *) 11395 entry->execute)[service_action]; 11396 } 11397 return (entry); 11398 } 11399 11400 const struct ctl_cmd_entry * 11401 ctl_validate_command(struct ctl_scsiio *ctsio) 11402 { 11403 const struct ctl_cmd_entry *entry; 11404 int i, sa; 11405 uint8_t diff; 11406 11407 entry = ctl_get_cmd_entry(ctsio, &sa); 11408 if (entry->execute == NULL) { 11409 if (sa) 11410 ctl_set_invalid_field(ctsio, 11411 /*sks_valid*/ 1, 11412 /*command*/ 1, 11413 /*field*/ 1, 11414 /*bit_valid*/ 1, 11415 /*bit*/ 4); 11416 else 11417 ctl_set_invalid_opcode(ctsio); 11418 ctl_done((union ctl_io *)ctsio); 11419 return (NULL); 11420 } 11421 KASSERT(entry->length > 0, 11422 ("Not defined length for command 0x%02x/0x%02x", 11423 ctsio->cdb[0], ctsio->cdb[1])); 11424 for (i = 1; i < entry->length; i++) { 11425 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11426 if (diff == 0) 11427 continue; 11428 
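		/*
		 * A bit is set that the command table does not allow for
		 * this opcode; report the offending byte and bit in the
		 * sense-key specific data.
		 */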
ctl_set_invalid_field(ctsio, 11429 /*sks_valid*/ 1, 11430 /*command*/ 1, 11431 /*field*/ i, 11432 /*bit_valid*/ 1, 11433 /*bit*/ fls(diff) - 1); 11434 ctl_done((union ctl_io *)ctsio); 11435 return (NULL); 11436 } 11437 return (entry); 11438 } 11439 11440 static int 11441 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11442 { 11443 11444 switch (lun_type) { 11445 case T_PROCESSOR: 11446 if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) 11447 return (0); 11448 break; 11449 case T_DIRECT: 11450 if ((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) 11451 return (0); 11452 break; 11453 default: 11454 return (0); 11455 } 11456 return (1); 11457 } 11458 11459 static int 11460 ctl_scsiio(struct ctl_scsiio *ctsio) 11461 { 11462 int retval; 11463 const struct ctl_cmd_entry *entry; 11464 11465 retval = CTL_RETVAL_COMPLETE; 11466 11467 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11468 11469 entry = ctl_get_cmd_entry(ctsio, NULL); 11470 11471 /* 11472 * If this I/O has been aborted, just send it straight to 11473 * ctl_done() without executing it. 11474 */ 11475 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11476 ctl_done((union ctl_io *)ctsio); 11477 goto bailout; 11478 } 11479 11480 /* 11481 * All the checks should have been handled by ctl_scsiio_precheck(). 11482 * We should be clear now to just execute the I/O. 11483 */ 11484 retval = entry->execute(ctsio); 11485 11486 bailout: 11487 return (retval); 11488 } 11489 11490 /* 11491 * Since we only implement one target right now, a bus reset simply resets 11492 * our single target. 11493 */ 11494 static int 11495 ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io) 11496 { 11497 return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET)); 11498 } 11499 11500 static int 11501 ctl_target_reset(struct ctl_softc *softc, union ctl_io *io, 11502 ctl_ua_type ua_type) 11503 { 11504 struct ctl_port *port; 11505 struct ctl_lun *lun; 11506 int retval; 11507 11508 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11509 union ctl_ha_msg msg_info; 11510 11511 msg_info.hdr.nexus = io->io_hdr.nexus; 11512 if (ua_type==CTL_UA_TARG_RESET) 11513 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 11514 else 11515 msg_info.task.task_action = CTL_TASK_BUS_RESET; 11516 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11517 msg_info.hdr.original_sc = NULL; 11518 msg_info.hdr.serializing_sc = NULL; 11519 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11520 sizeof(msg_info.task), M_WAITOK); 11521 } 11522 retval = 0; 11523 11524 mtx_lock(&softc->ctl_lock); 11525 port = softc->ctl_ports[io->io_hdr.nexus.targ_port]; 11526 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11527 if (port != NULL && 11528 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 11529 continue; 11530 retval += ctl_do_lun_reset(lun, io, ua_type); 11531 } 11532 mtx_unlock(&softc->ctl_lock); 11533 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11534 return (retval); 11535 } 11536 11537 /* 11538 * The LUN should always be set. The I/O is optional, and is used to 11539 * distinguish between I/Os sent by this initiator, and by other 11540 * initiators. We set unit attention for initiators other than this one. 11541 * SAM-3 is vague on this point. It does say that a unit attention should 11542 * be established for other initiators when a LUN is reset (see section 11543 * 5.7.3), but it doesn't specifically say that the unit attention should 11544 * be established for this particular initiator when a LUN is reset. 
Here 11545 * is the relevant text, from SAM-3 rev 8: 11546 * 11547 * 5.7.2 When a SCSI initiator port aborts its own tasks 11548 * 11549 * When a SCSI initiator port causes its own task(s) to be aborted, no 11550 * notification that the task(s) have been aborted shall be returned to 11551 * the SCSI initiator port other than the completion response for the 11552 * command or task management function action that caused the task(s) to 11553 * be aborted and notification(s) associated with related effects of the 11554 * action (e.g., a reset unit attention condition). 11555 * 11556 * XXX KDM for now, we're setting unit attention for all initiators. 11557 */ 11558 static int 11559 ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type) 11560 { 11561 union ctl_io *xio; 11562 #if 0 11563 uint32_t initidx; 11564 #endif 11565 #ifdef CTL_WITH_CA 11566 int i; 11567 #endif 11568 11569 mtx_lock(&lun->lun_lock); 11570 /* 11571 * Run through the OOA queue and abort each I/O. 11572 */ 11573 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11574 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11575 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; 11576 } 11577 11578 /* 11579 * This version sets unit attention for every 11580 */ 11581 #if 0 11582 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11583 ctl_est_ua_all(lun, initidx, ua_type); 11584 #else 11585 ctl_est_ua_all(lun, -1, ua_type); 11586 #endif 11587 11588 /* 11589 * A reset (any kind, really) clears reservations established with 11590 * RESERVE/RELEASE. It does not clear reservations established 11591 * with PERSISTENT RESERVE OUT, but we don't support that at the 11592 * moment anyway. See SPC-2, section 5.6. SPC-3 doesn't address 11593 * reservations made with the RESERVE/RELEASE commands, because 11594 * those commands are obsolete in SPC-3. 11595 */ 11596 lun->flags &= ~CTL_LUN_RESERVED; 11597 11598 #ifdef CTL_WITH_CA 11599 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11600 ctl_clear_mask(lun->have_ca, i); 11601 #endif 11602 mtx_unlock(&lun->lun_lock); 11603 11604 return (0); 11605 } 11606 11607 static int 11608 ctl_lun_reset(struct ctl_softc *softc, union ctl_io *io) 11609 { 11610 struct ctl_lun *lun; 11611 uint32_t targ_lun; 11612 int retval; 11613 11614 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11615 mtx_lock(&softc->ctl_lock); 11616 if ((targ_lun >= CTL_MAX_LUNS) || 11617 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11618 mtx_unlock(&softc->ctl_lock); 11619 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11620 return (1); 11621 } 11622 retval = ctl_do_lun_reset(lun, io, CTL_UA_LUN_RESET); 11623 mtx_unlock(&softc->ctl_lock); 11624 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11625 11626 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { 11627 union ctl_ha_msg msg_info; 11628 11629 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11630 msg_info.hdr.nexus = io->io_hdr.nexus; 11631 msg_info.task.task_action = CTL_TASK_LUN_RESET; 11632 msg_info.hdr.original_sc = NULL; 11633 msg_info.hdr.serializing_sc = NULL; 11634 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11635 sizeof(msg_info.task), M_WAITOK); 11636 } 11637 return (retval); 11638 } 11639 11640 static void 11641 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 11642 int other_sc) 11643 { 11644 union ctl_io *xio; 11645 11646 mtx_assert(&lun->lun_lock, MA_OWNED); 11647 11648 /* 11649 * Run through the OOA queue and attempt to find the given I/O. 
11650 * The target port, initiator ID, tag type and tag number have to 11651 * match the values that we got from the initiator. If we have an 11652 * untagged command to abort, simply abort the first untagged command 11653 * we come to. We only allow one untagged command at a time of course. 11654 */ 11655 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11656 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11657 11658 if ((targ_port == UINT32_MAX || 11659 targ_port == xio->io_hdr.nexus.targ_port) && 11660 (init_id == UINT32_MAX || 11661 init_id == xio->io_hdr.nexus.initid)) { 11662 if (targ_port != xio->io_hdr.nexus.targ_port || 11663 init_id != xio->io_hdr.nexus.initid) 11664 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; 11665 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11666 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11667 union ctl_ha_msg msg_info; 11668 11669 msg_info.hdr.nexus = xio->io_hdr.nexus; 11670 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11671 msg_info.task.tag_num = xio->scsiio.tag_num; 11672 msg_info.task.tag_type = xio->scsiio.tag_type; 11673 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11674 msg_info.hdr.original_sc = NULL; 11675 msg_info.hdr.serializing_sc = NULL; 11676 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11677 sizeof(msg_info.task), M_NOWAIT); 11678 } 11679 } 11680 } 11681 } 11682 11683 static int 11684 ctl_abort_task_set(union ctl_io *io) 11685 { 11686 struct ctl_softc *softc = control_softc; 11687 struct ctl_lun *lun; 11688 uint32_t targ_lun; 11689 11690 /* 11691 * Look up the LUN. 11692 */ 11693 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11694 mtx_lock(&softc->ctl_lock); 11695 if ((targ_lun >= CTL_MAX_LUNS) || 11696 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11697 mtx_unlock(&softc->ctl_lock); 11698 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11699 return (1); 11700 } 11701 11702 mtx_lock(&lun->lun_lock); 11703 mtx_unlock(&softc->ctl_lock); 11704 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 11705 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11706 io->io_hdr.nexus.initid, 11707 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11708 } else { /* CTL_TASK_CLEAR_TASK_SET */ 11709 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 11710 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11711 } 11712 mtx_unlock(&lun->lun_lock); 11713 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11714 return (0); 11715 } 11716 11717 static int 11718 ctl_i_t_nexus_reset(union ctl_io *io) 11719 { 11720 struct ctl_softc *softc = control_softc; 11721 struct ctl_lun *lun; 11722 uint32_t initidx; 11723 11724 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11725 union ctl_ha_msg msg_info; 11726 11727 msg_info.hdr.nexus = io->io_hdr.nexus; 11728 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; 11729 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11730 msg_info.hdr.original_sc = NULL; 11731 msg_info.hdr.serializing_sc = NULL; 11732 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11733 sizeof(msg_info.task), M_WAITOK); 11734 } 11735 11736 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11737 mtx_lock(&softc->ctl_lock); 11738 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11739 mtx_lock(&lun->lun_lock); 11740 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11741 io->io_hdr.nexus.initid, 1); 11742 #ifdef CTL_WITH_CA 11743 ctl_clear_mask(lun->have_ca, initidx); 11744 #endif 11745 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) 11746 lun->flags &= ~CTL_LUN_RESERVED; 11747 
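/*
 * Losing the I_T nexus also drops any RESERVE/RELEASE reservation
 * this initiator held (cleared just above); the initiator learns of
 * the state change through the I_T NEXUS LOSS unit attention
 * established below.
 */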
ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS); 11748 mtx_unlock(&lun->lun_lock); 11749 } 11750 mtx_unlock(&softc->ctl_lock); 11751 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11752 return (0); 11753 } 11754 11755 static int 11756 ctl_abort_task(union ctl_io *io) 11757 { 11758 union ctl_io *xio; 11759 struct ctl_lun *lun; 11760 struct ctl_softc *softc; 11761 #if 0 11762 struct sbuf sb; 11763 char printbuf[128]; 11764 #endif 11765 int found; 11766 uint32_t targ_lun; 11767 11768 softc = control_softc; 11769 found = 0; 11770 11771 /* 11772 * Look up the LUN. 11773 */ 11774 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11775 mtx_lock(&softc->ctl_lock); 11776 if ((targ_lun >= CTL_MAX_LUNS) || 11777 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11778 mtx_unlock(&softc->ctl_lock); 11779 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11780 return (1); 11781 } 11782 11783 #if 0 11784 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 11785 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 11786 #endif 11787 11788 mtx_lock(&lun->lun_lock); 11789 mtx_unlock(&softc->ctl_lock); 11790 /* 11791 * Run through the OOA queue and attempt to find the given I/O. 11792 * The target port, initiator ID, tag type and tag number have to 11793 * match the values that we got from the initiator. If we have an 11794 * untagged command to abort, simply abort the first untagged command 11795 * we come to. We only allow one untagged command at a time of course. 11796 */ 11797 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11798 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11799 #if 0 11800 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 11801 11802 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 11803 lun->lun, xio->scsiio.tag_num, 11804 xio->scsiio.tag_type, 11805 (xio->io_hdr.blocked_links.tqe_prev 11806 == NULL) ? "" : " BLOCKED", 11807 (xio->io_hdr.flags & 11808 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 11809 (xio->io_hdr.flags & 11810 CTL_FLAG_ABORT) ? " ABORT" : "", 11811 (xio->io_hdr.flags & 11812 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); 11813 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 11814 sbuf_finish(&sb); 11815 printf("%s\n", sbuf_data(&sb)); 11816 #endif 11817 11818 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 11819 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 11820 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 11821 continue; 11822 11823 /* 11824 * If the abort says that the task is untagged, the 11825 * task in the queue must be untagged. Otherwise, 11826 * we just check to see whether the tag numbers 11827 * match. This is because the QLogic firmware 11828 * doesn't pass back the tag type in an abort 11829 * request. 11830 */ 11831 #if 0 11832 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 11833 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 11834 || (xio->scsiio.tag_num == io->taskio.tag_num)) 11835 #endif 11836 /* 11837 * XXX KDM we've got problems with FC, because it 11838 * doesn't send down a tag type with aborts. So we 11839 * can only really go by the tag number... 11840 * This may cause problems with parallel SCSI. 11841 * Need to figure that out!! 
11842 */ 11843 if (xio->scsiio.tag_num == io->taskio.tag_num) { 11844 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11845 found = 1; 11846 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 11847 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11848 union ctl_ha_msg msg_info; 11849 11850 msg_info.hdr.nexus = io->io_hdr.nexus; 11851 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11852 msg_info.task.tag_num = io->taskio.tag_num; 11853 msg_info.task.tag_type = io->taskio.tag_type; 11854 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11855 msg_info.hdr.original_sc = NULL; 11856 msg_info.hdr.serializing_sc = NULL; 11857 #if 0 11858 printf("Sent Abort to other side\n"); 11859 #endif 11860 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11861 sizeof(msg_info.task), M_NOWAIT); 11862 } 11863 #if 0 11864 printf("ctl_abort_task: found I/O to abort\n"); 11865 #endif 11866 } 11867 } 11868 mtx_unlock(&lun->lun_lock); 11869 11870 if (found == 0) { 11871 /* 11872 * This isn't really an error. It's entirely possible for 11873 * the abort and command completion to cross on the wire. 11874 * This is more of an informative/diagnostic error. 11875 */ 11876 #if 0 11877 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " 11878 "%u:%u:%u tag %d type %d\n", 11879 io->io_hdr.nexus.initid, 11880 io->io_hdr.nexus.targ_port, 11881 io->io_hdr.nexus.targ_lun, io->taskio.tag_num, 11882 io->taskio.tag_type); 11883 #endif 11884 } 11885 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11886 return (0); 11887 } 11888 11889 static int 11890 ctl_query_task(union ctl_io *io, int task_set) 11891 { 11892 union ctl_io *xio; 11893 struct ctl_lun *lun; 11894 struct ctl_softc *softc; 11895 int found = 0; 11896 uint32_t targ_lun; 11897 11898 softc = control_softc; 11899 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11900 mtx_lock(&softc->ctl_lock); 11901 if ((targ_lun >= CTL_MAX_LUNS) || 11902 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11903 mtx_unlock(&softc->ctl_lock); 11904 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11905 return (1); 11906 } 11907 mtx_lock(&lun->lun_lock); 11908 mtx_unlock(&softc->ctl_lock); 11909 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11910 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11911 11912 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 11913 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 11914 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 11915 continue; 11916 11917 if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) { 11918 found = 1; 11919 break; 11920 } 11921 } 11922 mtx_unlock(&lun->lun_lock); 11923 if (found) 11924 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 11925 else 11926 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11927 return (0); 11928 } 11929 11930 static int 11931 ctl_query_async_event(union ctl_io *io) 11932 { 11933 struct ctl_lun *lun; 11934 struct ctl_softc *softc; 11935 ctl_ua_type ua; 11936 uint32_t targ_lun, initidx; 11937 11938 softc = control_softc; 11939 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11940 mtx_lock(&softc->ctl_lock); 11941 if ((targ_lun >= CTL_MAX_LUNS) || 11942 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11943 mtx_unlock(&softc->ctl_lock); 11944 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11945 return (1); 11946 } 11947 mtx_lock(&lun->lun_lock); 11948 mtx_unlock(&softc->ctl_lock); 11949 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11950 ua = ctl_build_qae(lun, initidx, io->taskio.task_resp); 11951 mtx_unlock(&lun->lun_lock); 11952 if (ua != CTL_UA_NONE) 
11953 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 11954 else 11955 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11956 return (0); 11957 } 11958 11959 static void 11960 ctl_run_task(union ctl_io *io) 11961 { 11962 struct ctl_softc *softc = control_softc; 11963 int retval = 1; 11964 11965 CTL_DEBUG_PRINT(("ctl_run_task\n")); 11966 KASSERT(io->io_hdr.io_type == CTL_IO_TASK, 11967 ("ctl_run_task: Unexpected io_type %d\n", io->io_hdr.io_type)); 11968 io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED; 11969 bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp)); 11970 switch (io->taskio.task_action) { 11971 case CTL_TASK_ABORT_TASK: 11972 retval = ctl_abort_task(io); 11973 break; 11974 case CTL_TASK_ABORT_TASK_SET: 11975 case CTL_TASK_CLEAR_TASK_SET: 11976 retval = ctl_abort_task_set(io); 11977 break; 11978 case CTL_TASK_CLEAR_ACA: 11979 break; 11980 case CTL_TASK_I_T_NEXUS_RESET: 11981 retval = ctl_i_t_nexus_reset(io); 11982 break; 11983 case CTL_TASK_LUN_RESET: 11984 retval = ctl_lun_reset(softc, io); 11985 break; 11986 case CTL_TASK_TARGET_RESET: 11987 retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET); 11988 break; 11989 case CTL_TASK_BUS_RESET: 11990 retval = ctl_bus_reset(softc, io); 11991 break; 11992 case CTL_TASK_PORT_LOGIN: 11993 break; 11994 case CTL_TASK_PORT_LOGOUT: 11995 break; 11996 case CTL_TASK_QUERY_TASK: 11997 retval = ctl_query_task(io, 0); 11998 break; 11999 case CTL_TASK_QUERY_TASK_SET: 12000 retval = ctl_query_task(io, 1); 12001 break; 12002 case CTL_TASK_QUERY_ASYNC_EVENT: 12003 retval = ctl_query_async_event(io); 12004 break; 12005 default: 12006 printf("%s: got unknown task management event %d\n", 12007 __func__, io->taskio.task_action); 12008 break; 12009 } 12010 if (retval == 0) 12011 io->io_hdr.status = CTL_SUCCESS; 12012 else 12013 io->io_hdr.status = CTL_ERROR; 12014 ctl_done(io); 12015 } 12016 12017 /* 12018 * For HA operation. Handle commands that come in from the other 12019 * controller. 12020 */ 12021 static void 12022 ctl_handle_isc(union ctl_io *io) 12023 { 12024 int free_io; 12025 struct ctl_lun *lun; 12026 struct ctl_softc *softc; 12027 uint32_t targ_lun; 12028 12029 softc = control_softc; 12030 12031 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12032 lun = softc->ctl_luns[targ_lun]; 12033 12034 switch (io->io_hdr.msg_type) { 12035 case CTL_MSG_SERIALIZE: 12036 free_io = ctl_serialize_other_sc_cmd(&io->scsiio); 12037 break; 12038 case CTL_MSG_R2R: { 12039 const struct ctl_cmd_entry *entry; 12040 12041 /* 12042 * This is only used in SER_ONLY mode.
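 * In SER_ONLY mode the peer controller has already serialized this
 * command against its OOA queue and is answering with a "ready to
 * run" message; all that is left here is to re-run the LUN-level
 * checks and, if they still pass, put the I/O on our RtR queue.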
12043 */ 12044 free_io = 0; 12045 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 12046 mtx_lock(&lun->lun_lock); 12047 if (ctl_scsiio_lun_check(lun, 12048 entry, (struct ctl_scsiio *)io) != 0) { 12049 mtx_unlock(&lun->lun_lock); 12050 ctl_done(io); 12051 break; 12052 } 12053 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12054 mtx_unlock(&lun->lun_lock); 12055 ctl_enqueue_rtr(io); 12056 break; 12057 } 12058 case CTL_MSG_FINISH_IO: 12059 if (softc->ha_mode == CTL_HA_MODE_XFER) { 12060 free_io = 0; 12061 ctl_done(io); 12062 } else { 12063 free_io = 1; 12064 mtx_lock(&lun->lun_lock); 12065 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, 12066 ooa_links); 12067 ctl_check_blocked(lun); 12068 mtx_unlock(&lun->lun_lock); 12069 } 12070 break; 12071 case CTL_MSG_PERS_ACTION: 12072 ctl_hndl_per_res_out_on_other_sc( 12073 (union ctl_ha_msg *)&io->presio.pr_msg); 12074 free_io = 1; 12075 break; 12076 case CTL_MSG_BAD_JUJU: 12077 free_io = 0; 12078 ctl_done(io); 12079 break; 12080 case CTL_MSG_DATAMOVE: 12081 /* Only used in XFER mode */ 12082 free_io = 0; 12083 ctl_datamove_remote(io); 12084 break; 12085 case CTL_MSG_DATAMOVE_DONE: 12086 /* Only used in XFER mode */ 12087 free_io = 0; 12088 io->scsiio.be_move_done(io); 12089 break; 12090 case CTL_MSG_FAILOVER: 12091 mtx_lock(&lun->lun_lock); 12092 ctl_failover_lun(lun); 12093 mtx_unlock(&lun->lun_lock); 12094 free_io = 1; 12095 break; 12096 default: 12097 free_io = 1; 12098 printf("%s: Invalid message type %d\n", 12099 __func__, io->io_hdr.msg_type); 12100 break; 12101 } 12102 if (free_io) 12103 ctl_free_io(io); 12104 12105 } 12106 12107 12108 /* 12109 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12110 * there is no match. 12111 */ 12112 static ctl_lun_error_pattern 12113 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12114 { 12115 const struct ctl_cmd_entry *entry; 12116 ctl_lun_error_pattern filtered_pattern, pattern; 12117 12118 pattern = desc->error_pattern; 12119 12120 /* 12121 * XXX KDM we need more data passed into this function to match a 12122 * custom pattern, and we actually need to implement custom pattern 12123 * matching. 12124 */ 12125 if (pattern & CTL_LUN_PAT_CMD) 12126 return (CTL_LUN_PAT_CMD); 12127 12128 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12129 return (CTL_LUN_PAT_ANY); 12130 12131 entry = ctl_get_cmd_entry(ctsio, NULL); 12132 12133 filtered_pattern = entry->pattern & pattern; 12134 12135 /* 12136 * If the user requested specific flags in the pattern (e.g. 12137 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12138 * flags. 12139 * 12140 * If the user did not specify any flags, it doesn't matter whether 12141 * or not the command supports the flags. 12142 */ 12143 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12144 (pattern & ~CTL_LUN_PAT_MASK)) 12145 return (CTL_LUN_PAT_NONE); 12146 12147 /* 12148 * If the user asked for a range check, see if the requested LBA 12149 * range overlaps with this command's LBA range. 12150 */ 12151 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12152 uint64_t lba1; 12153 uint64_t len1; 12154 ctl_action action; 12155 int retval; 12156 12157 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12158 if (retval != 0) 12159 return (CTL_LUN_PAT_NONE); 12160 12161 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12162 desc->lba_range.len, FALSE); 12163 /* 12164 * A "pass" means that the LBA ranges don't overlap, so 12165 * this doesn't match the user's range criteria. 
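 * For example (arbitrary numbers): an injected range starting at LBA
 * 1000 with length 16 matches a command touching LBAs 1008-1015, but
 * a command starting at LBA 2048 comes back as CTL_ACTION_PASS and is
 * left alone.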
12166 */ 12167 if (action == CTL_ACTION_PASS) 12168 return (CTL_LUN_PAT_NONE); 12169 } 12170 12171 return (filtered_pattern); 12172 } 12173 12174 static void 12175 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12176 { 12177 struct ctl_error_desc *desc, *desc2; 12178 12179 mtx_assert(&lun->lun_lock, MA_OWNED); 12180 12181 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12182 ctl_lun_error_pattern pattern; 12183 /* 12184 * Check to see whether this particular command matches 12185 * the pattern in the descriptor. 12186 */ 12187 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12188 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12189 continue; 12190 12191 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12192 case CTL_LUN_INJ_ABORTED: 12193 ctl_set_aborted(&io->scsiio); 12194 break; 12195 case CTL_LUN_INJ_MEDIUM_ERR: 12196 ctl_set_medium_error(&io->scsiio, 12197 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != 12198 CTL_FLAG_DATA_OUT); 12199 break; 12200 case CTL_LUN_INJ_UA: 12201 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12202 * OCCURRED */ 12203 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12204 break; 12205 case CTL_LUN_INJ_CUSTOM: 12206 /* 12207 * We're assuming the user knows what he is doing. 12208 * Just copy the sense information without doing 12209 * checks. 12210 */ 12211 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12212 MIN(sizeof(desc->custom_sense), 12213 sizeof(io->scsiio.sense_data))); 12214 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12215 io->scsiio.sense_len = SSD_FULL_SIZE; 12216 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12217 break; 12218 case CTL_LUN_INJ_NONE: 12219 default: 12220 /* 12221 * If this is an error injection type we don't know 12222 * about, clear the continuous flag (if it is set) 12223 * so it will get deleted below. 
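 * (Without CTL_LUN_INJ_CONTINUOUS a descriptor is one-shot: once it
 * fires, or once it falls into this default case, it is unlinked from
 * lun->error_list and freed below.)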
12224 */ 12225 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12226 break; 12227 } 12228 /* 12229 * By default, each error injection action is a one-shot 12230 */ 12231 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12232 continue; 12233 12234 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12235 12236 free(desc, M_CTL); 12237 } 12238 } 12239 12240 #ifdef CTL_IO_DELAY 12241 static void 12242 ctl_datamove_timer_wakeup(void *arg) 12243 { 12244 union ctl_io *io; 12245 12246 io = (union ctl_io *)arg; 12247 12248 ctl_datamove(io); 12249 } 12250 #endif /* CTL_IO_DELAY */ 12251 12252 void 12253 ctl_datamove(union ctl_io *io) 12254 { 12255 struct ctl_lun *lun; 12256 void (*fe_datamove)(union ctl_io *io); 12257 12258 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 12259 12260 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12261 12262 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12263 #ifdef CTL_TIME_IO 12264 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12265 char str[256]; 12266 char path_str[64]; 12267 struct sbuf sb; 12268 12269 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12270 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12271 12272 sbuf_cat(&sb, path_str); 12273 switch (io->io_hdr.io_type) { 12274 case CTL_IO_SCSI: 12275 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12276 sbuf_printf(&sb, "\n"); 12277 sbuf_cat(&sb, path_str); 12278 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12279 io->scsiio.tag_num, io->scsiio.tag_type); 12280 break; 12281 case CTL_IO_TASK: 12282 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12283 "Tag Type: %d\n", io->taskio.task_action, 12284 io->taskio.tag_num, io->taskio.tag_type); 12285 break; 12286 default: 12287 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12288 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12289 break; 12290 } 12291 sbuf_cat(&sb, path_str); 12292 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12293 (intmax_t)time_uptime - io->io_hdr.start_time); 12294 sbuf_finish(&sb); 12295 printf("%s", sbuf_data(&sb)); 12296 } 12297 #endif /* CTL_TIME_IO */ 12298 12299 #ifdef CTL_IO_DELAY 12300 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12301 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12302 } else { 12303 if ((lun != NULL) 12304 && (lun->delay_info.datamove_delay > 0)) { 12305 12306 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 12307 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12308 callout_reset(&io->io_hdr.delay_callout, 12309 lun->delay_info.datamove_delay * hz, 12310 ctl_datamove_timer_wakeup, io); 12311 if (lun->delay_info.datamove_type == 12312 CTL_DELAY_TYPE_ONESHOT) 12313 lun->delay_info.datamove_delay = 0; 12314 return; 12315 } 12316 } 12317 #endif 12318 12319 /* 12320 * This command has been aborted. Set the port status, so we fail 12321 * the data move. 12322 */ 12323 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12324 printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", 12325 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12326 io->io_hdr.nexus.targ_port, 12327 io->io_hdr.nexus.targ_lun); 12328 io->io_hdr.port_status = 31337; 12329 /* 12330 * Note that the backend, in this case, will get the 12331 * callback in its context. In other cases it may get 12332 * called in the frontend's interrupt thread context. 12333 */ 12334 io->scsiio.be_move_done(io); 12335 return; 12336 } 12337 12338 /* Don't confuse frontend with zero length data move. 
*/ 12339 if (io->scsiio.kern_data_len == 0) { 12340 io->scsiio.be_move_done(io); 12341 return; 12342 } 12343 12344 /* 12345 * If we're in XFER mode and this I/O is from the other shelf 12346 * controller, we need to send the DMA to the other side to 12347 * actually transfer the data to/from the host. In serialize only 12348 * mode the transfer happens below CTL and ctl_datamove() is only 12349 * called on the machine that originally received the I/O. 12350 */ 12351 if ((control_softc->ha_mode == CTL_HA_MODE_XFER) 12352 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12353 union ctl_ha_msg msg; 12354 uint32_t sg_entries_sent; 12355 int do_sg_copy; 12356 int i; 12357 12358 memset(&msg, 0, sizeof(msg)); 12359 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 12360 msg.hdr.original_sc = io->io_hdr.original_sc; 12361 msg.hdr.serializing_sc = io; 12362 msg.hdr.nexus = io->io_hdr.nexus; 12363 msg.hdr.status = io->io_hdr.status; 12364 msg.dt.flags = io->io_hdr.flags; 12365 /* 12366 * We convert everything into an S/G list here. We can't 12367 * pass by reference, only by value between controllers. 12368 * So we can't pass a pointer to the S/G list, only as many 12369 * S/G entries as we can fit in here. If it's possible for 12370 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 12371 * then we need to break this up into multiple transfers. 12372 */ 12373 if (io->scsiio.kern_sg_entries == 0) { 12374 msg.dt.kern_sg_entries = 1; 12375 #if 0 12376 /* 12377 * Convert to a physical address if this is a 12378 * virtual address. 12379 */ 12380 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 12381 msg.dt.sg_list[0].addr = 12382 io->scsiio.kern_data_ptr; 12383 } else { 12384 /* 12385 * XXX KDM use busdma here! 12386 */ 12387 msg.dt.sg_list[0].addr = (void *) 12388 vtophys(io->scsiio.kern_data_ptr); 12389 } 12390 #else 12391 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12392 ("HA does not support BUS_ADDR")); 12393 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; 12394 #endif 12395 12396 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 12397 do_sg_copy = 0; 12398 } else { 12399 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 12400 do_sg_copy = 1; 12401 } 12402 12403 msg.dt.kern_data_len = io->scsiio.kern_data_len; 12404 msg.dt.kern_total_len = io->scsiio.kern_total_len; 12405 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 12406 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 12407 msg.dt.sg_sequence = 0; 12408 12409 /* 12410 * Loop until we've sent all of the S/G entries. On the 12411 * other end, we'll recompose these S/G entries into one 12412 * contiguous list before passing it to the datamove code on that side. 12413 */ 12414 for (sg_entries_sent = 0; sg_entries_sent < 12415 msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { 12416 msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list)/ 12417 sizeof(msg.dt.sg_list[0])), 12418 msg.dt.kern_sg_entries - sg_entries_sent); 12419 12420 if (do_sg_copy != 0) { 12421 struct ctl_sg_entry *sgl; 12422 int j; 12423 12424 sgl = (struct ctl_sg_entry *) 12425 io->scsiio.kern_data_ptr; 12426 /* 12427 * If this is in cached memory, flush the cache 12428 * before we send the DMA request to the other 12429 * controller. We want to do this in either 12430 * the read or the write case. The read 12431 * case is straightforward. In the write 12432 * case, we want to make sure nothing is 12433 * in the local cache that could overwrite 12434 * the DMAed data.
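 * (In the current HA code the S/G addresses are plain kernel virtual
 * addresses; bus/physical addressing is not supported here, which is
 * what the KASSERT below enforces.)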
12435 */ 12436 12437 for (i = sg_entries_sent, j = 0; 12438 i < msg.dt.cur_sg_entries; i++, j++) { 12439 #if 0 12440 if ((io->io_hdr.flags & 12441 CTL_FLAG_BUS_ADDR) == 0) { 12442 /* 12443 * XXX KDM use busdma. 12444 */ 12445 msg.dt.sg_list[j].addr =(void *) 12446 vtophys(sgl[i].addr); 12447 } else { 12448 msg.dt.sg_list[j].addr = 12449 sgl[i].addr; 12450 } 12451 #else 12452 KASSERT((io->io_hdr.flags & 12453 CTL_FLAG_BUS_ADDR) == 0, 12454 ("HA does not support BUS_ADDR")); 12455 msg.dt.sg_list[j].addr = sgl[i].addr; 12456 #endif 12457 msg.dt.sg_list[j].len = sgl[i].len; 12458 } 12459 } 12460 12461 sg_entries_sent += msg.dt.cur_sg_entries; 12462 if (sg_entries_sent >= msg.dt.kern_sg_entries) 12463 msg.dt.sg_last = 1; 12464 else 12465 msg.dt.sg_last = 0; 12466 12467 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12468 sizeof(msg.dt) - sizeof(msg.dt.sg_list) + 12469 sizeof(struct ctl_sg_entry)*msg.dt.cur_sg_entries, 12470 M_WAITOK) > CTL_HA_STATUS_SUCCESS) { 12471 io->io_hdr.port_status = 31341; 12472 io->scsiio.be_move_done(io); 12473 return; 12474 } 12475 12476 msg.dt.sent_sg_entries = sg_entries_sent; 12477 } 12478 12479 /* 12480 * Officially hand over the request from us to the peer. 12481 * If failover has just happened, then we must return error. 12482 * If failover happens just after, then it is not our problem. 12483 */ 12484 if (lun) 12485 mtx_lock(&lun->lun_lock); 12486 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12487 if (lun) 12488 mtx_unlock(&lun->lun_lock); 12489 io->io_hdr.port_status = 31342; 12490 io->scsiio.be_move_done(io); 12491 return; 12492 } 12493 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12494 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 12495 if (lun) 12496 mtx_unlock(&lun->lun_lock); 12497 } else { 12498 12499 /* 12500 * Look up the fe_datamove() function for this particular 12501 * front end. 12502 */ 12503 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; 12504 12505 fe_datamove(io); 12506 } 12507 } 12508 12509 static void 12510 ctl_send_datamove_done(union ctl_io *io, int have_lock) 12511 { 12512 union ctl_ha_msg msg; 12513 12514 memset(&msg, 0, sizeof(msg)); 12515 12516 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12517 msg.hdr.original_sc = io; 12518 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12519 msg.hdr.nexus = io->io_hdr.nexus; 12520 msg.hdr.status = io->io_hdr.status; 12521 msg.scsi.tag_num = io->scsiio.tag_num; 12522 msg.scsi.tag_type = io->scsiio.tag_type; 12523 msg.scsi.scsi_status = io->scsiio.scsi_status; 12524 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12525 io->scsiio.sense_len); 12526 msg.scsi.sense_len = io->scsiio.sense_len; 12527 msg.scsi.sense_residual = io->scsiio.sense_residual; 12528 msg.scsi.fetd_status = io->io_hdr.port_status; 12529 msg.scsi.residual = io->scsiio.residual; 12530 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12531 12532 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12533 ctl_failover_io(io, /*have_lock*/ have_lock); 12534 return; 12535 } 12536 12537 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12538 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12539 msg.scsi.sense_len, M_WAITOK); 12540 } 12541 12542 /* 12543 * The DMA to the remote side is done, now we need to tell the other side 12544 * we're done so it can continue with its data movement.
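 * Roughly, the XFER-mode write path is: the frontend DMAs host data
 * into the local buffers set up by ctl_datamove_remote_sgl_setup(),
 * ctl_datamove_remote_dm_write_cb() then pushes those buffers to the
 * peer's memory, and this callback frees the local/remote S/G lists
 * and sends CTL_MSG_DATAMOVE_DONE so the peer can finish the command.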
12545 */ 12546 static void 12547 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12548 { 12549 union ctl_io *io; 12550 int i; 12551 12552 io = rq->context; 12553 12554 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12555 printf("%s: ISC DMA write failed with error %d\n", __func__, 12556 rq->ret); 12557 ctl_set_internal_failure(&io->scsiio, 12558 /*sks_valid*/ 1, 12559 /*retry_count*/ rq->ret); 12560 } 12561 12562 ctl_dt_req_free(rq); 12563 12564 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12565 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12566 free(io->io_hdr.remote_sglist, M_CTL); 12567 io->io_hdr.remote_sglist = NULL; 12568 io->io_hdr.local_sglist = NULL; 12569 12570 /* 12571 * The data is in local and remote memory, so now we need to send 12572 * status (good or bad) back to the other side. 12573 */ 12574 ctl_send_datamove_done(io, /*have_lock*/ 0); 12575 } 12576 12577 /* 12578 * We've moved the data from the host/controller into local memory. Now we 12579 * need to push it over to the remote controller's memory. 12580 */ 12581 static int 12582 ctl_datamove_remote_dm_write_cb(union ctl_io *io) 12583 { 12584 int retval; 12585 12586 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 12587 ctl_datamove_remote_write_cb); 12588 return (retval); 12589 } 12590 12591 static void 12592 ctl_datamove_remote_write(union ctl_io *io) 12593 { 12594 int retval; 12595 void (*fe_datamove)(union ctl_io *io); 12596 12597 /* 12598 * - Get the data from the host/HBA into local memory. 12599 * - DMA memory from the local controller to the remote controller. 12600 * - Send status back to the remote controller. 12601 */ 12602 12603 retval = ctl_datamove_remote_sgl_setup(io); 12604 if (retval != 0) 12605 return; 12606 12607 /* Switch the pointer over so the FETD knows what to do */ 12608 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12609 12610 /* 12611 * Use a custom move done callback, since we need to send completion 12612 * back to the other controller, not to the backend on this side. 12613 */ 12614 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12615 12616 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; 12617 fe_datamove(io); 12618 } 12619 12620 static int 12621 ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12622 { 12623 #if 0 12624 char str[256]; 12625 char path_str[64]; 12626 struct sbuf sb; 12627 #endif 12628 int i; 12629 12630 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12631 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12632 free(io->io_hdr.remote_sglist, M_CTL); 12633 io->io_hdr.remote_sglist = NULL; 12634 io->io_hdr.local_sglist = NULL; 12635 12636 #if 0 12637 scsi_path_string(io, path_str, sizeof(path_str)); 12638 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12639 sbuf_cat(&sb, path_str); 12640 scsi_command_string(&io->scsiio, NULL, &sb); 12641 sbuf_printf(&sb, "\n"); 12642 sbuf_cat(&sb, path_str); 12643 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12644 io->scsiio.tag_num, io->scsiio.tag_type); 12645 sbuf_cat(&sb, path_str); 12646 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 12647 io->io_hdr.flags, io->io_hdr.status); 12648 sbuf_finish(&sb); 12649 printf("%s", sbuf_data(&sb)); 12650 #endif 12651 12652 12653 /* 12654 * The read is done, now we need to send status (good or bad) back 12655 * to the other side.
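 * (Reads mirror the write path: the data was pulled from the peer's
 * memory into the local buffers first, the frontend has now moved it
 * out to the host, so all that remains is to free the buffers above
 * and report the result.)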
12656 */ 12657 ctl_send_datamove_done(io, /*have_lock*/ 0); 12658 12659 return (0); 12660 } 12661 12662 static void 12663 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12664 { 12665 union ctl_io *io; 12666 void (*fe_datamove)(union ctl_io *io); 12667 12668 io = rq->context; 12669 12670 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12671 printf("%s: ISC DMA read failed with error %d\n", __func__, 12672 rq->ret); 12673 ctl_set_internal_failure(&io->scsiio, 12674 /*sks_valid*/ 1, 12675 /*retry_count*/ rq->ret); 12676 } 12677 12678 ctl_dt_req_free(rq); 12679 12680 /* Switch the pointer over so the FETD knows what to do */ 12681 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12682 12683 /* 12684 * Use a custom move done callback, since we need to send completion 12685 * back to the other controller, not to the backend on this side. 12686 */ 12687 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12688 12689 /* XXX KDM add checks like the ones in ctl_datamove? */ 12690 12691 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; 12692 fe_datamove(io); 12693 } 12694 12695 static int 12696 ctl_datamove_remote_sgl_setup(union ctl_io *io) 12697 { 12698 struct ctl_sg_entry *local_sglist; 12699 struct ctl_softc *softc; 12700 uint32_t len_to_go; 12701 int retval; 12702 int i; 12703 12704 retval = 0; 12705 softc = control_softc; 12706 local_sglist = io->io_hdr.local_sglist; 12707 len_to_go = io->scsiio.kern_data_len; 12708 12709 /* 12710 * The difficult thing here is that the size of the various 12711 * S/G segments may be different than the size from the 12712 * remote controller. That'll make it harder when DMAing 12713 * the data back to the other side. 12714 */ 12715 for (i = 0; len_to_go > 0; i++) { 12716 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12717 local_sglist[i].addr = 12718 malloc(local_sglist[i].len, M_CTL, M_WAITOK); 12719 12720 len_to_go -= local_sglist[i].len; 12721 } 12722 /* 12723 * Reset the number of S/G entries accordingly. The original 12724 * number of S/G entries is available in rem_sg_entries. 12725 */ 12726 io->scsiio.kern_sg_entries = i; 12727 12728 #if 0 12729 printf("%s: kern_sg_entries = %d\n", __func__, 12730 io->scsiio.kern_sg_entries); 12731 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12732 printf("%s: sg[%d] = %p, %lu\n", __func__, i, 12733 local_sglist[i].addr, local_sglist[i].len); 12734 #endif 12735 12736 return (retval); 12737 } 12738 12739 static int 12740 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12741 ctl_ha_dt_cb callback) 12742 { 12743 struct ctl_ha_dt_req *rq; 12744 struct ctl_sg_entry *remote_sglist, *local_sglist; 12745 uint32_t local_used, remote_used, total_used; 12746 int i, j, isc_ret; 12747 12748 rq = ctl_dt_req_alloc(); 12749 12750 /* 12751 * If we failed to allocate the request, and if the DMA didn't fail 12752 * anyway, set busy status. This is just a resource allocation 12753 * failure. 12754 */ 12755 if ((rq == NULL) 12756 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12757 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) 12758 ctl_set_busy(&io->scsiio); 12759 12760 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12761 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { 12762 12763 if (rq != NULL) 12764 ctl_dt_req_free(rq); 12765 12766 /* 12767 * The data move failed. We need to return status back 12768 * to the other controller. No point in trying to DMA 12769 * data to the remote controller. 
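 * Reporting DATAMOVE_DONE with the error status already set lets the
 * peer finish the command instead of waiting for data that will never
 * arrive.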
12770 */ 12771 12772 ctl_send_datamove_done(io, /*have_lock*/ 0); 12773 12774 return (1); 12775 } 12776 12777 local_sglist = io->io_hdr.local_sglist; 12778 remote_sglist = io->io_hdr.remote_sglist; 12779 local_used = 0; 12780 remote_used = 0; 12781 total_used = 0; 12782 12783 /* 12784 * Pull/push the data over the wire from/to the other controller. 12785 * This takes into account the possibility that the local and 12786 * remote sglists may not be identical in terms of the size of 12787 * the elements and the number of elements. 12788 * 12789 * One fundamental assumption here is that the length allocated for 12790 * both the local and remote sglists is identical. Otherwise, we've 12791 * essentially got a coding error of some sort. 12792 */ 12793 isc_ret = CTL_HA_STATUS_SUCCESS; 12794 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12795 uint32_t cur_len; 12796 uint8_t *tmp_ptr; 12797 12798 rq->command = command; 12799 rq->context = io; 12800 12801 /* 12802 * Both pointers should be aligned. But it is possible 12803 * that the allocation length is not. They should both 12804 * also have enough slack left over at the end, though, 12805 * to round up to the next 8 byte boundary. 12806 */ 12807 cur_len = MIN(local_sglist[i].len - local_used, 12808 remote_sglist[j].len - remote_used); 12809 rq->size = cur_len; 12810 12811 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12812 tmp_ptr += local_used; 12813 12814 #if 0 12815 /* Use physical addresses when talking to ISC hardware */ 12816 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12817 /* XXX KDM use busdma */ 12818 rq->local = vtophys(tmp_ptr); 12819 } else 12820 rq->local = tmp_ptr; 12821 #else 12822 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12823 ("HA does not support BUS_ADDR")); 12824 rq->local = tmp_ptr; 12825 #endif 12826 12827 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12828 tmp_ptr += remote_used; 12829 rq->remote = tmp_ptr; 12830 12831 rq->callback = NULL; 12832 12833 local_used += cur_len; 12834 if (local_used >= local_sglist[i].len) { 12835 i++; 12836 local_used = 0; 12837 } 12838 12839 remote_used += cur_len; 12840 if (remote_used >= remote_sglist[j].len) { 12841 j++; 12842 remote_used = 0; 12843 } 12844 total_used += cur_len; 12845 12846 if (total_used >= io->scsiio.kern_data_len) 12847 rq->callback = callback; 12848 12849 #if 0 12850 printf("%s: %s: local %p remote %p size %d\n", __func__, 12851 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 12852 rq->local, rq->remote, rq->size); 12853 #endif 12854 12855 isc_ret = ctl_dt_single(rq); 12856 if (isc_ret > CTL_HA_STATUS_SUCCESS) 12857 break; 12858 } 12859 if (isc_ret != CTL_HA_STATUS_WAIT) { 12860 rq->ret = isc_ret; 12861 callback(rq); 12862 } 12863 12864 return (0); 12865 } 12866 12867 static void 12868 ctl_datamove_remote_read(union ctl_io *io) 12869 { 12870 int retval; 12871 int i; 12872 12873 /* 12874 * This will send an error to the other controller in the case of a 12875 * failure. 12876 */ 12877 retval = ctl_datamove_remote_sgl_setup(io); 12878 if (retval != 0) 12879 return; 12880 12881 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12882 ctl_datamove_remote_read_cb); 12883 if (retval != 0) { 12884 /* 12885 * Make sure we free memory if there was an error.. The 12886 * ctl_datamove_remote_xfer() function will send the 12887 * datamove done message, or call the callback with an 12888 * error if there is a problem. 
12889 */ 12890 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12891 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12892 free(io->io_hdr.remote_sglist, M_CTL); 12893 io->io_hdr.remote_sglist = NULL; 12894 io->io_hdr.local_sglist = NULL; 12895 } 12896 } 12897 12898 /* 12899 * Process a datamove request from the other controller. This is used for 12900 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12901 * first. Once that is complete, the data gets DMAed into the remote 12902 * controller's memory. For reads, we DMA from the remote controller's 12903 * memory into our memory first, and then move it out to the FETD. 12904 */ 12905 static void 12906 ctl_datamove_remote(union ctl_io *io) 12907 { 12908 12909 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 12910 12911 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12912 ctl_failover_io(io, /*have_lock*/ 0); 12913 return; 12914 } 12915 12916 /* 12917 * Note that we look for an aborted I/O here, but don't do some of 12918 * the other checks that ctl_datamove() normally does. 12919 * We don't need to run the datamove delay code, since that should 12920 * have been done if need be on the other controller. 12921 */ 12922 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12923 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, 12924 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12925 io->io_hdr.nexus.targ_port, 12926 io->io_hdr.nexus.targ_lun); 12927 io->io_hdr.port_status = 31338; 12928 ctl_send_datamove_done(io, /*have_lock*/ 0); 12929 return; 12930 } 12931 12932 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) 12933 ctl_datamove_remote_write(io); 12934 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 12935 ctl_datamove_remote_read(io); 12936 else { 12937 io->io_hdr.port_status = 31339; 12938 ctl_send_datamove_done(io, /*have_lock*/ 0); 12939 } 12940 } 12941 12942 static int 12943 ctl_process_done(union ctl_io *io) 12944 { 12945 struct ctl_lun *lun; 12946 struct ctl_softc *softc = control_softc; 12947 void (*fe_done)(union ctl_io *io); 12948 union ctl_ha_msg msg; 12949 uint32_t targ_port = io->io_hdr.nexus.targ_port; 12950 12951 CTL_DEBUG_PRINT(("ctl_process_done\n")); 12952 12953 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) 12954 fe_done = softc->ctl_ports[targ_port]->fe_done; 12955 else 12956 fe_done = NULL; 12957 12958 #ifdef CTL_TIME_IO 12959 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12960 char str[256]; 12961 char path_str[64]; 12962 struct sbuf sb; 12963 12964 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12965 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12966 12967 sbuf_cat(&sb, path_str); 12968 switch (io->io_hdr.io_type) { 12969 case CTL_IO_SCSI: 12970 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12971 sbuf_printf(&sb, "\n"); 12972 sbuf_cat(&sb, path_str); 12973 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12974 io->scsiio.tag_num, io->scsiio.tag_type); 12975 break; 12976 case CTL_IO_TASK: 12977 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12978 "Tag Type: %d\n", io->taskio.task_action, 12979 io->taskio.tag_num, io->taskio.tag_type); 12980 break; 12981 default: 12982 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12983 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12984 break; 12985 } 12986 sbuf_cat(&sb, path_str); 12987 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 12988 (intmax_t)time_uptime - io->io_hdr.start_time); 12989 sbuf_finish(&sb); 12990 printf("%s", sbuf_data(&sb)); 12991 } 12992 #endif /* 
CTL_TIME_IO */ 12993 12994 switch (io->io_hdr.io_type) { 12995 case CTL_IO_SCSI: 12996 break; 12997 case CTL_IO_TASK: 12998 if (ctl_debug & CTL_DEBUG_INFO) 12999 ctl_io_error_print(io, NULL); 13000 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 13001 ctl_free_io(io); 13002 else 13003 fe_done(io); 13004 return (CTL_RETVAL_COMPLETE); 13005 default: 13006 panic("ctl_process_done: invalid io type %d\n", 13007 io->io_hdr.io_type); 13008 break; /* NOTREACHED */ 13009 } 13010 13011 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13012 if (lun == NULL) { 13013 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 13014 io->io_hdr.nexus.targ_mapped_lun)); 13015 goto bailout; 13016 } 13017 13018 mtx_lock(&lun->lun_lock); 13019 13020 /* 13021 * Check to see if we have any errors to inject here. We only 13022 * inject errors for commands that don't already have errors set. 13023 */ 13024 if ((STAILQ_FIRST(&lun->error_list) != NULL) && 13025 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && 13026 ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) 13027 ctl_inject_error(lun, io); 13028 13029 /* 13030 * XXX KDM how do we treat commands that aren't completed 13031 * successfully? 13032 * 13033 * XXX KDM should we also track I/O latency? 13034 */ 13035 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && 13036 io->io_hdr.io_type == CTL_IO_SCSI) { 13037 #ifdef CTL_TIME_IO 13038 struct bintime cur_bt; 13039 #endif 13040 int type; 13041 13042 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13043 CTL_FLAG_DATA_IN) 13044 type = CTL_STATS_READ; 13045 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13046 CTL_FLAG_DATA_OUT) 13047 type = CTL_STATS_WRITE; 13048 else 13049 type = CTL_STATS_NO_IO; 13050 13051 lun->stats.ports[targ_port].bytes[type] += 13052 io->scsiio.kern_total_len; 13053 lun->stats.ports[targ_port].operations[type]++; 13054 #ifdef CTL_TIME_IO 13055 bintime_add(&lun->stats.ports[targ_port].dma_time[type], 13056 &io->io_hdr.dma_bt); 13057 lun->stats.ports[targ_port].num_dmas[type] += 13058 io->io_hdr.num_dmas; 13059 getbintime(&cur_bt); 13060 bintime_sub(&cur_bt, &io->io_hdr.start_bt); 13061 bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt); 13062 #endif 13063 } 13064 13065 /* 13066 * Remove this from the OOA queue. 13067 */ 13068 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 13069 #ifdef CTL_TIME_IO 13070 if (TAILQ_EMPTY(&lun->ooa_queue)) 13071 lun->last_busy = getsbinuptime(); 13072 #endif 13073 13074 /* 13075 * Run through the blocked queue on this LUN and see if anything 13076 * has become unblocked, now that this transaction is done. 13077 */ 13078 ctl_check_blocked(lun); 13079 13080 /* 13081 * If the LUN has been invalidated, free it if there is nothing 13082 * left on its OOA queue. 13083 */ 13084 if ((lun->flags & CTL_LUN_INVALID) 13085 && TAILQ_EMPTY(&lun->ooa_queue)) { 13086 mtx_unlock(&lun->lun_lock); 13087 mtx_lock(&softc->ctl_lock); 13088 ctl_free_lun(lun); 13089 mtx_unlock(&softc->ctl_lock); 13090 } else 13091 mtx_unlock(&lun->lun_lock); 13092 13093 bailout: 13094 13095 /* 13096 * If this command has been aborted, make sure we set the status 13097 * properly. The FETD is responsible for freeing the I/O and doing 13098 * whatever it needs to do to clean up its state. 13099 */ 13100 if (io->io_hdr.flags & CTL_FLAG_ABORT) 13101 ctl_set_task_aborted(&io->scsiio); 13102 13103 /* 13104 * If enabled, print command error status. 
13105 */ 13106 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS && 13107 (ctl_debug & CTL_DEBUG_INFO) != 0) 13108 ctl_io_error_print(io, NULL); 13109 13110 /* 13111 * Tell the FETD or the other shelf controller we're done with this 13112 * command. Note that only SCSI commands get to this point. Task 13113 * management commands are completed above. 13114 */ 13115 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 13116 (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) { 13117 memset(&msg, 0, sizeof(msg)); 13118 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 13119 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 13120 msg.hdr.nexus = io->io_hdr.nexus; 13121 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13122 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data), 13123 M_WAITOK); 13124 } 13125 if ((softc->ha_mode == CTL_HA_MODE_XFER) 13126 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 13127 memset(&msg, 0, sizeof(msg)); 13128 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 13129 msg.hdr.original_sc = io->io_hdr.original_sc; 13130 msg.hdr.nexus = io->io_hdr.nexus; 13131 msg.hdr.status = io->io_hdr.status; 13132 msg.scsi.scsi_status = io->scsiio.scsi_status; 13133 msg.scsi.tag_num = io->scsiio.tag_num; 13134 msg.scsi.tag_type = io->scsiio.tag_type; 13135 msg.scsi.sense_len = io->scsiio.sense_len; 13136 msg.scsi.sense_residual = io->scsiio.sense_residual; 13137 msg.scsi.residual = io->scsiio.residual; 13138 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 13139 io->scsiio.sense_len); 13140 13141 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13142 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 13143 msg.scsi.sense_len, M_WAITOK); 13144 ctl_free_io(io); 13145 } else 13146 fe_done(io); 13147 13148 return (CTL_RETVAL_COMPLETE); 13149 } 13150 13151 #ifdef CTL_WITH_CA 13152 /* 13153 * Front end should call this if it doesn't do autosense. When the request 13154 * sense comes back in from the initiator, we'll dequeue this and send it. 13155 */ 13156 int 13157 ctl_queue_sense(union ctl_io *io) 13158 { 13159 struct ctl_lun *lun; 13160 struct ctl_port *port; 13161 struct ctl_softc *softc; 13162 uint32_t initidx, targ_lun; 13163 13164 softc = control_softc; 13165 13166 CTL_DEBUG_PRINT(("ctl_queue_sense\n")); 13167 13168 /* 13169 * LUN lookup will likely move to the ctl_work_thread() once we 13170 * have our new queueing infrastructure (that doesn't put things on 13171 * a per-LUN queue initially). That is so that we can handle 13172 * things like an INQUIRY to a LUN that we don't have enabled. We 13173 * can't deal with that right now. 13174 */ 13175 mtx_lock(&softc->ctl_lock); 13176 13177 /* 13178 * If we don't have a LUN for this, just toss the sense 13179 * information. 13180 */ 13181 port = ctl_io_port(&io->io_hdr); 13182 targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13183 if ((targ_lun < CTL_MAX_LUNS) 13184 && (softc->ctl_luns[targ_lun] != NULL)) 13185 lun = softc->ctl_luns[targ_lun]; 13186 else 13187 goto bailout; 13188 13189 initidx = ctl_get_initindex(&io->io_hdr.nexus); 13190 13191 mtx_lock(&lun->lun_lock); 13192 /* 13193 * Already have CA set for this LUN...toss the sense information.
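 * Only one piece of pending sense data is kept per initiator index:
 * if a contingent allegiance condition is already recorded for this
 * initiator we keep the older sense data and drop the new one.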
13194 */ 13195 if (ctl_is_set(lun->have_ca, initidx)) { 13196 mtx_unlock(&lun->lun_lock); 13197 goto bailout; 13198 } 13199 13200 memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data, 13201 MIN(sizeof(lun->pending_sense[initidx]), 13202 sizeof(io->scsiio.sense_data))); 13203 ctl_set_mask(lun->have_ca, initidx); 13204 mtx_unlock(&lun->lun_lock); 13205 13206 bailout: 13207 mtx_unlock(&softc->ctl_lock); 13208 13209 ctl_free_io(io); 13210 13211 return (CTL_RETVAL_COMPLETE); 13212 } 13213 #endif 13214 13215 /* 13216 * Primary command inlet from frontend ports. All SCSI and task I/O 13217 * requests must go through this function. 13218 */ 13219 int 13220 ctl_queue(union ctl_io *io) 13221 { 13222 struct ctl_port *port; 13223 13224 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 13225 13226 #ifdef CTL_TIME_IO 13227 io->io_hdr.start_time = time_uptime; 13228 getbintime(&io->io_hdr.start_bt); 13229 #endif /* CTL_TIME_IO */ 13230 13231 /* Map FE-specific LUN ID into global one. */ 13232 port = ctl_io_port(&io->io_hdr); 13233 io->io_hdr.nexus.targ_mapped_lun = 13234 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13235 13236 switch (io->io_hdr.io_type) { 13237 case CTL_IO_SCSI: 13238 case CTL_IO_TASK: 13239 if (ctl_debug & CTL_DEBUG_CDB) 13240 ctl_io_print(io); 13241 ctl_enqueue_incoming(io); 13242 break; 13243 default: 13244 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 13245 return (EINVAL); 13246 } 13247 13248 return (CTL_RETVAL_COMPLETE); 13249 } 13250 13251 #ifdef CTL_IO_DELAY 13252 static void 13253 ctl_done_timer_wakeup(void *arg) 13254 { 13255 union ctl_io *io; 13256 13257 io = (union ctl_io *)arg; 13258 ctl_done(io); 13259 } 13260 #endif /* CTL_IO_DELAY */ 13261 13262 void 13263 ctl_serseq_done(union ctl_io *io) 13264 { 13265 struct ctl_lun *lun; 13266 13267 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13268 if (lun->be_lun == NULL || 13269 lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF) 13270 return; 13271 mtx_lock(&lun->lun_lock); 13272 io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE; 13273 ctl_check_blocked(lun); 13274 mtx_unlock(&lun->lun_lock); 13275 } 13276 13277 void 13278 ctl_done(union ctl_io *io) 13279 { 13280 13281 /* 13282 * Enable this to catch duplicate completion issues. 13283 */ 13284 #if 0 13285 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { 13286 printf("%s: type %d msg %d cdb %x iptl: " 13287 "%u:%u:%u tag 0x%04x " 13288 "flag %#x status %x\n", 13289 __func__, 13290 io->io_hdr.io_type, 13291 io->io_hdr.msg_type, 13292 io->scsiio.cdb[0], 13293 io->io_hdr.nexus.initid, 13294 io->io_hdr.nexus.targ_port, 13295 io->io_hdr.nexus.targ_lun, 13296 (io->io_hdr.io_type == 13297 CTL_IO_TASK) ? 13298 io->taskio.tag_num : 13299 io->scsiio.tag_num, 13300 io->io_hdr.flags, 13301 io->io_hdr.status); 13302 } else 13303 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; 13304 #endif 13305 13306 /* 13307 * This is an internal copy of an I/O, and should not go through 13308 * the normal done processing logic. 
13309 */ 13310 if (io->io_hdr.flags & CTL_FLAG_INT_COPY) 13311 return; 13312 13313 #ifdef CTL_IO_DELAY 13314 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 13315 struct ctl_lun *lun; 13316 13317 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13318 13319 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 13320 } else { 13321 struct ctl_lun *lun; 13322 13323 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13324 13325 if ((lun != NULL) 13326 && (lun->delay_info.done_delay > 0)) { 13327 13328 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 13329 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 13330 callout_reset(&io->io_hdr.delay_callout, 13331 lun->delay_info.done_delay * hz, 13332 ctl_done_timer_wakeup, io); 13333 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) 13334 lun->delay_info.done_delay = 0; 13335 return; 13336 } 13337 } 13338 #endif /* CTL_IO_DELAY */ 13339 13340 ctl_enqueue_done(io); 13341 } 13342 13343 static void 13344 ctl_work_thread(void *arg) 13345 { 13346 struct ctl_thread *thr = (struct ctl_thread *)arg; 13347 struct ctl_softc *softc = thr->ctl_softc; 13348 union ctl_io *io; 13349 int retval; 13350 13351 CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); 13352 13353 for (;;) { 13354 retval = 0; 13355 13356 /* 13357 * We handle the queues in this order: 13358 * - ISC 13359 * - done queue (to free up resources, unblock other commands) 13360 * - RtR queue 13361 * - incoming queue 13362 * 13363 * If those queues are empty, we break out of the loop and 13364 * go to sleep. 13365 */ 13366 mtx_lock(&thr->queue_lock); 13367 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue); 13368 if (io != NULL) { 13369 STAILQ_REMOVE_HEAD(&thr->isc_queue, links); 13370 mtx_unlock(&thr->queue_lock); 13371 ctl_handle_isc(io); 13372 continue; 13373 } 13374 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue); 13375 if (io != NULL) { 13376 STAILQ_REMOVE_HEAD(&thr->done_queue, links); 13377 /* clear any blocked commands, call fe_done */ 13378 mtx_unlock(&thr->queue_lock); 13379 retval = ctl_process_done(io); 13380 continue; 13381 } 13382 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue); 13383 if (io != NULL) { 13384 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); 13385 mtx_unlock(&thr->queue_lock); 13386 if (io->io_hdr.io_type == CTL_IO_TASK) 13387 ctl_run_task(io); 13388 else 13389 ctl_scsiio_precheck(softc, &io->scsiio); 13390 continue; 13391 } 13392 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); 13393 if (io != NULL) { 13394 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); 13395 mtx_unlock(&thr->queue_lock); 13396 retval = ctl_scsiio(&io->scsiio); 13397 if (retval != CTL_RETVAL_COMPLETE) 13398 CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); 13399 continue; 13400 } 13401 13402 /* Sleep until we have something to do. */ 13403 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0); 13404 } 13405 } 13406 13407 static void 13408 ctl_lun_thread(void *arg) 13409 { 13410 struct ctl_softc *softc = (struct ctl_softc *)arg; 13411 struct ctl_be_lun *be_lun; 13412 int retval; 13413 13414 CTL_DEBUG_PRINT(("ctl_lun_thread starting\n")); 13415 13416 for (;;) { 13417 retval = 0; 13418 mtx_lock(&softc->ctl_lock); 13419 be_lun = STAILQ_FIRST(&softc->pending_lun_queue); 13420 if (be_lun != NULL) { 13421 STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links); 13422 mtx_unlock(&softc->ctl_lock); 13423 ctl_create_lun(be_lun); 13424 continue; 13425 } 13426 13427 /* Sleep until we have something to do. 
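 * PDROP means ctl_lock is not reacquired on wakeup; the top of the
 * loop takes it again.  Presumably a wakeup() on
 * &softc->pending_lun_queue is posted when a backend queues a new
 * LUN for creation.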

static void
ctl_thresh_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_lun *lun;
	struct ctl_be_lun *be_lun;
	struct scsi_da_rw_recovery_page *rwpage;
	struct ctl_logical_block_provisioning_page *page;
	const char *attr;
	union ctl_ha_msg msg;
	uint64_t thres, val;
	int i, e, set;

	CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));

	for (;;) {
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			be_lun = lun->be_lun;
			if ((lun->flags & CTL_LUN_DISABLED) ||
			    (lun->flags & CTL_LUN_OFFLINE) ||
			    lun->backend->lun_attr == NULL)
				continue;
			if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
			    softc->ha_mode == CTL_HA_MODE_XFER)
				continue;
			rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT];
			if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0)
				continue;
			e = 0;
			page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT];
			for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
				if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
					continue;
				thres = scsi_4btoul(page->descr[i].count);
				thres <<= CTL_LBP_EXPONENT;
				switch (page->descr[i].resource) {
				case 0x01:
					attr = "blocksavail";
					break;
				case 0x02:
					attr = "blocksused";
					break;
				case 0xf1:
					attr = "poolblocksavail";
					break;
				case 0xf2:
					attr = "poolblocksused";
					break;
				default:
					continue;
				}
				mtx_unlock(&softc->ctl_lock); // XXX
				val = lun->backend->lun_attr(
				    lun->be_lun->be_lun, attr);
				mtx_lock(&softc->ctl_lock);
				if (val == UINT64_MAX)
					continue;
				if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
				    == SLBPPD_ARMING_INC)
					e = (val >= thres);
				else
					e = (val <= thres);
				if (e)
					break;
			}
			mtx_lock(&lun->lun_lock);
			if (e) {
				scsi_u64to8b((uint8_t *)&page->descr[i] -
				    (uint8_t *)page, lun->ua_tpt_info);
				if (lun->lasttpt == 0 ||
				    time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
					lun->lasttpt = time_uptime;
					ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
					set = 1;
				} else
					set = 0;
			} else {
				lun->lasttpt = 0;
				ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
				set = -1;
			}
			mtx_unlock(&lun->lun_lock);
			if (set != 0 &&
			    lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
				/* Send msg to other side. */
				bzero(&msg.ua, sizeof(msg.ua));
				msg.hdr.msg_type = CTL_MSG_UA;
				msg.hdr.nexus.initid = -1;
				msg.hdr.nexus.targ_port = -1;
				msg.hdr.nexus.targ_lun = lun->lun;
				msg.hdr.nexus.targ_mapped_lun = lun->lun;
				msg.ua.ua_all = 1;
				msg.ua.ua_set = (set > 0);
				msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
				memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8);
				mtx_unlock(&softc->ctl_lock); // XXX
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
				    sizeof(msg.ua), M_WAITOK);
				mtx_lock(&softc->ctl_lock);
			}
		}
		mtx_unlock(&softc->ctl_lock);
		pause("-", CTL_LBP_PERIOD * hz);
	}
}
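
/*
 * Illustrative only: how ctl_thresh_thread() interprets one Logical Block
 * Provisioning threshold descriptor.  The helper name is made up for this
 * sketch; the real values come from the LBP mode page and the backend's
 * lun_attr() callback.  The mode page stores the threshold as a count of
 * 2^CTL_LBP_EXPONENT logical blocks, so a descriptor count of 100 arms at
 * (100 << CTL_LBP_EXPONENT) blocks of the selected resource.
 */
#if 0
static int
example_lbp_threshold_armed(struct ctl_logical_block_provisioning_page *page,
    int i, uint64_t val)
{
	uint64_t thres;

	thres = scsi_4btoul(page->descr[i].count);
	thres <<= CTL_LBP_EXPONENT;

	/* "Increasing" descriptors trip when usage rises to the threshold... */
	if ((page->descr[i].flags & SLBPPD_ARMING_MASK) == SLBPPD_ARMING_INC)
		return (val >= thres);
	/* ...otherwise they trip when the resource falls to it. */
	return (val <= thres);
}
#endif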
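
/*
 * The ctl_enqueue_*() helpers below choose a worker thread by hashing the
 * I/O's nexus: new (incoming) I/O is spread across threads by port and
 * initiator, while RtR, done and ISC work hashes on the mapped LUN so that
 * all later processing for a given LUN stays on one thread.  For example
 * (made-up numbers), with 4 worker threads an incoming command from
 * targ_port 2, initid 5 is prechecked on thread (2 * 127 + 5) % 4 == 3,
 * while its subsequent RtR/done queueing for mapped LUN 6 runs on thread
 * 6 % 4 == 2.
 */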
static void
ctl_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;
	u_int idx;

	idx = (io->io_hdr.nexus.targ_port * 127 +
	       io->io_hdr.nexus.initid) % worker_threads;
	thr = &softc->threads[idx];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_rtr(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_done(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_isc(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * vim: ts=8
 */