/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id$
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#define _CTL_C

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ctype.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Size and alignment macros needed for Copan-specific HA hardware.  These
 * can go away when the HA code is re-written, and uses busdma for any
 * hardware.
 */
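/*
 * Illustrative note on the macros below (a reading of the macro text, not
 * taken from the original sources): CTL_ALIGN_8B rounds "source" up to the
 * next 8-byte boundary and stores the result in "target", e.g. a source of
 * 0x1003 yields 0x1008 while an already-aligned 0x1000 is left unchanged;
 * CTL_SIZE_8B rounds a byte count up the same way, e.g. 13 becomes 16.
 * CTL_ALIGN_8B_MARGIN is presumably the extra slack a caller reserves so
 * that the rounded-up address still fits inside its buffer.
 */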
#define CTL_ALIGN_8B(target, source, type)				\
	if (((uint32_t)source & 0x7) != 0)				\
		target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\
	else								\
		target = (type)source;

#define CTL_SIZE_8B(target, size)					\
	if ((size & 0x7) != 0)						\
		target = size + (0x8 - (size & 0x7));			\
	else								\
		target = size;

#define CTL_ALIGN_8B_MARGIN	16

/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
 */
const static struct copan_debugconf_subpage debugconf_page_default = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
	DBGCNF_VERSION,			/* page_version */
	{CTL_TIME_IO_DEFAULT_SECS>>8,
	 CTL_TIME_IO_DEFAULT_SECS>>0},	/* ctl_time_io_secs */
};

const static struct copan_debugconf_subpage debugconf_page_changeable = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
	0,				/* page_version */
	{0xff,0xff},			/* ctl_time_io_secs */
};

const static struct scsi_da_rw_recovery_page rw_er_page_default = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/SMS_RWER_LBPERE,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_rw_recovery_page rw_er_page_changeable = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/0,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/0,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_format_page format_page_default = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
			       CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ SFP_HSEC,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_format_page format_page_changeable = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {0, 0},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ 0,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_default = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ CTL_DEFAULT_HEADS,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ SRDP_RPL_DISABLED,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
			   CTL_DEFAULT_ROTATION_RATE & 0xff},
	/*reserved2*/ {0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ 0,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ 0,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {0, 0},
	/*reserved2*/ {0, 0}
};

const static struct scsi_caching_page caching_page_default = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_DISC | SCP_WCE,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0xff, 0xff},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0xff, 0xff},
	/*max_pf_ceiling*/ {0xff, 0xff},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_caching_page caching_page_changeable = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_WCE | SCP_RCD,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0, 0},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0, 0},
	/*max_pf_ceiling*/ {0, 0},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_control_page control_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/0,
	/*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
	/*eca_and_aen*/0,
	/*flags4*/SCP_TAS,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

const static struct scsi_control_page control_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/SCP_DSENSE,
	/*queue_flags*/SCP_QUEUE_ALG_MASK,
	/*eca_and_aen*/SCP_SWP,
	/*flags4*/0,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

const static struct scsi_info_exceptions_page ie_page_default = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/SIEP_FLAGS_DEXCPT,
	/*mrie*/0,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 0}
};

const static struct scsi_info_exceptions_page ie_page_changeable = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/0,
	/*mrie*/0,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 0}
};

#define CTL_LBPM_LEN	(sizeof(struct ctl_logical_block_provisioning_page) - 4)
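
/*
 * For subpage-format mode pages (SMPH_SPF set), the page_length field
 * counts the bytes that follow the 4-byte header (page code, subpage code
 * and the two length bytes), which is why CTL_LBPM_LEN above subtracts 4
 * from the structure size; the debugconf subpage templates earlier in this
 * file follow the same convention.
 */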
const static struct ctl_logical_block_provisioning_page lbp_page_default = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0x01,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0x02,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf1,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf2,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

/*
 * XXX KDM move these into the softc.
 */
static int rcv_sync_msg;
static uint8_t ctl_pause_rtr;

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int ctl_debug = CTL_DEBUG_NONE;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
    &ctl_debug, 0, "Enabled debug flags");

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0),
 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES	10

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_ioctl_online(void *arg);
static void ctl_ioctl_offline(void *arg);
static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio);
static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static int ctl_ioctl_submit_wait(union ctl_io *io);
static void ctl_ioctl_datamove(union ctl_io *io);
static void ctl_ioctl_done(union ctl_io *io);
static void ctl_ioctl_hard_startstop_callback(void *arg,
					      struct cfi_metatask *metatask);
static void ctl_ioctl_bbrread_callback(void *arg,struct cfi_metatask *metatask);
static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			      struct ctl_ooa *ooa_hdr,
			      struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
			 struct ctl_be_lun *be_lun, struct ctl_id target_id);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr);
/**
static void ctl_failover_change_pages(struct ctl_softc *softc,
				      struct ctl_scsiio *ctsio, int master);
**/

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
				  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
				       int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
					 int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
				   bool seq);
static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
					 union ctl_io *pending_io,
					 union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
				union ctl_io *starting_io);
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
				const struct ctl_cmd_entry *entry,
				struct ctl_scsiio *ctsio);
//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
static void ctl_failover(void);
static void ctl_clear_ua(struct ctl_softc *ctl_softc, uint32_t initidx,
			 ctl_ua_type ua_type);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
			       struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
			    ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
			 ctl_ua_type ua_type);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
				    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static int ctl_process_done(union ctl_io *io);
static void ctl_lun_thread(void *arg);
static void ctl_thresh_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);
static const struct ctl_cmd_entry *
    ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
    ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
    const struct ctl_cmd_entry *entry);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};


MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");
MALLOC_DEFINE(M_CTLIO, "ctlio", "Memory used for CTL requests");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

static struct ctl_frontend ioctl_frontend =
{
	.name = "ioctl",
};

static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
			    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	ctsio->sense_residual = msg_info->scsi.sense_residual;
	ctsio->residual = msg_info->scsi.residual;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	       sizeof(ctsio->sense_data));
	memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
	       &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
	ctl_enqueue_isc((union ctl_io *)ctsio);
}
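
/*
 * A note on the two completion helpers here (as read from the code, not an
 * authoritative description): in XFER mode the full completion state --
 * CTL status, SCSI status, sense data and residuals -- is copied back into
 * the original I/O on this side, while in SER_ONLY mode the shadow I/O that
 * existed only for serialization is simply marked finished and requeued.
 */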
static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
				union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;
#if 0
	/*
	 * Attempt to catch the situation where an I/O has
	 * been freed, and we're using it again.
	 */
	if (ctsio->io_hdr.io_type == 0xff) {
		union ctl_io *tmp_io;
		tmp_io = (union ctl_io *)ctsio;
		printf("%s: %p use after free!\n", __func__,
		       ctsio);
		printf("%s: type %d msg %d cdb %x iptl: "
		       "%d:%d:%d:%d tag 0x%04x "
		       "flag %#x status %x\n",
		       __func__,
		       tmp_io->io_hdr.io_type,
		       tmp_io->io_hdr.msg_type,
		       tmp_io->scsiio.cdb[0],
		       tmp_io->io_hdr.nexus.initid.id,
		       tmp_io->io_hdr.nexus.targ_port,
		       tmp_io->io_hdr.nexus.targ_target.id,
		       tmp_io->io_hdr.nexus.targ_lun,
		       (tmp_io->io_hdr.io_type ==
		       CTL_IO_TASK) ?
		       tmp_io->taskio.tag_num :
		       tmp_io->scsiio.tag_num,
		       tmp_io->io_hdr.flags,
		       tmp_io->io_hdr.status);
	}
#endif
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
	struct ctl_softc *softc;
	union ctl_io *io;
	struct ctl_prio *presio;
	ctl_ha_status isc_status;

	softc = control_softc;
	io = NULL;


#if 0
	printf("CTL: Isc Msg event %d\n", event);
#endif
	if (event == CTL_HA_EVT_MSG_RECV) {
		union ctl_ha_msg msg_info;

		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
					     sizeof(msg_info), /*wait*/ 0);
#if 0
		printf("CTL: msg_type %d\n", msg_info.msg_type);
#endif
		if (isc_status != 0) {
			printf("Error receiving message, status = %d\n",
			       isc_status);
			return;
		}
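
		/*
		 * Rough map of the message types dispatched below (derived
		 * from the cases themselves): SERIALIZE carries a new SCSI
		 * command from the peer, DATAMOVE/DATAMOVE_DONE shuttle S/G
		 * lists and completion status in XFER mode, R2R is the
		 * SER_ONLY "go ahead and run it" reply, FINISH_IO and
		 * BAD_JUJU return final or error status, MANAGE_TASKS and
		 * PERS_ACTION forward task management and persistent
		 * reservation actions, and SYNC_FE flags completion of
		 * frontend-enable synchronization.
		 */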
		switch (msg_info.hdr.msg_type) {
		case CTL_MSG_SERIALIZE:
#if 0
			printf("Serialize\n");
#endif
			io = ctl_alloc_io_nowait(softc->othersc_pool);
			if (io == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* Need to set busy and send msg back */
				msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
				msg_info.hdr.status = CTL_SCSI_ERROR;
				msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
				msg_info.scsi.sense_len = 0;
				if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
				    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){
				}
				goto bailout;
			}
			ctl_zero_io(io);
			// populate ctsio from msg_info
			io->io_hdr.io_type = CTL_IO_SCSI;
			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
			io->io_hdr.original_sc = msg_info.hdr.original_sc;
#if 0
			printf("pOrig %x\n", (int)msg_info.original_sc);
#endif
			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
					    CTL_FLAG_IO_ACTIVE;
			/*
			 * If we're in serialization-only mode, we don't
			 * want to go through full done processing.  Thus
			 * the COPY flag.
			 *
			 * XXX KDM add another flag that is more specific.
			 */
			if (softc->ha_mode == CTL_HA_MODE_SER_ONLY)
				io->io_hdr.flags |= CTL_FLAG_INT_COPY;
			io->io_hdr.nexus = msg_info.hdr.nexus;
#if 0
			printf("targ %d, port %d, iid %d, lun %d\n",
			       io->io_hdr.nexus.targ_target.id,
			       io->io_hdr.nexus.targ_port,
			       io->io_hdr.nexus.initid.id,
			       io->io_hdr.nexus.targ_lun);
#endif
			io->scsiio.tag_num = msg_info.scsi.tag_num;
			io->scsiio.tag_type = msg_info.scsi.tag_type;
			memcpy(io->scsiio.cdb, msg_info.scsi.cdb,
			       CTL_MAX_CDBLEN);
			if (softc->ha_mode == CTL_HA_MODE_XFER) {
				const struct ctl_cmd_entry *entry;

				entry = ctl_get_cmd_entry(&io->scsiio, NULL);
				io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
				io->io_hdr.flags |=
					entry->flags & CTL_FLAG_DATA_MASK;
			}
			ctl_enqueue_isc(io);
			break;

		/* Performed on the Originating SC, XFER mode only */
		case CTL_MSG_DATAMOVE: {
			struct ctl_sg_entry *sgl;
			int i, j;

			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n", __func__);
				/* XXX KDM do something here */
				break;
			}
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			/*
			 * Keep track of this, we need to send it back over
			 * when the datamove is complete.
			 */
			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;

			if (msg_info.dt.sg_sequence == 0) {
				/*
				 * XXX KDM we use the preallocated S/G list
				 * here, but we'll need to change this to
				 * dynamic allocation if we need larger S/G
				 * lists.
				 */
				if (msg_info.dt.kern_sg_entries >
				    sizeof(io->io_hdr.remote_sglist) /
				    sizeof(io->io_hdr.remote_sglist[0])) {
					printf("%s: number of S/G entries "
					    "needed %u > allocated num %zd\n",
					    __func__,
					    msg_info.dt.kern_sg_entries,
					    sizeof(io->io_hdr.remote_sglist)/
					    sizeof(io->io_hdr.remote_sglist[0]));

					/*
					 * XXX KDM send a message back to
					 * the other side to shut down the
					 * DMA.  The error will come back
					 * through via the normal channel.
					 */
					break;
				}
				sgl = io->io_hdr.remote_sglist;
				memset(sgl, 0,
				       sizeof(io->io_hdr.remote_sglist));

				io->scsiio.kern_data_ptr = (uint8_t *)sgl;

				io->scsiio.kern_sg_entries =
					msg_info.dt.kern_sg_entries;
				io->scsiio.rem_sg_entries =
					msg_info.dt.kern_sg_entries;
				io->scsiio.kern_data_len =
					msg_info.dt.kern_data_len;
				io->scsiio.kern_total_len =
					msg_info.dt.kern_total_len;
				io->scsiio.kern_data_resid =
					msg_info.dt.kern_data_resid;
				io->scsiio.kern_rel_offset =
					msg_info.dt.kern_rel_offset;
				/*
				 * Clear out per-DMA flags.
				 */
				io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK;
				/*
				 * Add per-DMA flags that are set for this
				 * particular DMA request.
				 */
				io->io_hdr.flags |= msg_info.dt.flags &
						    CTL_FLAG_RDMA_MASK;
			} else
				sgl = (struct ctl_sg_entry *)
					io->scsiio.kern_data_ptr;

			for (i = msg_info.dt.sent_sg_entries, j = 0;
			     i < (msg_info.dt.sent_sg_entries +
			     msg_info.dt.cur_sg_entries); i++, j++) {
				sgl[i].addr = msg_info.dt.sg_list[j].addr;
				sgl[i].len = msg_info.dt.sg_list[j].len;

#if 0
				printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n",
				       __func__,
				       msg_info.dt.sg_list[j].addr,
				       msg_info.dt.sg_list[j].len,
				       sgl[i].addr, sgl[i].len, j, i);
#endif
			}
#if 0
			memcpy(&sgl[msg_info.dt.sent_sg_entries],
			       msg_info.dt.sg_list,
			       sizeof(*sgl) * msg_info.dt.cur_sg_entries);
#endif

			/*
			 * If this is the last piece of the I/O, we've got
			 * the full S/G list.  Queue processing in the thread.
			 * Otherwise wait for the next piece.
			 */
			if (msg_info.dt.sg_last != 0)
				ctl_enqueue_isc(io);
			break;
		}
		/* Performed on the Serializing (primary) SC, XFER mode only */
		case CTL_MSG_DATAMOVE_DONE: {
			if (msg_info.hdr.serializing_sc == NULL) {
				printf("%s: serializing_sc == NULL!\n",
				       __func__);
				/* XXX KDM now what? */
				break;
			}
			/*
			 * We grab the sense information here in case
			 * there was a failure, so we can return status
			 * back to the initiator.
			 */
			io = msg_info.hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.status = msg_info.hdr.status;
			io->scsiio.scsi_status = msg_info.scsi.scsi_status;
			io->scsiio.sense_len = msg_info.scsi.sense_len;
			io->scsiio.sense_residual =msg_info.scsi.sense_residual;
			io->io_hdr.port_status = msg_info.scsi.fetd_status;
			io->scsiio.residual = msg_info.scsi.residual;
			memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data,
			       sizeof(io->scsiio.sense_data));
			ctl_enqueue_isc(io);
			break;
		}

		/* Performed on Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: Major Bummer\n", __func__);
				return;
			} else {
#if 0
				printf("pOrig %x\n",(int) ctsio);
#endif
			}
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
			ctl_enqueue_isc(io);
			break;

		/*
		 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode
		 */
		case CTL_MSG_FINISH_IO:
			if (softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(softc,
							    &msg_info);
			else
				ctl_isc_handler_finish_ser_only(softc,
								&msg_info);
			break;

		/* Performed on Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				       __func__);
				break;
			}
			ctl_copy_sense_data(&msg_info, io);
			/*
			 * IO should have already been cleaned up on other
			 * SC so clear this flag so we won't send a message
			 * back to finish the IO there.
			 */
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;

			/* io = msg_info.hdr.serializing_sc; */
			io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
			ctl_enqueue_isc(io);
			break;

		/* Handle resets sent from the other side */
		case CTL_MSG_MANAGE_TASKS: {
			struct ctl_taskio *taskio;
			taskio = (struct ctl_taskio *)ctl_alloc_io_nowait(
			    softc->othersc_pool);
			if (taskio == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* should I just call the proper reset func
				   here??? */
				goto bailout;
			}
			ctl_zero_io((union ctl_io *)taskio);
			taskio->io_hdr.io_type = CTL_IO_TASK;
			taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			taskio->io_hdr.nexus = msg_info.hdr.nexus;
			taskio->task_action = msg_info.task.task_action;
			taskio->tag_num = msg_info.task.tag_num;
			taskio->tag_type = msg_info.task.tag_type;
#ifdef CTL_TIME_IO
			taskio->io_hdr.start_time = time_uptime;
			getbintime(&taskio->io_hdr.start_bt);
#if 0
			cs_prof_gettime(&taskio->io_hdr.start_ticks);
#endif
#endif /* CTL_TIME_IO */
			ctl_run_task((union ctl_io *)taskio);
			break;
		}
		/* Persistent Reserve action which needs attention */
		case CTL_MSG_PERS_ACTION:
			presio = (struct ctl_prio *)ctl_alloc_io_nowait(
			    softc->othersc_pool);
			if (presio == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* Need to set busy and send msg back */
				goto bailout;
			}
			ctl_zero_io((union ctl_io *)presio);
			presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
			presio->pr_msg = msg_info.pr;
			ctl_enqueue_isc((union ctl_io *)presio);
			break;
		case CTL_MSG_SYNC_FE:
			rcv_sync_msg = 1;
			break;
		default:
			printf("How did I get here?\n");
		}
	} else if (event == CTL_HA_EVT_MSG_SENT) {
		if (param != CTL_HA_STATUS_SUCCESS) {
			printf("Bad status from ctl_ha_msg_send status %d\n",
			       param);
		}
		return;
	} else if (event == CTL_HA_EVT_DISCONNECT) {
		printf("CTL: Got a disconnect from Isc\n");
		return;
	} else {
		printf("ctl_isc_event_handler: Unknown event %d\n", event);
		return;
	}

bailout:
	return;
}

static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{
	struct scsi_sense_data *sense;

	sense = &dest->scsiio.sense_data;
	bcopy(&src->scsi.sense_data, sense, sizeof(*sense));
	dest->scsiio.scsi_status = src->scsi.scsi_status;
	dest->scsiio.sense_len = src->scsi.sense_len;
	dest->io_hdr.status = src->hdr.status;
}

static void
ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	ctl_ua_type *pu;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
}

static void
ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	int i, j;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = 0; i < CTL_MAX_PORTS; i++) {
		if (lun->pending_ua[i] == NULL)
			continue;
		for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
			if (i * CTL_MAX_INIT_PER_PORT + j == except)
				continue;
			lun->pending_ua[i][j] |= ua;
		}
	}
}
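
/*
 * Note on the ctl_*_ua() helpers above and below: pending unit attentions
 * live in a two-level array, lun->pending_ua[port][initiator], indexed by
 * splitting the flat initiator index into initidx / CTL_MAX_INIT_PER_PORT
 * and initidx % CTL_MAX_INIT_PER_PORT.  A NULL per-port slot is skipped,
 * presumably because the per-port arrays are only allocated once an
 * initiator on that port shows up, so there is nothing to update there.
 */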
static void
ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	ctl_ua_type *pu;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua;
}

static void
ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	int i, j;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = 0; i < CTL_MAX_PORTS; i++) {
		if (lun->pending_ua[i] == NULL)
			continue;
		for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
			if (i * CTL_MAX_INIT_PER_PORT + j == except)
				continue;
			lun->pending_ua[i][j] &= ~ua;
		}
	}
}

static int
ctl_ha_state_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg1;
	struct ctl_lun *lun;
	int error, value;

	if (softc->flags & CTL_FLAG_ACTIVE_SHELF)
		value = 0;
	else
		value = 1;

	error = sysctl_handle_int(oidp, &value, 0, req);
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	mtx_lock(&softc->ctl_lock);
	if (value == 0)
		softc->flags |= CTL_FLAG_ACTIVE_SHELF;
	else
		softc->flags &= ~CTL_FLAG_ACTIVE_SHELF;
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
		mtx_unlock(&lun->lun_lock);
	}
	mtx_unlock(&softc->ctl_lock);
	return (0);
}

static int
ctl_init(void)
{
	struct ctl_softc *softc;
	void *other_pool;
	struct ctl_port *port;
	int i, error, retval;
	//int isc_retval;

	retval = 0;
	ctl_pause_rtr = 0;
	rcv_sync_msg = 0;

	control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
			       M_WAITOK | M_ZERO);
	softc = control_softc;

	softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600,
			      "cam/ctl");

	softc->dev->si_drv1 = softc;

	/*
	 * By default, return a "bad LUN" peripheral qualifier for unknown
	 * LUNs.  The user can override this default using the tunable or
	 * sysctl.  See the comment in ctl_inquiry_std() for more details.
	 */
	softc->inquiry_pq_no_lun = 1;
	TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun",
			  &softc->inquiry_pq_no_lun);
	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
		CTLFLAG_RD, 0, "CAM Target Layer");

	if (softc->sysctl_tree == NULL) {
		printf("%s: unable to allocate sysctl tree\n", __func__);
		destroy_dev(softc->dev);
		free(control_softc, M_DEVBUF);
		control_softc = NULL;
		return (ENOMEM);
	}

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		       "inquiry_pq_no_lun", CTLFLAG_RW,
		       &softc->inquiry_pq_no_lun, 0,
		       "Report no lun possible for invalid LUNs");

	mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
	softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	softc->open_count = 0;

	/*
	 * Default to actually sending a SYNCHRONIZE CACHE command down to
	 * the drive.
	 */
	softc->flags = CTL_FLAG_REAL_SYNC;

	/*
	 * In Copan's HA scheme, the "master" and "slave" roles are
	 * figured out through the slot the controller is in.  Although it
	 * is an active/active system, someone has to be in charge.
	 */
	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0,
	    "HA head ID (0 - no HA)");
	if (softc->ha_id == 0) {
		softc->flags |= CTL_FLAG_ACTIVE_SHELF;
		softc->is_single = 1;
		softc->port_offset = 0;
	} else
		softc->port_offset = (softc->ha_id - 1) * CTL_MAX_PORTS;
	softc->persis_offset = softc->port_offset * CTL_MAX_INIT_PER_PORT;

	/*
	 * XXX KDM need to figure out where we want to get our target ID
	 * and WWID.  Is it different on each port?
	 */
	softc->target.id = 0;
	softc->target.wwid[0] = 0x12345678;
	softc->target.wwid[1] = 0x87654321;
	STAILQ_INIT(&softc->lun_list);
	STAILQ_INIT(&softc->pending_lun_queue);
	STAILQ_INIT(&softc->fe_list);
	STAILQ_INIT(&softc->port_list);
	STAILQ_INIT(&softc->be_list);
	ctl_tpc_init(softc);

	if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
			    &other_pool) != 0)
	{
		printf("ctl: can't allocate %d entry other SC pool, "
		       "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
		return (ENOMEM);
	}
	softc->othersc_pool = other_pool;

	if (worker_threads <= 0)
		worker_threads = max(1, mp_ncpus / 4);
	if (worker_threads > CTL_MAX_THREADS)
		worker_threads = CTL_MAX_THREADS;

	for (i = 0; i < worker_threads; i++) {
		struct ctl_thread *thr = &softc->threads[i];

		mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
		thr->ctl_softc = softc;
		STAILQ_INIT(&thr->incoming_queue);
		STAILQ_INIT(&thr->rtr_queue);
		STAILQ_INIT(&thr->done_queue);
		STAILQ_INIT(&thr->isc_queue);

		error = kproc_kthread_add(ctl_work_thread, thr,
		    &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
		if (error != 0) {
			printf("error creating CTL work thread!\n");
			ctl_pool_free(other_pool);
			return (error);
		}
	}
	error = kproc_kthread_add(ctl_lun_thread, softc,
	    &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
	if (error != 0) {
		printf("error creating CTL lun thread!\n");
		ctl_pool_free(other_pool);
		return (error);
	}
	error = kproc_kthread_add(ctl_thresh_thread, softc,
	    &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh");
	if (error != 0) {
		printf("error creating CTL threshold thread!\n");
		ctl_pool_free(other_pool);
		return (error);
	}
	if (bootverbose)
		printf("ctl: CAM Target Layer loaded\n");

	/*
	 * Initialize the ioctl front end.
	 */
	ctl_frontend_register(&ioctl_frontend);
	port = &softc->ioctl_info.port;
	port->frontend = &ioctl_frontend;
	sprintf(softc->ioctl_info.port_name, "ioctl");
	port->port_type = CTL_PORT_IOCTL;
	port->num_requested_ctl_io = 100;
	port->port_name = softc->ioctl_info.port_name;
	port->port_online = ctl_ioctl_online;
	port->port_offline = ctl_ioctl_offline;
	port->onoff_arg = &softc->ioctl_info;
	port->lun_enable = ctl_ioctl_lun_enable;
	port->lun_disable = ctl_ioctl_lun_disable;
	port->targ_lun_arg = &softc->ioctl_info;
	port->fe_datamove = ctl_ioctl_datamove;
	port->fe_done = ctl_ioctl_done;
	port->max_targets = 15;
	port->max_target_id = 15;

	if (ctl_port_register(&softc->ioctl_info.port) != 0) {
		printf("ctl: ioctl front end registration failed, will "
		       "continue anyway\n");
	}

	SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_state", CTLTYPE_INT | CTLFLAG_RWTUN,
	    softc, 0, ctl_ha_state_sysctl, "I", "HA state for this head");

#ifdef CTL_IO_DELAY
	if (sizeof(struct callout) > CTL_TIMER_BYTES) {
		printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n",
		       sizeof(struct callout), CTL_TIMER_BYTES);
		return (EINVAL);
	}
#endif /* CTL_IO_DELAY */

	return (0);
}

void
ctl_shutdown(void)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun, *next_lun;

	softc = (struct ctl_softc *)control_softc;

	if (ctl_port_deregister(&softc->ioctl_info.port) != 0)
		printf("ctl: ioctl front end deregistration failed\n");

	mtx_lock(&softc->ctl_lock);

	/*
	 * Free up each LUN.
	 */
	for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
		next_lun = STAILQ_NEXT(lun, links);
		ctl_free_lun(lun);
	}

	mtx_unlock(&softc->ctl_lock);

	ctl_frontend_deregister(&ioctl_frontend);

#if 0
	ctl_shutdown_thread(softc->work_thread);
	mtx_destroy(&softc->queue_lock);
#endif

	ctl_tpc_shutdown(softc);
	uma_zdestroy(softc->io_zone);
	mtx_destroy(&softc->ctl_lock);

	destroy_dev(softc->dev);

	sysctl_ctx_free(&softc->sysctl_ctx);

	free(control_softc, M_DEVBUF);
	control_softc = NULL;

	if (bootverbose)
		printf("ctl: CAM Target Layer unloaded\n");
}

static int
ctl_module_event_handler(module_t mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		return (ctl_init());
	case MOD_UNLOAD:
		return (EBUSY);
	default:
		return (EOPNOTSUPP);
	}
}

/*
 * XXX KDM should we do some access checks here?  Bump a reference count to
 * prevent a CTL module from being unloaded while someone has it open?
 */
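/*
 * A minimal sketch of the refcounting idea raised above (hypothetical, not
 * what the driver does today -- MOD_UNLOAD currently just returns EBUSY
 * unconditionally): bump a counter in ctl_open(), e.g.
 *
 *	mtx_lock(&control_softc->ctl_lock);
 *	control_softc->open_count++;
 *	mtx_unlock(&control_softc->ctl_lock);
 *
 * decrement it the same way in ctl_close(), and have the MOD_UNLOAD case in
 * ctl_module_event_handler() refuse to unload while the count is nonzero.
 */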
static int
ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

static int
ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

int
ctl_port_enable(ctl_port_type port_type)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_port *port;

	if (softc->is_single == 0) {
		union ctl_ha_msg msg_info;
		int isc_retval;

#if 0
		printf("%s: HA mode, synchronizing frontend enable\n",
		       __func__);
#endif
		msg_info.hdr.msg_type = CTL_MSG_SYNC_FE;
		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		        sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) {
			printf("Sync msg send error retval %d\n", isc_retval);
		}
		if (!rcv_sync_msg) {
			isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
			        sizeof(msg_info), 1);
		}
#if 0
        	printf("CTL:Frontend Enable\n");
	} else {
		printf("%s: single mode, skipping frontend synchronization\n",
			__func__);
#endif
	}

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port_type & port->port_type)
		{
#if 0
			printf("port %d\n", port->targ_port);
#endif
			ctl_port_online(port);
		}
	}

	return (0);
}

int
ctl_port_disable(ctl_port_type port_type)
{
	struct ctl_softc *softc;
	struct ctl_port *port;

	softc = control_softc;

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port_type & port->port_type)
			ctl_port_offline(port);
	}

	return (0);
}

/*
 * Returns 0 for success, 1 for failure.
 * Currently the only failure mode is if there aren't enough entries
 * allocated.  So, in case of a failure, look at num_entries_dropped,
 * reallocate and try again.
 */
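/*
 * Illustrative caller sketch for the retry convention described above
 * (hypothetical code -- the CTL_PORT_ALL mask and M_TEMP malloc type are
 * assumptions, not taken from a real consumer):
 *
 *	struct ctl_port_entry *ents;
 *	int alloced = 16, filled, dropped;
 *
 *	ents = malloc(alloced * sizeof(*ents), M_TEMP, M_WAITOK);
 *	while (ctl_port_list(ents, alloced, &filled, &dropped,
 *	    CTL_PORT_ALL, 0) != 0) {
 *		alloced += dropped;
 *		ents = realloc(ents, alloced * sizeof(*ents), M_TEMP,
 *		    M_WAITOK);
 *	}
 */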
int
ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
	      int *num_entries_filled, int *num_entries_dropped,
	      ctl_port_type port_type, int no_virtual)
{
	struct ctl_softc *softc;
	struct ctl_port *port;
	int entries_dropped, entries_filled;
	int retval;
	int i;

	softc = control_softc;

	retval = 0;
	entries_filled = 0;
	entries_dropped = 0;

	i = 0;
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(port, &softc->port_list, links) {
		struct ctl_port_entry *entry;

		if ((port->port_type & port_type) == 0)
			continue;

		if ((no_virtual != 0)
		 && (port->virtual_port != 0))
			continue;

		if (entries_filled >= num_entries_alloced) {
			entries_dropped++;
			continue;
		}
		entry = &entries[i];

		entry->port_type = port->port_type;
		strlcpy(entry->port_name, port->port_name,
			sizeof(entry->port_name));
		entry->physical_port = port->physical_port;
		entry->virtual_port = port->virtual_port;
		entry->wwnn = port->wwnn;
		entry->wwpn = port->wwpn;

		i++;
		entries_filled++;
	}

	mtx_unlock(&softc->ctl_lock);

	if (entries_dropped > 0)
		retval = 1;

	*num_entries_dropped = entries_dropped;
	*num_entries_filled = entries_filled;

	return (retval);
}

static void
ctl_ioctl_online(void *arg)
{
	struct ctl_ioctl_info *ioctl_info;

	ioctl_info = (struct ctl_ioctl_info *)arg;

	ioctl_info->flags |= CTL_IOCTL_FLAG_ENABLED;
}

static void
ctl_ioctl_offline(void *arg)
{
	struct ctl_ioctl_info *ioctl_info;

	ioctl_info = (struct ctl_ioctl_info *)arg;

	ioctl_info->flags &= ~CTL_IOCTL_FLAG_ENABLED;
}

/*
 * Remove an initiator by port number and initiator ID.
 * Returns 0 for success, -1 for failure.
 */
int
ctl_remove_initiator(struct ctl_port *port, int iid)
{
	struct ctl_softc *softc = control_softc;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	if (iid > CTL_MAX_INIT_PER_PORT) {
		printf("%s: initiator ID %u > maximum %u!\n",
		       __func__, iid, CTL_MAX_INIT_PER_PORT);
		return (-1);
	}

	mtx_lock(&softc->ctl_lock);
	port->wwpn_iid[iid].in_use--;
	port->wwpn_iid[iid].last_use = time_uptime;
	mtx_unlock(&softc->ctl_lock);

	return (0);
}

/*
 * Add an initiator to the initiator map.
 * Returns iid for success, < 0 for failure.
 */
int
ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name)
{
	struct ctl_softc *softc = control_softc;
	time_t best_time;
	int i, best;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	if (iid >= CTL_MAX_INIT_PER_PORT) {
		printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n",
		       __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT);
		free(name, M_CTL);
		return (-1);
	}

	mtx_lock(&softc->ctl_lock);

	if (iid < 0 && (wwpn != 0 || name != NULL)) {
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) {
				iid = i;
				break;
			}
			if (name != NULL && port->wwpn_iid[i].name != NULL &&
			    strcmp(name, port->wwpn_iid[i].name) == 0) {
				iid = i;
				break;
			}
		}
	}

	if (iid < 0) {
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use == 0 &&
			    port->wwpn_iid[i].wwpn == 0 &&
			    port->wwpn_iid[i].name == NULL) {
				iid = i;
				break;
			}
		}
	}

	if (iid < 0) {
		best = -1;
		best_time = INT32_MAX;
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use == 0) {
				if (port->wwpn_iid[i].last_use < best_time) {
					best = i;
					best_time = port->wwpn_iid[i].last_use;
				}
			}
		}
		iid = best;
	}

	if (iid < 0) {
		mtx_unlock(&softc->ctl_lock);
		free(name, M_CTL);
		return (-2);
	}

	if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) {
		/*
		 * This is not an error yet.
		 */
		if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) {
#if 0
			printf("%s: port %d iid %u WWPN %#jx arrived"
			       " again\n", __func__, port->targ_port,
			       iid, (uintmax_t)wwpn);
#endif
			goto take;
		}
		if (name != NULL && port->wwpn_iid[iid].name != NULL &&
		    strcmp(name, port->wwpn_iid[iid].name) == 0) {
#if 0
			printf("%s: port %d iid %u name '%s' arrived"
			       " again\n", __func__, port->targ_port,
			       iid, name);
#endif
			goto take;
		}

		/*
		 * This is an error, but what do we do about it?  The
		 * driver is telling us we have a new WWPN for this
		 * initiator ID, so we pretty much need to use it.
		 */
		printf("%s: port %d iid %u WWPN %#jx '%s' arrived,"
		       " but WWPN %#jx '%s' is still at that address\n",
		       __func__, port->targ_port, iid, wwpn, name,
		       (uintmax_t)port->wwpn_iid[iid].wwpn,
		       port->wwpn_iid[iid].name);

		/*
		 * XXX KDM clear have_ca and ua_pending on each LUN for
		 * this initiator.
		 */
	}
take:
	free(port->wwpn_iid[iid].name, M_CTL);
	port->wwpn_iid[iid].name = name;
	port->wwpn_iid[iid].wwpn = wwpn;
	port->wwpn_iid[iid].in_use++;
	mtx_unlock(&softc->ctl_lock);

	return (iid);
}
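
/*
 * ctl_create_iid() below encodes the initiator's address as a SCSI
 * TransportID matching the port's transport (as far as can be read from the
 * code): an FCP N_Port name for FC, the iSCSI name string padded to a
 * multiple of four bytes for iSCSI, a SAS address for SAS, and a SPI-style
 * SCSI address plus relative target port as the fallback.  It returns the
 * number of bytes written to "buf", or 0 when no identifier is known yet.
 */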
static int
ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
{
	int len;

	switch (port->port_type) {
	case CTL_PORT_FC:
	{
		struct scsi_transportid_fcp *id =
		    (struct scsi_transportid_fcp *)buf;
		if (port->wwpn_iid[iid].wwpn == 0)
			return (0);
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_FC;
		scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name);
		return (sizeof(*id));
	}
	case CTL_PORT_ISCSI:
	{
		struct scsi_transportid_iscsi_port *id =
		    (struct scsi_transportid_iscsi_port *)buf;
		if (port->wwpn_iid[iid].name == NULL)
			return (0);
		memset(id, 0, 256);
		id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT |
		    SCSI_PROTO_ISCSI;
		len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1;
		len = roundup2(min(len, 252), 4);
		scsi_ulto2b(len, id->additional_length);
		return (sizeof(*id) + len);
	}
	case CTL_PORT_SAS:
	{
		struct scsi_transportid_sas *id =
		    (struct scsi_transportid_sas *)buf;
		if (port->wwpn_iid[iid].wwpn == 0)
			return (0);
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_SAS;
		scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address);
		return (sizeof(*id));
	}
	default:
	{
		struct scsi_transportid_spi *id =
		    (struct scsi_transportid_spi *)buf;
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_SPI;
		scsi_ulto2b(iid, id->scsi_addr);
		scsi_ulto2b(port->targ_port, id->rel_trgt_port_id);
		return (sizeof(*id));
	}
	}
}

static int
ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
{
	return (0);
}

static int
ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
{
	return (0);
}

/*
 * Data movement routine for the CTL ioctl frontend port.
 */
static int
ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
{
	struct ctl_sg_entry *ext_sglist, *kern_sglist;
	struct ctl_sg_entry ext_entry, kern_entry;
	int ext_sglen, ext_sg_entries, kern_sg_entries;
	int ext_sg_start, ext_offset;
	int len_to_copy, len_copied;
	int kern_watermark, ext_watermark;
	int ext_sglist_malloced;
	int i, j;

	ext_sglist_malloced = 0;
	ext_sg_start = 0;
	ext_offset = 0;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));

	/*
	 * If this flag is set, fake the data transfer.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
		ctsio->ext_data_filled = ctsio->ext_data_len;
		goto bailout;
	}

	/*
	 * To simplify things here, if we have a single buffer, stick it in
	 * a S/G entry and just make it a single entry S/G list.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
		int len_seen;

		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);

		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
							   M_WAITOK);
		ext_sglist_malloced = 1;
		if (copyin(ctsio->ext_data_ptr, ext_sglist,
			   ext_sglen) != 0) {
			ctl_set_internal_failure(ctsio,
						 /*sks_valid*/ 0,
						 /*retry_count*/ 0);
			goto bailout;
		}
		ext_sg_entries = ctsio->ext_sg_entries;
		len_seen = 0;
		for (i = 0; i < ext_sg_entries; i++) {
			if ((len_seen + ext_sglist[i].len) >=
			    ctsio->ext_data_filled) {
				ext_sg_start = i;
				ext_offset = ctsio->ext_data_filled - len_seen;
				break;
			}
			len_seen += ext_sglist[i].len;
		}
	} else {
		ext_sglist = &ext_entry;
		ext_sglist->addr = ctsio->ext_data_ptr;
		ext_sglist->len = ctsio->ext_data_len;
		ext_sg_entries = 1;
		ext_sg_start = 0;
		ext_offset = ctsio->ext_data_filled;
	}

	if (ctsio->kern_sg_entries > 0) {
		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
		kern_sg_entries = ctsio->kern_sg_entries;
	} else {
		kern_sglist = &kern_entry;
		kern_sglist->addr = ctsio->kern_data_ptr;
		kern_sglist->len = ctsio->kern_data_len;
		kern_sg_entries = 1;
	}


	kern_watermark = 0;
	ext_watermark = ext_offset;
	len_copied = 0;
	for (i = ext_sg_start, j = 0;
	     i < ext_sg_entries && j < kern_sg_entries;) {
		uint8_t *ext_ptr, *kern_ptr;

		len_to_copy = MIN(ext_sglist[i].len - ext_watermark,
				  kern_sglist[j].len - kern_watermark);

		ext_ptr = (uint8_t *)ext_sglist[i].addr;
		ext_ptr = ext_ptr + ext_watermark;
		if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			/*
			 * XXX KDM fix this!
			 */
			panic("need to implement bus address support");
#if 0
			kern_ptr = bus_to_virt(kern_sglist[j].addr);
#endif
		} else
			kern_ptr = (uint8_t *)kern_sglist[j].addr;
		kern_ptr = kern_ptr + kern_watermark;

		kern_watermark += len_to_copy;
		ext_watermark += len_to_copy;

		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		     CTL_FLAG_DATA_IN) {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes to user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", kern_ptr, ext_ptr));
			if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
				ctl_set_internal_failure(ctsio,
							 /*sks_valid*/ 0,
							 /*retry_count*/ 0);
				goto bailout;
			}
		} else {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes from user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", ext_ptr, kern_ptr));
			if (copyin(ext_ptr, kern_ptr, len_to_copy)!= 0){
				ctl_set_internal_failure(ctsio,
							 /*sks_valid*/ 0,
							 /*retry_count*/0);
				goto bailout;
			}
		}

		len_copied += len_to_copy;

		if (ext_sglist[i].len == ext_watermark) {
			i++;
			ext_watermark = 0;
		}

		if (kern_sglist[j].len == kern_watermark) {
			j++;
			kern_watermark = 0;
		}
	}

	ctsio->ext_data_filled += len_copied;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
			 "kern_sg_entries: %d\n", ext_sg_entries,
			 kern_sg_entries));
	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
			 "kern_data_len = %d\n", ctsio->ext_data_len,
			 ctsio->kern_data_len));


	/* XXX KDM set residual?? */
bailout:

	if (ext_sglist_malloced != 0)
		free(ext_sglist, M_CTL);

	return (CTL_RETVAL_COMPLETE);
}

/*
 * Serialize a command that went down the "wrong" side, and so was sent to
 * this controller for execution.  The logic is a little different than the
 * standard case in ctl_scsiio_precheck().  Errors in this case need to get
 * sent back to the other side, but in the success case, we execute the
 * command on this side (XFER mode) or tell the other side to execute it
 * (SER_ONLY mode).
 */
static int
ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc;
	union ctl_ha_msg msg_info;
	struct ctl_lun *lun;
	int retval = 0;
	uint32_t targ_lun;

	softc = control_softc;

	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	lun = softc->ctl_luns[targ_lun];
	if (lun==NULL)
	{
		/*
		 * Why isn't LUN defined? The other side wouldn't
		 * send a cmd if the LUN is undefined.
1850 */ 1851 printf("%s: Bad JUJU!, LUN is NULL!\n", __func__); 1852 1853 /* "Logical unit not supported" */ 1854 ctl_set_sense_data(&msg_info.scsi.sense_data, 1855 lun, 1856 /*sense_format*/SSD_TYPE_NONE, 1857 /*current_error*/ 1, 1858 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1859 /*asc*/ 0x25, 1860 /*ascq*/ 0x00, 1861 SSD_ELEM_NONE); 1862 1863 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1864 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1865 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1866 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1867 msg_info.hdr.serializing_sc = NULL; 1868 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1869 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1870 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1871 } 1872 return(1); 1873 1874 } 1875 1876 mtx_lock(&lun->lun_lock); 1877 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1878 1879 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 1880 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, 1881 ooa_links))) { 1882 case CTL_ACTION_BLOCK: 1883 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 1884 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 1885 blocked_links); 1886 break; 1887 case CTL_ACTION_PASS: 1888 case CTL_ACTION_SKIP: 1889 if (softc->ha_mode == CTL_HA_MODE_XFER) { 1890 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 1891 ctl_enqueue_rtr((union ctl_io *)ctsio); 1892 } else { 1893 1894 /* send msg back to other side */ 1895 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1896 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 1897 msg_info.hdr.msg_type = CTL_MSG_R2R; 1898 #if 0 1899 printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc); 1900 #endif 1901 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1902 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1903 } 1904 } 1905 break; 1906 case CTL_ACTION_OVERLAP: 1907 /* OVERLAPPED COMMANDS ATTEMPTED */ 1908 ctl_set_sense_data(&msg_info.scsi.sense_data, 1909 lun, 1910 /*sense_format*/SSD_TYPE_NONE, 1911 /*current_error*/ 1, 1912 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1913 /*asc*/ 0x4E, 1914 /*ascq*/ 0x00, 1915 SSD_ELEM_NONE); 1916 1917 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1918 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1919 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1920 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1921 msg_info.hdr.serializing_sc = NULL; 1922 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1923 #if 0 1924 printf("BAD JUJU:Major Bummer Overlap\n"); 1925 #endif 1926 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1927 retval = 1; 1928 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1929 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1930 } 1931 break; 1932 case CTL_ACTION_OVERLAP_TAG: 1933 /* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */ 1934 ctl_set_sense_data(&msg_info.scsi.sense_data, 1935 lun, 1936 /*sense_format*/SSD_TYPE_NONE, 1937 /*current_error*/ 1, 1938 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1939 /*asc*/ 0x4D, 1940 /*ascq*/ ctsio->tag_num & 0xff, 1941 SSD_ELEM_NONE); 1942 1943 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1944 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1945 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1946 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1947 msg_info.hdr.serializing_sc = NULL; 1948 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1949 #if 0 1950 printf("BAD JUJU:Major Bummer Overlap Tag\n"); 1951 #endif 1952 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1953 retval = 1; 1954 if 
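/*
 * The missing-LUN case at the top of this function and the overlap and
 * internal-failure arms of this switch all build the same kind of
 * CTL_MSG_BAD_JUJU reply: autosense CHECK CONDITION data plus the
 * originating request pointer, sent back on CTL_HA_CHAN_CTL.  Below is a
 * minimal sketch of a hypothetical helper that captures the repeated
 * steps; the helper does not exist in this file and is illustrative only.
 */
#if 0
static void
ctl_send_bad_juju(struct ctl_scsiio *ctsio, struct ctl_lun *lun,
                  int sense_key, int asc, int ascq)
{
        union ctl_ha_msg msg_info;

        ctl_set_sense_data(&msg_info.scsi.sense_data, lun,
                           /*sense_format*/ SSD_TYPE_NONE,
                           /*current_error*/ 1,
                           sense_key, asc, ascq, SSD_ELEM_NONE);
        msg_info.scsi.sense_len = SSD_FULL_SIZE;
        msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
        msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
        msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
        msg_info.hdr.serializing_sc = NULL;
        msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
        /* As in the open-coded versions, the send status is not acted on. */
        if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
            sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
        }
}
#endif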
            (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
                    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
                }
                break;
        case CTL_ACTION_ERROR:
        default:
                /* "Internal target failure" */
                ctl_set_sense_data(&msg_info.scsi.sense_data,
                                   lun,
                                   /*sense_format*/SSD_TYPE_NONE,
                                   /*current_error*/ 1,
                                   /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
                                   /*asc*/ 0x44,
                                   /*ascq*/ 0x00,
                                   SSD_ELEM_NONE);

                msg_info.scsi.sense_len = SSD_FULL_SIZE;
                msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
                msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
                msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
                msg_info.hdr.serializing_sc = NULL;
                msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
                printf("BAD JUJU:Major Bummer HW Error\n");
#endif
                TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
                retval = 1;
                if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
                    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
                }
                break;
        }
        mtx_unlock(&lun->lun_lock);
        return (retval);
}

static int
ctl_ioctl_submit_wait(union ctl_io *io)
{
        struct ctl_fe_ioctl_params params;
        ctl_fe_ioctl_state last_state;
        int done, retval;

        retval = 0;

        bzero(&params, sizeof(params));

        mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
        cv_init(&params.sem, "ctlioccv");
        params.state = CTL_IOCTL_INPROG;
        last_state = params.state;

        io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;

        CTL_DEBUG_PRINT(("ctl_ioctl_submit_wait\n"));

        /* This shouldn't happen */
        if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE)
                return (retval);

        done = 0;

        do {
                mtx_lock(&params.ioctl_mtx);
                /*
                 * Check the state here, and don't sleep if the state has
                 * already changed (i.e. wakeup has already occurred, but we
                 * weren't waiting yet).
                 */
                if (params.state == last_state) {
                        /* XXX KDM cv_wait_sig instead? */
                        cv_wait(&params.sem, &params.ioctl_mtx);
                }
                last_state = params.state;

                switch (params.state) {
                case CTL_IOCTL_INPROG:
                        /* Why did we wake up? */
                        /* XXX KDM error here? */
                        mtx_unlock(&params.ioctl_mtx);
                        break;
                case CTL_IOCTL_DATAMOVE:
                        CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));

                        /*
                         * change last_state back to INPROG to avoid
                         * deadlock on subsequent data moves.
                         */
                        params.state = last_state = CTL_IOCTL_INPROG;

                        mtx_unlock(&params.ioctl_mtx);
                        ctl_ioctl_do_datamove(&io->scsiio);
                        /*
                         * Note that in some cases, most notably writes,
                         * this will queue the I/O and call us back later.
                         * In other cases, generally reads, this routine
                         * will immediately call back and wake us up,
                         * probably using our own context.
                         */
                        io->scsiio.be_move_done(io);
                        break;
                case CTL_IOCTL_DONE:
                        mtx_unlock(&params.ioctl_mtx);
                        CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
                        done = 1;
                        break;
                default:
                        mtx_unlock(&params.ioctl_mtx);
                        /* XXX KDM error here? */
                        break;
                }
        } while (done == 0);

        mtx_destroy(&params.ioctl_mtx);
        cv_destroy(&params.sem);

        return (CTL_RETVAL_COMPLETE);
}

static void
ctl_ioctl_datamove(union ctl_io *io)
{
        struct ctl_fe_ioctl_params *params;

        params = (struct ctl_fe_ioctl_params *)
                io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

        mtx_lock(&params->ioctl_mtx);
        params->state = CTL_IOCTL_DATAMOVE;
        cv_broadcast(&params->sem);
        mtx_unlock(&params->ioctl_mtx);
}

static void
ctl_ioctl_done(union ctl_io *io)
{
        struct ctl_fe_ioctl_params *params;

        params = (struct ctl_fe_ioctl_params *)
                io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

        mtx_lock(&params->ioctl_mtx);
        params->state = CTL_IOCTL_DONE;
        cv_broadcast(&params->sem);
        mtx_unlock(&params->ioctl_mtx);
}

static void
ctl_ioctl_hard_startstop_callback(void *arg, struct cfi_metatask *metatask)
{
        struct ctl_fe_ioctl_startstop_info *sd_info;

        sd_info = (struct ctl_fe_ioctl_startstop_info *)arg;

        sd_info->hs_info.status = metatask->status;
        sd_info->hs_info.total_luns = metatask->taskinfo.startstop.total_luns;
        sd_info->hs_info.luns_complete =
                metatask->taskinfo.startstop.luns_complete;
        sd_info->hs_info.luns_failed = metatask->taskinfo.startstop.luns_failed;

        cv_broadcast(&sd_info->sem);
}

static void
ctl_ioctl_bbrread_callback(void *arg, struct cfi_metatask *metatask)
{
        struct ctl_fe_ioctl_bbrread_info *fe_bbr_info;

        fe_bbr_info = (struct ctl_fe_ioctl_bbrread_info *)arg;

        mtx_lock(fe_bbr_info->lock);
        fe_bbr_info->bbr_info->status = metatask->status;
        fe_bbr_info->bbr_info->bbr_status = metatask->taskinfo.bbrread.status;
        fe_bbr_info->wakeup_done = 1;
        mtx_unlock(fe_bbr_info->lock);

        cv_broadcast(&fe_bbr_info->sem);
}

/*
 * Returns 0 for success, errno for failure.
 */
static int
ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
                   struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
{
        union ctl_io *io;
        int retval;

        retval = 0;

        mtx_lock(&lun->lun_lock);
        for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
             (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
             ooa_links)) {
                struct ctl_ooa_entry *entry;

                /*
                 * If we've got more than we can fit, just count the
                 * remaining entries.
2154 */ 2155 if (*cur_fill_num >= ooa_hdr->alloc_num) 2156 continue; 2157 2158 entry = &kern_entries[*cur_fill_num]; 2159 2160 entry->tag_num = io->scsiio.tag_num; 2161 entry->lun_num = lun->lun; 2162 #ifdef CTL_TIME_IO 2163 entry->start_bt = io->io_hdr.start_bt; 2164 #endif 2165 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 2166 entry->cdb_len = io->scsiio.cdb_len; 2167 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 2168 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 2169 2170 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 2171 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 2172 2173 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2174 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2175 2176 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2177 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2178 2179 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2180 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2181 } 2182 mtx_unlock(&lun->lun_lock); 2183 2184 return (retval); 2185 } 2186 2187 static void * 2188 ctl_copyin_alloc(void *user_addr, int len, char *error_str, 2189 size_t error_str_len) 2190 { 2191 void *kptr; 2192 2193 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 2194 2195 if (copyin(user_addr, kptr, len) != 0) { 2196 snprintf(error_str, error_str_len, "Error copying %d bytes " 2197 "from user address %p to kernel address %p", len, 2198 user_addr, kptr); 2199 free(kptr, M_CTL); 2200 return (NULL); 2201 } 2202 2203 return (kptr); 2204 } 2205 2206 static void 2207 ctl_free_args(int num_args, struct ctl_be_arg *args) 2208 { 2209 int i; 2210 2211 if (args == NULL) 2212 return; 2213 2214 for (i = 0; i < num_args; i++) { 2215 free(args[i].kname, M_CTL); 2216 free(args[i].kvalue, M_CTL); 2217 } 2218 2219 free(args, M_CTL); 2220 } 2221 2222 static struct ctl_be_arg * 2223 ctl_copyin_args(int num_args, struct ctl_be_arg *uargs, 2224 char *error_str, size_t error_str_len) 2225 { 2226 struct ctl_be_arg *args; 2227 int i; 2228 2229 args = ctl_copyin_alloc(uargs, num_args * sizeof(*args), 2230 error_str, error_str_len); 2231 2232 if (args == NULL) 2233 goto bailout; 2234 2235 for (i = 0; i < num_args; i++) { 2236 args[i].kname = NULL; 2237 args[i].kvalue = NULL; 2238 } 2239 2240 for (i = 0; i < num_args; i++) { 2241 uint8_t *tmpptr; 2242 2243 args[i].kname = ctl_copyin_alloc(args[i].name, 2244 args[i].namelen, error_str, error_str_len); 2245 if (args[i].kname == NULL) 2246 goto bailout; 2247 2248 if (args[i].kname[args[i].namelen - 1] != '\0') { 2249 snprintf(error_str, error_str_len, "Argument %d " 2250 "name is not NUL-terminated", i); 2251 goto bailout; 2252 } 2253 2254 if (args[i].flags & CTL_BEARG_RD) { 2255 tmpptr = ctl_copyin_alloc(args[i].value, 2256 args[i].vallen, error_str, error_str_len); 2257 if (tmpptr == NULL) 2258 goto bailout; 2259 if ((args[i].flags & CTL_BEARG_ASCII) 2260 && (tmpptr[args[i].vallen - 1] != '\0')) { 2261 snprintf(error_str, error_str_len, "Argument " 2262 "%d value is not NUL-terminated", i); 2263 goto bailout; 2264 } 2265 args[i].kvalue = tmpptr; 2266 } else { 2267 args[i].kvalue = malloc(args[i].vallen, 2268 M_CTL, M_WAITOK | M_ZERO); 2269 } 2270 } 2271 2272 return (args); 2273 bailout: 2274 2275 ctl_free_args(num_args, args); 2276 2277 return (NULL); 2278 } 2279 2280 static void 2281 ctl_copyout_args(int num_args, struct ctl_be_arg *args) 2282 { 2283 int i; 2284 2285 for (i = 0; i < num_args; i++) { 2286 if (args[i].flags & CTL_BEARG_WR) 2287 copyout(args[i].kvalue, args[i].value, args[i].vallen); 2288 } 2289 } 2290 2291 /* 2292 * Escape characters that are illegal or not recommended in XML. 
 */
int
ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size)
{
        char *end = str + size;
        int retval;

        retval = 0;

        for (; *str && str < end; str++) {
                switch (*str) {
                case '&':
                        retval = sbuf_printf(sb, "&amp;");
                        break;
                case '>':
                        retval = sbuf_printf(sb, "&gt;");
                        break;
                case '<':
                        retval = sbuf_printf(sb, "&lt;");
                        break;
                default:
                        retval = sbuf_putc(sb, *str);
                        break;
                }

                if (retval != 0)
                        break;

        }

        return (retval);
}

static void
ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb)
{
        struct scsi_vpd_id_descriptor *desc;
        int i;

        if (id == NULL || id->len < 4)
                return;
        desc = (struct scsi_vpd_id_descriptor *)id->data;
        switch (desc->id_type & SVPD_ID_TYPE_MASK) {
        case SVPD_ID_TYPE_T10:
                sbuf_printf(sb, "t10.");
                break;
        case SVPD_ID_TYPE_EUI64:
                sbuf_printf(sb, "eui.");
                break;
        case SVPD_ID_TYPE_NAA:
                sbuf_printf(sb, "naa.");
                break;
        case SVPD_ID_TYPE_SCSI_NAME:
                break;
        }
        switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) {
        case SVPD_ID_CODESET_BINARY:
                for (i = 0; i < desc->length; i++)
                        sbuf_printf(sb, "%02x", desc->identifier[i]);
                break;
        case SVPD_ID_CODESET_ASCII:
                sbuf_printf(sb, "%.*s", (int)desc->length,
                    (char *)desc->identifier);
                break;
        case SVPD_ID_CODESET_UTF8:
                sbuf_printf(sb, "%s", (char *)desc->identifier);
                break;
        }
}

static int
ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
          struct thread *td)
{
        struct ctl_softc *softc;
        int retval;

        softc = control_softc;

        retval = 0;

        switch (cmd) {
        case CTL_IO: {
                union ctl_io *io;
                void *pool_tmp;

                /*
                 * If we haven't been "enabled", don't allow any SCSI I/O
                 * to this FETD.
                 */
                if ((softc->ioctl_info.flags & CTL_IOCTL_FLAG_ENABLED) == 0) {
                        retval = EPERM;
                        break;
                }

                io = ctl_alloc_io(softc->ioctl_info.port.ctl_pool_ref);

                /*
                 * Need to save the pool reference so it doesn't get
                 * spammed by the user's ctl_io.
                 */
                pool_tmp = io->io_hdr.pool;
                memcpy(io, (void *)addr, sizeof(*io));
                io->io_hdr.pool = pool_tmp;

                /*
                 * No status yet, so make sure the status is set properly.
                 */
                io->io_hdr.status = CTL_STATUS_NONE;

                /*
                 * The user sets the initiator ID, target and LUN IDs.
2405 */ 2406 io->io_hdr.nexus.targ_port = softc->ioctl_info.port.targ_port; 2407 io->io_hdr.flags |= CTL_FLAG_USER_REQ; 2408 if ((io->io_hdr.io_type == CTL_IO_SCSI) 2409 && (io->scsiio.tag_type != CTL_TAG_UNTAGGED)) 2410 io->scsiio.tag_num = softc->ioctl_info.cur_tag_num++; 2411 2412 retval = ctl_ioctl_submit_wait(io); 2413 2414 if (retval != 0) { 2415 ctl_free_io(io); 2416 break; 2417 } 2418 2419 memcpy((void *)addr, io, sizeof(*io)); 2420 2421 /* return this to our pool */ 2422 ctl_free_io(io); 2423 2424 break; 2425 } 2426 case CTL_ENABLE_PORT: 2427 case CTL_DISABLE_PORT: 2428 case CTL_SET_PORT_WWNS: { 2429 struct ctl_port *port; 2430 struct ctl_port_entry *entry; 2431 2432 entry = (struct ctl_port_entry *)addr; 2433 2434 mtx_lock(&softc->ctl_lock); 2435 STAILQ_FOREACH(port, &softc->port_list, links) { 2436 int action, done; 2437 2438 action = 0; 2439 done = 0; 2440 2441 if ((entry->port_type == CTL_PORT_NONE) 2442 && (entry->targ_port == port->targ_port)) { 2443 /* 2444 * If the user only wants to enable or 2445 * disable or set WWNs on a specific port, 2446 * do the operation and we're done. 2447 */ 2448 action = 1; 2449 done = 1; 2450 } else if (entry->port_type & port->port_type) { 2451 /* 2452 * Compare the user's type mask with the 2453 * particular frontend type to see if we 2454 * have a match. 2455 */ 2456 action = 1; 2457 done = 0; 2458 2459 /* 2460 * Make sure the user isn't trying to set 2461 * WWNs on multiple ports at the same time. 2462 */ 2463 if (cmd == CTL_SET_PORT_WWNS) { 2464 printf("%s: Can't set WWNs on " 2465 "multiple ports\n", __func__); 2466 retval = EINVAL; 2467 break; 2468 } 2469 } 2470 if (action != 0) { 2471 /* 2472 * XXX KDM we have to drop the lock here, 2473 * because the online/offline operations 2474 * can potentially block. We need to 2475 * reference count the frontends so they 2476 * can't go away, 2477 */ 2478 mtx_unlock(&softc->ctl_lock); 2479 2480 if (cmd == CTL_ENABLE_PORT) { 2481 struct ctl_lun *lun; 2482 2483 STAILQ_FOREACH(lun, &softc->lun_list, 2484 links) { 2485 port->lun_enable(port->targ_lun_arg, 2486 lun->target, 2487 lun->lun); 2488 } 2489 2490 ctl_port_online(port); 2491 } else if (cmd == CTL_DISABLE_PORT) { 2492 struct ctl_lun *lun; 2493 2494 ctl_port_offline(port); 2495 2496 STAILQ_FOREACH(lun, &softc->lun_list, 2497 links) { 2498 port->lun_disable( 2499 port->targ_lun_arg, 2500 lun->target, 2501 lun->lun); 2502 } 2503 } 2504 2505 mtx_lock(&softc->ctl_lock); 2506 2507 if (cmd == CTL_SET_PORT_WWNS) 2508 ctl_port_set_wwns(port, 2509 (entry->flags & CTL_PORT_WWNN_VALID) ? 2510 1 : 0, entry->wwnn, 2511 (entry->flags & CTL_PORT_WWPN_VALID) ? 
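/*
 * The CTL_GET_PORT_LIST handler below insists that alloc_len equal
 * alloc_num * sizeof(struct ctl_port_entry) and reports truncation through
 * dropped_num and CTL_PORT_LIST_NEED_MORE_SPACE.  The following is a
 * minimal userland sketch of driving it, given a descriptor for the CTL
 * control device (conventionally /dev/cam/ctl).  Field types and the
 * include set are assumed from their use in this handler; the snippet is
 * illustrative and not part of any shipped tool.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl_ioctl.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
dump_ports(int fd)
{
        struct ctl_port_list list;
        uint32_t num = 8;

        for (;;) {
                memset(&list, 0, sizeof(list));
                list.alloc_num = num;
                list.alloc_len = num * sizeof(struct ctl_port_entry);
                list.entries = calloc(num, sizeof(struct ctl_port_entry));
                if (list.entries == NULL)
                        err(1, "calloc");
                if (ioctl(fd, CTL_GET_PORT_LIST, &list) == -1)
                        err(1, "CTL_GET_PORT_LIST");
                if (list.status != CTL_PORT_LIST_NEED_MORE_SPACE)
                        break;
                /* Grow by the number of entries that did not fit, retry. */
                num += list.dropped_num;
                free(list.entries);
        }
        for (uint32_t i = 0; i < list.fill_num; i++)
                printf("port %d: %s (%s)\n", (int)list.entries[i].targ_port,
                    list.entries[i].port_name,
                    list.entries[i].online ? "online" : "offline");
        free(list.entries);
}
#endif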
2512 1 : 0, entry->wwpn); 2513 } 2514 if (done != 0) 2515 break; 2516 } 2517 mtx_unlock(&softc->ctl_lock); 2518 break; 2519 } 2520 case CTL_GET_PORT_LIST: { 2521 struct ctl_port *port; 2522 struct ctl_port_list *list; 2523 int i; 2524 2525 list = (struct ctl_port_list *)addr; 2526 2527 if (list->alloc_len != (list->alloc_num * 2528 sizeof(struct ctl_port_entry))) { 2529 printf("%s: CTL_GET_PORT_LIST: alloc_len %u != " 2530 "alloc_num %u * sizeof(struct ctl_port_entry) " 2531 "%zu\n", __func__, list->alloc_len, 2532 list->alloc_num, sizeof(struct ctl_port_entry)); 2533 retval = EINVAL; 2534 break; 2535 } 2536 list->fill_len = 0; 2537 list->fill_num = 0; 2538 list->dropped_num = 0; 2539 i = 0; 2540 mtx_lock(&softc->ctl_lock); 2541 STAILQ_FOREACH(port, &softc->port_list, links) { 2542 struct ctl_port_entry entry, *list_entry; 2543 2544 if (list->fill_num >= list->alloc_num) { 2545 list->dropped_num++; 2546 continue; 2547 } 2548 2549 entry.port_type = port->port_type; 2550 strlcpy(entry.port_name, port->port_name, 2551 sizeof(entry.port_name)); 2552 entry.targ_port = port->targ_port; 2553 entry.physical_port = port->physical_port; 2554 entry.virtual_port = port->virtual_port; 2555 entry.wwnn = port->wwnn; 2556 entry.wwpn = port->wwpn; 2557 if (port->status & CTL_PORT_STATUS_ONLINE) 2558 entry.online = 1; 2559 else 2560 entry.online = 0; 2561 2562 list_entry = &list->entries[i]; 2563 2564 retval = copyout(&entry, list_entry, sizeof(entry)); 2565 if (retval != 0) { 2566 printf("%s: CTL_GET_PORT_LIST: copyout " 2567 "returned %d\n", __func__, retval); 2568 break; 2569 } 2570 i++; 2571 list->fill_num++; 2572 list->fill_len += sizeof(entry); 2573 } 2574 mtx_unlock(&softc->ctl_lock); 2575 2576 /* 2577 * If this is non-zero, we had a copyout fault, so there's 2578 * probably no point in attempting to set the status inside 2579 * the structure. 2580 */ 2581 if (retval != 0) 2582 break; 2583 2584 if (list->dropped_num > 0) 2585 list->status = CTL_PORT_LIST_NEED_MORE_SPACE; 2586 else 2587 list->status = CTL_PORT_LIST_OK; 2588 break; 2589 } 2590 case CTL_DUMP_OOA: { 2591 struct ctl_lun *lun; 2592 union ctl_io *io; 2593 char printbuf[128]; 2594 struct sbuf sb; 2595 2596 mtx_lock(&softc->ctl_lock); 2597 printf("Dumping OOA queues:\n"); 2598 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2599 mtx_lock(&lun->lun_lock); 2600 for (io = (union ctl_io *)TAILQ_FIRST( 2601 &lun->ooa_queue); io != NULL; 2602 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2603 ooa_links)) { 2604 sbuf_new(&sb, printbuf, sizeof(printbuf), 2605 SBUF_FIXEDLEN); 2606 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ", 2607 (intmax_t)lun->lun, 2608 io->scsiio.tag_num, 2609 (io->io_hdr.flags & 2610 CTL_FLAG_BLOCKED) ? "" : " BLOCKED", 2611 (io->io_hdr.flags & 2612 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 2613 (io->io_hdr.flags & 2614 CTL_FLAG_ABORT) ? " ABORT" : "", 2615 (io->io_hdr.flags & 2616 CTL_FLAG_IS_WAS_ON_RTR) ? 
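/*
 * The CTL_DUMP_OOA case above prints the per-I/O state flags straight from
 * the kernel structures; the CTL_GET_OOA case that follows exports the same
 * state to userland as CTL_OOACMD_FLAG_* bits filled in by
 * ctl_ioctl_fill_ooa().  A small illustrative decoder for those exported
 * entries (field widths assumed from their use here, headers as in the
 * earlier sketch):
 */
#if 0
static void
print_ooa_entry(const struct ctl_ooa_entry *entry)
{
        printf("LUN %ju tag 0x%04x%s%s%s%s%s\n",
            (uintmax_t)entry->lun_num, (unsigned int)entry->tag_num,
            (entry->cmd_flags & CTL_OOACMD_FLAG_BLOCKED) ? " BLOCKED" : "",
            (entry->cmd_flags & CTL_OOACMD_FLAG_DMA) ? " DMA" : "",
            (entry->cmd_flags & CTL_OOACMD_FLAG_DMA_QUEUED) ? " DMA_QUEUED" : "",
            (entry->cmd_flags & CTL_OOACMD_FLAG_ABORT) ? " ABORT" : "",
            (entry->cmd_flags & CTL_OOACMD_FLAG_RTR) ? " RTR" : "");
}
#endif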
" RTR" : ""); 2617 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 2618 sbuf_finish(&sb); 2619 printf("%s\n", sbuf_data(&sb)); 2620 } 2621 mtx_unlock(&lun->lun_lock); 2622 } 2623 printf("OOA queues dump done\n"); 2624 mtx_unlock(&softc->ctl_lock); 2625 break; 2626 } 2627 case CTL_GET_OOA: { 2628 struct ctl_lun *lun; 2629 struct ctl_ooa *ooa_hdr; 2630 struct ctl_ooa_entry *entries; 2631 uint32_t cur_fill_num; 2632 2633 ooa_hdr = (struct ctl_ooa *)addr; 2634 2635 if ((ooa_hdr->alloc_len == 0) 2636 || (ooa_hdr->alloc_num == 0)) { 2637 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2638 "must be non-zero\n", __func__, 2639 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2640 retval = EINVAL; 2641 break; 2642 } 2643 2644 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2645 sizeof(struct ctl_ooa_entry))) { 2646 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2647 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2648 __func__, ooa_hdr->alloc_len, 2649 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2650 retval = EINVAL; 2651 break; 2652 } 2653 2654 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2655 if (entries == NULL) { 2656 printf("%s: could not allocate %d bytes for OOA " 2657 "dump\n", __func__, ooa_hdr->alloc_len); 2658 retval = ENOMEM; 2659 break; 2660 } 2661 2662 mtx_lock(&softc->ctl_lock); 2663 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0) 2664 && ((ooa_hdr->lun_num >= CTL_MAX_LUNS) 2665 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) { 2666 mtx_unlock(&softc->ctl_lock); 2667 free(entries, M_CTL); 2668 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2669 __func__, (uintmax_t)ooa_hdr->lun_num); 2670 retval = EINVAL; 2671 break; 2672 } 2673 2674 cur_fill_num = 0; 2675 2676 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2677 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2678 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2679 ooa_hdr, entries); 2680 if (retval != 0) 2681 break; 2682 } 2683 if (retval != 0) { 2684 mtx_unlock(&softc->ctl_lock); 2685 free(entries, M_CTL); 2686 break; 2687 } 2688 } else { 2689 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2690 2691 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr, 2692 entries); 2693 } 2694 mtx_unlock(&softc->ctl_lock); 2695 2696 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2697 ooa_hdr->fill_len = ooa_hdr->fill_num * 2698 sizeof(struct ctl_ooa_entry); 2699 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2700 if (retval != 0) { 2701 printf("%s: error copying out %d bytes for OOA dump\n", 2702 __func__, ooa_hdr->fill_len); 2703 } 2704 2705 getbintime(&ooa_hdr->cur_bt); 2706 2707 if (cur_fill_num > ooa_hdr->alloc_num) { 2708 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2709 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2710 } else { 2711 ooa_hdr->dropped_num = 0; 2712 ooa_hdr->status = CTL_OOA_OK; 2713 } 2714 2715 free(entries, M_CTL); 2716 break; 2717 } 2718 case CTL_CHECK_OOA: { 2719 union ctl_io *io; 2720 struct ctl_lun *lun; 2721 struct ctl_ooa_info *ooa_info; 2722 2723 2724 ooa_info = (struct ctl_ooa_info *)addr; 2725 2726 if (ooa_info->lun_id >= CTL_MAX_LUNS) { 2727 ooa_info->status = CTL_OOA_INVALID_LUN; 2728 break; 2729 } 2730 mtx_lock(&softc->ctl_lock); 2731 lun = softc->ctl_luns[ooa_info->lun_id]; 2732 if (lun == NULL) { 2733 mtx_unlock(&softc->ctl_lock); 2734 ooa_info->status = CTL_OOA_INVALID_LUN; 2735 break; 2736 } 2737 mtx_lock(&lun->lun_lock); 2738 mtx_unlock(&softc->ctl_lock); 2739 ooa_info->num_entries = 0; 2740 for (io = (union ctl_io 
*)TAILQ_FIRST(&lun->ooa_queue); 2741 io != NULL; io = (union ctl_io *)TAILQ_NEXT( 2742 &io->io_hdr, ooa_links)) { 2743 ooa_info->num_entries++; 2744 } 2745 mtx_unlock(&lun->lun_lock); 2746 2747 ooa_info->status = CTL_OOA_SUCCESS; 2748 2749 break; 2750 } 2751 case CTL_HARD_START: 2752 case CTL_HARD_STOP: { 2753 struct ctl_fe_ioctl_startstop_info ss_info; 2754 struct cfi_metatask *metatask; 2755 struct mtx hs_mtx; 2756 2757 mtx_init(&hs_mtx, "HS Mutex", NULL, MTX_DEF); 2758 2759 cv_init(&ss_info.sem, "hard start/stop cv" ); 2760 2761 metatask = cfi_alloc_metatask(/*can_wait*/ 1); 2762 if (metatask == NULL) { 2763 retval = ENOMEM; 2764 mtx_destroy(&hs_mtx); 2765 break; 2766 } 2767 2768 if (cmd == CTL_HARD_START) 2769 metatask->tasktype = CFI_TASK_STARTUP; 2770 else 2771 metatask->tasktype = CFI_TASK_SHUTDOWN; 2772 2773 metatask->callback = ctl_ioctl_hard_startstop_callback; 2774 metatask->callback_arg = &ss_info; 2775 2776 cfi_action(metatask); 2777 2778 /* Wait for the callback */ 2779 mtx_lock(&hs_mtx); 2780 cv_wait_sig(&ss_info.sem, &hs_mtx); 2781 mtx_unlock(&hs_mtx); 2782 2783 /* 2784 * All information has been copied from the metatask by the 2785 * time cv_broadcast() is called, so we free the metatask here. 2786 */ 2787 cfi_free_metatask(metatask); 2788 2789 memcpy((void *)addr, &ss_info.hs_info, sizeof(ss_info.hs_info)); 2790 2791 mtx_destroy(&hs_mtx); 2792 break; 2793 } 2794 case CTL_BBRREAD: { 2795 struct ctl_bbrread_info *bbr_info; 2796 struct ctl_fe_ioctl_bbrread_info fe_bbr_info; 2797 struct mtx bbr_mtx; 2798 struct cfi_metatask *metatask; 2799 2800 bbr_info = (struct ctl_bbrread_info *)addr; 2801 2802 bzero(&fe_bbr_info, sizeof(fe_bbr_info)); 2803 2804 bzero(&bbr_mtx, sizeof(bbr_mtx)); 2805 mtx_init(&bbr_mtx, "BBR Mutex", NULL, MTX_DEF); 2806 2807 fe_bbr_info.bbr_info = bbr_info; 2808 fe_bbr_info.lock = &bbr_mtx; 2809 2810 cv_init(&fe_bbr_info.sem, "BBR read cv"); 2811 metatask = cfi_alloc_metatask(/*can_wait*/ 1); 2812 2813 if (metatask == NULL) { 2814 mtx_destroy(&bbr_mtx); 2815 cv_destroy(&fe_bbr_info.sem); 2816 retval = ENOMEM; 2817 break; 2818 } 2819 metatask->tasktype = CFI_TASK_BBRREAD; 2820 metatask->callback = ctl_ioctl_bbrread_callback; 2821 metatask->callback_arg = &fe_bbr_info; 2822 metatask->taskinfo.bbrread.lun_num = bbr_info->lun_num; 2823 metatask->taskinfo.bbrread.lba = bbr_info->lba; 2824 metatask->taskinfo.bbrread.len = bbr_info->len; 2825 2826 cfi_action(metatask); 2827 2828 mtx_lock(&bbr_mtx); 2829 while (fe_bbr_info.wakeup_done == 0) 2830 cv_wait_sig(&fe_bbr_info.sem, &bbr_mtx); 2831 mtx_unlock(&bbr_mtx); 2832 2833 bbr_info->status = metatask->status; 2834 bbr_info->bbr_status = metatask->taskinfo.bbrread.status; 2835 bbr_info->scsi_status = metatask->taskinfo.bbrread.scsi_status; 2836 memcpy(&bbr_info->sense_data, 2837 &metatask->taskinfo.bbrread.sense_data, 2838 MIN(sizeof(bbr_info->sense_data), 2839 sizeof(metatask->taskinfo.bbrread.sense_data))); 2840 2841 cfi_free_metatask(metatask); 2842 2843 mtx_destroy(&bbr_mtx); 2844 cv_destroy(&fe_bbr_info.sem); 2845 2846 break; 2847 } 2848 case CTL_DELAY_IO: { 2849 struct ctl_io_delay_info *delay_info; 2850 #ifdef CTL_IO_DELAY 2851 struct ctl_lun *lun; 2852 #endif /* CTL_IO_DELAY */ 2853 2854 delay_info = (struct ctl_io_delay_info *)addr; 2855 2856 #ifdef CTL_IO_DELAY 2857 mtx_lock(&softc->ctl_lock); 2858 2859 if ((delay_info->lun_id >= CTL_MAX_LUNS) 2860 || (softc->ctl_luns[delay_info->lun_id] == NULL)) { 2861 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2862 } else { 2863 lun = 
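/*
 * The CTL_BBRREAD case above guards its sleep with fe_bbr_info.wakeup_done,
 * so a cv_broadcast() that fires before this thread reaches cv_wait_sig()
 * cannot be lost; the metatask callback sets the flag under the same mutex.
 * The same idea in isolation, as a minimal sketch with illustrative names:
 */
#if 0
struct waiter {
        struct mtx      lock;
        struct cv       sem;
        int             done;
};

static void
waiter_init(struct waiter *w)
{
        mtx_init(&w->lock, "waiterlock", NULL, MTX_DEF);
        cv_init(&w->sem, "waitercv");
        w->done = 0;
}

/* Called from the completion path (e.g. a metatask callback). */
static void
waiter_wakeup(struct waiter *w)
{
        mtx_lock(&w->lock);
        w->done = 1;
        cv_broadcast(&w->sem);
        mtx_unlock(&w->lock);
}

/* Called from the thread that must wait for the completion. */
static void
waiter_wait(struct waiter *w)
{
        mtx_lock(&w->lock);
        while (w->done == 0)
                cv_wait(&w->sem, &w->lock);
        mtx_unlock(&w->lock);
}
#endif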
softc->ctl_luns[delay_info->lun_id]; 2864 mtx_lock(&lun->lun_lock); 2865 2866 delay_info->status = CTL_DELAY_STATUS_OK; 2867 2868 switch (delay_info->delay_type) { 2869 case CTL_DELAY_TYPE_CONT: 2870 break; 2871 case CTL_DELAY_TYPE_ONESHOT: 2872 break; 2873 default: 2874 delay_info->status = 2875 CTL_DELAY_STATUS_INVALID_TYPE; 2876 break; 2877 } 2878 2879 switch (delay_info->delay_loc) { 2880 case CTL_DELAY_LOC_DATAMOVE: 2881 lun->delay_info.datamove_type = 2882 delay_info->delay_type; 2883 lun->delay_info.datamove_delay = 2884 delay_info->delay_secs; 2885 break; 2886 case CTL_DELAY_LOC_DONE: 2887 lun->delay_info.done_type = 2888 delay_info->delay_type; 2889 lun->delay_info.done_delay = 2890 delay_info->delay_secs; 2891 break; 2892 default: 2893 delay_info->status = 2894 CTL_DELAY_STATUS_INVALID_LOC; 2895 break; 2896 } 2897 mtx_unlock(&lun->lun_lock); 2898 } 2899 2900 mtx_unlock(&softc->ctl_lock); 2901 #else 2902 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2903 #endif /* CTL_IO_DELAY */ 2904 break; 2905 } 2906 case CTL_REALSYNC_SET: { 2907 int *syncstate; 2908 2909 syncstate = (int *)addr; 2910 2911 mtx_lock(&softc->ctl_lock); 2912 switch (*syncstate) { 2913 case 0: 2914 softc->flags &= ~CTL_FLAG_REAL_SYNC; 2915 break; 2916 case 1: 2917 softc->flags |= CTL_FLAG_REAL_SYNC; 2918 break; 2919 default: 2920 retval = EINVAL; 2921 break; 2922 } 2923 mtx_unlock(&softc->ctl_lock); 2924 break; 2925 } 2926 case CTL_REALSYNC_GET: { 2927 int *syncstate; 2928 2929 syncstate = (int*)addr; 2930 2931 mtx_lock(&softc->ctl_lock); 2932 if (softc->flags & CTL_FLAG_REAL_SYNC) 2933 *syncstate = 1; 2934 else 2935 *syncstate = 0; 2936 mtx_unlock(&softc->ctl_lock); 2937 2938 break; 2939 } 2940 case CTL_SETSYNC: 2941 case CTL_GETSYNC: { 2942 struct ctl_sync_info *sync_info; 2943 struct ctl_lun *lun; 2944 2945 sync_info = (struct ctl_sync_info *)addr; 2946 2947 mtx_lock(&softc->ctl_lock); 2948 lun = softc->ctl_luns[sync_info->lun_id]; 2949 if (lun == NULL) { 2950 mtx_unlock(&softc->ctl_lock); 2951 sync_info->status = CTL_GS_SYNC_NO_LUN; 2952 } 2953 /* 2954 * Get or set the sync interval. We're not bounds checking 2955 * in the set case, hopefully the user won't do something 2956 * silly. 2957 */ 2958 mtx_lock(&lun->lun_lock); 2959 mtx_unlock(&softc->ctl_lock); 2960 if (cmd == CTL_GETSYNC) 2961 sync_info->sync_interval = lun->sync_interval; 2962 else 2963 lun->sync_interval = sync_info->sync_interval; 2964 mtx_unlock(&lun->lun_lock); 2965 2966 sync_info->status = CTL_GS_SYNC_OK; 2967 2968 break; 2969 } 2970 case CTL_GETSTATS: { 2971 struct ctl_stats *stats; 2972 struct ctl_lun *lun; 2973 int i; 2974 2975 stats = (struct ctl_stats *)addr; 2976 2977 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) > 2978 stats->alloc_len) { 2979 stats->status = CTL_SS_NEED_MORE_SPACE; 2980 stats->num_luns = softc->num_luns; 2981 break; 2982 } 2983 /* 2984 * XXX KDM no locking here. If the LUN list changes, 2985 * things can blow up. 
2986 */ 2987 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; 2988 i++, lun = STAILQ_NEXT(lun, links)) { 2989 retval = copyout(&lun->stats, &stats->lun_stats[i], 2990 sizeof(lun->stats)); 2991 if (retval != 0) 2992 break; 2993 } 2994 stats->num_luns = softc->num_luns; 2995 stats->fill_len = sizeof(struct ctl_lun_io_stats) * 2996 softc->num_luns; 2997 stats->status = CTL_SS_OK; 2998 #ifdef CTL_TIME_IO 2999 stats->flags = CTL_STATS_FLAG_TIME_VALID; 3000 #else 3001 stats->flags = CTL_STATS_FLAG_NONE; 3002 #endif 3003 getnanouptime(&stats->timestamp); 3004 break; 3005 } 3006 case CTL_ERROR_INJECT: { 3007 struct ctl_error_desc *err_desc, *new_err_desc; 3008 struct ctl_lun *lun; 3009 3010 err_desc = (struct ctl_error_desc *)addr; 3011 3012 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 3013 M_WAITOK | M_ZERO); 3014 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 3015 3016 mtx_lock(&softc->ctl_lock); 3017 lun = softc->ctl_luns[err_desc->lun_id]; 3018 if (lun == NULL) { 3019 mtx_unlock(&softc->ctl_lock); 3020 free(new_err_desc, M_CTL); 3021 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 3022 __func__, (uintmax_t)err_desc->lun_id); 3023 retval = EINVAL; 3024 break; 3025 } 3026 mtx_lock(&lun->lun_lock); 3027 mtx_unlock(&softc->ctl_lock); 3028 3029 /* 3030 * We could do some checking here to verify the validity 3031 * of the request, but given the complexity of error 3032 * injection requests, the checking logic would be fairly 3033 * complex. 3034 * 3035 * For now, if the request is invalid, it just won't get 3036 * executed and might get deleted. 3037 */ 3038 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 3039 3040 /* 3041 * XXX KDM check to make sure the serial number is unique, 3042 * in case we somehow manage to wrap. That shouldn't 3043 * happen for a very long time, but it's the right thing to 3044 * do. 
3045 */ 3046 new_err_desc->serial = lun->error_serial; 3047 err_desc->serial = lun->error_serial; 3048 lun->error_serial++; 3049 3050 mtx_unlock(&lun->lun_lock); 3051 break; 3052 } 3053 case CTL_ERROR_INJECT_DELETE: { 3054 struct ctl_error_desc *delete_desc, *desc, *desc2; 3055 struct ctl_lun *lun; 3056 int delete_done; 3057 3058 delete_desc = (struct ctl_error_desc *)addr; 3059 delete_done = 0; 3060 3061 mtx_lock(&softc->ctl_lock); 3062 lun = softc->ctl_luns[delete_desc->lun_id]; 3063 if (lun == NULL) { 3064 mtx_unlock(&softc->ctl_lock); 3065 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 3066 __func__, (uintmax_t)delete_desc->lun_id); 3067 retval = EINVAL; 3068 break; 3069 } 3070 mtx_lock(&lun->lun_lock); 3071 mtx_unlock(&softc->ctl_lock); 3072 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 3073 if (desc->serial != delete_desc->serial) 3074 continue; 3075 3076 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 3077 links); 3078 free(desc, M_CTL); 3079 delete_done = 1; 3080 } 3081 mtx_unlock(&lun->lun_lock); 3082 if (delete_done == 0) { 3083 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 3084 "error serial %ju on LUN %u\n", __func__, 3085 delete_desc->serial, delete_desc->lun_id); 3086 retval = EINVAL; 3087 break; 3088 } 3089 break; 3090 } 3091 case CTL_DUMP_STRUCTS: { 3092 int i, j, k; 3093 struct ctl_port *port; 3094 struct ctl_frontend *fe; 3095 3096 mtx_lock(&softc->ctl_lock); 3097 printf("CTL Persistent Reservation information start:\n"); 3098 for (i = 0; i < CTL_MAX_LUNS; i++) { 3099 struct ctl_lun *lun; 3100 3101 lun = softc->ctl_luns[i]; 3102 3103 if ((lun == NULL) 3104 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 3105 continue; 3106 3107 for (j = 0; j < (CTL_MAX_PORTS * 2); j++) { 3108 if (lun->pr_keys[j] == NULL) 3109 continue; 3110 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 3111 if (lun->pr_keys[j][k] == 0) 3112 continue; 3113 printf(" LUN %d port %d iid %d key " 3114 "%#jx\n", i, j, k, 3115 (uintmax_t)lun->pr_keys[j][k]); 3116 } 3117 } 3118 } 3119 printf("CTL Persistent Reservation information end\n"); 3120 printf("CTL Ports:\n"); 3121 STAILQ_FOREACH(port, &softc->port_list, links) { 3122 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 3123 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 3124 port->frontend->name, port->port_type, 3125 port->physical_port, port->virtual_port, 3126 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 3127 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3128 if (port->wwpn_iid[j].in_use == 0 && 3129 port->wwpn_iid[j].wwpn == 0 && 3130 port->wwpn_iid[j].name == NULL) 3131 continue; 3132 3133 printf(" iid %u use %d WWPN %#jx '%s'\n", 3134 j, port->wwpn_iid[j].in_use, 3135 (uintmax_t)port->wwpn_iid[j].wwpn, 3136 port->wwpn_iid[j].name); 3137 } 3138 } 3139 printf("CTL Port information end\n"); 3140 mtx_unlock(&softc->ctl_lock); 3141 /* 3142 * XXX KDM calling this without a lock. We'd likely want 3143 * to drop the lock before calling the frontend's dump 3144 * routine anyway. 
3145 */ 3146 printf("CTL Frontends:\n"); 3147 STAILQ_FOREACH(fe, &softc->fe_list, links) { 3148 printf(" Frontend '%s'\n", fe->name); 3149 if (fe->fe_dump != NULL) 3150 fe->fe_dump(); 3151 } 3152 printf("CTL Frontend information end\n"); 3153 break; 3154 } 3155 case CTL_LUN_REQ: { 3156 struct ctl_lun_req *lun_req; 3157 struct ctl_backend_driver *backend; 3158 3159 lun_req = (struct ctl_lun_req *)addr; 3160 3161 backend = ctl_backend_find(lun_req->backend); 3162 if (backend == NULL) { 3163 lun_req->status = CTL_LUN_ERROR; 3164 snprintf(lun_req->error_str, 3165 sizeof(lun_req->error_str), 3166 "Backend \"%s\" not found.", 3167 lun_req->backend); 3168 break; 3169 } 3170 if (lun_req->num_be_args > 0) { 3171 lun_req->kern_be_args = ctl_copyin_args( 3172 lun_req->num_be_args, 3173 lun_req->be_args, 3174 lun_req->error_str, 3175 sizeof(lun_req->error_str)); 3176 if (lun_req->kern_be_args == NULL) { 3177 lun_req->status = CTL_LUN_ERROR; 3178 break; 3179 } 3180 } 3181 3182 retval = backend->ioctl(dev, cmd, addr, flag, td); 3183 3184 if (lun_req->num_be_args > 0) { 3185 ctl_copyout_args(lun_req->num_be_args, 3186 lun_req->kern_be_args); 3187 ctl_free_args(lun_req->num_be_args, 3188 lun_req->kern_be_args); 3189 } 3190 break; 3191 } 3192 case CTL_LUN_LIST: { 3193 struct sbuf *sb; 3194 struct ctl_lun *lun; 3195 struct ctl_lun_list *list; 3196 struct ctl_option *opt; 3197 3198 list = (struct ctl_lun_list *)addr; 3199 3200 /* 3201 * Allocate a fixed length sbuf here, based on the length 3202 * of the user's buffer. We could allocate an auto-extending 3203 * buffer, and then tell the user how much larger our 3204 * amount of data is than his buffer, but that presents 3205 * some problems: 3206 * 3207 * 1. The sbuf(9) routines use a blocking malloc, and so 3208 * we can't hold a lock while calling them with an 3209 * auto-extending buffer. 3210 * 3211 * 2. There is not currently a LUN reference counting 3212 * mechanism, outside of outstanding transactions on 3213 * the LUN's OOA queue. So a LUN could go away on us 3214 * while we're getting the LUN number, backend-specific 3215 * information, etc. Thus, given the way things 3216 * currently work, we need to hold the CTL lock while 3217 * grabbing LUN information. 3218 * 3219 * So, from the user's standpoint, the best thing to do is 3220 * allocate what he thinks is a reasonable buffer length, 3221 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 3222 * double the buffer length and try again. (And repeat 3223 * that until he succeeds.) 3224 */ 3225 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3226 if (sb == NULL) { 3227 list->status = CTL_LUN_LIST_ERROR; 3228 snprintf(list->error_str, sizeof(list->error_str), 3229 "Unable to allocate %d bytes for LUN list", 3230 list->alloc_len); 3231 break; 3232 } 3233 3234 sbuf_printf(sb, "<ctllunlist>\n"); 3235 3236 mtx_lock(&softc->ctl_lock); 3237 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3238 mtx_lock(&lun->lun_lock); 3239 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3240 (uintmax_t)lun->lun); 3241 3242 /* 3243 * Bail out as soon as we see that we've overfilled 3244 * the buffer. 3245 */ 3246 if (retval != 0) 3247 break; 3248 3249 retval = sbuf_printf(sb, "\t<backend_type>%s" 3250 "</backend_type>\n", 3251 (lun->backend == NULL) ? 
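/*
 * The comment above recommends that a CTL_LUN_LIST caller simply double
 * its buffer and retry on CTL_LUN_LIST_NEED_MORE_SPACE.  A minimal
 * userland sketch of that loop follows; the starting size and the error
 * handling are arbitrary, and the snippet is illustrative only.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl_ioctl.h>
#include <err.h>
#include <stdlib.h>
#include <string.h>

static char *
get_lun_xml(int fd)
{
        struct ctl_lun_list list;
        uint32_t len = 4096;
        char *buf;

        for (;;) {
                if ((buf = malloc(len)) == NULL)
                        err(1, "malloc");
                memset(&list, 0, sizeof(list));
                list.alloc_len = len;
                list.lun_xml = buf;
                if (ioctl(fd, CTL_LUN_LIST, &list) == -1)
                        err(1, "CTL_LUN_LIST");
                if (list.status == CTL_LUN_LIST_OK)
                        return (buf);   /* NUL-terminated XML document */
                if (list.status != CTL_LUN_LIST_NEED_MORE_SPACE)
                        errx(1, "CTL_LUN_LIST: %s", list.error_str);
                free(buf);
                len *= 2;               /* too small: double and retry */
        }
}
#endif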
"none" : 3252 lun->backend->name); 3253 3254 if (retval != 0) 3255 break; 3256 3257 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3258 lun->be_lun->lun_type); 3259 3260 if (retval != 0) 3261 break; 3262 3263 if (lun->backend == NULL) { 3264 retval = sbuf_printf(sb, "</lun>\n"); 3265 if (retval != 0) 3266 break; 3267 continue; 3268 } 3269 3270 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3271 (lun->be_lun->maxlba > 0) ? 3272 lun->be_lun->maxlba + 1 : 0); 3273 3274 if (retval != 0) 3275 break; 3276 3277 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3278 lun->be_lun->blocksize); 3279 3280 if (retval != 0) 3281 break; 3282 3283 retval = sbuf_printf(sb, "\t<serial_number>"); 3284 3285 if (retval != 0) 3286 break; 3287 3288 retval = ctl_sbuf_printf_esc(sb, 3289 lun->be_lun->serial_num, 3290 sizeof(lun->be_lun->serial_num)); 3291 3292 if (retval != 0) 3293 break; 3294 3295 retval = sbuf_printf(sb, "</serial_number>\n"); 3296 3297 if (retval != 0) 3298 break; 3299 3300 retval = sbuf_printf(sb, "\t<device_id>"); 3301 3302 if (retval != 0) 3303 break; 3304 3305 retval = ctl_sbuf_printf_esc(sb, 3306 lun->be_lun->device_id, 3307 sizeof(lun->be_lun->device_id)); 3308 3309 if (retval != 0) 3310 break; 3311 3312 retval = sbuf_printf(sb, "</device_id>\n"); 3313 3314 if (retval != 0) 3315 break; 3316 3317 if (lun->backend->lun_info != NULL) { 3318 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 3319 if (retval != 0) 3320 break; 3321 } 3322 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 3323 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3324 opt->name, opt->value, opt->name); 3325 if (retval != 0) 3326 break; 3327 } 3328 3329 retval = sbuf_printf(sb, "</lun>\n"); 3330 3331 if (retval != 0) 3332 break; 3333 mtx_unlock(&lun->lun_lock); 3334 } 3335 if (lun != NULL) 3336 mtx_unlock(&lun->lun_lock); 3337 mtx_unlock(&softc->ctl_lock); 3338 3339 if ((retval != 0) 3340 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3341 retval = 0; 3342 sbuf_delete(sb); 3343 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3344 snprintf(list->error_str, sizeof(list->error_str), 3345 "Out of space, %d bytes is too small", 3346 list->alloc_len); 3347 break; 3348 } 3349 3350 sbuf_finish(sb); 3351 3352 retval = copyout(sbuf_data(sb), list->lun_xml, 3353 sbuf_len(sb) + 1); 3354 3355 list->fill_len = sbuf_len(sb) + 1; 3356 list->status = CTL_LUN_LIST_OK; 3357 sbuf_delete(sb); 3358 break; 3359 } 3360 case CTL_ISCSI: { 3361 struct ctl_iscsi *ci; 3362 struct ctl_frontend *fe; 3363 3364 ci = (struct ctl_iscsi *)addr; 3365 3366 fe = ctl_frontend_find("iscsi"); 3367 if (fe == NULL) { 3368 ci->status = CTL_ISCSI_ERROR; 3369 snprintf(ci->error_str, sizeof(ci->error_str), 3370 "Frontend \"iscsi\" not found."); 3371 break; 3372 } 3373 3374 retval = fe->ioctl(dev, cmd, addr, flag, td); 3375 break; 3376 } 3377 case CTL_PORT_REQ: { 3378 struct ctl_req *req; 3379 struct ctl_frontend *fe; 3380 3381 req = (struct ctl_req *)addr; 3382 3383 fe = ctl_frontend_find(req->driver); 3384 if (fe == NULL) { 3385 req->status = CTL_LUN_ERROR; 3386 snprintf(req->error_str, sizeof(req->error_str), 3387 "Frontend \"%s\" not found.", req->driver); 3388 break; 3389 } 3390 if (req->num_args > 0) { 3391 req->kern_args = ctl_copyin_args(req->num_args, 3392 req->args, req->error_str, sizeof(req->error_str)); 3393 if (req->kern_args == NULL) { 3394 req->status = CTL_LUN_ERROR; 3395 break; 3396 } 3397 } 3398 3399 retval = fe->ioctl(dev, cmd, addr, flag, td); 3400 3401 if (req->num_args > 0) { 3402 ctl_copyout_args(req->num_args, 
req->kern_args); 3403 ctl_free_args(req->num_args, req->kern_args); 3404 } 3405 break; 3406 } 3407 case CTL_PORT_LIST: { 3408 struct sbuf *sb; 3409 struct ctl_port *port; 3410 struct ctl_lun_list *list; 3411 struct ctl_option *opt; 3412 int j; 3413 uint32_t plun; 3414 3415 list = (struct ctl_lun_list *)addr; 3416 3417 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3418 if (sb == NULL) { 3419 list->status = CTL_LUN_LIST_ERROR; 3420 snprintf(list->error_str, sizeof(list->error_str), 3421 "Unable to allocate %d bytes for LUN list", 3422 list->alloc_len); 3423 break; 3424 } 3425 3426 sbuf_printf(sb, "<ctlportlist>\n"); 3427 3428 mtx_lock(&softc->ctl_lock); 3429 STAILQ_FOREACH(port, &softc->port_list, links) { 3430 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3431 (uintmax_t)port->targ_port); 3432 3433 /* 3434 * Bail out as soon as we see that we've overfilled 3435 * the buffer. 3436 */ 3437 if (retval != 0) 3438 break; 3439 3440 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3441 "</frontend_type>\n", port->frontend->name); 3442 if (retval != 0) 3443 break; 3444 3445 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3446 port->port_type); 3447 if (retval != 0) 3448 break; 3449 3450 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3451 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 3452 if (retval != 0) 3453 break; 3454 3455 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3456 port->port_name); 3457 if (retval != 0) 3458 break; 3459 3460 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3461 port->physical_port); 3462 if (retval != 0) 3463 break; 3464 3465 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3466 port->virtual_port); 3467 if (retval != 0) 3468 break; 3469 3470 if (port->target_devid != NULL) { 3471 sbuf_printf(sb, "\t<target>"); 3472 ctl_id_sbuf(port->target_devid, sb); 3473 sbuf_printf(sb, "</target>\n"); 3474 } 3475 3476 if (port->port_devid != NULL) { 3477 sbuf_printf(sb, "\t<port>"); 3478 ctl_id_sbuf(port->port_devid, sb); 3479 sbuf_printf(sb, "</port>\n"); 3480 } 3481 3482 if (port->port_info != NULL) { 3483 retval = port->port_info(port->onoff_arg, sb); 3484 if (retval != 0) 3485 break; 3486 } 3487 STAILQ_FOREACH(opt, &port->options, links) { 3488 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3489 opt->name, opt->value, opt->name); 3490 if (retval != 0) 3491 break; 3492 } 3493 3494 if (port->lun_map != NULL) { 3495 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 3496 for (j = 0; j < CTL_MAX_LUNS; j++) { 3497 plun = ctl_lun_map_from_port(port, j); 3498 if (plun >= CTL_MAX_LUNS) 3499 continue; 3500 sbuf_printf(sb, 3501 "\t<lun id=\"%u\">%u</lun>\n", 3502 j, plun); 3503 } 3504 } 3505 3506 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3507 if (port->wwpn_iid[j].in_use == 0 || 3508 (port->wwpn_iid[j].wwpn == 0 && 3509 port->wwpn_iid[j].name == NULL)) 3510 continue; 3511 3512 if (port->wwpn_iid[j].name != NULL) 3513 retval = sbuf_printf(sb, 3514 "\t<initiator id=\"%u\">%s</initiator>\n", 3515 j, port->wwpn_iid[j].name); 3516 else 3517 retval = sbuf_printf(sb, 3518 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3519 j, port->wwpn_iid[j].wwpn); 3520 if (retval != 0) 3521 break; 3522 } 3523 if (retval != 0) 3524 break; 3525 3526 retval = sbuf_printf(sb, "</targ_port>\n"); 3527 if (retval != 0) 3528 break; 3529 } 3530 mtx_unlock(&softc->ctl_lock); 3531 3532 if ((retval != 0) 3533 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3534 retval = 0; 3535 sbuf_delete(sb); 3536 list->status = 
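/*
 * For reference, the CTL_PORT_LIST handler above emits XML shaped roughly
 * like the following.  All values here are invented for illustration; the
 * target/port identifier elements, the per-port option elements and the
 * lun_map block are emitted only when the corresponding data is present.
 *
 *	<ctlportlist>
 *	<targ_port id="1">
 *		<frontend_type>iscsi</frontend_type>
 *		<port_type>8</port_type>
 *		<online>YES</online>
 *		<port_name>iscsi</port_name>
 *		<physical_port>0</physical_port>
 *		<virtual_port>0</virtual_port>
 *		<target>iqn.2012-06.com.example:target0</target>
 *		<lun_map>on</lun_map>
 *		<lun id="0">3</lun>
 *		<initiator id="1">iqn.2012-06.com.example:host1</initiator>
 *	</targ_port>
 *	</ctlportlist>
 */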
CTL_LUN_LIST_NEED_MORE_SPACE; 3537 snprintf(list->error_str, sizeof(list->error_str), 3538 "Out of space, %d bytes is too small", 3539 list->alloc_len); 3540 break; 3541 } 3542 3543 sbuf_finish(sb); 3544 3545 retval = copyout(sbuf_data(sb), list->lun_xml, 3546 sbuf_len(sb) + 1); 3547 3548 list->fill_len = sbuf_len(sb) + 1; 3549 list->status = CTL_LUN_LIST_OK; 3550 sbuf_delete(sb); 3551 break; 3552 } 3553 case CTL_LUN_MAP: { 3554 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3555 struct ctl_port *port; 3556 3557 mtx_lock(&softc->ctl_lock); 3558 if (lm->port >= CTL_MAX_PORTS || 3559 (port = softc->ctl_ports[lm->port]) == NULL) { 3560 mtx_unlock(&softc->ctl_lock); 3561 return (ENXIO); 3562 } 3563 if (lm->plun < CTL_MAX_LUNS) { 3564 if (lm->lun == UINT32_MAX) 3565 retval = ctl_lun_map_unset(port, lm->plun); 3566 else if (lm->lun < CTL_MAX_LUNS && 3567 softc->ctl_luns[lm->lun] != NULL) 3568 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3569 else { 3570 mtx_unlock(&softc->ctl_lock); 3571 return (ENXIO); 3572 } 3573 } else if (lm->plun == UINT32_MAX) { 3574 if (lm->lun == UINT32_MAX) 3575 retval = ctl_lun_map_deinit(port); 3576 else 3577 retval = ctl_lun_map_init(port); 3578 } else { 3579 mtx_unlock(&softc->ctl_lock); 3580 return (ENXIO); 3581 } 3582 mtx_unlock(&softc->ctl_lock); 3583 break; 3584 } 3585 default: { 3586 /* XXX KDM should we fix this? */ 3587 #if 0 3588 struct ctl_backend_driver *backend; 3589 unsigned int type; 3590 int found; 3591 3592 found = 0; 3593 3594 /* 3595 * We encode the backend type as the ioctl type for backend 3596 * ioctls. So parse it out here, and then search for a 3597 * backend of this type. 3598 */ 3599 type = _IOC_TYPE(cmd); 3600 3601 STAILQ_FOREACH(backend, &softc->be_list, links) { 3602 if (backend->type == type) { 3603 found = 1; 3604 break; 3605 } 3606 } 3607 if (found == 0) { 3608 printf("ctl: unknown ioctl command %#lx or backend " 3609 "%d\n", cmd, type); 3610 retval = EINVAL; 3611 break; 3612 } 3613 retval = backend->ioctl(dev, cmd, addr, flag, td); 3614 #endif 3615 retval = ENOTTY; 3616 break; 3617 } 3618 } 3619 return (retval); 3620 } 3621 3622 uint32_t 3623 ctl_get_initindex(struct ctl_nexus *nexus) 3624 { 3625 if (nexus->targ_port < CTL_MAX_PORTS) 3626 return (nexus->initid.id + 3627 (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3628 else 3629 return (nexus->initid.id + 3630 ((nexus->targ_port - CTL_MAX_PORTS) * 3631 CTL_MAX_INIT_PER_PORT)); 3632 } 3633 3634 uint32_t 3635 ctl_get_resindex(struct ctl_nexus *nexus) 3636 { 3637 return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3638 } 3639 3640 uint32_t 3641 ctl_port_idx(int port_num) 3642 { 3643 if (port_num < CTL_MAX_PORTS) 3644 return(port_num); 3645 else 3646 return(port_num - CTL_MAX_PORTS); 3647 } 3648 3649 int 3650 ctl_lun_map_init(struct ctl_port *port) 3651 { 3652 uint32_t i; 3653 3654 if (port->lun_map == NULL) 3655 port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS, 3656 M_CTL, M_NOWAIT); 3657 if (port->lun_map == NULL) 3658 return (ENOMEM); 3659 for (i = 0; i < CTL_MAX_LUNS; i++) 3660 port->lun_map[i] = UINT32_MAX; 3661 return (0); 3662 } 3663 3664 int 3665 ctl_lun_map_deinit(struct ctl_port *port) 3666 { 3667 3668 if (port->lun_map == NULL) 3669 return (0); 3670 free(port->lun_map, M_CTL); 3671 port->lun_map = NULL; 3672 return (0); 3673 } 3674 3675 int 3676 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3677 { 3678 int status; 3679 3680 if (port->lun_map == NULL) { 3681 status = ctl_lun_map_init(port); 3682 if (status != 0) 3683 
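/*
 * In the CTL_LUN_MAP handler above, UINT32_MAX acts as a wildcard for both
 * plun and lun: both UINT32_MAX drops the map entirely, plun UINT32_MAX
 * with any other lun creates an (initially empty) map, a valid plun with
 * lun UINT32_MAX removes a single mapping, and a valid plun/lun pair
 * installs one.  A userland sketch of the four invocations (illustrative
 * only; the descriptor is assumed to be an open CTL control device):
 */
#if 0
#include <sys/ioctl.h>
#include <cam/ctl/ctl_ioctl.h>
#include <err.h>
#include <stdint.h>
#include <string.h>

static void
example_lun_map(int fd, uint32_t port_id)
{
        struct ctl_lun_map lm;

        memset(&lm, 0, sizeof(lm));
        lm.port = port_id;

        /* Create an (initially empty) map: the port then exposes no LUNs. */
        lm.plun = UINT32_MAX;
        lm.lun = 0;                     /* any value other than UINT32_MAX */
        if (ioctl(fd, CTL_LUN_MAP, &lm) == -1)
                err(1, "CTL_LUN_MAP init");

        /* Expose global LUN 3 to this port as port LUN 5. */
        lm.plun = 5;
        lm.lun = 3;
        if (ioctl(fd, CTL_LUN_MAP, &lm) == -1)
                err(1, "CTL_LUN_MAP set");

        /* Remove that single mapping again. */
        lm.plun = 5;
        lm.lun = UINT32_MAX;
        if (ioctl(fd, CTL_LUN_MAP, &lm) == -1)
                err(1, "CTL_LUN_MAP unset");

        /* Drop the whole map: the port sees every LUN again. */
        lm.plun = UINT32_MAX;
        lm.lun = UINT32_MAX;
        if (ioctl(fd, CTL_LUN_MAP, &lm) == -1)
                err(1, "CTL_LUN_MAP deinit");
}
#endif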
return (status); 3684 } 3685 port->lun_map[plun] = glun; 3686 return (0); 3687 } 3688 3689 int 3690 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3691 { 3692 3693 if (port->lun_map == NULL) 3694 return (0); 3695 port->lun_map[plun] = UINT32_MAX; 3696 return (0); 3697 } 3698 3699 int 3700 ctl_lun_map_unsetg(struct ctl_port *port, uint32_t glun) 3701 { 3702 int i; 3703 3704 if (port->lun_map == NULL) 3705 return (0); 3706 for (i = 0; i < CTL_MAX_LUNS; i++) { 3707 if (port->lun_map[i] == glun) 3708 port->lun_map[i] = UINT32_MAX; 3709 } 3710 return (0); 3711 } 3712 3713 uint32_t 3714 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3715 { 3716 3717 if (port == NULL) 3718 return (UINT32_MAX); 3719 if (port->lun_map == NULL || lun_id >= CTL_MAX_LUNS) 3720 return (lun_id); 3721 return (port->lun_map[lun_id]); 3722 } 3723 3724 uint32_t 3725 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3726 { 3727 uint32_t i; 3728 3729 if (port == NULL) 3730 return (UINT32_MAX); 3731 if (port->lun_map == NULL) 3732 return (lun_id); 3733 for (i = 0; i < CTL_MAX_LUNS; i++) { 3734 if (port->lun_map[i] == lun_id) 3735 return (i); 3736 } 3737 return (UINT32_MAX); 3738 } 3739 3740 static struct ctl_port * 3741 ctl_io_port(struct ctl_io_hdr *io_hdr) 3742 { 3743 int port_num; 3744 3745 port_num = io_hdr->nexus.targ_port; 3746 return (control_softc->ctl_ports[ctl_port_idx(port_num)]); 3747 } 3748 3749 /* 3750 * Note: This only works for bitmask sizes that are at least 32 bits, and 3751 * that are a power of 2. 3752 */ 3753 int 3754 ctl_ffz(uint32_t *mask, uint32_t size) 3755 { 3756 uint32_t num_chunks, num_pieces; 3757 int i, j; 3758 3759 num_chunks = (size >> 5); 3760 if (num_chunks == 0) 3761 num_chunks++; 3762 num_pieces = MIN((sizeof(uint32_t) * 8), size); 3763 3764 for (i = 0; i < num_chunks; i++) { 3765 for (j = 0; j < num_pieces; j++) { 3766 if ((mask[i] & (1 << j)) == 0) 3767 return ((i << 5) + j); 3768 } 3769 } 3770 3771 return (-1); 3772 } 3773 3774 int 3775 ctl_set_mask(uint32_t *mask, uint32_t bit) 3776 { 3777 uint32_t chunk, piece; 3778 3779 chunk = bit >> 5; 3780 piece = bit % (sizeof(uint32_t) * 8); 3781 3782 if ((mask[chunk] & (1 << piece)) != 0) 3783 return (-1); 3784 else 3785 mask[chunk] |= (1 << piece); 3786 3787 return (0); 3788 } 3789 3790 int 3791 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3792 { 3793 uint32_t chunk, piece; 3794 3795 chunk = bit >> 5; 3796 piece = bit % (sizeof(uint32_t) * 8); 3797 3798 if ((mask[chunk] & (1 << piece)) == 0) 3799 return (-1); 3800 else 3801 mask[chunk] &= ~(1 << piece); 3802 3803 return (0); 3804 } 3805 3806 int 3807 ctl_is_set(uint32_t *mask, uint32_t bit) 3808 { 3809 uint32_t chunk, piece; 3810 3811 chunk = bit >> 5; 3812 piece = bit % (sizeof(uint32_t) * 8); 3813 3814 if ((mask[chunk] & (1 << piece)) == 0) 3815 return (0); 3816 else 3817 return (1); 3818 } 3819 3820 static uint64_t 3821 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3822 { 3823 uint64_t *t; 3824 3825 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3826 if (t == NULL) 3827 return (0); 3828 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3829 } 3830 3831 static void 3832 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3833 { 3834 uint64_t *t; 3835 3836 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3837 if (t == NULL) 3838 return; 3839 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3840 } 3841 3842 static void 3843 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3844 { 3845 uint64_t *p; 3846 u_int i; 3847 3848 i = residx/CTL_MAX_INIT_PER_PORT; 3849 if 
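/*
 * ctl_ffz() and ctl_set_mask() above operate on arrays of 32-bit chunks;
 * ctl_set_mask() returns -1 when the bit was already set and
 * ctl_clear_mask() returns -1 when it was already clear, which lets callers
 * detect double allocation or double free of a slot.  A minimal sketch of
 * claiming the first free slot in such a mask (the size parameter is an
 * example, and any required locking is assumed to be provided by the
 * caller):
 */
#if 0
static int
claim_first_free(uint32_t *mask, uint32_t size_bits)
{
        int bit;

        bit = ctl_ffz(mask, size_bits);         /* first zero bit, or -1 */
        if (bit == -1)
                return (-1);                    /* mask is full */
        if (ctl_set_mask(mask, bit) != 0)
                return (-1);                    /* bit was already set */
        return (bit);
}
#endif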
(lun->pr_keys[i] != NULL) 3850 return; 3851 mtx_unlock(&lun->lun_lock); 3852 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3853 M_WAITOK | M_ZERO); 3854 mtx_lock(&lun->lun_lock); 3855 if (lun->pr_keys[i] == NULL) 3856 lun->pr_keys[i] = p; 3857 else 3858 free(p, M_CTL); 3859 } 3860 3861 static void 3862 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3863 { 3864 uint64_t *t; 3865 3866 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3867 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3868 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3869 } 3870 3871 /* 3872 * ctl_softc, pool_name, total_ctl_io are passed in. 3873 * npool is passed out. 3874 */ 3875 int 3876 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3877 uint32_t total_ctl_io, void **npool) 3878 { 3879 #ifdef IO_POOLS 3880 struct ctl_io_pool *pool; 3881 3882 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3883 M_NOWAIT | M_ZERO); 3884 if (pool == NULL) 3885 return (ENOMEM); 3886 3887 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3888 pool->ctl_softc = ctl_softc; 3889 pool->zone = uma_zsecond_create(pool->name, NULL, 3890 NULL, NULL, NULL, ctl_softc->io_zone); 3891 /* uma_prealloc(pool->zone, total_ctl_io); */ 3892 3893 *npool = pool; 3894 #else 3895 *npool = ctl_softc->io_zone; 3896 #endif 3897 return (0); 3898 } 3899 3900 void 3901 ctl_pool_free(struct ctl_io_pool *pool) 3902 { 3903 3904 if (pool == NULL) 3905 return; 3906 3907 #ifdef IO_POOLS 3908 uma_zdestroy(pool->zone); 3909 free(pool, M_CTL); 3910 #endif 3911 } 3912 3913 union ctl_io * 3914 ctl_alloc_io(void *pool_ref) 3915 { 3916 union ctl_io *io; 3917 #ifdef IO_POOLS 3918 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3919 3920 io = uma_zalloc(pool->zone, M_WAITOK); 3921 #else 3922 io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK); 3923 #endif 3924 if (io != NULL) 3925 io->io_hdr.pool = pool_ref; 3926 return (io); 3927 } 3928 3929 union ctl_io * 3930 ctl_alloc_io_nowait(void *pool_ref) 3931 { 3932 union ctl_io *io; 3933 #ifdef IO_POOLS 3934 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3935 3936 io = uma_zalloc(pool->zone, M_NOWAIT); 3937 #else 3938 io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT); 3939 #endif 3940 if (io != NULL) 3941 io->io_hdr.pool = pool_ref; 3942 return (io); 3943 } 3944 3945 void 3946 ctl_free_io(union ctl_io *io) 3947 { 3948 #ifdef IO_POOLS 3949 struct ctl_io_pool *pool; 3950 #endif 3951 3952 if (io == NULL) 3953 return; 3954 3955 #ifdef IO_POOLS 3956 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3957 uma_zfree(pool->zone, io); 3958 #else 3959 uma_zfree((uma_zone_t)io->io_hdr.pool, io); 3960 #endif 3961 } 3962 3963 void 3964 ctl_zero_io(union ctl_io *io) 3965 { 3966 void *pool_ref; 3967 3968 if (io == NULL) 3969 return; 3970 3971 /* 3972 * May need to preserve linked list pointers at some point too. 3973 */ 3974 pool_ref = io->io_hdr.pool; 3975 memset(io, 0, sizeof(*io)); 3976 io->io_hdr.pool = pool_ref; 3977 } 3978 3979 /* 3980 * This routine is currently used for internal copies of ctl_ios that need 3981 * to persist for some reason after we've already returned status to the 3982 * FETD. (Thus the flag set.) 3983 * 3984 * XXX XXX 3985 * Note that this makes a blind copy of all fields in the ctl_io, except 3986 * for the pool reference. This includes any memory that has been 3987 * allocated! 
That memory will no longer be valid after done has been 3988 * called, so this would be VERY DANGEROUS for command that actually does 3989 * any reads or writes. Right now (11/7/2005), this is only used for immediate 3990 * start and stop commands, which don't transfer any data, so this is not a 3991 * problem. If it is used for anything else, the caller would also need to 3992 * allocate data buffer space and this routine would need to be modified to 3993 * copy the data buffer(s) as well. 3994 */ 3995 void 3996 ctl_copy_io(union ctl_io *src, union ctl_io *dest) 3997 { 3998 void *pool_ref; 3999 4000 if ((src == NULL) 4001 || (dest == NULL)) 4002 return; 4003 4004 /* 4005 * May need to preserve linked list pointers at some point too. 4006 */ 4007 pool_ref = dest->io_hdr.pool; 4008 4009 memcpy(dest, src, MIN(sizeof(*src), sizeof(*dest))); 4010 4011 dest->io_hdr.pool = pool_ref; 4012 /* 4013 * We need to know that this is an internal copy, and doesn't need 4014 * to get passed back to the FETD that allocated it. 4015 */ 4016 dest->io_hdr.flags |= CTL_FLAG_INT_COPY; 4017 } 4018 4019 int 4020 ctl_expand_number(const char *buf, uint64_t *num) 4021 { 4022 char *endptr; 4023 uint64_t number; 4024 unsigned shift; 4025 4026 number = strtoq(buf, &endptr, 0); 4027 4028 switch (tolower((unsigned char)*endptr)) { 4029 case 'e': 4030 shift = 60; 4031 break; 4032 case 'p': 4033 shift = 50; 4034 break; 4035 case 't': 4036 shift = 40; 4037 break; 4038 case 'g': 4039 shift = 30; 4040 break; 4041 case 'm': 4042 shift = 20; 4043 break; 4044 case 'k': 4045 shift = 10; 4046 break; 4047 case 'b': 4048 case '\0': /* No unit. */ 4049 *num = number; 4050 return (0); 4051 default: 4052 /* Unrecognized unit. */ 4053 return (-1); 4054 } 4055 4056 if ((number << shift) >> shift != number) { 4057 /* Overflow */ 4058 return (-1); 4059 } 4060 *num = number << shift; 4061 return (0); 4062 } 4063 4064 4065 /* 4066 * This routine could be used in the future to load default and/or saved 4067 * mode page parameters for a particuar lun. 4068 */ 4069 static int 4070 ctl_init_page_index(struct ctl_lun *lun) 4071 { 4072 int i; 4073 struct ctl_page_index *page_index; 4074 const char *value; 4075 uint64_t ival; 4076 4077 memcpy(&lun->mode_pages.index, page_index_template, 4078 sizeof(page_index_template)); 4079 4080 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 4081 4082 page_index = &lun->mode_pages.index[i]; 4083 /* 4084 * If this is a disk-only mode page, there's no point in 4085 * setting it up. For some pages, we have to have some 4086 * basic information about the disk in order to calculate the 4087 * mode page data. 
4088 */ 4089 if ((lun->be_lun->lun_type != T_DIRECT) 4090 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 4091 continue; 4092 4093 switch (page_index->page_code & SMPH_PC_MASK) { 4094 case SMS_RW_ERROR_RECOVERY_PAGE: { 4095 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 4096 panic("subpage is incorrect!"); 4097 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 4098 &rw_er_page_default, 4099 sizeof(rw_er_page_default)); 4100 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 4101 &rw_er_page_changeable, 4102 sizeof(rw_er_page_changeable)); 4103 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 4104 &rw_er_page_default, 4105 sizeof(rw_er_page_default)); 4106 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 4107 &rw_er_page_default, 4108 sizeof(rw_er_page_default)); 4109 page_index->page_data = 4110 (uint8_t *)lun->mode_pages.rw_er_page; 4111 break; 4112 } 4113 case SMS_FORMAT_DEVICE_PAGE: { 4114 struct scsi_format_page *format_page; 4115 4116 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 4117 panic("subpage is incorrect!"); 4118 4119 /* 4120 * Sectors per track are set above. Bytes per 4121 * sector need to be set here on a per-LUN basis. 4122 */ 4123 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 4124 &format_page_default, 4125 sizeof(format_page_default)); 4126 memcpy(&lun->mode_pages.format_page[ 4127 CTL_PAGE_CHANGEABLE], &format_page_changeable, 4128 sizeof(format_page_changeable)); 4129 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 4130 &format_page_default, 4131 sizeof(format_page_default)); 4132 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 4133 &format_page_default, 4134 sizeof(format_page_default)); 4135 4136 format_page = &lun->mode_pages.format_page[ 4137 CTL_PAGE_CURRENT]; 4138 scsi_ulto2b(lun->be_lun->blocksize, 4139 format_page->bytes_per_sector); 4140 4141 format_page = &lun->mode_pages.format_page[ 4142 CTL_PAGE_DEFAULT]; 4143 scsi_ulto2b(lun->be_lun->blocksize, 4144 format_page->bytes_per_sector); 4145 4146 format_page = &lun->mode_pages.format_page[ 4147 CTL_PAGE_SAVED]; 4148 scsi_ulto2b(lun->be_lun->blocksize, 4149 format_page->bytes_per_sector); 4150 4151 page_index->page_data = 4152 (uint8_t *)lun->mode_pages.format_page; 4153 break; 4154 } 4155 case SMS_RIGID_DISK_PAGE: { 4156 struct scsi_rigid_disk_page *rigid_disk_page; 4157 uint32_t sectors_per_cylinder; 4158 uint64_t cylinders; 4159 #ifndef __XSCALE__ 4160 int shift; 4161 #endif /* !__XSCALE__ */ 4162 4163 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 4164 panic("invalid subpage value %d", 4165 page_index->subpage); 4166 4167 /* 4168 * Rotation rate and sectors per track are set 4169 * above. We calculate the cylinders here based on 4170 * capacity. Due to the number of heads and 4171 * sectors per track we're using, smaller arrays 4172 * may turn out to have 0 cylinders. Linux and 4173 * FreeBSD don't pay attention to these mode pages 4174 * to figure out capacity, but Solaris does. It 4175 * seems to deal with 0 cylinders just fine, and 4176 * works out a fake geometry based on the capacity. 
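 * As a rough, purely illustrative example (the real constants are
 * CTL_DEFAULT_SECTORS_PER_TRACK and CTL_DEFAULT_HEADS): if a
 * cylinder held 256 sectors/track * 128 heads = 32768 sectors,
 * then a 2^27-block LUN (64GB of 512-byte blocks) would report
 *
 *	cylinders = (maxlba + 1) >> 15 = 2^27 >> 15 = 4096
 *
 * while anything smaller than one cylinder's worth of blocks
 * would report 0 cylinders, as discussed above.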
4177 */ 4178 memcpy(&lun->mode_pages.rigid_disk_page[ 4179 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 4180 sizeof(rigid_disk_page_default)); 4181 memcpy(&lun->mode_pages.rigid_disk_page[ 4182 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 4183 sizeof(rigid_disk_page_changeable)); 4184 4185 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 4186 CTL_DEFAULT_HEADS; 4187 4188 /* 4189 * The divide method here will be more accurate, 4190 * probably, but results in floating point being 4191 * used in the kernel on i386 (__udivdi3()). On the 4192 * XScale, though, __udivdi3() is implemented in 4193 * software. 4194 * 4195 * The shift method for cylinder calculation is 4196 * accurate if sectors_per_cylinder is a power of 4197 * 2. Otherwise it might be slightly off -- you 4198 * might have a bit of a truncation problem. 4199 */ 4200 #ifdef __XSCALE__ 4201 cylinders = (lun->be_lun->maxlba + 1) / 4202 sectors_per_cylinder; 4203 #else 4204 for (shift = 31; shift > 0; shift--) { 4205 if (sectors_per_cylinder & (1 << shift)) 4206 break; 4207 } 4208 cylinders = (lun->be_lun->maxlba + 1) >> shift; 4209 #endif 4210 4211 /* 4212 * We've basically got 3 bytes, or 24 bits for the 4213 * cylinder size in the mode page. If we're over, 4214 * just round down to 2^24. 4215 */ 4216 if (cylinders > 0xffffff) 4217 cylinders = 0xffffff; 4218 4219 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4220 CTL_PAGE_DEFAULT]; 4221 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4222 4223 if ((value = ctl_get_opt(&lun->be_lun->options, 4224 "rpm")) != NULL) { 4225 scsi_ulto2b(strtol(value, NULL, 0), 4226 rigid_disk_page->rotation_rate); 4227 } 4228 4229 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 4230 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4231 sizeof(rigid_disk_page_default)); 4232 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 4233 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4234 sizeof(rigid_disk_page_default)); 4235 4236 page_index->page_data = 4237 (uint8_t *)lun->mode_pages.rigid_disk_page; 4238 break; 4239 } 4240 case SMS_CACHING_PAGE: { 4241 struct scsi_caching_page *caching_page; 4242 4243 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 4244 panic("invalid subpage value %d", 4245 page_index->subpage); 4246 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 4247 &caching_page_default, 4248 sizeof(caching_page_default)); 4249 memcpy(&lun->mode_pages.caching_page[ 4250 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 4251 sizeof(caching_page_changeable)); 4252 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4253 &caching_page_default, 4254 sizeof(caching_page_default)); 4255 caching_page = &lun->mode_pages.caching_page[ 4256 CTL_PAGE_SAVED]; 4257 value = ctl_get_opt(&lun->be_lun->options, "writecache"); 4258 if (value != NULL && strcmp(value, "off") == 0) 4259 caching_page->flags1 &= ~SCP_WCE; 4260 value = ctl_get_opt(&lun->be_lun->options, "readcache"); 4261 if (value != NULL && strcmp(value, "off") == 0) 4262 caching_page->flags1 |= SCP_RCD; 4263 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 4264 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4265 sizeof(caching_page_default)); 4266 page_index->page_data = 4267 (uint8_t *)lun->mode_pages.caching_page; 4268 break; 4269 } 4270 case SMS_CONTROL_MODE_PAGE: { 4271 struct scsi_control_page *control_page; 4272 4273 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 4274 panic("invalid subpage value %d", 4275 page_index->subpage); 4276 4277 memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT], 
4278 &control_page_default, 4279 sizeof(control_page_default)); 4280 memcpy(&lun->mode_pages.control_page[ 4281 CTL_PAGE_CHANGEABLE], &control_page_changeable, 4282 sizeof(control_page_changeable)); 4283 memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED], 4284 &control_page_default, 4285 sizeof(control_page_default)); 4286 control_page = &lun->mode_pages.control_page[ 4287 CTL_PAGE_SAVED]; 4288 value = ctl_get_opt(&lun->be_lun->options, "reordering"); 4289 if (value != NULL && strcmp(value, "unrestricted") == 0) { 4290 control_page->queue_flags &= ~SCP_QUEUE_ALG_MASK; 4291 control_page->queue_flags |= SCP_QUEUE_ALG_UNRESTRICTED; 4292 } 4293 memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT], 4294 &lun->mode_pages.control_page[CTL_PAGE_SAVED], 4295 sizeof(control_page_default)); 4296 page_index->page_data = 4297 (uint8_t *)lun->mode_pages.control_page; 4298 break; 4299 4300 } 4301 case SMS_INFO_EXCEPTIONS_PAGE: { 4302 switch (page_index->subpage) { 4303 case SMS_SUBPAGE_PAGE_0: 4304 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 4305 &ie_page_default, 4306 sizeof(ie_page_default)); 4307 memcpy(&lun->mode_pages.ie_page[ 4308 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 4309 sizeof(ie_page_changeable)); 4310 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 4311 &ie_page_default, 4312 sizeof(ie_page_default)); 4313 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 4314 &ie_page_default, 4315 sizeof(ie_page_default)); 4316 page_index->page_data = 4317 (uint8_t *)lun->mode_pages.ie_page; 4318 break; 4319 case 0x02: { 4320 struct ctl_logical_block_provisioning_page *page; 4321 4322 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 4323 &lbp_page_default, 4324 sizeof(lbp_page_default)); 4325 memcpy(&lun->mode_pages.lbp_page[ 4326 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 4327 sizeof(lbp_page_changeable)); 4328 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4329 &lbp_page_default, 4330 sizeof(lbp_page_default)); 4331 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 4332 value = ctl_get_opt(&lun->be_lun->options, 4333 "avail-threshold"); 4334 if (value != NULL && 4335 ctl_expand_number(value, &ival) == 0) { 4336 page->descr[0].flags |= SLBPPD_ENABLED | 4337 SLBPPD_ARMING_DEC; 4338 if (lun->be_lun->blocksize) 4339 ival /= lun->be_lun->blocksize; 4340 else 4341 ival /= 512; 4342 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4343 page->descr[0].count); 4344 } 4345 value = ctl_get_opt(&lun->be_lun->options, 4346 "used-threshold"); 4347 if (value != NULL && 4348 ctl_expand_number(value, &ival) == 0) { 4349 page->descr[1].flags |= SLBPPD_ENABLED | 4350 SLBPPD_ARMING_INC; 4351 if (lun->be_lun->blocksize) 4352 ival /= lun->be_lun->blocksize; 4353 else 4354 ival /= 512; 4355 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4356 page->descr[1].count); 4357 } 4358 value = ctl_get_opt(&lun->be_lun->options, 4359 "pool-avail-threshold"); 4360 if (value != NULL && 4361 ctl_expand_number(value, &ival) == 0) { 4362 page->descr[2].flags |= SLBPPD_ENABLED | 4363 SLBPPD_ARMING_DEC; 4364 if (lun->be_lun->blocksize) 4365 ival /= lun->be_lun->blocksize; 4366 else 4367 ival /= 512; 4368 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4369 page->descr[2].count); 4370 } 4371 value = ctl_get_opt(&lun->be_lun->options, 4372 "pool-used-threshold"); 4373 if (value != NULL && 4374 ctl_expand_number(value, &ival) == 0) { 4375 page->descr[3].flags |= SLBPPD_ENABLED | 4376 SLBPPD_ARMING_INC; 4377 if (lun->be_lun->blocksize) 4378 ival /= lun->be_lun->blocksize; 4379 else 4380 ival /= 512; 4381 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4382 
page->descr[3].count); 4383 } 4384 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 4385 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4386 sizeof(lbp_page_default)); 4387 page_index->page_data = 4388 (uint8_t *)lun->mode_pages.lbp_page; 4389 }} 4390 break; 4391 } 4392 case SMS_VENDOR_SPECIFIC_PAGE:{ 4393 switch (page_index->subpage) { 4394 case DBGCNF_SUBPAGE_CODE: { 4395 struct copan_debugconf_subpage *current_page, 4396 *saved_page; 4397 4398 memcpy(&lun->mode_pages.debugconf_subpage[ 4399 CTL_PAGE_CURRENT], 4400 &debugconf_page_default, 4401 sizeof(debugconf_page_default)); 4402 memcpy(&lun->mode_pages.debugconf_subpage[ 4403 CTL_PAGE_CHANGEABLE], 4404 &debugconf_page_changeable, 4405 sizeof(debugconf_page_changeable)); 4406 memcpy(&lun->mode_pages.debugconf_subpage[ 4407 CTL_PAGE_DEFAULT], 4408 &debugconf_page_default, 4409 sizeof(debugconf_page_default)); 4410 memcpy(&lun->mode_pages.debugconf_subpage[ 4411 CTL_PAGE_SAVED], 4412 &debugconf_page_default, 4413 sizeof(debugconf_page_default)); 4414 page_index->page_data = 4415 (uint8_t *)lun->mode_pages.debugconf_subpage; 4416 4417 current_page = (struct copan_debugconf_subpage *) 4418 (page_index->page_data + 4419 (page_index->page_len * 4420 CTL_PAGE_CURRENT)); 4421 saved_page = (struct copan_debugconf_subpage *) 4422 (page_index->page_data + 4423 (page_index->page_len * 4424 CTL_PAGE_SAVED)); 4425 break; 4426 } 4427 default: 4428 panic("invalid subpage value %d", 4429 page_index->subpage); 4430 break; 4431 } 4432 break; 4433 } 4434 default: 4435 panic("invalid page value %d", 4436 page_index->page_code & SMPH_PC_MASK); 4437 break; 4438 } 4439 } 4440 4441 return (CTL_RETVAL_COMPLETE); 4442 } 4443 4444 static int 4445 ctl_init_log_page_index(struct ctl_lun *lun) 4446 { 4447 struct ctl_page_index *page_index; 4448 int i, j, k, prev; 4449 4450 memcpy(&lun->log_pages.index, log_page_index_template, 4451 sizeof(log_page_index_template)); 4452 4453 prev = -1; 4454 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 4455 4456 page_index = &lun->log_pages.index[i]; 4457 /* 4458 * If this is a disk-only mode page, there's no point in 4459 * setting it up. For some pages, we have to have some 4460 * basic information about the disk in order to calculate the 4461 * mode page data. 
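 * The loop below also builds the data for the LOG SENSE
 * "supported pages" lists: each distinct page code is collected
 * into pages_page[] and each page/subpage pair into
 * subpages_page[], and the logical block provisioning log page is
 * skipped entirely for backends that do not provide a lun_attr
 * method.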
4462 */ 4463 if ((lun->be_lun->lun_type != T_DIRECT) 4464 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 4465 continue; 4466 4467 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 4468 lun->backend->lun_attr == NULL) 4469 continue; 4470 4471 if (page_index->page_code != prev) { 4472 lun->log_pages.pages_page[j] = page_index->page_code; 4473 prev = page_index->page_code; 4474 j++; 4475 } 4476 lun->log_pages.subpages_page[k*2] = page_index->page_code; 4477 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 4478 k++; 4479 } 4480 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 4481 lun->log_pages.index[0].page_len = j; 4482 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 4483 lun->log_pages.index[1].page_len = k * 2; 4484 lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0]; 4485 lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS; 4486 4487 return (CTL_RETVAL_COMPLETE); 4488 } 4489 4490 static int 4491 hex2bin(const char *str, uint8_t *buf, int buf_size) 4492 { 4493 int i; 4494 u_char c; 4495 4496 memset(buf, 0, buf_size); 4497 while (isspace(str[0])) 4498 str++; 4499 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) 4500 str += 2; 4501 buf_size *= 2; 4502 for (i = 0; str[i] != 0 && i < buf_size; i++) { 4503 c = str[i]; 4504 if (isdigit(c)) 4505 c -= '0'; 4506 else if (isalpha(c)) 4507 c -= isupper(c) ? 'A' - 10 : 'a' - 10; 4508 else 4509 break; 4510 if (c >= 16) 4511 break; 4512 if ((i & 1) == 0) 4513 buf[i / 2] |= (c << 4); 4514 else 4515 buf[i / 2] |= c; 4516 } 4517 return ((i + 1) / 2); 4518 } 4519 4520 /* 4521 * LUN allocation. 4522 * 4523 * Requirements: 4524 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 4525 * wants us to allocate the LUN and he can block. 4526 * - ctl_softc is always set 4527 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 4528 * 4529 * Returns 0 for success, non-zero (errno) for failure. 4530 */ 4531 static int 4532 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4533 struct ctl_be_lun *const be_lun, struct ctl_id target_id) 4534 { 4535 struct ctl_lun *nlun, *lun; 4536 struct ctl_port *port; 4537 struct scsi_vpd_id_descriptor *desc; 4538 struct scsi_vpd_id_t10 *t10id; 4539 const char *eui, *naa, *scsiname, *vendor, *value; 4540 int lun_number, i, lun_malloced; 4541 int devidlen, idlen1, idlen2 = 0, len; 4542 4543 if (be_lun == NULL) 4544 return (EINVAL); 4545 4546 /* 4547 * We currently only support Direct Access or Processor LUN types. 4548 */ 4549 switch (be_lun->lun_type) { 4550 case T_DIRECT: 4551 break; 4552 case T_PROCESSOR: 4553 break; 4554 case T_SEQUENTIAL: 4555 case T_CHANGER: 4556 default: 4557 be_lun->lun_config_status(be_lun->be_lun, 4558 CTL_LUN_CONFIG_FAILURE); 4559 break; 4560 } 4561 if (ctl_lun == NULL) { 4562 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4563 lun_malloced = 1; 4564 } else { 4565 lun_malloced = 0; 4566 lun = ctl_lun; 4567 } 4568 4569 memset(lun, 0, sizeof(*lun)); 4570 if (lun_malloced) 4571 lun->flags = CTL_LUN_MALLOCED; 4572 4573 /* Generate LUN ID. 
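 * The result is the data for the SCSI device identification VPD
 * page: a T10 vendor ID descriptor built from the backend's
 * device_id (with the vendor defaulting to CTL_VENDOR), plus
 * optional SCSI name string, EUI-64 and NAA descriptors taken
 * from the "scsiname", "eui" and "naa" LUN options when present.
 * As a purely illustrative, made-up example, an option value such
 * as
 *
 *	naa = "0x600144f000000000c0ffee0000000001"
 *
 * would be parsed by hex2bin() below into a 16-byte NAA
 * descriptor; how such options are actually supplied is up to the
 * backend configuration, not this file.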
*/ 4574 devidlen = max(CTL_DEVID_MIN_LEN, 4575 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4576 idlen1 = sizeof(*t10id) + devidlen; 4577 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4578 scsiname = ctl_get_opt(&be_lun->options, "scsiname"); 4579 if (scsiname != NULL) { 4580 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4581 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4582 } 4583 eui = ctl_get_opt(&be_lun->options, "eui"); 4584 if (eui != NULL) { 4585 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4586 } 4587 naa = ctl_get_opt(&be_lun->options, "naa"); 4588 if (naa != NULL) { 4589 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4590 } 4591 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4592 M_CTL, M_WAITOK | M_ZERO); 4593 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4594 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4595 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4596 desc->length = idlen1; 4597 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4598 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4599 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { 4600 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4601 } else { 4602 strncpy(t10id->vendor, vendor, 4603 min(sizeof(t10id->vendor), strlen(vendor))); 4604 } 4605 strncpy((char *)t10id->vendor_spec_id, 4606 (char *)be_lun->device_id, devidlen); 4607 if (scsiname != NULL) { 4608 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4609 desc->length); 4610 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4611 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4612 SVPD_ID_TYPE_SCSI_NAME; 4613 desc->length = idlen2; 4614 strlcpy(desc->identifier, scsiname, idlen2); 4615 } 4616 if (eui != NULL) { 4617 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4618 desc->length); 4619 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4620 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4621 SVPD_ID_TYPE_EUI64; 4622 desc->length = hex2bin(eui, desc->identifier, 16); 4623 desc->length = desc->length > 12 ? 16 : 4624 (desc->length > 8 ? 12 : 8); 4625 len -= 16 - desc->length; 4626 } 4627 if (naa != NULL) { 4628 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4629 desc->length); 4630 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4631 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4632 SVPD_ID_TYPE_NAA; 4633 desc->length = hex2bin(naa, desc->identifier, 16); 4634 desc->length = desc->length > 8 ? 16 : 8; 4635 len -= 16 - desc->length; 4636 } 4637 lun->lun_devid->len = len; 4638 4639 mtx_lock(&ctl_softc->ctl_lock); 4640 /* 4641 * See if the caller requested a particular LUN number. If so, see 4642 * if it is available. Otherwise, allocate the first available LUN. 4643 */ 4644 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4645 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4646 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4647 mtx_unlock(&ctl_softc->ctl_lock); 4648 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4649 printf("ctl: requested LUN ID %d is higher " 4650 "than CTL_MAX_LUNS - 1 (%d)\n", 4651 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4652 } else { 4653 /* 4654 * XXX KDM return an error, or just assign 4655 * another LUN ID in this case?? 
4656 */ 4657 printf("ctl: requested LUN ID %d is already " 4658 "in use\n", be_lun->req_lun_id); 4659 } 4660 if (lun->flags & CTL_LUN_MALLOCED) 4661 free(lun, M_CTL); 4662 be_lun->lun_config_status(be_lun->be_lun, 4663 CTL_LUN_CONFIG_FAILURE); 4664 return (ENOSPC); 4665 } 4666 lun_number = be_lun->req_lun_id; 4667 } else { 4668 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS); 4669 if (lun_number == -1) { 4670 mtx_unlock(&ctl_softc->ctl_lock); 4671 printf("ctl: can't allocate LUN on target %ju, out of " 4672 "LUNs\n", (uintmax_t)target_id.id); 4673 if (lun->flags & CTL_LUN_MALLOCED) 4674 free(lun, M_CTL); 4675 be_lun->lun_config_status(be_lun->be_lun, 4676 CTL_LUN_CONFIG_FAILURE); 4677 return (ENOSPC); 4678 } 4679 } 4680 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4681 4682 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4683 lun->target = target_id; 4684 lun->lun = lun_number; 4685 lun->be_lun = be_lun; 4686 /* 4687 * The processor LUN is always enabled. Disk LUNs come on line 4688 * disabled, and must be enabled by the backend. 4689 */ 4690 lun->flags |= CTL_LUN_DISABLED; 4691 lun->backend = be_lun->be; 4692 be_lun->ctl_lun = lun; 4693 be_lun->lun_id = lun_number; 4694 atomic_add_int(&be_lun->be->num_luns, 1); 4695 if (be_lun->flags & CTL_LUN_FLAG_OFFLINE) 4696 lun->flags |= CTL_LUN_OFFLINE; 4697 4698 if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF) 4699 lun->flags |= CTL_LUN_STOPPED; 4700 4701 if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE) 4702 lun->flags |= CTL_LUN_INOPERABLE; 4703 4704 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4705 lun->flags |= CTL_LUN_PRIMARY_SC; 4706 4707 value = ctl_get_opt(&be_lun->options, "readonly"); 4708 if (value != NULL && strcmp(value, "on") == 0) 4709 lun->flags |= CTL_LUN_READONLY; 4710 4711 lun->serseq = CTL_LUN_SERSEQ_OFF; 4712 if (be_lun->flags & CTL_LUN_FLAG_SERSEQ_READ) 4713 lun->serseq = CTL_LUN_SERSEQ_READ; 4714 value = ctl_get_opt(&be_lun->options, "serseq"); 4715 if (value != NULL && strcmp(value, "on") == 0) 4716 lun->serseq = CTL_LUN_SERSEQ_ON; 4717 else if (value != NULL && strcmp(value, "read") == 0) 4718 lun->serseq = CTL_LUN_SERSEQ_READ; 4719 else if (value != NULL && strcmp(value, "off") == 0) 4720 lun->serseq = CTL_LUN_SERSEQ_OFF; 4721 4722 lun->ctl_softc = ctl_softc; 4723 TAILQ_INIT(&lun->ooa_queue); 4724 TAILQ_INIT(&lun->blocked_queue); 4725 STAILQ_INIT(&lun->error_list); 4726 ctl_tpc_lun_init(lun); 4727 4728 /* 4729 * Initialize the mode and log page index. 4730 */ 4731 ctl_init_page_index(lun); 4732 ctl_init_log_page_index(lun); 4733 4734 /* 4735 * Now, before we insert this lun on the lun list, set the lun 4736 * inventory changed UA for all other luns. 
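 * Each of those LUNs will report the unit attention (typically
 * surfaced to hosts as "REPORTED LUNS DATA HAS CHANGED") to every
 * initiator, which is what prompts them to re-scan the target and
 * discover the LUN we are adding here.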
4737 */ 4738 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4739 mtx_lock(&nlun->lun_lock); 4740 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4741 mtx_unlock(&nlun->lun_lock); 4742 } 4743 4744 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4745 4746 ctl_softc->ctl_luns[lun_number] = lun; 4747 4748 ctl_softc->num_luns++; 4749 4750 /* Setup statistics gathering */ 4751 lun->stats.device_type = be_lun->lun_type; 4752 lun->stats.lun_number = lun_number; 4753 if (lun->stats.device_type == T_DIRECT) 4754 lun->stats.blocksize = be_lun->blocksize; 4755 else 4756 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4757 for (i = 0;i < CTL_MAX_PORTS;i++) 4758 lun->stats.ports[i].targ_port = i; 4759 4760 mtx_unlock(&ctl_softc->ctl_lock); 4761 4762 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4763 4764 /* 4765 * Run through each registered FETD and bring it online if it isn't 4766 * already. Enable the target ID if it hasn't been enabled, and 4767 * enable this particular LUN. 4768 */ 4769 STAILQ_FOREACH(port, &ctl_softc->port_list, links) { 4770 int retval; 4771 4772 retval = port->lun_enable(port->targ_lun_arg, target_id,lun_number); 4773 if (retval != 0) { 4774 printf("ctl_alloc_lun: FETD %s port %d returned error " 4775 "%d for lun_enable on target %ju lun %d\n", 4776 port->port_name, port->targ_port, retval, 4777 (uintmax_t)target_id.id, lun_number); 4778 } else 4779 port->status |= CTL_PORT_STATUS_LUN_ONLINE; 4780 } 4781 return (0); 4782 } 4783 4784 /* 4785 * Delete a LUN. 4786 * Assumptions: 4787 * - LUN has already been marked invalid and any pending I/O has been taken 4788 * care of. 4789 */ 4790 static int 4791 ctl_free_lun(struct ctl_lun *lun) 4792 { 4793 struct ctl_softc *softc; 4794 struct ctl_port *port; 4795 struct ctl_lun *nlun; 4796 int i; 4797 4798 softc = lun->ctl_softc; 4799 4800 mtx_assert(&softc->ctl_lock, MA_OWNED); 4801 4802 STAILQ_FOREACH(port, &softc->port_list, links) 4803 ctl_lun_map_unsetg(port, lun->lun); 4804 4805 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4806 4807 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4808 4809 softc->ctl_luns[lun->lun] = NULL; 4810 4811 if (!TAILQ_EMPTY(&lun->ooa_queue)) 4812 panic("Freeing a LUN %p with outstanding I/O!!\n", lun); 4813 4814 softc->num_luns--; 4815 4816 /* 4817 * XXX KDM this scheme only works for a single target/multiple LUN 4818 * setup. It needs to be revamped for a multiple target scheme. 4819 * 4820 * XXX KDM this results in port->lun_disable() getting called twice, 4821 * once when ctl_disable_lun() is called, and a second time here. 4822 * We really need to re-think the LUN disable semantics. There 4823 * should probably be several steps/levels to LUN removal: 4824 * - disable 4825 * - invalidate 4826 * - free 4827 * 4828 * Right now we only have a disable method when communicating to 4829 * the front end ports, at least for individual LUNs. 
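 * As a sketch only (error handling omitted, and assuming the
 * backend drives the teardown), the sequence that works with the
 * functions that exist today is:
 *
 *	ctl_disable_lun(be_lun);	-- stop accepting new I/O
 *	ctl_invalidate_lun(be_lun);	-- mark invalid; the LUN is
 *					   freed once its OOA queue
 *					   drains
 *
 * with ctl_free_lun() being called from ctl_invalidate_lun() or
 * from the I/O completion path rather than by the backend itself.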
4830 */ 4831 #if 0 4832 STAILQ_FOREACH(port, &softc->port_list, links) { 4833 int retval; 4834 4835 retval = port->lun_disable(port->targ_lun_arg, lun->target, 4836 lun->lun); 4837 if (retval != 0) { 4838 printf("ctl_free_lun: FETD %s port %d returned error " 4839 "%d for lun_disable on target %ju lun %jd\n", 4840 port->port_name, port->targ_port, retval, 4841 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4842 } 4843 4844 if (STAILQ_FIRST(&softc->lun_list) == NULL) { 4845 port->status &= ~CTL_PORT_STATUS_LUN_ONLINE; 4846 4847 retval = port->targ_disable(port->targ_lun_arg,lun->target); 4848 if (retval != 0) { 4849 printf("ctl_free_lun: FETD %s port %d " 4850 "returned error %d for targ_disable on " 4851 "target %ju\n", port->port_name, 4852 port->targ_port, retval, 4853 (uintmax_t)lun->target.id); 4854 } else 4855 port->status &= ~CTL_PORT_STATUS_TARG_ONLINE; 4856 4857 if ((port->status & CTL_PORT_STATUS_TARG_ONLINE) != 0) 4858 continue; 4859 4860 #if 0 4861 port->port_offline(port->onoff_arg); 4862 port->status &= ~CTL_PORT_STATUS_ONLINE; 4863 #endif 4864 } 4865 } 4866 #endif 4867 4868 /* 4869 * Tell the backend to free resources, if this LUN has a backend. 4870 */ 4871 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4872 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4873 4874 ctl_tpc_lun_shutdown(lun); 4875 mtx_destroy(&lun->lun_lock); 4876 free(lun->lun_devid, M_CTL); 4877 for (i = 0; i < CTL_MAX_PORTS; i++) 4878 free(lun->pending_ua[i], M_CTL); 4879 for (i = 0; i < 2 * CTL_MAX_PORTS; i++) 4880 free(lun->pr_keys[i], M_CTL); 4881 free(lun->write_buffer, M_CTL); 4882 if (lun->flags & CTL_LUN_MALLOCED) 4883 free(lun, M_CTL); 4884 4885 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4886 mtx_lock(&nlun->lun_lock); 4887 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4888 mtx_unlock(&nlun->lun_lock); 4889 } 4890 4891 return (0); 4892 } 4893 4894 static void 4895 ctl_create_lun(struct ctl_be_lun *be_lun) 4896 { 4897 struct ctl_softc *softc; 4898 4899 softc = control_softc; 4900 4901 /* 4902 * ctl_alloc_lun() should handle all potential failure cases. 4903 */ 4904 ctl_alloc_lun(softc, NULL, be_lun, softc->target); 4905 } 4906 4907 int 4908 ctl_add_lun(struct ctl_be_lun *be_lun) 4909 { 4910 struct ctl_softc *softc = control_softc; 4911 4912 mtx_lock(&softc->ctl_lock); 4913 STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links); 4914 mtx_unlock(&softc->ctl_lock); 4915 wakeup(&softc->pending_lun_queue); 4916 4917 return (0); 4918 } 4919 4920 int 4921 ctl_enable_lun(struct ctl_be_lun *be_lun) 4922 { 4923 struct ctl_softc *softc; 4924 struct ctl_port *port, *nport; 4925 struct ctl_lun *lun; 4926 int retval; 4927 4928 lun = (struct ctl_lun *)be_lun->ctl_lun; 4929 softc = lun->ctl_softc; 4930 4931 mtx_lock(&softc->ctl_lock); 4932 mtx_lock(&lun->lun_lock); 4933 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4934 /* 4935 * eh? Why did we get called if the LUN is already 4936 * enabled? 4937 */ 4938 mtx_unlock(&lun->lun_lock); 4939 mtx_unlock(&softc->ctl_lock); 4940 return (0); 4941 } 4942 lun->flags &= ~CTL_LUN_DISABLED; 4943 mtx_unlock(&lun->lun_lock); 4944 4945 for (port = STAILQ_FIRST(&softc->port_list); port != NULL; port = nport) { 4946 nport = STAILQ_NEXT(port, links); 4947 4948 /* 4949 * Drop the lock while we call the FETD's enable routine. 4950 * This can lead to a callback into CTL (at least in the 4951 * case of the internal initiator frontend. 
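 * Because of that, the next list entry is saved in 'nport' before
 * the lock is dropped, so the walk stays valid even if the
 * lun_enable() callback re-enters CTL.  If ports could be removed
 * concurrently this would still need re-validation, the same open
 * issue noted in ctl_disable_lun() below.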
4952 */ 4953 mtx_unlock(&softc->ctl_lock); 4954 retval = port->lun_enable(port->targ_lun_arg, lun->target,lun->lun); 4955 mtx_lock(&softc->ctl_lock); 4956 if (retval != 0) { 4957 printf("%s: FETD %s port %d returned error " 4958 "%d for lun_enable on target %ju lun %jd\n", 4959 __func__, port->port_name, port->targ_port, retval, 4960 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4961 } 4962 #if 0 4963 else { 4964 /* NOTE: TODO: why does lun enable affect port status? */ 4965 port->status |= CTL_PORT_STATUS_LUN_ONLINE; 4966 } 4967 #endif 4968 } 4969 4970 mtx_unlock(&softc->ctl_lock); 4971 4972 return (0); 4973 } 4974 4975 int 4976 ctl_disable_lun(struct ctl_be_lun *be_lun) 4977 { 4978 struct ctl_softc *softc; 4979 struct ctl_port *port; 4980 struct ctl_lun *lun; 4981 int retval; 4982 4983 lun = (struct ctl_lun *)be_lun->ctl_lun; 4984 softc = lun->ctl_softc; 4985 4986 mtx_lock(&softc->ctl_lock); 4987 mtx_lock(&lun->lun_lock); 4988 if (lun->flags & CTL_LUN_DISABLED) { 4989 mtx_unlock(&lun->lun_lock); 4990 mtx_unlock(&softc->ctl_lock); 4991 return (0); 4992 } 4993 lun->flags |= CTL_LUN_DISABLED; 4994 mtx_unlock(&lun->lun_lock); 4995 4996 STAILQ_FOREACH(port, &softc->port_list, links) { 4997 mtx_unlock(&softc->ctl_lock); 4998 /* 4999 * Drop the lock before we call the frontend's disable 5000 * routine, to avoid lock order reversals. 5001 * 5002 * XXX KDM what happens if the frontend list changes while 5003 * we're traversing it? It's unlikely, but should be handled. 5004 */ 5005 retval = port->lun_disable(port->targ_lun_arg, lun->target, 5006 lun->lun); 5007 mtx_lock(&softc->ctl_lock); 5008 if (retval != 0) { 5009 printf("ctl_alloc_lun: FETD %s port %d returned error " 5010 "%d for lun_disable on target %ju lun %jd\n", 5011 port->port_name, port->targ_port, retval, 5012 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 5013 } 5014 } 5015 5016 mtx_unlock(&softc->ctl_lock); 5017 5018 return (0); 5019 } 5020 5021 int 5022 ctl_start_lun(struct ctl_be_lun *be_lun) 5023 { 5024 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5025 5026 mtx_lock(&lun->lun_lock); 5027 lun->flags &= ~CTL_LUN_STOPPED; 5028 mtx_unlock(&lun->lun_lock); 5029 return (0); 5030 } 5031 5032 int 5033 ctl_stop_lun(struct ctl_be_lun *be_lun) 5034 { 5035 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5036 5037 mtx_lock(&lun->lun_lock); 5038 lun->flags |= CTL_LUN_STOPPED; 5039 mtx_unlock(&lun->lun_lock); 5040 return (0); 5041 } 5042 5043 int 5044 ctl_lun_offline(struct ctl_be_lun *be_lun) 5045 { 5046 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5047 5048 mtx_lock(&lun->lun_lock); 5049 lun->flags |= CTL_LUN_OFFLINE; 5050 mtx_unlock(&lun->lun_lock); 5051 return (0); 5052 } 5053 5054 int 5055 ctl_lun_online(struct ctl_be_lun *be_lun) 5056 { 5057 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5058 5059 mtx_lock(&lun->lun_lock); 5060 lun->flags &= ~CTL_LUN_OFFLINE; 5061 mtx_unlock(&lun->lun_lock); 5062 return (0); 5063 } 5064 5065 int 5066 ctl_invalidate_lun(struct ctl_be_lun *be_lun) 5067 { 5068 struct ctl_softc *softc; 5069 struct ctl_lun *lun; 5070 5071 lun = (struct ctl_lun *)be_lun->ctl_lun; 5072 softc = lun->ctl_softc; 5073 5074 mtx_lock(&lun->lun_lock); 5075 5076 /* 5077 * The LUN needs to be disabled before it can be marked invalid. 5078 */ 5079 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 5080 mtx_unlock(&lun->lun_lock); 5081 return (-1); 5082 } 5083 /* 5084 * Mark the LUN invalid. 
5085 */ 5086 lun->flags |= CTL_LUN_INVALID; 5087 5088 /* 5089 * If there is nothing in the OOA queue, go ahead and free the LUN. 5090 * If we have something in the OOA queue, we'll free it when the 5091 * last I/O completes. 5092 */ 5093 if (TAILQ_EMPTY(&lun->ooa_queue)) { 5094 mtx_unlock(&lun->lun_lock); 5095 mtx_lock(&softc->ctl_lock); 5096 ctl_free_lun(lun); 5097 mtx_unlock(&softc->ctl_lock); 5098 } else 5099 mtx_unlock(&lun->lun_lock); 5100 5101 return (0); 5102 } 5103 5104 int 5105 ctl_lun_inoperable(struct ctl_be_lun *be_lun) 5106 { 5107 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5108 5109 mtx_lock(&lun->lun_lock); 5110 lun->flags |= CTL_LUN_INOPERABLE; 5111 mtx_unlock(&lun->lun_lock); 5112 return (0); 5113 } 5114 5115 int 5116 ctl_lun_operable(struct ctl_be_lun *be_lun) 5117 { 5118 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5119 5120 mtx_lock(&lun->lun_lock); 5121 lun->flags &= ~CTL_LUN_INOPERABLE; 5122 mtx_unlock(&lun->lun_lock); 5123 return (0); 5124 } 5125 5126 void 5127 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 5128 { 5129 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5130 5131 mtx_lock(&lun->lun_lock); 5132 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED); 5133 mtx_unlock(&lun->lun_lock); 5134 } 5135 5136 /* 5137 * Backend "memory move is complete" callback for requests that never 5138 * make it down to say RAIDCore's configuration code. 5139 */ 5140 int 5141 ctl_config_move_done(union ctl_io *io) 5142 { 5143 int retval; 5144 5145 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 5146 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 5147 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 5148 5149 if ((io->io_hdr.port_status != 0) && 5150 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5151 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5152 /* 5153 * For hardware error sense keys, the sense key 5154 * specific value is defined to be a retry count, 5155 * but we use it to pass back an internal FETD 5156 * error code. XXX KDM Hopefully the FETD is only 5157 * using 16 bits for an error code, since that's 5158 * all the space we have in the sks field. 5159 */ 5160 ctl_set_internal_failure(&io->scsiio, 5161 /*sks_valid*/ 1, 5162 /*retry_count*/ 5163 io->io_hdr.port_status); 5164 } 5165 5166 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 5167 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5168 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 5169 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 5170 /* 5171 * XXX KDM just assuming a single pointer here, and not a 5172 * S/G list. If we start using S/G lists for config data, 5173 * we'll need to know how to clean them up here as well. 5174 */ 5175 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5176 free(io->scsiio.kern_data_ptr, M_CTL); 5177 ctl_done(io); 5178 retval = CTL_RETVAL_COMPLETE; 5179 } else { 5180 /* 5181 * XXX KDM now we need to continue data movement. Some 5182 * options: 5183 * - call ctl_scsiio() again? We don't do this for data 5184 * writes, because for those at least we know ahead of 5185 * time where the write will go and how long it is. For 5186 * config writes, though, that information is largely 5187 * contained within the write itself, thus we need to 5188 * parse out the data again. 5189 * 5190 * - Call some other function once the data is in? 
5191 */ 5192 if (ctl_debug & CTL_DEBUG_CDB_DATA) 5193 ctl_data_print(io); 5194 5195 /* 5196 * XXX KDM call ctl_scsiio() again for now, and check flag 5197 * bits to see whether we're allocated or not. 5198 */ 5199 retval = ctl_scsiio(&io->scsiio); 5200 } 5201 return (retval); 5202 } 5203 5204 /* 5205 * This gets called by a backend driver when it is done with a 5206 * data_submit method. 5207 */ 5208 void 5209 ctl_data_submit_done(union ctl_io *io) 5210 { 5211 /* 5212 * If the IO_CONT flag is set, we need to call the supplied 5213 * function to continue processing the I/O, instead of completing 5214 * the I/O just yet. 5215 * 5216 * If there is an error, though, we don't want to keep processing. 5217 * Instead, just send status back to the initiator. 5218 */ 5219 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5220 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5221 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5222 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5223 io->scsiio.io_cont(io); 5224 return; 5225 } 5226 ctl_done(io); 5227 } 5228 5229 /* 5230 * This gets called by a backend driver when it is done with a 5231 * configuration write. 5232 */ 5233 void 5234 ctl_config_write_done(union ctl_io *io) 5235 { 5236 uint8_t *buf; 5237 5238 /* 5239 * If the IO_CONT flag is set, we need to call the supplied 5240 * function to continue processing the I/O, instead of completing 5241 * the I/O just yet. 5242 * 5243 * If there is an error, though, we don't want to keep processing. 5244 * Instead, just send status back to the initiator. 5245 */ 5246 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5247 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5248 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5249 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5250 io->scsiio.io_cont(io); 5251 return; 5252 } 5253 /* 5254 * Since a configuration write can be done for commands that actually 5255 * have data allocated, like write buffer, and commands that have 5256 * no data, like start/stop unit, we need to check here. 5257 */ 5258 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5259 buf = io->scsiio.kern_data_ptr; 5260 else 5261 buf = NULL; 5262 ctl_done(io); 5263 if (buf) 5264 free(buf, M_CTL); 5265 } 5266 5267 void 5268 ctl_config_read_done(union ctl_io *io) 5269 { 5270 uint8_t *buf; 5271 5272 /* 5273 * If there is some error -- we are done, skip data transfer. 5274 */ 5275 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 5276 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5277 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 5278 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5279 buf = io->scsiio.kern_data_ptr; 5280 else 5281 buf = NULL; 5282 ctl_done(io); 5283 if (buf) 5284 free(buf, M_CTL); 5285 return; 5286 } 5287 5288 /* 5289 * If the IO_CONT flag is set, we need to call the supplied 5290 * function to continue processing the I/O, instead of completing 5291 * the I/O just yet. 5292 */ 5293 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 5294 io->scsiio.io_cont(io); 5295 return; 5296 } 5297 5298 ctl_datamove(io); 5299 } 5300 5301 /* 5302 * SCSI release command. 
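 * Only the 10-byte form carries the LongID/third-party ID and
 * parameter list length fields, which is why the switch below
 * parses RELEASE_10 specifically and any other RELEASE CDB falls
 * through with length and resv_id left at their zero defaults.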
5303 */ 5304 int 5305 ctl_scsi_release(struct ctl_scsiio *ctsio) 5306 { 5307 int length, longid, thirdparty_id, resv_id; 5308 struct ctl_lun *lun; 5309 uint32_t residx; 5310 5311 length = 0; 5312 resv_id = 0; 5313 5314 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 5315 5316 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 5317 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5318 5319 switch (ctsio->cdb[0]) { 5320 case RELEASE_10: { 5321 struct scsi_release_10 *cdb; 5322 5323 cdb = (struct scsi_release_10 *)ctsio->cdb; 5324 5325 if (cdb->byte2 & SR10_LONGID) 5326 longid = 1; 5327 else 5328 thirdparty_id = cdb->thirdparty_id; 5329 5330 resv_id = cdb->resv_id; 5331 length = scsi_2btoul(cdb->length); 5332 break; 5333 } 5334 } 5335 5336 5337 /* 5338 * XXX KDM right now, we only support LUN reservation. We don't 5339 * support 3rd party reservations, or extent reservations, which 5340 * might actually need the parameter list. If we've gotten this 5341 * far, we've got a LUN reservation. Anything else got kicked out 5342 * above. So, according to SPC, ignore the length. 5343 */ 5344 length = 0; 5345 5346 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5347 && (length > 0)) { 5348 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5349 ctsio->kern_data_len = length; 5350 ctsio->kern_total_len = length; 5351 ctsio->kern_data_resid = 0; 5352 ctsio->kern_rel_offset = 0; 5353 ctsio->kern_sg_entries = 0; 5354 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5355 ctsio->be_move_done = ctl_config_move_done; 5356 ctl_datamove((union ctl_io *)ctsio); 5357 5358 return (CTL_RETVAL_COMPLETE); 5359 } 5360 5361 if (length > 0) 5362 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5363 5364 mtx_lock(&lun->lun_lock); 5365 5366 /* 5367 * According to SPC, it is not an error for an intiator to attempt 5368 * to release a reservation on a LUN that isn't reserved, or that 5369 * is reserved by another initiator. The reservation can only be 5370 * released, though, by the initiator who made it or by one of 5371 * several reset type events. 5372 */ 5373 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 5374 lun->flags &= ~CTL_LUN_RESERVED; 5375 5376 mtx_unlock(&lun->lun_lock); 5377 5378 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5379 free(ctsio->kern_data_ptr, M_CTL); 5380 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5381 } 5382 5383 ctl_set_success(ctsio); 5384 ctl_done((union ctl_io *)ctsio); 5385 return (CTL_RETVAL_COMPLETE); 5386 } 5387 5388 int 5389 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5390 { 5391 int extent, thirdparty, longid; 5392 int resv_id, length; 5393 uint64_t thirdparty_id; 5394 struct ctl_lun *lun; 5395 uint32_t residx; 5396 5397 extent = 0; 5398 thirdparty = 0; 5399 longid = 0; 5400 resv_id = 0; 5401 length = 0; 5402 thirdparty_id = 0; 5403 5404 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5405 5406 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 5407 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5408 5409 switch (ctsio->cdb[0]) { 5410 case RESERVE_10: { 5411 struct scsi_reserve_10 *cdb; 5412 5413 cdb = (struct scsi_reserve_10 *)ctsio->cdb; 5414 5415 if (cdb->byte2 & SR10_LONGID) 5416 longid = 1; 5417 else 5418 thirdparty_id = cdb->thirdparty_id; 5419 5420 resv_id = cdb->resv_id; 5421 length = scsi_2btoul(cdb->length); 5422 break; 5423 } 5424 } 5425 5426 /* 5427 * XXX KDM right now, we only support LUN reservation. We don't 5428 * support 3rd party reservations, or extent reservations, which 5429 * might actually need the parameter list. 
If we've gotten this 5430 * far, we've got a LUN reservation. Anything else got kicked out 5431 * above. So, according to SPC, ignore the length. 5432 */ 5433 length = 0; 5434 5435 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5436 && (length > 0)) { 5437 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5438 ctsio->kern_data_len = length; 5439 ctsio->kern_total_len = length; 5440 ctsio->kern_data_resid = 0; 5441 ctsio->kern_rel_offset = 0; 5442 ctsio->kern_sg_entries = 0; 5443 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5444 ctsio->be_move_done = ctl_config_move_done; 5445 ctl_datamove((union ctl_io *)ctsio); 5446 5447 return (CTL_RETVAL_COMPLETE); 5448 } 5449 5450 if (length > 0) 5451 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5452 5453 mtx_lock(&lun->lun_lock); 5454 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 5455 ctl_set_reservation_conflict(ctsio); 5456 goto bailout; 5457 } 5458 5459 lun->flags |= CTL_LUN_RESERVED; 5460 lun->res_idx = residx; 5461 5462 ctl_set_success(ctsio); 5463 5464 bailout: 5465 mtx_unlock(&lun->lun_lock); 5466 5467 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5468 free(ctsio->kern_data_ptr, M_CTL); 5469 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5470 } 5471 5472 ctl_done((union ctl_io *)ctsio); 5473 return (CTL_RETVAL_COMPLETE); 5474 } 5475 5476 int 5477 ctl_start_stop(struct ctl_scsiio *ctsio) 5478 { 5479 struct scsi_start_stop_unit *cdb; 5480 struct ctl_lun *lun; 5481 int retval; 5482 5483 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5484 5485 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5486 retval = 0; 5487 5488 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5489 5490 /* 5491 * XXX KDM 5492 * We don't support the immediate bit on a stop unit. In order to 5493 * do that, we would need to code up a way to know that a stop is 5494 * pending, and hold off any new commands until it completes, one 5495 * way or another. Then we could accept or reject those commands 5496 * depending on its status. We would almost need to do the reverse 5497 * of what we do below for an immediate start -- return the copy of 5498 * the ctl_io to the FETD with status to send to the host (and to 5499 * free the copy!) and then free the original I/O once the stop 5500 * actually completes. That way, the OOA queue mechanism can work 5501 * to block commands that shouldn't proceed. Another alternative 5502 * would be to put the copy in the queue in place of the original, 5503 * and return the original back to the caller. That could be 5504 * slightly safer.. 5505 */ 5506 if ((cdb->byte2 & SSS_IMMED) 5507 && ((cdb->how & SSS_START) == 0)) { 5508 ctl_set_invalid_field(ctsio, 5509 /*sks_valid*/ 1, 5510 /*command*/ 1, 5511 /*field*/ 1, 5512 /*bit_valid*/ 1, 5513 /*bit*/ 0); 5514 ctl_done((union ctl_io *)ctsio); 5515 return (CTL_RETVAL_COMPLETE); 5516 } 5517 5518 if ((lun->flags & CTL_LUN_PR_RESERVED) 5519 && ((cdb->how & SSS_START)==0)) { 5520 uint32_t residx; 5521 5522 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 5523 if (ctl_get_prkey(lun, residx) == 0 5524 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { 5525 5526 ctl_set_reservation_conflict(ctsio); 5527 ctl_done((union ctl_io *)ctsio); 5528 return (CTL_RETVAL_COMPLETE); 5529 } 5530 } 5531 5532 /* 5533 * If there is no backend on this device, we can't start or stop 5534 * it. In theory we shouldn't get any start/stop commands in the 5535 * first place at this level if the LUN doesn't have a backend. 5536 * That should get stopped by the command decode code. 
5537 */ 5538 if (lun->backend == NULL) { 5539 ctl_set_invalid_opcode(ctsio); 5540 ctl_done((union ctl_io *)ctsio); 5541 return (CTL_RETVAL_COMPLETE); 5542 } 5543 5544 /* 5545 * XXX KDM Copan-specific offline behavior. 5546 * Figure out a reasonable way to port this? 5547 */ 5548 #ifdef NEEDTOPORT 5549 mtx_lock(&lun->lun_lock); 5550 5551 if (((cdb->byte2 & SSS_ONOFFLINE) == 0) 5552 && (lun->flags & CTL_LUN_OFFLINE)) { 5553 /* 5554 * If the LUN is offline, and the on/offline bit isn't set, 5555 * reject the start or stop. Otherwise, let it through. 5556 */ 5557 mtx_unlock(&lun->lun_lock); 5558 ctl_set_lun_not_ready(ctsio); 5559 ctl_done((union ctl_io *)ctsio); 5560 } else { 5561 mtx_unlock(&lun->lun_lock); 5562 #endif /* NEEDTOPORT */ 5563 /* 5564 * This could be a start or a stop when we're online, 5565 * or a stop/offline or start/online. A start or stop when 5566 * we're offline is covered in the case above. 5567 */ 5568 /* 5569 * In the non-immediate case, we send the request to 5570 * the backend and return status to the user when 5571 * it is done. 5572 * 5573 * In the immediate case, we allocate a new ctl_io 5574 * to hold a copy of the request, and send that to 5575 * the backend. We then set good status on the 5576 * user's request and return it immediately. 5577 */ 5578 if (cdb->byte2 & SSS_IMMED) { 5579 union ctl_io *new_io; 5580 5581 new_io = ctl_alloc_io(ctsio->io_hdr.pool); 5582 ctl_copy_io((union ctl_io *)ctsio, new_io); 5583 retval = lun->backend->config_write(new_io); 5584 ctl_set_success(ctsio); 5585 ctl_done((union ctl_io *)ctsio); 5586 } else { 5587 retval = lun->backend->config_write( 5588 (union ctl_io *)ctsio); 5589 } 5590 #ifdef NEEDTOPORT 5591 } 5592 #endif 5593 return (retval); 5594 } 5595 5596 /* 5597 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5598 * we don't really do anything with the LBA and length fields if the user 5599 * passes them in. Instead we'll just flush out the cache for the entire 5600 * LUN. 5601 */ 5602 int 5603 ctl_sync_cache(struct ctl_scsiio *ctsio) 5604 { 5605 struct ctl_lun *lun; 5606 struct ctl_softc *softc; 5607 uint64_t starting_lba; 5608 uint32_t block_count; 5609 int retval; 5610 5611 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5612 5613 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5614 softc = lun->ctl_softc; 5615 retval = 0; 5616 5617 switch (ctsio->cdb[0]) { 5618 case SYNCHRONIZE_CACHE: { 5619 struct scsi_sync_cache *cdb; 5620 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5621 5622 starting_lba = scsi_4btoul(cdb->begin_lba); 5623 block_count = scsi_2btoul(cdb->lb_count); 5624 break; 5625 } 5626 case SYNCHRONIZE_CACHE_16: { 5627 struct scsi_sync_cache_16 *cdb; 5628 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5629 5630 starting_lba = scsi_8btou64(cdb->begin_lba); 5631 block_count = scsi_4btoul(cdb->lb_count); 5632 break; 5633 } 5634 default: 5635 ctl_set_invalid_opcode(ctsio); 5636 ctl_done((union ctl_io *)ctsio); 5637 goto bailout; 5638 break; /* NOTREACHED */ 5639 } 5640 5641 /* 5642 * We check the LBA and length, but don't do anything with them. 5643 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5644 * get flushed. This check will just help satisfy anyone who wants 5645 * to see an error for an out of range LBA. 
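 * For example, on a LUN whose maxlba is 1999 (2000 blocks), a
 * request for 10 blocks at LBA 1990 passes (1990 + 10 == maxlba +
 * 1), while 11 blocks at LBA 1990 is rejected with LBA OUT OF
 * RANGE, even though the flush would have covered the whole LUN
 * either way.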
5646 */ 5647 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5648 ctl_set_lba_out_of_range(ctsio); 5649 ctl_done((union ctl_io *)ctsio); 5650 goto bailout; 5651 } 5652 5653 /* 5654 * If this LUN has no backend, we can't flush the cache anyway. 5655 */ 5656 if (lun->backend == NULL) { 5657 ctl_set_invalid_opcode(ctsio); 5658 ctl_done((union ctl_io *)ctsio); 5659 goto bailout; 5660 } 5661 5662 /* 5663 * Check to see whether we're configured to send the SYNCHRONIZE 5664 * CACHE command directly to the back end. 5665 */ 5666 mtx_lock(&lun->lun_lock); 5667 if ((softc->flags & CTL_FLAG_REAL_SYNC) 5668 && (++(lun->sync_count) >= lun->sync_interval)) { 5669 lun->sync_count = 0; 5670 mtx_unlock(&lun->lun_lock); 5671 retval = lun->backend->config_write((union ctl_io *)ctsio); 5672 } else { 5673 mtx_unlock(&lun->lun_lock); 5674 ctl_set_success(ctsio); 5675 ctl_done((union ctl_io *)ctsio); 5676 } 5677 5678 bailout: 5679 5680 return (retval); 5681 } 5682 5683 int 5684 ctl_format(struct ctl_scsiio *ctsio) 5685 { 5686 struct scsi_format *cdb; 5687 struct ctl_lun *lun; 5688 int length, defect_list_len; 5689 5690 CTL_DEBUG_PRINT(("ctl_format\n")); 5691 5692 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5693 5694 cdb = (struct scsi_format *)ctsio->cdb; 5695 5696 length = 0; 5697 if (cdb->byte2 & SF_FMTDATA) { 5698 if (cdb->byte2 & SF_LONGLIST) 5699 length = sizeof(struct scsi_format_header_long); 5700 else 5701 length = sizeof(struct scsi_format_header_short); 5702 } 5703 5704 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5705 && (length > 0)) { 5706 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5707 ctsio->kern_data_len = length; 5708 ctsio->kern_total_len = length; 5709 ctsio->kern_data_resid = 0; 5710 ctsio->kern_rel_offset = 0; 5711 ctsio->kern_sg_entries = 0; 5712 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5713 ctsio->be_move_done = ctl_config_move_done; 5714 ctl_datamove((union ctl_io *)ctsio); 5715 5716 return (CTL_RETVAL_COMPLETE); 5717 } 5718 5719 defect_list_len = 0; 5720 5721 if (cdb->byte2 & SF_FMTDATA) { 5722 if (cdb->byte2 & SF_LONGLIST) { 5723 struct scsi_format_header_long *header; 5724 5725 header = (struct scsi_format_header_long *) 5726 ctsio->kern_data_ptr; 5727 5728 defect_list_len = scsi_4btoul(header->defect_list_len); 5729 if (defect_list_len != 0) { 5730 ctl_set_invalid_field(ctsio, 5731 /*sks_valid*/ 1, 5732 /*command*/ 0, 5733 /*field*/ 2, 5734 /*bit_valid*/ 0, 5735 /*bit*/ 0); 5736 goto bailout; 5737 } 5738 } else { 5739 struct scsi_format_header_short *header; 5740 5741 header = (struct scsi_format_header_short *) 5742 ctsio->kern_data_ptr; 5743 5744 defect_list_len = scsi_2btoul(header->defect_list_len); 5745 if (defect_list_len != 0) { 5746 ctl_set_invalid_field(ctsio, 5747 /*sks_valid*/ 1, 5748 /*command*/ 0, 5749 /*field*/ 2, 5750 /*bit_valid*/ 0, 5751 /*bit*/ 0); 5752 goto bailout; 5753 } 5754 } 5755 } 5756 5757 /* 5758 * The format command will clear out the "Medium format corrupted" 5759 * status if set by the configuration code. That status is really 5760 * just a way to notify the host that we have lost the media, and 5761 * get them to issue a command that will basically make them think 5762 * they're blowing away the media. 
5763 */ 5764 mtx_lock(&lun->lun_lock); 5765 lun->flags &= ~CTL_LUN_INOPERABLE; 5766 mtx_unlock(&lun->lun_lock); 5767 5768 ctl_set_success(ctsio); 5769 bailout: 5770 5771 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5772 free(ctsio->kern_data_ptr, M_CTL); 5773 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5774 } 5775 5776 ctl_done((union ctl_io *)ctsio); 5777 return (CTL_RETVAL_COMPLETE); 5778 } 5779 5780 int 5781 ctl_read_buffer(struct ctl_scsiio *ctsio) 5782 { 5783 struct scsi_read_buffer *cdb; 5784 struct ctl_lun *lun; 5785 int buffer_offset, len; 5786 static uint8_t descr[4]; 5787 static uint8_t echo_descr[4] = { 0 }; 5788 5789 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5790 5791 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5792 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5793 5794 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA && 5795 (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR && 5796 (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) { 5797 ctl_set_invalid_field(ctsio, 5798 /*sks_valid*/ 1, 5799 /*command*/ 1, 5800 /*field*/ 1, 5801 /*bit_valid*/ 1, 5802 /*bit*/ 4); 5803 ctl_done((union ctl_io *)ctsio); 5804 return (CTL_RETVAL_COMPLETE); 5805 } 5806 5807 len = scsi_3btoul(cdb->length); 5808 buffer_offset = scsi_3btoul(cdb->offset); 5809 5810 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5811 ctl_set_invalid_field(ctsio, 5812 /*sks_valid*/ 1, 5813 /*command*/ 1, 5814 /*field*/ 6, 5815 /*bit_valid*/ 0, 5816 /*bit*/ 0); 5817 ctl_done((union ctl_io *)ctsio); 5818 return (CTL_RETVAL_COMPLETE); 5819 } 5820 5821 if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5822 descr[0] = 0; 5823 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5824 ctsio->kern_data_ptr = descr; 5825 len = min(len, sizeof(descr)); 5826 } else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5827 ctsio->kern_data_ptr = echo_descr; 5828 len = min(len, sizeof(echo_descr)); 5829 } else { 5830 if (lun->write_buffer == NULL) { 5831 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5832 M_CTL, M_WAITOK); 5833 } 5834 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5835 } 5836 ctsio->kern_data_len = len; 5837 ctsio->kern_total_len = len; 5838 ctsio->kern_data_resid = 0; 5839 ctsio->kern_rel_offset = 0; 5840 ctsio->kern_sg_entries = 0; 5841 ctl_set_success(ctsio); 5842 ctsio->be_move_done = ctl_config_move_done; 5843 ctl_datamove((union ctl_io *)ctsio); 5844 return (CTL_RETVAL_COMPLETE); 5845 } 5846 5847 int 5848 ctl_write_buffer(struct ctl_scsiio *ctsio) 5849 { 5850 struct scsi_write_buffer *cdb; 5851 struct ctl_lun *lun; 5852 int buffer_offset, len; 5853 5854 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5855 5856 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5857 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5858 5859 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) { 5860 ctl_set_invalid_field(ctsio, 5861 /*sks_valid*/ 1, 5862 /*command*/ 1, 5863 /*field*/ 1, 5864 /*bit_valid*/ 1, 5865 /*bit*/ 4); 5866 ctl_done((union ctl_io *)ctsio); 5867 return (CTL_RETVAL_COMPLETE); 5868 } 5869 5870 len = scsi_3btoul(cdb->length); 5871 buffer_offset = scsi_3btoul(cdb->offset); 5872 5873 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5874 ctl_set_invalid_field(ctsio, 5875 /*sks_valid*/ 1, 5876 /*command*/ 1, 5877 /*field*/ 6, 5878 /*bit_valid*/ 0, 5879 /*bit*/ 0); 5880 ctl_done((union ctl_io *)ctsio); 5881 return (CTL_RETVAL_COMPLETE); 5882 } 5883 5884 /* 5885 * If we've got a kernel request that hasn't been malloced yet, 5886 * malloc it and tell the caller the data buffer is here. 
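 * This is the usual two-pass pattern for config commands with a
 * parameter list: on the first pass we allocate the buffer, set
 * be_move_done to ctl_config_move_done() and start
 * ctl_datamove(); once the FETD has moved the data,
 * ctl_config_move_done() calls ctl_scsiio() again, this function
 * re-runs with CTL_FLAG_ALLOCATED set, and the second pass acts
 * on the data.  Roughly (sketch only):
 *
 *	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
 *		allocate kern_data_ptr and set the lengths;
 *		ctsio->be_move_done = ctl_config_move_done;
 *		ctl_datamove((union ctl_io *)ctsio);
 *		return (CTL_RETVAL_COMPLETE);
 *	}
 *	use ctsio->kern_data_ptr here on the second pass;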
5887 */ 5888 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5889 if (lun->write_buffer == NULL) { 5890 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5891 M_CTL, M_WAITOK); 5892 } 5893 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5894 ctsio->kern_data_len = len; 5895 ctsio->kern_total_len = len; 5896 ctsio->kern_data_resid = 0; 5897 ctsio->kern_rel_offset = 0; 5898 ctsio->kern_sg_entries = 0; 5899 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5900 ctsio->be_move_done = ctl_config_move_done; 5901 ctl_datamove((union ctl_io *)ctsio); 5902 5903 return (CTL_RETVAL_COMPLETE); 5904 } 5905 5906 ctl_set_success(ctsio); 5907 ctl_done((union ctl_io *)ctsio); 5908 return (CTL_RETVAL_COMPLETE); 5909 } 5910 5911 int 5912 ctl_write_same(struct ctl_scsiio *ctsio) 5913 { 5914 struct ctl_lun *lun; 5915 struct ctl_lba_len_flags *lbalen; 5916 uint64_t lba; 5917 uint32_t num_blocks; 5918 int len, retval; 5919 uint8_t byte2; 5920 5921 retval = CTL_RETVAL_COMPLETE; 5922 5923 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5924 5925 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5926 5927 switch (ctsio->cdb[0]) { 5928 case WRITE_SAME_10: { 5929 struct scsi_write_same_10 *cdb; 5930 5931 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5932 5933 lba = scsi_4btoul(cdb->addr); 5934 num_blocks = scsi_2btoul(cdb->length); 5935 byte2 = cdb->byte2; 5936 break; 5937 } 5938 case WRITE_SAME_16: { 5939 struct scsi_write_same_16 *cdb; 5940 5941 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5942 5943 lba = scsi_8btou64(cdb->addr); 5944 num_blocks = scsi_4btoul(cdb->length); 5945 byte2 = cdb->byte2; 5946 break; 5947 } 5948 default: 5949 /* 5950 * We got a command we don't support. This shouldn't 5951 * happen, commands should be filtered out above us. 5952 */ 5953 ctl_set_invalid_opcode(ctsio); 5954 ctl_done((union ctl_io *)ctsio); 5955 5956 return (CTL_RETVAL_COMPLETE); 5957 break; /* NOTREACHED */ 5958 } 5959 5960 /* NDOB and ANCHOR flags can be used only together with UNMAP */ 5961 if ((byte2 & SWS_UNMAP) == 0 && 5962 (byte2 & (SWS_NDOB | SWS_ANCHOR)) != 0) { 5963 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5964 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5965 ctl_done((union ctl_io *)ctsio); 5966 return (CTL_RETVAL_COMPLETE); 5967 } 5968 5969 /* 5970 * The first check is to make sure we're in bounds, the second 5971 * check is to catch wrap-around problems. If the lba + num blocks 5972 * is less than the lba, then we've wrapped around and the block 5973 * range is invalid anyway. 5974 */ 5975 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5976 || ((lba + num_blocks) < lba)) { 5977 ctl_set_lba_out_of_range(ctsio); 5978 ctl_done((union ctl_io *)ctsio); 5979 return (CTL_RETVAL_COMPLETE); 5980 } 5981 5982 /* Zero number of blocks means "to the last logical block" */ 5983 if (num_blocks == 0) { 5984 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5985 ctl_set_invalid_field(ctsio, 5986 /*sks_valid*/ 0, 5987 /*command*/ 1, 5988 /*field*/ 0, 5989 /*bit_valid*/ 0, 5990 /*bit*/ 0); 5991 ctl_done((union ctl_io *)ctsio); 5992 return (CTL_RETVAL_COMPLETE); 5993 } 5994 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5995 } 5996 5997 len = lun->be_lun->blocksize; 5998 5999 /* 6000 * If we've got a kernel request that hasn't been malloced yet, 6001 * malloc it and tell the caller the data buffer is here. 
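 * For WRITE SAME only a single logical block is transferred (len
 * was set to be_lun->blocksize above), and when the NDOB bit is
 * set no data phase happens at all, so the allocation is skipped.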
 */
	if ((byte2 & SWS_NDOB) == 0 &&
	    (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = num_blocks;
	lbalen->flags = byte2;
	retval = lun->backend->config_write((union ctl_io *)ctsio);

	return (retval);
}

int
ctl_unmap(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_unmap *cdb;
	struct ctl_ptr_len_flags *ptrlen;
	struct scsi_unmap_header *hdr;
	struct scsi_unmap_desc *buf, *end, *endnz, *range;
	uint64_t lba;
	uint32_t num_blocks;
	int len, retval;
	uint8_t byte2;

	retval = CTL_RETVAL_COMPLETE;

	CTL_DEBUG_PRINT(("ctl_unmap\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_unmap *)ctsio->cdb;

	len = scsi_2btoul(cdb->length);
	byte2 = cdb->byte2;

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	len = ctsio->kern_total_len - ctsio->kern_data_resid;
	hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr;
	if (len < sizeof (*hdr) ||
	    len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) ||
	    len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) ||
	    scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 0,
				      /*command*/ 0,
				      /*field*/ 0,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		goto done;
	}
	len = scsi_2btoul(hdr->desc_length);
	buf = (struct scsi_unmap_desc *)(hdr + 1);
	end = buf + len / sizeof(*buf);

	endnz = buf;
	for (range = buf; range < end; range++) {
		lba = scsi_8btou64(range->lba);
		num_blocks = scsi_4btoul(range->length);
		if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
		 || ((lba + num_blocks) < lba)) {
			/* Free our data buffer via the done path. */
			ctl_set_lba_out_of_range(ctsio);
			goto done;
		}
		if (num_blocks != 0)
			endnz = range + 1;
	}

	/*
	 * The block backend cannot handle a zero-length last range.
	 * Filter it out and return if there is nothing left.
6103 */ 6104 len = (uint8_t *)endnz - (uint8_t *)buf; 6105 if (len == 0) { 6106 ctl_set_success(ctsio); 6107 goto done; 6108 } 6109 6110 mtx_lock(&lun->lun_lock); 6111 ptrlen = (struct ctl_ptr_len_flags *) 6112 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 6113 ptrlen->ptr = (void *)buf; 6114 ptrlen->len = len; 6115 ptrlen->flags = byte2; 6116 ctl_check_blocked(lun); 6117 mtx_unlock(&lun->lun_lock); 6118 6119 retval = lun->backend->config_write((union ctl_io *)ctsio); 6120 return (retval); 6121 6122 done: 6123 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 6124 free(ctsio->kern_data_ptr, M_CTL); 6125 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 6126 } 6127 ctl_done((union ctl_io *)ctsio); 6128 return (CTL_RETVAL_COMPLETE); 6129 } 6130 6131 /* 6132 * Note that this function currently doesn't actually do anything inside 6133 * CTL to enforce things if the DQue bit is turned on. 6134 * 6135 * Also note that this function can't be used in the default case, because 6136 * the DQue bit isn't set in the changeable mask for the control mode page 6137 * anyway. This is just here as an example for how to implement a page 6138 * handler, and a placeholder in case we want to allow the user to turn 6139 * tagged queueing on and off. 6140 * 6141 * The D_SENSE bit handling is functional, however, and will turn 6142 * descriptor sense on and off for a given LUN. 6143 */ 6144 int 6145 ctl_control_page_handler(struct ctl_scsiio *ctsio, 6146 struct ctl_page_index *page_index, uint8_t *page_ptr) 6147 { 6148 struct scsi_control_page *current_cp, *saved_cp, *user_cp; 6149 struct ctl_lun *lun; 6150 int set_ua; 6151 uint32_t initidx; 6152 6153 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6154 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 6155 set_ua = 0; 6156 6157 user_cp = (struct scsi_control_page *)page_ptr; 6158 current_cp = (struct scsi_control_page *) 6159 (page_index->page_data + (page_index->page_len * 6160 CTL_PAGE_CURRENT)); 6161 saved_cp = (struct scsi_control_page *) 6162 (page_index->page_data + (page_index->page_len * 6163 CTL_PAGE_SAVED)); 6164 6165 mtx_lock(&lun->lun_lock); 6166 if (((current_cp->rlec & SCP_DSENSE) == 0) 6167 && ((user_cp->rlec & SCP_DSENSE) != 0)) { 6168 /* 6169 * Descriptor sense is currently turned off and the user 6170 * wants to turn it on. 6171 */ 6172 current_cp->rlec |= SCP_DSENSE; 6173 saved_cp->rlec |= SCP_DSENSE; 6174 lun->flags |= CTL_LUN_SENSE_DESC; 6175 set_ua = 1; 6176 } else if (((current_cp->rlec & SCP_DSENSE) != 0) 6177 && ((user_cp->rlec & SCP_DSENSE) == 0)) { 6178 /* 6179 * Descriptor sense is currently turned on, and the user 6180 * wants to turn it off. 
6181 */ 6182 current_cp->rlec &= ~SCP_DSENSE; 6183 saved_cp->rlec &= ~SCP_DSENSE; 6184 lun->flags &= ~CTL_LUN_SENSE_DESC; 6185 set_ua = 1; 6186 } 6187 if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) != 6188 (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) { 6189 current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; 6190 current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; 6191 saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; 6192 saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; 6193 set_ua = 1; 6194 } 6195 if ((current_cp->eca_and_aen & SCP_SWP) != 6196 (user_cp->eca_and_aen & SCP_SWP)) { 6197 current_cp->eca_and_aen &= ~SCP_SWP; 6198 current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; 6199 saved_cp->eca_and_aen &= ~SCP_SWP; 6200 saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; 6201 set_ua = 1; 6202 } 6203 if (set_ua != 0) 6204 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 6205 mtx_unlock(&lun->lun_lock); 6206 6207 return (0); 6208 } 6209 6210 int 6211 ctl_caching_sp_handler(struct ctl_scsiio *ctsio, 6212 struct ctl_page_index *page_index, uint8_t *page_ptr) 6213 { 6214 struct scsi_caching_page *current_cp, *saved_cp, *user_cp; 6215 struct ctl_lun *lun; 6216 int set_ua; 6217 uint32_t initidx; 6218 6219 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6220 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 6221 set_ua = 0; 6222 6223 user_cp = (struct scsi_caching_page *)page_ptr; 6224 current_cp = (struct scsi_caching_page *) 6225 (page_index->page_data + (page_index->page_len * 6226 CTL_PAGE_CURRENT)); 6227 saved_cp = (struct scsi_caching_page *) 6228 (page_index->page_data + (page_index->page_len * 6229 CTL_PAGE_SAVED)); 6230 6231 mtx_lock(&lun->lun_lock); 6232 if ((current_cp->flags1 & (SCP_WCE | SCP_RCD)) != 6233 (user_cp->flags1 & (SCP_WCE | SCP_RCD))) { 6234 current_cp->flags1 &= ~(SCP_WCE | SCP_RCD); 6235 current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); 6236 saved_cp->flags1 &= ~(SCP_WCE | SCP_RCD); 6237 saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); 6238 set_ua = 1; 6239 } 6240 if (set_ua != 0) 6241 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 6242 mtx_unlock(&lun->lun_lock); 6243 6244 return (0); 6245 } 6246 6247 int 6248 ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio, 6249 struct ctl_page_index *page_index, 6250 uint8_t *page_ptr) 6251 { 6252 uint8_t *c; 6253 int i; 6254 6255 c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs; 6256 ctl_time_io_secs = 6257 (c[0] << 8) | 6258 (c[1] << 0) | 6259 0; 6260 CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs)); 6261 printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs); 6262 printf("page data:"); 6263 for (i=0; i<8; i++) 6264 printf(" %.2x",page_ptr[i]); 6265 printf("\n"); 6266 return (0); 6267 } 6268 6269 int 6270 ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio, 6271 struct ctl_page_index *page_index, 6272 int pc) 6273 { 6274 struct copan_debugconf_subpage *page; 6275 6276 page = (struct copan_debugconf_subpage *)page_index->page_data + 6277 (page_index->page_len * pc); 6278 6279 switch (pc) { 6280 case SMS_PAGE_CTRL_CHANGEABLE >> 6: 6281 case SMS_PAGE_CTRL_DEFAULT >> 6: 6282 case SMS_PAGE_CTRL_SAVED >> 6: 6283 /* 6284 * We don't update the changable or default bits for this page. 
6285 */ 6286 break; 6287 case SMS_PAGE_CTRL_CURRENT >> 6: 6288 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8; 6289 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0; 6290 break; 6291 default: 6292 #ifdef NEEDTOPORT 6293 EPRINT(0, "Invalid PC %d!!", pc); 6294 #endif /* NEEDTOPORT */ 6295 break; 6296 } 6297 return (0); 6298 } 6299 6300 6301 static int 6302 ctl_do_mode_select(union ctl_io *io) 6303 { 6304 struct scsi_mode_page_header *page_header; 6305 struct ctl_page_index *page_index; 6306 struct ctl_scsiio *ctsio; 6307 int control_dev, page_len; 6308 int page_len_offset, page_len_size; 6309 union ctl_modepage_info *modepage_info; 6310 struct ctl_lun *lun; 6311 int *len_left, *len_used; 6312 int retval, i; 6313 6314 ctsio = &io->scsiio; 6315 page_index = NULL; 6316 page_len = 0; 6317 retval = CTL_RETVAL_COMPLETE; 6318 6319 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6320 6321 if (lun->be_lun->lun_type != T_DIRECT) 6322 control_dev = 1; 6323 else 6324 control_dev = 0; 6325 6326 modepage_info = (union ctl_modepage_info *) 6327 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6328 len_left = &modepage_info->header.len_left; 6329 len_used = &modepage_info->header.len_used; 6330 6331 do_next_page: 6332 6333 page_header = (struct scsi_mode_page_header *) 6334 (ctsio->kern_data_ptr + *len_used); 6335 6336 if (*len_left == 0) { 6337 free(ctsio->kern_data_ptr, M_CTL); 6338 ctl_set_success(ctsio); 6339 ctl_done((union ctl_io *)ctsio); 6340 return (CTL_RETVAL_COMPLETE); 6341 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 6342 6343 free(ctsio->kern_data_ptr, M_CTL); 6344 ctl_set_param_len_error(ctsio); 6345 ctl_done((union ctl_io *)ctsio); 6346 return (CTL_RETVAL_COMPLETE); 6347 6348 } else if ((page_header->page_code & SMPH_SPF) 6349 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6350 6351 free(ctsio->kern_data_ptr, M_CTL); 6352 ctl_set_param_len_error(ctsio); 6353 ctl_done((union ctl_io *)ctsio); 6354 return (CTL_RETVAL_COMPLETE); 6355 } 6356 6357 6358 /* 6359 * XXX KDM should we do something with the block descriptor? 6360 */ 6361 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6362 6363 if ((control_dev != 0) 6364 && (lun->mode_pages.index[i].page_flags & 6365 CTL_PAGE_FLAG_DISK_ONLY)) 6366 continue; 6367 6368 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 6369 (page_header->page_code & SMPH_PC_MASK)) 6370 continue; 6371 6372 /* 6373 * If neither page has a subpage code, then we've got a 6374 * match. 6375 */ 6376 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0) 6377 && ((page_header->page_code & SMPH_SPF) == 0)) { 6378 page_index = &lun->mode_pages.index[i]; 6379 page_len = page_header->page_length; 6380 break; 6381 } 6382 6383 /* 6384 * If both pages have subpages, then the subpage numbers 6385 * have to match. 6386 */ 6387 if ((lun->mode_pages.index[i].page_code & SMPH_SPF) 6388 && (page_header->page_code & SMPH_SPF)) { 6389 struct scsi_mode_page_header_sp *sph; 6390 6391 sph = (struct scsi_mode_page_header_sp *)page_header; 6392 6393 if (lun->mode_pages.index[i].subpage == 6394 sph->subpage) { 6395 page_index = &lun->mode_pages.index[i]; 6396 page_len = scsi_2btoul(sph->page_length); 6397 break; 6398 } 6399 } 6400 } 6401 6402 /* 6403 * If we couldn't find the page, or if we don't have a mode select 6404 * handler for it, send back an error to the user. 
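 * In that case the sense-key-specific field pointer is set to *len_used,
 * the offset of the unrecognized page header within the parameter list.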
 */
	if ((page_index == NULL)
	 || (page_index->select_handler == NULL)) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	if (page_index->page_code & SMPH_SPF) {
		page_len_offset = 2;
		page_len_size = 2;
	} else {
		page_len_size = 1;
		page_len_offset = 1;
	}

	/*
	 * If the length the initiator gives us isn't the one we specify in
	 * the mode page header, or if they didn't specify enough data in
	 * the CDB to avoid truncating this page, kick out the request.
	 */
	if ((page_len != (page_index->page_len - page_len_offset -
	    page_len_size))
	 || (*len_left < page_index->page_len)) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used + page_len_offset,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Run through the mode page, checking to make sure that the bits
	 * the user changed are actually legal for him to change.
	 */
	for (i = 0; i < page_index->page_len; i++) {
		uint8_t *user_byte, *change_mask, *current_byte;
		int bad_bit;
		int j;

		user_byte = (uint8_t *)page_header + i;
		change_mask = page_index->page_data +
			      (page_index->page_len * CTL_PAGE_CHANGEABLE) + i;
		current_byte = page_index->page_data +
			       (page_index->page_len * CTL_PAGE_CURRENT) + i;

		/*
		 * Check to see whether the user set any bits in this byte
		 * that he is not allowed to set.
		 */
		if ((*user_byte & ~(*change_mask)) ==
		    (*current_byte & ~(*change_mask)))
			continue;

		/*
		 * Go through bit by bit to determine which one is illegal.
		 */
		bad_bit = 0;
		for (j = 7; j >= 0; j--) {
			if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
			    (((1 << j) & ~(*change_mask)) & *current_byte)) {
				bad_bit = j;
				break;
			}
		}
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used + i,
				      /*bit_valid*/ 1,
				      /*bit*/ bad_bit);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Decrement these before we call the page handler, since we may
	 * end up getting called back one way or another before the handler
	 * returns to this context.
	 */
	*len_left -= page_index->page_len;
	*len_used += page_index->page_len;

	retval = page_index->select_handler(ctsio, page_index,
					    (uint8_t *)page_header);

	/*
	 * If the page handler returns CTL_RETVAL_QUEUED, then we need to
	 * wait until this queued command completes to finish processing
	 * the mode page.  If it returns anything other than
	 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
	 * already set the sense information, freed the data pointer, and
	 * completed the io for us.
	 */
	if (retval != CTL_RETVAL_COMPLETE)
		goto bailout_no_done;

	/*
	 * If the initiator sent us more than one page, parse the next one.
6517 */ 6518 if (*len_left > 0) 6519 goto do_next_page; 6520 6521 ctl_set_success(ctsio); 6522 free(ctsio->kern_data_ptr, M_CTL); 6523 ctl_done((union ctl_io *)ctsio); 6524 6525 bailout_no_done: 6526 6527 return (CTL_RETVAL_COMPLETE); 6528 6529 } 6530 6531 int 6532 ctl_mode_select(struct ctl_scsiio *ctsio) 6533 { 6534 int param_len, pf, sp; 6535 int header_size, bd_len; 6536 int len_left, len_used; 6537 struct ctl_page_index *page_index; 6538 struct ctl_lun *lun; 6539 int control_dev, page_len; 6540 union ctl_modepage_info *modepage_info; 6541 int retval; 6542 6543 pf = 0; 6544 sp = 0; 6545 page_len = 0; 6546 len_used = 0; 6547 len_left = 0; 6548 retval = 0; 6549 bd_len = 0; 6550 page_index = NULL; 6551 6552 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6553 6554 if (lun->be_lun->lun_type != T_DIRECT) 6555 control_dev = 1; 6556 else 6557 control_dev = 0; 6558 6559 switch (ctsio->cdb[0]) { 6560 case MODE_SELECT_6: { 6561 struct scsi_mode_select_6 *cdb; 6562 6563 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6564 6565 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6566 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6567 6568 param_len = cdb->length; 6569 header_size = sizeof(struct scsi_mode_header_6); 6570 break; 6571 } 6572 case MODE_SELECT_10: { 6573 struct scsi_mode_select_10 *cdb; 6574 6575 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6576 6577 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6578 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6579 6580 param_len = scsi_2btoul(cdb->length); 6581 header_size = sizeof(struct scsi_mode_header_10); 6582 break; 6583 } 6584 default: 6585 ctl_set_invalid_opcode(ctsio); 6586 ctl_done((union ctl_io *)ctsio); 6587 return (CTL_RETVAL_COMPLETE); 6588 break; /* NOTREACHED */ 6589 } 6590 6591 /* 6592 * From SPC-3: 6593 * "A parameter list length of zero indicates that the Data-Out Buffer 6594 * shall be empty. This condition shall not be considered as an error." 6595 */ 6596 if (param_len == 0) { 6597 ctl_set_success(ctsio); 6598 ctl_done((union ctl_io *)ctsio); 6599 return (CTL_RETVAL_COMPLETE); 6600 } 6601 6602 /* 6603 * Since we'll hit this the first time through, prior to 6604 * allocation, we don't need to free a data buffer here. 6605 */ 6606 if (param_len < header_size) { 6607 ctl_set_param_len_error(ctsio); 6608 ctl_done((union ctl_io *)ctsio); 6609 return (CTL_RETVAL_COMPLETE); 6610 } 6611 6612 /* 6613 * Allocate the data buffer and grab the user's data. In theory, 6614 * we shouldn't have to sanity check the parameter list length here 6615 * because the maximum size is 64K. We should be able to malloc 6616 * that much without too many problems. 
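	 * The first pass through here (before CTL_FLAG_ALLOCATED is set)
	 * only allocates the buffer and starts the data-out transfer; once
	 * the parameter list has been fetched from the initiator, this
	 * function is invoked again to parse it.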
6617 */ 6618 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6619 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6620 ctsio->kern_data_len = param_len; 6621 ctsio->kern_total_len = param_len; 6622 ctsio->kern_data_resid = 0; 6623 ctsio->kern_rel_offset = 0; 6624 ctsio->kern_sg_entries = 0; 6625 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6626 ctsio->be_move_done = ctl_config_move_done; 6627 ctl_datamove((union ctl_io *)ctsio); 6628 6629 return (CTL_RETVAL_COMPLETE); 6630 } 6631 6632 switch (ctsio->cdb[0]) { 6633 case MODE_SELECT_6: { 6634 struct scsi_mode_header_6 *mh6; 6635 6636 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6637 bd_len = mh6->blk_desc_len; 6638 break; 6639 } 6640 case MODE_SELECT_10: { 6641 struct scsi_mode_header_10 *mh10; 6642 6643 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6644 bd_len = scsi_2btoul(mh10->blk_desc_len); 6645 break; 6646 } 6647 default: 6648 panic("Invalid CDB type %#x", ctsio->cdb[0]); 6649 break; 6650 } 6651 6652 if (param_len < (header_size + bd_len)) { 6653 free(ctsio->kern_data_ptr, M_CTL); 6654 ctl_set_param_len_error(ctsio); 6655 ctl_done((union ctl_io *)ctsio); 6656 return (CTL_RETVAL_COMPLETE); 6657 } 6658 6659 /* 6660 * Set the IO_CONT flag, so that if this I/O gets passed to 6661 * ctl_config_write_done(), it'll get passed back to 6662 * ctl_do_mode_select() for further processing, or completion if 6663 * we're all done. 6664 */ 6665 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6666 ctsio->io_cont = ctl_do_mode_select; 6667 6668 modepage_info = (union ctl_modepage_info *) 6669 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6670 6671 memset(modepage_info, 0, sizeof(*modepage_info)); 6672 6673 len_left = param_len - header_size - bd_len; 6674 len_used = header_size + bd_len; 6675 6676 modepage_info->header.len_left = len_left; 6677 modepage_info->header.len_used = len_used; 6678 6679 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6680 } 6681 6682 int 6683 ctl_mode_sense(struct ctl_scsiio *ctsio) 6684 { 6685 struct ctl_lun *lun; 6686 int pc, page_code, dbd, llba, subpage; 6687 int alloc_len, page_len, header_len, total_len; 6688 struct scsi_mode_block_descr *block_desc; 6689 struct ctl_page_index *page_index; 6690 int control_dev; 6691 6692 dbd = 0; 6693 llba = 0; 6694 block_desc = NULL; 6695 page_index = NULL; 6696 6697 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6698 6699 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6700 6701 if (lun->be_lun->lun_type != T_DIRECT) 6702 control_dev = 1; 6703 else 6704 control_dev = 0; 6705 6706 switch (ctsio->cdb[0]) { 6707 case MODE_SENSE_6: { 6708 struct scsi_mode_sense_6 *cdb; 6709 6710 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6711 6712 header_len = sizeof(struct scsi_mode_hdr_6); 6713 if (cdb->byte2 & SMS_DBD) 6714 dbd = 1; 6715 else 6716 header_len += sizeof(struct scsi_mode_block_descr); 6717 6718 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6719 page_code = cdb->page & SMS_PAGE_CODE; 6720 subpage = cdb->subpage; 6721 alloc_len = cdb->length; 6722 break; 6723 } 6724 case MODE_SENSE_10: { 6725 struct scsi_mode_sense_10 *cdb; 6726 6727 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6728 6729 header_len = sizeof(struct scsi_mode_hdr_10); 6730 6731 if (cdb->byte2 & SMS_DBD) 6732 dbd = 1; 6733 else 6734 header_len += sizeof(struct scsi_mode_block_descr); 6735 if (cdb->byte2 & SMS10_LLBAA) 6736 llba = 1; 6737 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6738 page_code = cdb->page & SMS_PAGE_CODE; 6739 subpage = cdb->subpage; 6740 alloc_len 
= scsi_2btoul(cdb->length); 6741 break; 6742 } 6743 default: 6744 ctl_set_invalid_opcode(ctsio); 6745 ctl_done((union ctl_io *)ctsio); 6746 return (CTL_RETVAL_COMPLETE); 6747 break; /* NOTREACHED */ 6748 } 6749 6750 /* 6751 * We have to make a first pass through to calculate the size of 6752 * the pages that match the user's query. Then we allocate enough 6753 * memory to hold it, and actually copy the data into the buffer. 6754 */ 6755 switch (page_code) { 6756 case SMS_ALL_PAGES_PAGE: { 6757 int i; 6758 6759 page_len = 0; 6760 6761 /* 6762 * At the moment, values other than 0 and 0xff here are 6763 * reserved according to SPC-3. 6764 */ 6765 if ((subpage != SMS_SUBPAGE_PAGE_0) 6766 && (subpage != SMS_SUBPAGE_ALL)) { 6767 ctl_set_invalid_field(ctsio, 6768 /*sks_valid*/ 1, 6769 /*command*/ 1, 6770 /*field*/ 3, 6771 /*bit_valid*/ 0, 6772 /*bit*/ 0); 6773 ctl_done((union ctl_io *)ctsio); 6774 return (CTL_RETVAL_COMPLETE); 6775 } 6776 6777 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6778 if ((control_dev != 0) 6779 && (lun->mode_pages.index[i].page_flags & 6780 CTL_PAGE_FLAG_DISK_ONLY)) 6781 continue; 6782 6783 /* 6784 * We don't use this subpage if the user didn't 6785 * request all subpages. 6786 */ 6787 if ((lun->mode_pages.index[i].subpage != 0) 6788 && (subpage == SMS_SUBPAGE_PAGE_0)) 6789 continue; 6790 6791 #if 0 6792 printf("found page %#x len %d\n", 6793 lun->mode_pages.index[i].page_code & 6794 SMPH_PC_MASK, 6795 lun->mode_pages.index[i].page_len); 6796 #endif 6797 page_len += lun->mode_pages.index[i].page_len; 6798 } 6799 break; 6800 } 6801 default: { 6802 int i; 6803 6804 page_len = 0; 6805 6806 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6807 /* Look for the right page code */ 6808 if ((lun->mode_pages.index[i].page_code & 6809 SMPH_PC_MASK) != page_code) 6810 continue; 6811 6812 /* Look for the right subpage or the subpage wildcard*/ 6813 if ((lun->mode_pages.index[i].subpage != subpage) 6814 && (subpage != SMS_SUBPAGE_ALL)) 6815 continue; 6816 6817 /* Make sure the page is supported for this dev type */ 6818 if ((control_dev != 0) 6819 && (lun->mode_pages.index[i].page_flags & 6820 CTL_PAGE_FLAG_DISK_ONLY)) 6821 continue; 6822 6823 #if 0 6824 printf("found page %#x len %d\n", 6825 lun->mode_pages.index[i].page_code & 6826 SMPH_PC_MASK, 6827 lun->mode_pages.index[i].page_len); 6828 #endif 6829 6830 page_len += lun->mode_pages.index[i].page_len; 6831 } 6832 6833 if (page_len == 0) { 6834 ctl_set_invalid_field(ctsio, 6835 /*sks_valid*/ 1, 6836 /*command*/ 1, 6837 /*field*/ 2, 6838 /*bit_valid*/ 1, 6839 /*bit*/ 5); 6840 ctl_done((union ctl_io *)ctsio); 6841 return (CTL_RETVAL_COMPLETE); 6842 } 6843 break; 6844 } 6845 } 6846 6847 total_len = header_len + page_len; 6848 #if 0 6849 printf("header_len = %d, page_len = %d, total_len = %d\n", 6850 header_len, page_len, total_len); 6851 #endif 6852 6853 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6854 ctsio->kern_sg_entries = 0; 6855 ctsio->kern_data_resid = 0; 6856 ctsio->kern_rel_offset = 0; 6857 if (total_len < alloc_len) { 6858 ctsio->residual = alloc_len - total_len; 6859 ctsio->kern_data_len = total_len; 6860 ctsio->kern_total_len = total_len; 6861 } else { 6862 ctsio->residual = 0; 6863 ctsio->kern_data_len = alloc_len; 6864 ctsio->kern_total_len = alloc_len; 6865 } 6866 6867 switch (ctsio->cdb[0]) { 6868 case MODE_SENSE_6: { 6869 struct scsi_mode_hdr_6 *header; 6870 6871 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6872 6873 header->datalen = MIN(total_len - 1, 254); 6874 if (control_dev == 0) { 
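			/*
			 * Device-specific parameter byte for a disk LUN:
			 * advertise DPOFUA support, and report WP if the LUN
			 * is read-only or software write protect (SWP) is
			 * enabled in the control mode page.
			 */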
6875 header->dev_specific = 0x10; /* DPOFUA */ 6876 if ((lun->flags & CTL_LUN_READONLY) || 6877 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6878 .eca_and_aen & SCP_SWP) != 0) 6879 header->dev_specific |= 0x80; /* WP */ 6880 } 6881 if (dbd) 6882 header->block_descr_len = 0; 6883 else 6884 header->block_descr_len = 6885 sizeof(struct scsi_mode_block_descr); 6886 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6887 break; 6888 } 6889 case MODE_SENSE_10: { 6890 struct scsi_mode_hdr_10 *header; 6891 int datalen; 6892 6893 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6894 6895 datalen = MIN(total_len - 2, 65533); 6896 scsi_ulto2b(datalen, header->datalen); 6897 if (control_dev == 0) { 6898 header->dev_specific = 0x10; /* DPOFUA */ 6899 if ((lun->flags & CTL_LUN_READONLY) || 6900 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6901 .eca_and_aen & SCP_SWP) != 0) 6902 header->dev_specific |= 0x80; /* WP */ 6903 } 6904 if (dbd) 6905 scsi_ulto2b(0, header->block_descr_len); 6906 else 6907 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6908 header->block_descr_len); 6909 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6910 break; 6911 } 6912 default: 6913 panic("invalid CDB type %#x", ctsio->cdb[0]); 6914 break; /* NOTREACHED */ 6915 } 6916 6917 /* 6918 * If we've got a disk, use its blocksize in the block 6919 * descriptor. Otherwise, just set it to 0. 6920 */ 6921 if (dbd == 0) { 6922 if (control_dev == 0) 6923 scsi_ulto3b(lun->be_lun->blocksize, 6924 block_desc->block_len); 6925 else 6926 scsi_ulto3b(0, block_desc->block_len); 6927 } 6928 6929 switch (page_code) { 6930 case SMS_ALL_PAGES_PAGE: { 6931 int i, data_used; 6932 6933 data_used = header_len; 6934 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6935 struct ctl_page_index *page_index; 6936 6937 page_index = &lun->mode_pages.index[i]; 6938 6939 if ((control_dev != 0) 6940 && (page_index->page_flags & 6941 CTL_PAGE_FLAG_DISK_ONLY)) 6942 continue; 6943 6944 /* 6945 * We don't use this subpage if the user didn't 6946 * request all subpages. We already checked (above) 6947 * to make sure the user only specified a subpage 6948 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6949 */ 6950 if ((page_index->subpage != 0) 6951 && (subpage == SMS_SUBPAGE_PAGE_0)) 6952 continue; 6953 6954 /* 6955 * Call the handler, if it exists, to update the 6956 * page to the latest values. 6957 */ 6958 if (page_index->sense_handler != NULL) 6959 page_index->sense_handler(ctsio, page_index,pc); 6960 6961 memcpy(ctsio->kern_data_ptr + data_used, 6962 page_index->page_data + 6963 (page_index->page_len * pc), 6964 page_index->page_len); 6965 data_used += page_index->page_len; 6966 } 6967 break; 6968 } 6969 default: { 6970 int i, data_used; 6971 6972 data_used = header_len; 6973 6974 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6975 struct ctl_page_index *page_index; 6976 6977 page_index = &lun->mode_pages.index[i]; 6978 6979 /* Look for the right page code */ 6980 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6981 continue; 6982 6983 /* Look for the right subpage or the subpage wildcard*/ 6984 if ((page_index->subpage != subpage) 6985 && (subpage != SMS_SUBPAGE_ALL)) 6986 continue; 6987 6988 /* Make sure the page is supported for this dev type */ 6989 if ((control_dev != 0) 6990 && (page_index->page_flags & 6991 CTL_PAGE_FLAG_DISK_ONLY)) 6992 continue; 6993 6994 /* 6995 * Call the handler, if it exists, to update the 6996 * page to the latest values. 
6997 */ 6998 if (page_index->sense_handler != NULL) 6999 page_index->sense_handler(ctsio, page_index,pc); 7000 7001 memcpy(ctsio->kern_data_ptr + data_used, 7002 page_index->page_data + 7003 (page_index->page_len * pc), 7004 page_index->page_len); 7005 data_used += page_index->page_len; 7006 } 7007 break; 7008 } 7009 } 7010 7011 ctl_set_success(ctsio); 7012 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7013 ctsio->be_move_done = ctl_config_move_done; 7014 ctl_datamove((union ctl_io *)ctsio); 7015 return (CTL_RETVAL_COMPLETE); 7016 } 7017 7018 int 7019 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 7020 struct ctl_page_index *page_index, 7021 int pc) 7022 { 7023 struct ctl_lun *lun; 7024 struct scsi_log_param_header *phdr; 7025 uint8_t *data; 7026 uint64_t val; 7027 7028 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7029 data = page_index->page_data; 7030 7031 if (lun->backend->lun_attr != NULL && 7032 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) 7033 != UINT64_MAX) { 7034 phdr = (struct scsi_log_param_header *)data; 7035 scsi_ulto2b(0x0001, phdr->param_code); 7036 phdr->param_control = SLP_LBIN | SLP_LP; 7037 phdr->param_len = 8; 7038 data = (uint8_t *)(phdr + 1); 7039 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 7040 data[4] = 0x02; /* per-pool */ 7041 data += phdr->param_len; 7042 } 7043 7044 if (lun->backend->lun_attr != NULL && 7045 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) 7046 != UINT64_MAX) { 7047 phdr = (struct scsi_log_param_header *)data; 7048 scsi_ulto2b(0x0002, phdr->param_code); 7049 phdr->param_control = SLP_LBIN | SLP_LP; 7050 phdr->param_len = 8; 7051 data = (uint8_t *)(phdr + 1); 7052 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 7053 data[4] = 0x01; /* per-LUN */ 7054 data += phdr->param_len; 7055 } 7056 7057 if (lun->backend->lun_attr != NULL && 7058 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail")) 7059 != UINT64_MAX) { 7060 phdr = (struct scsi_log_param_header *)data; 7061 scsi_ulto2b(0x00f1, phdr->param_code); 7062 phdr->param_control = SLP_LBIN | SLP_LP; 7063 phdr->param_len = 8; 7064 data = (uint8_t *)(phdr + 1); 7065 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 7066 data[4] = 0x02; /* per-pool */ 7067 data += phdr->param_len; 7068 } 7069 7070 if (lun->backend->lun_attr != NULL && 7071 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) 7072 != UINT64_MAX) { 7073 phdr = (struct scsi_log_param_header *)data; 7074 scsi_ulto2b(0x00f2, phdr->param_code); 7075 phdr->param_control = SLP_LBIN | SLP_LP; 7076 phdr->param_len = 8; 7077 data = (uint8_t *)(phdr + 1); 7078 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 7079 data[4] = 0x02; /* per-pool */ 7080 data += phdr->param_len; 7081 } 7082 7083 page_index->page_len = data - page_index->page_data; 7084 return (0); 7085 } 7086 7087 int 7088 ctl_log_sense(struct ctl_scsiio *ctsio) 7089 { 7090 struct ctl_lun *lun; 7091 int i, pc, page_code, subpage; 7092 int alloc_len, total_len; 7093 struct ctl_page_index *page_index; 7094 struct scsi_log_sense *cdb; 7095 struct scsi_log_header *header; 7096 7097 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 7098 7099 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7100 cdb = (struct scsi_log_sense *)ctsio->cdb; 7101 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 7102 page_code = cdb->page & SLS_PAGE_CODE; 7103 subpage = cdb->subpage; 7104 alloc_len = scsi_2btoul(cdb->length); 7105 7106 page_index = NULL; 7107 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 7108 page_index = 
&lun->log_pages.index[i]; 7109 7110 /* Look for the right page code */ 7111 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 7112 continue; 7113 7114 /* Look for the right subpage or the subpage wildcard*/ 7115 if (page_index->subpage != subpage) 7116 continue; 7117 7118 break; 7119 } 7120 if (i >= CTL_NUM_LOG_PAGES) { 7121 ctl_set_invalid_field(ctsio, 7122 /*sks_valid*/ 1, 7123 /*command*/ 1, 7124 /*field*/ 2, 7125 /*bit_valid*/ 0, 7126 /*bit*/ 0); 7127 ctl_done((union ctl_io *)ctsio); 7128 return (CTL_RETVAL_COMPLETE); 7129 } 7130 7131 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 7132 7133 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7134 ctsio->kern_sg_entries = 0; 7135 ctsio->kern_data_resid = 0; 7136 ctsio->kern_rel_offset = 0; 7137 if (total_len < alloc_len) { 7138 ctsio->residual = alloc_len - total_len; 7139 ctsio->kern_data_len = total_len; 7140 ctsio->kern_total_len = total_len; 7141 } else { 7142 ctsio->residual = 0; 7143 ctsio->kern_data_len = alloc_len; 7144 ctsio->kern_total_len = alloc_len; 7145 } 7146 7147 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 7148 header->page = page_index->page_code; 7149 if (page_index->subpage) { 7150 header->page |= SL_SPF; 7151 header->subpage = page_index->subpage; 7152 } 7153 scsi_ulto2b(page_index->page_len, header->datalen); 7154 7155 /* 7156 * Call the handler, if it exists, to update the 7157 * page to the latest values. 7158 */ 7159 if (page_index->sense_handler != NULL) 7160 page_index->sense_handler(ctsio, page_index, pc); 7161 7162 memcpy(header + 1, page_index->page_data, page_index->page_len); 7163 7164 ctl_set_success(ctsio); 7165 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7166 ctsio->be_move_done = ctl_config_move_done; 7167 ctl_datamove((union ctl_io *)ctsio); 7168 return (CTL_RETVAL_COMPLETE); 7169 } 7170 7171 int 7172 ctl_read_capacity(struct ctl_scsiio *ctsio) 7173 { 7174 struct scsi_read_capacity *cdb; 7175 struct scsi_read_capacity_data *data; 7176 struct ctl_lun *lun; 7177 uint32_t lba; 7178 7179 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 7180 7181 cdb = (struct scsi_read_capacity *)ctsio->cdb; 7182 7183 lba = scsi_4btoul(cdb->addr); 7184 if (((cdb->pmi & SRC_PMI) == 0) 7185 && (lba != 0)) { 7186 ctl_set_invalid_field(/*ctsio*/ ctsio, 7187 /*sks_valid*/ 1, 7188 /*command*/ 1, 7189 /*field*/ 2, 7190 /*bit_valid*/ 0, 7191 /*bit*/ 0); 7192 ctl_done((union ctl_io *)ctsio); 7193 return (CTL_RETVAL_COMPLETE); 7194 } 7195 7196 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7197 7198 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7199 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 7200 ctsio->residual = 0; 7201 ctsio->kern_data_len = sizeof(*data); 7202 ctsio->kern_total_len = sizeof(*data); 7203 ctsio->kern_data_resid = 0; 7204 ctsio->kern_rel_offset = 0; 7205 ctsio->kern_sg_entries = 0; 7206 7207 /* 7208 * If the maximum LBA is greater than 0xfffffffe, the user must 7209 * issue a SERVICE ACTION IN (16) command, with the read capacity 7210 * serivce action set. 7211 */ 7212 if (lun->be_lun->maxlba > 0xfffffffe) 7213 scsi_ulto4b(0xffffffff, data->addr); 7214 else 7215 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 7216 7217 /* 7218 * XXX KDM this may not be 512 bytes... 
7219 */ 7220 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7221 7222 ctl_set_success(ctsio); 7223 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7224 ctsio->be_move_done = ctl_config_move_done; 7225 ctl_datamove((union ctl_io *)ctsio); 7226 return (CTL_RETVAL_COMPLETE); 7227 } 7228 7229 int 7230 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 7231 { 7232 struct scsi_read_capacity_16 *cdb; 7233 struct scsi_read_capacity_data_long *data; 7234 struct ctl_lun *lun; 7235 uint64_t lba; 7236 uint32_t alloc_len; 7237 7238 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 7239 7240 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 7241 7242 alloc_len = scsi_4btoul(cdb->alloc_len); 7243 lba = scsi_8btou64(cdb->addr); 7244 7245 if ((cdb->reladr & SRC16_PMI) 7246 && (lba != 0)) { 7247 ctl_set_invalid_field(/*ctsio*/ ctsio, 7248 /*sks_valid*/ 1, 7249 /*command*/ 1, 7250 /*field*/ 2, 7251 /*bit_valid*/ 0, 7252 /*bit*/ 0); 7253 ctl_done((union ctl_io *)ctsio); 7254 return (CTL_RETVAL_COMPLETE); 7255 } 7256 7257 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7258 7259 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7260 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 7261 7262 if (sizeof(*data) < alloc_len) { 7263 ctsio->residual = alloc_len - sizeof(*data); 7264 ctsio->kern_data_len = sizeof(*data); 7265 ctsio->kern_total_len = sizeof(*data); 7266 } else { 7267 ctsio->residual = 0; 7268 ctsio->kern_data_len = alloc_len; 7269 ctsio->kern_total_len = alloc_len; 7270 } 7271 ctsio->kern_data_resid = 0; 7272 ctsio->kern_rel_offset = 0; 7273 ctsio->kern_sg_entries = 0; 7274 7275 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 7276 /* XXX KDM this may not be 512 bytes... */ 7277 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7278 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 7279 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 7280 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 7281 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 7282 7283 ctl_set_success(ctsio); 7284 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7285 ctsio->be_move_done = ctl_config_move_done; 7286 ctl_datamove((union ctl_io *)ctsio); 7287 return (CTL_RETVAL_COMPLETE); 7288 } 7289 7290 int 7291 ctl_get_lba_status(struct ctl_scsiio *ctsio) 7292 { 7293 struct scsi_get_lba_status *cdb; 7294 struct scsi_get_lba_status_data *data; 7295 struct ctl_lun *lun; 7296 struct ctl_lba_len_flags *lbalen; 7297 uint64_t lba; 7298 uint32_t alloc_len, total_len; 7299 int retval; 7300 7301 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 7302 7303 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7304 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 7305 lba = scsi_8btou64(cdb->addr); 7306 alloc_len = scsi_4btoul(cdb->alloc_len); 7307 7308 if (lba > lun->be_lun->maxlba) { 7309 ctl_set_lba_out_of_range(ctsio); 7310 ctl_done((union ctl_io *)ctsio); 7311 return (CTL_RETVAL_COMPLETE); 7312 } 7313 7314 total_len = sizeof(*data) + sizeof(data->descr[0]); 7315 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7316 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 7317 7318 if (total_len < alloc_len) { 7319 ctsio->residual = alloc_len - total_len; 7320 ctsio->kern_data_len = total_len; 7321 ctsio->kern_total_len = total_len; 7322 } else { 7323 ctsio->residual = 0; 7324 ctsio->kern_data_len = alloc_len; 7325 ctsio->kern_total_len = alloc_len; 7326 } 7327 ctsio->kern_data_resid = 0; 7328 ctsio->kern_rel_offset = 0; 7329 
ctsio->kern_sg_entries = 0; 7330 7331 /* Fill dummy data in case backend can't tell anything. */ 7332 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 7333 scsi_u64to8b(lba, data->descr[0].addr); 7334 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 7335 data->descr[0].length); 7336 data->descr[0].status = 0; /* Mapped or unknown. */ 7337 7338 ctl_set_success(ctsio); 7339 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7340 ctsio->be_move_done = ctl_config_move_done; 7341 7342 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 7343 lbalen->lba = lba; 7344 lbalen->len = total_len; 7345 lbalen->flags = 0; 7346 retval = lun->backend->config_read((union ctl_io *)ctsio); 7347 return (CTL_RETVAL_COMPLETE); 7348 } 7349 7350 int 7351 ctl_read_defect(struct ctl_scsiio *ctsio) 7352 { 7353 struct scsi_read_defect_data_10 *ccb10; 7354 struct scsi_read_defect_data_12 *ccb12; 7355 struct scsi_read_defect_data_hdr_10 *data10; 7356 struct scsi_read_defect_data_hdr_12 *data12; 7357 uint32_t alloc_len, data_len; 7358 uint8_t format; 7359 7360 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7361 7362 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7363 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7364 format = ccb10->format; 7365 alloc_len = scsi_2btoul(ccb10->alloc_length); 7366 data_len = sizeof(*data10); 7367 } else { 7368 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7369 format = ccb12->format; 7370 alloc_len = scsi_4btoul(ccb12->alloc_length); 7371 data_len = sizeof(*data12); 7372 } 7373 if (alloc_len == 0) { 7374 ctl_set_success(ctsio); 7375 ctl_done((union ctl_io *)ctsio); 7376 return (CTL_RETVAL_COMPLETE); 7377 } 7378 7379 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7380 if (data_len < alloc_len) { 7381 ctsio->residual = alloc_len - data_len; 7382 ctsio->kern_data_len = data_len; 7383 ctsio->kern_total_len = data_len; 7384 } else { 7385 ctsio->residual = 0; 7386 ctsio->kern_data_len = alloc_len; 7387 ctsio->kern_total_len = alloc_len; 7388 } 7389 ctsio->kern_data_resid = 0; 7390 ctsio->kern_rel_offset = 0; 7391 ctsio->kern_sg_entries = 0; 7392 7393 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7394 data10 = (struct scsi_read_defect_data_hdr_10 *) 7395 ctsio->kern_data_ptr; 7396 data10->format = format; 7397 scsi_ulto2b(0, data10->length); 7398 } else { 7399 data12 = (struct scsi_read_defect_data_hdr_12 *) 7400 ctsio->kern_data_ptr; 7401 data12->format = format; 7402 scsi_ulto2b(0, data12->generation); 7403 scsi_ulto4b(0, data12->length); 7404 } 7405 7406 ctl_set_success(ctsio); 7407 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7408 ctsio->be_move_done = ctl_config_move_done; 7409 ctl_datamove((union ctl_io *)ctsio); 7410 return (CTL_RETVAL_COMPLETE); 7411 } 7412 7413 int 7414 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7415 { 7416 struct scsi_maintenance_in *cdb; 7417 int retval; 7418 int alloc_len, ext, total_len = 0, g, p, pc, pg, gs, os; 7419 int num_target_port_groups, num_target_ports; 7420 struct ctl_lun *lun; 7421 struct ctl_softc *softc; 7422 struct ctl_port *port; 7423 struct scsi_target_group_data *rtg_ptr; 7424 struct scsi_target_group_data_extended *rtg_ext_ptr; 7425 struct scsi_target_port_group_descriptor *tpg_desc; 7426 7427 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7428 7429 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7430 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7431 softc = lun->ctl_softc; 7432 7433 retval = CTL_RETVAL_COMPLETE; 7434 7435 switch 
(cdb->byte2 & STG_PDF_MASK) { 7436 case STG_PDF_LENGTH: 7437 ext = 0; 7438 break; 7439 case STG_PDF_EXTENDED: 7440 ext = 1; 7441 break; 7442 default: 7443 ctl_set_invalid_field(/*ctsio*/ ctsio, 7444 /*sks_valid*/ 1, 7445 /*command*/ 1, 7446 /*field*/ 2, 7447 /*bit_valid*/ 1, 7448 /*bit*/ 5); 7449 ctl_done((union ctl_io *)ctsio); 7450 return(retval); 7451 } 7452 7453 if (softc->is_single) 7454 num_target_port_groups = 1; 7455 else 7456 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 7457 num_target_ports = 0; 7458 mtx_lock(&softc->ctl_lock); 7459 STAILQ_FOREACH(port, &softc->port_list, links) { 7460 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7461 continue; 7462 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 7463 continue; 7464 num_target_ports++; 7465 } 7466 mtx_unlock(&softc->ctl_lock); 7467 7468 if (ext) 7469 total_len = sizeof(struct scsi_target_group_data_extended); 7470 else 7471 total_len = sizeof(struct scsi_target_group_data); 7472 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7473 num_target_port_groups + 7474 sizeof(struct scsi_target_port_descriptor) * 7475 num_target_ports * num_target_port_groups; 7476 7477 alloc_len = scsi_4btoul(cdb->length); 7478 7479 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7480 7481 ctsio->kern_sg_entries = 0; 7482 7483 if (total_len < alloc_len) { 7484 ctsio->residual = alloc_len - total_len; 7485 ctsio->kern_data_len = total_len; 7486 ctsio->kern_total_len = total_len; 7487 } else { 7488 ctsio->residual = 0; 7489 ctsio->kern_data_len = alloc_len; 7490 ctsio->kern_total_len = alloc_len; 7491 } 7492 ctsio->kern_data_resid = 0; 7493 ctsio->kern_rel_offset = 0; 7494 7495 if (ext) { 7496 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7497 ctsio->kern_data_ptr; 7498 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7499 rtg_ext_ptr->format_type = 0x10; 7500 rtg_ext_ptr->implicit_transition_time = 0; 7501 tpg_desc = &rtg_ext_ptr->groups[0]; 7502 } else { 7503 rtg_ptr = (struct scsi_target_group_data *) 7504 ctsio->kern_data_ptr; 7505 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7506 tpg_desc = &rtg_ptr->groups[0]; 7507 } 7508 7509 mtx_lock(&softc->ctl_lock); 7510 pg = softc->port_offset / CTL_MAX_PORTS; 7511 if (softc->flags & CTL_FLAG_ACTIVE_SHELF) { 7512 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) { 7513 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7514 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 7515 } else if (lun->flags & CTL_LUN_PRIMARY_SC) { 7516 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7517 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7518 } else { 7519 gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7520 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7521 } 7522 } else { 7523 gs = TPG_ASYMMETRIC_ACCESS_STANDBY; 7524 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7525 } 7526 for (g = 0; g < num_target_port_groups; g++) { 7527 tpg_desc->pref_state = (g == pg) ? gs : os; 7528 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP; 7529 scsi_ulto2b(g + 1, tpg_desc->target_port_group); 7530 tpg_desc->status = TPG_IMPLICIT; 7531 pc = 0; 7532 STAILQ_FOREACH(port, &softc->port_list, links) { 7533 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7534 continue; 7535 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 7536 continue; 7537 p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; 7538 scsi_ulto2b(p, tpg_desc->descriptors[pc]. 
7539 relative_target_port_identifier); 7540 pc++; 7541 } 7542 tpg_desc->target_port_count = pc; 7543 tpg_desc = (struct scsi_target_port_group_descriptor *) 7544 &tpg_desc->descriptors[pc]; 7545 } 7546 mtx_unlock(&softc->ctl_lock); 7547 7548 ctl_set_success(ctsio); 7549 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7550 ctsio->be_move_done = ctl_config_move_done; 7551 ctl_datamove((union ctl_io *)ctsio); 7552 return(retval); 7553 } 7554 7555 int 7556 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7557 { 7558 struct ctl_lun *lun; 7559 struct scsi_report_supported_opcodes *cdb; 7560 const struct ctl_cmd_entry *entry, *sentry; 7561 struct scsi_report_supported_opcodes_all *all; 7562 struct scsi_report_supported_opcodes_descr *descr; 7563 struct scsi_report_supported_opcodes_one *one; 7564 int retval; 7565 int alloc_len, total_len; 7566 int opcode, service_action, i, j, num; 7567 7568 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7569 7570 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7571 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7572 7573 retval = CTL_RETVAL_COMPLETE; 7574 7575 opcode = cdb->requested_opcode; 7576 service_action = scsi_2btoul(cdb->requested_service_action); 7577 switch (cdb->options & RSO_OPTIONS_MASK) { 7578 case RSO_OPTIONS_ALL: 7579 num = 0; 7580 for (i = 0; i < 256; i++) { 7581 entry = &ctl_cmd_table[i]; 7582 if (entry->flags & CTL_CMD_FLAG_SA5) { 7583 for (j = 0; j < 32; j++) { 7584 sentry = &((const struct ctl_cmd_entry *) 7585 entry->execute)[j]; 7586 if (ctl_cmd_applicable( 7587 lun->be_lun->lun_type, sentry)) 7588 num++; 7589 } 7590 } else { 7591 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7592 entry)) 7593 num++; 7594 } 7595 } 7596 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7597 num * sizeof(struct scsi_report_supported_opcodes_descr); 7598 break; 7599 case RSO_OPTIONS_OC: 7600 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7601 ctl_set_invalid_field(/*ctsio*/ ctsio, 7602 /*sks_valid*/ 1, 7603 /*command*/ 1, 7604 /*field*/ 2, 7605 /*bit_valid*/ 1, 7606 /*bit*/ 2); 7607 ctl_done((union ctl_io *)ctsio); 7608 return (CTL_RETVAL_COMPLETE); 7609 } 7610 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7611 break; 7612 case RSO_OPTIONS_OC_SA: 7613 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7614 service_action >= 32) { 7615 ctl_set_invalid_field(/*ctsio*/ ctsio, 7616 /*sks_valid*/ 1, 7617 /*command*/ 1, 7618 /*field*/ 2, 7619 /*bit_valid*/ 1, 7620 /*bit*/ 2); 7621 ctl_done((union ctl_io *)ctsio); 7622 return (CTL_RETVAL_COMPLETE); 7623 } 7624 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7625 break; 7626 default: 7627 ctl_set_invalid_field(/*ctsio*/ ctsio, 7628 /*sks_valid*/ 1, 7629 /*command*/ 1, 7630 /*field*/ 2, 7631 /*bit_valid*/ 1, 7632 /*bit*/ 2); 7633 ctl_done((union ctl_io *)ctsio); 7634 return (CTL_RETVAL_COMPLETE); 7635 } 7636 7637 alloc_len = scsi_4btoul(cdb->length); 7638 7639 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7640 7641 ctsio->kern_sg_entries = 0; 7642 7643 if (total_len < alloc_len) { 7644 ctsio->residual = alloc_len - total_len; 7645 ctsio->kern_data_len = total_len; 7646 ctsio->kern_total_len = total_len; 7647 } else { 7648 ctsio->residual = 0; 7649 ctsio->kern_data_len = alloc_len; 7650 ctsio->kern_total_len = alloc_len; 7651 } 7652 ctsio->kern_data_resid = 0; 7653 ctsio->kern_rel_offset = 0; 7654 7655 switch (cdb->options & RSO_OPTIONS_MASK) { 7656 case RSO_OPTIONS_ALL: 7657 all = (struct 
scsi_report_supported_opcodes_all *) 7658 ctsio->kern_data_ptr; 7659 num = 0; 7660 for (i = 0; i < 256; i++) { 7661 entry = &ctl_cmd_table[i]; 7662 if (entry->flags & CTL_CMD_FLAG_SA5) { 7663 for (j = 0; j < 32; j++) { 7664 sentry = &((const struct ctl_cmd_entry *) 7665 entry->execute)[j]; 7666 if (!ctl_cmd_applicable( 7667 lun->be_lun->lun_type, sentry)) 7668 continue; 7669 descr = &all->descr[num++]; 7670 descr->opcode = i; 7671 scsi_ulto2b(j, descr->service_action); 7672 descr->flags = RSO_SERVACTV; 7673 scsi_ulto2b(sentry->length, 7674 descr->cdb_length); 7675 } 7676 } else { 7677 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7678 entry)) 7679 continue; 7680 descr = &all->descr[num++]; 7681 descr->opcode = i; 7682 scsi_ulto2b(0, descr->service_action); 7683 descr->flags = 0; 7684 scsi_ulto2b(entry->length, descr->cdb_length); 7685 } 7686 } 7687 scsi_ulto4b( 7688 num * sizeof(struct scsi_report_supported_opcodes_descr), 7689 all->length); 7690 break; 7691 case RSO_OPTIONS_OC: 7692 one = (struct scsi_report_supported_opcodes_one *) 7693 ctsio->kern_data_ptr; 7694 entry = &ctl_cmd_table[opcode]; 7695 goto fill_one; 7696 case RSO_OPTIONS_OC_SA: 7697 one = (struct scsi_report_supported_opcodes_one *) 7698 ctsio->kern_data_ptr; 7699 entry = &ctl_cmd_table[opcode]; 7700 entry = &((const struct ctl_cmd_entry *) 7701 entry->execute)[service_action]; 7702 fill_one: 7703 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7704 one->support = 3; 7705 scsi_ulto2b(entry->length, one->cdb_length); 7706 one->cdb_usage[0] = opcode; 7707 memcpy(&one->cdb_usage[1], entry->usage, 7708 entry->length - 1); 7709 } else 7710 one->support = 1; 7711 break; 7712 } 7713 7714 ctl_set_success(ctsio); 7715 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7716 ctsio->be_move_done = ctl_config_move_done; 7717 ctl_datamove((union ctl_io *)ctsio); 7718 return(retval); 7719 } 7720 7721 int 7722 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7723 { 7724 struct scsi_report_supported_tmf *cdb; 7725 struct scsi_report_supported_tmf_data *data; 7726 int retval; 7727 int alloc_len, total_len; 7728 7729 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7730 7731 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7732 7733 retval = CTL_RETVAL_COMPLETE; 7734 7735 total_len = sizeof(struct scsi_report_supported_tmf_data); 7736 alloc_len = scsi_4btoul(cdb->length); 7737 7738 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7739 7740 ctsio->kern_sg_entries = 0; 7741 7742 if (total_len < alloc_len) { 7743 ctsio->residual = alloc_len - total_len; 7744 ctsio->kern_data_len = total_len; 7745 ctsio->kern_total_len = total_len; 7746 } else { 7747 ctsio->residual = 0; 7748 ctsio->kern_data_len = alloc_len; 7749 ctsio->kern_total_len = alloc_len; 7750 } 7751 ctsio->kern_data_resid = 0; 7752 ctsio->kern_rel_offset = 0; 7753 7754 data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr; 7755 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_TRS; 7756 data->byte2 |= RST_ITNRS; 7757 7758 ctl_set_success(ctsio); 7759 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7760 ctsio->be_move_done = ctl_config_move_done; 7761 ctl_datamove((union ctl_io *)ctsio); 7762 return (retval); 7763 } 7764 7765 int 7766 ctl_report_timestamp(struct ctl_scsiio *ctsio) 7767 { 7768 struct scsi_report_timestamp *cdb; 7769 struct scsi_report_timestamp_data *data; 7770 struct timeval tv; 7771 int64_t timestamp; 7772 int retval; 7773 int alloc_len, total_len; 7774 7775 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7776 7777 cdb = 
(struct scsi_report_timestamp *)ctsio->cdb; 7778 7779 retval = CTL_RETVAL_COMPLETE; 7780 7781 total_len = sizeof(struct scsi_report_timestamp_data); 7782 alloc_len = scsi_4btoul(cdb->length); 7783 7784 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7785 7786 ctsio->kern_sg_entries = 0; 7787 7788 if (total_len < alloc_len) { 7789 ctsio->residual = alloc_len - total_len; 7790 ctsio->kern_data_len = total_len; 7791 ctsio->kern_total_len = total_len; 7792 } else { 7793 ctsio->residual = 0; 7794 ctsio->kern_data_len = alloc_len; 7795 ctsio->kern_total_len = alloc_len; 7796 } 7797 ctsio->kern_data_resid = 0; 7798 ctsio->kern_rel_offset = 0; 7799 7800 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7801 scsi_ulto2b(sizeof(*data) - 2, data->length); 7802 data->origin = RTS_ORIG_OUTSIDE; 7803 getmicrotime(&tv); 7804 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7805 scsi_ulto4b(timestamp >> 16, data->timestamp); 7806 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7807 7808 ctl_set_success(ctsio); 7809 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7810 ctsio->be_move_done = ctl_config_move_done; 7811 ctl_datamove((union ctl_io *)ctsio); 7812 return (retval); 7813 } 7814 7815 int 7816 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7817 { 7818 struct scsi_per_res_in *cdb; 7819 int alloc_len, total_len = 0; 7820 /* struct scsi_per_res_in_rsrv in_data; */ 7821 struct ctl_lun *lun; 7822 struct ctl_softc *softc; 7823 uint64_t key; 7824 7825 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7826 7827 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7828 7829 alloc_len = scsi_2btoul(cdb->length); 7830 7831 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7832 softc = lun->ctl_softc; 7833 7834 retry: 7835 mtx_lock(&lun->lun_lock); 7836 switch (cdb->action) { 7837 case SPRI_RK: /* read keys */ 7838 total_len = sizeof(struct scsi_per_res_in_keys) + 7839 lun->pr_key_count * 7840 sizeof(struct scsi_per_res_key); 7841 break; 7842 case SPRI_RR: /* read reservation */ 7843 if (lun->flags & CTL_LUN_PR_RESERVED) 7844 total_len = sizeof(struct scsi_per_res_in_rsrv); 7845 else 7846 total_len = sizeof(struct scsi_per_res_in_header); 7847 break; 7848 case SPRI_RC: /* report capabilities */ 7849 total_len = sizeof(struct scsi_per_res_cap); 7850 break; 7851 case SPRI_RS: /* read full status */ 7852 total_len = sizeof(struct scsi_per_res_in_header) + 7853 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7854 lun->pr_key_count; 7855 break; 7856 default: 7857 panic("Invalid PR type %x", cdb->action); 7858 } 7859 mtx_unlock(&lun->lun_lock); 7860 7861 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7862 7863 if (total_len < alloc_len) { 7864 ctsio->residual = alloc_len - total_len; 7865 ctsio->kern_data_len = total_len; 7866 ctsio->kern_total_len = total_len; 7867 } else { 7868 ctsio->residual = 0; 7869 ctsio->kern_data_len = alloc_len; 7870 ctsio->kern_total_len = alloc_len; 7871 } 7872 7873 ctsio->kern_data_resid = 0; 7874 ctsio->kern_rel_offset = 0; 7875 ctsio->kern_sg_entries = 0; 7876 7877 mtx_lock(&lun->lun_lock); 7878 switch (cdb->action) { 7879 case SPRI_RK: { // read keys 7880 struct scsi_per_res_in_keys *res_keys; 7881 int i, key_count; 7882 7883 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7884 7885 /* 7886 * We had to drop the lock to allocate our buffer, which 7887 * leaves time for someone to come in with another 7888 * persistent reservation. 
(That is unlikely, though, 7889 * since this should be the only persistent reservation 7890 * command active right now.) 7891 */ 7892 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7893 (lun->pr_key_count * 7894 sizeof(struct scsi_per_res_key)))){ 7895 mtx_unlock(&lun->lun_lock); 7896 free(ctsio->kern_data_ptr, M_CTL); 7897 printf("%s: reservation length changed, retrying\n", 7898 __func__); 7899 goto retry; 7900 } 7901 7902 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); 7903 7904 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7905 lun->pr_key_count, res_keys->header.length); 7906 7907 for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) { 7908 if ((key = ctl_get_prkey(lun, i)) == 0) 7909 continue; 7910 7911 /* 7912 * We used lun->pr_key_count to calculate the 7913 * size to allocate. If it turns out the number of 7914 * initiators with the registered flag set is 7915 * larger than that (i.e. they haven't been kept in 7916 * sync), we've got a problem. 7917 */ 7918 if (key_count >= lun->pr_key_count) { 7919 #ifdef NEEDTOPORT 7920 csevent_log(CSC_CTL | CSC_SHELF_SW | 7921 CTL_PR_ERROR, 7922 csevent_LogType_Fault, 7923 csevent_AlertLevel_Yellow, 7924 csevent_FRU_ShelfController, 7925 csevent_FRU_Firmware, 7926 csevent_FRU_Unknown, 7927 "registered keys %d >= key " 7928 "count %d", key_count, 7929 lun->pr_key_count); 7930 #endif 7931 key_count++; 7932 continue; 7933 } 7934 scsi_u64to8b(key, res_keys->keys[key_count].key); 7935 key_count++; 7936 } 7937 break; 7938 } 7939 case SPRI_RR: { // read reservation 7940 struct scsi_per_res_in_rsrv *res; 7941 int tmp_len, header_only; 7942 7943 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7944 7945 scsi_ulto4b(lun->PRGeneration, res->header.generation); 7946 7947 if (lun->flags & CTL_LUN_PR_RESERVED) 7948 { 7949 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7950 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7951 res->header.length); 7952 header_only = 0; 7953 } else { 7954 tmp_len = sizeof(struct scsi_per_res_in_header); 7955 scsi_ulto4b(0, res->header.length); 7956 header_only = 1; 7957 } 7958 7959 /* 7960 * We had to drop the lock to allocate our buffer, which 7961 * leaves time for someone to come in with another 7962 * persistent reservation. (That is unlikely, though, 7963 * since this should be the only persistent reservation 7964 * command active right now.) 7965 */ 7966 if (tmp_len != total_len) { 7967 mtx_unlock(&lun->lun_lock); 7968 free(ctsio->kern_data_ptr, M_CTL); 7969 printf("%s: reservation status changed, retrying\n", 7970 __func__); 7971 goto retry; 7972 } 7973 7974 /* 7975 * No reservation held, so we're done. 7976 */ 7977 if (header_only != 0) 7978 break; 7979 7980 /* 7981 * If the registration is an All Registrants type, the key 7982 * is 0, since it doesn't really matter. 
7983 */ 7984 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7985 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7986 res->data.reservation); 7987 } 7988 res->data.scopetype = lun->res_type; 7989 break; 7990 } 7991 case SPRI_RC: //report capabilities 7992 { 7993 struct scsi_per_res_cap *res_cap; 7994 uint16_t type_mask; 7995 7996 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7997 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7998 res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_5; 7999 type_mask = SPRI_TM_WR_EX_AR | 8000 SPRI_TM_EX_AC_RO | 8001 SPRI_TM_WR_EX_RO | 8002 SPRI_TM_EX_AC | 8003 SPRI_TM_WR_EX | 8004 SPRI_TM_EX_AC_AR; 8005 scsi_ulto2b(type_mask, res_cap->type_mask); 8006 break; 8007 } 8008 case SPRI_RS: { // read full status 8009 struct scsi_per_res_in_full *res_status; 8010 struct scsi_per_res_in_full_desc *res_desc; 8011 struct ctl_port *port; 8012 int i, len; 8013 8014 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 8015 8016 /* 8017 * We had to drop the lock to allocate our buffer, which 8018 * leaves time for someone to come in with another 8019 * persistent reservation. (That is unlikely, though, 8020 * since this should be the only persistent reservation 8021 * command active right now.) 8022 */ 8023 if (total_len < (sizeof(struct scsi_per_res_in_header) + 8024 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 8025 lun->pr_key_count)){ 8026 mtx_unlock(&lun->lun_lock); 8027 free(ctsio->kern_data_ptr, M_CTL); 8028 printf("%s: reservation length changed, retrying\n", 8029 __func__); 8030 goto retry; 8031 } 8032 8033 scsi_ulto4b(lun->PRGeneration, res_status->header.generation); 8034 8035 res_desc = &res_status->desc[0]; 8036 for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) { 8037 if ((key = ctl_get_prkey(lun, i)) == 0) 8038 continue; 8039 8040 scsi_u64to8b(key, res_desc->res_key.key); 8041 if ((lun->flags & CTL_LUN_PR_RESERVED) && 8042 (lun->pr_res_idx == i || 8043 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 8044 res_desc->flags = SPRI_FULL_R_HOLDER; 8045 res_desc->scopetype = lun->res_type; 8046 } 8047 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 8048 res_desc->rel_trgt_port_id); 8049 len = 0; 8050 port = softc->ctl_ports[ 8051 ctl_port_idx(i / CTL_MAX_INIT_PER_PORT)]; 8052 if (port != NULL) 8053 len = ctl_create_iid(port, 8054 i % CTL_MAX_INIT_PER_PORT, 8055 res_desc->transport_id); 8056 scsi_ulto4b(len, res_desc->additional_length); 8057 res_desc = (struct scsi_per_res_in_full_desc *) 8058 &res_desc->transport_id[len]; 8059 } 8060 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 8061 res_status->header.length); 8062 break; 8063 } 8064 default: 8065 /* 8066 * This is a bug, because we just checked for this above, 8067 * and should have returned an error. 8068 */ 8069 panic("Invalid PR type %x", cdb->action); 8070 break; /* NOTREACHED */ 8071 } 8072 mtx_unlock(&lun->lun_lock); 8073 8074 ctl_set_success(ctsio); 8075 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8076 ctsio->be_move_done = ctl_config_move_done; 8077 ctl_datamove((union ctl_io *)ctsio); 8078 return (CTL_RETVAL_COMPLETE); 8079 } 8080 8081 static void 8082 ctl_est_res_ua(struct ctl_lun *lun, uint32_t residx, ctl_ua_type ua) 8083 { 8084 int off = lun->ctl_softc->persis_offset; 8085 8086 if (residx >= off && residx < off + CTL_MAX_INITIATORS) 8087 ctl_est_ua(lun, residx - off, ua); 8088 } 8089 8090 /* 8091 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 8092 * it should return. 
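 *
 * A sketch of what the code below does: for the PREEMPT and PREEMPT AND
 * ABORT service actions it removes the preempted registrations (those whose
 * key matches sa_res_key, or everyone else when the reservation is
 * all-registrants and sa_res_key is zero), posting a Registrations Preempted
 * UA to the affected initiators, re-establishes or re-types the reservation
 * when the current holder is preempted, and mirrors the change to the peer
 * SC with a CTL_MSG_PERS_ACTION message on CTL_HA_CHAN_CTL.  Every non-zero
 * return path has already completed the ctsio (ctl_done() was called).
 * XXX: the !found branch in the not-reserved case returns
 * CTL_RETVAL_COMPLETE (0) even though it has already completed the I/O,
 * which looks inconsistent with that convention.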
8093 */ 8094 static int 8095 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 8096 uint64_t sa_res_key, uint8_t type, uint32_t residx, 8097 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 8098 struct scsi_per_res_out_parms* param) 8099 { 8100 union ctl_ha_msg persis_io; 8101 int retval, i; 8102 int isc_retval; 8103 8104 retval = 0; 8105 8106 mtx_lock(&lun->lun_lock); 8107 if (sa_res_key == 0) { 8108 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8109 /* validate scope and type */ 8110 if ((cdb->scope_type & SPR_SCOPE_MASK) != 8111 SPR_LU_SCOPE) { 8112 mtx_unlock(&lun->lun_lock); 8113 ctl_set_invalid_field(/*ctsio*/ ctsio, 8114 /*sks_valid*/ 1, 8115 /*command*/ 1, 8116 /*field*/ 2, 8117 /*bit_valid*/ 1, 8118 /*bit*/ 4); 8119 ctl_done((union ctl_io *)ctsio); 8120 return (1); 8121 } 8122 8123 if (type>8 || type==2 || type==4 || type==0) { 8124 mtx_unlock(&lun->lun_lock); 8125 ctl_set_invalid_field(/*ctsio*/ ctsio, 8126 /*sks_valid*/ 1, 8127 /*command*/ 1, 8128 /*field*/ 2, 8129 /*bit_valid*/ 1, 8130 /*bit*/ 0); 8131 ctl_done((union ctl_io *)ctsio); 8132 return (1); 8133 } 8134 8135 /* 8136 * Unregister everybody else and build UA for 8137 * them 8138 */ 8139 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8140 if (i == residx || ctl_get_prkey(lun, i) == 0) 8141 continue; 8142 8143 ctl_clr_prkey(lun, i); 8144 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8145 } 8146 lun->pr_key_count = 1; 8147 lun->res_type = type; 8148 if (lun->res_type != SPR_TYPE_WR_EX_AR 8149 && lun->res_type != SPR_TYPE_EX_AC_AR) 8150 lun->pr_res_idx = residx; 8151 8152 /* send msg to other side */ 8153 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8154 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8155 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8156 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8157 persis_io.pr.pr_info.res_type = type; 8158 memcpy(persis_io.pr.pr_info.sa_res_key, 8159 param->serv_act_res_key, 8160 sizeof(param->serv_act_res_key)); 8161 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8162 &persis_io, sizeof(persis_io), 0)) > 8163 CTL_HA_STATUS_SUCCESS) { 8164 printf("CTL:Persis Out error returned " 8165 "from ctl_ha_msg_send %d\n", 8166 isc_retval); 8167 } 8168 } else { 8169 /* not all registrants */ 8170 mtx_unlock(&lun->lun_lock); 8171 free(ctsio->kern_data_ptr, M_CTL); 8172 ctl_set_invalid_field(ctsio, 8173 /*sks_valid*/ 1, 8174 /*command*/ 0, 8175 /*field*/ 8, 8176 /*bit_valid*/ 0, 8177 /*bit*/ 0); 8178 ctl_done((union ctl_io *)ctsio); 8179 return (1); 8180 } 8181 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8182 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 8183 int found = 0; 8184 8185 if (res_key == sa_res_key) { 8186 /* special case */ 8187 /* 8188 * The spec implies this is not good but doesn't 8189 * say what to do. There are two choices either 8190 * generate a res conflict or check condition 8191 * with illegal field in parameter data. Since 8192 * that is what is done when the sa_res_key is 8193 * zero I'll take that approach since this has 8194 * to do with the sa_res_key. 
8195 */ 8196 mtx_unlock(&lun->lun_lock); 8197 free(ctsio->kern_data_ptr, M_CTL); 8198 ctl_set_invalid_field(ctsio, 8199 /*sks_valid*/ 1, 8200 /*command*/ 0, 8201 /*field*/ 8, 8202 /*bit_valid*/ 0, 8203 /*bit*/ 0); 8204 ctl_done((union ctl_io *)ctsio); 8205 return (1); 8206 } 8207 8208 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8209 if (ctl_get_prkey(lun, i) != sa_res_key) 8210 continue; 8211 8212 found = 1; 8213 ctl_clr_prkey(lun, i); 8214 lun->pr_key_count--; 8215 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8216 } 8217 if (!found) { 8218 mtx_unlock(&lun->lun_lock); 8219 free(ctsio->kern_data_ptr, M_CTL); 8220 ctl_set_reservation_conflict(ctsio); 8221 ctl_done((union ctl_io *)ctsio); 8222 return (CTL_RETVAL_COMPLETE); 8223 } 8224 /* send msg to other side */ 8225 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8226 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8227 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8228 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8229 persis_io.pr.pr_info.res_type = type; 8230 memcpy(persis_io.pr.pr_info.sa_res_key, 8231 param->serv_act_res_key, 8232 sizeof(param->serv_act_res_key)); 8233 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8234 &persis_io, sizeof(persis_io), 0)) > 8235 CTL_HA_STATUS_SUCCESS) { 8236 printf("CTL:Persis Out error returned from " 8237 "ctl_ha_msg_send %d\n", isc_retval); 8238 } 8239 } else { 8240 /* Reserved but not all registrants */ 8241 /* sa_res_key is res holder */ 8242 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 8243 /* validate scope and type */ 8244 if ((cdb->scope_type & SPR_SCOPE_MASK) != 8245 SPR_LU_SCOPE) { 8246 mtx_unlock(&lun->lun_lock); 8247 ctl_set_invalid_field(/*ctsio*/ ctsio, 8248 /*sks_valid*/ 1, 8249 /*command*/ 1, 8250 /*field*/ 2, 8251 /*bit_valid*/ 1, 8252 /*bit*/ 4); 8253 ctl_done((union ctl_io *)ctsio); 8254 return (1); 8255 } 8256 8257 if (type>8 || type==2 || type==4 || type==0) { 8258 mtx_unlock(&lun->lun_lock); 8259 ctl_set_invalid_field(/*ctsio*/ ctsio, 8260 /*sks_valid*/ 1, 8261 /*command*/ 1, 8262 /*field*/ 2, 8263 /*bit_valid*/ 1, 8264 /*bit*/ 0); 8265 ctl_done((union ctl_io *)ctsio); 8266 return (1); 8267 } 8268 8269 /* 8270 * Do the following: 8271 * if sa_res_key != res_key remove all 8272 * registrants w/sa_res_key and generate UA 8273 * for these registrants(Registrations 8274 * Preempted) if it wasn't an exclusive 8275 * reservation generate UA(Reservations 8276 * Preempted) for all other registered nexuses 8277 * if the type has changed. Establish the new 8278 * reservation and holder. If res_key and 8279 * sa_res_key are the same do the above 8280 * except don't unregister the res holder. 
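 *
 * Restated, matching the code below: every other registrant whose key
 * matches sa_res_key is unregistered and gets a Registrations Preempted UA
 * (CTL_UA_REG_PREEMPT); if the reservation type changes away from a
 * registrants-only type, the surviving registrants get a Reservations
 * Preempted/Released UA (CTL_UA_RES_RELEASE); finally the reservation is
 * re-established with the new type, held by this nexus (or by all
 * registrants for the _AR types).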
8281 */ 8282 8283 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8284 if (i == residx || ctl_get_prkey(lun, i) == 0) 8285 continue; 8286 8287 if (sa_res_key == ctl_get_prkey(lun, i)) { 8288 ctl_clr_prkey(lun, i); 8289 lun->pr_key_count--; 8290 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8291 } else if (type != lun->res_type 8292 && (lun->res_type == SPR_TYPE_WR_EX_RO 8293 || lun->res_type ==SPR_TYPE_EX_AC_RO)){ 8294 ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE); 8295 } 8296 } 8297 lun->res_type = type; 8298 if (lun->res_type != SPR_TYPE_WR_EX_AR 8299 && lun->res_type != SPR_TYPE_EX_AC_AR) 8300 lun->pr_res_idx = residx; 8301 else 8302 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8303 8304 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8305 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8306 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8307 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8308 persis_io.pr.pr_info.res_type = type; 8309 memcpy(persis_io.pr.pr_info.sa_res_key, 8310 param->serv_act_res_key, 8311 sizeof(param->serv_act_res_key)); 8312 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8313 &persis_io, sizeof(persis_io), 0)) > 8314 CTL_HA_STATUS_SUCCESS) { 8315 printf("CTL:Persis Out error returned " 8316 "from ctl_ha_msg_send %d\n", 8317 isc_retval); 8318 } 8319 } else { 8320 /* 8321 * sa_res_key is not the res holder just 8322 * remove registrants 8323 */ 8324 int found=0; 8325 8326 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8327 if (sa_res_key != ctl_get_prkey(lun, i)) 8328 continue; 8329 8330 found = 1; 8331 ctl_clr_prkey(lun, i); 8332 lun->pr_key_count--; 8333 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8334 } 8335 8336 if (!found) { 8337 mtx_unlock(&lun->lun_lock); 8338 free(ctsio->kern_data_ptr, M_CTL); 8339 ctl_set_reservation_conflict(ctsio); 8340 ctl_done((union ctl_io *)ctsio); 8341 return (1); 8342 } 8343 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8344 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8345 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8346 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8347 persis_io.pr.pr_info.res_type = type; 8348 memcpy(persis_io.pr.pr_info.sa_res_key, 8349 param->serv_act_res_key, 8350 sizeof(param->serv_act_res_key)); 8351 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8352 &persis_io, sizeof(persis_io), 0)) > 8353 CTL_HA_STATUS_SUCCESS) { 8354 printf("CTL:Persis Out error returned " 8355 "from ctl_ha_msg_send %d\n", 8356 isc_retval); 8357 } 8358 } 8359 } 8360 8361 lun->PRGeneration++; 8362 mtx_unlock(&lun->lun_lock); 8363 8364 return (retval); 8365 } 8366 8367 static void 8368 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8369 { 8370 uint64_t sa_res_key; 8371 int i; 8372 8373 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8374 8375 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8376 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8377 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8378 if (sa_res_key == 0) { 8379 /* 8380 * Unregister everybody else and build UA for 8381 * them 8382 */ 8383 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8384 if (i == msg->pr.pr_info.residx || 8385 ctl_get_prkey(lun, i) == 0) 8386 continue; 8387 8388 ctl_clr_prkey(lun, i); 8389 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8390 } 8391 8392 lun->pr_key_count = 1; 8393 lun->res_type = msg->pr.pr_info.res_type; 8394 if (lun->res_type != SPR_TYPE_WR_EX_AR 8395 && lun->res_type != SPR_TYPE_EX_AC_AR) 8396 lun->pr_res_idx = msg->pr.pr_info.residx; 8397 } else { 8398 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8399 if (sa_res_key == ctl_get_prkey(lun, 
i)) 8400 continue; 8401 8402 ctl_clr_prkey(lun, i); 8403 lun->pr_key_count--; 8404 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8405 } 8406 } 8407 } else { 8408 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8409 if (i == msg->pr.pr_info.residx || 8410 ctl_get_prkey(lun, i) == 0) 8411 continue; 8412 8413 if (sa_res_key == ctl_get_prkey(lun, i)) { 8414 ctl_clr_prkey(lun, i); 8415 lun->pr_key_count--; 8416 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8417 } else if (msg->pr.pr_info.res_type != lun->res_type 8418 && (lun->res_type == SPR_TYPE_WR_EX_RO 8419 || lun->res_type == SPR_TYPE_EX_AC_RO)) { 8420 ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE); 8421 } 8422 } 8423 lun->res_type = msg->pr.pr_info.res_type; 8424 if (lun->res_type != SPR_TYPE_WR_EX_AR 8425 && lun->res_type != SPR_TYPE_EX_AC_AR) 8426 lun->pr_res_idx = msg->pr.pr_info.residx; 8427 else 8428 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8429 } 8430 lun->PRGeneration++; 8431 8432 } 8433 8434 8435 int 8436 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8437 { 8438 int retval; 8439 int isc_retval; 8440 u_int32_t param_len; 8441 struct scsi_per_res_out *cdb; 8442 struct ctl_lun *lun; 8443 struct scsi_per_res_out_parms* param; 8444 struct ctl_softc *softc; 8445 uint32_t residx; 8446 uint64_t res_key, sa_res_key, key; 8447 uint8_t type; 8448 union ctl_ha_msg persis_io; 8449 int i; 8450 8451 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8452 8453 retval = CTL_RETVAL_COMPLETE; 8454 8455 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8456 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8457 softc = lun->ctl_softc; 8458 8459 /* 8460 * We only support whole-LUN scope. The scope & type are ignored for 8461 * register, register and ignore existing key and clear. 8462 * We sometimes ignore scope and type on preempts too!! 8463 * Verify reservation type here as well. 
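 *
 * The type checks below ("type>8 || type==2 || type==4 || type==0") accept
 * only the persistent reservation types defined by SPC-3: 1 (Write
 * Exclusive), 3 (Exclusive Access), 5 and 6 (the Registrants Only variants)
 * and 7 and 8 (the All Registrants variants); 0, 2, 4 and values above 8
 * are reserved or obsolete.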
8464 */ 8465 type = cdb->scope_type & SPR_TYPE_MASK; 8466 if ((cdb->action == SPRO_RESERVE) 8467 || (cdb->action == SPRO_RELEASE)) { 8468 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8469 ctl_set_invalid_field(/*ctsio*/ ctsio, 8470 /*sks_valid*/ 1, 8471 /*command*/ 1, 8472 /*field*/ 2, 8473 /*bit_valid*/ 1, 8474 /*bit*/ 4); 8475 ctl_done((union ctl_io *)ctsio); 8476 return (CTL_RETVAL_COMPLETE); 8477 } 8478 8479 if (type>8 || type==2 || type==4 || type==0) { 8480 ctl_set_invalid_field(/*ctsio*/ ctsio, 8481 /*sks_valid*/ 1, 8482 /*command*/ 1, 8483 /*field*/ 2, 8484 /*bit_valid*/ 1, 8485 /*bit*/ 0); 8486 ctl_done((union ctl_io *)ctsio); 8487 return (CTL_RETVAL_COMPLETE); 8488 } 8489 } 8490 8491 param_len = scsi_4btoul(cdb->length); 8492 8493 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8494 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8495 ctsio->kern_data_len = param_len; 8496 ctsio->kern_total_len = param_len; 8497 ctsio->kern_data_resid = 0; 8498 ctsio->kern_rel_offset = 0; 8499 ctsio->kern_sg_entries = 0; 8500 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8501 ctsio->be_move_done = ctl_config_move_done; 8502 ctl_datamove((union ctl_io *)ctsio); 8503 8504 return (CTL_RETVAL_COMPLETE); 8505 } 8506 8507 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8508 8509 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 8510 res_key = scsi_8btou64(param->res_key.key); 8511 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8512 8513 /* 8514 * Validate the reservation key here except for SPRO_REG_IGNO 8515 * This must be done for all other service actions 8516 */ 8517 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8518 mtx_lock(&lun->lun_lock); 8519 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8520 if (res_key != key) { 8521 /* 8522 * The current key passed in doesn't match 8523 * the one the initiator previously 8524 * registered. 8525 */ 8526 mtx_unlock(&lun->lun_lock); 8527 free(ctsio->kern_data_ptr, M_CTL); 8528 ctl_set_reservation_conflict(ctsio); 8529 ctl_done((union ctl_io *)ctsio); 8530 return (CTL_RETVAL_COMPLETE); 8531 } 8532 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8533 /* 8534 * We are not registered 8535 */ 8536 mtx_unlock(&lun->lun_lock); 8537 free(ctsio->kern_data_ptr, M_CTL); 8538 ctl_set_reservation_conflict(ctsio); 8539 ctl_done((union ctl_io *)ctsio); 8540 return (CTL_RETVAL_COMPLETE); 8541 } else if (res_key != 0) { 8542 /* 8543 * We are not registered and trying to register but 8544 * the register key isn't zero. 8545 */ 8546 mtx_unlock(&lun->lun_lock); 8547 free(ctsio->kern_data_ptr, M_CTL); 8548 ctl_set_reservation_conflict(ctsio); 8549 ctl_done((union ctl_io *)ctsio); 8550 return (CTL_RETVAL_COMPLETE); 8551 } 8552 mtx_unlock(&lun->lun_lock); 8553 } 8554 8555 switch (cdb->action & SPRO_ACTION_MASK) { 8556 case SPRO_REGISTER: 8557 case SPRO_REG_IGNO: { 8558 8559 #if 0 8560 printf("Registration received\n"); 8561 #endif 8562 8563 /* 8564 * We don't support any of these options, as we report in 8565 * the read capabilities request (see 8566 * ctl_persistent_reserve_in(), above). 
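 *
 * SPEC_I_PT, ALL_TG_PT and APTPL all live in byte 20 of the PR OUT
 * parameter list (bits 3, 2 and 0 respectively), which is why the
 * invalid-field sense data built below points at field 20 with the
 * matching bit number.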
8567 */ 8568 if ((param->flags & SPR_SPEC_I_PT) 8569 || (param->flags & SPR_ALL_TG_PT) 8570 || (param->flags & SPR_APTPL)) { 8571 int bit_ptr; 8572 8573 if (param->flags & SPR_APTPL) 8574 bit_ptr = 0; 8575 else if (param->flags & SPR_ALL_TG_PT) 8576 bit_ptr = 2; 8577 else /* SPR_SPEC_I_PT */ 8578 bit_ptr = 3; 8579 8580 free(ctsio->kern_data_ptr, M_CTL); 8581 ctl_set_invalid_field(ctsio, 8582 /*sks_valid*/ 1, 8583 /*command*/ 0, 8584 /*field*/ 20, 8585 /*bit_valid*/ 1, 8586 /*bit*/ bit_ptr); 8587 ctl_done((union ctl_io *)ctsio); 8588 return (CTL_RETVAL_COMPLETE); 8589 } 8590 8591 mtx_lock(&lun->lun_lock); 8592 8593 /* 8594 * The initiator wants to clear the 8595 * key/unregister. 8596 */ 8597 if (sa_res_key == 0) { 8598 if ((res_key == 0 8599 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8600 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8601 && ctl_get_prkey(lun, residx) == 0)) { 8602 mtx_unlock(&lun->lun_lock); 8603 goto done; 8604 } 8605 8606 ctl_clr_prkey(lun, residx); 8607 lun->pr_key_count--; 8608 8609 if (residx == lun->pr_res_idx) { 8610 lun->flags &= ~CTL_LUN_PR_RESERVED; 8611 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8612 8613 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8614 || lun->res_type == SPR_TYPE_EX_AC_RO) 8615 && lun->pr_key_count) { 8616 /* 8617 * If the reservation is a registrants 8618 * only type we need to generate a UA 8619 * for other registered inits. The 8620 * sense code should be RESERVATIONS 8621 * RELEASED 8622 */ 8623 8624 for (i = 0; i < CTL_MAX_INITIATORS;i++){ 8625 if (ctl_get_prkey(lun, i + 8626 softc->persis_offset) == 0) 8627 continue; 8628 ctl_est_ua(lun, i, 8629 CTL_UA_RES_RELEASE); 8630 } 8631 } 8632 lun->res_type = 0; 8633 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8634 if (lun->pr_key_count==0) { 8635 lun->flags &= ~CTL_LUN_PR_RESERVED; 8636 lun->res_type = 0; 8637 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8638 } 8639 } 8640 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8641 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8642 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8643 persis_io.pr.pr_info.residx = residx; 8644 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8645 &persis_io, sizeof(persis_io), 0 )) > 8646 CTL_HA_STATUS_SUCCESS) { 8647 printf("CTL:Persis Out error returned from " 8648 "ctl_ha_msg_send %d\n", isc_retval); 8649 } 8650 } else /* sa_res_key != 0 */ { 8651 8652 /* 8653 * If we aren't registered currently then increment 8654 * the key count and set the registered flag. 
8655 */ 8656 ctl_alloc_prkey(lun, residx); 8657 if (ctl_get_prkey(lun, residx) == 0) 8658 lun->pr_key_count++; 8659 ctl_set_prkey(lun, residx, sa_res_key); 8660 8661 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8662 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8663 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8664 persis_io.pr.pr_info.residx = residx; 8665 memcpy(persis_io.pr.pr_info.sa_res_key, 8666 param->serv_act_res_key, 8667 sizeof(param->serv_act_res_key)); 8668 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8669 &persis_io, sizeof(persis_io), 0)) > 8670 CTL_HA_STATUS_SUCCESS) { 8671 printf("CTL:Persis Out error returned from " 8672 "ctl_ha_msg_send %d\n", isc_retval); 8673 } 8674 } 8675 lun->PRGeneration++; 8676 mtx_unlock(&lun->lun_lock); 8677 8678 break; 8679 } 8680 case SPRO_RESERVE: 8681 #if 0 8682 printf("Reserve executed type %d\n", type); 8683 #endif 8684 mtx_lock(&lun->lun_lock); 8685 if (lun->flags & CTL_LUN_PR_RESERVED) { 8686 /* 8687 * if this isn't the reservation holder and it's 8688 * not a "all registrants" type or if the type is 8689 * different then we have a conflict 8690 */ 8691 if ((lun->pr_res_idx != residx 8692 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8693 || lun->res_type != type) { 8694 mtx_unlock(&lun->lun_lock); 8695 free(ctsio->kern_data_ptr, M_CTL); 8696 ctl_set_reservation_conflict(ctsio); 8697 ctl_done((union ctl_io *)ctsio); 8698 return (CTL_RETVAL_COMPLETE); 8699 } 8700 mtx_unlock(&lun->lun_lock); 8701 } else /* create a reservation */ { 8702 /* 8703 * If it's not an "all registrants" type record 8704 * reservation holder 8705 */ 8706 if (type != SPR_TYPE_WR_EX_AR 8707 && type != SPR_TYPE_EX_AC_AR) 8708 lun->pr_res_idx = residx; /* Res holder */ 8709 else 8710 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8711 8712 lun->flags |= CTL_LUN_PR_RESERVED; 8713 lun->res_type = type; 8714 8715 mtx_unlock(&lun->lun_lock); 8716 8717 /* send msg to other side */ 8718 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8719 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8720 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8721 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8722 persis_io.pr.pr_info.res_type = type; 8723 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8724 &persis_io, sizeof(persis_io), 0)) > 8725 CTL_HA_STATUS_SUCCESS) { 8726 printf("CTL:Persis Out error returned from " 8727 "ctl_ha_msg_send %d\n", isc_retval); 8728 } 8729 } 8730 break; 8731 8732 case SPRO_RELEASE: 8733 mtx_lock(&lun->lun_lock); 8734 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8735 /* No reservation exists return good status */ 8736 mtx_unlock(&lun->lun_lock); 8737 goto done; 8738 } 8739 /* 8740 * Is this nexus a reservation holder? 8741 */ 8742 if (lun->pr_res_idx != residx 8743 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8744 /* 8745 * not a res holder return good status but 8746 * do nothing 8747 */ 8748 mtx_unlock(&lun->lun_lock); 8749 goto done; 8750 } 8751 8752 if (lun->res_type != type) { 8753 mtx_unlock(&lun->lun_lock); 8754 free(ctsio->kern_data_ptr, M_CTL); 8755 ctl_set_illegal_pr_release(ctsio); 8756 ctl_done((union ctl_io *)ctsio); 8757 return (CTL_RETVAL_COMPLETE); 8758 } 8759 8760 /* okay to release */ 8761 lun->flags &= ~CTL_LUN_PR_RESERVED; 8762 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8763 lun->res_type = 0; 8764 8765 /* 8766 * if this isn't an exclusive access 8767 * res generate UA for all other 8768 * registrants. 
8769 */ 8770 if (type != SPR_TYPE_EX_AC 8771 && type != SPR_TYPE_WR_EX) { 8772 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8773 if (i == residx || 8774 ctl_get_prkey(lun, 8775 i + softc->persis_offset) == 0) 8776 continue; 8777 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8778 } 8779 } 8780 mtx_unlock(&lun->lun_lock); 8781 /* Send msg to other side */ 8782 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8783 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8784 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8785 if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io, 8786 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 8787 printf("CTL:Persis Out error returned from " 8788 "ctl_ha_msg_send %d\n", isc_retval); 8789 } 8790 break; 8791 8792 case SPRO_CLEAR: 8793 /* send msg to other side */ 8794 8795 mtx_lock(&lun->lun_lock); 8796 lun->flags &= ~CTL_LUN_PR_RESERVED; 8797 lun->res_type = 0; 8798 lun->pr_key_count = 0; 8799 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8800 8801 ctl_clr_prkey(lun, residx); 8802 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) 8803 if (ctl_get_prkey(lun, i) != 0) { 8804 ctl_clr_prkey(lun, i); 8805 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8806 } 8807 lun->PRGeneration++; 8808 mtx_unlock(&lun->lun_lock); 8809 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8810 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8811 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8812 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8813 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 8814 printf("CTL:Persis Out error returned from " 8815 "ctl_ha_msg_send %d\n", isc_retval); 8816 } 8817 break; 8818 8819 case SPRO_PREEMPT: 8820 case SPRO_PRE_ABO: { 8821 int nretval; 8822 8823 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8824 residx, ctsio, cdb, param); 8825 if (nretval != 0) 8826 return (CTL_RETVAL_COMPLETE); 8827 break; 8828 } 8829 default: 8830 panic("Invalid PR type %x", cdb->action); 8831 } 8832 8833 done: 8834 free(ctsio->kern_data_ptr, M_CTL); 8835 ctl_set_success(ctsio); 8836 ctl_done((union ctl_io *)ctsio); 8837 8838 return (retval); 8839 } 8840 8841 /* 8842 * This routine is for handling a message from the other SC pertaining to 8843 * persistent reserve out. All the error checking will have been done 8844 * so only perorming the action need be done here to keep the two 8845 * in sync. 8846 */ 8847 static void 8848 ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg) 8849 { 8850 struct ctl_lun *lun; 8851 struct ctl_softc *softc; 8852 int i; 8853 uint32_t targ_lun; 8854 8855 softc = control_softc; 8856 8857 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8858 lun = softc->ctl_luns[targ_lun]; 8859 mtx_lock(&lun->lun_lock); 8860 switch(msg->pr.pr_info.action) { 8861 case CTL_PR_REG_KEY: 8862 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8863 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8864 lun->pr_key_count++; 8865 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8866 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8867 lun->PRGeneration++; 8868 break; 8869 8870 case CTL_PR_UNREG_KEY: 8871 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8872 lun->pr_key_count--; 8873 8874 /* XXX Need to see if the reservation has been released */ 8875 /* if so do we need to generate UA? 
*/ 8876 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8877 lun->flags &= ~CTL_LUN_PR_RESERVED; 8878 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8879 8880 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8881 || lun->res_type == SPR_TYPE_EX_AC_RO) 8882 && lun->pr_key_count) { 8883 /* 8884 * If the reservation is a registrants 8885 * only type we need to generate a UA 8886 * for other registered inits. The 8887 * sense code should be RESERVATIONS 8888 * RELEASED 8889 */ 8890 8891 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8892 if (ctl_get_prkey(lun, i + 8893 softc->persis_offset) == 0) 8894 continue; 8895 8896 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8897 } 8898 } 8899 lun->res_type = 0; 8900 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8901 if (lun->pr_key_count==0) { 8902 lun->flags &= ~CTL_LUN_PR_RESERVED; 8903 lun->res_type = 0; 8904 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8905 } 8906 } 8907 lun->PRGeneration++; 8908 break; 8909 8910 case CTL_PR_RESERVE: 8911 lun->flags |= CTL_LUN_PR_RESERVED; 8912 lun->res_type = msg->pr.pr_info.res_type; 8913 lun->pr_res_idx = msg->pr.pr_info.residx; 8914 8915 break; 8916 8917 case CTL_PR_RELEASE: 8918 /* 8919 * if this isn't an exclusive access res generate UA for all 8920 * other registrants. 8921 */ 8922 if (lun->res_type != SPR_TYPE_EX_AC 8923 && lun->res_type != SPR_TYPE_WR_EX) { 8924 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8925 if (ctl_get_prkey(lun, i + softc->persis_offset) != 0) 8926 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8927 } 8928 8929 lun->flags &= ~CTL_LUN_PR_RESERVED; 8930 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8931 lun->res_type = 0; 8932 break; 8933 8934 case CTL_PR_PREEMPT: 8935 ctl_pro_preempt_other(lun, msg); 8936 break; 8937 case CTL_PR_CLEAR: 8938 lun->flags &= ~CTL_LUN_PR_RESERVED; 8939 lun->res_type = 0; 8940 lun->pr_key_count = 0; 8941 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8942 8943 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8944 if (ctl_get_prkey(lun, i) == 0) 8945 continue; 8946 ctl_clr_prkey(lun, i); 8947 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8948 } 8949 lun->PRGeneration++; 8950 break; 8951 } 8952 8953 mtx_unlock(&lun->lun_lock); 8954 } 8955 8956 int 8957 ctl_read_write(struct ctl_scsiio *ctsio) 8958 { 8959 struct ctl_lun *lun; 8960 struct ctl_lba_len_flags *lbalen; 8961 uint64_t lba; 8962 uint32_t num_blocks; 8963 int flags, retval; 8964 int isread; 8965 8966 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8967 8968 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8969 8970 flags = 0; 8971 retval = CTL_RETVAL_COMPLETE; 8972 8973 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8974 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8975 switch (ctsio->cdb[0]) { 8976 case READ_6: 8977 case WRITE_6: { 8978 struct scsi_rw_6 *cdb; 8979 8980 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8981 8982 lba = scsi_3btoul(cdb->addr); 8983 /* only 5 bits are valid in the most significant address byte */ 8984 lba &= 0x1fffff; 8985 num_blocks = cdb->length; 8986 /* 8987 * This is correct according to SBC-2. 
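 *
 * The READ(6)/WRITE(6) transfer length is a single byte and a value of
 * zero means 256 blocks, so, for example, a length byte of 0x00 moves
 * 256 blocks while 0x01 moves just one.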
8988 */ 8989 if (num_blocks == 0) 8990 num_blocks = 256; 8991 break; 8992 } 8993 case READ_10: 8994 case WRITE_10: { 8995 struct scsi_rw_10 *cdb; 8996 8997 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8998 if (cdb->byte2 & SRW10_FUA) 8999 flags |= CTL_LLF_FUA; 9000 if (cdb->byte2 & SRW10_DPO) 9001 flags |= CTL_LLF_DPO; 9002 lba = scsi_4btoul(cdb->addr); 9003 num_blocks = scsi_2btoul(cdb->length); 9004 break; 9005 } 9006 case WRITE_VERIFY_10: { 9007 struct scsi_write_verify_10 *cdb; 9008 9009 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 9010 flags |= CTL_LLF_FUA; 9011 if (cdb->byte2 & SWV_DPO) 9012 flags |= CTL_LLF_DPO; 9013 lba = scsi_4btoul(cdb->addr); 9014 num_blocks = scsi_2btoul(cdb->length); 9015 break; 9016 } 9017 case READ_12: 9018 case WRITE_12: { 9019 struct scsi_rw_12 *cdb; 9020 9021 cdb = (struct scsi_rw_12 *)ctsio->cdb; 9022 if (cdb->byte2 & SRW12_FUA) 9023 flags |= CTL_LLF_FUA; 9024 if (cdb->byte2 & SRW12_DPO) 9025 flags |= CTL_LLF_DPO; 9026 lba = scsi_4btoul(cdb->addr); 9027 num_blocks = scsi_4btoul(cdb->length); 9028 break; 9029 } 9030 case WRITE_VERIFY_12: { 9031 struct scsi_write_verify_12 *cdb; 9032 9033 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 9034 flags |= CTL_LLF_FUA; 9035 if (cdb->byte2 & SWV_DPO) 9036 flags |= CTL_LLF_DPO; 9037 lba = scsi_4btoul(cdb->addr); 9038 num_blocks = scsi_4btoul(cdb->length); 9039 break; 9040 } 9041 case READ_16: 9042 case WRITE_16: { 9043 struct scsi_rw_16 *cdb; 9044 9045 cdb = (struct scsi_rw_16 *)ctsio->cdb; 9046 if (cdb->byte2 & SRW12_FUA) 9047 flags |= CTL_LLF_FUA; 9048 if (cdb->byte2 & SRW12_DPO) 9049 flags |= CTL_LLF_DPO; 9050 lba = scsi_8btou64(cdb->addr); 9051 num_blocks = scsi_4btoul(cdb->length); 9052 break; 9053 } 9054 case WRITE_ATOMIC_16: { 9055 struct scsi_rw_16 *cdb; 9056 9057 if (lun->be_lun->atomicblock == 0) { 9058 ctl_set_invalid_opcode(ctsio); 9059 ctl_done((union ctl_io *)ctsio); 9060 return (CTL_RETVAL_COMPLETE); 9061 } 9062 9063 cdb = (struct scsi_rw_16 *)ctsio->cdb; 9064 if (cdb->byte2 & SRW12_FUA) 9065 flags |= CTL_LLF_FUA; 9066 if (cdb->byte2 & SRW12_DPO) 9067 flags |= CTL_LLF_DPO; 9068 lba = scsi_8btou64(cdb->addr); 9069 num_blocks = scsi_4btoul(cdb->length); 9070 if (num_blocks > lun->be_lun->atomicblock) { 9071 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 9072 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 9073 /*bit*/ 0); 9074 ctl_done((union ctl_io *)ctsio); 9075 return (CTL_RETVAL_COMPLETE); 9076 } 9077 break; 9078 } 9079 case WRITE_VERIFY_16: { 9080 struct scsi_write_verify_16 *cdb; 9081 9082 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 9083 flags |= CTL_LLF_FUA; 9084 if (cdb->byte2 & SWV_DPO) 9085 flags |= CTL_LLF_DPO; 9086 lba = scsi_8btou64(cdb->addr); 9087 num_blocks = scsi_4btoul(cdb->length); 9088 break; 9089 } 9090 default: 9091 /* 9092 * We got a command we don't support. This shouldn't 9093 * happen, commands should be filtered out above us. 9094 */ 9095 ctl_set_invalid_opcode(ctsio); 9096 ctl_done((union ctl_io *)ctsio); 9097 9098 return (CTL_RETVAL_COMPLETE); 9099 break; /* NOTREACHED */ 9100 } 9101 9102 /* 9103 * The first check is to make sure we're in bounds, the second 9104 * check is to catch wrap-around problems. If the lba + num blocks 9105 * is less than the lba, then we've wrapped around and the block 9106 * range is invalid anyway. 
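 *
 * For example, lba = 0xfffffffffffffff0 with num_blocks = 0x20 wraps so
 * that lba + num_blocks is a small value; the first test alone would miss
 * it, but the second ((lba + num_blocks) < lba) rejects it.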
9107 */ 9108 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9109 || ((lba + num_blocks) < lba)) { 9110 ctl_set_lba_out_of_range(ctsio); 9111 ctl_done((union ctl_io *)ctsio); 9112 return (CTL_RETVAL_COMPLETE); 9113 } 9114 9115 /* 9116 * According to SBC-3, a transfer length of 0 is not an error. 9117 * Note that this cannot happen with WRITE(6) or READ(6), since 0 9118 * translates to 256 blocks for those commands. 9119 */ 9120 if (num_blocks == 0) { 9121 ctl_set_success(ctsio); 9122 ctl_done((union ctl_io *)ctsio); 9123 return (CTL_RETVAL_COMPLETE); 9124 } 9125 9126 /* Set FUA and/or DPO if caches are disabled. */ 9127 if (isread) { 9128 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 9129 SCP_RCD) != 0) 9130 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 9131 } else { 9132 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 9133 SCP_WCE) == 0) 9134 flags |= CTL_LLF_FUA; 9135 } 9136 9137 lbalen = (struct ctl_lba_len_flags *) 9138 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9139 lbalen->lba = lba; 9140 lbalen->len = num_blocks; 9141 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 9142 9143 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9144 ctsio->kern_rel_offset = 0; 9145 9146 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 9147 9148 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9149 9150 return (retval); 9151 } 9152 9153 static int 9154 ctl_cnw_cont(union ctl_io *io) 9155 { 9156 struct ctl_scsiio *ctsio; 9157 struct ctl_lun *lun; 9158 struct ctl_lba_len_flags *lbalen; 9159 int retval; 9160 9161 ctsio = &io->scsiio; 9162 ctsio->io_hdr.status = CTL_STATUS_NONE; 9163 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 9164 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9165 lbalen = (struct ctl_lba_len_flags *) 9166 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9167 lbalen->flags &= ~CTL_LLF_COMPARE; 9168 lbalen->flags |= CTL_LLF_WRITE; 9169 9170 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 9171 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9172 return (retval); 9173 } 9174 9175 int 9176 ctl_cnw(struct ctl_scsiio *ctsio) 9177 { 9178 struct ctl_lun *lun; 9179 struct ctl_lba_len_flags *lbalen; 9180 uint64_t lba; 9181 uint32_t num_blocks; 9182 int flags, retval; 9183 9184 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9185 9186 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 9187 9188 flags = 0; 9189 retval = CTL_RETVAL_COMPLETE; 9190 9191 switch (ctsio->cdb[0]) { 9192 case COMPARE_AND_WRITE: { 9193 struct scsi_compare_and_write *cdb; 9194 9195 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 9196 if (cdb->byte2 & SRW10_FUA) 9197 flags |= CTL_LLF_FUA; 9198 if (cdb->byte2 & SRW10_DPO) 9199 flags |= CTL_LLF_DPO; 9200 lba = scsi_8btou64(cdb->addr); 9201 num_blocks = cdb->length; 9202 break; 9203 } 9204 default: 9205 /* 9206 * We got a command we don't support. This shouldn't 9207 * happen, commands should be filtered out above us. 9208 */ 9209 ctl_set_invalid_opcode(ctsio); 9210 ctl_done((union ctl_io *)ctsio); 9211 9212 return (CTL_RETVAL_COMPLETE); 9213 break; /* NOTREACHED */ 9214 } 9215 9216 /* 9217 * The first check is to make sure we're in bounds, the second 9218 * check is to catch wrap-around problems. If the lba + num blocks 9219 * is less than the lba, then we've wrapped around and the block 9220 * range is invalid anyway. 
9221 */ 9222 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9223 || ((lba + num_blocks) < lba)) { 9224 ctl_set_lba_out_of_range(ctsio); 9225 ctl_done((union ctl_io *)ctsio); 9226 return (CTL_RETVAL_COMPLETE); 9227 } 9228 9229 /* 9230 * According to SBC-3, a transfer length of 0 is not an error. 9231 */ 9232 if (num_blocks == 0) { 9233 ctl_set_success(ctsio); 9234 ctl_done((union ctl_io *)ctsio); 9235 return (CTL_RETVAL_COMPLETE); 9236 } 9237 9238 /* Set FUA if write cache is disabled. */ 9239 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 9240 SCP_WCE) == 0) 9241 flags |= CTL_LLF_FUA; 9242 9243 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 9244 ctsio->kern_rel_offset = 0; 9245 9246 /* 9247 * Set the IO_CONT flag, so that if this I/O gets passed to 9248 * ctl_data_submit_done(), it'll get passed back to 9249 * ctl_ctl_cnw_cont() for further processing. 9250 */ 9251 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 9252 ctsio->io_cont = ctl_cnw_cont; 9253 9254 lbalen = (struct ctl_lba_len_flags *) 9255 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9256 lbalen->lba = lba; 9257 lbalen->len = num_blocks; 9258 lbalen->flags = CTL_LLF_COMPARE | flags; 9259 9260 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 9261 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9262 return (retval); 9263 } 9264 9265 int 9266 ctl_verify(struct ctl_scsiio *ctsio) 9267 { 9268 struct ctl_lun *lun; 9269 struct ctl_lba_len_flags *lbalen; 9270 uint64_t lba; 9271 uint32_t num_blocks; 9272 int bytchk, flags; 9273 int retval; 9274 9275 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9276 9277 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 9278 9279 bytchk = 0; 9280 flags = CTL_LLF_FUA; 9281 retval = CTL_RETVAL_COMPLETE; 9282 9283 switch (ctsio->cdb[0]) { 9284 case VERIFY_10: { 9285 struct scsi_verify_10 *cdb; 9286 9287 cdb = (struct scsi_verify_10 *)ctsio->cdb; 9288 if (cdb->byte2 & SVFY_BYTCHK) 9289 bytchk = 1; 9290 if (cdb->byte2 & SVFY_DPO) 9291 flags |= CTL_LLF_DPO; 9292 lba = scsi_4btoul(cdb->addr); 9293 num_blocks = scsi_2btoul(cdb->length); 9294 break; 9295 } 9296 case VERIFY_12: { 9297 struct scsi_verify_12 *cdb; 9298 9299 cdb = (struct scsi_verify_12 *)ctsio->cdb; 9300 if (cdb->byte2 & SVFY_BYTCHK) 9301 bytchk = 1; 9302 if (cdb->byte2 & SVFY_DPO) 9303 flags |= CTL_LLF_DPO; 9304 lba = scsi_4btoul(cdb->addr); 9305 num_blocks = scsi_4btoul(cdb->length); 9306 break; 9307 } 9308 case VERIFY_16: { 9309 struct scsi_rw_16 *cdb; 9310 9311 cdb = (struct scsi_rw_16 *)ctsio->cdb; 9312 if (cdb->byte2 & SVFY_BYTCHK) 9313 bytchk = 1; 9314 if (cdb->byte2 & SVFY_DPO) 9315 flags |= CTL_LLF_DPO; 9316 lba = scsi_8btou64(cdb->addr); 9317 num_blocks = scsi_4btoul(cdb->length); 9318 break; 9319 } 9320 default: 9321 /* 9322 * We got a command we don't support. This shouldn't 9323 * happen, commands should be filtered out above us. 9324 */ 9325 ctl_set_invalid_opcode(ctsio); 9326 ctl_done((union ctl_io *)ctsio); 9327 return (CTL_RETVAL_COMPLETE); 9328 } 9329 9330 /* 9331 * The first check is to make sure we're in bounds, the second 9332 * check is to catch wrap-around problems. If the lba + num blocks 9333 * is less than the lba, then we've wrapped around and the block 9334 * range is invalid anyway. 
9335 */ 9336 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9337 || ((lba + num_blocks) < lba)) { 9338 ctl_set_lba_out_of_range(ctsio); 9339 ctl_done((union ctl_io *)ctsio); 9340 return (CTL_RETVAL_COMPLETE); 9341 } 9342 9343 /* 9344 * According to SBC-3, a transfer length of 0 is not an error. 9345 */ 9346 if (num_blocks == 0) { 9347 ctl_set_success(ctsio); 9348 ctl_done((union ctl_io *)ctsio); 9349 return (CTL_RETVAL_COMPLETE); 9350 } 9351 9352 lbalen = (struct ctl_lba_len_flags *) 9353 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9354 lbalen->lba = lba; 9355 lbalen->len = num_blocks; 9356 if (bytchk) { 9357 lbalen->flags = CTL_LLF_COMPARE | flags; 9358 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9359 } else { 9360 lbalen->flags = CTL_LLF_VERIFY | flags; 9361 ctsio->kern_total_len = 0; 9362 } 9363 ctsio->kern_rel_offset = 0; 9364 9365 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 9366 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9367 return (retval); 9368 } 9369 9370 int 9371 ctl_report_luns(struct ctl_scsiio *ctsio) 9372 { 9373 struct ctl_softc *softc = control_softc; 9374 struct scsi_report_luns *cdb; 9375 struct scsi_report_luns_data *lun_data; 9376 struct ctl_lun *lun, *request_lun; 9377 struct ctl_port *port; 9378 int num_luns, retval; 9379 uint32_t alloc_len, lun_datalen; 9380 int num_filled, well_known; 9381 uint32_t initidx, targ_lun_id, lun_id; 9382 9383 retval = CTL_RETVAL_COMPLETE; 9384 well_known = 0; 9385 9386 cdb = (struct scsi_report_luns *)ctsio->cdb; 9387 9388 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9389 9390 mtx_lock(&softc->ctl_lock); 9391 num_luns = softc->num_luns; 9392 mtx_unlock(&softc->ctl_lock); 9393 9394 switch (cdb->select_report) { 9395 case RPL_REPORT_DEFAULT: 9396 case RPL_REPORT_ALL: 9397 break; 9398 case RPL_REPORT_WELLKNOWN: 9399 well_known = 1; 9400 num_luns = 0; 9401 break; 9402 default: 9403 ctl_set_invalid_field(ctsio, 9404 /*sks_valid*/ 1, 9405 /*command*/ 1, 9406 /*field*/ 2, 9407 /*bit_valid*/ 0, 9408 /*bit*/ 0); 9409 ctl_done((union ctl_io *)ctsio); 9410 return (retval); 9411 break; /* NOTREACHED */ 9412 } 9413 9414 alloc_len = scsi_4btoul(cdb->length); 9415 /* 9416 * The initiator has to allocate at least 16 bytes for this request, 9417 * so he can at least get the header and the first LUN. Otherwise 9418 * we reject the request (per SPC-3 rev 14, section 6.21). 
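 *
 * (16 bytes is the 8-byte REPORT LUNS header plus one 8-byte LUN entry,
 * which is what the sizeof() sum in the check below adds up to.)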
9419 */ 9420 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9421 sizeof(struct scsi_report_luns_lundata))) { 9422 ctl_set_invalid_field(ctsio, 9423 /*sks_valid*/ 1, 9424 /*command*/ 1, 9425 /*field*/ 6, 9426 /*bit_valid*/ 0, 9427 /*bit*/ 0); 9428 ctl_done((union ctl_io *)ctsio); 9429 return (retval); 9430 } 9431 9432 request_lun = (struct ctl_lun *) 9433 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9434 port = ctl_io_port(&ctsio->io_hdr); 9435 9436 lun_datalen = sizeof(*lun_data) + 9437 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9438 9439 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9440 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9441 ctsio->kern_sg_entries = 0; 9442 9443 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9444 9445 mtx_lock(&softc->ctl_lock); 9446 for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) { 9447 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 9448 if (lun_id >= CTL_MAX_LUNS) 9449 continue; 9450 lun = softc->ctl_luns[lun_id]; 9451 if (lun == NULL) 9452 continue; 9453 9454 if (targ_lun_id <= 0xff) { 9455 /* 9456 * Peripheral addressing method, bus number 0. 9457 */ 9458 lun_data->luns[num_filled].lundata[0] = 9459 RPL_LUNDATA_ATYP_PERIPH; 9460 lun_data->luns[num_filled].lundata[1] = targ_lun_id; 9461 num_filled++; 9462 } else if (targ_lun_id <= 0x3fff) { 9463 /* 9464 * Flat addressing method. 9465 */ 9466 lun_data->luns[num_filled].lundata[0] = 9467 RPL_LUNDATA_ATYP_FLAT | (targ_lun_id >> 8); 9468 lun_data->luns[num_filled].lundata[1] = 9469 (targ_lun_id & 0xff); 9470 num_filled++; 9471 } else if (targ_lun_id <= 0xffffff) { 9472 /* 9473 * Extended flat addressing method. 9474 */ 9475 lun_data->luns[num_filled].lundata[0] = 9476 RPL_LUNDATA_ATYP_EXTLUN | 0x12; 9477 scsi_ulto3b(targ_lun_id, 9478 &lun_data->luns[num_filled].lundata[1]); 9479 num_filled++; 9480 } else { 9481 printf("ctl_report_luns: bogus LUN number %jd, " 9482 "skipping\n", (intmax_t)targ_lun_id); 9483 } 9484 /* 9485 * According to SPC-3, rev 14 section 6.21: 9486 * 9487 * "The execution of a REPORT LUNS command to any valid and 9488 * installed logical unit shall clear the REPORTED LUNS DATA 9489 * HAS CHANGED unit attention condition for all logical 9490 * units of that target with respect to the requesting 9491 * initiator. A valid and installed logical unit is one 9492 * having a PERIPHERAL QUALIFIER of 000b in the standard 9493 * INQUIRY data (see 6.4.2)." 9494 * 9495 * If request_lun is NULL, the LUN this report luns command 9496 * was issued to is either disabled or doesn't exist. In that 9497 * case, we shouldn't clear any pending lun change unit 9498 * attention. 9499 */ 9500 if (request_lun != NULL) { 9501 mtx_lock(&lun->lun_lock); 9502 ctl_clr_ua(lun, initidx, CTL_UA_RES_RELEASE); 9503 mtx_unlock(&lun->lun_lock); 9504 } 9505 } 9506 mtx_unlock(&softc->ctl_lock); 9507 9508 /* 9509 * It's quite possible that we've returned fewer LUNs than we allocated 9510 * space for. Trim it. 
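 *
 * (num_luns was sampled before walking the LUN table, and the per-port
 * LUN map can hide LUNs from this port, so num_filled may legitimately
 * end up smaller.)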
9511 */ 9512 lun_datalen = sizeof(*lun_data) + 9513 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9514 9515 if (lun_datalen < alloc_len) { 9516 ctsio->residual = alloc_len - lun_datalen; 9517 ctsio->kern_data_len = lun_datalen; 9518 ctsio->kern_total_len = lun_datalen; 9519 } else { 9520 ctsio->residual = 0; 9521 ctsio->kern_data_len = alloc_len; 9522 ctsio->kern_total_len = alloc_len; 9523 } 9524 ctsio->kern_data_resid = 0; 9525 ctsio->kern_rel_offset = 0; 9526 ctsio->kern_sg_entries = 0; 9527 9528 /* 9529 * We set this to the actual data length, regardless of how much 9530 * space we actually have to return results. If the user looks at 9531 * this value, he'll know whether or not he allocated enough space 9532 * and reissue the command if necessary. We don't support well 9533 * known logical units, so if the user asks for that, return none. 9534 */ 9535 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9536 9537 /* 9538 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9539 * this request. 9540 */ 9541 ctl_set_success(ctsio); 9542 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9543 ctsio->be_move_done = ctl_config_move_done; 9544 ctl_datamove((union ctl_io *)ctsio); 9545 return (retval); 9546 } 9547 9548 int 9549 ctl_request_sense(struct ctl_scsiio *ctsio) 9550 { 9551 struct scsi_request_sense *cdb; 9552 struct scsi_sense_data *sense_ptr; 9553 struct ctl_softc *ctl_softc; 9554 struct ctl_lun *lun; 9555 uint32_t initidx; 9556 int have_error; 9557 scsi_sense_data_type sense_format; 9558 ctl_ua_type ua_type; 9559 9560 cdb = (struct scsi_request_sense *)ctsio->cdb; 9561 9562 ctl_softc = control_softc; 9563 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9564 9565 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9566 9567 /* 9568 * Determine which sense format the user wants. 9569 */ 9570 if (cdb->byte2 & SRS_DESC) 9571 sense_format = SSD_TYPE_DESC; 9572 else 9573 sense_format = SSD_TYPE_FIXED; 9574 9575 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9576 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9577 ctsio->kern_sg_entries = 0; 9578 9579 /* 9580 * struct scsi_sense_data, which is currently set to 256 bytes, is 9581 * larger than the largest allowed value for the length field in the 9582 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9583 */ 9584 ctsio->residual = 0; 9585 ctsio->kern_data_len = cdb->length; 9586 ctsio->kern_total_len = cdb->length; 9587 9588 ctsio->kern_data_resid = 0; 9589 ctsio->kern_rel_offset = 0; 9590 ctsio->kern_sg_entries = 0; 9591 9592 /* 9593 * If we don't have a LUN, we don't have any pending sense. 9594 */ 9595 if (lun == NULL) 9596 goto no_sense; 9597 9598 have_error = 0; 9599 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9600 /* 9601 * Check for pending sense, and then for pending unit attentions. 9602 * Pending sense gets returned first, then pending unit attentions. 9603 */ 9604 mtx_lock(&lun->lun_lock); 9605 #ifdef CTL_WITH_CA 9606 if (ctl_is_set(lun->have_ca, initidx)) { 9607 scsi_sense_data_type stored_format; 9608 9609 /* 9610 * Check to see which sense format was used for the stored 9611 * sense data. 9612 */ 9613 stored_format = scsi_sense_type(&lun->pending_sense[initidx]); 9614 9615 /* 9616 * If the user requested a different sense format than the 9617 * one we stored, then we need to convert it to the other 9618 * format. If we're going from descriptor to fixed format 9619 * sense data, we may lose things in translation, depending 9620 * on what options were used. 
9621 * 9622 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9623 * for some reason we'll just copy it out as-is. 9624 */ 9625 if ((stored_format == SSD_TYPE_FIXED) 9626 && (sense_format == SSD_TYPE_DESC)) 9627 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9628 &lun->pending_sense[initidx], 9629 (struct scsi_sense_data_desc *)sense_ptr); 9630 else if ((stored_format == SSD_TYPE_DESC) 9631 && (sense_format == SSD_TYPE_FIXED)) 9632 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9633 &lun->pending_sense[initidx], 9634 (struct scsi_sense_data_fixed *)sense_ptr); 9635 else 9636 memcpy(sense_ptr, &lun->pending_sense[initidx], 9637 MIN(sizeof(*sense_ptr), 9638 sizeof(lun->pending_sense[initidx]))); 9639 9640 ctl_clear_mask(lun->have_ca, initidx); 9641 have_error = 1; 9642 } else 9643 #endif 9644 { 9645 ua_type = ctl_build_ua(lun, initidx, sense_ptr, sense_format); 9646 if (ua_type != CTL_UA_NONE) 9647 have_error = 1; 9648 if (ua_type == CTL_UA_LUN_CHANGE) { 9649 mtx_unlock(&lun->lun_lock); 9650 mtx_lock(&ctl_softc->ctl_lock); 9651 ctl_clear_ua(ctl_softc, initidx, ua_type); 9652 mtx_unlock(&ctl_softc->ctl_lock); 9653 mtx_lock(&lun->lun_lock); 9654 } 9655 9656 } 9657 mtx_unlock(&lun->lun_lock); 9658 9659 /* 9660 * We already have a pending error, return it. 9661 */ 9662 if (have_error != 0) { 9663 /* 9664 * We report the SCSI status as OK, since the status of the 9665 * request sense command itself is OK. 9666 * We report 0 for the sense length, because we aren't doing 9667 * autosense in this case. We're reporting sense as 9668 * parameter data. 9669 */ 9670 ctl_set_success(ctsio); 9671 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9672 ctsio->be_move_done = ctl_config_move_done; 9673 ctl_datamove((union ctl_io *)ctsio); 9674 return (CTL_RETVAL_COMPLETE); 9675 } 9676 9677 no_sense: 9678 9679 /* 9680 * No sense information to report, so we report that everything is 9681 * okay. 9682 */ 9683 ctl_set_sense_data(sense_ptr, 9684 lun, 9685 sense_format, 9686 /*current_error*/ 1, 9687 /*sense_key*/ SSD_KEY_NO_SENSE, 9688 /*asc*/ 0x00, 9689 /*ascq*/ 0x00, 9690 SSD_ELEM_NONE); 9691 9692 /* 9693 * We report 0 for the sense length, because we aren't doing 9694 * autosense in this case. We're reporting sense as parameter data. 9695 */ 9696 ctl_set_success(ctsio); 9697 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9698 ctsio->be_move_done = ctl_config_move_done; 9699 ctl_datamove((union ctl_io *)ctsio); 9700 return (CTL_RETVAL_COMPLETE); 9701 } 9702 9703 int 9704 ctl_tur(struct ctl_scsiio *ctsio) 9705 { 9706 9707 CTL_DEBUG_PRINT(("ctl_tur\n")); 9708 9709 ctl_set_success(ctsio); 9710 ctl_done((union ctl_io *)ctsio); 9711 9712 return (CTL_RETVAL_COMPLETE); 9713 } 9714 9715 #ifdef notyet 9716 static int 9717 ctl_cmddt_inquiry(struct ctl_scsiio *ctsio) 9718 { 9719 9720 } 9721 #endif 9722 9723 /* 9724 * SCSI VPD page 0x00, the Supported VPD Pages page. 
9725 */ 9726 static int 9727 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9728 { 9729 struct scsi_vpd_supported_pages *pages; 9730 int sup_page_size; 9731 struct ctl_lun *lun; 9732 int p; 9733 9734 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9735 9736 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9737 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9738 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9739 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9740 ctsio->kern_sg_entries = 0; 9741 9742 if (sup_page_size < alloc_len) { 9743 ctsio->residual = alloc_len - sup_page_size; 9744 ctsio->kern_data_len = sup_page_size; 9745 ctsio->kern_total_len = sup_page_size; 9746 } else { 9747 ctsio->residual = 0; 9748 ctsio->kern_data_len = alloc_len; 9749 ctsio->kern_total_len = alloc_len; 9750 } 9751 ctsio->kern_data_resid = 0; 9752 ctsio->kern_rel_offset = 0; 9753 ctsio->kern_sg_entries = 0; 9754 9755 /* 9756 * The control device is always connected. The disk device, on the 9757 * other hand, may not be online all the time. Need to change this 9758 * to figure out whether the disk device is actually online or not. 9759 */ 9760 if (lun != NULL) 9761 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9762 lun->be_lun->lun_type; 9763 else 9764 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9765 9766 p = 0; 9767 /* Supported VPD pages */ 9768 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9769 /* Serial Number */ 9770 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9771 /* Device Identification */ 9772 pages->page_list[p++] = SVPD_DEVICE_ID; 9773 /* Extended INQUIRY Data */ 9774 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9775 /* Mode Page Policy */ 9776 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9777 /* SCSI Ports */ 9778 pages->page_list[p++] = SVPD_SCSI_PORTS; 9779 /* Third-party Copy */ 9780 pages->page_list[p++] = SVPD_SCSI_TPC; 9781 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9782 /* Block limits */ 9783 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9784 /* Block Device Characteristics */ 9785 pages->page_list[p++] = SVPD_BDC; 9786 /* Logical Block Provisioning */ 9787 pages->page_list[p++] = SVPD_LBP; 9788 } 9789 pages->length = p; 9790 9791 ctl_set_success(ctsio); 9792 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9793 ctsio->be_move_done = ctl_config_move_done; 9794 ctl_datamove((union ctl_io *)ctsio); 9795 return (CTL_RETVAL_COMPLETE); 9796 } 9797 9798 /* 9799 * SCSI VPD page 0x80, the Unit Serial Number page. 9800 */ 9801 static int 9802 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9803 { 9804 struct scsi_vpd_unit_serial_number *sn_ptr; 9805 struct ctl_lun *lun; 9806 int data_len; 9807 9808 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9809 9810 data_len = 4 + CTL_SN_LEN; 9811 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9812 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9813 if (data_len < alloc_len) { 9814 ctsio->residual = alloc_len - data_len; 9815 ctsio->kern_data_len = data_len; 9816 ctsio->kern_total_len = data_len; 9817 } else { 9818 ctsio->residual = 0; 9819 ctsio->kern_data_len = alloc_len; 9820 ctsio->kern_total_len = alloc_len; 9821 } 9822 ctsio->kern_data_resid = 0; 9823 ctsio->kern_rel_offset = 0; 9824 ctsio->kern_sg_entries = 0; 9825 9826 /* 9827 * The control device is always connected. The disk device, on the 9828 * other hand, may not be online all the time. 
Need to change this 9829 * to figure out whether the disk device is actually online or not. 9830 */ 9831 if (lun != NULL) 9832 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9833 lun->be_lun->lun_type; 9834 else 9835 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9836 9837 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9838 sn_ptr->length = CTL_SN_LEN; 9839 /* 9840 * If we don't have a LUN, we just leave the serial number as 9841 * all spaces. 9842 */ 9843 if (lun != NULL) { 9844 strncpy((char *)sn_ptr->serial_num, 9845 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9846 } else 9847 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9848 9849 ctl_set_success(ctsio); 9850 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9851 ctsio->be_move_done = ctl_config_move_done; 9852 ctl_datamove((union ctl_io *)ctsio); 9853 return (CTL_RETVAL_COMPLETE); 9854 } 9855 9856 9857 /* 9858 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 9859 */ 9860 static int 9861 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9862 { 9863 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9864 struct ctl_lun *lun; 9865 int data_len; 9866 9867 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9868 9869 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9870 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9871 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9872 ctsio->kern_sg_entries = 0; 9873 9874 if (data_len < alloc_len) { 9875 ctsio->residual = alloc_len - data_len; 9876 ctsio->kern_data_len = data_len; 9877 ctsio->kern_total_len = data_len; 9878 } else { 9879 ctsio->residual = 0; 9880 ctsio->kern_data_len = alloc_len; 9881 ctsio->kern_total_len = alloc_len; 9882 } 9883 ctsio->kern_data_resid = 0; 9884 ctsio->kern_rel_offset = 0; 9885 ctsio->kern_sg_entries = 0; 9886 9887 /* 9888 * The control device is always connected. The disk device, on the 9889 * other hand, may not be online all the time. 9890 */ 9891 if (lun != NULL) 9892 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9893 lun->be_lun->lun_type; 9894 else 9895 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9896 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9897 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9898 /* 9899 * We support head of queue, ordered and simple tags. 9900 */ 9901 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9902 /* 9903 * Volatile cache supported. 9904 */ 9905 eid_ptr->flags3 = SVPD_EID_V_SUP; 9906 9907 /* 9908 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9909 * attention for a particular IT nexus on all LUNs once we report 9910 * it to that nexus once. This bit is required as of SPC-4. 9911 */ 9912 eid_ptr->flags4 = SVPD_EID_LUICLT; 9913 9914 /* 9915 * XXX KDM in order to correctly answer this, we would need 9916 * information from the SIM to determine how much sense data it 9917 * can send. So this would really be a path inquiry field, most 9918 * likely. This can be set to a maximum of 252 according to SPC-4, 9919 * but the hardware may or may not be able to support that much. 9920 * 0 just means that the maximum sense data length is not reported. 
9921 */ 9922 eid_ptr->max_sense_length = 0; 9923 9924 ctl_set_success(ctsio); 9925 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9926 ctsio->be_move_done = ctl_config_move_done; 9927 ctl_datamove((union ctl_io *)ctsio); 9928 return (CTL_RETVAL_COMPLETE); 9929 } 9930 9931 static int 9932 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9933 { 9934 struct scsi_vpd_mode_page_policy *mpp_ptr; 9935 struct ctl_lun *lun; 9936 int data_len; 9937 9938 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9939 9940 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9941 sizeof(struct scsi_vpd_mode_page_policy_descr); 9942 9943 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9944 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9945 ctsio->kern_sg_entries = 0; 9946 9947 if (data_len < alloc_len) { 9948 ctsio->residual = alloc_len - data_len; 9949 ctsio->kern_data_len = data_len; 9950 ctsio->kern_total_len = data_len; 9951 } else { 9952 ctsio->residual = 0; 9953 ctsio->kern_data_len = alloc_len; 9954 ctsio->kern_total_len = alloc_len; 9955 } 9956 ctsio->kern_data_resid = 0; 9957 ctsio->kern_rel_offset = 0; 9958 ctsio->kern_sg_entries = 0; 9959 9960 /* 9961 * The control device is always connected. The disk device, on the 9962 * other hand, may not be online all the time. 9963 */ 9964 if (lun != NULL) 9965 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9966 lun->be_lun->lun_type; 9967 else 9968 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9969 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9970 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9971 mpp_ptr->descr[0].page_code = 0x3f; 9972 mpp_ptr->descr[0].subpage_code = 0xff; 9973 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9974 9975 ctl_set_success(ctsio); 9976 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9977 ctsio->be_move_done = ctl_config_move_done; 9978 ctl_datamove((union ctl_io *)ctsio); 9979 return (CTL_RETVAL_COMPLETE); 9980 } 9981 9982 /* 9983 * SCSI VPD page 0x83, the Device Identification page. 
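 *
 * We return, in order: the LUN's own designator (when present), the port
 * designator, a Relative Target Port identifier, a Target Port Group
 * identifier, and finally the target device designator, as assembled below.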
9984 */ 9985 static int 9986 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9987 { 9988 struct scsi_vpd_device_id *devid_ptr; 9989 struct scsi_vpd_id_descriptor *desc; 9990 struct ctl_softc *softc; 9991 struct ctl_lun *lun; 9992 struct ctl_port *port; 9993 int data_len; 9994 uint8_t proto; 9995 9996 softc = control_softc; 9997 9998 port = softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]; 9999 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10000 10001 data_len = sizeof(struct scsi_vpd_device_id) + 10002 sizeof(struct scsi_vpd_id_descriptor) + 10003 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 10004 sizeof(struct scsi_vpd_id_descriptor) + 10005 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 10006 if (lun && lun->lun_devid) 10007 data_len += lun->lun_devid->len; 10008 if (port->port_devid) 10009 data_len += port->port_devid->len; 10010 if (port->target_devid) 10011 data_len += port->target_devid->len; 10012 10013 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10014 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 10015 ctsio->kern_sg_entries = 0; 10016 10017 if (data_len < alloc_len) { 10018 ctsio->residual = alloc_len - data_len; 10019 ctsio->kern_data_len = data_len; 10020 ctsio->kern_total_len = data_len; 10021 } else { 10022 ctsio->residual = 0; 10023 ctsio->kern_data_len = alloc_len; 10024 ctsio->kern_total_len = alloc_len; 10025 } 10026 ctsio->kern_data_resid = 0; 10027 ctsio->kern_rel_offset = 0; 10028 ctsio->kern_sg_entries = 0; 10029 10030 /* 10031 * The control device is always connected. The disk device, on the 10032 * other hand, may not be online all the time. 10033 */ 10034 if (lun != NULL) 10035 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10036 lun->be_lun->lun_type; 10037 else 10038 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10039 devid_ptr->page_code = SVPD_DEVICE_ID; 10040 scsi_ulto2b(data_len - 4, devid_ptr->length); 10041 10042 if (port->port_type == CTL_PORT_FC) 10043 proto = SCSI_PROTO_FC << 4; 10044 else if (port->port_type == CTL_PORT_ISCSI) 10045 proto = SCSI_PROTO_ISCSI << 4; 10046 else 10047 proto = SCSI_PROTO_SPI << 4; 10048 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 10049 10050 /* 10051 * We're using a LUN association here. i.e., this device ID is a 10052 * per-LUN identifier. 10053 */ 10054 if (lun && lun->lun_devid) { 10055 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 10056 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 10057 lun->lun_devid->len); 10058 } 10059 10060 /* 10061 * This is for the WWPN which is a port association. 
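 * (That is, the designator describes the target port through which this
 * LUN is being accessed, not the logical unit itself.)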
10062 */ 10063 if (port->port_devid) { 10064 memcpy(desc, port->port_devid->data, port->port_devid->len); 10065 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 10066 port->port_devid->len); 10067 } 10068 10069 /* 10070 * This is for the Relative Target Port(type 4h) identifier 10071 */ 10072 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 10073 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 10074 SVPD_ID_TYPE_RELTARG; 10075 desc->length = 4; 10076 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 10077 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 10078 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 10079 10080 /* 10081 * This is for the Target Port Group(type 5h) identifier 10082 */ 10083 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 10084 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 10085 SVPD_ID_TYPE_TPORTGRP; 10086 desc->length = 4; 10087 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS + 1, 10088 &desc->identifier[2]); 10089 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 10090 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 10091 10092 /* 10093 * This is for the Target identifier 10094 */ 10095 if (port->target_devid) { 10096 memcpy(desc, port->target_devid->data, port->target_devid->len); 10097 } 10098 10099 ctl_set_success(ctsio); 10100 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10101 ctsio->be_move_done = ctl_config_move_done; 10102 ctl_datamove((union ctl_io *)ctsio); 10103 return (CTL_RETVAL_COMPLETE); 10104 } 10105 10106 static int 10107 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 10108 { 10109 struct ctl_softc *softc = control_softc; 10110 struct scsi_vpd_scsi_ports *sp; 10111 struct scsi_vpd_port_designation *pd; 10112 struct scsi_vpd_port_designation_cont *pdc; 10113 struct ctl_lun *lun; 10114 struct ctl_port *port; 10115 int data_len, num_target_ports, iid_len, id_len, g, pg, p; 10116 int num_target_port_groups; 10117 10118 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10119 10120 if (softc->is_single) 10121 num_target_port_groups = 1; 10122 else 10123 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 10124 num_target_ports = 0; 10125 iid_len = 0; 10126 id_len = 0; 10127 mtx_lock(&softc->ctl_lock); 10128 STAILQ_FOREACH(port, &softc->port_list, links) { 10129 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 10130 continue; 10131 if (lun != NULL && 10132 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 10133 continue; 10134 num_target_ports++; 10135 if (port->init_devid) 10136 iid_len += port->init_devid->len; 10137 if (port->port_devid) 10138 id_len += port->port_devid->len; 10139 } 10140 mtx_unlock(&softc->ctl_lock); 10141 10142 data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_port_groups * 10143 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 10144 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 10145 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10146 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 10147 ctsio->kern_sg_entries = 0; 10148 10149 if (data_len < alloc_len) { 10150 ctsio->residual = alloc_len - data_len; 10151 ctsio->kern_data_len = data_len; 10152 ctsio->kern_total_len = data_len; 10153 } else { 10154 ctsio->residual = 0; 10155 ctsio->kern_data_len = alloc_len; 10156 ctsio->kern_total_len = alloc_len; 10157 } 10158 ctsio->kern_data_resid = 0; 10159 ctsio->kern_rel_offset = 0; 10160 ctsio->kern_sg_entries = 0; 10161 10162 /* 10163 * The control 
device is always connected. The disk device, on the 10164 * other hand, may not be online all the time. Need to change this 10165 * to figure out whether the disk device is actually online or not. 10166 */ 10167 if (lun != NULL) 10168 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 10169 lun->be_lun->lun_type; 10170 else 10171 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10172 10173 sp->page_code = SVPD_SCSI_PORTS; 10174 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 10175 sp->page_length); 10176 pd = &sp->design[0]; 10177 10178 mtx_lock(&softc->ctl_lock); 10179 pg = softc->port_offset / CTL_MAX_PORTS; 10180 for (g = 0; g < num_target_port_groups; g++) { 10181 STAILQ_FOREACH(port, &softc->port_list, links) { 10182 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 10183 continue; 10184 if (lun != NULL && 10185 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 10186 continue; 10187 p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; 10188 scsi_ulto2b(p, pd->relative_port_id); 10189 if (port->init_devid && g == pg) { 10190 iid_len = port->init_devid->len; 10191 memcpy(pd->initiator_transportid, 10192 port->init_devid->data, port->init_devid->len); 10193 } else 10194 iid_len = 0; 10195 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 10196 pdc = (struct scsi_vpd_port_designation_cont *) 10197 (&pd->initiator_transportid[iid_len]); 10198 if (port->port_devid && g == pg) { 10199 id_len = port->port_devid->len; 10200 memcpy(pdc->target_port_descriptors, 10201 port->port_devid->data, port->port_devid->len); 10202 } else 10203 id_len = 0; 10204 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 10205 pd = (struct scsi_vpd_port_designation *) 10206 ((uint8_t *)pdc->target_port_descriptors + id_len); 10207 } 10208 } 10209 mtx_unlock(&softc->ctl_lock); 10210 10211 ctl_set_success(ctsio); 10212 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10213 ctsio->be_move_done = ctl_config_move_done; 10214 ctl_datamove((union ctl_io *)ctsio); 10215 return (CTL_RETVAL_COMPLETE); 10216 } 10217 10218 static int 10219 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 10220 { 10221 struct scsi_vpd_block_limits *bl_ptr; 10222 struct ctl_lun *lun; 10223 int bs; 10224 10225 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10226 10227 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 10228 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 10229 ctsio->kern_sg_entries = 0; 10230 10231 if (sizeof(*bl_ptr) < alloc_len) { 10232 ctsio->residual = alloc_len - sizeof(*bl_ptr); 10233 ctsio->kern_data_len = sizeof(*bl_ptr); 10234 ctsio->kern_total_len = sizeof(*bl_ptr); 10235 } else { 10236 ctsio->residual = 0; 10237 ctsio->kern_data_len = alloc_len; 10238 ctsio->kern_total_len = alloc_len; 10239 } 10240 ctsio->kern_data_resid = 0; 10241 ctsio->kern_rel_offset = 0; 10242 ctsio->kern_sg_entries = 0; 10243 10244 /* 10245 * The control device is always connected. The disk device, on the 10246 * other hand, may not be online all the time. Need to change this 10247 * to figure out whether the disk device is actually online or not. 
10248 */ 10249 if (lun != NULL) 10250 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10251 lun->be_lun->lun_type; 10252 else 10253 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10254 10255 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 10256 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 10257 bl_ptr->max_cmp_write_len = 0xff; 10258 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 10259 if (lun != NULL) { 10260 bs = lun->be_lun->blocksize; 10261 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 10262 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 10263 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt); 10264 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt); 10265 if (lun->be_lun->ublockexp != 0) { 10266 scsi_ulto4b((1 << lun->be_lun->ublockexp), 10267 bl_ptr->opt_unmap_grain); 10268 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 10269 bl_ptr->unmap_grain_align); 10270 } 10271 } 10272 scsi_ulto4b(lun->be_lun->atomicblock, 10273 bl_ptr->max_atomic_transfer_length); 10274 scsi_ulto4b(0, bl_ptr->atomic_alignment); 10275 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 10276 } 10277 scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length); 10278 10279 ctl_set_success(ctsio); 10280 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10281 ctsio->be_move_done = ctl_config_move_done; 10282 ctl_datamove((union ctl_io *)ctsio); 10283 return (CTL_RETVAL_COMPLETE); 10284 } 10285 10286 static int 10287 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 10288 { 10289 struct scsi_vpd_block_device_characteristics *bdc_ptr; 10290 struct ctl_lun *lun; 10291 const char *value; 10292 u_int i; 10293 10294 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10295 10296 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 10297 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 10298 ctsio->kern_sg_entries = 0; 10299 10300 if (sizeof(*bdc_ptr) < alloc_len) { 10301 ctsio->residual = alloc_len - sizeof(*bdc_ptr); 10302 ctsio->kern_data_len = sizeof(*bdc_ptr); 10303 ctsio->kern_total_len = sizeof(*bdc_ptr); 10304 } else { 10305 ctsio->residual = 0; 10306 ctsio->kern_data_len = alloc_len; 10307 ctsio->kern_total_len = alloc_len; 10308 } 10309 ctsio->kern_data_resid = 0; 10310 ctsio->kern_rel_offset = 0; 10311 ctsio->kern_sg_entries = 0; 10312 10313 /* 10314 * The control device is always connected. The disk device, on the 10315 * other hand, may not be online all the time. Need to change this 10316 * to figure out whether the disk device is actually online or not. 
10317 */ 10318 if (lun != NULL) 10319 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10320 lun->be_lun->lun_type; 10321 else 10322 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10323 bdc_ptr->page_code = SVPD_BDC; 10324 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 10325 if (lun != NULL && 10326 (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) 10327 i = strtol(value, NULL, 0); 10328 else 10329 i = CTL_DEFAULT_ROTATION_RATE; 10330 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 10331 if (lun != NULL && 10332 (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL) 10333 i = strtol(value, NULL, 0); 10334 else 10335 i = 0; 10336 bdc_ptr->wab_wac_ff = (i & 0x0f); 10337 bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; 10338 10339 ctl_set_success(ctsio); 10340 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10341 ctsio->be_move_done = ctl_config_move_done; 10342 ctl_datamove((union ctl_io *)ctsio); 10343 return (CTL_RETVAL_COMPLETE); 10344 } 10345 10346 static int 10347 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 10348 { 10349 struct scsi_vpd_logical_block_prov *lbp_ptr; 10350 struct ctl_lun *lun; 10351 10352 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10353 10354 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 10355 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 10356 ctsio->kern_sg_entries = 0; 10357 10358 if (sizeof(*lbp_ptr) < alloc_len) { 10359 ctsio->residual = alloc_len - sizeof(*lbp_ptr); 10360 ctsio->kern_data_len = sizeof(*lbp_ptr); 10361 ctsio->kern_total_len = sizeof(*lbp_ptr); 10362 } else { 10363 ctsio->residual = 0; 10364 ctsio->kern_data_len = alloc_len; 10365 ctsio->kern_total_len = alloc_len; 10366 } 10367 ctsio->kern_data_resid = 0; 10368 ctsio->kern_rel_offset = 0; 10369 ctsio->kern_sg_entries = 0; 10370 10371 /* 10372 * The control device is always connected. The disk device, on the 10373 * other hand, may not be online all the time. Need to change this 10374 * to figure out whether the disk device is actually online or not. 10375 */ 10376 if (lun != NULL) 10377 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10378 lun->be_lun->lun_type; 10379 else 10380 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10381 10382 lbp_ptr->page_code = SVPD_LBP; 10383 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 10384 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 10385 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 10386 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 10387 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 10388 lbp_ptr->prov_type = SVPD_LBP_THIN; 10389 } 10390 10391 ctl_set_success(ctsio); 10392 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10393 ctsio->be_move_done = ctl_config_move_done; 10394 ctl_datamove((union ctl_io *)ctsio); 10395 return (CTL_RETVAL_COMPLETE); 10396 } 10397 10398 /* 10399 * INQUIRY with the EVPD bit set. 
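 *
 * The page code in byte 2 of the CDB selects which VPD page to build;
 * requests for pages we do not support are rejected below with
 * ILLEGAL REQUEST / INVALID FIELD IN CDB.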
10400 */ 10401 static int 10402 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 10403 { 10404 struct ctl_lun *lun; 10405 struct scsi_inquiry *cdb; 10406 int alloc_len, retval; 10407 10408 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10409 cdb = (struct scsi_inquiry *)ctsio->cdb; 10410 alloc_len = scsi_2btoul(cdb->length); 10411 10412 switch (cdb->page_code) { 10413 case SVPD_SUPPORTED_PAGES: 10414 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 10415 break; 10416 case SVPD_UNIT_SERIAL_NUMBER: 10417 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 10418 break; 10419 case SVPD_DEVICE_ID: 10420 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 10421 break; 10422 case SVPD_EXTENDED_INQUIRY_DATA: 10423 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 10424 break; 10425 case SVPD_MODE_PAGE_POLICY: 10426 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 10427 break; 10428 case SVPD_SCSI_PORTS: 10429 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 10430 break; 10431 case SVPD_SCSI_TPC: 10432 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 10433 break; 10434 case SVPD_BLOCK_LIMITS: 10435 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10436 goto err; 10437 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 10438 break; 10439 case SVPD_BDC: 10440 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10441 goto err; 10442 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 10443 break; 10444 case SVPD_LBP: 10445 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10446 goto err; 10447 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 10448 break; 10449 default: 10450 err: 10451 ctl_set_invalid_field(ctsio, 10452 /*sks_valid*/ 1, 10453 /*command*/ 1, 10454 /*field*/ 2, 10455 /*bit_valid*/ 0, 10456 /*bit*/ 0); 10457 ctl_done((union ctl_io *)ctsio); 10458 retval = CTL_RETVAL_COMPLETE; 10459 break; 10460 } 10461 10462 return (retval); 10463 } 10464 10465 /* 10466 * Standard INQUIRY data. 10467 */ 10468 static int 10469 ctl_inquiry_std(struct ctl_scsiio *ctsio) 10470 { 10471 struct scsi_inquiry_data *inq_ptr; 10472 struct scsi_inquiry *cdb; 10473 struct ctl_softc *softc; 10474 struct ctl_lun *lun; 10475 char *val; 10476 uint32_t alloc_len, data_len; 10477 ctl_port_type port_type; 10478 10479 softc = control_softc; 10480 10481 /* 10482 * Figure out whether we're talking to a Fibre Channel port or not. 10483 * We treat the ioctl front end, and any SCSI adapters, as packetized 10484 * SCSI front ends. 10485 */ 10486 port_type = softc->ctl_ports[ 10487 ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type; 10488 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 10489 port_type = CTL_PORT_SCSI; 10490 10491 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10492 cdb = (struct scsi_inquiry *)ctsio->cdb; 10493 alloc_len = scsi_2btoul(cdb->length); 10494 10495 /* 10496 * We malloc the full inquiry data size here and fill it 10497 * in. If the user only asks for less, we'll give him 10498 * that much. 
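 * (The residual and kern_data_len logic just below trims the transfer to
 * the allocation length from the CDB.)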
10499 */ 10500 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); 10501 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10502 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 10503 ctsio->kern_sg_entries = 0; 10504 ctsio->kern_data_resid = 0; 10505 ctsio->kern_rel_offset = 0; 10506 10507 if (data_len < alloc_len) { 10508 ctsio->residual = alloc_len - data_len; 10509 ctsio->kern_data_len = data_len; 10510 ctsio->kern_total_len = data_len; 10511 } else { 10512 ctsio->residual = 0; 10513 ctsio->kern_data_len = alloc_len; 10514 ctsio->kern_total_len = alloc_len; 10515 } 10516 10517 /* 10518 * If we have a LUN configured, report it as connected. Otherwise, 10519 * report that it is offline or no device is supported, depending 10520 * on the value of inquiry_pq_no_lun. 10521 * 10522 * According to the spec (SPC-4 r34), the peripheral qualifier 10523 * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario: 10524 * 10525 * "A peripheral device having the specified peripheral device type 10526 * is not connected to this logical unit. However, the device 10527 * server is capable of supporting the specified peripheral device 10528 * type on this logical unit." 10529 * 10530 * According to the same spec, the peripheral qualifier 10531 * SID_QUAL_BAD_LU (011b) is used in this scenario: 10532 * 10533 * "The device server is not capable of supporting a peripheral 10534 * device on this logical unit. For this peripheral qualifier the 10535 * peripheral device type shall be set to 1Fh. All other peripheral 10536 * device type values are reserved for this peripheral qualifier." 10537 * 10538 * Given the text, it would seem that we probably want to report that 10539 * the LUN is offline here. There is no LUN connected, but we can 10540 * support a LUN at the given LUN number. 10541 * 10542 * In the real world, though, it sounds like things are a little 10543 * different: 10544 * 10545 * - Linux, when presented with a LUN with the offline peripheral 10546 * qualifier, will create an sg driver instance for it. So when 10547 * you attach it to CTL, you wind up with a ton of sg driver 10548 * instances. (One for every LUN that Linux bothered to probe.) 10549 * Linux does this despite the fact that it issues a REPORT LUNs 10550 * to LUN 0 to get the inventory of supported LUNs. 10551 * 10552 * - There is other anecdotal evidence (from Emulex folks) about 10553 * arrays that use the offline peripheral qualifier for LUNs that 10554 * are on the "passive" path in an active/passive array. 10555 * 10556 * So the solution is provide a hopefully reasonable default 10557 * (return bad/no LUN) and allow the user to change the behavior 10558 * with a tunable/sysctl variable. 10559 */ 10560 if (lun != NULL) 10561 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10562 lun->be_lun->lun_type; 10563 else if (softc->inquiry_pq_no_lun == 0) 10564 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10565 else 10566 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 10567 10568 /* RMB in byte 2 is 0 */ 10569 inq_ptr->version = SCSI_REV_SPC4; 10570 10571 /* 10572 * According to SAM-3, even if a device only supports a single 10573 * level of LUN addressing, it should still set the HISUP bit: 10574 * 10575 * 4.9.1 Logical unit numbers overview 10576 * 10577 * All logical unit number formats described in this standard are 10578 * hierarchical in structure even when only a single level in that 10579 * hierarchy is used. 
The HISUP bit shall be set to one in the 10580 * standard INQUIRY data (see SPC-2) when any logical unit number 10581 * format described in this standard is used. Non-hierarchical 10582 * formats are outside the scope of this standard. 10583 * 10584 * Therefore we set the HiSup bit here. 10585 * 10586 * The response format is 2, per SPC-3. 10587 */ 10588 inq_ptr->response_format = SID_HiSup | 2; 10589 10590 inq_ptr->additional_length = data_len - 10591 (offsetof(struct scsi_inquiry_data, additional_length) + 1); 10592 CTL_DEBUG_PRINT(("additional_length = %d\n", 10593 inq_ptr->additional_length)); 10594 10595 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; 10596 /* 16 bit addressing */ 10597 if (port_type == CTL_PORT_SCSI) 10598 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10599 /* XXX set the SID_MultiP bit here if we're actually going to 10600 respond on multiple ports */ 10601 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10602 10603 /* 16 bit data bus, synchronous transfers */ 10604 if (port_type == CTL_PORT_SCSI) 10605 inq_ptr->flags = SID_WBus16 | SID_Sync; 10606 /* 10607 * XXX KDM do we want to support tagged queueing on the control 10608 * device at all? 10609 */ 10610 if ((lun == NULL) 10611 || (lun->be_lun->lun_type != T_PROCESSOR)) 10612 inq_ptr->flags |= SID_CmdQue; 10613 /* 10614 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10615 * We have 8 bytes for the vendor name, 16 bytes for the device 10616 * name, and 4 bytes for the revision. 10617 */ 10618 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10619 "vendor")) == NULL) { 10620 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10621 } else { 10622 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10623 strncpy(inq_ptr->vendor, val, 10624 min(sizeof(inq_ptr->vendor), strlen(val))); 10625 } 10626 if (lun == NULL) { 10627 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10628 sizeof(inq_ptr->product)); 10629 } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) { 10630 switch (lun->be_lun->lun_type) { 10631 case T_DIRECT: 10632 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10633 sizeof(inq_ptr->product)); 10634 break; 10635 case T_PROCESSOR: 10636 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10637 sizeof(inq_ptr->product)); 10638 break; 10639 default: 10640 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 10641 sizeof(inq_ptr->product)); 10642 break; 10643 } 10644 } else { 10645 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10646 strncpy(inq_ptr->product, val, 10647 min(sizeof(inq_ptr->product), strlen(val))); 10648 } 10649 10650 /* 10651 * XXX make this a macro somewhere so it automatically gets 10652 * incremented when we make changes. 10653 */ 10654 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10655 "revision")) == NULL) { 10656 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10657 } else { 10658 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10659 strncpy(inq_ptr->revision, val, 10660 min(sizeof(inq_ptr->revision), strlen(val))); 10661 } 10662 10663 /* 10664 * For parallel SCSI, we support double transition and single 10665 * transition clocking. We also support QAS (Quick Arbitration 10666 * and Selection) and Information Unit transfers on both the 10667 * control and array devices.
10668 */ 10669 if (port_type == CTL_PORT_SCSI) 10670 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10671 SID_SPI_IUS; 10672 10673 /* SAM-5 (no version claimed) */ 10674 scsi_ulto2b(0x00A0, inq_ptr->version1); 10675 /* SPC-4 (no version claimed) */ 10676 scsi_ulto2b(0x0460, inq_ptr->version2); 10677 if (port_type == CTL_PORT_FC) { 10678 /* FCP-2 ANSI INCITS.350:2003 */ 10679 scsi_ulto2b(0x0917, inq_ptr->version3); 10680 } else if (port_type == CTL_PORT_SCSI) { 10681 /* SPI-4 ANSI INCITS.362:200x */ 10682 scsi_ulto2b(0x0B56, inq_ptr->version3); 10683 } else if (port_type == CTL_PORT_ISCSI) { 10684 /* iSCSI (no version claimed) */ 10685 scsi_ulto2b(0x0960, inq_ptr->version3); 10686 } else if (port_type == CTL_PORT_SAS) { 10687 /* SAS (no version claimed) */ 10688 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10689 } 10690 10691 if (lun == NULL) { 10692 /* SBC-4 (no version claimed) */ 10693 scsi_ulto2b(0x0600, inq_ptr->version4); 10694 } else { 10695 switch (lun->be_lun->lun_type) { 10696 case T_DIRECT: 10697 /* SBC-4 (no version claimed) */ 10698 scsi_ulto2b(0x0600, inq_ptr->version4); 10699 break; 10700 case T_PROCESSOR: 10701 default: 10702 break; 10703 } 10704 } 10705 10706 ctl_set_success(ctsio); 10707 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10708 ctsio->be_move_done = ctl_config_move_done; 10709 ctl_datamove((union ctl_io *)ctsio); 10710 return (CTL_RETVAL_COMPLETE); 10711 } 10712 10713 int 10714 ctl_inquiry(struct ctl_scsiio *ctsio) 10715 { 10716 struct scsi_inquiry *cdb; 10717 int retval; 10718 10719 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10720 10721 cdb = (struct scsi_inquiry *)ctsio->cdb; 10722 if (cdb->byte2 & SI_EVPD) 10723 retval = ctl_inquiry_evpd(ctsio); 10724 else if (cdb->page_code == 0) 10725 retval = ctl_inquiry_std(ctsio); 10726 else { 10727 ctl_set_invalid_field(ctsio, 10728 /*sks_valid*/ 1, 10729 /*command*/ 1, 10730 /*field*/ 2, 10731 /*bit_valid*/ 0, 10732 /*bit*/ 0); 10733 ctl_done((union ctl_io *)ctsio); 10734 return (CTL_RETVAL_COMPLETE); 10735 } 10736 10737 return (retval); 10738 } 10739 10740 /* 10741 * For known CDB types, parse the LBA and length. 
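 *
 * Returns 0 on success and 1 for CDBs we do not know how to parse; callers
 * treat a non-zero return as CTL_ACTION_ERROR. For example, READ(10)
 * carries its LBA in bytes 2-5 of the CDB and its transfer length in
 * bytes 7-8.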
10742 */ 10743 static int 10744 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10745 { 10746 if (io->io_hdr.io_type != CTL_IO_SCSI) 10747 return (1); 10748 10749 switch (io->scsiio.cdb[0]) { 10750 case COMPARE_AND_WRITE: { 10751 struct scsi_compare_and_write *cdb; 10752 10753 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10754 10755 *lba = scsi_8btou64(cdb->addr); 10756 *len = cdb->length; 10757 break; 10758 } 10759 case READ_6: 10760 case WRITE_6: { 10761 struct scsi_rw_6 *cdb; 10762 10763 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10764 10765 *lba = scsi_3btoul(cdb->addr); 10766 /* only 5 bits are valid in the most significant address byte */ 10767 *lba &= 0x1fffff; 10768 *len = cdb->length; 10769 break; 10770 } 10771 case READ_10: 10772 case WRITE_10: { 10773 struct scsi_rw_10 *cdb; 10774 10775 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10776 10777 *lba = scsi_4btoul(cdb->addr); 10778 *len = scsi_2btoul(cdb->length); 10779 break; 10780 } 10781 case WRITE_VERIFY_10: { 10782 struct scsi_write_verify_10 *cdb; 10783 10784 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10785 10786 *lba = scsi_4btoul(cdb->addr); 10787 *len = scsi_2btoul(cdb->length); 10788 break; 10789 } 10790 case READ_12: 10791 case WRITE_12: { 10792 struct scsi_rw_12 *cdb; 10793 10794 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10795 10796 *lba = scsi_4btoul(cdb->addr); 10797 *len = scsi_4btoul(cdb->length); 10798 break; 10799 } 10800 case WRITE_VERIFY_12: { 10801 struct scsi_write_verify_12 *cdb; 10802 10803 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10804 10805 *lba = scsi_4btoul(cdb->addr); 10806 *len = scsi_4btoul(cdb->length); 10807 break; 10808 } 10809 case READ_16: 10810 case WRITE_16: 10811 case WRITE_ATOMIC_16: { 10812 struct scsi_rw_16 *cdb; 10813 10814 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10815 10816 *lba = scsi_8btou64(cdb->addr); 10817 *len = scsi_4btoul(cdb->length); 10818 break; 10819 } 10820 case WRITE_VERIFY_16: { 10821 struct scsi_write_verify_16 *cdb; 10822 10823 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10824 10825 *lba = scsi_8btou64(cdb->addr); 10826 *len = scsi_4btoul(cdb->length); 10827 break; 10828 } 10829 case WRITE_SAME_10: { 10830 struct scsi_write_same_10 *cdb; 10831 10832 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10833 10834 *lba = scsi_4btoul(cdb->addr); 10835 *len = scsi_2btoul(cdb->length); 10836 break; 10837 } 10838 case WRITE_SAME_16: { 10839 struct scsi_write_same_16 *cdb; 10840 10841 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10842 10843 *lba = scsi_8btou64(cdb->addr); 10844 *len = scsi_4btoul(cdb->length); 10845 break; 10846 } 10847 case VERIFY_10: { 10848 struct scsi_verify_10 *cdb; 10849 10850 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10851 10852 *lba = scsi_4btoul(cdb->addr); 10853 *len = scsi_2btoul(cdb->length); 10854 break; 10855 } 10856 case VERIFY_12: { 10857 struct scsi_verify_12 *cdb; 10858 10859 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10860 10861 *lba = scsi_4btoul(cdb->addr); 10862 *len = scsi_4btoul(cdb->length); 10863 break; 10864 } 10865 case VERIFY_16: { 10866 struct scsi_verify_16 *cdb; 10867 10868 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10869 10870 *lba = scsi_8btou64(cdb->addr); 10871 *len = scsi_4btoul(cdb->length); 10872 break; 10873 } 10874 case UNMAP: { 10875 *lba = 0; 10876 *len = UINT64_MAX; 10877 break; 10878 } 10879 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10880 struct scsi_get_lba_status *cdb; 10881 10882 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 
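		/*
		 * GET LBA STATUS carries an allocation length rather than a
		 * block count, so for overlap checking we conservatively
		 * treat it as covering everything from the starting LBA
		 * onward.
		 */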
10883 *lba = scsi_8btou64(cdb->addr); 10884 *len = UINT32_MAX; 10885 break; 10886 } 10887 default: 10888 return (1); 10889 break; /* NOTREACHED */ 10890 } 10891 10892 return (0); 10893 } 10894 10895 static ctl_action 10896 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, 10897 bool seq) 10898 { 10899 uint64_t endlba1, endlba2; 10900 10901 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10902 endlba2 = lba2 + len2 - 1; 10903 10904 if ((endlba1 < lba2) || (endlba2 < lba1)) 10905 return (CTL_ACTION_PASS); 10906 else 10907 return (CTL_ACTION_BLOCK); 10908 } 10909 10910 static int 10911 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10912 { 10913 struct ctl_ptr_len_flags *ptrlen; 10914 struct scsi_unmap_desc *buf, *end, *range; 10915 uint64_t lba; 10916 uint32_t len; 10917 10918 /* If not UNMAP -- go other way. */ 10919 if (io->io_hdr.io_type != CTL_IO_SCSI || 10920 io->scsiio.cdb[0] != UNMAP) 10921 return (CTL_ACTION_ERROR); 10922 10923 /* If UNMAP without data -- block and wait for data. */ 10924 ptrlen = (struct ctl_ptr_len_flags *) 10925 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10926 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10927 ptrlen->ptr == NULL) 10928 return (CTL_ACTION_BLOCK); 10929 10930 /* UNMAP with data -- check for collision. */ 10931 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10932 end = buf + ptrlen->len / sizeof(*buf); 10933 for (range = buf; range < end; range++) { 10934 lba = scsi_8btou64(range->lba); 10935 len = scsi_4btoul(range->length); 10936 if ((lba < lba2 + len2) && (lba + len > lba2)) 10937 return (CTL_ACTION_BLOCK); 10938 } 10939 return (CTL_ACTION_PASS); 10940 } 10941 10942 static ctl_action 10943 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10944 { 10945 uint64_t lba1, lba2; 10946 uint64_t len1, len2; 10947 int retval; 10948 10949 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10950 return (CTL_ACTION_ERROR); 10951 10952 retval = ctl_extent_check_unmap(io1, lba2, len2); 10953 if (retval != CTL_ACTION_ERROR) 10954 return (retval); 10955 10956 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10957 return (CTL_ACTION_ERROR); 10958 10959 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 10960 } 10961 10962 static ctl_action 10963 ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) 10964 { 10965 uint64_t lba1, lba2; 10966 uint64_t len1, len2; 10967 10968 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10969 return (CTL_ACTION_ERROR); 10970 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10971 return (CTL_ACTION_ERROR); 10972 10973 if (lba1 + len1 == lba2) 10974 return (CTL_ACTION_BLOCK); 10975 return (CTL_ACTION_PASS); 10976 } 10977 10978 static ctl_action 10979 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 10980 union ctl_io *ooa_io) 10981 { 10982 const struct ctl_cmd_entry *pending_entry, *ooa_entry; 10983 ctl_serialize_action *serialize_row; 10984 10985 /* 10986 * The initiator attempted multiple untagged commands at the same 10987 * time. Can't do that. 
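 * (We return CTL_ACTION_OVERLAP here; callers map that to an overlapped
 * commands check condition via ctl_set_overlapped_cmd().)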
10988 */ 10989 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10990 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10991 && ((pending_io->io_hdr.nexus.targ_port == 10992 ooa_io->io_hdr.nexus.targ_port) 10993 && (pending_io->io_hdr.nexus.initid.id == 10994 ooa_io->io_hdr.nexus.initid.id)) 10995 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10996 CTL_FLAG_STATUS_SENT)) == 0)) 10997 return (CTL_ACTION_OVERLAP); 10998 10999 /* 11000 * The initiator attempted to send multiple tagged commands with 11001 * the same ID. (It's fine if different initiators have the same 11002 * tag ID.) 11003 * 11004 * Even if all of those conditions are true, we don't kill the I/O 11005 * if the command ahead of us has been aborted. We won't end up 11006 * sending it to the FETD, and it's perfectly legal to resend a 11007 * command with the same tag number as long as the previous 11008 * instance of this tag number has been aborted somehow. 11009 */ 11010 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 11011 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 11012 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 11013 && ((pending_io->io_hdr.nexus.targ_port == 11014 ooa_io->io_hdr.nexus.targ_port) 11015 && (pending_io->io_hdr.nexus.initid.id == 11016 ooa_io->io_hdr.nexus.initid.id)) 11017 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 11018 CTL_FLAG_STATUS_SENT)) == 0)) 11019 return (CTL_ACTION_OVERLAP_TAG); 11020 11021 /* 11022 * If we get a head of queue tag, SAM-3 says that we should 11023 * immediately execute it. 11024 * 11025 * What happens if this command would normally block for some other 11026 * reason? e.g. a request sense with a head of queue tag 11027 * immediately after a write. Normally that would block, but this 11028 * will result in its getting executed immediately... 11029 * 11030 * We currently return "pass" instead of "skip", so we'll end up 11031 * going through the rest of the queue to check for overlapped tags. 11032 * 11033 * XXX KDM check for other types of blockage first?? 11034 */ 11035 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 11036 return (CTL_ACTION_PASS); 11037 11038 /* 11039 * Ordered tags have to block until all items ahead of them 11040 * have completed. If we get called with an ordered tag, we always 11041 * block, if something else is ahead of us in the queue. 11042 */ 11043 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 11044 return (CTL_ACTION_BLOCK); 11045 11046 /* 11047 * Simple tags get blocked until all head of queue and ordered tags 11048 * ahead of them have completed. I'm lumping untagged commands in 11049 * with simple tags here. XXX KDM is that the right thing to do? 
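 *
 * Beyond these tag-type rules, command-vs-command behaviour is driven by
 * ctl_serialize_table[], indexed by the seridx of the two commands, as
 * looked up just below.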
11050 */ 11051 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 11052 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 11053 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 11054 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 11055 return (CTL_ACTION_BLOCK); 11056 11057 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); 11058 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); 11059 11060 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 11061 11062 switch (serialize_row[pending_entry->seridx]) { 11063 case CTL_SER_BLOCK: 11064 return (CTL_ACTION_BLOCK); 11065 case CTL_SER_EXTENT: 11066 return (ctl_extent_check(ooa_io, pending_io, 11067 (lun->serseq == CTL_LUN_SERSEQ_ON))); 11068 case CTL_SER_EXTENTOPT: 11069 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags 11070 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) 11071 return (ctl_extent_check(ooa_io, pending_io, 11072 (lun->serseq == CTL_LUN_SERSEQ_ON))); 11073 return (CTL_ACTION_PASS); 11074 case CTL_SER_EXTENTSEQ: 11075 if (lun->serseq != CTL_LUN_SERSEQ_OFF) 11076 return (ctl_extent_check_seq(ooa_io, pending_io)); 11077 return (CTL_ACTION_PASS); 11078 case CTL_SER_PASS: 11079 return (CTL_ACTION_PASS); 11080 case CTL_SER_BLOCKOPT: 11081 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags 11082 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) 11083 return (CTL_ACTION_BLOCK); 11084 return (CTL_ACTION_PASS); 11085 case CTL_SER_SKIP: 11086 return (CTL_ACTION_SKIP); 11087 default: 11088 panic("invalid serialization value %d", 11089 serialize_row[pending_entry->seridx]); 11090 } 11091 11092 return (CTL_ACTION_ERROR); 11093 } 11094 11095 /* 11096 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 11097 * Assumptions: 11098 * - pending_io is generally either incoming, or on the blocked queue 11099 * - starting I/O is the I/O we want to start the check with. 11100 */ 11101 static ctl_action 11102 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 11103 union ctl_io *starting_io) 11104 { 11105 union ctl_io *ooa_io; 11106 ctl_action action; 11107 11108 mtx_assert(&lun->lun_lock, MA_OWNED); 11109 11110 /* 11111 * Run back along the OOA queue, starting with the current 11112 * blocked I/O and going through every I/O before it on the 11113 * queue. If starting_io is NULL, we'll just end up returning 11114 * CTL_ACTION_PASS. 11115 */ 11116 for (ooa_io = starting_io; ooa_io != NULL; 11117 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 11118 ooa_links)){ 11119 11120 /* 11121 * This routine just checks to see whether 11122 * cur_blocked is blocked by ooa_io, which is ahead 11123 * of it in the queue. It doesn't queue/dequeue 11124 * cur_blocked. 11125 */ 11126 action = ctl_check_for_blockage(lun, pending_io, ooa_io); 11127 switch (action) { 11128 case CTL_ACTION_BLOCK: 11129 case CTL_ACTION_OVERLAP: 11130 case CTL_ACTION_OVERLAP_TAG: 11131 case CTL_ACTION_SKIP: 11132 case CTL_ACTION_ERROR: 11133 return (action); 11134 break; /* NOTREACHED */ 11135 case CTL_ACTION_PASS: 11136 break; 11137 default: 11138 panic("invalid action %d", action); 11139 break; /* NOTREACHED */ 11140 } 11141 } 11142 11143 return (CTL_ACTION_PASS); 11144 } 11145 11146 /* 11147 * Assumptions: 11148 * - An I/O has just completed, and has been removed from the per-LUN OOA 11149 * queue, so some items on the blocked queue may now be unblocked. 
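 *
 * We walk the blocked queue in order and re-run the OOA check on each
 * entry; anything that no longer blocks is taken off the blocked queue
 * and either queued to run locally or handed back to the other SC, as
 * done below.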
11150 */ 11151 static int 11152 ctl_check_blocked(struct ctl_lun *lun) 11153 { 11154 union ctl_io *cur_blocked, *next_blocked; 11155 11156 mtx_assert(&lun->lun_lock, MA_OWNED); 11157 11158 /* 11159 * Run forward from the head of the blocked queue, checking each 11160 * entry against the I/Os prior to it on the OOA queue to see if 11161 * there is still any blockage. 11162 * 11163 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 11164 * with our removing a variable on it while it is traversing the 11165 * list. 11166 */ 11167 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 11168 cur_blocked != NULL; cur_blocked = next_blocked) { 11169 union ctl_io *prev_ooa; 11170 ctl_action action; 11171 11172 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 11173 blocked_links); 11174 11175 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 11176 ctl_ooaq, ooa_links); 11177 11178 /* 11179 * If cur_blocked happens to be the first item in the OOA 11180 * queue now, prev_ooa will be NULL, and the action 11181 * returned will just be CTL_ACTION_PASS. 11182 */ 11183 action = ctl_check_ooa(lun, cur_blocked, prev_ooa); 11184 11185 switch (action) { 11186 case CTL_ACTION_BLOCK: 11187 /* Nothing to do here, still blocked */ 11188 break; 11189 case CTL_ACTION_OVERLAP: 11190 case CTL_ACTION_OVERLAP_TAG: 11191 /* 11192 * This shouldn't happen! In theory we've already 11193 * checked this command for overlap... 11194 */ 11195 break; 11196 case CTL_ACTION_PASS: 11197 case CTL_ACTION_SKIP: { 11198 const struct ctl_cmd_entry *entry; 11199 int isc_retval; 11200 11201 /* 11202 * The skip case shouldn't happen, this transaction 11203 * should have never made it onto the blocked queue. 11204 */ 11205 /* 11206 * This I/O is no longer blocked, we can remove it 11207 * from the blocked queue. Since this is a TAILQ 11208 * (doubly linked list), we can do O(1) removals 11209 * from any place on the list. 11210 */ 11211 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 11212 blocked_links); 11213 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 11214 11215 if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){ 11216 /* 11217 * Need to send IO back to original side to 11218 * run 11219 */ 11220 union ctl_ha_msg msg_info; 11221 11222 msg_info.hdr.original_sc = 11223 cur_blocked->io_hdr.original_sc; 11224 msg_info.hdr.serializing_sc = cur_blocked; 11225 msg_info.hdr.msg_type = CTL_MSG_R2R; 11226 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11227 &msg_info, sizeof(msg_info), 0)) > 11228 CTL_HA_STATUS_SUCCESS) { 11229 printf("CTL:Check Blocked error from " 11230 "ctl_ha_msg_send %d\n", 11231 isc_retval); 11232 } 11233 break; 11234 } 11235 entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL); 11236 11237 /* 11238 * Check this I/O for LUN state changes that may 11239 * have happened while this command was blocked. 11240 * The LUN state may have been changed by a command 11241 * ahead of us in the queue, so we need to re-check 11242 * for any states that can be caused by SCSI 11243 * commands. 11244 */ 11245 if (ctl_scsiio_lun_check(lun, entry, 11246 &cur_blocked->scsiio) == 0) { 11247 cur_blocked->io_hdr.flags |= 11248 CTL_FLAG_IS_WAS_ON_RTR; 11249 ctl_enqueue_rtr(cur_blocked); 11250 } else 11251 ctl_done(cur_blocked); 11252 break; 11253 } 11254 default: 11255 /* 11256 * This probably shouldn't happen -- we shouldn't 11257 * get CTL_ACTION_ERROR, or anything else. 
11258 */ 11259 break; 11260 } 11261 } 11262 11263 return (CTL_RETVAL_COMPLETE); 11264 } 11265 11266 /* 11267 * This routine (with one exception) checks LUN flags that can be set by 11268 * commands ahead of us in the OOA queue. These flags have to be checked 11269 * when a command initially comes in, and when we pull a command off the 11270 * blocked queue and are preparing to execute it. The reason we have to 11271 * check these flags for commands on the blocked queue is that the LUN 11272 * state may have been changed by a command ahead of us while we're on the 11273 * blocked queue. 11274 * 11275 * Ordering is somewhat important with these checks, so please pay 11276 * careful attention to the placement of any new checks. 11277 */ 11278 static int 11279 ctl_scsiio_lun_check(struct ctl_lun *lun, 11280 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 11281 { 11282 struct ctl_softc *softc = lun->ctl_softc; 11283 int retval; 11284 uint32_t residx; 11285 11286 retval = 0; 11287 11288 mtx_assert(&lun->lun_lock, MA_OWNED); 11289 11290 /* 11291 * If this shelf is a secondary shelf controller, we have to reject 11292 * any media access commands. 11293 */ 11294 if ((softc->flags & CTL_FLAG_ACTIVE_SHELF) == 0 && 11295 (entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0) { 11296 ctl_set_lun_standby(ctsio); 11297 retval = 1; 11298 goto bailout; 11299 } 11300 11301 if (entry->pattern & CTL_LUN_PAT_WRITE) { 11302 if (lun->flags & CTL_LUN_READONLY) { 11303 ctl_set_sense(ctsio, /*current_error*/ 1, 11304 /*sense_key*/ SSD_KEY_DATA_PROTECT, 11305 /*asc*/ 0x27, /*ascq*/ 0x01, SSD_ELEM_NONE); 11306 retval = 1; 11307 goto bailout; 11308 } 11309 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT] 11310 .eca_and_aen & SCP_SWP) != 0) { 11311 ctl_set_sense(ctsio, /*current_error*/ 1, 11312 /*sense_key*/ SSD_KEY_DATA_PROTECT, 11313 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 11314 retval = 1; 11315 goto bailout; 11316 } 11317 } 11318 11319 /* 11320 * Check for a reservation conflict. If this command isn't allowed 11321 * even on reserved LUNs, and if this initiator isn't the one who 11322 * reserved us, reject the command with a reservation conflict. 11323 */ 11324 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 11325 if ((lun->flags & CTL_LUN_RESERVED) 11326 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 11327 if (lun->res_idx != residx) { 11328 ctl_set_reservation_conflict(ctsio); 11329 retval = 1; 11330 goto bailout; 11331 } 11332 } 11333 11334 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 11335 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 11336 /* No reservation or command is allowed. */; 11337 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 11338 (lun->res_type == SPR_TYPE_WR_EX || 11339 lun->res_type == SPR_TYPE_WR_EX_RO || 11340 lun->res_type == SPR_TYPE_WR_EX_AR)) { 11341 /* The command is allowed for Write Exclusive resv. */; 11342 } else { 11343 /* 11344 * if we aren't registered or it's a res holder type 11345 * reservation and this isn't the res holder then set a 11346 * conflict. 
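 * (The res_type < 4 test below is intended to catch the Write Exclusive
 * and Exclusive Access types, which have a single reservation holder;
 * that reading of the SPR_TYPE_* encoding is an assumption, not something
 * stated here.)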
11347 */ 11348 if (ctl_get_prkey(lun, residx) == 0 11349 || (residx != lun->pr_res_idx && lun->res_type < 4)) { 11350 ctl_set_reservation_conflict(ctsio); 11351 retval = 1; 11352 goto bailout; 11353 } 11354 11355 } 11356 11357 if ((lun->flags & CTL_LUN_OFFLINE) 11358 && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) { 11359 ctl_set_lun_not_ready(ctsio); 11360 retval = 1; 11361 goto bailout; 11362 } 11363 11364 /* 11365 * If the LUN is stopped, see if this particular command is allowed 11366 * for a stopped lun. Otherwise, reject it with 0x04,0x02. 11367 */ 11368 if ((lun->flags & CTL_LUN_STOPPED) 11369 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) { 11370 /* "Logical unit not ready, initializing cmd. required" */ 11371 ctl_set_lun_stopped(ctsio); 11372 retval = 1; 11373 goto bailout; 11374 } 11375 11376 if ((lun->flags & CTL_LUN_INOPERABLE) 11377 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) { 11378 /* "Medium format corrupted" */ 11379 ctl_set_medium_format_corrupted(ctsio); 11380 retval = 1; 11381 goto bailout; 11382 } 11383 11384 bailout: 11385 return (retval); 11386 11387 } 11388 11389 static void 11390 ctl_failover_io(union ctl_io *io, int have_lock) 11391 { 11392 ctl_set_busy(&io->scsiio); 11393 ctl_done(io); 11394 } 11395 11396 static void 11397 ctl_failover(void) 11398 { 11399 struct ctl_lun *lun; 11400 struct ctl_softc *softc; 11401 union ctl_io *next_io, *pending_io; 11402 union ctl_io *io; 11403 int lun_idx; 11404 11405 softc = control_softc; 11406 11407 mtx_lock(&softc->ctl_lock); 11408 /* 11409 * Remove any cmds from the other SC from the rtr queue. These 11410 * will obviously only be for LUNs for which we're the primary. 11411 * We can't send status or get/send data for these commands. 11412 * Since they haven't been executed yet, we can just remove them. 11413 * We'll either abort them or delete them below, depending on 11414 * which HA mode we're in. 11415 */ 11416 #ifdef notyet 11417 mtx_lock(&softc->queue_lock); 11418 for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue); 11419 io != NULL; io = next_io) { 11420 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links); 11421 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 11422 STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr, 11423 ctl_io_hdr, links); 11424 } 11425 mtx_unlock(&softc->queue_lock); 11426 #endif 11427 11428 for (lun_idx=0; lun_idx < softc->num_luns; lun_idx++) { 11429 lun = softc->ctl_luns[lun_idx]; 11430 if (lun==NULL) 11431 continue; 11432 11433 /* 11434 * Processor LUNs are primary on both sides. 11435 * XXX will this always be true? 11436 */ 11437 if (lun->be_lun->lun_type == T_PROCESSOR) 11438 continue; 11439 11440 if ((lun->flags & CTL_LUN_PRIMARY_SC) 11441 && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 11442 printf("FAILOVER: primary lun %d\n", lun_idx); 11443 /* 11444 * Remove all commands from the other SC. First from the 11445 * blocked queue then from the ooa queue. Once we have 11446 * removed them. Call ctl_check_blocked to see if there 11447 * is anything that can run. 
11448 */ 11449 for (io = (union ctl_io *)TAILQ_FIRST( 11450 &lun->blocked_queue); io != NULL; io = next_io) { 11451 11452 next_io = (union ctl_io *)TAILQ_NEXT( 11453 &io->io_hdr, blocked_links); 11454 11455 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 11456 TAILQ_REMOVE(&lun->blocked_queue, 11457 &io->io_hdr,blocked_links); 11458 io->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 11459 TAILQ_REMOVE(&lun->ooa_queue, 11460 &io->io_hdr, ooa_links); 11461 11462 ctl_free_io(io); 11463 } 11464 } 11465 11466 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 11467 io != NULL; io = next_io) { 11468 11469 next_io = (union ctl_io *)TAILQ_NEXT( 11470 &io->io_hdr, ooa_links); 11471 11472 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 11473 11474 TAILQ_REMOVE(&lun->ooa_queue, 11475 &io->io_hdr, 11476 ooa_links); 11477 11478 ctl_free_io(io); 11479 } 11480 } 11481 ctl_check_blocked(lun); 11482 } else if ((lun->flags & CTL_LUN_PRIMARY_SC) 11483 && (softc->ha_mode == CTL_HA_MODE_XFER)) { 11484 11485 printf("FAILOVER: primary lun %d\n", lun_idx); 11486 /* 11487 * Abort all commands from the other SC. We can't 11488 * send status back for them now. These should get 11489 * cleaned up when they are completed or come out 11490 * for a datamove operation. 11491 */ 11492 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 11493 io != NULL; io = next_io) { 11494 next_io = (union ctl_io *)TAILQ_NEXT( 11495 &io->io_hdr, ooa_links); 11496 11497 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 11498 io->io_hdr.flags |= CTL_FLAG_ABORT; 11499 } 11500 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 11501 && (softc->ha_mode == CTL_HA_MODE_XFER)) { 11502 11503 printf("FAILOVER: secondary lun %d\n", lun_idx); 11504 11505 lun->flags |= CTL_LUN_PRIMARY_SC; 11506 11507 /* 11508 * We send all I/O that was sent to this controller 11509 * and redirected to the other side back with 11510 * busy status, and have the initiator retry it. 11511 * Figuring out how much data has been transferred, 11512 * etc. and picking up where we left off would be 11513 * very tricky. 11514 * 11515 * XXX KDM need to remove I/O from the blocked 11516 * queue as well! 11517 */ 11518 for (pending_io = (union ctl_io *)TAILQ_FIRST( 11519 &lun->ooa_queue); pending_io != NULL; 11520 pending_io = next_io) { 11521 11522 next_io = (union ctl_io *)TAILQ_NEXT( 11523 &pending_io->io_hdr, ooa_links); 11524 11525 pending_io->io_hdr.flags &= 11526 ~CTL_FLAG_SENT_2OTHER_SC; 11527 11528 if (pending_io->io_hdr.flags & 11529 CTL_FLAG_IO_ACTIVE) { 11530 pending_io->io_hdr.flags |= 11531 CTL_FLAG_FAILOVER; 11532 } else { 11533 ctl_set_busy(&pending_io->scsiio); 11534 ctl_done(pending_io); 11535 } 11536 } 11537 11538 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 11539 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 11540 && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 11541 printf("FAILOVER: secondary lun %d\n", lun_idx); 11542 /* 11543 * if the first io on the OOA is not on the RtR queue 11544 * add it. 
11545 */ 11546 lun->flags |= CTL_LUN_PRIMARY_SC; 11547 11548 pending_io = (union ctl_io *)TAILQ_FIRST( 11549 &lun->ooa_queue); 11550 if (pending_io==NULL) { 11551 printf("Nothing on OOA queue\n"); 11552 continue; 11553 } 11554 11555 pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11556 if ((pending_io->io_hdr.flags & 11557 CTL_FLAG_IS_WAS_ON_RTR) == 0) { 11558 pending_io->io_hdr.flags |= 11559 CTL_FLAG_IS_WAS_ON_RTR; 11560 ctl_enqueue_rtr(pending_io); 11561 } 11562 #if 0 11563 else 11564 { 11565 printf("Tag 0x%04x is running\n", 11566 pending_io->scsiio.tag_num); 11567 } 11568 #endif 11569 11570 next_io = (union ctl_io *)TAILQ_NEXT( 11571 &pending_io->io_hdr, ooa_links); 11572 for (pending_io=next_io; pending_io != NULL; 11573 pending_io = next_io) { 11574 pending_io->io_hdr.flags &= 11575 ~CTL_FLAG_SENT_2OTHER_SC; 11576 next_io = (union ctl_io *)TAILQ_NEXT( 11577 &pending_io->io_hdr, ooa_links); 11578 if (pending_io->io_hdr.flags & 11579 CTL_FLAG_IS_WAS_ON_RTR) { 11580 #if 0 11581 printf("Tag 0x%04x is running\n", 11582 pending_io->scsiio.tag_num); 11583 #endif 11584 continue; 11585 } 11586 11587 switch (ctl_check_ooa(lun, pending_io, 11588 (union ctl_io *)TAILQ_PREV( 11589 &pending_io->io_hdr, ctl_ooaq, 11590 ooa_links))) { 11591 11592 case CTL_ACTION_BLOCK: 11593 TAILQ_INSERT_TAIL(&lun->blocked_queue, 11594 &pending_io->io_hdr, 11595 blocked_links); 11596 pending_io->io_hdr.flags |= 11597 CTL_FLAG_BLOCKED; 11598 break; 11599 case CTL_ACTION_PASS: 11600 case CTL_ACTION_SKIP: 11601 pending_io->io_hdr.flags |= 11602 CTL_FLAG_IS_WAS_ON_RTR; 11603 ctl_enqueue_rtr(pending_io); 11604 break; 11605 case CTL_ACTION_OVERLAP: 11606 ctl_set_overlapped_cmd( 11607 (struct ctl_scsiio *)pending_io); 11608 ctl_done(pending_io); 11609 break; 11610 case CTL_ACTION_OVERLAP_TAG: 11611 ctl_set_overlapped_tag( 11612 (struct ctl_scsiio *)pending_io, 11613 pending_io->scsiio.tag_num & 0xff); 11614 ctl_done(pending_io); 11615 break; 11616 case CTL_ACTION_ERROR: 11617 default: 11618 ctl_set_internal_failure( 11619 (struct ctl_scsiio *)pending_io, 11620 0, // sks_valid 11621 0); //retry count 11622 ctl_done(pending_io); 11623 break; 11624 } 11625 } 11626 11627 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 11628 } else { 11629 panic("Unhandled HA mode failover, LUN flags = %#x, " 11630 "ha_mode = #%x", lun->flags, softc->ha_mode); 11631 } 11632 } 11633 ctl_pause_rtr = 0; 11634 mtx_unlock(&softc->ctl_lock); 11635 } 11636 11637 static void 11638 ctl_clear_ua(struct ctl_softc *ctl_softc, uint32_t initidx, 11639 ctl_ua_type ua_type) 11640 { 11641 struct ctl_lun *lun; 11642 ctl_ua_type *pu; 11643 11644 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); 11645 11646 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 11647 mtx_lock(&lun->lun_lock); 11648 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 11649 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua_type; 11650 mtx_unlock(&lun->lun_lock); 11651 } 11652 } 11653 11654 static int 11655 ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) 11656 { 11657 struct ctl_lun *lun; 11658 const struct ctl_cmd_entry *entry; 11659 uint32_t initidx, targ_lun; 11660 int retval; 11661 11662 retval = 0; 11663 11664 lun = NULL; 11665 11666 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 11667 if ((targ_lun < CTL_MAX_LUNS) 11668 && ((lun = softc->ctl_luns[targ_lun]) != NULL)) { 11669 /* 11670 * If the LUN is invalid, pretend that it doesn't exist. 11671 * It will go away as soon as all pending I/O has been 11672 * completed. 
11673 */ 11674 mtx_lock(&lun->lun_lock); 11675 if (lun->flags & CTL_LUN_DISABLED) { 11676 mtx_unlock(&lun->lun_lock); 11677 lun = NULL; 11678 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 11679 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 11680 } else { 11681 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; 11682 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = 11683 lun->be_lun; 11684 if (lun->be_lun->lun_type == T_PROCESSOR) { 11685 ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV; 11686 } 11687 11688 /* 11689 * Every I/O goes into the OOA queue for a 11690 * particular LUN, and stays there until completion. 11691 */ 11692 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, 11693 ooa_links); 11694 } 11695 } else { 11696 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 11697 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 11698 } 11699 11700 /* Get command entry and return error if it is unsuppotyed. */ 11701 entry = ctl_validate_command(ctsio); 11702 if (entry == NULL) { 11703 if (lun) 11704 mtx_unlock(&lun->lun_lock); 11705 return (retval); 11706 } 11707 11708 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 11709 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 11710 11711 /* 11712 * Check to see whether we can send this command to LUNs that don't 11713 * exist. This should pretty much only be the case for inquiry 11714 * and request sense. Further checks, below, really require having 11715 * a LUN, so we can't really check the command anymore. Just put 11716 * it on the rtr queue. 11717 */ 11718 if (lun == NULL) { 11719 if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) { 11720 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11721 ctl_enqueue_rtr((union ctl_io *)ctsio); 11722 return (retval); 11723 } 11724 11725 ctl_set_unsupported_lun(ctsio); 11726 ctl_done((union ctl_io *)ctsio); 11727 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); 11728 return (retval); 11729 } else { 11730 /* 11731 * Make sure we support this particular command on this LUN. 11732 * e.g., we don't support writes to the control LUN. 11733 */ 11734 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 11735 mtx_unlock(&lun->lun_lock); 11736 ctl_set_invalid_opcode(ctsio); 11737 ctl_done((union ctl_io *)ctsio); 11738 return (retval); 11739 } 11740 } 11741 11742 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11743 11744 #ifdef CTL_WITH_CA 11745 /* 11746 * If we've got a request sense, it'll clear the contingent 11747 * allegiance condition. Otherwise, if we have a CA condition for 11748 * this initiator, clear it, because it sent down a command other 11749 * than request sense. 11750 */ 11751 if ((ctsio->cdb[0] != REQUEST_SENSE) 11752 && (ctl_is_set(lun->have_ca, initidx))) 11753 ctl_clear_mask(lun->have_ca, initidx); 11754 #endif 11755 11756 /* 11757 * If the command has this flag set, it handles its own unit 11758 * attention reporting, we shouldn't do anything. Otherwise we 11759 * check for any pending unit attentions, and send them back to the 11760 * initiator. We only do this when a command initially comes in, 11761 * not when we pull it off the blocked queue. 11762 * 11763 * According to SAM-3, section 5.3.2, the order that things get 11764 * presented back to the host is basically unit attentions caused 11765 * by some sort of reset event, busy status, reservation conflicts 11766 * or task set full, and finally any other status. 
11767 * 11768 * One issue here is that some of the unit attentions we report 11769 * don't fall into the "reset" category (e.g. "reported luns data 11770 * has changed"). So reporting it here, before the reservation 11771 * check, may be technically wrong. I guess the only thing to do 11772 * would be to check for and report the reset events here, and then 11773 * check for the other unit attention types after we check for a 11774 * reservation conflict. 11775 * 11776 * XXX KDM need to fix this 11777 */ 11778 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11779 ctl_ua_type ua_type; 11780 scsi_sense_data_type sense_format; 11781 11782 if (lun->flags & CTL_LUN_SENSE_DESC) 11783 sense_format = SSD_TYPE_DESC; 11784 else 11785 sense_format = SSD_TYPE_FIXED; 11786 11787 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, 11788 sense_format); 11789 if (ua_type != CTL_UA_NONE) { 11790 mtx_unlock(&lun->lun_lock); 11791 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11792 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11793 ctsio->sense_len = SSD_FULL_SIZE; 11794 ctl_done((union ctl_io *)ctsio); 11795 return (retval); 11796 } 11797 } 11798 11799 11800 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 11801 mtx_unlock(&lun->lun_lock); 11802 ctl_done((union ctl_io *)ctsio); 11803 return (retval); 11804 } 11805 11806 /* 11807 * XXX CHD this is where we want to send IO to other side if 11808 * this LUN is secondary on this SC. We will need to make a copy 11809 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 11810 * the copy we send as FROM_OTHER. 11811 * We also need to stuff the address of the original IO so we can 11812 * find it easily. Something similar will need be done on the other 11813 * side so when we are done we can find the copy. 11814 */ 11815 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11816 union ctl_ha_msg msg_info; 11817 int isc_retval; 11818 11819 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11820 11821 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11822 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11823 #if 0 11824 printf("1. ctsio %p\n", ctsio); 11825 #endif 11826 msg_info.hdr.serializing_sc = NULL; 11827 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11828 msg_info.scsi.tag_num = ctsio->tag_num; 11829 msg_info.scsi.tag_type = ctsio->tag_type; 11830 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11831 11832 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11833 11834 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11835 (void *)&msg_info, sizeof(msg_info), 0)) > 11836 CTL_HA_STATUS_SUCCESS) { 11837 printf("CTL:precheck, ctl_ha_msg_send returned %d\n", 11838 isc_retval); 11839 printf("CTL:opcode is %x\n", ctsio->cdb[0]); 11840 } else { 11841 #if 0 11842 printf("CTL:Precheck sent msg, opcode is %x\n",opcode); 11843 #endif 11844 } 11845 11846 /* 11847 * XXX KDM this I/O is off the incoming queue, but hasn't 11848 * been inserted on any other queue. We may need to come 11849 * up with a holding queue while we wait for serialization 11850 * so that we have an idea of what we're waiting for from 11851 * the other side. 
11852 */ 11853 mtx_unlock(&lun->lun_lock); 11854 return (retval); 11855 } 11856 11857 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11858 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11859 ctl_ooaq, ooa_links))) { 11860 case CTL_ACTION_BLOCK: 11861 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 11862 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 11863 blocked_links); 11864 mtx_unlock(&lun->lun_lock); 11865 return (retval); 11866 case CTL_ACTION_PASS: 11867 case CTL_ACTION_SKIP: 11868 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11869 mtx_unlock(&lun->lun_lock); 11870 ctl_enqueue_rtr((union ctl_io *)ctsio); 11871 break; 11872 case CTL_ACTION_OVERLAP: 11873 mtx_unlock(&lun->lun_lock); 11874 ctl_set_overlapped_cmd(ctsio); 11875 ctl_done((union ctl_io *)ctsio); 11876 break; 11877 case CTL_ACTION_OVERLAP_TAG: 11878 mtx_unlock(&lun->lun_lock); 11879 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11880 ctl_done((union ctl_io *)ctsio); 11881 break; 11882 case CTL_ACTION_ERROR: 11883 default: 11884 mtx_unlock(&lun->lun_lock); 11885 ctl_set_internal_failure(ctsio, 11886 /*sks_valid*/ 0, 11887 /*retry_count*/ 0); 11888 ctl_done((union ctl_io *)ctsio); 11889 break; 11890 } 11891 return (retval); 11892 } 11893 11894 const struct ctl_cmd_entry * 11895 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11896 { 11897 const struct ctl_cmd_entry *entry; 11898 int service_action; 11899 11900 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11901 if (sa) 11902 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11903 if (entry->flags & CTL_CMD_FLAG_SA5) { 11904 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11905 entry = &((const struct ctl_cmd_entry *) 11906 entry->execute)[service_action]; 11907 } 11908 return (entry); 11909 } 11910 11911 const struct ctl_cmd_entry * 11912 ctl_validate_command(struct ctl_scsiio *ctsio) 11913 { 11914 const struct ctl_cmd_entry *entry; 11915 int i, sa; 11916 uint8_t diff; 11917 11918 entry = ctl_get_cmd_entry(ctsio, &sa); 11919 if (entry->execute == NULL) { 11920 if (sa) 11921 ctl_set_invalid_field(ctsio, 11922 /*sks_valid*/ 1, 11923 /*command*/ 1, 11924 /*field*/ 1, 11925 /*bit_valid*/ 1, 11926 /*bit*/ 4); 11927 else 11928 ctl_set_invalid_opcode(ctsio); 11929 ctl_done((union ctl_io *)ctsio); 11930 return (NULL); 11931 } 11932 KASSERT(entry->length > 0, 11933 ("Not defined length for command 0x%02x/0x%02x", 11934 ctsio->cdb[0], ctsio->cdb[1])); 11935 for (i = 1; i < entry->length; i++) { 11936 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11937 if (diff == 0) 11938 continue; 11939 ctl_set_invalid_field(ctsio, 11940 /*sks_valid*/ 1, 11941 /*command*/ 1, 11942 /*field*/ i, 11943 /*bit_valid*/ 1, 11944 /*bit*/ fls(diff) - 1); 11945 ctl_done((union ctl_io *)ctsio); 11946 return (NULL); 11947 } 11948 return (entry); 11949 } 11950 11951 static int 11952 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11953 { 11954 11955 switch (lun_type) { 11956 case T_PROCESSOR: 11957 if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) && 11958 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0)) 11959 return (0); 11960 break; 11961 case T_DIRECT: 11962 if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) && 11963 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0)) 11964 return (0); 11965 break; 11966 default: 11967 return (0); 11968 } 11969 return (1); 11970 } 11971 11972 static int 11973 ctl_scsiio(struct ctl_scsiio *ctsio) 11974 { 11975 int retval; 11976 const struct ctl_cmd_entry *entry; 11977 11978 retval = CTL_RETVAL_COMPLETE; 11979 11980 
CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11981 11982 entry = ctl_get_cmd_entry(ctsio, NULL); 11983 11984 /* 11985 * If this I/O has been aborted, just send it straight to 11986 * ctl_done() without executing it. 11987 */ 11988 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11989 ctl_done((union ctl_io *)ctsio); 11990 goto bailout; 11991 } 11992 11993 /* 11994 * All the checks should have been handled by ctl_scsiio_precheck(). 11995 * We should be clear now to just execute the I/O. 11996 */ 11997 retval = entry->execute(ctsio); 11998 11999 bailout: 12000 return (retval); 12001 } 12002 12003 /* 12004 * Since we only implement one target right now, a bus reset simply resets 12005 * our single target. 12006 */ 12007 static int 12008 ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io) 12009 { 12010 return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET)); 12011 } 12012 12013 static int 12014 ctl_target_reset(struct ctl_softc *softc, union ctl_io *io, 12015 ctl_ua_type ua_type) 12016 { 12017 struct ctl_lun *lun; 12018 int retval; 12019 12020 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12021 union ctl_ha_msg msg_info; 12022 12023 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 12024 msg_info.hdr.nexus = io->io_hdr.nexus; 12025 if (ua_type==CTL_UA_TARG_RESET) 12026 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 12027 else 12028 msg_info.task.task_action = CTL_TASK_BUS_RESET; 12029 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12030 msg_info.hdr.original_sc = NULL; 12031 msg_info.hdr.serializing_sc = NULL; 12032 if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL, 12033 (void *)&msg_info, sizeof(msg_info), 0)) { 12034 } 12035 } 12036 retval = 0; 12037 12038 mtx_lock(&softc->ctl_lock); 12039 STAILQ_FOREACH(lun, &softc->lun_list, links) 12040 retval += ctl_lun_reset(lun, io, ua_type); 12041 mtx_unlock(&softc->ctl_lock); 12042 12043 return (retval); 12044 } 12045 12046 /* 12047 * The LUN should always be set. The I/O is optional, and is used to 12048 * distinguish between I/Os sent by this initiator, and by other 12049 * initiators. We set unit attention for initiators other than this one. 12050 * SAM-3 is vague on this point. It does say that a unit attention should 12051 * be established for other initiators when a LUN is reset (see section 12052 * 5.7.3), but it doesn't specifically say that the unit attention should 12053 * be established for this particular initiator when a LUN is reset. Here 12054 * is the relevant text, from SAM-3 rev 8: 12055 * 12056 * 5.7.2 When a SCSI initiator port aborts its own tasks 12057 * 12058 * When a SCSI initiator port causes its own task(s) to be aborted, no 12059 * notification that the task(s) have been aborted shall be returned to 12060 * the SCSI initiator port other than the completion response for the 12061 * command or task management function action that caused the task(s) to 12062 * be aborted and notification(s) associated with related effects of the 12063 * action (e.g., a reset unit attention condition). 12064 * 12065 * XXX KDM for now, we're setting unit attention for all initiators. 12066 */ 12067 static int 12068 ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type) 12069 { 12070 union ctl_io *xio; 12071 #if 0 12072 uint32_t initidx; 12073 #endif 12074 #ifdef CTL_WITH_CA 12075 int i; 12076 #endif 12077 12078 mtx_lock(&lun->lun_lock); 12079 /* 12080 * Run through the OOA queue and abort each I/O. 
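 * Nothing completes here; each command is only flagged with
 * CTL_FLAG_ABORT (and CTL_FLAG_ABORT_STATUS), and the abort takes
 * effect later, e.g. when the command is next seen by ctl_scsiio() or
 * ctl_datamove(), both of which check CTL_FLAG_ABORT.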
12081 */ 12082 #if 0 12083 TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) { 12084 #endif 12085 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12086 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12087 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; 12088 } 12089 12090 /* 12091 * This version sets unit attention for every 12092 */ 12093 #if 0 12094 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12095 ctl_est_ua_all(lun, initidx, ua_type); 12096 #else 12097 ctl_est_ua_all(lun, -1, ua_type); 12098 #endif 12099 12100 /* 12101 * A reset (any kind, really) clears reservations established with 12102 * RESERVE/RELEASE. It does not clear reservations established 12103 * with PERSISTENT RESERVE OUT, but we don't support that at the 12104 * moment anyway. See SPC-2, section 5.6. SPC-3 doesn't address 12105 * reservations made with the RESERVE/RELEASE commands, because 12106 * those commands are obsolete in SPC-3. 12107 */ 12108 lun->flags &= ~CTL_LUN_RESERVED; 12109 12110 #ifdef CTL_WITH_CA 12111 for (i = 0; i < CTL_MAX_INITIATORS; i++) 12112 ctl_clear_mask(lun->have_ca, i); 12113 #endif 12114 mtx_unlock(&lun->lun_lock); 12115 12116 return (0); 12117 } 12118 12119 static void 12120 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 12121 int other_sc) 12122 { 12123 union ctl_io *xio; 12124 12125 mtx_assert(&lun->lun_lock, MA_OWNED); 12126 12127 /* 12128 * Run through the OOA queue and attempt to find the given I/O. 12129 * The target port, initiator ID, tag type and tag number have to 12130 * match the values that we got from the initiator. If we have an 12131 * untagged command to abort, simply abort the first untagged command 12132 * we come to. We only allow one untagged command at a time of course. 12133 */ 12134 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12135 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12136 12137 if ((targ_port == UINT32_MAX || 12138 targ_port == xio->io_hdr.nexus.targ_port) && 12139 (init_id == UINT32_MAX || 12140 init_id == xio->io_hdr.nexus.initid.id)) { 12141 if (targ_port != xio->io_hdr.nexus.targ_port || 12142 init_id != xio->io_hdr.nexus.initid.id) 12143 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; 12144 xio->io_hdr.flags |= CTL_FLAG_ABORT; 12145 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 12146 union ctl_ha_msg msg_info; 12147 12148 msg_info.hdr.nexus = xio->io_hdr.nexus; 12149 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 12150 msg_info.task.tag_num = xio->scsiio.tag_num; 12151 msg_info.task.tag_type = xio->scsiio.tag_type; 12152 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12153 msg_info.hdr.original_sc = NULL; 12154 msg_info.hdr.serializing_sc = NULL; 12155 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 12156 (void *)&msg_info, sizeof(msg_info), 0); 12157 } 12158 } 12159 } 12160 } 12161 12162 static int 12163 ctl_abort_task_set(union ctl_io *io) 12164 { 12165 struct ctl_softc *softc = control_softc; 12166 struct ctl_lun *lun; 12167 uint32_t targ_lun; 12168 12169 /* 12170 * Look up the LUN. 
12171 */ 12172 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12173 mtx_lock(&softc->ctl_lock); 12174 if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL)) 12175 lun = softc->ctl_luns[targ_lun]; 12176 else { 12177 mtx_unlock(&softc->ctl_lock); 12178 return (1); 12179 } 12180 12181 mtx_lock(&lun->lun_lock); 12182 mtx_unlock(&softc->ctl_lock); 12183 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 12184 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 12185 io->io_hdr.nexus.initid.id, 12186 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 12187 } else { /* CTL_TASK_CLEAR_TASK_SET */ 12188 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 12189 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 12190 } 12191 mtx_unlock(&lun->lun_lock); 12192 return (0); 12193 } 12194 12195 static int 12196 ctl_i_t_nexus_reset(union ctl_io *io) 12197 { 12198 struct ctl_softc *softc = control_softc; 12199 struct ctl_lun *lun; 12200 uint32_t initidx, residx; 12201 12202 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12203 residx = ctl_get_resindex(&io->io_hdr.nexus); 12204 mtx_lock(&softc->ctl_lock); 12205 STAILQ_FOREACH(lun, &softc->lun_list, links) { 12206 mtx_lock(&lun->lun_lock); 12207 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 12208 io->io_hdr.nexus.initid.id, 12209 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 12210 #ifdef CTL_WITH_CA 12211 ctl_clear_mask(lun->have_ca, initidx); 12212 #endif 12213 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 12214 lun->flags &= ~CTL_LUN_RESERVED; 12215 ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS); 12216 mtx_unlock(&lun->lun_lock); 12217 } 12218 mtx_unlock(&softc->ctl_lock); 12219 return (0); 12220 } 12221 12222 static int 12223 ctl_abort_task(union ctl_io *io) 12224 { 12225 union ctl_io *xio; 12226 struct ctl_lun *lun; 12227 struct ctl_softc *softc; 12228 #if 0 12229 struct sbuf sb; 12230 char printbuf[128]; 12231 #endif 12232 int found; 12233 uint32_t targ_lun; 12234 12235 softc = control_softc; 12236 found = 0; 12237 12238 /* 12239 * Look up the LUN. 12240 */ 12241 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12242 mtx_lock(&softc->ctl_lock); 12243 if ((targ_lun < CTL_MAX_LUNS) 12244 && (softc->ctl_luns[targ_lun] != NULL)) 12245 lun = softc->ctl_luns[targ_lun]; 12246 else { 12247 mtx_unlock(&softc->ctl_lock); 12248 return (1); 12249 } 12250 12251 #if 0 12252 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 12253 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 12254 #endif 12255 12256 mtx_lock(&lun->lun_lock); 12257 mtx_unlock(&softc->ctl_lock); 12258 /* 12259 * Run through the OOA queue and attempt to find the given I/O. 12260 * The target port, initiator ID, tag type and tag number have to 12261 * match the values that we got from the initiator. If we have an 12262 * untagged command to abort, simply abort the first untagged command 12263 * we come to. We only allow one untagged command at a time of course. 12264 */ 12265 #if 0 12266 TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) { 12267 #endif 12268 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12269 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12270 #if 0 12271 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 12272 12273 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 12274 lun->lun, xio->scsiio.tag_num, 12275 xio->scsiio.tag_type, 12276 (xio->io_hdr.blocked_links.tqe_prev 12277 == NULL) ? 
"" : " BLOCKED", 12278 (xio->io_hdr.flags & 12279 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 12280 (xio->io_hdr.flags & 12281 CTL_FLAG_ABORT) ? " ABORT" : "", 12282 (xio->io_hdr.flags & 12283 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); 12284 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 12285 sbuf_finish(&sb); 12286 printf("%s\n", sbuf_data(&sb)); 12287 #endif 12288 12289 if ((xio->io_hdr.nexus.targ_port == io->io_hdr.nexus.targ_port) 12290 && (xio->io_hdr.nexus.initid.id == 12291 io->io_hdr.nexus.initid.id)) { 12292 /* 12293 * If the abort says that the task is untagged, the 12294 * task in the queue must be untagged. Otherwise, 12295 * we just check to see whether the tag numbers 12296 * match. This is because the QLogic firmware 12297 * doesn't pass back the tag type in an abort 12298 * request. 12299 */ 12300 #if 0 12301 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 12302 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 12303 || (xio->scsiio.tag_num == io->taskio.tag_num)) { 12304 #endif 12305 /* 12306 * XXX KDM we've got problems with FC, because it 12307 * doesn't send down a tag type with aborts. So we 12308 * can only really go by the tag number... 12309 * This may cause problems with parallel SCSI. 12310 * Need to figure that out!! 12311 */ 12312 if (xio->scsiio.tag_num == io->taskio.tag_num) { 12313 xio->io_hdr.flags |= CTL_FLAG_ABORT; 12314 found = 1; 12315 if ((io->io_hdr.flags & 12316 CTL_FLAG_FROM_OTHER_SC) == 0 && 12317 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 12318 union ctl_ha_msg msg_info; 12319 12320 io->io_hdr.flags |= 12321 CTL_FLAG_SENT_2OTHER_SC; 12322 msg_info.hdr.nexus = io->io_hdr.nexus; 12323 msg_info.task.task_action = 12324 CTL_TASK_ABORT_TASK; 12325 msg_info.task.tag_num = 12326 io->taskio.tag_num; 12327 msg_info.task.tag_type = 12328 io->taskio.tag_type; 12329 msg_info.hdr.msg_type = 12330 CTL_MSG_MANAGE_TASKS; 12331 msg_info.hdr.original_sc = NULL; 12332 msg_info.hdr.serializing_sc = NULL; 12333 #if 0 12334 printf("Sent Abort to other side\n"); 12335 #endif 12336 if (CTL_HA_STATUS_SUCCESS != 12337 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 12338 (void *)&msg_info, 12339 sizeof(msg_info), 0)) { 12340 } 12341 } 12342 #if 0 12343 printf("ctl_abort_task: found I/O to abort\n"); 12344 #endif 12345 break; 12346 } 12347 } 12348 } 12349 mtx_unlock(&lun->lun_lock); 12350 12351 if (found == 0) { 12352 /* 12353 * This isn't really an error. It's entirely possible for 12354 * the abort and command completion to cross on the wire. 12355 * This is more of an informative/diagnostic error. 
12356 */ 12357 #if 0 12358 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " 12359 "%d:%d:%d:%d tag %d type %d\n", 12360 io->io_hdr.nexus.initid.id, 12361 io->io_hdr.nexus.targ_port, 12362 io->io_hdr.nexus.targ_target.id, 12363 io->io_hdr.nexus.targ_lun, io->taskio.tag_num, 12364 io->taskio.tag_type); 12365 #endif 12366 } 12367 return (0); 12368 } 12369 12370 static void 12371 ctl_run_task(union ctl_io *io) 12372 { 12373 struct ctl_softc *softc = control_softc; 12374 int retval = 1; 12375 const char *task_desc; 12376 12377 CTL_DEBUG_PRINT(("ctl_run_task\n")); 12378 12379 KASSERT(io->io_hdr.io_type == CTL_IO_TASK, 12380 ("ctl_run_task: Unextected io_type %d\n", 12381 io->io_hdr.io_type)); 12382 12383 task_desc = ctl_scsi_task_string(&io->taskio); 12384 if (task_desc != NULL) { 12385 #ifdef NEEDTOPORT 12386 csevent_log(CSC_CTL | CSC_SHELF_SW | 12387 CTL_TASK_REPORT, 12388 csevent_LogType_Trace, 12389 csevent_Severity_Information, 12390 csevent_AlertLevel_Green, 12391 csevent_FRU_Firmware, 12392 csevent_FRU_Unknown, 12393 "CTL: received task: %s",task_desc); 12394 #endif 12395 } else { 12396 #ifdef NEEDTOPORT 12397 csevent_log(CSC_CTL | CSC_SHELF_SW | 12398 CTL_TASK_REPORT, 12399 csevent_LogType_Trace, 12400 csevent_Severity_Information, 12401 csevent_AlertLevel_Green, 12402 csevent_FRU_Firmware, 12403 csevent_FRU_Unknown, 12404 "CTL: received unknown task " 12405 "type: %d (%#x)", 12406 io->taskio.task_action, 12407 io->taskio.task_action); 12408 #endif 12409 } 12410 switch (io->taskio.task_action) { 12411 case CTL_TASK_ABORT_TASK: 12412 retval = ctl_abort_task(io); 12413 break; 12414 case CTL_TASK_ABORT_TASK_SET: 12415 case CTL_TASK_CLEAR_TASK_SET: 12416 retval = ctl_abort_task_set(io); 12417 break; 12418 case CTL_TASK_CLEAR_ACA: 12419 break; 12420 case CTL_TASK_I_T_NEXUS_RESET: 12421 retval = ctl_i_t_nexus_reset(io); 12422 break; 12423 case CTL_TASK_LUN_RESET: { 12424 struct ctl_lun *lun; 12425 uint32_t targ_lun; 12426 12427 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12428 mtx_lock(&softc->ctl_lock); 12429 if ((targ_lun < CTL_MAX_LUNS) 12430 && (softc->ctl_luns[targ_lun] != NULL)) 12431 lun = softc->ctl_luns[targ_lun]; 12432 else { 12433 mtx_unlock(&softc->ctl_lock); 12434 retval = 1; 12435 break; 12436 } 12437 12438 if (!(io->io_hdr.flags & 12439 CTL_FLAG_FROM_OTHER_SC)) { 12440 union ctl_ha_msg msg_info; 12441 12442 io->io_hdr.flags |= 12443 CTL_FLAG_SENT_2OTHER_SC; 12444 msg_info.hdr.msg_type = 12445 CTL_MSG_MANAGE_TASKS; 12446 msg_info.hdr.nexus = io->io_hdr.nexus; 12447 msg_info.task.task_action = 12448 CTL_TASK_LUN_RESET; 12449 msg_info.hdr.original_sc = NULL; 12450 msg_info.hdr.serializing_sc = NULL; 12451 if (CTL_HA_STATUS_SUCCESS != 12452 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 12453 (void *)&msg_info, 12454 sizeof(msg_info), 0)) { 12455 } 12456 } 12457 12458 retval = ctl_lun_reset(lun, io, 12459 CTL_UA_LUN_RESET); 12460 mtx_unlock(&softc->ctl_lock); 12461 break; 12462 } 12463 case CTL_TASK_TARGET_RESET: 12464 retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET); 12465 break; 12466 case CTL_TASK_BUS_RESET: 12467 retval = ctl_bus_reset(softc, io); 12468 break; 12469 case CTL_TASK_PORT_LOGIN: 12470 break; 12471 case CTL_TASK_PORT_LOGOUT: 12472 break; 12473 default: 12474 printf("ctl_run_task: got unknown task management event %d\n", 12475 io->taskio.task_action); 12476 break; 12477 } 12478 if (retval == 0) 12479 io->io_hdr.status = CTL_SUCCESS; 12480 else 12481 io->io_hdr.status = CTL_ERROR; 12482 ctl_done(io); 12483 } 12484 12485 /* 12486 * For HA operation. 
Handle commands that come in from the other 12487 * controller. 12488 */ 12489 static void 12490 ctl_handle_isc(union ctl_io *io) 12491 { 12492 int free_io; 12493 struct ctl_lun *lun; 12494 struct ctl_softc *softc; 12495 uint32_t targ_lun; 12496 12497 softc = control_softc; 12498 12499 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12500 lun = softc->ctl_luns[targ_lun]; 12501 12502 switch (io->io_hdr.msg_type) { 12503 case CTL_MSG_SERIALIZE: 12504 free_io = ctl_serialize_other_sc_cmd(&io->scsiio); 12505 break; 12506 case CTL_MSG_R2R: { 12507 const struct ctl_cmd_entry *entry; 12508 12509 /* 12510 * This is only used in SER_ONLY mode. 12511 */ 12512 free_io = 0; 12513 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 12514 mtx_lock(&lun->lun_lock); 12515 if (ctl_scsiio_lun_check(lun, 12516 entry, (struct ctl_scsiio *)io) != 0) { 12517 mtx_unlock(&lun->lun_lock); 12518 ctl_done(io); 12519 break; 12520 } 12521 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12522 mtx_unlock(&lun->lun_lock); 12523 ctl_enqueue_rtr(io); 12524 break; 12525 } 12526 case CTL_MSG_FINISH_IO: 12527 if (softc->ha_mode == CTL_HA_MODE_XFER) { 12528 free_io = 0; 12529 ctl_done(io); 12530 } else { 12531 free_io = 1; 12532 mtx_lock(&lun->lun_lock); 12533 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, 12534 ooa_links); 12535 ctl_check_blocked(lun); 12536 mtx_unlock(&lun->lun_lock); 12537 } 12538 break; 12539 case CTL_MSG_PERS_ACTION: 12540 ctl_hndl_per_res_out_on_other_sc( 12541 (union ctl_ha_msg *)&io->presio.pr_msg); 12542 free_io = 1; 12543 break; 12544 case CTL_MSG_BAD_JUJU: 12545 free_io = 0; 12546 ctl_done(io); 12547 break; 12548 case CTL_MSG_DATAMOVE: 12549 /* Only used in XFER mode */ 12550 free_io = 0; 12551 ctl_datamove_remote(io); 12552 break; 12553 case CTL_MSG_DATAMOVE_DONE: 12554 /* Only used in XFER mode */ 12555 free_io = 0; 12556 io->scsiio.be_move_done(io); 12557 break; 12558 default: 12559 free_io = 1; 12560 printf("%s: Invalid message type %d\n", 12561 __func__, io->io_hdr.msg_type); 12562 break; 12563 } 12564 if (free_io) 12565 ctl_free_io(io); 12566 12567 } 12568 12569 12570 /* 12571 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12572 * there is no match. 12573 */ 12574 static ctl_lun_error_pattern 12575 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12576 { 12577 const struct ctl_cmd_entry *entry; 12578 ctl_lun_error_pattern filtered_pattern, pattern; 12579 12580 pattern = desc->error_pattern; 12581 12582 /* 12583 * XXX KDM we need more data passed into this function to match a 12584 * custom pattern, and we actually need to implement custom pattern 12585 * matching. 12586 */ 12587 if (pattern & CTL_LUN_PAT_CMD) 12588 return (CTL_LUN_PAT_CMD); 12589 12590 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12591 return (CTL_LUN_PAT_ANY); 12592 12593 entry = ctl_get_cmd_entry(ctsio, NULL); 12594 12595 filtered_pattern = entry->pattern & pattern; 12596 12597 /* 12598 * If the user requested specific flags in the pattern (e.g. 12599 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12600 * flags. 12601 * 12602 * If the user did not specify any flags, it doesn't matter whether 12603 * or not the command supports the flags. 12604 */ 12605 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12606 (pattern & ~CTL_LUN_PAT_MASK)) 12607 return (CTL_LUN_PAT_NONE); 12608 12609 /* 12610 * If the user asked for a range check, see if the requested LBA 12611 * range overlaps with this command's LBA range. 
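 * For example, a descriptor range of lba 0x1000, len 0x100 (LBAs
 * 0x1000-0x10ff) matches a WRITE starting at LBA 0x10f0, but not one
 * starting at LBA 0x1100; for the latter ctl_extent_check_lba() reports
 * CTL_ACTION_PASS and the descriptor is skipped.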
12612 */ 12613 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12614 uint64_t lba1; 12615 uint64_t len1; 12616 ctl_action action; 12617 int retval; 12618 12619 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12620 if (retval != 0) 12621 return (CTL_LUN_PAT_NONE); 12622 12623 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12624 desc->lba_range.len, FALSE); 12625 /* 12626 * A "pass" means that the LBA ranges don't overlap, so 12627 * this doesn't match the user's range criteria. 12628 */ 12629 if (action == CTL_ACTION_PASS) 12630 return (CTL_LUN_PAT_NONE); 12631 } 12632 12633 return (filtered_pattern); 12634 } 12635 12636 static void 12637 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12638 { 12639 struct ctl_error_desc *desc, *desc2; 12640 12641 mtx_assert(&lun->lun_lock, MA_OWNED); 12642 12643 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12644 ctl_lun_error_pattern pattern; 12645 /* 12646 * Check to see whether this particular command matches 12647 * the pattern in the descriptor. 12648 */ 12649 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12650 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12651 continue; 12652 12653 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12654 case CTL_LUN_INJ_ABORTED: 12655 ctl_set_aborted(&io->scsiio); 12656 break; 12657 case CTL_LUN_INJ_MEDIUM_ERR: 12658 ctl_set_medium_error(&io->scsiio); 12659 break; 12660 case CTL_LUN_INJ_UA: 12661 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12662 * OCCURRED */ 12663 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12664 break; 12665 case CTL_LUN_INJ_CUSTOM: 12666 /* 12667 * We're assuming the user knows what he is doing. 12668 * Just copy the sense information without doing 12669 * checks. 12670 */ 12671 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12672 MIN(sizeof(desc->custom_sense), 12673 sizeof(io->scsiio.sense_data))); 12674 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12675 io->scsiio.sense_len = SSD_FULL_SIZE; 12676 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12677 break; 12678 case CTL_LUN_INJ_NONE: 12679 default: 12680 /* 12681 * If this is an error injection type we don't know 12682 * about, clear the continuous flag (if it is set) 12683 * so it will get deleted below. 
12684 */ 12685 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12686 break; 12687 } 12688 /* 12689 * By default, each error injection action is a one-shot 12690 */ 12691 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12692 continue; 12693 12694 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12695 12696 free(desc, M_CTL); 12697 } 12698 } 12699 12700 #ifdef CTL_IO_DELAY 12701 static void 12702 ctl_datamove_timer_wakeup(void *arg) 12703 { 12704 union ctl_io *io; 12705 12706 io = (union ctl_io *)arg; 12707 12708 ctl_datamove(io); 12709 } 12710 #endif /* CTL_IO_DELAY */ 12711 12712 void 12713 ctl_datamove(union ctl_io *io) 12714 { 12715 void (*fe_datamove)(union ctl_io *io); 12716 12717 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 12718 12719 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12720 12721 #ifdef CTL_TIME_IO 12722 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12723 char str[256]; 12724 char path_str[64]; 12725 struct sbuf sb; 12726 12727 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12728 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12729 12730 sbuf_cat(&sb, path_str); 12731 switch (io->io_hdr.io_type) { 12732 case CTL_IO_SCSI: 12733 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12734 sbuf_printf(&sb, "\n"); 12735 sbuf_cat(&sb, path_str); 12736 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12737 io->scsiio.tag_num, io->scsiio.tag_type); 12738 break; 12739 case CTL_IO_TASK: 12740 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12741 "Tag Type: %d\n", io->taskio.task_action, 12742 io->taskio.tag_num, io->taskio.tag_type); 12743 break; 12744 default: 12745 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12746 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12747 break; 12748 } 12749 sbuf_cat(&sb, path_str); 12750 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12751 (intmax_t)time_uptime - io->io_hdr.start_time); 12752 sbuf_finish(&sb); 12753 printf("%s", sbuf_data(&sb)); 12754 } 12755 #endif /* CTL_TIME_IO */ 12756 12757 #ifdef CTL_IO_DELAY 12758 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12759 struct ctl_lun *lun; 12760 12761 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12762 12763 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12764 } else { 12765 struct ctl_lun *lun; 12766 12767 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12768 if ((lun != NULL) 12769 && (lun->delay_info.datamove_delay > 0)) { 12770 struct callout *callout; 12771 12772 callout = (struct callout *)&io->io_hdr.timer_bytes; 12773 callout_init(callout, /*mpsafe*/ 1); 12774 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12775 callout_reset(callout, 12776 lun->delay_info.datamove_delay * hz, 12777 ctl_datamove_timer_wakeup, io); 12778 if (lun->delay_info.datamove_type == 12779 CTL_DELAY_TYPE_ONESHOT) 12780 lun->delay_info.datamove_delay = 0; 12781 return; 12782 } 12783 } 12784 #endif 12785 12786 /* 12787 * This command has been aborted. Set the port status, so we fail 12788 * the data move. 12789 */ 12790 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12791 printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n", 12792 io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id, 12793 io->io_hdr.nexus.targ_port, 12794 (uintmax_t)io->io_hdr.nexus.targ_target.id, 12795 io->io_hdr.nexus.targ_lun); 12796 io->io_hdr.port_status = 31337; 12797 /* 12798 * Note that the backend, in this case, will get the 12799 * callback in its context. In other cases it may get 12800 * called in the frontend's interrupt thread context. 
12801 */ 12802 io->scsiio.be_move_done(io); 12803 return; 12804 } 12805 12806 /* Don't confuse frontend with zero length data move. */ 12807 if (io->scsiio.kern_data_len == 0) { 12808 io->scsiio.be_move_done(io); 12809 return; 12810 } 12811 12812 /* 12813 * If we're in XFER mode and this I/O is from the other shelf 12814 * controller, we need to send the DMA to the other side to 12815 * actually transfer the data to/from the host. In serialize only 12816 * mode the transfer happens below CTL and ctl_datamove() is only 12817 * called on the machine that originally received the I/O. 12818 */ 12819 if ((control_softc->ha_mode == CTL_HA_MODE_XFER) 12820 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12821 union ctl_ha_msg msg; 12822 uint32_t sg_entries_sent; 12823 int do_sg_copy; 12824 int i; 12825 12826 memset(&msg, 0, sizeof(msg)); 12827 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 12828 msg.hdr.original_sc = io->io_hdr.original_sc; 12829 msg.hdr.serializing_sc = io; 12830 msg.hdr.nexus = io->io_hdr.nexus; 12831 msg.dt.flags = io->io_hdr.flags; 12832 /* 12833 * We convert everything into a S/G list here. We can't 12834 * pass by reference, only by value between controllers. 12835 * So we can't pass a pointer to the S/G list, only as many 12836 * S/G entries as we can fit in here. If it's possible for 12837 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 12838 * then we need to break this up into multiple transfers. 12839 */ 12840 if (io->scsiio.kern_sg_entries == 0) { 12841 msg.dt.kern_sg_entries = 1; 12842 /* 12843 * If this is in cached memory, flush the cache 12844 * before we send the DMA request to the other 12845 * controller. We want to do this in either the 12846 * read or the write case. The read case is 12847 * straightforward. In the write case, we want to 12848 * make sure nothing is in the local cache that 12849 * could overwrite the DMAed data. 12850 */ 12851 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12852 /* 12853 * XXX KDM use bus_dmamap_sync() here. 12854 */ 12855 } 12856 12857 /* 12858 * Convert to a physical address if this is a 12859 * virtual address. 12860 */ 12861 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 12862 msg.dt.sg_list[0].addr = 12863 io->scsiio.kern_data_ptr; 12864 } else { 12865 /* 12866 * XXX KDM use busdma here! 12867 */ 12868 #if 0 12869 msg.dt.sg_list[0].addr = (void *) 12870 vtophys(io->scsiio.kern_data_ptr); 12871 #endif 12872 } 12873 12874 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 12875 do_sg_copy = 0; 12876 } else { 12877 struct ctl_sg_entry *sgl; 12878 12879 do_sg_copy = 1; 12880 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 12881 sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 12882 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12883 /* 12884 * XXX KDM use bus_dmamap_sync() here. 12885 */ 12886 } 12887 } 12888 12889 msg.dt.kern_data_len = io->scsiio.kern_data_len; 12890 msg.dt.kern_total_len = io->scsiio.kern_total_len; 12891 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 12892 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 12893 msg.dt.sg_sequence = 0; 12894 12895 /* 12896 * Loop until we've sent all of the S/G entries. 
On the 12897 * other end, we'll recompose these S/G entries into one 12898 * contiguous list before the data is actually moved. 12899 */ 12900 for (sg_entries_sent = 0; sg_entries_sent < 12901 msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { 12902 msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list)/ 12903 sizeof(msg.dt.sg_list[0])), 12904 msg.dt.kern_sg_entries - sg_entries_sent); 12905 12906 if (do_sg_copy != 0) { 12907 struct ctl_sg_entry *sgl; 12908 int j; 12909 12910 sgl = (struct ctl_sg_entry *) 12911 io->scsiio.kern_data_ptr; 12912 /* 12913 * If this is in cached memory, flush the cache 12914 * before we send the DMA request to the other 12915 * controller. We want to do this in either 12916 * the read or the write case. The read 12917 * case is straightforward. In the write 12918 * case, we want to make sure nothing is 12919 * in the local cache that could overwrite 12920 * the DMAed data. 12921 */ 12922 12923 for (i = sg_entries_sent, j = 0; 12924 i < msg.dt.cur_sg_entries; i++, j++) { 12925 if ((io->io_hdr.flags & 12926 CTL_FLAG_NO_DATASYNC) == 0) { 12927 /* 12928 * XXX KDM use bus_dmamap_sync() 12929 */ 12930 } 12931 if ((io->io_hdr.flags & 12932 CTL_FLAG_BUS_ADDR) == 0) { 12933 /* 12934 * XXX KDM use busdma. 12935 */ 12936 #if 0 12937 msg.dt.sg_list[j].addr =(void *) 12938 vtophys(sgl[i].addr); 12939 #endif 12940 } else { 12941 msg.dt.sg_list[j].addr = 12942 sgl[i].addr; 12943 } 12944 msg.dt.sg_list[j].len = sgl[i].len; 12945 } 12946 } 12947 12948 sg_entries_sent += msg.dt.cur_sg_entries; 12949 if (sg_entries_sent >= msg.dt.kern_sg_entries) 12950 msg.dt.sg_last = 1; 12951 else 12952 msg.dt.sg_last = 0; 12953 12954 /* 12955 * XXX KDM drop and reacquire the lock here? 12956 */ 12957 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12958 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { 12959 /* 12960 * XXX do something here. 12961 */ 12962 } 12963 12964 msg.dt.sent_sg_entries = sg_entries_sent; 12965 } 12966 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12967 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) 12968 ctl_failover_io(io, /*have_lock*/ 0); 12969 12970 } else { 12971 12972 /* 12973 * Lookup the fe_datamove() function for this particular 12974 * front end.
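 * (The same per-port fe_datamove hook is what the remote read and
 * write paths further below use to move data between the FETD and the
 * locally allocated buffers.)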
12975 */ 12976 fe_datamove = 12977 control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 12978 12979 fe_datamove(io); 12980 } 12981 } 12982 12983 static void 12984 ctl_send_datamove_done(union ctl_io *io, int have_lock) 12985 { 12986 union ctl_ha_msg msg; 12987 int isc_status; 12988 12989 memset(&msg, 0, sizeof(msg)); 12990 12991 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12992 msg.hdr.original_sc = io; 12993 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12994 msg.hdr.nexus = io->io_hdr.nexus; 12995 msg.hdr.status = io->io_hdr.status; 12996 msg.scsi.tag_num = io->scsiio.tag_num; 12997 msg.scsi.tag_type = io->scsiio.tag_type; 12998 msg.scsi.scsi_status = io->scsiio.scsi_status; 12999 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 13000 sizeof(io->scsiio.sense_data)); 13001 msg.scsi.sense_len = io->scsiio.sense_len; 13002 msg.scsi.sense_residual = io->scsiio.sense_residual; 13003 msg.scsi.fetd_status = io->io_hdr.port_status; 13004 msg.scsi.residual = io->scsiio.residual; 13005 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 13006 13007 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 13008 ctl_failover_io(io, /*have_lock*/ have_lock); 13009 return; 13010 } 13011 13012 isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0); 13013 if (isc_status > CTL_HA_STATUS_SUCCESS) { 13014 /* XXX do something if this fails */ 13015 } 13016 13017 } 13018 13019 /* 13020 * The DMA to the remote side is done, now we need to tell the other side 13021 * we're done so it can continue with its data movement. 13022 */ 13023 static void 13024 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 13025 { 13026 union ctl_io *io; 13027 13028 io = rq->context; 13029 13030 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 13031 printf("%s: ISC DMA write failed with error %d", __func__, 13032 rq->ret); 13033 ctl_set_internal_failure(&io->scsiio, 13034 /*sks_valid*/ 1, 13035 /*retry_count*/ rq->ret); 13036 } 13037 13038 ctl_dt_req_free(rq); 13039 13040 /* 13041 * In this case, we had to malloc the memory locally. Free it. 13042 */ 13043 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 13044 int i; 13045 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 13046 free(io->io_hdr.local_sglist[i].addr, M_CTL); 13047 } 13048 /* 13049 * The data is in local and remote memory, so now we need to send 13050 * status (good or back) back to the other side. 13051 */ 13052 ctl_send_datamove_done(io, /*have_lock*/ 0); 13053 } 13054 13055 /* 13056 * We've moved the data from the host/controller into local memory. Now we 13057 * need to push it over to the remote controller's memory. 13058 */ 13059 static int 13060 ctl_datamove_remote_dm_write_cb(union ctl_io *io) 13061 { 13062 int retval; 13063 13064 retval = 0; 13065 13066 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 13067 ctl_datamove_remote_write_cb); 13068 13069 return (retval); 13070 } 13071 13072 static void 13073 ctl_datamove_remote_write(union ctl_io *io) 13074 { 13075 int retval; 13076 void (*fe_datamove)(union ctl_io *io); 13077 13078 /* 13079 * - Get the data from the host/HBA into local memory. 13080 * - DMA memory from the local controller to the remote controller. 13081 * - Send status back to the remote controller. 
13082 */ 13083 13084 retval = ctl_datamove_remote_sgl_setup(io); 13085 if (retval != 0) 13086 return; 13087 13088 /* Switch the pointer over so the FETD knows what to do */ 13089 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 13090 13091 /* 13092 * Use a custom move done callback, since we need to send completion 13093 * back to the other controller, not to the backend on this side. 13094 */ 13095 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 13096 13097 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 13098 13099 fe_datamove(io); 13100 13101 return; 13102 13103 } 13104 13105 static int 13106 ctl_datamove_remote_dm_read_cb(union ctl_io *io) 13107 { 13108 #if 0 13109 char str[256]; 13110 char path_str[64]; 13111 struct sbuf sb; 13112 #endif 13113 13114 /* 13115 * In this case, we had to malloc the memory locally. Free it. 13116 */ 13117 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 13118 int i; 13119 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 13120 free(io->io_hdr.local_sglist[i].addr, M_CTL); 13121 } 13122 13123 #if 0 13124 scsi_path_string(io, path_str, sizeof(path_str)); 13125 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 13126 sbuf_cat(&sb, path_str); 13127 scsi_command_string(&io->scsiio, NULL, &sb); 13128 sbuf_printf(&sb, "\n"); 13129 sbuf_cat(&sb, path_str); 13130 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 13131 io->scsiio.tag_num, io->scsiio.tag_type); 13132 sbuf_cat(&sb, path_str); 13133 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 13134 io->io_hdr.flags, io->io_hdr.status); 13135 sbuf_finish(&sb); 13136 printk("%s", sbuf_data(&sb)); 13137 #endif 13138 13139 13140 /* 13141 * The read is done, now we need to send status (good or bad) back 13142 * to the other side. 13143 */ 13144 ctl_send_datamove_done(io, /*have_lock*/ 0); 13145 13146 return (0); 13147 } 13148 13149 static void 13150 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 13151 { 13152 union ctl_io *io; 13153 void (*fe_datamove)(union ctl_io *io); 13154 13155 io = rq->context; 13156 13157 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 13158 printf("%s: ISC DMA read failed with error %d", __func__, 13159 rq->ret); 13160 ctl_set_internal_failure(&io->scsiio, 13161 /*sks_valid*/ 1, 13162 /*retry_count*/ rq->ret); 13163 } 13164 13165 ctl_dt_req_free(rq); 13166 13167 /* Switch the pointer over so the FETD knows what to do */ 13168 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 13169 13170 /* 13171 * Use a custom move done callback, since we need to send completion 13172 * back to the other controller, not to the backend on this side. 13173 */ 13174 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 13175 13176 /* XXX KDM add checks like the ones in ctl_datamove? 
*/ 13177 13178 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 13179 13180 fe_datamove(io); 13181 } 13182 13183 static int 13184 ctl_datamove_remote_sgl_setup(union ctl_io *io) 13185 { 13186 struct ctl_sg_entry *local_sglist, *remote_sglist; 13187 struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist; 13188 struct ctl_softc *softc; 13189 int retval; 13190 int i; 13191 13192 retval = 0; 13193 softc = control_softc; 13194 13195 local_sglist = io->io_hdr.local_sglist; 13196 local_dma_sglist = io->io_hdr.local_dma_sglist; 13197 remote_sglist = io->io_hdr.remote_sglist; 13198 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 13199 13200 if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) { 13201 for (i = 0; i < io->scsiio.kern_sg_entries; i++) { 13202 local_sglist[i].len = remote_sglist[i].len; 13203 13204 /* 13205 * XXX Detect the situation where the RS-level I/O 13206 * redirector on the other side has already read the 13207 * data off of the AOR RS on this side, and 13208 * transferred it to remote (mirror) memory on the 13209 * other side. Since we already have the data in 13210 * memory here, we just need to use it. 13211 * 13212 * XXX KDM this can probably be removed once we 13213 * get the cache device code in and take the 13214 * current AOR implementation out. 13215 */ 13216 #ifdef NEEDTOPORT 13217 if ((remote_sglist[i].addr >= 13218 (void *)vtophys(softc->mirr->addr)) 13219 && (remote_sglist[i].addr < 13220 ((void *)vtophys(softc->mirr->addr) + 13221 CacheMirrorOffset))) { 13222 local_sglist[i].addr = remote_sglist[i].addr - 13223 CacheMirrorOffset; 13224 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13225 CTL_FLAG_DATA_IN) 13226 io->io_hdr.flags |= CTL_FLAG_REDIR_DONE; 13227 } else { 13228 local_sglist[i].addr = remote_sglist[i].addr + 13229 CacheMirrorOffset; 13230 } 13231 #endif 13232 #if 0 13233 printf("%s: local %p, remote %p, len %d\n", 13234 __func__, local_sglist[i].addr, 13235 remote_sglist[i].addr, local_sglist[i].len); 13236 #endif 13237 } 13238 } else { 13239 uint32_t len_to_go; 13240 13241 /* 13242 * In this case, we don't have automatically allocated 13243 * memory for this I/O on this controller. This typically 13244 * happens with internal CTL I/O -- e.g. inquiry, mode 13245 * sense, etc. Anything coming from RAIDCore will have 13246 * a mirror area available. 13247 */ 13248 len_to_go = io->scsiio.kern_data_len; 13249 13250 /* 13251 * Clear the no datasync flag, we have to use malloced 13252 * buffers. 13253 */ 13254 io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC; 13255 13256 /* 13257 * The difficult thing here is that the size of the various 13258 * S/G segments may be different than the size from the 13259 * remote controller. That'll make it harder when DMAing 13260 * the data back to the other side. 
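 * For example, the local list built below uses segments of at most
 * 131072 bytes, while the remote list can use any segment sizes;
 * ctl_datamove_remote_xfer() copes by transferring (roughly)
 * MIN(local remaining, remote remaining) bytes per request.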
13261 */ 13262 for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) / 13263 sizeof(io->io_hdr.remote_sglist[0])) && 13264 (len_to_go > 0); i++) { 13265 local_sglist[i].len = MIN(len_to_go, 131072); 13266 CTL_SIZE_8B(local_dma_sglist[i].len, 13267 local_sglist[i].len); 13268 local_sglist[i].addr = 13269 malloc(local_dma_sglist[i].len, M_CTL,M_WAITOK); 13270 13271 local_dma_sglist[i].addr = local_sglist[i].addr; 13272 13273 if (local_sglist[i].addr == NULL) { 13274 int j; 13275 13276 printf("malloc failed for %zd bytes!", 13277 local_dma_sglist[i].len); 13278 for (j = 0; j < i; j++) { 13279 free(local_sglist[j].addr, M_CTL); 13280 } 13281 ctl_set_internal_failure(&io->scsiio, 13282 /*sks_valid*/ 1, 13283 /*retry_count*/ 4857); 13284 retval = 1; 13285 goto bailout_error; 13286 13287 } 13288 /* XXX KDM do we need a sync here? */ 13289 13290 len_to_go -= local_sglist[i].len; 13291 } 13292 /* 13293 * Reset the number of S/G entries accordingly. The 13294 * original number of S/G entries is available in 13295 * rem_sg_entries. 13296 */ 13297 io->scsiio.kern_sg_entries = i; 13298 13299 #if 0 13300 printf("%s: kern_sg_entries = %d\n", __func__, 13301 io->scsiio.kern_sg_entries); 13302 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 13303 printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i, 13304 local_sglist[i].addr, local_sglist[i].len, 13305 local_dma_sglist[i].len); 13306 #endif 13307 } 13308 13309 13310 return (retval); 13311 13312 bailout_error: 13313 13314 ctl_send_datamove_done(io, /*have_lock*/ 0); 13315 13316 return (retval); 13317 } 13318 13319 static int 13320 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 13321 ctl_ha_dt_cb callback) 13322 { 13323 struct ctl_ha_dt_req *rq; 13324 struct ctl_sg_entry *remote_sglist, *local_sglist; 13325 struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist; 13326 uint32_t local_used, remote_used, total_used; 13327 int retval; 13328 int i, j; 13329 13330 retval = 0; 13331 13332 rq = ctl_dt_req_alloc(); 13333 13334 /* 13335 * If we failed to allocate the request, and if the DMA didn't fail 13336 * anyway, set busy status. This is just a resource allocation 13337 * failure. 13338 */ 13339 if ((rq == NULL) 13340 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) 13341 ctl_set_busy(&io->scsiio); 13342 13343 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) { 13344 13345 if (rq != NULL) 13346 ctl_dt_req_free(rq); 13347 13348 /* 13349 * The data move failed. We need to return status back 13350 * to the other controller. No point in trying to DMA 13351 * data to the remote controller. 13352 */ 13353 13354 ctl_send_datamove_done(io, /*have_lock*/ 0); 13355 13356 retval = 1; 13357 13358 goto bailout; 13359 } 13360 13361 local_sglist = io->io_hdr.local_sglist; 13362 local_dma_sglist = io->io_hdr.local_dma_sglist; 13363 remote_sglist = io->io_hdr.remote_sglist; 13364 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 13365 local_used = 0; 13366 remote_used = 0; 13367 total_used = 0; 13368 13369 if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) { 13370 rq->ret = CTL_HA_STATUS_SUCCESS; 13371 rq->context = io; 13372 callback(rq); 13373 goto bailout; 13374 } 13375 13376 /* 13377 * Pull/push the data over the wire from/to the other controller. 13378 * This takes into account the possibility that the local and 13379 * remote sglists may not be identical in terms of the size of 13380 * the elements and the number of elements. 
13381 * 13382 * One fundamental assumption here is that the length allocated for 13383 * both the local and remote sglists is identical. Otherwise, we've 13384 * essentially got a coding error of some sort. 13385 */ 13386 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 13387 int isc_ret; 13388 uint32_t cur_len, dma_length; 13389 uint8_t *tmp_ptr; 13390 13391 rq->id = CTL_HA_DATA_CTL; 13392 rq->command = command; 13393 rq->context = io; 13394 13395 /* 13396 * Both pointers should be aligned. But it is possible 13397 * that the allocation length is not. They should both 13398 * also have enough slack left over at the end, though, 13399 * to round up to the next 8 byte boundary. 13400 */ 13401 cur_len = MIN(local_sglist[i].len - local_used, 13402 remote_sglist[j].len - remote_used); 13403 13404 /* 13405 * In this case, we have a size issue and need to decrease 13406 * the size, except in the case where we actually have less 13407 * than 8 bytes left. In that case, we need to increase 13408 * the DMA length to get the last bit. 13409 */ 13410 if ((cur_len & 0x7) != 0) { 13411 if (cur_len > 0x7) { 13412 cur_len = cur_len - (cur_len & 0x7); 13413 dma_length = cur_len; 13414 } else { 13415 CTL_SIZE_8B(dma_length, cur_len); 13416 } 13417 13418 } else 13419 dma_length = cur_len; 13420 13421 /* 13422 * If we had to allocate memory for this I/O, instead of using 13423 * the non-cached mirror memory, we'll need to flush the cache 13424 * before trying to DMA to the other controller. 13425 * 13426 * We could end up doing this multiple times for the same 13427 * segment if we have a larger local segment than remote 13428 * segment. That shouldn't be an issue. 13429 */ 13430 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 13431 /* 13432 * XXX KDM use bus_dmamap_sync() here. 13433 */ 13434 } 13435 13436 rq->size = dma_length; 13437 13438 tmp_ptr = (uint8_t *)local_sglist[i].addr; 13439 tmp_ptr += local_used; 13440 13441 /* Use physical addresses when talking to ISC hardware */ 13442 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 13443 /* XXX KDM use busdma */ 13444 #if 0 13445 rq->local = vtophys(tmp_ptr); 13446 #endif 13447 } else 13448 rq->local = tmp_ptr; 13449 13450 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 13451 tmp_ptr += remote_used; 13452 rq->remote = tmp_ptr; 13453 13454 rq->callback = NULL; 13455 13456 local_used += cur_len; 13457 if (local_used >= local_sglist[i].len) { 13458 i++; 13459 local_used = 0; 13460 } 13461 13462 remote_used += cur_len; 13463 if (remote_used >= remote_sglist[j].len) { 13464 j++; 13465 remote_used = 0; 13466 } 13467 total_used += cur_len; 13468 13469 if (total_used >= io->scsiio.kern_data_len) 13470 rq->callback = callback; 13471 13472 if ((rq->size & 0x7) != 0) { 13473 printf("%s: warning: size %d is not on 8b boundary\n", 13474 __func__, rq->size); 13475 } 13476 if (((uintptr_t)rq->local & 0x7) != 0) { 13477 printf("%s: warning: local %p not on 8b boundary\n", 13478 __func__, rq->local); 13479 } 13480 if (((uintptr_t)rq->remote & 0x7) != 0) { 13481 printf("%s: warning: remote %p not on 8b boundary\n", 13482 __func__, rq->local); 13483 } 13484 #if 0 13485 printf("%s: %s: local %#x remote %#x size %d\n", __func__, 13486 (command == CTL_HA_DT_CMD_WRITE) ? 
"WRITE" : "READ", 13487 rq->local, rq->remote, rq->size); 13488 #endif 13489 13490 isc_ret = ctl_dt_single(rq); 13491 if (isc_ret == CTL_HA_STATUS_WAIT) 13492 continue; 13493 13494 if (isc_ret == CTL_HA_STATUS_DISCONNECT) { 13495 rq->ret = CTL_HA_STATUS_SUCCESS; 13496 } else { 13497 rq->ret = isc_ret; 13498 } 13499 callback(rq); 13500 goto bailout; 13501 } 13502 13503 bailout: 13504 return (retval); 13505 13506 } 13507 13508 static void 13509 ctl_datamove_remote_read(union ctl_io *io) 13510 { 13511 int retval; 13512 int i; 13513 13514 /* 13515 * This will send an error to the other controller in the case of a 13516 * failure. 13517 */ 13518 retval = ctl_datamove_remote_sgl_setup(io); 13519 if (retval != 0) 13520 return; 13521 13522 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 13523 ctl_datamove_remote_read_cb); 13524 if ((retval != 0) 13525 && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) { 13526 /* 13527 * Make sure we free memory if there was an error.. The 13528 * ctl_datamove_remote_xfer() function will send the 13529 * datamove done message, or call the callback with an 13530 * error if there is a problem. 13531 */ 13532 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 13533 free(io->io_hdr.local_sglist[i].addr, M_CTL); 13534 } 13535 13536 return; 13537 } 13538 13539 /* 13540 * Process a datamove request from the other controller. This is used for 13541 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 13542 * first. Once that is complete, the data gets DMAed into the remote 13543 * controller's memory. For reads, we DMA from the remote controller's 13544 * memory into our memory first, and then move it out to the FETD. 13545 */ 13546 static void 13547 ctl_datamove_remote(union ctl_io *io) 13548 { 13549 struct ctl_softc *softc; 13550 13551 softc = control_softc; 13552 13553 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 13554 13555 /* 13556 * Note that we look for an aborted I/O here, but don't do some of 13557 * the other checks that ctl_datamove() normally does. 13558 * We don't need to run the datamove delay code, since that should 13559 * have been done if need be on the other controller. 
13560 */ 13561 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 13562 printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__, 13563 io->scsiio.tag_num, io->io_hdr.nexus.initid.id, 13564 io->io_hdr.nexus.targ_port, 13565 io->io_hdr.nexus.targ_target.id, 13566 io->io_hdr.nexus.targ_lun); 13567 io->io_hdr.port_status = 31338; 13568 ctl_send_datamove_done(io, /*have_lock*/ 0); 13569 return; 13570 } 13571 13572 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) { 13573 ctl_datamove_remote_write(io); 13574 } else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){ 13575 ctl_datamove_remote_read(io); 13576 } else { 13577 union ctl_ha_msg msg; 13578 struct scsi_sense_data *sense; 13579 uint8_t sks[3]; 13580 int retry_count; 13581 13582 memset(&msg, 0, sizeof(msg)); 13583 13584 msg.hdr.msg_type = CTL_MSG_BAD_JUJU; 13585 msg.hdr.status = CTL_SCSI_ERROR; 13586 msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 13587 13588 retry_count = 4243; 13589 13590 sense = &msg.scsi.sense_data; 13591 sks[0] = SSD_SCS_VALID; 13592 sks[1] = (retry_count >> 8) & 0xff; 13593 sks[2] = retry_count & 0xff; 13594 13595 /* "Internal target failure" */ 13596 scsi_set_sense_data(sense, 13597 /*sense_format*/ SSD_TYPE_NONE, 13598 /*current_error*/ 1, 13599 /*sense_key*/ SSD_KEY_HARDWARE_ERROR, 13600 /*asc*/ 0x44, 13601 /*ascq*/ 0x00, 13602 /*type*/ SSD_ELEM_SKS, 13603 /*size*/ sizeof(sks), 13604 /*data*/ sks, 13605 SSD_ELEM_NONE); 13606 13607 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 13608 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 13609 ctl_failover_io(io, /*have_lock*/ 1); 13610 return; 13611 } 13612 13613 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) > 13614 CTL_HA_STATUS_SUCCESS) { 13615 /* XXX KDM what to do if this fails? */ 13616 } 13617 return; 13618 } 13619 13620 } 13621 13622 static int 13623 ctl_process_done(union ctl_io *io) 13624 { 13625 struct ctl_lun *lun; 13626 struct ctl_softc *softc = control_softc; 13627 void (*fe_done)(union ctl_io *io); 13628 uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port); 13629 13630 CTL_DEBUG_PRINT(("ctl_process_done\n")); 13631 13632 fe_done = softc->ctl_ports[targ_port]->fe_done; 13633 13634 #ifdef CTL_TIME_IO 13635 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 13636 char str[256]; 13637 char path_str[64]; 13638 struct sbuf sb; 13639 13640 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 13641 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 13642 13643 sbuf_cat(&sb, path_str); 13644 switch (io->io_hdr.io_type) { 13645 case CTL_IO_SCSI: 13646 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 13647 sbuf_printf(&sb, "\n"); 13648 sbuf_cat(&sb, path_str); 13649 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 13650 io->scsiio.tag_num, io->scsiio.tag_type); 13651 break; 13652 case CTL_IO_TASK: 13653 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 13654 "Tag Type: %d\n", io->taskio.task_action, 13655 io->taskio.tag_num, io->taskio.tag_type); 13656 break; 13657 default: 13658 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 13659 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 13660 break; 13661 } 13662 sbuf_cat(&sb, path_str); 13663 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 13664 (intmax_t)time_uptime - io->io_hdr.start_time); 13665 sbuf_finish(&sb); 13666 printf("%s", sbuf_data(&sb)); 13667 } 13668 #endif /* CTL_TIME_IO */ 13669 13670 switch (io->io_hdr.io_type) { 13671 case CTL_IO_SCSI: 13672 break; 13673 case CTL_IO_TASK: 13674 if (bootverbose || (ctl_debug & CTL_DEBUG_INFO)) 13675 
ctl_io_error_print(io, NULL); 13676 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 13677 ctl_free_io(io); 13678 else 13679 fe_done(io); 13680 return (CTL_RETVAL_COMPLETE); 13681 default: 13682 panic("ctl_process_done: invalid io type %d\n", 13683 io->io_hdr.io_type); 13684 break; /* NOTREACHED */ 13685 } 13686 13687 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13688 if (lun == NULL) { 13689 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 13690 io->io_hdr.nexus.targ_mapped_lun)); 13691 goto bailout; 13692 } 13693 13694 mtx_lock(&lun->lun_lock); 13695 13696 /* 13697 * Check to see if we have any errors to inject here. We only 13698 * inject errors for commands that don't already have errors set. 13699 */ 13700 if ((STAILQ_FIRST(&lun->error_list) != NULL) && 13701 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && 13702 ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) 13703 ctl_inject_error(lun, io); 13704 13705 /* 13706 * XXX KDM how do we treat commands that aren't completed 13707 * successfully? 13708 * 13709 * XXX KDM should we also track I/O latency? 13710 */ 13711 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && 13712 io->io_hdr.io_type == CTL_IO_SCSI) { 13713 #ifdef CTL_TIME_IO 13714 struct bintime cur_bt; 13715 #endif 13716 int type; 13717 13718 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13719 CTL_FLAG_DATA_IN) 13720 type = CTL_STATS_READ; 13721 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13722 CTL_FLAG_DATA_OUT) 13723 type = CTL_STATS_WRITE; 13724 else 13725 type = CTL_STATS_NO_IO; 13726 13727 lun->stats.ports[targ_port].bytes[type] += 13728 io->scsiio.kern_total_len; 13729 lun->stats.ports[targ_port].operations[type]++; 13730 #ifdef CTL_TIME_IO 13731 bintime_add(&lun->stats.ports[targ_port].dma_time[type], 13732 &io->io_hdr.dma_bt); 13733 lun->stats.ports[targ_port].num_dmas[type] += 13734 io->io_hdr.num_dmas; 13735 getbintime(&cur_bt); 13736 bintime_sub(&cur_bt, &io->io_hdr.start_bt); 13737 bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt); 13738 #endif 13739 } 13740 13741 /* 13742 * Remove this from the OOA queue. 13743 */ 13744 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 13745 13746 /* 13747 * Run through the blocked queue on this LUN and see if anything 13748 * has become unblocked, now that this transaction is done. 13749 */ 13750 ctl_check_blocked(lun); 13751 13752 /* 13753 * If the LUN has been invalidated, free it if there is nothing 13754 * left on its OOA queue. 13755 */ 13756 if ((lun->flags & CTL_LUN_INVALID) 13757 && TAILQ_EMPTY(&lun->ooa_queue)) { 13758 mtx_unlock(&lun->lun_lock); 13759 mtx_lock(&softc->ctl_lock); 13760 ctl_free_lun(lun); 13761 mtx_unlock(&softc->ctl_lock); 13762 } else 13763 mtx_unlock(&lun->lun_lock); 13764 13765 bailout: 13766 13767 /* 13768 * If this command has been aborted, make sure we set the status 13769 * properly. The FETD is responsible for freeing the I/O and doing 13770 * whatever it needs to do to clean up its state. 13771 */ 13772 if (io->io_hdr.flags & CTL_FLAG_ABORT) 13773 ctl_set_task_aborted(&io->scsiio); 13774 13775 /* 13776 * If enabled, print command error status. 13777 * We don't print UAs unless debugging was enabled explicitly. 
13778 */ 13779 do { 13780 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) 13781 break; 13782 if (!bootverbose && (ctl_debug & CTL_DEBUG_INFO) == 0) 13783 break; 13784 if ((ctl_debug & CTL_DEBUG_INFO) == 0 && 13785 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR) && 13786 (io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND)) { 13787 int error_code, sense_key, asc, ascq; 13788 13789 scsi_extract_sense_len(&io->scsiio.sense_data, 13790 io->scsiio.sense_len, &error_code, &sense_key, 13791 &asc, &ascq, /*show_errors*/ 0); 13792 if (sense_key == SSD_KEY_UNIT_ATTENTION) 13793 break; 13794 } 13795 13796 ctl_io_error_print(io, NULL); 13797 } while (0); 13798 13799 /* 13800 * Tell the FETD or the other shelf controller we're done with this 13801 * command. Note that only SCSI commands get to this point. Task 13802 * management commands are completed above. 13803 * 13804 * We only send status to the other controller if we're in XFER 13805 * mode. In SER_ONLY mode, the I/O is done on the controller that 13806 * received the I/O (from CTL's perspective), and so the status is 13807 * generated there. 13808 * 13809 * XXX KDM if we hold the lock here, we could cause a deadlock 13810 * if the frontend comes back in in this context to queue 13811 * something. 13812 */ 13813 if ((softc->ha_mode == CTL_HA_MODE_XFER) 13814 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 13815 union ctl_ha_msg msg; 13816 13817 memset(&msg, 0, sizeof(msg)); 13818 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 13819 msg.hdr.original_sc = io->io_hdr.original_sc; 13820 msg.hdr.nexus = io->io_hdr.nexus; 13821 msg.hdr.status = io->io_hdr.status; 13822 msg.scsi.scsi_status = io->scsiio.scsi_status; 13823 msg.scsi.tag_num = io->scsiio.tag_num; 13824 msg.scsi.tag_type = io->scsiio.tag_type; 13825 msg.scsi.sense_len = io->scsiio.sense_len; 13826 msg.scsi.sense_residual = io->scsiio.sense_residual; 13827 msg.scsi.residual = io->scsiio.residual; 13828 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 13829 sizeof(io->scsiio.sense_data)); 13830 /* 13831 * We copy this whether or not this is an I/O-related 13832 * command. Otherwise, we'd have to go and check to see 13833 * whether it's a read/write command, and it really isn't 13834 * worth it. 13835 */ 13836 memcpy(&msg.scsi.lbalen, 13837 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 13838 sizeof(msg.scsi.lbalen)); 13839 13840 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13841 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { 13842 /* XXX do something here */ 13843 } 13844 13845 ctl_free_io(io); 13846 } else 13847 fe_done(io); 13848 13849 return (CTL_RETVAL_COMPLETE); 13850 } 13851 13852 #ifdef CTL_WITH_CA 13853 /* 13854 * Front end should call this if it doesn't do autosense. When the request 13855 * sense comes back in from the initiator, we'll dequeue this and send it. 13856 */ 13857 int 13858 ctl_queue_sense(union ctl_io *io) 13859 { 13860 struct ctl_lun *lun; 13861 struct ctl_port *port; 13862 struct ctl_softc *softc; 13863 uint32_t initidx, targ_lun; 13864 13865 softc = control_softc; 13866 13867 CTL_DEBUG_PRINT(("ctl_queue_sense\n")); 13868 13869 /* 13870 * LUN lookup will likely move to the ctl_work_thread() once we 13871 * have our new queueing infrastructure (that doesn't put things on 13872 * a per-LUN queue initially). That is so that we can handle 13873 * things like an INQUIRY to a LUN that we don't have enabled. We 13874 * can't deal with that right now. 
13875 */ 13876 mtx_lock(&softc->ctl_lock); 13877 13878 /* 13879 * If we don't have a LUN for this, just toss the sense 13880 * information. 13881 */ 13882 port = ctl_io_port(&ctsio->io_hdr); 13883 targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13884 if ((targ_lun < CTL_MAX_LUNS) 13885 && (softc->ctl_luns[targ_lun] != NULL)) 13886 lun = softc->ctl_luns[targ_lun]; 13887 else 13888 goto bailout; 13889 13890 initidx = ctl_get_initindex(&io->io_hdr.nexus); 13891 13892 mtx_lock(&lun->lun_lock); 13893 /* 13894 * Already have CA set for this LUN...toss the sense information. 13895 */ 13896 if (ctl_is_set(lun->have_ca, initidx)) { 13897 mtx_unlock(&lun->lun_lock); 13898 goto bailout; 13899 } 13900 13901 memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data, 13902 MIN(sizeof(lun->pending_sense[initidx]), 13903 sizeof(io->scsiio.sense_data))); 13904 ctl_set_mask(lun->have_ca, initidx); 13905 mtx_unlock(&lun->lun_lock); 13906 13907 bailout: 13908 mtx_unlock(&softc->ctl_lock); 13909 13910 ctl_free_io(io); 13911 13912 return (CTL_RETVAL_COMPLETE); 13913 } 13914 #endif 13915 13916 /* 13917 * Primary command inlet from frontend ports. All SCSI and task I/O 13918 * requests must go through this function. 13919 */ 13920 int 13921 ctl_queue(union ctl_io *io) 13922 { 13923 struct ctl_port *port; 13924 13925 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 13926 13927 #ifdef CTL_TIME_IO 13928 io->io_hdr.start_time = time_uptime; 13929 getbintime(&io->io_hdr.start_bt); 13930 #endif /* CTL_TIME_IO */ 13931 13932 /* Map FE-specific LUN ID into global one. */ 13933 port = ctl_io_port(&io->io_hdr); 13934 io->io_hdr.nexus.targ_mapped_lun = 13935 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13936 13937 switch (io->io_hdr.io_type) { 13938 case CTL_IO_SCSI: 13939 case CTL_IO_TASK: 13940 if (ctl_debug & CTL_DEBUG_CDB) 13941 ctl_io_print(io); 13942 ctl_enqueue_incoming(io); 13943 break; 13944 default: 13945 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 13946 return (EINVAL); 13947 } 13948 13949 return (CTL_RETVAL_COMPLETE); 13950 } 13951 13952 #ifdef CTL_IO_DELAY 13953 static void 13954 ctl_done_timer_wakeup(void *arg) 13955 { 13956 union ctl_io *io; 13957 13958 io = (union ctl_io *)arg; 13959 ctl_done(io); 13960 } 13961 #endif /* CTL_IO_DELAY */ 13962 13963 void 13964 ctl_done(union ctl_io *io) 13965 { 13966 13967 /* 13968 * Enable this to catch duplicate completion issues. 13969 */ 13970 #if 0 13971 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { 13972 printf("%s: type %d msg %d cdb %x iptl: " 13973 "%d:%d:%d:%d tag 0x%04x " 13974 "flag %#x status %x\n", 13975 __func__, 13976 io->io_hdr.io_type, 13977 io->io_hdr.msg_type, 13978 io->scsiio.cdb[0], 13979 io->io_hdr.nexus.initid.id, 13980 io->io_hdr.nexus.targ_port, 13981 io->io_hdr.nexus.targ_target.id, 13982 io->io_hdr.nexus.targ_lun, 13983 (io->io_hdr.io_type == 13984 CTL_IO_TASK) ? 13985 io->taskio.tag_num : 13986 io->scsiio.tag_num, 13987 io->io_hdr.flags, 13988 io->io_hdr.status); 13989 } else 13990 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; 13991 #endif 13992 13993 /* 13994 * This is an internal copy of an I/O, and should not go through 13995 * the normal done processing logic. 13996 */ 13997 if (io->io_hdr.flags & CTL_FLAG_INT_COPY) 13998 return; 13999 14000 /* 14001 * We need to send a msg to the serializing shelf to finish the IO 14002 * as well. We don't send a finish message to the other shelf if 14003 * this is a task management command. 
Task management commands 14004 * aren't serialized in the OOA queue, but rather just executed on 14005 * both shelf controllers for commands that originated on that 14006 * controller. 14007 */ 14008 if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC) 14009 && (io->io_hdr.io_type != CTL_IO_TASK)) { 14010 union ctl_ha_msg msg_io; 14011 14012 msg_io.hdr.msg_type = CTL_MSG_FINISH_IO; 14013 msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc; 14014 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io, 14015 sizeof(msg_io), 0 ) != CTL_HA_STATUS_SUCCESS) { 14016 } 14017 /* continue on to finish IO */ 14018 } 14019 #ifdef CTL_IO_DELAY 14020 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 14021 struct ctl_lun *lun; 14022 14023 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 14024 14025 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 14026 } else { 14027 struct ctl_lun *lun; 14028 14029 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 14030 14031 if ((lun != NULL) 14032 && (lun->delay_info.done_delay > 0)) { 14033 struct callout *callout; 14034 14035 callout = (struct callout *)&io->io_hdr.timer_bytes; 14036 callout_init(callout, /*mpsafe*/ 1); 14037 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 14038 callout_reset(callout, 14039 lun->delay_info.done_delay * hz, 14040 ctl_done_timer_wakeup, io); 14041 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) 14042 lun->delay_info.done_delay = 0; 14043 return; 14044 } 14045 } 14046 #endif /* CTL_IO_DELAY */ 14047 14048 ctl_enqueue_done(io); 14049 } 14050 14051 int 14052 ctl_isc(struct ctl_scsiio *ctsio) 14053 { 14054 struct ctl_lun *lun; 14055 int retval; 14056 14057 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 14058 14059 CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0])); 14060 14061 CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n")); 14062 14063 retval = lun->backend->data_submit((union ctl_io *)ctsio); 14064 14065 return (retval); 14066 } 14067 14068 14069 static void 14070 ctl_work_thread(void *arg) 14071 { 14072 struct ctl_thread *thr = (struct ctl_thread *)arg; 14073 struct ctl_softc *softc = thr->ctl_softc; 14074 union ctl_io *io; 14075 int retval; 14076 14077 CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); 14078 14079 for (;;) { 14080 retval = 0; 14081 14082 /* 14083 * We handle the queues in this order: 14084 * - ISC 14085 * - done queue (to free up resources, unblock other commands) 14086 * - RtR queue 14087 * - incoming queue 14088 * 14089 * If those queues are empty, we break out of the loop and 14090 * go to sleep. 
14091 */ 14092 mtx_lock(&thr->queue_lock); 14093 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue); 14094 if (io != NULL) { 14095 STAILQ_REMOVE_HEAD(&thr->isc_queue, links); 14096 mtx_unlock(&thr->queue_lock); 14097 ctl_handle_isc(io); 14098 continue; 14099 } 14100 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue); 14101 if (io != NULL) { 14102 STAILQ_REMOVE_HEAD(&thr->done_queue, links); 14103 /* clear any blocked commands, call fe_done */ 14104 mtx_unlock(&thr->queue_lock); 14105 retval = ctl_process_done(io); 14106 continue; 14107 } 14108 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue); 14109 if (io != NULL) { 14110 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); 14111 mtx_unlock(&thr->queue_lock); 14112 if (io->io_hdr.io_type == CTL_IO_TASK) 14113 ctl_run_task(io); 14114 else 14115 ctl_scsiio_precheck(softc, &io->scsiio); 14116 continue; 14117 } 14118 if (!ctl_pause_rtr) { 14119 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); 14120 if (io != NULL) { 14121 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); 14122 mtx_unlock(&thr->queue_lock); 14123 retval = ctl_scsiio(&io->scsiio); 14124 if (retval != CTL_RETVAL_COMPLETE) 14125 CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); 14126 continue; 14127 } 14128 } 14129 14130 /* Sleep until we have something to do. */ 14131 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0); 14132 } 14133 } 14134 14135 static void 14136 ctl_lun_thread(void *arg) 14137 { 14138 struct ctl_softc *softc = (struct ctl_softc *)arg; 14139 struct ctl_be_lun *be_lun; 14140 int retval; 14141 14142 CTL_DEBUG_PRINT(("ctl_lun_thread starting\n")); 14143 14144 for (;;) { 14145 retval = 0; 14146 mtx_lock(&softc->ctl_lock); 14147 be_lun = STAILQ_FIRST(&softc->pending_lun_queue); 14148 if (be_lun != NULL) { 14149 STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links); 14150 mtx_unlock(&softc->ctl_lock); 14151 ctl_create_lun(be_lun); 14152 continue; 14153 } 14154 14155 /* Sleep until we have something to do. 
*/ 14156 mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock, 14157 PDROP | PRIBIO, "-", 0); 14158 } 14159 } 14160 14161 static void 14162 ctl_thresh_thread(void *arg) 14163 { 14164 struct ctl_softc *softc = (struct ctl_softc *)arg; 14165 struct ctl_lun *lun; 14166 struct ctl_be_lun *be_lun; 14167 struct scsi_da_rw_recovery_page *rwpage; 14168 struct ctl_logical_block_provisioning_page *page; 14169 const char *attr; 14170 uint64_t thres, val; 14171 int i, e; 14172 14173 CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n")); 14174 14175 for (;;) { 14176 mtx_lock(&softc->ctl_lock); 14177 STAILQ_FOREACH(lun, &softc->lun_list, links) { 14178 be_lun = lun->be_lun; 14179 if ((lun->flags & CTL_LUN_DISABLED) || 14180 (lun->flags & CTL_LUN_OFFLINE) || 14181 lun->backend->lun_attr == NULL) 14182 continue; 14183 rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT]; 14184 if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0) 14185 continue; 14186 e = 0; 14187 page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT]; 14188 for (i = 0; i < CTL_NUM_LBP_THRESH; i++) { 14189 if ((page->descr[i].flags & SLBPPD_ENABLED) == 0) 14190 continue; 14191 thres = scsi_4btoul(page->descr[i].count); 14192 thres <<= CTL_LBP_EXPONENT; 14193 switch (page->descr[i].resource) { 14194 case 0x01: 14195 attr = "blocksavail"; 14196 break; 14197 case 0x02: 14198 attr = "blocksused"; 14199 break; 14200 case 0xf1: 14201 attr = "poolblocksavail"; 14202 break; 14203 case 0xf2: 14204 attr = "poolblocksused"; 14205 break; 14206 default: 14207 continue; 14208 } 14209 mtx_unlock(&softc->ctl_lock); // XXX 14210 val = lun->backend->lun_attr( 14211 lun->be_lun->be_lun, attr); 14212 mtx_lock(&softc->ctl_lock); 14213 if (val == UINT64_MAX) 14214 continue; 14215 if ((page->descr[i].flags & SLBPPD_ARMING_MASK) 14216 == SLBPPD_ARMING_INC) 14217 e |= (val >= thres); 14218 else 14219 e |= (val <= thres); 14220 } 14221 mtx_lock(&lun->lun_lock); 14222 if (e) { 14223 if (lun->lasttpt == 0 || 14224 time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) { 14225 lun->lasttpt = time_uptime; 14226 ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); 14227 } 14228 } else { 14229 lun->lasttpt = 0; 14230 ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); 14231 } 14232 mtx_unlock(&lun->lun_lock); 14233 } 14234 mtx_unlock(&softc->ctl_lock); 14235 pause("-", CTL_LBP_PERIOD * hz); 14236 } 14237 } 14238 14239 static void 14240 ctl_enqueue_incoming(union ctl_io *io) 14241 { 14242 struct ctl_softc *softc = control_softc; 14243 struct ctl_thread *thr; 14244 u_int idx; 14245 14246 idx = (io->io_hdr.nexus.targ_port * 127 + 14247 io->io_hdr.nexus.initid.id) % worker_threads; 14248 thr = &softc->threads[idx]; 14249 mtx_lock(&thr->queue_lock); 14250 STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links); 14251 mtx_unlock(&thr->queue_lock); 14252 wakeup(thr); 14253 } 14254 14255 static void 14256 ctl_enqueue_rtr(union ctl_io *io) 14257 { 14258 struct ctl_softc *softc = control_softc; 14259 struct ctl_thread *thr; 14260 14261 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 14262 mtx_lock(&thr->queue_lock); 14263 STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links); 14264 mtx_unlock(&thr->queue_lock); 14265 wakeup(thr); 14266 } 14267 14268 static void 14269 ctl_enqueue_done(union ctl_io *io) 14270 { 14271 struct ctl_softc *softc = control_softc; 14272 struct ctl_thread *thr; 14273 14274 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 14275 mtx_lock(&thr->queue_lock); 14276 STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links); 14277 
mtx_unlock(&thr->queue_lock); 14278 wakeup(thr); 14279 } 14280 14281 static void 14282 ctl_enqueue_isc(union ctl_io *io) 14283 { 14284 struct ctl_softc *softc = control_softc; 14285 struct ctl_thread *thr; 14286 14287 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 14288 mtx_lock(&thr->queue_lock); 14289 STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links); 14290 mtx_unlock(&thr->queue_lock); 14291 wakeup(thr); 14292 } 14293 14294 /* Initialization and failover */ 14295 14296 void 14297 ctl_init_isc_msg(void) 14298 { 14299 printf("CTL: Still calling this thing\n"); 14300 } 14301 14302 /* 14303 * Init component 14304 * Initializes component into configuration defined by bootMode 14305 * (see hasc-sv.c) 14306 * returns hasc_Status: 14307 * OK 14308 * ERROR - fatal error 14309 */ 14310 static ctl_ha_comp_status 14311 ctl_isc_init(struct ctl_ha_component *c) 14312 { 14313 ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK; 14314 14315 c->status = ret; 14316 return ret; 14317 } 14318 14319 /* Start component 14320 * Starts component in state requested. If component starts successfully, 14321 * it must set its own state to the requestrd state 14322 * When requested state is HASC_STATE_HA, the component may refine it 14323 * by adding _SLAVE or _MASTER flags. 14324 * Currently allowed state transitions are: 14325 * UNKNOWN->HA - initial startup 14326 * UNKNOWN->SINGLE - initial startup when no parter detected 14327 * HA->SINGLE - failover 14328 * returns ctl_ha_comp_status: 14329 * OK - component successfully started in requested state 14330 * FAILED - could not start the requested state, failover may 14331 * be possible 14332 * ERROR - fatal error detected, no future startup possible 14333 */ 14334 static ctl_ha_comp_status 14335 ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state) 14336 { 14337 ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK; 14338 14339 printf("%s: go\n", __func__); 14340 14341 // UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap) 14342 if (c->state == CTL_HA_STATE_UNKNOWN ) { 14343 control_softc->is_single = 0; 14344 if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler) 14345 != CTL_HA_STATUS_SUCCESS) { 14346 printf("ctl_isc_start: ctl_ha_msg_create failed.\n"); 14347 ret = CTL_HA_COMP_STATUS_ERROR; 14348 } 14349 } else if (CTL_HA_STATE_IS_HA(c->state) 14350 && CTL_HA_STATE_IS_SINGLE(state)){ 14351 // HA->SINGLE transition 14352 ctl_failover(); 14353 control_softc->is_single = 1; 14354 } else { 14355 printf("ctl_isc_start:Invalid state transition %X->%X\n", 14356 c->state, state); 14357 ret = CTL_HA_COMP_STATUS_ERROR; 14358 } 14359 if (CTL_HA_STATE_IS_SINGLE(state)) 14360 control_softc->is_single = 1; 14361 14362 c->state = state; 14363 c->status = ret; 14364 return ret; 14365 } 14366 14367 /* 14368 * Quiesce component 14369 * The component must clear any error conditions (set status to OK) and 14370 * prepare itself to another Start call 14371 * returns ctl_ha_comp_status: 14372 * OK 14373 * ERROR 14374 */ 14375 static ctl_ha_comp_status 14376 ctl_isc_quiesce(struct ctl_ha_component *c) 14377 { 14378 int ret = CTL_HA_COMP_STATUS_OK; 14379 14380 ctl_pause_rtr = 1; 14381 c->status = ret; 14382 return ret; 14383 } 14384 14385 struct ctl_ha_component ctl_ha_component_ctlisc = 14386 { 14387 .name = "CTL ISC", 14388 .state = CTL_HA_STATE_UNKNOWN, 14389 .init = ctl_isc_init, 14390 .start = ctl_isc_start, 14391 .quiesce = ctl_isc_quiesce 14392 }; 14393 14394 /* 14395 * vim: ts=8 14396 */ 14397