1 /*- 2 * Copyright (c) 2003-2009 Silicon Graphics International Corp. 3 * Copyright (c) 2012 The FreeBSD Foundation 4 * All rights reserved. 5 * 6 * Portions of this software were developed by Edward Tomasz Napierala 7 * under sponsorship from the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions, and the following disclaimer, 14 * without modification. 15 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 16 * substantially similar to the "NO WARRANTY" disclaimer below 17 * ("Disclaimer") and any redistribution must be conditioned upon 18 * including a substantially similar Disclaimer requirement for further 19 * binary redistribution. 20 * 21 * NO WARRANTY 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 30 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 31 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGES. 33 * 34 * $Id$ 35 */ 36 /* 37 * CAM Target Layer, a SCSI device emulation subsystem. 38 * 39 * Author: Ken Merry <ken@FreeBSD.org> 40 */ 41 42 #define _CTL_C 43 44 #include <sys/cdefs.h> 45 __FBSDID("$FreeBSD$"); 46 47 #include <sys/param.h> 48 #include <sys/systm.h> 49 #include <sys/ctype.h> 50 #include <sys/kernel.h> 51 #include <sys/types.h> 52 #include <sys/kthread.h> 53 #include <sys/bio.h> 54 #include <sys/fcntl.h> 55 #include <sys/lock.h> 56 #include <sys/module.h> 57 #include <sys/mutex.h> 58 #include <sys/condvar.h> 59 #include <sys/malloc.h> 60 #include <sys/conf.h> 61 #include <sys/ioccom.h> 62 #include <sys/queue.h> 63 #include <sys/sbuf.h> 64 #include <sys/smp.h> 65 #include <sys/endian.h> 66 #include <sys/sysctl.h> 67 #include <vm/uma.h> 68 69 #include <cam/cam.h> 70 #include <cam/scsi/scsi_all.h> 71 #include <cam/scsi/scsi_da.h> 72 #include <cam/ctl/ctl_io.h> 73 #include <cam/ctl/ctl.h> 74 #include <cam/ctl/ctl_frontend.h> 75 #include <cam/ctl/ctl_util.h> 76 #include <cam/ctl/ctl_backend.h> 77 #include <cam/ctl/ctl_ioctl.h> 78 #include <cam/ctl/ctl_ha.h> 79 #include <cam/ctl/ctl_private.h> 80 #include <cam/ctl/ctl_debug.h> 81 #include <cam/ctl/ctl_scsi_all.h> 82 #include <cam/ctl/ctl_error.h> 83 84 struct ctl_softc *control_softc = NULL; 85 86 /* 87 * Size and alignment macros needed for Copan-specific HA hardware. These 88 * can go away when the HA code is re-written, and uses busdma for any 89 * hardware. 
90 */ 91 #define CTL_ALIGN_8B(target, source, type) \ 92 if (((uint32_t)source & 0x7) != 0) \ 93 target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\ 94 else \ 95 target = (type)source; 96 97 #define CTL_SIZE_8B(target, size) \ 98 if ((size & 0x7) != 0) \ 99 target = size + (0x8 - (size & 0x7)); \ 100 else \ 101 target = size; 102 103 #define CTL_ALIGN_8B_MARGIN 16 104 105 /* 106 * Template mode pages. 107 */ 108 109 /* 110 * Note that these are default values only. The actual values will be 111 * filled in when the user does a mode sense. 112 */ 113 const static struct copan_debugconf_subpage debugconf_page_default = { 114 DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ 115 DBGCNF_SUBPAGE_CODE, /* subpage */ 116 {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, 117 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ 118 DBGCNF_VERSION, /* page_version */ 119 {CTL_TIME_IO_DEFAULT_SECS>>8, 120 CTL_TIME_IO_DEFAULT_SECS>>0}, /* ctl_time_io_secs */ 121 }; 122 123 const static struct copan_debugconf_subpage debugconf_page_changeable = { 124 DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ 125 DBGCNF_SUBPAGE_CODE, /* subpage */ 126 {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, 127 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ 128 0, /* page_version */ 129 {0xff,0xff}, /* ctl_time_io_secs */ 130 }; 131 132 const static struct scsi_da_rw_recovery_page rw_er_page_default = { 133 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 134 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 135 /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, 136 /*read_retry_count*/0, 137 /*correction_span*/0, 138 /*head_offset_count*/0, 139 /*data_strobe_offset_cnt*/0, 140 /*byte8*/SMS_RWER_LBPERE, 141 /*write_retry_count*/0, 142 /*reserved2*/0, 143 /*recovery_time_limit*/{0, 0}, 144 }; 145 146 const static struct scsi_da_rw_recovery_page rw_er_page_changeable = { 147 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 148 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 149 /*byte3*/0, 150 /*read_retry_count*/0, 151 /*correction_span*/0, 152 /*head_offset_count*/0, 153 /*data_strobe_offset_cnt*/0, 154 /*byte8*/0, 155 /*write_retry_count*/0, 156 /*reserved2*/0, 157 /*recovery_time_limit*/{0, 0}, 158 }; 159 160 const static struct scsi_format_page format_page_default = { 161 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 162 /*page_length*/sizeof(struct scsi_format_page) - 2, 163 /*tracks_per_zone*/ {0, 0}, 164 /*alt_sectors_per_zone*/ {0, 0}, 165 /*alt_tracks_per_zone*/ {0, 0}, 166 /*alt_tracks_per_lun*/ {0, 0}, 167 /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, 168 CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, 169 /*bytes_per_sector*/ {0, 0}, 170 /*interleave*/ {0, 0}, 171 /*track_skew*/ {0, 0}, 172 /*cylinder_skew*/ {0, 0}, 173 /*flags*/ SFP_HSEC, 174 /*reserved*/ {0, 0, 0} 175 }; 176 177 const static struct scsi_format_page format_page_changeable = { 178 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 179 /*page_length*/sizeof(struct scsi_format_page) - 2, 180 /*tracks_per_zone*/ {0, 0}, 181 /*alt_sectors_per_zone*/ {0, 0}, 182 /*alt_tracks_per_zone*/ {0, 0}, 183 /*alt_tracks_per_lun*/ {0, 0}, 184 /*sectors_per_track*/ {0, 0}, 185 /*bytes_per_sector*/ {0, 0}, 186 /*interleave*/ {0, 0}, 187 /*track_skew*/ {0, 0}, 188 /*cylinder_skew*/ {0, 0}, 189 /*flags*/ 0, 190 /*reserved*/ {0, 0, 0} 191 }; 192 193 const static struct scsi_rigid_disk_page rigid_disk_page_default = { 194 /*page_code*/SMS_RIGID_DISK_PAGE, 195 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 196 /*cylinders*/ {0, 
0, 0}, 197 /*heads*/ CTL_DEFAULT_HEADS, 198 /*start_write_precomp*/ {0, 0, 0}, 199 /*start_reduced_current*/ {0, 0, 0}, 200 /*step_rate*/ {0, 0}, 201 /*landing_zone_cylinder*/ {0, 0, 0}, 202 /*rpl*/ SRDP_RPL_DISABLED, 203 /*rotational_offset*/ 0, 204 /*reserved1*/ 0, 205 /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, 206 CTL_DEFAULT_ROTATION_RATE & 0xff}, 207 /*reserved2*/ {0, 0} 208 }; 209 210 const static struct scsi_rigid_disk_page rigid_disk_page_changeable = { 211 /*page_code*/SMS_RIGID_DISK_PAGE, 212 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 213 /*cylinders*/ {0, 0, 0}, 214 /*heads*/ 0, 215 /*start_write_precomp*/ {0, 0, 0}, 216 /*start_reduced_current*/ {0, 0, 0}, 217 /*step_rate*/ {0, 0}, 218 /*landing_zone_cylinder*/ {0, 0, 0}, 219 /*rpl*/ 0, 220 /*rotational_offset*/ 0, 221 /*reserved1*/ 0, 222 /*rotation_rate*/ {0, 0}, 223 /*reserved2*/ {0, 0} 224 }; 225 226 const static struct scsi_caching_page caching_page_default = { 227 /*page_code*/SMS_CACHING_PAGE, 228 /*page_length*/sizeof(struct scsi_caching_page) - 2, 229 /*flags1*/ SCP_DISC | SCP_WCE, 230 /*ret_priority*/ 0, 231 /*disable_pf_transfer_len*/ {0xff, 0xff}, 232 /*min_prefetch*/ {0, 0}, 233 /*max_prefetch*/ {0xff, 0xff}, 234 /*max_pf_ceiling*/ {0xff, 0xff}, 235 /*flags2*/ 0, 236 /*cache_segments*/ 0, 237 /*cache_seg_size*/ {0, 0}, 238 /*reserved*/ 0, 239 /*non_cache_seg_size*/ {0, 0, 0} 240 }; 241 242 const static struct scsi_caching_page caching_page_changeable = { 243 /*page_code*/SMS_CACHING_PAGE, 244 /*page_length*/sizeof(struct scsi_caching_page) - 2, 245 /*flags1*/ SCP_WCE | SCP_RCD, 246 /*ret_priority*/ 0, 247 /*disable_pf_transfer_len*/ {0, 0}, 248 /*min_prefetch*/ {0, 0}, 249 /*max_prefetch*/ {0, 0}, 250 /*max_pf_ceiling*/ {0, 0}, 251 /*flags2*/ 0, 252 /*cache_segments*/ 0, 253 /*cache_seg_size*/ {0, 0}, 254 /*reserved*/ 0, 255 /*non_cache_seg_size*/ {0, 0, 0} 256 }; 257 258 const static struct scsi_control_page control_page_default = { 259 /*page_code*/SMS_CONTROL_MODE_PAGE, 260 /*page_length*/sizeof(struct scsi_control_page) - 2, 261 /*rlec*/0, 262 /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, 263 /*eca_and_aen*/0, 264 /*flags4*/SCP_TAS, 265 /*aen_holdoff_period*/{0, 0}, 266 /*busy_timeout_period*/{0, 0}, 267 /*extended_selftest_completion_time*/{0, 0} 268 }; 269 270 const static struct scsi_control_page control_page_changeable = { 271 /*page_code*/SMS_CONTROL_MODE_PAGE, 272 /*page_length*/sizeof(struct scsi_control_page) - 2, 273 /*rlec*/SCP_DSENSE, 274 /*queue_flags*/SCP_QUEUE_ALG_MASK, 275 /*eca_and_aen*/SCP_SWP, 276 /*flags4*/0, 277 /*aen_holdoff_period*/{0, 0}, 278 /*busy_timeout_period*/{0, 0}, 279 /*extended_selftest_completion_time*/{0, 0} 280 }; 281 282 const static struct scsi_info_exceptions_page ie_page_default = { 283 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 284 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 285 /*info_flags*/SIEP_FLAGS_DEXCPT, 286 /*mrie*/0, 287 /*interval_timer*/{0, 0, 0, 0}, 288 /*report_count*/{0, 0, 0, 0} 289 }; 290 291 const static struct scsi_info_exceptions_page ie_page_changeable = { 292 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 293 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 294 /*info_flags*/0, 295 /*mrie*/0, 296 /*interval_timer*/{0, 0, 0, 0}, 297 /*report_count*/{0, 0, 0, 0} 298 }; 299 300 #define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4) 301 302 const static struct ctl_logical_block_provisioning_page lbp_page_default = {{ 303 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 304 
/*subpage_code*/0x02, 305 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 306 /*flags*/0, 307 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 308 /*descr*/{}}, 309 {{/*flags*/0, 310 /*resource*/0x01, 311 /*reserved*/{0, 0}, 312 /*count*/{0, 0, 0, 0}}, 313 {/*flags*/0, 314 /*resource*/0x02, 315 /*reserved*/{0, 0}, 316 /*count*/{0, 0, 0, 0}}, 317 {/*flags*/0, 318 /*resource*/0xf1, 319 /*reserved*/{0, 0}, 320 /*count*/{0, 0, 0, 0}}, 321 {/*flags*/0, 322 /*resource*/0xf2, 323 /*reserved*/{0, 0}, 324 /*count*/{0, 0, 0, 0}} 325 } 326 }; 327 328 const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{ 329 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 330 /*subpage_code*/0x02, 331 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 332 /*flags*/0, 333 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 334 /*descr*/{}}, 335 {{/*flags*/0, 336 /*resource*/0, 337 /*reserved*/{0, 0}, 338 /*count*/{0, 0, 0, 0}}, 339 {/*flags*/0, 340 /*resource*/0, 341 /*reserved*/{0, 0}, 342 /*count*/{0, 0, 0, 0}}, 343 {/*flags*/0, 344 /*resource*/0, 345 /*reserved*/{0, 0}, 346 /*count*/{0, 0, 0, 0}}, 347 {/*flags*/0, 348 /*resource*/0, 349 /*reserved*/{0, 0}, 350 /*count*/{0, 0, 0, 0}} 351 } 352 }; 353 354 /* 355 * XXX KDM move these into the softc. 356 */ 357 static int rcv_sync_msg; 358 static uint8_t ctl_pause_rtr; 359 360 SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer"); 361 static int worker_threads = -1; 362 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, 363 &worker_threads, 1, "Number of worker threads"); 364 static int ctl_debug = CTL_DEBUG_NONE; 365 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN, 366 &ctl_debug, 0, "Enabled debug flags"); 367 368 /* 369 * Supported pages (0x00), Serial number (0x80), Device ID (0x83), 370 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87), 371 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0), 372 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2) 373 */ 374 #define SCSI_EVPD_NUM_SUPPORTED_PAGES 10 375 376 #ifdef notyet 377 static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event, 378 int param); 379 static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); 380 #endif 381 static int ctl_init(void); 382 void ctl_shutdown(void); 383 static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); 384 static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); 385 static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio); 386 static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 387 struct ctl_ooa *ooa_hdr, 388 struct ctl_ooa_entry *kern_entries); 389 static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 390 struct thread *td); 391 static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 392 struct ctl_be_lun *be_lun); 393 static int ctl_free_lun(struct ctl_lun *lun); 394 static void ctl_create_lun(struct ctl_be_lun *be_lun); 395 static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr); 396 /** 397 static void ctl_failover_change_pages(struct ctl_softc *softc, 398 struct ctl_scsiio *ctsio, int master); 399 **/ 400 401 static int ctl_do_mode_select(union ctl_io *io); 402 static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, 403 uint64_t res_key, uint64_t sa_res_key, 404 uint8_t type, uint32_t residx, 405 struct ctl_scsiio *ctsio, 406 struct scsi_per_res_out *cdb, 407 struct scsi_per_res_out_parms* 
param); 408 static void ctl_pro_preempt_other(struct ctl_lun *lun, 409 union ctl_ha_msg *msg); 410 static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg); 411 static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len); 412 static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len); 413 static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len); 414 static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len); 415 static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len); 416 static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, 417 int alloc_len); 418 static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, 419 int alloc_len); 420 static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len); 421 static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len); 422 static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio); 423 static int ctl_inquiry_std(struct ctl_scsiio *ctsio); 424 static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len); 425 static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, 426 bool seq); 427 static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2); 428 static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, 429 union ctl_io *pending_io, union ctl_io *ooa_io); 430 static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 431 union ctl_io *starting_io); 432 static int ctl_check_blocked(struct ctl_lun *lun); 433 static int ctl_scsiio_lun_check(struct ctl_lun *lun, 434 const struct ctl_cmd_entry *entry, 435 struct ctl_scsiio *ctsio); 436 //static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc); 437 #ifdef notyet 438 static void ctl_failover(void); 439 #endif 440 static void ctl_clear_ua(struct ctl_softc *ctl_softc, uint32_t initidx, 441 ctl_ua_type ua_type); 442 static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, 443 struct ctl_scsiio *ctsio); 444 static int ctl_scsiio(struct ctl_scsiio *ctsio); 445 446 static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io); 447 static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, 448 ctl_ua_type ua_type); 449 static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, 450 ctl_ua_type ua_type); 451 static int ctl_abort_task(union ctl_io *io); 452 static int ctl_abort_task_set(union ctl_io *io); 453 static int ctl_i_t_nexus_reset(union ctl_io *io); 454 static void ctl_run_task(union ctl_io *io); 455 #ifdef CTL_IO_DELAY 456 static void ctl_datamove_timer_wakeup(void *arg); 457 static void ctl_done_timer_wakeup(void *arg); 458 #endif /* CTL_IO_DELAY */ 459 460 static void ctl_send_datamove_done(union ctl_io *io, int have_lock); 461 static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq); 462 static int ctl_datamove_remote_dm_write_cb(union ctl_io *io); 463 static void ctl_datamove_remote_write(union ctl_io *io); 464 static int ctl_datamove_remote_dm_read_cb(union ctl_io *io); 465 static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq); 466 static int ctl_datamove_remote_sgl_setup(union ctl_io *io); 467 static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 468 ctl_ha_dt_cb callback); 469 static void ctl_datamove_remote_read(union ctl_io *io); 470 static void ctl_datamove_remote(union ctl_io *io); 471 static int ctl_process_done(union ctl_io *io); 472 static void ctl_lun_thread(void *arg); 473 static void 
ctl_thresh_thread(void *arg); 474 static void ctl_work_thread(void *arg); 475 static void ctl_enqueue_incoming(union ctl_io *io); 476 static void ctl_enqueue_rtr(union ctl_io *io); 477 static void ctl_enqueue_done(union ctl_io *io); 478 #ifdef notyet 479 static void ctl_enqueue_isc(union ctl_io *io); 480 #endif 481 static const struct ctl_cmd_entry * 482 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); 483 static const struct ctl_cmd_entry * 484 ctl_validate_command(struct ctl_scsiio *ctsio); 485 static int ctl_cmd_applicable(uint8_t lun_type, 486 const struct ctl_cmd_entry *entry); 487 488 /* 489 * Load the serialization table. This isn't very pretty, but is probably 490 * the easiest way to do it. 491 */ 492 #include "ctl_ser_table.c" 493 494 /* 495 * We only need to define open, close and ioctl routines for this driver. 496 */ 497 static struct cdevsw ctl_cdevsw = { 498 .d_version = D_VERSION, 499 .d_flags = 0, 500 .d_open = ctl_open, 501 .d_close = ctl_close, 502 .d_ioctl = ctl_ioctl, 503 .d_name = "ctl", 504 }; 505 506 507 MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL"); 508 509 static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *); 510 511 static moduledata_t ctl_moduledata = { 512 "ctl", 513 ctl_module_event_handler, 514 NULL 515 }; 516 517 DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); 518 MODULE_VERSION(ctl, 1); 519 520 #ifdef notyet 521 static void 522 ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, 523 union ctl_ha_msg *msg_info) 524 { 525 struct ctl_scsiio *ctsio; 526 527 if (msg_info->hdr.original_sc == NULL) { 528 printf("%s: original_sc == NULL!\n", __func__); 529 /* XXX KDM now what? */ 530 return; 531 } 532 533 ctsio = &msg_info->hdr.original_sc->scsiio; 534 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 535 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 536 ctsio->io_hdr.status = msg_info->hdr.status; 537 ctsio->scsi_status = msg_info->scsi.scsi_status; 538 ctsio->sense_len = msg_info->scsi.sense_len; 539 ctsio->sense_residual = msg_info->scsi.sense_residual; 540 ctsio->residual = msg_info->scsi.residual; 541 memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, 542 sizeof(ctsio->sense_data)); 543 memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 544 &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen)); 545 ctl_enqueue_isc((union ctl_io *)ctsio); 546 } 547 548 static void 549 ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, 550 union ctl_ha_msg *msg_info) 551 { 552 struct ctl_scsiio *ctsio; 553 554 if (msg_info->hdr.serializing_sc == NULL) { 555 printf("%s: serializing_sc == NULL!\n", __func__); 556 /* XXX KDM now what? */ 557 return; 558 } 559 560 ctsio = &msg_info->hdr.serializing_sc->scsiio; 561 #if 0 562 /* 563 * Attempt to catch the situation where an I/O has 564 * been freed, and we're using it again. 565 */ 566 if (ctsio->io_hdr.io_type == 0xff) { 567 union ctl_io *tmp_io; 568 tmp_io = (union ctl_io *)ctsio; 569 printf("%s: %p use after free!\n", __func__, 570 ctsio); 571 printf("%s: type %d msg %d cdb %x iptl: " 572 "%d:%d:%d:%d tag 0x%04x " 573 "flag %#x status %x\n", 574 __func__, 575 tmp_io->io_hdr.io_type, 576 tmp_io->io_hdr.msg_type, 577 tmp_io->scsiio.cdb[0], 578 tmp_io->io_hdr.nexus.initid.id, 579 tmp_io->io_hdr.nexus.targ_port, 580 tmp_io->io_hdr.nexus.targ_target.id, 581 tmp_io->io_hdr.nexus.targ_lun, 582 (tmp_io->io_hdr.io_type == 583 CTL_IO_TASK) ? 
584 tmp_io->taskio.tag_num : 585 tmp_io->scsiio.tag_num, 586 tmp_io->io_hdr.flags, 587 tmp_io->io_hdr.status); 588 } 589 #endif 590 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 591 ctl_enqueue_isc((union ctl_io *)ctsio); 592 } 593 594 /* 595 * ISC (Inter Shelf Communication) event handler. Events from the HA 596 * subsystem come in here. 597 */ 598 static void 599 ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) 600 { 601 struct ctl_softc *softc; 602 union ctl_io *io; 603 struct ctl_prio *presio; 604 ctl_ha_status isc_status; 605 606 softc = control_softc; 607 io = NULL; 608 609 610 #if 0 611 printf("CTL: Isc Msg event %d\n", event); 612 #endif 613 if (event == CTL_HA_EVT_MSG_RECV) { 614 union ctl_ha_msg msg_info; 615 616 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info, 617 sizeof(msg_info), /*wait*/ 0); 618 #if 0 619 printf("CTL: msg_type %d\n", msg_info.msg_type); 620 #endif 621 if (isc_status != 0) { 622 printf("Error receiving message, status = %d\n", 623 isc_status); 624 return; 625 } 626 627 switch (msg_info.hdr.msg_type) { 628 case CTL_MSG_SERIALIZE: 629 #if 0 630 printf("Serialize\n"); 631 #endif 632 io = ctl_alloc_io_nowait(softc->othersc_pool); 633 if (io == NULL) { 634 printf("ctl_isc_event_handler: can't allocate " 635 "ctl_io!\n"); 636 /* Bad Juju */ 637 /* Need to set busy and send msg back */ 638 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 639 msg_info.hdr.status = CTL_SCSI_ERROR; 640 msg_info.scsi.scsi_status = SCSI_STATUS_BUSY; 641 msg_info.scsi.sense_len = 0; 642 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 643 sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){ 644 } 645 goto bailout; 646 } 647 ctl_zero_io(io); 648 // populate ctsio from msg_info 649 io->io_hdr.io_type = CTL_IO_SCSI; 650 io->io_hdr.msg_type = CTL_MSG_SERIALIZE; 651 io->io_hdr.original_sc = msg_info.hdr.original_sc; 652 #if 0 653 printf("pOrig %x\n", (int)msg_info.original_sc); 654 #endif 655 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | 656 CTL_FLAG_IO_ACTIVE; 657 /* 658 * If we're in serialization-only mode, we don't 659 * want to go through full done processing. Thus 660 * the COPY flag. 661 * 662 * XXX KDM add another flag that is more specific. 663 */ 664 if (softc->ha_mode == CTL_HA_MODE_SER_ONLY) 665 io->io_hdr.flags |= CTL_FLAG_INT_COPY; 666 io->io_hdr.nexus = msg_info.hdr.nexus; 667 #if 0 668 printf("targ %d, port %d, iid %d, lun %d\n", 669 io->io_hdr.nexus.targ_target.id, 670 io->io_hdr.nexus.targ_port, 671 io->io_hdr.nexus.initid.id, 672 io->io_hdr.nexus.targ_lun); 673 #endif 674 io->scsiio.tag_num = msg_info.scsi.tag_num; 675 io->scsiio.tag_type = msg_info.scsi.tag_type; 676 memcpy(io->scsiio.cdb, msg_info.scsi.cdb, 677 CTL_MAX_CDBLEN); 678 if (softc->ha_mode == CTL_HA_MODE_XFER) { 679 const struct ctl_cmd_entry *entry; 680 681 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 682 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 683 io->io_hdr.flags |= 684 entry->flags & CTL_FLAG_DATA_MASK; 685 } 686 ctl_enqueue_isc(io); 687 break; 688 689 /* Performed on the Originating SC, XFER mode only */ 690 case CTL_MSG_DATAMOVE: { 691 struct ctl_sg_entry *sgl; 692 int i, j; 693 694 io = msg_info.hdr.original_sc; 695 if (io == NULL) { 696 printf("%s: original_sc == NULL!\n", __func__); 697 /* XXX KDM do something here */ 698 break; 699 } 700 io->io_hdr.msg_type = CTL_MSG_DATAMOVE; 701 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 702 /* 703 * Keep track of this, we need to send it back over 704 * when the datamove is complete. 
705 */ 706 io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc; 707 708 if (msg_info.dt.sg_sequence == 0) { 709 /* 710 * XXX KDM we use the preallocated S/G list 711 * here, but we'll need to change this to 712 * dynamic allocation if we need larger S/G 713 * lists. 714 */ 715 if (msg_info.dt.kern_sg_entries > 716 sizeof(io->io_hdr.remote_sglist) / 717 sizeof(io->io_hdr.remote_sglist[0])) { 718 printf("%s: number of S/G entries " 719 "needed %u > allocated num %zd\n", 720 __func__, 721 msg_info.dt.kern_sg_entries, 722 sizeof(io->io_hdr.remote_sglist)/ 723 sizeof(io->io_hdr.remote_sglist[0])); 724 725 /* 726 * XXX KDM send a message back to 727 * the other side to shut down the 728 * DMA. The error will come back 729 * through via the normal channel. 730 */ 731 break; 732 } 733 sgl = io->io_hdr.remote_sglist; 734 memset(sgl, 0, 735 sizeof(io->io_hdr.remote_sglist)); 736 737 io->scsiio.kern_data_ptr = (uint8_t *)sgl; 738 739 io->scsiio.kern_sg_entries = 740 msg_info.dt.kern_sg_entries; 741 io->scsiio.rem_sg_entries = 742 msg_info.dt.kern_sg_entries; 743 io->scsiio.kern_data_len = 744 msg_info.dt.kern_data_len; 745 io->scsiio.kern_total_len = 746 msg_info.dt.kern_total_len; 747 io->scsiio.kern_data_resid = 748 msg_info.dt.kern_data_resid; 749 io->scsiio.kern_rel_offset = 750 msg_info.dt.kern_rel_offset; 751 /* 752 * Clear out per-DMA flags. 753 */ 754 io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK; 755 /* 756 * Add per-DMA flags that are set for this 757 * particular DMA request. 758 */ 759 io->io_hdr.flags |= msg_info.dt.flags & 760 CTL_FLAG_RDMA_MASK; 761 } else 762 sgl = (struct ctl_sg_entry *) 763 io->scsiio.kern_data_ptr; 764 765 for (i = msg_info.dt.sent_sg_entries, j = 0; 766 i < (msg_info.dt.sent_sg_entries + 767 msg_info.dt.cur_sg_entries); i++, j++) { 768 sgl[i].addr = msg_info.dt.sg_list[j].addr; 769 sgl[i].len = msg_info.dt.sg_list[j].len; 770 771 #if 0 772 printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n", 773 __func__, 774 msg_info.dt.sg_list[j].addr, 775 msg_info.dt.sg_list[j].len, 776 sgl[i].addr, sgl[i].len, j, i); 777 #endif 778 } 779 #if 0 780 memcpy(&sgl[msg_info.dt.sent_sg_entries], 781 msg_info.dt.sg_list, 782 sizeof(*sgl) * msg_info.dt.cur_sg_entries); 783 #endif 784 785 /* 786 * If this is the last piece of the I/O, we've got 787 * the full S/G list. Queue processing in the thread. 788 * Otherwise wait for the next piece. 789 */ 790 if (msg_info.dt.sg_last != 0) 791 ctl_enqueue_isc(io); 792 break; 793 } 794 /* Performed on the Serializing (primary) SC, XFER mode only */ 795 case CTL_MSG_DATAMOVE_DONE: { 796 if (msg_info.hdr.serializing_sc == NULL) { 797 printf("%s: serializing_sc == NULL!\n", 798 __func__); 799 /* XXX KDM now what? */ 800 break; 801 } 802 /* 803 * We grab the sense information here in case 804 * there was a failure, so we can return status 805 * back to the initiator. 
806 */ 807 io = msg_info.hdr.serializing_sc; 808 io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 809 io->io_hdr.status = msg_info.hdr.status; 810 io->scsiio.scsi_status = msg_info.scsi.scsi_status; 811 io->scsiio.sense_len = msg_info.scsi.sense_len; 812 io->scsiio.sense_residual =msg_info.scsi.sense_residual; 813 io->io_hdr.port_status = msg_info.scsi.fetd_status; 814 io->scsiio.residual = msg_info.scsi.residual; 815 memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data, 816 sizeof(io->scsiio.sense_data)); 817 ctl_enqueue_isc(io); 818 break; 819 } 820 821 /* Preformed on Originating SC, SER_ONLY mode */ 822 case CTL_MSG_R2R: 823 io = msg_info.hdr.original_sc; 824 if (io == NULL) { 825 printf("%s: Major Bummer\n", __func__); 826 return; 827 } else { 828 #if 0 829 printf("pOrig %x\n",(int) ctsio); 830 #endif 831 } 832 io->io_hdr.msg_type = CTL_MSG_R2R; 833 io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc; 834 ctl_enqueue_isc(io); 835 break; 836 837 /* 838 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY 839 * mode. 840 * Performed on the Originating (i.e. secondary) SC in XFER 841 * mode 842 */ 843 case CTL_MSG_FINISH_IO: 844 if (softc->ha_mode == CTL_HA_MODE_XFER) 845 ctl_isc_handler_finish_xfer(softc, 846 &msg_info); 847 else 848 ctl_isc_handler_finish_ser_only(softc, 849 &msg_info); 850 break; 851 852 /* Preformed on Originating SC */ 853 case CTL_MSG_BAD_JUJU: 854 io = msg_info.hdr.original_sc; 855 if (io == NULL) { 856 printf("%s: Bad JUJU!, original_sc is NULL!\n", 857 __func__); 858 break; 859 } 860 ctl_copy_sense_data(&msg_info, io); 861 /* 862 * IO should have already been cleaned up on other 863 * SC so clear this flag so we won't send a message 864 * back to finish the IO there. 865 */ 866 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 867 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 868 869 /* io = msg_info.hdr.serializing_sc; */ 870 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 871 ctl_enqueue_isc(io); 872 break; 873 874 /* Handle resets sent from the other side */ 875 case CTL_MSG_MANAGE_TASKS: { 876 struct ctl_taskio *taskio; 877 taskio = (struct ctl_taskio *)ctl_alloc_io_nowait( 878 softc->othersc_pool); 879 if (taskio == NULL) { 880 printf("ctl_isc_event_handler: can't allocate " 881 "ctl_io!\n"); 882 /* Bad Juju */ 883 /* should I just call the proper reset func 884 here??? 
*/ 885 goto bailout; 886 } 887 ctl_zero_io((union ctl_io *)taskio); 888 taskio->io_hdr.io_type = CTL_IO_TASK; 889 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 890 taskio->io_hdr.nexus = msg_info.hdr.nexus; 891 taskio->task_action = msg_info.task.task_action; 892 taskio->tag_num = msg_info.task.tag_num; 893 taskio->tag_type = msg_info.task.tag_type; 894 #ifdef CTL_TIME_IO 895 taskio->io_hdr.start_time = time_uptime; 896 getbintime(&taskio->io_hdr.start_bt); 897 #if 0 898 cs_prof_gettime(&taskio->io_hdr.start_ticks); 899 #endif 900 #endif /* CTL_TIME_IO */ 901 ctl_run_task((union ctl_io *)taskio); 902 break; 903 } 904 /* Persistent Reserve action which needs attention */ 905 case CTL_MSG_PERS_ACTION: 906 presio = (struct ctl_prio *)ctl_alloc_io_nowait( 907 softc->othersc_pool); 908 if (presio == NULL) { 909 printf("ctl_isc_event_handler: can't allocate " 910 "ctl_io!\n"); 911 /* Bad Juju */ 912 /* Need to set busy and send msg back */ 913 goto bailout; 914 } 915 ctl_zero_io((union ctl_io *)presio); 916 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; 917 presio->pr_msg = msg_info.pr; 918 ctl_enqueue_isc((union ctl_io *)presio); 919 break; 920 case CTL_MSG_SYNC_FE: 921 rcv_sync_msg = 1; 922 break; 923 default: 924 printf("How did I get here?\n"); 925 } 926 } else if (event == CTL_HA_EVT_MSG_SENT) { 927 if (param != CTL_HA_STATUS_SUCCESS) { 928 printf("Bad status from ctl_ha_msg_send status %d\n", 929 param); 930 } 931 return; 932 } else if (event == CTL_HA_EVT_DISCONNECT) { 933 printf("CTL: Got a disconnect from Isc\n"); 934 return; 935 } else { 936 printf("ctl_isc_event_handler: Unknown event %d\n", event); 937 return; 938 } 939 940 bailout: 941 return; 942 } 943 944 static void 945 ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) 946 { 947 struct scsi_sense_data *sense; 948 949 sense = &dest->scsiio.sense_data; 950 bcopy(&src->scsi.sense_data, sense, sizeof(*sense)); 951 dest->scsiio.scsi_status = src->scsi.scsi_status; 952 dest->scsiio.sense_len = src->scsi.sense_len; 953 dest->io_hdr.status = src->hdr.status; 954 } 955 #endif 956 957 static void 958 ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 959 { 960 ctl_ua_type *pu; 961 962 mtx_assert(&lun->lun_lock, MA_OWNED); 963 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 964 if (pu == NULL) 965 return; 966 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; 967 } 968 969 static void 970 ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 971 { 972 int i, j; 973 974 mtx_assert(&lun->lun_lock, MA_OWNED); 975 for (i = 0; i < CTL_MAX_PORTS; i++) { 976 if (lun->pending_ua[i] == NULL) 977 continue; 978 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 979 if (i * CTL_MAX_INIT_PER_PORT + j == except) 980 continue; 981 lun->pending_ua[i][j] |= ua; 982 } 983 } 984 } 985 986 static void 987 ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 988 { 989 ctl_ua_type *pu; 990 991 mtx_assert(&lun->lun_lock, MA_OWNED); 992 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 993 if (pu == NULL) 994 return; 995 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; 996 } 997 998 static void 999 ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1000 { 1001 int i, j; 1002 1003 mtx_assert(&lun->lun_lock, MA_OWNED); 1004 for (i = 0; i < CTL_MAX_PORTS; i++) { 1005 if (lun->pending_ua[i] == NULL) 1006 continue; 1007 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1008 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1009 continue; 1010 lun->pending_ua[i][j] &= ~ua; 1011 } 1012 } 1013 } 1014 1015 static 
int 1016 ctl_ha_state_sysctl(SYSCTL_HANDLER_ARGS) 1017 { 1018 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1019 struct ctl_lun *lun; 1020 int error, value; 1021 1022 if (softc->flags & CTL_FLAG_ACTIVE_SHELF) 1023 value = 0; 1024 else 1025 value = 1; 1026 1027 error = sysctl_handle_int(oidp, &value, 0, req); 1028 if ((error != 0) || (req->newptr == NULL)) 1029 return (error); 1030 1031 mtx_lock(&softc->ctl_lock); 1032 if (value == 0) 1033 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1034 else 1035 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1036 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1037 mtx_lock(&lun->lun_lock); 1038 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 1039 mtx_unlock(&lun->lun_lock); 1040 } 1041 mtx_unlock(&softc->ctl_lock); 1042 return (0); 1043 } 1044 1045 static int 1046 ctl_init(void) 1047 { 1048 struct ctl_softc *softc; 1049 void *other_pool; 1050 int i, error, retval; 1051 //int isc_retval; 1052 1053 retval = 0; 1054 ctl_pause_rtr = 0; 1055 rcv_sync_msg = 0; 1056 1057 control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1058 M_WAITOK | M_ZERO); 1059 softc = control_softc; 1060 1061 softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, 1062 "cam/ctl"); 1063 1064 softc->dev->si_drv1 = softc; 1065 1066 /* 1067 * By default, return a "bad LUN" peripheral qualifier for unknown 1068 * LUNs. The user can override this default using the tunable or 1069 * sysctl. See the comment in ctl_inquiry_std() for more details. 1070 */ 1071 softc->inquiry_pq_no_lun = 1; 1072 TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun", 1073 &softc->inquiry_pq_no_lun); 1074 sysctl_ctx_init(&softc->sysctl_ctx); 1075 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1076 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1077 CTLFLAG_RD, 0, "CAM Target Layer"); 1078 1079 if (softc->sysctl_tree == NULL) { 1080 printf("%s: unable to allocate sysctl tree\n", __func__); 1081 destroy_dev(softc->dev); 1082 free(control_softc, M_DEVBUF); 1083 control_softc = NULL; 1084 return (ENOMEM); 1085 } 1086 1087 SYSCTL_ADD_INT(&softc->sysctl_ctx, 1088 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, 1089 "inquiry_pq_no_lun", CTLFLAG_RW, 1090 &softc->inquiry_pq_no_lun, 0, 1091 "Report no lun possible for invalid LUNs"); 1092 1093 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1094 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1095 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1096 softc->open_count = 0; 1097 1098 /* 1099 * Default to actually sending a SYNCHRONIZE CACHE command down to 1100 * the drive. 1101 */ 1102 softc->flags = CTL_FLAG_REAL_SYNC; 1103 1104 /* 1105 * In Copan's HA scheme, the "master" and "slave" roles are 1106 * figured out through the slot the controller is in. Although it 1107 * is an active/active system, someone has to be in charge. 
1108 */ 1109 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1110 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1111 "HA head ID (0 - no HA)"); 1112 if (softc->ha_id == 0) { 1113 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1114 softc->is_single = 1; 1115 softc->port_offset = 0; 1116 } else 1117 softc->port_offset = (softc->ha_id - 1) * CTL_MAX_PORTS; 1118 softc->persis_offset = softc->port_offset * CTL_MAX_INIT_PER_PORT; 1119 1120 STAILQ_INIT(&softc->lun_list); 1121 STAILQ_INIT(&softc->pending_lun_queue); 1122 STAILQ_INIT(&softc->fe_list); 1123 STAILQ_INIT(&softc->port_list); 1124 STAILQ_INIT(&softc->be_list); 1125 ctl_tpc_init(softc); 1126 1127 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, 1128 &other_pool) != 0) 1129 { 1130 printf("ctl: can't allocate %d entry other SC pool, " 1131 "exiting\n", CTL_POOL_ENTRIES_OTHER_SC); 1132 return (ENOMEM); 1133 } 1134 softc->othersc_pool = other_pool; 1135 1136 if (worker_threads <= 0) 1137 worker_threads = max(1, mp_ncpus / 4); 1138 if (worker_threads > CTL_MAX_THREADS) 1139 worker_threads = CTL_MAX_THREADS; 1140 1141 for (i = 0; i < worker_threads; i++) { 1142 struct ctl_thread *thr = &softc->threads[i]; 1143 1144 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); 1145 thr->ctl_softc = softc; 1146 STAILQ_INIT(&thr->incoming_queue); 1147 STAILQ_INIT(&thr->rtr_queue); 1148 STAILQ_INIT(&thr->done_queue); 1149 STAILQ_INIT(&thr->isc_queue); 1150 1151 error = kproc_kthread_add(ctl_work_thread, thr, 1152 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); 1153 if (error != 0) { 1154 printf("error creating CTL work thread!\n"); 1155 ctl_pool_free(other_pool); 1156 return (error); 1157 } 1158 } 1159 error = kproc_kthread_add(ctl_lun_thread, softc, 1160 &softc->ctl_proc, NULL, 0, 0, "ctl", "lun"); 1161 if (error != 0) { 1162 printf("error creating CTL lun thread!\n"); 1163 ctl_pool_free(other_pool); 1164 return (error); 1165 } 1166 error = kproc_kthread_add(ctl_thresh_thread, softc, 1167 &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh"); 1168 if (error != 0) { 1169 printf("error creating CTL threshold thread!\n"); 1170 ctl_pool_free(other_pool); 1171 return (error); 1172 } 1173 1174 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), 1175 OID_AUTO, "ha_state", CTLTYPE_INT | CTLFLAG_RWTUN, 1176 softc, 0, ctl_ha_state_sysctl, "I", "HA state for this head"); 1177 1178 #ifdef CTL_IO_DELAY 1179 if (sizeof(struct callout) > CTL_TIMER_BYTES) { 1180 printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n", 1181 sizeof(struct callout), CTL_TIMER_BYTES); 1182 return (EINVAL); 1183 } 1184 #endif /* CTL_IO_DELAY */ 1185 1186 return (0); 1187 } 1188 1189 void 1190 ctl_shutdown(void) 1191 { 1192 struct ctl_softc *softc; 1193 struct ctl_lun *lun, *next_lun; 1194 1195 softc = (struct ctl_softc *)control_softc; 1196 1197 mtx_lock(&softc->ctl_lock); 1198 1199 /* 1200 * Free up each LUN. 
1201 */ 1202 for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){ 1203 next_lun = STAILQ_NEXT(lun, links); 1204 ctl_free_lun(lun); 1205 } 1206 1207 mtx_unlock(&softc->ctl_lock); 1208 1209 #if 0 1210 ctl_shutdown_thread(softc->work_thread); 1211 mtx_destroy(&softc->queue_lock); 1212 #endif 1213 1214 ctl_tpc_shutdown(softc); 1215 uma_zdestroy(softc->io_zone); 1216 mtx_destroy(&softc->ctl_lock); 1217 1218 destroy_dev(softc->dev); 1219 1220 sysctl_ctx_free(&softc->sysctl_ctx); 1221 1222 free(control_softc, M_DEVBUF); 1223 control_softc = NULL; 1224 } 1225 1226 static int 1227 ctl_module_event_handler(module_t mod, int what, void *arg) 1228 { 1229 1230 switch (what) { 1231 case MOD_LOAD: 1232 return (ctl_init()); 1233 case MOD_UNLOAD: 1234 return (EBUSY); 1235 default: 1236 return (EOPNOTSUPP); 1237 } 1238 } 1239 1240 /* 1241 * XXX KDM should we do some access checks here? Bump a reference count to 1242 * prevent a CTL module from being unloaded while someone has it open? 1243 */ 1244 static int 1245 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 1246 { 1247 return (0); 1248 } 1249 1250 static int 1251 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 1252 { 1253 return (0); 1254 } 1255 1256 int 1257 ctl_port_enable(ctl_port_type port_type) 1258 { 1259 struct ctl_softc *softc = control_softc; 1260 struct ctl_port *port; 1261 1262 if (softc->is_single == 0) { 1263 union ctl_ha_msg msg_info; 1264 int isc_retval; 1265 1266 #if 0 1267 printf("%s: HA mode, synchronizing frontend enable\n", 1268 __func__); 1269 #endif 1270 msg_info.hdr.msg_type = CTL_MSG_SYNC_FE; 1271 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1272 sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) { 1273 printf("Sync msg send error retval %d\n", isc_retval); 1274 } 1275 if (!rcv_sync_msg) { 1276 isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info, 1277 sizeof(msg_info), 1); 1278 } 1279 #if 0 1280 printf("CTL:Frontend Enable\n"); 1281 } else { 1282 printf("%s: single mode, skipping frontend synchronization\n", 1283 __func__); 1284 #endif 1285 } 1286 1287 STAILQ_FOREACH(port, &softc->port_list, links) { 1288 if (port_type & port->port_type) 1289 { 1290 #if 0 1291 printf("port %d\n", port->targ_port); 1292 #endif 1293 ctl_port_online(port); 1294 } 1295 } 1296 1297 return (0); 1298 } 1299 1300 int 1301 ctl_port_disable(ctl_port_type port_type) 1302 { 1303 struct ctl_softc *softc; 1304 struct ctl_port *port; 1305 1306 softc = control_softc; 1307 1308 STAILQ_FOREACH(port, &softc->port_list, links) { 1309 if (port_type & port->port_type) 1310 ctl_port_offline(port); 1311 } 1312 1313 return (0); 1314 } 1315 1316 /* 1317 * Returns 0 for success, 1 for failure. 1318 * Currently the only failure mode is if there aren't enough entries 1319 * allocated. So, in case of a failure, look at num_entries_dropped, 1320 * reallocate and try again. 
1321 */ 1322 int 1323 ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced, 1324 int *num_entries_filled, int *num_entries_dropped, 1325 ctl_port_type port_type, int no_virtual) 1326 { 1327 struct ctl_softc *softc; 1328 struct ctl_port *port; 1329 int entries_dropped, entries_filled; 1330 int retval; 1331 int i; 1332 1333 softc = control_softc; 1334 1335 retval = 0; 1336 entries_filled = 0; 1337 entries_dropped = 0; 1338 1339 i = 0; 1340 mtx_lock(&softc->ctl_lock); 1341 STAILQ_FOREACH(port, &softc->port_list, links) { 1342 struct ctl_port_entry *entry; 1343 1344 if ((port->port_type & port_type) == 0) 1345 continue; 1346 1347 if ((no_virtual != 0) 1348 && (port->virtual_port != 0)) 1349 continue; 1350 1351 if (entries_filled >= num_entries_alloced) { 1352 entries_dropped++; 1353 continue; 1354 } 1355 entry = &entries[i]; 1356 1357 entry->port_type = port->port_type; 1358 strlcpy(entry->port_name, port->port_name, 1359 sizeof(entry->port_name)); 1360 entry->physical_port = port->physical_port; 1361 entry->virtual_port = port->virtual_port; 1362 entry->wwnn = port->wwnn; 1363 entry->wwpn = port->wwpn; 1364 1365 i++; 1366 entries_filled++; 1367 } 1368 1369 mtx_unlock(&softc->ctl_lock); 1370 1371 if (entries_dropped > 0) 1372 retval = 1; 1373 1374 *num_entries_dropped = entries_dropped; 1375 *num_entries_filled = entries_filled; 1376 1377 return (retval); 1378 } 1379 1380 /* 1381 * Remove an initiator by port number and initiator ID. 1382 * Returns 0 for success, -1 for failure. 1383 */ 1384 int 1385 ctl_remove_initiator(struct ctl_port *port, int iid) 1386 { 1387 struct ctl_softc *softc = control_softc; 1388 1389 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1390 1391 if (iid > CTL_MAX_INIT_PER_PORT) { 1392 printf("%s: initiator ID %u > maximun %u!\n", 1393 __func__, iid, CTL_MAX_INIT_PER_PORT); 1394 return (-1); 1395 } 1396 1397 mtx_lock(&softc->ctl_lock); 1398 port->wwpn_iid[iid].in_use--; 1399 port->wwpn_iid[iid].last_use = time_uptime; 1400 mtx_unlock(&softc->ctl_lock); 1401 1402 return (0); 1403 } 1404 1405 /* 1406 * Add an initiator to the initiator map. 1407 * Returns iid for success, < 0 for failure. 
1408 */ 1409 int 1410 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 1411 { 1412 struct ctl_softc *softc = control_softc; 1413 time_t best_time; 1414 int i, best; 1415 1416 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1417 1418 if (iid >= CTL_MAX_INIT_PER_PORT) { 1419 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 1420 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 1421 free(name, M_CTL); 1422 return (-1); 1423 } 1424 1425 mtx_lock(&softc->ctl_lock); 1426 1427 if (iid < 0 && (wwpn != 0 || name != NULL)) { 1428 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1429 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 1430 iid = i; 1431 break; 1432 } 1433 if (name != NULL && port->wwpn_iid[i].name != NULL && 1434 strcmp(name, port->wwpn_iid[i].name) == 0) { 1435 iid = i; 1436 break; 1437 } 1438 } 1439 } 1440 1441 if (iid < 0) { 1442 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1443 if (port->wwpn_iid[i].in_use == 0 && 1444 port->wwpn_iid[i].wwpn == 0 && 1445 port->wwpn_iid[i].name == NULL) { 1446 iid = i; 1447 break; 1448 } 1449 } 1450 } 1451 1452 if (iid < 0) { 1453 best = -1; 1454 best_time = INT32_MAX; 1455 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1456 if (port->wwpn_iid[i].in_use == 0) { 1457 if (port->wwpn_iid[i].last_use < best_time) { 1458 best = i; 1459 best_time = port->wwpn_iid[i].last_use; 1460 } 1461 } 1462 } 1463 iid = best; 1464 } 1465 1466 if (iid < 0) { 1467 mtx_unlock(&softc->ctl_lock); 1468 free(name, M_CTL); 1469 return (-2); 1470 } 1471 1472 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 1473 /* 1474 * This is not an error yet. 1475 */ 1476 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 1477 #if 0 1478 printf("%s: port %d iid %u WWPN %#jx arrived" 1479 " again\n", __func__, port->targ_port, 1480 iid, (uintmax_t)wwpn); 1481 #endif 1482 goto take; 1483 } 1484 if (name != NULL && port->wwpn_iid[iid].name != NULL && 1485 strcmp(name, port->wwpn_iid[iid].name) == 0) { 1486 #if 0 1487 printf("%s: port %d iid %u name '%s' arrived" 1488 " again\n", __func__, port->targ_port, 1489 iid, name); 1490 #endif 1491 goto take; 1492 } 1493 1494 /* 1495 * This is an error, but what do we do about it? The 1496 * driver is telling us we have a new WWPN for this 1497 * initiator ID, so we pretty much need to use it. 1498 */ 1499 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 1500 " but WWPN %#jx '%s' is still at that address\n", 1501 __func__, port->targ_port, iid, wwpn, name, 1502 (uintmax_t)port->wwpn_iid[iid].wwpn, 1503 port->wwpn_iid[iid].name); 1504 1505 /* 1506 * XXX KDM clear have_ca and ua_pending on each LUN for 1507 * this initiator. 
1508 */ 1509 } 1510 take: 1511 free(port->wwpn_iid[iid].name, M_CTL); 1512 port->wwpn_iid[iid].name = name; 1513 port->wwpn_iid[iid].wwpn = wwpn; 1514 port->wwpn_iid[iid].in_use++; 1515 mtx_unlock(&softc->ctl_lock); 1516 1517 return (iid); 1518 } 1519 1520 static int 1521 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 1522 { 1523 int len; 1524 1525 switch (port->port_type) { 1526 case CTL_PORT_FC: 1527 { 1528 struct scsi_transportid_fcp *id = 1529 (struct scsi_transportid_fcp *)buf; 1530 if (port->wwpn_iid[iid].wwpn == 0) 1531 return (0); 1532 memset(id, 0, sizeof(*id)); 1533 id->format_protocol = SCSI_PROTO_FC; 1534 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 1535 return (sizeof(*id)); 1536 } 1537 case CTL_PORT_ISCSI: 1538 { 1539 struct scsi_transportid_iscsi_port *id = 1540 (struct scsi_transportid_iscsi_port *)buf; 1541 if (port->wwpn_iid[iid].name == NULL) 1542 return (0); 1543 memset(id, 0, 256); 1544 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 1545 SCSI_PROTO_ISCSI; 1546 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 1547 len = roundup2(min(len, 252), 4); 1548 scsi_ulto2b(len, id->additional_length); 1549 return (sizeof(*id) + len); 1550 } 1551 case CTL_PORT_SAS: 1552 { 1553 struct scsi_transportid_sas *id = 1554 (struct scsi_transportid_sas *)buf; 1555 if (port->wwpn_iid[iid].wwpn == 0) 1556 return (0); 1557 memset(id, 0, sizeof(*id)); 1558 id->format_protocol = SCSI_PROTO_SAS; 1559 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 1560 return (sizeof(*id)); 1561 } 1562 default: 1563 { 1564 struct scsi_transportid_spi *id = 1565 (struct scsi_transportid_spi *)buf; 1566 memset(id, 0, sizeof(*id)); 1567 id->format_protocol = SCSI_PROTO_SPI; 1568 scsi_ulto2b(iid, id->scsi_addr); 1569 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 1570 return (sizeof(*id)); 1571 } 1572 } 1573 } 1574 1575 /* 1576 * Serialize a command that went down the "wrong" side, and so was sent to 1577 * this controller for execution. The logic is a little different than the 1578 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 1579 * sent back to the other side, but in the success case, we execute the 1580 * command on this side (XFER mode) or tell the other side to execute it 1581 * (SER_ONLY mode). 1582 */ 1583 static int 1584 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 1585 { 1586 struct ctl_softc *softc; 1587 union ctl_ha_msg msg_info; 1588 struct ctl_lun *lun; 1589 int retval = 0; 1590 uint32_t targ_lun; 1591 1592 softc = control_softc; 1593 1594 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 1595 lun = softc->ctl_luns[targ_lun]; 1596 if (lun==NULL) 1597 { 1598 /* 1599 * Why isn't LUN defined? The other side wouldn't 1600 * send a cmd if the LUN is undefined. 
1601 */ 1602 printf("%s: Bad JUJU!, LUN is NULL!\n", __func__); 1603 1604 /* "Logical unit not supported" */ 1605 ctl_set_sense_data(&msg_info.scsi.sense_data, 1606 lun, 1607 /*sense_format*/SSD_TYPE_NONE, 1608 /*current_error*/ 1, 1609 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1610 /*asc*/ 0x25, 1611 /*ascq*/ 0x00, 1612 SSD_ELEM_NONE); 1613 1614 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1615 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1616 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1617 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1618 msg_info.hdr.serializing_sc = NULL; 1619 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1620 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1621 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1622 } 1623 return(1); 1624 1625 } 1626 1627 mtx_lock(&lun->lun_lock); 1628 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1629 1630 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 1631 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, 1632 ooa_links))) { 1633 case CTL_ACTION_BLOCK: 1634 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 1635 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 1636 blocked_links); 1637 break; 1638 case CTL_ACTION_PASS: 1639 case CTL_ACTION_SKIP: 1640 if (softc->ha_mode == CTL_HA_MODE_XFER) { 1641 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 1642 ctl_enqueue_rtr((union ctl_io *)ctsio); 1643 } else { 1644 1645 /* send msg back to other side */ 1646 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1647 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 1648 msg_info.hdr.msg_type = CTL_MSG_R2R; 1649 #if 0 1650 printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc); 1651 #endif 1652 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1653 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1654 } 1655 } 1656 break; 1657 case CTL_ACTION_OVERLAP: 1658 /* OVERLAPPED COMMANDS ATTEMPTED */ 1659 ctl_set_sense_data(&msg_info.scsi.sense_data, 1660 lun, 1661 /*sense_format*/SSD_TYPE_NONE, 1662 /*current_error*/ 1, 1663 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1664 /*asc*/ 0x4E, 1665 /*ascq*/ 0x00, 1666 SSD_ELEM_NONE); 1667 1668 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1669 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1670 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1671 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1672 msg_info.hdr.serializing_sc = NULL; 1673 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1674 #if 0 1675 printf("BAD JUJU:Major Bummer Overlap\n"); 1676 #endif 1677 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1678 retval = 1; 1679 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1680 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1681 } 1682 break; 1683 case CTL_ACTION_OVERLAP_TAG: 1684 /* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */ 1685 ctl_set_sense_data(&msg_info.scsi.sense_data, 1686 lun, 1687 /*sense_format*/SSD_TYPE_NONE, 1688 /*current_error*/ 1, 1689 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1690 /*asc*/ 0x4D, 1691 /*ascq*/ ctsio->tag_num & 0xff, 1692 SSD_ELEM_NONE); 1693 1694 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1695 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1696 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1697 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1698 msg_info.hdr.serializing_sc = NULL; 1699 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1700 #if 0 1701 printf("BAD JUJU:Major Bummer Overlap Tag\n"); 1702 #endif 1703 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1704 retval = 1; 1705 if 
(ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1706 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1707 } 1708 break; 1709 case CTL_ACTION_ERROR: 1710 default: 1711 /* "Internal target failure" */ 1712 ctl_set_sense_data(&msg_info.scsi.sense_data, 1713 lun, 1714 /*sense_format*/SSD_TYPE_NONE, 1715 /*current_error*/ 1, 1716 /*sense_key*/ SSD_KEY_HARDWARE_ERROR, 1717 /*asc*/ 0x44, 1718 /*ascq*/ 0x00, 1719 SSD_ELEM_NONE); 1720 1721 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1722 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1723 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1724 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1725 msg_info.hdr.serializing_sc = NULL; 1726 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1727 #if 0 1728 printf("BAD JUJU:Major Bummer HW Error\n"); 1729 #endif 1730 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1731 retval = 1; 1732 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1733 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1734 } 1735 break; 1736 } 1737 mtx_unlock(&lun->lun_lock); 1738 return (retval); 1739 } 1740 1741 /* 1742 * Returns 0 for success, errno for failure. 1743 */ 1744 static int 1745 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 1746 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 1747 { 1748 union ctl_io *io; 1749 int retval; 1750 1751 retval = 0; 1752 1753 mtx_lock(&lun->lun_lock); 1754 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); 1755 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 1756 ooa_links)) { 1757 struct ctl_ooa_entry *entry; 1758 1759 /* 1760 * If we've got more than we can fit, just count the 1761 * remaining entries. 1762 */ 1763 if (*cur_fill_num >= ooa_hdr->alloc_num) 1764 continue; 1765 1766 entry = &kern_entries[*cur_fill_num]; 1767 1768 entry->tag_num = io->scsiio.tag_num; 1769 entry->lun_num = lun->lun; 1770 #ifdef CTL_TIME_IO 1771 entry->start_bt = io->io_hdr.start_bt; 1772 #endif 1773 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 1774 entry->cdb_len = io->scsiio.cdb_len; 1775 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 1776 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 1777 1778 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 1779 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 1780 1781 if (io->io_hdr.flags & CTL_FLAG_ABORT) 1782 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 1783 1784 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 1785 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 1786 1787 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 1788 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 1789 } 1790 mtx_unlock(&lun->lun_lock); 1791 1792 return (retval); 1793 } 1794 1795 static void * 1796 ctl_copyin_alloc(void *user_addr, int len, char *error_str, 1797 size_t error_str_len) 1798 { 1799 void *kptr; 1800 1801 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 1802 1803 if (copyin(user_addr, kptr, len) != 0) { 1804 snprintf(error_str, error_str_len, "Error copying %d bytes " 1805 "from user address %p to kernel address %p", len, 1806 user_addr, kptr); 1807 free(kptr, M_CTL); 1808 return (NULL); 1809 } 1810 1811 return (kptr); 1812 } 1813 1814 static void 1815 ctl_free_args(int num_args, struct ctl_be_arg *args) 1816 { 1817 int i; 1818 1819 if (args == NULL) 1820 return; 1821 1822 for (i = 0; i < num_args; i++) { 1823 free(args[i].kname, M_CTL); 1824 free(args[i].kvalue, M_CTL); 1825 } 1826 1827 free(args, M_CTL); 1828 } 1829 1830 static struct ctl_be_arg * 1831 ctl_copyin_args(int num_args, struct ctl_be_arg *uargs, 1832 char *error_str, size_t 
error_str_len) 1833 { 1834 struct ctl_be_arg *args; 1835 int i; 1836 1837 args = ctl_copyin_alloc(uargs, num_args * sizeof(*args), 1838 error_str, error_str_len); 1839 1840 if (args == NULL) 1841 goto bailout; 1842 1843 for (i = 0; i < num_args; i++) { 1844 args[i].kname = NULL; 1845 args[i].kvalue = NULL; 1846 } 1847 1848 for (i = 0; i < num_args; i++) { 1849 uint8_t *tmpptr; 1850 1851 args[i].kname = ctl_copyin_alloc(args[i].name, 1852 args[i].namelen, error_str, error_str_len); 1853 if (args[i].kname == NULL) 1854 goto bailout; 1855 1856 if (args[i].kname[args[i].namelen - 1] != '\0') { 1857 snprintf(error_str, error_str_len, "Argument %d " 1858 "name is not NUL-terminated", i); 1859 goto bailout; 1860 } 1861 1862 if (args[i].flags & CTL_BEARG_RD) { 1863 tmpptr = ctl_copyin_alloc(args[i].value, 1864 args[i].vallen, error_str, error_str_len); 1865 if (tmpptr == NULL) 1866 goto bailout; 1867 if ((args[i].flags & CTL_BEARG_ASCII) 1868 && (tmpptr[args[i].vallen - 1] != '\0')) { 1869 snprintf(error_str, error_str_len, "Argument " 1870 "%d value is not NUL-terminated", i); 1871 goto bailout; 1872 } 1873 args[i].kvalue = tmpptr; 1874 } else { 1875 args[i].kvalue = malloc(args[i].vallen, 1876 M_CTL, M_WAITOK | M_ZERO); 1877 } 1878 } 1879 1880 return (args); 1881 bailout: 1882 1883 ctl_free_args(num_args, args); 1884 1885 return (NULL); 1886 } 1887 1888 static void 1889 ctl_copyout_args(int num_args, struct ctl_be_arg *args) 1890 { 1891 int i; 1892 1893 for (i = 0; i < num_args; i++) { 1894 if (args[i].flags & CTL_BEARG_WR) 1895 copyout(args[i].kvalue, args[i].value, args[i].vallen); 1896 } 1897 } 1898 1899 /* 1900 * Escape characters that are illegal or not recommended in XML. 1901 */ 1902 int 1903 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 1904 { 1905 char *end = str + size; 1906 int retval; 1907 1908 retval = 0; 1909 1910 for (; *str && str < end; str++) { 1911 switch (*str) { 1912 case '&': 1913 retval = sbuf_printf(sb, "&"); 1914 break; 1915 case '>': 1916 retval = sbuf_printf(sb, ">"); 1917 break; 1918 case '<': 1919 retval = sbuf_printf(sb, "<"); 1920 break; 1921 default: 1922 retval = sbuf_putc(sb, *str); 1923 break; 1924 } 1925 1926 if (retval != 0) 1927 break; 1928 1929 } 1930 1931 return (retval); 1932 } 1933 1934 static void 1935 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 1936 { 1937 struct scsi_vpd_id_descriptor *desc; 1938 int i; 1939 1940 if (id == NULL || id->len < 4) 1941 return; 1942 desc = (struct scsi_vpd_id_descriptor *)id->data; 1943 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 1944 case SVPD_ID_TYPE_T10: 1945 sbuf_printf(sb, "t10."); 1946 break; 1947 case SVPD_ID_TYPE_EUI64: 1948 sbuf_printf(sb, "eui."); 1949 break; 1950 case SVPD_ID_TYPE_NAA: 1951 sbuf_printf(sb, "naa."); 1952 break; 1953 case SVPD_ID_TYPE_SCSI_NAME: 1954 break; 1955 } 1956 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 1957 case SVPD_ID_CODESET_BINARY: 1958 for (i = 0; i < desc->length; i++) 1959 sbuf_printf(sb, "%02x", desc->identifier[i]); 1960 break; 1961 case SVPD_ID_CODESET_ASCII: 1962 sbuf_printf(sb, "%.*s", (int)desc->length, 1963 (char *)desc->identifier); 1964 break; 1965 case SVPD_ID_CODESET_UTF8: 1966 sbuf_printf(sb, "%s", (char *)desc->identifier); 1967 break; 1968 } 1969 } 1970 1971 static int 1972 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 1973 struct thread *td) 1974 { 1975 struct ctl_softc *softc; 1976 int retval; 1977 1978 softc = control_softc; 1979 1980 retval = 0; 1981 1982 switch (cmd) { 1983 case CTL_IO: 1984 retval = 
ctl_ioctl_io(dev, cmd, addr, flag, td); 1985 break; 1986 case CTL_ENABLE_PORT: 1987 case CTL_DISABLE_PORT: 1988 case CTL_SET_PORT_WWNS: { 1989 struct ctl_port *port; 1990 struct ctl_port_entry *entry; 1991 1992 entry = (struct ctl_port_entry *)addr; 1993 1994 mtx_lock(&softc->ctl_lock); 1995 STAILQ_FOREACH(port, &softc->port_list, links) { 1996 int action, done; 1997 1998 action = 0; 1999 done = 0; 2000 2001 if ((entry->port_type == CTL_PORT_NONE) 2002 && (entry->targ_port == port->targ_port)) { 2003 /* 2004 * If the user only wants to enable or 2005 * disable or set WWNs on a specific port, 2006 * do the operation and we're done. 2007 */ 2008 action = 1; 2009 done = 1; 2010 } else if (entry->port_type & port->port_type) { 2011 /* 2012 * Compare the user's type mask with the 2013 * particular frontend type to see if we 2014 * have a match. 2015 */ 2016 action = 1; 2017 done = 0; 2018 2019 /* 2020 * Make sure the user isn't trying to set 2021 * WWNs on multiple ports at the same time. 2022 */ 2023 if (cmd == CTL_SET_PORT_WWNS) { 2024 printf("%s: Can't set WWNs on " 2025 "multiple ports\n", __func__); 2026 retval = EINVAL; 2027 break; 2028 } 2029 } 2030 if (action != 0) { 2031 /* 2032 * XXX KDM we have to drop the lock here, 2033 * because the online/offline operations 2034 * can potentially block. We need to 2035 * reference count the frontends so they 2036 * can't go away, 2037 */ 2038 mtx_unlock(&softc->ctl_lock); 2039 2040 if (cmd == CTL_ENABLE_PORT) { 2041 ctl_port_online(port); 2042 } else if (cmd == CTL_DISABLE_PORT) { 2043 ctl_port_offline(port); 2044 } 2045 2046 mtx_lock(&softc->ctl_lock); 2047 2048 if (cmd == CTL_SET_PORT_WWNS) 2049 ctl_port_set_wwns(port, 2050 (entry->flags & CTL_PORT_WWNN_VALID) ? 2051 1 : 0, entry->wwnn, 2052 (entry->flags & CTL_PORT_WWPN_VALID) ? 
2053 1 : 0, entry->wwpn); 2054 } 2055 if (done != 0) 2056 break; 2057 } 2058 mtx_unlock(&softc->ctl_lock); 2059 break; 2060 } 2061 case CTL_GET_PORT_LIST: { 2062 struct ctl_port *port; 2063 struct ctl_port_list *list; 2064 int i; 2065 2066 list = (struct ctl_port_list *)addr; 2067 2068 if (list->alloc_len != (list->alloc_num * 2069 sizeof(struct ctl_port_entry))) { 2070 printf("%s: CTL_GET_PORT_LIST: alloc_len %u != " 2071 "alloc_num %u * sizeof(struct ctl_port_entry) " 2072 "%zu\n", __func__, list->alloc_len, 2073 list->alloc_num, sizeof(struct ctl_port_entry)); 2074 retval = EINVAL; 2075 break; 2076 } 2077 list->fill_len = 0; 2078 list->fill_num = 0; 2079 list->dropped_num = 0; 2080 i = 0; 2081 mtx_lock(&softc->ctl_lock); 2082 STAILQ_FOREACH(port, &softc->port_list, links) { 2083 struct ctl_port_entry entry, *list_entry; 2084 2085 if (list->fill_num >= list->alloc_num) { 2086 list->dropped_num++; 2087 continue; 2088 } 2089 2090 entry.port_type = port->port_type; 2091 strlcpy(entry.port_name, port->port_name, 2092 sizeof(entry.port_name)); 2093 entry.targ_port = port->targ_port; 2094 entry.physical_port = port->physical_port; 2095 entry.virtual_port = port->virtual_port; 2096 entry.wwnn = port->wwnn; 2097 entry.wwpn = port->wwpn; 2098 if (port->status & CTL_PORT_STATUS_ONLINE) 2099 entry.online = 1; 2100 else 2101 entry.online = 0; 2102 2103 list_entry = &list->entries[i]; 2104 2105 retval = copyout(&entry, list_entry, sizeof(entry)); 2106 if (retval != 0) { 2107 printf("%s: CTL_GET_PORT_LIST: copyout " 2108 "returned %d\n", __func__, retval); 2109 break; 2110 } 2111 i++; 2112 list->fill_num++; 2113 list->fill_len += sizeof(entry); 2114 } 2115 mtx_unlock(&softc->ctl_lock); 2116 2117 /* 2118 * If this is non-zero, we had a copyout fault, so there's 2119 * probably no point in attempting to set the status inside 2120 * the structure. 2121 */ 2122 if (retval != 0) 2123 break; 2124 2125 if (list->dropped_num > 0) 2126 list->status = CTL_PORT_LIST_NEED_MORE_SPACE; 2127 else 2128 list->status = CTL_PORT_LIST_OK; 2129 break; 2130 } 2131 case CTL_DUMP_OOA: { 2132 struct ctl_lun *lun; 2133 union ctl_io *io; 2134 char printbuf[128]; 2135 struct sbuf sb; 2136 2137 mtx_lock(&softc->ctl_lock); 2138 printf("Dumping OOA queues:\n"); 2139 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2140 mtx_lock(&lun->lun_lock); 2141 for (io = (union ctl_io *)TAILQ_FIRST( 2142 &lun->ooa_queue); io != NULL; 2143 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2144 ooa_links)) { 2145 sbuf_new(&sb, printbuf, sizeof(printbuf), 2146 SBUF_FIXEDLEN); 2147 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ", 2148 (intmax_t)lun->lun, 2149 io->scsiio.tag_num, 2150 (io->io_hdr.flags & 2151 CTL_FLAG_BLOCKED) ? "" : " BLOCKED", 2152 (io->io_hdr.flags & 2153 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 2154 (io->io_hdr.flags & 2155 CTL_FLAG_ABORT) ? " ABORT" : "", 2156 (io->io_hdr.flags & 2157 CTL_FLAG_IS_WAS_ON_RTR) ? 
" RTR" : ""); 2158 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 2159 sbuf_finish(&sb); 2160 printf("%s\n", sbuf_data(&sb)); 2161 } 2162 mtx_unlock(&lun->lun_lock); 2163 } 2164 printf("OOA queues dump done\n"); 2165 mtx_unlock(&softc->ctl_lock); 2166 break; 2167 } 2168 case CTL_GET_OOA: { 2169 struct ctl_lun *lun; 2170 struct ctl_ooa *ooa_hdr; 2171 struct ctl_ooa_entry *entries; 2172 uint32_t cur_fill_num; 2173 2174 ooa_hdr = (struct ctl_ooa *)addr; 2175 2176 if ((ooa_hdr->alloc_len == 0) 2177 || (ooa_hdr->alloc_num == 0)) { 2178 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2179 "must be non-zero\n", __func__, 2180 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2181 retval = EINVAL; 2182 break; 2183 } 2184 2185 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2186 sizeof(struct ctl_ooa_entry))) { 2187 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2188 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2189 __func__, ooa_hdr->alloc_len, 2190 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2191 retval = EINVAL; 2192 break; 2193 } 2194 2195 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2196 if (entries == NULL) { 2197 printf("%s: could not allocate %d bytes for OOA " 2198 "dump\n", __func__, ooa_hdr->alloc_len); 2199 retval = ENOMEM; 2200 break; 2201 } 2202 2203 mtx_lock(&softc->ctl_lock); 2204 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0) 2205 && ((ooa_hdr->lun_num >= CTL_MAX_LUNS) 2206 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) { 2207 mtx_unlock(&softc->ctl_lock); 2208 free(entries, M_CTL); 2209 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2210 __func__, (uintmax_t)ooa_hdr->lun_num); 2211 retval = EINVAL; 2212 break; 2213 } 2214 2215 cur_fill_num = 0; 2216 2217 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2218 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2219 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2220 ooa_hdr, entries); 2221 if (retval != 0) 2222 break; 2223 } 2224 if (retval != 0) { 2225 mtx_unlock(&softc->ctl_lock); 2226 free(entries, M_CTL); 2227 break; 2228 } 2229 } else { 2230 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2231 2232 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr, 2233 entries); 2234 } 2235 mtx_unlock(&softc->ctl_lock); 2236 2237 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2238 ooa_hdr->fill_len = ooa_hdr->fill_num * 2239 sizeof(struct ctl_ooa_entry); 2240 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2241 if (retval != 0) { 2242 printf("%s: error copying out %d bytes for OOA dump\n", 2243 __func__, ooa_hdr->fill_len); 2244 } 2245 2246 getbintime(&ooa_hdr->cur_bt); 2247 2248 if (cur_fill_num > ooa_hdr->alloc_num) { 2249 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2250 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2251 } else { 2252 ooa_hdr->dropped_num = 0; 2253 ooa_hdr->status = CTL_OOA_OK; 2254 } 2255 2256 free(entries, M_CTL); 2257 break; 2258 } 2259 case CTL_CHECK_OOA: { 2260 union ctl_io *io; 2261 struct ctl_lun *lun; 2262 struct ctl_ooa_info *ooa_info; 2263 2264 2265 ooa_info = (struct ctl_ooa_info *)addr; 2266 2267 if (ooa_info->lun_id >= CTL_MAX_LUNS) { 2268 ooa_info->status = CTL_OOA_INVALID_LUN; 2269 break; 2270 } 2271 mtx_lock(&softc->ctl_lock); 2272 lun = softc->ctl_luns[ooa_info->lun_id]; 2273 if (lun == NULL) { 2274 mtx_unlock(&softc->ctl_lock); 2275 ooa_info->status = CTL_OOA_INVALID_LUN; 2276 break; 2277 } 2278 mtx_lock(&lun->lun_lock); 2279 mtx_unlock(&softc->ctl_lock); 2280 ooa_info->num_entries = 0; 2281 for (io = (union ctl_io 
*)TAILQ_FIRST(&lun->ooa_queue);
2282          io != NULL; io = (union ctl_io *)TAILQ_NEXT(
2283          &io->io_hdr, ooa_links)) {
2284             ooa_info->num_entries++;
2285         }
2286         mtx_unlock(&lun->lun_lock);
2287 
2288         ooa_info->status = CTL_OOA_SUCCESS;
2289 
2290         break;
2291     }
2292     case CTL_DELAY_IO: {
2293         struct ctl_io_delay_info *delay_info;
2294 #ifdef CTL_IO_DELAY
2295         struct ctl_lun *lun;
2296 #endif /* CTL_IO_DELAY */
2297 
2298         delay_info = (struct ctl_io_delay_info *)addr;
2299 
2300 #ifdef CTL_IO_DELAY
2301         mtx_lock(&softc->ctl_lock);
2302 
2303         if ((delay_info->lun_id >= CTL_MAX_LUNS)
2304             || (softc->ctl_luns[delay_info->lun_id] == NULL)) {
2305             delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
2306         } else {
2307             lun = softc->ctl_luns[delay_info->lun_id];
2308             mtx_lock(&lun->lun_lock);
2309 
2310             delay_info->status = CTL_DELAY_STATUS_OK;
2311 
2312             switch (delay_info->delay_type) {
2313             case CTL_DELAY_TYPE_CONT:
2314                 break;
2315             case CTL_DELAY_TYPE_ONESHOT:
2316                 break;
2317             default:
2318                 delay_info->status =
2319                     CTL_DELAY_STATUS_INVALID_TYPE;
2320                 break;
2321             }
2322 
2323             switch (delay_info->delay_loc) {
2324             case CTL_DELAY_LOC_DATAMOVE:
2325                 lun->delay_info.datamove_type =
2326                     delay_info->delay_type;
2327                 lun->delay_info.datamove_delay =
2328                     delay_info->delay_secs;
2329                 break;
2330             case CTL_DELAY_LOC_DONE:
2331                 lun->delay_info.done_type =
2332                     delay_info->delay_type;
2333                 lun->delay_info.done_delay =
2334                     delay_info->delay_secs;
2335                 break;
2336             default:
2337                 delay_info->status =
2338                     CTL_DELAY_STATUS_INVALID_LOC;
2339                 break;
2340             }
2341             mtx_unlock(&lun->lun_lock);
2342         }
2343 
2344         mtx_unlock(&softc->ctl_lock);
2345 #else
2346         delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED;
2347 #endif /* CTL_IO_DELAY */
2348         break;
2349     }
2350     case CTL_REALSYNC_SET: {
2351         int *syncstate;
2352 
2353         syncstate = (int *)addr;
2354 
2355         mtx_lock(&softc->ctl_lock);
2356         switch (*syncstate) {
2357         case 0:
2358             softc->flags &= ~CTL_FLAG_REAL_SYNC;
2359             break;
2360         case 1:
2361             softc->flags |= CTL_FLAG_REAL_SYNC;
2362             break;
2363         default:
2364             retval = EINVAL;
2365             break;
2366         }
2367         mtx_unlock(&softc->ctl_lock);
2368         break;
2369     }
2370     case CTL_REALSYNC_GET: {
2371         int *syncstate;
2372 
2373         syncstate = (int*)addr;
2374 
2375         mtx_lock(&softc->ctl_lock);
2376         if (softc->flags & CTL_FLAG_REAL_SYNC)
2377             *syncstate = 1;
2378         else
2379             *syncstate = 0;
2380         mtx_unlock(&softc->ctl_lock);
2381 
2382         break;
2383     }
2384     case CTL_SETSYNC:
2385     case CTL_GETSYNC: {
2386         struct ctl_sync_info *sync_info;
2387         struct ctl_lun *lun;
2388 
2389         sync_info = (struct ctl_sync_info *)addr;
2390 
2391         mtx_lock(&softc->ctl_lock);
2392         lun = softc->ctl_luns[sync_info->lun_id];
2393         if (lun == NULL) {
2394             mtx_unlock(&softc->ctl_lock);
2395             sync_info->status = CTL_GS_SYNC_NO_LUN;
            break;
2396         }
2397         /*
2398          * Get or set the sync interval.  We're not bounds checking
2399          * in the set case, hopefully the user won't do something
2400          * silly.
2401 */ 2402 mtx_lock(&lun->lun_lock); 2403 mtx_unlock(&softc->ctl_lock); 2404 if (cmd == CTL_GETSYNC) 2405 sync_info->sync_interval = lun->sync_interval; 2406 else 2407 lun->sync_interval = sync_info->sync_interval; 2408 mtx_unlock(&lun->lun_lock); 2409 2410 sync_info->status = CTL_GS_SYNC_OK; 2411 2412 break; 2413 } 2414 case CTL_GETSTATS: { 2415 struct ctl_stats *stats; 2416 struct ctl_lun *lun; 2417 int i; 2418 2419 stats = (struct ctl_stats *)addr; 2420 2421 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) > 2422 stats->alloc_len) { 2423 stats->status = CTL_SS_NEED_MORE_SPACE; 2424 stats->num_luns = softc->num_luns; 2425 break; 2426 } 2427 /* 2428 * XXX KDM no locking here. If the LUN list changes, 2429 * things can blow up. 2430 */ 2431 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; 2432 i++, lun = STAILQ_NEXT(lun, links)) { 2433 retval = copyout(&lun->stats, &stats->lun_stats[i], 2434 sizeof(lun->stats)); 2435 if (retval != 0) 2436 break; 2437 } 2438 stats->num_luns = softc->num_luns; 2439 stats->fill_len = sizeof(struct ctl_lun_io_stats) * 2440 softc->num_luns; 2441 stats->status = CTL_SS_OK; 2442 #ifdef CTL_TIME_IO 2443 stats->flags = CTL_STATS_FLAG_TIME_VALID; 2444 #else 2445 stats->flags = CTL_STATS_FLAG_NONE; 2446 #endif 2447 getnanouptime(&stats->timestamp); 2448 break; 2449 } 2450 case CTL_ERROR_INJECT: { 2451 struct ctl_error_desc *err_desc, *new_err_desc; 2452 struct ctl_lun *lun; 2453 2454 err_desc = (struct ctl_error_desc *)addr; 2455 2456 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2457 M_WAITOK | M_ZERO); 2458 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2459 2460 mtx_lock(&softc->ctl_lock); 2461 lun = softc->ctl_luns[err_desc->lun_id]; 2462 if (lun == NULL) { 2463 mtx_unlock(&softc->ctl_lock); 2464 free(new_err_desc, M_CTL); 2465 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2466 __func__, (uintmax_t)err_desc->lun_id); 2467 retval = EINVAL; 2468 break; 2469 } 2470 mtx_lock(&lun->lun_lock); 2471 mtx_unlock(&softc->ctl_lock); 2472 2473 /* 2474 * We could do some checking here to verify the validity 2475 * of the request, but given the complexity of error 2476 * injection requests, the checking logic would be fairly 2477 * complex. 2478 * 2479 * For now, if the request is invalid, it just won't get 2480 * executed and might get deleted. 2481 */ 2482 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2483 2484 /* 2485 * XXX KDM check to make sure the serial number is unique, 2486 * in case we somehow manage to wrap. That shouldn't 2487 * happen for a very long time, but it's the right thing to 2488 * do. 
2489 */ 2490 new_err_desc->serial = lun->error_serial; 2491 err_desc->serial = lun->error_serial; 2492 lun->error_serial++; 2493 2494 mtx_unlock(&lun->lun_lock); 2495 break; 2496 } 2497 case CTL_ERROR_INJECT_DELETE: { 2498 struct ctl_error_desc *delete_desc, *desc, *desc2; 2499 struct ctl_lun *lun; 2500 int delete_done; 2501 2502 delete_desc = (struct ctl_error_desc *)addr; 2503 delete_done = 0; 2504 2505 mtx_lock(&softc->ctl_lock); 2506 lun = softc->ctl_luns[delete_desc->lun_id]; 2507 if (lun == NULL) { 2508 mtx_unlock(&softc->ctl_lock); 2509 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2510 __func__, (uintmax_t)delete_desc->lun_id); 2511 retval = EINVAL; 2512 break; 2513 } 2514 mtx_lock(&lun->lun_lock); 2515 mtx_unlock(&softc->ctl_lock); 2516 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2517 if (desc->serial != delete_desc->serial) 2518 continue; 2519 2520 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2521 links); 2522 free(desc, M_CTL); 2523 delete_done = 1; 2524 } 2525 mtx_unlock(&lun->lun_lock); 2526 if (delete_done == 0) { 2527 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2528 "error serial %ju on LUN %u\n", __func__, 2529 delete_desc->serial, delete_desc->lun_id); 2530 retval = EINVAL; 2531 break; 2532 } 2533 break; 2534 } 2535 case CTL_DUMP_STRUCTS: { 2536 int i, j, k; 2537 struct ctl_port *port; 2538 struct ctl_frontend *fe; 2539 2540 mtx_lock(&softc->ctl_lock); 2541 printf("CTL Persistent Reservation information start:\n"); 2542 for (i = 0; i < CTL_MAX_LUNS; i++) { 2543 struct ctl_lun *lun; 2544 2545 lun = softc->ctl_luns[i]; 2546 2547 if ((lun == NULL) 2548 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 2549 continue; 2550 2551 for (j = 0; j < (CTL_MAX_PORTS * 2); j++) { 2552 if (lun->pr_keys[j] == NULL) 2553 continue; 2554 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2555 if (lun->pr_keys[j][k] == 0) 2556 continue; 2557 printf(" LUN %d port %d iid %d key " 2558 "%#jx\n", i, j, k, 2559 (uintmax_t)lun->pr_keys[j][k]); 2560 } 2561 } 2562 } 2563 printf("CTL Persistent Reservation information end\n"); 2564 printf("CTL Ports:\n"); 2565 STAILQ_FOREACH(port, &softc->port_list, links) { 2566 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2567 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2568 port->frontend->name, port->port_type, 2569 port->physical_port, port->virtual_port, 2570 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2571 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2572 if (port->wwpn_iid[j].in_use == 0 && 2573 port->wwpn_iid[j].wwpn == 0 && 2574 port->wwpn_iid[j].name == NULL) 2575 continue; 2576 2577 printf(" iid %u use %d WWPN %#jx '%s'\n", 2578 j, port->wwpn_iid[j].in_use, 2579 (uintmax_t)port->wwpn_iid[j].wwpn, 2580 port->wwpn_iid[j].name); 2581 } 2582 } 2583 printf("CTL Port information end\n"); 2584 mtx_unlock(&softc->ctl_lock); 2585 /* 2586 * XXX KDM calling this without a lock. We'd likely want 2587 * to drop the lock before calling the frontend's dump 2588 * routine anyway. 
2589 */ 2590 printf("CTL Frontends:\n"); 2591 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2592 printf(" Frontend '%s'\n", fe->name); 2593 if (fe->fe_dump != NULL) 2594 fe->fe_dump(); 2595 } 2596 printf("CTL Frontend information end\n"); 2597 break; 2598 } 2599 case CTL_LUN_REQ: { 2600 struct ctl_lun_req *lun_req; 2601 struct ctl_backend_driver *backend; 2602 2603 lun_req = (struct ctl_lun_req *)addr; 2604 2605 backend = ctl_backend_find(lun_req->backend); 2606 if (backend == NULL) { 2607 lun_req->status = CTL_LUN_ERROR; 2608 snprintf(lun_req->error_str, 2609 sizeof(lun_req->error_str), 2610 "Backend \"%s\" not found.", 2611 lun_req->backend); 2612 break; 2613 } 2614 if (lun_req->num_be_args > 0) { 2615 lun_req->kern_be_args = ctl_copyin_args( 2616 lun_req->num_be_args, 2617 lun_req->be_args, 2618 lun_req->error_str, 2619 sizeof(lun_req->error_str)); 2620 if (lun_req->kern_be_args == NULL) { 2621 lun_req->status = CTL_LUN_ERROR; 2622 break; 2623 } 2624 } 2625 2626 retval = backend->ioctl(dev, cmd, addr, flag, td); 2627 2628 if (lun_req->num_be_args > 0) { 2629 ctl_copyout_args(lun_req->num_be_args, 2630 lun_req->kern_be_args); 2631 ctl_free_args(lun_req->num_be_args, 2632 lun_req->kern_be_args); 2633 } 2634 break; 2635 } 2636 case CTL_LUN_LIST: { 2637 struct sbuf *sb; 2638 struct ctl_lun *lun; 2639 struct ctl_lun_list *list; 2640 struct ctl_option *opt; 2641 2642 list = (struct ctl_lun_list *)addr; 2643 2644 /* 2645 * Allocate a fixed length sbuf here, based on the length 2646 * of the user's buffer. We could allocate an auto-extending 2647 * buffer, and then tell the user how much larger our 2648 * amount of data is than his buffer, but that presents 2649 * some problems: 2650 * 2651 * 1. The sbuf(9) routines use a blocking malloc, and so 2652 * we can't hold a lock while calling them with an 2653 * auto-extending buffer. 2654 * 2655 * 2. There is not currently a LUN reference counting 2656 * mechanism, outside of outstanding transactions on 2657 * the LUN's OOA queue. So a LUN could go away on us 2658 * while we're getting the LUN number, backend-specific 2659 * information, etc. Thus, given the way things 2660 * currently work, we need to hold the CTL lock while 2661 * grabbing LUN information. 2662 * 2663 * So, from the user's standpoint, the best thing to do is 2664 * allocate what he thinks is a reasonable buffer length, 2665 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 2666 * double the buffer length and try again. (And repeat 2667 * that until he succeeds.) 2668 */ 2669 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 2670 if (sb == NULL) { 2671 list->status = CTL_LUN_LIST_ERROR; 2672 snprintf(list->error_str, sizeof(list->error_str), 2673 "Unable to allocate %d bytes for LUN list", 2674 list->alloc_len); 2675 break; 2676 } 2677 2678 sbuf_printf(sb, "<ctllunlist>\n"); 2679 2680 mtx_lock(&softc->ctl_lock); 2681 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2682 mtx_lock(&lun->lun_lock); 2683 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 2684 (uintmax_t)lun->lun); 2685 2686 /* 2687 * Bail out as soon as we see that we've overfilled 2688 * the buffer. 2689 */ 2690 if (retval != 0) 2691 break; 2692 2693 retval = sbuf_printf(sb, "\t<backend_type>%s" 2694 "</backend_type>\n", 2695 (lun->backend == NULL) ? 
"none" : 2696 lun->backend->name); 2697 2698 if (retval != 0) 2699 break; 2700 2701 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 2702 lun->be_lun->lun_type); 2703 2704 if (retval != 0) 2705 break; 2706 2707 if (lun->backend == NULL) { 2708 retval = sbuf_printf(sb, "</lun>\n"); 2709 if (retval != 0) 2710 break; 2711 continue; 2712 } 2713 2714 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 2715 (lun->be_lun->maxlba > 0) ? 2716 lun->be_lun->maxlba + 1 : 0); 2717 2718 if (retval != 0) 2719 break; 2720 2721 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 2722 lun->be_lun->blocksize); 2723 2724 if (retval != 0) 2725 break; 2726 2727 retval = sbuf_printf(sb, "\t<serial_number>"); 2728 2729 if (retval != 0) 2730 break; 2731 2732 retval = ctl_sbuf_printf_esc(sb, 2733 lun->be_lun->serial_num, 2734 sizeof(lun->be_lun->serial_num)); 2735 2736 if (retval != 0) 2737 break; 2738 2739 retval = sbuf_printf(sb, "</serial_number>\n"); 2740 2741 if (retval != 0) 2742 break; 2743 2744 retval = sbuf_printf(sb, "\t<device_id>"); 2745 2746 if (retval != 0) 2747 break; 2748 2749 retval = ctl_sbuf_printf_esc(sb, 2750 lun->be_lun->device_id, 2751 sizeof(lun->be_lun->device_id)); 2752 2753 if (retval != 0) 2754 break; 2755 2756 retval = sbuf_printf(sb, "</device_id>\n"); 2757 2758 if (retval != 0) 2759 break; 2760 2761 if (lun->backend->lun_info != NULL) { 2762 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 2763 if (retval != 0) 2764 break; 2765 } 2766 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 2767 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 2768 opt->name, opt->value, opt->name); 2769 if (retval != 0) 2770 break; 2771 } 2772 2773 retval = sbuf_printf(sb, "</lun>\n"); 2774 2775 if (retval != 0) 2776 break; 2777 mtx_unlock(&lun->lun_lock); 2778 } 2779 if (lun != NULL) 2780 mtx_unlock(&lun->lun_lock); 2781 mtx_unlock(&softc->ctl_lock); 2782 2783 if ((retval != 0) 2784 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 2785 retval = 0; 2786 sbuf_delete(sb); 2787 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 2788 snprintf(list->error_str, sizeof(list->error_str), 2789 "Out of space, %d bytes is too small", 2790 list->alloc_len); 2791 break; 2792 } 2793 2794 sbuf_finish(sb); 2795 2796 retval = copyout(sbuf_data(sb), list->lun_xml, 2797 sbuf_len(sb) + 1); 2798 2799 list->fill_len = sbuf_len(sb) + 1; 2800 list->status = CTL_LUN_LIST_OK; 2801 sbuf_delete(sb); 2802 break; 2803 } 2804 case CTL_ISCSI: { 2805 struct ctl_iscsi *ci; 2806 struct ctl_frontend *fe; 2807 2808 ci = (struct ctl_iscsi *)addr; 2809 2810 fe = ctl_frontend_find("iscsi"); 2811 if (fe == NULL) { 2812 ci->status = CTL_ISCSI_ERROR; 2813 snprintf(ci->error_str, sizeof(ci->error_str), 2814 "Frontend \"iscsi\" not found."); 2815 break; 2816 } 2817 2818 retval = fe->ioctl(dev, cmd, addr, flag, td); 2819 break; 2820 } 2821 case CTL_PORT_REQ: { 2822 struct ctl_req *req; 2823 struct ctl_frontend *fe; 2824 2825 req = (struct ctl_req *)addr; 2826 2827 fe = ctl_frontend_find(req->driver); 2828 if (fe == NULL) { 2829 req->status = CTL_LUN_ERROR; 2830 snprintf(req->error_str, sizeof(req->error_str), 2831 "Frontend \"%s\" not found.", req->driver); 2832 break; 2833 } 2834 if (req->num_args > 0) { 2835 req->kern_args = ctl_copyin_args(req->num_args, 2836 req->args, req->error_str, sizeof(req->error_str)); 2837 if (req->kern_args == NULL) { 2838 req->status = CTL_LUN_ERROR; 2839 break; 2840 } 2841 } 2842 2843 retval = fe->ioctl(dev, cmd, addr, flag, td); 2844 2845 if (req->num_args > 0) { 2846 ctl_copyout_args(req->num_args, 
req->kern_args); 2847 ctl_free_args(req->num_args, req->kern_args); 2848 } 2849 break; 2850 } 2851 case CTL_PORT_LIST: { 2852 struct sbuf *sb; 2853 struct ctl_port *port; 2854 struct ctl_lun_list *list; 2855 struct ctl_option *opt; 2856 int j; 2857 uint32_t plun; 2858 2859 list = (struct ctl_lun_list *)addr; 2860 2861 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 2862 if (sb == NULL) { 2863 list->status = CTL_LUN_LIST_ERROR; 2864 snprintf(list->error_str, sizeof(list->error_str), 2865 "Unable to allocate %d bytes for LUN list", 2866 list->alloc_len); 2867 break; 2868 } 2869 2870 sbuf_printf(sb, "<ctlportlist>\n"); 2871 2872 mtx_lock(&softc->ctl_lock); 2873 STAILQ_FOREACH(port, &softc->port_list, links) { 2874 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 2875 (uintmax_t)port->targ_port); 2876 2877 /* 2878 * Bail out as soon as we see that we've overfilled 2879 * the buffer. 2880 */ 2881 if (retval != 0) 2882 break; 2883 2884 retval = sbuf_printf(sb, "\t<frontend_type>%s" 2885 "</frontend_type>\n", port->frontend->name); 2886 if (retval != 0) 2887 break; 2888 2889 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 2890 port->port_type); 2891 if (retval != 0) 2892 break; 2893 2894 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 2895 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 2896 if (retval != 0) 2897 break; 2898 2899 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 2900 port->port_name); 2901 if (retval != 0) 2902 break; 2903 2904 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 2905 port->physical_port); 2906 if (retval != 0) 2907 break; 2908 2909 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 2910 port->virtual_port); 2911 if (retval != 0) 2912 break; 2913 2914 if (port->target_devid != NULL) { 2915 sbuf_printf(sb, "\t<target>"); 2916 ctl_id_sbuf(port->target_devid, sb); 2917 sbuf_printf(sb, "</target>\n"); 2918 } 2919 2920 if (port->port_devid != NULL) { 2921 sbuf_printf(sb, "\t<port>"); 2922 ctl_id_sbuf(port->port_devid, sb); 2923 sbuf_printf(sb, "</port>\n"); 2924 } 2925 2926 if (port->port_info != NULL) { 2927 retval = port->port_info(port->onoff_arg, sb); 2928 if (retval != 0) 2929 break; 2930 } 2931 STAILQ_FOREACH(opt, &port->options, links) { 2932 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 2933 opt->name, opt->value, opt->name); 2934 if (retval != 0) 2935 break; 2936 } 2937 2938 if (port->lun_map != NULL) { 2939 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 2940 for (j = 0; j < CTL_MAX_LUNS; j++) { 2941 plun = ctl_lun_map_from_port(port, j); 2942 if (plun >= CTL_MAX_LUNS) 2943 continue; 2944 sbuf_printf(sb, 2945 "\t<lun id=\"%u\">%u</lun>\n", 2946 j, plun); 2947 } 2948 } 2949 2950 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2951 if (port->wwpn_iid[j].in_use == 0 || 2952 (port->wwpn_iid[j].wwpn == 0 && 2953 port->wwpn_iid[j].name == NULL)) 2954 continue; 2955 2956 if (port->wwpn_iid[j].name != NULL) 2957 retval = sbuf_printf(sb, 2958 "\t<initiator id=\"%u\">%s</initiator>\n", 2959 j, port->wwpn_iid[j].name); 2960 else 2961 retval = sbuf_printf(sb, 2962 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 2963 j, port->wwpn_iid[j].wwpn); 2964 if (retval != 0) 2965 break; 2966 } 2967 if (retval != 0) 2968 break; 2969 2970 retval = sbuf_printf(sb, "</targ_port>\n"); 2971 if (retval != 0) 2972 break; 2973 } 2974 mtx_unlock(&softc->ctl_lock); 2975 2976 if ((retval != 0) 2977 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 2978 retval = 0; 2979 sbuf_delete(sb); 2980 list->status = 
CTL_LUN_LIST_NEED_MORE_SPACE; 2981 snprintf(list->error_str, sizeof(list->error_str), 2982 "Out of space, %d bytes is too small", 2983 list->alloc_len); 2984 break; 2985 } 2986 2987 sbuf_finish(sb); 2988 2989 retval = copyout(sbuf_data(sb), list->lun_xml, 2990 sbuf_len(sb) + 1); 2991 2992 list->fill_len = sbuf_len(sb) + 1; 2993 list->status = CTL_LUN_LIST_OK; 2994 sbuf_delete(sb); 2995 break; 2996 } 2997 case CTL_LUN_MAP: { 2998 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 2999 struct ctl_port *port; 3000 3001 mtx_lock(&softc->ctl_lock); 3002 if (lm->port >= CTL_MAX_PORTS || 3003 (port = softc->ctl_ports[lm->port]) == NULL) { 3004 mtx_unlock(&softc->ctl_lock); 3005 return (ENXIO); 3006 } 3007 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3008 if (lm->plun < CTL_MAX_LUNS) { 3009 if (lm->lun == UINT32_MAX) 3010 retval = ctl_lun_map_unset(port, lm->plun); 3011 else if (lm->lun < CTL_MAX_LUNS && 3012 softc->ctl_luns[lm->lun] != NULL) 3013 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3014 else 3015 return (ENXIO); 3016 } else if (lm->plun == UINT32_MAX) { 3017 if (lm->lun == UINT32_MAX) 3018 retval = ctl_lun_map_deinit(port); 3019 else 3020 retval = ctl_lun_map_init(port); 3021 } else 3022 return (ENXIO); 3023 break; 3024 } 3025 default: { 3026 /* XXX KDM should we fix this? */ 3027 #if 0 3028 struct ctl_backend_driver *backend; 3029 unsigned int type; 3030 int found; 3031 3032 found = 0; 3033 3034 /* 3035 * We encode the backend type as the ioctl type for backend 3036 * ioctls. So parse it out here, and then search for a 3037 * backend of this type. 3038 */ 3039 type = _IOC_TYPE(cmd); 3040 3041 STAILQ_FOREACH(backend, &softc->be_list, links) { 3042 if (backend->type == type) { 3043 found = 1; 3044 break; 3045 } 3046 } 3047 if (found == 0) { 3048 printf("ctl: unknown ioctl command %#lx or backend " 3049 "%d\n", cmd, type); 3050 retval = EINVAL; 3051 break; 3052 } 3053 retval = backend->ioctl(dev, cmd, addr, flag, td); 3054 #endif 3055 retval = ENOTTY; 3056 break; 3057 } 3058 } 3059 return (retval); 3060 } 3061 3062 uint32_t 3063 ctl_get_initindex(struct ctl_nexus *nexus) 3064 { 3065 if (nexus->targ_port < CTL_MAX_PORTS) 3066 return (nexus->initid.id + 3067 (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3068 else 3069 return (nexus->initid.id + 3070 ((nexus->targ_port - CTL_MAX_PORTS) * 3071 CTL_MAX_INIT_PER_PORT)); 3072 } 3073 3074 uint32_t 3075 ctl_get_resindex(struct ctl_nexus *nexus) 3076 { 3077 return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3078 } 3079 3080 uint32_t 3081 ctl_port_idx(int port_num) 3082 { 3083 if (port_num < CTL_MAX_PORTS) 3084 return(port_num); 3085 else 3086 return(port_num - CTL_MAX_PORTS); 3087 } 3088 3089 int 3090 ctl_lun_map_init(struct ctl_port *port) 3091 { 3092 struct ctl_softc *softc = control_softc; 3093 struct ctl_lun *lun; 3094 uint32_t i; 3095 3096 if (port->lun_map == NULL) 3097 port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS, 3098 M_CTL, M_NOWAIT); 3099 if (port->lun_map == NULL) 3100 return (ENOMEM); 3101 for (i = 0; i < CTL_MAX_LUNS; i++) 3102 port->lun_map[i] = UINT32_MAX; 3103 if (port->status & CTL_PORT_STATUS_ONLINE) { 3104 STAILQ_FOREACH(lun, &softc->lun_list, links) 3105 port->lun_disable(port->targ_lun_arg, lun->lun); 3106 } 3107 return (0); 3108 } 3109 3110 int 3111 ctl_lun_map_deinit(struct ctl_port *port) 3112 { 3113 struct ctl_softc *softc = control_softc; 3114 struct ctl_lun *lun; 3115 3116 if (port->lun_map == NULL) 3117 return (0); 3118 free(port->lun_map, M_CTL); 3119 port->lun_map = 
NULL; 3120 if (port->status & CTL_PORT_STATUS_ONLINE) { 3121 STAILQ_FOREACH(lun, &softc->lun_list, links) 3122 port->lun_enable(port->targ_lun_arg, lun->lun); 3123 } 3124 return (0); 3125 } 3126 3127 int 3128 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3129 { 3130 int status; 3131 uint32_t old; 3132 3133 if (port->lun_map == NULL) { 3134 status = ctl_lun_map_init(port); 3135 if (status != 0) 3136 return (status); 3137 } 3138 old = port->lun_map[plun]; 3139 port->lun_map[plun] = glun; 3140 if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS) 3141 port->lun_enable(port->targ_lun_arg, plun); 3142 return (0); 3143 } 3144 3145 int 3146 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3147 { 3148 uint32_t old; 3149 3150 if (port->lun_map == NULL) 3151 return (0); 3152 old = port->lun_map[plun]; 3153 port->lun_map[plun] = UINT32_MAX; 3154 if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS) 3155 port->lun_disable(port->targ_lun_arg, plun); 3156 return (0); 3157 } 3158 3159 uint32_t 3160 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3161 { 3162 3163 if (port == NULL) 3164 return (UINT32_MAX); 3165 if (port->lun_map == NULL || lun_id >= CTL_MAX_LUNS) 3166 return (lun_id); 3167 return (port->lun_map[lun_id]); 3168 } 3169 3170 uint32_t 3171 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3172 { 3173 uint32_t i; 3174 3175 if (port == NULL) 3176 return (UINT32_MAX); 3177 if (port->lun_map == NULL) 3178 return (lun_id); 3179 for (i = 0; i < CTL_MAX_LUNS; i++) { 3180 if (port->lun_map[i] == lun_id) 3181 return (i); 3182 } 3183 return (UINT32_MAX); 3184 } 3185 3186 static struct ctl_port * 3187 ctl_io_port(struct ctl_io_hdr *io_hdr) 3188 { 3189 int port_num; 3190 3191 port_num = io_hdr->nexus.targ_port; 3192 return (control_softc->ctl_ports[ctl_port_idx(port_num)]); 3193 } 3194 3195 /* 3196 * Note: This only works for bitmask sizes that are at least 32 bits, and 3197 * that are a power of 2. 
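 *
 * Illustrative sketch (added note, not from the original source): later in
 * this file LUN numbers are allocated with exactly this pair of helpers,
 * roughly as follows, assuming a mask that is CTL_MAX_LUNS bits wide:
 *
 *	int id;
 *
 *	id = ctl_ffz(softc->ctl_lun_mask, CTL_MAX_LUNS);
 *	if (id == -1)
 *		return (ENOSPC);	(every bit is already set)
 *	ctl_set_mask(softc->ctl_lun_mask, id);	(claim the free slot)
 *
 * and ctl_clear_mask(softc->ctl_lun_mask, id) releases the slot again at
 * teardown time.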
3198 */ 3199 int 3200 ctl_ffz(uint32_t *mask, uint32_t size) 3201 { 3202 uint32_t num_chunks, num_pieces; 3203 int i, j; 3204 3205 num_chunks = (size >> 5); 3206 if (num_chunks == 0) 3207 num_chunks++; 3208 num_pieces = MIN((sizeof(uint32_t) * 8), size); 3209 3210 for (i = 0; i < num_chunks; i++) { 3211 for (j = 0; j < num_pieces; j++) { 3212 if ((mask[i] & (1 << j)) == 0) 3213 return ((i << 5) + j); 3214 } 3215 } 3216 3217 return (-1); 3218 } 3219 3220 int 3221 ctl_set_mask(uint32_t *mask, uint32_t bit) 3222 { 3223 uint32_t chunk, piece; 3224 3225 chunk = bit >> 5; 3226 piece = bit % (sizeof(uint32_t) * 8); 3227 3228 if ((mask[chunk] & (1 << piece)) != 0) 3229 return (-1); 3230 else 3231 mask[chunk] |= (1 << piece); 3232 3233 return (0); 3234 } 3235 3236 int 3237 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3238 { 3239 uint32_t chunk, piece; 3240 3241 chunk = bit >> 5; 3242 piece = bit % (sizeof(uint32_t) * 8); 3243 3244 if ((mask[chunk] & (1 << piece)) == 0) 3245 return (-1); 3246 else 3247 mask[chunk] &= ~(1 << piece); 3248 3249 return (0); 3250 } 3251 3252 int 3253 ctl_is_set(uint32_t *mask, uint32_t bit) 3254 { 3255 uint32_t chunk, piece; 3256 3257 chunk = bit >> 5; 3258 piece = bit % (sizeof(uint32_t) * 8); 3259 3260 if ((mask[chunk] & (1 << piece)) == 0) 3261 return (0); 3262 else 3263 return (1); 3264 } 3265 3266 static uint64_t 3267 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3268 { 3269 uint64_t *t; 3270 3271 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3272 if (t == NULL) 3273 return (0); 3274 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3275 } 3276 3277 static void 3278 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3279 { 3280 uint64_t *t; 3281 3282 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3283 if (t == NULL) 3284 return; 3285 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3286 } 3287 3288 static void 3289 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3290 { 3291 uint64_t *p; 3292 u_int i; 3293 3294 i = residx/CTL_MAX_INIT_PER_PORT; 3295 if (lun->pr_keys[i] != NULL) 3296 return; 3297 mtx_unlock(&lun->lun_lock); 3298 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3299 M_WAITOK | M_ZERO); 3300 mtx_lock(&lun->lun_lock); 3301 if (lun->pr_keys[i] == NULL) 3302 lun->pr_keys[i] = p; 3303 else 3304 free(p, M_CTL); 3305 } 3306 3307 static void 3308 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3309 { 3310 uint64_t *t; 3311 3312 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3313 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3314 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3315 } 3316 3317 /* 3318 * ctl_softc, pool_name, total_ctl_io are passed in. 3319 * npool is passed out. 
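 *
 * Hedged usage sketch (hypothetical frontend code, not taken from this
 * file; the pool name "myfe" and the count 1024 are made up).  The pool
 * reference returned through npool is treated as an opaque cookie:
 *
 *	void *pool;
 *	union ctl_io *io;
 *
 *	if (ctl_pool_create(softc, "myfe", 1024, &pool) != 0)
 *		(fail the frontend attach)
 *	io = ctl_alloc_io(pool);	(M_WAITOK, may sleep)
 *	(fill in and submit the I/O, then release it)
 *	ctl_free_io(io);
 *	ctl_pool_free((struct ctl_io_pool *)pool);	(at detach time)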
3320 */ 3321 int 3322 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3323 uint32_t total_ctl_io, void **npool) 3324 { 3325 #ifdef IO_POOLS 3326 struct ctl_io_pool *pool; 3327 3328 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3329 M_NOWAIT | M_ZERO); 3330 if (pool == NULL) 3331 return (ENOMEM); 3332 3333 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3334 pool->ctl_softc = ctl_softc; 3335 pool->zone = uma_zsecond_create(pool->name, NULL, 3336 NULL, NULL, NULL, ctl_softc->io_zone); 3337 /* uma_prealloc(pool->zone, total_ctl_io); */ 3338 3339 *npool = pool; 3340 #else 3341 *npool = ctl_softc->io_zone; 3342 #endif 3343 return (0); 3344 } 3345 3346 void 3347 ctl_pool_free(struct ctl_io_pool *pool) 3348 { 3349 3350 if (pool == NULL) 3351 return; 3352 3353 #ifdef IO_POOLS 3354 uma_zdestroy(pool->zone); 3355 free(pool, M_CTL); 3356 #endif 3357 } 3358 3359 union ctl_io * 3360 ctl_alloc_io(void *pool_ref) 3361 { 3362 union ctl_io *io; 3363 #ifdef IO_POOLS 3364 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3365 3366 io = uma_zalloc(pool->zone, M_WAITOK); 3367 #else 3368 io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK); 3369 #endif 3370 if (io != NULL) 3371 io->io_hdr.pool = pool_ref; 3372 return (io); 3373 } 3374 3375 union ctl_io * 3376 ctl_alloc_io_nowait(void *pool_ref) 3377 { 3378 union ctl_io *io; 3379 #ifdef IO_POOLS 3380 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3381 3382 io = uma_zalloc(pool->zone, M_NOWAIT); 3383 #else 3384 io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT); 3385 #endif 3386 if (io != NULL) 3387 io->io_hdr.pool = pool_ref; 3388 return (io); 3389 } 3390 3391 void 3392 ctl_free_io(union ctl_io *io) 3393 { 3394 #ifdef IO_POOLS 3395 struct ctl_io_pool *pool; 3396 #endif 3397 3398 if (io == NULL) 3399 return; 3400 3401 #ifdef IO_POOLS 3402 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3403 uma_zfree(pool->zone, io); 3404 #else 3405 uma_zfree((uma_zone_t)io->io_hdr.pool, io); 3406 #endif 3407 } 3408 3409 void 3410 ctl_zero_io(union ctl_io *io) 3411 { 3412 void *pool_ref; 3413 3414 if (io == NULL) 3415 return; 3416 3417 /* 3418 * May need to preserve linked list pointers at some point too. 3419 */ 3420 pool_ref = io->io_hdr.pool; 3421 memset(io, 0, sizeof(*io)); 3422 io->io_hdr.pool = pool_ref; 3423 } 3424 3425 /* 3426 * This routine is currently used for internal copies of ctl_ios that need 3427 * to persist for some reason after we've already returned status to the 3428 * FETD. (Thus the flag set.) 3429 * 3430 * XXX XXX 3431 * Note that this makes a blind copy of all fields in the ctl_io, except 3432 * for the pool reference. This includes any memory that has been 3433 * allocated! That memory will no longer be valid after done has been 3434 * called, so this would be VERY DANGEROUS for command that actually does 3435 * any reads or writes. Right now (11/7/2005), this is only used for immediate 3436 * start and stop commands, which don't transfer any data, so this is not a 3437 * problem. If it is used for anything else, the caller would also need to 3438 * allocate data buffer space and this routine would need to be modified to 3439 * copy the data buffer(s) as well. 3440 */ 3441 void 3442 ctl_copy_io(union ctl_io *src, union ctl_io *dest) 3443 { 3444 void *pool_ref; 3445 3446 if ((src == NULL) 3447 || (dest == NULL)) 3448 return; 3449 3450 /* 3451 * May need to preserve linked list pointers at some point too. 
3452 */ 3453 pool_ref = dest->io_hdr.pool; 3454 3455 memcpy(dest, src, MIN(sizeof(*src), sizeof(*dest))); 3456 3457 dest->io_hdr.pool = pool_ref; 3458 /* 3459 * We need to know that this is an internal copy, and doesn't need 3460 * to get passed back to the FETD that allocated it. 3461 */ 3462 dest->io_hdr.flags |= CTL_FLAG_INT_COPY; 3463 } 3464 3465 int 3466 ctl_expand_number(const char *buf, uint64_t *num) 3467 { 3468 char *endptr; 3469 uint64_t number; 3470 unsigned shift; 3471 3472 number = strtoq(buf, &endptr, 0); 3473 3474 switch (tolower((unsigned char)*endptr)) { 3475 case 'e': 3476 shift = 60; 3477 break; 3478 case 'p': 3479 shift = 50; 3480 break; 3481 case 't': 3482 shift = 40; 3483 break; 3484 case 'g': 3485 shift = 30; 3486 break; 3487 case 'm': 3488 shift = 20; 3489 break; 3490 case 'k': 3491 shift = 10; 3492 break; 3493 case 'b': 3494 case '\0': /* No unit. */ 3495 *num = number; 3496 return (0); 3497 default: 3498 /* Unrecognized unit. */ 3499 return (-1); 3500 } 3501 3502 if ((number << shift) >> shift != number) { 3503 /* Overflow */ 3504 return (-1); 3505 } 3506 *num = number << shift; 3507 return (0); 3508 } 3509 3510 3511 /* 3512 * This routine could be used in the future to load default and/or saved 3513 * mode page parameters for a particuar lun. 3514 */ 3515 static int 3516 ctl_init_page_index(struct ctl_lun *lun) 3517 { 3518 int i; 3519 struct ctl_page_index *page_index; 3520 const char *value; 3521 uint64_t ival; 3522 3523 memcpy(&lun->mode_pages.index, page_index_template, 3524 sizeof(page_index_template)); 3525 3526 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 3527 3528 page_index = &lun->mode_pages.index[i]; 3529 /* 3530 * If this is a disk-only mode page, there's no point in 3531 * setting it up. For some pages, we have to have some 3532 * basic information about the disk in order to calculate the 3533 * mode page data. 3534 */ 3535 if ((lun->be_lun->lun_type != T_DIRECT) 3536 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 3537 continue; 3538 3539 switch (page_index->page_code & SMPH_PC_MASK) { 3540 case SMS_RW_ERROR_RECOVERY_PAGE: { 3541 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3542 panic("subpage is incorrect!"); 3543 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 3544 &rw_er_page_default, 3545 sizeof(rw_er_page_default)); 3546 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 3547 &rw_er_page_changeable, 3548 sizeof(rw_er_page_changeable)); 3549 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 3550 &rw_er_page_default, 3551 sizeof(rw_er_page_default)); 3552 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 3553 &rw_er_page_default, 3554 sizeof(rw_er_page_default)); 3555 page_index->page_data = 3556 (uint8_t *)lun->mode_pages.rw_er_page; 3557 break; 3558 } 3559 case SMS_FORMAT_DEVICE_PAGE: { 3560 struct scsi_format_page *format_page; 3561 3562 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3563 panic("subpage is incorrect!"); 3564 3565 /* 3566 * Sectors per track are set above. Bytes per 3567 * sector need to be set here on a per-LUN basis. 
3568 */ 3569 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 3570 &format_page_default, 3571 sizeof(format_page_default)); 3572 memcpy(&lun->mode_pages.format_page[ 3573 CTL_PAGE_CHANGEABLE], &format_page_changeable, 3574 sizeof(format_page_changeable)); 3575 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 3576 &format_page_default, 3577 sizeof(format_page_default)); 3578 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 3579 &format_page_default, 3580 sizeof(format_page_default)); 3581 3582 format_page = &lun->mode_pages.format_page[ 3583 CTL_PAGE_CURRENT]; 3584 scsi_ulto2b(lun->be_lun->blocksize, 3585 format_page->bytes_per_sector); 3586 3587 format_page = &lun->mode_pages.format_page[ 3588 CTL_PAGE_DEFAULT]; 3589 scsi_ulto2b(lun->be_lun->blocksize, 3590 format_page->bytes_per_sector); 3591 3592 format_page = &lun->mode_pages.format_page[ 3593 CTL_PAGE_SAVED]; 3594 scsi_ulto2b(lun->be_lun->blocksize, 3595 format_page->bytes_per_sector); 3596 3597 page_index->page_data = 3598 (uint8_t *)lun->mode_pages.format_page; 3599 break; 3600 } 3601 case SMS_RIGID_DISK_PAGE: { 3602 struct scsi_rigid_disk_page *rigid_disk_page; 3603 uint32_t sectors_per_cylinder; 3604 uint64_t cylinders; 3605 #ifndef __XSCALE__ 3606 int shift; 3607 #endif /* !__XSCALE__ */ 3608 3609 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3610 panic("invalid subpage value %d", 3611 page_index->subpage); 3612 3613 /* 3614 * Rotation rate and sectors per track are set 3615 * above. We calculate the cylinders here based on 3616 * capacity. Due to the number of heads and 3617 * sectors per track we're using, smaller arrays 3618 * may turn out to have 0 cylinders. Linux and 3619 * FreeBSD don't pay attention to these mode pages 3620 * to figure out capacity, but Solaris does. It 3621 * seems to deal with 0 cylinders just fine, and 3622 * works out a fake geometry based on the capacity. 3623 */ 3624 memcpy(&lun->mode_pages.rigid_disk_page[ 3625 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 3626 sizeof(rigid_disk_page_default)); 3627 memcpy(&lun->mode_pages.rigid_disk_page[ 3628 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 3629 sizeof(rigid_disk_page_changeable)); 3630 3631 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 3632 CTL_DEFAULT_HEADS; 3633 3634 /* 3635 * The divide method here will be more accurate, 3636 * probably, but results in floating point being 3637 * used in the kernel on i386 (__udivdi3()). On the 3638 * XScale, though, __udivdi3() is implemented in 3639 * software. 3640 * 3641 * The shift method for cylinder calculation is 3642 * accurate if sectors_per_cylinder is a power of 3643 * 2. Otherwise it might be slightly off -- you 3644 * might have a bit of a truncation problem. 3645 */ 3646 #ifdef __XSCALE__ 3647 cylinders = (lun->be_lun->maxlba + 1) / 3648 sectors_per_cylinder; 3649 #else 3650 for (shift = 31; shift > 0; shift--) { 3651 if (sectors_per_cylinder & (1 << shift)) 3652 break; 3653 } 3654 cylinders = (lun->be_lun->maxlba + 1) >> shift; 3655 #endif 3656 3657 /* 3658 * We've basically got 3 bytes, or 24 bits for the 3659 * cylinder size in the mode page. If we're over, 3660 * just round down to 2^24. 
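 *
 * Added worked example (illustrative numbers only): sectors_per_cylinder
 * is CTL_DEFAULT_SECTORS_PER_TRACK * CTL_DEFAULT_HEADS.  If that product
 * were, say, 16384 (2^14), the loop below finds shift = 14, so a 1 TiB
 * LUN with 512-byte blocks (maxlba + 1 = 2^31 blocks) reports
 * cylinders = 2^31 >> 14 = 131072, comfortably inside the 24-bit field.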
3661 */ 3662 if (cylinders > 0xffffff) 3663 cylinders = 0xffffff; 3664 3665 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 3666 CTL_PAGE_DEFAULT]; 3667 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 3668 3669 if ((value = ctl_get_opt(&lun->be_lun->options, 3670 "rpm")) != NULL) { 3671 scsi_ulto2b(strtol(value, NULL, 0), 3672 rigid_disk_page->rotation_rate); 3673 } 3674 3675 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 3676 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 3677 sizeof(rigid_disk_page_default)); 3678 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 3679 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 3680 sizeof(rigid_disk_page_default)); 3681 3682 page_index->page_data = 3683 (uint8_t *)lun->mode_pages.rigid_disk_page; 3684 break; 3685 } 3686 case SMS_CACHING_PAGE: { 3687 struct scsi_caching_page *caching_page; 3688 3689 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3690 panic("invalid subpage value %d", 3691 page_index->subpage); 3692 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 3693 &caching_page_default, 3694 sizeof(caching_page_default)); 3695 memcpy(&lun->mode_pages.caching_page[ 3696 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 3697 sizeof(caching_page_changeable)); 3698 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 3699 &caching_page_default, 3700 sizeof(caching_page_default)); 3701 caching_page = &lun->mode_pages.caching_page[ 3702 CTL_PAGE_SAVED]; 3703 value = ctl_get_opt(&lun->be_lun->options, "writecache"); 3704 if (value != NULL && strcmp(value, "off") == 0) 3705 caching_page->flags1 &= ~SCP_WCE; 3706 value = ctl_get_opt(&lun->be_lun->options, "readcache"); 3707 if (value != NULL && strcmp(value, "off") == 0) 3708 caching_page->flags1 |= SCP_RCD; 3709 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 3710 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 3711 sizeof(caching_page_default)); 3712 page_index->page_data = 3713 (uint8_t *)lun->mode_pages.caching_page; 3714 break; 3715 } 3716 case SMS_CONTROL_MODE_PAGE: { 3717 struct scsi_control_page *control_page; 3718 3719 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3720 panic("invalid subpage value %d", 3721 page_index->subpage); 3722 3723 memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT], 3724 &control_page_default, 3725 sizeof(control_page_default)); 3726 memcpy(&lun->mode_pages.control_page[ 3727 CTL_PAGE_CHANGEABLE], &control_page_changeable, 3728 sizeof(control_page_changeable)); 3729 memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED], 3730 &control_page_default, 3731 sizeof(control_page_default)); 3732 control_page = &lun->mode_pages.control_page[ 3733 CTL_PAGE_SAVED]; 3734 value = ctl_get_opt(&lun->be_lun->options, "reordering"); 3735 if (value != NULL && strcmp(value, "unrestricted") == 0) { 3736 control_page->queue_flags &= ~SCP_QUEUE_ALG_MASK; 3737 control_page->queue_flags |= SCP_QUEUE_ALG_UNRESTRICTED; 3738 } 3739 memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT], 3740 &lun->mode_pages.control_page[CTL_PAGE_SAVED], 3741 sizeof(control_page_default)); 3742 page_index->page_data = 3743 (uint8_t *)lun->mode_pages.control_page; 3744 break; 3745 3746 } 3747 case SMS_INFO_EXCEPTIONS_PAGE: { 3748 switch (page_index->subpage) { 3749 case SMS_SUBPAGE_PAGE_0: 3750 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 3751 &ie_page_default, 3752 sizeof(ie_page_default)); 3753 memcpy(&lun->mode_pages.ie_page[ 3754 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 3755 sizeof(ie_page_changeable)); 3756 
memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 3757 &ie_page_default, 3758 sizeof(ie_page_default)); 3759 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 3760 &ie_page_default, 3761 sizeof(ie_page_default)); 3762 page_index->page_data = 3763 (uint8_t *)lun->mode_pages.ie_page; 3764 break; 3765 case 0x02: { 3766 struct ctl_logical_block_provisioning_page *page; 3767 3768 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 3769 &lbp_page_default, 3770 sizeof(lbp_page_default)); 3771 memcpy(&lun->mode_pages.lbp_page[ 3772 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 3773 sizeof(lbp_page_changeable)); 3774 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 3775 &lbp_page_default, 3776 sizeof(lbp_page_default)); 3777 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 3778 value = ctl_get_opt(&lun->be_lun->options, 3779 "avail-threshold"); 3780 if (value != NULL && 3781 ctl_expand_number(value, &ival) == 0) { 3782 page->descr[0].flags |= SLBPPD_ENABLED | 3783 SLBPPD_ARMING_DEC; 3784 if (lun->be_lun->blocksize) 3785 ival /= lun->be_lun->blocksize; 3786 else 3787 ival /= 512; 3788 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 3789 page->descr[0].count); 3790 } 3791 value = ctl_get_opt(&lun->be_lun->options, 3792 "used-threshold"); 3793 if (value != NULL && 3794 ctl_expand_number(value, &ival) == 0) { 3795 page->descr[1].flags |= SLBPPD_ENABLED | 3796 SLBPPD_ARMING_INC; 3797 if (lun->be_lun->blocksize) 3798 ival /= lun->be_lun->blocksize; 3799 else 3800 ival /= 512; 3801 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 3802 page->descr[1].count); 3803 } 3804 value = ctl_get_opt(&lun->be_lun->options, 3805 "pool-avail-threshold"); 3806 if (value != NULL && 3807 ctl_expand_number(value, &ival) == 0) { 3808 page->descr[2].flags |= SLBPPD_ENABLED | 3809 SLBPPD_ARMING_DEC; 3810 if (lun->be_lun->blocksize) 3811 ival /= lun->be_lun->blocksize; 3812 else 3813 ival /= 512; 3814 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 3815 page->descr[2].count); 3816 } 3817 value = ctl_get_opt(&lun->be_lun->options, 3818 "pool-used-threshold"); 3819 if (value != NULL && 3820 ctl_expand_number(value, &ival) == 0) { 3821 page->descr[3].flags |= SLBPPD_ENABLED | 3822 SLBPPD_ARMING_INC; 3823 if (lun->be_lun->blocksize) 3824 ival /= lun->be_lun->blocksize; 3825 else 3826 ival /= 512; 3827 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 3828 page->descr[3].count); 3829 } 3830 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 3831 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 3832 sizeof(lbp_page_default)); 3833 page_index->page_data = 3834 (uint8_t *)lun->mode_pages.lbp_page; 3835 }} 3836 break; 3837 } 3838 case SMS_VENDOR_SPECIFIC_PAGE:{ 3839 switch (page_index->subpage) { 3840 case DBGCNF_SUBPAGE_CODE: { 3841 struct copan_debugconf_subpage *current_page, 3842 *saved_page; 3843 3844 memcpy(&lun->mode_pages.debugconf_subpage[ 3845 CTL_PAGE_CURRENT], 3846 &debugconf_page_default, 3847 sizeof(debugconf_page_default)); 3848 memcpy(&lun->mode_pages.debugconf_subpage[ 3849 CTL_PAGE_CHANGEABLE], 3850 &debugconf_page_changeable, 3851 sizeof(debugconf_page_changeable)); 3852 memcpy(&lun->mode_pages.debugconf_subpage[ 3853 CTL_PAGE_DEFAULT], 3854 &debugconf_page_default, 3855 sizeof(debugconf_page_default)); 3856 memcpy(&lun->mode_pages.debugconf_subpage[ 3857 CTL_PAGE_SAVED], 3858 &debugconf_page_default, 3859 sizeof(debugconf_page_default)); 3860 page_index->page_data = 3861 (uint8_t *)lun->mode_pages.debugconf_subpage; 3862 3863 current_page = (struct copan_debugconf_subpage *) 3864 (page_index->page_data + 3865 (page_index->page_len * 3866 CTL_PAGE_CURRENT)); 3867 
saved_page = (struct copan_debugconf_subpage *) 3868 (page_index->page_data + 3869 (page_index->page_len * 3870 CTL_PAGE_SAVED)); 3871 break; 3872 } 3873 default: 3874 panic("invalid subpage value %d", 3875 page_index->subpage); 3876 break; 3877 } 3878 break; 3879 } 3880 default: 3881 panic("invalid page value %d", 3882 page_index->page_code & SMPH_PC_MASK); 3883 break; 3884 } 3885 } 3886 3887 return (CTL_RETVAL_COMPLETE); 3888 } 3889 3890 static int 3891 ctl_init_log_page_index(struct ctl_lun *lun) 3892 { 3893 struct ctl_page_index *page_index; 3894 int i, j, k, prev; 3895 3896 memcpy(&lun->log_pages.index, log_page_index_template, 3897 sizeof(log_page_index_template)); 3898 3899 prev = -1; 3900 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 3901 3902 page_index = &lun->log_pages.index[i]; 3903 /* 3904 * If this is a disk-only mode page, there's no point in 3905 * setting it up. For some pages, we have to have some 3906 * basic information about the disk in order to calculate the 3907 * mode page data. 3908 */ 3909 if ((lun->be_lun->lun_type != T_DIRECT) 3910 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 3911 continue; 3912 3913 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 3914 lun->backend->lun_attr == NULL) 3915 continue; 3916 3917 if (page_index->page_code != prev) { 3918 lun->log_pages.pages_page[j] = page_index->page_code; 3919 prev = page_index->page_code; 3920 j++; 3921 } 3922 lun->log_pages.subpages_page[k*2] = page_index->page_code; 3923 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 3924 k++; 3925 } 3926 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 3927 lun->log_pages.index[0].page_len = j; 3928 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 3929 lun->log_pages.index[1].page_len = k * 2; 3930 lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0]; 3931 lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS; 3932 lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page; 3933 lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page); 3934 3935 return (CTL_RETVAL_COMPLETE); 3936 } 3937 3938 static int 3939 hex2bin(const char *str, uint8_t *buf, int buf_size) 3940 { 3941 int i; 3942 u_char c; 3943 3944 memset(buf, 0, buf_size); 3945 while (isspace(str[0])) 3946 str++; 3947 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) 3948 str += 2; 3949 buf_size *= 2; 3950 for (i = 0; str[i] != 0 && i < buf_size; i++) { 3951 c = str[i]; 3952 if (isdigit(c)) 3953 c -= '0'; 3954 else if (isalpha(c)) 3955 c -= isupper(c) ? 'A' - 10 : 'a' - 10; 3956 else 3957 break; 3958 if (c >= 16) 3959 break; 3960 if ((i & 1) == 0) 3961 buf[i / 2] |= (c << 4); 3962 else 3963 buf[i / 2] |= c; 3964 } 3965 return ((i + 1) / 2); 3966 } 3967 3968 /* 3969 * LUN allocation. 3970 * 3971 * Requirements: 3972 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 3973 * wants us to allocate the LUN and he can block. 3974 * - ctl_softc is always set 3975 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 3976 * 3977 * Returns 0 for success, non-zero (errno) for failure. 
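 *
 * Illustrative sketch only (hypothetical caller, not from the original
 * source), showing the two ways the LUN storage may be supplied under the
 * requirements above:
 *
 *	(let ctl_alloc_lun() allocate the struct ctl_lun itself; may block)
 *	error = ctl_alloc_lun(ctl_softc, NULL, be_lun);
 *
 *	(or pass in storage the caller already allocated and zeroed)
 *	lun = malloc(sizeof(*lun), M_CTL, M_WAITOK | M_ZERO);
 *	error = ctl_alloc_lun(ctl_softc, lun, be_lun);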
3978 */ 3979 static int 3980 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 3981 struct ctl_be_lun *const be_lun) 3982 { 3983 struct ctl_lun *nlun, *lun; 3984 struct scsi_vpd_id_descriptor *desc; 3985 struct scsi_vpd_id_t10 *t10id; 3986 const char *eui, *naa, *scsiname, *vendor, *value; 3987 int lun_number, i, lun_malloced; 3988 int devidlen, idlen1, idlen2 = 0, len; 3989 3990 if (be_lun == NULL) 3991 return (EINVAL); 3992 3993 /* 3994 * We currently only support Direct Access or Processor LUN types. 3995 */ 3996 switch (be_lun->lun_type) { 3997 case T_DIRECT: 3998 break; 3999 case T_PROCESSOR: 4000 break; 4001 case T_SEQUENTIAL: 4002 case T_CHANGER: 4003 default: 4004 be_lun->lun_config_status(be_lun->be_lun, 4005 CTL_LUN_CONFIG_FAILURE); 4006 break; 4007 } 4008 if (ctl_lun == NULL) { 4009 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4010 lun_malloced = 1; 4011 } else { 4012 lun_malloced = 0; 4013 lun = ctl_lun; 4014 } 4015 4016 memset(lun, 0, sizeof(*lun)); 4017 if (lun_malloced) 4018 lun->flags = CTL_LUN_MALLOCED; 4019 4020 /* Generate LUN ID. */ 4021 devidlen = max(CTL_DEVID_MIN_LEN, 4022 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4023 idlen1 = sizeof(*t10id) + devidlen; 4024 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4025 scsiname = ctl_get_opt(&be_lun->options, "scsiname"); 4026 if (scsiname != NULL) { 4027 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4028 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4029 } 4030 eui = ctl_get_opt(&be_lun->options, "eui"); 4031 if (eui != NULL) { 4032 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4033 } 4034 naa = ctl_get_opt(&be_lun->options, "naa"); 4035 if (naa != NULL) { 4036 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4037 } 4038 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4039 M_CTL, M_WAITOK | M_ZERO); 4040 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4041 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4042 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4043 desc->length = idlen1; 4044 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4045 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4046 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { 4047 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4048 } else { 4049 strncpy(t10id->vendor, vendor, 4050 min(sizeof(t10id->vendor), strlen(vendor))); 4051 } 4052 strncpy((char *)t10id->vendor_spec_id, 4053 (char *)be_lun->device_id, devidlen); 4054 if (scsiname != NULL) { 4055 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4056 desc->length); 4057 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4058 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4059 SVPD_ID_TYPE_SCSI_NAME; 4060 desc->length = idlen2; 4061 strlcpy(desc->identifier, scsiname, idlen2); 4062 } 4063 if (eui != NULL) { 4064 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4065 desc->length); 4066 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4067 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4068 SVPD_ID_TYPE_EUI64; 4069 desc->length = hex2bin(eui, desc->identifier, 16); 4070 desc->length = desc->length > 12 ? 16 : 4071 (desc->length > 8 ? 
12 : 8); 4072 len -= 16 - desc->length; 4073 } 4074 if (naa != NULL) { 4075 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4076 desc->length); 4077 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4078 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4079 SVPD_ID_TYPE_NAA; 4080 desc->length = hex2bin(naa, desc->identifier, 16); 4081 desc->length = desc->length > 8 ? 16 : 8; 4082 len -= 16 - desc->length; 4083 } 4084 lun->lun_devid->len = len; 4085 4086 mtx_lock(&ctl_softc->ctl_lock); 4087 /* 4088 * See if the caller requested a particular LUN number. If so, see 4089 * if it is available. Otherwise, allocate the first available LUN. 4090 */ 4091 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4092 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4093 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4094 mtx_unlock(&ctl_softc->ctl_lock); 4095 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4096 printf("ctl: requested LUN ID %d is higher " 4097 "than CTL_MAX_LUNS - 1 (%d)\n", 4098 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4099 } else { 4100 /* 4101 * XXX KDM return an error, or just assign 4102 * another LUN ID in this case?? 4103 */ 4104 printf("ctl: requested LUN ID %d is already " 4105 "in use\n", be_lun->req_lun_id); 4106 } 4107 if (lun->flags & CTL_LUN_MALLOCED) 4108 free(lun, M_CTL); 4109 be_lun->lun_config_status(be_lun->be_lun, 4110 CTL_LUN_CONFIG_FAILURE); 4111 return (ENOSPC); 4112 } 4113 lun_number = be_lun->req_lun_id; 4114 } else { 4115 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS); 4116 if (lun_number == -1) { 4117 mtx_unlock(&ctl_softc->ctl_lock); 4118 printf("ctl: can't allocate LUN, out of LUNs\n"); 4119 if (lun->flags & CTL_LUN_MALLOCED) 4120 free(lun, M_CTL); 4121 be_lun->lun_config_status(be_lun->be_lun, 4122 CTL_LUN_CONFIG_FAILURE); 4123 return (ENOSPC); 4124 } 4125 } 4126 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4127 4128 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4129 lun->lun = lun_number; 4130 lun->be_lun = be_lun; 4131 /* 4132 * The processor LUN is always enabled. Disk LUNs come on line 4133 * disabled, and must be enabled by the backend. 
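 * (In practice CTL_LUN_DISABLED is set unconditionally just below; the
 * backend is expected to clear it later by calling ctl_enable_lun() once
 * it has finished configuring the LUN.)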
4134 */ 4135 lun->flags |= CTL_LUN_DISABLED; 4136 lun->backend = be_lun->be; 4137 be_lun->ctl_lun = lun; 4138 be_lun->lun_id = lun_number; 4139 atomic_add_int(&be_lun->be->num_luns, 1); 4140 if (be_lun->flags & CTL_LUN_FLAG_OFFLINE) 4141 lun->flags |= CTL_LUN_OFFLINE; 4142 4143 if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF) 4144 lun->flags |= CTL_LUN_STOPPED; 4145 4146 if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE) 4147 lun->flags |= CTL_LUN_INOPERABLE; 4148 4149 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4150 lun->flags |= CTL_LUN_PRIMARY_SC; 4151 4152 value = ctl_get_opt(&be_lun->options, "readonly"); 4153 if (value != NULL && strcmp(value, "on") == 0) 4154 lun->flags |= CTL_LUN_READONLY; 4155 4156 lun->serseq = CTL_LUN_SERSEQ_OFF; 4157 if (be_lun->flags & CTL_LUN_FLAG_SERSEQ_READ) 4158 lun->serseq = CTL_LUN_SERSEQ_READ; 4159 value = ctl_get_opt(&be_lun->options, "serseq"); 4160 if (value != NULL && strcmp(value, "on") == 0) 4161 lun->serseq = CTL_LUN_SERSEQ_ON; 4162 else if (value != NULL && strcmp(value, "read") == 0) 4163 lun->serseq = CTL_LUN_SERSEQ_READ; 4164 else if (value != NULL && strcmp(value, "off") == 0) 4165 lun->serseq = CTL_LUN_SERSEQ_OFF; 4166 4167 lun->ctl_softc = ctl_softc; 4168 #ifdef CTL_TIME_IO 4169 lun->last_busy = getsbinuptime(); 4170 #endif 4171 TAILQ_INIT(&lun->ooa_queue); 4172 TAILQ_INIT(&lun->blocked_queue); 4173 STAILQ_INIT(&lun->error_list); 4174 ctl_tpc_lun_init(lun); 4175 4176 /* 4177 * Initialize the mode and log page index. 4178 */ 4179 ctl_init_page_index(lun); 4180 ctl_init_log_page_index(lun); 4181 4182 /* 4183 * Now, before we insert this lun on the lun list, set the lun 4184 * inventory changed UA for all other luns. 4185 */ 4186 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4187 mtx_lock(&nlun->lun_lock); 4188 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4189 mtx_unlock(&nlun->lun_lock); 4190 } 4191 4192 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4193 4194 ctl_softc->ctl_luns[lun_number] = lun; 4195 4196 ctl_softc->num_luns++; 4197 4198 /* Setup statistics gathering */ 4199 lun->stats.device_type = be_lun->lun_type; 4200 lun->stats.lun_number = lun_number; 4201 if (lun->stats.device_type == T_DIRECT) 4202 lun->stats.blocksize = be_lun->blocksize; 4203 else 4204 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4205 for (i = 0;i < CTL_MAX_PORTS;i++) 4206 lun->stats.ports[i].targ_port = i; 4207 4208 mtx_unlock(&ctl_softc->ctl_lock); 4209 4210 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4211 return (0); 4212 } 4213 4214 /* 4215 * Delete a LUN. 4216 * Assumptions: 4217 * - LUN has already been marked invalid and any pending I/O has been taken 4218 * care of. 4219 */ 4220 static int 4221 ctl_free_lun(struct ctl_lun *lun) 4222 { 4223 struct ctl_softc *softc; 4224 struct ctl_lun *nlun; 4225 int i; 4226 4227 softc = lun->ctl_softc; 4228 4229 mtx_assert(&softc->ctl_lock, MA_OWNED); 4230 4231 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4232 4233 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4234 4235 softc->ctl_luns[lun->lun] = NULL; 4236 4237 if (!TAILQ_EMPTY(&lun->ooa_queue)) 4238 panic("Freeing a LUN %p with outstanding I/O!!\n", lun); 4239 4240 softc->num_luns--; 4241 4242 /* 4243 * Tell the backend to free resources, if this LUN has a backend. 
4244 */ 4245 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4246 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4247 4248 ctl_tpc_lun_shutdown(lun); 4249 mtx_destroy(&lun->lun_lock); 4250 free(lun->lun_devid, M_CTL); 4251 for (i = 0; i < CTL_MAX_PORTS; i++) 4252 free(lun->pending_ua[i], M_CTL); 4253 for (i = 0; i < 2 * CTL_MAX_PORTS; i++) 4254 free(lun->pr_keys[i], M_CTL); 4255 free(lun->write_buffer, M_CTL); 4256 if (lun->flags & CTL_LUN_MALLOCED) 4257 free(lun, M_CTL); 4258 4259 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4260 mtx_lock(&nlun->lun_lock); 4261 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4262 mtx_unlock(&nlun->lun_lock); 4263 } 4264 4265 return (0); 4266 } 4267 4268 static void 4269 ctl_create_lun(struct ctl_be_lun *be_lun) 4270 { 4271 struct ctl_softc *softc; 4272 4273 softc = control_softc; 4274 4275 /* 4276 * ctl_alloc_lun() should handle all potential failure cases. 4277 */ 4278 ctl_alloc_lun(softc, NULL, be_lun); 4279 } 4280 4281 int 4282 ctl_add_lun(struct ctl_be_lun *be_lun) 4283 { 4284 struct ctl_softc *softc = control_softc; 4285 4286 mtx_lock(&softc->ctl_lock); 4287 STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links); 4288 mtx_unlock(&softc->ctl_lock); 4289 wakeup(&softc->pending_lun_queue); 4290 4291 return (0); 4292 } 4293 4294 int 4295 ctl_enable_lun(struct ctl_be_lun *be_lun) 4296 { 4297 struct ctl_softc *softc; 4298 struct ctl_port *port, *nport; 4299 struct ctl_lun *lun; 4300 int retval; 4301 4302 lun = (struct ctl_lun *)be_lun->ctl_lun; 4303 softc = lun->ctl_softc; 4304 4305 mtx_lock(&softc->ctl_lock); 4306 mtx_lock(&lun->lun_lock); 4307 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4308 /* 4309 * eh? Why did we get called if the LUN is already 4310 * enabled? 4311 */ 4312 mtx_unlock(&lun->lun_lock); 4313 mtx_unlock(&softc->ctl_lock); 4314 return (0); 4315 } 4316 lun->flags &= ~CTL_LUN_DISABLED; 4317 mtx_unlock(&lun->lun_lock); 4318 4319 for (port = STAILQ_FIRST(&softc->port_list); port != NULL; port = nport) { 4320 nport = STAILQ_NEXT(port, links); 4321 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4322 port->lun_map != NULL) 4323 continue; 4324 4325 /* 4326 * Drop the lock while we call the FETD's enable routine. 4327 * This can lead to a callback into CTL (at least in the 4328 * case of the internal initiator frontend. 
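 * The next port pointer (nport) is therefore saved while the lock is
 * still held, and an error from lun_enable() is only logged below; the
 * LUN itself stays enabled.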
4329 */ 4330 mtx_unlock(&softc->ctl_lock); 4331 retval = port->lun_enable(port->targ_lun_arg, lun->lun); 4332 mtx_lock(&softc->ctl_lock); 4333 if (retval != 0) { 4334 printf("%s: FETD %s port %d returned error " 4335 "%d for lun_enable on lun %jd\n", 4336 __func__, port->port_name, port->targ_port, 4337 retval, (intmax_t)lun->lun); 4338 } 4339 } 4340 4341 mtx_unlock(&softc->ctl_lock); 4342 4343 return (0); 4344 } 4345 4346 int 4347 ctl_disable_lun(struct ctl_be_lun *be_lun) 4348 { 4349 struct ctl_softc *softc; 4350 struct ctl_port *port; 4351 struct ctl_lun *lun; 4352 int retval; 4353 4354 lun = (struct ctl_lun *)be_lun->ctl_lun; 4355 softc = lun->ctl_softc; 4356 4357 mtx_lock(&softc->ctl_lock); 4358 mtx_lock(&lun->lun_lock); 4359 if (lun->flags & CTL_LUN_DISABLED) { 4360 mtx_unlock(&lun->lun_lock); 4361 mtx_unlock(&softc->ctl_lock); 4362 return (0); 4363 } 4364 lun->flags |= CTL_LUN_DISABLED; 4365 mtx_unlock(&lun->lun_lock); 4366 4367 STAILQ_FOREACH(port, &softc->port_list, links) { 4368 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4369 port->lun_map != NULL) 4370 continue; 4371 mtx_unlock(&softc->ctl_lock); 4372 /* 4373 * Drop the lock before we call the frontend's disable 4374 * routine, to avoid lock order reversals. 4375 * 4376 * XXX KDM what happens if the frontend list changes while 4377 * we're traversing it? It's unlikely, but should be handled. 4378 */ 4379 retval = port->lun_disable(port->targ_lun_arg, lun->lun); 4380 mtx_lock(&softc->ctl_lock); 4381 if (retval != 0) { 4382 printf("%s: FETD %s port %d returned error " 4383 "%d for lun_disable on lun %jd\n", 4384 __func__, port->port_name, port->targ_port, 4385 retval, (intmax_t)lun->lun); 4386 } 4387 } 4388 4389 mtx_unlock(&softc->ctl_lock); 4390 4391 return (0); 4392 } 4393 4394 int 4395 ctl_start_lun(struct ctl_be_lun *be_lun) 4396 { 4397 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4398 4399 mtx_lock(&lun->lun_lock); 4400 lun->flags &= ~CTL_LUN_STOPPED; 4401 mtx_unlock(&lun->lun_lock); 4402 return (0); 4403 } 4404 4405 int 4406 ctl_stop_lun(struct ctl_be_lun *be_lun) 4407 { 4408 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4409 4410 mtx_lock(&lun->lun_lock); 4411 lun->flags |= CTL_LUN_STOPPED; 4412 mtx_unlock(&lun->lun_lock); 4413 return (0); 4414 } 4415 4416 int 4417 ctl_lun_offline(struct ctl_be_lun *be_lun) 4418 { 4419 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4420 4421 mtx_lock(&lun->lun_lock); 4422 lun->flags |= CTL_LUN_OFFLINE; 4423 mtx_unlock(&lun->lun_lock); 4424 return (0); 4425 } 4426 4427 int 4428 ctl_lun_online(struct ctl_be_lun *be_lun) 4429 { 4430 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4431 4432 mtx_lock(&lun->lun_lock); 4433 lun->flags &= ~CTL_LUN_OFFLINE; 4434 mtx_unlock(&lun->lun_lock); 4435 return (0); 4436 } 4437 4438 int 4439 ctl_invalidate_lun(struct ctl_be_lun *be_lun) 4440 { 4441 struct ctl_softc *softc; 4442 struct ctl_lun *lun; 4443 4444 lun = (struct ctl_lun *)be_lun->ctl_lun; 4445 softc = lun->ctl_softc; 4446 4447 mtx_lock(&lun->lun_lock); 4448 4449 /* 4450 * The LUN needs to be disabled before it can be marked invalid. 4451 */ 4452 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4453 mtx_unlock(&lun->lun_lock); 4454 return (-1); 4455 } 4456 /* 4457 * Mark the LUN invalid. 4458 */ 4459 lun->flags |= CTL_LUN_INVALID; 4460 4461 /* 4462 * If there is nothing in the OOA queue, go ahead and free the LUN. 4463 * If we have something in the OOA queue, we'll free it when the 4464 * last I/O completes. 
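 * Note that ctl_free_lun() asserts that softc->ctl_lock is held, which
 * is why the per-LUN lock is dropped and the softc lock taken before it
 * is called below.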
4465 */ 4466 if (TAILQ_EMPTY(&lun->ooa_queue)) { 4467 mtx_unlock(&lun->lun_lock); 4468 mtx_lock(&softc->ctl_lock); 4469 ctl_free_lun(lun); 4470 mtx_unlock(&softc->ctl_lock); 4471 } else 4472 mtx_unlock(&lun->lun_lock); 4473 4474 return (0); 4475 } 4476 4477 int 4478 ctl_lun_inoperable(struct ctl_be_lun *be_lun) 4479 { 4480 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4481 4482 mtx_lock(&lun->lun_lock); 4483 lun->flags |= CTL_LUN_INOPERABLE; 4484 mtx_unlock(&lun->lun_lock); 4485 return (0); 4486 } 4487 4488 int 4489 ctl_lun_operable(struct ctl_be_lun *be_lun) 4490 { 4491 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4492 4493 mtx_lock(&lun->lun_lock); 4494 lun->flags &= ~CTL_LUN_INOPERABLE; 4495 mtx_unlock(&lun->lun_lock); 4496 return (0); 4497 } 4498 4499 void 4500 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 4501 { 4502 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4503 4504 mtx_lock(&lun->lun_lock); 4505 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED); 4506 mtx_unlock(&lun->lun_lock); 4507 } 4508 4509 /* 4510 * Backend "memory move is complete" callback for requests that never 4511 * make it down to say RAIDCore's configuration code. 4512 */ 4513 int 4514 ctl_config_move_done(union ctl_io *io) 4515 { 4516 int retval; 4517 4518 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 4519 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 4520 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 4521 4522 if ((io->io_hdr.port_status != 0) && 4523 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4524 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4525 /* 4526 * For hardware error sense keys, the sense key 4527 * specific value is defined to be a retry count, 4528 * but we use it to pass back an internal FETD 4529 * error code. XXX KDM Hopefully the FETD is only 4530 * using 16 bits for an error code, since that's 4531 * all the space we have in the sks field. 4532 */ 4533 ctl_set_internal_failure(&io->scsiio, 4534 /*sks_valid*/ 1, 4535 /*retry_count*/ 4536 io->io_hdr.port_status); 4537 } 4538 4539 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 4540 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 4541 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 4542 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 4543 /* 4544 * XXX KDM just assuming a single pointer here, and not a 4545 * S/G list. If we start using S/G lists for config data, 4546 * we'll need to know how to clean them up here as well. 4547 */ 4548 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4549 free(io->scsiio.kern_data_ptr, M_CTL); 4550 ctl_done(io); 4551 retval = CTL_RETVAL_COMPLETE; 4552 } else { 4553 /* 4554 * XXX KDM now we need to continue data movement. Some 4555 * options: 4556 * - call ctl_scsiio() again? We don't do this for data 4557 * writes, because for those at least we know ahead of 4558 * time where the write will go and how long it is. For 4559 * config writes, though, that information is largely 4560 * contained within the write itself, thus we need to 4561 * parse out the data again. 4562 * 4563 * - Call some other function once the data is in? 4564 */ 4565 if (ctl_debug & CTL_DEBUG_CDB_DATA) 4566 ctl_data_print(io); 4567 4568 /* 4569 * XXX KDM call ctl_scsiio() again for now, and check flag 4570 * bits to see whether we're allocated or not. 
4571 */ 4572 retval = ctl_scsiio(&io->scsiio); 4573 } 4574 return (retval); 4575 } 4576 4577 /* 4578 * This gets called by a backend driver when it is done with a 4579 * data_submit method. 4580 */ 4581 void 4582 ctl_data_submit_done(union ctl_io *io) 4583 { 4584 /* 4585 * If the IO_CONT flag is set, we need to call the supplied 4586 * function to continue processing the I/O, instead of completing 4587 * the I/O just yet. 4588 * 4589 * If there is an error, though, we don't want to keep processing. 4590 * Instead, just send status back to the initiator. 4591 */ 4592 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 4593 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 4594 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4595 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4596 io->scsiio.io_cont(io); 4597 return; 4598 } 4599 ctl_done(io); 4600 } 4601 4602 /* 4603 * This gets called by a backend driver when it is done with a 4604 * configuration write. 4605 */ 4606 void 4607 ctl_config_write_done(union ctl_io *io) 4608 { 4609 uint8_t *buf; 4610 4611 /* 4612 * If the IO_CONT flag is set, we need to call the supplied 4613 * function to continue processing the I/O, instead of completing 4614 * the I/O just yet. 4615 * 4616 * If there is an error, though, we don't want to keep processing. 4617 * Instead, just send status back to the initiator. 4618 */ 4619 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 4620 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 4621 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4622 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4623 io->scsiio.io_cont(io); 4624 return; 4625 } 4626 /* 4627 * Since a configuration write can be done for commands that actually 4628 * have data allocated, like write buffer, and commands that have 4629 * no data, like start/stop unit, we need to check here. 4630 */ 4631 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4632 buf = io->scsiio.kern_data_ptr; 4633 else 4634 buf = NULL; 4635 ctl_done(io); 4636 if (buf) 4637 free(buf, M_CTL); 4638 } 4639 4640 void 4641 ctl_config_read_done(union ctl_io *io) 4642 { 4643 uint8_t *buf; 4644 4645 /* 4646 * If there is some error -- we are done, skip data transfer. 4647 */ 4648 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 4649 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 4650 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 4651 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4652 buf = io->scsiio.kern_data_ptr; 4653 else 4654 buf = NULL; 4655 ctl_done(io); 4656 if (buf) 4657 free(buf, M_CTL); 4658 return; 4659 } 4660 4661 /* 4662 * If the IO_CONT flag is set, we need to call the supplied 4663 * function to continue processing the I/O, instead of completing 4664 * the I/O just yet. 4665 */ 4666 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 4667 io->scsiio.io_cont(io); 4668 return; 4669 } 4670 4671 ctl_datamove(io); 4672 } 4673 4674 /* 4675 * SCSI release command. 
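 *
 * Both the 6- and 10-byte opcodes presumably end up here; only the
 * RELEASE(10) CDB carries the LongID/third-party ID and parameter list
 * length fields, which is why only that opcode is decoded in the switch
 * below.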
4676 */ 4677 int 4678 ctl_scsi_release(struct ctl_scsiio *ctsio) 4679 { 4680 int length, longid, thirdparty_id, resv_id; 4681 struct ctl_lun *lun; 4682 uint32_t residx; 4683 4684 length = 0; 4685 resv_id = 0; 4686 4687 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 4688 4689 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 4690 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4691 4692 switch (ctsio->cdb[0]) { 4693 case RELEASE_10: { 4694 struct scsi_release_10 *cdb; 4695 4696 cdb = (struct scsi_release_10 *)ctsio->cdb; 4697 4698 if (cdb->byte2 & SR10_LONGID) 4699 longid = 1; 4700 else 4701 thirdparty_id = cdb->thirdparty_id; 4702 4703 resv_id = cdb->resv_id; 4704 length = scsi_2btoul(cdb->length); 4705 break; 4706 } 4707 } 4708 4709 4710 /* 4711 * XXX KDM right now, we only support LUN reservation. We don't 4712 * support 3rd party reservations, or extent reservations, which 4713 * might actually need the parameter list. If we've gotten this 4714 * far, we've got a LUN reservation. Anything else got kicked out 4715 * above. So, according to SPC, ignore the length. 4716 */ 4717 length = 0; 4718 4719 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 4720 && (length > 0)) { 4721 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 4722 ctsio->kern_data_len = length; 4723 ctsio->kern_total_len = length; 4724 ctsio->kern_data_resid = 0; 4725 ctsio->kern_rel_offset = 0; 4726 ctsio->kern_sg_entries = 0; 4727 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 4728 ctsio->be_move_done = ctl_config_move_done; 4729 ctl_datamove((union ctl_io *)ctsio); 4730 4731 return (CTL_RETVAL_COMPLETE); 4732 } 4733 4734 if (length > 0) 4735 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 4736 4737 mtx_lock(&lun->lun_lock); 4738 4739 /* 4740 * According to SPC, it is not an error for an intiator to attempt 4741 * to release a reservation on a LUN that isn't reserved, or that 4742 * is reserved by another initiator. The reservation can only be 4743 * released, though, by the initiator who made it or by one of 4744 * several reset type events. 4745 */ 4746 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 4747 lun->flags &= ~CTL_LUN_RESERVED; 4748 4749 mtx_unlock(&lun->lun_lock); 4750 4751 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 4752 free(ctsio->kern_data_ptr, M_CTL); 4753 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 4754 } 4755 4756 ctl_set_success(ctsio); 4757 ctl_done((union ctl_io *)ctsio); 4758 return (CTL_RETVAL_COMPLETE); 4759 } 4760 4761 int 4762 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 4763 { 4764 int extent, thirdparty, longid; 4765 int resv_id, length; 4766 uint64_t thirdparty_id; 4767 struct ctl_lun *lun; 4768 uint32_t residx; 4769 4770 extent = 0; 4771 thirdparty = 0; 4772 longid = 0; 4773 resv_id = 0; 4774 length = 0; 4775 thirdparty_id = 0; 4776 4777 CTL_DEBUG_PRINT(("ctl_reserve\n")); 4778 4779 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 4780 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4781 4782 switch (ctsio->cdb[0]) { 4783 case RESERVE_10: { 4784 struct scsi_reserve_10 *cdb; 4785 4786 cdb = (struct scsi_reserve_10 *)ctsio->cdb; 4787 4788 if (cdb->byte2 & SR10_LONGID) 4789 longid = 1; 4790 else 4791 thirdparty_id = cdb->thirdparty_id; 4792 4793 resv_id = cdb->resv_id; 4794 length = scsi_2btoul(cdb->length); 4795 break; 4796 } 4797 } 4798 4799 /* 4800 * XXX KDM right now, we only support LUN reservation. We don't 4801 * support 3rd party reservations, or extent reservations, which 4802 * might actually need the parameter list. 
If we've gotten this 4803 * far, we've got a LUN reservation. Anything else got kicked out 4804 * above. So, according to SPC, ignore the length. 4805 */ 4806 length = 0; 4807 4808 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 4809 && (length > 0)) { 4810 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 4811 ctsio->kern_data_len = length; 4812 ctsio->kern_total_len = length; 4813 ctsio->kern_data_resid = 0; 4814 ctsio->kern_rel_offset = 0; 4815 ctsio->kern_sg_entries = 0; 4816 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 4817 ctsio->be_move_done = ctl_config_move_done; 4818 ctl_datamove((union ctl_io *)ctsio); 4819 4820 return (CTL_RETVAL_COMPLETE); 4821 } 4822 4823 if (length > 0) 4824 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 4825 4826 mtx_lock(&lun->lun_lock); 4827 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 4828 ctl_set_reservation_conflict(ctsio); 4829 goto bailout; 4830 } 4831 4832 lun->flags |= CTL_LUN_RESERVED; 4833 lun->res_idx = residx; 4834 4835 ctl_set_success(ctsio); 4836 4837 bailout: 4838 mtx_unlock(&lun->lun_lock); 4839 4840 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 4841 free(ctsio->kern_data_ptr, M_CTL); 4842 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 4843 } 4844 4845 ctl_done((union ctl_io *)ctsio); 4846 return (CTL_RETVAL_COMPLETE); 4847 } 4848 4849 int 4850 ctl_start_stop(struct ctl_scsiio *ctsio) 4851 { 4852 struct scsi_start_stop_unit *cdb; 4853 struct ctl_lun *lun; 4854 int retval; 4855 4856 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 4857 4858 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4859 retval = 0; 4860 4861 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 4862 4863 /* 4864 * XXX KDM 4865 * We don't support the immediate bit on a stop unit. In order to 4866 * do that, we would need to code up a way to know that a stop is 4867 * pending, and hold off any new commands until it completes, one 4868 * way or another. Then we could accept or reject those commands 4869 * depending on its status. We would almost need to do the reverse 4870 * of what we do below for an immediate start -- return the copy of 4871 * the ctl_io to the FETD with status to send to the host (and to 4872 * free the copy!) and then free the original I/O once the stop 4873 * actually completes. That way, the OOA queue mechanism can work 4874 * to block commands that shouldn't proceed. Another alternative 4875 * would be to put the copy in the queue in place of the original, 4876 * and return the original back to the caller. That could be 4877 * slightly safer.. 4878 */ 4879 if ((cdb->byte2 & SSS_IMMED) 4880 && ((cdb->how & SSS_START) == 0)) { 4881 ctl_set_invalid_field(ctsio, 4882 /*sks_valid*/ 1, 4883 /*command*/ 1, 4884 /*field*/ 1, 4885 /*bit_valid*/ 1, 4886 /*bit*/ 0); 4887 ctl_done((union ctl_io *)ctsio); 4888 return (CTL_RETVAL_COMPLETE); 4889 } 4890 4891 if ((lun->flags & CTL_LUN_PR_RESERVED) 4892 && ((cdb->how & SSS_START)==0)) { 4893 uint32_t residx; 4894 4895 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 4896 if (ctl_get_prkey(lun, residx) == 0 4897 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { 4898 4899 ctl_set_reservation_conflict(ctsio); 4900 ctl_done((union ctl_io *)ctsio); 4901 return (CTL_RETVAL_COMPLETE); 4902 } 4903 } 4904 4905 /* 4906 * If there is no backend on this device, we can't start or stop 4907 * it. In theory we shouldn't get any start/stop commands in the 4908 * first place at this level if the LUN doesn't have a backend. 4909 * That should get stopped by the command decode code. 
4910 */ 4911 if (lun->backend == NULL) { 4912 ctl_set_invalid_opcode(ctsio); 4913 ctl_done((union ctl_io *)ctsio); 4914 return (CTL_RETVAL_COMPLETE); 4915 } 4916 4917 /* 4918 * XXX KDM Copan-specific offline behavior. 4919 * Figure out a reasonable way to port this? 4920 */ 4921 #ifdef NEEDTOPORT 4922 mtx_lock(&lun->lun_lock); 4923 4924 if (((cdb->byte2 & SSS_ONOFFLINE) == 0) 4925 && (lun->flags & CTL_LUN_OFFLINE)) { 4926 /* 4927 * If the LUN is offline, and the on/offline bit isn't set, 4928 * reject the start or stop. Otherwise, let it through. 4929 */ 4930 mtx_unlock(&lun->lun_lock); 4931 ctl_set_lun_not_ready(ctsio); 4932 ctl_done((union ctl_io *)ctsio); 4933 } else { 4934 mtx_unlock(&lun->lun_lock); 4935 #endif /* NEEDTOPORT */ 4936 /* 4937 * This could be a start or a stop when we're online, 4938 * or a stop/offline or start/online. A start or stop when 4939 * we're offline is covered in the case above. 4940 */ 4941 /* 4942 * In the non-immediate case, we send the request to 4943 * the backend and return status to the user when 4944 * it is done. 4945 * 4946 * In the immediate case, we allocate a new ctl_io 4947 * to hold a copy of the request, and send that to 4948 * the backend. We then set good status on the 4949 * user's request and return it immediately. 4950 */ 4951 if (cdb->byte2 & SSS_IMMED) { 4952 union ctl_io *new_io; 4953 4954 new_io = ctl_alloc_io(ctsio->io_hdr.pool); 4955 ctl_copy_io((union ctl_io *)ctsio, new_io); 4956 retval = lun->backend->config_write(new_io); 4957 ctl_set_success(ctsio); 4958 ctl_done((union ctl_io *)ctsio); 4959 } else { 4960 retval = lun->backend->config_write( 4961 (union ctl_io *)ctsio); 4962 } 4963 #ifdef NEEDTOPORT 4964 } 4965 #endif 4966 return (retval); 4967 } 4968 4969 /* 4970 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 4971 * we don't really do anything with the LBA and length fields if the user 4972 * passes them in. Instead we'll just flush out the cache for the entire 4973 * LUN. 4974 */ 4975 int 4976 ctl_sync_cache(struct ctl_scsiio *ctsio) 4977 { 4978 struct ctl_lun *lun; 4979 struct ctl_softc *softc; 4980 struct ctl_lba_len_flags *lbalen; 4981 uint64_t starting_lba; 4982 uint32_t block_count; 4983 int retval; 4984 uint8_t byte2; 4985 4986 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 4987 4988 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4989 softc = lun->ctl_softc; 4990 retval = 0; 4991 4992 switch (ctsio->cdb[0]) { 4993 case SYNCHRONIZE_CACHE: { 4994 struct scsi_sync_cache *cdb; 4995 cdb = (struct scsi_sync_cache *)ctsio->cdb; 4996 4997 starting_lba = scsi_4btoul(cdb->begin_lba); 4998 block_count = scsi_2btoul(cdb->lb_count); 4999 byte2 = cdb->byte2; 5000 break; 5001 } 5002 case SYNCHRONIZE_CACHE_16: { 5003 struct scsi_sync_cache_16 *cdb; 5004 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5005 5006 starting_lba = scsi_8btou64(cdb->begin_lba); 5007 block_count = scsi_4btoul(cdb->lb_count); 5008 byte2 = cdb->byte2; 5009 break; 5010 } 5011 default: 5012 ctl_set_invalid_opcode(ctsio); 5013 ctl_done((union ctl_io *)ctsio); 5014 goto bailout; 5015 break; /* NOTREACHED */ 5016 } 5017 5018 /* 5019 * We check the LBA and length, but don't do anything with them. 5020 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5021 * get flushed. This check will just help satisfy anyone who wants 5022 * to see an error for an out of range LBA. 
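 * For example, on a LUN whose last LBA is 999 (1000 blocks), a request
 * with starting_lba 990 and block_count 10 passes the check below
 * (990 + 10 == maxlba + 1), while block_count 11 is rejected.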
5023 */ 5024 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5025 ctl_set_lba_out_of_range(ctsio); 5026 ctl_done((union ctl_io *)ctsio); 5027 goto bailout; 5028 } 5029 5030 /* 5031 * If this LUN has no backend, we can't flush the cache anyway. 5032 */ 5033 if (lun->backend == NULL) { 5034 ctl_set_invalid_opcode(ctsio); 5035 ctl_done((union ctl_io *)ctsio); 5036 goto bailout; 5037 } 5038 5039 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5040 lbalen->lba = starting_lba; 5041 lbalen->len = block_count; 5042 lbalen->flags = byte2; 5043 5044 /* 5045 * Check to see whether we're configured to send the SYNCHRONIZE 5046 * CACHE command directly to the back end. 5047 */ 5048 mtx_lock(&lun->lun_lock); 5049 if ((softc->flags & CTL_FLAG_REAL_SYNC) 5050 && (++(lun->sync_count) >= lun->sync_interval)) { 5051 lun->sync_count = 0; 5052 mtx_unlock(&lun->lun_lock); 5053 retval = lun->backend->config_write((union ctl_io *)ctsio); 5054 } else { 5055 mtx_unlock(&lun->lun_lock); 5056 ctl_set_success(ctsio); 5057 ctl_done((union ctl_io *)ctsio); 5058 } 5059 5060 bailout: 5061 5062 return (retval); 5063 } 5064 5065 int 5066 ctl_format(struct ctl_scsiio *ctsio) 5067 { 5068 struct scsi_format *cdb; 5069 struct ctl_lun *lun; 5070 int length, defect_list_len; 5071 5072 CTL_DEBUG_PRINT(("ctl_format\n")); 5073 5074 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5075 5076 cdb = (struct scsi_format *)ctsio->cdb; 5077 5078 length = 0; 5079 if (cdb->byte2 & SF_FMTDATA) { 5080 if (cdb->byte2 & SF_LONGLIST) 5081 length = sizeof(struct scsi_format_header_long); 5082 else 5083 length = sizeof(struct scsi_format_header_short); 5084 } 5085 5086 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5087 && (length > 0)) { 5088 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5089 ctsio->kern_data_len = length; 5090 ctsio->kern_total_len = length; 5091 ctsio->kern_data_resid = 0; 5092 ctsio->kern_rel_offset = 0; 5093 ctsio->kern_sg_entries = 0; 5094 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5095 ctsio->be_move_done = ctl_config_move_done; 5096 ctl_datamove((union ctl_io *)ctsio); 5097 5098 return (CTL_RETVAL_COMPLETE); 5099 } 5100 5101 defect_list_len = 0; 5102 5103 if (cdb->byte2 & SF_FMTDATA) { 5104 if (cdb->byte2 & SF_LONGLIST) { 5105 struct scsi_format_header_long *header; 5106 5107 header = (struct scsi_format_header_long *) 5108 ctsio->kern_data_ptr; 5109 5110 defect_list_len = scsi_4btoul(header->defect_list_len); 5111 if (defect_list_len != 0) { 5112 ctl_set_invalid_field(ctsio, 5113 /*sks_valid*/ 1, 5114 /*command*/ 0, 5115 /*field*/ 2, 5116 /*bit_valid*/ 0, 5117 /*bit*/ 0); 5118 goto bailout; 5119 } 5120 } else { 5121 struct scsi_format_header_short *header; 5122 5123 header = (struct scsi_format_header_short *) 5124 ctsio->kern_data_ptr; 5125 5126 defect_list_len = scsi_2btoul(header->defect_list_len); 5127 if (defect_list_len != 0) { 5128 ctl_set_invalid_field(ctsio, 5129 /*sks_valid*/ 1, 5130 /*command*/ 0, 5131 /*field*/ 2, 5132 /*bit_valid*/ 0, 5133 /*bit*/ 0); 5134 goto bailout; 5135 } 5136 } 5137 } 5138 5139 /* 5140 * The format command will clear out the "Medium format corrupted" 5141 * status if set by the configuration code. That status is really 5142 * just a way to notify the host that we have lost the media, and 5143 * get them to issue a command that will basically make them think 5144 * they're blowing away the media. 
5145 */ 5146 mtx_lock(&lun->lun_lock); 5147 lun->flags &= ~CTL_LUN_INOPERABLE; 5148 mtx_unlock(&lun->lun_lock); 5149 5150 ctl_set_success(ctsio); 5151 bailout: 5152 5153 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5154 free(ctsio->kern_data_ptr, M_CTL); 5155 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5156 } 5157 5158 ctl_done((union ctl_io *)ctsio); 5159 return (CTL_RETVAL_COMPLETE); 5160 } 5161 5162 int 5163 ctl_read_buffer(struct ctl_scsiio *ctsio) 5164 { 5165 struct scsi_read_buffer *cdb; 5166 struct ctl_lun *lun; 5167 int buffer_offset, len; 5168 static uint8_t descr[4]; 5169 static uint8_t echo_descr[4] = { 0 }; 5170 5171 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5172 5173 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5174 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5175 5176 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA && 5177 (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR && 5178 (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) { 5179 ctl_set_invalid_field(ctsio, 5180 /*sks_valid*/ 1, 5181 /*command*/ 1, 5182 /*field*/ 1, 5183 /*bit_valid*/ 1, 5184 /*bit*/ 4); 5185 ctl_done((union ctl_io *)ctsio); 5186 return (CTL_RETVAL_COMPLETE); 5187 } 5188 5189 len = scsi_3btoul(cdb->length); 5190 buffer_offset = scsi_3btoul(cdb->offset); 5191 5192 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5193 ctl_set_invalid_field(ctsio, 5194 /*sks_valid*/ 1, 5195 /*command*/ 1, 5196 /*field*/ 6, 5197 /*bit_valid*/ 0, 5198 /*bit*/ 0); 5199 ctl_done((union ctl_io *)ctsio); 5200 return (CTL_RETVAL_COMPLETE); 5201 } 5202 5203 if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5204 descr[0] = 0; 5205 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5206 ctsio->kern_data_ptr = descr; 5207 len = min(len, sizeof(descr)); 5208 } else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5209 ctsio->kern_data_ptr = echo_descr; 5210 len = min(len, sizeof(echo_descr)); 5211 } else { 5212 if (lun->write_buffer == NULL) { 5213 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5214 M_CTL, M_WAITOK); 5215 } 5216 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5217 } 5218 ctsio->kern_data_len = len; 5219 ctsio->kern_total_len = len; 5220 ctsio->kern_data_resid = 0; 5221 ctsio->kern_rel_offset = 0; 5222 ctsio->kern_sg_entries = 0; 5223 ctl_set_success(ctsio); 5224 ctsio->be_move_done = ctl_config_move_done; 5225 ctl_datamove((union ctl_io *)ctsio); 5226 return (CTL_RETVAL_COMPLETE); 5227 } 5228 5229 int 5230 ctl_write_buffer(struct ctl_scsiio *ctsio) 5231 { 5232 struct scsi_write_buffer *cdb; 5233 struct ctl_lun *lun; 5234 int buffer_offset, len; 5235 5236 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5237 5238 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5239 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5240 5241 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) { 5242 ctl_set_invalid_field(ctsio, 5243 /*sks_valid*/ 1, 5244 /*command*/ 1, 5245 /*field*/ 1, 5246 /*bit_valid*/ 1, 5247 /*bit*/ 4); 5248 ctl_done((union ctl_io *)ctsio); 5249 return (CTL_RETVAL_COMPLETE); 5250 } 5251 5252 len = scsi_3btoul(cdb->length); 5253 buffer_offset = scsi_3btoul(cdb->offset); 5254 5255 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5256 ctl_set_invalid_field(ctsio, 5257 /*sks_valid*/ 1, 5258 /*command*/ 1, 5259 /*field*/ 6, 5260 /*bit_valid*/ 0, 5261 /*bit*/ 0); 5262 ctl_done((union ctl_io *)ctsio); 5263 return (CTL_RETVAL_COMPLETE); 5264 } 5265 5266 /* 5267 * If we've got a kernel request that hasn't been malloced yet, 5268 * malloc it and tell the caller the data buffer is here. 
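 * (This is the usual two-pass pattern for data-out commands in this file:
 * the first pass points kern_data_ptr at the LUN's lazily allocated write
 * buffer, sets CTL_FLAG_ALLOCATED and starts ctl_datamove(); once the
 * frontend has moved the data, ctl_config_move_done() re-invokes
 * ctl_scsiio(), and the second pass through this function simply returns
 * good status.)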
5269 */ 5270 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5271 if (lun->write_buffer == NULL) { 5272 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5273 M_CTL, M_WAITOK); 5274 } 5275 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5276 ctsio->kern_data_len = len; 5277 ctsio->kern_total_len = len; 5278 ctsio->kern_data_resid = 0; 5279 ctsio->kern_rel_offset = 0; 5280 ctsio->kern_sg_entries = 0; 5281 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5282 ctsio->be_move_done = ctl_config_move_done; 5283 ctl_datamove((union ctl_io *)ctsio); 5284 5285 return (CTL_RETVAL_COMPLETE); 5286 } 5287 5288 ctl_set_success(ctsio); 5289 ctl_done((union ctl_io *)ctsio); 5290 return (CTL_RETVAL_COMPLETE); 5291 } 5292 5293 int 5294 ctl_write_same(struct ctl_scsiio *ctsio) 5295 { 5296 struct ctl_lun *lun; 5297 struct ctl_lba_len_flags *lbalen; 5298 uint64_t lba; 5299 uint32_t num_blocks; 5300 int len, retval; 5301 uint8_t byte2; 5302 5303 retval = CTL_RETVAL_COMPLETE; 5304 5305 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5306 5307 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5308 5309 switch (ctsio->cdb[0]) { 5310 case WRITE_SAME_10: { 5311 struct scsi_write_same_10 *cdb; 5312 5313 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5314 5315 lba = scsi_4btoul(cdb->addr); 5316 num_blocks = scsi_2btoul(cdb->length); 5317 byte2 = cdb->byte2; 5318 break; 5319 } 5320 case WRITE_SAME_16: { 5321 struct scsi_write_same_16 *cdb; 5322 5323 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5324 5325 lba = scsi_8btou64(cdb->addr); 5326 num_blocks = scsi_4btoul(cdb->length); 5327 byte2 = cdb->byte2; 5328 break; 5329 } 5330 default: 5331 /* 5332 * We got a command we don't support. This shouldn't 5333 * happen, commands should be filtered out above us. 5334 */ 5335 ctl_set_invalid_opcode(ctsio); 5336 ctl_done((union ctl_io *)ctsio); 5337 5338 return (CTL_RETVAL_COMPLETE); 5339 break; /* NOTREACHED */ 5340 } 5341 5342 /* NDOB and ANCHOR flags can be used only together with UNMAP */ 5343 if ((byte2 & SWS_UNMAP) == 0 && 5344 (byte2 & (SWS_NDOB | SWS_ANCHOR)) != 0) { 5345 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5346 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5347 ctl_done((union ctl_io *)ctsio); 5348 return (CTL_RETVAL_COMPLETE); 5349 } 5350 5351 /* 5352 * The first check is to make sure we're in bounds, the second 5353 * check is to catch wrap-around problems. If the lba + num blocks 5354 * is less than the lba, then we've wrapped around and the block 5355 * range is invalid anyway. 5356 */ 5357 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5358 || ((lba + num_blocks) < lba)) { 5359 ctl_set_lba_out_of_range(ctsio); 5360 ctl_done((union ctl_io *)ctsio); 5361 return (CTL_RETVAL_COMPLETE); 5362 } 5363 5364 /* Zero number of blocks means "to the last logical block" */ 5365 if (num_blocks == 0) { 5366 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5367 ctl_set_invalid_field(ctsio, 5368 /*sks_valid*/ 0, 5369 /*command*/ 1, 5370 /*field*/ 0, 5371 /*bit_valid*/ 0, 5372 /*bit*/ 0); 5373 ctl_done((union ctl_io *)ctsio); 5374 return (CTL_RETVAL_COMPLETE); 5375 } 5376 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5377 } 5378 5379 len = lun->be_lun->blocksize; 5380 5381 /* 5382 * If we've got a kernel request that hasn't been malloced yet, 5383 * malloc it and tell the caller the data buffer is here. 
 */
        if ((byte2 & SWS_NDOB) == 0 &&
            (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
                ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
                ctsio->kern_data_len = len;
                ctsio->kern_total_len = len;
                ctsio->kern_data_resid = 0;
                ctsio->kern_rel_offset = 0;
                ctsio->kern_sg_entries = 0;
                ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
                ctsio->be_move_done = ctl_config_move_done;
                ctl_datamove((union ctl_io *)ctsio);

                return (CTL_RETVAL_COMPLETE);
        }

        lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
        lbalen->lba = lba;
        lbalen->len = num_blocks;
        lbalen->flags = byte2;
        retval = lun->backend->config_write((union ctl_io *)ctsio);

        return (retval);
}

int
ctl_unmap(struct ctl_scsiio *ctsio)
{
        struct ctl_lun *lun;
        struct scsi_unmap *cdb;
        struct ctl_ptr_len_flags *ptrlen;
        struct scsi_unmap_header *hdr;
        struct scsi_unmap_desc *buf, *end, *endnz, *range;
        uint64_t lba;
        uint32_t num_blocks;
        int len, retval;
        uint8_t byte2;

        retval = CTL_RETVAL_COMPLETE;

        CTL_DEBUG_PRINT(("ctl_unmap\n"));

        lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
        cdb = (struct scsi_unmap *)ctsio->cdb;

        len = scsi_2btoul(cdb->length);
        byte2 = cdb->byte2;

        /*
         * If we've got a kernel request that hasn't been malloced yet,
         * malloc it and tell the caller the data buffer is here.
         */
        if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
                ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
                ctsio->kern_data_len = len;
                ctsio->kern_total_len = len;
                ctsio->kern_data_resid = 0;
                ctsio->kern_rel_offset = 0;
                ctsio->kern_sg_entries = 0;
                ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
                ctsio->be_move_done = ctl_config_move_done;
                ctl_datamove((union ctl_io *)ctsio);

                return (CTL_RETVAL_COMPLETE);
        }

        len = ctsio->kern_total_len - ctsio->kern_data_resid;
        hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr;
        if (len < sizeof (*hdr) ||
            len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) ||
            len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) ||
            scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) {
                ctl_set_invalid_field(ctsio,
                                      /*sks_valid*/ 0,
                                      /*command*/ 0,
                                      /*field*/ 0,
                                      /*bit_valid*/ 0,
                                      /*bit*/ 0);
                goto done;
        }
        len = scsi_2btoul(hdr->desc_length);
        buf = (struct scsi_unmap_desc *)(hdr + 1);
        end = buf + len / sizeof(*buf);

        endnz = buf;
        for (range = buf; range < end; range++) {
                lba = scsi_8btou64(range->lba);
                num_blocks = scsi_4btoul(range->length);
                if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
                 || ((lba + num_blocks) < lba)) {
                        ctl_set_lba_out_of_range(ctsio);
                        ctl_done((union ctl_io *)ctsio);
                        return (CTL_RETVAL_COMPLETE);
                }
                if (num_blocks != 0)
                        endnz = range + 1;
        }

        /*
         * Block backend cannot handle a zero-length last range.
         * Filter it out and return if there is nothing left.
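         * For example, a descriptor list of { (lba 0, 16 blocks), (lba 100, 0 blocks) }
         * is trimmed to just the first descriptor, while a list containing only
         * zero-length ranges completes with good status without reaching the
         * backend at all.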
5485 */ 5486 len = (uint8_t *)endnz - (uint8_t *)buf; 5487 if (len == 0) { 5488 ctl_set_success(ctsio); 5489 goto done; 5490 } 5491 5492 mtx_lock(&lun->lun_lock); 5493 ptrlen = (struct ctl_ptr_len_flags *) 5494 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5495 ptrlen->ptr = (void *)buf; 5496 ptrlen->len = len; 5497 ptrlen->flags = byte2; 5498 ctl_check_blocked(lun); 5499 mtx_unlock(&lun->lun_lock); 5500 5501 retval = lun->backend->config_write((union ctl_io *)ctsio); 5502 return (retval); 5503 5504 done: 5505 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5506 free(ctsio->kern_data_ptr, M_CTL); 5507 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5508 } 5509 ctl_done((union ctl_io *)ctsio); 5510 return (CTL_RETVAL_COMPLETE); 5511 } 5512 5513 /* 5514 * Note that this function currently doesn't actually do anything inside 5515 * CTL to enforce things if the DQue bit is turned on. 5516 * 5517 * Also note that this function can't be used in the default case, because 5518 * the DQue bit isn't set in the changeable mask for the control mode page 5519 * anyway. This is just here as an example for how to implement a page 5520 * handler, and a placeholder in case we want to allow the user to turn 5521 * tagged queueing on and off. 5522 * 5523 * The D_SENSE bit handling is functional, however, and will turn 5524 * descriptor sense on and off for a given LUN. 5525 */ 5526 int 5527 ctl_control_page_handler(struct ctl_scsiio *ctsio, 5528 struct ctl_page_index *page_index, uint8_t *page_ptr) 5529 { 5530 struct scsi_control_page *current_cp, *saved_cp, *user_cp; 5531 struct ctl_lun *lun; 5532 int set_ua; 5533 uint32_t initidx; 5534 5535 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5536 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5537 set_ua = 0; 5538 5539 user_cp = (struct scsi_control_page *)page_ptr; 5540 current_cp = (struct scsi_control_page *) 5541 (page_index->page_data + (page_index->page_len * 5542 CTL_PAGE_CURRENT)); 5543 saved_cp = (struct scsi_control_page *) 5544 (page_index->page_data + (page_index->page_len * 5545 CTL_PAGE_SAVED)); 5546 5547 mtx_lock(&lun->lun_lock); 5548 if (((current_cp->rlec & SCP_DSENSE) == 0) 5549 && ((user_cp->rlec & SCP_DSENSE) != 0)) { 5550 /* 5551 * Descriptor sense is currently turned off and the user 5552 * wants to turn it on. 5553 */ 5554 current_cp->rlec |= SCP_DSENSE; 5555 saved_cp->rlec |= SCP_DSENSE; 5556 lun->flags |= CTL_LUN_SENSE_DESC; 5557 set_ua = 1; 5558 } else if (((current_cp->rlec & SCP_DSENSE) != 0) 5559 && ((user_cp->rlec & SCP_DSENSE) == 0)) { 5560 /* 5561 * Descriptor sense is currently turned on, and the user 5562 * wants to turn it off. 
	 */
                current_cp->rlec &= ~SCP_DSENSE;
                saved_cp->rlec &= ~SCP_DSENSE;
                lun->flags &= ~CTL_LUN_SENSE_DESC;
                set_ua = 1;
        }
        if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) !=
            (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) {
                current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK;
                current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
                saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK;
                saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
                set_ua = 1;
        }
        if ((current_cp->eca_and_aen & SCP_SWP) !=
            (user_cp->eca_and_aen & SCP_SWP)) {
                current_cp->eca_and_aen &= ~SCP_SWP;
                current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP;
                saved_cp->eca_and_aen &= ~SCP_SWP;
                saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP;
                set_ua = 1;
        }
        if (set_ua != 0)
                ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
        mtx_unlock(&lun->lun_lock);

        return (0);
}

int
ctl_caching_sp_handler(struct ctl_scsiio *ctsio,
                       struct ctl_page_index *page_index, uint8_t *page_ptr)
{
        struct scsi_caching_page *current_cp, *saved_cp, *user_cp;
        struct ctl_lun *lun;
        int set_ua;
        uint32_t initidx;

        lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
        initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
        set_ua = 0;

        user_cp = (struct scsi_caching_page *)page_ptr;
        current_cp = (struct scsi_caching_page *)
                (page_index->page_data + (page_index->page_len *
                CTL_PAGE_CURRENT));
        saved_cp = (struct scsi_caching_page *)
                (page_index->page_data + (page_index->page_len *
                CTL_PAGE_SAVED));

        mtx_lock(&lun->lun_lock);
        if ((current_cp->flags1 & (SCP_WCE | SCP_RCD)) !=
            (user_cp->flags1 & (SCP_WCE | SCP_RCD))) {
                current_cp->flags1 &= ~(SCP_WCE | SCP_RCD);
                current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD);
                saved_cp->flags1 &= ~(SCP_WCE | SCP_RCD);
                saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD);
                set_ua = 1;
        }
        if (set_ua != 0)
                ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
        mtx_unlock(&lun->lun_lock);

        return (0);
}

int
ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
                                struct ctl_page_index *page_index,
                                uint8_t *page_ptr)
{
        uint8_t *c;
        int i;

        c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs;
        ctl_time_io_secs =
                (c[0] << 8) |
                (c[1] << 0) |
                0;
        CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs));
        printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs);
        printf("page data:");
        for (i = 0; i < 8; i++)
                printf(" %.2x", page_ptr[i]);
        printf("\n");
        return (0);
}

int
ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
                               struct ctl_page_index *page_index,
                               int pc)
{
        struct copan_debugconf_subpage *page;

        page = (struct copan_debugconf_subpage *)(page_index->page_data +
                (page_index->page_len * pc));

        switch (pc) {
        case SMS_PAGE_CTRL_CHANGEABLE >> 6:
        case SMS_PAGE_CTRL_DEFAULT >> 6:
        case SMS_PAGE_CTRL_SAVED >> 6:
                /*
                 * We don't update the changeable or default bits for this page.
5667 */ 5668 break; 5669 case SMS_PAGE_CTRL_CURRENT >> 6: 5670 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8; 5671 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0; 5672 break; 5673 default: 5674 #ifdef NEEDTOPORT 5675 EPRINT(0, "Invalid PC %d!!", pc); 5676 #endif /* NEEDTOPORT */ 5677 break; 5678 } 5679 return (0); 5680 } 5681 5682 5683 static int 5684 ctl_do_mode_select(union ctl_io *io) 5685 { 5686 struct scsi_mode_page_header *page_header; 5687 struct ctl_page_index *page_index; 5688 struct ctl_scsiio *ctsio; 5689 int control_dev, page_len; 5690 int page_len_offset, page_len_size; 5691 union ctl_modepage_info *modepage_info; 5692 struct ctl_lun *lun; 5693 int *len_left, *len_used; 5694 int retval, i; 5695 5696 ctsio = &io->scsiio; 5697 page_index = NULL; 5698 page_len = 0; 5699 retval = CTL_RETVAL_COMPLETE; 5700 5701 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5702 5703 if (lun->be_lun->lun_type != T_DIRECT) 5704 control_dev = 1; 5705 else 5706 control_dev = 0; 5707 5708 modepage_info = (union ctl_modepage_info *) 5709 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 5710 len_left = &modepage_info->header.len_left; 5711 len_used = &modepage_info->header.len_used; 5712 5713 do_next_page: 5714 5715 page_header = (struct scsi_mode_page_header *) 5716 (ctsio->kern_data_ptr + *len_used); 5717 5718 if (*len_left == 0) { 5719 free(ctsio->kern_data_ptr, M_CTL); 5720 ctl_set_success(ctsio); 5721 ctl_done((union ctl_io *)ctsio); 5722 return (CTL_RETVAL_COMPLETE); 5723 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 5724 5725 free(ctsio->kern_data_ptr, M_CTL); 5726 ctl_set_param_len_error(ctsio); 5727 ctl_done((union ctl_io *)ctsio); 5728 return (CTL_RETVAL_COMPLETE); 5729 5730 } else if ((page_header->page_code & SMPH_SPF) 5731 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 5732 5733 free(ctsio->kern_data_ptr, M_CTL); 5734 ctl_set_param_len_error(ctsio); 5735 ctl_done((union ctl_io *)ctsio); 5736 return (CTL_RETVAL_COMPLETE); 5737 } 5738 5739 5740 /* 5741 * XXX KDM should we do something with the block descriptor? 5742 */ 5743 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 5744 5745 if ((control_dev != 0) 5746 && (lun->mode_pages.index[i].page_flags & 5747 CTL_PAGE_FLAG_DISK_ONLY)) 5748 continue; 5749 5750 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 5751 (page_header->page_code & SMPH_PC_MASK)) 5752 continue; 5753 5754 /* 5755 * If neither page has a subpage code, then we've got a 5756 * match. 5757 */ 5758 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0) 5759 && ((page_header->page_code & SMPH_SPF) == 0)) { 5760 page_index = &lun->mode_pages.index[i]; 5761 page_len = page_header->page_length; 5762 break; 5763 } 5764 5765 /* 5766 * If both pages have subpages, then the subpage numbers 5767 * have to match. 5768 */ 5769 if ((lun->mode_pages.index[i].page_code & SMPH_SPF) 5770 && (page_header->page_code & SMPH_SPF)) { 5771 struct scsi_mode_page_header_sp *sph; 5772 5773 sph = (struct scsi_mode_page_header_sp *)page_header; 5774 5775 if (lun->mode_pages.index[i].subpage == 5776 sph->subpage) { 5777 page_index = &lun->mode_pages.index[i]; 5778 page_len = scsi_2btoul(sph->page_length); 5779 break; 5780 } 5781 } 5782 } 5783 5784 /* 5785 * If we couldn't find the page, or if we don't have a mode select 5786 * handler for it, send back an error to the user. 
	 */
        if ((page_index == NULL)
         || (page_index->select_handler == NULL)) {
                ctl_set_invalid_field(ctsio,
                                      /*sks_valid*/ 1,
                                      /*command*/ 0,
                                      /*field*/ *len_used,
                                      /*bit_valid*/ 0,
                                      /*bit*/ 0);
                free(ctsio->kern_data_ptr, M_CTL);
                ctl_done((union ctl_io *)ctsio);
                return (CTL_RETVAL_COMPLETE);
        }

        if (page_index->page_code & SMPH_SPF) {
                page_len_offset = 2;
                page_len_size = 2;
        } else {
                page_len_size = 1;
                page_len_offset = 1;
        }

        /*
         * If the length the initiator gives us isn't the one we specify in
         * the mode page header, or if they didn't specify enough data in
         * the CDB to avoid truncating this page, kick out the request.
         */
        if ((page_len != (page_index->page_len - page_len_offset -
            page_len_size))
         || (*len_left < page_index->page_len)) {
                ctl_set_invalid_field(ctsio,
                                      /*sks_valid*/ 1,
                                      /*command*/ 0,
                                      /*field*/ *len_used + page_len_offset,
                                      /*bit_valid*/ 0,
                                      /*bit*/ 0);
                free(ctsio->kern_data_ptr, M_CTL);
                ctl_done((union ctl_io *)ctsio);
                return (CTL_RETVAL_COMPLETE);
        }

        /*
         * Run through the mode page, checking to make sure that the bits
         * the user changed are actually legal for him to change.
         */
        for (i = 0; i < page_index->page_len; i++) {
                uint8_t *user_byte, *change_mask, *current_byte;
                int bad_bit;
                int j;

                user_byte = (uint8_t *)page_header + i;
                change_mask = page_index->page_data +
                              (page_index->page_len * CTL_PAGE_CHANGEABLE) + i;
                current_byte = page_index->page_data +
                               (page_index->page_len * CTL_PAGE_CURRENT) + i;

                /*
                 * Check to see whether the user set any bits in this byte
                 * that he is not allowed to set.
                 */
                if ((*user_byte & ~(*change_mask)) ==
                    (*current_byte & ~(*change_mask)))
                        continue;

                /*
                 * Go through bit by bit to determine which one is illegal.
                 */
                bad_bit = 0;
                for (j = 7; j >= 0; j--) {
                        if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
                            (((1 << j) & ~(*change_mask)) & *current_byte)) {
                                bad_bit = j;
                                break;
                        }
                }
                ctl_set_invalid_field(ctsio,
                                      /*sks_valid*/ 1,
                                      /*command*/ 0,
                                      /*field*/ *len_used + i,
                                      /*bit_valid*/ 1,
                                      /*bit*/ bad_bit);
                free(ctsio->kern_data_ptr, M_CTL);
                ctl_done((union ctl_io *)ctsio);
                return (CTL_RETVAL_COMPLETE);
        }

        /*
         * Update these before we call the page handler, since we may
         * end up getting called back one way or another before the handler
         * returns to this context.
         */
        *len_left -= page_index->page_len;
        *len_used += page_index->page_len;

        retval = page_index->select_handler(ctsio, page_index,
                                            (uint8_t *)page_header);

        /*
         * If the page handler returns CTL_RETVAL_QUEUED, then we need to
         * wait until this queued command completes to finish processing
         * the mode page. If it returns anything other than
         * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
         * already set the sense information, freed the data pointer, and
         * completed the io for us.
         */
        if (retval != CTL_RETVAL_COMPLETE)
                goto bailout_no_done;

        /*
         * If the initiator sent us more than one page, parse the next one.
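         * For instance, a single MODE SELECT(10) parameter list carrying both
         * the caching page (0x08) and the control page (0x0a) makes two passes
         * through do_next_page, with *len_used advanced past each page in turn.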
5899 */ 5900 if (*len_left > 0) 5901 goto do_next_page; 5902 5903 ctl_set_success(ctsio); 5904 free(ctsio->kern_data_ptr, M_CTL); 5905 ctl_done((union ctl_io *)ctsio); 5906 5907 bailout_no_done: 5908 5909 return (CTL_RETVAL_COMPLETE); 5910 5911 } 5912 5913 int 5914 ctl_mode_select(struct ctl_scsiio *ctsio) 5915 { 5916 int param_len, pf, sp; 5917 int header_size, bd_len; 5918 int len_left, len_used; 5919 struct ctl_page_index *page_index; 5920 struct ctl_lun *lun; 5921 int control_dev, page_len; 5922 union ctl_modepage_info *modepage_info; 5923 int retval; 5924 5925 pf = 0; 5926 sp = 0; 5927 page_len = 0; 5928 len_used = 0; 5929 len_left = 0; 5930 retval = 0; 5931 bd_len = 0; 5932 page_index = NULL; 5933 5934 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5935 5936 if (lun->be_lun->lun_type != T_DIRECT) 5937 control_dev = 1; 5938 else 5939 control_dev = 0; 5940 5941 switch (ctsio->cdb[0]) { 5942 case MODE_SELECT_6: { 5943 struct scsi_mode_select_6 *cdb; 5944 5945 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 5946 5947 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 5948 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 5949 5950 param_len = cdb->length; 5951 header_size = sizeof(struct scsi_mode_header_6); 5952 break; 5953 } 5954 case MODE_SELECT_10: { 5955 struct scsi_mode_select_10 *cdb; 5956 5957 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 5958 5959 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 5960 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 5961 5962 param_len = scsi_2btoul(cdb->length); 5963 header_size = sizeof(struct scsi_mode_header_10); 5964 break; 5965 } 5966 default: 5967 ctl_set_invalid_opcode(ctsio); 5968 ctl_done((union ctl_io *)ctsio); 5969 return (CTL_RETVAL_COMPLETE); 5970 break; /* NOTREACHED */ 5971 } 5972 5973 /* 5974 * From SPC-3: 5975 * "A parameter list length of zero indicates that the Data-Out Buffer 5976 * shall be empty. This condition shall not be considered as an error." 5977 */ 5978 if (param_len == 0) { 5979 ctl_set_success(ctsio); 5980 ctl_done((union ctl_io *)ctsio); 5981 return (CTL_RETVAL_COMPLETE); 5982 } 5983 5984 /* 5985 * Since we'll hit this the first time through, prior to 5986 * allocation, we don't need to free a data buffer here. 5987 */ 5988 if (param_len < header_size) { 5989 ctl_set_param_len_error(ctsio); 5990 ctl_done((union ctl_io *)ctsio); 5991 return (CTL_RETVAL_COMPLETE); 5992 } 5993 5994 /* 5995 * Allocate the data buffer and grab the user's data. In theory, 5996 * we shouldn't have to sanity check the parameter list length here 5997 * because the maximum size is 64K. We should be able to malloc 5998 * that much without too many problems. 
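 * (The MODE SELECT(10) parameter list length is a 16-bit CDB field, so
 * param_len is bounded by 65535; the 6-byte CDB limits it to 255.)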
5999 */ 6000 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6001 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6002 ctsio->kern_data_len = param_len; 6003 ctsio->kern_total_len = param_len; 6004 ctsio->kern_data_resid = 0; 6005 ctsio->kern_rel_offset = 0; 6006 ctsio->kern_sg_entries = 0; 6007 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6008 ctsio->be_move_done = ctl_config_move_done; 6009 ctl_datamove((union ctl_io *)ctsio); 6010 6011 return (CTL_RETVAL_COMPLETE); 6012 } 6013 6014 switch (ctsio->cdb[0]) { 6015 case MODE_SELECT_6: { 6016 struct scsi_mode_header_6 *mh6; 6017 6018 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6019 bd_len = mh6->blk_desc_len; 6020 break; 6021 } 6022 case MODE_SELECT_10: { 6023 struct scsi_mode_header_10 *mh10; 6024 6025 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6026 bd_len = scsi_2btoul(mh10->blk_desc_len); 6027 break; 6028 } 6029 default: 6030 panic("Invalid CDB type %#x", ctsio->cdb[0]); 6031 break; 6032 } 6033 6034 if (param_len < (header_size + bd_len)) { 6035 free(ctsio->kern_data_ptr, M_CTL); 6036 ctl_set_param_len_error(ctsio); 6037 ctl_done((union ctl_io *)ctsio); 6038 return (CTL_RETVAL_COMPLETE); 6039 } 6040 6041 /* 6042 * Set the IO_CONT flag, so that if this I/O gets passed to 6043 * ctl_config_write_done(), it'll get passed back to 6044 * ctl_do_mode_select() for further processing, or completion if 6045 * we're all done. 6046 */ 6047 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6048 ctsio->io_cont = ctl_do_mode_select; 6049 6050 modepage_info = (union ctl_modepage_info *) 6051 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6052 6053 memset(modepage_info, 0, sizeof(*modepage_info)); 6054 6055 len_left = param_len - header_size - bd_len; 6056 len_used = header_size + bd_len; 6057 6058 modepage_info->header.len_left = len_left; 6059 modepage_info->header.len_used = len_used; 6060 6061 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6062 } 6063 6064 int 6065 ctl_mode_sense(struct ctl_scsiio *ctsio) 6066 { 6067 struct ctl_lun *lun; 6068 int pc, page_code, dbd, llba, subpage; 6069 int alloc_len, page_len, header_len, total_len; 6070 struct scsi_mode_block_descr *block_desc; 6071 struct ctl_page_index *page_index; 6072 int control_dev; 6073 6074 dbd = 0; 6075 llba = 0; 6076 block_desc = NULL; 6077 page_index = NULL; 6078 6079 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6080 6081 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6082 6083 if (lun->be_lun->lun_type != T_DIRECT) 6084 control_dev = 1; 6085 else 6086 control_dev = 0; 6087 6088 switch (ctsio->cdb[0]) { 6089 case MODE_SENSE_6: { 6090 struct scsi_mode_sense_6 *cdb; 6091 6092 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6093 6094 header_len = sizeof(struct scsi_mode_hdr_6); 6095 if (cdb->byte2 & SMS_DBD) 6096 dbd = 1; 6097 else 6098 header_len += sizeof(struct scsi_mode_block_descr); 6099 6100 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6101 page_code = cdb->page & SMS_PAGE_CODE; 6102 subpage = cdb->subpage; 6103 alloc_len = cdb->length; 6104 break; 6105 } 6106 case MODE_SENSE_10: { 6107 struct scsi_mode_sense_10 *cdb; 6108 6109 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6110 6111 header_len = sizeof(struct scsi_mode_hdr_10); 6112 6113 if (cdb->byte2 & SMS_DBD) 6114 dbd = 1; 6115 else 6116 header_len += sizeof(struct scsi_mode_block_descr); 6117 if (cdb->byte2 & SMS10_LLBAA) 6118 llba = 1; 6119 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6120 page_code = cdb->page & SMS_PAGE_CODE; 6121 subpage = cdb->subpage; 6122 alloc_len 
= scsi_2btoul(cdb->length); 6123 break; 6124 } 6125 default: 6126 ctl_set_invalid_opcode(ctsio); 6127 ctl_done((union ctl_io *)ctsio); 6128 return (CTL_RETVAL_COMPLETE); 6129 break; /* NOTREACHED */ 6130 } 6131 6132 /* 6133 * We have to make a first pass through to calculate the size of 6134 * the pages that match the user's query. Then we allocate enough 6135 * memory to hold it, and actually copy the data into the buffer. 6136 */ 6137 switch (page_code) { 6138 case SMS_ALL_PAGES_PAGE: { 6139 int i; 6140 6141 page_len = 0; 6142 6143 /* 6144 * At the moment, values other than 0 and 0xff here are 6145 * reserved according to SPC-3. 6146 */ 6147 if ((subpage != SMS_SUBPAGE_PAGE_0) 6148 && (subpage != SMS_SUBPAGE_ALL)) { 6149 ctl_set_invalid_field(ctsio, 6150 /*sks_valid*/ 1, 6151 /*command*/ 1, 6152 /*field*/ 3, 6153 /*bit_valid*/ 0, 6154 /*bit*/ 0); 6155 ctl_done((union ctl_io *)ctsio); 6156 return (CTL_RETVAL_COMPLETE); 6157 } 6158 6159 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6160 if ((control_dev != 0) 6161 && (lun->mode_pages.index[i].page_flags & 6162 CTL_PAGE_FLAG_DISK_ONLY)) 6163 continue; 6164 6165 /* 6166 * We don't use this subpage if the user didn't 6167 * request all subpages. 6168 */ 6169 if ((lun->mode_pages.index[i].subpage != 0) 6170 && (subpage == SMS_SUBPAGE_PAGE_0)) 6171 continue; 6172 6173 #if 0 6174 printf("found page %#x len %d\n", 6175 lun->mode_pages.index[i].page_code & 6176 SMPH_PC_MASK, 6177 lun->mode_pages.index[i].page_len); 6178 #endif 6179 page_len += lun->mode_pages.index[i].page_len; 6180 } 6181 break; 6182 } 6183 default: { 6184 int i; 6185 6186 page_len = 0; 6187 6188 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6189 /* Look for the right page code */ 6190 if ((lun->mode_pages.index[i].page_code & 6191 SMPH_PC_MASK) != page_code) 6192 continue; 6193 6194 /* Look for the right subpage or the subpage wildcard*/ 6195 if ((lun->mode_pages.index[i].subpage != subpage) 6196 && (subpage != SMS_SUBPAGE_ALL)) 6197 continue; 6198 6199 /* Make sure the page is supported for this dev type */ 6200 if ((control_dev != 0) 6201 && (lun->mode_pages.index[i].page_flags & 6202 CTL_PAGE_FLAG_DISK_ONLY)) 6203 continue; 6204 6205 #if 0 6206 printf("found page %#x len %d\n", 6207 lun->mode_pages.index[i].page_code & 6208 SMPH_PC_MASK, 6209 lun->mode_pages.index[i].page_len); 6210 #endif 6211 6212 page_len += lun->mode_pages.index[i].page_len; 6213 } 6214 6215 if (page_len == 0) { 6216 ctl_set_invalid_field(ctsio, 6217 /*sks_valid*/ 1, 6218 /*command*/ 1, 6219 /*field*/ 2, 6220 /*bit_valid*/ 1, 6221 /*bit*/ 5); 6222 ctl_done((union ctl_io *)ctsio); 6223 return (CTL_RETVAL_COMPLETE); 6224 } 6225 break; 6226 } 6227 } 6228 6229 total_len = header_len + page_len; 6230 #if 0 6231 printf("header_len = %d, page_len = %d, total_len = %d\n", 6232 header_len, page_len, total_len); 6233 #endif 6234 6235 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6236 ctsio->kern_sg_entries = 0; 6237 ctsio->kern_data_resid = 0; 6238 ctsio->kern_rel_offset = 0; 6239 if (total_len < alloc_len) { 6240 ctsio->residual = alloc_len - total_len; 6241 ctsio->kern_data_len = total_len; 6242 ctsio->kern_total_len = total_len; 6243 } else { 6244 ctsio->residual = 0; 6245 ctsio->kern_data_len = alloc_len; 6246 ctsio->kern_total_len = alloc_len; 6247 } 6248 6249 switch (ctsio->cdb[0]) { 6250 case MODE_SENSE_6: { 6251 struct scsi_mode_hdr_6 *header; 6252 6253 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6254 6255 header->datalen = MIN(total_len - 1, 254); 6256 if (control_dev == 0) { 
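			/*
			 * Direct access devices advertise DPOFUA support here;
			 * the WP bit is also reported if the LUN is read-only
			 * or the software write protect (SWP) bit is set in
			 * the control mode page.
			 */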
6257 header->dev_specific = 0x10; /* DPOFUA */ 6258 if ((lun->flags & CTL_LUN_READONLY) || 6259 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6260 .eca_and_aen & SCP_SWP) != 0) 6261 header->dev_specific |= 0x80; /* WP */ 6262 } 6263 if (dbd) 6264 header->block_descr_len = 0; 6265 else 6266 header->block_descr_len = 6267 sizeof(struct scsi_mode_block_descr); 6268 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6269 break; 6270 } 6271 case MODE_SENSE_10: { 6272 struct scsi_mode_hdr_10 *header; 6273 int datalen; 6274 6275 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6276 6277 datalen = MIN(total_len - 2, 65533); 6278 scsi_ulto2b(datalen, header->datalen); 6279 if (control_dev == 0) { 6280 header->dev_specific = 0x10; /* DPOFUA */ 6281 if ((lun->flags & CTL_LUN_READONLY) || 6282 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6283 .eca_and_aen & SCP_SWP) != 0) 6284 header->dev_specific |= 0x80; /* WP */ 6285 } 6286 if (dbd) 6287 scsi_ulto2b(0, header->block_descr_len); 6288 else 6289 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6290 header->block_descr_len); 6291 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6292 break; 6293 } 6294 default: 6295 panic("invalid CDB type %#x", ctsio->cdb[0]); 6296 break; /* NOTREACHED */ 6297 } 6298 6299 /* 6300 * If we've got a disk, use its blocksize in the block 6301 * descriptor. Otherwise, just set it to 0. 6302 */ 6303 if (dbd == 0) { 6304 if (control_dev == 0) 6305 scsi_ulto3b(lun->be_lun->blocksize, 6306 block_desc->block_len); 6307 else 6308 scsi_ulto3b(0, block_desc->block_len); 6309 } 6310 6311 switch (page_code) { 6312 case SMS_ALL_PAGES_PAGE: { 6313 int i, data_used; 6314 6315 data_used = header_len; 6316 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6317 struct ctl_page_index *page_index; 6318 6319 page_index = &lun->mode_pages.index[i]; 6320 6321 if ((control_dev != 0) 6322 && (page_index->page_flags & 6323 CTL_PAGE_FLAG_DISK_ONLY)) 6324 continue; 6325 6326 /* 6327 * We don't use this subpage if the user didn't 6328 * request all subpages. We already checked (above) 6329 * to make sure the user only specified a subpage 6330 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6331 */ 6332 if ((page_index->subpage != 0) 6333 && (subpage == SMS_SUBPAGE_PAGE_0)) 6334 continue; 6335 6336 /* 6337 * Call the handler, if it exists, to update the 6338 * page to the latest values. 6339 */ 6340 if (page_index->sense_handler != NULL) 6341 page_index->sense_handler(ctsio, page_index,pc); 6342 6343 memcpy(ctsio->kern_data_ptr + data_used, 6344 page_index->page_data + 6345 (page_index->page_len * pc), 6346 page_index->page_len); 6347 data_used += page_index->page_len; 6348 } 6349 break; 6350 } 6351 default: { 6352 int i, data_used; 6353 6354 data_used = header_len; 6355 6356 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6357 struct ctl_page_index *page_index; 6358 6359 page_index = &lun->mode_pages.index[i]; 6360 6361 /* Look for the right page code */ 6362 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6363 continue; 6364 6365 /* Look for the right subpage or the subpage wildcard*/ 6366 if ((page_index->subpage != subpage) 6367 && (subpage != SMS_SUBPAGE_ALL)) 6368 continue; 6369 6370 /* Make sure the page is supported for this dev type */ 6371 if ((control_dev != 0) 6372 && (page_index->page_flags & 6373 CTL_PAGE_FLAG_DISK_ONLY)) 6374 continue; 6375 6376 /* 6377 * Call the handler, if it exists, to update the 6378 * page to the latest values. 
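		 * Pages without a handler are returned from the cached
		 * page_data as-is.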
6379 */ 6380 if (page_index->sense_handler != NULL) 6381 page_index->sense_handler(ctsio, page_index,pc); 6382 6383 memcpy(ctsio->kern_data_ptr + data_used, 6384 page_index->page_data + 6385 (page_index->page_len * pc), 6386 page_index->page_len); 6387 data_used += page_index->page_len; 6388 } 6389 break; 6390 } 6391 } 6392 6393 ctl_set_success(ctsio); 6394 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6395 ctsio->be_move_done = ctl_config_move_done; 6396 ctl_datamove((union ctl_io *)ctsio); 6397 return (CTL_RETVAL_COMPLETE); 6398 } 6399 6400 int 6401 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6402 struct ctl_page_index *page_index, 6403 int pc) 6404 { 6405 struct ctl_lun *lun; 6406 struct scsi_log_param_header *phdr; 6407 uint8_t *data; 6408 uint64_t val; 6409 6410 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6411 data = page_index->page_data; 6412 6413 if (lun->backend->lun_attr != NULL && 6414 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) 6415 != UINT64_MAX) { 6416 phdr = (struct scsi_log_param_header *)data; 6417 scsi_ulto2b(0x0001, phdr->param_code); 6418 phdr->param_control = SLP_LBIN | SLP_LP; 6419 phdr->param_len = 8; 6420 data = (uint8_t *)(phdr + 1); 6421 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6422 data[4] = 0x02; /* per-pool */ 6423 data += phdr->param_len; 6424 } 6425 6426 if (lun->backend->lun_attr != NULL && 6427 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) 6428 != UINT64_MAX) { 6429 phdr = (struct scsi_log_param_header *)data; 6430 scsi_ulto2b(0x0002, phdr->param_code); 6431 phdr->param_control = SLP_LBIN | SLP_LP; 6432 phdr->param_len = 8; 6433 data = (uint8_t *)(phdr + 1); 6434 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6435 data[4] = 0x01; /* per-LUN */ 6436 data += phdr->param_len; 6437 } 6438 6439 if (lun->backend->lun_attr != NULL && 6440 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail")) 6441 != UINT64_MAX) { 6442 phdr = (struct scsi_log_param_header *)data; 6443 scsi_ulto2b(0x00f1, phdr->param_code); 6444 phdr->param_control = SLP_LBIN | SLP_LP; 6445 phdr->param_len = 8; 6446 data = (uint8_t *)(phdr + 1); 6447 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6448 data[4] = 0x02; /* per-pool */ 6449 data += phdr->param_len; 6450 } 6451 6452 if (lun->backend->lun_attr != NULL && 6453 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) 6454 != UINT64_MAX) { 6455 phdr = (struct scsi_log_param_header *)data; 6456 scsi_ulto2b(0x00f2, phdr->param_code); 6457 phdr->param_control = SLP_LBIN | SLP_LP; 6458 phdr->param_len = 8; 6459 data = (uint8_t *)(phdr + 1); 6460 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6461 data[4] = 0x02; /* per-pool */ 6462 data += phdr->param_len; 6463 } 6464 6465 page_index->page_len = data - page_index->page_data; 6466 return (0); 6467 } 6468 6469 int 6470 ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, 6471 struct ctl_page_index *page_index, 6472 int pc) 6473 { 6474 struct ctl_lun *lun; 6475 struct stat_page *data; 6476 uint64_t rn, wn, rb, wb; 6477 struct bintime rt, wt; 6478 int i; 6479 6480 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6481 data = (struct stat_page *)page_index->page_data; 6482 6483 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); 6484 data->sap.hdr.param_control = SLP_LBIN; 6485 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - 6486 sizeof(struct scsi_log_param_header); 6487 rn = wn = rb = wb = 0; 6488 bintime_clear(&rt); 6489 bintime_clear(&wt); 6490 for (i = 0; i < 
CTL_MAX_PORTS; i++) {
		rn += lun->stats.ports[i].operations[CTL_STATS_READ];
		wn += lun->stats.ports[i].operations[CTL_STATS_WRITE];
		rb += lun->stats.ports[i].bytes[CTL_STATS_READ];
		wb += lun->stats.ports[i].bytes[CTL_STATS_WRITE];
		bintime_add(&rt, &lun->stats.ports[i].time[CTL_STATS_READ]);
		bintime_add(&wt, &lun->stats.ports[i].time[CTL_STATS_WRITE]);
	}
	scsi_u64to8b(rn, data->sap.read_num);
	scsi_u64to8b(wn, data->sap.write_num);
	if (lun->stats.blocksize > 0) {
		scsi_u64to8b(wb / lun->stats.blocksize,
		    data->sap.recvieved_lba);
		scsi_u64to8b(rb / lun->stats.blocksize,
		    data->sap.transmitted_lba);
	}
	scsi_u64to8b((uint64_t)rt.sec * 1000 + rt.frac / (UINT64_MAX / 1000),
	    data->sap.read_int);
	scsi_u64to8b((uint64_t)wt.sec * 1000 + wt.frac / (UINT64_MAX / 1000),
	    data->sap.write_int);
	scsi_u64to8b(0, data->sap.weighted_num);
	scsi_u64to8b(0, data->sap.weighted_int);
	scsi_ulto2b(SLP_IT, data->it.hdr.param_code);
	data->it.hdr.param_control = SLP_LBIN;
	data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) -
	    sizeof(struct scsi_log_param_header);
#ifdef CTL_TIME_IO
	scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int);
#endif
	scsi_ulto2b(SLP_TI, data->ti.hdr.param_code);
	data->ti.hdr.param_control = SLP_LBIN;
	data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) -
	    sizeof(struct scsi_log_param_header);
	scsi_ulto4b(3, data->ti.exponent);
	scsi_ulto4b(1, data->ti.integer);

	page_index->page_len = sizeof(*data);
	return (0);
}

int
ctl_log_sense(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	int i, pc, page_code, subpage;
	int alloc_len, total_len;
	struct ctl_page_index *page_index;
	struct scsi_log_sense *cdb;
	struct scsi_log_header *header;

	CTL_DEBUG_PRINT(("ctl_log_sense\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_log_sense *)ctsio->cdb;
	pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6;
	page_code = cdb->page & SLS_PAGE_CODE;
	subpage = cdb->subpage;
	alloc_len = scsi_2btoul(cdb->length);

	page_index = NULL;
	for (i = 0; i < CTL_NUM_LOG_PAGES; i++) {
		page_index = &lun->log_pages.index[i];

		/* Look for the right page code */
		if ((page_index->page_code & SL_PAGE_CODE) != page_code)
			continue;

		/* Look for the right subpage or the subpage wildcard */
		if (page_index->subpage != subpage)
			continue;

		break;
	}
	if (i >= CTL_NUM_LOG_PAGES) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	total_len = sizeof(struct scsi_log_header) + page_index->page_len;

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}

	header = (struct scsi_log_header *)ctsio->kern_data_ptr;
	header->page = page_index->page_code;
	if (page_index->subpage) {
		header->page |= SL_SPF;
		header->subpage = page_index->subpage;
	}
	scsi_ulto2b(page_index->page_len, header->datalen);

	/*
	 * Call the handler, if it exists, to update the
	 * page to the latest values.
	 */
	if (page_index->sense_handler != NULL)
		page_index->sense_handler(ctsio, page_index, pc);

	memcpy(header + 1, page_index->page_data, page_index->page_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

int
ctl_read_capacity(struct ctl_scsiio *ctsio)
{
	struct scsi_read_capacity *cdb;
	struct scsi_read_capacity_data *data;
	struct ctl_lun *lun;
	uint32_t lba;

	CTL_DEBUG_PRINT(("ctl_read_capacity\n"));

	cdb = (struct scsi_read_capacity *)ctsio->cdb;

	lba = scsi_4btoul(cdb->addr);
	if (((cdb->pmi & SRC_PMI) == 0)
	 && (lba != 0)) {
		ctl_set_invalid_field(/*ctsio*/ ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
	data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
	ctsio->residual = 0;
	ctsio->kern_data_len = sizeof(*data);
	ctsio->kern_total_len = sizeof(*data);
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * If the maximum LBA is greater than 0xfffffffe, the user must
	 * issue a SERVICE ACTION IN (16) command, with the read capacity
	 * service action set.
	 */
	if (lun->be_lun->maxlba > 0xfffffffe)
		scsi_ulto4b(0xffffffff, data->addr);
	else
		scsi_ulto4b(lun->be_lun->maxlba, data->addr);

	/*
	 * XXX KDM this may not be 512 bytes...
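	 * The value reported is the backend's logical block size, which need
	 * not be 512 bytes.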
6662 */ 6663 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6664 6665 ctl_set_success(ctsio); 6666 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6667 ctsio->be_move_done = ctl_config_move_done; 6668 ctl_datamove((union ctl_io *)ctsio); 6669 return (CTL_RETVAL_COMPLETE); 6670 } 6671 6672 int 6673 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 6674 { 6675 struct scsi_read_capacity_16 *cdb; 6676 struct scsi_read_capacity_data_long *data; 6677 struct ctl_lun *lun; 6678 uint64_t lba; 6679 uint32_t alloc_len; 6680 6681 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 6682 6683 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 6684 6685 alloc_len = scsi_4btoul(cdb->alloc_len); 6686 lba = scsi_8btou64(cdb->addr); 6687 6688 if ((cdb->reladr & SRC16_PMI) 6689 && (lba != 0)) { 6690 ctl_set_invalid_field(/*ctsio*/ ctsio, 6691 /*sks_valid*/ 1, 6692 /*command*/ 1, 6693 /*field*/ 2, 6694 /*bit_valid*/ 0, 6695 /*bit*/ 0); 6696 ctl_done((union ctl_io *)ctsio); 6697 return (CTL_RETVAL_COMPLETE); 6698 } 6699 6700 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6701 6702 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6703 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 6704 6705 if (sizeof(*data) < alloc_len) { 6706 ctsio->residual = alloc_len - sizeof(*data); 6707 ctsio->kern_data_len = sizeof(*data); 6708 ctsio->kern_total_len = sizeof(*data); 6709 } else { 6710 ctsio->residual = 0; 6711 ctsio->kern_data_len = alloc_len; 6712 ctsio->kern_total_len = alloc_len; 6713 } 6714 ctsio->kern_data_resid = 0; 6715 ctsio->kern_rel_offset = 0; 6716 ctsio->kern_sg_entries = 0; 6717 6718 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 6719 /* XXX KDM this may not be 512 bytes... */ 6720 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6721 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 6722 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 6723 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 6724 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 6725 6726 ctl_set_success(ctsio); 6727 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6728 ctsio->be_move_done = ctl_config_move_done; 6729 ctl_datamove((union ctl_io *)ctsio); 6730 return (CTL_RETVAL_COMPLETE); 6731 } 6732 6733 int 6734 ctl_get_lba_status(struct ctl_scsiio *ctsio) 6735 { 6736 struct scsi_get_lba_status *cdb; 6737 struct scsi_get_lba_status_data *data; 6738 struct ctl_lun *lun; 6739 struct ctl_lba_len_flags *lbalen; 6740 uint64_t lba; 6741 uint32_t alloc_len, total_len; 6742 int retval; 6743 6744 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 6745 6746 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6747 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 6748 lba = scsi_8btou64(cdb->addr); 6749 alloc_len = scsi_4btoul(cdb->alloc_len); 6750 6751 if (lba > lun->be_lun->maxlba) { 6752 ctl_set_lba_out_of_range(ctsio); 6753 ctl_done((union ctl_io *)ctsio); 6754 return (CTL_RETVAL_COMPLETE); 6755 } 6756 6757 total_len = sizeof(*data) + sizeof(data->descr[0]); 6758 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6759 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 6760 6761 if (total_len < alloc_len) { 6762 ctsio->residual = alloc_len - total_len; 6763 ctsio->kern_data_len = total_len; 6764 ctsio->kern_total_len = total_len; 6765 } else { 6766 ctsio->residual = 0; 6767 ctsio->kern_data_len = alloc_len; 6768 ctsio->kern_total_len = alloc_len; 6769 } 6770 ctsio->kern_data_resid = 0; 6771 ctsio->kern_rel_offset = 0; 6772 
ctsio->kern_sg_entries = 0; 6773 6774 /* Fill dummy data in case backend can't tell anything. */ 6775 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 6776 scsi_u64to8b(lba, data->descr[0].addr); 6777 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 6778 data->descr[0].length); 6779 data->descr[0].status = 0; /* Mapped or unknown. */ 6780 6781 ctl_set_success(ctsio); 6782 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6783 ctsio->be_move_done = ctl_config_move_done; 6784 6785 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 6786 lbalen->lba = lba; 6787 lbalen->len = total_len; 6788 lbalen->flags = 0; 6789 retval = lun->backend->config_read((union ctl_io *)ctsio); 6790 return (CTL_RETVAL_COMPLETE); 6791 } 6792 6793 int 6794 ctl_read_defect(struct ctl_scsiio *ctsio) 6795 { 6796 struct scsi_read_defect_data_10 *ccb10; 6797 struct scsi_read_defect_data_12 *ccb12; 6798 struct scsi_read_defect_data_hdr_10 *data10; 6799 struct scsi_read_defect_data_hdr_12 *data12; 6800 uint32_t alloc_len, data_len; 6801 uint8_t format; 6802 6803 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 6804 6805 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 6806 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 6807 format = ccb10->format; 6808 alloc_len = scsi_2btoul(ccb10->alloc_length); 6809 data_len = sizeof(*data10); 6810 } else { 6811 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 6812 format = ccb12->format; 6813 alloc_len = scsi_4btoul(ccb12->alloc_length); 6814 data_len = sizeof(*data12); 6815 } 6816 if (alloc_len == 0) { 6817 ctl_set_success(ctsio); 6818 ctl_done((union ctl_io *)ctsio); 6819 return (CTL_RETVAL_COMPLETE); 6820 } 6821 6822 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 6823 if (data_len < alloc_len) { 6824 ctsio->residual = alloc_len - data_len; 6825 ctsio->kern_data_len = data_len; 6826 ctsio->kern_total_len = data_len; 6827 } else { 6828 ctsio->residual = 0; 6829 ctsio->kern_data_len = alloc_len; 6830 ctsio->kern_total_len = alloc_len; 6831 } 6832 ctsio->kern_data_resid = 0; 6833 ctsio->kern_rel_offset = 0; 6834 ctsio->kern_sg_entries = 0; 6835 6836 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 6837 data10 = (struct scsi_read_defect_data_hdr_10 *) 6838 ctsio->kern_data_ptr; 6839 data10->format = format; 6840 scsi_ulto2b(0, data10->length); 6841 } else { 6842 data12 = (struct scsi_read_defect_data_hdr_12 *) 6843 ctsio->kern_data_ptr; 6844 data12->format = format; 6845 scsi_ulto2b(0, data12->generation); 6846 scsi_ulto4b(0, data12->length); 6847 } 6848 6849 ctl_set_success(ctsio); 6850 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6851 ctsio->be_move_done = ctl_config_move_done; 6852 ctl_datamove((union ctl_io *)ctsio); 6853 return (CTL_RETVAL_COMPLETE); 6854 } 6855 6856 int 6857 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 6858 { 6859 struct scsi_maintenance_in *cdb; 6860 int retval; 6861 int alloc_len, ext, total_len = 0, g, p, pc, pg, gs, os; 6862 int num_target_port_groups, num_target_ports; 6863 struct ctl_lun *lun; 6864 struct ctl_softc *softc; 6865 struct ctl_port *port; 6866 struct scsi_target_group_data *rtg_ptr; 6867 struct scsi_target_group_data_extended *rtg_ext_ptr; 6868 struct scsi_target_port_group_descriptor *tpg_desc; 6869 6870 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 6871 6872 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 6873 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6874 softc = lun->ctl_softc; 6875 6876 retval = CTL_RETVAL_COMPLETE; 6877 6878 switch 
(cdb->byte2 & STG_PDF_MASK) { 6879 case STG_PDF_LENGTH: 6880 ext = 0; 6881 break; 6882 case STG_PDF_EXTENDED: 6883 ext = 1; 6884 break; 6885 default: 6886 ctl_set_invalid_field(/*ctsio*/ ctsio, 6887 /*sks_valid*/ 1, 6888 /*command*/ 1, 6889 /*field*/ 2, 6890 /*bit_valid*/ 1, 6891 /*bit*/ 5); 6892 ctl_done((union ctl_io *)ctsio); 6893 return(retval); 6894 } 6895 6896 if (softc->is_single) 6897 num_target_port_groups = 1; 6898 else 6899 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 6900 num_target_ports = 0; 6901 mtx_lock(&softc->ctl_lock); 6902 STAILQ_FOREACH(port, &softc->port_list, links) { 6903 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 6904 continue; 6905 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 6906 continue; 6907 num_target_ports++; 6908 } 6909 mtx_unlock(&softc->ctl_lock); 6910 6911 if (ext) 6912 total_len = sizeof(struct scsi_target_group_data_extended); 6913 else 6914 total_len = sizeof(struct scsi_target_group_data); 6915 total_len += sizeof(struct scsi_target_port_group_descriptor) * 6916 num_target_port_groups + 6917 sizeof(struct scsi_target_port_descriptor) * 6918 num_target_ports * num_target_port_groups; 6919 6920 alloc_len = scsi_4btoul(cdb->length); 6921 6922 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6923 6924 ctsio->kern_sg_entries = 0; 6925 6926 if (total_len < alloc_len) { 6927 ctsio->residual = alloc_len - total_len; 6928 ctsio->kern_data_len = total_len; 6929 ctsio->kern_total_len = total_len; 6930 } else { 6931 ctsio->residual = 0; 6932 ctsio->kern_data_len = alloc_len; 6933 ctsio->kern_total_len = alloc_len; 6934 } 6935 ctsio->kern_data_resid = 0; 6936 ctsio->kern_rel_offset = 0; 6937 6938 if (ext) { 6939 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 6940 ctsio->kern_data_ptr; 6941 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 6942 rtg_ext_ptr->format_type = 0x10; 6943 rtg_ext_ptr->implicit_transition_time = 0; 6944 tpg_desc = &rtg_ext_ptr->groups[0]; 6945 } else { 6946 rtg_ptr = (struct scsi_target_group_data *) 6947 ctsio->kern_data_ptr; 6948 scsi_ulto4b(total_len - 4, rtg_ptr->length); 6949 tpg_desc = &rtg_ptr->groups[0]; 6950 } 6951 6952 mtx_lock(&softc->ctl_lock); 6953 pg = softc->port_offset / CTL_MAX_PORTS; 6954 if (softc->flags & CTL_FLAG_ACTIVE_SHELF) { 6955 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) { 6956 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 6957 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 6958 } else if (lun->flags & CTL_LUN_PRIMARY_SC) { 6959 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 6960 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 6961 } else { 6962 gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 6963 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 6964 } 6965 } else { 6966 gs = TPG_ASYMMETRIC_ACCESS_STANDBY; 6967 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 6968 } 6969 for (g = 0; g < num_target_port_groups; g++) { 6970 tpg_desc->pref_state = (g == pg) ? gs : os; 6971 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP; 6972 scsi_ulto2b(g + 1, tpg_desc->target_port_group); 6973 tpg_desc->status = TPG_IMPLICIT; 6974 pc = 0; 6975 STAILQ_FOREACH(port, &softc->port_list, links) { 6976 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 6977 continue; 6978 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 6979 continue; 6980 p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; 6981 scsi_ulto2b(p, tpg_desc->descriptors[pc]. 
6982 relative_target_port_identifier); 6983 pc++; 6984 } 6985 tpg_desc->target_port_count = pc; 6986 tpg_desc = (struct scsi_target_port_group_descriptor *) 6987 &tpg_desc->descriptors[pc]; 6988 } 6989 mtx_unlock(&softc->ctl_lock); 6990 6991 ctl_set_success(ctsio); 6992 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6993 ctsio->be_move_done = ctl_config_move_done; 6994 ctl_datamove((union ctl_io *)ctsio); 6995 return(retval); 6996 } 6997 6998 int 6999 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7000 { 7001 struct ctl_lun *lun; 7002 struct scsi_report_supported_opcodes *cdb; 7003 const struct ctl_cmd_entry *entry, *sentry; 7004 struct scsi_report_supported_opcodes_all *all; 7005 struct scsi_report_supported_opcodes_descr *descr; 7006 struct scsi_report_supported_opcodes_one *one; 7007 int retval; 7008 int alloc_len, total_len; 7009 int opcode, service_action, i, j, num; 7010 7011 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7012 7013 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7014 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7015 7016 retval = CTL_RETVAL_COMPLETE; 7017 7018 opcode = cdb->requested_opcode; 7019 service_action = scsi_2btoul(cdb->requested_service_action); 7020 switch (cdb->options & RSO_OPTIONS_MASK) { 7021 case RSO_OPTIONS_ALL: 7022 num = 0; 7023 for (i = 0; i < 256; i++) { 7024 entry = &ctl_cmd_table[i]; 7025 if (entry->flags & CTL_CMD_FLAG_SA5) { 7026 for (j = 0; j < 32; j++) { 7027 sentry = &((const struct ctl_cmd_entry *) 7028 entry->execute)[j]; 7029 if (ctl_cmd_applicable( 7030 lun->be_lun->lun_type, sentry)) 7031 num++; 7032 } 7033 } else { 7034 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7035 entry)) 7036 num++; 7037 } 7038 } 7039 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7040 num * sizeof(struct scsi_report_supported_opcodes_descr); 7041 break; 7042 case RSO_OPTIONS_OC: 7043 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7044 ctl_set_invalid_field(/*ctsio*/ ctsio, 7045 /*sks_valid*/ 1, 7046 /*command*/ 1, 7047 /*field*/ 2, 7048 /*bit_valid*/ 1, 7049 /*bit*/ 2); 7050 ctl_done((union ctl_io *)ctsio); 7051 return (CTL_RETVAL_COMPLETE); 7052 } 7053 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7054 break; 7055 case RSO_OPTIONS_OC_SA: 7056 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7057 service_action >= 32) { 7058 ctl_set_invalid_field(/*ctsio*/ ctsio, 7059 /*sks_valid*/ 1, 7060 /*command*/ 1, 7061 /*field*/ 2, 7062 /*bit_valid*/ 1, 7063 /*bit*/ 2); 7064 ctl_done((union ctl_io *)ctsio); 7065 return (CTL_RETVAL_COMPLETE); 7066 } 7067 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7068 break; 7069 default: 7070 ctl_set_invalid_field(/*ctsio*/ ctsio, 7071 /*sks_valid*/ 1, 7072 /*command*/ 1, 7073 /*field*/ 2, 7074 /*bit_valid*/ 1, 7075 /*bit*/ 2); 7076 ctl_done((union ctl_io *)ctsio); 7077 return (CTL_RETVAL_COMPLETE); 7078 } 7079 7080 alloc_len = scsi_4btoul(cdb->length); 7081 7082 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7083 7084 ctsio->kern_sg_entries = 0; 7085 7086 if (total_len < alloc_len) { 7087 ctsio->residual = alloc_len - total_len; 7088 ctsio->kern_data_len = total_len; 7089 ctsio->kern_total_len = total_len; 7090 } else { 7091 ctsio->residual = 0; 7092 ctsio->kern_data_len = alloc_len; 7093 ctsio->kern_total_len = alloc_len; 7094 } 7095 ctsio->kern_data_resid = 0; 7096 ctsio->kern_rel_offset = 0; 7097 7098 switch (cdb->options & RSO_OPTIONS_MASK) { 7099 case RSO_OPTIONS_ALL: 7100 all = (struct 
scsi_report_supported_opcodes_all *) 7101 ctsio->kern_data_ptr; 7102 num = 0; 7103 for (i = 0; i < 256; i++) { 7104 entry = &ctl_cmd_table[i]; 7105 if (entry->flags & CTL_CMD_FLAG_SA5) { 7106 for (j = 0; j < 32; j++) { 7107 sentry = &((const struct ctl_cmd_entry *) 7108 entry->execute)[j]; 7109 if (!ctl_cmd_applicable( 7110 lun->be_lun->lun_type, sentry)) 7111 continue; 7112 descr = &all->descr[num++]; 7113 descr->opcode = i; 7114 scsi_ulto2b(j, descr->service_action); 7115 descr->flags = RSO_SERVACTV; 7116 scsi_ulto2b(sentry->length, 7117 descr->cdb_length); 7118 } 7119 } else { 7120 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7121 entry)) 7122 continue; 7123 descr = &all->descr[num++]; 7124 descr->opcode = i; 7125 scsi_ulto2b(0, descr->service_action); 7126 descr->flags = 0; 7127 scsi_ulto2b(entry->length, descr->cdb_length); 7128 } 7129 } 7130 scsi_ulto4b( 7131 num * sizeof(struct scsi_report_supported_opcodes_descr), 7132 all->length); 7133 break; 7134 case RSO_OPTIONS_OC: 7135 one = (struct scsi_report_supported_opcodes_one *) 7136 ctsio->kern_data_ptr; 7137 entry = &ctl_cmd_table[opcode]; 7138 goto fill_one; 7139 case RSO_OPTIONS_OC_SA: 7140 one = (struct scsi_report_supported_opcodes_one *) 7141 ctsio->kern_data_ptr; 7142 entry = &ctl_cmd_table[opcode]; 7143 entry = &((const struct ctl_cmd_entry *) 7144 entry->execute)[service_action]; 7145 fill_one: 7146 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7147 one->support = 3; 7148 scsi_ulto2b(entry->length, one->cdb_length); 7149 one->cdb_usage[0] = opcode; 7150 memcpy(&one->cdb_usage[1], entry->usage, 7151 entry->length - 1); 7152 } else 7153 one->support = 1; 7154 break; 7155 } 7156 7157 ctl_set_success(ctsio); 7158 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7159 ctsio->be_move_done = ctl_config_move_done; 7160 ctl_datamove((union ctl_io *)ctsio); 7161 return(retval); 7162 } 7163 7164 int 7165 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7166 { 7167 struct scsi_report_supported_tmf *cdb; 7168 struct scsi_report_supported_tmf_data *data; 7169 int retval; 7170 int alloc_len, total_len; 7171 7172 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7173 7174 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7175 7176 retval = CTL_RETVAL_COMPLETE; 7177 7178 total_len = sizeof(struct scsi_report_supported_tmf_data); 7179 alloc_len = scsi_4btoul(cdb->length); 7180 7181 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7182 7183 ctsio->kern_sg_entries = 0; 7184 7185 if (total_len < alloc_len) { 7186 ctsio->residual = alloc_len - total_len; 7187 ctsio->kern_data_len = total_len; 7188 ctsio->kern_total_len = total_len; 7189 } else { 7190 ctsio->residual = 0; 7191 ctsio->kern_data_len = alloc_len; 7192 ctsio->kern_total_len = alloc_len; 7193 } 7194 ctsio->kern_data_resid = 0; 7195 ctsio->kern_rel_offset = 0; 7196 7197 data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr; 7198 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_TRS; 7199 data->byte2 |= RST_ITNRS; 7200 7201 ctl_set_success(ctsio); 7202 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7203 ctsio->be_move_done = ctl_config_move_done; 7204 ctl_datamove((union ctl_io *)ctsio); 7205 return (retval); 7206 } 7207 7208 int 7209 ctl_report_timestamp(struct ctl_scsiio *ctsio) 7210 { 7211 struct scsi_report_timestamp *cdb; 7212 struct scsi_report_timestamp_data *data; 7213 struct timeval tv; 7214 int64_t timestamp; 7215 int retval; 7216 int alloc_len, total_len; 7217 7218 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7219 7220 cdb = 
(struct scsi_report_timestamp *)ctsio->cdb; 7221 7222 retval = CTL_RETVAL_COMPLETE; 7223 7224 total_len = sizeof(struct scsi_report_timestamp_data); 7225 alloc_len = scsi_4btoul(cdb->length); 7226 7227 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7228 7229 ctsio->kern_sg_entries = 0; 7230 7231 if (total_len < alloc_len) { 7232 ctsio->residual = alloc_len - total_len; 7233 ctsio->kern_data_len = total_len; 7234 ctsio->kern_total_len = total_len; 7235 } else { 7236 ctsio->residual = 0; 7237 ctsio->kern_data_len = alloc_len; 7238 ctsio->kern_total_len = alloc_len; 7239 } 7240 ctsio->kern_data_resid = 0; 7241 ctsio->kern_rel_offset = 0; 7242 7243 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7244 scsi_ulto2b(sizeof(*data) - 2, data->length); 7245 data->origin = RTS_ORIG_OUTSIDE; 7246 getmicrotime(&tv); 7247 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7248 scsi_ulto4b(timestamp >> 16, data->timestamp); 7249 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7250 7251 ctl_set_success(ctsio); 7252 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7253 ctsio->be_move_done = ctl_config_move_done; 7254 ctl_datamove((union ctl_io *)ctsio); 7255 return (retval); 7256 } 7257 7258 int 7259 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7260 { 7261 struct scsi_per_res_in *cdb; 7262 int alloc_len, total_len = 0; 7263 /* struct scsi_per_res_in_rsrv in_data; */ 7264 struct ctl_lun *lun; 7265 struct ctl_softc *softc; 7266 uint64_t key; 7267 7268 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7269 7270 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7271 7272 alloc_len = scsi_2btoul(cdb->length); 7273 7274 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7275 softc = lun->ctl_softc; 7276 7277 retry: 7278 mtx_lock(&lun->lun_lock); 7279 switch (cdb->action) { 7280 case SPRI_RK: /* read keys */ 7281 total_len = sizeof(struct scsi_per_res_in_keys) + 7282 lun->pr_key_count * 7283 sizeof(struct scsi_per_res_key); 7284 break; 7285 case SPRI_RR: /* read reservation */ 7286 if (lun->flags & CTL_LUN_PR_RESERVED) 7287 total_len = sizeof(struct scsi_per_res_in_rsrv); 7288 else 7289 total_len = sizeof(struct scsi_per_res_in_header); 7290 break; 7291 case SPRI_RC: /* report capabilities */ 7292 total_len = sizeof(struct scsi_per_res_cap); 7293 break; 7294 case SPRI_RS: /* read full status */ 7295 total_len = sizeof(struct scsi_per_res_in_header) + 7296 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7297 lun->pr_key_count; 7298 break; 7299 default: 7300 panic("Invalid PR type %x", cdb->action); 7301 } 7302 mtx_unlock(&lun->lun_lock); 7303 7304 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7305 7306 if (total_len < alloc_len) { 7307 ctsio->residual = alloc_len - total_len; 7308 ctsio->kern_data_len = total_len; 7309 ctsio->kern_total_len = total_len; 7310 } else { 7311 ctsio->residual = 0; 7312 ctsio->kern_data_len = alloc_len; 7313 ctsio->kern_total_len = alloc_len; 7314 } 7315 7316 ctsio->kern_data_resid = 0; 7317 ctsio->kern_rel_offset = 0; 7318 ctsio->kern_sg_entries = 0; 7319 7320 mtx_lock(&lun->lun_lock); 7321 switch (cdb->action) { 7322 case SPRI_RK: { // read keys 7323 struct scsi_per_res_in_keys *res_keys; 7324 int i, key_count; 7325 7326 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7327 7328 /* 7329 * We had to drop the lock to allocate our buffer, which 7330 * leaves time for someone to come in with another 7331 * persistent reservation. 
(That is unlikely, though, 7332 * since this should be the only persistent reservation 7333 * command active right now.) 7334 */ 7335 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7336 (lun->pr_key_count * 7337 sizeof(struct scsi_per_res_key)))){ 7338 mtx_unlock(&lun->lun_lock); 7339 free(ctsio->kern_data_ptr, M_CTL); 7340 printf("%s: reservation length changed, retrying\n", 7341 __func__); 7342 goto retry; 7343 } 7344 7345 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); 7346 7347 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7348 lun->pr_key_count, res_keys->header.length); 7349 7350 for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) { 7351 if ((key = ctl_get_prkey(lun, i)) == 0) 7352 continue; 7353 7354 /* 7355 * We used lun->pr_key_count to calculate the 7356 * size to allocate. If it turns out the number of 7357 * initiators with the registered flag set is 7358 * larger than that (i.e. they haven't been kept in 7359 * sync), we've got a problem. 7360 */ 7361 if (key_count >= lun->pr_key_count) { 7362 #ifdef NEEDTOPORT 7363 csevent_log(CSC_CTL | CSC_SHELF_SW | 7364 CTL_PR_ERROR, 7365 csevent_LogType_Fault, 7366 csevent_AlertLevel_Yellow, 7367 csevent_FRU_ShelfController, 7368 csevent_FRU_Firmware, 7369 csevent_FRU_Unknown, 7370 "registered keys %d >= key " 7371 "count %d", key_count, 7372 lun->pr_key_count); 7373 #endif 7374 key_count++; 7375 continue; 7376 } 7377 scsi_u64to8b(key, res_keys->keys[key_count].key); 7378 key_count++; 7379 } 7380 break; 7381 } 7382 case SPRI_RR: { // read reservation 7383 struct scsi_per_res_in_rsrv *res; 7384 int tmp_len, header_only; 7385 7386 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7387 7388 scsi_ulto4b(lun->PRGeneration, res->header.generation); 7389 7390 if (lun->flags & CTL_LUN_PR_RESERVED) 7391 { 7392 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7393 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7394 res->header.length); 7395 header_only = 0; 7396 } else { 7397 tmp_len = sizeof(struct scsi_per_res_in_header); 7398 scsi_ulto4b(0, res->header.length); 7399 header_only = 1; 7400 } 7401 7402 /* 7403 * We had to drop the lock to allocate our buffer, which 7404 * leaves time for someone to come in with another 7405 * persistent reservation. (That is unlikely, though, 7406 * since this should be the only persistent reservation 7407 * command active right now.) 7408 */ 7409 if (tmp_len != total_len) { 7410 mtx_unlock(&lun->lun_lock); 7411 free(ctsio->kern_data_ptr, M_CTL); 7412 printf("%s: reservation status changed, retrying\n", 7413 __func__); 7414 goto retry; 7415 } 7416 7417 /* 7418 * No reservation held, so we're done. 7419 */ 7420 if (header_only != 0) 7421 break; 7422 7423 /* 7424 * If the registration is an All Registrants type, the key 7425 * is 0, since it doesn't really matter. 
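		 * (SPC-3 has the reservation key reported as zero for an
		 * all-registrants type reservation.)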
7426 */ 7427 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7428 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7429 res->data.reservation); 7430 } 7431 res->data.scopetype = lun->res_type; 7432 break; 7433 } 7434 case SPRI_RC: //report capabilities 7435 { 7436 struct scsi_per_res_cap *res_cap; 7437 uint16_t type_mask; 7438 7439 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7440 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7441 res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_5; 7442 type_mask = SPRI_TM_WR_EX_AR | 7443 SPRI_TM_EX_AC_RO | 7444 SPRI_TM_WR_EX_RO | 7445 SPRI_TM_EX_AC | 7446 SPRI_TM_WR_EX | 7447 SPRI_TM_EX_AC_AR; 7448 scsi_ulto2b(type_mask, res_cap->type_mask); 7449 break; 7450 } 7451 case SPRI_RS: { // read full status 7452 struct scsi_per_res_in_full *res_status; 7453 struct scsi_per_res_in_full_desc *res_desc; 7454 struct ctl_port *port; 7455 int i, len; 7456 7457 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7458 7459 /* 7460 * We had to drop the lock to allocate our buffer, which 7461 * leaves time for someone to come in with another 7462 * persistent reservation. (That is unlikely, though, 7463 * since this should be the only persistent reservation 7464 * command active right now.) 7465 */ 7466 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7467 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7468 lun->pr_key_count)){ 7469 mtx_unlock(&lun->lun_lock); 7470 free(ctsio->kern_data_ptr, M_CTL); 7471 printf("%s: reservation length changed, retrying\n", 7472 __func__); 7473 goto retry; 7474 } 7475 7476 scsi_ulto4b(lun->PRGeneration, res_status->header.generation); 7477 7478 res_desc = &res_status->desc[0]; 7479 for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) { 7480 if ((key = ctl_get_prkey(lun, i)) == 0) 7481 continue; 7482 7483 scsi_u64to8b(key, res_desc->res_key.key); 7484 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7485 (lun->pr_res_idx == i || 7486 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7487 res_desc->flags = SPRI_FULL_R_HOLDER; 7488 res_desc->scopetype = lun->res_type; 7489 } 7490 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7491 res_desc->rel_trgt_port_id); 7492 len = 0; 7493 port = softc->ctl_ports[ 7494 ctl_port_idx(i / CTL_MAX_INIT_PER_PORT)]; 7495 if (port != NULL) 7496 len = ctl_create_iid(port, 7497 i % CTL_MAX_INIT_PER_PORT, 7498 res_desc->transport_id); 7499 scsi_ulto4b(len, res_desc->additional_length); 7500 res_desc = (struct scsi_per_res_in_full_desc *) 7501 &res_desc->transport_id[len]; 7502 } 7503 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7504 res_status->header.length); 7505 break; 7506 } 7507 default: 7508 /* 7509 * This is a bug, because we just checked for this above, 7510 * and should have returned an error. 7511 */ 7512 panic("Invalid PR type %x", cdb->action); 7513 break; /* NOTREACHED */ 7514 } 7515 mtx_unlock(&lun->lun_lock); 7516 7517 ctl_set_success(ctsio); 7518 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7519 ctsio->be_move_done = ctl_config_move_done; 7520 ctl_datamove((union ctl_io *)ctsio); 7521 return (CTL_RETVAL_COMPLETE); 7522 } 7523 7524 static void 7525 ctl_est_res_ua(struct ctl_lun *lun, uint32_t residx, ctl_ua_type ua) 7526 { 7527 int off = lun->ctl_softc->persis_offset; 7528 7529 if (residx >= off && residx < off + CTL_MAX_INITIATORS) 7530 ctl_est_ua(lun, residx - off, ua); 7531 } 7532 7533 /* 7534 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7535 * it should return. 
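 * On a non-zero return this function has already set the SCSI status and
 * completed the I/O with ctl_done().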
7536 */ 7537 static int 7538 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7539 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7540 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7541 struct scsi_per_res_out_parms* param) 7542 { 7543 union ctl_ha_msg persis_io; 7544 int retval, i; 7545 int isc_retval; 7546 7547 retval = 0; 7548 7549 mtx_lock(&lun->lun_lock); 7550 if (sa_res_key == 0) { 7551 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7552 /* validate scope and type */ 7553 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7554 SPR_LU_SCOPE) { 7555 mtx_unlock(&lun->lun_lock); 7556 ctl_set_invalid_field(/*ctsio*/ ctsio, 7557 /*sks_valid*/ 1, 7558 /*command*/ 1, 7559 /*field*/ 2, 7560 /*bit_valid*/ 1, 7561 /*bit*/ 4); 7562 ctl_done((union ctl_io *)ctsio); 7563 return (1); 7564 } 7565 7566 if (type>8 || type==2 || type==4 || type==0) { 7567 mtx_unlock(&lun->lun_lock); 7568 ctl_set_invalid_field(/*ctsio*/ ctsio, 7569 /*sks_valid*/ 1, 7570 /*command*/ 1, 7571 /*field*/ 2, 7572 /*bit_valid*/ 1, 7573 /*bit*/ 0); 7574 ctl_done((union ctl_io *)ctsio); 7575 return (1); 7576 } 7577 7578 /* 7579 * Unregister everybody else and build UA for 7580 * them 7581 */ 7582 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7583 if (i == residx || ctl_get_prkey(lun, i) == 0) 7584 continue; 7585 7586 ctl_clr_prkey(lun, i); 7587 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7588 } 7589 lun->pr_key_count = 1; 7590 lun->res_type = type; 7591 if (lun->res_type != SPR_TYPE_WR_EX_AR 7592 && lun->res_type != SPR_TYPE_EX_AC_AR) 7593 lun->pr_res_idx = residx; 7594 7595 /* send msg to other side */ 7596 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7597 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7598 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7599 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7600 persis_io.pr.pr_info.res_type = type; 7601 memcpy(persis_io.pr.pr_info.sa_res_key, 7602 param->serv_act_res_key, 7603 sizeof(param->serv_act_res_key)); 7604 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7605 &persis_io, sizeof(persis_io), 0)) > 7606 CTL_HA_STATUS_SUCCESS) { 7607 printf("CTL:Persis Out error returned " 7608 "from ctl_ha_msg_send %d\n", 7609 isc_retval); 7610 } 7611 } else { 7612 /* not all registrants */ 7613 mtx_unlock(&lun->lun_lock); 7614 free(ctsio->kern_data_ptr, M_CTL); 7615 ctl_set_invalid_field(ctsio, 7616 /*sks_valid*/ 1, 7617 /*command*/ 0, 7618 /*field*/ 8, 7619 /*bit_valid*/ 0, 7620 /*bit*/ 0); 7621 ctl_done((union ctl_io *)ctsio); 7622 return (1); 7623 } 7624 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7625 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7626 int found = 0; 7627 7628 if (res_key == sa_res_key) { 7629 /* special case */ 7630 /* 7631 * The spec implies this is not good but doesn't 7632 * say what to do. There are two choices either 7633 * generate a res conflict or check condition 7634 * with illegal field in parameter data. Since 7635 * that is what is done when the sa_res_key is 7636 * zero I'll take that approach since this has 7637 * to do with the sa_res_key. 
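			 * The resulting error points at byte 8 of the
			 * parameter list, the start of the SERVICE ACTION
			 * RESERVATION KEY field.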
7638 */ 7639 mtx_unlock(&lun->lun_lock); 7640 free(ctsio->kern_data_ptr, M_CTL); 7641 ctl_set_invalid_field(ctsio, 7642 /*sks_valid*/ 1, 7643 /*command*/ 0, 7644 /*field*/ 8, 7645 /*bit_valid*/ 0, 7646 /*bit*/ 0); 7647 ctl_done((union ctl_io *)ctsio); 7648 return (1); 7649 } 7650 7651 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7652 if (ctl_get_prkey(lun, i) != sa_res_key) 7653 continue; 7654 7655 found = 1; 7656 ctl_clr_prkey(lun, i); 7657 lun->pr_key_count--; 7658 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7659 } 7660 if (!found) { 7661 mtx_unlock(&lun->lun_lock); 7662 free(ctsio->kern_data_ptr, M_CTL); 7663 ctl_set_reservation_conflict(ctsio); 7664 ctl_done((union ctl_io *)ctsio); 7665 return (CTL_RETVAL_COMPLETE); 7666 } 7667 /* send msg to other side */ 7668 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7669 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7670 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7671 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7672 persis_io.pr.pr_info.res_type = type; 7673 memcpy(persis_io.pr.pr_info.sa_res_key, 7674 param->serv_act_res_key, 7675 sizeof(param->serv_act_res_key)); 7676 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7677 &persis_io, sizeof(persis_io), 0)) > 7678 CTL_HA_STATUS_SUCCESS) { 7679 printf("CTL:Persis Out error returned from " 7680 "ctl_ha_msg_send %d\n", isc_retval); 7681 } 7682 } else { 7683 /* Reserved but not all registrants */ 7684 /* sa_res_key is res holder */ 7685 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7686 /* validate scope and type */ 7687 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7688 SPR_LU_SCOPE) { 7689 mtx_unlock(&lun->lun_lock); 7690 ctl_set_invalid_field(/*ctsio*/ ctsio, 7691 /*sks_valid*/ 1, 7692 /*command*/ 1, 7693 /*field*/ 2, 7694 /*bit_valid*/ 1, 7695 /*bit*/ 4); 7696 ctl_done((union ctl_io *)ctsio); 7697 return (1); 7698 } 7699 7700 if (type>8 || type==2 || type==4 || type==0) { 7701 mtx_unlock(&lun->lun_lock); 7702 ctl_set_invalid_field(/*ctsio*/ ctsio, 7703 /*sks_valid*/ 1, 7704 /*command*/ 1, 7705 /*field*/ 2, 7706 /*bit_valid*/ 1, 7707 /*bit*/ 0); 7708 ctl_done((union ctl_io *)ctsio); 7709 return (1); 7710 } 7711 7712 /* 7713 * Do the following: 7714 * if sa_res_key != res_key remove all 7715 * registrants w/sa_res_key and generate UA 7716 * for these registrants(Registrations 7717 * Preempted) if it wasn't an exclusive 7718 * reservation generate UA(Reservations 7719 * Preempted) for all other registered nexuses 7720 * if the type has changed. Establish the new 7721 * reservation and holder. If res_key and 7722 * sa_res_key are the same do the above 7723 * except don't unregister the res holder. 
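			 * The unit attentions are queued with
			 * ctl_est_res_ua() as the keys are cleared below.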
7724 */ 7725 7726 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7727 if (i == residx || ctl_get_prkey(lun, i) == 0) 7728 continue; 7729 7730 if (sa_res_key == ctl_get_prkey(lun, i)) { 7731 ctl_clr_prkey(lun, i); 7732 lun->pr_key_count--; 7733 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7734 } else if (type != lun->res_type 7735 && (lun->res_type == SPR_TYPE_WR_EX_RO 7736 || lun->res_type ==SPR_TYPE_EX_AC_RO)){ 7737 ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE); 7738 } 7739 } 7740 lun->res_type = type; 7741 if (lun->res_type != SPR_TYPE_WR_EX_AR 7742 && lun->res_type != SPR_TYPE_EX_AC_AR) 7743 lun->pr_res_idx = residx; 7744 else 7745 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 7746 7747 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7748 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7749 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7750 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7751 persis_io.pr.pr_info.res_type = type; 7752 memcpy(persis_io.pr.pr_info.sa_res_key, 7753 param->serv_act_res_key, 7754 sizeof(param->serv_act_res_key)); 7755 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7756 &persis_io, sizeof(persis_io), 0)) > 7757 CTL_HA_STATUS_SUCCESS) { 7758 printf("CTL:Persis Out error returned " 7759 "from ctl_ha_msg_send %d\n", 7760 isc_retval); 7761 } 7762 } else { 7763 /* 7764 * sa_res_key is not the res holder just 7765 * remove registrants 7766 */ 7767 int found=0; 7768 7769 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7770 if (sa_res_key != ctl_get_prkey(lun, i)) 7771 continue; 7772 7773 found = 1; 7774 ctl_clr_prkey(lun, i); 7775 lun->pr_key_count--; 7776 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7777 } 7778 7779 if (!found) { 7780 mtx_unlock(&lun->lun_lock); 7781 free(ctsio->kern_data_ptr, M_CTL); 7782 ctl_set_reservation_conflict(ctsio); 7783 ctl_done((union ctl_io *)ctsio); 7784 return (1); 7785 } 7786 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7787 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7788 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7789 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7790 persis_io.pr.pr_info.res_type = type; 7791 memcpy(persis_io.pr.pr_info.sa_res_key, 7792 param->serv_act_res_key, 7793 sizeof(param->serv_act_res_key)); 7794 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7795 &persis_io, sizeof(persis_io), 0)) > 7796 CTL_HA_STATUS_SUCCESS) { 7797 printf("CTL:Persis Out error returned " 7798 "from ctl_ha_msg_send %d\n", 7799 isc_retval); 7800 } 7801 } 7802 } 7803 7804 lun->PRGeneration++; 7805 mtx_unlock(&lun->lun_lock); 7806 7807 return (retval); 7808 } 7809 7810 static void 7811 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 7812 { 7813 uint64_t sa_res_key; 7814 int i; 7815 7816 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 7817 7818 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7819 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 7820 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 7821 if (sa_res_key == 0) { 7822 /* 7823 * Unregister everybody else and build UA for 7824 * them 7825 */ 7826 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7827 if (i == msg->pr.pr_info.residx || 7828 ctl_get_prkey(lun, i) == 0) 7829 continue; 7830 7831 ctl_clr_prkey(lun, i); 7832 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7833 } 7834 7835 lun->pr_key_count = 1; 7836 lun->res_type = msg->pr.pr_info.res_type; 7837 if (lun->res_type != SPR_TYPE_WR_EX_AR 7838 && lun->res_type != SPR_TYPE_EX_AC_AR) 7839 lun->pr_res_idx = msg->pr.pr_info.residx; 7840 } else { 7841 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7842 if (sa_res_key == ctl_get_prkey(lun, 
i)) 7843 continue; 7844 7845 ctl_clr_prkey(lun, i); 7846 lun->pr_key_count--; 7847 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7848 } 7849 } 7850 } else { 7851 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7852 if (i == msg->pr.pr_info.residx || 7853 ctl_get_prkey(lun, i) == 0) 7854 continue; 7855 7856 if (sa_res_key == ctl_get_prkey(lun, i)) { 7857 ctl_clr_prkey(lun, i); 7858 lun->pr_key_count--; 7859 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7860 } else if (msg->pr.pr_info.res_type != lun->res_type 7861 && (lun->res_type == SPR_TYPE_WR_EX_RO 7862 || lun->res_type == SPR_TYPE_EX_AC_RO)) { 7863 ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE); 7864 } 7865 } 7866 lun->res_type = msg->pr.pr_info.res_type; 7867 if (lun->res_type != SPR_TYPE_WR_EX_AR 7868 && lun->res_type != SPR_TYPE_EX_AC_AR) 7869 lun->pr_res_idx = msg->pr.pr_info.residx; 7870 else 7871 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 7872 } 7873 lun->PRGeneration++; 7874 7875 } 7876 7877 7878 int 7879 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 7880 { 7881 int retval; 7882 int isc_retval; 7883 u_int32_t param_len; 7884 struct scsi_per_res_out *cdb; 7885 struct ctl_lun *lun; 7886 struct scsi_per_res_out_parms* param; 7887 struct ctl_softc *softc; 7888 uint32_t residx; 7889 uint64_t res_key, sa_res_key, key; 7890 uint8_t type; 7891 union ctl_ha_msg persis_io; 7892 int i; 7893 7894 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 7895 7896 retval = CTL_RETVAL_COMPLETE; 7897 7898 cdb = (struct scsi_per_res_out *)ctsio->cdb; 7899 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7900 softc = lun->ctl_softc; 7901 7902 /* 7903 * We only support whole-LUN scope. The scope & type are ignored for 7904 * register, register and ignore existing key and clear. 7905 * We sometimes ignore scope and type on preempts too!! 7906 * Verify reservation type here as well. 
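	 * The accepted type values are 1, 3, 5, 6, 7 and 8 (write exclusive,
	 * exclusive access, and their registrants-only and all-registrants
	 * variants); anything else is rejected below.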
7907 */ 7908 type = cdb->scope_type & SPR_TYPE_MASK; 7909 if ((cdb->action == SPRO_RESERVE) 7910 || (cdb->action == SPRO_RELEASE)) { 7911 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 7912 ctl_set_invalid_field(/*ctsio*/ ctsio, 7913 /*sks_valid*/ 1, 7914 /*command*/ 1, 7915 /*field*/ 2, 7916 /*bit_valid*/ 1, 7917 /*bit*/ 4); 7918 ctl_done((union ctl_io *)ctsio); 7919 return (CTL_RETVAL_COMPLETE); 7920 } 7921 7922 if (type>8 || type==2 || type==4 || type==0) { 7923 ctl_set_invalid_field(/*ctsio*/ ctsio, 7924 /*sks_valid*/ 1, 7925 /*command*/ 1, 7926 /*field*/ 2, 7927 /*bit_valid*/ 1, 7928 /*bit*/ 0); 7929 ctl_done((union ctl_io *)ctsio); 7930 return (CTL_RETVAL_COMPLETE); 7931 } 7932 } 7933 7934 param_len = scsi_4btoul(cdb->length); 7935 7936 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 7937 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 7938 ctsio->kern_data_len = param_len; 7939 ctsio->kern_total_len = param_len; 7940 ctsio->kern_data_resid = 0; 7941 ctsio->kern_rel_offset = 0; 7942 ctsio->kern_sg_entries = 0; 7943 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7944 ctsio->be_move_done = ctl_config_move_done; 7945 ctl_datamove((union ctl_io *)ctsio); 7946 7947 return (CTL_RETVAL_COMPLETE); 7948 } 7949 7950 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 7951 7952 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 7953 res_key = scsi_8btou64(param->res_key.key); 7954 sa_res_key = scsi_8btou64(param->serv_act_res_key); 7955 7956 /* 7957 * Validate the reservation key here except for SPRO_REG_IGNO 7958 * This must be done for all other service actions 7959 */ 7960 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 7961 mtx_lock(&lun->lun_lock); 7962 if ((key = ctl_get_prkey(lun, residx)) != 0) { 7963 if (res_key != key) { 7964 /* 7965 * The current key passed in doesn't match 7966 * the one the initiator previously 7967 * registered. 7968 */ 7969 mtx_unlock(&lun->lun_lock); 7970 free(ctsio->kern_data_ptr, M_CTL); 7971 ctl_set_reservation_conflict(ctsio); 7972 ctl_done((union ctl_io *)ctsio); 7973 return (CTL_RETVAL_COMPLETE); 7974 } 7975 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 7976 /* 7977 * We are not registered 7978 */ 7979 mtx_unlock(&lun->lun_lock); 7980 free(ctsio->kern_data_ptr, M_CTL); 7981 ctl_set_reservation_conflict(ctsio); 7982 ctl_done((union ctl_io *)ctsio); 7983 return (CTL_RETVAL_COMPLETE); 7984 } else if (res_key != 0) { 7985 /* 7986 * We are not registered and trying to register but 7987 * the register key isn't zero. 7988 */ 7989 mtx_unlock(&lun->lun_lock); 7990 free(ctsio->kern_data_ptr, M_CTL); 7991 ctl_set_reservation_conflict(ctsio); 7992 ctl_done((union ctl_io *)ctsio); 7993 return (CTL_RETVAL_COMPLETE); 7994 } 7995 mtx_unlock(&lun->lun_lock); 7996 } 7997 7998 switch (cdb->action & SPRO_ACTION_MASK) { 7999 case SPRO_REGISTER: 8000 case SPRO_REG_IGNO: { 8001 8002 #if 0 8003 printf("Registration received\n"); 8004 #endif 8005 8006 /* 8007 * We don't support any of these options, as we report in 8008 * the read capabilities request (see 8009 * ctl_persistent_reserve_in(), above). 
8010 */ 8011 if ((param->flags & SPR_SPEC_I_PT) 8012 || (param->flags & SPR_ALL_TG_PT) 8013 || (param->flags & SPR_APTPL)) { 8014 int bit_ptr; 8015 8016 if (param->flags & SPR_APTPL) 8017 bit_ptr = 0; 8018 else if (param->flags & SPR_ALL_TG_PT) 8019 bit_ptr = 2; 8020 else /* SPR_SPEC_I_PT */ 8021 bit_ptr = 3; 8022 8023 free(ctsio->kern_data_ptr, M_CTL); 8024 ctl_set_invalid_field(ctsio, 8025 /*sks_valid*/ 1, 8026 /*command*/ 0, 8027 /*field*/ 20, 8028 /*bit_valid*/ 1, 8029 /*bit*/ bit_ptr); 8030 ctl_done((union ctl_io *)ctsio); 8031 return (CTL_RETVAL_COMPLETE); 8032 } 8033 8034 mtx_lock(&lun->lun_lock); 8035 8036 /* 8037 * The initiator wants to clear the 8038 * key/unregister. 8039 */ 8040 if (sa_res_key == 0) { 8041 if ((res_key == 0 8042 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8043 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8044 && ctl_get_prkey(lun, residx) == 0)) { 8045 mtx_unlock(&lun->lun_lock); 8046 goto done; 8047 } 8048 8049 ctl_clr_prkey(lun, residx); 8050 lun->pr_key_count--; 8051 8052 if (residx == lun->pr_res_idx) { 8053 lun->flags &= ~CTL_LUN_PR_RESERVED; 8054 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8055 8056 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8057 || lun->res_type == SPR_TYPE_EX_AC_RO) 8058 && lun->pr_key_count) { 8059 /* 8060 * If the reservation is a registrants 8061 * only type we need to generate a UA 8062 * for other registered inits. The 8063 * sense code should be RESERVATIONS 8064 * RELEASED 8065 */ 8066 8067 for (i = 0; i < CTL_MAX_INITIATORS;i++){ 8068 if (ctl_get_prkey(lun, i + 8069 softc->persis_offset) == 0) 8070 continue; 8071 ctl_est_ua(lun, i, 8072 CTL_UA_RES_RELEASE); 8073 } 8074 } 8075 lun->res_type = 0; 8076 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8077 if (lun->pr_key_count==0) { 8078 lun->flags &= ~CTL_LUN_PR_RESERVED; 8079 lun->res_type = 0; 8080 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8081 } 8082 } 8083 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8084 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8085 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8086 persis_io.pr.pr_info.residx = residx; 8087 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8088 &persis_io, sizeof(persis_io), 0 )) > 8089 CTL_HA_STATUS_SUCCESS) { 8090 printf("CTL:Persis Out error returned from " 8091 "ctl_ha_msg_send %d\n", isc_retval); 8092 } 8093 } else /* sa_res_key != 0 */ { 8094 8095 /* 8096 * If we aren't registered currently then increment 8097 * the key count and set the registered flag. 
8098 */ 8099 ctl_alloc_prkey(lun, residx); 8100 if (ctl_get_prkey(lun, residx) == 0) 8101 lun->pr_key_count++; 8102 ctl_set_prkey(lun, residx, sa_res_key); 8103 8104 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8105 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8106 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8107 persis_io.pr.pr_info.residx = residx; 8108 memcpy(persis_io.pr.pr_info.sa_res_key, 8109 param->serv_act_res_key, 8110 sizeof(param->serv_act_res_key)); 8111 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8112 &persis_io, sizeof(persis_io), 0)) > 8113 CTL_HA_STATUS_SUCCESS) { 8114 printf("CTL:Persis Out error returned from " 8115 "ctl_ha_msg_send %d\n", isc_retval); 8116 } 8117 } 8118 lun->PRGeneration++; 8119 mtx_unlock(&lun->lun_lock); 8120 8121 break; 8122 } 8123 case SPRO_RESERVE: 8124 #if 0 8125 printf("Reserve executed type %d\n", type); 8126 #endif 8127 mtx_lock(&lun->lun_lock); 8128 if (lun->flags & CTL_LUN_PR_RESERVED) { 8129 /* 8130 * if this isn't the reservation holder and it's 8131 * not a "all registrants" type or if the type is 8132 * different then we have a conflict 8133 */ 8134 if ((lun->pr_res_idx != residx 8135 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8136 || lun->res_type != type) { 8137 mtx_unlock(&lun->lun_lock); 8138 free(ctsio->kern_data_ptr, M_CTL); 8139 ctl_set_reservation_conflict(ctsio); 8140 ctl_done((union ctl_io *)ctsio); 8141 return (CTL_RETVAL_COMPLETE); 8142 } 8143 mtx_unlock(&lun->lun_lock); 8144 } else /* create a reservation */ { 8145 /* 8146 * If it's not an "all registrants" type record 8147 * reservation holder 8148 */ 8149 if (type != SPR_TYPE_WR_EX_AR 8150 && type != SPR_TYPE_EX_AC_AR) 8151 lun->pr_res_idx = residx; /* Res holder */ 8152 else 8153 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8154 8155 lun->flags |= CTL_LUN_PR_RESERVED; 8156 lun->res_type = type; 8157 8158 mtx_unlock(&lun->lun_lock); 8159 8160 /* send msg to other side */ 8161 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8162 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8163 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8164 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8165 persis_io.pr.pr_info.res_type = type; 8166 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8167 &persis_io, sizeof(persis_io), 0)) > 8168 CTL_HA_STATUS_SUCCESS) { 8169 printf("CTL:Persis Out error returned from " 8170 "ctl_ha_msg_send %d\n", isc_retval); 8171 } 8172 } 8173 break; 8174 8175 case SPRO_RELEASE: 8176 mtx_lock(&lun->lun_lock); 8177 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8178 /* No reservation exists return good status */ 8179 mtx_unlock(&lun->lun_lock); 8180 goto done; 8181 } 8182 /* 8183 * Is this nexus a reservation holder? 8184 */ 8185 if (lun->pr_res_idx != residx 8186 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8187 /* 8188 * not a res holder return good status but 8189 * do nothing 8190 */ 8191 mtx_unlock(&lun->lun_lock); 8192 goto done; 8193 } 8194 8195 if (lun->res_type != type) { 8196 mtx_unlock(&lun->lun_lock); 8197 free(ctsio->kern_data_ptr, M_CTL); 8198 ctl_set_illegal_pr_release(ctsio); 8199 ctl_done((union ctl_io *)ctsio); 8200 return (CTL_RETVAL_COMPLETE); 8201 } 8202 8203 /* okay to release */ 8204 lun->flags &= ~CTL_LUN_PR_RESERVED; 8205 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8206 lun->res_type = 0; 8207 8208 /* 8209 * if this isn't an exclusive access 8210 * res generate UA for all other 8211 * registrants. 
8212 */ 8213 if (type != SPR_TYPE_EX_AC 8214 && type != SPR_TYPE_WR_EX) { 8215 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8216 if (i == residx || 8217 ctl_get_prkey(lun, 8218 i + softc->persis_offset) == 0) 8219 continue; 8220 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8221 } 8222 } 8223 mtx_unlock(&lun->lun_lock); 8224 /* Send msg to other side */ 8225 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8226 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8227 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8228 if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io, 8229 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 8230 printf("CTL:Persis Out error returned from " 8231 "ctl_ha_msg_send %d\n", isc_retval); 8232 } 8233 break; 8234 8235 case SPRO_CLEAR: 8236 /* send msg to other side */ 8237 8238 mtx_lock(&lun->lun_lock); 8239 lun->flags &= ~CTL_LUN_PR_RESERVED; 8240 lun->res_type = 0; 8241 lun->pr_key_count = 0; 8242 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8243 8244 ctl_clr_prkey(lun, residx); 8245 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) 8246 if (ctl_get_prkey(lun, i) != 0) { 8247 ctl_clr_prkey(lun, i); 8248 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8249 } 8250 lun->PRGeneration++; 8251 mtx_unlock(&lun->lun_lock); 8252 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8253 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8254 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8255 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8256 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 8257 printf("CTL:Persis Out error returned from " 8258 "ctl_ha_msg_send %d\n", isc_retval); 8259 } 8260 break; 8261 8262 case SPRO_PREEMPT: 8263 case SPRO_PRE_ABO: { 8264 int nretval; 8265 8266 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8267 residx, ctsio, cdb, param); 8268 if (nretval != 0) 8269 return (CTL_RETVAL_COMPLETE); 8270 break; 8271 } 8272 default: 8273 panic("Invalid PR type %x", cdb->action); 8274 } 8275 8276 done: 8277 free(ctsio->kern_data_ptr, M_CTL); 8278 ctl_set_success(ctsio); 8279 ctl_done((union ctl_io *)ctsio); 8280 8281 return (retval); 8282 } 8283 8284 /* 8285 * This routine is for handling a message from the other SC pertaining to 8286 * persistent reserve out. All the error checking will have been done 8287 * so only perorming the action need be done here to keep the two 8288 * in sync. 8289 */ 8290 static void 8291 ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg) 8292 { 8293 struct ctl_lun *lun; 8294 struct ctl_softc *softc; 8295 int i; 8296 uint32_t targ_lun; 8297 8298 softc = control_softc; 8299 8300 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8301 lun = softc->ctl_luns[targ_lun]; 8302 mtx_lock(&lun->lun_lock); 8303 switch(msg->pr.pr_info.action) { 8304 case CTL_PR_REG_KEY: 8305 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8306 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8307 lun->pr_key_count++; 8308 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8309 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8310 lun->PRGeneration++; 8311 break; 8312 8313 case CTL_PR_UNREG_KEY: 8314 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8315 lun->pr_key_count--; 8316 8317 /* XXX Need to see if the reservation has been released */ 8318 /* if so do we need to generate UA? 
*/ 8319 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8320 lun->flags &= ~CTL_LUN_PR_RESERVED; 8321 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8322 8323 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8324 || lun->res_type == SPR_TYPE_EX_AC_RO) 8325 && lun->pr_key_count) { 8326 /* 8327 * If the reservation is a registrants 8328 * only type we need to generate a UA 8329 * for other registered inits. The 8330 * sense code should be RESERVATIONS 8331 * RELEASED 8332 */ 8333 8334 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8335 if (ctl_get_prkey(lun, i + 8336 softc->persis_offset) == 0) 8337 continue; 8338 8339 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8340 } 8341 } 8342 lun->res_type = 0; 8343 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8344 if (lun->pr_key_count==0) { 8345 lun->flags &= ~CTL_LUN_PR_RESERVED; 8346 lun->res_type = 0; 8347 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8348 } 8349 } 8350 lun->PRGeneration++; 8351 break; 8352 8353 case CTL_PR_RESERVE: 8354 lun->flags |= CTL_LUN_PR_RESERVED; 8355 lun->res_type = msg->pr.pr_info.res_type; 8356 lun->pr_res_idx = msg->pr.pr_info.residx; 8357 8358 break; 8359 8360 case CTL_PR_RELEASE: 8361 /* 8362 * if this isn't an exclusive access res generate UA for all 8363 * other registrants. 8364 */ 8365 if (lun->res_type != SPR_TYPE_EX_AC 8366 && lun->res_type != SPR_TYPE_WR_EX) { 8367 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8368 if (ctl_get_prkey(lun, i + softc->persis_offset) != 0) 8369 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8370 } 8371 8372 lun->flags &= ~CTL_LUN_PR_RESERVED; 8373 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8374 lun->res_type = 0; 8375 break; 8376 8377 case CTL_PR_PREEMPT: 8378 ctl_pro_preempt_other(lun, msg); 8379 break; 8380 case CTL_PR_CLEAR: 8381 lun->flags &= ~CTL_LUN_PR_RESERVED; 8382 lun->res_type = 0; 8383 lun->pr_key_count = 0; 8384 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8385 8386 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8387 if (ctl_get_prkey(lun, i) == 0) 8388 continue; 8389 ctl_clr_prkey(lun, i); 8390 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8391 } 8392 lun->PRGeneration++; 8393 break; 8394 } 8395 8396 mtx_unlock(&lun->lun_lock); 8397 } 8398 8399 int 8400 ctl_read_write(struct ctl_scsiio *ctsio) 8401 { 8402 struct ctl_lun *lun; 8403 struct ctl_lba_len_flags *lbalen; 8404 uint64_t lba; 8405 uint32_t num_blocks; 8406 int flags, retval; 8407 int isread; 8408 8409 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8410 8411 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8412 8413 flags = 0; 8414 retval = CTL_RETVAL_COMPLETE; 8415 8416 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8417 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8418 switch (ctsio->cdb[0]) { 8419 case READ_6: 8420 case WRITE_6: { 8421 struct scsi_rw_6 *cdb; 8422 8423 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8424 8425 lba = scsi_3btoul(cdb->addr); 8426 /* only 5 bits are valid in the most significant address byte */ 8427 lba &= 0x1fffff; 8428 num_blocks = cdb->length; 8429 /* 8430 * This is correct according to SBC-2. 
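 * The TRANSFER LENGTH field of the 6-byte CDB is only one byte wide,
 * so a value of zero is defined to mean 256 blocks rather than zero.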
8431 */ 8432 if (num_blocks == 0) 8433 num_blocks = 256; 8434 break; 8435 } 8436 case READ_10: 8437 case WRITE_10: { 8438 struct scsi_rw_10 *cdb; 8439 8440 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8441 if (cdb->byte2 & SRW10_FUA) 8442 flags |= CTL_LLF_FUA; 8443 if (cdb->byte2 & SRW10_DPO) 8444 flags |= CTL_LLF_DPO; 8445 lba = scsi_4btoul(cdb->addr); 8446 num_blocks = scsi_2btoul(cdb->length); 8447 break; 8448 } 8449 case WRITE_VERIFY_10: { 8450 struct scsi_write_verify_10 *cdb; 8451 8452 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8453 flags |= CTL_LLF_FUA; 8454 if (cdb->byte2 & SWV_DPO) 8455 flags |= CTL_LLF_DPO; 8456 lba = scsi_4btoul(cdb->addr); 8457 num_blocks = scsi_2btoul(cdb->length); 8458 break; 8459 } 8460 case READ_12: 8461 case WRITE_12: { 8462 struct scsi_rw_12 *cdb; 8463 8464 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8465 if (cdb->byte2 & SRW12_FUA) 8466 flags |= CTL_LLF_FUA; 8467 if (cdb->byte2 & SRW12_DPO) 8468 flags |= CTL_LLF_DPO; 8469 lba = scsi_4btoul(cdb->addr); 8470 num_blocks = scsi_4btoul(cdb->length); 8471 break; 8472 } 8473 case WRITE_VERIFY_12: { 8474 struct scsi_write_verify_12 *cdb; 8475 8476 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8477 flags |= CTL_LLF_FUA; 8478 if (cdb->byte2 & SWV_DPO) 8479 flags |= CTL_LLF_DPO; 8480 lba = scsi_4btoul(cdb->addr); 8481 num_blocks = scsi_4btoul(cdb->length); 8482 break; 8483 } 8484 case READ_16: 8485 case WRITE_16: { 8486 struct scsi_rw_16 *cdb; 8487 8488 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8489 if (cdb->byte2 & SRW12_FUA) 8490 flags |= CTL_LLF_FUA; 8491 if (cdb->byte2 & SRW12_DPO) 8492 flags |= CTL_LLF_DPO; 8493 lba = scsi_8btou64(cdb->addr); 8494 num_blocks = scsi_4btoul(cdb->length); 8495 break; 8496 } 8497 case WRITE_ATOMIC_16: { 8498 struct scsi_rw_16 *cdb; 8499 8500 if (lun->be_lun->atomicblock == 0) { 8501 ctl_set_invalid_opcode(ctsio); 8502 ctl_done((union ctl_io *)ctsio); 8503 return (CTL_RETVAL_COMPLETE); 8504 } 8505 8506 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8507 if (cdb->byte2 & SRW12_FUA) 8508 flags |= CTL_LLF_FUA; 8509 if (cdb->byte2 & SRW12_DPO) 8510 flags |= CTL_LLF_DPO; 8511 lba = scsi_8btou64(cdb->addr); 8512 num_blocks = scsi_4btoul(cdb->length); 8513 if (num_blocks > lun->be_lun->atomicblock) { 8514 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8515 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8516 /*bit*/ 0); 8517 ctl_done((union ctl_io *)ctsio); 8518 return (CTL_RETVAL_COMPLETE); 8519 } 8520 break; 8521 } 8522 case WRITE_VERIFY_16: { 8523 struct scsi_write_verify_16 *cdb; 8524 8525 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8526 flags |= CTL_LLF_FUA; 8527 if (cdb->byte2 & SWV_DPO) 8528 flags |= CTL_LLF_DPO; 8529 lba = scsi_8btou64(cdb->addr); 8530 num_blocks = scsi_4btoul(cdb->length); 8531 break; 8532 } 8533 default: 8534 /* 8535 * We got a command we don't support. This shouldn't 8536 * happen, commands should be filtered out above us. 8537 */ 8538 ctl_set_invalid_opcode(ctsio); 8539 ctl_done((union ctl_io *)ctsio); 8540 8541 return (CTL_RETVAL_COMPLETE); 8542 break; /* NOTREACHED */ 8543 } 8544 8545 /* 8546 * The first check is to make sure we're in bounds, the second 8547 * check is to catch wrap-around problems. If the lba + num blocks 8548 * is less than the lba, then we've wrapped around and the block 8549 * range is invalid anyway. 
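 * For example, an lba of 0xfffffffffffffff0 with num_blocks of 0x20
 * wraps the 64-bit sum around to a small value that would pass the
 * first check, so the second check is needed to reject it.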
8550 */ 8551 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8552 || ((lba + num_blocks) < lba)) { 8553 ctl_set_lba_out_of_range(ctsio); 8554 ctl_done((union ctl_io *)ctsio); 8555 return (CTL_RETVAL_COMPLETE); 8556 } 8557 8558 /* 8559 * According to SBC-3, a transfer length of 0 is not an error. 8560 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8561 * translates to 256 blocks for those commands. 8562 */ 8563 if (num_blocks == 0) { 8564 ctl_set_success(ctsio); 8565 ctl_done((union ctl_io *)ctsio); 8566 return (CTL_RETVAL_COMPLETE); 8567 } 8568 8569 /* Set FUA and/or DPO if caches are disabled. */ 8570 if (isread) { 8571 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8572 SCP_RCD) != 0) 8573 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8574 } else { 8575 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8576 SCP_WCE) == 0) 8577 flags |= CTL_LLF_FUA; 8578 } 8579 8580 lbalen = (struct ctl_lba_len_flags *) 8581 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8582 lbalen->lba = lba; 8583 lbalen->len = num_blocks; 8584 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8585 8586 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8587 ctsio->kern_rel_offset = 0; 8588 8589 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8590 8591 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8592 8593 return (retval); 8594 } 8595 8596 static int 8597 ctl_cnw_cont(union ctl_io *io) 8598 { 8599 struct ctl_scsiio *ctsio; 8600 struct ctl_lun *lun; 8601 struct ctl_lba_len_flags *lbalen; 8602 int retval; 8603 8604 ctsio = &io->scsiio; 8605 ctsio->io_hdr.status = CTL_STATUS_NONE; 8606 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8607 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8608 lbalen = (struct ctl_lba_len_flags *) 8609 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8610 lbalen->flags &= ~CTL_LLF_COMPARE; 8611 lbalen->flags |= CTL_LLF_WRITE; 8612 8613 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8614 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8615 return (retval); 8616 } 8617 8618 int 8619 ctl_cnw(struct ctl_scsiio *ctsio) 8620 { 8621 struct ctl_lun *lun; 8622 struct ctl_lba_len_flags *lbalen; 8623 uint64_t lba; 8624 uint32_t num_blocks; 8625 int flags, retval; 8626 8627 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8628 8629 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8630 8631 flags = 0; 8632 retval = CTL_RETVAL_COMPLETE; 8633 8634 switch (ctsio->cdb[0]) { 8635 case COMPARE_AND_WRITE: { 8636 struct scsi_compare_and_write *cdb; 8637 8638 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8639 if (cdb->byte2 & SRW10_FUA) 8640 flags |= CTL_LLF_FUA; 8641 if (cdb->byte2 & SRW10_DPO) 8642 flags |= CTL_LLF_DPO; 8643 lba = scsi_8btou64(cdb->addr); 8644 num_blocks = cdb->length; 8645 break; 8646 } 8647 default: 8648 /* 8649 * We got a command we don't support. This shouldn't 8650 * happen, commands should be filtered out above us. 8651 */ 8652 ctl_set_invalid_opcode(ctsio); 8653 ctl_done((union ctl_io *)ctsio); 8654 8655 return (CTL_RETVAL_COMPLETE); 8656 break; /* NOTREACHED */ 8657 } 8658 8659 /* 8660 * The first check is to make sure we're in bounds, the second 8661 * check is to catch wrap-around problems. If the lba + num blocks 8662 * is less than the lba, then we've wrapped around and the block 8663 * range is invalid anyway. 
	 */
	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
	 || ((lba + num_blocks) < lba)) {
		ctl_set_lba_out_of_range(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * According to SBC-3, a transfer length of 0 is not an error.
	 */
	if (num_blocks == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/* Set FUA if write cache is disabled. */
	if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
	    SCP_WCE) == 0)
		flags |= CTL_LLF_FUA;

	ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize;
	ctsio->kern_rel_offset = 0;

	/*
	 * Set the IO_CONT flag, so that if this I/O gets passed to
	 * ctl_data_submit_done(), it'll get passed back to
	 * ctl_cnw_cont() for further processing.
	 */
	ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
	ctsio->io_cont = ctl_cnw_cont;

	lbalen = (struct ctl_lba_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = num_blocks;
	lbalen->flags = CTL_LLF_COMPARE | flags;

	CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n"));
	retval = lun->backend->data_submit((union ctl_io *)ctsio);
	return (retval);
}

int
ctl_verify(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int bytchk, flags;
	int retval;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));

	bytchk = 0;
	flags = CTL_LLF_FUA;
	retval = CTL_RETVAL_COMPLETE;

	switch (ctsio->cdb[0]) {
	case VERIFY_10: {
		struct scsi_verify_10 *cdb;

		cdb = (struct scsi_verify_10 *)ctsio->cdb;
		if (cdb->byte2 & SVFY_BYTCHK)
			bytchk = 1;
		if (cdb->byte2 & SVFY_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		break;
	}
	case VERIFY_12: {
		struct scsi_verify_12 *cdb;

		cdb = (struct scsi_verify_12 *)ctsio->cdb;
		if (cdb->byte2 & SVFY_BYTCHK)
			bytchk = 1;
		if (cdb->byte2 & SVFY_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_16: {
		struct scsi_rw_16 *cdb;

		cdb = (struct scsi_rw_16 *)ctsio->cdb;
		if (cdb->byte2 & SVFY_BYTCHK)
			bytchk = 1;
		if (cdb->byte2 & SVFY_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	default:
		/*
		 * We got a command we don't support.  This shouldn't
		 * happen, commands should be filtered out above us.
		 */
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * The first check is to make sure we're in bounds, the second
	 * check is to catch wrap-around problems.  If the lba + num blocks
	 * is less than the lba, then we've wrapped around and the block
	 * range is invalid anyway.
8778 */ 8779 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8780 || ((lba + num_blocks) < lba)) { 8781 ctl_set_lba_out_of_range(ctsio); 8782 ctl_done((union ctl_io *)ctsio); 8783 return (CTL_RETVAL_COMPLETE); 8784 } 8785 8786 /* 8787 * According to SBC-3, a transfer length of 0 is not an error. 8788 */ 8789 if (num_blocks == 0) { 8790 ctl_set_success(ctsio); 8791 ctl_done((union ctl_io *)ctsio); 8792 return (CTL_RETVAL_COMPLETE); 8793 } 8794 8795 lbalen = (struct ctl_lba_len_flags *) 8796 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8797 lbalen->lba = lba; 8798 lbalen->len = num_blocks; 8799 if (bytchk) { 8800 lbalen->flags = CTL_LLF_COMPARE | flags; 8801 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8802 } else { 8803 lbalen->flags = CTL_LLF_VERIFY | flags; 8804 ctsio->kern_total_len = 0; 8805 } 8806 ctsio->kern_rel_offset = 0; 8807 8808 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 8809 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8810 return (retval); 8811 } 8812 8813 int 8814 ctl_report_luns(struct ctl_scsiio *ctsio) 8815 { 8816 struct ctl_softc *softc = control_softc; 8817 struct scsi_report_luns *cdb; 8818 struct scsi_report_luns_data *lun_data; 8819 struct ctl_lun *lun, *request_lun; 8820 struct ctl_port *port; 8821 int num_luns, retval; 8822 uint32_t alloc_len, lun_datalen; 8823 int num_filled, well_known; 8824 uint32_t initidx, targ_lun_id, lun_id; 8825 8826 retval = CTL_RETVAL_COMPLETE; 8827 well_known = 0; 8828 8829 cdb = (struct scsi_report_luns *)ctsio->cdb; 8830 port = ctl_io_port(&ctsio->io_hdr); 8831 8832 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 8833 8834 mtx_lock(&softc->ctl_lock); 8835 num_luns = 0; 8836 for (targ_lun_id = 0; targ_lun_id < CTL_MAX_LUNS; targ_lun_id++) { 8837 if (ctl_lun_map_from_port(port, targ_lun_id) < CTL_MAX_LUNS) 8838 num_luns++; 8839 } 8840 mtx_unlock(&softc->ctl_lock); 8841 8842 switch (cdb->select_report) { 8843 case RPL_REPORT_DEFAULT: 8844 case RPL_REPORT_ALL: 8845 break; 8846 case RPL_REPORT_WELLKNOWN: 8847 well_known = 1; 8848 num_luns = 0; 8849 break; 8850 default: 8851 ctl_set_invalid_field(ctsio, 8852 /*sks_valid*/ 1, 8853 /*command*/ 1, 8854 /*field*/ 2, 8855 /*bit_valid*/ 0, 8856 /*bit*/ 0); 8857 ctl_done((union ctl_io *)ctsio); 8858 return (retval); 8859 break; /* NOTREACHED */ 8860 } 8861 8862 alloc_len = scsi_4btoul(cdb->length); 8863 /* 8864 * The initiator has to allocate at least 16 bytes for this request, 8865 * so he can at least get the header and the first LUN. Otherwise 8866 * we reject the request (per SPC-3 rev 14, section 6.21). 
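 * (Those 16 bytes are the 8-byte REPORT LUNS parameter data header
 * plus a single 8-byte LUN entry.)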
	 */
	if (alloc_len < (sizeof(struct scsi_report_luns_data) +
	    sizeof(struct scsi_report_luns_lundata))) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 6,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}

	request_lun = (struct ctl_lun *)
		ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	lun_datalen = sizeof(*lun_data) +
		(num_luns * sizeof(struct scsi_report_luns_lundata));

	ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO);
	lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);

	mtx_lock(&softc->ctl_lock);
	for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) {
		lun_id = ctl_lun_map_from_port(port, targ_lun_id);
		if (lun_id >= CTL_MAX_LUNS)
			continue;
		lun = softc->ctl_luns[lun_id];
		if (lun == NULL)
			continue;

		if (targ_lun_id <= 0xff) {
			/*
			 * Peripheral addressing method, bus number 0.
			 */
			lun_data->luns[num_filled].lundata[0] =
				RPL_LUNDATA_ATYP_PERIPH;
			lun_data->luns[num_filled].lundata[1] = targ_lun_id;
			num_filled++;
		} else if (targ_lun_id <= 0x3fff) {
			/*
			 * Flat addressing method.
			 */
			lun_data->luns[num_filled].lundata[0] =
				RPL_LUNDATA_ATYP_FLAT | (targ_lun_id >> 8);
			lun_data->luns[num_filled].lundata[1] =
				(targ_lun_id & 0xff);
			num_filled++;
		} else if (targ_lun_id <= 0xffffff) {
			/*
			 * Extended flat addressing method.
			 */
			lun_data->luns[num_filled].lundata[0] =
			    RPL_LUNDATA_ATYP_EXTLUN | 0x12;
			scsi_ulto3b(targ_lun_id,
			    &lun_data->luns[num_filled].lundata[1]);
			num_filled++;
		} else {
			printf("ctl_report_luns: bogus LUN number %jd, "
			       "skipping\n", (intmax_t)targ_lun_id);
		}
		/*
		 * According to SPC-3, rev 14 section 6.21:
		 *
		 * "The execution of a REPORT LUNS command to any valid and
		 * installed logical unit shall clear the REPORTED LUNS DATA
		 * HAS CHANGED unit attention condition for all logical
		 * units of that target with respect to the requesting
		 * initiator. A valid and installed logical unit is one
		 * having a PERIPHERAL QUALIFIER of 000b in the standard
		 * INQUIRY data (see 6.4.2)."
		 *
		 * If request_lun is NULL, the LUN this report luns command
		 * was issued to is either disabled or doesn't exist. In that
		 * case, we shouldn't clear any pending lun change unit
		 * attention.
		 */
		if (request_lun != NULL) {
			mtx_lock(&lun->lun_lock);
			ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE);
			mtx_unlock(&lun->lun_lock);
		}
	}
	mtx_unlock(&softc->ctl_lock);

	/*
	 * It's quite possible that we've returned fewer LUNs than we allocated
	 * space for.  Trim it.
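	 * This can happen, for example, because ctl_lock is dropped between
	 * the pass that counted the LUNs and the pass that filled them in,
	 * so a LUN may have gone away in the meantime.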
8958 */ 8959 lun_datalen = sizeof(*lun_data) + 8960 (num_filled * sizeof(struct scsi_report_luns_lundata)); 8961 8962 if (lun_datalen < alloc_len) { 8963 ctsio->residual = alloc_len - lun_datalen; 8964 ctsio->kern_data_len = lun_datalen; 8965 ctsio->kern_total_len = lun_datalen; 8966 } else { 8967 ctsio->residual = 0; 8968 ctsio->kern_data_len = alloc_len; 8969 ctsio->kern_total_len = alloc_len; 8970 } 8971 ctsio->kern_data_resid = 0; 8972 ctsio->kern_rel_offset = 0; 8973 ctsio->kern_sg_entries = 0; 8974 8975 /* 8976 * We set this to the actual data length, regardless of how much 8977 * space we actually have to return results. If the user looks at 8978 * this value, he'll know whether or not he allocated enough space 8979 * and reissue the command if necessary. We don't support well 8980 * known logical units, so if the user asks for that, return none. 8981 */ 8982 scsi_ulto4b(lun_datalen - 8, lun_data->length); 8983 8984 /* 8985 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 8986 * this request. 8987 */ 8988 ctl_set_success(ctsio); 8989 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8990 ctsio->be_move_done = ctl_config_move_done; 8991 ctl_datamove((union ctl_io *)ctsio); 8992 return (retval); 8993 } 8994 8995 int 8996 ctl_request_sense(struct ctl_scsiio *ctsio) 8997 { 8998 struct scsi_request_sense *cdb; 8999 struct scsi_sense_data *sense_ptr; 9000 struct ctl_softc *ctl_softc; 9001 struct ctl_lun *lun; 9002 uint32_t initidx; 9003 int have_error; 9004 scsi_sense_data_type sense_format; 9005 ctl_ua_type ua_type; 9006 9007 cdb = (struct scsi_request_sense *)ctsio->cdb; 9008 9009 ctl_softc = control_softc; 9010 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9011 9012 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9013 9014 /* 9015 * Determine which sense format the user wants. 9016 */ 9017 if (cdb->byte2 & SRS_DESC) 9018 sense_format = SSD_TYPE_DESC; 9019 else 9020 sense_format = SSD_TYPE_FIXED; 9021 9022 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9023 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9024 ctsio->kern_sg_entries = 0; 9025 9026 /* 9027 * struct scsi_sense_data, which is currently set to 256 bytes, is 9028 * larger than the largest allowed value for the length field in the 9029 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9030 */ 9031 ctsio->residual = 0; 9032 ctsio->kern_data_len = cdb->length; 9033 ctsio->kern_total_len = cdb->length; 9034 9035 ctsio->kern_data_resid = 0; 9036 ctsio->kern_rel_offset = 0; 9037 ctsio->kern_sg_entries = 0; 9038 9039 /* 9040 * If we don't have a LUN, we don't have any pending sense. 9041 */ 9042 if (lun == NULL) 9043 goto no_sense; 9044 9045 have_error = 0; 9046 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9047 /* 9048 * Check for pending sense, and then for pending unit attentions. 9049 * Pending sense gets returned first, then pending unit attentions. 9050 */ 9051 mtx_lock(&lun->lun_lock); 9052 #ifdef CTL_WITH_CA 9053 if (ctl_is_set(lun->have_ca, initidx)) { 9054 scsi_sense_data_type stored_format; 9055 9056 /* 9057 * Check to see which sense format was used for the stored 9058 * sense data. 9059 */ 9060 stored_format = scsi_sense_type(&lun->pending_sense[initidx]); 9061 9062 /* 9063 * If the user requested a different sense format than the 9064 * one we stored, then we need to convert it to the other 9065 * format. If we're going from descriptor to fixed format 9066 * sense data, we may lose things in translation, depending 9067 * on what options were used. 
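 * (Descriptor format sense can carry, for example, an 8-byte
 * INFORMATION value, which has no room in the 4-byte field of the
 * fixed format.)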
9068 * 9069 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9070 * for some reason we'll just copy it out as-is. 9071 */ 9072 if ((stored_format == SSD_TYPE_FIXED) 9073 && (sense_format == SSD_TYPE_DESC)) 9074 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9075 &lun->pending_sense[initidx], 9076 (struct scsi_sense_data_desc *)sense_ptr); 9077 else if ((stored_format == SSD_TYPE_DESC) 9078 && (sense_format == SSD_TYPE_FIXED)) 9079 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9080 &lun->pending_sense[initidx], 9081 (struct scsi_sense_data_fixed *)sense_ptr); 9082 else 9083 memcpy(sense_ptr, &lun->pending_sense[initidx], 9084 MIN(sizeof(*sense_ptr), 9085 sizeof(lun->pending_sense[initidx]))); 9086 9087 ctl_clear_mask(lun->have_ca, initidx); 9088 have_error = 1; 9089 } else 9090 #endif 9091 { 9092 ua_type = ctl_build_ua(lun, initidx, sense_ptr, sense_format); 9093 if (ua_type != CTL_UA_NONE) 9094 have_error = 1; 9095 if (ua_type == CTL_UA_LUN_CHANGE) { 9096 mtx_unlock(&lun->lun_lock); 9097 mtx_lock(&ctl_softc->ctl_lock); 9098 ctl_clear_ua(ctl_softc, initidx, ua_type); 9099 mtx_unlock(&ctl_softc->ctl_lock); 9100 mtx_lock(&lun->lun_lock); 9101 } 9102 9103 } 9104 mtx_unlock(&lun->lun_lock); 9105 9106 /* 9107 * We already have a pending error, return it. 9108 */ 9109 if (have_error != 0) { 9110 /* 9111 * We report the SCSI status as OK, since the status of the 9112 * request sense command itself is OK. 9113 * We report 0 for the sense length, because we aren't doing 9114 * autosense in this case. We're reporting sense as 9115 * parameter data. 9116 */ 9117 ctl_set_success(ctsio); 9118 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9119 ctsio->be_move_done = ctl_config_move_done; 9120 ctl_datamove((union ctl_io *)ctsio); 9121 return (CTL_RETVAL_COMPLETE); 9122 } 9123 9124 no_sense: 9125 9126 /* 9127 * No sense information to report, so we report that everything is 9128 * okay. 9129 */ 9130 ctl_set_sense_data(sense_ptr, 9131 lun, 9132 sense_format, 9133 /*current_error*/ 1, 9134 /*sense_key*/ SSD_KEY_NO_SENSE, 9135 /*asc*/ 0x00, 9136 /*ascq*/ 0x00, 9137 SSD_ELEM_NONE); 9138 9139 /* 9140 * We report 0 for the sense length, because we aren't doing 9141 * autosense in this case. We're reporting sense as parameter data. 9142 */ 9143 ctl_set_success(ctsio); 9144 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9145 ctsio->be_move_done = ctl_config_move_done; 9146 ctl_datamove((union ctl_io *)ctsio); 9147 return (CTL_RETVAL_COMPLETE); 9148 } 9149 9150 int 9151 ctl_tur(struct ctl_scsiio *ctsio) 9152 { 9153 9154 CTL_DEBUG_PRINT(("ctl_tur\n")); 9155 9156 ctl_set_success(ctsio); 9157 ctl_done((union ctl_io *)ctsio); 9158 9159 return (CTL_RETVAL_COMPLETE); 9160 } 9161 9162 #ifdef notyet 9163 static int 9164 ctl_cmddt_inquiry(struct ctl_scsiio *ctsio) 9165 { 9166 9167 } 9168 #endif 9169 9170 /* 9171 * SCSI VPD page 0x00, the Supported VPD Pages page. 
9172 */ 9173 static int 9174 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9175 { 9176 struct scsi_vpd_supported_pages *pages; 9177 int sup_page_size; 9178 struct ctl_lun *lun; 9179 int p; 9180 9181 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9182 9183 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9184 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9185 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9186 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9187 ctsio->kern_sg_entries = 0; 9188 9189 if (sup_page_size < alloc_len) { 9190 ctsio->residual = alloc_len - sup_page_size; 9191 ctsio->kern_data_len = sup_page_size; 9192 ctsio->kern_total_len = sup_page_size; 9193 } else { 9194 ctsio->residual = 0; 9195 ctsio->kern_data_len = alloc_len; 9196 ctsio->kern_total_len = alloc_len; 9197 } 9198 ctsio->kern_data_resid = 0; 9199 ctsio->kern_rel_offset = 0; 9200 ctsio->kern_sg_entries = 0; 9201 9202 /* 9203 * The control device is always connected. The disk device, on the 9204 * other hand, may not be online all the time. Need to change this 9205 * to figure out whether the disk device is actually online or not. 9206 */ 9207 if (lun != NULL) 9208 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9209 lun->be_lun->lun_type; 9210 else 9211 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9212 9213 p = 0; 9214 /* Supported VPD pages */ 9215 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9216 /* Serial Number */ 9217 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9218 /* Device Identification */ 9219 pages->page_list[p++] = SVPD_DEVICE_ID; 9220 /* Extended INQUIRY Data */ 9221 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9222 /* Mode Page Policy */ 9223 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9224 /* SCSI Ports */ 9225 pages->page_list[p++] = SVPD_SCSI_PORTS; 9226 /* Third-party Copy */ 9227 pages->page_list[p++] = SVPD_SCSI_TPC; 9228 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9229 /* Block limits */ 9230 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9231 /* Block Device Characteristics */ 9232 pages->page_list[p++] = SVPD_BDC; 9233 /* Logical Block Provisioning */ 9234 pages->page_list[p++] = SVPD_LBP; 9235 } 9236 pages->length = p; 9237 9238 ctl_set_success(ctsio); 9239 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9240 ctsio->be_move_done = ctl_config_move_done; 9241 ctl_datamove((union ctl_io *)ctsio); 9242 return (CTL_RETVAL_COMPLETE); 9243 } 9244 9245 /* 9246 * SCSI VPD page 0x80, the Unit Serial Number page. 9247 */ 9248 static int 9249 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9250 { 9251 struct scsi_vpd_unit_serial_number *sn_ptr; 9252 struct ctl_lun *lun; 9253 int data_len; 9254 9255 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9256 9257 data_len = 4 + CTL_SN_LEN; 9258 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9259 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9260 if (data_len < alloc_len) { 9261 ctsio->residual = alloc_len - data_len; 9262 ctsio->kern_data_len = data_len; 9263 ctsio->kern_total_len = data_len; 9264 } else { 9265 ctsio->residual = 0; 9266 ctsio->kern_data_len = alloc_len; 9267 ctsio->kern_total_len = alloc_len; 9268 } 9269 ctsio->kern_data_resid = 0; 9270 ctsio->kern_rel_offset = 0; 9271 ctsio->kern_sg_entries = 0; 9272 9273 /* 9274 * The control device is always connected. The disk device, on the 9275 * other hand, may not be online all the time. 
Need to change this 9276 * to figure out whether the disk device is actually online or not. 9277 */ 9278 if (lun != NULL) 9279 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9280 lun->be_lun->lun_type; 9281 else 9282 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9283 9284 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9285 sn_ptr->length = CTL_SN_LEN; 9286 /* 9287 * If we don't have a LUN, we just leave the serial number as 9288 * all spaces. 9289 */ 9290 if (lun != NULL) { 9291 strncpy((char *)sn_ptr->serial_num, 9292 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9293 } else 9294 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9295 9296 ctl_set_success(ctsio); 9297 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9298 ctsio->be_move_done = ctl_config_move_done; 9299 ctl_datamove((union ctl_io *)ctsio); 9300 return (CTL_RETVAL_COMPLETE); 9301 } 9302 9303 9304 /* 9305 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 9306 */ 9307 static int 9308 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9309 { 9310 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9311 struct ctl_lun *lun; 9312 int data_len; 9313 9314 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9315 9316 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9317 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9318 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9319 ctsio->kern_sg_entries = 0; 9320 9321 if (data_len < alloc_len) { 9322 ctsio->residual = alloc_len - data_len; 9323 ctsio->kern_data_len = data_len; 9324 ctsio->kern_total_len = data_len; 9325 } else { 9326 ctsio->residual = 0; 9327 ctsio->kern_data_len = alloc_len; 9328 ctsio->kern_total_len = alloc_len; 9329 } 9330 ctsio->kern_data_resid = 0; 9331 ctsio->kern_rel_offset = 0; 9332 ctsio->kern_sg_entries = 0; 9333 9334 /* 9335 * The control device is always connected. The disk device, on the 9336 * other hand, may not be online all the time. 9337 */ 9338 if (lun != NULL) 9339 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9340 lun->be_lun->lun_type; 9341 else 9342 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9343 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9344 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9345 /* 9346 * We support head of queue, ordered and simple tags. 9347 */ 9348 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9349 /* 9350 * Volatile cache supported. 9351 */ 9352 eid_ptr->flags3 = SVPD_EID_V_SUP; 9353 9354 /* 9355 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9356 * attention for a particular IT nexus on all LUNs once we report 9357 * it to that nexus once. This bit is required as of SPC-4. 9358 */ 9359 eid_ptr->flags4 = SVPD_EID_LUICLT; 9360 9361 /* 9362 * XXX KDM in order to correctly answer this, we would need 9363 * information from the SIM to determine how much sense data it 9364 * can send. So this would really be a path inquiry field, most 9365 * likely. This can be set to a maximum of 252 according to SPC-4, 9366 * but the hardware may or may not be able to support that much. 9367 * 0 just means that the maximum sense data length is not reported. 
9368 */ 9369 eid_ptr->max_sense_length = 0; 9370 9371 ctl_set_success(ctsio); 9372 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9373 ctsio->be_move_done = ctl_config_move_done; 9374 ctl_datamove((union ctl_io *)ctsio); 9375 return (CTL_RETVAL_COMPLETE); 9376 } 9377 9378 static int 9379 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9380 { 9381 struct scsi_vpd_mode_page_policy *mpp_ptr; 9382 struct ctl_lun *lun; 9383 int data_len; 9384 9385 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9386 9387 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9388 sizeof(struct scsi_vpd_mode_page_policy_descr); 9389 9390 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9391 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9392 ctsio->kern_sg_entries = 0; 9393 9394 if (data_len < alloc_len) { 9395 ctsio->residual = alloc_len - data_len; 9396 ctsio->kern_data_len = data_len; 9397 ctsio->kern_total_len = data_len; 9398 } else { 9399 ctsio->residual = 0; 9400 ctsio->kern_data_len = alloc_len; 9401 ctsio->kern_total_len = alloc_len; 9402 } 9403 ctsio->kern_data_resid = 0; 9404 ctsio->kern_rel_offset = 0; 9405 ctsio->kern_sg_entries = 0; 9406 9407 /* 9408 * The control device is always connected. The disk device, on the 9409 * other hand, may not be online all the time. 9410 */ 9411 if (lun != NULL) 9412 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9413 lun->be_lun->lun_type; 9414 else 9415 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9416 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9417 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9418 mpp_ptr->descr[0].page_code = 0x3f; 9419 mpp_ptr->descr[0].subpage_code = 0xff; 9420 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9421 9422 ctl_set_success(ctsio); 9423 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9424 ctsio->be_move_done = ctl_config_move_done; 9425 ctl_datamove((union ctl_io *)ctsio); 9426 return (CTL_RETVAL_COMPLETE); 9427 } 9428 9429 /* 9430 * SCSI VPD page 0x83, the Device Identification page. 
9431 */ 9432 static int 9433 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9434 { 9435 struct scsi_vpd_device_id *devid_ptr; 9436 struct scsi_vpd_id_descriptor *desc; 9437 struct ctl_softc *softc; 9438 struct ctl_lun *lun; 9439 struct ctl_port *port; 9440 int data_len; 9441 uint8_t proto; 9442 9443 softc = control_softc; 9444 9445 port = softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]; 9446 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9447 9448 data_len = sizeof(struct scsi_vpd_device_id) + 9449 sizeof(struct scsi_vpd_id_descriptor) + 9450 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9451 sizeof(struct scsi_vpd_id_descriptor) + 9452 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9453 if (lun && lun->lun_devid) 9454 data_len += lun->lun_devid->len; 9455 if (port->port_devid) 9456 data_len += port->port_devid->len; 9457 if (port->target_devid) 9458 data_len += port->target_devid->len; 9459 9460 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9461 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9462 ctsio->kern_sg_entries = 0; 9463 9464 if (data_len < alloc_len) { 9465 ctsio->residual = alloc_len - data_len; 9466 ctsio->kern_data_len = data_len; 9467 ctsio->kern_total_len = data_len; 9468 } else { 9469 ctsio->residual = 0; 9470 ctsio->kern_data_len = alloc_len; 9471 ctsio->kern_total_len = alloc_len; 9472 } 9473 ctsio->kern_data_resid = 0; 9474 ctsio->kern_rel_offset = 0; 9475 ctsio->kern_sg_entries = 0; 9476 9477 /* 9478 * The control device is always connected. The disk device, on the 9479 * other hand, may not be online all the time. 9480 */ 9481 if (lun != NULL) 9482 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9483 lun->be_lun->lun_type; 9484 else 9485 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9486 devid_ptr->page_code = SVPD_DEVICE_ID; 9487 scsi_ulto2b(data_len - 4, devid_ptr->length); 9488 9489 if (port->port_type == CTL_PORT_FC) 9490 proto = SCSI_PROTO_FC << 4; 9491 else if (port->port_type == CTL_PORT_ISCSI) 9492 proto = SCSI_PROTO_ISCSI << 4; 9493 else 9494 proto = SCSI_PROTO_SPI << 4; 9495 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9496 9497 /* 9498 * We're using a LUN association here. i.e., this device ID is a 9499 * per-LUN identifier. 9500 */ 9501 if (lun && lun->lun_devid) { 9502 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9503 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9504 lun->lun_devid->len); 9505 } 9506 9507 /* 9508 * This is for the WWPN which is a port association. 
9509 */ 9510 if (port->port_devid) { 9511 memcpy(desc, port->port_devid->data, port->port_devid->len); 9512 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9513 port->port_devid->len); 9514 } 9515 9516 /* 9517 * This is for the Relative Target Port(type 4h) identifier 9518 */ 9519 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9520 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9521 SVPD_ID_TYPE_RELTARG; 9522 desc->length = 4; 9523 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9524 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9525 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9526 9527 /* 9528 * This is for the Target Port Group(type 5h) identifier 9529 */ 9530 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9531 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9532 SVPD_ID_TYPE_TPORTGRP; 9533 desc->length = 4; 9534 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS + 1, 9535 &desc->identifier[2]); 9536 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9537 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9538 9539 /* 9540 * This is for the Target identifier 9541 */ 9542 if (port->target_devid) { 9543 memcpy(desc, port->target_devid->data, port->target_devid->len); 9544 } 9545 9546 ctl_set_success(ctsio); 9547 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9548 ctsio->be_move_done = ctl_config_move_done; 9549 ctl_datamove((union ctl_io *)ctsio); 9550 return (CTL_RETVAL_COMPLETE); 9551 } 9552 9553 static int 9554 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9555 { 9556 struct ctl_softc *softc = control_softc; 9557 struct scsi_vpd_scsi_ports *sp; 9558 struct scsi_vpd_port_designation *pd; 9559 struct scsi_vpd_port_designation_cont *pdc; 9560 struct ctl_lun *lun; 9561 struct ctl_port *port; 9562 int data_len, num_target_ports, iid_len, id_len, g, pg, p; 9563 int num_target_port_groups; 9564 9565 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9566 9567 if (softc->is_single) 9568 num_target_port_groups = 1; 9569 else 9570 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 9571 num_target_ports = 0; 9572 iid_len = 0; 9573 id_len = 0; 9574 mtx_lock(&softc->ctl_lock); 9575 STAILQ_FOREACH(port, &softc->port_list, links) { 9576 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9577 continue; 9578 if (lun != NULL && 9579 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9580 continue; 9581 num_target_ports++; 9582 if (port->init_devid) 9583 iid_len += port->init_devid->len; 9584 if (port->port_devid) 9585 id_len += port->port_devid->len; 9586 } 9587 mtx_unlock(&softc->ctl_lock); 9588 9589 data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_port_groups * 9590 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9591 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9592 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9593 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9594 ctsio->kern_sg_entries = 0; 9595 9596 if (data_len < alloc_len) { 9597 ctsio->residual = alloc_len - data_len; 9598 ctsio->kern_data_len = data_len; 9599 ctsio->kern_total_len = data_len; 9600 } else { 9601 ctsio->residual = 0; 9602 ctsio->kern_data_len = alloc_len; 9603 ctsio->kern_total_len = alloc_len; 9604 } 9605 ctsio->kern_data_resid = 0; 9606 ctsio->kern_rel_offset = 0; 9607 ctsio->kern_sg_entries = 0; 9608 9609 /* 9610 * The control device is always connected. The disk device, on the 9611 * other hand, may not be online all the time. 
Need to change this 9612 * to figure out whether the disk device is actually online or not. 9613 */ 9614 if (lun != NULL) 9615 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 9616 lun->be_lun->lun_type; 9617 else 9618 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9619 9620 sp->page_code = SVPD_SCSI_PORTS; 9621 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9622 sp->page_length); 9623 pd = &sp->design[0]; 9624 9625 mtx_lock(&softc->ctl_lock); 9626 pg = softc->port_offset / CTL_MAX_PORTS; 9627 for (g = 0; g < num_target_port_groups; g++) { 9628 STAILQ_FOREACH(port, &softc->port_list, links) { 9629 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9630 continue; 9631 if (lun != NULL && 9632 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9633 continue; 9634 p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; 9635 scsi_ulto2b(p, pd->relative_port_id); 9636 if (port->init_devid && g == pg) { 9637 iid_len = port->init_devid->len; 9638 memcpy(pd->initiator_transportid, 9639 port->init_devid->data, port->init_devid->len); 9640 } else 9641 iid_len = 0; 9642 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9643 pdc = (struct scsi_vpd_port_designation_cont *) 9644 (&pd->initiator_transportid[iid_len]); 9645 if (port->port_devid && g == pg) { 9646 id_len = port->port_devid->len; 9647 memcpy(pdc->target_port_descriptors, 9648 port->port_devid->data, port->port_devid->len); 9649 } else 9650 id_len = 0; 9651 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9652 pd = (struct scsi_vpd_port_designation *) 9653 ((uint8_t *)pdc->target_port_descriptors + id_len); 9654 } 9655 } 9656 mtx_unlock(&softc->ctl_lock); 9657 9658 ctl_set_success(ctsio); 9659 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9660 ctsio->be_move_done = ctl_config_move_done; 9661 ctl_datamove((union ctl_io *)ctsio); 9662 return (CTL_RETVAL_COMPLETE); 9663 } 9664 9665 static int 9666 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9667 { 9668 struct scsi_vpd_block_limits *bl_ptr; 9669 struct ctl_lun *lun; 9670 int bs; 9671 9672 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9673 9674 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9675 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 9676 ctsio->kern_sg_entries = 0; 9677 9678 if (sizeof(*bl_ptr) < alloc_len) { 9679 ctsio->residual = alloc_len - sizeof(*bl_ptr); 9680 ctsio->kern_data_len = sizeof(*bl_ptr); 9681 ctsio->kern_total_len = sizeof(*bl_ptr); 9682 } else { 9683 ctsio->residual = 0; 9684 ctsio->kern_data_len = alloc_len; 9685 ctsio->kern_total_len = alloc_len; 9686 } 9687 ctsio->kern_data_resid = 0; 9688 ctsio->kern_rel_offset = 0; 9689 ctsio->kern_sg_entries = 0; 9690 9691 /* 9692 * The control device is always connected. The disk device, on the 9693 * other hand, may not be online all the time. Need to change this 9694 * to figure out whether the disk device is actually online or not. 
9695 */ 9696 if (lun != NULL) 9697 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9698 lun->be_lun->lun_type; 9699 else 9700 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9701 9702 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9703 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 9704 bl_ptr->max_cmp_write_len = 0xff; 9705 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9706 if (lun != NULL) { 9707 bs = lun->be_lun->blocksize; 9708 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 9709 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9710 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt); 9711 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt); 9712 if (lun->be_lun->ublockexp != 0) { 9713 scsi_ulto4b((1 << lun->be_lun->ublockexp), 9714 bl_ptr->opt_unmap_grain); 9715 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 9716 bl_ptr->unmap_grain_align); 9717 } 9718 } 9719 scsi_ulto4b(lun->be_lun->atomicblock, 9720 bl_ptr->max_atomic_transfer_length); 9721 scsi_ulto4b(0, bl_ptr->atomic_alignment); 9722 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 9723 } 9724 scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length); 9725 9726 ctl_set_success(ctsio); 9727 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9728 ctsio->be_move_done = ctl_config_move_done; 9729 ctl_datamove((union ctl_io *)ctsio); 9730 return (CTL_RETVAL_COMPLETE); 9731 } 9732 9733 static int 9734 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 9735 { 9736 struct scsi_vpd_block_device_characteristics *bdc_ptr; 9737 struct ctl_lun *lun; 9738 const char *value; 9739 u_int i; 9740 9741 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9742 9743 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 9744 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 9745 ctsio->kern_sg_entries = 0; 9746 9747 if (sizeof(*bdc_ptr) < alloc_len) { 9748 ctsio->residual = alloc_len - sizeof(*bdc_ptr); 9749 ctsio->kern_data_len = sizeof(*bdc_ptr); 9750 ctsio->kern_total_len = sizeof(*bdc_ptr); 9751 } else { 9752 ctsio->residual = 0; 9753 ctsio->kern_data_len = alloc_len; 9754 ctsio->kern_total_len = alloc_len; 9755 } 9756 ctsio->kern_data_resid = 0; 9757 ctsio->kern_rel_offset = 0; 9758 ctsio->kern_sg_entries = 0; 9759 9760 /* 9761 * The control device is always connected. The disk device, on the 9762 * other hand, may not be online all the time. Need to change this 9763 * to figure out whether the disk device is actually online or not. 
9764 */ 9765 if (lun != NULL) 9766 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9767 lun->be_lun->lun_type; 9768 else 9769 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9770 bdc_ptr->page_code = SVPD_BDC; 9771 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 9772 if (lun != NULL && 9773 (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) 9774 i = strtol(value, NULL, 0); 9775 else 9776 i = CTL_DEFAULT_ROTATION_RATE; 9777 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 9778 if (lun != NULL && 9779 (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL) 9780 i = strtol(value, NULL, 0); 9781 else 9782 i = 0; 9783 bdc_ptr->wab_wac_ff = (i & 0x0f); 9784 bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; 9785 9786 ctl_set_success(ctsio); 9787 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9788 ctsio->be_move_done = ctl_config_move_done; 9789 ctl_datamove((union ctl_io *)ctsio); 9790 return (CTL_RETVAL_COMPLETE); 9791 } 9792 9793 static int 9794 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 9795 { 9796 struct scsi_vpd_logical_block_prov *lbp_ptr; 9797 struct ctl_lun *lun; 9798 9799 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9800 9801 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 9802 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 9803 ctsio->kern_sg_entries = 0; 9804 9805 if (sizeof(*lbp_ptr) < alloc_len) { 9806 ctsio->residual = alloc_len - sizeof(*lbp_ptr); 9807 ctsio->kern_data_len = sizeof(*lbp_ptr); 9808 ctsio->kern_total_len = sizeof(*lbp_ptr); 9809 } else { 9810 ctsio->residual = 0; 9811 ctsio->kern_data_len = alloc_len; 9812 ctsio->kern_total_len = alloc_len; 9813 } 9814 ctsio->kern_data_resid = 0; 9815 ctsio->kern_rel_offset = 0; 9816 ctsio->kern_sg_entries = 0; 9817 9818 /* 9819 * The control device is always connected. The disk device, on the 9820 * other hand, may not be online all the time. Need to change this 9821 * to figure out whether the disk device is actually online or not. 9822 */ 9823 if (lun != NULL) 9824 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9825 lun->be_lun->lun_type; 9826 else 9827 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9828 9829 lbp_ptr->page_code = SVPD_LBP; 9830 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 9831 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 9832 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9833 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 9834 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 9835 lbp_ptr->prov_type = SVPD_LBP_THIN; 9836 } 9837 9838 ctl_set_success(ctsio); 9839 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9840 ctsio->be_move_done = ctl_config_move_done; 9841 ctl_datamove((union ctl_io *)ctsio); 9842 return (CTL_RETVAL_COMPLETE); 9843 } 9844 9845 /* 9846 * INQUIRY with the EVPD bit set. 
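 *
 * For illustration only (a hypothetical initiator-side sketch, not part
 * of this driver): the kind of CDB this routine parses, assuming the
 * INQUIRY, SI_EVPD and SVPD_SUPPORTED_PAGES definitions and the
 * scsi_ulto2b() helper from <cam/scsi/scsi_all.h>.
 *
 *	struct scsi_inquiry cdb;
 *
 *	memset(&cdb, 0, sizeof(cdb));
 *	cdb.opcode = INQUIRY;
 *	cdb.byte2 = SI_EVPD;			// request a VPD page
 *	cdb.page_code = SVPD_SUPPORTED_PAGES;	// page 0x00: list of pages
 *	scsi_ulto2b(512, cdb.length);		// allocation length
 *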
9847 */ 9848 static int 9849 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 9850 { 9851 struct ctl_lun *lun; 9852 struct scsi_inquiry *cdb; 9853 int alloc_len, retval; 9854 9855 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9856 cdb = (struct scsi_inquiry *)ctsio->cdb; 9857 alloc_len = scsi_2btoul(cdb->length); 9858 9859 switch (cdb->page_code) { 9860 case SVPD_SUPPORTED_PAGES: 9861 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 9862 break; 9863 case SVPD_UNIT_SERIAL_NUMBER: 9864 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 9865 break; 9866 case SVPD_DEVICE_ID: 9867 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 9868 break; 9869 case SVPD_EXTENDED_INQUIRY_DATA: 9870 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 9871 break; 9872 case SVPD_MODE_PAGE_POLICY: 9873 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 9874 break; 9875 case SVPD_SCSI_PORTS: 9876 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 9877 break; 9878 case SVPD_SCSI_TPC: 9879 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 9880 break; 9881 case SVPD_BLOCK_LIMITS: 9882 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9883 goto err; 9884 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 9885 break; 9886 case SVPD_BDC: 9887 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9888 goto err; 9889 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 9890 break; 9891 case SVPD_LBP: 9892 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9893 goto err; 9894 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 9895 break; 9896 default: 9897 err: 9898 ctl_set_invalid_field(ctsio, 9899 /*sks_valid*/ 1, 9900 /*command*/ 1, 9901 /*field*/ 2, 9902 /*bit_valid*/ 0, 9903 /*bit*/ 0); 9904 ctl_done((union ctl_io *)ctsio); 9905 retval = CTL_RETVAL_COMPLETE; 9906 break; 9907 } 9908 9909 return (retval); 9910 } 9911 9912 /* 9913 * Standard INQUIRY data. 9914 */ 9915 static int 9916 ctl_inquiry_std(struct ctl_scsiio *ctsio) 9917 { 9918 struct scsi_inquiry_data *inq_ptr; 9919 struct scsi_inquiry *cdb; 9920 struct ctl_softc *softc; 9921 struct ctl_lun *lun; 9922 char *val; 9923 uint32_t alloc_len, data_len; 9924 ctl_port_type port_type; 9925 9926 softc = control_softc; 9927 9928 /* 9929 * Figure out whether we're talking to a Fibre Channel port or not. 9930 * We treat the ioctl front end, and any SCSI adapters, as packetized 9931 * SCSI front ends. 9932 */ 9933 port_type = softc->ctl_ports[ 9934 ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type; 9935 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 9936 port_type = CTL_PORT_SCSI; 9937 9938 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9939 cdb = (struct scsi_inquiry *)ctsio->cdb; 9940 alloc_len = scsi_2btoul(cdb->length); 9941 9942 /* 9943 * We malloc the full inquiry data size here and fill it 9944 * in. If the user only asks for less, we'll give him 9945 * that much. 
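 *
 * For example, with a hypothetical data_len of 116 bytes: an allocation
 * length of 36 moves only 36 bytes and leaves the residual at 0, while an
 * allocation length of 260 moves all 116 bytes and leaves a residual of
 * 260 - 116 = 144. In effect, the clamping below amounts to:
 *
 *	kern_data_len = MIN(data_len, alloc_len);
 *	residual = (alloc_len > data_len) ? alloc_len - data_len : 0;
 *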
9946 */ 9947 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); 9948 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9949 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 9950 ctsio->kern_sg_entries = 0; 9951 ctsio->kern_data_resid = 0; 9952 ctsio->kern_rel_offset = 0; 9953 9954 if (data_len < alloc_len) { 9955 ctsio->residual = alloc_len - data_len; 9956 ctsio->kern_data_len = data_len; 9957 ctsio->kern_total_len = data_len; 9958 } else { 9959 ctsio->residual = 0; 9960 ctsio->kern_data_len = alloc_len; 9961 ctsio->kern_total_len = alloc_len; 9962 } 9963 9964 /* 9965 * If we have a LUN configured, report it as connected. Otherwise, 9966 * report that it is offline or no device is supported, depending 9967 * on the value of inquiry_pq_no_lun. 9968 * 9969 * According to the spec (SPC-4 r34), the peripheral qualifier 9970 * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario: 9971 * 9972 * "A peripheral device having the specified peripheral device type 9973 * is not connected to this logical unit. However, the device 9974 * server is capable of supporting the specified peripheral device 9975 * type on this logical unit." 9976 * 9977 * According to the same spec, the peripheral qualifier 9978 * SID_QUAL_BAD_LU (011b) is used in this scenario: 9979 * 9980 * "The device server is not capable of supporting a peripheral 9981 * device on this logical unit. For this peripheral qualifier the 9982 * peripheral device type shall be set to 1Fh. All other peripheral 9983 * device type values are reserved for this peripheral qualifier." 9984 * 9985 * Given the text, it would seem that we probably want to report that 9986 * the LUN is offline here. There is no LUN connected, but we can 9987 * support a LUN at the given LUN number. 9988 * 9989 * In the real world, though, it sounds like things are a little 9990 * different: 9991 * 9992 * - Linux, when presented with a LUN with the offline peripheral 9993 * qualifier, will create an sg driver instance for it. So when 9994 * you attach it to CTL, you wind up with a ton of sg driver 9995 * instances. (One for every LUN that Linux bothered to probe.) 9996 * Linux does this despite the fact that it issues a REPORT LUNs 9997 * to LUN 0 to get the inventory of supported LUNs. 9998 * 9999 * - There is other anecdotal evidence (from Emulex folks) about 10000 * arrays that use the offline peripheral qualifier for LUNs that 10001 * are on the "passive" path in an active/passive array. 10002 * 10003 * So the solution is provide a hopefully reasonable default 10004 * (return bad/no LUN) and allow the user to change the behavior 10005 * with a tunable/sysctl variable. 10006 */ 10007 if (lun != NULL) 10008 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10009 lun->be_lun->lun_type; 10010 else if (softc->inquiry_pq_no_lun == 0) 10011 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10012 else 10013 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 10014 10015 /* RMB in byte 2 is 0 */ 10016 inq_ptr->version = SCSI_REV_SPC4; 10017 10018 /* 10019 * According to SAM-3, even if a device only supports a single 10020 * level of LUN addressing, it should still set the HISUP bit: 10021 * 10022 * 4.9.1 Logical unit numbers overview 10023 * 10024 * All logical unit number formats described in this standard are 10025 * hierarchical in structure even when only a single level in that 10026 * hierarchy is used. 
The HISUP bit shall be set to one in the 10027 * standard INQUIRY data (see SPC-2) when any logical unit number 10028 * format described in this standard is used. Non-hierarchical 10029 * formats are outside the scope of this standard. 10030 * 10031 * Therefore we set the HiSup bit here. 10032 * 10033 * The response format is 2, per SPC-3. 10034 */ 10035 inq_ptr->response_format = SID_HiSup | 2; 10036 10037 inq_ptr->additional_length = data_len - 10038 (offsetof(struct scsi_inquiry_data, additional_length) + 1); 10039 CTL_DEBUG_PRINT(("additional_length = %d\n", 10040 inq_ptr->additional_length)); 10041 10042 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; 10043 /* 16 bit addressing */ 10044 if (port_type == CTL_PORT_SCSI) 10045 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10046 /* XXX set the SID_MultiP bit here if we're actually going to 10047 respond on multiple ports */ 10048 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10049 10050 /* 16 bit data bus, synchronous transfers */ 10051 if (port_type == CTL_PORT_SCSI) 10052 inq_ptr->flags = SID_WBus16 | SID_Sync; 10053 /* 10054 * XXX KDM do we want to support tagged queueing on the control 10055 * device at all? 10056 */ 10057 if ((lun == NULL) 10058 || (lun->be_lun->lun_type != T_PROCESSOR)) 10059 inq_ptr->flags |= SID_CmdQue; 10060 /* 10061 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10062 * We have 8 bytes for the vendor name, 16 bytes for the device 10063 * name, and 4 bytes for the revision. 10064 */ 10065 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10066 "vendor")) == NULL) { 10067 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10068 } else { 10069 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10070 strncpy(inq_ptr->vendor, val, 10071 min(sizeof(inq_ptr->vendor), strlen(val))); 10072 } 10073 if (lun == NULL) { 10074 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10075 sizeof(inq_ptr->product)); 10076 } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) { 10077 switch (lun->be_lun->lun_type) { 10078 case T_DIRECT: 10079 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10080 sizeof(inq_ptr->product)); 10081 break; 10082 case T_PROCESSOR: 10083 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10084 sizeof(inq_ptr->product)); 10085 break; 10086 default: 10087 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 10088 sizeof(inq_ptr->product)); 10089 break; 10090 } 10091 } else { 10092 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10093 strncpy(inq_ptr->product, val, 10094 min(sizeof(inq_ptr->product), strlen(val))); 10095 } 10096 10097 /* 10098 * XXX make this a macro somewhere so it automatically gets 10099 * incremented when we make changes. 10100 */ 10101 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10102 "revision")) == NULL) { 10103 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10104 } else { 10105 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10106 strncpy(inq_ptr->revision, val, 10107 min(sizeof(inq_ptr->revision), strlen(val))); 10108 } 10109 10110 /* 10111 * For parallel SCSI, we support double transition and single 10112 * transition clocking. We also support QAS (Quick Arbitration 10113 * and Selection) and Information Unit transfers on both the 10114 * control and array devices.
10115 */ 10116 if (port_type == CTL_PORT_SCSI) 10117 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10118 SID_SPI_IUS; 10119 10120 /* SAM-5 (no version claimed) */ 10121 scsi_ulto2b(0x00A0, inq_ptr->version1); 10122 /* SPC-4 (no version claimed) */ 10123 scsi_ulto2b(0x0460, inq_ptr->version2); 10124 if (port_type == CTL_PORT_FC) { 10125 /* FCP-2 ANSI INCITS.350:2003 */ 10126 scsi_ulto2b(0x0917, inq_ptr->version3); 10127 } else if (port_type == CTL_PORT_SCSI) { 10128 /* SPI-4 ANSI INCITS.362:200x */ 10129 scsi_ulto2b(0x0B56, inq_ptr->version3); 10130 } else if (port_type == CTL_PORT_ISCSI) { 10131 /* iSCSI (no version claimed) */ 10132 scsi_ulto2b(0x0960, inq_ptr->version3); 10133 } else if (port_type == CTL_PORT_SAS) { 10134 /* SAS (no version claimed) */ 10135 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10136 } 10137 10138 if (lun == NULL) { 10139 /* SBC-4 (no version claimed) */ 10140 scsi_ulto2b(0x0600, inq_ptr->version4); 10141 } else { 10142 switch (lun->be_lun->lun_type) { 10143 case T_DIRECT: 10144 /* SBC-4 (no version claimed) */ 10145 scsi_ulto2b(0x0600, inq_ptr->version4); 10146 break; 10147 case T_PROCESSOR: 10148 default: 10149 break; 10150 } 10151 } 10152 10153 ctl_set_success(ctsio); 10154 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10155 ctsio->be_move_done = ctl_config_move_done; 10156 ctl_datamove((union ctl_io *)ctsio); 10157 return (CTL_RETVAL_COMPLETE); 10158 } 10159 10160 int 10161 ctl_inquiry(struct ctl_scsiio *ctsio) 10162 { 10163 struct scsi_inquiry *cdb; 10164 int retval; 10165 10166 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10167 10168 cdb = (struct scsi_inquiry *)ctsio->cdb; 10169 if (cdb->byte2 & SI_EVPD) 10170 retval = ctl_inquiry_evpd(ctsio); 10171 else if (cdb->page_code == 0) 10172 retval = ctl_inquiry_std(ctsio); 10173 else { 10174 ctl_set_invalid_field(ctsio, 10175 /*sks_valid*/ 1, 10176 /*command*/ 1, 10177 /*field*/ 2, 10178 /*bit_valid*/ 0, 10179 /*bit*/ 0); 10180 ctl_done((union ctl_io *)ctsio); 10181 return (CTL_RETVAL_COMPLETE); 10182 } 10183 10184 return (retval); 10185 } 10186 10187 /* 10188 * For known CDB types, parse the LBA and length. 
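 *
 * For illustration only (a sketch, not code taken from this function):
 * the CDB fields are big-endian, so they are pulled apart with the
 * scsi_Nbtoul() helpers. For READ(10)/WRITE(10), bytes 2-5 carry the
 * starting LBA and bytes 7-8 the transfer length:
 *
 *	struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)io->scsiio.cdb;
 *	uint64_t lba = scsi_4btoul(cdb->addr);		// CDB bytes 2-5
 *	uint32_t len = scsi_2btoul(cdb->length);	// CDB bytes 7-8
 *
 * READ(6)/WRITE(6) are the odd ones out: their LBA field is only 21 bits
 * wide, which is why the 0x1fffff mask is applied below.
 *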
10189 */ 10190 static int 10191 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10192 { 10193 if (io->io_hdr.io_type != CTL_IO_SCSI) 10194 return (1); 10195 10196 switch (io->scsiio.cdb[0]) { 10197 case COMPARE_AND_WRITE: { 10198 struct scsi_compare_and_write *cdb; 10199 10200 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10201 10202 *lba = scsi_8btou64(cdb->addr); 10203 *len = cdb->length; 10204 break; 10205 } 10206 case READ_6: 10207 case WRITE_6: { 10208 struct scsi_rw_6 *cdb; 10209 10210 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10211 10212 *lba = scsi_3btoul(cdb->addr); 10213 /* only 5 bits are valid in the most significant address byte */ 10214 *lba &= 0x1fffff; 10215 *len = cdb->length; 10216 break; 10217 } 10218 case READ_10: 10219 case WRITE_10: { 10220 struct scsi_rw_10 *cdb; 10221 10222 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10223 10224 *lba = scsi_4btoul(cdb->addr); 10225 *len = scsi_2btoul(cdb->length); 10226 break; 10227 } 10228 case WRITE_VERIFY_10: { 10229 struct scsi_write_verify_10 *cdb; 10230 10231 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10232 10233 *lba = scsi_4btoul(cdb->addr); 10234 *len = scsi_2btoul(cdb->length); 10235 break; 10236 } 10237 case READ_12: 10238 case WRITE_12: { 10239 struct scsi_rw_12 *cdb; 10240 10241 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10242 10243 *lba = scsi_4btoul(cdb->addr); 10244 *len = scsi_4btoul(cdb->length); 10245 break; 10246 } 10247 case WRITE_VERIFY_12: { 10248 struct scsi_write_verify_12 *cdb; 10249 10250 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10251 10252 *lba = scsi_4btoul(cdb->addr); 10253 *len = scsi_4btoul(cdb->length); 10254 break; 10255 } 10256 case READ_16: 10257 case WRITE_16: 10258 case WRITE_ATOMIC_16: { 10259 struct scsi_rw_16 *cdb; 10260 10261 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10262 10263 *lba = scsi_8btou64(cdb->addr); 10264 *len = scsi_4btoul(cdb->length); 10265 break; 10266 } 10267 case WRITE_VERIFY_16: { 10268 struct scsi_write_verify_16 *cdb; 10269 10270 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10271 10272 *lba = scsi_8btou64(cdb->addr); 10273 *len = scsi_4btoul(cdb->length); 10274 break; 10275 } 10276 case WRITE_SAME_10: { 10277 struct scsi_write_same_10 *cdb; 10278 10279 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10280 10281 *lba = scsi_4btoul(cdb->addr); 10282 *len = scsi_2btoul(cdb->length); 10283 break; 10284 } 10285 case WRITE_SAME_16: { 10286 struct scsi_write_same_16 *cdb; 10287 10288 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10289 10290 *lba = scsi_8btou64(cdb->addr); 10291 *len = scsi_4btoul(cdb->length); 10292 break; 10293 } 10294 case VERIFY_10: { 10295 struct scsi_verify_10 *cdb; 10296 10297 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10298 10299 *lba = scsi_4btoul(cdb->addr); 10300 *len = scsi_2btoul(cdb->length); 10301 break; 10302 } 10303 case VERIFY_12: { 10304 struct scsi_verify_12 *cdb; 10305 10306 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10307 10308 *lba = scsi_4btoul(cdb->addr); 10309 *len = scsi_4btoul(cdb->length); 10310 break; 10311 } 10312 case VERIFY_16: { 10313 struct scsi_verify_16 *cdb; 10314 10315 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10316 10317 *lba = scsi_8btou64(cdb->addr); 10318 *len = scsi_4btoul(cdb->length); 10319 break; 10320 } 10321 case UNMAP: { 10322 *lba = 0; 10323 *len = UINT64_MAX; 10324 break; 10325 } 10326 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10327 struct scsi_get_lba_status *cdb; 10328 10329 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 
10330 *lba = scsi_8btou64(cdb->addr); 10331 *len = UINT32_MAX; 10332 break; 10333 } 10334 default: 10335 return (1); 10336 break; /* NOTREACHED */ 10337 } 10338 10339 return (0); 10340 } 10341 10342 static ctl_action 10343 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, 10344 bool seq) 10345 { 10346 uint64_t endlba1, endlba2; 10347 10348 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10349 endlba2 = lba2 + len2 - 1; 10350 10351 if ((endlba1 < lba2) || (endlba2 < lba1)) 10352 return (CTL_ACTION_PASS); 10353 else 10354 return (CTL_ACTION_BLOCK); 10355 } 10356 10357 static int 10358 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10359 { 10360 struct ctl_ptr_len_flags *ptrlen; 10361 struct scsi_unmap_desc *buf, *end, *range; 10362 uint64_t lba; 10363 uint32_t len; 10364 10365 /* If not UNMAP -- go other way. */ 10366 if (io->io_hdr.io_type != CTL_IO_SCSI || 10367 io->scsiio.cdb[0] != UNMAP) 10368 return (CTL_ACTION_ERROR); 10369 10370 /* If UNMAP without data -- block and wait for data. */ 10371 ptrlen = (struct ctl_ptr_len_flags *) 10372 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10373 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10374 ptrlen->ptr == NULL) 10375 return (CTL_ACTION_BLOCK); 10376 10377 /* UNMAP with data -- check for collision. */ 10378 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10379 end = buf + ptrlen->len / sizeof(*buf); 10380 for (range = buf; range < end; range++) { 10381 lba = scsi_8btou64(range->lba); 10382 len = scsi_4btoul(range->length); 10383 if ((lba < lba2 + len2) && (lba + len > lba2)) 10384 return (CTL_ACTION_BLOCK); 10385 } 10386 return (CTL_ACTION_PASS); 10387 } 10388 10389 static ctl_action 10390 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10391 { 10392 uint64_t lba1, lba2; 10393 uint64_t len1, len2; 10394 int retval; 10395 10396 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10397 return (CTL_ACTION_ERROR); 10398 10399 retval = ctl_extent_check_unmap(io1, lba2, len2); 10400 if (retval != CTL_ACTION_ERROR) 10401 return (retval); 10402 10403 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10404 return (CTL_ACTION_ERROR); 10405 10406 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 10407 } 10408 10409 static ctl_action 10410 ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) 10411 { 10412 uint64_t lba1, lba2; 10413 uint64_t len1, len2; 10414 10415 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10416 return (CTL_ACTION_ERROR); 10417 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10418 return (CTL_ACTION_ERROR); 10419 10420 if (lba1 + len1 == lba2) 10421 return (CTL_ACTION_BLOCK); 10422 return (CTL_ACTION_PASS); 10423 } 10424 10425 static ctl_action 10426 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 10427 union ctl_io *ooa_io) 10428 { 10429 const struct ctl_cmd_entry *pending_entry, *ooa_entry; 10430 ctl_serialize_action *serialize_row; 10431 10432 /* 10433 * The initiator attempted multiple untagged commands at the same 10434 * time. Can't do that. 
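 *
 * "At the same time" here means from the same I_T nexus. For illustration
 * only, a hypothetical helper (not defined in this driver) showing the
 * nexus comparison the checks below perform inline:
 *
 *	static inline int
 *	ctl_same_nexus(const union ctl_io *a, const union ctl_io *b)
 *	{
 *		return (a->io_hdr.nexus.targ_port == b->io_hdr.nexus.targ_port &&
 *		    a->io_hdr.nexus.initid.id == b->io_hdr.nexus.initid.id);
 *	}
 *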
10435 */ 10436 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10437 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10438 && ((pending_io->io_hdr.nexus.targ_port == 10439 ooa_io->io_hdr.nexus.targ_port) 10440 && (pending_io->io_hdr.nexus.initid.id == 10441 ooa_io->io_hdr.nexus.initid.id)) 10442 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10443 CTL_FLAG_STATUS_SENT)) == 0)) 10444 return (CTL_ACTION_OVERLAP); 10445 10446 /* 10447 * The initiator attempted to send multiple tagged commands with 10448 * the same ID. (It's fine if different initiators have the same 10449 * tag ID.) 10450 * 10451 * Even if all of those conditions are true, we don't kill the I/O 10452 * if the command ahead of us has been aborted. We won't end up 10453 * sending it to the FETD, and it's perfectly legal to resend a 10454 * command with the same tag number as long as the previous 10455 * instance of this tag number has been aborted somehow. 10456 */ 10457 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10458 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10459 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 10460 && ((pending_io->io_hdr.nexus.targ_port == 10461 ooa_io->io_hdr.nexus.targ_port) 10462 && (pending_io->io_hdr.nexus.initid.id == 10463 ooa_io->io_hdr.nexus.initid.id)) 10464 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10465 CTL_FLAG_STATUS_SENT)) == 0)) 10466 return (CTL_ACTION_OVERLAP_TAG); 10467 10468 /* 10469 * If we get a head of queue tag, SAM-3 says that we should 10470 * immediately execute it. 10471 * 10472 * What happens if this command would normally block for some other 10473 * reason? e.g. a request sense with a head of queue tag 10474 * immediately after a write. Normally that would block, but this 10475 * will result in its getting executed immediately... 10476 * 10477 * We currently return "pass" instead of "skip", so we'll end up 10478 * going through the rest of the queue to check for overlapped tags. 10479 * 10480 * XXX KDM check for other types of blockage first?? 10481 */ 10482 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10483 return (CTL_ACTION_PASS); 10484 10485 /* 10486 * Ordered tags have to block until all items ahead of them 10487 * have completed. If we get called with an ordered tag, we always 10488 * block, if something else is ahead of us in the queue. 10489 */ 10490 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 10491 return (CTL_ACTION_BLOCK); 10492 10493 /* 10494 * Simple tags get blocked until all head of queue and ordered tags 10495 * ahead of them have completed. I'm lumping untagged commands in 10496 * with simple tags here. XXX KDM is that the right thing to do? 
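 *
 * For illustration only, a condensed restatement of the tag-type rules
 * implemented above and below (untagged commands treated like simple
 * tags); the helper name is hypothetical, not part of this driver:
 *
 *	static ctl_action
 *	ctl_tag_rules(ctl_tag_type pending, ctl_tag_type ahead)
 *	{
 *		if (pending == CTL_TAG_HEAD_OF_QUEUE)
 *			return (CTL_ACTION_PASS);	// runs immediately
 *		if (pending == CTL_TAG_ORDERED)
 *			return (CTL_ACTION_BLOCK);	// waits for everything ahead
 *		if (ahead == CTL_TAG_HEAD_OF_QUEUE || ahead == CTL_TAG_ORDERED)
 *			return (CTL_ACTION_BLOCK);
 *		return (CTL_ACTION_PASS);	// the per-opcode table decides the rest
 *	}
 *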
10497 */ 10498 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10499 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 10500 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10501 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 10502 return (CTL_ACTION_BLOCK); 10503 10504 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); 10505 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); 10506 10507 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 10508 10509 switch (serialize_row[pending_entry->seridx]) { 10510 case CTL_SER_BLOCK: 10511 return (CTL_ACTION_BLOCK); 10512 case CTL_SER_EXTENT: 10513 return (ctl_extent_check(ooa_io, pending_io, 10514 (lun->serseq == CTL_LUN_SERSEQ_ON))); 10515 case CTL_SER_EXTENTOPT: 10516 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags 10517 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) 10518 return (ctl_extent_check(ooa_io, pending_io, 10519 (lun->serseq == CTL_LUN_SERSEQ_ON))); 10520 return (CTL_ACTION_PASS); 10521 case CTL_SER_EXTENTSEQ: 10522 if (lun->serseq != CTL_LUN_SERSEQ_OFF) 10523 return (ctl_extent_check_seq(ooa_io, pending_io)); 10524 return (CTL_ACTION_PASS); 10525 case CTL_SER_PASS: 10526 return (CTL_ACTION_PASS); 10527 case CTL_SER_BLOCKOPT: 10528 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags 10529 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) 10530 return (CTL_ACTION_BLOCK); 10531 return (CTL_ACTION_PASS); 10532 case CTL_SER_SKIP: 10533 return (CTL_ACTION_SKIP); 10534 default: 10535 panic("invalid serialization value %d", 10536 serialize_row[pending_entry->seridx]); 10537 } 10538 10539 return (CTL_ACTION_ERROR); 10540 } 10541 10542 /* 10543 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 10544 * Assumptions: 10545 * - pending_io is generally either incoming, or on the blocked queue 10546 * - starting I/O is the I/O we want to start the check with. 10547 */ 10548 static ctl_action 10549 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 10550 union ctl_io *starting_io) 10551 { 10552 union ctl_io *ooa_io; 10553 ctl_action action; 10554 10555 mtx_assert(&lun->lun_lock, MA_OWNED); 10556 10557 /* 10558 * Run back along the OOA queue, starting with the current 10559 * blocked I/O and going through every I/O before it on the 10560 * queue. If starting_io is NULL, we'll just end up returning 10561 * CTL_ACTION_PASS. 10562 */ 10563 for (ooa_io = starting_io; ooa_io != NULL; 10564 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 10565 ooa_links)){ 10566 10567 /* 10568 * This routine just checks to see whether 10569 * cur_blocked is blocked by ooa_io, which is ahead 10570 * of it in the queue. It doesn't queue/dequeue 10571 * cur_blocked. 10572 */ 10573 action = ctl_check_for_blockage(lun, pending_io, ooa_io); 10574 switch (action) { 10575 case CTL_ACTION_BLOCK: 10576 case CTL_ACTION_OVERLAP: 10577 case CTL_ACTION_OVERLAP_TAG: 10578 case CTL_ACTION_SKIP: 10579 case CTL_ACTION_ERROR: 10580 return (action); 10581 break; /* NOTREACHED */ 10582 case CTL_ACTION_PASS: 10583 break; 10584 default: 10585 panic("invalid action %d", action); 10586 break; /* NOTREACHED */ 10587 } 10588 } 10589 10590 return (CTL_ACTION_PASS); 10591 } 10592 10593 /* 10594 * Assumptions: 10595 * - An I/O has just completed, and has been removed from the per-LUN OOA 10596 * queue, so some items on the blocked queue may now be unblocked. 
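 *
 * For illustration only, a hypothetical completion-path caller consistent
 * with the assumptions above and with the lun_lock assertion inside the
 * function (not a literal excerpt from the completion code):
 *
 *	mtx_lock(&lun->lun_lock);
 *	TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
 *	ctl_check_blocked(lun);		// may move newly unblocked I/O to the RtR queue
 *	mtx_unlock(&lun->lun_lock);
 *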
10597 */ 10598 static int 10599 ctl_check_blocked(struct ctl_lun *lun) 10600 { 10601 union ctl_io *cur_blocked, *next_blocked; 10602 10603 mtx_assert(&lun->lun_lock, MA_OWNED); 10604 10605 /* 10606 * Run forward from the head of the blocked queue, checking each 10607 * entry against the I/Os prior to it on the OOA queue to see if 10608 * there is still any blockage. 10609 * 10610 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 10611 * with our removing a variable on it while it is traversing the 10612 * list. 10613 */ 10614 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 10615 cur_blocked != NULL; cur_blocked = next_blocked) { 10616 union ctl_io *prev_ooa; 10617 ctl_action action; 10618 10619 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 10620 blocked_links); 10621 10622 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 10623 ctl_ooaq, ooa_links); 10624 10625 /* 10626 * If cur_blocked happens to be the first item in the OOA 10627 * queue now, prev_ooa will be NULL, and the action 10628 * returned will just be CTL_ACTION_PASS. 10629 */ 10630 action = ctl_check_ooa(lun, cur_blocked, prev_ooa); 10631 10632 switch (action) { 10633 case CTL_ACTION_BLOCK: 10634 /* Nothing to do here, still blocked */ 10635 break; 10636 case CTL_ACTION_OVERLAP: 10637 case CTL_ACTION_OVERLAP_TAG: 10638 /* 10639 * This shouldn't happen! In theory we've already 10640 * checked this command for overlap... 10641 */ 10642 break; 10643 case CTL_ACTION_PASS: 10644 case CTL_ACTION_SKIP: { 10645 const struct ctl_cmd_entry *entry; 10646 int isc_retval; 10647 10648 /* 10649 * The skip case shouldn't happen, this transaction 10650 * should have never made it onto the blocked queue. 10651 */ 10652 /* 10653 * This I/O is no longer blocked, we can remove it 10654 * from the blocked queue. Since this is a TAILQ 10655 * (doubly linked list), we can do O(1) removals 10656 * from any place on the list. 10657 */ 10658 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 10659 blocked_links); 10660 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10661 10662 if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){ 10663 /* 10664 * Need to send IO back to original side to 10665 * run 10666 */ 10667 union ctl_ha_msg msg_info; 10668 10669 msg_info.hdr.original_sc = 10670 cur_blocked->io_hdr.original_sc; 10671 msg_info.hdr.serializing_sc = cur_blocked; 10672 msg_info.hdr.msg_type = CTL_MSG_R2R; 10673 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 10674 &msg_info, sizeof(msg_info), 0)) > 10675 CTL_HA_STATUS_SUCCESS) { 10676 printf("CTL:Check Blocked error from " 10677 "ctl_ha_msg_send %d\n", 10678 isc_retval); 10679 } 10680 break; 10681 } 10682 entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL); 10683 10684 /* 10685 * Check this I/O for LUN state changes that may 10686 * have happened while this command was blocked. 10687 * The LUN state may have been changed by a command 10688 * ahead of us in the queue, so we need to re-check 10689 * for any states that can be caused by SCSI 10690 * commands. 10691 */ 10692 if (ctl_scsiio_lun_check(lun, entry, 10693 &cur_blocked->scsiio) == 0) { 10694 cur_blocked->io_hdr.flags |= 10695 CTL_FLAG_IS_WAS_ON_RTR; 10696 ctl_enqueue_rtr(cur_blocked); 10697 } else 10698 ctl_done(cur_blocked); 10699 break; 10700 } 10701 default: 10702 /* 10703 * This probably shouldn't happen -- we shouldn't 10704 * get CTL_ACTION_ERROR, or anything else. 
10705 */ 10706 break; 10707 } 10708 } 10709 10710 return (CTL_RETVAL_COMPLETE); 10711 } 10712 10713 /* 10714 * This routine (with one exception) checks LUN flags that can be set by 10715 * commands ahead of us in the OOA queue. These flags have to be checked 10716 * when a command initially comes in, and when we pull a command off the 10717 * blocked queue and are preparing to execute it. The reason we have to 10718 * check these flags for commands on the blocked queue is that the LUN 10719 * state may have been changed by a command ahead of us while we're on the 10720 * blocked queue. 10721 * 10722 * Ordering is somewhat important with these checks, so please pay 10723 * careful attention to the placement of any new checks. 10724 */ 10725 static int 10726 ctl_scsiio_lun_check(struct ctl_lun *lun, 10727 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 10728 { 10729 struct ctl_softc *softc = lun->ctl_softc; 10730 int retval; 10731 uint32_t residx; 10732 10733 retval = 0; 10734 10735 mtx_assert(&lun->lun_lock, MA_OWNED); 10736 10737 /* 10738 * If this shelf is a secondary shelf controller, we have to reject 10739 * any media access commands. 10740 */ 10741 if ((softc->flags & CTL_FLAG_ACTIVE_SHELF) == 0 && 10742 (entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0) { 10743 ctl_set_lun_standby(ctsio); 10744 retval = 1; 10745 goto bailout; 10746 } 10747 10748 if (entry->pattern & CTL_LUN_PAT_WRITE) { 10749 if (lun->flags & CTL_LUN_READONLY) { 10750 ctl_set_sense(ctsio, /*current_error*/ 1, 10751 /*sense_key*/ SSD_KEY_DATA_PROTECT, 10752 /*asc*/ 0x27, /*ascq*/ 0x01, SSD_ELEM_NONE); 10753 retval = 1; 10754 goto bailout; 10755 } 10756 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT] 10757 .eca_and_aen & SCP_SWP) != 0) { 10758 ctl_set_sense(ctsio, /*current_error*/ 1, 10759 /*sense_key*/ SSD_KEY_DATA_PROTECT, 10760 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 10761 retval = 1; 10762 goto bailout; 10763 } 10764 } 10765 10766 /* 10767 * Check for a reservation conflict. If this command isn't allowed 10768 * even on reserved LUNs, and if this initiator isn't the one who 10769 * reserved us, reject the command with a reservation conflict. 10770 */ 10771 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 10772 if ((lun->flags & CTL_LUN_RESERVED) 10773 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 10774 if (lun->res_idx != residx) { 10775 ctl_set_reservation_conflict(ctsio); 10776 retval = 1; 10777 goto bailout; 10778 } 10779 } 10780 10781 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 10782 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 10783 /* No reservation or command is allowed. */; 10784 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 10785 (lun->res_type == SPR_TYPE_WR_EX || 10786 lun->res_type == SPR_TYPE_WR_EX_RO || 10787 lun->res_type == SPR_TYPE_WR_EX_AR)) { 10788 /* The command is allowed for Write Exclusive resv. */; 10789 } else { 10790 /* 10791 * if we aren't registered or it's a res holder type 10792 * reservation and this isn't the res holder then set a 10793 * conflict. 
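 *
 * In other words, the test below reduces to:
 *
 *	conflict = (ctl_get_prkey(lun, residx) == 0 ||
 *	    (residx != lun->pr_res_idx && lun->res_type < 4));
 *
 * i.e. an unregistered initiator always conflicts, and a registered
 * non-holder conflicts only under the single-holder reservation types
 * (res_type < 4 here, the plain Write Exclusive / Exclusive Access
 * types); under the registrants-only and all-registrants types a
 * registered initiator is allowed through.
 *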
10794 */ 10795 if (ctl_get_prkey(lun, residx) == 0 10796 || (residx != lun->pr_res_idx && lun->res_type < 4)) { 10797 ctl_set_reservation_conflict(ctsio); 10798 retval = 1; 10799 goto bailout; 10800 } 10801 10802 } 10803 10804 if ((lun->flags & CTL_LUN_OFFLINE) 10805 && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) { 10806 ctl_set_lun_not_ready(ctsio); 10807 retval = 1; 10808 goto bailout; 10809 } 10810 10811 /* 10812 * If the LUN is stopped, see if this particular command is allowed 10813 * for a stopped lun. Otherwise, reject it with 0x04,0x02. 10814 */ 10815 if ((lun->flags & CTL_LUN_STOPPED) 10816 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) { 10817 /* "Logical unit not ready, initializing cmd. required" */ 10818 ctl_set_lun_stopped(ctsio); 10819 retval = 1; 10820 goto bailout; 10821 } 10822 10823 if ((lun->flags & CTL_LUN_INOPERABLE) 10824 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) { 10825 /* "Medium format corrupted" */ 10826 ctl_set_medium_format_corrupted(ctsio); 10827 retval = 1; 10828 goto bailout; 10829 } 10830 10831 bailout: 10832 return (retval); 10833 10834 } 10835 10836 static void 10837 ctl_failover_io(union ctl_io *io, int have_lock) 10838 { 10839 ctl_set_busy(&io->scsiio); 10840 ctl_done(io); 10841 } 10842 10843 #ifdef notyet 10844 static void 10845 ctl_failover(void) 10846 { 10847 struct ctl_lun *lun; 10848 struct ctl_softc *softc; 10849 union ctl_io *next_io, *pending_io; 10850 union ctl_io *io; 10851 int lun_idx; 10852 10853 softc = control_softc; 10854 10855 mtx_lock(&softc->ctl_lock); 10856 /* 10857 * Remove any cmds from the other SC from the rtr queue. These 10858 * will obviously only be for LUNs for which we're the primary. 10859 * We can't send status or get/send data for these commands. 10860 * Since they haven't been executed yet, we can just remove them. 10861 * We'll either abort them or delete them below, depending on 10862 * which HA mode we're in. 10863 */ 10864 #ifdef notyet 10865 mtx_lock(&softc->queue_lock); 10866 for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue); 10867 io != NULL; io = next_io) { 10868 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links); 10869 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 10870 STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr, 10871 ctl_io_hdr, links); 10872 } 10873 mtx_unlock(&softc->queue_lock); 10874 #endif 10875 10876 for (lun_idx=0; lun_idx < softc->num_luns; lun_idx++) { 10877 lun = softc->ctl_luns[lun_idx]; 10878 if (lun==NULL) 10879 continue; 10880 10881 /* 10882 * Processor LUNs are primary on both sides. 10883 * XXX will this always be true? 10884 */ 10885 if (lun->be_lun->lun_type == T_PROCESSOR) 10886 continue; 10887 10888 if ((lun->flags & CTL_LUN_PRIMARY_SC) 10889 && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 10890 printf("FAILOVER: primary lun %d\n", lun_idx); 10891 /* 10892 * Remove all commands from the other SC. First from the 10893 * blocked queue then from the ooa queue. Once we have 10894 * removed them. Call ctl_check_blocked to see if there 10895 * is anything that can run. 
10896 */ 10897 for (io = (union ctl_io *)TAILQ_FIRST( 10898 &lun->blocked_queue); io != NULL; io = next_io) { 10899 10900 next_io = (union ctl_io *)TAILQ_NEXT( 10901 &io->io_hdr, blocked_links); 10902 10903 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 10904 TAILQ_REMOVE(&lun->blocked_queue, 10905 &io->io_hdr,blocked_links); 10906 io->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10907 TAILQ_REMOVE(&lun->ooa_queue, 10908 &io->io_hdr, ooa_links); 10909 10910 ctl_free_io(io); 10911 } 10912 } 10913 10914 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 10915 io != NULL; io = next_io) { 10916 10917 next_io = (union ctl_io *)TAILQ_NEXT( 10918 &io->io_hdr, ooa_links); 10919 10920 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 10921 10922 TAILQ_REMOVE(&lun->ooa_queue, 10923 &io->io_hdr, 10924 ooa_links); 10925 10926 ctl_free_io(io); 10927 } 10928 } 10929 ctl_check_blocked(lun); 10930 } else if ((lun->flags & CTL_LUN_PRIMARY_SC) 10931 && (softc->ha_mode == CTL_HA_MODE_XFER)) { 10932 10933 printf("FAILOVER: primary lun %d\n", lun_idx); 10934 /* 10935 * Abort all commands from the other SC. We can't 10936 * send status back for them now. These should get 10937 * cleaned up when they are completed or come out 10938 * for a datamove operation. 10939 */ 10940 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 10941 io != NULL; io = next_io) { 10942 next_io = (union ctl_io *)TAILQ_NEXT( 10943 &io->io_hdr, ooa_links); 10944 10945 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 10946 io->io_hdr.flags |= CTL_FLAG_ABORT; 10947 } 10948 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 10949 && (softc->ha_mode == CTL_HA_MODE_XFER)) { 10950 10951 printf("FAILOVER: secondary lun %d\n", lun_idx); 10952 10953 lun->flags |= CTL_LUN_PRIMARY_SC; 10954 10955 /* 10956 * We send all I/O that was sent to this controller 10957 * and redirected to the other side back with 10958 * busy status, and have the initiator retry it. 10959 * Figuring out how much data has been transferred, 10960 * etc. and picking up where we left off would be 10961 * very tricky. 10962 * 10963 * XXX KDM need to remove I/O from the blocked 10964 * queue as well! 10965 */ 10966 for (pending_io = (union ctl_io *)TAILQ_FIRST( 10967 &lun->ooa_queue); pending_io != NULL; 10968 pending_io = next_io) { 10969 10970 next_io = (union ctl_io *)TAILQ_NEXT( 10971 &pending_io->io_hdr, ooa_links); 10972 10973 pending_io->io_hdr.flags &= 10974 ~CTL_FLAG_SENT_2OTHER_SC; 10975 10976 if (pending_io->io_hdr.flags & 10977 CTL_FLAG_IO_ACTIVE) { 10978 pending_io->io_hdr.flags |= 10979 CTL_FLAG_FAILOVER; 10980 } else { 10981 ctl_set_busy(&pending_io->scsiio); 10982 ctl_done(pending_io); 10983 } 10984 } 10985 10986 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 10987 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 10988 && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 10989 printf("FAILOVER: secondary lun %d\n", lun_idx); 10990 /* 10991 * if the first io on the OOA is not on the RtR queue 10992 * add it. 
10993 */ 10994 lun->flags |= CTL_LUN_PRIMARY_SC; 10995 10996 pending_io = (union ctl_io *)TAILQ_FIRST( 10997 &lun->ooa_queue); 10998 if (pending_io==NULL) { 10999 printf("Nothing on OOA queue\n"); 11000 continue; 11001 } 11002 11003 pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11004 if ((pending_io->io_hdr.flags & 11005 CTL_FLAG_IS_WAS_ON_RTR) == 0) { 11006 pending_io->io_hdr.flags |= 11007 CTL_FLAG_IS_WAS_ON_RTR; 11008 ctl_enqueue_rtr(pending_io); 11009 } 11010 #if 0 11011 else 11012 { 11013 printf("Tag 0x%04x is running\n", 11014 pending_io->scsiio.tag_num); 11015 } 11016 #endif 11017 11018 next_io = (union ctl_io *)TAILQ_NEXT( 11019 &pending_io->io_hdr, ooa_links); 11020 for (pending_io=next_io; pending_io != NULL; 11021 pending_io = next_io) { 11022 pending_io->io_hdr.flags &= 11023 ~CTL_FLAG_SENT_2OTHER_SC; 11024 next_io = (union ctl_io *)TAILQ_NEXT( 11025 &pending_io->io_hdr, ooa_links); 11026 if (pending_io->io_hdr.flags & 11027 CTL_FLAG_IS_WAS_ON_RTR) { 11028 #if 0 11029 printf("Tag 0x%04x is running\n", 11030 pending_io->scsiio.tag_num); 11031 #endif 11032 continue; 11033 } 11034 11035 switch (ctl_check_ooa(lun, pending_io, 11036 (union ctl_io *)TAILQ_PREV( 11037 &pending_io->io_hdr, ctl_ooaq, 11038 ooa_links))) { 11039 11040 case CTL_ACTION_BLOCK: 11041 TAILQ_INSERT_TAIL(&lun->blocked_queue, 11042 &pending_io->io_hdr, 11043 blocked_links); 11044 pending_io->io_hdr.flags |= 11045 CTL_FLAG_BLOCKED; 11046 break; 11047 case CTL_ACTION_PASS: 11048 case CTL_ACTION_SKIP: 11049 pending_io->io_hdr.flags |= 11050 CTL_FLAG_IS_WAS_ON_RTR; 11051 ctl_enqueue_rtr(pending_io); 11052 break; 11053 case CTL_ACTION_OVERLAP: 11054 ctl_set_overlapped_cmd( 11055 (struct ctl_scsiio *)pending_io); 11056 ctl_done(pending_io); 11057 break; 11058 case CTL_ACTION_OVERLAP_TAG: 11059 ctl_set_overlapped_tag( 11060 (struct ctl_scsiio *)pending_io, 11061 pending_io->scsiio.tag_num & 0xff); 11062 ctl_done(pending_io); 11063 break; 11064 case CTL_ACTION_ERROR: 11065 default: 11066 ctl_set_internal_failure( 11067 (struct ctl_scsiio *)pending_io, 11068 0, // sks_valid 11069 0); //retry count 11070 ctl_done(pending_io); 11071 break; 11072 } 11073 } 11074 11075 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 11076 } else { 11077 panic("Unhandled HA mode failover, LUN flags = %#x, " 11078 "ha_mode = #%x", lun->flags, softc->ha_mode); 11079 } 11080 } 11081 ctl_pause_rtr = 0; 11082 mtx_unlock(&softc->ctl_lock); 11083 } 11084 #endif 11085 11086 static void 11087 ctl_clear_ua(struct ctl_softc *ctl_softc, uint32_t initidx, 11088 ctl_ua_type ua_type) 11089 { 11090 struct ctl_lun *lun; 11091 ctl_ua_type *pu; 11092 11093 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); 11094 11095 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 11096 mtx_lock(&lun->lun_lock); 11097 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 11098 if (pu != NULL) 11099 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua_type; 11100 mtx_unlock(&lun->lun_lock); 11101 } 11102 } 11103 11104 static int 11105 ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) 11106 { 11107 struct ctl_lun *lun; 11108 const struct ctl_cmd_entry *entry; 11109 uint32_t initidx, targ_lun; 11110 int retval; 11111 11112 retval = 0; 11113 11114 lun = NULL; 11115 11116 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 11117 if ((targ_lun < CTL_MAX_LUNS) 11118 && ((lun = softc->ctl_luns[targ_lun]) != NULL)) { 11119 /* 11120 * If the LUN is invalid, pretend that it doesn't exist. 11121 * It will go away as soon as all pending I/O has been 11122 * completed. 
11123 */ 11124 mtx_lock(&lun->lun_lock); 11125 if (lun->flags & CTL_LUN_DISABLED) { 11126 mtx_unlock(&lun->lun_lock); 11127 lun = NULL; 11128 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 11129 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 11130 } else { 11131 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; 11132 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = 11133 lun->be_lun; 11134 if (lun->be_lun->lun_type == T_PROCESSOR) { 11135 ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV; 11136 } 11137 11138 /* 11139 * Every I/O goes into the OOA queue for a 11140 * particular LUN, and stays there until completion. 11141 */ 11142 #ifdef CTL_TIME_IO 11143 if (TAILQ_EMPTY(&lun->ooa_queue)) { 11144 lun->idle_time += getsbinuptime() - 11145 lun->last_busy; 11146 } 11147 #endif 11148 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, 11149 ooa_links); 11150 } 11151 } else { 11152 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 11153 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 11154 } 11155 11156 /* Get the command entry and return an error if it is unsupported. */ 11157 entry = ctl_validate_command(ctsio); 11158 if (entry == NULL) { 11159 if (lun) 11160 mtx_unlock(&lun->lun_lock); 11161 return (retval); 11162 } 11163 11164 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 11165 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 11166 11167 /* 11168 * Check to see whether we can send this command to LUNs that don't 11169 * exist. This should pretty much only be the case for inquiry 11170 * and request sense. Further checks, below, really require having 11171 * a LUN, so we can't really check the command anymore. Just put 11172 * it on the rtr queue. 11173 */ 11174 if (lun == NULL) { 11175 if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) { 11176 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11177 ctl_enqueue_rtr((union ctl_io *)ctsio); 11178 return (retval); 11179 } 11180 11181 ctl_set_unsupported_lun(ctsio); 11182 ctl_done((union ctl_io *)ctsio); 11183 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); 11184 return (retval); 11185 } else { 11186 /* 11187 * Make sure we support this particular command on this LUN. 11188 * e.g., we don't support writes to the control LUN. 11189 */ 11190 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 11191 mtx_unlock(&lun->lun_lock); 11192 ctl_set_invalid_opcode(ctsio); 11193 ctl_done((union ctl_io *)ctsio); 11194 return (retval); 11195 } 11196 } 11197 11198 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11199 11200 #ifdef CTL_WITH_CA 11201 /* 11202 * If we've got a request sense, it'll clear the contingent 11203 * allegiance condition. Otherwise, if we have a CA condition for 11204 * this initiator, clear it, because it sent down a command other 11205 * than request sense. 11206 */ 11207 if ((ctsio->cdb[0] != REQUEST_SENSE) 11208 && (ctl_is_set(lun->have_ca, initidx))) 11209 ctl_clear_mask(lun->have_ca, initidx); 11210 #endif 11211 11212 /* 11213 * If the command has this flag set, it handles its own unit 11214 * attention reporting, so we shouldn't do anything. Otherwise we 11215 * check for any pending unit attentions, and send them back to the 11216 * initiator. We only do this when a command initially comes in, 11217 * not when we pull it off the blocked queue.
11218 * 11219 * According to SAM-3, section 5.3.2, the order that things get 11220 * presented back to the host is basically unit attentions caused 11221 * by some sort of reset event, busy status, reservation conflicts 11222 * or task set full, and finally any other status. 11223 * 11224 * One issue here is that some of the unit attentions we report 11225 * don't fall into the "reset" category (e.g. "reported luns data 11226 * has changed"). So reporting it here, before the reservation 11227 * check, may be technically wrong. I guess the only thing to do 11228 * would be to check for and report the reset events here, and then 11229 * check for the other unit attention types after we check for a 11230 * reservation conflict. 11231 * 11232 * XXX KDM need to fix this 11233 */ 11234 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11235 ctl_ua_type ua_type; 11236 scsi_sense_data_type sense_format; 11237 11238 if (lun->flags & CTL_LUN_SENSE_DESC) 11239 sense_format = SSD_TYPE_DESC; 11240 else 11241 sense_format = SSD_TYPE_FIXED; 11242 11243 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, 11244 sense_format); 11245 if (ua_type != CTL_UA_NONE) { 11246 mtx_unlock(&lun->lun_lock); 11247 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11248 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11249 ctsio->sense_len = SSD_FULL_SIZE; 11250 ctl_done((union ctl_io *)ctsio); 11251 return (retval); 11252 } 11253 } 11254 11255 11256 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 11257 mtx_unlock(&lun->lun_lock); 11258 ctl_done((union ctl_io *)ctsio); 11259 return (retval); 11260 } 11261 11262 /* 11263 * XXX CHD this is where we want to send IO to other side if 11264 * this LUN is secondary on this SC. We will need to make a copy 11265 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 11266 * the copy we send as FROM_OTHER. 11267 * We also need to stuff the address of the original IO so we can 11268 * find it easily. Something similar will need be done on the other 11269 * side so when we are done we can find the copy. 11270 */ 11271 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11272 union ctl_ha_msg msg_info; 11273 int isc_retval; 11274 11275 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11276 11277 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11278 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11279 #if 0 11280 printf("1. ctsio %p\n", ctsio); 11281 #endif 11282 msg_info.hdr.serializing_sc = NULL; 11283 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11284 msg_info.scsi.tag_num = ctsio->tag_num; 11285 msg_info.scsi.tag_type = ctsio->tag_type; 11286 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11287 11288 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11289 11290 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11291 (void *)&msg_info, sizeof(msg_info), 0)) > 11292 CTL_HA_STATUS_SUCCESS) { 11293 printf("CTL:precheck, ctl_ha_msg_send returned %d\n", 11294 isc_retval); 11295 printf("CTL:opcode is %x\n", ctsio->cdb[0]); 11296 } else { 11297 #if 0 11298 printf("CTL:Precheck sent msg, opcode is %x\n",opcode); 11299 #endif 11300 } 11301 11302 /* 11303 * XXX KDM this I/O is off the incoming queue, but hasn't 11304 * been inserted on any other queue. We may need to come 11305 * up with a holding queue while we wait for serialization 11306 * so that we have an idea of what we're waiting for from 11307 * the other side. 
11308 */ 11309 mtx_unlock(&lun->lun_lock); 11310 return (retval); 11311 } 11312 11313 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11314 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11315 ctl_ooaq, ooa_links))) { 11316 case CTL_ACTION_BLOCK: 11317 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 11318 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 11319 blocked_links); 11320 mtx_unlock(&lun->lun_lock); 11321 return (retval); 11322 case CTL_ACTION_PASS: 11323 case CTL_ACTION_SKIP: 11324 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11325 mtx_unlock(&lun->lun_lock); 11326 ctl_enqueue_rtr((union ctl_io *)ctsio); 11327 break; 11328 case CTL_ACTION_OVERLAP: 11329 mtx_unlock(&lun->lun_lock); 11330 ctl_set_overlapped_cmd(ctsio); 11331 ctl_done((union ctl_io *)ctsio); 11332 break; 11333 case CTL_ACTION_OVERLAP_TAG: 11334 mtx_unlock(&lun->lun_lock); 11335 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11336 ctl_done((union ctl_io *)ctsio); 11337 break; 11338 case CTL_ACTION_ERROR: 11339 default: 11340 mtx_unlock(&lun->lun_lock); 11341 ctl_set_internal_failure(ctsio, 11342 /*sks_valid*/ 0, 11343 /*retry_count*/ 0); 11344 ctl_done((union ctl_io *)ctsio); 11345 break; 11346 } 11347 return (retval); 11348 } 11349 11350 const struct ctl_cmd_entry * 11351 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11352 { 11353 const struct ctl_cmd_entry *entry; 11354 int service_action; 11355 11356 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11357 if (sa) 11358 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11359 if (entry->flags & CTL_CMD_FLAG_SA5) { 11360 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11361 entry = &((const struct ctl_cmd_entry *) 11362 entry->execute)[service_action]; 11363 } 11364 return (entry); 11365 } 11366 11367 const struct ctl_cmd_entry * 11368 ctl_validate_command(struct ctl_scsiio *ctsio) 11369 { 11370 const struct ctl_cmd_entry *entry; 11371 int i, sa; 11372 uint8_t diff; 11373 11374 entry = ctl_get_cmd_entry(ctsio, &sa); 11375 if (entry->execute == NULL) { 11376 if (sa) 11377 ctl_set_invalid_field(ctsio, 11378 /*sks_valid*/ 1, 11379 /*command*/ 1, 11380 /*field*/ 1, 11381 /*bit_valid*/ 1, 11382 /*bit*/ 4); 11383 else 11384 ctl_set_invalid_opcode(ctsio); 11385 ctl_done((union ctl_io *)ctsio); 11386 return (NULL); 11387 } 11388 KASSERT(entry->length > 0, 11389 ("Not defined length for command 0x%02x/0x%02x", 11390 ctsio->cdb[0], ctsio->cdb[1])); 11391 for (i = 1; i < entry->length; i++) { 11392 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11393 if (diff == 0) 11394 continue; 11395 ctl_set_invalid_field(ctsio, 11396 /*sks_valid*/ 1, 11397 /*command*/ 1, 11398 /*field*/ i, 11399 /*bit_valid*/ 1, 11400 /*bit*/ fls(diff) - 1); 11401 ctl_done((union ctl_io *)ctsio); 11402 return (NULL); 11403 } 11404 return (entry); 11405 } 11406 11407 static int 11408 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11409 { 11410 11411 switch (lun_type) { 11412 case T_PROCESSOR: 11413 if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) && 11414 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0)) 11415 return (0); 11416 break; 11417 case T_DIRECT: 11418 if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) && 11419 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0)) 11420 return (0); 11421 break; 11422 default: 11423 return (0); 11424 } 11425 return (1); 11426 } 11427 11428 static int 11429 ctl_scsiio(struct ctl_scsiio *ctsio) 11430 { 11431 int retval; 11432 const struct ctl_cmd_entry *entry; 11433 11434 retval = CTL_RETVAL_COMPLETE; 11435 11436 
CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11437 11438 entry = ctl_get_cmd_entry(ctsio, NULL); 11439 11440 /* 11441 * If this I/O has been aborted, just send it straight to 11442 * ctl_done() without executing it. 11443 */ 11444 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11445 ctl_done((union ctl_io *)ctsio); 11446 goto bailout; 11447 } 11448 11449 /* 11450 * All the checks should have been handled by ctl_scsiio_precheck(). 11451 * We should be clear now to just execute the I/O. 11452 */ 11453 retval = entry->execute(ctsio); 11454 11455 bailout: 11456 return (retval); 11457 } 11458 11459 /* 11460 * Since we only implement one target right now, a bus reset simply resets 11461 * our single target. 11462 */ 11463 static int 11464 ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io) 11465 { 11466 return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET)); 11467 } 11468 11469 static int 11470 ctl_target_reset(struct ctl_softc *softc, union ctl_io *io, 11471 ctl_ua_type ua_type) 11472 { 11473 struct ctl_lun *lun; 11474 int retval; 11475 11476 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11477 union ctl_ha_msg msg_info; 11478 11479 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11480 msg_info.hdr.nexus = io->io_hdr.nexus; 11481 if (ua_type==CTL_UA_TARG_RESET) 11482 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 11483 else 11484 msg_info.task.task_action = CTL_TASK_BUS_RESET; 11485 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11486 msg_info.hdr.original_sc = NULL; 11487 msg_info.hdr.serializing_sc = NULL; 11488 if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11489 (void *)&msg_info, sizeof(msg_info), 0)) { 11490 } 11491 } 11492 retval = 0; 11493 11494 mtx_lock(&softc->ctl_lock); 11495 STAILQ_FOREACH(lun, &softc->lun_list, links) 11496 retval += ctl_lun_reset(lun, io, ua_type); 11497 mtx_unlock(&softc->ctl_lock); 11498 11499 return (retval); 11500 } 11501 11502 /* 11503 * The LUN should always be set. The I/O is optional, and is used to 11504 * distinguish between I/Os sent by this initiator, and by other 11505 * initiators. We set unit attention for initiators other than this one. 11506 * SAM-3 is vague on this point. It does say that a unit attention should 11507 * be established for other initiators when a LUN is reset (see section 11508 * 5.7.3), but it doesn't specifically say that the unit attention should 11509 * be established for this particular initiator when a LUN is reset. Here 11510 * is the relevant text, from SAM-3 rev 8: 11511 * 11512 * 5.7.2 When a SCSI initiator port aborts its own tasks 11513 * 11514 * When a SCSI initiator port causes its own task(s) to be aborted, no 11515 * notification that the task(s) have been aborted shall be returned to 11516 * the SCSI initiator port other than the completion response for the 11517 * command or task management function action that caused the task(s) to 11518 * be aborted and notification(s) associated with related effects of the 11519 * action (e.g., a reset unit attention condition). 11520 * 11521 * XXX KDM for now, we're setting unit attention for all initiators. 11522 */ 11523 static int 11524 ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type) 11525 { 11526 union ctl_io *xio; 11527 #if 0 11528 uint32_t initidx; 11529 #endif 11530 #ifdef CTL_WITH_CA 11531 int i; 11532 #endif 11533 11534 mtx_lock(&lun->lun_lock); 11535 /* 11536 * Run through the OOA queue and abort each I/O. 
11537 */ 11538 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11539 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11540 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; 11541 } 11542 11543 /* 11544 * This version sets unit attention for every 11545 */ 11546 #if 0 11547 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11548 ctl_est_ua_all(lun, initidx, ua_type); 11549 #else 11550 ctl_est_ua_all(lun, -1, ua_type); 11551 #endif 11552 11553 /* 11554 * A reset (any kind, really) clears reservations established with 11555 * RESERVE/RELEASE. It does not clear reservations established 11556 * with PERSISTENT RESERVE OUT, but we don't support that at the 11557 * moment anyway. See SPC-2, section 5.6. SPC-3 doesn't address 11558 * reservations made with the RESERVE/RELEASE commands, because 11559 * those commands are obsolete in SPC-3. 11560 */ 11561 lun->flags &= ~CTL_LUN_RESERVED; 11562 11563 #ifdef CTL_WITH_CA 11564 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11565 ctl_clear_mask(lun->have_ca, i); 11566 #endif 11567 mtx_unlock(&lun->lun_lock); 11568 11569 return (0); 11570 } 11571 11572 static void 11573 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 11574 int other_sc) 11575 { 11576 union ctl_io *xio; 11577 11578 mtx_assert(&lun->lun_lock, MA_OWNED); 11579 11580 /* 11581 * Run through the OOA queue and attempt to find the given I/O. 11582 * The target port, initiator ID, tag type and tag number have to 11583 * match the values that we got from the initiator. If we have an 11584 * untagged command to abort, simply abort the first untagged command 11585 * we come to. We only allow one untagged command at a time of course. 11586 */ 11587 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11588 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11589 11590 if ((targ_port == UINT32_MAX || 11591 targ_port == xio->io_hdr.nexus.targ_port) && 11592 (init_id == UINT32_MAX || 11593 init_id == xio->io_hdr.nexus.initid.id)) { 11594 if (targ_port != xio->io_hdr.nexus.targ_port || 11595 init_id != xio->io_hdr.nexus.initid.id) 11596 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; 11597 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11598 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11599 union ctl_ha_msg msg_info; 11600 11601 msg_info.hdr.nexus = xio->io_hdr.nexus; 11602 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11603 msg_info.task.tag_num = xio->scsiio.tag_num; 11604 msg_info.task.tag_type = xio->scsiio.tag_type; 11605 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11606 msg_info.hdr.original_sc = NULL; 11607 msg_info.hdr.serializing_sc = NULL; 11608 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11609 (void *)&msg_info, sizeof(msg_info), 0); 11610 } 11611 } 11612 } 11613 } 11614 11615 static int 11616 ctl_abort_task_set(union ctl_io *io) 11617 { 11618 struct ctl_softc *softc = control_softc; 11619 struct ctl_lun *lun; 11620 uint32_t targ_lun; 11621 11622 /* 11623 * Look up the LUN. 
11624 */ 11625 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11626 mtx_lock(&softc->ctl_lock); 11627 if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL)) 11628 lun = softc->ctl_luns[targ_lun]; 11629 else { 11630 mtx_unlock(&softc->ctl_lock); 11631 return (1); 11632 } 11633 11634 mtx_lock(&lun->lun_lock); 11635 mtx_unlock(&softc->ctl_lock); 11636 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 11637 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11638 io->io_hdr.nexus.initid.id, 11639 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11640 } else { /* CTL_TASK_CLEAR_TASK_SET */ 11641 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 11642 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11643 } 11644 mtx_unlock(&lun->lun_lock); 11645 return (0); 11646 } 11647 11648 static int 11649 ctl_i_t_nexus_reset(union ctl_io *io) 11650 { 11651 struct ctl_softc *softc = control_softc; 11652 struct ctl_lun *lun; 11653 uint32_t initidx, residx; 11654 11655 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11656 residx = ctl_get_resindex(&io->io_hdr.nexus); 11657 mtx_lock(&softc->ctl_lock); 11658 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11659 mtx_lock(&lun->lun_lock); 11660 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11661 io->io_hdr.nexus.initid.id, 11662 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11663 #ifdef CTL_WITH_CA 11664 ctl_clear_mask(lun->have_ca, initidx); 11665 #endif 11666 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 11667 lun->flags &= ~CTL_LUN_RESERVED; 11668 ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS); 11669 mtx_unlock(&lun->lun_lock); 11670 } 11671 mtx_unlock(&softc->ctl_lock); 11672 return (0); 11673 } 11674 11675 static int 11676 ctl_abort_task(union ctl_io *io) 11677 { 11678 union ctl_io *xio; 11679 struct ctl_lun *lun; 11680 struct ctl_softc *softc; 11681 #if 0 11682 struct sbuf sb; 11683 char printbuf[128]; 11684 #endif 11685 int found; 11686 uint32_t targ_lun; 11687 11688 softc = control_softc; 11689 found = 0; 11690 11691 /* 11692 * Look up the LUN. 11693 */ 11694 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11695 mtx_lock(&softc->ctl_lock); 11696 if ((targ_lun < CTL_MAX_LUNS) 11697 && (softc->ctl_luns[targ_lun] != NULL)) 11698 lun = softc->ctl_luns[targ_lun]; 11699 else { 11700 mtx_unlock(&softc->ctl_lock); 11701 return (1); 11702 } 11703 11704 #if 0 11705 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 11706 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 11707 #endif 11708 11709 mtx_lock(&lun->lun_lock); 11710 mtx_unlock(&softc->ctl_lock); 11711 /* 11712 * Run through the OOA queue and attempt to find the given I/O. 11713 * The target port, initiator ID, tag type and tag number have to 11714 * match the values that we got from the initiator. If we have an 11715 * untagged command to abort, simply abort the first untagged command 11716 * we come to. We only allow one untagged command at a time of course. 11717 */ 11718 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11719 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11720 #if 0 11721 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 11722 11723 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 11724 lun->lun, xio->scsiio.tag_num, 11725 xio->scsiio.tag_type, 11726 (xio->io_hdr.blocked_links.tqe_prev 11727 == NULL) ? "" : " BLOCKED", 11728 (xio->io_hdr.flags & 11729 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 11730 (xio->io_hdr.flags & 11731 CTL_FLAG_ABORT) ? 
" ABORT" : "", 11732 (xio->io_hdr.flags & 11733 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); 11734 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 11735 sbuf_finish(&sb); 11736 printf("%s\n", sbuf_data(&sb)); 11737 #endif 11738 11739 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 11740 || (xio->io_hdr.nexus.initid.id != io->io_hdr.nexus.initid.id) 11741 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 11742 continue; 11743 11744 /* 11745 * If the abort says that the task is untagged, the 11746 * task in the queue must be untagged. Otherwise, 11747 * we just check to see whether the tag numbers 11748 * match. This is because the QLogic firmware 11749 * doesn't pass back the tag type in an abort 11750 * request. 11751 */ 11752 #if 0 11753 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 11754 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 11755 || (xio->scsiio.tag_num == io->taskio.tag_num)) 11756 #endif 11757 /* 11758 * XXX KDM we've got problems with FC, because it 11759 * doesn't send down a tag type with aborts. So we 11760 * can only really go by the tag number... 11761 * This may cause problems with parallel SCSI. 11762 * Need to figure that out!! 11763 */ 11764 if (xio->scsiio.tag_num == io->taskio.tag_num) { 11765 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11766 found = 1; 11767 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 11768 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11769 union ctl_ha_msg msg_info; 11770 11771 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11772 msg_info.hdr.nexus = io->io_hdr.nexus; 11773 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11774 msg_info.task.tag_num = io->taskio.tag_num; 11775 msg_info.task.tag_type = io->taskio.tag_type; 11776 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11777 msg_info.hdr.original_sc = NULL; 11778 msg_info.hdr.serializing_sc = NULL; 11779 #if 0 11780 printf("Sent Abort to other side\n"); 11781 #endif 11782 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11783 (void *)&msg_info, sizeof(msg_info), 0) != 11784 CTL_HA_STATUS_SUCCESS) { 11785 } 11786 } 11787 #if 0 11788 printf("ctl_abort_task: found I/O to abort\n"); 11789 #endif 11790 } 11791 } 11792 mtx_unlock(&lun->lun_lock); 11793 11794 if (found == 0) { 11795 /* 11796 * This isn't really an error. It's entirely possible for 11797 * the abort and command completion to cross on the wire. 11798 * This is more of an informative/diagnostic error. 
		 */
#if 0
		printf("ctl_abort_task: ABORT sent for nonexistent I/O: "
		       "%d:%d:%d:%d tag %d type %d\n",
		       io->io_hdr.nexus.initid.id,
		       io->io_hdr.nexus.targ_port,
		       io->io_hdr.nexus.targ_target.id,
		       io->io_hdr.nexus.targ_lun, io->taskio.tag_num,
		       io->taskio.tag_type);
#endif
	}
	return (0);
}

static void
ctl_run_task(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	int retval = 1;
	const char *task_desc;

	CTL_DEBUG_PRINT(("ctl_run_task\n"));

	KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
	    ("ctl_run_task: Unexpected io_type %d\n",
	     io->io_hdr.io_type));

	task_desc = ctl_scsi_task_string(&io->taskio);
	if (task_desc != NULL) {
#ifdef NEEDTOPORT
		csevent_log(CSC_CTL | CSC_SHELF_SW |
			    CTL_TASK_REPORT,
			    csevent_LogType_Trace,
			    csevent_Severity_Information,
			    csevent_AlertLevel_Green,
			    csevent_FRU_Firmware,
			    csevent_FRU_Unknown,
			    "CTL: received task: %s",task_desc);
#endif
	} else {
#ifdef NEEDTOPORT
		csevent_log(CSC_CTL | CSC_SHELF_SW |
			    CTL_TASK_REPORT,
			    csevent_LogType_Trace,
			    csevent_Severity_Information,
			    csevent_AlertLevel_Green,
			    csevent_FRU_Firmware,
			    csevent_FRU_Unknown,
			    "CTL: received unknown task "
			    "type: %d (%#x)",
			    io->taskio.task_action,
			    io->taskio.task_action);
#endif
	}
	switch (io->taskio.task_action) {
	case CTL_TASK_ABORT_TASK:
		retval = ctl_abort_task(io);
		break;
	case CTL_TASK_ABORT_TASK_SET:
	case CTL_TASK_CLEAR_TASK_SET:
		retval = ctl_abort_task_set(io);
		break;
	case CTL_TASK_CLEAR_ACA:
		break;
	case CTL_TASK_I_T_NEXUS_RESET:
		retval = ctl_i_t_nexus_reset(io);
		break;
	case CTL_TASK_LUN_RESET: {
		struct ctl_lun *lun;
		uint32_t targ_lun;

		targ_lun = io->io_hdr.nexus.targ_mapped_lun;
		mtx_lock(&softc->ctl_lock);
		if ((targ_lun < CTL_MAX_LUNS)
		 && (softc->ctl_luns[targ_lun] != NULL))
			lun = softc->ctl_luns[targ_lun];
		else {
			mtx_unlock(&softc->ctl_lock);
			retval = 1;
			break;
		}

		if (!(io->io_hdr.flags &
		    CTL_FLAG_FROM_OTHER_SC)) {
			union ctl_ha_msg msg_info;

			io->io_hdr.flags |=
				CTL_FLAG_SENT_2OTHER_SC;
			msg_info.hdr.msg_type =
				CTL_MSG_MANAGE_TASKS;
			msg_info.hdr.nexus = io->io_hdr.nexus;
			msg_info.task.task_action =
				CTL_TASK_LUN_RESET;
			msg_info.hdr.original_sc = NULL;
			msg_info.hdr.serializing_sc = NULL;
			if (CTL_HA_STATUS_SUCCESS !=
			    ctl_ha_msg_send(CTL_HA_CHAN_CTL,
			    (void *)&msg_info,
			    sizeof(msg_info), 0)) {
			}
		}

		retval = ctl_lun_reset(lun, io,
				       CTL_UA_LUN_RESET);
		mtx_unlock(&softc->ctl_lock);
		break;
	}
	case CTL_TASK_TARGET_RESET:
		retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET);
		break;
	case CTL_TASK_BUS_RESET:
		retval = ctl_bus_reset(softc, io);
		break;
	case CTL_TASK_PORT_LOGIN:
		break;
	case CTL_TASK_PORT_LOGOUT:
		break;
	default:
		printf("ctl_run_task: got unknown task management event %d\n",
		       io->taskio.task_action);
		break;
	}
	if (retval == 0)
		io->io_hdr.status = CTL_SUCCESS;
	else
		io->io_hdr.status = CTL_ERROR;
	ctl_done(io);
}

/*
 * For HA operation.
Handle commands that come in from the other 11930 * controller. 11931 */ 11932 static void 11933 ctl_handle_isc(union ctl_io *io) 11934 { 11935 int free_io; 11936 struct ctl_lun *lun; 11937 struct ctl_softc *softc; 11938 uint32_t targ_lun; 11939 11940 softc = control_softc; 11941 11942 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11943 lun = softc->ctl_luns[targ_lun]; 11944 11945 switch (io->io_hdr.msg_type) { 11946 case CTL_MSG_SERIALIZE: 11947 free_io = ctl_serialize_other_sc_cmd(&io->scsiio); 11948 break; 11949 case CTL_MSG_R2R: { 11950 const struct ctl_cmd_entry *entry; 11951 11952 /* 11953 * This is only used in SER_ONLY mode. 11954 */ 11955 free_io = 0; 11956 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 11957 mtx_lock(&lun->lun_lock); 11958 if (ctl_scsiio_lun_check(lun, 11959 entry, (struct ctl_scsiio *)io) != 0) { 11960 mtx_unlock(&lun->lun_lock); 11961 ctl_done(io); 11962 break; 11963 } 11964 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11965 mtx_unlock(&lun->lun_lock); 11966 ctl_enqueue_rtr(io); 11967 break; 11968 } 11969 case CTL_MSG_FINISH_IO: 11970 if (softc->ha_mode == CTL_HA_MODE_XFER) { 11971 free_io = 0; 11972 ctl_done(io); 11973 } else { 11974 free_io = 1; 11975 mtx_lock(&lun->lun_lock); 11976 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, 11977 ooa_links); 11978 ctl_check_blocked(lun); 11979 mtx_unlock(&lun->lun_lock); 11980 } 11981 break; 11982 case CTL_MSG_PERS_ACTION: 11983 ctl_hndl_per_res_out_on_other_sc( 11984 (union ctl_ha_msg *)&io->presio.pr_msg); 11985 free_io = 1; 11986 break; 11987 case CTL_MSG_BAD_JUJU: 11988 free_io = 0; 11989 ctl_done(io); 11990 break; 11991 case CTL_MSG_DATAMOVE: 11992 /* Only used in XFER mode */ 11993 free_io = 0; 11994 ctl_datamove_remote(io); 11995 break; 11996 case CTL_MSG_DATAMOVE_DONE: 11997 /* Only used in XFER mode */ 11998 free_io = 0; 11999 io->scsiio.be_move_done(io); 12000 break; 12001 default: 12002 free_io = 1; 12003 printf("%s: Invalid message type %d\n", 12004 __func__, io->io_hdr.msg_type); 12005 break; 12006 } 12007 if (free_io) 12008 ctl_free_io(io); 12009 12010 } 12011 12012 12013 /* 12014 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12015 * there is no match. 12016 */ 12017 static ctl_lun_error_pattern 12018 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12019 { 12020 const struct ctl_cmd_entry *entry; 12021 ctl_lun_error_pattern filtered_pattern, pattern; 12022 12023 pattern = desc->error_pattern; 12024 12025 /* 12026 * XXX KDM we need more data passed into this function to match a 12027 * custom pattern, and we actually need to implement custom pattern 12028 * matching. 12029 */ 12030 if (pattern & CTL_LUN_PAT_CMD) 12031 return (CTL_LUN_PAT_CMD); 12032 12033 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12034 return (CTL_LUN_PAT_ANY); 12035 12036 entry = ctl_get_cmd_entry(ctsio, NULL); 12037 12038 filtered_pattern = entry->pattern & pattern; 12039 12040 /* 12041 * If the user requested specific flags in the pattern (e.g. 12042 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12043 * flags. 12044 * 12045 * If the user did not specify any flags, it doesn't matter whether 12046 * or not the command supports the flags. 12047 */ 12048 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12049 (pattern & ~CTL_LUN_PAT_MASK)) 12050 return (CTL_LUN_PAT_NONE); 12051 12052 /* 12053 * If the user asked for a range check, see if the requested LBA 12054 * range overlaps with this command's LBA range. 
12055 */ 12056 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12057 uint64_t lba1; 12058 uint64_t len1; 12059 ctl_action action; 12060 int retval; 12061 12062 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12063 if (retval != 0) 12064 return (CTL_LUN_PAT_NONE); 12065 12066 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12067 desc->lba_range.len, FALSE); 12068 /* 12069 * A "pass" means that the LBA ranges don't overlap, so 12070 * this doesn't match the user's range criteria. 12071 */ 12072 if (action == CTL_ACTION_PASS) 12073 return (CTL_LUN_PAT_NONE); 12074 } 12075 12076 return (filtered_pattern); 12077 } 12078 12079 static void 12080 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12081 { 12082 struct ctl_error_desc *desc, *desc2; 12083 12084 mtx_assert(&lun->lun_lock, MA_OWNED); 12085 12086 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12087 ctl_lun_error_pattern pattern; 12088 /* 12089 * Check to see whether this particular command matches 12090 * the pattern in the descriptor. 12091 */ 12092 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12093 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12094 continue; 12095 12096 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12097 case CTL_LUN_INJ_ABORTED: 12098 ctl_set_aborted(&io->scsiio); 12099 break; 12100 case CTL_LUN_INJ_MEDIUM_ERR: 12101 ctl_set_medium_error(&io->scsiio); 12102 break; 12103 case CTL_LUN_INJ_UA: 12104 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12105 * OCCURRED */ 12106 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12107 break; 12108 case CTL_LUN_INJ_CUSTOM: 12109 /* 12110 * We're assuming the user knows what he is doing. 12111 * Just copy the sense information without doing 12112 * checks. 12113 */ 12114 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12115 MIN(sizeof(desc->custom_sense), 12116 sizeof(io->scsiio.sense_data))); 12117 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12118 io->scsiio.sense_len = SSD_FULL_SIZE; 12119 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12120 break; 12121 case CTL_LUN_INJ_NONE: 12122 default: 12123 /* 12124 * If this is an error injection type we don't know 12125 * about, clear the continuous flag (if it is set) 12126 * so it will get deleted below. 
12127 */ 12128 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12129 break; 12130 } 12131 /* 12132 * By default, each error injection action is a one-shot 12133 */ 12134 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12135 continue; 12136 12137 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12138 12139 free(desc, M_CTL); 12140 } 12141 } 12142 12143 #ifdef CTL_IO_DELAY 12144 static void 12145 ctl_datamove_timer_wakeup(void *arg) 12146 { 12147 union ctl_io *io; 12148 12149 io = (union ctl_io *)arg; 12150 12151 ctl_datamove(io); 12152 } 12153 #endif /* CTL_IO_DELAY */ 12154 12155 void 12156 ctl_datamove(union ctl_io *io) 12157 { 12158 void (*fe_datamove)(union ctl_io *io); 12159 12160 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 12161 12162 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12163 12164 #ifdef CTL_TIME_IO 12165 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12166 char str[256]; 12167 char path_str[64]; 12168 struct sbuf sb; 12169 12170 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12171 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12172 12173 sbuf_cat(&sb, path_str); 12174 switch (io->io_hdr.io_type) { 12175 case CTL_IO_SCSI: 12176 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12177 sbuf_printf(&sb, "\n"); 12178 sbuf_cat(&sb, path_str); 12179 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12180 io->scsiio.tag_num, io->scsiio.tag_type); 12181 break; 12182 case CTL_IO_TASK: 12183 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12184 "Tag Type: %d\n", io->taskio.task_action, 12185 io->taskio.tag_num, io->taskio.tag_type); 12186 break; 12187 default: 12188 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12189 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12190 break; 12191 } 12192 sbuf_cat(&sb, path_str); 12193 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12194 (intmax_t)time_uptime - io->io_hdr.start_time); 12195 sbuf_finish(&sb); 12196 printf("%s", sbuf_data(&sb)); 12197 } 12198 #endif /* CTL_TIME_IO */ 12199 12200 #ifdef CTL_IO_DELAY 12201 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12202 struct ctl_lun *lun; 12203 12204 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12205 12206 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12207 } else { 12208 struct ctl_lun *lun; 12209 12210 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12211 if ((lun != NULL) 12212 && (lun->delay_info.datamove_delay > 0)) { 12213 struct callout *callout; 12214 12215 callout = (struct callout *)&io->io_hdr.timer_bytes; 12216 callout_init(callout, /*mpsafe*/ 1); 12217 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12218 callout_reset(callout, 12219 lun->delay_info.datamove_delay * hz, 12220 ctl_datamove_timer_wakeup, io); 12221 if (lun->delay_info.datamove_type == 12222 CTL_DELAY_TYPE_ONESHOT) 12223 lun->delay_info.datamove_delay = 0; 12224 return; 12225 } 12226 } 12227 #endif 12228 12229 /* 12230 * This command has been aborted. Set the port status, so we fail 12231 * the data move. 12232 */ 12233 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12234 printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n", 12235 io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id, 12236 io->io_hdr.nexus.targ_port, 12237 (uintmax_t)io->io_hdr.nexus.targ_target.id, 12238 io->io_hdr.nexus.targ_lun); 12239 io->io_hdr.port_status = 31337; 12240 /* 12241 * Note that the backend, in this case, will get the 12242 * callback in its context. In other cases it may get 12243 * called in the frontend's interrupt thread context. 
12244 */ 12245 io->scsiio.be_move_done(io); 12246 return; 12247 } 12248 12249 /* Don't confuse frontend with zero length data move. */ 12250 if (io->scsiio.kern_data_len == 0) { 12251 io->scsiio.be_move_done(io); 12252 return; 12253 } 12254 12255 /* 12256 * If we're in XFER mode and this I/O is from the other shelf 12257 * controller, we need to send the DMA to the other side to 12258 * actually transfer the data to/from the host. In serialize only 12259 * mode the transfer happens below CTL and ctl_datamove() is only 12260 * called on the machine that originally received the I/O. 12261 */ 12262 if ((control_softc->ha_mode == CTL_HA_MODE_XFER) 12263 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12264 union ctl_ha_msg msg; 12265 uint32_t sg_entries_sent; 12266 int do_sg_copy; 12267 int i; 12268 12269 memset(&msg, 0, sizeof(msg)); 12270 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 12271 msg.hdr.original_sc = io->io_hdr.original_sc; 12272 msg.hdr.serializing_sc = io; 12273 msg.hdr.nexus = io->io_hdr.nexus; 12274 msg.dt.flags = io->io_hdr.flags; 12275 /* 12276 * We convert everything into a S/G list here. We can't 12277 * pass by reference, only by value between controllers. 12278 * So we can't pass a pointer to the S/G list, only as many 12279 * S/G entries as we can fit in here. If it's possible for 12280 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 12281 * then we need to break this up into multiple transfers. 12282 */ 12283 if (io->scsiio.kern_sg_entries == 0) { 12284 msg.dt.kern_sg_entries = 1; 12285 /* 12286 * If this is in cached memory, flush the cache 12287 * before we send the DMA request to the other 12288 * controller. We want to do this in either the 12289 * read or the write case. The read case is 12290 * straightforward. In the write case, we want to 12291 * make sure nothing is in the local cache that 12292 * could overwrite the DMAed data. 12293 */ 12294 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12295 /* 12296 * XXX KDM use bus_dmamap_sync() here. 12297 */ 12298 } 12299 12300 /* 12301 * Convert to a physical address if this is a 12302 * virtual address. 12303 */ 12304 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 12305 msg.dt.sg_list[0].addr = 12306 io->scsiio.kern_data_ptr; 12307 } else { 12308 /* 12309 * XXX KDM use busdma here! 12310 */ 12311 #if 0 12312 msg.dt.sg_list[0].addr = (void *) 12313 vtophys(io->scsiio.kern_data_ptr); 12314 #endif 12315 } 12316 12317 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 12318 do_sg_copy = 0; 12319 } else { 12320 struct ctl_sg_entry *sgl; 12321 12322 do_sg_copy = 1; 12323 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 12324 sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 12325 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12326 /* 12327 * XXX KDM use bus_dmamap_sync() here. 12328 */ 12329 } 12330 } 12331 12332 msg.dt.kern_data_len = io->scsiio.kern_data_len; 12333 msg.dt.kern_total_len = io->scsiio.kern_total_len; 12334 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 12335 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 12336 msg.dt.sg_sequence = 0; 12337 12338 /* 12339 * Loop until we've sent all of the S/G entries. 
On the 12340 * other end, we'll recompose these S/G entries into one 12341 * contiguous list before passing it to the 12342 */ 12343 for (sg_entries_sent = 0; sg_entries_sent < 12344 msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { 12345 msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list)/ 12346 sizeof(msg.dt.sg_list[0])), 12347 msg.dt.kern_sg_entries - sg_entries_sent); 12348 12349 if (do_sg_copy != 0) { 12350 struct ctl_sg_entry *sgl; 12351 int j; 12352 12353 sgl = (struct ctl_sg_entry *) 12354 io->scsiio.kern_data_ptr; 12355 /* 12356 * If this is in cached memory, flush the cache 12357 * before we send the DMA request to the other 12358 * controller. We want to do this in either 12359 * the * read or the write case. The read 12360 * case is straightforward. In the write 12361 * case, we want to make sure nothing is 12362 * in the local cache that could overwrite 12363 * the DMAed data. 12364 */ 12365 12366 for (i = sg_entries_sent, j = 0; 12367 i < msg.dt.cur_sg_entries; i++, j++) { 12368 if ((io->io_hdr.flags & 12369 CTL_FLAG_NO_DATASYNC) == 0) { 12370 /* 12371 * XXX KDM use bus_dmamap_sync() 12372 */ 12373 } 12374 if ((io->io_hdr.flags & 12375 CTL_FLAG_BUS_ADDR) == 0) { 12376 /* 12377 * XXX KDM use busdma. 12378 */ 12379 #if 0 12380 msg.dt.sg_list[j].addr =(void *) 12381 vtophys(sgl[i].addr); 12382 #endif 12383 } else { 12384 msg.dt.sg_list[j].addr = 12385 sgl[i].addr; 12386 } 12387 msg.dt.sg_list[j].len = sgl[i].len; 12388 } 12389 } 12390 12391 sg_entries_sent += msg.dt.cur_sg_entries; 12392 if (sg_entries_sent >= msg.dt.kern_sg_entries) 12393 msg.dt.sg_last = 1; 12394 else 12395 msg.dt.sg_last = 0; 12396 12397 /* 12398 * XXX KDM drop and reacquire the lock here? 12399 */ 12400 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12401 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { 12402 /* 12403 * XXX do something here. 12404 */ 12405 } 12406 12407 msg.dt.sent_sg_entries = sg_entries_sent; 12408 } 12409 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12410 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) 12411 ctl_failover_io(io, /*have_lock*/ 0); 12412 12413 } else { 12414 12415 /* 12416 * Lookup the fe_datamove() function for this particular 12417 * front end. 
		 */
		fe_datamove =
		    control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;

		fe_datamove(io);
	}
}

static void
ctl_send_datamove_done(union ctl_io *io, int have_lock)
{
	union ctl_ha_msg msg;
	int isc_status;

	memset(&msg, 0, sizeof(msg));

	msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
	msg.hdr.original_sc = io;
	msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
	msg.hdr.nexus = io->io_hdr.nexus;
	msg.hdr.status = io->io_hdr.status;
	msg.scsi.tag_num = io->scsiio.tag_num;
	msg.scsi.tag_type = io->scsiio.tag_type;
	msg.scsi.scsi_status = io->scsiio.scsi_status;
	memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
	       sizeof(io->scsiio.sense_data));
	msg.scsi.sense_len = io->scsiio.sense_len;
	msg.scsi.sense_residual = io->scsiio.sense_residual;
	msg.scsi.fetd_status = io->io_hdr.port_status;
	msg.scsi.residual = io->scsiio.residual;
	io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;

	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
		ctl_failover_io(io, /*have_lock*/ have_lock);
		return;
	}

	isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0);
	if (isc_status > CTL_HA_STATUS_SUCCESS) {
		/* XXX do something if this fails */
	}

}

/*
 * The DMA to the remote side is done, now we need to tell the other side
 * we're done so it can continue with its data movement.
 */
static void
ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
{
	union ctl_io *io;

	io = rq->context;

	if (rq->ret != CTL_HA_STATUS_SUCCESS) {
		printf("%s: ISC DMA write failed with error %d", __func__,
		       rq->ret);
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/ rq->ret);
	}

	ctl_dt_req_free(rq);

	/*
	 * In this case, we had to malloc the memory locally.  Free it.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
		int i;
		for (i = 0; i < io->scsiio.kern_sg_entries; i++)
			free(io->io_hdr.local_sglist[i].addr, M_CTL);
	}
	/*
	 * The data is in local and remote memory, so now we need to send
	 * status (good or bad) back to the other side.
	 */
	ctl_send_datamove_done(io, /*have_lock*/ 0);
}

/*
 * We've moved the data from the host/controller into local memory.  Now we
 * need to push it over to the remote controller's memory.
 */
static int
ctl_datamove_remote_dm_write_cb(union ctl_io *io)
{
	int retval;

	retval = 0;

	retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE,
					  ctl_datamove_remote_write_cb);

	return (retval);
}

static void
ctl_datamove_remote_write(union ctl_io *io)
{
	int retval;
	void (*fe_datamove)(union ctl_io *io);

	/*
	 * - Get the data from the host/HBA into local memory.
	 * - DMA memory from the local controller to the remote controller.
	 * - Send status back to the remote controller.
12525 */ 12526 12527 retval = ctl_datamove_remote_sgl_setup(io); 12528 if (retval != 0) 12529 return; 12530 12531 /* Switch the pointer over so the FETD knows what to do */ 12532 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12533 12534 /* 12535 * Use a custom move done callback, since we need to send completion 12536 * back to the other controller, not to the backend on this side. 12537 */ 12538 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12539 12540 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 12541 12542 fe_datamove(io); 12543 12544 return; 12545 12546 } 12547 12548 static int 12549 ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12550 { 12551 #if 0 12552 char str[256]; 12553 char path_str[64]; 12554 struct sbuf sb; 12555 #endif 12556 12557 /* 12558 * In this case, we had to malloc the memory locally. Free it. 12559 */ 12560 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 12561 int i; 12562 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12563 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12564 } 12565 12566 #if 0 12567 scsi_path_string(io, path_str, sizeof(path_str)); 12568 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12569 sbuf_cat(&sb, path_str); 12570 scsi_command_string(&io->scsiio, NULL, &sb); 12571 sbuf_printf(&sb, "\n"); 12572 sbuf_cat(&sb, path_str); 12573 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12574 io->scsiio.tag_num, io->scsiio.tag_type); 12575 sbuf_cat(&sb, path_str); 12576 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 12577 io->io_hdr.flags, io->io_hdr.status); 12578 sbuf_finish(&sb); 12579 printk("%s", sbuf_data(&sb)); 12580 #endif 12581 12582 12583 /* 12584 * The read is done, now we need to send status (good or bad) back 12585 * to the other side. 12586 */ 12587 ctl_send_datamove_done(io, /*have_lock*/ 0); 12588 12589 return (0); 12590 } 12591 12592 static void 12593 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12594 { 12595 union ctl_io *io; 12596 void (*fe_datamove)(union ctl_io *io); 12597 12598 io = rq->context; 12599 12600 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12601 printf("%s: ISC DMA read failed with error %d", __func__, 12602 rq->ret); 12603 ctl_set_internal_failure(&io->scsiio, 12604 /*sks_valid*/ 1, 12605 /*retry_count*/ rq->ret); 12606 } 12607 12608 ctl_dt_req_free(rq); 12609 12610 /* Switch the pointer over so the FETD knows what to do */ 12611 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12612 12613 /* 12614 * Use a custom move done callback, since we need to send completion 12615 * back to the other controller, not to the backend on this side. 12616 */ 12617 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12618 12619 /* XXX KDM add checks like the ones in ctl_datamove? 
*/ 12620 12621 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 12622 12623 fe_datamove(io); 12624 } 12625 12626 static int 12627 ctl_datamove_remote_sgl_setup(union ctl_io *io) 12628 { 12629 struct ctl_sg_entry *local_sglist, *remote_sglist; 12630 struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist; 12631 struct ctl_softc *softc; 12632 int retval; 12633 int i; 12634 12635 retval = 0; 12636 softc = control_softc; 12637 12638 local_sglist = io->io_hdr.local_sglist; 12639 local_dma_sglist = io->io_hdr.local_dma_sglist; 12640 remote_sglist = io->io_hdr.remote_sglist; 12641 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 12642 12643 if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) { 12644 for (i = 0; i < io->scsiio.kern_sg_entries; i++) { 12645 local_sglist[i].len = remote_sglist[i].len; 12646 12647 /* 12648 * XXX Detect the situation where the RS-level I/O 12649 * redirector on the other side has already read the 12650 * data off of the AOR RS on this side, and 12651 * transferred it to remote (mirror) memory on the 12652 * other side. Since we already have the data in 12653 * memory here, we just need to use it. 12654 * 12655 * XXX KDM this can probably be removed once we 12656 * get the cache device code in and take the 12657 * current AOR implementation out. 12658 */ 12659 #ifdef NEEDTOPORT 12660 if ((remote_sglist[i].addr >= 12661 (void *)vtophys(softc->mirr->addr)) 12662 && (remote_sglist[i].addr < 12663 ((void *)vtophys(softc->mirr->addr) + 12664 CacheMirrorOffset))) { 12665 local_sglist[i].addr = remote_sglist[i].addr - 12666 CacheMirrorOffset; 12667 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 12668 CTL_FLAG_DATA_IN) 12669 io->io_hdr.flags |= CTL_FLAG_REDIR_DONE; 12670 } else { 12671 local_sglist[i].addr = remote_sglist[i].addr + 12672 CacheMirrorOffset; 12673 } 12674 #endif 12675 #if 0 12676 printf("%s: local %p, remote %p, len %d\n", 12677 __func__, local_sglist[i].addr, 12678 remote_sglist[i].addr, local_sglist[i].len); 12679 #endif 12680 } 12681 } else { 12682 uint32_t len_to_go; 12683 12684 /* 12685 * In this case, we don't have automatically allocated 12686 * memory for this I/O on this controller. This typically 12687 * happens with internal CTL I/O -- e.g. inquiry, mode 12688 * sense, etc. Anything coming from RAIDCore will have 12689 * a mirror area available. 12690 */ 12691 len_to_go = io->scsiio.kern_data_len; 12692 12693 /* 12694 * Clear the no datasync flag, we have to use malloced 12695 * buffers. 12696 */ 12697 io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC; 12698 12699 /* 12700 * The difficult thing here is that the size of the various 12701 * S/G segments may be different than the size from the 12702 * remote controller. That'll make it harder when DMAing 12703 * the data back to the other side. 
12704 */ 12705 for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) / 12706 sizeof(io->io_hdr.remote_sglist[0])) && 12707 (len_to_go > 0); i++) { 12708 local_sglist[i].len = MIN(len_to_go, 131072); 12709 CTL_SIZE_8B(local_dma_sglist[i].len, 12710 local_sglist[i].len); 12711 local_sglist[i].addr = 12712 malloc(local_dma_sglist[i].len, M_CTL,M_WAITOK); 12713 12714 local_dma_sglist[i].addr = local_sglist[i].addr; 12715 12716 if (local_sglist[i].addr == NULL) { 12717 int j; 12718 12719 printf("malloc failed for %zd bytes!", 12720 local_dma_sglist[i].len); 12721 for (j = 0; j < i; j++) { 12722 free(local_sglist[j].addr, M_CTL); 12723 } 12724 ctl_set_internal_failure(&io->scsiio, 12725 /*sks_valid*/ 1, 12726 /*retry_count*/ 4857); 12727 retval = 1; 12728 goto bailout_error; 12729 12730 } 12731 /* XXX KDM do we need a sync here? */ 12732 12733 len_to_go -= local_sglist[i].len; 12734 } 12735 /* 12736 * Reset the number of S/G entries accordingly. The 12737 * original number of S/G entries is available in 12738 * rem_sg_entries. 12739 */ 12740 io->scsiio.kern_sg_entries = i; 12741 12742 #if 0 12743 printf("%s: kern_sg_entries = %d\n", __func__, 12744 io->scsiio.kern_sg_entries); 12745 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12746 printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i, 12747 local_sglist[i].addr, local_sglist[i].len, 12748 local_dma_sglist[i].len); 12749 #endif 12750 } 12751 12752 12753 return (retval); 12754 12755 bailout_error: 12756 12757 ctl_send_datamove_done(io, /*have_lock*/ 0); 12758 12759 return (retval); 12760 } 12761 12762 static int 12763 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12764 ctl_ha_dt_cb callback) 12765 { 12766 struct ctl_ha_dt_req *rq; 12767 struct ctl_sg_entry *remote_sglist, *local_sglist; 12768 struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist; 12769 uint32_t local_used, remote_used, total_used; 12770 int retval; 12771 int i, j; 12772 12773 retval = 0; 12774 12775 rq = ctl_dt_req_alloc(); 12776 12777 /* 12778 * If we failed to allocate the request, and if the DMA didn't fail 12779 * anyway, set busy status. This is just a resource allocation 12780 * failure. 12781 */ 12782 if ((rq == NULL) 12783 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) 12784 ctl_set_busy(&io->scsiio); 12785 12786 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) { 12787 12788 if (rq != NULL) 12789 ctl_dt_req_free(rq); 12790 12791 /* 12792 * The data move failed. We need to return status back 12793 * to the other controller. No point in trying to DMA 12794 * data to the remote controller. 12795 */ 12796 12797 ctl_send_datamove_done(io, /*have_lock*/ 0); 12798 12799 retval = 1; 12800 12801 goto bailout; 12802 } 12803 12804 local_sglist = io->io_hdr.local_sglist; 12805 local_dma_sglist = io->io_hdr.local_dma_sglist; 12806 remote_sglist = io->io_hdr.remote_sglist; 12807 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 12808 local_used = 0; 12809 remote_used = 0; 12810 total_used = 0; 12811 12812 if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) { 12813 rq->ret = CTL_HA_STATUS_SUCCESS; 12814 rq->context = io; 12815 callback(rq); 12816 goto bailout; 12817 } 12818 12819 /* 12820 * Pull/push the data over the wire from/to the other controller. 12821 * This takes into account the possibility that the local and 12822 * remote sglists may not be identical in terms of the size of 12823 * the elements and the number of elements. 
	 *
	 * One fundamental assumption here is that the length allocated for
	 * both the local and remote sglists is identical.  Otherwise, we've
	 * essentially got a coding error of some sort.
	 */
	for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) {
		int isc_ret;
		uint32_t cur_len, dma_length;
		uint8_t *tmp_ptr;

		rq->id = CTL_HA_DATA_CTL;
		rq->command = command;
		rq->context = io;

		/*
		 * Both pointers should be aligned.  But it is possible
		 * that the allocation length is not.  They should both
		 * also have enough slack left over at the end, though,
		 * to round up to the next 8 byte boundary.
		 */
		cur_len = MIN(local_sglist[i].len - local_used,
			      remote_sglist[j].len - remote_used);

		/*
		 * In this case, we have a size issue and need to decrease
		 * the size, except in the case where we actually have less
		 * than 8 bytes left.  In that case, we need to increase
		 * the DMA length to get the last bit.
		 */
		if ((cur_len & 0x7) != 0) {
			if (cur_len > 0x7) {
				cur_len = cur_len - (cur_len & 0x7);
				dma_length = cur_len;
			} else {
				CTL_SIZE_8B(dma_length, cur_len);
			}

		} else
			dma_length = cur_len;

		/*
		 * If we had to allocate memory for this I/O, instead of using
		 * the non-cached mirror memory, we'll need to flush the cache
		 * before trying to DMA to the other controller.
		 *
		 * We could end up doing this multiple times for the same
		 * segment if we have a larger local segment than remote
		 * segment.  That shouldn't be an issue.
		 */
		if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
			/*
			 * XXX KDM use bus_dmamap_sync() here.
			 */
		}

		rq->size = dma_length;

		tmp_ptr = (uint8_t *)local_sglist[i].addr;
		tmp_ptr += local_used;

		/* Use physical addresses when talking to ISC hardware */
		if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) {
			/* XXX KDM use busdma */
#if 0
			rq->local = vtophys(tmp_ptr);
#endif
		} else
			rq->local = tmp_ptr;

		tmp_ptr = (uint8_t *)remote_sglist[j].addr;
		tmp_ptr += remote_used;
		rq->remote = tmp_ptr;

		rq->callback = NULL;

		local_used += cur_len;
		if (local_used >= local_sglist[i].len) {
			i++;
			local_used = 0;
		}

		remote_used += cur_len;
		if (remote_used >= remote_sglist[j].len) {
			j++;
			remote_used = 0;
		}
		total_used += cur_len;

		if (total_used >= io->scsiio.kern_data_len)
			rq->callback = callback;

		if ((rq->size & 0x7) != 0) {
			printf("%s: warning: size %d is not on 8b boundary\n",
			       __func__, rq->size);
		}
		if (((uintptr_t)rq->local & 0x7) != 0) {
			printf("%s: warning: local %p not on 8b boundary\n",
			       __func__, rq->local);
		}
		if (((uintptr_t)rq->remote & 0x7) != 0) {
			printf("%s: warning: remote %p not on 8b boundary\n",
			       __func__, rq->remote);
		}
#if 0
		printf("%s: %s: local %#x remote %#x size %d\n", __func__,
		       (command == CTL_HA_DT_CMD_WRITE) ?
"WRITE" : "READ", 12930 rq->local, rq->remote, rq->size); 12931 #endif 12932 12933 isc_ret = ctl_dt_single(rq); 12934 if (isc_ret == CTL_HA_STATUS_WAIT) 12935 continue; 12936 12937 if (isc_ret == CTL_HA_STATUS_DISCONNECT) { 12938 rq->ret = CTL_HA_STATUS_SUCCESS; 12939 } else { 12940 rq->ret = isc_ret; 12941 } 12942 callback(rq); 12943 goto bailout; 12944 } 12945 12946 bailout: 12947 return (retval); 12948 12949 } 12950 12951 static void 12952 ctl_datamove_remote_read(union ctl_io *io) 12953 { 12954 int retval; 12955 int i; 12956 12957 /* 12958 * This will send an error to the other controller in the case of a 12959 * failure. 12960 */ 12961 retval = ctl_datamove_remote_sgl_setup(io); 12962 if (retval != 0) 12963 return; 12964 12965 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12966 ctl_datamove_remote_read_cb); 12967 if ((retval != 0) 12968 && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) { 12969 /* 12970 * Make sure we free memory if there was an error.. The 12971 * ctl_datamove_remote_xfer() function will send the 12972 * datamove done message, or call the callback with an 12973 * error if there is a problem. 12974 */ 12975 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12976 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12977 } 12978 12979 return; 12980 } 12981 12982 /* 12983 * Process a datamove request from the other controller. This is used for 12984 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12985 * first. Once that is complete, the data gets DMAed into the remote 12986 * controller's memory. For reads, we DMA from the remote controller's 12987 * memory into our memory first, and then move it out to the FETD. 12988 */ 12989 static void 12990 ctl_datamove_remote(union ctl_io *io) 12991 { 12992 struct ctl_softc *softc; 12993 12994 softc = control_softc; 12995 12996 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 12997 12998 /* 12999 * Note that we look for an aborted I/O here, but don't do some of 13000 * the other checks that ctl_datamove() normally does. 13001 * We don't need to run the datamove delay code, since that should 13002 * have been done if need be on the other controller. 
13003 */ 13004 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 13005 printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__, 13006 io->scsiio.tag_num, io->io_hdr.nexus.initid.id, 13007 io->io_hdr.nexus.targ_port, 13008 io->io_hdr.nexus.targ_target.id, 13009 io->io_hdr.nexus.targ_lun); 13010 io->io_hdr.port_status = 31338; 13011 ctl_send_datamove_done(io, /*have_lock*/ 0); 13012 return; 13013 } 13014 13015 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) { 13016 ctl_datamove_remote_write(io); 13017 } else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){ 13018 ctl_datamove_remote_read(io); 13019 } else { 13020 union ctl_ha_msg msg; 13021 struct scsi_sense_data *sense; 13022 uint8_t sks[3]; 13023 int retry_count; 13024 13025 memset(&msg, 0, sizeof(msg)); 13026 13027 msg.hdr.msg_type = CTL_MSG_BAD_JUJU; 13028 msg.hdr.status = CTL_SCSI_ERROR; 13029 msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 13030 13031 retry_count = 4243; 13032 13033 sense = &msg.scsi.sense_data; 13034 sks[0] = SSD_SCS_VALID; 13035 sks[1] = (retry_count >> 8) & 0xff; 13036 sks[2] = retry_count & 0xff; 13037 13038 /* "Internal target failure" */ 13039 scsi_set_sense_data(sense, 13040 /*sense_format*/ SSD_TYPE_NONE, 13041 /*current_error*/ 1, 13042 /*sense_key*/ SSD_KEY_HARDWARE_ERROR, 13043 /*asc*/ 0x44, 13044 /*ascq*/ 0x00, 13045 /*type*/ SSD_ELEM_SKS, 13046 /*size*/ sizeof(sks), 13047 /*data*/ sks, 13048 SSD_ELEM_NONE); 13049 13050 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 13051 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 13052 ctl_failover_io(io, /*have_lock*/ 1); 13053 return; 13054 } 13055 13056 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) > 13057 CTL_HA_STATUS_SUCCESS) { 13058 /* XXX KDM what to do if this fails? */ 13059 } 13060 return; 13061 } 13062 13063 } 13064 13065 static int 13066 ctl_process_done(union ctl_io *io) 13067 { 13068 struct ctl_lun *lun; 13069 struct ctl_softc *softc = control_softc; 13070 void (*fe_done)(union ctl_io *io); 13071 uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port); 13072 13073 CTL_DEBUG_PRINT(("ctl_process_done\n")); 13074 13075 fe_done = softc->ctl_ports[targ_port]->fe_done; 13076 13077 #ifdef CTL_TIME_IO 13078 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 13079 char str[256]; 13080 char path_str[64]; 13081 struct sbuf sb; 13082 13083 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 13084 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 13085 13086 sbuf_cat(&sb, path_str); 13087 switch (io->io_hdr.io_type) { 13088 case CTL_IO_SCSI: 13089 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 13090 sbuf_printf(&sb, "\n"); 13091 sbuf_cat(&sb, path_str); 13092 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 13093 io->scsiio.tag_num, io->scsiio.tag_type); 13094 break; 13095 case CTL_IO_TASK: 13096 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 13097 "Tag Type: %d\n", io->taskio.task_action, 13098 io->taskio.tag_num, io->taskio.tag_type); 13099 break; 13100 default: 13101 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 13102 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 13103 break; 13104 } 13105 sbuf_cat(&sb, path_str); 13106 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 13107 (intmax_t)time_uptime - io->io_hdr.start_time); 13108 sbuf_finish(&sb); 13109 printf("%s", sbuf_data(&sb)); 13110 } 13111 #endif /* CTL_TIME_IO */ 13112 13113 switch (io->io_hdr.io_type) { 13114 case CTL_IO_SCSI: 13115 break; 13116 case CTL_IO_TASK: 13117 if (ctl_debug & CTL_DEBUG_INFO) 13118 ctl_io_error_print(io, 
NULL); 13119 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 13120 ctl_free_io(io); 13121 else 13122 fe_done(io); 13123 return (CTL_RETVAL_COMPLETE); 13124 default: 13125 panic("ctl_process_done: invalid io type %d\n", 13126 io->io_hdr.io_type); 13127 break; /* NOTREACHED */ 13128 } 13129 13130 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13131 if (lun == NULL) { 13132 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 13133 io->io_hdr.nexus.targ_mapped_lun)); 13134 goto bailout; 13135 } 13136 13137 mtx_lock(&lun->lun_lock); 13138 13139 /* 13140 * Check to see if we have any errors to inject here. We only 13141 * inject errors for commands that don't already have errors set. 13142 */ 13143 if ((STAILQ_FIRST(&lun->error_list) != NULL) && 13144 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && 13145 ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) 13146 ctl_inject_error(lun, io); 13147 13148 /* 13149 * XXX KDM how do we treat commands that aren't completed 13150 * successfully? 13151 * 13152 * XXX KDM should we also track I/O latency? 13153 */ 13154 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && 13155 io->io_hdr.io_type == CTL_IO_SCSI) { 13156 #ifdef CTL_TIME_IO 13157 struct bintime cur_bt; 13158 #endif 13159 int type; 13160 13161 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13162 CTL_FLAG_DATA_IN) 13163 type = CTL_STATS_READ; 13164 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13165 CTL_FLAG_DATA_OUT) 13166 type = CTL_STATS_WRITE; 13167 else 13168 type = CTL_STATS_NO_IO; 13169 13170 lun->stats.ports[targ_port].bytes[type] += 13171 io->scsiio.kern_total_len; 13172 lun->stats.ports[targ_port].operations[type]++; 13173 #ifdef CTL_TIME_IO 13174 bintime_add(&lun->stats.ports[targ_port].dma_time[type], 13175 &io->io_hdr.dma_bt); 13176 lun->stats.ports[targ_port].num_dmas[type] += 13177 io->io_hdr.num_dmas; 13178 getbintime(&cur_bt); 13179 bintime_sub(&cur_bt, &io->io_hdr.start_bt); 13180 bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt); 13181 #endif 13182 } 13183 13184 /* 13185 * Remove this from the OOA queue. 13186 */ 13187 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 13188 #ifdef CTL_TIME_IO 13189 if (TAILQ_EMPTY(&lun->ooa_queue)) 13190 lun->last_busy = getsbinuptime(); 13191 #endif 13192 13193 /* 13194 * Run through the blocked queue on this LUN and see if anything 13195 * has become unblocked, now that this transaction is done. 13196 */ 13197 ctl_check_blocked(lun); 13198 13199 /* 13200 * If the LUN has been invalidated, free it if there is nothing 13201 * left on its OOA queue. 13202 */ 13203 if ((lun->flags & CTL_LUN_INVALID) 13204 && TAILQ_EMPTY(&lun->ooa_queue)) { 13205 mtx_unlock(&lun->lun_lock); 13206 mtx_lock(&softc->ctl_lock); 13207 ctl_free_lun(lun); 13208 mtx_unlock(&softc->ctl_lock); 13209 } else 13210 mtx_unlock(&lun->lun_lock); 13211 13212 bailout: 13213 13214 /* 13215 * If this command has been aborted, make sure we set the status 13216 * properly. The FETD is responsible for freeing the I/O and doing 13217 * whatever it needs to do to clean up its state. 13218 */ 13219 if (io->io_hdr.flags & CTL_FLAG_ABORT) 13220 ctl_set_task_aborted(&io->scsiio); 13221 13222 /* 13223 * If enabled, print command error status. 13224 */ 13225 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS && 13226 (ctl_debug & CTL_DEBUG_INFO) != 0) 13227 ctl_io_error_print(io, NULL); 13228 13229 /* 13230 * Tell the FETD or the other shelf controller we're done with this 13231 * command. Note that only SCSI commands get to this point. 
Task
	 * management commands are completed above.
	 *
	 * We only send status to the other controller if we're in XFER
	 * mode.  In SER_ONLY mode, the I/O is done on the controller that
	 * received the I/O (from CTL's perspective), and so the status is
	 * generated there.
	 *
	 * XXX KDM if we hold the lock here, we could cause a deadlock
	 * if the frontend comes back in in this context to queue
	 * something.
	 */
	if ((softc->ha_mode == CTL_HA_MODE_XFER)
	 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
		union ctl_ha_msg msg;

		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.original_sc = io->io_hdr.original_sc;
		msg.hdr.nexus = io->io_hdr.nexus;
		msg.hdr.status = io->io_hdr.status;
		msg.scsi.scsi_status = io->scsiio.scsi_status;
		msg.scsi.tag_num = io->scsiio.tag_num;
		msg.scsi.tag_type = io->scsiio.tag_type;
		msg.scsi.sense_len = io->scsiio.sense_len;
		msg.scsi.sense_residual = io->scsiio.sense_residual;
		msg.scsi.residual = io->scsiio.residual;
		memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
		       sizeof(io->scsiio.sense_data));
		/*
		 * We copy this whether or not this is an I/O-related
		 * command.  Otherwise, we'd have to go and check to see
		 * whether it's a read/write command, and it really isn't
		 * worth it.
		 */
		memcpy(&msg.scsi.lbalen,
		       &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
		       sizeof(msg.scsi.lbalen));

		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
				    sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
			/* XXX do something here */
		}

		ctl_free_io(io);
	} else
		fe_done(io);

	return (CTL_RETVAL_COMPLETE);
}

#ifdef CTL_WITH_CA
/*
 * Front end should call this if it doesn't do autosense.  When the request
 * sense comes back in from the initiator, we'll dequeue this and send it.
 */
int
ctl_queue_sense(union ctl_io *io)
{
	struct ctl_lun *lun;
	struct ctl_port *port;
	struct ctl_softc *softc;
	uint32_t initidx, targ_lun;

	softc = control_softc;

	CTL_DEBUG_PRINT(("ctl_queue_sense\n"));

	/*
	 * LUN lookup will likely move to the ctl_work_thread() once we
	 * have our new queueing infrastructure (that doesn't put things on
	 * a per-LUN queue initially).  That is so that we can handle
	 * things like an INQUIRY to a LUN that we don't have enabled.  We
	 * can't deal with that right now.
	 */
	mtx_lock(&softc->ctl_lock);

	/*
	 * If we don't have a LUN for this, just toss the sense
	 * information.
	 */
	port = ctl_io_port(&io->io_hdr);
	targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
	if ((targ_lun < CTL_MAX_LUNS)
	 && (softc->ctl_luns[targ_lun] != NULL))
		lun = softc->ctl_luns[targ_lun];
	else
		goto bailout;

	initidx = ctl_get_initindex(&io->io_hdr.nexus);

	mtx_lock(&lun->lun_lock);
	/*
	 * Already have CA set for this LUN...toss the sense information.
13325 */ 13326 if (ctl_is_set(lun->have_ca, initidx)) { 13327 mtx_unlock(&lun->lun_lock); 13328 goto bailout; 13329 } 13330 13331 memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data, 13332 MIN(sizeof(lun->pending_sense[initidx]), 13333 sizeof(io->scsiio.sense_data))); 13334 ctl_set_mask(lun->have_ca, initidx); 13335 mtx_unlock(&lun->lun_lock); 13336 13337 bailout: 13338 mtx_unlock(&softc->ctl_lock); 13339 13340 ctl_free_io(io); 13341 13342 return (CTL_RETVAL_COMPLETE); 13343 } 13344 #endif 13345 13346 /* 13347 * Primary command inlet from frontend ports. All SCSI and task I/O 13348 * requests must go through this function. 13349 */ 13350 int 13351 ctl_queue(union ctl_io *io) 13352 { 13353 struct ctl_port *port; 13354 13355 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 13356 13357 #ifdef CTL_TIME_IO 13358 io->io_hdr.start_time = time_uptime; 13359 getbintime(&io->io_hdr.start_bt); 13360 #endif /* CTL_TIME_IO */ 13361 13362 /* Map FE-specific LUN ID into global one. */ 13363 port = ctl_io_port(&io->io_hdr); 13364 io->io_hdr.nexus.targ_mapped_lun = 13365 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13366 13367 switch (io->io_hdr.io_type) { 13368 case CTL_IO_SCSI: 13369 case CTL_IO_TASK: 13370 if (ctl_debug & CTL_DEBUG_CDB) 13371 ctl_io_print(io); 13372 ctl_enqueue_incoming(io); 13373 break; 13374 default: 13375 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 13376 return (EINVAL); 13377 } 13378 13379 return (CTL_RETVAL_COMPLETE); 13380 } 13381 13382 #ifdef CTL_IO_DELAY 13383 static void 13384 ctl_done_timer_wakeup(void *arg) 13385 { 13386 union ctl_io *io; 13387 13388 io = (union ctl_io *)arg; 13389 ctl_done(io); 13390 } 13391 #endif /* CTL_IO_DELAY */ 13392 13393 void 13394 ctl_done(union ctl_io *io) 13395 { 13396 13397 /* 13398 * Enable this to catch duplicate completion issues. 13399 */ 13400 #if 0 13401 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { 13402 printf("%s: type %d msg %d cdb %x iptl: " 13403 "%d:%d:%d:%d tag 0x%04x " 13404 "flag %#x status %x\n", 13405 __func__, 13406 io->io_hdr.io_type, 13407 io->io_hdr.msg_type, 13408 io->scsiio.cdb[0], 13409 io->io_hdr.nexus.initid.id, 13410 io->io_hdr.nexus.targ_port, 13411 io->io_hdr.nexus.targ_target.id, 13412 io->io_hdr.nexus.targ_lun, 13413 (io->io_hdr.io_type == 13414 CTL_IO_TASK) ? 13415 io->taskio.tag_num : 13416 io->scsiio.tag_num, 13417 io->io_hdr.flags, 13418 io->io_hdr.status); 13419 } else 13420 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; 13421 #endif 13422 13423 /* 13424 * This is an internal copy of an I/O, and should not go through 13425 * the normal done processing logic. 13426 */ 13427 if (io->io_hdr.flags & CTL_FLAG_INT_COPY) 13428 return; 13429 13430 /* 13431 * We need to send a msg to the serializing shelf to finish the IO 13432 * as well. We don't send a finish message to the other shelf if 13433 * this is a task management command. Task management commands 13434 * aren't serialized in the OOA queue, but rather just executed on 13435 * both shelf controllers for commands that originated on that 13436 * controller. 
13437 */ 13438 if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC) 13439 && (io->io_hdr.io_type != CTL_IO_TASK)) { 13440 union ctl_ha_msg msg_io; 13441 13442 msg_io.hdr.msg_type = CTL_MSG_FINISH_IO; 13443 msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc; 13444 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io, 13445 sizeof(msg_io), 0 ) != CTL_HA_STATUS_SUCCESS) { 13446 } 13447 /* continue on to finish IO */ 13448 } 13449 #ifdef CTL_IO_DELAY 13450 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 13451 struct ctl_lun *lun; 13452 13453 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13454 13455 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 13456 } else { 13457 struct ctl_lun *lun; 13458 13459 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13460 13461 if ((lun != NULL) 13462 && (lun->delay_info.done_delay > 0)) { 13463 struct callout *callout; 13464 13465 callout = (struct callout *)&io->io_hdr.timer_bytes; 13466 callout_init(callout, /*mpsafe*/ 1); 13467 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 13468 callout_reset(callout, 13469 lun->delay_info.done_delay * hz, 13470 ctl_done_timer_wakeup, io); 13471 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) 13472 lun->delay_info.done_delay = 0; 13473 return; 13474 } 13475 } 13476 #endif /* CTL_IO_DELAY */ 13477 13478 ctl_enqueue_done(io); 13479 } 13480 13481 int 13482 ctl_isc(struct ctl_scsiio *ctsio) 13483 { 13484 struct ctl_lun *lun; 13485 int retval; 13486 13487 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13488 13489 CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0])); 13490 13491 CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n")); 13492 13493 retval = lun->backend->data_submit((union ctl_io *)ctsio); 13494 13495 return (retval); 13496 } 13497 13498 13499 static void 13500 ctl_work_thread(void *arg) 13501 { 13502 struct ctl_thread *thr = (struct ctl_thread *)arg; 13503 struct ctl_softc *softc = thr->ctl_softc; 13504 union ctl_io *io; 13505 int retval; 13506 13507 CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); 13508 13509 for (;;) { 13510 retval = 0; 13511 13512 /* 13513 * We handle the queues in this order: 13514 * - ISC 13515 * - done queue (to free up resources, unblock other commands) 13516 * - RtR queue 13517 * - incoming queue 13518 * 13519 * If those queues are empty, we break out of the loop and 13520 * go to sleep. 
13521 */ 13522 mtx_lock(&thr->queue_lock); 13523 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue); 13524 if (io != NULL) { 13525 STAILQ_REMOVE_HEAD(&thr->isc_queue, links); 13526 mtx_unlock(&thr->queue_lock); 13527 ctl_handle_isc(io); 13528 continue; 13529 } 13530 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue); 13531 if (io != NULL) { 13532 STAILQ_REMOVE_HEAD(&thr->done_queue, links); 13533 /* clear any blocked commands, call fe_done */ 13534 mtx_unlock(&thr->queue_lock); 13535 retval = ctl_process_done(io); 13536 continue; 13537 } 13538 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue); 13539 if (io != NULL) { 13540 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); 13541 mtx_unlock(&thr->queue_lock); 13542 if (io->io_hdr.io_type == CTL_IO_TASK) 13543 ctl_run_task(io); 13544 else 13545 ctl_scsiio_precheck(softc, &io->scsiio); 13546 continue; 13547 } 13548 if (!ctl_pause_rtr) { 13549 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); 13550 if (io != NULL) { 13551 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); 13552 mtx_unlock(&thr->queue_lock); 13553 retval = ctl_scsiio(&io->scsiio); 13554 if (retval != CTL_RETVAL_COMPLETE) 13555 CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); 13556 continue; 13557 } 13558 } 13559 13560 /* Sleep until we have something to do. */ 13561 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0); 13562 } 13563 } 13564 13565 static void 13566 ctl_lun_thread(void *arg) 13567 { 13568 struct ctl_softc *softc = (struct ctl_softc *)arg; 13569 struct ctl_be_lun *be_lun; 13570 int retval; 13571 13572 CTL_DEBUG_PRINT(("ctl_lun_thread starting\n")); 13573 13574 for (;;) { 13575 retval = 0; 13576 mtx_lock(&softc->ctl_lock); 13577 be_lun = STAILQ_FIRST(&softc->pending_lun_queue); 13578 if (be_lun != NULL) { 13579 STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links); 13580 mtx_unlock(&softc->ctl_lock); 13581 ctl_create_lun(be_lun); 13582 continue; 13583 } 13584 13585 /* Sleep until we have something to do. 
*/ 13586 mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock, 13587 PDROP | PRIBIO, "-", 0); 13588 } 13589 } 13590 13591 static void 13592 ctl_thresh_thread(void *arg) 13593 { 13594 struct ctl_softc *softc = (struct ctl_softc *)arg; 13595 struct ctl_lun *lun; 13596 struct ctl_be_lun *be_lun; 13597 struct scsi_da_rw_recovery_page *rwpage; 13598 struct ctl_logical_block_provisioning_page *page; 13599 const char *attr; 13600 uint64_t thres, val; 13601 int i, e; 13602 13603 CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n")); 13604 13605 for (;;) { 13606 mtx_lock(&softc->ctl_lock); 13607 STAILQ_FOREACH(lun, &softc->lun_list, links) { 13608 be_lun = lun->be_lun; 13609 if ((lun->flags & CTL_LUN_DISABLED) || 13610 (lun->flags & CTL_LUN_OFFLINE) || 13611 lun->backend->lun_attr == NULL) 13612 continue; 13613 rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT]; 13614 if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0) 13615 continue; 13616 e = 0; 13617 page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT]; 13618 for (i = 0; i < CTL_NUM_LBP_THRESH; i++) { 13619 if ((page->descr[i].flags & SLBPPD_ENABLED) == 0) 13620 continue; 13621 thres = scsi_4btoul(page->descr[i].count); 13622 thres <<= CTL_LBP_EXPONENT; 13623 switch (page->descr[i].resource) { 13624 case 0x01: 13625 attr = "blocksavail"; 13626 break; 13627 case 0x02: 13628 attr = "blocksused"; 13629 break; 13630 case 0xf1: 13631 attr = "poolblocksavail"; 13632 break; 13633 case 0xf2: 13634 attr = "poolblocksused"; 13635 break; 13636 default: 13637 continue; 13638 } 13639 mtx_unlock(&softc->ctl_lock); // XXX 13640 val = lun->backend->lun_attr( 13641 lun->be_lun->be_lun, attr); 13642 mtx_lock(&softc->ctl_lock); 13643 if (val == UINT64_MAX) 13644 continue; 13645 if ((page->descr[i].flags & SLBPPD_ARMING_MASK) 13646 == SLBPPD_ARMING_INC) 13647 e |= (val >= thres); 13648 else 13649 e |= (val <= thres); 13650 } 13651 mtx_lock(&lun->lun_lock); 13652 if (e) { 13653 if (lun->lasttpt == 0 || 13654 time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) { 13655 lun->lasttpt = time_uptime; 13656 ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); 13657 } 13658 } else { 13659 lun->lasttpt = 0; 13660 ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); 13661 } 13662 mtx_unlock(&lun->lun_lock); 13663 } 13664 mtx_unlock(&softc->ctl_lock); 13665 pause("-", CTL_LBP_PERIOD * hz); 13666 } 13667 } 13668 13669 static void 13670 ctl_enqueue_incoming(union ctl_io *io) 13671 { 13672 struct ctl_softc *softc = control_softc; 13673 struct ctl_thread *thr; 13674 u_int idx; 13675 13676 idx = (io->io_hdr.nexus.targ_port * 127 + 13677 io->io_hdr.nexus.initid.id) % worker_threads; 13678 thr = &softc->threads[idx]; 13679 mtx_lock(&thr->queue_lock); 13680 STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links); 13681 mtx_unlock(&thr->queue_lock); 13682 wakeup(thr); 13683 } 13684 13685 static void 13686 ctl_enqueue_rtr(union ctl_io *io) 13687 { 13688 struct ctl_softc *softc = control_softc; 13689 struct ctl_thread *thr; 13690 13691 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13692 mtx_lock(&thr->queue_lock); 13693 STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links); 13694 mtx_unlock(&thr->queue_lock); 13695 wakeup(thr); 13696 } 13697 13698 static void 13699 ctl_enqueue_done(union ctl_io *io) 13700 { 13701 struct ctl_softc *softc = control_softc; 13702 struct ctl_thread *thr; 13703 13704 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13705 mtx_lock(&thr->queue_lock); 13706 STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links); 13707 
mtx_unlock(&thr->queue_lock);
13708 	wakeup(thr);
13709 }
13710
13711 #ifdef notyet
13712 static void
13713 ctl_enqueue_isc(union ctl_io *io)
13714 {
13715 	struct ctl_softc *softc = control_softc;
13716 	struct ctl_thread *thr;
13717
13718 	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13719 	mtx_lock(&thr->queue_lock);
13720 	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
13721 	mtx_unlock(&thr->queue_lock);
13722 	wakeup(thr);
13723 }
13724
13725 /* Initialization and failover */
13726
13727 void
13728 ctl_init_isc_msg(void)
13729 {
13730 	printf("CTL: Still calling this thing\n");
13731 }
13732
13733 /*
13734  * Init component
13735  *	Initializes component into configuration defined by bootMode
13736  *	(see hasc-sv.c)
13737  *	returns hasc_Status:
13738  *		OK
13739  *		ERROR - fatal error
13740  */
13741 static ctl_ha_comp_status
13742 ctl_isc_init(struct ctl_ha_component *c)
13743 {
13744 	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
13745
13746 	c->status = ret;
13747 	return ret;
13748 }
13749
13750 /* Start component
13751  *	Starts component in state requested.  If component starts successfully,
13752  *	it must set its own state to the requested state.
13753  *	When the requested state is HASC_STATE_HA, the component may refine it
13754  *	by adding _SLAVE or _MASTER flags.
13755  *	Currently allowed state transitions are:
13756  *	UNKNOWN->HA	- initial startup
13757  *	UNKNOWN->SINGLE	- initial startup when no partner detected
13758  *	HA->SINGLE	- failover
13759  * returns ctl_ha_comp_status:
13760  *		OK	- component successfully started in requested state
13761  *		FAILED	- could not start the requested state, failover may
13762  *			  be possible
13763  *		ERROR	- fatal error detected, no future startup possible
13764  */
13765 static ctl_ha_comp_status
13766 ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state)
13767 {
13768 	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
13769
13770 	printf("%s: go\n", __func__);
13771
13772 	// UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap)
13773 	if (c->state == CTL_HA_STATE_UNKNOWN) {
13774 		control_softc->is_single = 0;
13775 		if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
13776 		    != CTL_HA_STATUS_SUCCESS) {
13777 			printf("ctl_isc_start: ctl_ha_msg_create failed.\n");
13778 			ret = CTL_HA_COMP_STATUS_ERROR;
13779 		}
13780 	} else if (CTL_HA_STATE_IS_HA(c->state)
13781 		&& CTL_HA_STATE_IS_SINGLE(state)) {
13782 		// HA->SINGLE transition
13783 		ctl_failover();
13784 		control_softc->is_single = 1;
13785 	} else {
13786 		printf("ctl_isc_start: Invalid state transition %X->%X\n",
13787 			c->state, state);
13788 		ret = CTL_HA_COMP_STATUS_ERROR;
13789 	}
13790 	if (CTL_HA_STATE_IS_SINGLE(state))
13791 		control_softc->is_single = 1;
13792
13793 	c->state = state;
13794 	c->status = ret;
13795 	return ret;
13796 }
13797
13798 /*
13799  * Quiesce component
13800  * The component must clear any error conditions (set status to OK) and
13801  * prepare itself for another Start call.
13802  * returns ctl_ha_comp_status:
13803  *	OK
13804  *	ERROR
13805  */
13806 static ctl_ha_comp_status
13807 ctl_isc_quiesce(struct ctl_ha_component *c)
13808 {
13809 	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
13810
13811 	ctl_pause_rtr = 1;
13812 	c->status = ret;
13813 	return ret;
13814 }
13815
13816 struct ctl_ha_component ctl_ha_component_ctlisc =
13817 {
13818 	.name = "CTL ISC",
13819 	.state = CTL_HA_STATE_UNKNOWN,
13820 	.init = ctl_isc_init,
13821 	.start = ctl_isc_start,
13822 	.quiesce = ctl_isc_quiesce
13823 };
13824 #endif
13825
13826 /*
13827  * vim: ts=8
13828  */
13829
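/*
 * Editorial sketch (not compiled): the minimal life cycle a frontend port
 * driver is expected to follow against the entry points above: allocate
 * and fill a ctl_io, hand it to ctl_queue(), and receive the completion
 * through the port's fe_done() callback, invoked from ctl_process_done().
 * The function and parameter names below (example_fe_submit, my_port,
 * init_id, lun_id) are illustrative only, it is assumed that the port
 * keeps its I/O pool reference in ctl_pool_ref, and error handling and
 * data movement are omitted.
 */
#if 0
static void
example_fe_submit(struct ctl_port *my_port, uint8_t *cdb, int cdb_len,
		  uint32_t init_id, uint32_t lun_id)
{
	union ctl_io *io;

	/* Allocate and clear an I/O from the port's pool. */
	io = ctl_alloc_io(my_port->ctl_pool_ref);
	if (io == NULL)
		return;
	ctl_zero_io(io);

	/* Fill in the nexus and the CDB, then hand the I/O to CTL. */
	io->io_hdr.io_type = CTL_IO_SCSI;
	io->io_hdr.nexus.initid.id = init_id;
	io->io_hdr.nexus.targ_port = my_port->targ_port;
	io->io_hdr.nexus.targ_lun = lun_id;
	io->scsiio.tag_type = CTL_TAG_SIMPLE;
	io->scsiio.cdb_len = cdb_len;
	memcpy(io->scsiio.cdb, cdb, cdb_len);

	ctl_queue(io);

	/*
	 * The completion arrives later via the port's fe_done() callback;
	 * the frontend then releases the I/O with ctl_free_io().
	 */
}
#endif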