1 /*- 2 * Copyright (c) 2003-2009 Silicon Graphics International Corp. 3 * Copyright (c) 2012 The FreeBSD Foundation 4 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 17 * substantially similar to the "NO WARRANTY" disclaimer below 18 * ("Disclaimer") and any redistribution must be conditioned upon 19 * including a substantially similar Disclaimer requirement for further 20 * binary redistribution. 21 * 22 * NO WARRANTY 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGES. 34 * 35 * $Id$ 36 */ 37 /* 38 * CAM Target Layer, a SCSI device emulation subsystem. 39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42 43 #define _CTL_C 44 45 #include <sys/cdefs.h> 46 __FBSDID("$FreeBSD$"); 47 48 #include <sys/param.h> 49 #include <sys/systm.h> 50 #include <sys/ctype.h> 51 #include <sys/kernel.h> 52 #include <sys/types.h> 53 #include <sys/kthread.h> 54 #include <sys/bio.h> 55 #include <sys/fcntl.h> 56 #include <sys/lock.h> 57 #include <sys/module.h> 58 #include <sys/mutex.h> 59 #include <sys/condvar.h> 60 #include <sys/malloc.h> 61 #include <sys/conf.h> 62 #include <sys/ioccom.h> 63 #include <sys/queue.h> 64 #include <sys/sbuf.h> 65 #include <sys/smp.h> 66 #include <sys/endian.h> 67 #include <sys/sysctl.h> 68 #include <vm/uma.h> 69 70 #include <cam/cam.h> 71 #include <cam/scsi/scsi_all.h> 72 #include <cam/scsi/scsi_da.h> 73 #include <cam/ctl/ctl_io.h> 74 #include <cam/ctl/ctl.h> 75 #include <cam/ctl/ctl_frontend.h> 76 #include <cam/ctl/ctl_util.h> 77 #include <cam/ctl/ctl_backend.h> 78 #include <cam/ctl/ctl_ioctl.h> 79 #include <cam/ctl/ctl_ha.h> 80 #include <cam/ctl/ctl_private.h> 81 #include <cam/ctl/ctl_debug.h> 82 #include <cam/ctl/ctl_scsi_all.h> 83 #include <cam/ctl/ctl_error.h> 84 85 struct ctl_softc *control_softc = NULL; 86 87 /* 88 * Template mode pages. 89 */ 90 91 /* 92 * Note that these are default values only. The actual values will be 93 * filled in when the user does a mode sense. 
94 */ 95 const static struct copan_debugconf_subpage debugconf_page_default = { 96 DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ 97 DBGCNF_SUBPAGE_CODE, /* subpage */ 98 {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, 99 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ 100 DBGCNF_VERSION, /* page_version */ 101 {CTL_TIME_IO_DEFAULT_SECS>>8, 102 CTL_TIME_IO_DEFAULT_SECS>>0}, /* ctl_time_io_secs */ 103 }; 104 105 const static struct copan_debugconf_subpage debugconf_page_changeable = { 106 DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ 107 DBGCNF_SUBPAGE_CODE, /* subpage */ 108 {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, 109 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ 110 0, /* page_version */ 111 {0xff,0xff}, /* ctl_time_io_secs */ 112 }; 113 114 const static struct scsi_da_rw_recovery_page rw_er_page_default = { 115 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 116 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 117 /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, 118 /*read_retry_count*/0, 119 /*correction_span*/0, 120 /*head_offset_count*/0, 121 /*data_strobe_offset_cnt*/0, 122 /*byte8*/SMS_RWER_LBPERE, 123 /*write_retry_count*/0, 124 /*reserved2*/0, 125 /*recovery_time_limit*/{0, 0}, 126 }; 127 128 const static struct scsi_da_rw_recovery_page rw_er_page_changeable = { 129 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 130 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 131 /*byte3*/0, 132 /*read_retry_count*/0, 133 /*correction_span*/0, 134 /*head_offset_count*/0, 135 /*data_strobe_offset_cnt*/0, 136 /*byte8*/0, 137 /*write_retry_count*/0, 138 /*reserved2*/0, 139 /*recovery_time_limit*/{0, 0}, 140 }; 141 142 const static struct scsi_format_page format_page_default = { 143 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 144 /*page_length*/sizeof(struct scsi_format_page) - 2, 145 /*tracks_per_zone*/ {0, 0}, 146 /*alt_sectors_per_zone*/ {0, 0}, 147 /*alt_tracks_per_zone*/ {0, 0}, 148 /*alt_tracks_per_lun*/ {0, 0}, 149 /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, 150 CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, 151 /*bytes_per_sector*/ {0, 0}, 152 /*interleave*/ {0, 0}, 153 /*track_skew*/ {0, 0}, 154 /*cylinder_skew*/ {0, 0}, 155 /*flags*/ SFP_HSEC, 156 /*reserved*/ {0, 0, 0} 157 }; 158 159 const static struct scsi_format_page format_page_changeable = { 160 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 161 /*page_length*/sizeof(struct scsi_format_page) - 2, 162 /*tracks_per_zone*/ {0, 0}, 163 /*alt_sectors_per_zone*/ {0, 0}, 164 /*alt_tracks_per_zone*/ {0, 0}, 165 /*alt_tracks_per_lun*/ {0, 0}, 166 /*sectors_per_track*/ {0, 0}, 167 /*bytes_per_sector*/ {0, 0}, 168 /*interleave*/ {0, 0}, 169 /*track_skew*/ {0, 0}, 170 /*cylinder_skew*/ {0, 0}, 171 /*flags*/ 0, 172 /*reserved*/ {0, 0, 0} 173 }; 174 175 const static struct scsi_rigid_disk_page rigid_disk_page_default = { 176 /*page_code*/SMS_RIGID_DISK_PAGE, 177 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 178 /*cylinders*/ {0, 0, 0}, 179 /*heads*/ CTL_DEFAULT_HEADS, 180 /*start_write_precomp*/ {0, 0, 0}, 181 /*start_reduced_current*/ {0, 0, 0}, 182 /*step_rate*/ {0, 0}, 183 /*landing_zone_cylinder*/ {0, 0, 0}, 184 /*rpl*/ SRDP_RPL_DISABLED, 185 /*rotational_offset*/ 0, 186 /*reserved1*/ 0, 187 /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, 188 CTL_DEFAULT_ROTATION_RATE & 0xff}, 189 /*reserved2*/ {0, 0} 190 }; 191 192 const static struct scsi_rigid_disk_page rigid_disk_page_changeable = { 193 /*page_code*/SMS_RIGID_DISK_PAGE, 194 /*page_length*/sizeof(struct 
scsi_rigid_disk_page) - 2, 195 /*cylinders*/ {0, 0, 0}, 196 /*heads*/ 0, 197 /*start_write_precomp*/ {0, 0, 0}, 198 /*start_reduced_current*/ {0, 0, 0}, 199 /*step_rate*/ {0, 0}, 200 /*landing_zone_cylinder*/ {0, 0, 0}, 201 /*rpl*/ 0, 202 /*rotational_offset*/ 0, 203 /*reserved1*/ 0, 204 /*rotation_rate*/ {0, 0}, 205 /*reserved2*/ {0, 0} 206 }; 207 208 const static struct scsi_caching_page caching_page_default = { 209 /*page_code*/SMS_CACHING_PAGE, 210 /*page_length*/sizeof(struct scsi_caching_page) - 2, 211 /*flags1*/ SCP_DISC | SCP_WCE, 212 /*ret_priority*/ 0, 213 /*disable_pf_transfer_len*/ {0xff, 0xff}, 214 /*min_prefetch*/ {0, 0}, 215 /*max_prefetch*/ {0xff, 0xff}, 216 /*max_pf_ceiling*/ {0xff, 0xff}, 217 /*flags2*/ 0, 218 /*cache_segments*/ 0, 219 /*cache_seg_size*/ {0, 0}, 220 /*reserved*/ 0, 221 /*non_cache_seg_size*/ {0, 0, 0} 222 }; 223 224 const static struct scsi_caching_page caching_page_changeable = { 225 /*page_code*/SMS_CACHING_PAGE, 226 /*page_length*/sizeof(struct scsi_caching_page) - 2, 227 /*flags1*/ SCP_WCE | SCP_RCD, 228 /*ret_priority*/ 0, 229 /*disable_pf_transfer_len*/ {0, 0}, 230 /*min_prefetch*/ {0, 0}, 231 /*max_prefetch*/ {0, 0}, 232 /*max_pf_ceiling*/ {0, 0}, 233 /*flags2*/ 0, 234 /*cache_segments*/ 0, 235 /*cache_seg_size*/ {0, 0}, 236 /*reserved*/ 0, 237 /*non_cache_seg_size*/ {0, 0, 0} 238 }; 239 240 const static struct scsi_control_page control_page_default = { 241 /*page_code*/SMS_CONTROL_MODE_PAGE, 242 /*page_length*/sizeof(struct scsi_control_page) - 2, 243 /*rlec*/0, 244 /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, 245 /*eca_and_aen*/0, 246 /*flags4*/SCP_TAS, 247 /*aen_holdoff_period*/{0, 0}, 248 /*busy_timeout_period*/{0, 0}, 249 /*extended_selftest_completion_time*/{0, 0} 250 }; 251 252 const static struct scsi_control_page control_page_changeable = { 253 /*page_code*/SMS_CONTROL_MODE_PAGE, 254 /*page_length*/sizeof(struct scsi_control_page) - 2, 255 /*rlec*/SCP_DSENSE, 256 /*queue_flags*/SCP_QUEUE_ALG_MASK, 257 /*eca_and_aen*/SCP_SWP, 258 /*flags4*/0, 259 /*aen_holdoff_period*/{0, 0}, 260 /*busy_timeout_period*/{0, 0}, 261 /*extended_selftest_completion_time*/{0, 0} 262 }; 263 264 const static struct scsi_info_exceptions_page ie_page_default = { 265 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 266 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 267 /*info_flags*/SIEP_FLAGS_DEXCPT, 268 /*mrie*/0, 269 /*interval_timer*/{0, 0, 0, 0}, 270 /*report_count*/{0, 0, 0, 0} 271 }; 272 273 const static struct scsi_info_exceptions_page ie_page_changeable = { 274 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 275 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 276 /*info_flags*/0, 277 /*mrie*/0, 278 /*interval_timer*/{0, 0, 0, 0}, 279 /*report_count*/{0, 0, 0, 0} 280 }; 281 282 #define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4) 283 284 const static struct ctl_logical_block_provisioning_page lbp_page_default = {{ 285 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 286 /*subpage_code*/0x02, 287 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 288 /*flags*/0, 289 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 290 /*descr*/{}}, 291 {{/*flags*/0, 292 /*resource*/0x01, 293 /*reserved*/{0, 0}, 294 /*count*/{0, 0, 0, 0}}, 295 {/*flags*/0, 296 /*resource*/0x02, 297 /*reserved*/{0, 0}, 298 /*count*/{0, 0, 0, 0}}, 299 {/*flags*/0, 300 /*resource*/0xf1, 301 /*reserved*/{0, 0}, 302 /*count*/{0, 0, 0, 0}}, 303 {/*flags*/0, 304 /*resource*/0xf2, 305 /*reserved*/{0, 0}, 306 /*count*/{0, 0, 0, 0}} 307 } 308 }; 309 310 const static struct 
ctl_logical_block_provisioning_page lbp_page_changeable = {{ 311 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 312 /*subpage_code*/0x02, 313 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 314 /*flags*/0, 315 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 316 /*descr*/{}}, 317 {{/*flags*/0, 318 /*resource*/0, 319 /*reserved*/{0, 0}, 320 /*count*/{0, 0, 0, 0}}, 321 {/*flags*/0, 322 /*resource*/0, 323 /*reserved*/{0, 0}, 324 /*count*/{0, 0, 0, 0}}, 325 {/*flags*/0, 326 /*resource*/0, 327 /*reserved*/{0, 0}, 328 /*count*/{0, 0, 0, 0}}, 329 {/*flags*/0, 330 /*resource*/0, 331 /*reserved*/{0, 0}, 332 /*count*/{0, 0, 0, 0}} 333 } 334 }; 335 336 SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer"); 337 static int worker_threads = -1; 338 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, 339 &worker_threads, 1, "Number of worker threads"); 340 static int ctl_debug = CTL_DEBUG_NONE; 341 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN, 342 &ctl_debug, 0, "Enabled debug flags"); 343 344 /* 345 * Supported pages (0x00), Serial number (0x80), Device ID (0x83), 346 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87), 347 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0), 348 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2) 349 */ 350 #define SCSI_EVPD_NUM_SUPPORTED_PAGES 10 351 352 static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event, 353 int param); 354 static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); 355 static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest); 356 static int ctl_init(void); 357 void ctl_shutdown(void); 358 static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); 359 static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); 360 static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio); 361 static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 362 struct ctl_ooa *ooa_hdr, 363 struct ctl_ooa_entry *kern_entries); 364 static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 365 struct thread *td); 366 static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 367 struct ctl_be_lun *be_lun); 368 static int ctl_free_lun(struct ctl_lun *lun); 369 static void ctl_create_lun(struct ctl_be_lun *be_lun); 370 static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr); 371 372 static int ctl_do_mode_select(union ctl_io *io); 373 static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, 374 uint64_t res_key, uint64_t sa_res_key, 375 uint8_t type, uint32_t residx, 376 struct ctl_scsiio *ctsio, 377 struct scsi_per_res_out *cdb, 378 struct scsi_per_res_out_parms* param); 379 static void ctl_pro_preempt_other(struct ctl_lun *lun, 380 union ctl_ha_msg *msg); 381 static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg); 382 static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len); 383 static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len); 384 static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len); 385 static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len); 386 static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len); 387 static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, 388 int alloc_len); 389 static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, 390 int alloc_len); 391 static 
int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len); 392 static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len); 393 static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio); 394 static int ctl_inquiry_std(struct ctl_scsiio *ctsio); 395 static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len); 396 static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, 397 bool seq); 398 static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2); 399 static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, 400 union ctl_io *pending_io, union ctl_io *ooa_io); 401 static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 402 union ctl_io *starting_io); 403 static int ctl_check_blocked(struct ctl_lun *lun); 404 static int ctl_scsiio_lun_check(struct ctl_lun *lun, 405 const struct ctl_cmd_entry *entry, 406 struct ctl_scsiio *ctsio); 407 static void ctl_failover_lun(struct ctl_lun *lun); 408 static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, 409 struct ctl_scsiio *ctsio); 410 static int ctl_scsiio(struct ctl_scsiio *ctsio); 411 412 static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io); 413 static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, 414 ctl_ua_type ua_type); 415 static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, 416 ctl_ua_type ua_type); 417 static int ctl_abort_task(union ctl_io *io); 418 static int ctl_abort_task_set(union ctl_io *io); 419 static int ctl_i_t_nexus_reset(union ctl_io *io); 420 static void ctl_run_task(union ctl_io *io); 421 #ifdef CTL_IO_DELAY 422 static void ctl_datamove_timer_wakeup(void *arg); 423 static void ctl_done_timer_wakeup(void *arg); 424 #endif /* CTL_IO_DELAY */ 425 426 static void ctl_send_datamove_done(union ctl_io *io, int have_lock); 427 static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq); 428 static int ctl_datamove_remote_dm_write_cb(union ctl_io *io); 429 static void ctl_datamove_remote_write(union ctl_io *io); 430 static int ctl_datamove_remote_dm_read_cb(union ctl_io *io); 431 static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq); 432 static int ctl_datamove_remote_sgl_setup(union ctl_io *io); 433 static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 434 ctl_ha_dt_cb callback); 435 static void ctl_datamove_remote_read(union ctl_io *io); 436 static void ctl_datamove_remote(union ctl_io *io); 437 static int ctl_process_done(union ctl_io *io); 438 static void ctl_lun_thread(void *arg); 439 static void ctl_thresh_thread(void *arg); 440 static void ctl_work_thread(void *arg); 441 static void ctl_enqueue_incoming(union ctl_io *io); 442 static void ctl_enqueue_rtr(union ctl_io *io); 443 static void ctl_enqueue_done(union ctl_io *io); 444 static void ctl_enqueue_isc(union ctl_io *io); 445 static const struct ctl_cmd_entry * 446 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); 447 static const struct ctl_cmd_entry * 448 ctl_validate_command(struct ctl_scsiio *ctsio); 449 static int ctl_cmd_applicable(uint8_t lun_type, 450 const struct ctl_cmd_entry *entry); 451 452 static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx); 453 static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx); 454 static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx); 455 static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key); 456 457 /* 458 * Load the serialization table. 
This isn't very pretty, but is probably 459 * the easiest way to do it. 460 */ 461 #include "ctl_ser_table.c" 462 463 /* 464 * We only need to define open, close and ioctl routines for this driver. 465 */ 466 static struct cdevsw ctl_cdevsw = { 467 .d_version = D_VERSION, 468 .d_flags = 0, 469 .d_open = ctl_open, 470 .d_close = ctl_close, 471 .d_ioctl = ctl_ioctl, 472 .d_name = "ctl", 473 }; 474 475 476 MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL"); 477 478 static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *); 479 480 static moduledata_t ctl_moduledata = { 481 "ctl", 482 ctl_module_event_handler, 483 NULL 484 }; 485 486 DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); 487 MODULE_VERSION(ctl, 1); 488 489 static struct ctl_frontend ha_frontend = 490 { 491 .name = "ha", 492 }; 493 494 static void 495 ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, 496 union ctl_ha_msg *msg_info) 497 { 498 struct ctl_scsiio *ctsio; 499 500 if (msg_info->hdr.original_sc == NULL) { 501 printf("%s: original_sc == NULL!\n", __func__); 502 /* XXX KDM now what? */ 503 return; 504 } 505 506 ctsio = &msg_info->hdr.original_sc->scsiio; 507 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 508 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 509 ctsio->io_hdr.status = msg_info->hdr.status; 510 ctsio->scsi_status = msg_info->scsi.scsi_status; 511 ctsio->sense_len = msg_info->scsi.sense_len; 512 ctsio->sense_residual = msg_info->scsi.sense_residual; 513 ctsio->residual = msg_info->scsi.residual; 514 memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, 515 msg_info->scsi.sense_len); 516 memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 517 &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen)); 518 ctl_enqueue_isc((union ctl_io *)ctsio); 519 } 520 521 static void 522 ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, 523 union ctl_ha_msg *msg_info) 524 { 525 struct ctl_scsiio *ctsio; 526 527 if (msg_info->hdr.serializing_sc == NULL) { 528 printf("%s: serializing_sc == NULL!\n", __func__); 529 /* XXX KDM now what? 
*/ 530 return; 531 } 532 533 ctsio = &msg_info->hdr.serializing_sc->scsiio; 534 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 535 ctl_enqueue_isc((union ctl_io *)ctsio); 536 } 537 538 void 539 ctl_isc_announce_lun(struct ctl_lun *lun) 540 { 541 struct ctl_softc *softc = lun->ctl_softc; 542 union ctl_ha_msg *msg; 543 struct ctl_ha_msg_lun_pr_key pr_key; 544 int i, k; 545 546 if (softc->ha_link != CTL_HA_LINK_ONLINE) 547 return; 548 mtx_lock(&lun->lun_lock); 549 i = sizeof(msg->lun); 550 if (lun->lun_devid) 551 i += lun->lun_devid->len; 552 i += sizeof(pr_key) * lun->pr_key_count; 553 alloc: 554 mtx_unlock(&lun->lun_lock); 555 msg = malloc(i, M_CTL, M_WAITOK); 556 mtx_lock(&lun->lun_lock); 557 k = sizeof(msg->lun); 558 if (lun->lun_devid) 559 k += lun->lun_devid->len; 560 k += sizeof(pr_key) * lun->pr_key_count; 561 if (i < k) { 562 free(msg, M_CTL); 563 i = k; 564 goto alloc; 565 } 566 bzero(&msg->lun, sizeof(msg->lun)); 567 msg->hdr.msg_type = CTL_MSG_LUN_SYNC; 568 msg->hdr.nexus.targ_lun = lun->lun; 569 msg->hdr.nexus.targ_mapped_lun = lun->lun; 570 msg->lun.flags = lun->flags; 571 msg->lun.pr_generation = lun->PRGeneration; 572 msg->lun.pr_res_idx = lun->pr_res_idx; 573 msg->lun.pr_res_type = lun->res_type; 574 msg->lun.pr_key_count = lun->pr_key_count; 575 i = 0; 576 if (lun->lun_devid) { 577 msg->lun.lun_devid_len = lun->lun_devid->len; 578 memcpy(&msg->lun.data[i], lun->lun_devid->data, 579 msg->lun.lun_devid_len); 580 i += msg->lun.lun_devid_len; 581 } 582 for (k = 0; k < CTL_MAX_INITIATORS; k++) { 583 if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0) 584 continue; 585 pr_key.pr_iid = k; 586 memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key)); 587 i += sizeof(pr_key); 588 } 589 mtx_unlock(&lun->lun_lock); 590 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 591 M_WAITOK); 592 free(msg, M_CTL); 593 } 594 595 void 596 ctl_isc_announce_port(struct ctl_port *port) 597 { 598 struct ctl_softc *softc = control_softc; 599 union ctl_ha_msg *msg; 600 int i; 601 602 if (port->targ_port < softc->port_min || 603 port->targ_port >= softc->port_max || 604 softc->ha_link != CTL_HA_LINK_ONLINE) 605 return; 606 i = sizeof(msg->port) + strlen(port->port_name) + 1; 607 if (port->lun_map) 608 i += sizeof(uint32_t) * CTL_MAX_LUNS; 609 if (port->port_devid) 610 i += port->port_devid->len; 611 if (port->target_devid) 612 i += port->target_devid->len; 613 msg = malloc(i, M_CTL, M_WAITOK); 614 bzero(&msg->port, sizeof(msg->port)); 615 msg->hdr.msg_type = CTL_MSG_PORT_SYNC; 616 msg->hdr.nexus.targ_port = port->targ_port; 617 msg->port.port_type = port->port_type; 618 msg->port.physical_port = port->physical_port; 619 msg->port.virtual_port = port->virtual_port; 620 msg->port.status = port->status; 621 i = 0; 622 msg->port.name_len = sprintf(&msg->port.data[i], 623 "%d:%s", softc->ha_id, port->port_name) + 1; 624 i += msg->port.name_len; 625 if (port->lun_map) { 626 msg->port.lun_map_len = sizeof(uint32_t) * CTL_MAX_LUNS; 627 memcpy(&msg->port.data[i], port->lun_map, 628 msg->port.lun_map_len); 629 i += msg->port.lun_map_len; 630 } 631 if (port->port_devid) { 632 msg->port.port_devid_len = port->port_devid->len; 633 memcpy(&msg->port.data[i], port->port_devid->data, 634 msg->port.port_devid_len); 635 i += msg->port.port_devid_len; 636 } 637 if (port->target_devid) { 638 msg->port.target_devid_len = port->target_devid->len; 639 memcpy(&msg->port.data[i], port->target_devid->data, 640 msg->port.target_devid_len); 641 i += msg->port.target_devid_len; 642 } 643 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 
&msg->port, sizeof(msg->port) + i, 644 M_WAITOK); 645 free(msg, M_CTL); 646 } 647 648 static void 649 ctl_isc_ha_link_up(struct ctl_softc *softc) 650 { 651 struct ctl_port *port; 652 struct ctl_lun *lun; 653 654 STAILQ_FOREACH(port, &softc->port_list, links) 655 ctl_isc_announce_port(port); 656 STAILQ_FOREACH(lun, &softc->lun_list, links) 657 ctl_isc_announce_lun(lun); 658 } 659 660 static void 661 ctl_isc_ha_link_down(struct ctl_softc *softc) 662 { 663 struct ctl_port *port; 664 struct ctl_lun *lun; 665 union ctl_io *io; 666 667 mtx_lock(&softc->ctl_lock); 668 STAILQ_FOREACH(lun, &softc->lun_list, links) { 669 mtx_lock(&lun->lun_lock); 670 if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) { 671 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; 672 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 673 } 674 mtx_unlock(&lun->lun_lock); 675 676 mtx_unlock(&softc->ctl_lock); 677 io = ctl_alloc_io(softc->othersc_pool); 678 mtx_lock(&softc->ctl_lock); 679 ctl_zero_io(io); 680 io->io_hdr.msg_type = CTL_MSG_FAILOVER; 681 io->io_hdr.nexus.targ_mapped_lun = lun->lun; 682 ctl_enqueue_isc(io); 683 } 684 685 STAILQ_FOREACH(port, &softc->port_list, links) { 686 if (port->targ_port >= softc->port_min && 687 port->targ_port < softc->port_max) 688 continue; 689 port->status &= ~CTL_PORT_STATUS_ONLINE; 690 } 691 mtx_unlock(&softc->ctl_lock); 692 } 693 694 static void 695 ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 696 { 697 struct ctl_lun *lun; 698 uint32_t iid = ctl_get_initindex(&msg->hdr.nexus); 699 700 mtx_lock(&softc->ctl_lock); 701 if (msg->hdr.nexus.targ_lun < CTL_MAX_LUNS && 702 (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) != NULL) { 703 mtx_lock(&lun->lun_lock); 704 mtx_unlock(&softc->ctl_lock); 705 if (msg->ua.ua_all) { 706 if (msg->ua.ua_set) 707 ctl_est_ua_all(lun, iid, msg->ua.ua_type); 708 else 709 ctl_clr_ua_all(lun, iid, msg->ua.ua_type); 710 } else { 711 if (msg->ua.ua_set) 712 ctl_est_ua(lun, iid, msg->ua.ua_type); 713 else 714 ctl_clr_ua(lun, iid, msg->ua.ua_type); 715 } 716 mtx_unlock(&lun->lun_lock); 717 } else 718 mtx_unlock(&softc->ctl_lock); 719 } 720 721 static void 722 ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 723 { 724 struct ctl_lun *lun; 725 struct ctl_ha_msg_lun_pr_key pr_key; 726 int i, k; 727 ctl_lun_flags oflags; 728 uint32_t targ_lun; 729 730 targ_lun = msg->hdr.nexus.targ_mapped_lun; 731 mtx_lock(&softc->ctl_lock); 732 if ((targ_lun >= CTL_MAX_LUNS) || 733 ((lun = softc->ctl_luns[targ_lun]) == NULL)) { 734 mtx_unlock(&softc->ctl_lock); 735 return; 736 } 737 mtx_lock(&lun->lun_lock); 738 mtx_unlock(&softc->ctl_lock); 739 if (lun->flags & CTL_LUN_DISABLED) { 740 mtx_unlock(&lun->lun_lock); 741 return; 742 } 743 i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0; 744 if (msg->lun.lun_devid_len != i || (i > 0 && 745 memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) { 746 mtx_unlock(&lun->lun_lock); 747 printf("%s: Received conflicting HA LUN %d\n", 748 __func__, msg->hdr.nexus.targ_lun); 749 return; 750 } else { 751 /* Record whether peer is primary. 
*/ 752 oflags = lun->flags; 753 if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) && 754 (msg->lun.flags & CTL_LUN_DISABLED) == 0) 755 lun->flags |= CTL_LUN_PEER_SC_PRIMARY; 756 else 757 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; 758 if (oflags != lun->flags) 759 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 760 761 /* If peer is primary and we are not -- use data */ 762 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 763 (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) { 764 lun->PRGeneration = msg->lun.pr_generation; 765 lun->pr_res_idx = msg->lun.pr_res_idx; 766 lun->res_type = msg->lun.pr_res_type; 767 lun->pr_key_count = msg->lun.pr_key_count; 768 for (k = 0; k < CTL_MAX_INITIATORS; k++) 769 ctl_clr_prkey(lun, k); 770 for (k = 0; k < msg->lun.pr_key_count; k++) { 771 memcpy(&pr_key, &msg->lun.data[i], 772 sizeof(pr_key)); 773 ctl_alloc_prkey(lun, pr_key.pr_iid); 774 ctl_set_prkey(lun, pr_key.pr_iid, 775 pr_key.pr_key); 776 i += sizeof(pr_key); 777 } 778 } 779 780 mtx_unlock(&lun->lun_lock); 781 CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n", 782 __func__, msg->hdr.nexus.targ_lun, 783 (msg->lun.flags & CTL_LUN_PRIMARY_SC) ? 784 "primary" : "secondary")); 785 786 /* If we are primary but peer doesn't know -- notify */ 787 if ((lun->flags & CTL_LUN_PRIMARY_SC) && 788 (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0) 789 ctl_isc_announce_lun(lun); 790 } 791 } 792 793 static void 794 ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 795 { 796 struct ctl_port *port; 797 struct ctl_lun *lun; 798 int i, new; 799 800 port = softc->ctl_ports[msg->hdr.nexus.targ_port]; 801 if (port == NULL) { 802 CTL_DEBUG_PRINT(("%s: New port %d\n", __func__, 803 msg->hdr.nexus.targ_port)); 804 new = 1; 805 port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO); 806 port->frontend = &ha_frontend; 807 port->targ_port = msg->hdr.nexus.targ_port; 808 } else if (port->frontend == &ha_frontend) { 809 CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__, 810 msg->hdr.nexus.targ_port)); 811 new = 0; 812 } else { 813 printf("%s: Received conflicting HA port %d\n", 814 __func__, msg->hdr.nexus.targ_port); 815 return; 816 } 817 port->port_type = msg->port.port_type; 818 port->physical_port = msg->port.physical_port; 819 port->virtual_port = msg->port.virtual_port; 820 port->status = msg->port.status; 821 i = 0; 822 free(port->port_name, M_CTL); 823 port->port_name = strndup(&msg->port.data[i], msg->port.name_len, 824 M_CTL); 825 i += msg->port.name_len; 826 if (msg->port.lun_map_len != 0) { 827 if (port->lun_map == NULL) 828 port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS, 829 M_CTL, M_WAITOK); 830 memcpy(port->lun_map, &msg->port.data[i], 831 sizeof(uint32_t) * CTL_MAX_LUNS); 832 i += msg->port.lun_map_len; 833 } else { 834 free(port->lun_map, M_CTL); 835 port->lun_map = NULL; 836 } 837 if (msg->port.port_devid_len != 0) { 838 if (port->port_devid == NULL || 839 port->port_devid->len != msg->port.port_devid_len) { 840 free(port->port_devid, M_CTL); 841 port->port_devid = malloc(sizeof(struct ctl_devid) + 842 msg->port.port_devid_len, M_CTL, M_WAITOK); 843 } 844 memcpy(port->port_devid->data, &msg->port.data[i], 845 msg->port.port_devid_len); 846 port->port_devid->len = msg->port.port_devid_len; 847 i += msg->port.port_devid_len; 848 } else { 849 free(port->port_devid, M_CTL); 850 port->port_devid = NULL; 851 } 852 if (msg->port.target_devid_len != 0) { 853 if (port->target_devid == NULL || 854 port->target_devid->len != msg->port.target_devid_len) { 855 free(port->target_devid, M_CTL); 856 
port->target_devid = malloc(sizeof(struct ctl_devid) + 857 msg->port.target_devid_len, M_CTL, M_WAITOK); 858 } 859 memcpy(port->target_devid->data, &msg->port.data[i], 860 msg->port.target_devid_len); 861 port->target_devid->len = msg->port.target_devid_len; 862 i += msg->port.target_devid_len; 863 } else { 864 free(port->port_devid, M_CTL); 865 port->port_devid = NULL; 866 } 867 if (new) { 868 if (ctl_port_register(port) != 0) { 869 printf("%s: ctl_port_register() failed with error\n", 870 __func__); 871 } 872 } 873 mtx_lock(&softc->ctl_lock); 874 STAILQ_FOREACH(lun, &softc->lun_list, links) { 875 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 876 continue; 877 mtx_lock(&lun->lun_lock); 878 ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE); 879 mtx_unlock(&lun->lun_lock); 880 } 881 mtx_unlock(&softc->ctl_lock); 882 } 883 884 /* 885 * ISC (Inter Shelf Communication) event handler. Events from the HA 886 * subsystem come in here. 887 */ 888 static void 889 ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) 890 { 891 struct ctl_softc *softc; 892 union ctl_io *io; 893 struct ctl_prio *presio; 894 ctl_ha_status isc_status; 895 896 softc = control_softc; 897 CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event)); 898 if (event == CTL_HA_EVT_MSG_RECV) { 899 union ctl_ha_msg *msg, msgbuf; 900 901 if (param > sizeof(msgbuf)) 902 msg = malloc(param, M_CTL, M_WAITOK); 903 else 904 msg = &msgbuf; 905 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param, 906 M_WAITOK); 907 if (isc_status != CTL_HA_STATUS_SUCCESS) { 908 printf("%s: Error receiving message: %d\n", 909 __func__, isc_status); 910 if (msg != &msgbuf) 911 free(msg, M_CTL); 912 return; 913 } 914 915 CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type)); 916 switch (msg->hdr.msg_type) { 917 case CTL_MSG_SERIALIZE: 918 io = ctl_alloc_io(softc->othersc_pool); 919 ctl_zero_io(io); 920 // populate ctsio from msg 921 io->io_hdr.io_type = CTL_IO_SCSI; 922 io->io_hdr.msg_type = CTL_MSG_SERIALIZE; 923 io->io_hdr.original_sc = msg->hdr.original_sc; 924 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | 925 CTL_FLAG_IO_ACTIVE; 926 /* 927 * If we're in serialization-only mode, we don't 928 * want to go through full done processing. Thus 929 * the COPY flag. 930 * 931 * XXX KDM add another flag that is more specific. 
932 */ 933 if (softc->ha_mode != CTL_HA_MODE_XFER) 934 io->io_hdr.flags |= CTL_FLAG_INT_COPY; 935 io->io_hdr.nexus = msg->hdr.nexus; 936 #if 0 937 printf("port %u, iid %u, lun %u\n", 938 io->io_hdr.nexus.targ_port, 939 io->io_hdr.nexus.initid, 940 io->io_hdr.nexus.targ_lun); 941 #endif 942 io->scsiio.tag_num = msg->scsi.tag_num; 943 io->scsiio.tag_type = msg->scsi.tag_type; 944 #ifdef CTL_TIME_IO 945 io->io_hdr.start_time = time_uptime; 946 getbintime(&io->io_hdr.start_bt); 947 #endif /* CTL_TIME_IO */ 948 io->scsiio.cdb_len = msg->scsi.cdb_len; 949 memcpy(io->scsiio.cdb, msg->scsi.cdb, 950 CTL_MAX_CDBLEN); 951 if (softc->ha_mode == CTL_HA_MODE_XFER) { 952 const struct ctl_cmd_entry *entry; 953 954 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 955 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 956 io->io_hdr.flags |= 957 entry->flags & CTL_FLAG_DATA_MASK; 958 } 959 ctl_enqueue_isc(io); 960 break; 961 962 /* Performed on the Originating SC, XFER mode only */ 963 case CTL_MSG_DATAMOVE: { 964 struct ctl_sg_entry *sgl; 965 int i, j; 966 967 io = msg->hdr.original_sc; 968 if (io == NULL) { 969 printf("%s: original_sc == NULL!\n", __func__); 970 /* XXX KDM do something here */ 971 break; 972 } 973 io->io_hdr.msg_type = CTL_MSG_DATAMOVE; 974 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 975 /* 976 * Keep track of this, we need to send it back over 977 * when the datamove is complete. 978 */ 979 io->io_hdr.serializing_sc = msg->hdr.serializing_sc; 980 981 if (msg->dt.sg_sequence == 0) { 982 i = msg->dt.kern_sg_entries + 983 io->scsiio.kern_data_len / 984 CTL_HA_DATAMOVE_SEGMENT + 1; 985 sgl = malloc(sizeof(*sgl) * i, M_CTL, 986 M_WAITOK | M_ZERO); 987 io->io_hdr.remote_sglist = sgl; 988 io->io_hdr.local_sglist = 989 &sgl[msg->dt.kern_sg_entries]; 990 991 io->scsiio.kern_data_ptr = (uint8_t *)sgl; 992 993 io->scsiio.kern_sg_entries = 994 msg->dt.kern_sg_entries; 995 io->scsiio.rem_sg_entries = 996 msg->dt.kern_sg_entries; 997 io->scsiio.kern_data_len = 998 msg->dt.kern_data_len; 999 io->scsiio.kern_total_len = 1000 msg->dt.kern_total_len; 1001 io->scsiio.kern_data_resid = 1002 msg->dt.kern_data_resid; 1003 io->scsiio.kern_rel_offset = 1004 msg->dt.kern_rel_offset; 1005 io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR; 1006 io->io_hdr.flags |= msg->dt.flags & 1007 CTL_FLAG_BUS_ADDR; 1008 } else 1009 sgl = (struct ctl_sg_entry *) 1010 io->scsiio.kern_data_ptr; 1011 1012 for (i = msg->dt.sent_sg_entries, j = 0; 1013 i < (msg->dt.sent_sg_entries + 1014 msg->dt.cur_sg_entries); i++, j++) { 1015 sgl[i].addr = msg->dt.sg_list[j].addr; 1016 sgl[i].len = msg->dt.sg_list[j].len; 1017 1018 #if 0 1019 printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n", 1020 __func__, 1021 msg->dt.sg_list[j].addr, 1022 msg->dt.sg_list[j].len, 1023 sgl[i].addr, sgl[i].len, j, i); 1024 #endif 1025 } 1026 1027 /* 1028 * If this is the last piece of the I/O, we've got 1029 * the full S/G list. Queue processing in the thread. 1030 * Otherwise wait for the next piece. 1031 */ 1032 if (msg->dt.sg_last != 0) 1033 ctl_enqueue_isc(io); 1034 break; 1035 } 1036 /* Performed on the Serializing (primary) SC, XFER mode only */ 1037 case CTL_MSG_DATAMOVE_DONE: { 1038 if (msg->hdr.serializing_sc == NULL) { 1039 printf("%s: serializing_sc == NULL!\n", 1040 __func__); 1041 /* XXX KDM now what? */ 1042 break; 1043 } 1044 /* 1045 * We grab the sense information here in case 1046 * there was a failure, so we can return status 1047 * back to the initiator. 
1048 */ 1049 io = msg->hdr.serializing_sc; 1050 io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 1051 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1052 io->io_hdr.port_status = msg->scsi.fetd_status; 1053 io->scsiio.residual = msg->scsi.residual; 1054 if (msg->hdr.status != CTL_STATUS_NONE) { 1055 io->io_hdr.status = msg->hdr.status; 1056 io->scsiio.scsi_status = msg->scsi.scsi_status; 1057 io->scsiio.sense_len = msg->scsi.sense_len; 1058 io->scsiio.sense_residual =msg->scsi.sense_residual; 1059 memcpy(&io->scsiio.sense_data, 1060 &msg->scsi.sense_data, 1061 msg->scsi.sense_len); 1062 } 1063 ctl_enqueue_isc(io); 1064 break; 1065 } 1066 1067 /* Preformed on Originating SC, SER_ONLY mode */ 1068 case CTL_MSG_R2R: 1069 io = msg->hdr.original_sc; 1070 if (io == NULL) { 1071 printf("%s: original_sc == NULL!\n", 1072 __func__); 1073 break; 1074 } 1075 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1076 io->io_hdr.msg_type = CTL_MSG_R2R; 1077 io->io_hdr.serializing_sc = msg->hdr.serializing_sc; 1078 ctl_enqueue_isc(io); 1079 break; 1080 1081 /* 1082 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY 1083 * mode. 1084 * Performed on the Originating (i.e. secondary) SC in XFER 1085 * mode 1086 */ 1087 case CTL_MSG_FINISH_IO: 1088 if (softc->ha_mode == CTL_HA_MODE_XFER) 1089 ctl_isc_handler_finish_xfer(softc, msg); 1090 else 1091 ctl_isc_handler_finish_ser_only(softc, msg); 1092 break; 1093 1094 /* Preformed on Originating SC */ 1095 case CTL_MSG_BAD_JUJU: 1096 io = msg->hdr.original_sc; 1097 if (io == NULL) { 1098 printf("%s: Bad JUJU!, original_sc is NULL!\n", 1099 __func__); 1100 break; 1101 } 1102 ctl_copy_sense_data(msg, io); 1103 /* 1104 * IO should have already been cleaned up on other 1105 * SC so clear this flag so we won't send a message 1106 * back to finish the IO there. 
1107 */ 1108 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 1109 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1110 1111 /* io = msg->hdr.serializing_sc; */ 1112 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 1113 ctl_enqueue_isc(io); 1114 break; 1115 1116 /* Handle resets sent from the other side */ 1117 case CTL_MSG_MANAGE_TASKS: { 1118 struct ctl_taskio *taskio; 1119 taskio = (struct ctl_taskio *)ctl_alloc_io( 1120 softc->othersc_pool); 1121 ctl_zero_io((union ctl_io *)taskio); 1122 taskio->io_hdr.io_type = CTL_IO_TASK; 1123 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1124 taskio->io_hdr.nexus = msg->hdr.nexus; 1125 taskio->task_action = msg->task.task_action; 1126 taskio->tag_num = msg->task.tag_num; 1127 taskio->tag_type = msg->task.tag_type; 1128 #ifdef CTL_TIME_IO 1129 taskio->io_hdr.start_time = time_uptime; 1130 getbintime(&taskio->io_hdr.start_bt); 1131 #endif /* CTL_TIME_IO */ 1132 ctl_run_task((union ctl_io *)taskio); 1133 break; 1134 } 1135 /* Persistent Reserve action which needs attention */ 1136 case CTL_MSG_PERS_ACTION: 1137 presio = (struct ctl_prio *)ctl_alloc_io( 1138 softc->othersc_pool); 1139 ctl_zero_io((union ctl_io *)presio); 1140 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; 1141 presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1142 presio->io_hdr.nexus = msg->hdr.nexus; 1143 presio->pr_msg = msg->pr; 1144 ctl_enqueue_isc((union ctl_io *)presio); 1145 break; 1146 case CTL_MSG_UA: 1147 ctl_isc_ua(softc, msg, param); 1148 break; 1149 case CTL_MSG_PORT_SYNC: 1150 ctl_isc_port_sync(softc, msg, param); 1151 break; 1152 case CTL_MSG_LUN_SYNC: 1153 ctl_isc_lun_sync(softc, msg, param); 1154 break; 1155 default: 1156 printf("Received HA message of unknown type %d\n", 1157 msg->hdr.msg_type); 1158 break; 1159 } 1160 if (msg != &msgbuf) 1161 free(msg, M_CTL); 1162 } else if (event == CTL_HA_EVT_LINK_CHANGE) { 1163 printf("CTL: HA link status changed from %d to %d\n", 1164 softc->ha_link, param); 1165 if (param == softc->ha_link) 1166 return; 1167 if (softc->ha_link == CTL_HA_LINK_ONLINE) { 1168 softc->ha_link = param; 1169 ctl_isc_ha_link_down(softc); 1170 } else { 1171 softc->ha_link = param; 1172 if (softc->ha_link == CTL_HA_LINK_ONLINE) 1173 ctl_isc_ha_link_up(softc); 1174 } 1175 return; 1176 } else { 1177 printf("ctl_isc_event_handler: Unknown event %d\n", event); 1178 return; 1179 } 1180 } 1181 1182 static void 1183 ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) 1184 { 1185 1186 memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data, 1187 src->scsi.sense_len); 1188 dest->scsiio.scsi_status = src->scsi.scsi_status; 1189 dest->scsiio.sense_len = src->scsi.sense_len; 1190 dest->io_hdr.status = src->hdr.status; 1191 } 1192 1193 static void 1194 ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest) 1195 { 1196 1197 memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data, 1198 src->scsiio.sense_len); 1199 dest->scsi.scsi_status = src->scsiio.scsi_status; 1200 dest->scsi.sense_len = src->scsiio.sense_len; 1201 dest->hdr.status = src->io_hdr.status; 1202 } 1203 1204 void 1205 ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1206 { 1207 struct ctl_softc *softc = lun->ctl_softc; 1208 ctl_ua_type *pu; 1209 1210 if (initidx < softc->init_min || initidx >= softc->init_max) 1211 return; 1212 mtx_assert(&lun->lun_lock, MA_OWNED); 1213 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1214 if (pu == NULL) 1215 return; 1216 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; 1217 } 1218 1219 void 1220 ctl_est_ua_port(struct ctl_lun *lun, int port, 
uint32_t except, ctl_ua_type ua) 1221 { 1222 int i; 1223 1224 mtx_assert(&lun->lun_lock, MA_OWNED); 1225 if (lun->pending_ua[port] == NULL) 1226 return; 1227 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1228 if (port * CTL_MAX_INIT_PER_PORT + i == except) 1229 continue; 1230 lun->pending_ua[port][i] |= ua; 1231 } 1232 } 1233 1234 void 1235 ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1236 { 1237 struct ctl_softc *softc = lun->ctl_softc; 1238 int i; 1239 1240 mtx_assert(&lun->lun_lock, MA_OWNED); 1241 for (i = softc->port_min; i < softc->port_max; i++) 1242 ctl_est_ua_port(lun, i, except, ua); 1243 } 1244 1245 void 1246 ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1247 { 1248 struct ctl_softc *softc = lun->ctl_softc; 1249 ctl_ua_type *pu; 1250 1251 if (initidx < softc->init_min || initidx >= softc->init_max) 1252 return; 1253 mtx_assert(&lun->lun_lock, MA_OWNED); 1254 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1255 if (pu == NULL) 1256 return; 1257 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; 1258 } 1259 1260 void 1261 ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1262 { 1263 struct ctl_softc *softc = lun->ctl_softc; 1264 int i, j; 1265 1266 mtx_assert(&lun->lun_lock, MA_OWNED); 1267 for (i = softc->port_min; i < softc->port_max; i++) { 1268 if (lun->pending_ua[i] == NULL) 1269 continue; 1270 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1271 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1272 continue; 1273 lun->pending_ua[i][j] &= ~ua; 1274 } 1275 } 1276 } 1277 1278 void 1279 ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, 1280 ctl_ua_type ua_type) 1281 { 1282 struct ctl_lun *lun; 1283 1284 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); 1285 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 1286 mtx_lock(&lun->lun_lock); 1287 ctl_clr_ua(lun, initidx, ua_type); 1288 mtx_unlock(&lun->lun_lock); 1289 } 1290 } 1291 1292 static int 1293 ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS) 1294 { 1295 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1296 struct ctl_lun *lun; 1297 struct ctl_lun_req ireq; 1298 int error, value; 1299 1300 value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 
0 : 1; 1301 error = sysctl_handle_int(oidp, &value, 0, req); 1302 if ((error != 0) || (req->newptr == NULL)) 1303 return (error); 1304 1305 mtx_lock(&softc->ctl_lock); 1306 if (value == 0) 1307 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1308 else 1309 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1310 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1311 mtx_unlock(&softc->ctl_lock); 1312 bzero(&ireq, sizeof(ireq)); 1313 ireq.reqtype = CTL_LUNREQ_MODIFY; 1314 ireq.reqdata.modify.lun_id = lun->lun; 1315 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, 1316 curthread); 1317 if (ireq.status != CTL_LUN_OK) { 1318 printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", 1319 __func__, ireq.status, ireq.error_str); 1320 } 1321 mtx_lock(&softc->ctl_lock); 1322 } 1323 mtx_unlock(&softc->ctl_lock); 1324 return (0); 1325 } 1326 1327 static int 1328 ctl_init(void) 1329 { 1330 struct ctl_softc *softc; 1331 void *other_pool; 1332 int i, error, retval; 1333 1334 retval = 0; 1335 control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1336 M_WAITOK | M_ZERO); 1337 softc = control_softc; 1338 1339 softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, 1340 "cam/ctl"); 1341 1342 softc->dev->si_drv1 = softc; 1343 1344 sysctl_ctx_init(&softc->sysctl_ctx); 1345 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1346 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1347 CTLFLAG_RD, 0, "CAM Target Layer"); 1348 1349 if (softc->sysctl_tree == NULL) { 1350 printf("%s: unable to allocate sysctl tree\n", __func__); 1351 destroy_dev(softc->dev); 1352 free(control_softc, M_DEVBUF); 1353 control_softc = NULL; 1354 return (ENOMEM); 1355 } 1356 1357 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1358 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1359 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1360 softc->open_count = 0; 1361 1362 /* 1363 * Default to actually sending a SYNCHRONIZE CACHE command down to 1364 * the drive. 1365 */ 1366 softc->flags = CTL_FLAG_REAL_SYNC; 1367 1368 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1369 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, 1370 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); 1371 1372 /* 1373 * In Copan's HA scheme, the "master" and "slave" roles are 1374 * figured out through the slot the controller is in. Although it 1375 * is an active/active system, someone has to be in charge. 
1376 */ 1377 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1378 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1379 "HA head ID (0 - no HA)"); 1380 if (softc->ha_id == 0 || softc->ha_id > NUM_TARGET_PORT_GROUPS) { 1381 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1382 softc->is_single = 1; 1383 softc->port_cnt = CTL_MAX_PORTS; 1384 softc->port_min = 0; 1385 } else { 1386 softc->port_cnt = CTL_MAX_PORTS / NUM_TARGET_PORT_GROUPS; 1387 softc->port_min = (softc->ha_id - 1) * softc->port_cnt; 1388 } 1389 softc->port_max = softc->port_min + softc->port_cnt; 1390 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; 1391 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; 1392 1393 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1394 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, 1395 "HA link state (0 - offline, 1 - unknown, 2 - online)"); 1396 1397 STAILQ_INIT(&softc->lun_list); 1398 STAILQ_INIT(&softc->pending_lun_queue); 1399 STAILQ_INIT(&softc->fe_list); 1400 STAILQ_INIT(&softc->port_list); 1401 STAILQ_INIT(&softc->be_list); 1402 ctl_tpc_init(softc); 1403 1404 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, 1405 &other_pool) != 0) 1406 { 1407 printf("ctl: can't allocate %d entry other SC pool, " 1408 "exiting\n", CTL_POOL_ENTRIES_OTHER_SC); 1409 return (ENOMEM); 1410 } 1411 softc->othersc_pool = other_pool; 1412 1413 if (worker_threads <= 0) 1414 worker_threads = max(1, mp_ncpus / 4); 1415 if (worker_threads > CTL_MAX_THREADS) 1416 worker_threads = CTL_MAX_THREADS; 1417 1418 for (i = 0; i < worker_threads; i++) { 1419 struct ctl_thread *thr = &softc->threads[i]; 1420 1421 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); 1422 thr->ctl_softc = softc; 1423 STAILQ_INIT(&thr->incoming_queue); 1424 STAILQ_INIT(&thr->rtr_queue); 1425 STAILQ_INIT(&thr->done_queue); 1426 STAILQ_INIT(&thr->isc_queue); 1427 1428 error = kproc_kthread_add(ctl_work_thread, thr, 1429 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); 1430 if (error != 0) { 1431 printf("error creating CTL work thread!\n"); 1432 ctl_pool_free(other_pool); 1433 return (error); 1434 } 1435 } 1436 error = kproc_kthread_add(ctl_lun_thread, softc, 1437 &softc->ctl_proc, NULL, 0, 0, "ctl", "lun"); 1438 if (error != 0) { 1439 printf("error creating CTL lun thread!\n"); 1440 ctl_pool_free(other_pool); 1441 return (error); 1442 } 1443 error = kproc_kthread_add(ctl_thresh_thread, softc, 1444 &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh"); 1445 if (error != 0) { 1446 printf("error creating CTL threshold thread!\n"); 1447 ctl_pool_free(other_pool); 1448 return (error); 1449 } 1450 1451 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), 1452 OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN, 1453 softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); 1454 1455 if (softc->is_single == 0) { 1456 ctl_frontend_register(&ha_frontend); 1457 if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) { 1458 printf("ctl_init: ctl_ha_msg_init failed.\n"); 1459 softc->is_single = 1; 1460 } else 1461 if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler) 1462 != CTL_HA_STATUS_SUCCESS) { 1463 printf("ctl_init: ctl_ha_msg_register failed.\n"); 1464 softc->is_single = 1; 1465 } 1466 } 1467 return (0); 1468 } 1469 1470 void 1471 ctl_shutdown(void) 1472 { 1473 struct ctl_softc *softc; 1474 struct ctl_lun *lun, *next_lun; 1475 1476 softc = (struct ctl_softc *)control_softc; 1477 1478 if (softc->is_single == 0) { 1479 if 
(ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) 1480 != CTL_HA_STATUS_SUCCESS) { 1481 printf("ctl_shutdown: ctl_ha_msg_deregister failed.\n"); 1482 } 1483 if (ctl_ha_msg_shutdown(softc) != CTL_HA_STATUS_SUCCESS) { 1484 printf("ctl_shutdown: ctl_ha_msg_shutdown failed.\n"); 1485 } 1486 ctl_frontend_deregister(&ha_frontend); 1487 } 1488 1489 mtx_lock(&softc->ctl_lock); 1490 1491 /* 1492 * Free up each LUN. 1493 */ 1494 for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){ 1495 next_lun = STAILQ_NEXT(lun, links); 1496 ctl_free_lun(lun); 1497 } 1498 1499 mtx_unlock(&softc->ctl_lock); 1500 1501 #if 0 1502 ctl_shutdown_thread(softc->work_thread); 1503 mtx_destroy(&softc->queue_lock); 1504 #endif 1505 1506 ctl_tpc_shutdown(softc); 1507 uma_zdestroy(softc->io_zone); 1508 mtx_destroy(&softc->ctl_lock); 1509 1510 destroy_dev(softc->dev); 1511 1512 sysctl_ctx_free(&softc->sysctl_ctx); 1513 1514 free(control_softc, M_DEVBUF); 1515 control_softc = NULL; 1516 } 1517 1518 static int 1519 ctl_module_event_handler(module_t mod, int what, void *arg) 1520 { 1521 1522 switch (what) { 1523 case MOD_LOAD: 1524 return (ctl_init()); 1525 case MOD_UNLOAD: 1526 return (EBUSY); 1527 default: 1528 return (EOPNOTSUPP); 1529 } 1530 } 1531 1532 /* 1533 * XXX KDM should we do some access checks here? Bump a reference count to 1534 * prevent a CTL module from being unloaded while someone has it open? 1535 */ 1536 static int 1537 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 1538 { 1539 return (0); 1540 } 1541 1542 static int 1543 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 1544 { 1545 return (0); 1546 } 1547 1548 /* 1549 * Remove an initiator by port number and initiator ID. 1550 * Returns 0 for success, -1 for failure. 1551 */ 1552 int 1553 ctl_remove_initiator(struct ctl_port *port, int iid) 1554 { 1555 struct ctl_softc *softc = control_softc; 1556 1557 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1558 1559 if (iid > CTL_MAX_INIT_PER_PORT) { 1560 printf("%s: initiator ID %u > maximun %u!\n", 1561 __func__, iid, CTL_MAX_INIT_PER_PORT); 1562 return (-1); 1563 } 1564 1565 mtx_lock(&softc->ctl_lock); 1566 port->wwpn_iid[iid].in_use--; 1567 port->wwpn_iid[iid].last_use = time_uptime; 1568 mtx_unlock(&softc->ctl_lock); 1569 1570 return (0); 1571 } 1572 1573 /* 1574 * Add an initiator to the initiator map. 1575 * Returns iid for success, < 0 for failure. 
1576 */ 1577 int 1578 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 1579 { 1580 struct ctl_softc *softc = control_softc; 1581 time_t best_time; 1582 int i, best; 1583 1584 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1585 1586 if (iid >= CTL_MAX_INIT_PER_PORT) { 1587 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 1588 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 1589 free(name, M_CTL); 1590 return (-1); 1591 } 1592 1593 mtx_lock(&softc->ctl_lock); 1594 1595 if (iid < 0 && (wwpn != 0 || name != NULL)) { 1596 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1597 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 1598 iid = i; 1599 break; 1600 } 1601 if (name != NULL && port->wwpn_iid[i].name != NULL && 1602 strcmp(name, port->wwpn_iid[i].name) == 0) { 1603 iid = i; 1604 break; 1605 } 1606 } 1607 } 1608 1609 if (iid < 0) { 1610 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1611 if (port->wwpn_iid[i].in_use == 0 && 1612 port->wwpn_iid[i].wwpn == 0 && 1613 port->wwpn_iid[i].name == NULL) { 1614 iid = i; 1615 break; 1616 } 1617 } 1618 } 1619 1620 if (iid < 0) { 1621 best = -1; 1622 best_time = INT32_MAX; 1623 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1624 if (port->wwpn_iid[i].in_use == 0) { 1625 if (port->wwpn_iid[i].last_use < best_time) { 1626 best = i; 1627 best_time = port->wwpn_iid[i].last_use; 1628 } 1629 } 1630 } 1631 iid = best; 1632 } 1633 1634 if (iid < 0) { 1635 mtx_unlock(&softc->ctl_lock); 1636 free(name, M_CTL); 1637 return (-2); 1638 } 1639 1640 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 1641 /* 1642 * This is not an error yet. 1643 */ 1644 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 1645 #if 0 1646 printf("%s: port %d iid %u WWPN %#jx arrived" 1647 " again\n", __func__, port->targ_port, 1648 iid, (uintmax_t)wwpn); 1649 #endif 1650 goto take; 1651 } 1652 if (name != NULL && port->wwpn_iid[iid].name != NULL && 1653 strcmp(name, port->wwpn_iid[iid].name) == 0) { 1654 #if 0 1655 printf("%s: port %d iid %u name '%s' arrived" 1656 " again\n", __func__, port->targ_port, 1657 iid, name); 1658 #endif 1659 goto take; 1660 } 1661 1662 /* 1663 * This is an error, but what do we do about it? The 1664 * driver is telling us we have a new WWPN for this 1665 * initiator ID, so we pretty much need to use it. 1666 */ 1667 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 1668 " but WWPN %#jx '%s' is still at that address\n", 1669 __func__, port->targ_port, iid, wwpn, name, 1670 (uintmax_t)port->wwpn_iid[iid].wwpn, 1671 port->wwpn_iid[iid].name); 1672 1673 /* 1674 * XXX KDM clear have_ca and ua_pending on each LUN for 1675 * this initiator. 
1676 */ 1677 } 1678 take: 1679 free(port->wwpn_iid[iid].name, M_CTL); 1680 port->wwpn_iid[iid].name = name; 1681 port->wwpn_iid[iid].wwpn = wwpn; 1682 port->wwpn_iid[iid].in_use++; 1683 mtx_unlock(&softc->ctl_lock); 1684 1685 return (iid); 1686 } 1687 1688 static int 1689 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 1690 { 1691 int len; 1692 1693 switch (port->port_type) { 1694 case CTL_PORT_FC: 1695 { 1696 struct scsi_transportid_fcp *id = 1697 (struct scsi_transportid_fcp *)buf; 1698 if (port->wwpn_iid[iid].wwpn == 0) 1699 return (0); 1700 memset(id, 0, sizeof(*id)); 1701 id->format_protocol = SCSI_PROTO_FC; 1702 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 1703 return (sizeof(*id)); 1704 } 1705 case CTL_PORT_ISCSI: 1706 { 1707 struct scsi_transportid_iscsi_port *id = 1708 (struct scsi_transportid_iscsi_port *)buf; 1709 if (port->wwpn_iid[iid].name == NULL) 1710 return (0); 1711 memset(id, 0, 256); 1712 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 1713 SCSI_PROTO_ISCSI; 1714 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 1715 len = roundup2(min(len, 252), 4); 1716 scsi_ulto2b(len, id->additional_length); 1717 return (sizeof(*id) + len); 1718 } 1719 case CTL_PORT_SAS: 1720 { 1721 struct scsi_transportid_sas *id = 1722 (struct scsi_transportid_sas *)buf; 1723 if (port->wwpn_iid[iid].wwpn == 0) 1724 return (0); 1725 memset(id, 0, sizeof(*id)); 1726 id->format_protocol = SCSI_PROTO_SAS; 1727 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 1728 return (sizeof(*id)); 1729 } 1730 default: 1731 { 1732 struct scsi_transportid_spi *id = 1733 (struct scsi_transportid_spi *)buf; 1734 memset(id, 0, sizeof(*id)); 1735 id->format_protocol = SCSI_PROTO_SPI; 1736 scsi_ulto2b(iid, id->scsi_addr); 1737 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 1738 return (sizeof(*id)); 1739 } 1740 } 1741 } 1742 1743 /* 1744 * Serialize a command that went down the "wrong" side, and so was sent to 1745 * this controller for execution. The logic is a little different than the 1746 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 1747 * sent back to the other side, but in the success case, we execute the 1748 * command on this side (XFER mode) or tell the other side to execute it 1749 * (SER_ONLY mode). 1750 */ 1751 static int 1752 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 1753 { 1754 struct ctl_softc *softc; 1755 union ctl_ha_msg msg_info; 1756 struct ctl_lun *lun; 1757 const struct ctl_cmd_entry *entry; 1758 int retval = 0; 1759 uint32_t targ_lun; 1760 1761 softc = control_softc; 1762 1763 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 1764 mtx_lock(&softc->ctl_lock); 1765 if ((targ_lun < CTL_MAX_LUNS) && 1766 ((lun = softc->ctl_luns[targ_lun]) != NULL)) { 1767 mtx_lock(&lun->lun_lock); 1768 mtx_unlock(&softc->ctl_lock); 1769 /* 1770 * If the LUN is invalid, pretend that it doesn't exist. 1771 * It will go away as soon as all pending I/O has been 1772 * completed. 1773 */ 1774 if (lun->flags & CTL_LUN_DISABLED) { 1775 mtx_unlock(&lun->lun_lock); 1776 lun = NULL; 1777 } 1778 } else { 1779 mtx_unlock(&softc->ctl_lock); 1780 lun = NULL; 1781 } 1782 if (lun == NULL) { 1783 /* 1784 * The other node would not send this request to us unless 1785 * received announce that we are primary node for this LUN. 1786 * If this LUN does not exist now, it is probably result of 1787 * a race, so respond to initiator in the most opaque way. 
1788 */ 1789 ctl_set_busy(ctsio); 1790 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 1791 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1792 msg_info.hdr.serializing_sc = NULL; 1793 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1794 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1795 sizeof(msg_info.scsi), M_WAITOK); 1796 return(1); 1797 } 1798 1799 entry = ctl_get_cmd_entry(ctsio, NULL); 1800 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 1801 mtx_unlock(&lun->lun_lock); 1802 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 1803 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1804 msg_info.hdr.serializing_sc = NULL; 1805 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1806 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1807 sizeof(msg_info.scsi), M_WAITOK); 1808 return(1); 1809 } 1810 1811 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; 1812 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = lun->be_lun; 1813 1814 /* 1815 * Every I/O goes into the OOA queue for a 1816 * particular LUN, and stays there until completion. 1817 */ 1818 #ifdef CTL_TIME_IO 1819 if (TAILQ_EMPTY(&lun->ooa_queue)) 1820 lun->idle_time += getsbinuptime() - lun->last_busy; 1821 #endif 1822 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1823 1824 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 1825 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, 1826 ooa_links))) { 1827 case CTL_ACTION_BLOCK: 1828 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 1829 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 1830 blocked_links); 1831 mtx_unlock(&lun->lun_lock); 1832 break; 1833 case CTL_ACTION_PASS: 1834 case CTL_ACTION_SKIP: 1835 if (softc->ha_mode == CTL_HA_MODE_XFER) { 1836 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 1837 ctl_enqueue_rtr((union ctl_io *)ctsio); 1838 mtx_unlock(&lun->lun_lock); 1839 } else { 1840 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 1841 mtx_unlock(&lun->lun_lock); 1842 1843 /* send msg back to other side */ 1844 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1845 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 1846 msg_info.hdr.msg_type = CTL_MSG_R2R; 1847 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1848 sizeof(msg_info.hdr), M_WAITOK); 1849 } 1850 break; 1851 case CTL_ACTION_OVERLAP: 1852 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1853 mtx_unlock(&lun->lun_lock); 1854 retval = 1; 1855 1856 ctl_set_overlapped_cmd(ctsio); 1857 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 1858 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1859 msg_info.hdr.serializing_sc = NULL; 1860 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1861 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1862 sizeof(msg_info.scsi), M_WAITOK); 1863 break; 1864 case CTL_ACTION_OVERLAP_TAG: 1865 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1866 mtx_unlock(&lun->lun_lock); 1867 retval = 1; 1868 ctl_set_overlapped_tag(ctsio, ctsio->tag_num); 1869 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 1870 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1871 msg_info.hdr.serializing_sc = NULL; 1872 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1873 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1874 sizeof(msg_info.scsi), M_WAITOK); 1875 break; 1876 case CTL_ACTION_ERROR: 1877 default: 1878 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1879 mtx_unlock(&lun->lun_lock); 1880 retval = 1; 1881 1882 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 1883 /*retry_count*/ 0); 1884 ctl_copy_sense_data_back((union ctl_io *)ctsio, 
&msg_info); 1885 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1886 msg_info.hdr.serializing_sc = NULL; 1887 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1888 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1889 sizeof(msg_info.scsi), M_WAITOK); 1890 break; 1891 } 1892 return (retval); 1893 } 1894 1895 /* 1896 * Returns 0 for success, errno for failure. 1897 */ 1898 static int 1899 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 1900 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 1901 { 1902 union ctl_io *io; 1903 int retval; 1904 1905 retval = 0; 1906 1907 mtx_lock(&lun->lun_lock); 1908 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); 1909 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 1910 ooa_links)) { 1911 struct ctl_ooa_entry *entry; 1912 1913 /* 1914 * If we've got more than we can fit, just count the 1915 * remaining entries. 1916 */ 1917 if (*cur_fill_num >= ooa_hdr->alloc_num) 1918 continue; 1919 1920 entry = &kern_entries[*cur_fill_num]; 1921 1922 entry->tag_num = io->scsiio.tag_num; 1923 entry->lun_num = lun->lun; 1924 #ifdef CTL_TIME_IO 1925 entry->start_bt = io->io_hdr.start_bt; 1926 #endif 1927 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 1928 entry->cdb_len = io->scsiio.cdb_len; 1929 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 1930 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 1931 1932 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 1933 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 1934 1935 if (io->io_hdr.flags & CTL_FLAG_ABORT) 1936 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 1937 1938 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 1939 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 1940 1941 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 1942 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 1943 } 1944 mtx_unlock(&lun->lun_lock); 1945 1946 return (retval); 1947 } 1948 1949 static void * 1950 ctl_copyin_alloc(void *user_addr, int len, char *error_str, 1951 size_t error_str_len) 1952 { 1953 void *kptr; 1954 1955 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 1956 1957 if (copyin(user_addr, kptr, len) != 0) { 1958 snprintf(error_str, error_str_len, "Error copying %d bytes " 1959 "from user address %p to kernel address %p", len, 1960 user_addr, kptr); 1961 free(kptr, M_CTL); 1962 return (NULL); 1963 } 1964 1965 return (kptr); 1966 } 1967 1968 static void 1969 ctl_free_args(int num_args, struct ctl_be_arg *args) 1970 { 1971 int i; 1972 1973 if (args == NULL) 1974 return; 1975 1976 for (i = 0; i < num_args; i++) { 1977 free(args[i].kname, M_CTL); 1978 free(args[i].kvalue, M_CTL); 1979 } 1980 1981 free(args, M_CTL); 1982 } 1983 1984 static struct ctl_be_arg * 1985 ctl_copyin_args(int num_args, struct ctl_be_arg *uargs, 1986 char *error_str, size_t error_str_len) 1987 { 1988 struct ctl_be_arg *args; 1989 int i; 1990 1991 args = ctl_copyin_alloc(uargs, num_args * sizeof(*args), 1992 error_str, error_str_len); 1993 1994 if (args == NULL) 1995 goto bailout; 1996 1997 for (i = 0; i < num_args; i++) { 1998 args[i].kname = NULL; 1999 args[i].kvalue = NULL; 2000 } 2001 2002 for (i = 0; i < num_args; i++) { 2003 uint8_t *tmpptr; 2004 2005 args[i].kname = ctl_copyin_alloc(args[i].name, 2006 args[i].namelen, error_str, error_str_len); 2007 if (args[i].kname == NULL) 2008 goto bailout; 2009 2010 if (args[i].kname[args[i].namelen - 1] != '\0') { 2011 snprintf(error_str, error_str_len, "Argument %d " 2012 "name is not NUL-terminated", i); 2013 goto bailout; 2014 } 2015 2016 if (args[i].flags & CTL_BEARG_RD) { 2017 tmpptr = 
ctl_copyin_alloc(args[i].value, 2018 args[i].vallen, error_str, error_str_len); 2019 if (tmpptr == NULL) 2020 goto bailout; 2021 if ((args[i].flags & CTL_BEARG_ASCII) 2022 && (tmpptr[args[i].vallen - 1] != '\0')) { 2023 snprintf(error_str, error_str_len, "Argument " 2024 "%d value is not NUL-terminated", i); 2025 goto bailout; 2026 } 2027 args[i].kvalue = tmpptr; 2028 } else { 2029 args[i].kvalue = malloc(args[i].vallen, 2030 M_CTL, M_WAITOK | M_ZERO); 2031 } 2032 } 2033 2034 return (args); 2035 bailout: 2036 2037 ctl_free_args(num_args, args); 2038 2039 return (NULL); 2040 } 2041 2042 static void 2043 ctl_copyout_args(int num_args, struct ctl_be_arg *args) 2044 { 2045 int i; 2046 2047 for (i = 0; i < num_args; i++) { 2048 if (args[i].flags & CTL_BEARG_WR) 2049 copyout(args[i].kvalue, args[i].value, args[i].vallen); 2050 } 2051 } 2052 2053 /* 2054 * Escape characters that are illegal or not recommended in XML. 2055 */ 2056 int 2057 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 2058 { 2059 char *end = str + size; 2060 int retval; 2061 2062 retval = 0; 2063 2064 for (; *str && str < end; str++) { 2065 switch (*str) { 2066 case '&': 2067 retval = sbuf_printf(sb, "&amp;"); 2068 break; 2069 case '>': 2070 retval = sbuf_printf(sb, "&gt;"); 2071 break; 2072 case '<': 2073 retval = sbuf_printf(sb, "&lt;"); 2074 break; 2075 default: 2076 retval = sbuf_putc(sb, *str); 2077 break; 2078 } 2079 2080 if (retval != 0) 2081 break; 2082 2083 } 2084 2085 return (retval); 2086 } 2087 2088 static void 2089 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2090 { 2091 struct scsi_vpd_id_descriptor *desc; 2092 int i; 2093 2094 if (id == NULL || id->len < 4) 2095 return; 2096 desc = (struct scsi_vpd_id_descriptor *)id->data; 2097 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2098 case SVPD_ID_TYPE_T10: 2099 sbuf_printf(sb, "t10."); 2100 break; 2101 case SVPD_ID_TYPE_EUI64: 2102 sbuf_printf(sb, "eui."); 2103 break; 2104 case SVPD_ID_TYPE_NAA: 2105 sbuf_printf(sb, "naa."); 2106 break; 2107 case SVPD_ID_TYPE_SCSI_NAME: 2108 break; 2109 } 2110 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2111 case SVPD_ID_CODESET_BINARY: 2112 for (i = 0; i < desc->length; i++) 2113 sbuf_printf(sb, "%02x", desc->identifier[i]); 2114 break; 2115 case SVPD_ID_CODESET_ASCII: 2116 sbuf_printf(sb, "%.*s", (int)desc->length, 2117 (char *)desc->identifier); 2118 break; 2119 case SVPD_ID_CODESET_UTF8: 2120 sbuf_printf(sb, "%s", (char *)desc->identifier); 2121 break; 2122 } 2123 } 2124 2125 static int 2126 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2127 struct thread *td) 2128 { 2129 struct ctl_softc *softc; 2130 struct ctl_lun *lun; 2131 int retval; 2132 2133 softc = control_softc; 2134 2135 retval = 0; 2136 2137 switch (cmd) { 2138 case CTL_IO: 2139 retval = ctl_ioctl_io(dev, cmd, addr, flag, td); 2140 break; 2141 case CTL_ENABLE_PORT: 2142 case CTL_DISABLE_PORT: 2143 case CTL_SET_PORT_WWNS: { 2144 struct ctl_port *port; 2145 struct ctl_port_entry *entry; 2146 2147 entry = (struct ctl_port_entry *)addr; 2148 2149 mtx_lock(&softc->ctl_lock); 2150 STAILQ_FOREACH(port, &softc->port_list, links) { 2151 int action, done; 2152 2153 if (port->targ_port < softc->port_min || 2154 port->targ_port >= softc->port_max) 2155 continue; 2156 2157 action = 0; 2158 done = 0; 2159 if ((entry->port_type == CTL_PORT_NONE) 2160 && (entry->targ_port == port->targ_port)) { 2161 /* 2162 * If the user only wants to enable or 2163 * disable or set WWNs on a specific port, 2164 * do the operation and we're done.
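 * A minimal userland sketch of that single-port case (assuming the
 * usual /dev/cam/ctl device node; only the fields this handler
 * actually reads are shown, and the port number is made up):
 *
 *	struct ctl_port_entry e;
 *
 *	memset(&e, 0, sizeof(e));
 *	e.port_type = CTL_PORT_NONE;	(select by number, not by type)
 *	e.targ_port = 3;
 *	ioctl(fd, CTL_ENABLE_PORT, &e);
 *
 * With port_type set to a real type mask instead, the same ioctl acts
 * on every matching frontend port, which is why CTL_SET_PORT_WWNS is
 * rejected in that form below.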
2165 */ 2166 action = 1; 2167 done = 1; 2168 } else if (entry->port_type & port->port_type) { 2169 /* 2170 * Compare the user's type mask with the 2171 * particular frontend type to see if we 2172 * have a match. 2173 */ 2174 action = 1; 2175 done = 0; 2176 2177 /* 2178 * Make sure the user isn't trying to set 2179 * WWNs on multiple ports at the same time. 2180 */ 2181 if (cmd == CTL_SET_PORT_WWNS) { 2182 printf("%s: Can't set WWNs on " 2183 "multiple ports\n", __func__); 2184 retval = EINVAL; 2185 break; 2186 } 2187 } 2188 if (action == 0) 2189 continue; 2190 2191 /* 2192 * XXX KDM we have to drop the lock here, because 2193 * the online/offline operations can potentially 2194 * block. We need to reference count the frontends 2195 * so they can't go away, 2196 */ 2197 if (cmd == CTL_ENABLE_PORT) { 2198 mtx_unlock(&softc->ctl_lock); 2199 ctl_port_online(port); 2200 mtx_lock(&softc->ctl_lock); 2201 } else if (cmd == CTL_DISABLE_PORT) { 2202 mtx_unlock(&softc->ctl_lock); 2203 ctl_port_offline(port); 2204 mtx_lock(&softc->ctl_lock); 2205 } else if (cmd == CTL_SET_PORT_WWNS) { 2206 ctl_port_set_wwns(port, 2207 (entry->flags & CTL_PORT_WWNN_VALID) ? 2208 1 : 0, entry->wwnn, 2209 (entry->flags & CTL_PORT_WWPN_VALID) ? 2210 1 : 0, entry->wwpn); 2211 } 2212 if (done != 0) 2213 break; 2214 } 2215 mtx_unlock(&softc->ctl_lock); 2216 break; 2217 } 2218 case CTL_GET_PORT_LIST: { 2219 struct ctl_port *port; 2220 struct ctl_port_list *list; 2221 int i; 2222 2223 list = (struct ctl_port_list *)addr; 2224 2225 if (list->alloc_len != (list->alloc_num * 2226 sizeof(struct ctl_port_entry))) { 2227 printf("%s: CTL_GET_PORT_LIST: alloc_len %u != " 2228 "alloc_num %u * sizeof(struct ctl_port_entry) " 2229 "%zu\n", __func__, list->alloc_len, 2230 list->alloc_num, sizeof(struct ctl_port_entry)); 2231 retval = EINVAL; 2232 break; 2233 } 2234 list->fill_len = 0; 2235 list->fill_num = 0; 2236 list->dropped_num = 0; 2237 i = 0; 2238 mtx_lock(&softc->ctl_lock); 2239 STAILQ_FOREACH(port, &softc->port_list, links) { 2240 struct ctl_port_entry entry, *list_entry; 2241 2242 if (list->fill_num >= list->alloc_num) { 2243 list->dropped_num++; 2244 continue; 2245 } 2246 2247 entry.port_type = port->port_type; 2248 strlcpy(entry.port_name, port->port_name, 2249 sizeof(entry.port_name)); 2250 entry.targ_port = port->targ_port; 2251 entry.physical_port = port->physical_port; 2252 entry.virtual_port = port->virtual_port; 2253 entry.wwnn = port->wwnn; 2254 entry.wwpn = port->wwpn; 2255 if (port->status & CTL_PORT_STATUS_ONLINE) 2256 entry.online = 1; 2257 else 2258 entry.online = 0; 2259 2260 list_entry = &list->entries[i]; 2261 2262 retval = copyout(&entry, list_entry, sizeof(entry)); 2263 if (retval != 0) { 2264 printf("%s: CTL_GET_PORT_LIST: copyout " 2265 "returned %d\n", __func__, retval); 2266 break; 2267 } 2268 i++; 2269 list->fill_num++; 2270 list->fill_len += sizeof(entry); 2271 } 2272 mtx_unlock(&softc->ctl_lock); 2273 2274 /* 2275 * If this is non-zero, we had a copyout fault, so there's 2276 * probably no point in attempting to set the status inside 2277 * the structure. 
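 * (The entries pointer is a user address embedded in the request, so a
 * fault here usually means the caller handed us a bogus or unmapped
 * buffer; in that case the ioctl just returns the copyout error rather
 * than reporting a partial port list.)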
2278 */ 2279 if (retval != 0) 2280 break; 2281 2282 if (list->dropped_num > 0) 2283 list->status = CTL_PORT_LIST_NEED_MORE_SPACE; 2284 else 2285 list->status = CTL_PORT_LIST_OK; 2286 break; 2287 } 2288 case CTL_DUMP_OOA: { 2289 union ctl_io *io; 2290 char printbuf[128]; 2291 struct sbuf sb; 2292 2293 mtx_lock(&softc->ctl_lock); 2294 printf("Dumping OOA queues:\n"); 2295 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2296 mtx_lock(&lun->lun_lock); 2297 for (io = (union ctl_io *)TAILQ_FIRST( 2298 &lun->ooa_queue); io != NULL; 2299 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2300 ooa_links)) { 2301 sbuf_new(&sb, printbuf, sizeof(printbuf), 2302 SBUF_FIXEDLEN); 2303 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ", 2304 (intmax_t)lun->lun, 2305 io->scsiio.tag_num, 2306 (io->io_hdr.flags & 2307 CTL_FLAG_BLOCKED) ? "" : " BLOCKED", 2308 (io->io_hdr.flags & 2309 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 2310 (io->io_hdr.flags & 2311 CTL_FLAG_ABORT) ? " ABORT" : "", 2312 (io->io_hdr.flags & 2313 CTL_FLAG_IS_WAS_ON_RTR) ? " RTR" : ""); 2314 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 2315 sbuf_finish(&sb); 2316 printf("%s\n", sbuf_data(&sb)); 2317 } 2318 mtx_unlock(&lun->lun_lock); 2319 } 2320 printf("OOA queues dump done\n"); 2321 mtx_unlock(&softc->ctl_lock); 2322 break; 2323 } 2324 case CTL_GET_OOA: { 2325 struct ctl_ooa *ooa_hdr; 2326 struct ctl_ooa_entry *entries; 2327 uint32_t cur_fill_num; 2328 2329 ooa_hdr = (struct ctl_ooa *)addr; 2330 2331 if ((ooa_hdr->alloc_len == 0) 2332 || (ooa_hdr->alloc_num == 0)) { 2333 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2334 "must be non-zero\n", __func__, 2335 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2336 retval = EINVAL; 2337 break; 2338 } 2339 2340 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2341 sizeof(struct ctl_ooa_entry))) { 2342 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2343 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2344 __func__, ooa_hdr->alloc_len, 2345 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2346 retval = EINVAL; 2347 break; 2348 } 2349 2350 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2351 if (entries == NULL) { 2352 printf("%s: could not allocate %d bytes for OOA " 2353 "dump\n", __func__, ooa_hdr->alloc_len); 2354 retval = ENOMEM; 2355 break; 2356 } 2357 2358 mtx_lock(&softc->ctl_lock); 2359 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0) 2360 && ((ooa_hdr->lun_num >= CTL_MAX_LUNS) 2361 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) { 2362 mtx_unlock(&softc->ctl_lock); 2363 free(entries, M_CTL); 2364 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2365 __func__, (uintmax_t)ooa_hdr->lun_num); 2366 retval = EINVAL; 2367 break; 2368 } 2369 2370 cur_fill_num = 0; 2371 2372 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2373 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2374 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2375 ooa_hdr, entries); 2376 if (retval != 0) 2377 break; 2378 } 2379 if (retval != 0) { 2380 mtx_unlock(&softc->ctl_lock); 2381 free(entries, M_CTL); 2382 break; 2383 } 2384 } else { 2385 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2386 2387 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr, 2388 entries); 2389 } 2390 mtx_unlock(&softc->ctl_lock); 2391 2392 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2393 ooa_hdr->fill_len = ooa_hdr->fill_num * 2394 sizeof(struct ctl_ooa_entry); 2395 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2396 if (retval != 0) { 2397 printf("%s: error copying out %d bytes for OOA dump\n", 2398 __func__, 
ooa_hdr->fill_len); 2399 } 2400 2401 getbintime(&ooa_hdr->cur_bt); 2402 2403 if (cur_fill_num > ooa_hdr->alloc_num) { 2404 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2405 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2406 } else { 2407 ooa_hdr->dropped_num = 0; 2408 ooa_hdr->status = CTL_OOA_OK; 2409 } 2410 2411 free(entries, M_CTL); 2412 break; 2413 } 2414 case CTL_CHECK_OOA: { 2415 union ctl_io *io; 2416 struct ctl_ooa_info *ooa_info; 2417 2418 2419 ooa_info = (struct ctl_ooa_info *)addr; 2420 2421 if (ooa_info->lun_id >= CTL_MAX_LUNS) { 2422 ooa_info->status = CTL_OOA_INVALID_LUN; 2423 break; 2424 } 2425 mtx_lock(&softc->ctl_lock); 2426 lun = softc->ctl_luns[ooa_info->lun_id]; 2427 if (lun == NULL) { 2428 mtx_unlock(&softc->ctl_lock); 2429 ooa_info->status = CTL_OOA_INVALID_LUN; 2430 break; 2431 } 2432 mtx_lock(&lun->lun_lock); 2433 mtx_unlock(&softc->ctl_lock); 2434 ooa_info->num_entries = 0; 2435 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 2436 io != NULL; io = (union ctl_io *)TAILQ_NEXT( 2437 &io->io_hdr, ooa_links)) { 2438 ooa_info->num_entries++; 2439 } 2440 mtx_unlock(&lun->lun_lock); 2441 2442 ooa_info->status = CTL_OOA_SUCCESS; 2443 2444 break; 2445 } 2446 case CTL_DELAY_IO: { 2447 struct ctl_io_delay_info *delay_info; 2448 2449 delay_info = (struct ctl_io_delay_info *)addr; 2450 2451 #ifdef CTL_IO_DELAY 2452 mtx_lock(&softc->ctl_lock); 2453 2454 if ((delay_info->lun_id >= CTL_MAX_LUNS) 2455 || (softc->ctl_luns[delay_info->lun_id] == NULL)) { 2456 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2457 } else { 2458 lun = softc->ctl_luns[delay_info->lun_id]; 2459 mtx_lock(&lun->lun_lock); 2460 2461 delay_info->status = CTL_DELAY_STATUS_OK; 2462 2463 switch (delay_info->delay_type) { 2464 case CTL_DELAY_TYPE_CONT: 2465 break; 2466 case CTL_DELAY_TYPE_ONESHOT: 2467 break; 2468 default: 2469 delay_info->status = 2470 CTL_DELAY_STATUS_INVALID_TYPE; 2471 break; 2472 } 2473 2474 switch (delay_info->delay_loc) { 2475 case CTL_DELAY_LOC_DATAMOVE: 2476 lun->delay_info.datamove_type = 2477 delay_info->delay_type; 2478 lun->delay_info.datamove_delay = 2479 delay_info->delay_secs; 2480 break; 2481 case CTL_DELAY_LOC_DONE: 2482 lun->delay_info.done_type = 2483 delay_info->delay_type; 2484 lun->delay_info.done_delay = 2485 delay_info->delay_secs; 2486 break; 2487 default: 2488 delay_info->status = 2489 CTL_DELAY_STATUS_INVALID_LOC; 2490 break; 2491 } 2492 mtx_unlock(&lun->lun_lock); 2493 } 2494 2495 mtx_unlock(&softc->ctl_lock); 2496 #else 2497 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2498 #endif /* CTL_IO_DELAY */ 2499 break; 2500 } 2501 case CTL_REALSYNC_SET: { 2502 int *syncstate; 2503 2504 syncstate = (int *)addr; 2505 2506 mtx_lock(&softc->ctl_lock); 2507 switch (*syncstate) { 2508 case 0: 2509 softc->flags &= ~CTL_FLAG_REAL_SYNC; 2510 break; 2511 case 1: 2512 softc->flags |= CTL_FLAG_REAL_SYNC; 2513 break; 2514 default: 2515 retval = EINVAL; 2516 break; 2517 } 2518 mtx_unlock(&softc->ctl_lock); 2519 break; 2520 } 2521 case CTL_REALSYNC_GET: { 2522 int *syncstate; 2523 2524 syncstate = (int*)addr; 2525 2526 mtx_lock(&softc->ctl_lock); 2527 if (softc->flags & CTL_FLAG_REAL_SYNC) 2528 *syncstate = 1; 2529 else 2530 *syncstate = 0; 2531 mtx_unlock(&softc->ctl_lock); 2532 2533 break; 2534 } 2535 case CTL_SETSYNC: 2536 case CTL_GETSYNC: { 2537 struct ctl_sync_info *sync_info; 2538 2539 sync_info = (struct ctl_sync_info *)addr; 2540 2541 mtx_lock(&softc->ctl_lock); 2542 lun = softc->ctl_luns[sync_info->lun_id]; 2543 if (lun == NULL) { 2544 
mtx_unlock(&softc->ctl_lock); 2545 sync_info->status = CTL_GS_SYNC_NO_LUN; 2546 break; 2547 } 2548 /* 2549 * Get or set the sync interval. We're not bounds checking 2550 * in the set case, hopefully the user won't do something 2551 * silly. 2552 */ 2553 mtx_lock(&lun->lun_lock); 2554 mtx_unlock(&softc->ctl_lock); 2555 if (cmd == CTL_GETSYNC) 2556 sync_info->sync_interval = lun->sync_interval; 2557 else 2558 lun->sync_interval = sync_info->sync_interval; 2559 mtx_unlock(&lun->lun_lock); 2560 2561 sync_info->status = CTL_GS_SYNC_OK; 2562 2563 break; 2564 } 2565 case CTL_GETSTATS: { 2566 struct ctl_stats *stats; 2567 int i; 2568 2569 stats = (struct ctl_stats *)addr; 2570 2571 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) > 2572 stats->alloc_len) { 2573 stats->status = CTL_SS_NEED_MORE_SPACE; 2574 stats->num_luns = softc->num_luns; 2575 break; 2576 } 2577 /* 2578 * XXX KDM no locking here. If the LUN list changes, 2579 * things can blow up. 2580 */ 2581 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; 2582 i++, lun = STAILQ_NEXT(lun, links)) { 2583 retval = copyout(&lun->stats, &stats->lun_stats[i], 2584 sizeof(lun->stats)); 2585 if (retval != 0) 2586 break; 2587 } 2588 stats->num_luns = softc->num_luns; 2589 stats->fill_len = sizeof(struct ctl_lun_io_stats) * 2590 softc->num_luns; 2591 stats->status = CTL_SS_OK; 2592 #ifdef CTL_TIME_IO 2593 stats->flags = CTL_STATS_FLAG_TIME_VALID; 2594 #else 2595 stats->flags = CTL_STATS_FLAG_NONE; 2596 #endif 2597 getnanouptime(&stats->timestamp); 2598 break; 2599 } 2600 case CTL_ERROR_INJECT: { 2601 struct ctl_error_desc *err_desc, *new_err_desc; 2602 2603 err_desc = (struct ctl_error_desc *)addr; 2604 2605 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2606 M_WAITOK | M_ZERO); 2607 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2608 2609 mtx_lock(&softc->ctl_lock); 2610 lun = softc->ctl_luns[err_desc->lun_id]; 2611 if (lun == NULL) { 2612 mtx_unlock(&softc->ctl_lock); 2613 free(new_err_desc, M_CTL); 2614 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2615 __func__, (uintmax_t)err_desc->lun_id); 2616 retval = EINVAL; 2617 break; 2618 } 2619 mtx_lock(&lun->lun_lock); 2620 mtx_unlock(&softc->ctl_lock); 2621 2622 /* 2623 * We could do some checking here to verify the validity 2624 * of the request, but given the complexity of error 2625 * injection requests, the checking logic would be fairly 2626 * complex. 2627 * 2628 * For now, if the request is invalid, it just won't get 2629 * executed and might get deleted. 2630 */ 2631 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2632 2633 /* 2634 * XXX KDM check to make sure the serial number is unique, 2635 * in case we somehow manage to wrap. That shouldn't 2636 * happen for a very long time, but it's the right thing to 2637 * do. 
2638 */ 2639 new_err_desc->serial = lun->error_serial; 2640 err_desc->serial = lun->error_serial; 2641 lun->error_serial++; 2642 2643 mtx_unlock(&lun->lun_lock); 2644 break; 2645 } 2646 case CTL_ERROR_INJECT_DELETE: { 2647 struct ctl_error_desc *delete_desc, *desc, *desc2; 2648 int delete_done; 2649 2650 delete_desc = (struct ctl_error_desc *)addr; 2651 delete_done = 0; 2652 2653 mtx_lock(&softc->ctl_lock); 2654 lun = softc->ctl_luns[delete_desc->lun_id]; 2655 if (lun == NULL) { 2656 mtx_unlock(&softc->ctl_lock); 2657 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2658 __func__, (uintmax_t)delete_desc->lun_id); 2659 retval = EINVAL; 2660 break; 2661 } 2662 mtx_lock(&lun->lun_lock); 2663 mtx_unlock(&softc->ctl_lock); 2664 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2665 if (desc->serial != delete_desc->serial) 2666 continue; 2667 2668 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2669 links); 2670 free(desc, M_CTL); 2671 delete_done = 1; 2672 } 2673 mtx_unlock(&lun->lun_lock); 2674 if (delete_done == 0) { 2675 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2676 "error serial %ju on LUN %u\n", __func__, 2677 delete_desc->serial, delete_desc->lun_id); 2678 retval = EINVAL; 2679 break; 2680 } 2681 break; 2682 } 2683 case CTL_DUMP_STRUCTS: { 2684 int i, j, k; 2685 struct ctl_port *port; 2686 struct ctl_frontend *fe; 2687 2688 mtx_lock(&softc->ctl_lock); 2689 printf("CTL Persistent Reservation information start:\n"); 2690 for (i = 0; i < CTL_MAX_LUNS; i++) { 2691 lun = softc->ctl_luns[i]; 2692 2693 if ((lun == NULL) 2694 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 2695 continue; 2696 2697 for (j = 0; j < CTL_MAX_PORTS; j++) { 2698 if (lun->pr_keys[j] == NULL) 2699 continue; 2700 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2701 if (lun->pr_keys[j][k] == 0) 2702 continue; 2703 printf(" LUN %d port %d iid %d key " 2704 "%#jx\n", i, j, k, 2705 (uintmax_t)lun->pr_keys[j][k]); 2706 } 2707 } 2708 } 2709 printf("CTL Persistent Reservation information end\n"); 2710 printf("CTL Ports:\n"); 2711 STAILQ_FOREACH(port, &softc->port_list, links) { 2712 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2713 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2714 port->frontend->name, port->port_type, 2715 port->physical_port, port->virtual_port, 2716 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2717 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2718 if (port->wwpn_iid[j].in_use == 0 && 2719 port->wwpn_iid[j].wwpn == 0 && 2720 port->wwpn_iid[j].name == NULL) 2721 continue; 2722 2723 printf(" iid %u use %d WWPN %#jx '%s'\n", 2724 j, port->wwpn_iid[j].in_use, 2725 (uintmax_t)port->wwpn_iid[j].wwpn, 2726 port->wwpn_iid[j].name); 2727 } 2728 } 2729 printf("CTL Port information end\n"); 2730 mtx_unlock(&softc->ctl_lock); 2731 /* 2732 * XXX KDM calling this without a lock. We'd likely want 2733 * to drop the lock before calling the frontend's dump 2734 * routine anyway. 
2735 */ 2736 printf("CTL Frontends:\n"); 2737 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2738 printf(" Frontend '%s'\n", fe->name); 2739 if (fe->fe_dump != NULL) 2740 fe->fe_dump(); 2741 } 2742 printf("CTL Frontend information end\n"); 2743 break; 2744 } 2745 case CTL_LUN_REQ: { 2746 struct ctl_lun_req *lun_req; 2747 struct ctl_backend_driver *backend; 2748 2749 lun_req = (struct ctl_lun_req *)addr; 2750 2751 backend = ctl_backend_find(lun_req->backend); 2752 if (backend == NULL) { 2753 lun_req->status = CTL_LUN_ERROR; 2754 snprintf(lun_req->error_str, 2755 sizeof(lun_req->error_str), 2756 "Backend \"%s\" not found.", 2757 lun_req->backend); 2758 break; 2759 } 2760 if (lun_req->num_be_args > 0) { 2761 lun_req->kern_be_args = ctl_copyin_args( 2762 lun_req->num_be_args, 2763 lun_req->be_args, 2764 lun_req->error_str, 2765 sizeof(lun_req->error_str)); 2766 if (lun_req->kern_be_args == NULL) { 2767 lun_req->status = CTL_LUN_ERROR; 2768 break; 2769 } 2770 } 2771 2772 retval = backend->ioctl(dev, cmd, addr, flag, td); 2773 2774 if (lun_req->num_be_args > 0) { 2775 ctl_copyout_args(lun_req->num_be_args, 2776 lun_req->kern_be_args); 2777 ctl_free_args(lun_req->num_be_args, 2778 lun_req->kern_be_args); 2779 } 2780 break; 2781 } 2782 case CTL_LUN_LIST: { 2783 struct sbuf *sb; 2784 struct ctl_lun_list *list; 2785 struct ctl_option *opt; 2786 2787 list = (struct ctl_lun_list *)addr; 2788 2789 /* 2790 * Allocate a fixed length sbuf here, based on the length 2791 * of the user's buffer. We could allocate an auto-extending 2792 * buffer, and then tell the user how much larger our 2793 * amount of data is than his buffer, but that presents 2794 * some problems: 2795 * 2796 * 1. The sbuf(9) routines use a blocking malloc, and so 2797 * we can't hold a lock while calling them with an 2798 * auto-extending buffer. 2799 * 2800 * 2. There is not currently a LUN reference counting 2801 * mechanism, outside of outstanding transactions on 2802 * the LUN's OOA queue. So a LUN could go away on us 2803 * while we're getting the LUN number, backend-specific 2804 * information, etc. Thus, given the way things 2805 * currently work, we need to hold the CTL lock while 2806 * grabbing LUN information. 2807 * 2808 * So, from the user's standpoint, the best thing to do is 2809 * allocate what he thinks is a reasonable buffer length, 2810 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 2811 * double the buffer length and try again. (And repeat 2812 * that until he succeeds.) 2813 */ 2814 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 2815 if (sb == NULL) { 2816 list->status = CTL_LUN_LIST_ERROR; 2817 snprintf(list->error_str, sizeof(list->error_str), 2818 "Unable to allocate %d bytes for LUN list", 2819 list->alloc_len); 2820 break; 2821 } 2822 2823 sbuf_printf(sb, "<ctllunlist>\n"); 2824 2825 mtx_lock(&softc->ctl_lock); 2826 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2827 mtx_lock(&lun->lun_lock); 2828 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 2829 (uintmax_t)lun->lun); 2830 2831 /* 2832 * Bail out as soon as we see that we've overfilled 2833 * the buffer. 2834 */ 2835 if (retval != 0) 2836 break; 2837 2838 retval = sbuf_printf(sb, "\t<backend_type>%s" 2839 "</backend_type>\n", 2840 (lun->backend == NULL) ? 
"none" : 2841 lun->backend->name); 2842 2843 if (retval != 0) 2844 break; 2845 2846 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 2847 lun->be_lun->lun_type); 2848 2849 if (retval != 0) 2850 break; 2851 2852 if (lun->backend == NULL) { 2853 retval = sbuf_printf(sb, "</lun>\n"); 2854 if (retval != 0) 2855 break; 2856 continue; 2857 } 2858 2859 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 2860 (lun->be_lun->maxlba > 0) ? 2861 lun->be_lun->maxlba + 1 : 0); 2862 2863 if (retval != 0) 2864 break; 2865 2866 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 2867 lun->be_lun->blocksize); 2868 2869 if (retval != 0) 2870 break; 2871 2872 retval = sbuf_printf(sb, "\t<serial_number>"); 2873 2874 if (retval != 0) 2875 break; 2876 2877 retval = ctl_sbuf_printf_esc(sb, 2878 lun->be_lun->serial_num, 2879 sizeof(lun->be_lun->serial_num)); 2880 2881 if (retval != 0) 2882 break; 2883 2884 retval = sbuf_printf(sb, "</serial_number>\n"); 2885 2886 if (retval != 0) 2887 break; 2888 2889 retval = sbuf_printf(sb, "\t<device_id>"); 2890 2891 if (retval != 0) 2892 break; 2893 2894 retval = ctl_sbuf_printf_esc(sb, 2895 lun->be_lun->device_id, 2896 sizeof(lun->be_lun->device_id)); 2897 2898 if (retval != 0) 2899 break; 2900 2901 retval = sbuf_printf(sb, "</device_id>\n"); 2902 2903 if (retval != 0) 2904 break; 2905 2906 if (lun->backend->lun_info != NULL) { 2907 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 2908 if (retval != 0) 2909 break; 2910 } 2911 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 2912 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 2913 opt->name, opt->value, opt->name); 2914 if (retval != 0) 2915 break; 2916 } 2917 2918 retval = sbuf_printf(sb, "</lun>\n"); 2919 2920 if (retval != 0) 2921 break; 2922 mtx_unlock(&lun->lun_lock); 2923 } 2924 if (lun != NULL) 2925 mtx_unlock(&lun->lun_lock); 2926 mtx_unlock(&softc->ctl_lock); 2927 2928 if ((retval != 0) 2929 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 2930 retval = 0; 2931 sbuf_delete(sb); 2932 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 2933 snprintf(list->error_str, sizeof(list->error_str), 2934 "Out of space, %d bytes is too small", 2935 list->alloc_len); 2936 break; 2937 } 2938 2939 sbuf_finish(sb); 2940 2941 retval = copyout(sbuf_data(sb), list->lun_xml, 2942 sbuf_len(sb) + 1); 2943 2944 list->fill_len = sbuf_len(sb) + 1; 2945 list->status = CTL_LUN_LIST_OK; 2946 sbuf_delete(sb); 2947 break; 2948 } 2949 case CTL_ISCSI: { 2950 struct ctl_iscsi *ci; 2951 struct ctl_frontend *fe; 2952 2953 ci = (struct ctl_iscsi *)addr; 2954 2955 fe = ctl_frontend_find("iscsi"); 2956 if (fe == NULL) { 2957 ci->status = CTL_ISCSI_ERROR; 2958 snprintf(ci->error_str, sizeof(ci->error_str), 2959 "Frontend \"iscsi\" not found."); 2960 break; 2961 } 2962 2963 retval = fe->ioctl(dev, cmd, addr, flag, td); 2964 break; 2965 } 2966 case CTL_PORT_REQ: { 2967 struct ctl_req *req; 2968 struct ctl_frontend *fe; 2969 2970 req = (struct ctl_req *)addr; 2971 2972 fe = ctl_frontend_find(req->driver); 2973 if (fe == NULL) { 2974 req->status = CTL_LUN_ERROR; 2975 snprintf(req->error_str, sizeof(req->error_str), 2976 "Frontend \"%s\" not found.", req->driver); 2977 break; 2978 } 2979 if (req->num_args > 0) { 2980 req->kern_args = ctl_copyin_args(req->num_args, 2981 req->args, req->error_str, sizeof(req->error_str)); 2982 if (req->kern_args == NULL) { 2983 req->status = CTL_LUN_ERROR; 2984 break; 2985 } 2986 } 2987 2988 if (fe->ioctl) 2989 retval = fe->ioctl(dev, cmd, addr, flag, td); 2990 else 2991 retval = ENODEV; 2992 2993 if 
(req->num_args > 0) { 2994 ctl_copyout_args(req->num_args, req->kern_args); 2995 ctl_free_args(req->num_args, req->kern_args); 2996 } 2997 break; 2998 } 2999 case CTL_PORT_LIST: { 3000 struct sbuf *sb; 3001 struct ctl_port *port; 3002 struct ctl_lun_list *list; 3003 struct ctl_option *opt; 3004 int j; 3005 uint32_t plun; 3006 3007 list = (struct ctl_lun_list *)addr; 3008 3009 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3010 if (sb == NULL) { 3011 list->status = CTL_LUN_LIST_ERROR; 3012 snprintf(list->error_str, sizeof(list->error_str), 3013 "Unable to allocate %d bytes for LUN list", 3014 list->alloc_len); 3015 break; 3016 } 3017 3018 sbuf_printf(sb, "<ctlportlist>\n"); 3019 3020 mtx_lock(&softc->ctl_lock); 3021 STAILQ_FOREACH(port, &softc->port_list, links) { 3022 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3023 (uintmax_t)port->targ_port); 3024 3025 /* 3026 * Bail out as soon as we see that we've overfilled 3027 * the buffer. 3028 */ 3029 if (retval != 0) 3030 break; 3031 3032 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3033 "</frontend_type>\n", port->frontend->name); 3034 if (retval != 0) 3035 break; 3036 3037 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3038 port->port_type); 3039 if (retval != 0) 3040 break; 3041 3042 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3043 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 3044 if (retval != 0) 3045 break; 3046 3047 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3048 port->port_name); 3049 if (retval != 0) 3050 break; 3051 3052 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3053 port->physical_port); 3054 if (retval != 0) 3055 break; 3056 3057 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3058 port->virtual_port); 3059 if (retval != 0) 3060 break; 3061 3062 if (port->target_devid != NULL) { 3063 sbuf_printf(sb, "\t<target>"); 3064 ctl_id_sbuf(port->target_devid, sb); 3065 sbuf_printf(sb, "</target>\n"); 3066 } 3067 3068 if (port->port_devid != NULL) { 3069 sbuf_printf(sb, "\t<port>"); 3070 ctl_id_sbuf(port->port_devid, sb); 3071 sbuf_printf(sb, "</port>\n"); 3072 } 3073 3074 if (port->port_info != NULL) { 3075 retval = port->port_info(port->onoff_arg, sb); 3076 if (retval != 0) 3077 break; 3078 } 3079 STAILQ_FOREACH(opt, &port->options, links) { 3080 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3081 opt->name, opt->value, opt->name); 3082 if (retval != 0) 3083 break; 3084 } 3085 3086 if (port->lun_map != NULL) { 3087 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 3088 for (j = 0; j < CTL_MAX_LUNS; j++) { 3089 plun = ctl_lun_map_from_port(port, j); 3090 if (plun >= CTL_MAX_LUNS) 3091 continue; 3092 sbuf_printf(sb, 3093 "\t<lun id=\"%u\">%u</lun>\n", 3094 j, plun); 3095 } 3096 } 3097 3098 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3099 if (port->wwpn_iid[j].in_use == 0 || 3100 (port->wwpn_iid[j].wwpn == 0 && 3101 port->wwpn_iid[j].name == NULL)) 3102 continue; 3103 3104 if (port->wwpn_iid[j].name != NULL) 3105 retval = sbuf_printf(sb, 3106 "\t<initiator id=\"%u\">%s</initiator>\n", 3107 j, port->wwpn_iid[j].name); 3108 else 3109 retval = sbuf_printf(sb, 3110 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3111 j, port->wwpn_iid[j].wwpn); 3112 if (retval != 0) 3113 break; 3114 } 3115 if (retval != 0) 3116 break; 3117 3118 retval = sbuf_printf(sb, "</targ_port>\n"); 3119 if (retval != 0) 3120 break; 3121 } 3122 mtx_unlock(&softc->ctl_lock); 3123 3124 if ((retval != 0) 3125 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3126 
retval = 0; 3127 sbuf_delete(sb); 3128 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3129 snprintf(list->error_str, sizeof(list->error_str), 3130 "Out of space, %d bytes is too small", 3131 list->alloc_len); 3132 break; 3133 } 3134 3135 sbuf_finish(sb); 3136 3137 retval = copyout(sbuf_data(sb), list->lun_xml, 3138 sbuf_len(sb) + 1); 3139 3140 list->fill_len = sbuf_len(sb) + 1; 3141 list->status = CTL_LUN_LIST_OK; 3142 sbuf_delete(sb); 3143 break; 3144 } 3145 case CTL_LUN_MAP: { 3146 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3147 struct ctl_port *port; 3148 3149 mtx_lock(&softc->ctl_lock); 3150 if (lm->port < softc->port_min || 3151 lm->port >= softc->port_max || 3152 (port = softc->ctl_ports[lm->port]) == NULL) { 3153 mtx_unlock(&softc->ctl_lock); 3154 return (ENXIO); 3155 } 3156 if (port->status & CTL_PORT_STATUS_ONLINE) { 3157 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3158 if (ctl_lun_map_to_port(port, lun->lun) >= 3159 CTL_MAX_LUNS) 3160 continue; 3161 mtx_lock(&lun->lun_lock); 3162 ctl_est_ua_port(lun, lm->port, -1, 3163 CTL_UA_LUN_CHANGE); 3164 mtx_unlock(&lun->lun_lock); 3165 } 3166 } 3167 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3168 if (lm->plun < CTL_MAX_LUNS) { 3169 if (lm->lun == UINT32_MAX) 3170 retval = ctl_lun_map_unset(port, lm->plun); 3171 else if (lm->lun < CTL_MAX_LUNS && 3172 softc->ctl_luns[lm->lun] != NULL) 3173 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3174 else 3175 return (ENXIO); 3176 } else if (lm->plun == UINT32_MAX) { 3177 if (lm->lun == UINT32_MAX) 3178 retval = ctl_lun_map_deinit(port); 3179 else 3180 retval = ctl_lun_map_init(port); 3181 } else 3182 return (ENXIO); 3183 if (port->status & CTL_PORT_STATUS_ONLINE) 3184 ctl_isc_announce_port(port); 3185 break; 3186 } 3187 default: { 3188 /* XXX KDM should we fix this? */ 3189 #if 0 3190 struct ctl_backend_driver *backend; 3191 unsigned int type; 3192 int found; 3193 3194 found = 0; 3195 3196 /* 3197 * We encode the backend type as the ioctl type for backend 3198 * ioctls. So parse it out here, and then search for a 3199 * backend of this type. 
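 * For instance, a backend whose private ioctls were defined with a
 * hypothetical group letter, say _IOWR('Z', 1, struct foo_params),
 * would register itself with backend->type == 'Z', and extracting the
 * type from cmd below would recover that 'Z' so the request could be
 * routed to it.  (This block is compiled out, so the above is only a
 * sketch of the intended dispatch.)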
3200 */ 3201 type = _IOC_TYPE(cmd); 3202 3203 STAILQ_FOREACH(backend, &softc->be_list, links) { 3204 if (backend->type == type) { 3205 found = 1; 3206 break; 3207 } 3208 } 3209 if (found == 0) { 3210 printf("ctl: unknown ioctl command %#lx or backend " 3211 "%d\n", cmd, type); 3212 retval = EINVAL; 3213 break; 3214 } 3215 retval = backend->ioctl(dev, cmd, addr, flag, td); 3216 #endif 3217 retval = ENOTTY; 3218 break; 3219 } 3220 } 3221 return (retval); 3222 } 3223 3224 uint32_t 3225 ctl_get_initindex(struct ctl_nexus *nexus) 3226 { 3227 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3228 } 3229 3230 int 3231 ctl_lun_map_init(struct ctl_port *port) 3232 { 3233 struct ctl_softc *softc = control_softc; 3234 struct ctl_lun *lun; 3235 uint32_t i; 3236 3237 if (port->lun_map == NULL) 3238 port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS, 3239 M_CTL, M_NOWAIT); 3240 if (port->lun_map == NULL) 3241 return (ENOMEM); 3242 for (i = 0; i < CTL_MAX_LUNS; i++) 3243 port->lun_map[i] = UINT32_MAX; 3244 if (port->status & CTL_PORT_STATUS_ONLINE) { 3245 if (port->lun_disable != NULL) { 3246 STAILQ_FOREACH(lun, &softc->lun_list, links) 3247 port->lun_disable(port->targ_lun_arg, lun->lun); 3248 } 3249 ctl_isc_announce_port(port); 3250 } 3251 return (0); 3252 } 3253 3254 int 3255 ctl_lun_map_deinit(struct ctl_port *port) 3256 { 3257 struct ctl_softc *softc = control_softc; 3258 struct ctl_lun *lun; 3259 3260 if (port->lun_map == NULL) 3261 return (0); 3262 free(port->lun_map, M_CTL); 3263 port->lun_map = NULL; 3264 if (port->status & CTL_PORT_STATUS_ONLINE) { 3265 if (port->lun_enable != NULL) { 3266 STAILQ_FOREACH(lun, &softc->lun_list, links) 3267 port->lun_enable(port->targ_lun_arg, lun->lun); 3268 } 3269 ctl_isc_announce_port(port); 3270 } 3271 return (0); 3272 } 3273 3274 int 3275 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3276 { 3277 int status; 3278 uint32_t old; 3279 3280 if (port->lun_map == NULL) { 3281 status = ctl_lun_map_init(port); 3282 if (status != 0) 3283 return (status); 3284 } 3285 old = port->lun_map[plun]; 3286 port->lun_map[plun] = glun; 3287 if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS) { 3288 if (port->lun_enable != NULL) 3289 port->lun_enable(port->targ_lun_arg, plun); 3290 ctl_isc_announce_port(port); 3291 } 3292 return (0); 3293 } 3294 3295 int 3296 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3297 { 3298 uint32_t old; 3299 3300 if (port->lun_map == NULL) 3301 return (0); 3302 old = port->lun_map[plun]; 3303 port->lun_map[plun] = UINT32_MAX; 3304 if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS) { 3305 if (port->lun_disable != NULL) 3306 port->lun_disable(port->targ_lun_arg, plun); 3307 ctl_isc_announce_port(port); 3308 } 3309 return (0); 3310 } 3311 3312 uint32_t 3313 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3314 { 3315 3316 if (port == NULL) 3317 return (UINT32_MAX); 3318 if (port->lun_map == NULL || lun_id >= CTL_MAX_LUNS) 3319 return (lun_id); 3320 return (port->lun_map[lun_id]); 3321 } 3322 3323 uint32_t 3324 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3325 { 3326 uint32_t i; 3327 3328 if (port == NULL) 3329 return (UINT32_MAX); 3330 if (port->lun_map == NULL) 3331 return (lun_id); 3332 for (i = 0; i < CTL_MAX_LUNS; i++) { 3333 if (port->lun_map[i] == lun_id) 3334 return (i); 3335 } 3336 return (UINT32_MAX); 3337 } 3338 3339 static struct ctl_port * 3340 ctl_io_port(struct ctl_io_hdr *io_hdr) 3341 { 3342 3343 return 
(control_softc->ctl_ports[io_hdr->nexus.targ_port]); 3344 } 3345 3346 int 3347 ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) 3348 { 3349 int i; 3350 3351 for (i = first; i < last; i++) { 3352 if ((mask[i / 32] & (1 << (i % 32))) == 0) 3353 return (i); 3354 } 3355 return (-1); 3356 } 3357 3358 int 3359 ctl_set_mask(uint32_t *mask, uint32_t bit) 3360 { 3361 uint32_t chunk, piece; 3362 3363 chunk = bit >> 5; 3364 piece = bit % (sizeof(uint32_t) * 8); 3365 3366 if ((mask[chunk] & (1 << piece)) != 0) 3367 return (-1); 3368 else 3369 mask[chunk] |= (1 << piece); 3370 3371 return (0); 3372 } 3373 3374 int 3375 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3376 { 3377 uint32_t chunk, piece; 3378 3379 chunk = bit >> 5; 3380 piece = bit % (sizeof(uint32_t) * 8); 3381 3382 if ((mask[chunk] & (1 << piece)) == 0) 3383 return (-1); 3384 else 3385 mask[chunk] &= ~(1 << piece); 3386 3387 return (0); 3388 } 3389 3390 int 3391 ctl_is_set(uint32_t *mask, uint32_t bit) 3392 { 3393 uint32_t chunk, piece; 3394 3395 chunk = bit >> 5; 3396 piece = bit % (sizeof(uint32_t) * 8); 3397 3398 if ((mask[chunk] & (1 << piece)) == 0) 3399 return (0); 3400 else 3401 return (1); 3402 } 3403 3404 static uint64_t 3405 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3406 { 3407 uint64_t *t; 3408 3409 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3410 if (t == NULL) 3411 return (0); 3412 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3413 } 3414 3415 static void 3416 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3417 { 3418 uint64_t *t; 3419 3420 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3421 if (t == NULL) 3422 return; 3423 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3424 } 3425 3426 static void 3427 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3428 { 3429 uint64_t *p; 3430 u_int i; 3431 3432 i = residx/CTL_MAX_INIT_PER_PORT; 3433 if (lun->pr_keys[i] != NULL) 3434 return; 3435 mtx_unlock(&lun->lun_lock); 3436 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3437 M_WAITOK | M_ZERO); 3438 mtx_lock(&lun->lun_lock); 3439 if (lun->pr_keys[i] == NULL) 3440 lun->pr_keys[i] = p; 3441 else 3442 free(p, M_CTL); 3443 } 3444 3445 static void 3446 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3447 { 3448 uint64_t *t; 3449 3450 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3451 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3452 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3453 } 3454 3455 /* 3456 * ctl_softc, pool_name, total_ctl_io are passed in. 3457 * npool is passed out. 
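 * A rough usage sketch, as a frontend might call it (the pool name and
 * io count here are made up):
 *
 *	void *pool;
 *	union ctl_io *io;
 *
 *	if (ctl_pool_create(softc, "myfe", 1024, &pool) != 0)
 *		return (ENOMEM);
 *	io = ctl_alloc_io(pool);	(M_WAITOK, may sleep)
 *	...
 *	ctl_free_io(io);
 *	ctl_pool_free(pool);
 *
 * Note that without IO_POOLS this simply hands back the shared io_zone,
 * and ctl_pool_free() becomes a no-op in that configuration.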
3458 */ 3459 int 3460 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3461 uint32_t total_ctl_io, void **npool) 3462 { 3463 #ifdef IO_POOLS 3464 struct ctl_io_pool *pool; 3465 3466 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3467 M_NOWAIT | M_ZERO); 3468 if (pool == NULL) 3469 return (ENOMEM); 3470 3471 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3472 pool->ctl_softc = ctl_softc; 3473 pool->zone = uma_zsecond_create(pool->name, NULL, 3474 NULL, NULL, NULL, ctl_softc->io_zone); 3475 /* uma_prealloc(pool->zone, total_ctl_io); */ 3476 3477 *npool = pool; 3478 #else 3479 *npool = ctl_softc->io_zone; 3480 #endif 3481 return (0); 3482 } 3483 3484 void 3485 ctl_pool_free(struct ctl_io_pool *pool) 3486 { 3487 3488 if (pool == NULL) 3489 return; 3490 3491 #ifdef IO_POOLS 3492 uma_zdestroy(pool->zone); 3493 free(pool, M_CTL); 3494 #endif 3495 } 3496 3497 union ctl_io * 3498 ctl_alloc_io(void *pool_ref) 3499 { 3500 union ctl_io *io; 3501 #ifdef IO_POOLS 3502 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3503 3504 io = uma_zalloc(pool->zone, M_WAITOK); 3505 #else 3506 io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK); 3507 #endif 3508 if (io != NULL) 3509 io->io_hdr.pool = pool_ref; 3510 return (io); 3511 } 3512 3513 union ctl_io * 3514 ctl_alloc_io_nowait(void *pool_ref) 3515 { 3516 union ctl_io *io; 3517 #ifdef IO_POOLS 3518 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3519 3520 io = uma_zalloc(pool->zone, M_NOWAIT); 3521 #else 3522 io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT); 3523 #endif 3524 if (io != NULL) 3525 io->io_hdr.pool = pool_ref; 3526 return (io); 3527 } 3528 3529 void 3530 ctl_free_io(union ctl_io *io) 3531 { 3532 #ifdef IO_POOLS 3533 struct ctl_io_pool *pool; 3534 #endif 3535 3536 if (io == NULL) 3537 return; 3538 3539 #ifdef IO_POOLS 3540 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3541 uma_zfree(pool->zone, io); 3542 #else 3543 uma_zfree((uma_zone_t)io->io_hdr.pool, io); 3544 #endif 3545 } 3546 3547 void 3548 ctl_zero_io(union ctl_io *io) 3549 { 3550 void *pool_ref; 3551 3552 if (io == NULL) 3553 return; 3554 3555 /* 3556 * May need to preserve linked list pointers at some point too. 3557 */ 3558 pool_ref = io->io_hdr.pool; 3559 memset(io, 0, sizeof(*io)); 3560 io->io_hdr.pool = pool_ref; 3561 } 3562 3563 /* 3564 * This routine is currently used for internal copies of ctl_ios that need 3565 * to persist for some reason after we've already returned status to the 3566 * FETD. (Thus the flag set.) 3567 * 3568 * XXX XXX 3569 * Note that this makes a blind copy of all fields in the ctl_io, except 3570 * for the pool reference. This includes any memory that has been 3571 * allocated! That memory will no longer be valid after done has been 3572 * called, so this would be VERY DANGEROUS for command that actually does 3573 * any reads or writes. Right now (11/7/2005), this is only used for immediate 3574 * start and stop commands, which don't transfer any data, so this is not a 3575 * problem. If it is used for anything else, the caller would also need to 3576 * allocate data buffer space and this routine would need to be modified to 3577 * copy the data buffer(s) as well. 3578 */ 3579 void 3580 ctl_copy_io(union ctl_io *src, union ctl_io *dest) 3581 { 3582 void *pool_ref; 3583 3584 if ((src == NULL) 3585 || (dest == NULL)) 3586 return; 3587 3588 /* 3589 * May need to preserve linked list pointers at some point too. 
3590 */ 3591 pool_ref = dest->io_hdr.pool; 3592 3593 memcpy(dest, src, MIN(sizeof(*src), sizeof(*dest))); 3594 3595 dest->io_hdr.pool = pool_ref; 3596 /* 3597 * We need to know that this is an internal copy, and doesn't need 3598 * to get passed back to the FETD that allocated it. 3599 */ 3600 dest->io_hdr.flags |= CTL_FLAG_INT_COPY; 3601 } 3602 3603 int 3604 ctl_expand_number(const char *buf, uint64_t *num) 3605 { 3606 char *endptr; 3607 uint64_t number; 3608 unsigned shift; 3609 3610 number = strtoq(buf, &endptr, 0); 3611 3612 switch (tolower((unsigned char)*endptr)) { 3613 case 'e': 3614 shift = 60; 3615 break; 3616 case 'p': 3617 shift = 50; 3618 break; 3619 case 't': 3620 shift = 40; 3621 break; 3622 case 'g': 3623 shift = 30; 3624 break; 3625 case 'm': 3626 shift = 20; 3627 break; 3628 case 'k': 3629 shift = 10; 3630 break; 3631 case 'b': 3632 case '\0': /* No unit. */ 3633 *num = number; 3634 return (0); 3635 default: 3636 /* Unrecognized unit. */ 3637 return (-1); 3638 } 3639 3640 if ((number << shift) >> shift != number) { 3641 /* Overflow */ 3642 return (-1); 3643 } 3644 *num = number << shift; 3645 return (0); 3646 } 3647 3648 3649 /* 3650 * This routine could be used in the future to load default and/or saved 3651 * mode page parameters for a particuar lun. 3652 */ 3653 static int 3654 ctl_init_page_index(struct ctl_lun *lun) 3655 { 3656 int i; 3657 struct ctl_page_index *page_index; 3658 const char *value; 3659 uint64_t ival; 3660 3661 memcpy(&lun->mode_pages.index, page_index_template, 3662 sizeof(page_index_template)); 3663 3664 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 3665 3666 page_index = &lun->mode_pages.index[i]; 3667 /* 3668 * If this is a disk-only mode page, there's no point in 3669 * setting it up. For some pages, we have to have some 3670 * basic information about the disk in order to calculate the 3671 * mode page data. 3672 */ 3673 if ((lun->be_lun->lun_type != T_DIRECT) 3674 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 3675 continue; 3676 3677 switch (page_index->page_code & SMPH_PC_MASK) { 3678 case SMS_RW_ERROR_RECOVERY_PAGE: { 3679 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3680 panic("subpage is incorrect!"); 3681 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 3682 &rw_er_page_default, 3683 sizeof(rw_er_page_default)); 3684 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 3685 &rw_er_page_changeable, 3686 sizeof(rw_er_page_changeable)); 3687 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 3688 &rw_er_page_default, 3689 sizeof(rw_er_page_default)); 3690 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 3691 &rw_er_page_default, 3692 sizeof(rw_er_page_default)); 3693 page_index->page_data = 3694 (uint8_t *)lun->mode_pages.rw_er_page; 3695 break; 3696 } 3697 case SMS_FORMAT_DEVICE_PAGE: { 3698 struct scsi_format_page *format_page; 3699 3700 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3701 panic("subpage is incorrect!"); 3702 3703 /* 3704 * Sectors per track are set above. Bytes per 3705 * sector need to be set here on a per-LUN basis. 
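 * For example, a LUN with a 4096-byte blocksize ends up with
 * {0x10, 0x00} in bytes_per_sector once scsi_ulto2b() below stores the
 * value big-endian, and a 512-byte LUN gets {0x02, 0x00}.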
3706 */ 3707 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 3708 &format_page_default, 3709 sizeof(format_page_default)); 3710 memcpy(&lun->mode_pages.format_page[ 3711 CTL_PAGE_CHANGEABLE], &format_page_changeable, 3712 sizeof(format_page_changeable)); 3713 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 3714 &format_page_default, 3715 sizeof(format_page_default)); 3716 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 3717 &format_page_default, 3718 sizeof(format_page_default)); 3719 3720 format_page = &lun->mode_pages.format_page[ 3721 CTL_PAGE_CURRENT]; 3722 scsi_ulto2b(lun->be_lun->blocksize, 3723 format_page->bytes_per_sector); 3724 3725 format_page = &lun->mode_pages.format_page[ 3726 CTL_PAGE_DEFAULT]; 3727 scsi_ulto2b(lun->be_lun->blocksize, 3728 format_page->bytes_per_sector); 3729 3730 format_page = &lun->mode_pages.format_page[ 3731 CTL_PAGE_SAVED]; 3732 scsi_ulto2b(lun->be_lun->blocksize, 3733 format_page->bytes_per_sector); 3734 3735 page_index->page_data = 3736 (uint8_t *)lun->mode_pages.format_page; 3737 break; 3738 } 3739 case SMS_RIGID_DISK_PAGE: { 3740 struct scsi_rigid_disk_page *rigid_disk_page; 3741 uint32_t sectors_per_cylinder; 3742 uint64_t cylinders; 3743 #ifndef __XSCALE__ 3744 int shift; 3745 #endif /* !__XSCALE__ */ 3746 3747 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3748 panic("invalid subpage value %d", 3749 page_index->subpage); 3750 3751 /* 3752 * Rotation rate and sectors per track are set 3753 * above. We calculate the cylinders here based on 3754 * capacity. Due to the number of heads and 3755 * sectors per track we're using, smaller arrays 3756 * may turn out to have 0 cylinders. Linux and 3757 * FreeBSD don't pay attention to these mode pages 3758 * to figure out capacity, but Solaris does. It 3759 * seems to deal with 0 cylinders just fine, and 3760 * works out a fake geometry based on the capacity. 3761 */ 3762 memcpy(&lun->mode_pages.rigid_disk_page[ 3763 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 3764 sizeof(rigid_disk_page_default)); 3765 memcpy(&lun->mode_pages.rigid_disk_page[ 3766 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 3767 sizeof(rigid_disk_page_changeable)); 3768 3769 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 3770 CTL_DEFAULT_HEADS; 3771 3772 /* 3773 * The divide method here will be more accurate, 3774 * probably, but results in floating point being 3775 * used in the kernel on i386 (__udivdi3()). On the 3776 * XScale, though, __udivdi3() is implemented in 3777 * software. 3778 * 3779 * The shift method for cylinder calculation is 3780 * accurate if sectors_per_cylinder is a power of 3781 * 2. Otherwise it might be slightly off -- you 3782 * might have a bit of a truncation problem. 3783 */ 3784 #ifdef __XSCALE__ 3785 cylinders = (lun->be_lun->maxlba + 1) / 3786 sectors_per_cylinder; 3787 #else 3788 for (shift = 31; shift > 0; shift--) { 3789 if (sectors_per_cylinder & (1 << shift)) 3790 break; 3791 } 3792 cylinders = (lun->be_lun->maxlba + 1) >> shift; 3793 #endif 3794 3795 /* 3796 * We've basically got 3 bytes, or 24 bits for the 3797 * cylinder size in the mode page. If we're over, 3798 * just round down to 2^24. 
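 * To put a number on it: with a stock geometry of, say, 255 heads and
 * 63 sectors per track (16065 sectors per cylinder, so the shift
 * method above divides by 2^13), the 0xffffff cap is only reached for
 * LUNs of roughly 2^37 blocks, about 64 TiB at a 512-byte blocksize;
 * anything larger simply reports the maximum geometry.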
3799 */ 3800 if (cylinders > 0xffffff) 3801 cylinders = 0xffffff; 3802 3803 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 3804 CTL_PAGE_DEFAULT]; 3805 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 3806 3807 if ((value = ctl_get_opt(&lun->be_lun->options, 3808 "rpm")) != NULL) { 3809 scsi_ulto2b(strtol(value, NULL, 0), 3810 rigid_disk_page->rotation_rate); 3811 } 3812 3813 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 3814 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 3815 sizeof(rigid_disk_page_default)); 3816 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 3817 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 3818 sizeof(rigid_disk_page_default)); 3819 3820 page_index->page_data = 3821 (uint8_t *)lun->mode_pages.rigid_disk_page; 3822 break; 3823 } 3824 case SMS_CACHING_PAGE: { 3825 struct scsi_caching_page *caching_page; 3826 3827 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3828 panic("invalid subpage value %d", 3829 page_index->subpage); 3830 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 3831 &caching_page_default, 3832 sizeof(caching_page_default)); 3833 memcpy(&lun->mode_pages.caching_page[ 3834 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 3835 sizeof(caching_page_changeable)); 3836 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 3837 &caching_page_default, 3838 sizeof(caching_page_default)); 3839 caching_page = &lun->mode_pages.caching_page[ 3840 CTL_PAGE_SAVED]; 3841 value = ctl_get_opt(&lun->be_lun->options, "writecache"); 3842 if (value != NULL && strcmp(value, "off") == 0) 3843 caching_page->flags1 &= ~SCP_WCE; 3844 value = ctl_get_opt(&lun->be_lun->options, "readcache"); 3845 if (value != NULL && strcmp(value, "off") == 0) 3846 caching_page->flags1 |= SCP_RCD; 3847 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 3848 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 3849 sizeof(caching_page_default)); 3850 page_index->page_data = 3851 (uint8_t *)lun->mode_pages.caching_page; 3852 break; 3853 } 3854 case SMS_CONTROL_MODE_PAGE: { 3855 struct scsi_control_page *control_page; 3856 3857 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3858 panic("invalid subpage value %d", 3859 page_index->subpage); 3860 3861 memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT], 3862 &control_page_default, 3863 sizeof(control_page_default)); 3864 memcpy(&lun->mode_pages.control_page[ 3865 CTL_PAGE_CHANGEABLE], &control_page_changeable, 3866 sizeof(control_page_changeable)); 3867 memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED], 3868 &control_page_default, 3869 sizeof(control_page_default)); 3870 control_page = &lun->mode_pages.control_page[ 3871 CTL_PAGE_SAVED]; 3872 value = ctl_get_opt(&lun->be_lun->options, "reordering"); 3873 if (value != NULL && strcmp(value, "unrestricted") == 0) { 3874 control_page->queue_flags &= ~SCP_QUEUE_ALG_MASK; 3875 control_page->queue_flags |= SCP_QUEUE_ALG_UNRESTRICTED; 3876 } 3877 memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT], 3878 &lun->mode_pages.control_page[CTL_PAGE_SAVED], 3879 sizeof(control_page_default)); 3880 page_index->page_data = 3881 (uint8_t *)lun->mode_pages.control_page; 3882 break; 3883 3884 } 3885 case SMS_INFO_EXCEPTIONS_PAGE: { 3886 switch (page_index->subpage) { 3887 case SMS_SUBPAGE_PAGE_0: 3888 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 3889 &ie_page_default, 3890 sizeof(ie_page_default)); 3891 memcpy(&lun->mode_pages.ie_page[ 3892 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 3893 sizeof(ie_page_changeable)); 3894 
memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 3895 &ie_page_default, 3896 sizeof(ie_page_default)); 3897 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 3898 &ie_page_default, 3899 sizeof(ie_page_default)); 3900 page_index->page_data = 3901 (uint8_t *)lun->mode_pages.ie_page; 3902 break; 3903 case 0x02: { 3904 struct ctl_logical_block_provisioning_page *page; 3905 3906 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 3907 &lbp_page_default, 3908 sizeof(lbp_page_default)); 3909 memcpy(&lun->mode_pages.lbp_page[ 3910 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 3911 sizeof(lbp_page_changeable)); 3912 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 3913 &lbp_page_default, 3914 sizeof(lbp_page_default)); 3915 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 3916 value = ctl_get_opt(&lun->be_lun->options, 3917 "avail-threshold"); 3918 if (value != NULL && 3919 ctl_expand_number(value, &ival) == 0) { 3920 page->descr[0].flags |= SLBPPD_ENABLED | 3921 SLBPPD_ARMING_DEC; 3922 if (lun->be_lun->blocksize) 3923 ival /= lun->be_lun->blocksize; 3924 else 3925 ival /= 512; 3926 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 3927 page->descr[0].count); 3928 } 3929 value = ctl_get_opt(&lun->be_lun->options, 3930 "used-threshold"); 3931 if (value != NULL && 3932 ctl_expand_number(value, &ival) == 0) { 3933 page->descr[1].flags |= SLBPPD_ENABLED | 3934 SLBPPD_ARMING_INC; 3935 if (lun->be_lun->blocksize) 3936 ival /= lun->be_lun->blocksize; 3937 else 3938 ival /= 512; 3939 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 3940 page->descr[1].count); 3941 } 3942 value = ctl_get_opt(&lun->be_lun->options, 3943 "pool-avail-threshold"); 3944 if (value != NULL && 3945 ctl_expand_number(value, &ival) == 0) { 3946 page->descr[2].flags |= SLBPPD_ENABLED | 3947 SLBPPD_ARMING_DEC; 3948 if (lun->be_lun->blocksize) 3949 ival /= lun->be_lun->blocksize; 3950 else 3951 ival /= 512; 3952 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 3953 page->descr[2].count); 3954 } 3955 value = ctl_get_opt(&lun->be_lun->options, 3956 "pool-used-threshold"); 3957 if (value != NULL && 3958 ctl_expand_number(value, &ival) == 0) { 3959 page->descr[3].flags |= SLBPPD_ENABLED | 3960 SLBPPD_ARMING_INC; 3961 if (lun->be_lun->blocksize) 3962 ival /= lun->be_lun->blocksize; 3963 else 3964 ival /= 512; 3965 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 3966 page->descr[3].count); 3967 } 3968 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 3969 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 3970 sizeof(lbp_page_default)); 3971 page_index->page_data = 3972 (uint8_t *)lun->mode_pages.lbp_page; 3973 }} 3974 break; 3975 } 3976 case SMS_VENDOR_SPECIFIC_PAGE:{ 3977 switch (page_index->subpage) { 3978 case DBGCNF_SUBPAGE_CODE: { 3979 struct copan_debugconf_subpage *current_page, 3980 *saved_page; 3981 3982 memcpy(&lun->mode_pages.debugconf_subpage[ 3983 CTL_PAGE_CURRENT], 3984 &debugconf_page_default, 3985 sizeof(debugconf_page_default)); 3986 memcpy(&lun->mode_pages.debugconf_subpage[ 3987 CTL_PAGE_CHANGEABLE], 3988 &debugconf_page_changeable, 3989 sizeof(debugconf_page_changeable)); 3990 memcpy(&lun->mode_pages.debugconf_subpage[ 3991 CTL_PAGE_DEFAULT], 3992 &debugconf_page_default, 3993 sizeof(debugconf_page_default)); 3994 memcpy(&lun->mode_pages.debugconf_subpage[ 3995 CTL_PAGE_SAVED], 3996 &debugconf_page_default, 3997 sizeof(debugconf_page_default)); 3998 page_index->page_data = 3999 (uint8_t *)lun->mode_pages.debugconf_subpage; 4000 4001 current_page = (struct copan_debugconf_subpage *) 4002 (page_index->page_data + 4003 (page_index->page_len * 4004 CTL_PAGE_CURRENT)); 4005 
saved_page = (struct copan_debugconf_subpage *) 4006 (page_index->page_data + 4007 (page_index->page_len * 4008 CTL_PAGE_SAVED)); 4009 break; 4010 } 4011 default: 4012 panic("invalid subpage value %d", 4013 page_index->subpage); 4014 break; 4015 } 4016 break; 4017 } 4018 default: 4019 panic("invalid page value %d", 4020 page_index->page_code & SMPH_PC_MASK); 4021 break; 4022 } 4023 } 4024 4025 return (CTL_RETVAL_COMPLETE); 4026 } 4027 4028 static int 4029 ctl_init_log_page_index(struct ctl_lun *lun) 4030 { 4031 struct ctl_page_index *page_index; 4032 int i, j, k, prev; 4033 4034 memcpy(&lun->log_pages.index, log_page_index_template, 4035 sizeof(log_page_index_template)); 4036 4037 prev = -1; 4038 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 4039 4040 page_index = &lun->log_pages.index[i]; 4041 /* 4042 * If this is a disk-only mode page, there's no point in 4043 * setting it up. For some pages, we have to have some 4044 * basic information about the disk in order to calculate the 4045 * mode page data. 4046 */ 4047 if ((lun->be_lun->lun_type != T_DIRECT) 4048 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 4049 continue; 4050 4051 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 4052 lun->backend->lun_attr == NULL) 4053 continue; 4054 4055 if (page_index->page_code != prev) { 4056 lun->log_pages.pages_page[j] = page_index->page_code; 4057 prev = page_index->page_code; 4058 j++; 4059 } 4060 lun->log_pages.subpages_page[k*2] = page_index->page_code; 4061 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 4062 k++; 4063 } 4064 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 4065 lun->log_pages.index[0].page_len = j; 4066 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 4067 lun->log_pages.index[1].page_len = k * 2; 4068 lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0]; 4069 lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS; 4070 lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page; 4071 lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page); 4072 4073 return (CTL_RETVAL_COMPLETE); 4074 } 4075 4076 static int 4077 hex2bin(const char *str, uint8_t *buf, int buf_size) 4078 { 4079 int i; 4080 u_char c; 4081 4082 memset(buf, 0, buf_size); 4083 while (isspace(str[0])) 4084 str++; 4085 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) 4086 str += 2; 4087 buf_size *= 2; 4088 for (i = 0; str[i] != 0 && i < buf_size; i++) { 4089 c = str[i]; 4090 if (isdigit(c)) 4091 c -= '0'; 4092 else if (isalpha(c)) 4093 c -= isupper(c) ? 'A' - 10 : 'a' - 10; 4094 else 4095 break; 4096 if (c >= 16) 4097 break; 4098 if ((i & 1) == 0) 4099 buf[i / 2] |= (c << 4); 4100 else 4101 buf[i / 2] |= c; 4102 } 4103 return ((i + 1) / 2); 4104 } 4105 4106 /* 4107 * LUN allocation. 4108 * 4109 * Requirements: 4110 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 4111 * wants us to allocate the LUN and he can block. 4112 * - ctl_softc is always set 4113 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 4114 * 4115 * Returns 0 for success, non-zero (errno) for failure. 
4116 */ 4117 static int 4118 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4119 struct ctl_be_lun *const be_lun) 4120 { 4121 struct ctl_lun *nlun, *lun; 4122 struct scsi_vpd_id_descriptor *desc; 4123 struct scsi_vpd_id_t10 *t10id; 4124 const char *eui, *naa, *scsiname, *vendor; 4125 int lun_number, i, lun_malloced; 4126 int devidlen, idlen1, idlen2 = 0, len; 4127 4128 if (be_lun == NULL) 4129 return (EINVAL); 4130 4131 /* 4132 * We currently only support Direct Access or Processor LUN types. 4133 */ 4134 switch (be_lun->lun_type) { 4135 case T_DIRECT: 4136 break; 4137 case T_PROCESSOR: 4138 break; 4139 case T_SEQUENTIAL: 4140 case T_CHANGER: 4141 default: 4142 be_lun->lun_config_status(be_lun->be_lun, 4143 CTL_LUN_CONFIG_FAILURE); 4144 break; 4145 } 4146 if (ctl_lun == NULL) { 4147 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4148 lun_malloced = 1; 4149 } else { 4150 lun_malloced = 0; 4151 lun = ctl_lun; 4152 } 4153 4154 memset(lun, 0, sizeof(*lun)); 4155 if (lun_malloced) 4156 lun->flags = CTL_LUN_MALLOCED; 4157 4158 /* Generate LUN ID. */ 4159 devidlen = max(CTL_DEVID_MIN_LEN, 4160 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4161 idlen1 = sizeof(*t10id) + devidlen; 4162 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4163 scsiname = ctl_get_opt(&be_lun->options, "scsiname"); 4164 if (scsiname != NULL) { 4165 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4166 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4167 } 4168 eui = ctl_get_opt(&be_lun->options, "eui"); 4169 if (eui != NULL) { 4170 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4171 } 4172 naa = ctl_get_opt(&be_lun->options, "naa"); 4173 if (naa != NULL) { 4174 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4175 } 4176 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4177 M_CTL, M_WAITOK | M_ZERO); 4178 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4179 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4180 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4181 desc->length = idlen1; 4182 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4183 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4184 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { 4185 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4186 } else { 4187 strncpy(t10id->vendor, vendor, 4188 min(sizeof(t10id->vendor), strlen(vendor))); 4189 } 4190 strncpy((char *)t10id->vendor_spec_id, 4191 (char *)be_lun->device_id, devidlen); 4192 if (scsiname != NULL) { 4193 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4194 desc->length); 4195 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4196 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4197 SVPD_ID_TYPE_SCSI_NAME; 4198 desc->length = idlen2; 4199 strlcpy(desc->identifier, scsiname, idlen2); 4200 } 4201 if (eui != NULL) { 4202 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4203 desc->length); 4204 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4205 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4206 SVPD_ID_TYPE_EUI64; 4207 desc->length = hex2bin(eui, desc->identifier, 16); 4208 desc->length = desc->length > 12 ? 16 : 4209 (desc->length > 8 ? 
12 : 8); 4210 len -= 16 - desc->length; 4211 } 4212 if (naa != NULL) { 4213 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4214 desc->length); 4215 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4216 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4217 SVPD_ID_TYPE_NAA; 4218 desc->length = hex2bin(naa, desc->identifier, 16); 4219 desc->length = desc->length > 8 ? 16 : 8; 4220 len -= 16 - desc->length; 4221 } 4222 lun->lun_devid->len = len; 4223 4224 mtx_lock(&ctl_softc->ctl_lock); 4225 /* 4226 * See if the caller requested a particular LUN number. If so, see 4227 * if it is available. Otherwise, allocate the first available LUN. 4228 */ 4229 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4230 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4231 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4232 mtx_unlock(&ctl_softc->ctl_lock); 4233 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4234 printf("ctl: requested LUN ID %d is higher " 4235 "than CTL_MAX_LUNS - 1 (%d)\n", 4236 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4237 } else { 4238 /* 4239 * XXX KDM return an error, or just assign 4240 * another LUN ID in this case?? 4241 */ 4242 printf("ctl: requested LUN ID %d is already " 4243 "in use\n", be_lun->req_lun_id); 4244 } 4245 if (lun->flags & CTL_LUN_MALLOCED) 4246 free(lun, M_CTL); 4247 be_lun->lun_config_status(be_lun->be_lun, 4248 CTL_LUN_CONFIG_FAILURE); 4249 return (ENOSPC); 4250 } 4251 lun_number = be_lun->req_lun_id; 4252 } else { 4253 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS); 4254 if (lun_number == -1) { 4255 mtx_unlock(&ctl_softc->ctl_lock); 4256 printf("ctl: can't allocate LUN, out of LUNs\n"); 4257 if (lun->flags & CTL_LUN_MALLOCED) 4258 free(lun, M_CTL); 4259 be_lun->lun_config_status(be_lun->be_lun, 4260 CTL_LUN_CONFIG_FAILURE); 4261 return (ENOSPC); 4262 } 4263 } 4264 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4265 4266 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4267 lun->lun = lun_number; 4268 lun->be_lun = be_lun; 4269 /* 4270 * The processor LUN is always enabled. Disk LUNs come on line 4271 * disabled, and must be enabled by the backend. 4272 */ 4273 lun->flags |= CTL_LUN_DISABLED; 4274 lun->backend = be_lun->be; 4275 be_lun->ctl_lun = lun; 4276 be_lun->lun_id = lun_number; 4277 atomic_add_int(&be_lun->be->num_luns, 1); 4278 if (be_lun->flags & CTL_LUN_FLAG_OFFLINE) 4279 lun->flags |= CTL_LUN_OFFLINE; 4280 4281 if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF) 4282 lun->flags |= CTL_LUN_STOPPED; 4283 4284 if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE) 4285 lun->flags |= CTL_LUN_INOPERABLE; 4286 4287 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4288 lun->flags |= CTL_LUN_PRIMARY_SC; 4289 4290 lun->ctl_softc = ctl_softc; 4291 #ifdef CTL_TIME_IO 4292 lun->last_busy = getsbinuptime(); 4293 #endif 4294 TAILQ_INIT(&lun->ooa_queue); 4295 TAILQ_INIT(&lun->blocked_queue); 4296 STAILQ_INIT(&lun->error_list); 4297 ctl_tpc_lun_init(lun); 4298 4299 /* 4300 * Initialize the mode and log page index. 4301 */ 4302 ctl_init_page_index(lun); 4303 ctl_init_log_page_index(lun); 4304 4305 /* 4306 * Now, before we insert this lun on the lun list, set the lun 4307 * inventory changed UA for all other luns. 
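 * That unit attention tells initiators on the existing LUNs that the
 * reported LUN inventory has changed, so they can rescan and discover
 * the new LUN.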
4308 */ 4309 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4310 mtx_lock(&nlun->lun_lock); 4311 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4312 mtx_unlock(&nlun->lun_lock); 4313 } 4314 4315 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4316 4317 ctl_softc->ctl_luns[lun_number] = lun; 4318 4319 ctl_softc->num_luns++; 4320 4321 /* Setup statistics gathering */ 4322 lun->stats.device_type = be_lun->lun_type; 4323 lun->stats.lun_number = lun_number; 4324 if (lun->stats.device_type == T_DIRECT) 4325 lun->stats.blocksize = be_lun->blocksize; 4326 else 4327 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4328 for (i = 0;i < CTL_MAX_PORTS;i++) 4329 lun->stats.ports[i].targ_port = i; 4330 4331 mtx_unlock(&ctl_softc->ctl_lock); 4332 4333 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4334 return (0); 4335 } 4336 4337 /* 4338 * Delete a LUN. 4339 * Assumptions: 4340 * - LUN has already been marked invalid and any pending I/O has been taken 4341 * care of. 4342 */ 4343 static int 4344 ctl_free_lun(struct ctl_lun *lun) 4345 { 4346 struct ctl_softc *softc; 4347 struct ctl_lun *nlun; 4348 int i; 4349 4350 softc = lun->ctl_softc; 4351 4352 mtx_assert(&softc->ctl_lock, MA_OWNED); 4353 4354 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4355 4356 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4357 4358 softc->ctl_luns[lun->lun] = NULL; 4359 4360 if (!TAILQ_EMPTY(&lun->ooa_queue)) 4361 panic("Freeing a LUN %p with outstanding I/O!!\n", lun); 4362 4363 softc->num_luns--; 4364 4365 /* 4366 * Tell the backend to free resources, if this LUN has a backend. 4367 */ 4368 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4369 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4370 4371 ctl_tpc_lun_shutdown(lun); 4372 mtx_destroy(&lun->lun_lock); 4373 free(lun->lun_devid, M_CTL); 4374 for (i = 0; i < CTL_MAX_PORTS; i++) 4375 free(lun->pending_ua[i], M_CTL); 4376 for (i = 0; i < CTL_MAX_PORTS; i++) 4377 free(lun->pr_keys[i], M_CTL); 4378 free(lun->write_buffer, M_CTL); 4379 if (lun->flags & CTL_LUN_MALLOCED) 4380 free(lun, M_CTL); 4381 4382 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4383 mtx_lock(&nlun->lun_lock); 4384 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4385 mtx_unlock(&nlun->lun_lock); 4386 } 4387 4388 return (0); 4389 } 4390 4391 static void 4392 ctl_create_lun(struct ctl_be_lun *be_lun) 4393 { 4394 struct ctl_softc *softc; 4395 4396 softc = control_softc; 4397 4398 /* 4399 * ctl_alloc_lun() should handle all potential failure cases. 4400 */ 4401 ctl_alloc_lun(softc, NULL, be_lun); 4402 } 4403 4404 int 4405 ctl_add_lun(struct ctl_be_lun *be_lun) 4406 { 4407 struct ctl_softc *softc = control_softc; 4408 4409 mtx_lock(&softc->ctl_lock); 4410 STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links); 4411 mtx_unlock(&softc->ctl_lock); 4412 wakeup(&softc->pending_lun_queue); 4413 4414 return (0); 4415 } 4416 4417 int 4418 ctl_enable_lun(struct ctl_be_lun *be_lun) 4419 { 4420 struct ctl_softc *softc; 4421 struct ctl_port *port, *nport; 4422 struct ctl_lun *lun; 4423 int retval; 4424 4425 lun = (struct ctl_lun *)be_lun->ctl_lun; 4426 softc = lun->ctl_softc; 4427 4428 mtx_lock(&softc->ctl_lock); 4429 mtx_lock(&lun->lun_lock); 4430 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4431 /* 4432 * eh? Why did we get called if the LUN is already 4433 * enabled? 
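 * Treat it as a harmless no-op: drop the locks and return success.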
4434 */ 4435 mtx_unlock(&lun->lun_lock); 4436 mtx_unlock(&softc->ctl_lock); 4437 return (0); 4438 } 4439 lun->flags &= ~CTL_LUN_DISABLED; 4440 mtx_unlock(&lun->lun_lock); 4441 4442 for (port = STAILQ_FIRST(&softc->port_list); port != NULL; port = nport) { 4443 nport = STAILQ_NEXT(port, links); 4444 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4445 port->lun_map != NULL || port->lun_enable == NULL) 4446 continue; 4447 4448 /* 4449 * Drop the lock while we call the FETD's enable routine. 4450 * This can lead to a callback into CTL (at least in the 4451 * case of the internal initiator frontend. 4452 */ 4453 mtx_unlock(&softc->ctl_lock); 4454 retval = port->lun_enable(port->targ_lun_arg, lun->lun); 4455 mtx_lock(&softc->ctl_lock); 4456 if (retval != 0) { 4457 printf("%s: FETD %s port %d returned error " 4458 "%d for lun_enable on lun %jd\n", 4459 __func__, port->port_name, port->targ_port, 4460 retval, (intmax_t)lun->lun); 4461 } 4462 } 4463 4464 mtx_unlock(&softc->ctl_lock); 4465 ctl_isc_announce_lun(lun); 4466 4467 return (0); 4468 } 4469 4470 int 4471 ctl_disable_lun(struct ctl_be_lun *be_lun) 4472 { 4473 struct ctl_softc *softc; 4474 struct ctl_port *port; 4475 struct ctl_lun *lun; 4476 int retval; 4477 4478 lun = (struct ctl_lun *)be_lun->ctl_lun; 4479 softc = lun->ctl_softc; 4480 4481 mtx_lock(&softc->ctl_lock); 4482 mtx_lock(&lun->lun_lock); 4483 if (lun->flags & CTL_LUN_DISABLED) { 4484 mtx_unlock(&lun->lun_lock); 4485 mtx_unlock(&softc->ctl_lock); 4486 return (0); 4487 } 4488 lun->flags |= CTL_LUN_DISABLED; 4489 mtx_unlock(&lun->lun_lock); 4490 4491 STAILQ_FOREACH(port, &softc->port_list, links) { 4492 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4493 port->lun_map != NULL || port->lun_disable == NULL) 4494 continue; 4495 4496 /* 4497 * Drop the lock before we call the frontend's disable 4498 * routine, to avoid lock order reversals. 4499 * 4500 * XXX KDM what happens if the frontend list changes while 4501 * we're traversing it? It's unlikely, but should be handled. 
4502 */ 4503 mtx_unlock(&softc->ctl_lock); 4504 retval = port->lun_disable(port->targ_lun_arg, lun->lun); 4505 mtx_lock(&softc->ctl_lock); 4506 if (retval != 0) { 4507 printf("%s: FETD %s port %d returned error " 4508 "%d for lun_disable on lun %jd\n", 4509 __func__, port->port_name, port->targ_port, 4510 retval, (intmax_t)lun->lun); 4511 } 4512 } 4513 4514 mtx_unlock(&softc->ctl_lock); 4515 ctl_isc_announce_lun(lun); 4516 4517 return (0); 4518 } 4519 4520 int 4521 ctl_start_lun(struct ctl_be_lun *be_lun) 4522 { 4523 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4524 4525 mtx_lock(&lun->lun_lock); 4526 lun->flags &= ~CTL_LUN_STOPPED; 4527 mtx_unlock(&lun->lun_lock); 4528 return (0); 4529 } 4530 4531 int 4532 ctl_stop_lun(struct ctl_be_lun *be_lun) 4533 { 4534 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4535 4536 mtx_lock(&lun->lun_lock); 4537 lun->flags |= CTL_LUN_STOPPED; 4538 mtx_unlock(&lun->lun_lock); 4539 return (0); 4540 } 4541 4542 int 4543 ctl_lun_offline(struct ctl_be_lun *be_lun) 4544 { 4545 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4546 4547 mtx_lock(&lun->lun_lock); 4548 lun->flags |= CTL_LUN_OFFLINE; 4549 mtx_unlock(&lun->lun_lock); 4550 return (0); 4551 } 4552 4553 int 4554 ctl_lun_online(struct ctl_be_lun *be_lun) 4555 { 4556 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4557 4558 mtx_lock(&lun->lun_lock); 4559 lun->flags &= ~CTL_LUN_OFFLINE; 4560 mtx_unlock(&lun->lun_lock); 4561 return (0); 4562 } 4563 4564 int 4565 ctl_lun_primary(struct ctl_be_lun *be_lun) 4566 { 4567 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4568 4569 mtx_lock(&lun->lun_lock); 4570 lun->flags |= CTL_LUN_PRIMARY_SC; 4571 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4572 mtx_unlock(&lun->lun_lock); 4573 ctl_isc_announce_lun(lun); 4574 return (0); 4575 } 4576 4577 int 4578 ctl_lun_secondary(struct ctl_be_lun *be_lun) 4579 { 4580 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4581 4582 mtx_lock(&lun->lun_lock); 4583 lun->flags &= ~CTL_LUN_PRIMARY_SC; 4584 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4585 mtx_unlock(&lun->lun_lock); 4586 ctl_isc_announce_lun(lun); 4587 return (0); 4588 } 4589 4590 int 4591 ctl_invalidate_lun(struct ctl_be_lun *be_lun) 4592 { 4593 struct ctl_softc *softc; 4594 struct ctl_lun *lun; 4595 4596 lun = (struct ctl_lun *)be_lun->ctl_lun; 4597 softc = lun->ctl_softc; 4598 4599 mtx_lock(&lun->lun_lock); 4600 4601 /* 4602 * The LUN needs to be disabled before it can be marked invalid. 4603 */ 4604 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4605 mtx_unlock(&lun->lun_lock); 4606 return (-1); 4607 } 4608 /* 4609 * Mark the LUN invalid. 4610 */ 4611 lun->flags |= CTL_LUN_INVALID; 4612 4613 /* 4614 * If there is nothing in the OOA queue, go ahead and free the LUN. 4615 * If we have something in the OOA queue, we'll free it when the 4616 * last I/O completes. 
4617 */ 4618 if (TAILQ_EMPTY(&lun->ooa_queue)) { 4619 mtx_unlock(&lun->lun_lock); 4620 mtx_lock(&softc->ctl_lock); 4621 ctl_free_lun(lun); 4622 mtx_unlock(&softc->ctl_lock); 4623 } else 4624 mtx_unlock(&lun->lun_lock); 4625 4626 return (0); 4627 } 4628 4629 int 4630 ctl_lun_inoperable(struct ctl_be_lun *be_lun) 4631 { 4632 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4633 4634 mtx_lock(&lun->lun_lock); 4635 lun->flags |= CTL_LUN_INOPERABLE; 4636 mtx_unlock(&lun->lun_lock); 4637 return (0); 4638 } 4639 4640 int 4641 ctl_lun_operable(struct ctl_be_lun *be_lun) 4642 { 4643 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4644 4645 mtx_lock(&lun->lun_lock); 4646 lun->flags &= ~CTL_LUN_INOPERABLE; 4647 mtx_unlock(&lun->lun_lock); 4648 return (0); 4649 } 4650 4651 void 4652 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 4653 { 4654 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4655 union ctl_ha_msg msg; 4656 4657 mtx_lock(&lun->lun_lock); 4658 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED); 4659 mtx_unlock(&lun->lun_lock); 4660 if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 4661 /* Send msg to other side. */ 4662 bzero(&msg.ua, sizeof(msg.ua)); 4663 msg.hdr.msg_type = CTL_MSG_UA; 4664 msg.hdr.nexus.initid = -1; 4665 msg.hdr.nexus.targ_port = -1; 4666 msg.hdr.nexus.targ_lun = lun->lun; 4667 msg.hdr.nexus.targ_mapped_lun = lun->lun; 4668 msg.ua.ua_all = 1; 4669 msg.ua.ua_set = 1; 4670 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGED; 4671 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 4672 M_WAITOK); 4673 } 4674 } 4675 4676 /* 4677 * Backend "memory move is complete" callback for requests that never 4678 * make it down to say RAIDCore's configuration code. 4679 */ 4680 int 4681 ctl_config_move_done(union ctl_io *io) 4682 { 4683 int retval; 4684 4685 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 4686 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 4687 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 4688 4689 if ((io->io_hdr.port_status != 0) && 4690 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4691 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4692 /* 4693 * For hardware error sense keys, the sense key 4694 * specific value is defined to be a retry count, 4695 * but we use it to pass back an internal FETD 4696 * error code. XXX KDM Hopefully the FETD is only 4697 * using 16 bits for an error code, since that's 4698 * all the space we have in the sks field. 4699 */ 4700 ctl_set_internal_failure(&io->scsiio, 4701 /*sks_valid*/ 1, 4702 /*retry_count*/ 4703 io->io_hdr.port_status); 4704 } 4705 4706 if (ctl_debug & CTL_DEBUG_CDB_DATA) 4707 ctl_data_print(io); 4708 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 4709 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 4710 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 4711 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 4712 /* 4713 * XXX KDM just assuming a single pointer here, and not a 4714 * S/G list. If we start using S/G lists for config data, 4715 * we'll need to know how to clean them up here as well. 4716 */ 4717 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4718 free(io->scsiio.kern_data_ptr, M_CTL); 4719 ctl_done(io); 4720 retval = CTL_RETVAL_COMPLETE; 4721 } else { 4722 /* 4723 * XXX KDM now we need to continue data movement. Some 4724 * options: 4725 * - call ctl_scsiio() again? We don't do this for data 4726 * writes, because for those at least we know ahead of 4727 * time where the write will go and how long it is. 
For 4728 * config writes, though, that information is largely 4729 * contained within the write itself, thus we need to 4730 * parse out the data again. 4731 * 4732 * - Call some other function once the data is in? 4733 */ 4734 4735 /* 4736 * XXX KDM call ctl_scsiio() again for now, and check flag 4737 * bits to see whether we're allocated or not. 4738 */ 4739 retval = ctl_scsiio(&io->scsiio); 4740 } 4741 return (retval); 4742 } 4743 4744 /* 4745 * This gets called by a backend driver when it is done with a 4746 * data_submit method. 4747 */ 4748 void 4749 ctl_data_submit_done(union ctl_io *io) 4750 { 4751 /* 4752 * If the IO_CONT flag is set, we need to call the supplied 4753 * function to continue processing the I/O, instead of completing 4754 * the I/O just yet. 4755 * 4756 * If there is an error, though, we don't want to keep processing. 4757 * Instead, just send status back to the initiator. 4758 */ 4759 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 4760 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 4761 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4762 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4763 io->scsiio.io_cont(io); 4764 return; 4765 } 4766 ctl_done(io); 4767 } 4768 4769 /* 4770 * This gets called by a backend driver when it is done with a 4771 * configuration write. 4772 */ 4773 void 4774 ctl_config_write_done(union ctl_io *io) 4775 { 4776 uint8_t *buf; 4777 4778 /* 4779 * If the IO_CONT flag is set, we need to call the supplied 4780 * function to continue processing the I/O, instead of completing 4781 * the I/O just yet. 4782 * 4783 * If there is an error, though, we don't want to keep processing. 4784 * Instead, just send status back to the initiator. 4785 */ 4786 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 4787 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 4788 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4789 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4790 io->scsiio.io_cont(io); 4791 return; 4792 } 4793 /* 4794 * Since a configuration write can be done for commands that actually 4795 * have data allocated, like write buffer, and commands that have 4796 * no data, like start/stop unit, we need to check here. 4797 */ 4798 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4799 buf = io->scsiio.kern_data_ptr; 4800 else 4801 buf = NULL; 4802 ctl_done(io); 4803 if (buf) 4804 free(buf, M_CTL); 4805 } 4806 4807 void 4808 ctl_config_read_done(union ctl_io *io) 4809 { 4810 uint8_t *buf; 4811 4812 /* 4813 * If there is some error -- we are done, skip data transfer. 4814 */ 4815 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 4816 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 4817 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 4818 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4819 buf = io->scsiio.kern_data_ptr; 4820 else 4821 buf = NULL; 4822 ctl_done(io); 4823 if (buf) 4824 free(buf, M_CTL); 4825 return; 4826 } 4827 4828 /* 4829 * If the IO_CONT flag is set, we need to call the supplied 4830 * function to continue processing the I/O, instead of completing 4831 * the I/O just yet. 4832 */ 4833 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 4834 io->scsiio.io_cont(io); 4835 return; 4836 } 4837 4838 ctl_datamove(io); 4839 } 4840 4841 /* 4842 * SCSI release command. 
4843 */ 4844 int 4845 ctl_scsi_release(struct ctl_scsiio *ctsio) 4846 { 4847 int length, longid, thirdparty_id, resv_id; 4848 struct ctl_lun *lun; 4849 uint32_t residx; 4850 4851 length = 0; 4852 resv_id = 0; 4853 4854 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 4855 4856 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 4857 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4858 4859 switch (ctsio->cdb[0]) { 4860 case RELEASE_10: { 4861 struct scsi_release_10 *cdb; 4862 4863 cdb = (struct scsi_release_10 *)ctsio->cdb; 4864 4865 if (cdb->byte2 & SR10_LONGID) 4866 longid = 1; 4867 else 4868 thirdparty_id = cdb->thirdparty_id; 4869 4870 resv_id = cdb->resv_id; 4871 length = scsi_2btoul(cdb->length); 4872 break; 4873 } 4874 } 4875 4876 4877 /* 4878 * XXX KDM right now, we only support LUN reservation. We don't 4879 * support 3rd party reservations, or extent reservations, which 4880 * might actually need the parameter list. If we've gotten this 4881 * far, we've got a LUN reservation. Anything else got kicked out 4882 * above. So, according to SPC, ignore the length. 4883 */ 4884 length = 0; 4885 4886 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 4887 && (length > 0)) { 4888 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 4889 ctsio->kern_data_len = length; 4890 ctsio->kern_total_len = length; 4891 ctsio->kern_data_resid = 0; 4892 ctsio->kern_rel_offset = 0; 4893 ctsio->kern_sg_entries = 0; 4894 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 4895 ctsio->be_move_done = ctl_config_move_done; 4896 ctl_datamove((union ctl_io *)ctsio); 4897 4898 return (CTL_RETVAL_COMPLETE); 4899 } 4900 4901 if (length > 0) 4902 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 4903 4904 mtx_lock(&lun->lun_lock); 4905 4906 /* 4907 * According to SPC, it is not an error for an intiator to attempt 4908 * to release a reservation on a LUN that isn't reserved, or that 4909 * is reserved by another initiator. The reservation can only be 4910 * released, though, by the initiator who made it or by one of 4911 * several reset type events. 4912 */ 4913 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 4914 lun->flags &= ~CTL_LUN_RESERVED; 4915 4916 mtx_unlock(&lun->lun_lock); 4917 4918 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 4919 free(ctsio->kern_data_ptr, M_CTL); 4920 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 4921 } 4922 4923 ctl_set_success(ctsio); 4924 ctl_done((union ctl_io *)ctsio); 4925 return (CTL_RETVAL_COMPLETE); 4926 } 4927 4928 int 4929 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 4930 { 4931 int extent, thirdparty, longid; 4932 int resv_id, length; 4933 uint64_t thirdparty_id; 4934 struct ctl_lun *lun; 4935 uint32_t residx; 4936 4937 extent = 0; 4938 thirdparty = 0; 4939 longid = 0; 4940 resv_id = 0; 4941 length = 0; 4942 thirdparty_id = 0; 4943 4944 CTL_DEBUG_PRINT(("ctl_reserve\n")); 4945 4946 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 4947 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4948 4949 switch (ctsio->cdb[0]) { 4950 case RESERVE_10: { 4951 struct scsi_reserve_10 *cdb; 4952 4953 cdb = (struct scsi_reserve_10 *)ctsio->cdb; 4954 4955 if (cdb->byte2 & SR10_LONGID) 4956 longid = 1; 4957 else 4958 thirdparty_id = cdb->thirdparty_id; 4959 4960 resv_id = cdb->resv_id; 4961 length = scsi_2btoul(cdb->length); 4962 break; 4963 } 4964 } 4965 4966 /* 4967 * XXX KDM right now, we only support LUN reservation. We don't 4968 * support 3rd party reservations, or extent reservations, which 4969 * might actually need the parameter list. 
If we've gotten this 4970 * far, we've got a LUN reservation. Anything else got kicked out 4971 * above. So, according to SPC, ignore the length. 4972 */ 4973 length = 0; 4974 4975 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 4976 && (length > 0)) { 4977 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 4978 ctsio->kern_data_len = length; 4979 ctsio->kern_total_len = length; 4980 ctsio->kern_data_resid = 0; 4981 ctsio->kern_rel_offset = 0; 4982 ctsio->kern_sg_entries = 0; 4983 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 4984 ctsio->be_move_done = ctl_config_move_done; 4985 ctl_datamove((union ctl_io *)ctsio); 4986 4987 return (CTL_RETVAL_COMPLETE); 4988 } 4989 4990 if (length > 0) 4991 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 4992 4993 mtx_lock(&lun->lun_lock); 4994 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 4995 ctl_set_reservation_conflict(ctsio); 4996 goto bailout; 4997 } 4998 4999 lun->flags |= CTL_LUN_RESERVED; 5000 lun->res_idx = residx; 5001 5002 ctl_set_success(ctsio); 5003 5004 bailout: 5005 mtx_unlock(&lun->lun_lock); 5006 5007 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5008 free(ctsio->kern_data_ptr, M_CTL); 5009 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5010 } 5011 5012 ctl_done((union ctl_io *)ctsio); 5013 return (CTL_RETVAL_COMPLETE); 5014 } 5015 5016 int 5017 ctl_start_stop(struct ctl_scsiio *ctsio) 5018 { 5019 struct scsi_start_stop_unit *cdb; 5020 struct ctl_lun *lun; 5021 int retval; 5022 5023 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5024 5025 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5026 retval = 0; 5027 5028 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5029 5030 /* 5031 * XXX KDM 5032 * We don't support the immediate bit on a stop unit. In order to 5033 * do that, we would need to code up a way to know that a stop is 5034 * pending, and hold off any new commands until it completes, one 5035 * way or another. Then we could accept or reject those commands 5036 * depending on its status. We would almost need to do the reverse 5037 * of what we do below for an immediate start -- return the copy of 5038 * the ctl_io to the FETD with status to send to the host (and to 5039 * free the copy!) and then free the original I/O once the stop 5040 * actually completes. That way, the OOA queue mechanism can work 5041 * to block commands that shouldn't proceed. Another alternative 5042 * would be to put the copy in the queue in place of the original, 5043 * and return the original back to the caller. That could be 5044 * slightly safer.. 5045 */ 5046 if ((cdb->byte2 & SSS_IMMED) 5047 && ((cdb->how & SSS_START) == 0)) { 5048 ctl_set_invalid_field(ctsio, 5049 /*sks_valid*/ 1, 5050 /*command*/ 1, 5051 /*field*/ 1, 5052 /*bit_valid*/ 1, 5053 /*bit*/ 0); 5054 ctl_done((union ctl_io *)ctsio); 5055 return (CTL_RETVAL_COMPLETE); 5056 } 5057 5058 if ((lun->flags & CTL_LUN_PR_RESERVED) 5059 && ((cdb->how & SSS_START)==0)) { 5060 uint32_t residx; 5061 5062 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5063 if (ctl_get_prkey(lun, residx) == 0 5064 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { 5065 5066 ctl_set_reservation_conflict(ctsio); 5067 ctl_done((union ctl_io *)ctsio); 5068 return (CTL_RETVAL_COMPLETE); 5069 } 5070 } 5071 5072 /* 5073 * If there is no backend on this device, we can't start or stop 5074 * it. In theory we shouldn't get any start/stop commands in the 5075 * first place at this level if the LUN doesn't have a backend. 5076 * That should get stopped by the command decode code. 
5077 */ 5078 if (lun->backend == NULL) { 5079 ctl_set_invalid_opcode(ctsio); 5080 ctl_done((union ctl_io *)ctsio); 5081 return (CTL_RETVAL_COMPLETE); 5082 } 5083 5084 /* 5085 * XXX KDM Copan-specific offline behavior. 5086 * Figure out a reasonable way to port this? 5087 */ 5088 #ifdef NEEDTOPORT 5089 mtx_lock(&lun->lun_lock); 5090 5091 if (((cdb->byte2 & SSS_ONOFFLINE) == 0) 5092 && (lun->flags & CTL_LUN_OFFLINE)) { 5093 /* 5094 * If the LUN is offline, and the on/offline bit isn't set, 5095 * reject the start or stop. Otherwise, let it through. 5096 */ 5097 mtx_unlock(&lun->lun_lock); 5098 ctl_set_lun_not_ready(ctsio); 5099 ctl_done((union ctl_io *)ctsio); 5100 } else { 5101 mtx_unlock(&lun->lun_lock); 5102 #endif /* NEEDTOPORT */ 5103 /* 5104 * This could be a start or a stop when we're online, 5105 * or a stop/offline or start/online. A start or stop when 5106 * we're offline is covered in the case above. 5107 */ 5108 /* 5109 * In the non-immediate case, we send the request to 5110 * the backend and return status to the user when 5111 * it is done. 5112 * 5113 * In the immediate case, we allocate a new ctl_io 5114 * to hold a copy of the request, and send that to 5115 * the backend. We then set good status on the 5116 * user's request and return it immediately. 5117 */ 5118 if (cdb->byte2 & SSS_IMMED) { 5119 union ctl_io *new_io; 5120 5121 new_io = ctl_alloc_io(ctsio->io_hdr.pool); 5122 ctl_copy_io((union ctl_io *)ctsio, new_io); 5123 retval = lun->backend->config_write(new_io); 5124 ctl_set_success(ctsio); 5125 ctl_done((union ctl_io *)ctsio); 5126 } else { 5127 retval = lun->backend->config_write( 5128 (union ctl_io *)ctsio); 5129 } 5130 #ifdef NEEDTOPORT 5131 } 5132 #endif 5133 return (retval); 5134 } 5135 5136 /* 5137 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5138 * we don't really do anything with the LBA and length fields if the user 5139 * passes them in. Instead we'll just flush out the cache for the entire 5140 * LUN. 5141 */ 5142 int 5143 ctl_sync_cache(struct ctl_scsiio *ctsio) 5144 { 5145 struct ctl_lun *lun; 5146 struct ctl_softc *softc; 5147 struct ctl_lba_len_flags *lbalen; 5148 uint64_t starting_lba; 5149 uint32_t block_count; 5150 int retval; 5151 uint8_t byte2; 5152 5153 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5154 5155 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5156 softc = lun->ctl_softc; 5157 retval = 0; 5158 5159 switch (ctsio->cdb[0]) { 5160 case SYNCHRONIZE_CACHE: { 5161 struct scsi_sync_cache *cdb; 5162 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5163 5164 starting_lba = scsi_4btoul(cdb->begin_lba); 5165 block_count = scsi_2btoul(cdb->lb_count); 5166 byte2 = cdb->byte2; 5167 break; 5168 } 5169 case SYNCHRONIZE_CACHE_16: { 5170 struct scsi_sync_cache_16 *cdb; 5171 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5172 5173 starting_lba = scsi_8btou64(cdb->begin_lba); 5174 block_count = scsi_4btoul(cdb->lb_count); 5175 byte2 = cdb->byte2; 5176 break; 5177 } 5178 default: 5179 ctl_set_invalid_opcode(ctsio); 5180 ctl_done((union ctl_io *)ctsio); 5181 goto bailout; 5182 break; /* NOTREACHED */ 5183 } 5184 5185 /* 5186 * We check the LBA and length, but don't do anything with them. 5187 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5188 * get flushed. This check will just help satisfy anyone who wants 5189 * to see an error for an out of range LBA. 
5190 */ 5191 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5192 ctl_set_lba_out_of_range(ctsio); 5193 ctl_done((union ctl_io *)ctsio); 5194 goto bailout; 5195 } 5196 5197 /* 5198 * If this LUN has no backend, we can't flush the cache anyway. 5199 */ 5200 if (lun->backend == NULL) { 5201 ctl_set_invalid_opcode(ctsio); 5202 ctl_done((union ctl_io *)ctsio); 5203 goto bailout; 5204 } 5205 5206 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5207 lbalen->lba = starting_lba; 5208 lbalen->len = block_count; 5209 lbalen->flags = byte2; 5210 5211 /* 5212 * Check to see whether we're configured to send the SYNCHRONIZE 5213 * CACHE command directly to the back end. 5214 */ 5215 mtx_lock(&lun->lun_lock); 5216 if ((softc->flags & CTL_FLAG_REAL_SYNC) 5217 && (++(lun->sync_count) >= lun->sync_interval)) { 5218 lun->sync_count = 0; 5219 mtx_unlock(&lun->lun_lock); 5220 retval = lun->backend->config_write((union ctl_io *)ctsio); 5221 } else { 5222 mtx_unlock(&lun->lun_lock); 5223 ctl_set_success(ctsio); 5224 ctl_done((union ctl_io *)ctsio); 5225 } 5226 5227 bailout: 5228 5229 return (retval); 5230 } 5231 5232 int 5233 ctl_format(struct ctl_scsiio *ctsio) 5234 { 5235 struct scsi_format *cdb; 5236 struct ctl_lun *lun; 5237 int length, defect_list_len; 5238 5239 CTL_DEBUG_PRINT(("ctl_format\n")); 5240 5241 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5242 5243 cdb = (struct scsi_format *)ctsio->cdb; 5244 5245 length = 0; 5246 if (cdb->byte2 & SF_FMTDATA) { 5247 if (cdb->byte2 & SF_LONGLIST) 5248 length = sizeof(struct scsi_format_header_long); 5249 else 5250 length = sizeof(struct scsi_format_header_short); 5251 } 5252 5253 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5254 && (length > 0)) { 5255 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5256 ctsio->kern_data_len = length; 5257 ctsio->kern_total_len = length; 5258 ctsio->kern_data_resid = 0; 5259 ctsio->kern_rel_offset = 0; 5260 ctsio->kern_sg_entries = 0; 5261 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5262 ctsio->be_move_done = ctl_config_move_done; 5263 ctl_datamove((union ctl_io *)ctsio); 5264 5265 return (CTL_RETVAL_COMPLETE); 5266 } 5267 5268 defect_list_len = 0; 5269 5270 if (cdb->byte2 & SF_FMTDATA) { 5271 if (cdb->byte2 & SF_LONGLIST) { 5272 struct scsi_format_header_long *header; 5273 5274 header = (struct scsi_format_header_long *) 5275 ctsio->kern_data_ptr; 5276 5277 defect_list_len = scsi_4btoul(header->defect_list_len); 5278 if (defect_list_len != 0) { 5279 ctl_set_invalid_field(ctsio, 5280 /*sks_valid*/ 1, 5281 /*command*/ 0, 5282 /*field*/ 2, 5283 /*bit_valid*/ 0, 5284 /*bit*/ 0); 5285 goto bailout; 5286 } 5287 } else { 5288 struct scsi_format_header_short *header; 5289 5290 header = (struct scsi_format_header_short *) 5291 ctsio->kern_data_ptr; 5292 5293 defect_list_len = scsi_2btoul(header->defect_list_len); 5294 if (defect_list_len != 0) { 5295 ctl_set_invalid_field(ctsio, 5296 /*sks_valid*/ 1, 5297 /*command*/ 0, 5298 /*field*/ 2, 5299 /*bit_valid*/ 0, 5300 /*bit*/ 0); 5301 goto bailout; 5302 } 5303 } 5304 } 5305 5306 /* 5307 * The format command will clear out the "Medium format corrupted" 5308 * status if set by the configuration code. That status is really 5309 * just a way to notify the host that we have lost the media, and 5310 * get them to issue a command that will basically make them think 5311 * they're blowing away the media. 
5312 */ 5313 mtx_lock(&lun->lun_lock); 5314 lun->flags &= ~CTL_LUN_INOPERABLE; 5315 mtx_unlock(&lun->lun_lock); 5316 5317 ctl_set_success(ctsio); 5318 bailout: 5319 5320 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5321 free(ctsio->kern_data_ptr, M_CTL); 5322 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5323 } 5324 5325 ctl_done((union ctl_io *)ctsio); 5326 return (CTL_RETVAL_COMPLETE); 5327 } 5328 5329 int 5330 ctl_read_buffer(struct ctl_scsiio *ctsio) 5331 { 5332 struct scsi_read_buffer *cdb; 5333 struct ctl_lun *lun; 5334 int buffer_offset, len; 5335 static uint8_t descr[4]; 5336 static uint8_t echo_descr[4] = { 0 }; 5337 5338 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5339 5340 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5341 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5342 5343 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA && 5344 (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR && 5345 (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) { 5346 ctl_set_invalid_field(ctsio, 5347 /*sks_valid*/ 1, 5348 /*command*/ 1, 5349 /*field*/ 1, 5350 /*bit_valid*/ 1, 5351 /*bit*/ 4); 5352 ctl_done((union ctl_io *)ctsio); 5353 return (CTL_RETVAL_COMPLETE); 5354 } 5355 5356 len = scsi_3btoul(cdb->length); 5357 buffer_offset = scsi_3btoul(cdb->offset); 5358 5359 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5360 ctl_set_invalid_field(ctsio, 5361 /*sks_valid*/ 1, 5362 /*command*/ 1, 5363 /*field*/ 6, 5364 /*bit_valid*/ 0, 5365 /*bit*/ 0); 5366 ctl_done((union ctl_io *)ctsio); 5367 return (CTL_RETVAL_COMPLETE); 5368 } 5369 5370 if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5371 descr[0] = 0; 5372 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5373 ctsio->kern_data_ptr = descr; 5374 len = min(len, sizeof(descr)); 5375 } else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5376 ctsio->kern_data_ptr = echo_descr; 5377 len = min(len, sizeof(echo_descr)); 5378 } else { 5379 if (lun->write_buffer == NULL) { 5380 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5381 M_CTL, M_WAITOK); 5382 } 5383 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5384 } 5385 ctsio->kern_data_len = len; 5386 ctsio->kern_total_len = len; 5387 ctsio->kern_data_resid = 0; 5388 ctsio->kern_rel_offset = 0; 5389 ctsio->kern_sg_entries = 0; 5390 ctl_set_success(ctsio); 5391 ctsio->be_move_done = ctl_config_move_done; 5392 ctl_datamove((union ctl_io *)ctsio); 5393 return (CTL_RETVAL_COMPLETE); 5394 } 5395 5396 int 5397 ctl_write_buffer(struct ctl_scsiio *ctsio) 5398 { 5399 struct scsi_write_buffer *cdb; 5400 struct ctl_lun *lun; 5401 int buffer_offset, len; 5402 5403 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5404 5405 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5406 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5407 5408 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) { 5409 ctl_set_invalid_field(ctsio, 5410 /*sks_valid*/ 1, 5411 /*command*/ 1, 5412 /*field*/ 1, 5413 /*bit_valid*/ 1, 5414 /*bit*/ 4); 5415 ctl_done((union ctl_io *)ctsio); 5416 return (CTL_RETVAL_COMPLETE); 5417 } 5418 5419 len = scsi_3btoul(cdb->length); 5420 buffer_offset = scsi_3btoul(cdb->offset); 5421 5422 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5423 ctl_set_invalid_field(ctsio, 5424 /*sks_valid*/ 1, 5425 /*command*/ 1, 5426 /*field*/ 6, 5427 /*bit_valid*/ 0, 5428 /*bit*/ 0); 5429 ctl_done((union ctl_io *)ctsio); 5430 return (CTL_RETVAL_COMPLETE); 5431 } 5432 5433 /* 5434 * If we've got a kernel request that hasn't been malloced yet, 5435 * malloc it and tell the caller the data buffer is here. 
5436 */ 5437 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5438 if (lun->write_buffer == NULL) { 5439 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5440 M_CTL, M_WAITOK); 5441 } 5442 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5443 ctsio->kern_data_len = len; 5444 ctsio->kern_total_len = len; 5445 ctsio->kern_data_resid = 0; 5446 ctsio->kern_rel_offset = 0; 5447 ctsio->kern_sg_entries = 0; 5448 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5449 ctsio->be_move_done = ctl_config_move_done; 5450 ctl_datamove((union ctl_io *)ctsio); 5451 5452 return (CTL_RETVAL_COMPLETE); 5453 } 5454 5455 ctl_set_success(ctsio); 5456 ctl_done((union ctl_io *)ctsio); 5457 return (CTL_RETVAL_COMPLETE); 5458 } 5459 5460 int 5461 ctl_write_same(struct ctl_scsiio *ctsio) 5462 { 5463 struct ctl_lun *lun; 5464 struct ctl_lba_len_flags *lbalen; 5465 uint64_t lba; 5466 uint32_t num_blocks; 5467 int len, retval; 5468 uint8_t byte2; 5469 5470 retval = CTL_RETVAL_COMPLETE; 5471 5472 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5473 5474 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5475 5476 switch (ctsio->cdb[0]) { 5477 case WRITE_SAME_10: { 5478 struct scsi_write_same_10 *cdb; 5479 5480 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5481 5482 lba = scsi_4btoul(cdb->addr); 5483 num_blocks = scsi_2btoul(cdb->length); 5484 byte2 = cdb->byte2; 5485 break; 5486 } 5487 case WRITE_SAME_16: { 5488 struct scsi_write_same_16 *cdb; 5489 5490 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5491 5492 lba = scsi_8btou64(cdb->addr); 5493 num_blocks = scsi_4btoul(cdb->length); 5494 byte2 = cdb->byte2; 5495 break; 5496 } 5497 default: 5498 /* 5499 * We got a command we don't support. This shouldn't 5500 * happen, commands should be filtered out above us. 5501 */ 5502 ctl_set_invalid_opcode(ctsio); 5503 ctl_done((union ctl_io *)ctsio); 5504 5505 return (CTL_RETVAL_COMPLETE); 5506 break; /* NOTREACHED */ 5507 } 5508 5509 /* NDOB and ANCHOR flags can be used only together with UNMAP */ 5510 if ((byte2 & SWS_UNMAP) == 0 && 5511 (byte2 & (SWS_NDOB | SWS_ANCHOR)) != 0) { 5512 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5513 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5514 ctl_done((union ctl_io *)ctsio); 5515 return (CTL_RETVAL_COMPLETE); 5516 } 5517 5518 /* 5519 * The first check is to make sure we're in bounds, the second 5520 * check is to catch wrap-around problems. If the lba + num blocks 5521 * is less than the lba, then we've wrapped around and the block 5522 * range is invalid anyway. 5523 */ 5524 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5525 || ((lba + num_blocks) < lba)) { 5526 ctl_set_lba_out_of_range(ctsio); 5527 ctl_done((union ctl_io *)ctsio); 5528 return (CTL_RETVAL_COMPLETE); 5529 } 5530 5531 /* Zero number of blocks means "to the last logical block" */ 5532 if (num_blocks == 0) { 5533 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5534 ctl_set_invalid_field(ctsio, 5535 /*sks_valid*/ 0, 5536 /*command*/ 1, 5537 /*field*/ 0, 5538 /*bit_valid*/ 0, 5539 /*bit*/ 0); 5540 ctl_done((union ctl_io *)ctsio); 5541 return (CTL_RETVAL_COMPLETE); 5542 } 5543 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5544 } 5545 5546 len = lun->be_lun->blocksize; 5547 5548 /* 5549 * If we've got a kernel request that hasn't been malloced yet, 5550 * malloc it and tell the caller the data buffer is here. 
5551 */
5552 if ((byte2 & SWS_NDOB) == 0 &&
5553 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5554 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
5555 ctsio->kern_data_len = len;
5556 ctsio->kern_total_len = len;
5557 ctsio->kern_data_resid = 0;
5558 ctsio->kern_rel_offset = 0;
5559 ctsio->kern_sg_entries = 0;
5560 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5561 ctsio->be_move_done = ctl_config_move_done;
5562 ctl_datamove((union ctl_io *)ctsio);
5563 
5564 return (CTL_RETVAL_COMPLETE);
5565 }
5566 
5567 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
5568 lbalen->lba = lba;
5569 lbalen->len = num_blocks;
5570 lbalen->flags = byte2;
5571 retval = lun->backend->config_write((union ctl_io *)ctsio);
5572 
5573 return (retval);
5574 }
5575 
5576 int
5577 ctl_unmap(struct ctl_scsiio *ctsio)
5578 {
5579 struct ctl_lun *lun;
5580 struct scsi_unmap *cdb;
5581 struct ctl_ptr_len_flags *ptrlen;
5582 struct scsi_unmap_header *hdr;
5583 struct scsi_unmap_desc *buf, *end, *endnz, *range;
5584 uint64_t lba;
5585 uint32_t num_blocks;
5586 int len, retval;
5587 uint8_t byte2;
5588 
5589 retval = CTL_RETVAL_COMPLETE;
5590 
5591 CTL_DEBUG_PRINT(("ctl_unmap\n"));
5592 
5593 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5594 cdb = (struct scsi_unmap *)ctsio->cdb;
5595 
5596 len = scsi_2btoul(cdb->length);
5597 byte2 = cdb->byte2;
5598 
5599 /*
5600 * If we've got a kernel request that hasn't been malloced yet,
5601 * malloc it and tell the caller the data buffer is here.
5602 */
5603 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5604 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
5605 ctsio->kern_data_len = len;
5606 ctsio->kern_total_len = len;
5607 ctsio->kern_data_resid = 0;
5608 ctsio->kern_rel_offset = 0;
5609 ctsio->kern_sg_entries = 0;
5610 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5611 ctsio->be_move_done = ctl_config_move_done;
5612 ctl_datamove((union ctl_io *)ctsio);
5613 
5614 return (CTL_RETVAL_COMPLETE);
5615 }
5616 
5617 len = ctsio->kern_total_len - ctsio->kern_data_resid;
5618 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr;
5619 if (len < sizeof (*hdr) ||
5620 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) ||
5621 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) ||
5622 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) {
5623 ctl_set_invalid_field(ctsio,
5624 /*sks_valid*/ 0,
5625 /*command*/ 0,
5626 /*field*/ 0,
5627 /*bit_valid*/ 0,
5628 /*bit*/ 0);
5629 goto done;
5630 }
5631 len = scsi_2btoul(hdr->desc_length);
5632 buf = (struct scsi_unmap_desc *)(hdr + 1);
5633 end = buf + len / sizeof(*buf);
5634 
5635 endnz = buf;
5636 for (range = buf; range < end; range++) {
5637 lba = scsi_8btou64(range->lba);
5638 num_blocks = scsi_4btoul(range->length);
5639 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
5640 || ((lba + num_blocks) < lba)) {
5641 ctl_set_lba_out_of_range(ctsio);
5642 ctl_done((union ctl_io *)ctsio);
5643 return (CTL_RETVAL_COMPLETE);
5644 }
5645 if (num_blocks != 0)
5646 endnz = range + 1;
5647 }
5648 
5649 /*
5650 * The block backend cannot handle a zero-length last range.
5651 * Filter it out and return if there is nothing left.
5652 */ 5653 len = (uint8_t *)endnz - (uint8_t *)buf; 5654 if (len == 0) { 5655 ctl_set_success(ctsio); 5656 goto done; 5657 } 5658 5659 mtx_lock(&lun->lun_lock); 5660 ptrlen = (struct ctl_ptr_len_flags *) 5661 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5662 ptrlen->ptr = (void *)buf; 5663 ptrlen->len = len; 5664 ptrlen->flags = byte2; 5665 ctl_check_blocked(lun); 5666 mtx_unlock(&lun->lun_lock); 5667 5668 retval = lun->backend->config_write((union ctl_io *)ctsio); 5669 return (retval); 5670 5671 done: 5672 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5673 free(ctsio->kern_data_ptr, M_CTL); 5674 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5675 } 5676 ctl_done((union ctl_io *)ctsio); 5677 return (CTL_RETVAL_COMPLETE); 5678 } 5679 5680 /* 5681 * Note that this function currently doesn't actually do anything inside 5682 * CTL to enforce things if the DQue bit is turned on. 5683 * 5684 * Also note that this function can't be used in the default case, because 5685 * the DQue bit isn't set in the changeable mask for the control mode page 5686 * anyway. This is just here as an example for how to implement a page 5687 * handler, and a placeholder in case we want to allow the user to turn 5688 * tagged queueing on and off. 5689 * 5690 * The D_SENSE bit handling is functional, however, and will turn 5691 * descriptor sense on and off for a given LUN. 5692 */ 5693 int 5694 ctl_control_page_handler(struct ctl_scsiio *ctsio, 5695 struct ctl_page_index *page_index, uint8_t *page_ptr) 5696 { 5697 struct scsi_control_page *current_cp, *saved_cp, *user_cp; 5698 struct ctl_lun *lun; 5699 int set_ua; 5700 uint32_t initidx; 5701 5702 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5703 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5704 set_ua = 0; 5705 5706 user_cp = (struct scsi_control_page *)page_ptr; 5707 current_cp = (struct scsi_control_page *) 5708 (page_index->page_data + (page_index->page_len * 5709 CTL_PAGE_CURRENT)); 5710 saved_cp = (struct scsi_control_page *) 5711 (page_index->page_data + (page_index->page_len * 5712 CTL_PAGE_SAVED)); 5713 5714 mtx_lock(&lun->lun_lock); 5715 if (((current_cp->rlec & SCP_DSENSE) == 0) 5716 && ((user_cp->rlec & SCP_DSENSE) != 0)) { 5717 /* 5718 * Descriptor sense is currently turned off and the user 5719 * wants to turn it on. 5720 */ 5721 current_cp->rlec |= SCP_DSENSE; 5722 saved_cp->rlec |= SCP_DSENSE; 5723 lun->flags |= CTL_LUN_SENSE_DESC; 5724 set_ua = 1; 5725 } else if (((current_cp->rlec & SCP_DSENSE) != 0) 5726 && ((user_cp->rlec & SCP_DSENSE) == 0)) { 5727 /* 5728 * Descriptor sense is currently turned on, and the user 5729 * wants to turn it off. 
5730 */
5731 current_cp->rlec &= ~SCP_DSENSE;
5732 saved_cp->rlec &= ~SCP_DSENSE;
5733 lun->flags &= ~CTL_LUN_SENSE_DESC;
5734 set_ua = 1;
5735 }
5736 if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) !=
5737 (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) {
5738 current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK;
5739 current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
5740 saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK;
5741 saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
5742 set_ua = 1;
5743 }
5744 if ((current_cp->eca_and_aen & SCP_SWP) !=
5745 (user_cp->eca_and_aen & SCP_SWP)) {
5746 current_cp->eca_and_aen &= ~SCP_SWP;
5747 current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP;
5748 saved_cp->eca_and_aen &= ~SCP_SWP;
5749 saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP;
5750 set_ua = 1;
5751 }
5752 if (set_ua != 0)
5753 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
5754 mtx_unlock(&lun->lun_lock);
5755 
5756 return (0);
5757 }
5758 
5759 int
5760 ctl_caching_sp_handler(struct ctl_scsiio *ctsio,
5761 struct ctl_page_index *page_index, uint8_t *page_ptr)
5762 {
5763 struct scsi_caching_page *current_cp, *saved_cp, *user_cp;
5764 struct ctl_lun *lun;
5765 int set_ua;
5766 uint32_t initidx;
5767 
5768 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5769 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5770 set_ua = 0;
5771 
5772 user_cp = (struct scsi_caching_page *)page_ptr;
5773 current_cp = (struct scsi_caching_page *)
5774 (page_index->page_data + (page_index->page_len *
5775 CTL_PAGE_CURRENT));
5776 saved_cp = (struct scsi_caching_page *)
5777 (page_index->page_data + (page_index->page_len *
5778 CTL_PAGE_SAVED));
5779 
5780 mtx_lock(&lun->lun_lock);
5781 if ((current_cp->flags1 & (SCP_WCE | SCP_RCD)) !=
5782 (user_cp->flags1 & (SCP_WCE | SCP_RCD))) {
5783 current_cp->flags1 &= ~(SCP_WCE | SCP_RCD);
5784 current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD);
5785 saved_cp->flags1 &= ~(SCP_WCE | SCP_RCD);
5786 saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD);
5787 set_ua = 1;
5788 }
5789 if (set_ua != 0)
5790 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
5791 mtx_unlock(&lun->lun_lock);
5792 
5793 return (0);
5794 }
5795 
5796 int
5797 ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
5798 struct ctl_page_index *page_index,
5799 uint8_t *page_ptr)
5800 {
5801 uint8_t *c;
5802 int i;
5803 
5804 c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs;
5805 ctl_time_io_secs =
5806 (c[0] << 8) |
5807 (c[1] << 0) |
5808 0;
5809 CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs));
5810 printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs);
5811 printf("page data:");
5812 for (i = 0; i < 8; i++)
5813 printf(" %.2x", page_ptr[i]);
5814 printf("\n");
5815 return (0);
5816 }
5817 
5818 int
5819 ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
5820 struct ctl_page_index *page_index,
5821 int pc)
5822 {
5823 struct copan_debugconf_subpage *page;
5824 
5825 page = (struct copan_debugconf_subpage *)(page_index->page_data +
5826 (page_index->page_len * pc));
5827 
5828 switch (pc) {
5829 case SMS_PAGE_CTRL_CHANGEABLE >> 6:
5830 case SMS_PAGE_CTRL_DEFAULT >> 6:
5831 case SMS_PAGE_CTRL_SAVED >> 6:
5832 /*
5833 * We don't update the changeable or default bits for this page.
5834 */ 5835 break; 5836 case SMS_PAGE_CTRL_CURRENT >> 6: 5837 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8; 5838 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0; 5839 break; 5840 default: 5841 #ifdef NEEDTOPORT 5842 EPRINT(0, "Invalid PC %d!!", pc); 5843 #endif /* NEEDTOPORT */ 5844 break; 5845 } 5846 return (0); 5847 } 5848 5849 5850 static int 5851 ctl_do_mode_select(union ctl_io *io) 5852 { 5853 struct scsi_mode_page_header *page_header; 5854 struct ctl_page_index *page_index; 5855 struct ctl_scsiio *ctsio; 5856 int control_dev, page_len; 5857 int page_len_offset, page_len_size; 5858 union ctl_modepage_info *modepage_info; 5859 struct ctl_lun *lun; 5860 int *len_left, *len_used; 5861 int retval, i; 5862 5863 ctsio = &io->scsiio; 5864 page_index = NULL; 5865 page_len = 0; 5866 retval = CTL_RETVAL_COMPLETE; 5867 5868 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5869 5870 if (lun->be_lun->lun_type != T_DIRECT) 5871 control_dev = 1; 5872 else 5873 control_dev = 0; 5874 5875 modepage_info = (union ctl_modepage_info *) 5876 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 5877 len_left = &modepage_info->header.len_left; 5878 len_used = &modepage_info->header.len_used; 5879 5880 do_next_page: 5881 5882 page_header = (struct scsi_mode_page_header *) 5883 (ctsio->kern_data_ptr + *len_used); 5884 5885 if (*len_left == 0) { 5886 free(ctsio->kern_data_ptr, M_CTL); 5887 ctl_set_success(ctsio); 5888 ctl_done((union ctl_io *)ctsio); 5889 return (CTL_RETVAL_COMPLETE); 5890 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 5891 5892 free(ctsio->kern_data_ptr, M_CTL); 5893 ctl_set_param_len_error(ctsio); 5894 ctl_done((union ctl_io *)ctsio); 5895 return (CTL_RETVAL_COMPLETE); 5896 5897 } else if ((page_header->page_code & SMPH_SPF) 5898 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 5899 5900 free(ctsio->kern_data_ptr, M_CTL); 5901 ctl_set_param_len_error(ctsio); 5902 ctl_done((union ctl_io *)ctsio); 5903 return (CTL_RETVAL_COMPLETE); 5904 } 5905 5906 5907 /* 5908 * XXX KDM should we do something with the block descriptor? 5909 */ 5910 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 5911 5912 if ((control_dev != 0) 5913 && (lun->mode_pages.index[i].page_flags & 5914 CTL_PAGE_FLAG_DISK_ONLY)) 5915 continue; 5916 5917 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 5918 (page_header->page_code & SMPH_PC_MASK)) 5919 continue; 5920 5921 /* 5922 * If neither page has a subpage code, then we've got a 5923 * match. 5924 */ 5925 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0) 5926 && ((page_header->page_code & SMPH_SPF) == 0)) { 5927 page_index = &lun->mode_pages.index[i]; 5928 page_len = page_header->page_length; 5929 break; 5930 } 5931 5932 /* 5933 * If both pages have subpages, then the subpage numbers 5934 * have to match. 5935 */ 5936 if ((lun->mode_pages.index[i].page_code & SMPH_SPF) 5937 && (page_header->page_code & SMPH_SPF)) { 5938 struct scsi_mode_page_header_sp *sph; 5939 5940 sph = (struct scsi_mode_page_header_sp *)page_header; 5941 5942 if (lun->mode_pages.index[i].subpage == 5943 sph->subpage) { 5944 page_index = &lun->mode_pages.index[i]; 5945 page_len = scsi_2btoul(sph->page_length); 5946 break; 5947 } 5948 } 5949 } 5950 5951 /* 5952 * If we couldn't find the page, or if we don't have a mode select 5953 * handler for it, send back an error to the user. 
5954 */ 5955 if ((page_index == NULL) 5956 || (page_index->select_handler == NULL)) { 5957 ctl_set_invalid_field(ctsio, 5958 /*sks_valid*/ 1, 5959 /*command*/ 0, 5960 /*field*/ *len_used, 5961 /*bit_valid*/ 0, 5962 /*bit*/ 0); 5963 free(ctsio->kern_data_ptr, M_CTL); 5964 ctl_done((union ctl_io *)ctsio); 5965 return (CTL_RETVAL_COMPLETE); 5966 } 5967 5968 if (page_index->page_code & SMPH_SPF) { 5969 page_len_offset = 2; 5970 page_len_size = 2; 5971 } else { 5972 page_len_size = 1; 5973 page_len_offset = 1; 5974 } 5975 5976 /* 5977 * If the length the initiator gives us isn't the one we specify in 5978 * the mode page header, or if they didn't specify enough data in 5979 * the CDB to avoid truncating this page, kick out the request. 5980 */ 5981 if ((page_len != (page_index->page_len - page_len_offset - 5982 page_len_size)) 5983 || (*len_left < page_index->page_len)) { 5984 5985 5986 ctl_set_invalid_field(ctsio, 5987 /*sks_valid*/ 1, 5988 /*command*/ 0, 5989 /*field*/ *len_used + page_len_offset, 5990 /*bit_valid*/ 0, 5991 /*bit*/ 0); 5992 free(ctsio->kern_data_ptr, M_CTL); 5993 ctl_done((union ctl_io *)ctsio); 5994 return (CTL_RETVAL_COMPLETE); 5995 } 5996 5997 /* 5998 * Run through the mode page, checking to make sure that the bits 5999 * the user changed are actually legal to change. 6000 */ 6001 for (i = 0; i < page_index->page_len; i++) { 6002 uint8_t *user_byte, *change_mask, *current_byte; 6003 int bad_bit; 6004 int j; 6005 6006 user_byte = (uint8_t *)page_header + i; 6007 change_mask = page_index->page_data + 6008 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6009 current_byte = page_index->page_data + 6010 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6011 6012 /* 6013 * Check to see whether the user set any bits in this byte 6014 * that he is not allowed to set. 6015 */ 6016 if ((*user_byte & ~(*change_mask)) == 6017 (*current_byte & ~(*change_mask))) 6018 continue; 6019 6020 /* 6021 * Go through bit by bit to determine which one is illegal. 6022 */ 6023 bad_bit = 0; 6024 for (j = 7; j >= 0; j--) { 6025 if ((((1 << j) & ~(*change_mask)) & *user_byte) != 6026 (((1 << j) & ~(*change_mask)) & *current_byte)) { 6027 bad_bit = j; 6028 break; 6029 } 6030 } 6031 ctl_set_invalid_field(ctsio, 6032 /*sks_valid*/ 1, 6033 /*command*/ 0, 6034 /*field*/ *len_used + i, 6035 /*bit_valid*/ 1, 6036 /*bit*/ bad_bit); 6037 free(ctsio->kern_data_ptr, M_CTL); 6038 ctl_done((union ctl_io *)ctsio); 6039 return (CTL_RETVAL_COMPLETE); 6040 } 6041 6042 /* 6043 * Decrement these before we call the page handler, since we may 6044 * end up getting called back one way or another before the handler 6045 * returns to this context. 6046 */ 6047 *len_left -= page_index->page_len; 6048 *len_used += page_index->page_len; 6049 6050 retval = page_index->select_handler(ctsio, page_index, 6051 (uint8_t *)page_header); 6052 6053 /* 6054 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 6055 * wait until this queued command completes to finish processing 6056 * the mode page. If it returns anything other than 6057 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 6058 * already set the sense information, freed the data pointer, and 6059 * completed the io for us. 6060 */ 6061 if (retval != CTL_RETVAL_COMPLETE) 6062 goto bailout_no_done; 6063 6064 /* 6065 * If the initiator sent us more than one page, parse the next one.
6066 */ 6067 if (*len_left > 0) 6068 goto do_next_page; 6069 6070 ctl_set_success(ctsio); 6071 free(ctsio->kern_data_ptr, M_CTL); 6072 ctl_done((union ctl_io *)ctsio); 6073 6074 bailout_no_done: 6075 6076 return (CTL_RETVAL_COMPLETE); 6077 6078 } 6079 6080 int 6081 ctl_mode_select(struct ctl_scsiio *ctsio) 6082 { 6083 int param_len, pf, sp; 6084 int header_size, bd_len; 6085 int len_left, len_used; 6086 struct ctl_page_index *page_index; 6087 struct ctl_lun *lun; 6088 int control_dev, page_len; 6089 union ctl_modepage_info *modepage_info; 6090 int retval; 6091 6092 pf = 0; 6093 sp = 0; 6094 page_len = 0; 6095 len_used = 0; 6096 len_left = 0; 6097 retval = 0; 6098 bd_len = 0; 6099 page_index = NULL; 6100 6101 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6102 6103 if (lun->be_lun->lun_type != T_DIRECT) 6104 control_dev = 1; 6105 else 6106 control_dev = 0; 6107 6108 switch (ctsio->cdb[0]) { 6109 case MODE_SELECT_6: { 6110 struct scsi_mode_select_6 *cdb; 6111 6112 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6113 6114 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6115 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6116 6117 param_len = cdb->length; 6118 header_size = sizeof(struct scsi_mode_header_6); 6119 break; 6120 } 6121 case MODE_SELECT_10: { 6122 struct scsi_mode_select_10 *cdb; 6123 6124 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6125 6126 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6127 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6128 6129 param_len = scsi_2btoul(cdb->length); 6130 header_size = sizeof(struct scsi_mode_header_10); 6131 break; 6132 } 6133 default: 6134 ctl_set_invalid_opcode(ctsio); 6135 ctl_done((union ctl_io *)ctsio); 6136 return (CTL_RETVAL_COMPLETE); 6137 break; /* NOTREACHED */ 6138 } 6139 6140 /* 6141 * From SPC-3: 6142 * "A parameter list length of zero indicates that the Data-Out Buffer 6143 * shall be empty. This condition shall not be considered as an error." 6144 */ 6145 if (param_len == 0) { 6146 ctl_set_success(ctsio); 6147 ctl_done((union ctl_io *)ctsio); 6148 return (CTL_RETVAL_COMPLETE); 6149 } 6150 6151 /* 6152 * Since we'll hit this the first time through, prior to 6153 * allocation, we don't need to free a data buffer here. 6154 */ 6155 if (param_len < header_size) { 6156 ctl_set_param_len_error(ctsio); 6157 ctl_done((union ctl_io *)ctsio); 6158 return (CTL_RETVAL_COMPLETE); 6159 } 6160 6161 /* 6162 * Allocate the data buffer and grab the user's data. In theory, 6163 * we shouldn't have to sanity check the parameter list length here 6164 * because the maximum size is 64K. We should be able to malloc 6165 * that much without too many problems. 
6166 */ 6167 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6168 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6169 ctsio->kern_data_len = param_len; 6170 ctsio->kern_total_len = param_len; 6171 ctsio->kern_data_resid = 0; 6172 ctsio->kern_rel_offset = 0; 6173 ctsio->kern_sg_entries = 0; 6174 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6175 ctsio->be_move_done = ctl_config_move_done; 6176 ctl_datamove((union ctl_io *)ctsio); 6177 6178 return (CTL_RETVAL_COMPLETE); 6179 } 6180 6181 switch (ctsio->cdb[0]) { 6182 case MODE_SELECT_6: { 6183 struct scsi_mode_header_6 *mh6; 6184 6185 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6186 bd_len = mh6->blk_desc_len; 6187 break; 6188 } 6189 case MODE_SELECT_10: { 6190 struct scsi_mode_header_10 *mh10; 6191 6192 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6193 bd_len = scsi_2btoul(mh10->blk_desc_len); 6194 break; 6195 } 6196 default: 6197 panic("Invalid CDB type %#x", ctsio->cdb[0]); 6198 break; 6199 } 6200 6201 if (param_len < (header_size + bd_len)) { 6202 free(ctsio->kern_data_ptr, M_CTL); 6203 ctl_set_param_len_error(ctsio); 6204 ctl_done((union ctl_io *)ctsio); 6205 return (CTL_RETVAL_COMPLETE); 6206 } 6207 6208 /* 6209 * Set the IO_CONT flag, so that if this I/O gets passed to 6210 * ctl_config_write_done(), it'll get passed back to 6211 * ctl_do_mode_select() for further processing, or completion if 6212 * we're all done. 6213 */ 6214 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6215 ctsio->io_cont = ctl_do_mode_select; 6216 6217 modepage_info = (union ctl_modepage_info *) 6218 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6219 6220 memset(modepage_info, 0, sizeof(*modepage_info)); 6221 6222 len_left = param_len - header_size - bd_len; 6223 len_used = header_size + bd_len; 6224 6225 modepage_info->header.len_left = len_left; 6226 modepage_info->header.len_used = len_used; 6227 6228 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6229 } 6230 6231 int 6232 ctl_mode_sense(struct ctl_scsiio *ctsio) 6233 { 6234 struct ctl_lun *lun; 6235 int pc, page_code, dbd, llba, subpage; 6236 int alloc_len, page_len, header_len, total_len; 6237 struct scsi_mode_block_descr *block_desc; 6238 struct ctl_page_index *page_index; 6239 int control_dev; 6240 6241 dbd = 0; 6242 llba = 0; 6243 block_desc = NULL; 6244 page_index = NULL; 6245 6246 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6247 6248 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6249 6250 if (lun->be_lun->lun_type != T_DIRECT) 6251 control_dev = 1; 6252 else 6253 control_dev = 0; 6254 6255 switch (ctsio->cdb[0]) { 6256 case MODE_SENSE_6: { 6257 struct scsi_mode_sense_6 *cdb; 6258 6259 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6260 6261 header_len = sizeof(struct scsi_mode_hdr_6); 6262 if (cdb->byte2 & SMS_DBD) 6263 dbd = 1; 6264 else 6265 header_len += sizeof(struct scsi_mode_block_descr); 6266 6267 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6268 page_code = cdb->page & SMS_PAGE_CODE; 6269 subpage = cdb->subpage; 6270 alloc_len = cdb->length; 6271 break; 6272 } 6273 case MODE_SENSE_10: { 6274 struct scsi_mode_sense_10 *cdb; 6275 6276 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6277 6278 header_len = sizeof(struct scsi_mode_hdr_10); 6279 6280 if (cdb->byte2 & SMS_DBD) 6281 dbd = 1; 6282 else 6283 header_len += sizeof(struct scsi_mode_block_descr); 6284 if (cdb->byte2 & SMS10_LLBAA) 6285 llba = 1; 6286 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6287 page_code = cdb->page & SMS_PAGE_CODE; 6288 subpage = cdb->subpage; 6289 alloc_len 
= scsi_2btoul(cdb->length); 6290 break; 6291 } 6292 default: 6293 ctl_set_invalid_opcode(ctsio); 6294 ctl_done((union ctl_io *)ctsio); 6295 return (CTL_RETVAL_COMPLETE); 6296 break; /* NOTREACHED */ 6297 } 6298 6299 /* 6300 * We have to make a first pass through to calculate the size of 6301 * the pages that match the user's query. Then we allocate enough 6302 * memory to hold it, and actually copy the data into the buffer. 6303 */ 6304 switch (page_code) { 6305 case SMS_ALL_PAGES_PAGE: { 6306 int i; 6307 6308 page_len = 0; 6309 6310 /* 6311 * At the moment, values other than 0 and 0xff here are 6312 * reserved according to SPC-3. 6313 */ 6314 if ((subpage != SMS_SUBPAGE_PAGE_0) 6315 && (subpage != SMS_SUBPAGE_ALL)) { 6316 ctl_set_invalid_field(ctsio, 6317 /*sks_valid*/ 1, 6318 /*command*/ 1, 6319 /*field*/ 3, 6320 /*bit_valid*/ 0, 6321 /*bit*/ 0); 6322 ctl_done((union ctl_io *)ctsio); 6323 return (CTL_RETVAL_COMPLETE); 6324 } 6325 6326 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6327 if ((control_dev != 0) 6328 && (lun->mode_pages.index[i].page_flags & 6329 CTL_PAGE_FLAG_DISK_ONLY)) 6330 continue; 6331 6332 /* 6333 * We don't use this subpage if the user didn't 6334 * request all subpages. 6335 */ 6336 if ((lun->mode_pages.index[i].subpage != 0) 6337 && (subpage == SMS_SUBPAGE_PAGE_0)) 6338 continue; 6339 6340 #if 0 6341 printf("found page %#x len %d\n", 6342 lun->mode_pages.index[i].page_code & 6343 SMPH_PC_MASK, 6344 lun->mode_pages.index[i].page_len); 6345 #endif 6346 page_len += lun->mode_pages.index[i].page_len; 6347 } 6348 break; 6349 } 6350 default: { 6351 int i; 6352 6353 page_len = 0; 6354 6355 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6356 /* Look for the right page code */ 6357 if ((lun->mode_pages.index[i].page_code & 6358 SMPH_PC_MASK) != page_code) 6359 continue; 6360 6361 /* Look for the right subpage or the subpage wildcard*/ 6362 if ((lun->mode_pages.index[i].subpage != subpage) 6363 && (subpage != SMS_SUBPAGE_ALL)) 6364 continue; 6365 6366 /* Make sure the page is supported for this dev type */ 6367 if ((control_dev != 0) 6368 && (lun->mode_pages.index[i].page_flags & 6369 CTL_PAGE_FLAG_DISK_ONLY)) 6370 continue; 6371 6372 #if 0 6373 printf("found page %#x len %d\n", 6374 lun->mode_pages.index[i].page_code & 6375 SMPH_PC_MASK, 6376 lun->mode_pages.index[i].page_len); 6377 #endif 6378 6379 page_len += lun->mode_pages.index[i].page_len; 6380 } 6381 6382 if (page_len == 0) { 6383 ctl_set_invalid_field(ctsio, 6384 /*sks_valid*/ 1, 6385 /*command*/ 1, 6386 /*field*/ 2, 6387 /*bit_valid*/ 1, 6388 /*bit*/ 5); 6389 ctl_done((union ctl_io *)ctsio); 6390 return (CTL_RETVAL_COMPLETE); 6391 } 6392 break; 6393 } 6394 } 6395 6396 total_len = header_len + page_len; 6397 #if 0 6398 printf("header_len = %d, page_len = %d, total_len = %d\n", 6399 header_len, page_len, total_len); 6400 #endif 6401 6402 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6403 ctsio->kern_sg_entries = 0; 6404 ctsio->kern_data_resid = 0; 6405 ctsio->kern_rel_offset = 0; 6406 if (total_len < alloc_len) { 6407 ctsio->residual = alloc_len - total_len; 6408 ctsio->kern_data_len = total_len; 6409 ctsio->kern_total_len = total_len; 6410 } else { 6411 ctsio->residual = 0; 6412 ctsio->kern_data_len = alloc_len; 6413 ctsio->kern_total_len = alloc_len; 6414 } 6415 6416 switch (ctsio->cdb[0]) { 6417 case MODE_SENSE_6: { 6418 struct scsi_mode_hdr_6 *header; 6419 6420 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6421 6422 header->datalen = MIN(total_len - 1, 254); 6423 if (control_dev == 0) { 
6424 header->dev_specific = 0x10; /* DPOFUA */ 6425 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6426 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6427 .eca_and_aen & SCP_SWP) != 0) 6428 header->dev_specific |= 0x80; /* WP */ 6429 } 6430 if (dbd) 6431 header->block_descr_len = 0; 6432 else 6433 header->block_descr_len = 6434 sizeof(struct scsi_mode_block_descr); 6435 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6436 break; 6437 } 6438 case MODE_SENSE_10: { 6439 struct scsi_mode_hdr_10 *header; 6440 int datalen; 6441 6442 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6443 6444 datalen = MIN(total_len - 2, 65533); 6445 scsi_ulto2b(datalen, header->datalen); 6446 if (control_dev == 0) { 6447 header->dev_specific = 0x10; /* DPOFUA */ 6448 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6449 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6450 .eca_and_aen & SCP_SWP) != 0) 6451 header->dev_specific |= 0x80; /* WP */ 6452 } 6453 if (dbd) 6454 scsi_ulto2b(0, header->block_descr_len); 6455 else 6456 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6457 header->block_descr_len); 6458 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6459 break; 6460 } 6461 default: 6462 panic("invalid CDB type %#x", ctsio->cdb[0]); 6463 break; /* NOTREACHED */ 6464 } 6465 6466 /* 6467 * If we've got a disk, use its blocksize in the block 6468 * descriptor. Otherwise, just set it to 0. 6469 */ 6470 if (dbd == 0) { 6471 if (control_dev == 0) 6472 scsi_ulto3b(lun->be_lun->blocksize, 6473 block_desc->block_len); 6474 else 6475 scsi_ulto3b(0, block_desc->block_len); 6476 } 6477 6478 switch (page_code) { 6479 case SMS_ALL_PAGES_PAGE: { 6480 int i, data_used; 6481 6482 data_used = header_len; 6483 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6484 struct ctl_page_index *page_index; 6485 6486 page_index = &lun->mode_pages.index[i]; 6487 6488 if ((control_dev != 0) 6489 && (page_index->page_flags & 6490 CTL_PAGE_FLAG_DISK_ONLY)) 6491 continue; 6492 6493 /* 6494 * We don't use this subpage if the user didn't 6495 * request all subpages. We already checked (above) 6496 * to make sure the user only specified a subpage 6497 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6498 */ 6499 if ((page_index->subpage != 0) 6500 && (subpage == SMS_SUBPAGE_PAGE_0)) 6501 continue; 6502 6503 /* 6504 * Call the handler, if it exists, to update the 6505 * page to the latest values. 6506 */ 6507 if (page_index->sense_handler != NULL) 6508 page_index->sense_handler(ctsio, page_index,pc); 6509 6510 memcpy(ctsio->kern_data_ptr + data_used, 6511 page_index->page_data + 6512 (page_index->page_len * pc), 6513 page_index->page_len); 6514 data_used += page_index->page_len; 6515 } 6516 break; 6517 } 6518 default: { 6519 int i, data_used; 6520 6521 data_used = header_len; 6522 6523 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6524 struct ctl_page_index *page_index; 6525 6526 page_index = &lun->mode_pages.index[i]; 6527 6528 /* Look for the right page code */ 6529 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6530 continue; 6531 6532 /* Look for the right subpage or the subpage wildcard*/ 6533 if ((page_index->subpage != subpage) 6534 && (subpage != SMS_SUBPAGE_ALL)) 6535 continue; 6536 6537 /* Make sure the page is supported for this dev type */ 6538 if ((control_dev != 0) 6539 && (page_index->page_flags & 6540 CTL_PAGE_FLAG_DISK_ONLY)) 6541 continue; 6542 6543 /* 6544 * Call the handler, if it exists, to update the 6545 * page to the latest values. 
6546 */ 6547 if (page_index->sense_handler != NULL) 6548 page_index->sense_handler(ctsio, page_index,pc); 6549 6550 memcpy(ctsio->kern_data_ptr + data_used, 6551 page_index->page_data + 6552 (page_index->page_len * pc), 6553 page_index->page_len); 6554 data_used += page_index->page_len; 6555 } 6556 break; 6557 } 6558 } 6559 6560 ctl_set_success(ctsio); 6561 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6562 ctsio->be_move_done = ctl_config_move_done; 6563 ctl_datamove((union ctl_io *)ctsio); 6564 return (CTL_RETVAL_COMPLETE); 6565 } 6566 6567 int 6568 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6569 struct ctl_page_index *page_index, 6570 int pc) 6571 { 6572 struct ctl_lun *lun; 6573 struct scsi_log_param_header *phdr; 6574 uint8_t *data; 6575 uint64_t val; 6576 6577 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6578 data = page_index->page_data; 6579 6580 if (lun->backend->lun_attr != NULL && 6581 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) 6582 != UINT64_MAX) { 6583 phdr = (struct scsi_log_param_header *)data; 6584 scsi_ulto2b(0x0001, phdr->param_code); 6585 phdr->param_control = SLP_LBIN | SLP_LP; 6586 phdr->param_len = 8; 6587 data = (uint8_t *)(phdr + 1); 6588 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6589 data[4] = 0x02; /* per-pool */ 6590 data += phdr->param_len; 6591 } 6592 6593 if (lun->backend->lun_attr != NULL && 6594 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) 6595 != UINT64_MAX) { 6596 phdr = (struct scsi_log_param_header *)data; 6597 scsi_ulto2b(0x0002, phdr->param_code); 6598 phdr->param_control = SLP_LBIN | SLP_LP; 6599 phdr->param_len = 8; 6600 data = (uint8_t *)(phdr + 1); 6601 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6602 data[4] = 0x01; /* per-LUN */ 6603 data += phdr->param_len; 6604 } 6605 6606 if (lun->backend->lun_attr != NULL && 6607 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail")) 6608 != UINT64_MAX) { 6609 phdr = (struct scsi_log_param_header *)data; 6610 scsi_ulto2b(0x00f1, phdr->param_code); 6611 phdr->param_control = SLP_LBIN | SLP_LP; 6612 phdr->param_len = 8; 6613 data = (uint8_t *)(phdr + 1); 6614 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6615 data[4] = 0x02; /* per-pool */ 6616 data += phdr->param_len; 6617 } 6618 6619 if (lun->backend->lun_attr != NULL && 6620 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) 6621 != UINT64_MAX) { 6622 phdr = (struct scsi_log_param_header *)data; 6623 scsi_ulto2b(0x00f2, phdr->param_code); 6624 phdr->param_control = SLP_LBIN | SLP_LP; 6625 phdr->param_len = 8; 6626 data = (uint8_t *)(phdr + 1); 6627 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6628 data[4] = 0x02; /* per-pool */ 6629 data += phdr->param_len; 6630 } 6631 6632 page_index->page_len = data - page_index->page_data; 6633 return (0); 6634 } 6635 6636 int 6637 ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, 6638 struct ctl_page_index *page_index, 6639 int pc) 6640 { 6641 struct ctl_lun *lun; 6642 struct stat_page *data; 6643 uint64_t rn, wn, rb, wb; 6644 struct bintime rt, wt; 6645 int i; 6646 6647 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6648 data = (struct stat_page *)page_index->page_data; 6649 6650 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); 6651 data->sap.hdr.param_control = SLP_LBIN; 6652 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - 6653 sizeof(struct scsi_log_param_header); 6654 rn = wn = rb = wb = 0; 6655 bintime_clear(&rt); 6656 bintime_clear(&wt); 6657 for (i = 0; i < 
CTL_MAX_PORTS; i++) { 6658 rn += lun->stats.ports[i].operations[CTL_STATS_READ]; 6659 wn += lun->stats.ports[i].operations[CTL_STATS_WRITE]; 6660 rb += lun->stats.ports[i].bytes[CTL_STATS_READ]; 6661 wb += lun->stats.ports[i].bytes[CTL_STATS_WRITE]; 6662 bintime_add(&rt, &lun->stats.ports[i].time[CTL_STATS_READ]); 6663 bintime_add(&wt, &lun->stats.ports[i].time[CTL_STATS_WRITE]); 6664 } 6665 scsi_u64to8b(rn, data->sap.read_num); 6666 scsi_u64to8b(wn, data->sap.write_num); 6667 if (lun->stats.blocksize > 0) { 6668 scsi_u64to8b(wb / lun->stats.blocksize, 6669 data->sap.recvieved_lba); 6670 scsi_u64to8b(rb / lun->stats.blocksize, 6671 data->sap.transmitted_lba); 6672 } 6673 scsi_u64to8b((uint64_t)rt.sec * 1000 + rt.frac / (UINT64_MAX / 1000), 6674 data->sap.read_int); 6675 scsi_u64to8b((uint64_t)wt.sec * 1000 + wt.frac / (UINT64_MAX / 1000), 6676 data->sap.write_int); 6677 scsi_u64to8b(0, data->sap.weighted_num); 6678 scsi_u64to8b(0, data->sap.weighted_int); 6679 scsi_ulto2b(SLP_IT, data->it.hdr.param_code); 6680 data->it.hdr.param_control = SLP_LBIN; 6681 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - 6682 sizeof(struct scsi_log_param_header); 6683 #ifdef CTL_TIME_IO 6684 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); 6685 #endif 6686 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); 6687 data->ti.hdr.param_control = SLP_LBIN; 6688 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - 6689 sizeof(struct scsi_log_param_header); 6690 scsi_ulto4b(3, data->ti.exponent); 6691 scsi_ulto4b(1, data->ti.integer); 6692 6693 page_index->page_len = sizeof(*data); 6694 return (0); 6695 } 6696 6697 int 6698 ctl_log_sense(struct ctl_scsiio *ctsio) 6699 { 6700 struct ctl_lun *lun; 6701 int i, pc, page_code, subpage; 6702 int alloc_len, total_len; 6703 struct ctl_page_index *page_index; 6704 struct scsi_log_sense *cdb; 6705 struct scsi_log_header *header; 6706 6707 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 6708 6709 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6710 cdb = (struct scsi_log_sense *)ctsio->cdb; 6711 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 6712 page_code = cdb->page & SLS_PAGE_CODE; 6713 subpage = cdb->subpage; 6714 alloc_len = scsi_2btoul(cdb->length); 6715 6716 page_index = NULL; 6717 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 6718 page_index = &lun->log_pages.index[i]; 6719 6720 /* Look for the right page code */ 6721 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 6722 continue; 6723 6724 /* Look for the right subpage or the subpage wildcard */ 6725 if (page_index->subpage != subpage) 6726 continue; 6727 6728 break; 6729 } 6730 if (i >= CTL_NUM_LOG_PAGES) { 6731 ctl_set_invalid_field(ctsio, 6732 /*sks_valid*/ 1, 6733 /*command*/ 1, 6734 /*field*/ 2, 6735 /*bit_valid*/ 0, 6736 /*bit*/ 0); 6737 ctl_done((union ctl_io *)ctsio); 6738 return (CTL_RETVAL_COMPLETE); 6739 } 6740 6741 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 6742 6743 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6744 ctsio->kern_sg_entries = 0; 6745 ctsio->kern_data_resid = 0; 6746 ctsio->kern_rel_offset = 0; 6747 if (total_len < alloc_len) { 6748 ctsio->residual = alloc_len - total_len; 6749 ctsio->kern_data_len = total_len; 6750 ctsio->kern_total_len = total_len; 6751 } else { 6752 ctsio->residual = 0; 6753 ctsio->kern_data_len = alloc_len; 6754 ctsio->kern_total_len = alloc_len; 6755 } 6756 6757 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 6758 header->page = page_index->page_code; 6759 if
(page_index->subpage) { 6760 header->page |= SL_SPF; 6761 header->subpage = page_index->subpage; 6762 } 6763 scsi_ulto2b(page_index->page_len, header->datalen); 6764 6765 /* 6766 * Call the handler, if it exists, to update the 6767 * page to the latest values. 6768 */ 6769 if (page_index->sense_handler != NULL) 6770 page_index->sense_handler(ctsio, page_index, pc); 6771 6772 memcpy(header + 1, page_index->page_data, page_index->page_len); 6773 6774 ctl_set_success(ctsio); 6775 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6776 ctsio->be_move_done = ctl_config_move_done; 6777 ctl_datamove((union ctl_io *)ctsio); 6778 return (CTL_RETVAL_COMPLETE); 6779 } 6780 6781 int 6782 ctl_read_capacity(struct ctl_scsiio *ctsio) 6783 { 6784 struct scsi_read_capacity *cdb; 6785 struct scsi_read_capacity_data *data; 6786 struct ctl_lun *lun; 6787 uint32_t lba; 6788 6789 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 6790 6791 cdb = (struct scsi_read_capacity *)ctsio->cdb; 6792 6793 lba = scsi_4btoul(cdb->addr); 6794 if (((cdb->pmi & SRC_PMI) == 0) 6795 && (lba != 0)) { 6796 ctl_set_invalid_field(/*ctsio*/ ctsio, 6797 /*sks_valid*/ 1, 6798 /*command*/ 1, 6799 /*field*/ 2, 6800 /*bit_valid*/ 0, 6801 /*bit*/ 0); 6802 ctl_done((union ctl_io *)ctsio); 6803 return (CTL_RETVAL_COMPLETE); 6804 } 6805 6806 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6807 6808 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6809 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 6810 ctsio->residual = 0; 6811 ctsio->kern_data_len = sizeof(*data); 6812 ctsio->kern_total_len = sizeof(*data); 6813 ctsio->kern_data_resid = 0; 6814 ctsio->kern_rel_offset = 0; 6815 ctsio->kern_sg_entries = 0; 6816 6817 /* 6818 * If the maximum LBA is greater than 0xfffffffe, the user must 6819 * issue a SERVICE ACTION IN (16) command, with the read capacity 6820 * service action set. 6821 */ 6822 if (lun->be_lun->maxlba > 0xfffffffe) 6823 scsi_ulto4b(0xffffffff, data->addr); 6824 else 6825 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 6826 6827 /* 6828 * XXX KDM this may not be 512 bytes...
6829 */ 6830 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6831 6832 ctl_set_success(ctsio); 6833 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6834 ctsio->be_move_done = ctl_config_move_done; 6835 ctl_datamove((union ctl_io *)ctsio); 6836 return (CTL_RETVAL_COMPLETE); 6837 } 6838 6839 int 6840 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 6841 { 6842 struct scsi_read_capacity_16 *cdb; 6843 struct scsi_read_capacity_data_long *data; 6844 struct ctl_lun *lun; 6845 uint64_t lba; 6846 uint32_t alloc_len; 6847 6848 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 6849 6850 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 6851 6852 alloc_len = scsi_4btoul(cdb->alloc_len); 6853 lba = scsi_8btou64(cdb->addr); 6854 6855 if ((cdb->reladr & SRC16_PMI) 6856 && (lba != 0)) { 6857 ctl_set_invalid_field(/*ctsio*/ ctsio, 6858 /*sks_valid*/ 1, 6859 /*command*/ 1, 6860 /*field*/ 2, 6861 /*bit_valid*/ 0, 6862 /*bit*/ 0); 6863 ctl_done((union ctl_io *)ctsio); 6864 return (CTL_RETVAL_COMPLETE); 6865 } 6866 6867 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6868 6869 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6870 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 6871 6872 if (sizeof(*data) < alloc_len) { 6873 ctsio->residual = alloc_len - sizeof(*data); 6874 ctsio->kern_data_len = sizeof(*data); 6875 ctsio->kern_total_len = sizeof(*data); 6876 } else { 6877 ctsio->residual = 0; 6878 ctsio->kern_data_len = alloc_len; 6879 ctsio->kern_total_len = alloc_len; 6880 } 6881 ctsio->kern_data_resid = 0; 6882 ctsio->kern_rel_offset = 0; 6883 ctsio->kern_sg_entries = 0; 6884 6885 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 6886 /* XXX KDM this may not be 512 bytes... */ 6887 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6888 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 6889 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 6890 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 6891 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 6892 6893 ctl_set_success(ctsio); 6894 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6895 ctsio->be_move_done = ctl_config_move_done; 6896 ctl_datamove((union ctl_io *)ctsio); 6897 return (CTL_RETVAL_COMPLETE); 6898 } 6899 6900 int 6901 ctl_get_lba_status(struct ctl_scsiio *ctsio) 6902 { 6903 struct scsi_get_lba_status *cdb; 6904 struct scsi_get_lba_status_data *data; 6905 struct ctl_lun *lun; 6906 struct ctl_lba_len_flags *lbalen; 6907 uint64_t lba; 6908 uint32_t alloc_len, total_len; 6909 int retval; 6910 6911 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 6912 6913 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6914 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 6915 lba = scsi_8btou64(cdb->addr); 6916 alloc_len = scsi_4btoul(cdb->alloc_len); 6917 6918 if (lba > lun->be_lun->maxlba) { 6919 ctl_set_lba_out_of_range(ctsio); 6920 ctl_done((union ctl_io *)ctsio); 6921 return (CTL_RETVAL_COMPLETE); 6922 } 6923 6924 total_len = sizeof(*data) + sizeof(data->descr[0]); 6925 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6926 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 6927 6928 if (total_len < alloc_len) { 6929 ctsio->residual = alloc_len - total_len; 6930 ctsio->kern_data_len = total_len; 6931 ctsio->kern_total_len = total_len; 6932 } else { 6933 ctsio->residual = 0; 6934 ctsio->kern_data_len = alloc_len; 6935 ctsio->kern_total_len = alloc_len; 6936 } 6937 ctsio->kern_data_resid = 0; 6938 ctsio->kern_rel_offset = 0; 6939 
ctsio->kern_sg_entries = 0; 6940 6941 /* Fill dummy data in case backend can't tell anything. */ 6942 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 6943 scsi_u64to8b(lba, data->descr[0].addr); 6944 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 6945 data->descr[0].length); 6946 data->descr[0].status = 0; /* Mapped or unknown. */ 6947 6948 ctl_set_success(ctsio); 6949 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6950 ctsio->be_move_done = ctl_config_move_done; 6951 6952 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 6953 lbalen->lba = lba; 6954 lbalen->len = total_len; 6955 lbalen->flags = 0; 6956 retval = lun->backend->config_read((union ctl_io *)ctsio); 6957 return (CTL_RETVAL_COMPLETE); 6958 } 6959 6960 int 6961 ctl_read_defect(struct ctl_scsiio *ctsio) 6962 { 6963 struct scsi_read_defect_data_10 *ccb10; 6964 struct scsi_read_defect_data_12 *ccb12; 6965 struct scsi_read_defect_data_hdr_10 *data10; 6966 struct scsi_read_defect_data_hdr_12 *data12; 6967 uint32_t alloc_len, data_len; 6968 uint8_t format; 6969 6970 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 6971 6972 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 6973 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 6974 format = ccb10->format; 6975 alloc_len = scsi_2btoul(ccb10->alloc_length); 6976 data_len = sizeof(*data10); 6977 } else { 6978 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 6979 format = ccb12->format; 6980 alloc_len = scsi_4btoul(ccb12->alloc_length); 6981 data_len = sizeof(*data12); 6982 } 6983 if (alloc_len == 0) { 6984 ctl_set_success(ctsio); 6985 ctl_done((union ctl_io *)ctsio); 6986 return (CTL_RETVAL_COMPLETE); 6987 } 6988 6989 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 6990 if (data_len < alloc_len) { 6991 ctsio->residual = alloc_len - data_len; 6992 ctsio->kern_data_len = data_len; 6993 ctsio->kern_total_len = data_len; 6994 } else { 6995 ctsio->residual = 0; 6996 ctsio->kern_data_len = alloc_len; 6997 ctsio->kern_total_len = alloc_len; 6998 } 6999 ctsio->kern_data_resid = 0; 7000 ctsio->kern_rel_offset = 0; 7001 ctsio->kern_sg_entries = 0; 7002 7003 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7004 data10 = (struct scsi_read_defect_data_hdr_10 *) 7005 ctsio->kern_data_ptr; 7006 data10->format = format; 7007 scsi_ulto2b(0, data10->length); 7008 } else { 7009 data12 = (struct scsi_read_defect_data_hdr_12 *) 7010 ctsio->kern_data_ptr; 7011 data12->format = format; 7012 scsi_ulto2b(0, data12->generation); 7013 scsi_ulto4b(0, data12->length); 7014 } 7015 7016 ctl_set_success(ctsio); 7017 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7018 ctsio->be_move_done = ctl_config_move_done; 7019 ctl_datamove((union ctl_io *)ctsio); 7020 return (CTL_RETVAL_COMPLETE); 7021 } 7022 7023 int 7024 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7025 { 7026 struct scsi_maintenance_in *cdb; 7027 int retval; 7028 int alloc_len, ext, total_len = 0, g, pc, pg, gs, os; 7029 int num_target_port_groups, num_target_ports; 7030 struct ctl_lun *lun; 7031 struct ctl_softc *softc; 7032 struct ctl_port *port; 7033 struct scsi_target_group_data *rtg_ptr; 7034 struct scsi_target_group_data_extended *rtg_ext_ptr; 7035 struct scsi_target_port_group_descriptor *tpg_desc; 7036 7037 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7038 7039 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7040 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7041 softc = lun->ctl_softc; 7042 7043 retval = CTL_RETVAL_COMPLETE; 7044 7045 switch (cdb->byte2 & 
STG_PDF_MASK) { 7046 case STG_PDF_LENGTH: 7047 ext = 0; 7048 break; 7049 case STG_PDF_EXTENDED: 7050 ext = 1; 7051 break; 7052 default: 7053 ctl_set_invalid_field(/*ctsio*/ ctsio, 7054 /*sks_valid*/ 1, 7055 /*command*/ 1, 7056 /*field*/ 2, 7057 /*bit_valid*/ 1, 7058 /*bit*/ 5); 7059 ctl_done((union ctl_io *)ctsio); 7060 return(retval); 7061 } 7062 7063 if (softc->is_single) 7064 num_target_port_groups = 1; 7065 else 7066 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 7067 num_target_ports = 0; 7068 mtx_lock(&softc->ctl_lock); 7069 STAILQ_FOREACH(port, &softc->port_list, links) { 7070 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7071 continue; 7072 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 7073 continue; 7074 num_target_ports++; 7075 } 7076 mtx_unlock(&softc->ctl_lock); 7077 7078 if (ext) 7079 total_len = sizeof(struct scsi_target_group_data_extended); 7080 else 7081 total_len = sizeof(struct scsi_target_group_data); 7082 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7083 num_target_port_groups + 7084 sizeof(struct scsi_target_port_descriptor) * num_target_ports; 7085 7086 alloc_len = scsi_4btoul(cdb->length); 7087 7088 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7089 7090 ctsio->kern_sg_entries = 0; 7091 7092 if (total_len < alloc_len) { 7093 ctsio->residual = alloc_len - total_len; 7094 ctsio->kern_data_len = total_len; 7095 ctsio->kern_total_len = total_len; 7096 } else { 7097 ctsio->residual = 0; 7098 ctsio->kern_data_len = alloc_len; 7099 ctsio->kern_total_len = alloc_len; 7100 } 7101 ctsio->kern_data_resid = 0; 7102 ctsio->kern_rel_offset = 0; 7103 7104 if (ext) { 7105 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7106 ctsio->kern_data_ptr; 7107 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7108 rtg_ext_ptr->format_type = 0x10; 7109 rtg_ext_ptr->implicit_transition_time = 0; 7110 tpg_desc = &rtg_ext_ptr->groups[0]; 7111 } else { 7112 rtg_ptr = (struct scsi_target_group_data *) 7113 ctsio->kern_data_ptr; 7114 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7115 tpg_desc = &rtg_ptr->groups[0]; 7116 } 7117 7118 mtx_lock(&softc->ctl_lock); 7119 pg = softc->port_min / softc->port_cnt; 7120 if (softc->ha_link == CTL_HA_LINK_OFFLINE) 7121 gs = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7122 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) 7123 gs = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7124 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) 7125 gs = TPG_ASYMMETRIC_ACCESS_STANDBY; 7126 else 7127 gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7128 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7129 os = gs; 7130 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7131 } else 7132 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7133 for (g = 0; g < num_target_port_groups; g++) { 7134 tpg_desc->pref_state = (g == pg) ? gs : os; 7135 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7136 TPG_U_SUP | TPG_T_SUP; 7137 scsi_ulto2b(g + 1, tpg_desc->target_port_group); 7138 tpg_desc->status = TPG_IMPLICIT; 7139 pc = 0; 7140 STAILQ_FOREACH(port, &softc->port_list, links) { 7141 if (port->targ_port < g * softc->port_cnt || 7142 port->targ_port >= (g + 1) * softc->port_cnt) 7143 continue; 7144 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7145 continue; 7146 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 7147 continue; 7148 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 
7149 relative_target_port_identifier); 7150 pc++; 7151 } 7152 tpg_desc->target_port_count = pc; 7153 tpg_desc = (struct scsi_target_port_group_descriptor *) 7154 &tpg_desc->descriptors[pc]; 7155 } 7156 mtx_unlock(&softc->ctl_lock); 7157 7158 ctl_set_success(ctsio); 7159 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7160 ctsio->be_move_done = ctl_config_move_done; 7161 ctl_datamove((union ctl_io *)ctsio); 7162 return(retval); 7163 } 7164 7165 int 7166 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7167 { 7168 struct ctl_lun *lun; 7169 struct scsi_report_supported_opcodes *cdb; 7170 const struct ctl_cmd_entry *entry, *sentry; 7171 struct scsi_report_supported_opcodes_all *all; 7172 struct scsi_report_supported_opcodes_descr *descr; 7173 struct scsi_report_supported_opcodes_one *one; 7174 int retval; 7175 int alloc_len, total_len; 7176 int opcode, service_action, i, j, num; 7177 7178 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7179 7180 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7181 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7182 7183 retval = CTL_RETVAL_COMPLETE; 7184 7185 opcode = cdb->requested_opcode; 7186 service_action = scsi_2btoul(cdb->requested_service_action); 7187 switch (cdb->options & RSO_OPTIONS_MASK) { 7188 case RSO_OPTIONS_ALL: 7189 num = 0; 7190 for (i = 0; i < 256; i++) { 7191 entry = &ctl_cmd_table[i]; 7192 if (entry->flags & CTL_CMD_FLAG_SA5) { 7193 for (j = 0; j < 32; j++) { 7194 sentry = &((const struct ctl_cmd_entry *) 7195 entry->execute)[j]; 7196 if (ctl_cmd_applicable( 7197 lun->be_lun->lun_type, sentry)) 7198 num++; 7199 } 7200 } else { 7201 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7202 entry)) 7203 num++; 7204 } 7205 } 7206 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7207 num * sizeof(struct scsi_report_supported_opcodes_descr); 7208 break; 7209 case RSO_OPTIONS_OC: 7210 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7211 ctl_set_invalid_field(/*ctsio*/ ctsio, 7212 /*sks_valid*/ 1, 7213 /*command*/ 1, 7214 /*field*/ 2, 7215 /*bit_valid*/ 1, 7216 /*bit*/ 2); 7217 ctl_done((union ctl_io *)ctsio); 7218 return (CTL_RETVAL_COMPLETE); 7219 } 7220 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7221 break; 7222 case RSO_OPTIONS_OC_SA: 7223 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7224 service_action >= 32) { 7225 ctl_set_invalid_field(/*ctsio*/ ctsio, 7226 /*sks_valid*/ 1, 7227 /*command*/ 1, 7228 /*field*/ 2, 7229 /*bit_valid*/ 1, 7230 /*bit*/ 2); 7231 ctl_done((union ctl_io *)ctsio); 7232 return (CTL_RETVAL_COMPLETE); 7233 } 7234 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7235 break; 7236 default: 7237 ctl_set_invalid_field(/*ctsio*/ ctsio, 7238 /*sks_valid*/ 1, 7239 /*command*/ 1, 7240 /*field*/ 2, 7241 /*bit_valid*/ 1, 7242 /*bit*/ 2); 7243 ctl_done((union ctl_io *)ctsio); 7244 return (CTL_RETVAL_COMPLETE); 7245 } 7246 7247 alloc_len = scsi_4btoul(cdb->length); 7248 7249 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7250 7251 ctsio->kern_sg_entries = 0; 7252 7253 if (total_len < alloc_len) { 7254 ctsio->residual = alloc_len - total_len; 7255 ctsio->kern_data_len = total_len; 7256 ctsio->kern_total_len = total_len; 7257 } else { 7258 ctsio->residual = 0; 7259 ctsio->kern_data_len = alloc_len; 7260 ctsio->kern_total_len = alloc_len; 7261 } 7262 ctsio->kern_data_resid = 0; 7263 ctsio->kern_rel_offset = 0; 7264 7265 switch (cdb->options & RSO_OPTIONS_MASK) { 7266 case RSO_OPTIONS_ALL: 7267 all = (struct 
scsi_report_supported_opcodes_all *) 7268 ctsio->kern_data_ptr; 7269 num = 0; 7270 for (i = 0; i < 256; i++) { 7271 entry = &ctl_cmd_table[i]; 7272 if (entry->flags & CTL_CMD_FLAG_SA5) { 7273 for (j = 0; j < 32; j++) { 7274 sentry = &((const struct ctl_cmd_entry *) 7275 entry->execute)[j]; 7276 if (!ctl_cmd_applicable( 7277 lun->be_lun->lun_type, sentry)) 7278 continue; 7279 descr = &all->descr[num++]; 7280 descr->opcode = i; 7281 scsi_ulto2b(j, descr->service_action); 7282 descr->flags = RSO_SERVACTV; 7283 scsi_ulto2b(sentry->length, 7284 descr->cdb_length); 7285 } 7286 } else { 7287 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7288 entry)) 7289 continue; 7290 descr = &all->descr[num++]; 7291 descr->opcode = i; 7292 scsi_ulto2b(0, descr->service_action); 7293 descr->flags = 0; 7294 scsi_ulto2b(entry->length, descr->cdb_length); 7295 } 7296 } 7297 scsi_ulto4b( 7298 num * sizeof(struct scsi_report_supported_opcodes_descr), 7299 all->length); 7300 break; 7301 case RSO_OPTIONS_OC: 7302 one = (struct scsi_report_supported_opcodes_one *) 7303 ctsio->kern_data_ptr; 7304 entry = &ctl_cmd_table[opcode]; 7305 goto fill_one; 7306 case RSO_OPTIONS_OC_SA: 7307 one = (struct scsi_report_supported_opcodes_one *) 7308 ctsio->kern_data_ptr; 7309 entry = &ctl_cmd_table[opcode]; 7310 entry = &((const struct ctl_cmd_entry *) 7311 entry->execute)[service_action]; 7312 fill_one: 7313 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7314 one->support = 3; 7315 scsi_ulto2b(entry->length, one->cdb_length); 7316 one->cdb_usage[0] = opcode; 7317 memcpy(&one->cdb_usage[1], entry->usage, 7318 entry->length - 1); 7319 } else 7320 one->support = 1; 7321 break; 7322 } 7323 7324 ctl_set_success(ctsio); 7325 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7326 ctsio->be_move_done = ctl_config_move_done; 7327 ctl_datamove((union ctl_io *)ctsio); 7328 return(retval); 7329 } 7330 7331 int 7332 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7333 { 7334 struct scsi_report_supported_tmf *cdb; 7335 struct scsi_report_supported_tmf_data *data; 7336 int retval; 7337 int alloc_len, total_len; 7338 7339 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7340 7341 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7342 7343 retval = CTL_RETVAL_COMPLETE; 7344 7345 total_len = sizeof(struct scsi_report_supported_tmf_data); 7346 alloc_len = scsi_4btoul(cdb->length); 7347 7348 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7349 7350 ctsio->kern_sg_entries = 0; 7351 7352 if (total_len < alloc_len) { 7353 ctsio->residual = alloc_len - total_len; 7354 ctsio->kern_data_len = total_len; 7355 ctsio->kern_total_len = total_len; 7356 } else { 7357 ctsio->residual = 0; 7358 ctsio->kern_data_len = alloc_len; 7359 ctsio->kern_total_len = alloc_len; 7360 } 7361 ctsio->kern_data_resid = 0; 7362 ctsio->kern_rel_offset = 0; 7363 7364 data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr; 7365 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_TRS; 7366 data->byte2 |= RST_ITNRS; 7367 7368 ctl_set_success(ctsio); 7369 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7370 ctsio->be_move_done = ctl_config_move_done; 7371 ctl_datamove((union ctl_io *)ctsio); 7372 return (retval); 7373 } 7374 7375 int 7376 ctl_report_timestamp(struct ctl_scsiio *ctsio) 7377 { 7378 struct scsi_report_timestamp *cdb; 7379 struct scsi_report_timestamp_data *data; 7380 struct timeval tv; 7381 int64_t timestamp; 7382 int retval; 7383 int alloc_len, total_len; 7384 7385 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7386 7387 cdb = 
(struct scsi_report_timestamp *)ctsio->cdb; 7388 7389 retval = CTL_RETVAL_COMPLETE; 7390 7391 total_len = sizeof(struct scsi_report_timestamp_data); 7392 alloc_len = scsi_4btoul(cdb->length); 7393 7394 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7395 7396 ctsio->kern_sg_entries = 0; 7397 7398 if (total_len < alloc_len) { 7399 ctsio->residual = alloc_len - total_len; 7400 ctsio->kern_data_len = total_len; 7401 ctsio->kern_total_len = total_len; 7402 } else { 7403 ctsio->residual = 0; 7404 ctsio->kern_data_len = alloc_len; 7405 ctsio->kern_total_len = alloc_len; 7406 } 7407 ctsio->kern_data_resid = 0; 7408 ctsio->kern_rel_offset = 0; 7409 7410 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7411 scsi_ulto2b(sizeof(*data) - 2, data->length); 7412 data->origin = RTS_ORIG_OUTSIDE; 7413 getmicrotime(&tv); 7414 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7415 scsi_ulto4b(timestamp >> 16, data->timestamp); 7416 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7417 7418 ctl_set_success(ctsio); 7419 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7420 ctsio->be_move_done = ctl_config_move_done; 7421 ctl_datamove((union ctl_io *)ctsio); 7422 return (retval); 7423 } 7424 7425 int 7426 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7427 { 7428 struct scsi_per_res_in *cdb; 7429 int alloc_len, total_len = 0; 7430 /* struct scsi_per_res_in_rsrv in_data; */ 7431 struct ctl_lun *lun; 7432 struct ctl_softc *softc; 7433 uint64_t key; 7434 7435 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7436 7437 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7438 7439 alloc_len = scsi_2btoul(cdb->length); 7440 7441 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7442 softc = lun->ctl_softc; 7443 7444 retry: 7445 mtx_lock(&lun->lun_lock); 7446 switch (cdb->action) { 7447 case SPRI_RK: /* read keys */ 7448 total_len = sizeof(struct scsi_per_res_in_keys) + 7449 lun->pr_key_count * 7450 sizeof(struct scsi_per_res_key); 7451 break; 7452 case SPRI_RR: /* read reservation */ 7453 if (lun->flags & CTL_LUN_PR_RESERVED) 7454 total_len = sizeof(struct scsi_per_res_in_rsrv); 7455 else 7456 total_len = sizeof(struct scsi_per_res_in_header); 7457 break; 7458 case SPRI_RC: /* report capabilities */ 7459 total_len = sizeof(struct scsi_per_res_cap); 7460 break; 7461 case SPRI_RS: /* read full status */ 7462 total_len = sizeof(struct scsi_per_res_in_header) + 7463 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7464 lun->pr_key_count; 7465 break; 7466 default: 7467 panic("Invalid PR type %x", cdb->action); 7468 } 7469 mtx_unlock(&lun->lun_lock); 7470 7471 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7472 7473 if (total_len < alloc_len) { 7474 ctsio->residual = alloc_len - total_len; 7475 ctsio->kern_data_len = total_len; 7476 ctsio->kern_total_len = total_len; 7477 } else { 7478 ctsio->residual = 0; 7479 ctsio->kern_data_len = alloc_len; 7480 ctsio->kern_total_len = alloc_len; 7481 } 7482 7483 ctsio->kern_data_resid = 0; 7484 ctsio->kern_rel_offset = 0; 7485 ctsio->kern_sg_entries = 0; 7486 7487 mtx_lock(&lun->lun_lock); 7488 switch (cdb->action) { 7489 case SPRI_RK: { // read keys 7490 struct scsi_per_res_in_keys *res_keys; 7491 int i, key_count; 7492 7493 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7494 7495 /* 7496 * We had to drop the lock to allocate our buffer, which 7497 * leaves time for someone to come in with another 7498 * persistent reservation. 
(That is unlikely, though, 7499 * since this should be the only persistent reservation 7500 * command active right now.) 7501 */ 7502 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7503 (lun->pr_key_count * 7504 sizeof(struct scsi_per_res_key)))){ 7505 mtx_unlock(&lun->lun_lock); 7506 free(ctsio->kern_data_ptr, M_CTL); 7507 printf("%s: reservation length changed, retrying\n", 7508 __func__); 7509 goto retry; 7510 } 7511 7512 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); 7513 7514 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7515 lun->pr_key_count, res_keys->header.length); 7516 7517 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { 7518 if ((key = ctl_get_prkey(lun, i)) == 0) 7519 continue; 7520 7521 /* 7522 * We used lun->pr_key_count to calculate the 7523 * size to allocate. If it turns out the number of 7524 * initiators with the registered flag set is 7525 * larger than that (i.e. they haven't been kept in 7526 * sync), we've got a problem. 7527 */ 7528 if (key_count >= lun->pr_key_count) { 7529 #ifdef NEEDTOPORT 7530 csevent_log(CSC_CTL | CSC_SHELF_SW | 7531 CTL_PR_ERROR, 7532 csevent_LogType_Fault, 7533 csevent_AlertLevel_Yellow, 7534 csevent_FRU_ShelfController, 7535 csevent_FRU_Firmware, 7536 csevent_FRU_Unknown, 7537 "registered keys %d >= key " 7538 "count %d", key_count, 7539 lun->pr_key_count); 7540 #endif 7541 key_count++; 7542 continue; 7543 } 7544 scsi_u64to8b(key, res_keys->keys[key_count].key); 7545 key_count++; 7546 } 7547 break; 7548 } 7549 case SPRI_RR: { // read reservation 7550 struct scsi_per_res_in_rsrv *res; 7551 int tmp_len, header_only; 7552 7553 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7554 7555 scsi_ulto4b(lun->PRGeneration, res->header.generation); 7556 7557 if (lun->flags & CTL_LUN_PR_RESERVED) 7558 { 7559 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7560 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7561 res->header.length); 7562 header_only = 0; 7563 } else { 7564 tmp_len = sizeof(struct scsi_per_res_in_header); 7565 scsi_ulto4b(0, res->header.length); 7566 header_only = 1; 7567 } 7568 7569 /* 7570 * We had to drop the lock to allocate our buffer, which 7571 * leaves time for someone to come in with another 7572 * persistent reservation. (That is unlikely, though, 7573 * since this should be the only persistent reservation 7574 * command active right now.) 7575 */ 7576 if (tmp_len != total_len) { 7577 mtx_unlock(&lun->lun_lock); 7578 free(ctsio->kern_data_ptr, M_CTL); 7579 printf("%s: reservation status changed, retrying\n", 7580 __func__); 7581 goto retry; 7582 } 7583 7584 /* 7585 * No reservation held, so we're done. 7586 */ 7587 if (header_only != 0) 7588 break; 7589 7590 /* 7591 * If the registration is an All Registrants type, the key 7592 * is 0, since it doesn't really matter. 
7593 */ 7594 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7595 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7596 res->data.reservation); 7597 } 7598 res->data.scopetype = lun->res_type; 7599 break; 7600 } 7601 case SPRI_RC: //report capabilities 7602 { 7603 struct scsi_per_res_cap *res_cap; 7604 uint16_t type_mask; 7605 7606 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7607 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7608 res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_5; 7609 type_mask = SPRI_TM_WR_EX_AR | 7610 SPRI_TM_EX_AC_RO | 7611 SPRI_TM_WR_EX_RO | 7612 SPRI_TM_EX_AC | 7613 SPRI_TM_WR_EX | 7614 SPRI_TM_EX_AC_AR; 7615 scsi_ulto2b(type_mask, res_cap->type_mask); 7616 break; 7617 } 7618 case SPRI_RS: { // read full status 7619 struct scsi_per_res_in_full *res_status; 7620 struct scsi_per_res_in_full_desc *res_desc; 7621 struct ctl_port *port; 7622 int i, len; 7623 7624 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7625 7626 /* 7627 * We had to drop the lock to allocate our buffer, which 7628 * leaves time for someone to come in with another 7629 * persistent reservation. (That is unlikely, though, 7630 * since this should be the only persistent reservation 7631 * command active right now.) 7632 */ 7633 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7634 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7635 lun->pr_key_count)){ 7636 mtx_unlock(&lun->lun_lock); 7637 free(ctsio->kern_data_ptr, M_CTL); 7638 printf("%s: reservation length changed, retrying\n", 7639 __func__); 7640 goto retry; 7641 } 7642 7643 scsi_ulto4b(lun->PRGeneration, res_status->header.generation); 7644 7645 res_desc = &res_status->desc[0]; 7646 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7647 if ((key = ctl_get_prkey(lun, i)) == 0) 7648 continue; 7649 7650 scsi_u64to8b(key, res_desc->res_key.key); 7651 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7652 (lun->pr_res_idx == i || 7653 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7654 res_desc->flags = SPRI_FULL_R_HOLDER; 7655 res_desc->scopetype = lun->res_type; 7656 } 7657 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7658 res_desc->rel_trgt_port_id); 7659 len = 0; 7660 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; 7661 if (port != NULL) 7662 len = ctl_create_iid(port, 7663 i % CTL_MAX_INIT_PER_PORT, 7664 res_desc->transport_id); 7665 scsi_ulto4b(len, res_desc->additional_length); 7666 res_desc = (struct scsi_per_res_in_full_desc *) 7667 &res_desc->transport_id[len]; 7668 } 7669 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7670 res_status->header.length); 7671 break; 7672 } 7673 default: 7674 /* 7675 * This is a bug, because we just checked for this above, 7676 * and should have returned an error. 7677 */ 7678 panic("Invalid PR type %x", cdb->action); 7679 break; /* NOTREACHED */ 7680 } 7681 mtx_unlock(&lun->lun_lock); 7682 7683 ctl_set_success(ctsio); 7684 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7685 ctsio->be_move_done = ctl_config_move_done; 7686 ctl_datamove((union ctl_io *)ctsio); 7687 return (CTL_RETVAL_COMPLETE); 7688 } 7689 7690 /* 7691 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7692 * it should return. 
7693 */ 7694 static int 7695 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7696 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7697 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7698 struct scsi_per_res_out_parms* param) 7699 { 7700 union ctl_ha_msg persis_io; 7701 int i; 7702 7703 mtx_lock(&lun->lun_lock); 7704 if (sa_res_key == 0) { 7705 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7706 /* validate scope and type */ 7707 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7708 SPR_LU_SCOPE) { 7709 mtx_unlock(&lun->lun_lock); 7710 ctl_set_invalid_field(/*ctsio*/ ctsio, 7711 /*sks_valid*/ 1, 7712 /*command*/ 1, 7713 /*field*/ 2, 7714 /*bit_valid*/ 1, 7715 /*bit*/ 4); 7716 ctl_done((union ctl_io *)ctsio); 7717 return (1); 7718 } 7719 7720 if (type>8 || type==2 || type==4 || type==0) { 7721 mtx_unlock(&lun->lun_lock); 7722 ctl_set_invalid_field(/*ctsio*/ ctsio, 7723 /*sks_valid*/ 1, 7724 /*command*/ 1, 7725 /*field*/ 2, 7726 /*bit_valid*/ 1, 7727 /*bit*/ 0); 7728 ctl_done((union ctl_io *)ctsio); 7729 return (1); 7730 } 7731 7732 /* 7733 * Unregister everybody else and build UA for 7734 * them 7735 */ 7736 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7737 if (i == residx || ctl_get_prkey(lun, i) == 0) 7738 continue; 7739 7740 ctl_clr_prkey(lun, i); 7741 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7742 } 7743 lun->pr_key_count = 1; 7744 lun->res_type = type; 7745 if (lun->res_type != SPR_TYPE_WR_EX_AR 7746 && lun->res_type != SPR_TYPE_EX_AC_AR) 7747 lun->pr_res_idx = residx; 7748 lun->PRGeneration++; 7749 mtx_unlock(&lun->lun_lock); 7750 7751 /* send msg to other side */ 7752 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7753 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7754 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7755 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7756 persis_io.pr.pr_info.res_type = type; 7757 memcpy(persis_io.pr.pr_info.sa_res_key, 7758 param->serv_act_res_key, 7759 sizeof(param->serv_act_res_key)); 7760 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7761 sizeof(persis_io.pr), M_WAITOK); 7762 } else { 7763 /* not all registrants */ 7764 mtx_unlock(&lun->lun_lock); 7765 free(ctsio->kern_data_ptr, M_CTL); 7766 ctl_set_invalid_field(ctsio, 7767 /*sks_valid*/ 1, 7768 /*command*/ 0, 7769 /*field*/ 8, 7770 /*bit_valid*/ 0, 7771 /*bit*/ 0); 7772 ctl_done((union ctl_io *)ctsio); 7773 return (1); 7774 } 7775 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7776 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7777 int found = 0; 7778 7779 if (res_key == sa_res_key) { 7780 /* special case */ 7781 /* 7782 * The spec implies this is not good but doesn't 7783 * say what to do. There are two choices either 7784 * generate a res conflict or check condition 7785 * with illegal field in parameter data. Since 7786 * that is what is done when the sa_res_key is 7787 * zero I'll take that approach since this has 7788 * to do with the sa_res_key. 
7789 */ 7790 mtx_unlock(&lun->lun_lock); 7791 free(ctsio->kern_data_ptr, M_CTL); 7792 ctl_set_invalid_field(ctsio, 7793 /*sks_valid*/ 1, 7794 /*command*/ 0, 7795 /*field*/ 8, 7796 /*bit_valid*/ 0, 7797 /*bit*/ 0); 7798 ctl_done((union ctl_io *)ctsio); 7799 return (1); 7800 } 7801 7802 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7803 if (ctl_get_prkey(lun, i) != sa_res_key) 7804 continue; 7805 7806 found = 1; 7807 ctl_clr_prkey(lun, i); 7808 lun->pr_key_count--; 7809 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7810 } 7811 if (!found) { 7812 mtx_unlock(&lun->lun_lock); 7813 free(ctsio->kern_data_ptr, M_CTL); 7814 ctl_set_reservation_conflict(ctsio); 7815 ctl_done((union ctl_io *)ctsio); 7816 return (CTL_RETVAL_COMPLETE); 7817 } 7818 lun->PRGeneration++; 7819 mtx_unlock(&lun->lun_lock); 7820 7821 /* send msg to other side */ 7822 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7823 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7824 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7825 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7826 persis_io.pr.pr_info.res_type = type; 7827 memcpy(persis_io.pr.pr_info.sa_res_key, 7828 param->serv_act_res_key, 7829 sizeof(param->serv_act_res_key)); 7830 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7831 sizeof(persis_io.pr), M_WAITOK); 7832 } else { 7833 /* Reserved but not all registrants */ 7834 /* sa_res_key is res holder */ 7835 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7836 /* validate scope and type */ 7837 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7838 SPR_LU_SCOPE) { 7839 mtx_unlock(&lun->lun_lock); 7840 ctl_set_invalid_field(/*ctsio*/ ctsio, 7841 /*sks_valid*/ 1, 7842 /*command*/ 1, 7843 /*field*/ 2, 7844 /*bit_valid*/ 1, 7845 /*bit*/ 4); 7846 ctl_done((union ctl_io *)ctsio); 7847 return (1); 7848 } 7849 7850 if (type>8 || type==2 || type==4 || type==0) { 7851 mtx_unlock(&lun->lun_lock); 7852 ctl_set_invalid_field(/*ctsio*/ ctsio, 7853 /*sks_valid*/ 1, 7854 /*command*/ 1, 7855 /*field*/ 2, 7856 /*bit_valid*/ 1, 7857 /*bit*/ 0); 7858 ctl_done((union ctl_io *)ctsio); 7859 return (1); 7860 } 7861 7862 /* 7863 * Do the following: 7864 * if sa_res_key != res_key remove all 7865 * registrants w/sa_res_key and generate UA 7866 * for these registrants(Registrations 7867 * Preempted) if it wasn't an exclusive 7868 * reservation generate UA(Reservations 7869 * Preempted) for all other registered nexuses 7870 * if the type has changed. Establish the new 7871 * reservation and holder. If res_key and 7872 * sa_res_key are the same do the above 7873 * except don't unregister the res holder. 
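 * (The loop below does exactly that: registrants whose key matches sa_res_key are unregistered and given a REGISTRATIONS PREEMPTED UA, while the remaining registrants get a RESERVATIONS RELEASED UA only when the type changes away from a registrants-only reservation.)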
7874 */ 7875 7876 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7877 if (i == residx || ctl_get_prkey(lun, i) == 0) 7878 continue; 7879 7880 if (sa_res_key == ctl_get_prkey(lun, i)) { 7881 ctl_clr_prkey(lun, i); 7882 lun->pr_key_count--; 7883 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7884 } else if (type != lun->res_type 7885 && (lun->res_type == SPR_TYPE_WR_EX_RO 7886 || lun->res_type ==SPR_TYPE_EX_AC_RO)){ 7887 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 7888 } 7889 } 7890 lun->res_type = type; 7891 if (lun->res_type != SPR_TYPE_WR_EX_AR 7892 && lun->res_type != SPR_TYPE_EX_AC_AR) 7893 lun->pr_res_idx = residx; 7894 else 7895 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 7896 lun->PRGeneration++; 7897 mtx_unlock(&lun->lun_lock); 7898 7899 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7900 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7901 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7902 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7903 persis_io.pr.pr_info.res_type = type; 7904 memcpy(persis_io.pr.pr_info.sa_res_key, 7905 param->serv_act_res_key, 7906 sizeof(param->serv_act_res_key)); 7907 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7908 sizeof(persis_io.pr), M_WAITOK); 7909 } else { 7910 /* 7911 * sa_res_key is not the res holder just 7912 * remove registrants 7913 */ 7914 int found=0; 7915 7916 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7917 if (sa_res_key != ctl_get_prkey(lun, i)) 7918 continue; 7919 7920 found = 1; 7921 ctl_clr_prkey(lun, i); 7922 lun->pr_key_count--; 7923 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7924 } 7925 7926 if (!found) { 7927 mtx_unlock(&lun->lun_lock); 7928 free(ctsio->kern_data_ptr, M_CTL); 7929 ctl_set_reservation_conflict(ctsio); 7930 ctl_done((union ctl_io *)ctsio); 7931 return (1); 7932 } 7933 lun->PRGeneration++; 7934 mtx_unlock(&lun->lun_lock); 7935 7936 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7937 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7938 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7939 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7940 persis_io.pr.pr_info.res_type = type; 7941 memcpy(persis_io.pr.pr_info.sa_res_key, 7942 param->serv_act_res_key, 7943 sizeof(param->serv_act_res_key)); 7944 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7945 sizeof(persis_io.pr), M_WAITOK); 7946 } 7947 } 7948 return (0); 7949 } 7950 7951 static void 7952 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 7953 { 7954 uint64_t sa_res_key; 7955 int i; 7956 7957 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 7958 7959 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7960 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 7961 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 7962 if (sa_res_key == 0) { 7963 /* 7964 * Unregister everybody else and build UA for 7965 * them 7966 */ 7967 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7968 if (i == msg->pr.pr_info.residx || 7969 ctl_get_prkey(lun, i) == 0) 7970 continue; 7971 7972 ctl_clr_prkey(lun, i); 7973 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7974 } 7975 7976 lun->pr_key_count = 1; 7977 lun->res_type = msg->pr.pr_info.res_type; 7978 if (lun->res_type != SPR_TYPE_WR_EX_AR 7979 && lun->res_type != SPR_TYPE_EX_AC_AR) 7980 lun->pr_res_idx = msg->pr.pr_info.residx; 7981 } else { 7982 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7983 if (sa_res_key == ctl_get_prkey(lun, i)) 7984 continue; 7985 7986 ctl_clr_prkey(lun, i); 7987 lun->pr_key_count--; 7988 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7989 } 7990 } 7991 } else { 7992 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7993 if (i == msg->pr.pr_info.residx || 7994 
ctl_get_prkey(lun, i) == 0) 7995 continue; 7996 7997 if (sa_res_key == ctl_get_prkey(lun, i)) { 7998 ctl_clr_prkey(lun, i); 7999 lun->pr_key_count--; 8000 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8001 } else if (msg->pr.pr_info.res_type != lun->res_type 8002 && (lun->res_type == SPR_TYPE_WR_EX_RO 8003 || lun->res_type == SPR_TYPE_EX_AC_RO)) { 8004 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8005 } 8006 } 8007 lun->res_type = msg->pr.pr_info.res_type; 8008 if (lun->res_type != SPR_TYPE_WR_EX_AR 8009 && lun->res_type != SPR_TYPE_EX_AC_AR) 8010 lun->pr_res_idx = msg->pr.pr_info.residx; 8011 else 8012 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8013 } 8014 lun->PRGeneration++; 8015 8016 } 8017 8018 8019 int 8020 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8021 { 8022 int retval; 8023 u_int32_t param_len; 8024 struct scsi_per_res_out *cdb; 8025 struct ctl_lun *lun; 8026 struct scsi_per_res_out_parms* param; 8027 struct ctl_softc *softc; 8028 uint32_t residx; 8029 uint64_t res_key, sa_res_key, key; 8030 uint8_t type; 8031 union ctl_ha_msg persis_io; 8032 int i; 8033 8034 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8035 8036 retval = CTL_RETVAL_COMPLETE; 8037 8038 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8039 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8040 softc = lun->ctl_softc; 8041 8042 /* 8043 * We only support whole-LUN scope. The scope & type are ignored for 8044 * register, register and ignore existing key and clear. 8045 * We sometimes ignore scope and type on preempts too!! 8046 * Verify reservation type here as well. 8047 */ 8048 type = cdb->scope_type & SPR_TYPE_MASK; 8049 if ((cdb->action == SPRO_RESERVE) 8050 || (cdb->action == SPRO_RELEASE)) { 8051 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8052 ctl_set_invalid_field(/*ctsio*/ ctsio, 8053 /*sks_valid*/ 1, 8054 /*command*/ 1, 8055 /*field*/ 2, 8056 /*bit_valid*/ 1, 8057 /*bit*/ 4); 8058 ctl_done((union ctl_io *)ctsio); 8059 return (CTL_RETVAL_COMPLETE); 8060 } 8061 8062 if (type>8 || type==2 || type==4 || type==0) { 8063 ctl_set_invalid_field(/*ctsio*/ ctsio, 8064 /*sks_valid*/ 1, 8065 /*command*/ 1, 8066 /*field*/ 2, 8067 /*bit_valid*/ 1, 8068 /*bit*/ 0); 8069 ctl_done((union ctl_io *)ctsio); 8070 return (CTL_RETVAL_COMPLETE); 8071 } 8072 } 8073 8074 param_len = scsi_4btoul(cdb->length); 8075 8076 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8077 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8078 ctsio->kern_data_len = param_len; 8079 ctsio->kern_total_len = param_len; 8080 ctsio->kern_data_resid = 0; 8081 ctsio->kern_rel_offset = 0; 8082 ctsio->kern_sg_entries = 0; 8083 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8084 ctsio->be_move_done = ctl_config_move_done; 8085 ctl_datamove((union ctl_io *)ctsio); 8086 8087 return (CTL_RETVAL_COMPLETE); 8088 } 8089 8090 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8091 8092 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8093 res_key = scsi_8btou64(param->res_key.key); 8094 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8095 8096 /* 8097 * Validate the reservation key here except for SPRO_REG_IGNO 8098 * This must be done for all other service actions 8099 */ 8100 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8101 mtx_lock(&lun->lun_lock); 8102 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8103 if (res_key != key) { 8104 /* 8105 * The current key passed in doesn't match 8106 * the one the initiator previously 8107 * registered. 
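 * (A mismatched reservation key is a RESERVATION CONFLICT per SPC, which is what ctl_set_reservation_conflict() reports below.)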
8108 */ 8109 mtx_unlock(&lun->lun_lock); 8110 free(ctsio->kern_data_ptr, M_CTL); 8111 ctl_set_reservation_conflict(ctsio); 8112 ctl_done((union ctl_io *)ctsio); 8113 return (CTL_RETVAL_COMPLETE); 8114 } 8115 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8116 /* 8117 * We are not registered 8118 */ 8119 mtx_unlock(&lun->lun_lock); 8120 free(ctsio->kern_data_ptr, M_CTL); 8121 ctl_set_reservation_conflict(ctsio); 8122 ctl_done((union ctl_io *)ctsio); 8123 return (CTL_RETVAL_COMPLETE); 8124 } else if (res_key != 0) { 8125 /* 8126 * We are not registered and trying to register but 8127 * the register key isn't zero. 8128 */ 8129 mtx_unlock(&lun->lun_lock); 8130 free(ctsio->kern_data_ptr, M_CTL); 8131 ctl_set_reservation_conflict(ctsio); 8132 ctl_done((union ctl_io *)ctsio); 8133 return (CTL_RETVAL_COMPLETE); 8134 } 8135 mtx_unlock(&lun->lun_lock); 8136 } 8137 8138 switch (cdb->action & SPRO_ACTION_MASK) { 8139 case SPRO_REGISTER: 8140 case SPRO_REG_IGNO: { 8141 8142 #if 0 8143 printf("Registration received\n"); 8144 #endif 8145 8146 /* 8147 * We don't support any of these options, as we report in 8148 * the read capabilities request (see 8149 * ctl_persistent_reserve_in(), above). 8150 */ 8151 if ((param->flags & SPR_SPEC_I_PT) 8152 || (param->flags & SPR_ALL_TG_PT) 8153 || (param->flags & SPR_APTPL)) { 8154 int bit_ptr; 8155 8156 if (param->flags & SPR_APTPL) 8157 bit_ptr = 0; 8158 else if (param->flags & SPR_ALL_TG_PT) 8159 bit_ptr = 2; 8160 else /* SPR_SPEC_I_PT */ 8161 bit_ptr = 3; 8162 8163 free(ctsio->kern_data_ptr, M_CTL); 8164 ctl_set_invalid_field(ctsio, 8165 /*sks_valid*/ 1, 8166 /*command*/ 0, 8167 /*field*/ 20, 8168 /*bit_valid*/ 1, 8169 /*bit*/ bit_ptr); 8170 ctl_done((union ctl_io *)ctsio); 8171 return (CTL_RETVAL_COMPLETE); 8172 } 8173 8174 mtx_lock(&lun->lun_lock); 8175 8176 /* 8177 * The initiator wants to clear the 8178 * key/unregister. 8179 */ 8180 if (sa_res_key == 0) { 8181 if ((res_key == 0 8182 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8183 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8184 && ctl_get_prkey(lun, residx) == 0)) { 8185 mtx_unlock(&lun->lun_lock); 8186 goto done; 8187 } 8188 8189 ctl_clr_prkey(lun, residx); 8190 lun->pr_key_count--; 8191 8192 if (residx == lun->pr_res_idx) { 8193 lun->flags &= ~CTL_LUN_PR_RESERVED; 8194 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8195 8196 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8197 || lun->res_type == SPR_TYPE_EX_AC_RO) 8198 && lun->pr_key_count) { 8199 /* 8200 * If the reservation is a registrants 8201 * only type we need to generate a UA 8202 * for other registered inits. 
The 8203 * sense code should be RESERVATIONS 8204 * RELEASED 8205 */ 8206 8207 for (i = softc->init_min; i < softc->init_max; i++){ 8208 if (ctl_get_prkey(lun, i) == 0) 8209 continue; 8210 ctl_est_ua(lun, i, 8211 CTL_UA_RES_RELEASE); 8212 } 8213 } 8214 lun->res_type = 0; 8215 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8216 if (lun->pr_key_count==0) { 8217 lun->flags &= ~CTL_LUN_PR_RESERVED; 8218 lun->res_type = 0; 8219 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8220 } 8221 } 8222 lun->PRGeneration++; 8223 mtx_unlock(&lun->lun_lock); 8224 8225 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8226 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8227 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8228 persis_io.pr.pr_info.residx = residx; 8229 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8230 sizeof(persis_io.pr), M_WAITOK); 8231 } else /* sa_res_key != 0 */ { 8232 8233 /* 8234 * If we aren't registered currently then increment 8235 * the key count and set the registered flag. 8236 */ 8237 ctl_alloc_prkey(lun, residx); 8238 if (ctl_get_prkey(lun, residx) == 0) 8239 lun->pr_key_count++; 8240 ctl_set_prkey(lun, residx, sa_res_key); 8241 lun->PRGeneration++; 8242 mtx_unlock(&lun->lun_lock); 8243 8244 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8245 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8246 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8247 persis_io.pr.pr_info.residx = residx; 8248 memcpy(persis_io.pr.pr_info.sa_res_key, 8249 param->serv_act_res_key, 8250 sizeof(param->serv_act_res_key)); 8251 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8252 sizeof(persis_io.pr), M_WAITOK); 8253 } 8254 8255 break; 8256 } 8257 case SPRO_RESERVE: 8258 #if 0 8259 printf("Reserve executed type %d\n", type); 8260 #endif 8261 mtx_lock(&lun->lun_lock); 8262 if (lun->flags & CTL_LUN_PR_RESERVED) { 8263 /* 8264 * if this isn't the reservation holder and it's 8265 * not a "all registrants" type or if the type is 8266 * different then we have a conflict 8267 */ 8268 if ((lun->pr_res_idx != residx 8269 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8270 || lun->res_type != type) { 8271 mtx_unlock(&lun->lun_lock); 8272 free(ctsio->kern_data_ptr, M_CTL); 8273 ctl_set_reservation_conflict(ctsio); 8274 ctl_done((union ctl_io *)ctsio); 8275 return (CTL_RETVAL_COMPLETE); 8276 } 8277 mtx_unlock(&lun->lun_lock); 8278 } else /* create a reservation */ { 8279 /* 8280 * If it's not an "all registrants" type record 8281 * reservation holder 8282 */ 8283 if (type != SPR_TYPE_WR_EX_AR 8284 && type != SPR_TYPE_EX_AC_AR) 8285 lun->pr_res_idx = residx; /* Res holder */ 8286 else 8287 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8288 8289 lun->flags |= CTL_LUN_PR_RESERVED; 8290 lun->res_type = type; 8291 8292 mtx_unlock(&lun->lun_lock); 8293 8294 /* send msg to other side */ 8295 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8296 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8297 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8298 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8299 persis_io.pr.pr_info.res_type = type; 8300 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8301 sizeof(persis_io.pr), M_WAITOK); 8302 } 8303 break; 8304 8305 case SPRO_RELEASE: 8306 mtx_lock(&lun->lun_lock); 8307 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8308 /* No reservation exists return good status */ 8309 mtx_unlock(&lun->lun_lock); 8310 goto done; 8311 } 8312 /* 8313 * Is this nexus a reservation holder? 
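 * (An all-registrants reservation is held by every registered nexus, hence the CTL_PR_ALL_REGISTRANTS check below.)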
8314 */ 8315 if (lun->pr_res_idx != residx 8316 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8317 /* 8318 * not a res holder return good status but 8319 * do nothing 8320 */ 8321 mtx_unlock(&lun->lun_lock); 8322 goto done; 8323 } 8324 8325 if (lun->res_type != type) { 8326 mtx_unlock(&lun->lun_lock); 8327 free(ctsio->kern_data_ptr, M_CTL); 8328 ctl_set_illegal_pr_release(ctsio); 8329 ctl_done((union ctl_io *)ctsio); 8330 return (CTL_RETVAL_COMPLETE); 8331 } 8332 8333 /* okay to release */ 8334 lun->flags &= ~CTL_LUN_PR_RESERVED; 8335 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8336 lun->res_type = 0; 8337 8338 /* 8339 * if this isn't an exclusive access 8340 * res generate UA for all other 8341 * registrants. 8342 */ 8343 if (type != SPR_TYPE_EX_AC 8344 && type != SPR_TYPE_WR_EX) { 8345 for (i = softc->init_min; i < softc->init_max; i++) { 8346 if (i == residx || ctl_get_prkey(lun, i) == 0) 8347 continue; 8348 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8349 } 8350 } 8351 mtx_unlock(&lun->lun_lock); 8352 8353 /* Send msg to other side */ 8354 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8355 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8356 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8357 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8358 sizeof(persis_io.pr), M_WAITOK); 8359 break; 8360 8361 case SPRO_CLEAR: 8362 /* send msg to other side */ 8363 8364 mtx_lock(&lun->lun_lock); 8365 lun->flags &= ~CTL_LUN_PR_RESERVED; 8366 lun->res_type = 0; 8367 lun->pr_key_count = 0; 8368 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8369 8370 ctl_clr_prkey(lun, residx); 8371 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8372 if (ctl_get_prkey(lun, i) != 0) { 8373 ctl_clr_prkey(lun, i); 8374 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8375 } 8376 lun->PRGeneration++; 8377 mtx_unlock(&lun->lun_lock); 8378 8379 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8380 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8381 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8382 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8383 sizeof(persis_io.pr), M_WAITOK); 8384 break; 8385 8386 case SPRO_PREEMPT: 8387 case SPRO_PRE_ABO: { 8388 int nretval; 8389 8390 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8391 residx, ctsio, cdb, param); 8392 if (nretval != 0) 8393 return (CTL_RETVAL_COMPLETE); 8394 break; 8395 } 8396 default: 8397 panic("Invalid PR type %x", cdb->action); 8398 } 8399 8400 done: 8401 free(ctsio->kern_data_ptr, M_CTL); 8402 ctl_set_success(ctsio); 8403 ctl_done((union ctl_io *)ctsio); 8404 8405 return (retval); 8406 } 8407 8408 /* 8409 * This routine is for handling a message from the other SC pertaining to 8410 * persistent reserve out. All the error checking will have been done 8411 * so only perorming the action need be done here to keep the two 8412 * in sync. 
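 * (The peer simply replays the state change: it updates keys, the reservation type and holder, and establishes the matching unit attentions, without re-validating the request.)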
8413 */ 8414 static void 8415 ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg) 8416 { 8417 struct ctl_lun *lun; 8418 struct ctl_softc *softc; 8419 int i; 8420 uint32_t residx, targ_lun; 8421 8422 softc = control_softc; 8423 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8424 mtx_lock(&softc->ctl_lock); 8425 if ((targ_lun >= CTL_MAX_LUNS) || 8426 ((lun = softc->ctl_luns[targ_lun]) == NULL)) { 8427 mtx_unlock(&softc->ctl_lock); 8428 return; 8429 } 8430 mtx_lock(&lun->lun_lock); 8431 mtx_unlock(&softc->ctl_lock); 8432 if (lun->flags & CTL_LUN_DISABLED) { 8433 mtx_unlock(&lun->lun_lock); 8434 return; 8435 } 8436 residx = ctl_get_initindex(&msg->hdr.nexus); 8437 switch(msg->pr.pr_info.action) { 8438 case CTL_PR_REG_KEY: 8439 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8440 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8441 lun->pr_key_count++; 8442 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8443 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8444 lun->PRGeneration++; 8445 break; 8446 8447 case CTL_PR_UNREG_KEY: 8448 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8449 lun->pr_key_count--; 8450 8451 /* XXX Need to see if the reservation has been released */ 8452 /* if so do we need to generate UA? */ 8453 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8454 lun->flags &= ~CTL_LUN_PR_RESERVED; 8455 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8456 8457 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8458 || lun->res_type == SPR_TYPE_EX_AC_RO) 8459 && lun->pr_key_count) { 8460 /* 8461 * If the reservation is a registrants 8462 * only type we need to generate a UA 8463 * for other registered inits. The 8464 * sense code should be RESERVATIONS 8465 * RELEASED 8466 */ 8467 8468 for (i = softc->init_min; i < softc->init_max; i++) { 8469 if (ctl_get_prkey(lun, i) == 0) 8470 continue; 8471 8472 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8473 } 8474 } 8475 lun->res_type = 0; 8476 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8477 if (lun->pr_key_count==0) { 8478 lun->flags &= ~CTL_LUN_PR_RESERVED; 8479 lun->res_type = 0; 8480 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8481 } 8482 } 8483 lun->PRGeneration++; 8484 break; 8485 8486 case CTL_PR_RESERVE: 8487 lun->flags |= CTL_LUN_PR_RESERVED; 8488 lun->res_type = msg->pr.pr_info.res_type; 8489 lun->pr_res_idx = msg->pr.pr_info.residx; 8490 8491 break; 8492 8493 case CTL_PR_RELEASE: 8494 /* 8495 * if this isn't an exclusive access res generate UA for all 8496 * other registrants. 
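 * (This mirrors the SPRO_RELEASE handling on the primary side above.)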
8497 */ 8498 if (lun->res_type != SPR_TYPE_EX_AC 8499 && lun->res_type != SPR_TYPE_WR_EX) { 8500 for (i = softc->init_min; i < softc->init_max; i++) { 8501 if (i == residx || ctl_get_prkey(lun, i) == 0) 8502 continue; 8503 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8504 } 8505 } 8506 lun->flags &= ~CTL_LUN_PR_RESERVED; 8507 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8508 lun->res_type = 0; 8509 break; 8510 8511 case CTL_PR_PREEMPT: 8512 ctl_pro_preempt_other(lun, msg); 8513 break; 8514 case CTL_PR_CLEAR: 8515 lun->flags &= ~CTL_LUN_PR_RESERVED; 8516 lun->res_type = 0; 8517 lun->pr_key_count = 0; 8518 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8519 8520 for (i=0; i < CTL_MAX_INITIATORS; i++) { 8521 if (ctl_get_prkey(lun, i) == 0) 8522 continue; 8523 ctl_clr_prkey(lun, i); 8524 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8525 } 8526 lun->PRGeneration++; 8527 break; 8528 } 8529 8530 mtx_unlock(&lun->lun_lock); 8531 } 8532 8533 int 8534 ctl_read_write(struct ctl_scsiio *ctsio) 8535 { 8536 struct ctl_lun *lun; 8537 struct ctl_lba_len_flags *lbalen; 8538 uint64_t lba; 8539 uint32_t num_blocks; 8540 int flags, retval; 8541 int isread; 8542 8543 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8544 8545 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8546 8547 flags = 0; 8548 retval = CTL_RETVAL_COMPLETE; 8549 8550 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8551 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8552 switch (ctsio->cdb[0]) { 8553 case READ_6: 8554 case WRITE_6: { 8555 struct scsi_rw_6 *cdb; 8556 8557 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8558 8559 lba = scsi_3btoul(cdb->addr); 8560 /* only 5 bits are valid in the most significant address byte */ 8561 lba &= 0x1fffff; 8562 num_blocks = cdb->length; 8563 /* 8564 * This is correct according to SBC-2.
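 * (SBC-2 defines a TRANSFER LENGTH of zero in the 6-byte CDBs to mean 256 blocks; the 10/12/16-byte CDBs use zero to mean no data, which is handled by the num_blocks == 0 check further below.)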
8565 */ 8566 if (num_blocks == 0) 8567 num_blocks = 256; 8568 break; 8569 } 8570 case READ_10: 8571 case WRITE_10: { 8572 struct scsi_rw_10 *cdb; 8573 8574 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8575 if (cdb->byte2 & SRW10_FUA) 8576 flags |= CTL_LLF_FUA; 8577 if (cdb->byte2 & SRW10_DPO) 8578 flags |= CTL_LLF_DPO; 8579 lba = scsi_4btoul(cdb->addr); 8580 num_blocks = scsi_2btoul(cdb->length); 8581 break; 8582 } 8583 case WRITE_VERIFY_10: { 8584 struct scsi_write_verify_10 *cdb; 8585 8586 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8587 flags |= CTL_LLF_FUA; 8588 if (cdb->byte2 & SWV_DPO) 8589 flags |= CTL_LLF_DPO; 8590 lba = scsi_4btoul(cdb->addr); 8591 num_blocks = scsi_2btoul(cdb->length); 8592 break; 8593 } 8594 case READ_12: 8595 case WRITE_12: { 8596 struct scsi_rw_12 *cdb; 8597 8598 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8599 if (cdb->byte2 & SRW12_FUA) 8600 flags |= CTL_LLF_FUA; 8601 if (cdb->byte2 & SRW12_DPO) 8602 flags |= CTL_LLF_DPO; 8603 lba = scsi_4btoul(cdb->addr); 8604 num_blocks = scsi_4btoul(cdb->length); 8605 break; 8606 } 8607 case WRITE_VERIFY_12: { 8608 struct scsi_write_verify_12 *cdb; 8609 8610 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8611 flags |= CTL_LLF_FUA; 8612 if (cdb->byte2 & SWV_DPO) 8613 flags |= CTL_LLF_DPO; 8614 lba = scsi_4btoul(cdb->addr); 8615 num_blocks = scsi_4btoul(cdb->length); 8616 break; 8617 } 8618 case READ_16: 8619 case WRITE_16: { 8620 struct scsi_rw_16 *cdb; 8621 8622 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8623 if (cdb->byte2 & SRW12_FUA) 8624 flags |= CTL_LLF_FUA; 8625 if (cdb->byte2 & SRW12_DPO) 8626 flags |= CTL_LLF_DPO; 8627 lba = scsi_8btou64(cdb->addr); 8628 num_blocks = scsi_4btoul(cdb->length); 8629 break; 8630 } 8631 case WRITE_ATOMIC_16: { 8632 struct scsi_rw_16 *cdb; 8633 8634 if (lun->be_lun->atomicblock == 0) { 8635 ctl_set_invalid_opcode(ctsio); 8636 ctl_done((union ctl_io *)ctsio); 8637 return (CTL_RETVAL_COMPLETE); 8638 } 8639 8640 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8641 if (cdb->byte2 & SRW12_FUA) 8642 flags |= CTL_LLF_FUA; 8643 if (cdb->byte2 & SRW12_DPO) 8644 flags |= CTL_LLF_DPO; 8645 lba = scsi_8btou64(cdb->addr); 8646 num_blocks = scsi_4btoul(cdb->length); 8647 if (num_blocks > lun->be_lun->atomicblock) { 8648 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8649 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8650 /*bit*/ 0); 8651 ctl_done((union ctl_io *)ctsio); 8652 return (CTL_RETVAL_COMPLETE); 8653 } 8654 break; 8655 } 8656 case WRITE_VERIFY_16: { 8657 struct scsi_write_verify_16 *cdb; 8658 8659 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8660 flags |= CTL_LLF_FUA; 8661 if (cdb->byte2 & SWV_DPO) 8662 flags |= CTL_LLF_DPO; 8663 lba = scsi_8btou64(cdb->addr); 8664 num_blocks = scsi_4btoul(cdb->length); 8665 break; 8666 } 8667 default: 8668 /* 8669 * We got a command we don't support. This shouldn't 8670 * happen, commands should be filtered out above us. 8671 */ 8672 ctl_set_invalid_opcode(ctsio); 8673 ctl_done((union ctl_io *)ctsio); 8674 8675 return (CTL_RETVAL_COMPLETE); 8676 break; /* NOTREACHED */ 8677 } 8678 8679 /* 8680 * The first check is to make sure we're in bounds, the second 8681 * check is to catch wrap-around problems. If the lba + num blocks 8682 * is less than the lba, then we've wrapped around and the block 8683 * range is invalid anyway. 
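 * (For example, an lba of 0xfffffffffffffff0 with num_blocks of 0x20 sums to 0x10 after 64-bit wrap-around, which is less than lba, so the request is rejected.)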
8684 */ 8685 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8686 || ((lba + num_blocks) < lba)) { 8687 ctl_set_lba_out_of_range(ctsio); 8688 ctl_done((union ctl_io *)ctsio); 8689 return (CTL_RETVAL_COMPLETE); 8690 } 8691 8692 /* 8693 * According to SBC-3, a transfer length of 0 is not an error. 8694 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8695 * translates to 256 blocks for those commands. 8696 */ 8697 if (num_blocks == 0) { 8698 ctl_set_success(ctsio); 8699 ctl_done((union ctl_io *)ctsio); 8700 return (CTL_RETVAL_COMPLETE); 8701 } 8702 8703 /* Set FUA and/or DPO if caches are disabled. */ 8704 if (isread) { 8705 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8706 SCP_RCD) != 0) 8707 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8708 } else { 8709 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8710 SCP_WCE) == 0) 8711 flags |= CTL_LLF_FUA; 8712 } 8713 8714 lbalen = (struct ctl_lba_len_flags *) 8715 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8716 lbalen->lba = lba; 8717 lbalen->len = num_blocks; 8718 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8719 8720 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8721 ctsio->kern_rel_offset = 0; 8722 8723 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8724 8725 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8726 8727 return (retval); 8728 } 8729 8730 static int 8731 ctl_cnw_cont(union ctl_io *io) 8732 { 8733 struct ctl_scsiio *ctsio; 8734 struct ctl_lun *lun; 8735 struct ctl_lba_len_flags *lbalen; 8736 int retval; 8737 8738 ctsio = &io->scsiio; 8739 ctsio->io_hdr.status = CTL_STATUS_NONE; 8740 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8741 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8742 lbalen = (struct ctl_lba_len_flags *) 8743 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8744 lbalen->flags &= ~CTL_LLF_COMPARE; 8745 lbalen->flags |= CTL_LLF_WRITE; 8746 8747 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8748 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8749 return (retval); 8750 } 8751 8752 int 8753 ctl_cnw(struct ctl_scsiio *ctsio) 8754 { 8755 struct ctl_lun *lun; 8756 struct ctl_lba_len_flags *lbalen; 8757 uint64_t lba; 8758 uint32_t num_blocks; 8759 int flags, retval; 8760 8761 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8762 8763 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8764 8765 flags = 0; 8766 retval = CTL_RETVAL_COMPLETE; 8767 8768 switch (ctsio->cdb[0]) { 8769 case COMPARE_AND_WRITE: { 8770 struct scsi_compare_and_write *cdb; 8771 8772 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8773 if (cdb->byte2 & SRW10_FUA) 8774 flags |= CTL_LLF_FUA; 8775 if (cdb->byte2 & SRW10_DPO) 8776 flags |= CTL_LLF_DPO; 8777 lba = scsi_8btou64(cdb->addr); 8778 num_blocks = cdb->length; 8779 break; 8780 } 8781 default: 8782 /* 8783 * We got a command we don't support. This shouldn't 8784 * happen, commands should be filtered out above us. 8785 */ 8786 ctl_set_invalid_opcode(ctsio); 8787 ctl_done((union ctl_io *)ctsio); 8788 8789 return (CTL_RETVAL_COMPLETE); 8790 break; /* NOTREACHED */ 8791 } 8792 8793 /* 8794 * The first check is to make sure we're in bounds, the second 8795 * check is to catch wrap-around problems. If the lba + num blocks 8796 * is less than the lba, then we've wrapped around and the block 8797 * range is invalid anyway. 
8798 */ 8799 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8800 || ((lba + num_blocks) < lba)) { 8801 ctl_set_lba_out_of_range(ctsio); 8802 ctl_done((union ctl_io *)ctsio); 8803 return (CTL_RETVAL_COMPLETE); 8804 } 8805 8806 /* 8807 * According to SBC-3, a transfer length of 0 is not an error. 8808 */ 8809 if (num_blocks == 0) { 8810 ctl_set_success(ctsio); 8811 ctl_done((union ctl_io *)ctsio); 8812 return (CTL_RETVAL_COMPLETE); 8813 } 8814 8815 /* Set FUA if write cache is disabled. */ 8816 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8817 SCP_WCE) == 0) 8818 flags |= CTL_LLF_FUA; 8819 8820 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 8821 ctsio->kern_rel_offset = 0; 8822 8823 /* 8824 * Set the IO_CONT flag, so that if this I/O gets passed to 8825 * ctl_data_submit_done(), it'll get passed back to 8826 * ctl_ctl_cnw_cont() for further processing. 8827 */ 8828 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 8829 ctsio->io_cont = ctl_cnw_cont; 8830 8831 lbalen = (struct ctl_lba_len_flags *) 8832 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8833 lbalen->lba = lba; 8834 lbalen->len = num_blocks; 8835 lbalen->flags = CTL_LLF_COMPARE | flags; 8836 8837 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 8838 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8839 return (retval); 8840 } 8841 8842 int 8843 ctl_verify(struct ctl_scsiio *ctsio) 8844 { 8845 struct ctl_lun *lun; 8846 struct ctl_lba_len_flags *lbalen; 8847 uint64_t lba; 8848 uint32_t num_blocks; 8849 int bytchk, flags; 8850 int retval; 8851 8852 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8853 8854 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 8855 8856 bytchk = 0; 8857 flags = CTL_LLF_FUA; 8858 retval = CTL_RETVAL_COMPLETE; 8859 8860 switch (ctsio->cdb[0]) { 8861 case VERIFY_10: { 8862 struct scsi_verify_10 *cdb; 8863 8864 cdb = (struct scsi_verify_10 *)ctsio->cdb; 8865 if (cdb->byte2 & SVFY_BYTCHK) 8866 bytchk = 1; 8867 if (cdb->byte2 & SVFY_DPO) 8868 flags |= CTL_LLF_DPO; 8869 lba = scsi_4btoul(cdb->addr); 8870 num_blocks = scsi_2btoul(cdb->length); 8871 break; 8872 } 8873 case VERIFY_12: { 8874 struct scsi_verify_12 *cdb; 8875 8876 cdb = (struct scsi_verify_12 *)ctsio->cdb; 8877 if (cdb->byte2 & SVFY_BYTCHK) 8878 bytchk = 1; 8879 if (cdb->byte2 & SVFY_DPO) 8880 flags |= CTL_LLF_DPO; 8881 lba = scsi_4btoul(cdb->addr); 8882 num_blocks = scsi_4btoul(cdb->length); 8883 break; 8884 } 8885 case VERIFY_16: { 8886 struct scsi_rw_16 *cdb; 8887 8888 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8889 if (cdb->byte2 & SVFY_BYTCHK) 8890 bytchk = 1; 8891 if (cdb->byte2 & SVFY_DPO) 8892 flags |= CTL_LLF_DPO; 8893 lba = scsi_8btou64(cdb->addr); 8894 num_blocks = scsi_4btoul(cdb->length); 8895 break; 8896 } 8897 default: 8898 /* 8899 * We got a command we don't support. This shouldn't 8900 * happen, commands should be filtered out above us. 8901 */ 8902 ctl_set_invalid_opcode(ctsio); 8903 ctl_done((union ctl_io *)ctsio); 8904 return (CTL_RETVAL_COMPLETE); 8905 } 8906 8907 /* 8908 * The first check is to make sure we're in bounds, the second 8909 * check is to catch wrap-around problems. If the lba + num blocks 8910 * is less than the lba, then we've wrapped around and the block 8911 * range is invalid anyway. 
8912 */ 8913 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8914 || ((lba + num_blocks) < lba)) { 8915 ctl_set_lba_out_of_range(ctsio); 8916 ctl_done((union ctl_io *)ctsio); 8917 return (CTL_RETVAL_COMPLETE); 8918 } 8919 8920 /* 8921 * According to SBC-3, a transfer length of 0 is not an error. 8922 */ 8923 if (num_blocks == 0) { 8924 ctl_set_success(ctsio); 8925 ctl_done((union ctl_io *)ctsio); 8926 return (CTL_RETVAL_COMPLETE); 8927 } 8928 8929 lbalen = (struct ctl_lba_len_flags *) 8930 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8931 lbalen->lba = lba; 8932 lbalen->len = num_blocks; 8933 if (bytchk) { 8934 lbalen->flags = CTL_LLF_COMPARE | flags; 8935 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8936 } else { 8937 lbalen->flags = CTL_LLF_VERIFY | flags; 8938 ctsio->kern_total_len = 0; 8939 } 8940 ctsio->kern_rel_offset = 0; 8941 8942 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 8943 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8944 return (retval); 8945 } 8946 8947 int 8948 ctl_report_luns(struct ctl_scsiio *ctsio) 8949 { 8950 struct ctl_softc *softc = control_softc; 8951 struct scsi_report_luns *cdb; 8952 struct scsi_report_luns_data *lun_data; 8953 struct ctl_lun *lun, *request_lun; 8954 struct ctl_port *port; 8955 int num_luns, retval; 8956 uint32_t alloc_len, lun_datalen; 8957 int num_filled, well_known; 8958 uint32_t initidx, targ_lun_id, lun_id; 8959 8960 retval = CTL_RETVAL_COMPLETE; 8961 well_known = 0; 8962 8963 cdb = (struct scsi_report_luns *)ctsio->cdb; 8964 port = ctl_io_port(&ctsio->io_hdr); 8965 8966 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 8967 8968 mtx_lock(&softc->ctl_lock); 8969 num_luns = 0; 8970 for (targ_lun_id = 0; targ_lun_id < CTL_MAX_LUNS; targ_lun_id++) { 8971 if (ctl_lun_map_from_port(port, targ_lun_id) < CTL_MAX_LUNS) 8972 num_luns++; 8973 } 8974 mtx_unlock(&softc->ctl_lock); 8975 8976 switch (cdb->select_report) { 8977 case RPL_REPORT_DEFAULT: 8978 case RPL_REPORT_ALL: 8979 break; 8980 case RPL_REPORT_WELLKNOWN: 8981 well_known = 1; 8982 num_luns = 0; 8983 break; 8984 default: 8985 ctl_set_invalid_field(ctsio, 8986 /*sks_valid*/ 1, 8987 /*command*/ 1, 8988 /*field*/ 2, 8989 /*bit_valid*/ 0, 8990 /*bit*/ 0); 8991 ctl_done((union ctl_io *)ctsio); 8992 return (retval); 8993 break; /* NOTREACHED */ 8994 } 8995 8996 alloc_len = scsi_4btoul(cdb->length); 8997 /* 8998 * The initiator has to allocate at least 16 bytes for this request, 8999 * so he can at least get the header and the first LUN. Otherwise 9000 * we reject the request (per SPC-3 rev 14, section 6.21). 
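 * (The header is 8 bytes and each LUN entry is another 8 bytes, so the minimum useful allocation length is 16.)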
9001 */ 9002 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9003 sizeof(struct scsi_report_luns_lundata))) { 9004 ctl_set_invalid_field(ctsio, 9005 /*sks_valid*/ 1, 9006 /*command*/ 1, 9007 /*field*/ 6, 9008 /*bit_valid*/ 0, 9009 /*bit*/ 0); 9010 ctl_done((union ctl_io *)ctsio); 9011 return (retval); 9012 } 9013 9014 request_lun = (struct ctl_lun *) 9015 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9016 9017 lun_datalen = sizeof(*lun_data) + 9018 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9019 9020 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9021 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9022 ctsio->kern_sg_entries = 0; 9023 9024 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9025 9026 mtx_lock(&softc->ctl_lock); 9027 for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) { 9028 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 9029 if (lun_id >= CTL_MAX_LUNS) 9030 continue; 9031 lun = softc->ctl_luns[lun_id]; 9032 if (lun == NULL) 9033 continue; 9034 9035 if (targ_lun_id <= 0xff) { 9036 /* 9037 * Peripheral addressing method, bus number 0. 9038 */ 9039 lun_data->luns[num_filled].lundata[0] = 9040 RPL_LUNDATA_ATYP_PERIPH; 9041 lun_data->luns[num_filled].lundata[1] = targ_lun_id; 9042 num_filled++; 9043 } else if (targ_lun_id <= 0x3fff) { 9044 /* 9045 * Flat addressing method. 9046 */ 9047 lun_data->luns[num_filled].lundata[0] = 9048 RPL_LUNDATA_ATYP_FLAT | (targ_lun_id >> 8); 9049 lun_data->luns[num_filled].lundata[1] = 9050 (targ_lun_id & 0xff); 9051 num_filled++; 9052 } else if (targ_lun_id <= 0xffffff) { 9053 /* 9054 * Extended flat addressing method. 9055 */ 9056 lun_data->luns[num_filled].lundata[0] = 9057 RPL_LUNDATA_ATYP_EXTLUN | 0x12; 9058 scsi_ulto3b(targ_lun_id, 9059 &lun_data->luns[num_filled].lundata[1]); 9060 num_filled++; 9061 } else { 9062 printf("ctl_report_luns: bogus LUN number %jd, " 9063 "skipping\n", (intmax_t)targ_lun_id); 9064 } 9065 /* 9066 * According to SPC-3, rev 14 section 6.21: 9067 * 9068 * "The execution of a REPORT LUNS command to any valid and 9069 * installed logical unit shall clear the REPORTED LUNS DATA 9070 * HAS CHANGED unit attention condition for all logical 9071 * units of that target with respect to the requesting 9072 * initiator. A valid and installed logical unit is one 9073 * having a PERIPHERAL QUALIFIER of 000b in the standard 9074 * INQUIRY data (see 6.4.2)." 9075 * 9076 * If request_lun is NULL, the LUN this report luns command 9077 * was issued to is either disabled or doesn't exist. In that 9078 * case, we shouldn't clear any pending lun change unit 9079 * attention. 9080 */ 9081 if (request_lun != NULL) { 9082 mtx_lock(&lun->lun_lock); 9083 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 9084 mtx_unlock(&lun->lun_lock); 9085 } 9086 } 9087 mtx_unlock(&softc->ctl_lock); 9088 9089 /* 9090 * It's quite possible that we've returned fewer LUNs than we allocated 9091 * space for. Trim it. 
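 * (lun_datalen is recomputed below from num_filled; when it is smaller than the initiator's allocation length, the difference is reported as residual.)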
9092 */ 9093 lun_datalen = sizeof(*lun_data) + 9094 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9095 9096 if (lun_datalen < alloc_len) { 9097 ctsio->residual = alloc_len - lun_datalen; 9098 ctsio->kern_data_len = lun_datalen; 9099 ctsio->kern_total_len = lun_datalen; 9100 } else { 9101 ctsio->residual = 0; 9102 ctsio->kern_data_len = alloc_len; 9103 ctsio->kern_total_len = alloc_len; 9104 } 9105 ctsio->kern_data_resid = 0; 9106 ctsio->kern_rel_offset = 0; 9107 ctsio->kern_sg_entries = 0; 9108 9109 /* 9110 * We set this to the actual data length, regardless of how much 9111 * space we actually have to return results. If the user looks at 9112 * this value, he'll know whether or not he allocated enough space 9113 * and reissue the command if necessary. We don't support well 9114 * known logical units, so if the user asks for that, return none. 9115 */ 9116 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9117 9118 /* 9119 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9120 * this request. 9121 */ 9122 ctl_set_success(ctsio); 9123 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9124 ctsio->be_move_done = ctl_config_move_done; 9125 ctl_datamove((union ctl_io *)ctsio); 9126 return (retval); 9127 } 9128 9129 int 9130 ctl_request_sense(struct ctl_scsiio *ctsio) 9131 { 9132 struct scsi_request_sense *cdb; 9133 struct scsi_sense_data *sense_ptr; 9134 struct ctl_softc *ctl_softc; 9135 struct ctl_lun *lun; 9136 uint32_t initidx; 9137 int have_error; 9138 scsi_sense_data_type sense_format; 9139 ctl_ua_type ua_type; 9140 9141 cdb = (struct scsi_request_sense *)ctsio->cdb; 9142 9143 ctl_softc = control_softc; 9144 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9145 9146 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9147 9148 /* 9149 * Determine which sense format the user wants. 9150 */ 9151 if (cdb->byte2 & SRS_DESC) 9152 sense_format = SSD_TYPE_DESC; 9153 else 9154 sense_format = SSD_TYPE_FIXED; 9155 9156 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9157 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9158 ctsio->kern_sg_entries = 0; 9159 9160 /* 9161 * struct scsi_sense_data, which is currently set to 256 bytes, is 9162 * larger than the largest allowed value for the length field in the 9163 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9164 */ 9165 ctsio->residual = 0; 9166 ctsio->kern_data_len = cdb->length; 9167 ctsio->kern_total_len = cdb->length; 9168 9169 ctsio->kern_data_resid = 0; 9170 ctsio->kern_rel_offset = 0; 9171 ctsio->kern_sg_entries = 0; 9172 9173 /* 9174 * If we don't have a LUN, we don't have any pending sense. 9175 */ 9176 if (lun == NULL) 9177 goto no_sense; 9178 9179 have_error = 0; 9180 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9181 /* 9182 * Check for pending sense, and then for pending unit attentions. 9183 * Pending sense gets returned first, then pending unit attentions. 9184 */ 9185 mtx_lock(&lun->lun_lock); 9186 #ifdef CTL_WITH_CA 9187 if (ctl_is_set(lun->have_ca, initidx)) { 9188 scsi_sense_data_type stored_format; 9189 9190 /* 9191 * Check to see which sense format was used for the stored 9192 * sense data. 9193 */ 9194 stored_format = scsi_sense_type(&lun->pending_sense[initidx]); 9195 9196 /* 9197 * If the user requested a different sense format than the 9198 * one we stored, then we need to convert it to the other 9199 * format. If we're going from descriptor to fixed format 9200 * sense data, we may lose things in translation, depending 9201 * on what options were used. 
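 * (Fixed-format sense has no room for the extra descriptors, so in general only the common fields, such as the sense key, ASC/ASCQ and the information bytes, survive the conversion.)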
9202 * 9203 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9204 * for some reason we'll just copy it out as-is. 9205 */ 9206 if ((stored_format == SSD_TYPE_FIXED) 9207 && (sense_format == SSD_TYPE_DESC)) 9208 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9209 &lun->pending_sense[initidx], 9210 (struct scsi_sense_data_desc *)sense_ptr); 9211 else if ((stored_format == SSD_TYPE_DESC) 9212 && (sense_format == SSD_TYPE_FIXED)) 9213 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9214 &lun->pending_sense[initidx], 9215 (struct scsi_sense_data_fixed *)sense_ptr); 9216 else 9217 memcpy(sense_ptr, &lun->pending_sense[initidx], 9218 MIN(sizeof(*sense_ptr), 9219 sizeof(lun->pending_sense[initidx]))); 9220 9221 ctl_clear_mask(lun->have_ca, initidx); 9222 have_error = 1; 9223 } else 9224 #endif 9225 { 9226 ua_type = ctl_build_ua(lun, initidx, sense_ptr, sense_format); 9227 if (ua_type != CTL_UA_NONE) 9228 have_error = 1; 9229 if (ua_type == CTL_UA_LUN_CHANGE) { 9230 mtx_unlock(&lun->lun_lock); 9231 mtx_lock(&ctl_softc->ctl_lock); 9232 ctl_clr_ua_allluns(ctl_softc, initidx, ua_type); 9233 mtx_unlock(&ctl_softc->ctl_lock); 9234 mtx_lock(&lun->lun_lock); 9235 } 9236 9237 } 9238 mtx_unlock(&lun->lun_lock); 9239 9240 /* 9241 * We already have a pending error, return it. 9242 */ 9243 if (have_error != 0) { 9244 /* 9245 * We report the SCSI status as OK, since the status of the 9246 * request sense command itself is OK. 9247 * We report 0 for the sense length, because we aren't doing 9248 * autosense in this case. We're reporting sense as 9249 * parameter data. 9250 */ 9251 ctl_set_success(ctsio); 9252 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9253 ctsio->be_move_done = ctl_config_move_done; 9254 ctl_datamove((union ctl_io *)ctsio); 9255 return (CTL_RETVAL_COMPLETE); 9256 } 9257 9258 no_sense: 9259 9260 /* 9261 * No sense information to report, so we report that everything is 9262 * okay. 9263 */ 9264 ctl_set_sense_data(sense_ptr, 9265 lun, 9266 sense_format, 9267 /*current_error*/ 1, 9268 /*sense_key*/ SSD_KEY_NO_SENSE, 9269 /*asc*/ 0x00, 9270 /*ascq*/ 0x00, 9271 SSD_ELEM_NONE); 9272 9273 /* 9274 * We report 0 for the sense length, because we aren't doing 9275 * autosense in this case. We're reporting sense as parameter data. 9276 */ 9277 ctl_set_success(ctsio); 9278 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9279 ctsio->be_move_done = ctl_config_move_done; 9280 ctl_datamove((union ctl_io *)ctsio); 9281 return (CTL_RETVAL_COMPLETE); 9282 } 9283 9284 int 9285 ctl_tur(struct ctl_scsiio *ctsio) 9286 { 9287 9288 CTL_DEBUG_PRINT(("ctl_tur\n")); 9289 9290 ctl_set_success(ctsio); 9291 ctl_done((union ctl_io *)ctsio); 9292 9293 return (CTL_RETVAL_COMPLETE); 9294 } 9295 9296 /* 9297 * SCSI VPD page 0x00, the Supported VPD Pages page. 
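 * (Returned for INQUIRY with EVPD=1 and PAGE CODE 00h; the payload is simply the list of VPD page codes this target supports.)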
9298 */ 9299 static int 9300 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9301 { 9302 struct scsi_vpd_supported_pages *pages; 9303 int sup_page_size; 9304 struct ctl_lun *lun; 9305 int p; 9306 9307 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9308 9309 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9310 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9311 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9312 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9313 ctsio->kern_sg_entries = 0; 9314 9315 if (sup_page_size < alloc_len) { 9316 ctsio->residual = alloc_len - sup_page_size; 9317 ctsio->kern_data_len = sup_page_size; 9318 ctsio->kern_total_len = sup_page_size; 9319 } else { 9320 ctsio->residual = 0; 9321 ctsio->kern_data_len = alloc_len; 9322 ctsio->kern_total_len = alloc_len; 9323 } 9324 ctsio->kern_data_resid = 0; 9325 ctsio->kern_rel_offset = 0; 9326 ctsio->kern_sg_entries = 0; 9327 9328 /* 9329 * The control device is always connected. The disk device, on the 9330 * other hand, may not be online all the time. Need to change this 9331 * to figure out whether the disk device is actually online or not. 9332 */ 9333 if (lun != NULL) 9334 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9335 lun->be_lun->lun_type; 9336 else 9337 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9338 9339 p = 0; 9340 /* Supported VPD pages */ 9341 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9342 /* Serial Number */ 9343 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9344 /* Device Identification */ 9345 pages->page_list[p++] = SVPD_DEVICE_ID; 9346 /* Extended INQUIRY Data */ 9347 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9348 /* Mode Page Policy */ 9349 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9350 /* SCSI Ports */ 9351 pages->page_list[p++] = SVPD_SCSI_PORTS; 9352 /* Third-party Copy */ 9353 pages->page_list[p++] = SVPD_SCSI_TPC; 9354 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9355 /* Block limits */ 9356 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9357 /* Block Device Characteristics */ 9358 pages->page_list[p++] = SVPD_BDC; 9359 /* Logical Block Provisioning */ 9360 pages->page_list[p++] = SVPD_LBP; 9361 } 9362 pages->length = p; 9363 9364 ctl_set_success(ctsio); 9365 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9366 ctsio->be_move_done = ctl_config_move_done; 9367 ctl_datamove((union ctl_io *)ctsio); 9368 return (CTL_RETVAL_COMPLETE); 9369 } 9370 9371 /* 9372 * SCSI VPD page 0x80, the Unit Serial Number page. 9373 */ 9374 static int 9375 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9376 { 9377 struct scsi_vpd_unit_serial_number *sn_ptr; 9378 struct ctl_lun *lun; 9379 int data_len; 9380 9381 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9382 9383 data_len = 4 + CTL_SN_LEN; 9384 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9385 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9386 if (data_len < alloc_len) { 9387 ctsio->residual = alloc_len - data_len; 9388 ctsio->kern_data_len = data_len; 9389 ctsio->kern_total_len = data_len; 9390 } else { 9391 ctsio->residual = 0; 9392 ctsio->kern_data_len = alloc_len; 9393 ctsio->kern_total_len = alloc_len; 9394 } 9395 ctsio->kern_data_resid = 0; 9396 ctsio->kern_rel_offset = 0; 9397 ctsio->kern_sg_entries = 0; 9398 9399 /* 9400 * The control device is always connected. The disk device, on the 9401 * other hand, may not be online all the time. 
Need to change this 9402 * to figure out whether the disk device is actually online or not. 9403 */ 9404 if (lun != NULL) 9405 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9406 lun->be_lun->lun_type; 9407 else 9408 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9409 9410 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9411 sn_ptr->length = CTL_SN_LEN; 9412 /* 9413 * If we don't have a LUN, we just leave the serial number as 9414 * all spaces. 9415 */ 9416 if (lun != NULL) { 9417 strncpy((char *)sn_ptr->serial_num, 9418 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9419 } else 9420 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9421 9422 ctl_set_success(ctsio); 9423 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9424 ctsio->be_move_done = ctl_config_move_done; 9425 ctl_datamove((union ctl_io *)ctsio); 9426 return (CTL_RETVAL_COMPLETE); 9427 } 9428 9429 9430 /* 9431 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 9432 */ 9433 static int 9434 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9435 { 9436 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9437 struct ctl_lun *lun; 9438 int data_len; 9439 9440 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9441 9442 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9443 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9444 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9445 ctsio->kern_sg_entries = 0; 9446 9447 if (data_len < alloc_len) { 9448 ctsio->residual = alloc_len - data_len; 9449 ctsio->kern_data_len = data_len; 9450 ctsio->kern_total_len = data_len; 9451 } else { 9452 ctsio->residual = 0; 9453 ctsio->kern_data_len = alloc_len; 9454 ctsio->kern_total_len = alloc_len; 9455 } 9456 ctsio->kern_data_resid = 0; 9457 ctsio->kern_rel_offset = 0; 9458 ctsio->kern_sg_entries = 0; 9459 9460 /* 9461 * The control device is always connected. The disk device, on the 9462 * other hand, may not be online all the time. 9463 */ 9464 if (lun != NULL) 9465 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9466 lun->be_lun->lun_type; 9467 else 9468 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9469 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9470 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9471 /* 9472 * We support head of queue, ordered and simple tags. 9473 */ 9474 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9475 /* 9476 * Volatile cache supported. 9477 */ 9478 eid_ptr->flags3 = SVPD_EID_V_SUP; 9479 9480 /* 9481 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9482 * attention for a particular IT nexus on all LUNs once we report 9483 * it to that nexus once. This bit is required as of SPC-4. 9484 */ 9485 eid_ptr->flags4 = SVPD_EID_LUICLT; 9486 9487 /* 9488 * XXX KDM in order to correctly answer this, we would need 9489 * information from the SIM to determine how much sense data it 9490 * can send. So this would really be a path inquiry field, most 9491 * likely. This can be set to a maximum of 252 according to SPC-4, 9492 * but the hardware may or may not be able to support that much. 9493 * 0 just means that the maximum sense data length is not reported. 
9494 */ 9495 eid_ptr->max_sense_length = 0; 9496 9497 ctl_set_success(ctsio); 9498 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9499 ctsio->be_move_done = ctl_config_move_done; 9500 ctl_datamove((union ctl_io *)ctsio); 9501 return (CTL_RETVAL_COMPLETE); 9502 } 9503 9504 static int 9505 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9506 { 9507 struct scsi_vpd_mode_page_policy *mpp_ptr; 9508 struct ctl_lun *lun; 9509 int data_len; 9510 9511 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9512 9513 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9514 sizeof(struct scsi_vpd_mode_page_policy_descr); 9515 9516 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9517 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9518 ctsio->kern_sg_entries = 0; 9519 9520 if (data_len < alloc_len) { 9521 ctsio->residual = alloc_len - data_len; 9522 ctsio->kern_data_len = data_len; 9523 ctsio->kern_total_len = data_len; 9524 } else { 9525 ctsio->residual = 0; 9526 ctsio->kern_data_len = alloc_len; 9527 ctsio->kern_total_len = alloc_len; 9528 } 9529 ctsio->kern_data_resid = 0; 9530 ctsio->kern_rel_offset = 0; 9531 ctsio->kern_sg_entries = 0; 9532 9533 /* 9534 * The control device is always connected. The disk device, on the 9535 * other hand, may not be online all the time. 9536 */ 9537 if (lun != NULL) 9538 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9539 lun->be_lun->lun_type; 9540 else 9541 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9542 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9543 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9544 mpp_ptr->descr[0].page_code = 0x3f; 9545 mpp_ptr->descr[0].subpage_code = 0xff; 9546 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9547 9548 ctl_set_success(ctsio); 9549 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9550 ctsio->be_move_done = ctl_config_move_done; 9551 ctl_datamove((union ctl_io *)ctsio); 9552 return (CTL_RETVAL_COMPLETE); 9553 } 9554 9555 /* 9556 * SCSI VPD page 0x83, the Device Identification page. 
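 * (Built below from the backend-supplied per-LUN identifiers (if any), the port identifier, a Relative Target Port identifier, a Target Port Group identifier and, when available, a target device identifier.)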
9557 */ 9558 static int 9559 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9560 { 9561 struct scsi_vpd_device_id *devid_ptr; 9562 struct scsi_vpd_id_descriptor *desc; 9563 struct ctl_softc *softc; 9564 struct ctl_lun *lun; 9565 struct ctl_port *port; 9566 int data_len; 9567 uint8_t proto; 9568 9569 softc = control_softc; 9570 9571 port = ctl_io_port(&ctsio->io_hdr); 9572 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9573 9574 data_len = sizeof(struct scsi_vpd_device_id) + 9575 sizeof(struct scsi_vpd_id_descriptor) + 9576 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9577 sizeof(struct scsi_vpd_id_descriptor) + 9578 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9579 if (lun && lun->lun_devid) 9580 data_len += lun->lun_devid->len; 9581 if (port && port->port_devid) 9582 data_len += port->port_devid->len; 9583 if (port && port->target_devid) 9584 data_len += port->target_devid->len; 9585 9586 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9587 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9588 ctsio->kern_sg_entries = 0; 9589 9590 if (data_len < alloc_len) { 9591 ctsio->residual = alloc_len - data_len; 9592 ctsio->kern_data_len = data_len; 9593 ctsio->kern_total_len = data_len; 9594 } else { 9595 ctsio->residual = 0; 9596 ctsio->kern_data_len = alloc_len; 9597 ctsio->kern_total_len = alloc_len; 9598 } 9599 ctsio->kern_data_resid = 0; 9600 ctsio->kern_rel_offset = 0; 9601 ctsio->kern_sg_entries = 0; 9602 9603 /* 9604 * The control device is always connected. The disk device, on the 9605 * other hand, may not be online all the time. 9606 */ 9607 if (lun != NULL) 9608 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9609 lun->be_lun->lun_type; 9610 else 9611 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9612 devid_ptr->page_code = SVPD_DEVICE_ID; 9613 scsi_ulto2b(data_len - 4, devid_ptr->length); 9614 9615 if (port && port->port_type == CTL_PORT_FC) 9616 proto = SCSI_PROTO_FC << 4; 9617 else if (port && port->port_type == CTL_PORT_ISCSI) 9618 proto = SCSI_PROTO_ISCSI << 4; 9619 else 9620 proto = SCSI_PROTO_SPI << 4; 9621 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9622 9623 /* 9624 * We're using a LUN association here. i.e., this device ID is a 9625 * per-LUN identifier. 9626 */ 9627 if (lun && lun->lun_devid) { 9628 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9629 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9630 lun->lun_devid->len); 9631 } 9632 9633 /* 9634 * This is for the WWPN which is a port association. 
9635 */ 9636 if (port && port->port_devid) { 9637 memcpy(desc, port->port_devid->data, port->port_devid->len); 9638 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9639 port->port_devid->len); 9640 } 9641 9642 /* 9643 * This is for the Relative Target Port(type 4h) identifier 9644 */ 9645 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9646 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9647 SVPD_ID_TYPE_RELTARG; 9648 desc->length = 4; 9649 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9650 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9651 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9652 9653 /* 9654 * This is for the Target Port Group(type 5h) identifier 9655 */ 9656 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9657 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9658 SVPD_ID_TYPE_TPORTGRP; 9659 desc->length = 4; 9660 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / softc->port_cnt + 1, 9661 &desc->identifier[2]); 9662 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9663 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9664 9665 /* 9666 * This is for the Target identifier 9667 */ 9668 if (port && port->target_devid) { 9669 memcpy(desc, port->target_devid->data, port->target_devid->len); 9670 } 9671 9672 ctl_set_success(ctsio); 9673 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9674 ctsio->be_move_done = ctl_config_move_done; 9675 ctl_datamove((union ctl_io *)ctsio); 9676 return (CTL_RETVAL_COMPLETE); 9677 } 9678 9679 static int 9680 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9681 { 9682 struct ctl_softc *softc = control_softc; 9683 struct scsi_vpd_scsi_ports *sp; 9684 struct scsi_vpd_port_designation *pd; 9685 struct scsi_vpd_port_designation_cont *pdc; 9686 struct ctl_lun *lun; 9687 struct ctl_port *port; 9688 int data_len, num_target_ports, iid_len, id_len; 9689 9690 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9691 9692 num_target_ports = 0; 9693 iid_len = 0; 9694 id_len = 0; 9695 mtx_lock(&softc->ctl_lock); 9696 STAILQ_FOREACH(port, &softc->port_list, links) { 9697 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9698 continue; 9699 if (lun != NULL && 9700 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9701 continue; 9702 num_target_ports++; 9703 if (port->init_devid) 9704 iid_len += port->init_devid->len; 9705 if (port->port_devid) 9706 id_len += port->port_devid->len; 9707 } 9708 mtx_unlock(&softc->ctl_lock); 9709 9710 data_len = sizeof(struct scsi_vpd_scsi_ports) + 9711 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9712 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9713 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9714 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9715 ctsio->kern_sg_entries = 0; 9716 9717 if (data_len < alloc_len) { 9718 ctsio->residual = alloc_len - data_len; 9719 ctsio->kern_data_len = data_len; 9720 ctsio->kern_total_len = data_len; 9721 } else { 9722 ctsio->residual = 0; 9723 ctsio->kern_data_len = alloc_len; 9724 ctsio->kern_total_len = alloc_len; 9725 } 9726 ctsio->kern_data_resid = 0; 9727 ctsio->kern_rel_offset = 0; 9728 ctsio->kern_sg_entries = 0; 9729 9730 /* 9731 * The control device is always connected. The disk device, on the 9732 * other hand, may not be online all the time. Need to change this 9733 * to figure out whether the disk device is actually online or not. 
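 * (The page body below holds one port designation descriptor per online target port, each followed by a continuation descriptor carrying that port's target port identifiers.)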
9734 */ 9735 if (lun != NULL) 9736 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 9737 lun->be_lun->lun_type; 9738 else 9739 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9740 9741 sp->page_code = SVPD_SCSI_PORTS; 9742 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9743 sp->page_length); 9744 pd = &sp->design[0]; 9745 9746 mtx_lock(&softc->ctl_lock); 9747 STAILQ_FOREACH(port, &softc->port_list, links) { 9748 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9749 continue; 9750 if (lun != NULL && 9751 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9752 continue; 9753 scsi_ulto2b(port->targ_port, pd->relative_port_id); 9754 if (port->init_devid) { 9755 iid_len = port->init_devid->len; 9756 memcpy(pd->initiator_transportid, 9757 port->init_devid->data, port->init_devid->len); 9758 } else 9759 iid_len = 0; 9760 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9761 pdc = (struct scsi_vpd_port_designation_cont *) 9762 (&pd->initiator_transportid[iid_len]); 9763 if (port->port_devid) { 9764 id_len = port->port_devid->len; 9765 memcpy(pdc->target_port_descriptors, 9766 port->port_devid->data, port->port_devid->len); 9767 } else 9768 id_len = 0; 9769 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9770 pd = (struct scsi_vpd_port_designation *) 9771 ((uint8_t *)pdc->target_port_descriptors + id_len); 9772 } 9773 mtx_unlock(&softc->ctl_lock); 9774 9775 ctl_set_success(ctsio); 9776 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9777 ctsio->be_move_done = ctl_config_move_done; 9778 ctl_datamove((union ctl_io *)ctsio); 9779 return (CTL_RETVAL_COMPLETE); 9780 } 9781 9782 static int 9783 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9784 { 9785 struct scsi_vpd_block_limits *bl_ptr; 9786 struct ctl_lun *lun; 9787 int bs; 9788 9789 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9790 9791 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9792 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 9793 ctsio->kern_sg_entries = 0; 9794 9795 if (sizeof(*bl_ptr) < alloc_len) { 9796 ctsio->residual = alloc_len - sizeof(*bl_ptr); 9797 ctsio->kern_data_len = sizeof(*bl_ptr); 9798 ctsio->kern_total_len = sizeof(*bl_ptr); 9799 } else { 9800 ctsio->residual = 0; 9801 ctsio->kern_data_len = alloc_len; 9802 ctsio->kern_total_len = alloc_len; 9803 } 9804 ctsio->kern_data_resid = 0; 9805 ctsio->kern_rel_offset = 0; 9806 ctsio->kern_sg_entries = 0; 9807 9808 /* 9809 * The control device is always connected. The disk device, on the 9810 * other hand, may not be online all the time. Need to change this 9811 * to figure out whether the disk device is actually online or not. 
9812 */ 9813 if (lun != NULL) 9814 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9815 lun->be_lun->lun_type; 9816 else 9817 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9818 9819 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9820 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 9821 bl_ptr->max_cmp_write_len = 0xff; 9822 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9823 if (lun != NULL) { 9824 bs = lun->be_lun->blocksize; 9825 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 9826 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9827 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt); 9828 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt); 9829 if (lun->be_lun->ublockexp != 0) { 9830 scsi_ulto4b((1 << lun->be_lun->ublockexp), 9831 bl_ptr->opt_unmap_grain); 9832 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 9833 bl_ptr->unmap_grain_align); 9834 } 9835 } 9836 scsi_ulto4b(lun->be_lun->atomicblock, 9837 bl_ptr->max_atomic_transfer_length); 9838 scsi_ulto4b(0, bl_ptr->atomic_alignment); 9839 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 9840 } 9841 scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length); 9842 9843 ctl_set_success(ctsio); 9844 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9845 ctsio->be_move_done = ctl_config_move_done; 9846 ctl_datamove((union ctl_io *)ctsio); 9847 return (CTL_RETVAL_COMPLETE); 9848 } 9849 9850 static int 9851 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 9852 { 9853 struct scsi_vpd_block_device_characteristics *bdc_ptr; 9854 struct ctl_lun *lun; 9855 const char *value; 9856 u_int i; 9857 9858 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9859 9860 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 9861 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 9862 ctsio->kern_sg_entries = 0; 9863 9864 if (sizeof(*bdc_ptr) < alloc_len) { 9865 ctsio->residual = alloc_len - sizeof(*bdc_ptr); 9866 ctsio->kern_data_len = sizeof(*bdc_ptr); 9867 ctsio->kern_total_len = sizeof(*bdc_ptr); 9868 } else { 9869 ctsio->residual = 0; 9870 ctsio->kern_data_len = alloc_len; 9871 ctsio->kern_total_len = alloc_len; 9872 } 9873 ctsio->kern_data_resid = 0; 9874 ctsio->kern_rel_offset = 0; 9875 ctsio->kern_sg_entries = 0; 9876 9877 /* 9878 * The control device is always connected. The disk device, on the 9879 * other hand, may not be online all the time. Need to change this 9880 * to figure out whether the disk device is actually online or not. 
9881 */ 9882 if (lun != NULL) 9883 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9884 lun->be_lun->lun_type; 9885 else 9886 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9887 bdc_ptr->page_code = SVPD_BDC; 9888 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 9889 if (lun != NULL && 9890 (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) 9891 i = strtol(value, NULL, 0); 9892 else 9893 i = CTL_DEFAULT_ROTATION_RATE; 9894 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 9895 if (lun != NULL && 9896 (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL) 9897 i = strtol(value, NULL, 0); 9898 else 9899 i = 0; 9900 bdc_ptr->wab_wac_ff = (i & 0x0f); 9901 bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; 9902 9903 ctl_set_success(ctsio); 9904 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9905 ctsio->be_move_done = ctl_config_move_done; 9906 ctl_datamove((union ctl_io *)ctsio); 9907 return (CTL_RETVAL_COMPLETE); 9908 } 9909 9910 static int 9911 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 9912 { 9913 struct scsi_vpd_logical_block_prov *lbp_ptr; 9914 struct ctl_lun *lun; 9915 9916 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9917 9918 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 9919 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 9920 ctsio->kern_sg_entries = 0; 9921 9922 if (sizeof(*lbp_ptr) < alloc_len) { 9923 ctsio->residual = alloc_len - sizeof(*lbp_ptr); 9924 ctsio->kern_data_len = sizeof(*lbp_ptr); 9925 ctsio->kern_total_len = sizeof(*lbp_ptr); 9926 } else { 9927 ctsio->residual = 0; 9928 ctsio->kern_data_len = alloc_len; 9929 ctsio->kern_total_len = alloc_len; 9930 } 9931 ctsio->kern_data_resid = 0; 9932 ctsio->kern_rel_offset = 0; 9933 ctsio->kern_sg_entries = 0; 9934 9935 /* 9936 * The control device is always connected. The disk device, on the 9937 * other hand, may not be online all the time. Need to change this 9938 * to figure out whether the disk device is actually online or not. 9939 */ 9940 if (lun != NULL) 9941 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9942 lun->be_lun->lun_type; 9943 else 9944 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9945 9946 lbp_ptr->page_code = SVPD_LBP; 9947 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 9948 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 9949 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9950 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 9951 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 9952 lbp_ptr->prov_type = SVPD_LBP_THIN; 9953 } 9954 9955 ctl_set_success(ctsio); 9956 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9957 ctsio->be_move_done = ctl_config_move_done; 9958 ctl_datamove((union ctl_io *)ctsio); 9959 return (CTL_RETVAL_COMPLETE); 9960 } 9961 9962 /* 9963 * INQUIRY with the EVPD bit set. 
9964 */ 9965 static int 9966 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 9967 { 9968 struct ctl_lun *lun; 9969 struct scsi_inquiry *cdb; 9970 int alloc_len, retval; 9971 9972 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9973 cdb = (struct scsi_inquiry *)ctsio->cdb; 9974 alloc_len = scsi_2btoul(cdb->length); 9975 9976 switch (cdb->page_code) { 9977 case SVPD_SUPPORTED_PAGES: 9978 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 9979 break; 9980 case SVPD_UNIT_SERIAL_NUMBER: 9981 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 9982 break; 9983 case SVPD_DEVICE_ID: 9984 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 9985 break; 9986 case SVPD_EXTENDED_INQUIRY_DATA: 9987 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 9988 break; 9989 case SVPD_MODE_PAGE_POLICY: 9990 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 9991 break; 9992 case SVPD_SCSI_PORTS: 9993 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 9994 break; 9995 case SVPD_SCSI_TPC: 9996 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 9997 break; 9998 case SVPD_BLOCK_LIMITS: 9999 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10000 goto err; 10001 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 10002 break; 10003 case SVPD_BDC: 10004 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10005 goto err; 10006 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 10007 break; 10008 case SVPD_LBP: 10009 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10010 goto err; 10011 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 10012 break; 10013 default: 10014 err: 10015 ctl_set_invalid_field(ctsio, 10016 /*sks_valid*/ 1, 10017 /*command*/ 1, 10018 /*field*/ 2, 10019 /*bit_valid*/ 0, 10020 /*bit*/ 0); 10021 ctl_done((union ctl_io *)ctsio); 10022 retval = CTL_RETVAL_COMPLETE; 10023 break; 10024 } 10025 10026 return (retval); 10027 } 10028 10029 /* 10030 * Standard INQUIRY data. 10031 */ 10032 static int 10033 ctl_inquiry_std(struct ctl_scsiio *ctsio) 10034 { 10035 struct scsi_inquiry_data *inq_ptr; 10036 struct scsi_inquiry *cdb; 10037 struct ctl_softc *softc; 10038 struct ctl_port *port; 10039 struct ctl_lun *lun; 10040 char *val; 10041 uint32_t alloc_len, data_len; 10042 ctl_port_type port_type; 10043 10044 softc = control_softc; 10045 10046 /* 10047 * Figure out whether we're talking to a Fibre Channel port or not. 10048 * We treat the ioctl front end, and any SCSI adapters, as packetized 10049 * SCSI front ends. 10050 */ 10051 port = ctl_io_port(&ctsio->io_hdr); 10052 if (port != NULL) 10053 port_type = port->port_type; 10054 else 10055 port_type = CTL_PORT_SCSI; 10056 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 10057 port_type = CTL_PORT_SCSI; 10058 10059 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10060 cdb = (struct scsi_inquiry *)ctsio->cdb; 10061 alloc_len = scsi_2btoul(cdb->length); 10062 10063 /* 10064 * We malloc the full inquiry data size here and fill it 10065 * in. If the user only asks for less, we'll give him 10066 * that much. 
10067 */ 10068 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); 10069 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10070 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 10071 ctsio->kern_sg_entries = 0; 10072 ctsio->kern_data_resid = 0; 10073 ctsio->kern_rel_offset = 0; 10074 10075 if (data_len < alloc_len) { 10076 ctsio->residual = alloc_len - data_len; 10077 ctsio->kern_data_len = data_len; 10078 ctsio->kern_total_len = data_len; 10079 } else { 10080 ctsio->residual = 0; 10081 ctsio->kern_data_len = alloc_len; 10082 ctsio->kern_total_len = alloc_len; 10083 } 10084 10085 if (lun != NULL) { 10086 if ((lun->flags & CTL_LUN_PRIMARY_SC) || 10087 softc->ha_link >= CTL_HA_LINK_UNKNOWN) { 10088 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10089 lun->be_lun->lun_type; 10090 } else { 10091 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | 10092 lun->be_lun->lun_type; 10093 } 10094 } else 10095 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 10096 10097 /* RMB in byte 2 is 0 */ 10098 inq_ptr->version = SCSI_REV_SPC4; 10099 10100 /* 10101 * According to SAM-3, even if a device only supports a single 10102 * level of LUN addressing, it should still set the HISUP bit: 10103 * 10104 * 4.9.1 Logical unit numbers overview 10105 * 10106 * All logical unit number formats described in this standard are 10107 * hierarchical in structure even when only a single level in that 10108 * hierarchy is used. The HISUP bit shall be set to one in the 10109 * standard INQUIRY data (see SPC-2) when any logical unit number 10110 * format described in this standard is used. Non-hierarchical 10111 * formats are outside the scope of this standard. 10112 * 10113 * Therefore we set the HiSup bit here. 10114 * 10115 * The response format is 2, per SPC-3. 10116 */ 10117 inq_ptr->response_format = SID_HiSup | 2; 10118 10119 inq_ptr->additional_length = data_len - 10120 (offsetof(struct scsi_inquiry_data, additional_length) + 1); 10121 CTL_DEBUG_PRINT(("additional_length = %d\n", 10122 inq_ptr->additional_length)); 10123 10124 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; 10125 /* 16 bit addressing */ 10126 if (port_type == CTL_PORT_SCSI) 10127 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10128 /* XXX set the SID_MultiP bit here if we're actually going to 10129 respond on multiple ports */ 10130 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10131 10132 /* 16 bit data bus, synchronous transfers */ 10133 if (port_type == CTL_PORT_SCSI) 10134 inq_ptr->flags = SID_WBus16 | SID_Sync; 10135 /* 10136 * XXX KDM do we want to support tagged queueing on the control 10137 * device at all? 10138 */ 10139 if ((lun == NULL) 10140 || (lun->be_lun->lun_type != T_PROCESSOR)) 10141 inq_ptr->flags |= SID_CmdQue; 10142 /* 10143 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10144 * We have 8 bytes for the vendor name, and 16 bytes for the device 10145 * name and 4 bytes for the revision.
10146 */ 10147 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10148 "vendor")) == NULL) { 10149 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10150 } else { 10151 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10152 strncpy(inq_ptr->vendor, val, 10153 min(sizeof(inq_ptr->vendor), strlen(val))); 10154 } 10155 if (lun == NULL) { 10156 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10157 sizeof(inq_ptr->product)); 10158 } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) { 10159 switch (lun->be_lun->lun_type) { 10160 case T_DIRECT: 10161 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10162 sizeof(inq_ptr->product)); 10163 break; 10164 case T_PROCESSOR: 10165 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10166 sizeof(inq_ptr->product)); 10167 break; 10168 default: 10169 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 10170 sizeof(inq_ptr->product)); 10171 break; 10172 } 10173 } else { 10174 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10175 strncpy(inq_ptr->product, val, 10176 min(sizeof(inq_ptr->product), strlen(val))); 10177 } 10178 10179 /* 10180 * XXX make this a macro somewhere so it automatically gets 10181 * incremented when we make changes. 10182 */ 10183 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10184 "revision")) == NULL) { 10185 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10186 } else { 10187 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10188 strncpy(inq_ptr->revision, val, 10189 min(sizeof(inq_ptr->revision), strlen(val))); 10190 } 10191 10192 /* 10193 * For parallel SCSI, we support double transition and single 10194 * transition clocking. We also support QAS (Quick Arbitration 10195 * and Selection) and Information Unit transfers on both the 10196 * control and array devices. 
10197 */ 10198 if (port_type == CTL_PORT_SCSI) 10199 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10200 SID_SPI_IUS; 10201 10202 /* SAM-5 (no version claimed) */ 10203 scsi_ulto2b(0x00A0, inq_ptr->version1); 10204 /* SPC-4 (no version claimed) */ 10205 scsi_ulto2b(0x0460, inq_ptr->version2); 10206 if (port_type == CTL_PORT_FC) { 10207 /* FCP-2 ANSI INCITS.350:2003 */ 10208 scsi_ulto2b(0x0917, inq_ptr->version3); 10209 } else if (port_type == CTL_PORT_SCSI) { 10210 /* SPI-4 ANSI INCITS.362:200x */ 10211 scsi_ulto2b(0x0B56, inq_ptr->version3); 10212 } else if (port_type == CTL_PORT_ISCSI) { 10213 /* iSCSI (no version claimed) */ 10214 scsi_ulto2b(0x0960, inq_ptr->version3); 10215 } else if (port_type == CTL_PORT_SAS) { 10216 /* SAS (no version claimed) */ 10217 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10218 } 10219 10220 if (lun == NULL) { 10221 /* SBC-4 (no version claimed) */ 10222 scsi_ulto2b(0x0600, inq_ptr->version4); 10223 } else { 10224 switch (lun->be_lun->lun_type) { 10225 case T_DIRECT: 10226 /* SBC-4 (no version claimed) */ 10227 scsi_ulto2b(0x0600, inq_ptr->version4); 10228 break; 10229 case T_PROCESSOR: 10230 default: 10231 break; 10232 } 10233 } 10234 10235 ctl_set_success(ctsio); 10236 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10237 ctsio->be_move_done = ctl_config_move_done; 10238 ctl_datamove((union ctl_io *)ctsio); 10239 return (CTL_RETVAL_COMPLETE); 10240 } 10241 10242 int 10243 ctl_inquiry(struct ctl_scsiio *ctsio) 10244 { 10245 struct scsi_inquiry *cdb; 10246 int retval; 10247 10248 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10249 10250 cdb = (struct scsi_inquiry *)ctsio->cdb; 10251 if (cdb->byte2 & SI_EVPD) 10252 retval = ctl_inquiry_evpd(ctsio); 10253 else if (cdb->page_code == 0) 10254 retval = ctl_inquiry_std(ctsio); 10255 else { 10256 ctl_set_invalid_field(ctsio, 10257 /*sks_valid*/ 1, 10258 /*command*/ 1, 10259 /*field*/ 2, 10260 /*bit_valid*/ 0, 10261 /*bit*/ 0); 10262 ctl_done((union ctl_io *)ctsio); 10263 return (CTL_RETVAL_COMPLETE); 10264 } 10265 10266 return (retval); 10267 } 10268 10269 /* 10270 * For known CDB types, parse the LBA and length. 
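 *
 * Returns 0 on success and nonzero for CDBs this routine does not know
 * how to parse. The extent-overlap checks further down use it roughly
 * like this (a sketch, not a verbatim copy of any caller):
 *
 *	uint64_t lba, len;
 *
 *	if (ctl_get_lba_len(io, &lba, &len) != 0)
 *		return (CTL_ACTION_ERROR);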
10271 */ 10272 static int 10273 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10274 { 10275 if (io->io_hdr.io_type != CTL_IO_SCSI) 10276 return (1); 10277 10278 switch (io->scsiio.cdb[0]) { 10279 case COMPARE_AND_WRITE: { 10280 struct scsi_compare_and_write *cdb; 10281 10282 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10283 10284 *lba = scsi_8btou64(cdb->addr); 10285 *len = cdb->length; 10286 break; 10287 } 10288 case READ_6: 10289 case WRITE_6: { 10290 struct scsi_rw_6 *cdb; 10291 10292 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10293 10294 *lba = scsi_3btoul(cdb->addr); 10295 /* only 5 bits are valid in the most significant address byte */ 10296 *lba &= 0x1fffff; 10297 *len = cdb->length; 10298 break; 10299 } 10300 case READ_10: 10301 case WRITE_10: { 10302 struct scsi_rw_10 *cdb; 10303 10304 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10305 10306 *lba = scsi_4btoul(cdb->addr); 10307 *len = scsi_2btoul(cdb->length); 10308 break; 10309 } 10310 case WRITE_VERIFY_10: { 10311 struct scsi_write_verify_10 *cdb; 10312 10313 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10314 10315 *lba = scsi_4btoul(cdb->addr); 10316 *len = scsi_2btoul(cdb->length); 10317 break; 10318 } 10319 case READ_12: 10320 case WRITE_12: { 10321 struct scsi_rw_12 *cdb; 10322 10323 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10324 10325 *lba = scsi_4btoul(cdb->addr); 10326 *len = scsi_4btoul(cdb->length); 10327 break; 10328 } 10329 case WRITE_VERIFY_12: { 10330 struct scsi_write_verify_12 *cdb; 10331 10332 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10333 10334 *lba = scsi_4btoul(cdb->addr); 10335 *len = scsi_4btoul(cdb->length); 10336 break; 10337 } 10338 case READ_16: 10339 case WRITE_16: 10340 case WRITE_ATOMIC_16: { 10341 struct scsi_rw_16 *cdb; 10342 10343 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10344 10345 *lba = scsi_8btou64(cdb->addr); 10346 *len = scsi_4btoul(cdb->length); 10347 break; 10348 } 10349 case WRITE_VERIFY_16: { 10350 struct scsi_write_verify_16 *cdb; 10351 10352 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10353 10354 *lba = scsi_8btou64(cdb->addr); 10355 *len = scsi_4btoul(cdb->length); 10356 break; 10357 } 10358 case WRITE_SAME_10: { 10359 struct scsi_write_same_10 *cdb; 10360 10361 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10362 10363 *lba = scsi_4btoul(cdb->addr); 10364 *len = scsi_2btoul(cdb->length); 10365 break; 10366 } 10367 case WRITE_SAME_16: { 10368 struct scsi_write_same_16 *cdb; 10369 10370 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10371 10372 *lba = scsi_8btou64(cdb->addr); 10373 *len = scsi_4btoul(cdb->length); 10374 break; 10375 } 10376 case VERIFY_10: { 10377 struct scsi_verify_10 *cdb; 10378 10379 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10380 10381 *lba = scsi_4btoul(cdb->addr); 10382 *len = scsi_2btoul(cdb->length); 10383 break; 10384 } 10385 case VERIFY_12: { 10386 struct scsi_verify_12 *cdb; 10387 10388 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10389 10390 *lba = scsi_4btoul(cdb->addr); 10391 *len = scsi_4btoul(cdb->length); 10392 break; 10393 } 10394 case VERIFY_16: { 10395 struct scsi_verify_16 *cdb; 10396 10397 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10398 10399 *lba = scsi_8btou64(cdb->addr); 10400 *len = scsi_4btoul(cdb->length); 10401 break; 10402 } 10403 case UNMAP: { 10404 *lba = 0; 10405 *len = UINT64_MAX; 10406 break; 10407 } 10408 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10409 struct scsi_get_lba_status *cdb; 10410 10411 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 
10412 *lba = scsi_8btou64(cdb->addr); 10413 *len = UINT32_MAX; 10414 break; 10415 } 10416 default: 10417 return (1); 10418 break; /* NOTREACHED */ 10419 } 10420 10421 return (0); 10422 } 10423 10424 static ctl_action 10425 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, 10426 bool seq) 10427 { 10428 uint64_t endlba1, endlba2; 10429 10430 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10431 endlba2 = lba2 + len2 - 1; 10432 10433 if ((endlba1 < lba2) || (endlba2 < lba1)) 10434 return (CTL_ACTION_PASS); 10435 else 10436 return (CTL_ACTION_BLOCK); 10437 } 10438 10439 static int 10440 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10441 { 10442 struct ctl_ptr_len_flags *ptrlen; 10443 struct scsi_unmap_desc *buf, *end, *range; 10444 uint64_t lba; 10445 uint32_t len; 10446 10447 /* If not UNMAP -- go other way. */ 10448 if (io->io_hdr.io_type != CTL_IO_SCSI || 10449 io->scsiio.cdb[0] != UNMAP) 10450 return (CTL_ACTION_ERROR); 10451 10452 /* If UNMAP without data -- block and wait for data. */ 10453 ptrlen = (struct ctl_ptr_len_flags *) 10454 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10455 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10456 ptrlen->ptr == NULL) 10457 return (CTL_ACTION_BLOCK); 10458 10459 /* UNMAP with data -- check for collision. */ 10460 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10461 end = buf + ptrlen->len / sizeof(*buf); 10462 for (range = buf; range < end; range++) { 10463 lba = scsi_8btou64(range->lba); 10464 len = scsi_4btoul(range->length); 10465 if ((lba < lba2 + len2) && (lba + len > lba2)) 10466 return (CTL_ACTION_BLOCK); 10467 } 10468 return (CTL_ACTION_PASS); 10469 } 10470 10471 static ctl_action 10472 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10473 { 10474 uint64_t lba1, lba2; 10475 uint64_t len1, len2; 10476 int retval; 10477 10478 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10479 return (CTL_ACTION_ERROR); 10480 10481 retval = ctl_extent_check_unmap(io1, lba2, len2); 10482 if (retval != CTL_ACTION_ERROR) 10483 return (retval); 10484 10485 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10486 return (CTL_ACTION_ERROR); 10487 10488 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 10489 } 10490 10491 static ctl_action 10492 ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) 10493 { 10494 uint64_t lba1, lba2; 10495 uint64_t len1, len2; 10496 10497 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10498 return (CTL_ACTION_ERROR); 10499 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10500 return (CTL_ACTION_ERROR); 10501 10502 if (lba1 + len1 == lba2) 10503 return (CTL_ACTION_BLOCK); 10504 return (CTL_ACTION_PASS); 10505 } 10506 10507 static ctl_action 10508 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 10509 union ctl_io *ooa_io) 10510 { 10511 const struct ctl_cmd_entry *pending_entry, *ooa_entry; 10512 ctl_serialize_action *serialize_row; 10513 10514 /* 10515 * The initiator attempted multiple untagged commands at the same 10516 * time. Can't do that. 
10517 */ 10518 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10519 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10520 && ((pending_io->io_hdr.nexus.targ_port == 10521 ooa_io->io_hdr.nexus.targ_port) 10522 && (pending_io->io_hdr.nexus.initid == 10523 ooa_io->io_hdr.nexus.initid)) 10524 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10525 CTL_FLAG_STATUS_SENT)) == 0)) 10526 return (CTL_ACTION_OVERLAP); 10527 10528 /* 10529 * The initiator attempted to send multiple tagged commands with 10530 * the same ID. (It's fine if different initiators have the same 10531 * tag ID.) 10532 * 10533 * Even if all of those conditions are true, we don't kill the I/O 10534 * if the command ahead of us has been aborted. We won't end up 10535 * sending it to the FETD, and it's perfectly legal to resend a 10536 * command with the same tag number as long as the previous 10537 * instance of this tag number has been aborted somehow. 10538 */ 10539 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10540 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10541 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 10542 && ((pending_io->io_hdr.nexus.targ_port == 10543 ooa_io->io_hdr.nexus.targ_port) 10544 && (pending_io->io_hdr.nexus.initid == 10545 ooa_io->io_hdr.nexus.initid)) 10546 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10547 CTL_FLAG_STATUS_SENT)) == 0)) 10548 return (CTL_ACTION_OVERLAP_TAG); 10549 10550 /* 10551 * If we get a head of queue tag, SAM-3 says that we should 10552 * immediately execute it. 10553 * 10554 * What happens if this command would normally block for some other 10555 * reason? e.g. a request sense with a head of queue tag 10556 * immediately after a write. Normally that would block, but this 10557 * will result in its getting executed immediately... 10558 * 10559 * We currently return "pass" instead of "skip", so we'll end up 10560 * going through the rest of the queue to check for overlapped tags. 10561 * 10562 * XXX KDM check for other types of blockage first?? 10563 */ 10564 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10565 return (CTL_ACTION_PASS); 10566 10567 /* 10568 * Ordered tags have to block until all items ahead of them 10569 * have completed. If we get called with an ordered tag, we always 10570 * block, if something else is ahead of us in the queue. 10571 */ 10572 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 10573 return (CTL_ACTION_BLOCK); 10574 10575 /* 10576 * Simple tags get blocked until all head of queue and ordered tags 10577 * ahead of them have completed. I'm lumping untagged commands in 10578 * with simple tags here. XXX KDM is that the right thing to do? 
10579 */ 10580 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10581 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 10582 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10583 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 10584 return (CTL_ACTION_BLOCK); 10585 10586 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); 10587 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); 10588 10589 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 10590 10591 switch (serialize_row[pending_entry->seridx]) { 10592 case CTL_SER_BLOCK: 10593 return (CTL_ACTION_BLOCK); 10594 case CTL_SER_EXTENT: 10595 return (ctl_extent_check(ooa_io, pending_io, 10596 (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10597 case CTL_SER_EXTENTOPT: 10598 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags 10599 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) 10600 return (ctl_extent_check(ooa_io, pending_io, 10601 (lun->be_lun && 10602 lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10603 return (CTL_ACTION_PASS); 10604 case CTL_SER_EXTENTSEQ: 10605 if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) 10606 return (ctl_extent_check_seq(ooa_io, pending_io)); 10607 return (CTL_ACTION_PASS); 10608 case CTL_SER_PASS: 10609 return (CTL_ACTION_PASS); 10610 case CTL_SER_BLOCKOPT: 10611 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags 10612 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) 10613 return (CTL_ACTION_BLOCK); 10614 return (CTL_ACTION_PASS); 10615 case CTL_SER_SKIP: 10616 return (CTL_ACTION_SKIP); 10617 default: 10618 panic("invalid serialization value %d", 10619 serialize_row[pending_entry->seridx]); 10620 } 10621 10622 return (CTL_ACTION_ERROR); 10623 } 10624 10625 /* 10626 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 10627 * Assumptions: 10628 * - pending_io is generally either incoming, or on the blocked queue 10629 * - starting I/O is the I/O we want to start the check with. 10630 */ 10631 static ctl_action 10632 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 10633 union ctl_io *starting_io) 10634 { 10635 union ctl_io *ooa_io; 10636 ctl_action action; 10637 10638 mtx_assert(&lun->lun_lock, MA_OWNED); 10639 10640 /* 10641 * Run back along the OOA queue, starting with the current 10642 * blocked I/O and going through every I/O before it on the 10643 * queue. If starting_io is NULL, we'll just end up returning 10644 * CTL_ACTION_PASS. 10645 */ 10646 for (ooa_io = starting_io; ooa_io != NULL; 10647 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 10648 ooa_links)){ 10649 10650 /* 10651 * This routine just checks to see whether 10652 * cur_blocked is blocked by ooa_io, which is ahead 10653 * of it in the queue. It doesn't queue/dequeue 10654 * cur_blocked. 10655 */ 10656 action = ctl_check_for_blockage(lun, pending_io, ooa_io); 10657 switch (action) { 10658 case CTL_ACTION_BLOCK: 10659 case CTL_ACTION_OVERLAP: 10660 case CTL_ACTION_OVERLAP_TAG: 10661 case CTL_ACTION_SKIP: 10662 case CTL_ACTION_ERROR: 10663 return (action); 10664 break; /* NOTREACHED */ 10665 case CTL_ACTION_PASS: 10666 break; 10667 default: 10668 panic("invalid action %d", action); 10669 break; /* NOTREACHED */ 10670 } 10671 } 10672 10673 return (CTL_ACTION_PASS); 10674 } 10675 10676 /* 10677 * Assumptions: 10678 * - An I/O has just completed, and has been removed from the per-LUN OOA 10679 * queue, so some items on the blocked queue may now be unblocked. 
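 *
 * A typical caller (a sketch of the completion path, not a verbatim
 * copy) holds the LUN lock, removes the completed I/O from the OOA
 * queue, and then lets this routine re-scan the blocked queue:
 *
 *	mtx_lock(&lun->lun_lock);
 *	TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
 *	ctl_check_blocked(lun);
 *	mtx_unlock(&lun->lun_lock);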
10680 */ 10681 static int 10682 ctl_check_blocked(struct ctl_lun *lun) 10683 { 10684 struct ctl_softc *softc = lun->ctl_softc; 10685 union ctl_io *cur_blocked, *next_blocked; 10686 10687 mtx_assert(&lun->lun_lock, MA_OWNED); 10688 10689 /* 10690 * Run forward from the head of the blocked queue, checking each 10691 * entry against the I/Os prior to it on the OOA queue to see if 10692 * there is still any blockage. 10693 * 10694 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 10695 * with our removing a variable on it while it is traversing the 10696 * list. 10697 */ 10698 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 10699 cur_blocked != NULL; cur_blocked = next_blocked) { 10700 union ctl_io *prev_ooa; 10701 ctl_action action; 10702 10703 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 10704 blocked_links); 10705 10706 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 10707 ctl_ooaq, ooa_links); 10708 10709 /* 10710 * If cur_blocked happens to be the first item in the OOA 10711 * queue now, prev_ooa will be NULL, and the action 10712 * returned will just be CTL_ACTION_PASS. 10713 */ 10714 action = ctl_check_ooa(lun, cur_blocked, prev_ooa); 10715 10716 switch (action) { 10717 case CTL_ACTION_BLOCK: 10718 /* Nothing to do here, still blocked */ 10719 break; 10720 case CTL_ACTION_OVERLAP: 10721 case CTL_ACTION_OVERLAP_TAG: 10722 /* 10723 * This shouldn't happen! In theory we've already 10724 * checked this command for overlap... 10725 */ 10726 break; 10727 case CTL_ACTION_PASS: 10728 case CTL_ACTION_SKIP: { 10729 const struct ctl_cmd_entry *entry; 10730 10731 /* 10732 * The skip case shouldn't happen, this transaction 10733 * should have never made it onto the blocked queue. 10734 */ 10735 /* 10736 * This I/O is no longer blocked, we can remove it 10737 * from the blocked queue. Since this is a TAILQ 10738 * (doubly linked list), we can do O(1) removals 10739 * from any place on the list. 10740 */ 10741 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 10742 blocked_links); 10743 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10744 10745 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 10746 (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){ 10747 /* 10748 * Need to send IO back to original side to 10749 * run 10750 */ 10751 union ctl_ha_msg msg_info; 10752 10753 cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 10754 msg_info.hdr.original_sc = 10755 cur_blocked->io_hdr.original_sc; 10756 msg_info.hdr.serializing_sc = cur_blocked; 10757 msg_info.hdr.msg_type = CTL_MSG_R2R; 10758 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 10759 sizeof(msg_info.hdr), M_NOWAIT); 10760 break; 10761 } 10762 entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL); 10763 10764 /* 10765 * Check this I/O for LUN state changes that may 10766 * have happened while this command was blocked. 10767 * The LUN state may have been changed by a command 10768 * ahead of us in the queue, so we need to re-check 10769 * for any states that can be caused by SCSI 10770 * commands. 10771 */ 10772 if (ctl_scsiio_lun_check(lun, entry, 10773 &cur_blocked->scsiio) == 0) { 10774 cur_blocked->io_hdr.flags |= 10775 CTL_FLAG_IS_WAS_ON_RTR; 10776 ctl_enqueue_rtr(cur_blocked); 10777 } else 10778 ctl_done(cur_blocked); 10779 break; 10780 } 10781 default: 10782 /* 10783 * This probably shouldn't happen -- we shouldn't 10784 * get CTL_ACTION_ERROR, or anything else. 
10785 */ 10786 break; 10787 } 10788 } 10789 10790 return (CTL_RETVAL_COMPLETE); 10791 } 10792 10793 /* 10794 * This routine (with one exception) checks LUN flags that can be set by 10795 * commands ahead of us in the OOA queue. These flags have to be checked 10796 * when a command initially comes in, and when we pull a command off the 10797 * blocked queue and are preparing to execute it. The reason we have to 10798 * check these flags for commands on the blocked queue is that the LUN 10799 * state may have been changed by a command ahead of us while we're on the 10800 * blocked queue. 10801 * 10802 * Ordering is somewhat important with these checks, so please pay 10803 * careful attention to the placement of any new checks. 10804 */ 10805 static int 10806 ctl_scsiio_lun_check(struct ctl_lun *lun, 10807 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 10808 { 10809 struct ctl_softc *softc = lun->ctl_softc; 10810 int retval; 10811 uint32_t residx; 10812 10813 retval = 0; 10814 10815 mtx_assert(&lun->lun_lock, MA_OWNED); 10816 10817 /* 10818 * If this shelf is a secondary shelf controller, we may have to 10819 * reject some commands disallowed by HA mode and link state. 10820 */ 10821 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 10822 if (softc->ha_link == CTL_HA_LINK_OFFLINE && 10823 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 10824 ctl_set_lun_unavail(ctsio); 10825 retval = 1; 10826 goto bailout; 10827 } 10828 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && 10829 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 10830 ctl_set_lun_transit(ctsio); 10831 retval = 1; 10832 goto bailout; 10833 } 10834 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && 10835 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { 10836 ctl_set_lun_standby(ctsio); 10837 retval = 1; 10838 goto bailout; 10839 } 10840 10841 /* The rest of checks are only done on executing side */ 10842 if (softc->ha_mode == CTL_HA_MODE_XFER) 10843 goto bailout; 10844 } 10845 10846 if (entry->pattern & CTL_LUN_PAT_WRITE) { 10847 if (lun->be_lun && 10848 lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { 10849 ctl_set_sense(ctsio, /*current_error*/ 1, 10850 /*sense_key*/ SSD_KEY_DATA_PROTECT, 10851 /*asc*/ 0x27, /*ascq*/ 0x01, SSD_ELEM_NONE); 10852 retval = 1; 10853 goto bailout; 10854 } 10855 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT] 10856 .eca_and_aen & SCP_SWP) != 0) { 10857 ctl_set_sense(ctsio, /*current_error*/ 1, 10858 /*sense_key*/ SSD_KEY_DATA_PROTECT, 10859 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 10860 retval = 1; 10861 goto bailout; 10862 } 10863 } 10864 10865 /* 10866 * Check for a reservation conflict. If this command isn't allowed 10867 * even on reserved LUNs, and if this initiator isn't the one who 10868 * reserved us, reject the command with a reservation conflict. 10869 */ 10870 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 10871 if ((lun->flags & CTL_LUN_RESERVED) 10872 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 10873 if (lun->res_idx != residx) { 10874 ctl_set_reservation_conflict(ctsio); 10875 retval = 1; 10876 goto bailout; 10877 } 10878 } 10879 10880 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 10881 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 10882 /* No reservation or command is allowed. */; 10883 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 10884 (lun->res_type == SPR_TYPE_WR_EX || 10885 lun->res_type == SPR_TYPE_WR_EX_RO || 10886 lun->res_type == SPR_TYPE_WR_EX_AR)) { 10887 /* The command is allowed for Write Exclusive resv. 
*/; 10888 } else { 10889 /* 10890 * if we aren't registered or it's a res holder type 10891 * reservation and this isn't the res holder then set a 10892 * conflict. 10893 */ 10894 if (ctl_get_prkey(lun, residx) == 0 10895 || (residx != lun->pr_res_idx && lun->res_type < 4)) { 10896 ctl_set_reservation_conflict(ctsio); 10897 retval = 1; 10898 goto bailout; 10899 } 10900 } 10901 10902 if ((lun->flags & CTL_LUN_OFFLINE) 10903 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0)) { 10904 ctl_set_lun_not_ready(ctsio); 10905 retval = 1; 10906 goto bailout; 10907 } 10908 10909 if ((lun->flags & CTL_LUN_STOPPED) 10910 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) { 10911 /* "Logical unit not ready, initializing cmd. required" */ 10912 ctl_set_lun_stopped(ctsio); 10913 retval = 1; 10914 goto bailout; 10915 } 10916 10917 if ((lun->flags & CTL_LUN_INOPERABLE) 10918 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) { 10919 /* "Medium format corrupted" */ 10920 ctl_set_medium_format_corrupted(ctsio); 10921 retval = 1; 10922 goto bailout; 10923 } 10924 10925 bailout: 10926 return (retval); 10927 } 10928 10929 static void 10930 ctl_failover_io(union ctl_io *io, int have_lock) 10931 { 10932 ctl_set_busy(&io->scsiio); 10933 ctl_done(io); 10934 } 10935 10936 static void 10937 ctl_failover_lun(struct ctl_lun *lun) 10938 { 10939 struct ctl_softc *softc = lun->ctl_softc; 10940 struct ctl_io_hdr *io, *next_io; 10941 10942 CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", lun->lun)); 10943 if (softc->ha_mode == CTL_HA_MODE_XFER) { 10944 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 10945 /* We are master */ 10946 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 10947 if (io->flags & CTL_FLAG_IO_ACTIVE) { 10948 io->flags |= CTL_FLAG_ABORT; 10949 } else { /* This can be only due to DATAMOVE */ 10950 io->msg_type = CTL_MSG_DATAMOVE_DONE; 10951 io->flags |= CTL_FLAG_IO_ACTIVE; 10952 io->port_status = 31340; 10953 ctl_enqueue_isc((union ctl_io *)io); 10954 } 10955 } 10956 /* We are slave */ 10957 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 10958 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 10959 if (io->flags & CTL_FLAG_IO_ACTIVE) { 10960 io->flags |= CTL_FLAG_FAILOVER; 10961 } else { 10962 ctl_set_busy(&((union ctl_io *)io)-> 10963 scsiio); 10964 ctl_done((union ctl_io *)io); 10965 } 10966 } 10967 } 10968 } else { /* SERIALIZE modes */ 10969 TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links, 10970 next_io) { 10971 /* We are master */ 10972 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 10973 TAILQ_REMOVE(&lun->blocked_queue, io, 10974 blocked_links); 10975 io->flags &= ~CTL_FLAG_BLOCKED; 10976 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 10977 ctl_free_io((union ctl_io *)io); 10978 } 10979 } 10980 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 10981 /* We are master */ 10982 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 10983 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 10984 ctl_free_io((union ctl_io *)io); 10985 } 10986 /* We are slave */ 10987 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 10988 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 10989 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { 10990 ctl_set_busy(&((union ctl_io *)io)-> 10991 scsiio); 10992 ctl_done((union ctl_io *)io); 10993 } 10994 } 10995 } 10996 ctl_check_blocked(lun); 10997 } 10998 } 10999 11000 static int 11001 ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) 11002 { 11003 struct ctl_lun *lun; 11004 const struct ctl_cmd_entry *entry; 11005 uint32_t initidx, targ_lun; 11006 int retval; 11007 11008 retval = 0; 
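	/*
	 * Look up the LUN for this I/O by its mapped LUN number. If the
	 * lookup fails or the LUN is disabled, the command is handled as
	 * a LUN-less command below.
	 */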
11009 11010 lun = NULL; 11011 11012 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 11013 if ((targ_lun < CTL_MAX_LUNS) 11014 && ((lun = softc->ctl_luns[targ_lun]) != NULL)) { 11015 /* 11016 * If the LUN is invalid, pretend that it doesn't exist. 11017 * It will go away as soon as all pending I/O has been 11018 * completed. 11019 */ 11020 mtx_lock(&lun->lun_lock); 11021 if (lun->flags & CTL_LUN_DISABLED) { 11022 mtx_unlock(&lun->lun_lock); 11023 lun = NULL; 11024 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 11025 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 11026 } else { 11027 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; 11028 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = 11029 lun->be_lun; 11030 11031 /* 11032 * Every I/O goes into the OOA queue for a 11033 * particular LUN, and stays there until completion. 11034 */ 11035 #ifdef CTL_TIME_IO 11036 if (TAILQ_EMPTY(&lun->ooa_queue)) { 11037 lun->idle_time += getsbinuptime() - 11038 lun->last_busy; 11039 } 11040 #endif 11041 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, 11042 ooa_links); 11043 } 11044 } else { 11045 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 11046 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 11047 } 11048 11049 /* Get the command entry and return an error if the command is unsupported. */ 11050 entry = ctl_validate_command(ctsio); 11051 if (entry == NULL) { 11052 if (lun) 11053 mtx_unlock(&lun->lun_lock); 11054 return (retval); 11055 } 11056 11057 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 11058 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 11059 11060 /* 11061 * Check to see whether we can send this command to LUNs that don't 11062 * exist. This should pretty much only be the case for inquiry 11063 * and request sense. Further checks, below, really require having 11064 * a LUN, so we can't really check the command anymore. Just put 11065 * it on the rtr queue. 11066 */ 11067 if (lun == NULL) { 11068 if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) { 11069 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11070 ctl_enqueue_rtr((union ctl_io *)ctsio); 11071 return (retval); 11072 } 11073 11074 ctl_set_unsupported_lun(ctsio); 11075 ctl_done((union ctl_io *)ctsio); 11076 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); 11077 return (retval); 11078 } else { 11079 /* 11080 * Make sure we support this particular command on this LUN. 11081 * e.g., we don't support writes to the control LUN. 11082 */ 11083 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 11084 mtx_unlock(&lun->lun_lock); 11085 ctl_set_invalid_opcode(ctsio); 11086 ctl_done((union ctl_io *)ctsio); 11087 return (retval); 11088 } 11089 } 11090 11091 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11092 11093 #ifdef CTL_WITH_CA 11094 /* 11095 * If we've got a request sense, it'll clear the contingent 11096 * allegiance condition. Otherwise, if we have a CA condition for 11097 * this initiator, clear it, because it sent down a command other 11098 * than request sense. 11099 */ 11100 if ((ctsio->cdb[0] != REQUEST_SENSE) 11101 && (ctl_is_set(lun->have_ca, initidx))) 11102 ctl_clear_mask(lun->have_ca, initidx); 11103 #endif 11104 11105 /* 11106 * If the command has this flag set, it handles its own unit 11107 * attention reporting, so we shouldn't do anything. Otherwise we 11108 * check for any pending unit attentions, and send them back to the 11109 * initiator. We only do this when a command initially comes in, 11110 * not when we pull it off the blocked queue.
11111 * 11112 * According to SAM-3, section 5.3.2, the order that things get 11113 * presented back to the host is basically unit attentions caused 11114 * by some sort of reset event, busy status, reservation conflicts 11115 * or task set full, and finally any other status. 11116 * 11117 * One issue here is that some of the unit attentions we report 11118 * don't fall into the "reset" category (e.g. "reported luns data 11119 * has changed"). So reporting it here, before the reservation 11120 * check, may be technically wrong. I guess the only thing to do 11121 * would be to check for and report the reset events here, and then 11122 * check for the other unit attention types after we check for a 11123 * reservation conflict. 11124 * 11125 * XXX KDM need to fix this 11126 */ 11127 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11128 ctl_ua_type ua_type; 11129 scsi_sense_data_type sense_format; 11130 11131 if (lun->flags & CTL_LUN_SENSE_DESC) 11132 sense_format = SSD_TYPE_DESC; 11133 else 11134 sense_format = SSD_TYPE_FIXED; 11135 11136 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, 11137 sense_format); 11138 if (ua_type != CTL_UA_NONE) { 11139 mtx_unlock(&lun->lun_lock); 11140 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11141 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11142 ctsio->sense_len = SSD_FULL_SIZE; 11143 ctl_done((union ctl_io *)ctsio); 11144 return (retval); 11145 } 11146 } 11147 11148 11149 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 11150 mtx_unlock(&lun->lun_lock); 11151 ctl_done((union ctl_io *)ctsio); 11152 return (retval); 11153 } 11154 11155 /* 11156 * XXX CHD this is where we want to send IO to the other side if 11157 * this LUN is secondary on this SC. We will need to make a copy 11158 * of the IO and flag the IO on this side as SENT_2OTHER and flag 11159 * the copy we send as FROM_OTHER. 11160 * We also need to stuff the address of the original IO so we can 11161 * find it easily. Something similar will need to be done on the other 11162 * side so when we are done we can find the copy.
11163 */ 11164 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 11165 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0) { 11166 union ctl_ha_msg msg_info; 11167 int isc_retval; 11168 11169 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11170 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11171 mtx_unlock(&lun->lun_lock); 11172 11173 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11174 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11175 msg_info.hdr.serializing_sc = NULL; 11176 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11177 msg_info.scsi.tag_num = ctsio->tag_num; 11178 msg_info.scsi.tag_type = ctsio->tag_type; 11179 msg_info.scsi.cdb_len = ctsio->cdb_len; 11180 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11181 11182 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11183 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), 11184 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { 11185 ctl_set_busy(ctsio); 11186 ctl_done((union ctl_io *)ctsio); 11187 return (retval); 11188 } 11189 return (retval); 11190 } 11191 11192 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11193 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11194 ctl_ooaq, ooa_links))) { 11195 case CTL_ACTION_BLOCK: 11196 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 11197 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 11198 blocked_links); 11199 mtx_unlock(&lun->lun_lock); 11200 return (retval); 11201 case CTL_ACTION_PASS: 11202 case CTL_ACTION_SKIP: 11203 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11204 mtx_unlock(&lun->lun_lock); 11205 ctl_enqueue_rtr((union ctl_io *)ctsio); 11206 break; 11207 case CTL_ACTION_OVERLAP: 11208 mtx_unlock(&lun->lun_lock); 11209 ctl_set_overlapped_cmd(ctsio); 11210 ctl_done((union ctl_io *)ctsio); 11211 break; 11212 case CTL_ACTION_OVERLAP_TAG: 11213 mtx_unlock(&lun->lun_lock); 11214 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11215 ctl_done((union ctl_io *)ctsio); 11216 break; 11217 case CTL_ACTION_ERROR: 11218 default: 11219 mtx_unlock(&lun->lun_lock); 11220 ctl_set_internal_failure(ctsio, 11221 /*sks_valid*/ 0, 11222 /*retry_count*/ 0); 11223 ctl_done((union ctl_io *)ctsio); 11224 break; 11225 } 11226 return (retval); 11227 } 11228 11229 const struct ctl_cmd_entry * 11230 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11231 { 11232 const struct ctl_cmd_entry *entry; 11233 int service_action; 11234 11235 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11236 if (sa) 11237 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11238 if (entry->flags & CTL_CMD_FLAG_SA5) { 11239 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11240 entry = &((const struct ctl_cmd_entry *) 11241 entry->execute)[service_action]; 11242 } 11243 return (entry); 11244 } 11245 11246 const struct ctl_cmd_entry * 11247 ctl_validate_command(struct ctl_scsiio *ctsio) 11248 { 11249 const struct ctl_cmd_entry *entry; 11250 int i, sa; 11251 uint8_t diff; 11252 11253 entry = ctl_get_cmd_entry(ctsio, &sa); 11254 if (entry->execute == NULL) { 11255 if (sa) 11256 ctl_set_invalid_field(ctsio, 11257 /*sks_valid*/ 1, 11258 /*command*/ 1, 11259 /*field*/ 1, 11260 /*bit_valid*/ 1, 11261 /*bit*/ 4); 11262 else 11263 ctl_set_invalid_opcode(ctsio); 11264 ctl_done((union ctl_io *)ctsio); 11265 return (NULL); 11266 } 11267 KASSERT(entry->length > 0, 11268 ("Not defined length for command 0x%02x/0x%02x", 11269 ctsio->cdb[0], ctsio->cdb[1])); 11270 for (i = 1; i < entry->length; i++) { 11271 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11272 if (diff == 0) 11273 continue; 11274 ctl_set_invalid_field(ctsio, 11275 /*sks_valid*/ 1, 
11276 /*command*/ 1, 11277 /*field*/ i, 11278 /*bit_valid*/ 1, 11279 /*bit*/ fls(diff) - 1); 11280 ctl_done((union ctl_io *)ctsio); 11281 return (NULL); 11282 } 11283 return (entry); 11284 } 11285 11286 static int 11287 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11288 { 11289 11290 switch (lun_type) { 11291 case T_PROCESSOR: 11292 if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) && 11293 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0)) 11294 return (0); 11295 break; 11296 case T_DIRECT: 11297 if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) && 11298 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0)) 11299 return (0); 11300 break; 11301 default: 11302 return (0); 11303 } 11304 return (1); 11305 } 11306 11307 static int 11308 ctl_scsiio(struct ctl_scsiio *ctsio) 11309 { 11310 int retval; 11311 const struct ctl_cmd_entry *entry; 11312 11313 retval = CTL_RETVAL_COMPLETE; 11314 11315 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11316 11317 entry = ctl_get_cmd_entry(ctsio, NULL); 11318 11319 /* 11320 * If this I/O has been aborted, just send it straight to 11321 * ctl_done() without executing it. 11322 */ 11323 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11324 ctl_done((union ctl_io *)ctsio); 11325 goto bailout; 11326 } 11327 11328 /* 11329 * All the checks should have been handled by ctl_scsiio_precheck(). 11330 * We should be clear now to just execute the I/O. 11331 */ 11332 retval = entry->execute(ctsio); 11333 11334 bailout: 11335 return (retval); 11336 } 11337 11338 /* 11339 * Since we only implement one target right now, a bus reset simply resets 11340 * our single target. 11341 */ 11342 static int 11343 ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io) 11344 { 11345 return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET)); 11346 } 11347 11348 static int 11349 ctl_target_reset(struct ctl_softc *softc, union ctl_io *io, 11350 ctl_ua_type ua_type) 11351 { 11352 struct ctl_port *port; 11353 struct ctl_lun *lun; 11354 int retval; 11355 11356 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11357 union ctl_ha_msg msg_info; 11358 11359 msg_info.hdr.nexus = io->io_hdr.nexus; 11360 if (ua_type==CTL_UA_TARG_RESET) 11361 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 11362 else 11363 msg_info.task.task_action = CTL_TASK_BUS_RESET; 11364 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11365 msg_info.hdr.original_sc = NULL; 11366 msg_info.hdr.serializing_sc = NULL; 11367 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11368 sizeof(msg_info.task), M_WAITOK); 11369 } 11370 retval = 0; 11371 11372 mtx_lock(&softc->ctl_lock); 11373 port = softc->ctl_ports[io->io_hdr.nexus.targ_port]; 11374 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11375 if (port != NULL && 11376 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 11377 continue; 11378 retval += ctl_lun_reset(lun, io, ua_type); 11379 } 11380 mtx_unlock(&softc->ctl_lock); 11381 11382 return (retval); 11383 } 11384 11385 /* 11386 * The LUN should always be set. The I/O is optional, and is used to 11387 * distinguish between I/Os sent by this initiator, and by other 11388 * initiators. We set unit attention for initiators other than this one. 11389 * SAM-3 is vague on this point. It does say that a unit attention should 11390 * be established for other initiators when a LUN is reset (see section 11391 * 5.7.3), but it doesn't specifically say that the unit attention should 11392 * be established for this particular initiator when a LUN is reset. 
Here 11393 * is the relevant text, from SAM-3 rev 8: 11394 * 11395 * 5.7.2 When a SCSI initiator port aborts its own tasks 11396 * 11397 * When a SCSI initiator port causes its own task(s) to be aborted, no 11398 * notification that the task(s) have been aborted shall be returned to 11399 * the SCSI initiator port other than the completion response for the 11400 * command or task management function action that caused the task(s) to 11401 * be aborted and notification(s) associated with related effects of the 11402 * action (e.g., a reset unit attention condition). 11403 * 11404 * XXX KDM for now, we're setting unit attention for all initiators. 11405 */ 11406 static int 11407 ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type) 11408 { 11409 union ctl_io *xio; 11410 #if 0 11411 uint32_t initidx; 11412 #endif 11413 #ifdef CTL_WITH_CA 11414 int i; 11415 #endif 11416 11417 mtx_lock(&lun->lun_lock); 11418 /* 11419 * Run through the OOA queue and abort each I/O. 11420 */ 11421 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11422 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11423 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; 11424 } 11425 11426 /* 11427 * This version sets unit attention for every 11428 */ 11429 #if 0 11430 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11431 ctl_est_ua_all(lun, initidx, ua_type); 11432 #else 11433 ctl_est_ua_all(lun, -1, ua_type); 11434 #endif 11435 11436 /* 11437 * A reset (any kind, really) clears reservations established with 11438 * RESERVE/RELEASE. It does not clear reservations established 11439 * with PERSISTENT RESERVE OUT, but we don't support that at the 11440 * moment anyway. See SPC-2, section 5.6. SPC-3 doesn't address 11441 * reservations made with the RESERVE/RELEASE commands, because 11442 * those commands are obsolete in SPC-3. 11443 */ 11444 lun->flags &= ~CTL_LUN_RESERVED; 11445 11446 #ifdef CTL_WITH_CA 11447 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11448 ctl_clear_mask(lun->have_ca, i); 11449 #endif 11450 mtx_unlock(&lun->lun_lock); 11451 11452 return (0); 11453 } 11454 11455 static void 11456 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 11457 int other_sc) 11458 { 11459 union ctl_io *xio; 11460 11461 mtx_assert(&lun->lun_lock, MA_OWNED); 11462 11463 /* 11464 * Run through the OOA queue and attempt to find the given I/O. 11465 * The target port, initiator ID, tag type and tag number have to 11466 * match the values that we got from the initiator. If we have an 11467 * untagged command to abort, simply abort the first untagged command 11468 * we come to. We only allow one untagged command at a time of course. 
11469 */ 11470 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11471 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11472 11473 if ((targ_port == UINT32_MAX || 11474 targ_port == xio->io_hdr.nexus.targ_port) && 11475 (init_id == UINT32_MAX || 11476 init_id == xio->io_hdr.nexus.initid)) { 11477 if (targ_port != xio->io_hdr.nexus.targ_port || 11478 init_id != xio->io_hdr.nexus.initid) 11479 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; 11480 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11481 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11482 union ctl_ha_msg msg_info; 11483 11484 msg_info.hdr.nexus = xio->io_hdr.nexus; 11485 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11486 msg_info.task.tag_num = xio->scsiio.tag_num; 11487 msg_info.task.tag_type = xio->scsiio.tag_type; 11488 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11489 msg_info.hdr.original_sc = NULL; 11490 msg_info.hdr.serializing_sc = NULL; 11491 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11492 sizeof(msg_info.task), M_NOWAIT); 11493 } 11494 } 11495 } 11496 } 11497 11498 static int 11499 ctl_abort_task_set(union ctl_io *io) 11500 { 11501 struct ctl_softc *softc = control_softc; 11502 struct ctl_lun *lun; 11503 uint32_t targ_lun; 11504 11505 /* 11506 * Look up the LUN. 11507 */ 11508 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11509 mtx_lock(&softc->ctl_lock); 11510 if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL)) 11511 lun = softc->ctl_luns[targ_lun]; 11512 else { 11513 mtx_unlock(&softc->ctl_lock); 11514 return (1); 11515 } 11516 11517 mtx_lock(&lun->lun_lock); 11518 mtx_unlock(&softc->ctl_lock); 11519 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 11520 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11521 io->io_hdr.nexus.initid, 11522 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11523 } else { /* CTL_TASK_CLEAR_TASK_SET */ 11524 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 11525 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11526 } 11527 mtx_unlock(&lun->lun_lock); 11528 return (0); 11529 } 11530 11531 static int 11532 ctl_i_t_nexus_reset(union ctl_io *io) 11533 { 11534 struct ctl_softc *softc = control_softc; 11535 struct ctl_lun *lun; 11536 uint32_t initidx; 11537 11538 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11539 union ctl_ha_msg msg_info; 11540 11541 msg_info.hdr.nexus = io->io_hdr.nexus; 11542 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; 11543 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11544 msg_info.hdr.original_sc = NULL; 11545 msg_info.hdr.serializing_sc = NULL; 11546 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11547 sizeof(msg_info.task), M_WAITOK); 11548 } 11549 11550 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11551 mtx_lock(&softc->ctl_lock); 11552 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11553 mtx_lock(&lun->lun_lock); 11554 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11555 io->io_hdr.nexus.initid, 1); 11556 #ifdef CTL_WITH_CA 11557 ctl_clear_mask(lun->have_ca, initidx); 11558 #endif 11559 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) 11560 lun->flags &= ~CTL_LUN_RESERVED; 11561 ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS); 11562 mtx_unlock(&lun->lun_lock); 11563 } 11564 mtx_unlock(&softc->ctl_lock); 11565 return (0); 11566 } 11567 11568 static int 11569 ctl_abort_task(union ctl_io *io) 11570 { 11571 union ctl_io *xio; 11572 struct ctl_lun *lun; 11573 struct ctl_softc *softc; 11574 #if 0 11575 struct sbuf sb; 11576 char printbuf[128]; 11577 #endif 11578 int 
found; 11579 uint32_t targ_lun; 11580 11581 softc = control_softc; 11582 found = 0; 11583 11584 /* 11585 * Look up the LUN. 11586 */ 11587 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11588 mtx_lock(&softc->ctl_lock); 11589 if ((targ_lun < CTL_MAX_LUNS) 11590 && (softc->ctl_luns[targ_lun] != NULL)) 11591 lun = softc->ctl_luns[targ_lun]; 11592 else { 11593 mtx_unlock(&softc->ctl_lock); 11594 return (1); 11595 } 11596 11597 #if 0 11598 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 11599 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 11600 #endif 11601 11602 mtx_lock(&lun->lun_lock); 11603 mtx_unlock(&softc->ctl_lock); 11604 /* 11605 * Run through the OOA queue and attempt to find the given I/O. 11606 * The target port, initiator ID, tag type and tag number have to 11607 * match the values that we got from the initiator. If we have an 11608 * untagged command to abort, simply abort the first untagged command 11609 * we come to. We only allow one untagged command at a time of course. 11610 */ 11611 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11612 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11613 #if 0 11614 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 11615 11616 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 11617 lun->lun, xio->scsiio.tag_num, 11618 xio->scsiio.tag_type, 11619 (xio->io_hdr.blocked_links.tqe_prev 11620 == NULL) ? "" : " BLOCKED", 11621 (xio->io_hdr.flags & 11622 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 11623 (xio->io_hdr.flags & 11624 CTL_FLAG_ABORT) ? " ABORT" : "", 11625 (xio->io_hdr.flags & 11626 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); 11627 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 11628 sbuf_finish(&sb); 11629 printf("%s\n", sbuf_data(&sb)); 11630 #endif 11631 11632 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 11633 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 11634 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 11635 continue; 11636 11637 /* 11638 * If the abort says that the task is untagged, the 11639 * task in the queue must be untagged. Otherwise, 11640 * we just check to see whether the tag numbers 11641 * match. This is because the QLogic firmware 11642 * doesn't pass back the tag type in an abort 11643 * request. 11644 */ 11645 #if 0 11646 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 11647 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 11648 || (xio->scsiio.tag_num == io->taskio.tag_num)) 11649 #endif 11650 /* 11651 * XXX KDM we've got problems with FC, because it 11652 * doesn't send down a tag type with aborts. So we 11653 * can only really go by the tag number... 11654 * This may cause problems with parallel SCSI. 11655 * Need to figure that out!! 
11656 */ 11657 if (xio->scsiio.tag_num == io->taskio.tag_num) { 11658 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11659 found = 1; 11660 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 11661 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11662 union ctl_ha_msg msg_info; 11663 11664 msg_info.hdr.nexus = io->io_hdr.nexus; 11665 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11666 msg_info.task.tag_num = io->taskio.tag_num; 11667 msg_info.task.tag_type = io->taskio.tag_type; 11668 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11669 msg_info.hdr.original_sc = NULL; 11670 msg_info.hdr.serializing_sc = NULL; 11671 #if 0 11672 printf("Sent Abort to other side\n"); 11673 #endif 11674 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11675 sizeof(msg_info.task), M_NOWAIT); 11676 } 11677 #if 0 11678 printf("ctl_abort_task: found I/O to abort\n"); 11679 #endif 11680 } 11681 } 11682 mtx_unlock(&lun->lun_lock); 11683 11684 if (found == 0) { 11685 /* 11686 * This isn't really an error. It's entirely possible for 11687 * the abort and command completion to cross on the wire. 11688 * This is more of an informative/diagnostic error. 11689 */ 11690 #if 0 11691 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " 11692 "%u:%u:%u tag %d type %d\n", 11693 io->io_hdr.nexus.initid, 11694 io->io_hdr.nexus.targ_port, 11695 io->io_hdr.nexus.targ_lun, io->taskio.tag_num, 11696 io->taskio.tag_type); 11697 #endif 11698 } 11699 return (0); 11700 } 11701 11702 static void 11703 ctl_run_task(union ctl_io *io) 11704 { 11705 struct ctl_softc *softc = control_softc; 11706 int retval = 1; 11707 const char *task_desc; 11708 11709 CTL_DEBUG_PRINT(("ctl_run_task\n")); 11710 11711 KASSERT(io->io_hdr.io_type == CTL_IO_TASK, 11712 ("ctl_run_task: Unexpected io_type %d\n", 11713 io->io_hdr.io_type)); 11714 11715 task_desc = ctl_scsi_task_string(&io->taskio); 11716 if (task_desc != NULL) { 11717 #ifdef NEEDTOPORT 11718 csevent_log(CSC_CTL | CSC_SHELF_SW | 11719 CTL_TASK_REPORT, 11720 csevent_LogType_Trace, 11721 csevent_Severity_Information, 11722 csevent_AlertLevel_Green, 11723 csevent_FRU_Firmware, 11724 csevent_FRU_Unknown, 11725 "CTL: received task: %s", task_desc); 11726 #endif 11727 } else { 11728 #ifdef NEEDTOPORT 11729 csevent_log(CSC_CTL | CSC_SHELF_SW | 11730 CTL_TASK_REPORT, 11731 csevent_LogType_Trace, 11732 csevent_Severity_Information, 11733 csevent_AlertLevel_Green, 11734 csevent_FRU_Firmware, 11735 csevent_FRU_Unknown, 11736 "CTL: received unknown task " 11737 "type: %d (%#x)", 11738 io->taskio.task_action, 11739 io->taskio.task_action); 11740 #endif 11741 } 11742 switch (io->taskio.task_action) { 11743 case CTL_TASK_ABORT_TASK: 11744 retval = ctl_abort_task(io); 11745 break; 11746 case CTL_TASK_ABORT_TASK_SET: 11747 case CTL_TASK_CLEAR_TASK_SET: 11748 retval = ctl_abort_task_set(io); 11749 break; 11750 case CTL_TASK_CLEAR_ACA: 11751 break; 11752 case CTL_TASK_I_T_NEXUS_RESET: 11753 retval = ctl_i_t_nexus_reset(io); 11754 break; 11755 case CTL_TASK_LUN_RESET: { 11756 struct ctl_lun *lun; 11757 uint32_t targ_lun; 11758 11759 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11760 mtx_lock(&softc->ctl_lock); 11761 if ((targ_lun < CTL_MAX_LUNS) 11762 && (softc->ctl_luns[targ_lun] != NULL)) 11763 lun = softc->ctl_luns[targ_lun]; 11764 else { 11765 mtx_unlock(&softc->ctl_lock); 11766 retval = 1; 11767 break; 11768 } 11769 retval = ctl_lun_reset(lun, io, CTL_UA_LUN_RESET); 11770 mtx_unlock(&softc->ctl_lock); 11771 11772 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { 11773 union ctl_ha_msg msg_info; 11774 11775
msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11776 msg_info.hdr.nexus = io->io_hdr.nexus; 11777 msg_info.task.task_action = CTL_TASK_LUN_RESET; 11778 msg_info.hdr.original_sc = NULL; 11779 msg_info.hdr.serializing_sc = NULL; 11780 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11781 sizeof(msg_info.task), M_WAITOK); 11782 } 11783 break; 11784 } 11785 case CTL_TASK_TARGET_RESET: 11786 retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET); 11787 break; 11788 case CTL_TASK_BUS_RESET: 11789 retval = ctl_bus_reset(softc, io); 11790 break; 11791 case CTL_TASK_PORT_LOGIN: 11792 break; 11793 case CTL_TASK_PORT_LOGOUT: 11794 break; 11795 default: 11796 printf("ctl_run_task: got unknown task management event %d\n", 11797 io->taskio.task_action); 11798 break; 11799 } 11800 if (retval == 0) 11801 io->io_hdr.status = CTL_SUCCESS; 11802 else 11803 io->io_hdr.status = CTL_ERROR; 11804 ctl_done(io); 11805 } 11806 11807 /* 11808 * For HA operation. Handle commands that come in from the other 11809 * controller. 11810 */ 11811 static void 11812 ctl_handle_isc(union ctl_io *io) 11813 { 11814 int free_io; 11815 struct ctl_lun *lun; 11816 struct ctl_softc *softc; 11817 uint32_t targ_lun; 11818 11819 softc = control_softc; 11820 11821 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11822 lun = softc->ctl_luns[targ_lun]; 11823 11824 switch (io->io_hdr.msg_type) { 11825 case CTL_MSG_SERIALIZE: 11826 free_io = ctl_serialize_other_sc_cmd(&io->scsiio); 11827 break; 11828 case CTL_MSG_R2R: { 11829 const struct ctl_cmd_entry *entry; 11830 11831 /* 11832 * This is only used in SER_ONLY mode. 11833 */ 11834 free_io = 0; 11835 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 11836 mtx_lock(&lun->lun_lock); 11837 if (ctl_scsiio_lun_check(lun, 11838 entry, (struct ctl_scsiio *)io) != 0) { 11839 mtx_unlock(&lun->lun_lock); 11840 ctl_done(io); 11841 break; 11842 } 11843 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11844 mtx_unlock(&lun->lun_lock); 11845 ctl_enqueue_rtr(io); 11846 break; 11847 } 11848 case CTL_MSG_FINISH_IO: 11849 if (softc->ha_mode == CTL_HA_MODE_XFER) { 11850 free_io = 0; 11851 ctl_done(io); 11852 } else { 11853 free_io = 1; 11854 mtx_lock(&lun->lun_lock); 11855 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, 11856 ooa_links); 11857 ctl_check_blocked(lun); 11858 mtx_unlock(&lun->lun_lock); 11859 } 11860 break; 11861 case CTL_MSG_PERS_ACTION: 11862 ctl_hndl_per_res_out_on_other_sc( 11863 (union ctl_ha_msg *)&io->presio.pr_msg); 11864 free_io = 1; 11865 break; 11866 case CTL_MSG_BAD_JUJU: 11867 free_io = 0; 11868 ctl_done(io); 11869 break; 11870 case CTL_MSG_DATAMOVE: 11871 /* Only used in XFER mode */ 11872 free_io = 0; 11873 ctl_datamove_remote(io); 11874 break; 11875 case CTL_MSG_DATAMOVE_DONE: 11876 /* Only used in XFER mode */ 11877 free_io = 0; 11878 io->scsiio.be_move_done(io); 11879 break; 11880 case CTL_MSG_FAILOVER: 11881 mtx_lock(&lun->lun_lock); 11882 ctl_failover_lun(lun); 11883 mtx_unlock(&lun->lun_lock); 11884 free_io = 1; 11885 break; 11886 default: 11887 free_io = 1; 11888 printf("%s: Invalid message type %d\n", 11889 __func__, io->io_hdr.msg_type); 11890 break; 11891 } 11892 if (free_io) 11893 ctl_free_io(io); 11894 11895 } 11896 11897 11898 /* 11899 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 11900 * there is no match. 
11901 */ 11902 static ctl_lun_error_pattern 11903 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 11904 { 11905 const struct ctl_cmd_entry *entry; 11906 ctl_lun_error_pattern filtered_pattern, pattern; 11907 11908 pattern = desc->error_pattern; 11909 11910 /* 11911 * XXX KDM we need more data passed into this function to match a 11912 * custom pattern, and we actually need to implement custom pattern 11913 * matching. 11914 */ 11915 if (pattern & CTL_LUN_PAT_CMD) 11916 return (CTL_LUN_PAT_CMD); 11917 11918 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 11919 return (CTL_LUN_PAT_ANY); 11920 11921 entry = ctl_get_cmd_entry(ctsio, NULL); 11922 11923 filtered_pattern = entry->pattern & pattern; 11924 11925 /* 11926 * If the user requested specific flags in the pattern (e.g. 11927 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 11928 * flags. 11929 * 11930 * If the user did not specify any flags, it doesn't matter whether 11931 * or not the command supports the flags. 11932 */ 11933 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 11934 (pattern & ~CTL_LUN_PAT_MASK)) 11935 return (CTL_LUN_PAT_NONE); 11936 11937 /* 11938 * If the user asked for a range check, see if the requested LBA 11939 * range overlaps with this command's LBA range. 11940 */ 11941 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 11942 uint64_t lba1; 11943 uint64_t len1; 11944 ctl_action action; 11945 int retval; 11946 11947 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 11948 if (retval != 0) 11949 return (CTL_LUN_PAT_NONE); 11950 11951 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 11952 desc->lba_range.len, FALSE); 11953 /* 11954 * A "pass" means that the LBA ranges don't overlap, so 11955 * this doesn't match the user's range criteria. 11956 */ 11957 if (action == CTL_ACTION_PASS) 11958 return (CTL_LUN_PAT_NONE); 11959 } 11960 11961 return (filtered_pattern); 11962 } 11963 11964 static void 11965 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 11966 { 11967 struct ctl_error_desc *desc, *desc2; 11968 11969 mtx_assert(&lun->lun_lock, MA_OWNED); 11970 11971 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 11972 ctl_lun_error_pattern pattern; 11973 /* 11974 * Check to see whether this particular command matches 11975 * the pattern in the descriptor. 11976 */ 11977 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 11978 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 11979 continue; 11980 11981 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 11982 case CTL_LUN_INJ_ABORTED: 11983 ctl_set_aborted(&io->scsiio); 11984 break; 11985 case CTL_LUN_INJ_MEDIUM_ERR: 11986 ctl_set_medium_error(&io->scsiio); 11987 break; 11988 case CTL_LUN_INJ_UA: 11989 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 11990 * OCCURRED */ 11991 ctl_set_ua(&io->scsiio, 0x29, 0x00); 11992 break; 11993 case CTL_LUN_INJ_CUSTOM: 11994 /* 11995 * We're assuming the user knows what he is doing. 11996 * Just copy the sense information without doing 11997 * checks. 
11998 */ 11999 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12000 MIN(sizeof(desc->custom_sense), 12001 sizeof(io->scsiio.sense_data))); 12002 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12003 io->scsiio.sense_len = SSD_FULL_SIZE; 12004 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12005 break; 12006 case CTL_LUN_INJ_NONE: 12007 default: 12008 /* 12009 * If this is an error injection type we don't know 12010 * about, clear the continuous flag (if it is set) 12011 * so it will get deleted below. 12012 */ 12013 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12014 break; 12015 } 12016 /* 12017 * By default, each error injection action is a one-shot 12018 */ 12019 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12020 continue; 12021 12022 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12023 12024 free(desc, M_CTL); 12025 } 12026 } 12027 12028 #ifdef CTL_IO_DELAY 12029 static void 12030 ctl_datamove_timer_wakeup(void *arg) 12031 { 12032 union ctl_io *io; 12033 12034 io = (union ctl_io *)arg; 12035 12036 ctl_datamove(io); 12037 } 12038 #endif /* CTL_IO_DELAY */ 12039 12040 void 12041 ctl_datamove(union ctl_io *io) 12042 { 12043 void (*fe_datamove)(union ctl_io *io); 12044 12045 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 12046 12047 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12048 12049 #ifdef CTL_TIME_IO 12050 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12051 char str[256]; 12052 char path_str[64]; 12053 struct sbuf sb; 12054 12055 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12056 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12057 12058 sbuf_cat(&sb, path_str); 12059 switch (io->io_hdr.io_type) { 12060 case CTL_IO_SCSI: 12061 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12062 sbuf_printf(&sb, "\n"); 12063 sbuf_cat(&sb, path_str); 12064 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12065 io->scsiio.tag_num, io->scsiio.tag_type); 12066 break; 12067 case CTL_IO_TASK: 12068 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12069 "Tag Type: %d\n", io->taskio.task_action, 12070 io->taskio.tag_num, io->taskio.tag_type); 12071 break; 12072 default: 12073 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12074 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12075 break; 12076 } 12077 sbuf_cat(&sb, path_str); 12078 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12079 (intmax_t)time_uptime - io->io_hdr.start_time); 12080 sbuf_finish(&sb); 12081 printf("%s", sbuf_data(&sb)); 12082 } 12083 #endif /* CTL_TIME_IO */ 12084 12085 #ifdef CTL_IO_DELAY 12086 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12087 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12088 } else { 12089 struct ctl_lun *lun; 12090 12091 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12092 if ((lun != NULL) 12093 && (lun->delay_info.datamove_delay > 0)) { 12094 12095 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 12096 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12097 callout_reset(&io->io_hdr.delay_callout, 12098 lun->delay_info.datamove_delay * hz, 12099 ctl_datamove_timer_wakeup, io); 12100 if (lun->delay_info.datamove_type == 12101 CTL_DELAY_TYPE_ONESHOT) 12102 lun->delay_info.datamove_delay = 0; 12103 return; 12104 } 12105 } 12106 #endif 12107 12108 /* 12109 * This command has been aborted. Set the port status, so we fail 12110 * the data move. 
12111 */ 12112 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12113 printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", 12114 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12115 io->io_hdr.nexus.targ_port, 12116 io->io_hdr.nexus.targ_lun); 12117 io->io_hdr.port_status = 31337; 12118 /* 12119 * Note that the backend, in this case, will get the 12120 * callback in its context. In other cases it may get 12121 * called in the frontend's interrupt thread context. 12122 */ 12123 io->scsiio.be_move_done(io); 12124 return; 12125 } 12126 12127 /* Don't confuse frontend with zero length data move. */ 12128 if (io->scsiio.kern_data_len == 0) { 12129 io->scsiio.be_move_done(io); 12130 return; 12131 } 12132 12133 /* 12134 * If we're in XFER mode and this I/O is from the other shelf 12135 * controller, we need to send the DMA to the other side to 12136 * actually transfer the data to/from the host. In serialize only 12137 * mode the transfer happens below CTL and ctl_datamove() is only 12138 * called on the machine that originally received the I/O. 12139 */ 12140 if ((control_softc->ha_mode == CTL_HA_MODE_XFER) 12141 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12142 union ctl_ha_msg msg; 12143 uint32_t sg_entries_sent; 12144 int do_sg_copy; 12145 int i; 12146 12147 memset(&msg, 0, sizeof(msg)); 12148 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 12149 msg.hdr.original_sc = io->io_hdr.original_sc; 12150 msg.hdr.serializing_sc = io; 12151 msg.hdr.nexus = io->io_hdr.nexus; 12152 msg.dt.flags = io->io_hdr.flags; 12153 /* 12154 * We convert everything into a S/G list here. We can't 12155 * pass by reference, only by value between controllers. 12156 * So we can't pass a pointer to the S/G list, only as many 12157 * S/G entries as we can fit in here. If it's possible for 12158 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 12159 * then we need to break this up into multiple transfers. 12160 */ 12161 if (io->scsiio.kern_sg_entries == 0) { 12162 msg.dt.kern_sg_entries = 1; 12163 #if 0 12164 /* 12165 * Convert to a physical address if this is a 12166 * virtual address. 12167 */ 12168 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 12169 msg.dt.sg_list[0].addr = 12170 io->scsiio.kern_data_ptr; 12171 } else { 12172 /* 12173 * XXX KDM use busdma here! 12174 */ 12175 msg.dt.sg_list[0].addr = (void *) 12176 vtophys(io->scsiio.kern_data_ptr); 12177 } 12178 #else 12179 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12180 ("HA does not support BUS_ADDR")); 12181 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; 12182 #endif 12183 12184 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 12185 do_sg_copy = 0; 12186 } else { 12187 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 12188 do_sg_copy = 1; 12189 } 12190 12191 msg.dt.kern_data_len = io->scsiio.kern_data_len; 12192 msg.dt.kern_total_len = io->scsiio.kern_total_len; 12193 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 12194 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 12195 msg.dt.sg_sequence = 0; 12196 12197 /* 12198 * Loop until we've sent all of the S/G entries. 
On the 12199 * other end, we'll recompose these S/G entries into one 12200 * contiguous list before passing it to the 12201 */ 12202 for (sg_entries_sent = 0; sg_entries_sent < 12203 msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { 12204 msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list)/ 12205 sizeof(msg.dt.sg_list[0])), 12206 msg.dt.kern_sg_entries - sg_entries_sent); 12207 12208 if (do_sg_copy != 0) { 12209 struct ctl_sg_entry *sgl; 12210 int j; 12211 12212 sgl = (struct ctl_sg_entry *) 12213 io->scsiio.kern_data_ptr; 12214 /* 12215 * If this is in cached memory, flush the cache 12216 * before we send the DMA request to the other 12217 * controller. We want to do this in either 12218 * the * read or the write case. The read 12219 * case is straightforward. In the write 12220 * case, we want to make sure nothing is 12221 * in the local cache that could overwrite 12222 * the DMAed data. 12223 */ 12224 12225 for (i = sg_entries_sent, j = 0; 12226 i < msg.dt.cur_sg_entries; i++, j++) { 12227 #if 0 12228 if ((io->io_hdr.flags & 12229 CTL_FLAG_BUS_ADDR) == 0) { 12230 /* 12231 * XXX KDM use busdma. 12232 */ 12233 msg.dt.sg_list[j].addr =(void *) 12234 vtophys(sgl[i].addr); 12235 } else { 12236 msg.dt.sg_list[j].addr = 12237 sgl[i].addr; 12238 } 12239 #else 12240 KASSERT((io->io_hdr.flags & 12241 CTL_FLAG_BUS_ADDR) == 0, 12242 ("HA does not support BUS_ADDR")); 12243 msg.dt.sg_list[j].addr = sgl[i].addr; 12244 #endif 12245 msg.dt.sg_list[j].len = sgl[i].len; 12246 } 12247 } 12248 12249 sg_entries_sent += msg.dt.cur_sg_entries; 12250 if (sg_entries_sent >= msg.dt.kern_sg_entries) 12251 msg.dt.sg_last = 1; 12252 else 12253 msg.dt.sg_last = 0; 12254 12255 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12256 sizeof(msg.dt) - sizeof(msg.dt.sg_list) + 12257 sizeof(struct ctl_sg_entry)*msg.dt.cur_sg_entries, 12258 M_WAITOK) > CTL_HA_STATUS_SUCCESS) { 12259 io->io_hdr.port_status = 31341; 12260 io->scsiio.be_move_done(io); 12261 return; 12262 } 12263 12264 msg.dt.sent_sg_entries = sg_entries_sent; 12265 } 12266 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12267 } else { 12268 12269 /* 12270 * Lookup the fe_datamove() function for this particular 12271 * front end. 
12272 */ 12273 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; 12274 12275 fe_datamove(io); 12276 } 12277 } 12278 12279 static void 12280 ctl_send_datamove_done(union ctl_io *io, int have_lock) 12281 { 12282 union ctl_ha_msg msg; 12283 12284 memset(&msg, 0, sizeof(msg)); 12285 12286 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12287 msg.hdr.original_sc = io; 12288 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12289 msg.hdr.nexus = io->io_hdr.nexus; 12290 msg.hdr.status = io->io_hdr.status; 12291 msg.scsi.tag_num = io->scsiio.tag_num; 12292 msg.scsi.tag_type = io->scsiio.tag_type; 12293 msg.scsi.scsi_status = io->scsiio.scsi_status; 12294 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12295 io->scsiio.sense_len); 12296 msg.scsi.sense_len = io->scsiio.sense_len; 12297 msg.scsi.sense_residual = io->scsiio.sense_residual; 12298 msg.scsi.fetd_status = io->io_hdr.port_status; 12299 msg.scsi.residual = io->scsiio.residual; 12300 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12301 12302 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12303 ctl_failover_io(io, /*have_lock*/ have_lock); 12304 return; 12305 } 12306 12307 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12308 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12309 msg.scsi.sense_len, M_WAITOK); 12310 } 12311 12312 /* 12313 * The DMA to the remote side is done, now we need to tell the other side 12314 * we're done so it can continue with its data movement. 12315 */ 12316 static void 12317 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12318 { 12319 union ctl_io *io; 12320 int i; 12321 12322 io = rq->context; 12323 12324 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12325 printf("%s: ISC DMA write failed with error %d\n", __func__, 12326 rq->ret); 12327 ctl_set_internal_failure(&io->scsiio, 12328 /*sks_valid*/ 1, 12329 /*retry_count*/ rq->ret); 12330 } 12331 12332 ctl_dt_req_free(rq); 12333 12334 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12335 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12336 free(io->io_hdr.remote_sglist, M_CTL); 12337 io->io_hdr.remote_sglist = NULL; 12338 io->io_hdr.local_sglist = NULL; 12339 12340 /* 12341 * The data is in local and remote memory, so now we need to send 12342 * status (good or bad) back to the other side. 12343 */ 12344 ctl_send_datamove_done(io, /*have_lock*/ 0); 12345 } 12346 12347 /* 12348 * We've moved the data from the host/controller into local memory. Now we 12349 * need to push it over to the remote controller's memory. 12350 */ 12351 static int 12352 ctl_datamove_remote_dm_write_cb(union ctl_io *io) 12353 { 12354 int retval; 12355 12356 retval = 0; 12357 12358 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 12359 ctl_datamove_remote_write_cb); 12360 12361 return (retval); 12362 } 12363 12364 static void 12365 ctl_datamove_remote_write(union ctl_io *io) 12366 { 12367 int retval; 12368 void (*fe_datamove)(union ctl_io *io); 12369 12370 /* 12371 * - Get the data from the host/HBA into local memory. 12372 * - DMA memory from the local controller to the remote controller. 12373 * - Send status back to the remote controller. 12374 */ 12375 12376 retval = ctl_datamove_remote_sgl_setup(io); 12377 if (retval != 0) 12378 return; 12379 12380 /* Switch the pointer over so the FETD knows what to do */ 12381 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12382 12383 /* 12384 * Use a custom move done callback, since we need to send completion 12385 * back to the other controller, not to the backend on this side.
12386 */ 12387 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12388 12389 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; 12390 12391 fe_datamove(io); 12392 12393 return; 12394 12395 } 12396 12397 static int 12398 ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12399 { 12400 #if 0 12401 char str[256]; 12402 char path_str[64]; 12403 struct sbuf sb; 12404 #endif 12405 int i; 12406 12407 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12408 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12409 free(io->io_hdr.remote_sglist, M_CTL); 12410 io->io_hdr.remote_sglist = NULL; 12411 io->io_hdr.local_sglist = NULL; 12412 12413 #if 0 12414 scsi_path_string(io, path_str, sizeof(path_str)); 12415 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12416 sbuf_cat(&sb, path_str); 12417 scsi_command_string(&io->scsiio, NULL, &sb); 12418 sbuf_printf(&sb, "\n"); 12419 sbuf_cat(&sb, path_str); 12420 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12421 io->scsiio.tag_num, io->scsiio.tag_type); 12422 sbuf_cat(&sb, path_str); 12423 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 12424 io->io_hdr.flags, io->io_hdr.status); 12425 sbuf_finish(&sb); 12426 printk("%s", sbuf_data(&sb)); 12427 #endif 12428 12429 12430 /* 12431 * The read is done, now we need to send status (good or bad) back 12432 * to the other side. 12433 */ 12434 ctl_send_datamove_done(io, /*have_lock*/ 0); 12435 12436 return (0); 12437 } 12438 12439 static void 12440 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12441 { 12442 union ctl_io *io; 12443 void (*fe_datamove)(union ctl_io *io); 12444 12445 io = rq->context; 12446 12447 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12448 printf("%s: ISC DMA read failed with error %d\n", __func__, 12449 rq->ret); 12450 ctl_set_internal_failure(&io->scsiio, 12451 /*sks_valid*/ 1, 12452 /*retry_count*/ rq->ret); 12453 } 12454 12455 ctl_dt_req_free(rq); 12456 12457 /* Switch the pointer over so the FETD knows what to do */ 12458 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12459 12460 /* 12461 * Use a custom move done callback, since we need to send completion 12462 * back to the other controller, not to the backend on this side. 12463 */ 12464 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12465 12466 /* XXX KDM add checks like the ones in ctl_datamove? */ 12467 12468 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; 12469 12470 fe_datamove(io); 12471 } 12472 12473 static int 12474 ctl_datamove_remote_sgl_setup(union ctl_io *io) 12475 { 12476 struct ctl_sg_entry *local_sglist, *remote_sglist; 12477 struct ctl_softc *softc; 12478 uint32_t len_to_go; 12479 int retval; 12480 int i; 12481 12482 retval = 0; 12483 softc = control_softc; 12484 local_sglist = io->io_hdr.local_sglist; 12485 remote_sglist = io->io_hdr.remote_sglist; 12486 len_to_go = io->scsiio.kern_data_len; 12487 12488 /* 12489 * The difficult thing here is that the size of the various 12490 * S/G segments may be different than the size from the 12491 * remote controller. That'll make it harder when DMAing 12492 * the data back to the other side. 12493 */ 12494 for (i = 0; len_to_go > 0; i++) { 12495 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12496 local_sglist[i].addr = 12497 malloc(local_sglist[i].len, M_CTL, M_WAITOK); 12498 12499 len_to_go -= local_sglist[i].len; 12500 } 12501 /* 12502 * Reset the number of S/G entries accordingly. The original 12503 * number of S/G entries is available in rem_sg_entries. 
12504 */ 12505 io->scsiio.kern_sg_entries = i; 12506 12507 #if 0 12508 printf("%s: kern_sg_entries = %d\n", __func__, 12509 io->scsiio.kern_sg_entries); 12510 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12511 printf("%s: sg[%d] = %p, %d\n", __func__, i, 12512 local_sglist[i].addr, local_sglist[i].len); 12513 #endif 12514 12515 return (retval); 12516 } 12517 12518 static int 12519 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12520 ctl_ha_dt_cb callback) 12521 { 12522 struct ctl_ha_dt_req *rq; 12523 struct ctl_sg_entry *remote_sglist, *local_sglist; 12524 uint32_t local_used, remote_used, total_used; 12525 int i, j, isc_ret; 12526 12527 rq = ctl_dt_req_alloc(); 12528 12529 /* 12530 * If we failed to allocate the request, and if the DMA didn't fail 12531 * anyway, set busy status. This is just a resource allocation 12532 * failure. 12533 */ 12534 if ((rq == NULL) 12535 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) 12536 ctl_set_busy(&io->scsiio); 12537 12538 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) { 12539 12540 if (rq != NULL) 12541 ctl_dt_req_free(rq); 12542 12543 /* 12544 * The data move failed. We need to return status back 12545 * to the other controller. No point in trying to DMA 12546 * data to the remote controller. 12547 */ 12548 12549 ctl_send_datamove_done(io, /*have_lock*/ 0); 12550 12551 return (1); 12552 } 12553 12554 local_sglist = io->io_hdr.local_sglist; 12555 remote_sglist = io->io_hdr.remote_sglist; 12556 local_used = 0; 12557 remote_used = 0; 12558 total_used = 0; 12559 12560 /* 12561 * Pull/push the data over the wire from/to the other controller. 12562 * This takes into account the possibility that the local and 12563 * remote sglists may not be identical in terms of the size of 12564 * the elements and the number of elements. 12565 * 12566 * One fundamental assumption here is that the length allocated for 12567 * both the local and remote sglists is identical. Otherwise, we've 12568 * essentially got a coding error of some sort. 12569 */ 12570 isc_ret = CTL_HA_STATUS_SUCCESS; 12571 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12572 uint32_t cur_len; 12573 uint8_t *tmp_ptr; 12574 12575 rq->command = command; 12576 rq->context = io; 12577 12578 /* 12579 * Both pointers should be aligned. But it is possible 12580 * that the allocation length is not. They should both 12581 * also have enough slack left over at the end, though, 12582 * to round up to the next 8 byte boundary. 
12583 */ 12584 cur_len = MIN(local_sglist[i].len - local_used, 12585 remote_sglist[j].len - remote_used); 12586 rq->size = cur_len; 12587 12588 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12589 tmp_ptr += local_used; 12590 12591 #if 0 12592 /* Use physical addresses when talking to ISC hardware */ 12593 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12594 /* XXX KDM use busdma */ 12595 rq->local = vtophys(tmp_ptr); 12596 } else 12597 rq->local = tmp_ptr; 12598 #else 12599 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12600 ("HA does not support BUS_ADDR")); 12601 rq->local = tmp_ptr; 12602 #endif 12603 12604 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12605 tmp_ptr += remote_used; 12606 rq->remote = tmp_ptr; 12607 12608 rq->callback = NULL; 12609 12610 local_used += cur_len; 12611 if (local_used >= local_sglist[i].len) { 12612 i++; 12613 local_used = 0; 12614 } 12615 12616 remote_used += cur_len; 12617 if (remote_used >= remote_sglist[j].len) { 12618 j++; 12619 remote_used = 0; 12620 } 12621 total_used += cur_len; 12622 12623 if (total_used >= io->scsiio.kern_data_len) 12624 rq->callback = callback; 12625 12626 #if 0 12627 printf("%s: %s: local %#x remote %#x size %d\n", __func__, 12628 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 12629 rq->local, rq->remote, rq->size); 12630 #endif 12631 12632 isc_ret = ctl_dt_single(rq); 12633 if (isc_ret > CTL_HA_STATUS_SUCCESS) 12634 break; 12635 } 12636 if (isc_ret != CTL_HA_STATUS_WAIT) { 12637 rq->ret = isc_ret; 12638 callback(rq); 12639 } 12640 12641 return (0); 12642 } 12643 12644 static void 12645 ctl_datamove_remote_read(union ctl_io *io) 12646 { 12647 int retval; 12648 int i; 12649 12650 /* 12651 * This will send an error to the other controller in the case of a 12652 * failure. 12653 */ 12654 retval = ctl_datamove_remote_sgl_setup(io); 12655 if (retval != 0) 12656 return; 12657 12658 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12659 ctl_datamove_remote_read_cb); 12660 if (retval != 0) { 12661 /* 12662 * Make sure we free memory if there was an error.. The 12663 * ctl_datamove_remote_xfer() function will send the 12664 * datamove done message, or call the callback with an 12665 * error if there is a problem. 12666 */ 12667 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12668 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12669 free(io->io_hdr.remote_sglist, M_CTL); 12670 io->io_hdr.remote_sglist = NULL; 12671 io->io_hdr.local_sglist = NULL; 12672 } 12673 12674 return; 12675 } 12676 12677 /* 12678 * Process a datamove request from the other controller. This is used for 12679 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12680 * first. Once that is complete, the data gets DMAed into the remote 12681 * controller's memory. For reads, we DMA from the remote controller's 12682 * memory into our memory first, and then move it out to the FETD. 12683 */ 12684 static void 12685 ctl_datamove_remote(union ctl_io *io) 12686 { 12687 12688 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 12689 12690 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12691 ctl_failover_io(io, /*have_lock*/ 0); 12692 return; 12693 } 12694 12695 /* 12696 * Note that we look for an aborted I/O here, but don't do some of 12697 * the other checks that ctl_datamove() normally does. 12698 * We don't need to run the datamove delay code, since that should 12699 * have been done if need be on the other controller. 
12700 */ 12701 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12702 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, 12703 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12704 io->io_hdr.nexus.targ_port, 12705 io->io_hdr.nexus.targ_lun); 12706 io->io_hdr.port_status = 31338; 12707 ctl_send_datamove_done(io, /*have_lock*/ 0); 12708 return; 12709 } 12710 12711 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) 12712 ctl_datamove_remote_write(io); 12713 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 12714 ctl_datamove_remote_read(io); 12715 else { 12716 io->io_hdr.port_status = 31339; 12717 ctl_send_datamove_done(io, /*have_lock*/ 0); 12718 } 12719 } 12720 12721 static int 12722 ctl_process_done(union ctl_io *io) 12723 { 12724 struct ctl_lun *lun; 12725 struct ctl_softc *softc = control_softc; 12726 void (*fe_done)(union ctl_io *io); 12727 union ctl_ha_msg msg; 12728 uint32_t targ_port = io->io_hdr.nexus.targ_port; 12729 12730 CTL_DEBUG_PRINT(("ctl_process_done\n")); 12731 12732 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) 12733 fe_done = softc->ctl_ports[targ_port]->fe_done; 12734 else 12735 fe_done = NULL; 12736 12737 #ifdef CTL_TIME_IO 12738 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12739 char str[256]; 12740 char path_str[64]; 12741 struct sbuf sb; 12742 12743 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12744 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12745 12746 sbuf_cat(&sb, path_str); 12747 switch (io->io_hdr.io_type) { 12748 case CTL_IO_SCSI: 12749 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12750 sbuf_printf(&sb, "\n"); 12751 sbuf_cat(&sb, path_str); 12752 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12753 io->scsiio.tag_num, io->scsiio.tag_type); 12754 break; 12755 case CTL_IO_TASK: 12756 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12757 "Tag Type: %d\n", io->taskio.task_action, 12758 io->taskio.tag_num, io->taskio.tag_type); 12759 break; 12760 default: 12761 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12762 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12763 break; 12764 } 12765 sbuf_cat(&sb, path_str); 12766 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 12767 (intmax_t)time_uptime - io->io_hdr.start_time); 12768 sbuf_finish(&sb); 12769 printf("%s", sbuf_data(&sb)); 12770 } 12771 #endif /* CTL_TIME_IO */ 12772 12773 switch (io->io_hdr.io_type) { 12774 case CTL_IO_SCSI: 12775 break; 12776 case CTL_IO_TASK: 12777 if (ctl_debug & CTL_DEBUG_INFO) 12778 ctl_io_error_print(io, NULL); 12779 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 12780 ctl_free_io(io); 12781 else 12782 fe_done(io); 12783 return (CTL_RETVAL_COMPLETE); 12784 default: 12785 panic("ctl_process_done: invalid io type %d\n", 12786 io->io_hdr.io_type); 12787 break; /* NOTREACHED */ 12788 } 12789 12790 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12791 if (lun == NULL) { 12792 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 12793 io->io_hdr.nexus.targ_mapped_lun)); 12794 goto bailout; 12795 } 12796 12797 mtx_lock(&lun->lun_lock); 12798 12799 /* 12800 * Check to see if we have any errors to inject here. We only 12801 * inject errors for commands that don't already have errors set. 
12802 */ 12803 if ((STAILQ_FIRST(&lun->error_list) != NULL) && 12804 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && 12805 ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) 12806 ctl_inject_error(lun, io); 12807 12808 /* 12809 * XXX KDM how do we treat commands that aren't completed 12810 * successfully? 12811 * 12812 * XXX KDM should we also track I/O latency? 12813 */ 12814 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && 12815 io->io_hdr.io_type == CTL_IO_SCSI) { 12816 #ifdef CTL_TIME_IO 12817 struct bintime cur_bt; 12818 #endif 12819 int type; 12820 12821 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 12822 CTL_FLAG_DATA_IN) 12823 type = CTL_STATS_READ; 12824 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 12825 CTL_FLAG_DATA_OUT) 12826 type = CTL_STATS_WRITE; 12827 else 12828 type = CTL_STATS_NO_IO; 12829 12830 lun->stats.ports[targ_port].bytes[type] += 12831 io->scsiio.kern_total_len; 12832 lun->stats.ports[targ_port].operations[type]++; 12833 #ifdef CTL_TIME_IO 12834 bintime_add(&lun->stats.ports[targ_port].dma_time[type], 12835 &io->io_hdr.dma_bt); 12836 lun->stats.ports[targ_port].num_dmas[type] += 12837 io->io_hdr.num_dmas; 12838 getbintime(&cur_bt); 12839 bintime_sub(&cur_bt, &io->io_hdr.start_bt); 12840 bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt); 12841 #endif 12842 } 12843 12844 /* 12845 * Remove this from the OOA queue. 12846 */ 12847 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 12848 #ifdef CTL_TIME_IO 12849 if (TAILQ_EMPTY(&lun->ooa_queue)) 12850 lun->last_busy = getsbinuptime(); 12851 #endif 12852 12853 /* 12854 * Run through the blocked queue on this LUN and see if anything 12855 * has become unblocked, now that this transaction is done. 12856 */ 12857 ctl_check_blocked(lun); 12858 12859 /* 12860 * If the LUN has been invalidated, free it if there is nothing 12861 * left on its OOA queue. 12862 */ 12863 if ((lun->flags & CTL_LUN_INVALID) 12864 && TAILQ_EMPTY(&lun->ooa_queue)) { 12865 mtx_unlock(&lun->lun_lock); 12866 mtx_lock(&softc->ctl_lock); 12867 ctl_free_lun(lun); 12868 mtx_unlock(&softc->ctl_lock); 12869 } else 12870 mtx_unlock(&lun->lun_lock); 12871 12872 bailout: 12873 12874 /* 12875 * If this command has been aborted, make sure we set the status 12876 * properly. The FETD is responsible for freeing the I/O and doing 12877 * whatever it needs to do to clean up its state. 12878 */ 12879 if (io->io_hdr.flags & CTL_FLAG_ABORT) 12880 ctl_set_task_aborted(&io->scsiio); 12881 12882 /* 12883 * If enabled, print command error status. 12884 */ 12885 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS && 12886 (ctl_debug & CTL_DEBUG_INFO) != 0) 12887 ctl_io_error_print(io, NULL); 12888 12889 /* 12890 * Tell the FETD or the other shelf controller we're done with this 12891 * command. Note that only SCSI commands get to this point. Task 12892 * management commands are completed above. 
12893 */ 12894 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 12895 (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) { 12896 memset(&msg, 0, sizeof(msg)); 12897 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 12898 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12899 msg.hdr.nexus = io->io_hdr.nexus; 12900 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12901 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data), 12902 M_WAITOK); 12903 } 12904 if ((softc->ha_mode == CTL_HA_MODE_XFER) 12905 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12906 memset(&msg, 0, sizeof(msg)); 12907 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 12908 msg.hdr.original_sc = io->io_hdr.original_sc; 12909 msg.hdr.nexus = io->io_hdr.nexus; 12910 msg.hdr.status = io->io_hdr.status; 12911 msg.scsi.scsi_status = io->scsiio.scsi_status; 12912 msg.scsi.tag_num = io->scsiio.tag_num; 12913 msg.scsi.tag_type = io->scsiio.tag_type; 12914 msg.scsi.sense_len = io->scsiio.sense_len; 12915 msg.scsi.sense_residual = io->scsiio.sense_residual; 12916 msg.scsi.residual = io->scsiio.residual; 12917 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12918 io->scsiio.sense_len); 12919 /* 12920 * We copy this whether or not this is an I/O-related 12921 * command. Otherwise, we'd have to go and check to see 12922 * whether it's a read/write command, and it really isn't 12923 * worth it. 12924 */ 12925 memcpy(&msg.scsi.lbalen, 12926 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 12927 sizeof(msg.scsi.lbalen)); 12928 12929 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12930 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12931 msg.scsi.sense_len, M_WAITOK); 12932 ctl_free_io(io); 12933 } else 12934 fe_done(io); 12935 12936 return (CTL_RETVAL_COMPLETE); 12937 } 12938 12939 #ifdef CTL_WITH_CA 12940 /* 12941 * Front end should call this if it doesn't do autosense. When the request 12942 * sense comes back in from the initiator, we'll dequeue this and send it. 12943 */ 12944 int 12945 ctl_queue_sense(union ctl_io *io) 12946 { 12947 struct ctl_lun *lun; 12948 struct ctl_port *port; 12949 struct ctl_softc *softc; 12950 uint32_t initidx, targ_lun; 12951 12952 softc = control_softc; 12953 12954 CTL_DEBUG_PRINT(("ctl_queue_sense\n")); 12955 12956 /* 12957 * LUN lookup will likely move to the ctl_work_thread() once we 12958 * have our new queueing infrastructure (that doesn't put things on 12959 * a per-LUN queue initially). That is so that we can handle 12960 * things like an INQUIRY to a LUN that we don't have enabled. We 12961 * can't deal with that right now. 12962 */ 12963 mtx_lock(&softc->ctl_lock); 12964 12965 /* 12966 * If we don't have a LUN for this, just toss the sense 12967 * information. 12968 */ 12969 port = ctl_io_port(&io->io_hdr); 12970 targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 12971 if ((targ_lun < CTL_MAX_LUNS) 12972 && (softc->ctl_luns[targ_lun] != NULL)) 12973 lun = softc->ctl_luns[targ_lun]; 12974 else 12975 goto bailout; 12976 12977 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12978 12979 mtx_lock(&lun->lun_lock); 12980 /* 12981 * Already have CA set for this LUN...toss the sense information.
12982 */ 12983 if (ctl_is_set(lun->have_ca, initidx)) { 12984 mtx_unlock(&lun->lun_lock); 12985 goto bailout; 12986 } 12987 12988 memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data, 12989 MIN(sizeof(lun->pending_sense[initidx]), 12990 sizeof(io->scsiio.sense_data))); 12991 ctl_set_mask(lun->have_ca, initidx); 12992 mtx_unlock(&lun->lun_lock); 12993 12994 bailout: 12995 mtx_unlock(&softc->ctl_lock); 12996 12997 ctl_free_io(io); 12998 12999 return (CTL_RETVAL_COMPLETE); 13000 } 13001 #endif 13002 13003 /* 13004 * Primary command inlet from frontend ports. All SCSI and task I/O 13005 * requests must go through this function. 13006 */ 13007 int 13008 ctl_queue(union ctl_io *io) 13009 { 13010 struct ctl_port *port; 13011 13012 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 13013 13014 #ifdef CTL_TIME_IO 13015 io->io_hdr.start_time = time_uptime; 13016 getbintime(&io->io_hdr.start_bt); 13017 #endif /* CTL_TIME_IO */ 13018 13019 /* Map FE-specific LUN ID into global one. */ 13020 port = ctl_io_port(&io->io_hdr); 13021 io->io_hdr.nexus.targ_mapped_lun = 13022 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13023 13024 switch (io->io_hdr.io_type) { 13025 case CTL_IO_SCSI: 13026 case CTL_IO_TASK: 13027 if (ctl_debug & CTL_DEBUG_CDB) 13028 ctl_io_print(io); 13029 ctl_enqueue_incoming(io); 13030 break; 13031 default: 13032 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 13033 return (EINVAL); 13034 } 13035 13036 return (CTL_RETVAL_COMPLETE); 13037 } 13038 13039 #ifdef CTL_IO_DELAY 13040 static void 13041 ctl_done_timer_wakeup(void *arg) 13042 { 13043 union ctl_io *io; 13044 13045 io = (union ctl_io *)arg; 13046 ctl_done(io); 13047 } 13048 #endif /* CTL_IO_DELAY */ 13049 13050 void 13051 ctl_done(union ctl_io *io) 13052 { 13053 13054 /* 13055 * Enable this to catch duplicate completion issues. 13056 */ 13057 #if 0 13058 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { 13059 printf("%s: type %d msg %d cdb %x iptl: " 13060 "%u:%u:%u tag 0x%04x " 13061 "flag %#x status %x\n", 13062 __func__, 13063 io->io_hdr.io_type, 13064 io->io_hdr.msg_type, 13065 io->scsiio.cdb[0], 13066 io->io_hdr.nexus.initid, 13067 io->io_hdr.nexus.targ_port, 13068 io->io_hdr.nexus.targ_lun, 13069 (io->io_hdr.io_type == 13070 CTL_IO_TASK) ? 13071 io->taskio.tag_num : 13072 io->scsiio.tag_num, 13073 io->io_hdr.flags, 13074 io->io_hdr.status); 13075 } else 13076 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; 13077 #endif 13078 13079 /* 13080 * This is an internal copy of an I/O, and should not go through 13081 * the normal done processing logic. 
13082 */ 13083 if (io->io_hdr.flags & CTL_FLAG_INT_COPY) 13084 return; 13085 13086 #ifdef CTL_IO_DELAY 13087 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 13088 struct ctl_lun *lun; 13089 13090 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13091 13092 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 13093 } else { 13094 struct ctl_lun *lun; 13095 13096 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13097 13098 if ((lun != NULL) 13099 && (lun->delay_info.done_delay > 0)) { 13100 13101 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 13102 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 13103 callout_reset(&io->io_hdr.delay_callout, 13104 lun->delay_info.done_delay * hz, 13105 ctl_done_timer_wakeup, io); 13106 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) 13107 lun->delay_info.done_delay = 0; 13108 return; 13109 } 13110 } 13111 #endif /* CTL_IO_DELAY */ 13112 13113 ctl_enqueue_done(io); 13114 } 13115 13116 static void 13117 ctl_work_thread(void *arg) 13118 { 13119 struct ctl_thread *thr = (struct ctl_thread *)arg; 13120 struct ctl_softc *softc = thr->ctl_softc; 13121 union ctl_io *io; 13122 int retval; 13123 13124 CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); 13125 13126 for (;;) { 13127 retval = 0; 13128 13129 /* 13130 * We handle the queues in this order: 13131 * - ISC 13132 * - done queue (to free up resources, unblock other commands) 13133 * - RtR queue 13134 * - incoming queue 13135 * 13136 * If those queues are empty, we break out of the loop and 13137 * go to sleep. 13138 */ 13139 mtx_lock(&thr->queue_lock); 13140 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue); 13141 if (io != NULL) { 13142 STAILQ_REMOVE_HEAD(&thr->isc_queue, links); 13143 mtx_unlock(&thr->queue_lock); 13144 ctl_handle_isc(io); 13145 continue; 13146 } 13147 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue); 13148 if (io != NULL) { 13149 STAILQ_REMOVE_HEAD(&thr->done_queue, links); 13150 /* clear any blocked commands, call fe_done */ 13151 mtx_unlock(&thr->queue_lock); 13152 retval = ctl_process_done(io); 13153 continue; 13154 } 13155 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue); 13156 if (io != NULL) { 13157 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); 13158 mtx_unlock(&thr->queue_lock); 13159 if (io->io_hdr.io_type == CTL_IO_TASK) 13160 ctl_run_task(io); 13161 else 13162 ctl_scsiio_precheck(softc, &io->scsiio); 13163 continue; 13164 } 13165 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); 13166 if (io != NULL) { 13167 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); 13168 mtx_unlock(&thr->queue_lock); 13169 retval = ctl_scsiio(&io->scsiio); 13170 if (retval != CTL_RETVAL_COMPLETE) 13171 CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); 13172 continue; 13173 } 13174 13175 /* Sleep until we have something to do. */ 13176 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0); 13177 } 13178 } 13179 13180 static void 13181 ctl_lun_thread(void *arg) 13182 { 13183 struct ctl_softc *softc = (struct ctl_softc *)arg; 13184 struct ctl_be_lun *be_lun; 13185 int retval; 13186 13187 CTL_DEBUG_PRINT(("ctl_lun_thread starting\n")); 13188 13189 for (;;) { 13190 retval = 0; 13191 mtx_lock(&softc->ctl_lock); 13192 be_lun = STAILQ_FIRST(&softc->pending_lun_queue); 13193 if (be_lun != NULL) { 13194 STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links); 13195 mtx_unlock(&softc->ctl_lock); 13196 ctl_create_lun(be_lun); 13197 continue; 13198 } 13199 13200 /* Sleep until we have something to do. 
*/ 13201 mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock, 13202 PDROP | PRIBIO, "-", 0); 13203 } 13204 } 13205 13206 static void 13207 ctl_thresh_thread(void *arg) 13208 { 13209 struct ctl_softc *softc = (struct ctl_softc *)arg; 13210 struct ctl_lun *lun; 13211 struct ctl_be_lun *be_lun; 13212 struct scsi_da_rw_recovery_page *rwpage; 13213 struct ctl_logical_block_provisioning_page *page; 13214 const char *attr; 13215 union ctl_ha_msg msg; 13216 uint64_t thres, val; 13217 int i, e, set; 13218 13219 CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n")); 13220 13221 for (;;) { 13222 mtx_lock(&softc->ctl_lock); 13223 STAILQ_FOREACH(lun, &softc->lun_list, links) { 13224 be_lun = lun->be_lun; 13225 if ((lun->flags & CTL_LUN_DISABLED) || 13226 (lun->flags & CTL_LUN_OFFLINE) || 13227 lun->backend->lun_attr == NULL) 13228 continue; 13229 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 13230 softc->ha_mode == CTL_HA_MODE_XFER) 13231 continue; 13232 rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT]; 13233 if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0) 13234 continue; 13235 e = 0; 13236 page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT]; 13237 for (i = 0; i < CTL_NUM_LBP_THRESH; i++) { 13238 if ((page->descr[i].flags & SLBPPD_ENABLED) == 0) 13239 continue; 13240 thres = scsi_4btoul(page->descr[i].count); 13241 thres <<= CTL_LBP_EXPONENT; 13242 switch (page->descr[i].resource) { 13243 case 0x01: 13244 attr = "blocksavail"; 13245 break; 13246 case 0x02: 13247 attr = "blocksused"; 13248 break; 13249 case 0xf1: 13250 attr = "poolblocksavail"; 13251 break; 13252 case 0xf2: 13253 attr = "poolblocksused"; 13254 break; 13255 default: 13256 continue; 13257 } 13258 mtx_unlock(&softc->ctl_lock); // XXX 13259 val = lun->backend->lun_attr( 13260 lun->be_lun->be_lun, attr); 13261 mtx_lock(&softc->ctl_lock); 13262 if (val == UINT64_MAX) 13263 continue; 13264 if ((page->descr[i].flags & SLBPPD_ARMING_MASK) 13265 == SLBPPD_ARMING_INC) 13266 e |= (val >= thres); 13267 else 13268 e |= (val <= thres); 13269 } 13270 mtx_lock(&lun->lun_lock); 13271 if (e) { 13272 if (lun->lasttpt == 0 || 13273 time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) { 13274 lun->lasttpt = time_uptime; 13275 ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); 13276 set = 1; 13277 } else 13278 set = 0; 13279 } else { 13280 lun->lasttpt = 0; 13281 ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); 13282 set = -1; 13283 } 13284 mtx_unlock(&lun->lun_lock); 13285 if (set != 0 && 13286 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 13287 /* Send msg to other side. 
*/ 13288 bzero(&msg.ua, sizeof(msg.ua)); 13289 msg.hdr.msg_type = CTL_MSG_UA; 13290 msg.hdr.nexus.initid = -1; 13291 msg.hdr.nexus.targ_port = -1; 13292 msg.hdr.nexus.targ_lun = lun->lun; 13293 msg.hdr.nexus.targ_mapped_lun = lun->lun; 13294 msg.ua.ua_all = 1; 13295 msg.ua.ua_set = (set > 0); 13296 msg.ua.ua_type = CTL_UA_THIN_PROV_THRES; 13297 mtx_unlock(&softc->ctl_lock); // XXX 13298 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13299 sizeof(msg.ua), M_WAITOK); 13300 mtx_lock(&softc->ctl_lock); 13301 } 13302 } 13303 mtx_unlock(&softc->ctl_lock); 13304 pause("-", CTL_LBP_PERIOD * hz); 13305 } 13306 } 13307 13308 static void 13309 ctl_enqueue_incoming(union ctl_io *io) 13310 { 13311 struct ctl_softc *softc = control_softc; 13312 struct ctl_thread *thr; 13313 u_int idx; 13314 13315 idx = (io->io_hdr.nexus.targ_port * 127 + 13316 io->io_hdr.nexus.initid) % worker_threads; 13317 thr = &softc->threads[idx]; 13318 mtx_lock(&thr->queue_lock); 13319 STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links); 13320 mtx_unlock(&thr->queue_lock); 13321 wakeup(thr); 13322 } 13323 13324 static void 13325 ctl_enqueue_rtr(union ctl_io *io) 13326 { 13327 struct ctl_softc *softc = control_softc; 13328 struct ctl_thread *thr; 13329 13330 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13331 mtx_lock(&thr->queue_lock); 13332 STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links); 13333 mtx_unlock(&thr->queue_lock); 13334 wakeup(thr); 13335 } 13336 13337 static void 13338 ctl_enqueue_done(union ctl_io *io) 13339 { 13340 struct ctl_softc *softc = control_softc; 13341 struct ctl_thread *thr; 13342 13343 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13344 mtx_lock(&thr->queue_lock); 13345 STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links); 13346 mtx_unlock(&thr->queue_lock); 13347 wakeup(thr); 13348 } 13349 13350 static void 13351 ctl_enqueue_isc(union ctl_io *io) 13352 { 13353 struct ctl_softc *softc = control_softc; 13354 struct ctl_thread *thr; 13355 13356 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13357 mtx_lock(&thr->queue_lock); 13358 STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links); 13359 mtx_unlock(&thr->queue_lock); 13360 wakeup(thr); 13361 } 13362 13363 /* 13364 * vim: ts=8 13365 */ 13366
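/*
 * Illustrative sketch (not part of the original driver): how a frontend
 * port might build and submit an ABORT TASK request through ctl_queue(),
 * the primary command inlet documented above.  Only fields that
 * ctl_queue(), ctl_run_task() and ctl_abort_task() are shown consuming
 * are filled in.  The function name, its arguments, and the use of
 * ctl_alloc_io() with a frontend pool reference are assumptions made for
 * the example; a real frontend takes the nexus values from its own port
 * and login state.  Kept under #if 0 so it is never compiled in.
 */
#if 0
static void
example_fe_abort_task(void *fe_pool_ref, uint32_t targ_port, uint32_t initid,
		      uint32_t targ_lun, uint32_t tag_to_abort)
{
	union ctl_io *io;

	io = ctl_alloc_io(fe_pool_ref);		/* frontend's I/O pool (assumed) */
	io->io_hdr.io_type = CTL_IO_TASK;	/* routed to ctl_run_task() */
	io->io_hdr.nexus.targ_port = targ_port;
	io->io_hdr.nexus.initid = initid;
	io->io_hdr.nexus.targ_lun = targ_lun;	/* mapped to targ_mapped_lun by ctl_queue() */
	io->taskio.task_action = CTL_TASK_ABORT_TASK;
	io->taskio.tag_num = tag_to_abort;	/* ctl_abort_task() matches on tag_num only */
	io->taskio.tag_type = CTL_TAG_UNTAGGED;	/* tag type is not used for matching */
	ctl_queue(io);				/* completion arrives via the port's fe_done() */
}
#endif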