/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/conf.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/scsi/scsi.h>
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/disp.h>
#include <sys/byteorder.h>
#include <sys/atomic.h>
#include <sys/ethernet.h>
#include <sys/sdt.h>
#include <sys/nvpair.h>
#include <sys/zone.h>

#include <stmf.h>
#include <lpif.h>
#include <portif.h>
#include <stmf_ioctl.h>
#include <stmf_impl.h>
#include <lun_map.h>
#include <stmf_state.h>
#include <pppt_ic_if.h>

static uint64_t stmf_session_counter = 0;
static uint16_t stmf_rtpid_counter = 0;
/* start messages at 1 */
static uint64_t stmf_proxy_msg_id = 1;
#define MSG_ID_TM_BIT 0x8000000000000000
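
/*
 * Proxy message ids are allocated from stmf_proxy_msg_id under
 * stmf_lock.  The top bit is reserved as a marker: ids with
 * MSG_ID_TM_BIT set denote proxied task management functions, which
 * are not completed locally, so the proxy receive paths simply ignore
 * them (see stmf_ic_rx_scsi_status() and stmf_ic_rx_scsi_data()).
 */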

static int stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
    void **result);
static int stmf_open(dev_t *devp, int flag, int otype, cred_t *credp);
static int stmf_close(dev_t dev, int flag, int otype, cred_t *credp);
static int stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
    cred_t *credp, int *rval);
static int stmf_get_stmf_state(stmf_state_desc_t *std);
static int stmf_set_stmf_state(stmf_state_desc_t *std);
static void stmf_abort_task_offline(scsi_task_t *task, int offline_lu,
    char *info);
static int stmf_set_alua_state(stmf_alua_state_desc_t *alua_state);
static void stmf_get_alua_state(stmf_alua_state_desc_t *alua_state);
stmf_xfer_data_t *stmf_prepare_tpgs_data(uint8_t ilu_alua);
void stmf_svc_init();
stmf_status_t stmf_svc_fini();
void stmf_svc(void *arg);
void stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info);
void stmf_check_freetask();
void stmf_abort_target_reset(scsi_task_t *task);
stmf_status_t stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task,
    int target_reset);
void stmf_target_reset_poll(struct scsi_task *task);
void stmf_handle_lun_reset(scsi_task_t *task);
void stmf_handle_target_reset(scsi_task_t *task);
void stmf_xd_to_dbuf(stmf_data_buf_t *dbuf);
int stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
    uint32_t *err_ret);
int stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi);
int stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
    uint32_t *err_ret);
void stmf_delete_ppd(stmf_pp_data_t *ppd);
void stmf_delete_all_ppds();
void stmf_trace_clear();
void stmf_worker_init();
stmf_status_t stmf_worker_fini();
void stmf_worker_mgmt();
void stmf_worker_task(void *arg);
static void stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss);
static stmf_status_t stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg,
    uint32_t type);
static stmf_status_t stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg);
static stmf_status_t stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg);
static stmf_status_t stmf_ic_rx_status(stmf_ic_status_msg_t *msg);
static stmf_status_t stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg);
void stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s);

/* pppt modhandle */
ddi_modhandle_t pppt_mod;

/* pppt modload imported functions */
stmf_ic_reg_port_msg_alloc_func_t ic_reg_port_msg_alloc;
stmf_ic_dereg_port_msg_alloc_func_t ic_dereg_port_msg_alloc;
stmf_ic_reg_lun_msg_alloc_func_t ic_reg_lun_msg_alloc;
stmf_ic_dereg_lun_msg_alloc_func_t ic_dereg_lun_msg_alloc;
stmf_ic_lun_active_msg_alloc_func_t ic_lun_active_msg_alloc;
stmf_ic_scsi_cmd_msg_alloc_func_t ic_scsi_cmd_msg_alloc;
stmf_ic_scsi_data_xfer_done_msg_alloc_func_t ic_scsi_data_xfer_done_msg_alloc;
stmf_ic_session_create_msg_alloc_func_t ic_session_reg_msg_alloc;
stmf_ic_session_destroy_msg_alloc_func_t ic_session_dereg_msg_alloc;
stmf_ic_tx_msg_func_t ic_tx_msg;
stmf_ic_msg_free_func_t ic_msg_free;

static void stmf_update_kstat_lu_q(scsi_task_t *, void());
static void stmf_update_kstat_lport_q(scsi_task_t *, void());
static void stmf_update_kstat_lu_io(scsi_task_t *, stmf_data_buf_t *);
static void stmf_update_kstat_lport_io(scsi_task_t *, stmf_data_buf_t *);

extern struct mod_ops mod_driverops;

/* =====[ Tunables ]===== */
/* Internal tracing */
volatile int stmf_trace_on = 1;
volatile int stmf_trace_buf_size = (1 * 1024 * 1024);
/*
 * The default task timeout is 75 seconds so that the host times out
 * first; host timeouts are typically 60 seconds.
 */
volatile int stmf_default_task_timeout = 75;
/*
 * Setting this to one means you are responsible for loading the config
 * and keeping it in sync with the persistent database.
 */
volatile int stmf_allow_modunload = 0;

volatile int stmf_max_nworkers = 256;
volatile int stmf_min_nworkers = 4;
volatile int stmf_worker_scale_down_delay = 20;

/* === [ Debugging and fault injection ] === */
#ifdef DEBUG
volatile int stmf_drop_task_counter = 0;
volatile int stmf_drop_buf_counter = 0;

#endif

stmf_state_t stmf_state;
static stmf_lu_t *dlun0;

static uint8_t stmf_first_zero[] =
    { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
static uint8_t stmf_first_one[] =
    { 0xff, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 };

static kmutex_t trace_buf_lock;
static int trace_buf_size;
static int trace_buf_curndx;
caddr_t stmf_trace_buf;

static enum {
    STMF_WORKERS_DISABLED = 0,
    STMF_WORKERS_ENABLING,
    STMF_WORKERS_ENABLED
} stmf_workers_state = STMF_WORKERS_DISABLED;
static int stmf_i_max_nworkers;
static int stmf_i_min_nworkers;
static int stmf_nworkers_cur;       /* # of workers currently running */
static int stmf_nworkers_needed;    /* # of workers that need to be running */
static int stmf_worker_sel_counter = 0;
static uint32_t stmf_cur_ntasks = 0;
static clock_t stmf_wm_last = 0;
/*
 * This is equal to stmf_nworkers_cur while we are increasing # workers and
 * stmf_nworkers_needed while we are decreasing the worker count.
 */
static int stmf_nworkers_accepting_cmds;
static stmf_worker_t *stmf_workers = NULL;
static clock_t stmf_worker_mgmt_delay = 2;
static clock_t stmf_worker_scale_down_timer = 0;
static int stmf_worker_scale_down_qd = 0;

static struct cb_ops stmf_cb_ops = {
    stmf_open,          /* open */
    stmf_close,         /* close */
    nodev,              /* strategy */
    nodev,              /* print */
    nodev,              /* dump */
    nodev,              /* read */
    nodev,              /* write */
    stmf_ioctl,         /* ioctl */
    nodev,              /* devmap */
    nodev,              /* mmap */
    nodev,              /* segmap */
    nochpoll,           /* chpoll */
    ddi_prop_op,        /* cb_prop_op */
    0,                  /* streamtab */
    D_NEW | D_MP,       /* cb_flag */
    CB_REV,             /* rev */
    nodev,              /* aread */
    nodev               /* awrite */
};

static struct dev_ops stmf_ops = {
    DEVO_REV,
    0,
    stmf_getinfo,
    nulldev,            /* identify */
    nulldev,            /* probe */
    stmf_attach,
    stmf_detach,
    nodev,              /* reset */
    &stmf_cb_ops,
    NULL,               /* bus_ops */
    NULL                /* power */
};

#define STMF_NAME           "COMSTAR STMF"
#define STMF_MODULE_NAME    "stmf"

static struct modldrv modldrv = {
    &mod_driverops,
    STMF_NAME,
    &stmf_ops
};

static struct modlinkage modlinkage = {
    MODREV_1,
    &modldrv,
    NULL
};

int
_init(void)
{
    int ret;

    ret = mod_install(&modlinkage);
    if (ret)
        return (ret);
    stmf_trace_buf = kmem_zalloc(stmf_trace_buf_size, KM_SLEEP);
    trace_buf_size = stmf_trace_buf_size;
    trace_buf_curndx = 0;
    mutex_init(&trace_buf_lock, NULL, MUTEX_DRIVER, 0);
    bzero(&stmf_state, sizeof (stmf_state_t));
    /* STMF service is off by default */
    stmf_state.stmf_service_running = 0;
    mutex_init(&stmf_state.stmf_lock, NULL, MUTEX_DRIVER, NULL);
    cv_init(&stmf_state.stmf_cv, NULL, CV_DRIVER, NULL);
    stmf_session_counter = (uint64_t)ddi_get_lbolt();
    stmf_view_init();
    stmf_svc_init();
    stmf_dlun_init();
    return (ret);
}
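
/*
 * Teardown mirrors _init(): if a later step of _fini() fails, each
 * subsystem that was already brought down is restarted, so a refused
 * unload leaves the driver fully functional.
 */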
int
_fini(void)
{
    int ret;

    if (stmf_state.stmf_service_running)
        return (EBUSY);
    if ((!stmf_allow_modunload) &&
        (stmf_state.stmf_config_state != STMF_CONFIG_NONE)) {
        return (EBUSY);
    }
    if (stmf_state.stmf_nlps || stmf_state.stmf_npps) {
        return (EBUSY);
    }
    if (stmf_dlun_fini() != STMF_SUCCESS)
        return (EBUSY);
    if (stmf_worker_fini() != STMF_SUCCESS) {
        stmf_dlun_init();
        return (EBUSY);
    }
    if (stmf_svc_fini() != STMF_SUCCESS) {
        stmf_dlun_init();
        stmf_worker_init();
        return (EBUSY);
    }

    ret = mod_remove(&modlinkage);
    if (ret) {
        stmf_svc_init();
        stmf_dlun_init();
        stmf_worker_init();
        return (ret);
    }

    stmf_view_clear_config();
    kmem_free(stmf_trace_buf, stmf_trace_buf_size);
    mutex_destroy(&trace_buf_lock);
    mutex_destroy(&stmf_state.stmf_lock);
    cv_destroy(&stmf_state.stmf_cv);
    return (ret);
}

int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}

/* ARGSUSED */
static int
stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
    switch (cmd) {
    case DDI_INFO_DEVT2DEVINFO:
        *result = stmf_state.stmf_dip;
        break;
    case DDI_INFO_DEVT2INSTANCE:
        *result =
            (void *)(uintptr_t)ddi_get_instance(stmf_state.stmf_dip);
        break;
    default:
        return (DDI_FAILURE);
    }

    return (DDI_SUCCESS);
}

static int
stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    switch (cmd) {
    case DDI_ATTACH:
        stmf_state.stmf_dip = dip;

        if (ddi_create_minor_node(dip, "admin", S_IFCHR, 0,
            DDI_NT_STMF, 0) != DDI_SUCCESS) {
            break;
        }
        ddi_report_dev(dip);
        return (DDI_SUCCESS);
    }

    return (DDI_FAILURE);
}

static int
stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    switch (cmd) {
    case DDI_DETACH:
        ddi_remove_minor_node(dip, 0);
        return (DDI_SUCCESS);
    }

    return (DDI_FAILURE);
}

/* ARGSUSED */
static int
stmf_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
    mutex_enter(&stmf_state.stmf_lock);
    if (stmf_state.stmf_exclusive_open) {
        mutex_exit(&stmf_state.stmf_lock);
        return (EBUSY);
    }
    if (flag & FEXCL) {
        if (stmf_state.stmf_opened) {
            mutex_exit(&stmf_state.stmf_lock);
            return (EBUSY);
        }
        stmf_state.stmf_exclusive_open = 1;
    }
    stmf_state.stmf_opened = 1;
    mutex_exit(&stmf_state.stmf_lock);
    return (0);
}

/* ARGSUSED */
static int
stmf_close(dev_t dev, int flag, int otype, cred_t *credp)
{
    mutex_enter(&stmf_state.stmf_lock);
    stmf_state.stmf_opened = 0;
    if (stmf_state.stmf_exclusive_open &&
        (stmf_state.stmf_config_state != STMF_CONFIG_INIT_DONE)) {
        stmf_state.stmf_config_state = STMF_CONFIG_NONE;
        stmf_delete_all_ppds();
        stmf_view_clear_config();
        stmf_view_init();
    }
    stmf_state.stmf_exclusive_open = 0;
    mutex_exit(&stmf_state.stmf_lock);
    return (0);
}
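
/*
 * ioctl argument marshalling.  Every STMF ioctl passes a stmf_iocdata_t
 * that carries a version, an error-code slot and the sizes and
 * user-space addresses of an optional input buffer (ibuf) and output
 * buffer (obuf).  stmf_copyin_iocdata() copies in the descriptor and
 * ibuf and allocates a kernel obuf; stmf_copyout_iocdata() copies obuf
 * and the (possibly updated) descriptor back out.
 */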
int
stmf_copyin_iocdata(intptr_t data, int mode, stmf_iocdata_t **iocd,
    void **ibuf, void **obuf)
{
    int ret;

    *ibuf = NULL;
    *obuf = NULL;
    *iocd = kmem_zalloc(sizeof (stmf_iocdata_t), KM_SLEEP);

    ret = ddi_copyin((void *)data, *iocd, sizeof (stmf_iocdata_t), mode);
    if (ret)
        return (EFAULT);
    if ((*iocd)->stmf_version != STMF_VERSION_1) {
        ret = EINVAL;
        goto copyin_iocdata_done;
    }
    if ((*iocd)->stmf_ibuf_size) {
        *ibuf = kmem_zalloc((*iocd)->stmf_ibuf_size, KM_SLEEP);
        ret = ddi_copyin((void *)((unsigned long)(*iocd)->stmf_ibuf),
            *ibuf, (*iocd)->stmf_ibuf_size, mode);
    }
    if ((*iocd)->stmf_obuf_size)
        *obuf = kmem_zalloc((*iocd)->stmf_obuf_size, KM_SLEEP);

    if (ret == 0)
        return (0);
    ret = EFAULT;
copyin_iocdata_done:;
    if (*obuf) {
        kmem_free(*obuf, (*iocd)->stmf_obuf_size);
        *obuf = NULL;
    }
    if (*ibuf) {
        kmem_free(*ibuf, (*iocd)->stmf_ibuf_size);
        *ibuf = NULL;
    }
    kmem_free(*iocd, sizeof (stmf_iocdata_t));
    return (ret);
}

int
stmf_copyout_iocdata(intptr_t data, int mode, stmf_iocdata_t *iocd, void *obuf)
{
    int ret;

    if (iocd->stmf_obuf_size) {
        ret = ddi_copyout(obuf, (void *)(unsigned long)iocd->stmf_obuf,
            iocd->stmf_obuf_size, mode);
        if (ret)
            return (EFAULT);
    }
    ret = ddi_copyout(iocd, (void *)data, sizeof (stmf_iocdata_t), mode);
    if (ret)
        return (EFAULT);
    return (0);
}

/* ARGSUSED */
static int
stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
    cred_t *credp, int *rval)
{
    stmf_iocdata_t *iocd;
    void *ibuf = NULL, *obuf = NULL;
    slist_lu_t *luid_list;
    slist_target_port_t *lportid_list;
    stmf_i_lu_t *ilu;
    stmf_i_local_port_t *ilport;
    stmf_i_scsi_session_t *iss;
    slist_scsi_session_t *iss_list;
    sioc_lu_props_t *lup;
    sioc_target_port_props_t *lportp;
    stmf_ppioctl_data_t *ppi, *ppi_out = NULL;
    uint64_t *ppi_token = NULL;
    uint8_t *p_id, *id;
    stmf_state_desc_t *std;
    stmf_status_t ctl_ret;
    stmf_state_change_info_t ssi;
    int ret = 0;
    uint32_t n;
    int i;
    stmf_group_op_data_t *grp_entry;
    stmf_group_name_t *grpname;
    stmf_view_op_entry_t *ve;
    stmf_id_type_t idtype;
    stmf_id_data_t *id_entry;
    stmf_id_list_t *id_list;
    stmf_view_entry_t *view_entry;
    uint32_t veid;

    if ((cmd & 0xff000000) != STMF_IOCTL) {
        return (ENOTTY);
    }

    if (drv_priv(credp) != 0) {
        return (EPERM);
    }

    ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
    if (ret)
        return (ret);
    iocd->stmf_error = 0;

    switch (cmd) {
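    /*
     * List-returning ioctls below share a convention:
     * stmf_obuf_max_nentries is set to the number of entries that
     * exist, while stmf_obuf_nentries is the number actually copied
     * into obuf.  A caller that sees max > nentries can retry with a
     * larger output buffer.
     */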
    case STMF_IOCTL_LU_LIST:
        /* retrieves both registered/unregistered */
        mutex_enter(&stmf_state.stmf_lock);
        id_list = &stmf_state.stmf_luid_list;
        n = min(id_list->id_count,
            (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
        iocd->stmf_obuf_max_nentries = id_list->id_count;
        luid_list = (slist_lu_t *)obuf;
        id_entry = id_list->idl_head;
        for (i = 0; i < n; i++) {
            bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
            id_entry = id_entry->id_next;
        }

        n = iocd->stmf_obuf_size/sizeof (slist_lu_t);
        for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
            id = (uint8_t *)ilu->ilu_lu->lu_id;
            if (stmf_lookup_id(id_list, 16, id + 4) == NULL) {
                iocd->stmf_obuf_max_nentries++;
                if (i < n) {
                    bcopy(id + 4, luid_list[i].lu_guid,
                        sizeof (slist_lu_t));
                    i++;
                }
            }
        }
        iocd->stmf_obuf_nentries = i;
        mutex_exit(&stmf_state.stmf_lock);
        break;

    case STMF_IOCTL_REG_LU_LIST:
        mutex_enter(&stmf_state.stmf_lock);
        iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlus;
        n = min(stmf_state.stmf_nlus,
            (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
        iocd->stmf_obuf_nentries = n;
        ilu = stmf_state.stmf_ilulist;
        luid_list = (slist_lu_t *)obuf;
        for (i = 0; i < n; i++) {
            uint8_t *id;
            id = (uint8_t *)ilu->ilu_lu->lu_id;
            bcopy(id + 4, luid_list[i].lu_guid, 16);
            ilu = ilu->ilu_next;
        }
        mutex_exit(&stmf_state.stmf_lock);
        break;

    case STMF_IOCTL_VE_LU_LIST:
        mutex_enter(&stmf_state.stmf_lock);
        id_list = &stmf_state.stmf_luid_list;
        n = min(id_list->id_count,
            (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
        iocd->stmf_obuf_max_nentries = id_list->id_count;
        iocd->stmf_obuf_nentries = n;
        luid_list = (slist_lu_t *)obuf;
        id_entry = id_list->idl_head;
        for (i = 0; i < n; i++) {
            bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
            id_entry = id_entry->id_next;
        }
        mutex_exit(&stmf_state.stmf_lock);
        break;

    case STMF_IOCTL_TARGET_PORT_LIST:
        mutex_enter(&stmf_state.stmf_lock);
        iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlports;
        n = min(stmf_state.stmf_nlports,
            (iocd->stmf_obuf_size)/sizeof (slist_target_port_t));
        iocd->stmf_obuf_nentries = n;
        ilport = stmf_state.stmf_ilportlist;
        lportid_list = (slist_target_port_t *)obuf;
        for (i = 0; i < n; i++) {
            uint8_t *id;
            id = (uint8_t *)ilport->ilport_lport->lport_id;
            bcopy(id, lportid_list[i].target, id[3] + 4);
            ilport = ilport->ilport_next;
        }
        mutex_exit(&stmf_state.stmf_lock);
        break;

    case STMF_IOCTL_SESSION_LIST:
        p_id = (uint8_t *)ibuf;
        if ((p_id == NULL) || (iocd->stmf_ibuf_size < 4) ||
            (iocd->stmf_ibuf_size < (p_id[3] + 4))) {
            ret = EINVAL;
            break;
        }
        mutex_enter(&stmf_state.stmf_lock);
        for (ilport = stmf_state.stmf_ilportlist; ilport; ilport =
            ilport->ilport_next) {
            uint8_t *id;
            id = (uint8_t *)ilport->ilport_lport->lport_id;
            if ((p_id[3] == id[3]) &&
                (bcmp(p_id + 4, id + 4, id[3]) == 0)) {
                break;
            }
        }
        if (ilport == NULL) {
            mutex_exit(&stmf_state.stmf_lock);
            ret = ENOENT;
            break;
        }
        iocd->stmf_obuf_max_nentries = ilport->ilport_nsessions;
        n = min(ilport->ilport_nsessions,
            (iocd->stmf_obuf_size)/sizeof (slist_scsi_session_t));
        iocd->stmf_obuf_nentries = n;
        iss = ilport->ilport_ss_list;
        iss_list = (slist_scsi_session_t *)obuf;
        for (i = 0; i < n; i++) {
            uint8_t *id;
            id = (uint8_t *)iss->iss_ss->ss_rport_id;
            bcopy(id, iss_list[i].initiator, id[3] + 4);
            iss_list[i].creation_time = (uint32_t)
                iss->iss_creation_time;
            if (iss->iss_ss->ss_rport_alias) {
                (void) strncpy(iss_list[i].alias,
                    iss->iss_ss->ss_rport_alias, 255);
                iss_list[i].alias[255] = 0;
            } else {
                iss_list[i].alias[0] = 0;
            }
            iss = iss->iss_next;
        }
        mutex_exit(&stmf_state.stmf_lock);
        break;

    case STMF_IOCTL_GET_LU_PROPERTIES:
        p_id = (uint8_t *)ibuf;
        if ((iocd->stmf_ibuf_size < 16) ||
            (iocd->stmf_obuf_size < sizeof (sioc_lu_props_t)) ||
            (p_id[0] == 0)) {
            ret = EINVAL;
            break;
        }
        mutex_enter(&stmf_state.stmf_lock);
        for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
            if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
                break;
        }
        if (ilu == NULL) {
            mutex_exit(&stmf_state.stmf_lock);
            ret = ENOENT;
            break;
        }
        lup = (sioc_lu_props_t *)obuf;
        bcopy(ilu->ilu_lu->lu_id->ident, lup->lu_guid, 16);
        lup->lu_state = ilu->ilu_state & 0x0f;
        lup->lu_present = 1; /* XXX */
        (void) strncpy(lup->lu_provider_name,
            ilu->ilu_lu->lu_lp->lp_name, 255);
        lup->lu_provider_name[254] = 0;
        if (ilu->ilu_lu->lu_alias) {
            (void) strncpy(lup->lu_alias,
                ilu->ilu_lu->lu_alias, 255);
            lup->lu_alias[255] = 0;
        } else {
            lup->lu_alias[0] = 0;
        }
        mutex_exit(&stmf_state.stmf_lock);
        break;

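    /*
     * Target port and remote port ids handled below are SCSI device
     * ids (scsi_devid_desc_t): byte 3 holds the identifier length and
     * the identifier itself starts at byte 4, which is why the
     * comparisons use id[3] and id + 4.
     */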
    case STMF_IOCTL_GET_TARGET_PORT_PROPERTIES:
        p_id = (uint8_t *)ibuf;
        if ((p_id == NULL) ||
            (iocd->stmf_ibuf_size < (p_id[3] + 4)) ||
            (iocd->stmf_obuf_size <
            sizeof (sioc_target_port_props_t))) {
            ret = EINVAL;
            break;
        }
        mutex_enter(&stmf_state.stmf_lock);
        for (ilport = stmf_state.stmf_ilportlist; ilport;
            ilport = ilport->ilport_next) {
            uint8_t *id;
            id = (uint8_t *)ilport->ilport_lport->lport_id;
            if ((p_id[3] == id[3]) &&
                (bcmp(p_id+4, id+4, id[3]) == 0))
                break;
        }
        if (ilport == NULL) {
            mutex_exit(&stmf_state.stmf_lock);
            ret = ENOENT;
            break;
        }
        lportp = (sioc_target_port_props_t *)obuf;
        bcopy(ilport->ilport_lport->lport_id, lportp->tgt_id,
            ilport->ilport_lport->lport_id->ident_length + 4);
        lportp->tgt_state = ilport->ilport_state & 0x0f;
        lportp->tgt_present = 1; /* XXX */
        (void) strncpy(lportp->tgt_provider_name,
            ilport->ilport_lport->lport_pp->pp_name, 255);
        lportp->tgt_provider_name[254] = 0;
        if (ilport->ilport_lport->lport_alias) {
            (void) strncpy(lportp->tgt_alias,
                ilport->ilport_lport->lport_alias, 255);
            lportp->tgt_alias[255] = 0;
        } else {
            lportp->tgt_alias[0] = 0;
        }
        mutex_exit(&stmf_state.stmf_lock);
        break;

    case STMF_IOCTL_SET_STMF_STATE:
        if ((ibuf == NULL) ||
            (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
            ret = EINVAL;
            break;
        }
        ret = stmf_set_stmf_state((stmf_state_desc_t *)ibuf);
        break;

    case STMF_IOCTL_GET_STMF_STATE:
        if ((obuf == NULL) ||
            (iocd->stmf_obuf_size < sizeof (stmf_state_desc_t))) {
            ret = EINVAL;
            break;
        }
        ret = stmf_get_stmf_state((stmf_state_desc_t *)obuf);
        break;

    case STMF_IOCTL_SET_ALUA_STATE:
        if ((ibuf == NULL) ||
            (iocd->stmf_ibuf_size < sizeof (stmf_alua_state_desc_t))) {
            ret = EINVAL;
            break;
        }
        ret = stmf_set_alua_state((stmf_alua_state_desc_t *)ibuf);
        break;

    case STMF_IOCTL_GET_ALUA_STATE:
        if ((obuf == NULL) ||
            (iocd->stmf_obuf_size < sizeof (stmf_alua_state_desc_t))) {
            ret = EINVAL;
            break;
        }
        stmf_get_alua_state((stmf_alua_state_desc_t *)obuf);
        break;

    case STMF_IOCTL_SET_LU_STATE:
        ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
        ssi.st_additional_info = NULL;
        std = (stmf_state_desc_t *)ibuf;
        if ((ibuf == NULL) ||
            (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
            ret = EINVAL;
            break;
        }
        p_id = std->ident;
        mutex_enter(&stmf_state.stmf_lock);
        if (stmf_state.stmf_inventory_locked) {
            mutex_exit(&stmf_state.stmf_lock);
            ret = EBUSY;
            break;
        }
        for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
            if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
                break;
        }
        if (ilu == NULL) {
            mutex_exit(&stmf_state.stmf_lock);
            ret = ENOENT;
            break;
        }
        stmf_state.stmf_inventory_locked = 1;
        mutex_exit(&stmf_state.stmf_lock);
        cmd = (std->state == STMF_STATE_ONLINE) ? STMF_CMD_LU_ONLINE :
            STMF_CMD_LU_OFFLINE;
        ctl_ret = stmf_ctl(cmd, (void *)ilu->ilu_lu, &ssi);
        if (ctl_ret == STMF_ALREADY)
            ret = 0;
        else if (ctl_ret == STMF_BUSY)
            ret = EBUSY;
        else if (ctl_ret != STMF_SUCCESS)
            ret = EIO;
        mutex_enter(&stmf_state.stmf_lock);
        stmf_state.stmf_inventory_locked = 0;
        mutex_exit(&stmf_state.stmf_lock);
        break;

    case STMF_IOCTL_SET_TARGET_PORT_STATE:
        ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
        ssi.st_additional_info = NULL;
        std = (stmf_state_desc_t *)ibuf;
        if ((ibuf == NULL) ||
            (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
            ret = EINVAL;
            break;
        }
        p_id = std->ident;
        mutex_enter(&stmf_state.stmf_lock);
        if (stmf_state.stmf_inventory_locked) {
            mutex_exit(&stmf_state.stmf_lock);
            ret = EBUSY;
            break;
        }
        for (ilport = stmf_state.stmf_ilportlist; ilport;
            ilport = ilport->ilport_next) {
            uint8_t *id;
            id = (uint8_t *)ilport->ilport_lport->lport_id;
            if ((id[3] == p_id[3]) &&
                (bcmp(id+4, p_id+4, id[3]) == 0)) {
                break;
            }
        }
        if (ilport == NULL) {
            mutex_exit(&stmf_state.stmf_lock);
            ret = ENOENT;
            break;
        }
        stmf_state.stmf_inventory_locked = 1;
        mutex_exit(&stmf_state.stmf_lock);
        cmd = (std->state == STMF_STATE_ONLINE) ?
            STMF_CMD_LPORT_ONLINE : STMF_CMD_LPORT_OFFLINE;
        ctl_ret = stmf_ctl(cmd, (void *)ilport->ilport_lport, &ssi);
        if (ctl_ret == STMF_ALREADY)
            ret = 0;
        else if (ctl_ret == STMF_BUSY)
            ret = EBUSY;
        else if (ctl_ret != STMF_SUCCESS)
            ret = EIO;
        mutex_enter(&stmf_state.stmf_lock);
        stmf_state.stmf_inventory_locked = 0;
        mutex_exit(&stmf_state.stmf_lock);
        break;

    case STMF_IOCTL_ADD_HG_ENTRY:
        idtype = STMF_ID_TYPE_HOST;
        /* FALLTHROUGH */
    case STMF_IOCTL_ADD_TG_ENTRY:
        if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
            ret = EACCES;
            iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
            break;
        }
        if (cmd == STMF_IOCTL_ADD_TG_ENTRY) {
            idtype = STMF_ID_TYPE_TARGET;
        }
        grp_entry = (stmf_group_op_data_t *)ibuf;
        if ((ibuf == NULL) ||
            (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
            ret = EINVAL;
            break;
        }
        if (grp_entry->group.name[0] == '*') {
            ret = EINVAL;
            break; /* not allowed */
        }
        mutex_enter(&stmf_state.stmf_lock);
        ret = stmf_add_group_member(grp_entry->group.name,
            grp_entry->group.name_size,
            grp_entry->ident + 4,
            grp_entry->ident[3],
            idtype,
            &iocd->stmf_error);
        mutex_exit(&stmf_state.stmf_lock);
        break;
    case STMF_IOCTL_REMOVE_HG_ENTRY:
        idtype = STMF_ID_TYPE_HOST;
        /* FALLTHROUGH */
    case STMF_IOCTL_REMOVE_TG_ENTRY:
        if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
            ret = EACCES;
            iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
            break;
        }
        if (cmd == STMF_IOCTL_REMOVE_TG_ENTRY) {
            idtype = STMF_ID_TYPE_TARGET;
        }
        grp_entry = (stmf_group_op_data_t *)ibuf;
        if ((ibuf == NULL) ||
            (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
            ret = EINVAL;
            break;
        }
        if (grp_entry->group.name[0] == '*') {
            ret = EINVAL;
            break; /* not allowed */
        }
        mutex_enter(&stmf_state.stmf_lock);
        ret = stmf_remove_group_member(grp_entry->group.name,
            grp_entry->group.name_size,
            grp_entry->ident + 4,
            grp_entry->ident[3],
            idtype,
            &iocd->stmf_error);
        mutex_exit(&stmf_state.stmf_lock);
        break;
    case STMF_IOCTL_CREATE_HOST_GROUP:
        idtype = STMF_ID_TYPE_HOST_GROUP;
        /* FALLTHROUGH */
    case STMF_IOCTL_CREATE_TARGET_GROUP:
        if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
            ret = EACCES;
            iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
            break;
        }
        grpname = (stmf_group_name_t *)ibuf;

        if (cmd == STMF_IOCTL_CREATE_TARGET_GROUP)
            idtype = STMF_ID_TYPE_TARGET_GROUP;
        if ((ibuf == NULL) ||
            (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
            ret = EINVAL;
            break;
        }
        if (grpname->name[0] == '*') {
            ret = EINVAL;
            break; /* not allowed */
        }
        mutex_enter(&stmf_state.stmf_lock);
        ret = stmf_add_group(grpname->name,
            grpname->name_size, idtype, &iocd->stmf_error);
        mutex_exit(&stmf_state.stmf_lock);
        break;
    case STMF_IOCTL_REMOVE_HOST_GROUP:
        idtype = STMF_ID_TYPE_HOST_GROUP;
        /* FALLTHROUGH */
    case STMF_IOCTL_REMOVE_TARGET_GROUP:
        if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
            ret = EACCES;
            iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
            break;
        }
        grpname = (stmf_group_name_t *)ibuf;
        if (cmd == STMF_IOCTL_REMOVE_TARGET_GROUP)
            idtype = STMF_ID_TYPE_TARGET_GROUP;
        if ((ibuf == NULL) ||
            (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
            ret = EINVAL;
            break;
        }
        if (grpname->name[0] == '*') {
            ret = EINVAL;
            break; /* not allowed */
        }
        mutex_enter(&stmf_state.stmf_lock);
        ret = stmf_remove_group(grpname->name,
            grpname->name_size, idtype, &iocd->stmf_error);
        mutex_exit(&stmf_state.stmf_lock);
        break;
    case STMF_IOCTL_VALIDATE_VIEW:
    case STMF_IOCTL_ADD_VIEW_ENTRY:
        if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
            ret = EACCES;
            iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
            break;
        }
        ve = (stmf_view_op_entry_t *)ibuf;
        if ((ibuf == NULL) ||
            (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
            ret = EINVAL;
            break;
        }
        if (!ve->ve_lu_number_valid)
            ve->ve_lu_nbr[2] = 0xFF;
        if (ve->ve_all_hosts) {
            ve->ve_host_group.name[0] = '*';
            ve->ve_host_group.name_size = 1;
        }
        if (ve->ve_all_targets) {
            ve->ve_target_group.name[0] = '*';
            ve->ve_target_group.name_size = 1;
        }
        if (ve->ve_ndx_valid)
            veid = ve->ve_ndx;
        else
            veid = 0xffffffff;
        mutex_enter(&stmf_state.stmf_lock);
        if (cmd == STMF_IOCTL_ADD_VIEW_ENTRY) {
            ret = stmf_add_ve(ve->ve_host_group.name,
                ve->ve_host_group.name_size,
                ve->ve_target_group.name,
                ve->ve_target_group.name_size,
                ve->ve_guid,
                &veid,
                ve->ve_lu_nbr,
                &iocd->stmf_error);
        } else { /* STMF_IOCTL_VALIDATE_VIEW */
            ret = stmf_validate_lun_ve(ve->ve_host_group.name,
                ve->ve_host_group.name_size,
                ve->ve_target_group.name,
                ve->ve_target_group.name_size,
                ve->ve_lu_nbr,
                &iocd->stmf_error);
        }
        mutex_exit(&stmf_state.stmf_lock);
        if (ret == 0 &&
            (!ve->ve_ndx_valid || !ve->ve_lu_number_valid) &&
            iocd->stmf_obuf_size >= sizeof (stmf_view_op_entry_t)) {
            stmf_view_op_entry_t *ve_ret =
                (stmf_view_op_entry_t *)obuf;
            iocd->stmf_obuf_nentries = 1;
            iocd->stmf_obuf_max_nentries = 1;
            if (!ve->ve_ndx_valid) {
                ve_ret->ve_ndx = veid;
                ve_ret->ve_ndx_valid = 1;
            }
            if (!ve->ve_lu_number_valid) {
                ve_ret->ve_lu_number_valid = 1;
                bcopy(ve->ve_lu_nbr, ve_ret->ve_lu_nbr, 8);
            }
        }
        break;
    case STMF_IOCTL_REMOVE_VIEW_ENTRY:
        if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
            ret = EACCES;
            iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
            break;
        }
        ve = (stmf_view_op_entry_t *)ibuf;
        if ((ibuf == NULL) ||
            (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
            ret = EINVAL;
            break;
        }
        if (!ve->ve_ndx_valid) {
            ret = EINVAL;
            break;
        }
        mutex_enter(&stmf_state.stmf_lock);
        ret = stmf_remove_ve_by_id(ve->ve_guid, ve->ve_ndx,
            &iocd->stmf_error);
        mutex_exit(&stmf_state.stmf_lock);
        break;
    case STMF_IOCTL_GET_HG_LIST:
        id_list = &stmf_state.stmf_hg_list;
        /* FALLTHROUGH */
    case STMF_IOCTL_GET_TG_LIST:
        if (cmd == STMF_IOCTL_GET_TG_LIST)
            id_list = &stmf_state.stmf_tg_list;
        mutex_enter(&stmf_state.stmf_lock);
        iocd->stmf_obuf_max_nentries = id_list->id_count;
        n = min(id_list->id_count,
            (iocd->stmf_obuf_size)/sizeof (stmf_group_name_t));
        iocd->stmf_obuf_nentries = n;
        id_entry = id_list->idl_head;
        grpname = (stmf_group_name_t *)obuf;
        for (i = 0; i < n; i++) {
            if (id_entry->id_data[0] == '*') {
                if (iocd->stmf_obuf_nentries > 0) {
                    iocd->stmf_obuf_nentries--;
                }
                id_entry = id_entry->id_next;
                continue;
            }
            grpname->name_size = id_entry->id_data_size;
            bcopy(id_entry->id_data, grpname->name,
                id_entry->id_data_size);
            grpname++;
            id_entry = id_entry->id_next;
        }
        mutex_exit(&stmf_state.stmf_lock);
        break;
    case STMF_IOCTL_GET_HG_ENTRIES:
        id_list = &stmf_state.stmf_hg_list;
        /* FALLTHROUGH */
    case STMF_IOCTL_GET_TG_ENTRIES:
        grpname = (stmf_group_name_t *)ibuf;
        if ((ibuf == NULL) ||
            (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
            ret = EINVAL;
            break;
        }
        if (cmd == STMF_IOCTL_GET_TG_ENTRIES) {
            id_list = &stmf_state.stmf_tg_list;
        }
        mutex_enter(&stmf_state.stmf_lock);
        id_entry = stmf_lookup_id(id_list, grpname->name_size,
            grpname->name);
        if (!id_entry)
            ret = ENODEV;
        else {
            stmf_ge_ident_t *grp_entry;
            id_list = (stmf_id_list_t *)id_entry->id_impl_specific;
            iocd->stmf_obuf_max_nentries = id_list->id_count;
            n = min(id_list->id_count,
                iocd->stmf_obuf_size/sizeof (stmf_ge_ident_t));
            iocd->stmf_obuf_nentries = n;
            id_entry = id_list->idl_head;
            grp_entry = (stmf_ge_ident_t *)obuf;
            for (i = 0; i < n; i++) {
                bcopy(id_entry->id_data, grp_entry->ident,
                    id_entry->id_data_size);
                grp_entry->ident_size = id_entry->id_data_size;
                id_entry = id_entry->id_next;
                grp_entry++;
            }
        }
        mutex_exit(&stmf_state.stmf_lock);
        break;

    case STMF_IOCTL_GET_VE_LIST:
        n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
        mutex_enter(&stmf_state.stmf_lock);
        ve = (stmf_view_op_entry_t *)obuf;
        for (id_entry = stmf_state.stmf_luid_list.idl_head;
            id_entry; id_entry = id_entry->id_next) {
            for (view_entry = (stmf_view_entry_t *)
                id_entry->id_impl_specific; view_entry;
                view_entry = view_entry->ve_next) {
                iocd->stmf_obuf_max_nentries++;
                if (iocd->stmf_obuf_nentries >= n)
                    continue;
                ve->ve_ndx_valid = 1;
                ve->ve_ndx = view_entry->ve_id;
                ve->ve_lu_number_valid = 1;
                bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
                bcopy(view_entry->ve_luid->id_data, ve->ve_guid,
                    view_entry->ve_luid->id_data_size);
                if (view_entry->ve_hg->id_data[0] == '*') {
                    ve->ve_all_hosts = 1;
                } else {
                    bcopy(view_entry->ve_hg->id_data,
                        ve->ve_host_group.name,
                        view_entry->ve_hg->id_data_size);
                    ve->ve_host_group.name_size =
                        view_entry->ve_hg->id_data_size;
                }

                if (view_entry->ve_tg->id_data[0] == '*') {
                    ve->ve_all_targets = 1;
                } else {
                    bcopy(view_entry->ve_tg->id_data,
                        ve->ve_target_group.name,
                        view_entry->ve_tg->id_data_size);
                    ve->ve_target_group.name_size =
                        view_entry->ve_tg->id_data_size;
                }
                ve++;
                iocd->stmf_obuf_nentries++;
            }
        }
        mutex_exit(&stmf_state.stmf_lock);
        break;

    case STMF_IOCTL_LU_VE_LIST:
        p_id = (uint8_t *)ibuf;
        if ((iocd->stmf_ibuf_size != 16) ||
            (iocd->stmf_obuf_size < sizeof (stmf_view_op_entry_t))) {
            ret = EINVAL;
            break;
        }

        n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
        mutex_enter(&stmf_state.stmf_lock);
        ve = (stmf_view_op_entry_t *)obuf;
        for (id_entry = stmf_state.stmf_luid_list.idl_head;
            id_entry; id_entry = id_entry->id_next) {
            if (bcmp(id_entry->id_data, p_id, 16) != 0)
                continue;
            for (view_entry = (stmf_view_entry_t *)
                id_entry->id_impl_specific; view_entry;
                view_entry = view_entry->ve_next) {
                iocd->stmf_obuf_max_nentries++;
                if (iocd->stmf_obuf_nentries >= n)
                    continue;
                ve->ve_ndx_valid = 1;
                ve->ve_ndx = view_entry->ve_id;
                ve->ve_lu_number_valid = 1;
                bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
                bcopy(view_entry->ve_luid->id_data, ve->ve_guid,
                    view_entry->ve_luid->id_data_size);
                if (view_entry->ve_hg->id_data[0] == '*') {
                    ve->ve_all_hosts = 1;
                } else {
                    bcopy(view_entry->ve_hg->id_data,
                        ve->ve_host_group.name,
                        view_entry->ve_hg->id_data_size);
                    ve->ve_host_group.name_size =
                        view_entry->ve_hg->id_data_size;
                }

                if (view_entry->ve_tg->id_data[0] == '*') {
                    ve->ve_all_targets = 1;
                } else {
                    bcopy(view_entry->ve_tg->id_data,
                        ve->ve_target_group.name,
                        view_entry->ve_tg->id_data_size);
                    ve->ve_target_group.name_size =
                        view_entry->ve_tg->id_data_size;
                }
                ve++;
                iocd->stmf_obuf_nentries++;
            }
            break;
        }
        mutex_exit(&stmf_state.stmf_lock);
        break;

    case STMF_IOCTL_LOAD_PP_DATA:
        if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
            ret = EACCES;
            iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
            break;
        }
        ppi = (stmf_ppioctl_data_t *)ibuf;
        if ((ppi == NULL) ||
            (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
            ret = EINVAL;
            break;
        }
        /* returned token */
        ppi_token = (uint64_t *)obuf;
        if ((ppi_token == NULL) ||
            (iocd->stmf_obuf_size < sizeof (uint64_t))) {
            ret = EINVAL;
            break;
        }
        ret = stmf_load_ppd_ioctl(ppi, ppi_token, &iocd->stmf_error);
        break;

    case STMF_IOCTL_GET_PP_DATA:
        if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
            ret = EACCES;
            iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
            break;
        }
        ppi = (stmf_ppioctl_data_t *)ibuf;
        if (ppi == NULL ||
            (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
            ret = EINVAL;
            break;
        }
        ppi_out = (stmf_ppioctl_data_t *)obuf;
        if ((ppi_out == NULL) ||
            (iocd->stmf_obuf_size < sizeof (stmf_ppioctl_data_t))) {
            ret = EINVAL;
            break;
        }
        ret = stmf_get_ppd_ioctl(ppi, ppi_out, &iocd->stmf_error);
        break;

    case STMF_IOCTL_CLEAR_PP_DATA:
        if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
            ret = EACCES;
            iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
            break;
        }
        ppi = (stmf_ppioctl_data_t *)ibuf;
        if ((ppi == NULL) ||
            (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
            ret = EINVAL;
            break;
        }
        ret = stmf_delete_ppd_ioctl(ppi);
        break;

    case STMF_IOCTL_CLEAR_TRACE:
        stmf_trace_clear();
        break;

    case STMF_IOCTL_ADD_TRACE:
        if (iocd->stmf_ibuf_size && ibuf) {
            ((uint8_t *)ibuf)[iocd->stmf_ibuf_size - 1] = 0;
            stmf_trace("\nstradm", "%s\n", ibuf);
        }
        break;

    case STMF_IOCTL_GET_TRACE_POSITION:
        if (obuf && (iocd->stmf_obuf_size > 3)) {
            mutex_enter(&trace_buf_lock);
            *((int *)obuf) = trace_buf_curndx;
            mutex_exit(&trace_buf_lock);
        } else {
            ret = EINVAL;
        }
        break;

    case STMF_IOCTL_GET_TRACE:
        if ((iocd->stmf_obuf_size == 0) || (iocd->stmf_ibuf_size < 4)) {
            ret = EINVAL;
            break;
        }
        i = *((int *)ibuf);
        if ((i > trace_buf_size) || ((i + iocd->stmf_obuf_size) >
            trace_buf_size)) {
            ret = EINVAL;
            break;
        }
        mutex_enter(&trace_buf_lock);
        bcopy(stmf_trace_buf + i, obuf, iocd->stmf_obuf_size);
        mutex_exit(&trace_buf_lock);
        break;

    default:
        ret = ENOTTY;
    }

    if (ret == 0) {
        ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
    } else if (iocd->stmf_error) {
        (void) stmf_copyout_iocdata(data, mode, iocd, obuf);
    }
    if (obuf) {
        kmem_free(obuf, iocd->stmf_obuf_size);
        obuf = NULL;
    }
    if (ibuf) {
        kmem_free(ibuf, iocd->stmf_ibuf_size);
        ibuf = NULL;
    }
    kmem_free(iocd, sizeof (stmf_iocdata_t));
    return (ret);
}

static int
stmf_get_service_state()
{
    stmf_i_local_port_t *ilport;
    stmf_i_lu_t *ilu;
    int online = 0;
    int offline = 0;
    int onlining = 0;
    int offlining = 0;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));
    for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
        ilport = ilport->ilport_next) {
        if (ilport->ilport_state == STMF_STATE_OFFLINE)
            offline++;
        else if (ilport->ilport_state == STMF_STATE_ONLINE)
            online++;
        else if (ilport->ilport_state == STMF_STATE_ONLINING)
            onlining++;
        else if (ilport->ilport_state == STMF_STATE_OFFLINING)
            offlining++;
    }

    for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
        ilu = ilu->ilu_next) {
        if (ilu->ilu_state == STMF_STATE_OFFLINE)
            offline++;
        else if (ilu->ilu_state == STMF_STATE_ONLINE)
            online++;
        else if (ilu->ilu_state == STMF_STATE_ONLINING)
            onlining++;
        else if (ilu->ilu_state == STMF_STATE_OFFLINING)
            offlining++;
    }

    if (stmf_state.stmf_service_running) {
        if (onlining)
            return (STMF_STATE_ONLINING);
        else
            return (STMF_STATE_ONLINE);
    }

    if (offlining) {
        return (STMF_STATE_OFFLINING);
    }

    return (STMF_STATE_OFFLINE);
}
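
/*
 * Changes the service state as directed by the state descriptor.  The
 * config state machine moves STMF_CONFIG_NONE -> STMF_CONFIG_INIT ->
 * STMF_CONFIG_INIT_DONE; the service itself can only be toggled
 * between online and offline, only from the exclusive-open admin node,
 * and only while the inventory is not locked by another operation.
 */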
static int
stmf_set_stmf_state(stmf_state_desc_t *std)
{
    stmf_i_local_port_t *ilport;
    stmf_i_lu_t *ilu;
    stmf_state_change_info_t ssi;
    int svc_state;

    ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
    ssi.st_additional_info = NULL;

    mutex_enter(&stmf_state.stmf_lock);
    if (!stmf_state.stmf_exclusive_open) {
        mutex_exit(&stmf_state.stmf_lock);
        return (EACCES);
    }

    if (stmf_state.stmf_inventory_locked) {
        mutex_exit(&stmf_state.stmf_lock);
        return (EBUSY);
    }

    if ((std->state != STMF_STATE_ONLINE) &&
        (std->state != STMF_STATE_OFFLINE)) {
        mutex_exit(&stmf_state.stmf_lock);
        return (EINVAL);
    }

    svc_state = stmf_get_service_state();
    if ((svc_state == STMF_STATE_OFFLINING) ||
        (svc_state == STMF_STATE_ONLINING)) {
        mutex_exit(&stmf_state.stmf_lock);
        return (EBUSY);
    }

    if (svc_state == STMF_STATE_OFFLINE) {
        if (std->config_state == STMF_CONFIG_INIT) {
            if (std->state != STMF_STATE_OFFLINE) {
                mutex_exit(&stmf_state.stmf_lock);
                return (EINVAL);
            }
            stmf_state.stmf_config_state = STMF_CONFIG_INIT;
            stmf_delete_all_ppds();
            stmf_view_clear_config();
            stmf_view_init();
            mutex_exit(&stmf_state.stmf_lock);
            return (0);
        }
        if ((stmf_state.stmf_config_state == STMF_CONFIG_INIT) ||
            (stmf_state.stmf_config_state == STMF_CONFIG_NONE)) {
            if (std->config_state != STMF_CONFIG_INIT_DONE) {
                mutex_exit(&stmf_state.stmf_lock);
                return (EINVAL);
            }
            stmf_state.stmf_config_state = STMF_CONFIG_INIT_DONE;
        }
        if (std->state == STMF_STATE_OFFLINE) {
            mutex_exit(&stmf_state.stmf_lock);
            return (0);
        }
        if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) {
            mutex_exit(&stmf_state.stmf_lock);
            return (EINVAL);
        }
        stmf_state.stmf_inventory_locked = 1;
        stmf_state.stmf_service_running = 1;
        mutex_exit(&stmf_state.stmf_lock);

        for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
            ilport = ilport->ilport_next) {
            if (ilport->ilport_prev_state != STMF_STATE_ONLINE)
                continue;
            (void) stmf_ctl(STMF_CMD_LPORT_ONLINE,
                ilport->ilport_lport, &ssi);
        }

        for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
            ilu = ilu->ilu_next) {
            if (ilu->ilu_prev_state != STMF_STATE_ONLINE)
                continue;
            (void) stmf_ctl(STMF_CMD_LU_ONLINE, ilu->ilu_lu, &ssi);
        }
        mutex_enter(&stmf_state.stmf_lock);
        stmf_state.stmf_inventory_locked = 0;
        mutex_exit(&stmf_state.stmf_lock);
        return (0);
    }

    /* svc_state is STMF_STATE_ONLINE here */
    if ((std->state != STMF_STATE_OFFLINE) ||
        (std->config_state == STMF_CONFIG_INIT)) {
        mutex_exit(&stmf_state.stmf_lock);
        return (EACCES);
    }

    stmf_state.stmf_inventory_locked = 1;
    stmf_state.stmf_service_running = 0;

    mutex_exit(&stmf_state.stmf_lock);
    for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
        ilport = ilport->ilport_next) {
        if (ilport->ilport_state != STMF_STATE_ONLINE)
            continue;
        (void) stmf_ctl(STMF_CMD_LPORT_OFFLINE,
            ilport->ilport_lport, &ssi);
    }

    for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
        ilu = ilu->ilu_next) {
        if (ilu->ilu_state != STMF_STATE_ONLINE)
            continue;
        (void) stmf_ctl(STMF_CMD_LU_OFFLINE, ilu->ilu_lu, &ssi);
    }
    mutex_enter(&stmf_state.stmf_lock);
    stmf_state.stmf_inventory_locked = 0;
    mutex_exit(&stmf_state.stmf_lock);
    return (0);
}

static int
stmf_get_stmf_state(stmf_state_desc_t *std)
{
    mutex_enter(&stmf_state.stmf_lock);
    std->state = stmf_get_service_state();
    std->config_state = stmf_state.stmf_config_state;
    mutex_exit(&stmf_state.stmf_lock);

    return (0);
}

/*
 * Handles a registration message from pppt for a logical unit.
 */
stmf_status_t
stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg, uint32_t type)
{
    stmf_i_lu_provider_t *ilp;
    stmf_lu_provider_t *lp;
    mutex_enter(&stmf_state.stmf_lock);
    for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
        if (strcmp(msg->icrl_lu_provider_name,
            ilp->ilp_lp->lp_name) == 0) {
            lp = ilp->ilp_lp;
            mutex_exit(&stmf_state.stmf_lock);
            lp->lp_proxy_msg(msg->icrl_lun_id, msg->icrl_cb_arg,
                msg->icrl_cb_arg_len, type);
            return (STMF_SUCCESS);
        }
    }
    mutex_exit(&stmf_state.stmf_lock);
    return (STMF_SUCCESS);
}

/*
 * Handles a de-registration message from pppt for a logical unit.
 */
stmf_status_t
stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg)
{
    stmf_i_lu_provider_t *ilp;
    stmf_lu_provider_t *lp;
    mutex_enter(&stmf_state.stmf_lock);
    for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
        if (strcmp(msg->icrl_lu_provider_name,
            ilp->ilp_lp->lp_name) == 0) {
            lp = ilp->ilp_lp;
            mutex_exit(&stmf_state.stmf_lock);
            lp->lp_proxy_msg(msg->icrl_lun_id, NULL, 0,
                STMF_MSG_LU_DEREGISTER);
            return (STMF_SUCCESS);
        }
    }
    mutex_exit(&stmf_state.stmf_lock);
    return (STMF_SUCCESS);
}

/*
 * Helper function to find a task that matches a task_msgid.
 */
scsi_task_t *
find_task_from_msgid(uint8_t *lu_id, stmf_ic_msgid_t task_msgid)
{
    stmf_i_lu_t *ilu;
    stmf_i_scsi_task_t *itask;

    mutex_enter(&stmf_state.stmf_lock);
    for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
        if (bcmp(lu_id, ilu->ilu_lu->lu_id->ident, 16) == 0) {
            break;
        }
    }

    if (ilu == NULL) {
        mutex_exit(&stmf_state.stmf_lock);
        return (NULL);
    }

    mutex_enter(&ilu->ilu_task_lock);
    for (itask = ilu->ilu_tasks; itask != NULL;
        itask = itask->itask_lu_next) {
        if (itask->itask_flags & (ITASK_IN_FREE_LIST |
            ITASK_BEING_ABORTED)) {
            continue;
        }
        if (itask->itask_proxy_msg_id == task_msgid) {
            break;
        }
    }
    mutex_exit(&ilu->ilu_task_lock);
    mutex_exit(&stmf_state.stmf_lock);

    if (itask != NULL) {
        return (itask->itask_task);
    } else {
        /* task not found; likely already aborted */
        return (NULL);
    }
}

/*
 * Message received from pppt/ic.
 */
stmf_status_t
stmf_msg_rx(stmf_ic_msg_t *msg)
{
    mutex_enter(&stmf_state.stmf_lock);
    if (stmf_state.stmf_alua_state != 1) {
        mutex_exit(&stmf_state.stmf_lock);
        cmn_err(CE_WARN, "stmf alua state is disabled");
        ic_msg_free(msg);
        return (STMF_FAILURE);
    }
    mutex_exit(&stmf_state.stmf_lock);

    switch (msg->icm_msg_type) {
    case STMF_ICM_REGISTER_LUN:
        (void) stmf_ic_lu_reg(
            (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
            STMF_MSG_LU_REGISTER);
        break;
    case STMF_ICM_LUN_ACTIVE:
        (void) stmf_ic_lu_reg(
            (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
            STMF_MSG_LU_ACTIVE);
        break;
    case STMF_ICM_DEREGISTER_LUN:
        (void) stmf_ic_lu_dereg(
            (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg);
        break;
    case STMF_ICM_SCSI_DATA:
        (void) stmf_ic_rx_scsi_data(
            (stmf_ic_scsi_data_msg_t *)msg->icm_msg);
        break;
    case STMF_ICM_SCSI_STATUS:
        (void) stmf_ic_rx_scsi_status(
            (stmf_ic_scsi_status_msg_t *)msg->icm_msg);
        break;
    case STMF_ICM_STATUS:
        (void) stmf_ic_rx_status(
            (stmf_ic_status_msg_t *)msg->icm_msg);
        break;
    default:
        cmn_err(CE_WARN, "unknown message received %d",
            msg->icm_msg_type);
        ic_msg_free(msg);
        return (STMF_FAILURE);
    }
    ic_msg_free(msg);
    return (STMF_SUCCESS);
}

stmf_status_t
stmf_ic_rx_status(stmf_ic_status_msg_t *msg)
{
    stmf_i_local_port_t *ilport;

    if (msg->ics_msg_type != STMF_ICM_REGISTER_PROXY_PORT) {
        /* for now, ignore other message status */
        return (STMF_SUCCESS);
    }

    if (msg->ics_status != STMF_SUCCESS) {
        return (STMF_SUCCESS);
    }

    mutex_enter(&stmf_state.stmf_lock);
    for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
        ilport = ilport->ilport_next) {
        if (msg->ics_msgid == ilport->ilport_reg_msgid) {
            ilport->ilport_proxy_registered = 1;
            break;
        }
    }
    mutex_exit(&stmf_state.stmf_lock);
    return (STMF_SUCCESS);
}

/*
 * Handles a SCSI status message from pppt.
 */
stmf_status_t
stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg)
{
    scsi_task_t *task;

    /* is this a task management command? */
    if (msg->icss_task_msgid & MSG_ID_TM_BIT) {
        return (STMF_SUCCESS);
    }

    task = find_task_from_msgid(msg->icss_lun_id, msg->icss_task_msgid);

    if (task == NULL) {
        return (STMF_SUCCESS);
    }

    task->task_scsi_status = msg->icss_status;
    task->task_sense_data = msg->icss_sense;
    task->task_sense_length = msg->icss_sense_len;
    (void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);

    return (STMF_SUCCESS);
}

/*
 * Handles a SCSI data message from pppt.
 */
stmf_status_t
stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg)
{
    stmf_i_scsi_task_t *itask;
    scsi_task_t *task;
    stmf_xfer_data_t *xd = NULL;
    stmf_data_buf_t *dbuf;
    uint32_t sz, minsz, xd_sz, asz;

    /* is this a task management command? */
    if (msg->icsd_task_msgid & MSG_ID_TM_BIT) {
        return (STMF_SUCCESS);
    }

    task = find_task_from_msgid(msg->icsd_lun_id, msg->icsd_task_msgid);
    if (task == NULL) {
        stmf_ic_msg_t *ic_xfer_done_msg = NULL;
        static uint64_t data_msg_id;
        stmf_status_t ic_ret = STMF_FAILURE;
        mutex_enter(&stmf_state.stmf_lock);
        data_msg_id = stmf_proxy_msg_id++;
        mutex_exit(&stmf_state.stmf_lock);
        /*
         * send xfer done status to pppt
         * for now, set the session id to 0 as we cannot
         * ascertain it since we cannot find the task
         */
        ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
            msg->icsd_task_msgid, 0, STMF_FAILURE, data_msg_id);
        if (ic_xfer_done_msg) {
            ic_ret = ic_tx_msg(ic_xfer_done_msg);
            if (ic_ret != STMF_IC_MSG_SUCCESS) {
                cmn_err(CE_WARN, "unable to xmit proxy msg");
            }
        }
        return (STMF_FAILURE);
    }

    itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
    dbuf = itask->itask_proxy_dbuf;

    task->task_cmd_xfer_length = msg->icsd_data_len;

    if (task->task_additional_flags &
        TASK_AF_NO_EXPECTED_XFER_LENGTH) {
        task->task_expected_xfer_length =
            task->task_cmd_xfer_length;
    }

    sz = min(task->task_expected_xfer_length,
        task->task_cmd_xfer_length);

    xd_sz = msg->icsd_data_len;
    asz = xd_sz + sizeof (*xd) - 4;
    xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);

    if (xd == NULL) {
        stmf_abort(STMF_QUEUE_TASK_ABORT, task,
            STMF_ALLOC_FAILURE, NULL);
        return (STMF_FAILURE);
    }

    xd->alloc_size = asz;
    xd->size_left = xd_sz;
    bcopy(msg->icsd_data, xd->buf, xd_sz);

    sz = min(sz, xd->size_left);
    xd->size_left = sz;
    minsz = min(512, sz);

    if (dbuf == NULL)
        dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
    if (dbuf == NULL) {
        kmem_free(xd, xd->alloc_size);
        stmf_abort(STMF_QUEUE_TASK_ABORT, task,
            STMF_ALLOC_FAILURE, NULL);
        return (STMF_FAILURE);
    }
    dbuf->db_lu_private = xd;
    stmf_xd_to_dbuf(dbuf);

    dbuf->db_flags = DB_DIRECTION_TO_RPORT;
    (void) stmf_xfer_data(task, dbuf, 0);
    return (STMF_SUCCESS);
}

stmf_status_t
stmf_proxy_scsi_cmd(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
    stmf_i_scsi_task_t *itask =
        (stmf_i_scsi_task_t *)task->task_stmf_private;
    stmf_i_local_port_t *ilport =
        (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
    stmf_ic_msg_t *ic_cmd_msg;
    stmf_ic_msg_status_t ic_ret;
    stmf_status_t ret = STMF_FAILURE;

    if (stmf_state.stmf_alua_state != 1) {
        cmn_err(CE_WARN, "stmf alua state is disabled");
        return (STMF_FAILURE);
    }

    if (ilport->ilport_proxy_registered == 0) {
        return (STMF_FAILURE);
    }

    mutex_enter(&stmf_state.stmf_lock);
    itask->itask_proxy_msg_id = stmf_proxy_msg_id++;
    mutex_exit(&stmf_state.stmf_lock);
    itask->itask_proxy_dbuf = dbuf;

    /*
     * stmf will now take over the task handling for this task
     * but it still needs to be treated differently from other
     * default handled tasks, hence the ITASK_PROXY_TASK.
     * If this is a task management function, we're really just
     * duping the command to the peer. Set the TM bit so that
     * we can recognize this on return since we won't be completing
     * the proxied task in that case.
     */
    if (task->task_mgmt_function) {
        itask->itask_proxy_msg_id |= MSG_ID_TM_BIT;
    } else {
        itask->itask_flags |= ITASK_DEFAULT_HANDLING | ITASK_PROXY_TASK;
    }
    if (dbuf) {
        ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
            task, dbuf->db_data_size, dbuf->db_sglist[0].seg_addr,
            itask->itask_proxy_msg_id);
    } else {
        ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
            task, 0, NULL, itask->itask_proxy_msg_id);
    }
    if (ic_cmd_msg) {
        ic_ret = ic_tx_msg(ic_cmd_msg);
        if (ic_ret == STMF_IC_MSG_SUCCESS) {
            ret = STMF_SUCCESS;
        }
    }
    return (ret);
}

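/*
 * The pppt (proxy port provider) module is loaded lazily the first
 * time ALUA is enabled.  Each imported entry point is looked up with
 * ddi_modsym() and cached in the corresponding ic_* function pointer;
 * a lookup is skipped when the pointer is already set, so repeated
 * calls are cheap and idempotent.
 */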
stmf_status_t
pppt_modload()
{
    int error;

    if (pppt_mod == NULL && ((pppt_mod =
        ddi_modopen("drv/pppt", KRTLD_MODE_FIRST, &error)) == NULL)) {
        cmn_err(CE_WARN, "Unable to load pppt");
        return (STMF_FAILURE);
    }

    if (ic_reg_port_msg_alloc == NULL && ((ic_reg_port_msg_alloc =
        (stmf_ic_reg_port_msg_alloc_func_t)
        ddi_modsym(pppt_mod, "stmf_ic_reg_port_msg_alloc",
        &error)) == NULL)) {
        cmn_err(CE_WARN,
            "Unable to find symbol - stmf_ic_reg_port_msg_alloc");
        return (STMF_FAILURE);
    }

    if (ic_dereg_port_msg_alloc == NULL && ((ic_dereg_port_msg_alloc =
        (stmf_ic_dereg_port_msg_alloc_func_t)
        ddi_modsym(pppt_mod, "stmf_ic_dereg_port_msg_alloc",
        &error)) == NULL)) {
        cmn_err(CE_WARN,
            "Unable to find symbol - stmf_ic_dereg_port_msg_alloc");
        return (STMF_FAILURE);
    }

    if (ic_reg_lun_msg_alloc == NULL && ((ic_reg_lun_msg_alloc =
        (stmf_ic_reg_lun_msg_alloc_func_t)
        ddi_modsym(pppt_mod, "stmf_ic_reg_lun_msg_alloc",
        &error)) == NULL)) {
        cmn_err(CE_WARN,
            "Unable to find symbol - stmf_ic_reg_lun_msg_alloc");
        return (STMF_FAILURE);
    }

    if (ic_lun_active_msg_alloc == NULL && ((ic_lun_active_msg_alloc =
        (stmf_ic_lun_active_msg_alloc_func_t)
        ddi_modsym(pppt_mod, "stmf_ic_lun_active_msg_alloc",
        &error)) == NULL)) {
        cmn_err(CE_WARN,
            "Unable to find symbol - stmf_ic_lun_active_msg_alloc");
        return (STMF_FAILURE);
    }

    if (ic_dereg_lun_msg_alloc == NULL && ((ic_dereg_lun_msg_alloc =
        (stmf_ic_dereg_lun_msg_alloc_func_t)
        ddi_modsym(pppt_mod, "stmf_ic_dereg_lun_msg_alloc",
        &error)) == NULL)) {
        cmn_err(CE_WARN,
            "Unable to find symbol - stmf_ic_dereg_lun_msg_alloc");
        return (STMF_FAILURE);
    }

    if (ic_scsi_cmd_msg_alloc == NULL && ((ic_scsi_cmd_msg_alloc =
        (stmf_ic_scsi_cmd_msg_alloc_func_t)
        ddi_modsym(pppt_mod, "stmf_ic_scsi_cmd_msg_alloc",
        &error)) == NULL)) {
        cmn_err(CE_WARN,
            "Unable to find symbol - stmf_ic_scsi_cmd_msg_alloc");
        return (STMF_FAILURE);
    }

    if (ic_scsi_data_xfer_done_msg_alloc == NULL &&
        ((ic_scsi_data_xfer_done_msg_alloc =
        (stmf_ic_scsi_data_xfer_done_msg_alloc_func_t)
        ddi_modsym(pppt_mod, "stmf_ic_scsi_data_xfer_done_msg_alloc",
        &error)) == NULL)) {
        cmn_err(CE_WARN,
            "Unable to find symbol - "
            "stmf_ic_scsi_data_xfer_done_msg_alloc");
        return (STMF_FAILURE);
    }

    if (ic_session_reg_msg_alloc == NULL &&
        ((ic_session_reg_msg_alloc =
        (stmf_ic_session_create_msg_alloc_func_t)
        ddi_modsym(pppt_mod, "stmf_ic_session_create_msg_alloc",
        &error)) == NULL)) {
        cmn_err(CE_WARN,
            "Unable to find symbol - "
            "stmf_ic_session_create_msg_alloc");
        return (STMF_FAILURE);
    }

    if (ic_session_dereg_msg_alloc == NULL &&
        ((ic_session_dereg_msg_alloc =
        (stmf_ic_session_destroy_msg_alloc_func_t)
        ddi_modsym(pppt_mod, "stmf_ic_session_destroy_msg_alloc",
        &error)) == NULL)) {
        cmn_err(CE_WARN,
            "Unable to find symbol - "
            "stmf_ic_session_destroy_msg_alloc");
        return (STMF_FAILURE);
    }

    if (ic_tx_msg == NULL && ((ic_tx_msg =
        (stmf_ic_tx_msg_func_t)ddi_modsym(pppt_mod, "stmf_ic_tx_msg",
        &error)) == NULL)) {
        cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_tx_msg");
        return (STMF_FAILURE);
    }

    if (ic_msg_free == NULL && ((ic_msg_free =
        (stmf_ic_msg_free_func_t)ddi_modsym(pppt_mod, "stmf_ic_msg_free",
        &error)) == NULL)) {
        cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_msg_free");
        return (STMF_FAILURE);
    }
    return (STMF_SUCCESS);
}
(STMF_FAILURE); 1913 } 1914 1915 if (ic_session_dereg_msg_alloc == NULL && 1916 ((ic_session_dereg_msg_alloc = 1917 (stmf_ic_session_destroy_msg_alloc_func_t) 1918 ddi_modsym(pppt_mod, "stmf_ic_session_destroy_msg_alloc", 1919 &error)) == NULL)) { 1920 cmn_err(CE_WARN, 1921 "Unable to find symbol -" 1922 "stmf_ic_session_destroy_msg_alloc"); 1923 return (STMF_FAILURE); 1924 } 1925 1926 if (ic_tx_msg == NULL && ((ic_tx_msg = 1927 (stmf_ic_tx_msg_func_t)ddi_modsym(pppt_mod, "stmf_ic_tx_msg", 1928 &error)) == NULL)) { 1929 cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_tx_msg"); 1930 return (STMF_FAILURE); 1931 } 1932 1933 if (ic_msg_free == NULL && ((ic_msg_free = 1934 (stmf_ic_msg_free_func_t)ddi_modsym(pppt_mod, "stmf_ic_msg_free", 1935 &error)) == NULL)) { 1936 cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_msg_free"); 1937 return (STMF_FAILURE); 1938 } 1939 return (STMF_SUCCESS); 1940 } 1941 1942 static void 1943 stmf_get_alua_state(stmf_alua_state_desc_t *alua_state) 1944 { 1945 mutex_enter(&stmf_state.stmf_lock); 1946 alua_state->alua_node = stmf_state.stmf_alua_node; 1947 alua_state->alua_state = stmf_state.stmf_alua_state; 1948 mutex_exit(&stmf_state.stmf_lock); 1949 } 1950 1951 1952 static int 1953 stmf_set_alua_state(stmf_alua_state_desc_t *alua_state) 1954 { 1955 stmf_i_local_port_t *ilport; 1956 stmf_i_lu_t *ilu; 1957 stmf_lu_t *lu; 1958 stmf_ic_msg_status_t ic_ret; 1959 stmf_ic_msg_t *ic_reg_lun, *ic_reg_port; 1960 stmf_local_port_t *lport; 1961 int ret = 0; 1962 1963 if (alua_state->alua_state > 1 || alua_state->alua_node > 1) { 1964 return (EINVAL); 1965 } 1966 1967 mutex_enter(&stmf_state.stmf_lock); 1968 if (alua_state->alua_state == 1) { 1969 if (pppt_modload() == STMF_FAILURE) { 1970 ret = EIO; 1971 goto err; 1972 } 1973 if (alua_state->alua_node != 0) { 1974 /* reset existing rtpids to new base */ 1975 stmf_rtpid_counter = 255; 1976 } 1977 stmf_state.stmf_alua_node = alua_state->alua_node; 1978 stmf_state.stmf_alua_state = 1; 1979 /* register existing local ports with ppp */ 1980 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL; 1981 ilport = ilport->ilport_next) { 1982 /* skip standby ports and non-alua participants */ 1983 if (ilport->ilport_standby == 1 || 1984 ilport->ilport_alua == 0) { 1985 continue; 1986 } 1987 if (alua_state->alua_node != 0) { 1988 ilport->ilport_rtpid = 1989 atomic_add_16_nv(&stmf_rtpid_counter, 1); 1990 } 1991 lport = ilport->ilport_lport; 1992 ic_reg_port = ic_reg_port_msg_alloc( 1993 lport->lport_id, ilport->ilport_rtpid, 1994 0, NULL, stmf_proxy_msg_id); 1995 if (ic_reg_port) { 1996 ic_ret = ic_tx_msg(ic_reg_port); 1997 if (ic_ret == STMF_IC_MSG_SUCCESS) { 1998 ilport->ilport_reg_msgid = 1999 stmf_proxy_msg_id++; 2000 } else { 2001 cmn_err(CE_WARN, 2002 "error on port registration " 2003 "port - %s", 2004 ilport->ilport_kstat_tgt_name); 2005 } 2006 } 2007 } 2008 /* register existing logical units */ 2009 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; 2010 ilu = ilu->ilu_next) { 2011 if (ilu->ilu_access != STMF_LU_ACTIVE) { 2012 continue; 2013 } 2014 /* register with proxy module */ 2015 lu = ilu->ilu_lu; 2016 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 && 2017 lu->lu_lp->lp_alua_support) { 2018 ilu->ilu_alua = 1; 2019 /* allocate the register message */ 2020 ic_reg_lun = ic_reg_lun_msg_alloc( 2021 lu->lu_id->ident, lu->lu_lp->lp_name, 2022 lu->lu_proxy_reg_arg_len, 2023 (uint8_t *)lu->lu_proxy_reg_arg, 2024 stmf_proxy_msg_id); 2025 /* send the message */ 2026 if (ic_reg_lun) { 2027 ic_ret = ic_tx_msg(ic_reg_lun); 
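                    /*
                     * As with the port registrations above, the
                     * shared message id is advanced only when
                     * the transmit actually succeeds.
                     */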
                    if (ic_ret == STMF_IC_MSG_SUCCESS) {
                        stmf_proxy_msg_id++;
                    }
                }
            }
        }
    } else {
        stmf_state.stmf_alua_state = 0;
    }

err:
    mutex_exit(&stmf_state.stmf_lock);
    return (ret);
}


typedef struct {
    void    *bp;    /* back pointer from internal struct to main struct */
    int     alloc_size;
} __istmf_t;

typedef struct {
    __istmf_t   *fp;    /* Framework private */
    void        *cp;    /* Caller private */
    void        *ss;    /* struct specific */
} __stmf_t;

static struct {
    int shared;
    int fw_private;
} stmf_sizes[] = { { 0, 0 },
    { GET_STRUCT_SIZE(stmf_lu_provider_t),
        GET_STRUCT_SIZE(stmf_i_lu_provider_t) },
    { GET_STRUCT_SIZE(stmf_port_provider_t),
        GET_STRUCT_SIZE(stmf_i_port_provider_t) },
    { GET_STRUCT_SIZE(stmf_local_port_t),
        GET_STRUCT_SIZE(stmf_i_local_port_t) },
    { GET_STRUCT_SIZE(stmf_lu_t),
        GET_STRUCT_SIZE(stmf_i_lu_t) },
    { GET_STRUCT_SIZE(stmf_scsi_session_t),
        GET_STRUCT_SIZE(stmf_i_scsi_session_t) },
    { GET_STRUCT_SIZE(scsi_task_t),
        GET_STRUCT_SIZE(stmf_i_scsi_task_t) },
    { GET_STRUCT_SIZE(stmf_data_buf_t),
        GET_STRUCT_SIZE(__istmf_t) },
    { GET_STRUCT_SIZE(stmf_dbuf_store_t),
        GET_STRUCT_SIZE(__istmf_t) }
};

void *
stmf_alloc(stmf_struct_id_t struct_id, int additional_size, int flags)
{
    int stmf_size;
    int kmem_flag;
    __stmf_t *sh;

    if ((struct_id == 0) || (struct_id >= STMF_MAX_STRUCT_IDS))
        return (NULL);

    if ((curthread->t_flag & T_INTR_THREAD) || (flags & AF_FORCE_NOSLEEP)) {
        kmem_flag = KM_NOSLEEP;
    } else {
        kmem_flag = KM_SLEEP;
    }

    additional_size = (additional_size + 7) & (~7);
    stmf_size = stmf_sizes[struct_id].shared +
        stmf_sizes[struct_id].fw_private + additional_size;

    sh = (__stmf_t *)kmem_zalloc(stmf_size, kmem_flag);

    if (sh == NULL)
        return (NULL);

    /*
     * The segment ordering here is a deliberate workaround. The
     * original layout (framework private data followed by caller
     * private data) cannot accommodate an sglist in the caller
     * private data, so the two segments are re-ordered with the
     * caller private data first. A better solution would be a
     * dedicated interface for allocating the sglist; until such an
     * interface exists, this ordering must be kept as is.
     */
    sh->cp = GET_BYTE_OFFSET(sh, stmf_sizes[struct_id].shared);
    sh->fp = (__istmf_t *)GET_BYTE_OFFSET(sh,
        stmf_sizes[struct_id].shared + additional_size);

    sh->fp->bp = sh;
    /* Just store the total size instead of storing additional size */
    sh->fp->alloc_size = stmf_size;

    return (sh);
}

void
stmf_free(void *ptr)
{
    __stmf_t *sh = (__stmf_t *)ptr;

    /*
     * So far we don't need any struct-specific processing. If such
     * a need ever arises, store the struct id in the framework
     * private section and retrieve it here as sh->fp->struct_id.
     */
    kmem_free(ptr, sh->fp->alloc_size);
}

/*
 * Given a pointer to stmf_lu_t, verifies if this lu is registered with the
 * framework and returns a pointer to framework private data for the lu.
 * Returns NULL if the lu was not found.
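 * The caller must hold stmf_state.stmf_lock (asserted below); the lock
 * also keeps the returned ilu from being unlinked while it is held.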
2143 */ 2144 stmf_i_lu_t * 2145 stmf_lookup_lu(stmf_lu_t *lu) 2146 { 2147 stmf_i_lu_t *ilu; 2148 ASSERT(mutex_owned(&stmf_state.stmf_lock)); 2149 2150 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) { 2151 if (ilu->ilu_lu == lu) 2152 return (ilu); 2153 } 2154 return (NULL); 2155 } 2156 2157 /* 2158 * Given a pointer to stmf_local_port_t, verifies if this lport is registered 2159 * with the framework and returns a pointer to framework private data for 2160 * the lport. 2161 * Returns NULL if the lport was not found. 2162 */ 2163 stmf_i_local_port_t * 2164 stmf_lookup_lport(stmf_local_port_t *lport) 2165 { 2166 stmf_i_local_port_t *ilport; 2167 ASSERT(mutex_owned(&stmf_state.stmf_lock)); 2168 2169 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL; 2170 ilport = ilport->ilport_next) { 2171 if (ilport->ilport_lport == lport) 2172 return (ilport); 2173 } 2174 return (NULL); 2175 } 2176 2177 stmf_status_t 2178 stmf_register_lu_provider(stmf_lu_provider_t *lp) 2179 { 2180 stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private; 2181 stmf_pp_data_t *ppd; 2182 uint32_t cb_flags; 2183 2184 if (lp->lp_lpif_rev != LPIF_REV_1 && lp->lp_lpif_rev != LPIF_REV_2) 2185 return (STMF_FAILURE); 2186 2187 mutex_enter(&stmf_state.stmf_lock); 2188 ilp->ilp_next = stmf_state.stmf_ilplist; 2189 stmf_state.stmf_ilplist = ilp; 2190 stmf_state.stmf_nlps++; 2191 2192 /* See if we need to do a callback */ 2193 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) { 2194 if (strcmp(ppd->ppd_name, lp->lp_name) == 0) { 2195 break; 2196 } 2197 } 2198 if ((ppd == NULL) || (ppd->ppd_nv == NULL)) { 2199 goto rlp_bail_out; 2200 } 2201 ilp->ilp_ppd = ppd; 2202 ppd->ppd_provider = ilp; 2203 if (lp->lp_cb == NULL) 2204 goto rlp_bail_out; 2205 ilp->ilp_cb_in_progress = 1; 2206 cb_flags = STMF_PCB_PREG_COMPLETE; 2207 if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) 2208 cb_flags |= STMF_PCB_STMF_ONLINING; 2209 mutex_exit(&stmf_state.stmf_lock); 2210 lp->lp_cb(lp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags); 2211 mutex_enter(&stmf_state.stmf_lock); 2212 ilp->ilp_cb_in_progress = 0; 2213 2214 rlp_bail_out: 2215 mutex_exit(&stmf_state.stmf_lock); 2216 2217 return (STMF_SUCCESS); 2218 } 2219 2220 stmf_status_t 2221 stmf_deregister_lu_provider(stmf_lu_provider_t *lp) 2222 { 2223 stmf_i_lu_provider_t **ppilp; 2224 stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private; 2225 2226 mutex_enter(&stmf_state.stmf_lock); 2227 if (ilp->ilp_nlus || ilp->ilp_cb_in_progress) { 2228 mutex_exit(&stmf_state.stmf_lock); 2229 return (STMF_BUSY); 2230 } 2231 for (ppilp = &stmf_state.stmf_ilplist; *ppilp != NULL; 2232 ppilp = &((*ppilp)->ilp_next)) { 2233 if (*ppilp == ilp) { 2234 *ppilp = ilp->ilp_next; 2235 stmf_state.stmf_nlps--; 2236 if (ilp->ilp_ppd) { 2237 ilp->ilp_ppd->ppd_provider = NULL; 2238 ilp->ilp_ppd = NULL; 2239 } 2240 mutex_exit(&stmf_state.stmf_lock); 2241 return (STMF_SUCCESS); 2242 } 2243 } 2244 mutex_exit(&stmf_state.stmf_lock); 2245 return (STMF_NOT_FOUND); 2246 } 2247 2248 stmf_status_t 2249 stmf_register_port_provider(stmf_port_provider_t *pp) 2250 { 2251 stmf_i_port_provider_t *ipp = 2252 (stmf_i_port_provider_t *)pp->pp_stmf_private; 2253 stmf_pp_data_t *ppd; 2254 uint32_t cb_flags; 2255 2256 if (pp->pp_portif_rev != PORTIF_REV_1) 2257 return (STMF_FAILURE); 2258 2259 mutex_enter(&stmf_state.stmf_lock); 2260 ipp->ipp_next = stmf_state.stmf_ipplist; 2261 stmf_state.stmf_ipplist = ipp; 2262 stmf_state.stmf_npps++; 2263 /* See if we need to do a 
callback */ 2264 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) { 2265 if (strcmp(ppd->ppd_name, pp->pp_name) == 0) { 2266 break; 2267 } 2268 } 2269 if ((ppd == NULL) || (ppd->ppd_nv == NULL)) { 2270 goto rpp_bail_out; 2271 } 2272 ipp->ipp_ppd = ppd; 2273 ppd->ppd_provider = ipp; 2274 if (pp->pp_cb == NULL) 2275 goto rpp_bail_out; 2276 ipp->ipp_cb_in_progress = 1; 2277 cb_flags = STMF_PCB_PREG_COMPLETE; 2278 if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) 2279 cb_flags |= STMF_PCB_STMF_ONLINING; 2280 mutex_exit(&stmf_state.stmf_lock); 2281 pp->pp_cb(pp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags); 2282 mutex_enter(&stmf_state.stmf_lock); 2283 ipp->ipp_cb_in_progress = 0; 2284 2285 rpp_bail_out: 2286 mutex_exit(&stmf_state.stmf_lock); 2287 2288 return (STMF_SUCCESS); 2289 } 2290 2291 stmf_status_t 2292 stmf_deregister_port_provider(stmf_port_provider_t *pp) 2293 { 2294 stmf_i_port_provider_t *ipp = 2295 (stmf_i_port_provider_t *)pp->pp_stmf_private; 2296 stmf_i_port_provider_t **ppipp; 2297 2298 mutex_enter(&stmf_state.stmf_lock); 2299 if (ipp->ipp_npps || ipp->ipp_cb_in_progress) { 2300 mutex_exit(&stmf_state.stmf_lock); 2301 return (STMF_BUSY); 2302 } 2303 for (ppipp = &stmf_state.stmf_ipplist; *ppipp != NULL; 2304 ppipp = &((*ppipp)->ipp_next)) { 2305 if (*ppipp == ipp) { 2306 *ppipp = ipp->ipp_next; 2307 stmf_state.stmf_npps--; 2308 if (ipp->ipp_ppd) { 2309 ipp->ipp_ppd->ppd_provider = NULL; 2310 ipp->ipp_ppd = NULL; 2311 } 2312 mutex_exit(&stmf_state.stmf_lock); 2313 return (STMF_SUCCESS); 2314 } 2315 } 2316 mutex_exit(&stmf_state.stmf_lock); 2317 return (STMF_NOT_FOUND); 2318 } 2319 2320 int 2321 stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token, 2322 uint32_t *err_ret) 2323 { 2324 stmf_i_port_provider_t *ipp; 2325 stmf_i_lu_provider_t *ilp; 2326 stmf_pp_data_t *ppd; 2327 nvlist_t *nv; 2328 int s; 2329 int ret; 2330 2331 *err_ret = 0; 2332 2333 if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) { 2334 return (EINVAL); 2335 } 2336 2337 mutex_enter(&stmf_state.stmf_lock); 2338 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) { 2339 if (ppi->ppi_lu_provider) { 2340 if (!ppd->ppd_lu_provider) 2341 continue; 2342 } else if (ppi->ppi_port_provider) { 2343 if (!ppd->ppd_port_provider) 2344 continue; 2345 } 2346 if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0) 2347 break; 2348 } 2349 2350 if (ppd == NULL) { 2351 /* New provider */ 2352 s = strlen(ppi->ppi_name); 2353 if (s > 254) { 2354 mutex_exit(&stmf_state.stmf_lock); 2355 return (EINVAL); 2356 } 2357 s += sizeof (stmf_pp_data_t) - 7; 2358 2359 ppd = kmem_zalloc(s, KM_NOSLEEP); 2360 if (ppd == NULL) { 2361 mutex_exit(&stmf_state.stmf_lock); 2362 return (ENOMEM); 2363 } 2364 ppd->ppd_alloc_size = s; 2365 (void) strcpy(ppd->ppd_name, ppi->ppi_name); 2366 2367 /* See if this provider already exists */ 2368 if (ppi->ppi_lu_provider) { 2369 ppd->ppd_lu_provider = 1; 2370 for (ilp = stmf_state.stmf_ilplist; ilp != NULL; 2371 ilp = ilp->ilp_next) { 2372 if (strcmp(ppi->ppi_name, 2373 ilp->ilp_lp->lp_name) == 0) { 2374 ppd->ppd_provider = ilp; 2375 ilp->ilp_ppd = ppd; 2376 break; 2377 } 2378 } 2379 } else { 2380 ppd->ppd_port_provider = 1; 2381 for (ipp = stmf_state.stmf_ipplist; ipp != NULL; 2382 ipp = ipp->ipp_next) { 2383 if (strcmp(ppi->ppi_name, 2384 ipp->ipp_pp->pp_name) == 0) { 2385 ppd->ppd_provider = ipp; 2386 ipp->ipp_ppd = ppd; 2387 break; 2388 } 2389 } 2390 } 2391 2392 /* Link this ppd in */ 2393 ppd->ppd_next = stmf_state.stmf_ppdlist; 2394 
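        /*
         * Second half of the head insertion; the ppd list is only
         * walked or modified under stmf_lock, which is still held.
         */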
        stmf_state.stmf_ppdlist = ppd;
    }

    /*
     * The user is requesting that the token be checked. If there
     * was another set after the user's get, it's an error.
     */
    if (ppi->ppi_token_valid) {
        if (ppi->ppi_token != ppd->ppd_token) {
            *err_ret = STMF_IOCERR_PPD_UPDATED;
            mutex_exit(&stmf_state.stmf_lock);
            return (EINVAL);
        }
    }

    if ((ret = nvlist_unpack((char *)ppi->ppi_data,
        (size_t)ppi->ppi_data_size, &nv, KM_NOSLEEP)) != 0) {
        mutex_exit(&stmf_state.stmf_lock);
        return (ret);
    }

    /* Free any existing list and add this one to the ppd */
    if (ppd->ppd_nv)
        nvlist_free(ppd->ppd_nv);
    ppd->ppd_nv = nv;

    /* set the token for writes */
    ppd->ppd_token++;
    /* return token to caller */
    if (ppi_token) {
        *ppi_token = ppd->ppd_token;
    }

    /* If there is a provider registered, do the notifications */
    if (ppd->ppd_provider) {
        uint32_t cb_flags = 0;

        if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
            cb_flags |= STMF_PCB_STMF_ONLINING;
        if (ppi->ppi_lu_provider) {
            ilp = (stmf_i_lu_provider_t *)ppd->ppd_provider;
            if (ilp->ilp_lp->lp_cb == NULL)
                goto bail_out;
            ilp->ilp_cb_in_progress = 1;
            mutex_exit(&stmf_state.stmf_lock);
            ilp->ilp_lp->lp_cb(ilp->ilp_lp,
                STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
            mutex_enter(&stmf_state.stmf_lock);
            ilp->ilp_cb_in_progress = 0;
        } else {
            ipp = (stmf_i_port_provider_t *)ppd->ppd_provider;
            if (ipp->ipp_pp->pp_cb == NULL)
                goto bail_out;
            ipp->ipp_cb_in_progress = 1;
            mutex_exit(&stmf_state.stmf_lock);
            ipp->ipp_pp->pp_cb(ipp->ipp_pp,
                STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
            mutex_enter(&stmf_state.stmf_lock);
            ipp->ipp_cb_in_progress = 0;
        }
    }

bail_out:
    mutex_exit(&stmf_state.stmf_lock);

    return (0);
}

void
stmf_delete_ppd(stmf_pp_data_t *ppd)
{
    stmf_pp_data_t **pppd;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));
    if (ppd->ppd_provider) {
        if (ppd->ppd_lu_provider) {
            ((stmf_i_lu_provider_t *)
                ppd->ppd_provider)->ilp_ppd = NULL;
        } else {
            ((stmf_i_port_provider_t *)
                ppd->ppd_provider)->ipp_ppd = NULL;
        }
        ppd->ppd_provider = NULL;
    }

    for (pppd = &stmf_state.stmf_ppdlist; *pppd != NULL;
        pppd = &((*pppd)->ppd_next)) {
        if (*pppd == ppd)
            break;
    }

    if (*pppd == NULL)
        return;

    *pppd = ppd->ppd_next;
    if (ppd->ppd_nv)
        nvlist_free(ppd->ppd_nv);

    kmem_free(ppd, ppd->ppd_alloc_size);
}

int
stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi)
{
    stmf_pp_data_t *ppd;
    int ret = ENOENT;

    if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
        return (EINVAL);
    }

    mutex_enter(&stmf_state.stmf_lock);

    for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
        if (ppi->ppi_lu_provider) {
            if (!ppd->ppd_lu_provider)
                continue;
        } else if (ppi->ppi_port_provider) {
            if (!ppd->ppd_port_provider)
                continue;
        }
        if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
            break;
    }

    if (ppd) {
        ret = 0;
        stmf_delete_ppd(ppd);
    }
    mutex_exit(&stmf_state.stmf_lock);

    return (ret);
}

int
stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
    uint32_t
*err_ret) 2532 { 2533 stmf_pp_data_t *ppd; 2534 size_t req_size; 2535 int ret = ENOENT; 2536 char *bufp = (char *)ppi_out->ppi_data; 2537 2538 if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) { 2539 return (EINVAL); 2540 } 2541 2542 mutex_enter(&stmf_state.stmf_lock); 2543 2544 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) { 2545 if (ppi->ppi_lu_provider) { 2546 if (!ppd->ppd_lu_provider) 2547 continue; 2548 } else if (ppi->ppi_port_provider) { 2549 if (!ppd->ppd_port_provider) 2550 continue; 2551 } 2552 if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0) 2553 break; 2554 } 2555 2556 if (ppd && ppd->ppd_nv) { 2557 ppi_out->ppi_token = ppd->ppd_token; 2558 if ((ret = nvlist_size(ppd->ppd_nv, &req_size, 2559 NV_ENCODE_XDR)) != 0) { 2560 goto done; 2561 } 2562 ppi_out->ppi_data_size = req_size; 2563 if (req_size > ppi->ppi_data_size) { 2564 *err_ret = STMF_IOCERR_INSUFFICIENT_BUF; 2565 ret = EINVAL; 2566 goto done; 2567 } 2568 2569 if ((ret = nvlist_pack(ppd->ppd_nv, &bufp, &req_size, 2570 NV_ENCODE_XDR, 0)) != 0) { 2571 goto done; 2572 } 2573 ret = 0; 2574 } 2575 2576 done: 2577 mutex_exit(&stmf_state.stmf_lock); 2578 2579 return (ret); 2580 } 2581 2582 void 2583 stmf_delete_all_ppds() 2584 { 2585 stmf_pp_data_t *ppd, *nppd; 2586 2587 ASSERT(mutex_owned(&stmf_state.stmf_lock)); 2588 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = nppd) { 2589 nppd = ppd->ppd_next; 2590 stmf_delete_ppd(ppd); 2591 } 2592 } 2593 2594 /* 2595 * 16 is the max string length of a protocol_ident, increase 2596 * the size if needed. 2597 */ 2598 #define STMF_KSTAT_LU_SZ (STMF_GUID_INPUT + 1 + 256) 2599 #define STMF_KSTAT_TGT_SZ (256 * 2 + 16) 2600 2601 typedef struct stmf_kstat_lu_info { 2602 kstat_named_t i_lun_guid; 2603 kstat_named_t i_lun_alias; 2604 } stmf_kstat_lu_info_t; 2605 2606 typedef struct stmf_kstat_tgt_info { 2607 kstat_named_t i_tgt_name; 2608 kstat_named_t i_tgt_alias; 2609 kstat_named_t i_protocol; 2610 } stmf_kstat_tgt_info_t; 2611 2612 /* 2613 * This array matches the Protocol Identifier in stmf_ioctl.h 2614 */ 2615 char *protocol_ident[PROTOCOL_ANY] = { 2616 "Fibre Channel", 2617 "Parallel SCSI", 2618 "SSA", 2619 "IEEE_1394", 2620 "SRP", 2621 "iSCSI", 2622 "SAS", 2623 "ADT", 2624 "ATAPI", 2625 "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN" 2626 }; 2627 2628 /* 2629 * Update the lun wait/run queue count 2630 */ 2631 static void 2632 stmf_update_kstat_lu_q(scsi_task_t *task, void func()) 2633 { 2634 stmf_i_lu_t *ilu; 2635 kstat_io_t *kip; 2636 2637 if (task->task_lu == dlun0) 2638 return; 2639 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 2640 if (ilu != NULL && ilu->ilu_kstat_io != NULL) { 2641 kip = KSTAT_IO_PTR(ilu->ilu_kstat_io); 2642 if (kip != NULL) { 2643 mutex_enter(ilu->ilu_kstat_io->ks_lock); 2644 func(kip); 2645 mutex_exit(ilu->ilu_kstat_io->ks_lock); 2646 } 2647 } 2648 } 2649 2650 /* 2651 * Update the target(lport) wait/run queue count 2652 */ 2653 static void 2654 stmf_update_kstat_lport_q(scsi_task_t *task, void func()) 2655 { 2656 stmf_i_local_port_t *ilp; 2657 kstat_io_t *kip; 2658 2659 ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private; 2660 if (ilp != NULL && ilp->ilport_kstat_io != NULL) { 2661 kip = KSTAT_IO_PTR(ilp->ilport_kstat_io); 2662 if (kip != NULL) { 2663 mutex_enter(ilp->ilport_kstat_io->ks_lock); 2664 func(kip); 2665 mutex_exit(ilp->ilport_kstat_io->ks_lock); 2666 } 2667 } 2668 } 2669 2670 static void 2671 stmf_update_kstat_lport_io(scsi_task_t *task, stmf_data_buf_t *dbuf) 2672 { 2673 
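    /*
     * Counterpart of stmf_update_kstat_lu_io for the target port side:
     * charge this dbuf's transfer to the lport's io kstat under the
     * kstat lock.
     */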
stmf_i_local_port_t *ilp; 2674 kstat_io_t *kip; 2675 2676 ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private; 2677 if (ilp != NULL && ilp->ilport_kstat_io != NULL) { 2678 kip = KSTAT_IO_PTR(ilp->ilport_kstat_io); 2679 if (kip != NULL) { 2680 mutex_enter(ilp->ilport_kstat_io->ks_lock); 2681 STMF_UPDATE_KSTAT_IO(kip, dbuf); 2682 mutex_exit(ilp->ilport_kstat_io->ks_lock); 2683 } 2684 } 2685 } 2686 2687 static void 2688 stmf_update_kstat_lu_io(scsi_task_t *task, stmf_data_buf_t *dbuf) 2689 { 2690 stmf_i_lu_t *ilu; 2691 kstat_io_t *kip; 2692 2693 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 2694 if (ilu != NULL && ilu->ilu_kstat_io != NULL) { 2695 kip = KSTAT_IO_PTR(ilu->ilu_kstat_io); 2696 if (kip != NULL) { 2697 mutex_enter(ilu->ilu_kstat_io->ks_lock); 2698 STMF_UPDATE_KSTAT_IO(kip, dbuf); 2699 mutex_exit(ilu->ilu_kstat_io->ks_lock); 2700 } 2701 } 2702 } 2703 2704 static void 2705 stmf_create_kstat_lu(stmf_i_lu_t *ilu) 2706 { 2707 char ks_nm[KSTAT_STRLEN]; 2708 stmf_kstat_lu_info_t *ks_lu; 2709 2710 /* create kstat lun info */ 2711 ks_lu = (stmf_kstat_lu_info_t *)kmem_zalloc(STMF_KSTAT_LU_SZ, 2712 KM_NOSLEEP); 2713 if (ks_lu == NULL) { 2714 cmn_err(CE_WARN, "STMF: kmem_zalloc failed"); 2715 return; 2716 } 2717 2718 bzero(ks_nm, sizeof (ks_nm)); 2719 (void) sprintf(ks_nm, "stmf_lu_%"PRIxPTR"", (uintptr_t)ilu); 2720 if ((ilu->ilu_kstat_info = kstat_create(STMF_MODULE_NAME, 0, 2721 ks_nm, "misc", KSTAT_TYPE_NAMED, 2722 sizeof (stmf_kstat_lu_info_t) / sizeof (kstat_named_t), 2723 KSTAT_FLAG_VIRTUAL)) == NULL) { 2724 kmem_free(ks_lu, STMF_KSTAT_LU_SZ); 2725 cmn_err(CE_WARN, "STMF: kstat_create lu failed"); 2726 return; 2727 } 2728 2729 ilu->ilu_kstat_info->ks_data_size = STMF_KSTAT_LU_SZ; 2730 ilu->ilu_kstat_info->ks_data = ks_lu; 2731 2732 kstat_named_init(&ks_lu->i_lun_guid, "lun-guid", 2733 KSTAT_DATA_STRING); 2734 kstat_named_init(&ks_lu->i_lun_alias, "lun-alias", 2735 KSTAT_DATA_STRING); 2736 2737 /* convert guid to hex string */ 2738 int i; 2739 uint8_t *p = ilu->ilu_lu->lu_id->ident; 2740 bzero(ilu->ilu_ascii_hex_guid, sizeof (ilu->ilu_ascii_hex_guid)); 2741 for (i = 0; i < STMF_GUID_INPUT / 2; i++) { 2742 (void) sprintf(&ilu->ilu_ascii_hex_guid[i * 2], "%02x", p[i]); 2743 } 2744 kstat_named_setstr(&ks_lu->i_lun_guid, 2745 (const char *)ilu->ilu_ascii_hex_guid); 2746 kstat_named_setstr(&ks_lu->i_lun_alias, 2747 (const char *)ilu->ilu_lu->lu_alias); 2748 kstat_install(ilu->ilu_kstat_info); 2749 2750 /* create kstat lun io */ 2751 bzero(ks_nm, sizeof (ks_nm)); 2752 (void) sprintf(ks_nm, "stmf_lu_io_%"PRIxPTR"", (uintptr_t)ilu); 2753 if ((ilu->ilu_kstat_io = kstat_create(STMF_MODULE_NAME, 0, 2754 ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) { 2755 cmn_err(CE_WARN, "STMF: kstat_create lu_io failed"); 2756 return; 2757 } 2758 mutex_init(&ilu->ilu_kstat_lock, NULL, MUTEX_DRIVER, 0); 2759 ilu->ilu_kstat_io->ks_lock = &ilu->ilu_kstat_lock; 2760 kstat_install(ilu->ilu_kstat_io); 2761 } 2762 2763 static void 2764 stmf_create_kstat_lport(stmf_i_local_port_t *ilport) 2765 { 2766 char ks_nm[KSTAT_STRLEN]; 2767 stmf_kstat_tgt_info_t *ks_tgt; 2768 int id, len; 2769 2770 /* create kstat lport info */ 2771 ks_tgt = (stmf_kstat_tgt_info_t *)kmem_zalloc(STMF_KSTAT_TGT_SZ, 2772 KM_NOSLEEP); 2773 if (ks_tgt == NULL) { 2774 cmn_err(CE_WARN, "STMF: kmem_zalloc failed"); 2775 return; 2776 } 2777 2778 bzero(ks_nm, sizeof (ks_nm)); 2779 (void) sprintf(ks_nm, "stmf_tgt_%"PRIxPTR"", (uintptr_t)ilport); 2780 if ((ilport->ilport_kstat_info = kstat_create(STMF_MODULE_NAME, 2781 0, ks_nm, "misc", 
        KSTAT_TYPE_NAMED,
        sizeof (stmf_kstat_tgt_info_t) / sizeof (kstat_named_t),
        KSTAT_FLAG_VIRTUAL)) == NULL) {
        kmem_free(ks_tgt, STMF_KSTAT_TGT_SZ);
        cmn_err(CE_WARN, "STMF: kstat_create target failed");
        return;
    }

    ilport->ilport_kstat_info->ks_data_size = STMF_KSTAT_TGT_SZ;
    ilport->ilport_kstat_info->ks_data = ks_tgt;

    kstat_named_init(&ks_tgt->i_tgt_name, "target-name",
        KSTAT_DATA_STRING);
    kstat_named_init(&ks_tgt->i_tgt_alias, "target-alias",
        KSTAT_DATA_STRING);
    kstat_named_init(&ks_tgt->i_protocol, "protocol",
        KSTAT_DATA_STRING);

    /* ident might not be null terminated */
    len = ilport->ilport_lport->lport_id->ident_length;
    bcopy(ilport->ilport_lport->lport_id->ident,
        ilport->ilport_kstat_tgt_name, len);
    ilport->ilport_kstat_tgt_name[len] = '\0';
    kstat_named_setstr(&ks_tgt->i_tgt_name,
        (const char *)ilport->ilport_kstat_tgt_name);
    kstat_named_setstr(&ks_tgt->i_tgt_alias,
        (const char *)ilport->ilport_lport->lport_alias);
    /* protocol */
    if ((id = ilport->ilport_lport->lport_id->protocol_id) >=
        PROTOCOL_ANY) {
        cmn_err(CE_WARN, "STMF: protocol_id out of bounds");
        id = PROTOCOL_ANY - 1;
    }
    kstat_named_setstr(&ks_tgt->i_protocol, protocol_ident[id]);
    kstat_install(ilport->ilport_kstat_info);

    /* create kstat lport io */
    bzero(ks_nm, sizeof (ks_nm));
    (void) sprintf(ks_nm, "stmf_tgt_io_%"PRIxPTR"", (uintptr_t)ilport);
    if ((ilport->ilport_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
        ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
        cmn_err(CE_WARN, "STMF: kstat_create target_io failed");
        return;
    }
    mutex_init(&ilport->ilport_kstat_lock, NULL, MUTEX_DRIVER, 0);
    ilport->ilport_kstat_io->ks_lock = &ilport->ilport_kstat_lock;
    kstat_install(ilport->ilport_kstat_io);
}

/*
 * Set the asymmetric access state for a logical unit. The caller is
 * responsible for establishing a SCSI unit attention on the state change.
 */
stmf_status_t
stmf_set_lu_access(stmf_lu_t *lu, uint8_t access_state)
{
    stmf_i_lu_t *ilu;
    uint8_t *p1, *p2;

    if ((access_state != STMF_LU_STANDBY) &&
        (access_state != STMF_LU_ACTIVE)) {
        return (STMF_INVALID_ARG);
    }

    p1 = &lu->lu_id->ident[0];
    mutex_enter(&stmf_state.stmf_lock);
    if (stmf_state.stmf_inventory_locked) {
        mutex_exit(&stmf_state.stmf_lock);
        return (STMF_BUSY);
    }

    for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
        p2 = &ilu->ilu_lu->lu_id->ident[0];
        if (bcmp(p1, p2, 16) == 0) {
            break;
        }
    }

    if (!ilu) {
        ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
    } else {
        /*
         * We're changing the access state on an existing logical
         * unit. Send the proxy registration message for this logical
         * unit if we're in alua mode.
         * If the requested state is STMF_LU_ACTIVE, we want to
         * register this logical unit.
         * If the requested state is STMF_LU_STANDBY, we're going to
         * abort all tasks for this logical unit.
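         * (Note that the STANDBY path below only aborts outstanding
         * tasks; no deregistration message is sent to the peer.)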
2870 */ 2871 if (stmf_state.stmf_alua_state == 1 && 2872 access_state == STMF_LU_ACTIVE) { 2873 stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS; 2874 stmf_ic_msg_t *ic_reg_lun; 2875 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 && 2876 lu->lu_lp->lp_alua_support) { 2877 ilu->ilu_alua = 1; 2878 /* allocate the register message */ 2879 ic_reg_lun = ic_lun_active_msg_alloc(p1, 2880 lu->lu_lp->lp_name, 2881 lu->lu_proxy_reg_arg_len, 2882 (uint8_t *)lu->lu_proxy_reg_arg, 2883 stmf_proxy_msg_id); 2884 /* send the message */ 2885 if (ic_reg_lun) { 2886 ic_ret = ic_tx_msg(ic_reg_lun); 2887 if (ic_ret == STMF_IC_MSG_SUCCESS) { 2888 stmf_proxy_msg_id++; 2889 } 2890 } 2891 } 2892 } else if (stmf_state.stmf_alua_state == 1 && 2893 access_state == STMF_LU_STANDBY) { 2894 /* abort all tasks for this lu */ 2895 stmf_task_lu_killall(lu, NULL, STMF_ABORTED); 2896 } 2897 } 2898 2899 ilu->ilu_access = access_state; 2900 2901 mutex_exit(&stmf_state.stmf_lock); 2902 return (STMF_SUCCESS); 2903 } 2904 2905 2906 stmf_status_t 2907 stmf_register_lu(stmf_lu_t *lu) 2908 { 2909 stmf_i_lu_t *ilu; 2910 uint8_t *p1, *p2; 2911 stmf_state_change_info_t ssci; 2912 stmf_id_data_t *luid; 2913 2914 if ((lu->lu_id->ident_type != ID_TYPE_NAA) || 2915 (lu->lu_id->ident_length != 16) || 2916 ((lu->lu_id->ident[0] & 0xf0) != 0x60)) { 2917 return (STMF_INVALID_ARG); 2918 } 2919 p1 = &lu->lu_id->ident[0]; 2920 mutex_enter(&stmf_state.stmf_lock); 2921 if (stmf_state.stmf_inventory_locked) { 2922 mutex_exit(&stmf_state.stmf_lock); 2923 return (STMF_BUSY); 2924 } 2925 2926 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) { 2927 p2 = &ilu->ilu_lu->lu_id->ident[0]; 2928 if (bcmp(p1, p2, 16) == 0) { 2929 mutex_exit(&stmf_state.stmf_lock); 2930 return (STMF_ALREADY); 2931 } 2932 } 2933 2934 ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 2935 luid = stmf_lookup_id(&stmf_state.stmf_luid_list, 2936 lu->lu_id->ident_length, lu->lu_id->ident); 2937 if (luid) { 2938 luid->id_pt_to_object = (void *)ilu; 2939 ilu->ilu_luid = luid; 2940 } 2941 ilu->ilu_alias = NULL; 2942 2943 ilu->ilu_next = stmf_state.stmf_ilulist; 2944 ilu->ilu_prev = NULL; 2945 if (ilu->ilu_next) 2946 ilu->ilu_next->ilu_prev = ilu; 2947 stmf_state.stmf_ilulist = ilu; 2948 stmf_state.stmf_nlus++; 2949 if (lu->lu_lp) { 2950 ((stmf_i_lu_provider_t *) 2951 (lu->lu_lp->lp_stmf_private))->ilp_nlus++; 2952 } 2953 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1; 2954 STMF_EVENT_ALLOC_HANDLE(ilu->ilu_event_hdl); 2955 stmf_create_kstat_lu(ilu); 2956 /* 2957 * register with proxy module if available and logical unit 2958 * is in active state 2959 */ 2960 if (stmf_state.stmf_alua_state == 1 && 2961 ilu->ilu_access == STMF_LU_ACTIVE) { 2962 stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS; 2963 stmf_ic_msg_t *ic_reg_lun; 2964 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 && 2965 lu->lu_lp->lp_alua_support) { 2966 ilu->ilu_alua = 1; 2967 /* allocate the register message */ 2968 ic_reg_lun = ic_reg_lun_msg_alloc(p1, 2969 lu->lu_lp->lp_name, lu->lu_proxy_reg_arg_len, 2970 (uint8_t *)lu->lu_proxy_reg_arg, stmf_proxy_msg_id); 2971 /* send the message */ 2972 if (ic_reg_lun) { 2973 ic_ret = ic_tx_msg(ic_reg_lun); 2974 if (ic_ret == STMF_IC_MSG_SUCCESS) { 2975 stmf_proxy_msg_id++; 2976 } 2977 } 2978 } 2979 } 2980 mutex_exit(&stmf_state.stmf_lock); 2981 2982 /* XXX we should probably check if this lu can be brought online */ 2983 ilu->ilu_prev_state = STMF_STATE_ONLINE; 2984 if (stmf_state.stmf_service_running) { 2985 ssci.st_rflags = 0; 2986 ssci.st_additional_info 
= NULL; 2987 (void) stmf_ctl(STMF_CMD_LU_ONLINE, lu, &ssci); 2988 } 2989 2990 /* XXX: Generate event */ 2991 return (STMF_SUCCESS); 2992 } 2993 2994 stmf_status_t 2995 stmf_deregister_lu(stmf_lu_t *lu) 2996 { 2997 stmf_i_lu_t *ilu; 2998 2999 mutex_enter(&stmf_state.stmf_lock); 3000 if (stmf_state.stmf_inventory_locked) { 3001 mutex_exit(&stmf_state.stmf_lock); 3002 return (STMF_BUSY); 3003 } 3004 ilu = stmf_lookup_lu(lu); 3005 if (ilu == NULL) { 3006 mutex_exit(&stmf_state.stmf_lock); 3007 return (STMF_INVALID_ARG); 3008 } 3009 if (ilu->ilu_state == STMF_STATE_OFFLINE) { 3010 ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free); 3011 while (ilu->ilu_flags & ILU_STALL_DEREGISTER) { 3012 cv_wait(&stmf_state.stmf_cv, &stmf_state.stmf_lock); 3013 } 3014 if (ilu->ilu_ntasks) { 3015 stmf_i_scsi_task_t *itask, *nitask; 3016 3017 nitask = ilu->ilu_tasks; 3018 do { 3019 itask = nitask; 3020 nitask = itask->itask_lu_next; 3021 lu->lu_task_free(itask->itask_task); 3022 stmf_free(itask->itask_task); 3023 } while (nitask != NULL); 3024 3025 ilu->ilu_tasks = ilu->ilu_free_tasks = NULL; 3026 ilu->ilu_ntasks = ilu->ilu_ntasks_free = 0; 3027 } 3028 /* de-register with proxy if available */ 3029 if (ilu->ilu_access == STMF_LU_ACTIVE && 3030 stmf_state.stmf_alua_state == 1) { 3031 /* de-register with proxy module */ 3032 stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS; 3033 stmf_ic_msg_t *ic_dereg_lun; 3034 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 && 3035 lu->lu_lp->lp_alua_support) { 3036 ilu->ilu_alua = 1; 3037 /* allocate the de-register message */ 3038 ic_dereg_lun = ic_dereg_lun_msg_alloc( 3039 lu->lu_id->ident, lu->lu_lp->lp_name, 0, 3040 NULL, stmf_proxy_msg_id); 3041 /* send the message */ 3042 if (ic_dereg_lun) { 3043 ic_ret = ic_tx_msg(ic_dereg_lun); 3044 if (ic_ret == STMF_IC_MSG_SUCCESS) { 3045 stmf_proxy_msg_id++; 3046 } 3047 } 3048 } 3049 } 3050 3051 if (ilu->ilu_next) 3052 ilu->ilu_next->ilu_prev = ilu->ilu_prev; 3053 if (ilu->ilu_prev) 3054 ilu->ilu_prev->ilu_next = ilu->ilu_next; 3055 else 3056 stmf_state.stmf_ilulist = ilu->ilu_next; 3057 stmf_state.stmf_nlus--; 3058 3059 if (ilu == stmf_state.stmf_svc_ilu_draining) { 3060 stmf_state.stmf_svc_ilu_draining = ilu->ilu_next; 3061 } 3062 if (ilu == stmf_state.stmf_svc_ilu_timing) { 3063 stmf_state.stmf_svc_ilu_timing = ilu->ilu_next; 3064 } 3065 if (lu->lu_lp) { 3066 ((stmf_i_lu_provider_t *) 3067 (lu->lu_lp->lp_stmf_private))->ilp_nlus--; 3068 } 3069 if (ilu->ilu_luid) { 3070 ((stmf_id_data_t *)ilu->ilu_luid)->id_pt_to_object = 3071 NULL; 3072 ilu->ilu_luid = NULL; 3073 } 3074 STMF_EVENT_FREE_HANDLE(ilu->ilu_event_hdl); 3075 } else { 3076 mutex_exit(&stmf_state.stmf_lock); 3077 return (STMF_BUSY); 3078 } 3079 if (ilu->ilu_kstat_info) { 3080 kmem_free(ilu->ilu_kstat_info->ks_data, 3081 ilu->ilu_kstat_info->ks_data_size); 3082 kstat_delete(ilu->ilu_kstat_info); 3083 } 3084 if (ilu->ilu_kstat_io) { 3085 kstat_delete(ilu->ilu_kstat_io); 3086 mutex_destroy(&ilu->ilu_kstat_lock); 3087 } 3088 mutex_exit(&stmf_state.stmf_lock); 3089 return (STMF_SUCCESS); 3090 } 3091 3092 void 3093 stmf_set_port_standby(stmf_local_port_t *lport, uint16_t rtpid) 3094 { 3095 stmf_i_local_port_t *ilport = 3096 (stmf_i_local_port_t *)lport->lport_stmf_private; 3097 ilport->ilport_rtpid = rtpid; 3098 ilport->ilport_standby = 1; 3099 } 3100 3101 void 3102 stmf_set_port_alua(stmf_local_port_t *lport) 3103 { 3104 stmf_i_local_port_t *ilport = 3105 (stmf_i_local_port_t *)lport->lport_stmf_private; 3106 ilport->ilport_alua = 1; 3107 } 3108 3109 stmf_status_t 3110 
stmf_register_local_port(stmf_local_port_t *lport) 3111 { 3112 stmf_i_local_port_t *ilport; 3113 stmf_state_change_info_t ssci; 3114 int start_workers = 0; 3115 3116 mutex_enter(&stmf_state.stmf_lock); 3117 if (stmf_state.stmf_inventory_locked) { 3118 mutex_exit(&stmf_state.stmf_lock); 3119 return (STMF_BUSY); 3120 } 3121 ilport = (stmf_i_local_port_t *)lport->lport_stmf_private; 3122 rw_init(&ilport->ilport_lock, NULL, RW_DRIVER, NULL); 3123 3124 ilport->ilport_next = stmf_state.stmf_ilportlist; 3125 ilport->ilport_prev = NULL; 3126 if (ilport->ilport_next) 3127 ilport->ilport_next->ilport_prev = ilport; 3128 stmf_state.stmf_ilportlist = ilport; 3129 stmf_state.stmf_nlports++; 3130 if (lport->lport_pp) { 3131 ((stmf_i_port_provider_t *) 3132 (lport->lport_pp->pp_stmf_private))->ipp_npps++; 3133 } 3134 ilport->ilport_tg = 3135 stmf_lookup_group_for_target(lport->lport_id->ident, 3136 lport->lport_id->ident_length); 3137 3138 /* 3139 * rtpid will/must be set if this is a standby port 3140 * only register ports that are not standby (proxy) ports 3141 * and ports that are alua participants (ilport_alua == 1) 3142 */ 3143 if (ilport->ilport_standby == 0) { 3144 ilport->ilport_rtpid = atomic_add_16_nv(&stmf_rtpid_counter, 1); 3145 } 3146 3147 if (stmf_state.stmf_alua_state == 1 && 3148 ilport->ilport_standby == 0 && 3149 ilport->ilport_alua == 1) { 3150 stmf_ic_msg_t *ic_reg_port; 3151 stmf_ic_msg_status_t ic_ret; 3152 stmf_local_port_t *lport; 3153 lport = ilport->ilport_lport; 3154 ic_reg_port = ic_reg_port_msg_alloc( 3155 lport->lport_id, ilport->ilport_rtpid, 3156 0, NULL, stmf_proxy_msg_id); 3157 if (ic_reg_port) { 3158 ic_ret = ic_tx_msg(ic_reg_port); 3159 if (ic_ret == STMF_IC_MSG_SUCCESS) { 3160 ilport->ilport_reg_msgid = stmf_proxy_msg_id++; 3161 } else { 3162 cmn_err(CE_WARN, "error on port registration " 3163 "port - %s", ilport->ilport_kstat_tgt_name); 3164 } 3165 } 3166 } 3167 STMF_EVENT_ALLOC_HANDLE(ilport->ilport_event_hdl); 3168 stmf_create_kstat_lport(ilport); 3169 if (stmf_workers_state == STMF_WORKERS_DISABLED) { 3170 stmf_workers_state = STMF_WORKERS_ENABLING; 3171 start_workers = 1; 3172 } 3173 mutex_exit(&stmf_state.stmf_lock); 3174 3175 if (start_workers) 3176 stmf_worker_init(); 3177 3178 /* XXX we should probably check if this lport can be brought online */ 3179 ilport->ilport_prev_state = STMF_STATE_ONLINE; 3180 if (stmf_state.stmf_service_running) { 3181 ssci.st_rflags = 0; 3182 ssci.st_additional_info = NULL; 3183 (void) stmf_ctl(STMF_CMD_LPORT_ONLINE, lport, &ssci); 3184 } 3185 3186 /* XXX: Generate event */ 3187 return (STMF_SUCCESS); 3188 } 3189 3190 stmf_status_t 3191 stmf_deregister_local_port(stmf_local_port_t *lport) 3192 { 3193 stmf_i_local_port_t *ilport; 3194 3195 mutex_enter(&stmf_state.stmf_lock); 3196 if (stmf_state.stmf_inventory_locked) { 3197 mutex_exit(&stmf_state.stmf_lock); 3198 return (STMF_BUSY); 3199 } 3200 3201 ilport = (stmf_i_local_port_t *)lport->lport_stmf_private; 3202 3203 /* 3204 * deregister ports that are not standby (proxy) 3205 */ 3206 if (stmf_state.stmf_alua_state == 1 && 3207 ilport->ilport_standby == 0 && 3208 ilport->ilport_alua == 1) { 3209 stmf_ic_msg_t *ic_dereg_port; 3210 stmf_ic_msg_status_t ic_ret; 3211 ic_dereg_port = ic_dereg_port_msg_alloc( 3212 lport->lport_id, 0, NULL, stmf_proxy_msg_id); 3213 if (ic_dereg_port) { 3214 ic_ret = ic_tx_msg(ic_dereg_port); 3215 if (ic_ret == STMF_IC_MSG_SUCCESS) { 3216 stmf_proxy_msg_id++; 3217 } 3218 } 3219 } 3220 3221 if (ilport->ilport_nsessions == 0) { 3222 if (ilport->ilport_next) 
            ilport->ilport_next->ilport_prev = ilport->ilport_prev;
        if (ilport->ilport_prev)
            ilport->ilport_prev->ilport_next = ilport->ilport_next;
        else
            stmf_state.stmf_ilportlist = ilport->ilport_next;
        rw_destroy(&ilport->ilport_lock);
        stmf_state.stmf_nlports--;
        if (lport->lport_pp) {
            ((stmf_i_port_provider_t *)
                (lport->lport_pp->pp_stmf_private))->ipp_npps--;
        }
        ilport->ilport_tg = NULL;
        STMF_EVENT_FREE_HANDLE(ilport->ilport_event_hdl);
    } else {
        mutex_exit(&stmf_state.stmf_lock);
        return (STMF_BUSY);
    }
    if (ilport->ilport_kstat_info) {
        kmem_free(ilport->ilport_kstat_info->ks_data,
            ilport->ilport_kstat_info->ks_data_size);
        kstat_delete(ilport->ilport_kstat_info);
    }
    if (ilport->ilport_kstat_io) {
        kstat_delete(ilport->ilport_kstat_io);
        mutex_destroy(&ilport->ilport_kstat_lock);
    }
    mutex_exit(&stmf_state.stmf_lock);
    return (STMF_SUCCESS);
}

/*
 * Port provider has to make sure that register/deregister session and
 * port are serialized calls.
 */
stmf_status_t
stmf_register_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
{
    stmf_i_scsi_session_t *iss;
    stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
        lport->lport_stmf_private;
    uint8_t lun[8];

    /*
     * Port state has to be online to register a scsi session. It is
     * possible that we started an offline operation and a new SCSI
     * session started at the same time (in that case, too, we are going
     * to fail the registration). But any other state is simply
     * a bad port provider implementation.
     */
    if (ilport->ilport_state != STMF_STATE_ONLINE) {
        if (ilport->ilport_state != STMF_STATE_OFFLINING) {
            stmf_trace(lport->lport_alias, "Port is trying to "
                "register a session while the state is neither "
                "online nor offlining");
        }
        return (STMF_FAILURE);
    }
    bzero(lun, 8);
    iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
    iss->iss_flags |= ISS_BEING_CREATED;

    /* sessions use the ilport_lock. No separate lock is required */
    iss->iss_lockp = &ilport->ilport_lock;
    (void) stmf_session_create_lun_map(ilport, iss);

    rw_enter(&ilport->ilport_lock, RW_WRITER);
    ilport->ilport_nsessions++;
    iss->iss_next = ilport->ilport_ss_list;
    ilport->ilport_ss_list = iss;
    rw_exit(&ilport->ilport_lock);

    iss->iss_creation_time = ddi_get_time();
    ss->ss_session_id = atomic_add_64_nv(&stmf_session_counter, 1);
    iss->iss_flags &= ~ISS_BEING_CREATED;
    /* XXX should we remove ISS_LUN_INVENTORY_CHANGED on new session?
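     * For now the flag is simply cleared below, so a brand new session
     * never starts out reporting a stale LUN inventory change.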
     */
    iss->iss_flags &= ~ISS_LUN_INVENTORY_CHANGED;
    DTRACE_PROBE2(session__online, stmf_local_port_t *, lport,
        stmf_scsi_session_t *, ss);
    return (STMF_SUCCESS);
}

void
stmf_deregister_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
{
    stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
        lport->lport_stmf_private;
    stmf_i_scsi_session_t *iss, **ppss;
    int found = 0;
    stmf_ic_msg_t *ic_session_dereg;
    stmf_status_t ic_ret = STMF_FAILURE;

    DTRACE_PROBE2(session__offline, stmf_local_port_t *, lport,
        stmf_scsi_session_t *, ss);

    iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
    if (ss->ss_rport_alias) {
        ss->ss_rport_alias = NULL;
    }

try_dereg_ss_again:
    mutex_enter(&stmf_state.stmf_lock);
    atomic_and_32(&iss->iss_flags,
        ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
    if (iss->iss_flags & ISS_EVENT_ACTIVE) {
        mutex_exit(&stmf_state.stmf_lock);
        delay(1);
        goto try_dereg_ss_again;
    }

    /* dereg proxy session if not standby port */
    if (stmf_state.stmf_alua_state == 1 &&
        ilport->ilport_standby == 0 &&
        ilport->ilport_alua == 1) {
        ic_session_dereg = ic_session_dereg_msg_alloc(
            ss, stmf_proxy_msg_id);
        if (ic_session_dereg) {
            ic_ret = ic_tx_msg(ic_session_dereg);
            if (ic_ret == STMF_IC_MSG_SUCCESS) {
                stmf_proxy_msg_id++;
            }
        }
    }

    mutex_exit(&stmf_state.stmf_lock);

    rw_enter(&ilport->ilport_lock, RW_WRITER);
    for (ppss = &ilport->ilport_ss_list; *ppss != NULL;
        ppss = &((*ppss)->iss_next)) {
        if (iss == (*ppss)) {
            *ppss = (*ppss)->iss_next;
            found = 1;
            break;
        }
    }
    if (!found) {
        cmn_err(CE_PANIC, "Deregister session called for a "
            "non-existent session");
    }
    ilport->ilport_nsessions--;
    rw_exit(&ilport->ilport_lock);

    (void) stmf_session_destroy_lun_map(ilport, iss);
}

stmf_i_scsi_session_t *
stmf_session_id_to_issptr(uint64_t session_id, int stay_locked)
{
    stmf_i_local_port_t *ilport;
    stmf_i_scsi_session_t *iss;

    mutex_enter(&stmf_state.stmf_lock);
    for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
        ilport = ilport->ilport_next) {
        rw_enter(&ilport->ilport_lock, RW_WRITER);
        for (iss = ilport->ilport_ss_list; iss != NULL;
            iss = iss->iss_next) {
            if (iss->iss_ss->ss_session_id == session_id) {
                if (!stay_locked)
                    rw_exit(&ilport->ilport_lock);
                mutex_exit(&stmf_state.stmf_lock);
                return (iss);
            }
        }
        rw_exit(&ilport->ilport_lock);
    }
    mutex_exit(&stmf_state.stmf_lock);
    return (NULL);
}

void
stmf_release_itl_handle(stmf_lu_t *lu, stmf_itl_data_t *itl)
{
    stmf_itl_data_t **itlpp;
    stmf_i_lu_t *ilu;

    ASSERT(itl->itl_flags & STMF_ITL_BEING_TERMINATED);

    ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
    mutex_enter(&ilu->ilu_task_lock);
    for (itlpp = &ilu->ilu_itl_list; (*itlpp) != NULL;
        itlpp = &(*itlpp)->itl_next) {
        if ((*itlpp) == itl)
            break;
    }
    ASSERT((*itlpp) != NULL);
    *itlpp = itl->itl_next;
    mutex_exit(&ilu->ilu_task_lock);
    lu->lu_abort(lu, STMF_LU_ITL_HANDLE_REMOVED, itl->itl_handle,
        (uint32_t)itl->itl_hdlrm_reason);
    kmem_free(itl, sizeof (*itl));
}

stmf_status_t
stmf_register_itl_handle(stmf_lu_t *lu, uint8_t *lun,
    stmf_scsi_session_t *ss,
uint64_t session_id, void *itl_handle) 3418 { 3419 stmf_itl_data_t *itl; 3420 stmf_i_scsi_session_t *iss; 3421 stmf_lun_map_ent_t *lun_map_ent; 3422 stmf_i_lu_t *ilu; 3423 uint16_t n; 3424 3425 ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 3426 if (ss == NULL) { 3427 iss = stmf_session_id_to_issptr(session_id, 1); 3428 if (iss == NULL) 3429 return (STMF_NOT_FOUND); 3430 } else { 3431 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private; 3432 rw_enter(iss->iss_lockp, RW_WRITER); 3433 } 3434 3435 n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8)); 3436 lun_map_ent = (stmf_lun_map_ent_t *) 3437 stmf_get_ent_from_map(iss->iss_sm, n); 3438 if ((lun_map_ent == NULL) || (lun_map_ent->ent_lu != lu)) { 3439 rw_exit(iss->iss_lockp); 3440 return (STMF_NOT_FOUND); 3441 } 3442 if (lun_map_ent->ent_itl_datap != NULL) { 3443 rw_exit(iss->iss_lockp); 3444 return (STMF_ALREADY); 3445 } 3446 3447 itl = (stmf_itl_data_t *)kmem_zalloc(sizeof (*itl), KM_NOSLEEP); 3448 if (itl == NULL) { 3449 rw_exit(iss->iss_lockp); 3450 return (STMF_ALLOC_FAILURE); 3451 } 3452 3453 itl->itl_counter = 1; 3454 itl->itl_lun = n; 3455 itl->itl_handle = itl_handle; 3456 itl->itl_session = iss; 3457 mutex_enter(&ilu->ilu_task_lock); 3458 itl->itl_next = ilu->ilu_itl_list; 3459 ilu->ilu_itl_list = itl; 3460 mutex_exit(&ilu->ilu_task_lock); 3461 lun_map_ent->ent_itl_datap = itl; 3462 rw_exit(iss->iss_lockp); 3463 3464 return (STMF_SUCCESS); 3465 } 3466 3467 void 3468 stmf_do_itl_dereg(stmf_lu_t *lu, stmf_itl_data_t *itl, uint8_t hdlrm_reason) 3469 { 3470 uint8_t old, new; 3471 3472 do { 3473 old = new = itl->itl_flags; 3474 if (old & STMF_ITL_BEING_TERMINATED) 3475 return; 3476 new |= STMF_ITL_BEING_TERMINATED; 3477 } while (atomic_cas_8(&itl->itl_flags, old, new) != old); 3478 itl->itl_hdlrm_reason = hdlrm_reason; 3479 3480 ASSERT(itl->itl_counter); 3481 3482 if (atomic_add_32_nv(&itl->itl_counter, -1)) 3483 return; 3484 3485 drv_usecwait(10); 3486 if (itl->itl_counter) 3487 return; 3488 3489 stmf_release_itl_handle(lu, itl); 3490 } 3491 3492 stmf_status_t 3493 stmf_deregister_all_lu_itl_handles(stmf_lu_t *lu) 3494 { 3495 stmf_i_lu_t *ilu; 3496 stmf_i_local_port_t *ilport; 3497 stmf_i_scsi_session_t *iss; 3498 stmf_lun_map_t *lm; 3499 stmf_lun_map_ent_t *ent; 3500 uint32_t nmaps, nu; 3501 stmf_itl_data_t **itl_list; 3502 int i; 3503 3504 ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 3505 3506 dereg_itl_start:; 3507 nmaps = ilu->ilu_ref_cnt; 3508 if (nmaps == 0) 3509 return (STMF_NOT_FOUND); 3510 itl_list = (stmf_itl_data_t **)kmem_zalloc( 3511 nmaps * sizeof (stmf_itl_data_t *), KM_SLEEP); 3512 mutex_enter(&stmf_state.stmf_lock); 3513 if (nmaps != ilu->ilu_ref_cnt) { 3514 /* Something changed, start all over */ 3515 mutex_exit(&stmf_state.stmf_lock); 3516 kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *)); 3517 goto dereg_itl_start; 3518 } 3519 nu = 0; 3520 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL; 3521 ilport = ilport->ilport_next) { 3522 rw_enter(&ilport->ilport_lock, RW_WRITER); 3523 for (iss = ilport->ilport_ss_list; iss != NULL; 3524 iss = iss->iss_next) { 3525 lm = iss->iss_sm; 3526 if (!lm) 3527 continue; 3528 for (i = 0; i < lm->lm_nentries; i++) { 3529 if (lm->lm_plus[i] == NULL) 3530 continue; 3531 ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 3532 if ((ent->ent_lu == lu) && 3533 (ent->ent_itl_datap)) { 3534 itl_list[nu++] = ent->ent_itl_datap; 3535 ent->ent_itl_datap = NULL; 3536 if (nu == nmaps) { 3537 rw_exit(&ilport->ilport_lock); 3538 goto dai_scan_done; 3539 } 3540 } 3541 } /* lun table for a 
session */
        } /* sessions */
        rw_exit(&ilport->ilport_lock);
    } /* ports */

dai_scan_done:
    mutex_exit(&stmf_state.stmf_lock);

    for (i = 0; i < nu; i++) {
        stmf_do_itl_dereg(lu, itl_list[i],
            STMF_ITL_REASON_DEREG_REQUEST);
    }
    kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));

    return (STMF_SUCCESS);
}

stmf_status_t
stmf_deregister_itl_handle(stmf_lu_t *lu, uint8_t *lun,
    stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle)
{
    stmf_i_scsi_session_t *iss;
    stmf_itl_data_t *itl;
    stmf_lun_map_ent_t *ent;
    stmf_lun_map_t *lm;
    int i;
    uint16_t n;

    if (ss == NULL) {
        if (session_id == STMF_SESSION_ID_NONE)
            return (STMF_INVALID_ARG);
        iss = stmf_session_id_to_issptr(session_id, 1);
        if (iss == NULL)
            return (STMF_NOT_FOUND);
    } else {
        iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
        rw_enter(iss->iss_lockp, RW_WRITER);
    }
    lm = iss->iss_sm;
    if (lm == NULL) {
        rw_exit(iss->iss_lockp);
        return (STMF_NOT_FOUND);
    }

    if (lun) {
        n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
        ent = (stmf_lun_map_ent_t *)
            stmf_get_ent_from_map(iss->iss_sm, n);
    } else {
        if (itl_handle == NULL) {
            rw_exit(iss->iss_lockp);
            return (STMF_INVALID_ARG);
        }
        ent = NULL;
        for (i = 0; i < lm->lm_nentries; i++) {
            if (lm->lm_plus[i] == NULL)
                continue;
            ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
            if (ent->ent_itl_datap &&
                (ent->ent_itl_datap->itl_handle == itl_handle)) {
                break;
            }
            ent = NULL;
        }
    }
    if ((ent == NULL) || (ent->ent_lu != lu) ||
        (ent->ent_itl_datap == NULL)) {
        rw_exit(iss->iss_lockp);
        return (STMF_NOT_FOUND);
    }
    itl = ent->ent_itl_datap;
    ent->ent_itl_datap = NULL;
    rw_exit(iss->iss_lockp);
    stmf_do_itl_dereg(lu, itl, STMF_ITL_REASON_DEREG_REQUEST);

    return (STMF_SUCCESS);
}

stmf_status_t
stmf_get_itl_handle(stmf_lu_t *lu, uint8_t *lun, stmf_scsi_session_t *ss,
    uint64_t session_id, void **itl_handle_retp)
{
    stmf_i_scsi_session_t *iss;
    stmf_lun_map_ent_t *ent;
    stmf_lun_map_t *lm;
    stmf_status_t ret;
    int i;
    uint16_t n;

    if (ss == NULL) {
        iss = stmf_session_id_to_issptr(session_id, 1);
        if (iss == NULL)
            return (STMF_NOT_FOUND);
    } else {
        iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
        rw_enter(iss->iss_lockp, RW_WRITER);
    }

    ent = NULL;
    if (lun == NULL) {
        lm = iss->iss_sm;
        for (i = 0; i < lm->lm_nentries; i++) {
            if (lm->lm_plus[i] == NULL)
                continue;
            ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
            if (ent->ent_lu == lu)
                break;
            ent = NULL;
        }
    } else {
        n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
        ent = (stmf_lun_map_ent_t *)
            stmf_get_ent_from_map(iss->iss_sm, n);
        if ((ent != NULL) && lu && (ent->ent_lu != lu))
            ent = NULL;
    }
    if (ent && ent->ent_itl_datap) {
        *itl_handle_retp = ent->ent_itl_datap->itl_handle;
        ret = STMF_SUCCESS;
    } else {
        ret = STMF_NOT_FOUND;
    }

    rw_exit(iss->iss_lockp);
    return (ret);
}

stmf_data_buf_t *
stmf_alloc_dbuf(scsi_task_t *task, uint32_t size, uint32_t *pminsize,
    uint32_t flags)
{
    stmf_i_scsi_task_t *itask =
        (stmf_i_scsi_task_t *)task->task_stmf_private;
    stmf_local_port_t *lport = task->task_lport;
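    /*
     * A task can have at most four dbufs outstanding at a time; their
     * slots are tracked in the itask_allocated_buf_map bitmap, and the
     * stmf_first_zero lookup below yields the first free slot (0xff
     * means all four are in use).
     */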
3673 stmf_data_buf_t *dbuf; 3674 uint8_t ndx; 3675 3676 ndx = stmf_first_zero[itask->itask_allocated_buf_map]; 3677 if (ndx == 0xff) 3678 return (NULL); 3679 dbuf = itask->itask_dbufs[ndx] = lport->lport_ds->ds_alloc_data_buf( 3680 task, size, pminsize, flags); 3681 if (dbuf) { 3682 task->task_cur_nbufs++; 3683 itask->itask_allocated_buf_map |= (1 << ndx); 3684 dbuf->db_handle = ndx; 3685 return (dbuf); 3686 } 3687 3688 return (NULL); 3689 } 3690 3691 void 3692 stmf_free_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf) 3693 { 3694 stmf_i_scsi_task_t *itask = 3695 (stmf_i_scsi_task_t *)task->task_stmf_private; 3696 stmf_local_port_t *lport = task->task_lport; 3697 3698 itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle); 3699 task->task_cur_nbufs--; 3700 lport->lport_ds->ds_free_data_buf(lport->lport_ds, dbuf); 3701 } 3702 3703 stmf_data_buf_t * 3704 stmf_handle_to_buf(scsi_task_t *task, uint8_t h) 3705 { 3706 stmf_i_scsi_task_t *itask; 3707 3708 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 3709 if (h > 3) 3710 return (NULL); 3711 return (itask->itask_dbufs[h]); 3712 } 3713 3714 /* ARGSUSED */ 3715 struct scsi_task * 3716 stmf_task_alloc(struct stmf_local_port *lport, stmf_scsi_session_t *ss, 3717 uint8_t *lun, uint16_t cdb_length_in, uint16_t ext_id) 3718 { 3719 stmf_lu_t *lu; 3720 stmf_i_scsi_session_t *iss; 3721 stmf_i_lu_t *ilu; 3722 stmf_i_scsi_task_t *itask; 3723 stmf_i_scsi_task_t **ppitask; 3724 scsi_task_t *task; 3725 uint64_t *p; 3726 uint8_t *l; 3727 stmf_lun_map_ent_t *lun_map_ent; 3728 uint16_t cdb_length; 3729 uint16_t luNbr; 3730 uint8_t new_task = 0; 3731 3732 /* 3733 * We allocate 7 extra bytes for CDB to provide a cdb pointer which 3734 * is guaranteed to be 8 byte aligned. Some LU providers like OSD 3735 * depend upon this alignment. 
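     * The alignment itself is done after allocation by rounding the
     * cdb pointer up with ((ulong_t)p + 7ul) & ~7ul; the 7 spare bytes
     * guarantee the rounded pointer still lies inside the allocation.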
3736 */ 3737 if (cdb_length_in >= 16) 3738 cdb_length = cdb_length_in + 7; 3739 else 3740 cdb_length = 16 + 7; 3741 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private; 3742 luNbr = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8)); 3743 rw_enter(iss->iss_lockp, RW_READER); 3744 lun_map_ent = 3745 (stmf_lun_map_ent_t *)stmf_get_ent_from_map(iss->iss_sm, luNbr); 3746 if (!lun_map_ent) { 3747 lu = dlun0; 3748 } else { 3749 lu = lun_map_ent->ent_lu; 3750 } 3751 ilu = lu->lu_stmf_private; 3752 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 3753 rw_exit(iss->iss_lockp); 3754 return (NULL); 3755 } 3756 do { 3757 if (ilu->ilu_free_tasks == NULL) { 3758 new_task = 1; 3759 break; 3760 } 3761 mutex_enter(&ilu->ilu_task_lock); 3762 for (ppitask = &ilu->ilu_free_tasks; (*ppitask != NULL) && 3763 ((*ppitask)->itask_cdb_buf_size < cdb_length); 3764 ppitask = &((*ppitask)->itask_lu_free_next)) 3765 ; 3766 if (*ppitask) { 3767 itask = *ppitask; 3768 *ppitask = (*ppitask)->itask_lu_free_next; 3769 ilu->ilu_ntasks_free--; 3770 if (ilu->ilu_ntasks_free < ilu->ilu_ntasks_min_free) 3771 ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free; 3772 } else { 3773 new_task = 1; 3774 } 3775 mutex_exit(&ilu->ilu_task_lock); 3776 /* CONSTCOND */ 3777 } while (0); 3778 3779 if (!new_task) { 3780 task = itask->itask_task; 3781 task->task_timeout = 0; 3782 p = (uint64_t *)&task->task_flags; 3783 *p++ = 0; *p++ = 0; p++; p++; *p++ = 0; *p++ = 0; *p = 0; 3784 itask->itask_ncmds = 0; 3785 } else { 3786 task = (scsi_task_t *)stmf_alloc(STMF_STRUCT_SCSI_TASK, 3787 cdb_length, AF_FORCE_NOSLEEP); 3788 if (task == NULL) { 3789 rw_exit(iss->iss_lockp); 3790 return (NULL); 3791 } 3792 task->task_lu = lu; 3793 l = task->task_lun_no; 3794 l[0] = lun[0]; 3795 l[1] = lun[1]; 3796 l[2] = lun[2]; 3797 l[3] = lun[3]; 3798 l[4] = lun[4]; 3799 l[5] = lun[5]; 3800 l[6] = lun[6]; 3801 l[7] = lun[7]; 3802 task->task_cdb = (uint8_t *)task->task_port_private; 3803 if ((ulong_t)(task->task_cdb) & 7ul) { 3804 task->task_cdb = (uint8_t *)(((ulong_t) 3805 (task->task_cdb) + 7ul) & ~(7ul)); 3806 } 3807 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 3808 itask->itask_cdb_buf_size = cdb_length; 3809 } 3810 task->task_session = ss; 3811 task->task_lport = lport; 3812 task->task_cdb_length = cdb_length_in; 3813 itask->itask_flags = ITASK_IN_TRANSITION; 3814 3815 if (new_task) { 3816 if (lu->lu_task_alloc(task) != STMF_SUCCESS) { 3817 rw_exit(iss->iss_lockp); 3818 stmf_free(task); 3819 return (NULL); 3820 } 3821 mutex_enter(&ilu->ilu_task_lock); 3822 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 3823 mutex_exit(&ilu->ilu_task_lock); 3824 rw_exit(iss->iss_lockp); 3825 stmf_free(task); 3826 return (NULL); 3827 } 3828 itask->itask_lu_next = ilu->ilu_tasks; 3829 if (ilu->ilu_tasks) 3830 ilu->ilu_tasks->itask_lu_prev = itask; 3831 ilu->ilu_tasks = itask; 3832 /* kmem_zalloc automatically makes itask->itask_lu_prev NULL */ 3833 ilu->ilu_ntasks++; 3834 mutex_exit(&ilu->ilu_task_lock); 3835 } 3836 3837 itask->itask_ilu_task_cntr = ilu->ilu_cur_task_cntr; 3838 atomic_add_32(itask->itask_ilu_task_cntr, 1); 3839 itask->itask_start_time = ddi_get_lbolt(); 3840 3841 if ((lun_map_ent != NULL) && ((itask->itask_itl_datap = 3842 lun_map_ent->ent_itl_datap) != NULL)) { 3843 atomic_add_32(&itask->itask_itl_datap->itl_counter, 1); 3844 task->task_lu_itl_handle = itask->itask_itl_datap->itl_handle; 3845 } else { 3846 itask->itask_itl_datap = NULL; 3847 task->task_lu_itl_handle = NULL; 3848 } 3849 3850 rw_exit(iss->iss_lockp); 3851 return (task); 3852 } 3853 3854 static void 
3855 stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss) 3856 { 3857 stmf_i_scsi_task_t *itask = 3858 (stmf_i_scsi_task_t *)task->task_stmf_private; 3859 stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 3860 3861 ASSERT(rw_lock_held(iss->iss_lockp)); 3862 itask->itask_flags = ITASK_IN_FREE_LIST; 3863 itask->itask_proxy_msg_id = 0; 3864 mutex_enter(&ilu->ilu_task_lock); 3865 itask->itask_lu_free_next = ilu->ilu_free_tasks; 3866 ilu->ilu_free_tasks = itask; 3867 ilu->ilu_ntasks_free++; 3868 mutex_exit(&ilu->ilu_task_lock); 3869 atomic_add_32(itask->itask_ilu_task_cntr, -1); 3870 } 3871 3872 void 3873 stmf_task_lu_check_freelist(stmf_i_lu_t *ilu) 3874 { 3875 uint32_t num_to_release, ndx; 3876 stmf_i_scsi_task_t *itask; 3877 stmf_lu_t *lu = ilu->ilu_lu; 3878 3879 ASSERT(ilu->ilu_ntasks_min_free <= ilu->ilu_ntasks_free); 3880 3881 /* free half of the recently observed minimum number of free tasks */ 3882 num_to_release = (ilu->ilu_ntasks_min_free + 1) / 2; 3883 if (!num_to_release) { 3884 return; 3885 } 3886 for (ndx = 0; ndx < num_to_release; ndx++) { 3887 mutex_enter(&ilu->ilu_task_lock); 3888 itask = ilu->ilu_free_tasks; 3889 if (itask == NULL) { 3890 mutex_exit(&ilu->ilu_task_lock); 3891 break; 3892 } 3893 ilu->ilu_free_tasks = itask->itask_lu_free_next; 3894 ilu->ilu_ntasks_free--; 3895 mutex_exit(&ilu->ilu_task_lock); 3896 3897 lu->lu_task_free(itask->itask_task); 3898 mutex_enter(&ilu->ilu_task_lock); 3899 if (itask->itask_lu_next) 3900 itask->itask_lu_next->itask_lu_prev = 3901 itask->itask_lu_prev; 3902 if (itask->itask_lu_prev) 3903 itask->itask_lu_prev->itask_lu_next = 3904 itask->itask_lu_next; 3905 else 3906 ilu->ilu_tasks = itask->itask_lu_next; 3907 3908 ilu->ilu_ntasks--; 3909 mutex_exit(&ilu->ilu_task_lock); 3910 stmf_free(itask->itask_task); 3911 } 3912 } 3913 3914 /* 3915 * Called with stmf_lock held 3916 */ 3917 void 3918 stmf_check_freetask() 3919 { 3920 stmf_i_lu_t *ilu; 3921 clock_t endtime = ddi_get_lbolt() + drv_usectohz(10000); 3922 3923 /* stmf_svc_ilu_draining may get changed after stmf_lock is released */ 3924 while ((ilu = stmf_state.stmf_svc_ilu_draining) != NULL) { 3925 stmf_state.stmf_svc_ilu_draining = ilu->ilu_next; 3926 if (!ilu->ilu_ntasks_min_free) { 3927 ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free; 3928 continue; 3929 } 3930 ilu->ilu_flags |= ILU_STALL_DEREGISTER; 3931 mutex_exit(&stmf_state.stmf_lock); 3932 stmf_task_lu_check_freelist(ilu); 3933 /* 3934 * we do not care about the accuracy of 3935 * ilu_ntasks_min_free, so we don't lock here 3936 */ 3937 ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free; 3938 mutex_enter(&stmf_state.stmf_lock); 3939 ilu->ilu_flags &= ~ILU_STALL_DEREGISTER; 3940 cv_broadcast(&stmf_state.stmf_cv); 3941 if (ddi_get_lbolt() >= endtime) 3942 break; 3943 } 3944 } 3945 3946 void 3947 stmf_do_ilu_timeouts(stmf_i_lu_t *ilu) 3948 { 3949 clock_t l = ddi_get_lbolt(); 3950 clock_t ps = drv_usectohz(1000000); 3951 stmf_i_scsi_task_t *itask; 3952 scsi_task_t *task; 3953 uint32_t to; 3954 3955 mutex_enter(&ilu->ilu_task_lock); 3956 for (itask = ilu->ilu_tasks; itask != NULL; 3957 itask = itask->itask_lu_next) { 3958 if (itask->itask_flags & (ITASK_IN_FREE_LIST | 3959 ITASK_BEING_ABORTED)) { 3960 continue; 3961 } 3962 task = itask->itask_task; 3963 if (task->task_timeout == 0) 3964 to = stmf_default_task_timeout; 3965 else 3966 to = task->task_timeout; 3967 if ((itask->itask_start_time + (to * ps)) > l) 3968 continue; 3969 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 3970 STMF_TIMEOUT, NULL); 3971 } 3972
mutex_exit(&ilu->ilu_task_lock); 3973 } 3974 3975 /* 3976 * Called with stmf_lock held 3977 */ 3978 void 3979 stmf_check_ilu_timing() 3980 { 3981 stmf_i_lu_t *ilu; 3982 clock_t endtime = ddi_get_lbolt() + drv_usectohz(10000); 3983 3984 /* stmf_svc_ilu_timing may get changed after stmf_lock is released */ 3985 while ((ilu = stmf_state.stmf_svc_ilu_timing) != NULL) { 3986 stmf_state.stmf_svc_ilu_timing = ilu->ilu_next; 3987 if (ilu->ilu_cur_task_cntr == (&ilu->ilu_task_cntr1)) { 3988 if (ilu->ilu_task_cntr2 == 0) { 3989 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr2; 3990 continue; 3991 } 3992 } else { 3993 if (ilu->ilu_task_cntr1 == 0) { 3994 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1; 3995 continue; 3996 } 3997 } 3998 /* 3999 * If we are here then it means that there is some slowdown 4000 * in tasks on this lu. We need to check. 4001 */ 4002 ilu->ilu_flags |= ILU_STALL_DEREGISTER; 4003 mutex_exit(&stmf_state.stmf_lock); 4004 stmf_do_ilu_timeouts(ilu); 4005 mutex_enter(&stmf_state.stmf_lock); 4006 ilu->ilu_flags &= ~ILU_STALL_DEREGISTER; 4007 cv_broadcast(&stmf_state.stmf_cv); 4008 if (ddi_get_lbolt() >= endtime) 4009 break; 4010 } 4011 } 4012 4013 /* 4014 * Kills all tasks on a lu except tm_task 4015 */ 4016 void 4017 stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s) 4018 { 4019 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 4020 stmf_i_scsi_task_t *itask; 4021 4022 mutex_enter(&ilu->ilu_task_lock); 4023 4024 for (itask = ilu->ilu_tasks; itask != NULL; 4025 itask = itask->itask_lu_next) { 4026 if (itask->itask_flags & ITASK_IN_FREE_LIST) 4027 continue; 4028 if (itask->itask_task == tm_task) 4029 continue; 4030 stmf_abort(STMF_QUEUE_TASK_ABORT, itask->itask_task, s, NULL); 4031 } 4032 mutex_exit(&ilu->ilu_task_lock); 4033 } 4034 4035 void 4036 stmf_free_task_bufs(stmf_i_scsi_task_t *itask, stmf_local_port_t *lport) 4037 { 4038 int i; 4039 uint8_t map; 4040 4041 if ((map = itask->itask_allocated_buf_map) != 0) { 4042 for (i = 0; i < 4; i++) { 4043 if (map & 1) { 4044 stmf_data_buf_t *dbuf; 4045 4046 dbuf = itask->itask_dbufs[i]; 4047 if (dbuf->db_lu_private) { 4048 dbuf->db_lu_private = NULL; 4049 } 4050 lport->lport_ds->ds_free_data_buf( 4051 lport->lport_ds, dbuf); 4052 } 4053 map >>= 1; 4054 } 4055 itask->itask_allocated_buf_map = 0; 4056 } 4057 } 4058 4059 void 4060 stmf_task_free(scsi_task_t *task) 4061 { 4062 stmf_local_port_t *lport = task->task_lport; 4063 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *) 4064 task->task_stmf_private; 4065 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *) 4066 task->task_session->ss_stmf_private; 4067 4068 DTRACE_PROBE1(stmf__task__end, scsi_task_t *, task); 4069 stmf_free_task_bufs(itask, lport); 4070 if (itask->itask_itl_datap) { 4071 if (atomic_add_32_nv(&itask->itask_itl_datap->itl_counter, 4072 -1) == 0) { 4073 stmf_release_itl_handle(task->task_lu, 4074 itask->itask_itl_datap); 4075 } 4076 } 4077 4078 rw_enter(iss->iss_lockp, RW_READER); 4079 lport->lport_task_free(task); 4080 if (itask->itask_worker) { 4081 atomic_add_32(&stmf_cur_ntasks, -1); 4082 atomic_add_32(&itask->itask_worker->worker_ref_count, -1); 4083 } 4084 /* 4085 * After calling stmf_task_lu_free, the task pointer can no longer 4086 * be trusted. 
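 * The task has just been put on the LU's free list, so another thread
 * may grab and reuse it the moment the session lock is dropped. An
 * illustrative sketch of the hazard (not driver code):
 *
 *	stmf_task_lu_free(task, iss);
 *	rw_exit(iss->iss_lockp);
 *	task->task_flags = 0;	<- wrong: task may already be reused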
4087 */ 4088 stmf_task_lu_free(task, iss); 4089 rw_exit(iss->iss_lockp); 4090 } 4091 4092 void 4093 stmf_post_task(scsi_task_t *task, stmf_data_buf_t *dbuf) 4094 { 4095 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *) 4096 task->task_stmf_private; 4097 stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 4098 int nv; 4099 uint32_t old, new; 4100 uint32_t ct; 4101 stmf_worker_t *w, *w1; 4102 uint8_t tm; 4103 4104 if (task->task_max_nbufs > 4) 4105 task->task_max_nbufs = 4; 4106 task->task_cur_nbufs = 0; 4107 /* Latest value of currently running tasks */ 4108 ct = atomic_add_32_nv(&stmf_cur_ntasks, 1); 4109 4110 /* Select the next worker using round robin */ 4111 nv = (int)atomic_add_32_nv((uint32_t *)&stmf_worker_sel_counter, 1); 4112 if (nv >= stmf_nworkers_accepting_cmds) { 4113 int s = nv; 4114 do { 4115 nv -= stmf_nworkers_accepting_cmds; 4116 } while (nv >= stmf_nworkers_accepting_cmds); 4117 if (nv < 0) 4118 nv = 0; 4119 /* It's ok if this cas fails */ 4120 (void) atomic_cas_32((uint32_t *)&stmf_worker_sel_counter, 4121 s, nv); 4122 } 4123 w = &stmf_workers[nv]; 4124 4125 /* 4126 * A worker can be pinned by an interrupt, so select the next one 4127 * instead if it has a lower load. 4128 */ 4129 if ((nv + 1) >= stmf_nworkers_accepting_cmds) { 4130 w1 = stmf_workers; 4131 } else { 4132 w1 = &stmf_workers[nv + 1]; 4133 } 4134 if (w1->worker_queue_depth < w->worker_queue_depth) 4135 w = w1; 4136 4137 mutex_enter(&w->worker_lock); 4138 if (((w->worker_flags & STMF_WORKER_STARTED) == 0) || 4139 (w->worker_flags & STMF_WORKER_TERMINATE)) { 4140 /* 4141 * Maybe we are in the middle of a change. Just go to 4142 * the 1st worker. 4143 */ 4144 mutex_exit(&w->worker_lock); 4145 w = stmf_workers; 4146 mutex_enter(&w->worker_lock); 4147 } 4148 itask->itask_worker = w; 4149 /* 4150 * Track max system load inside the worker as we already have the 4151 * worker lock (no point implementing another lock). The service 4152 * thread will do the comparisons and figure out the max overall 4153 * system load.
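 * A hypothetical sketch of that service thread comparison (illustrative
 * only; max_qd is a made-up local, the other names are the ones used in
 * this file):
 *
 *	for (i = 0; i < stmf_nworkers_cur; i++)
 *		if (stmf_workers[i].worker_max_sys_qdepth_pu > max_qd)
 *			max_qd = stmf_workers[i].worker_max_sys_qdepth_pu;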
4154 */ 4155 if (w->worker_max_sys_qdepth_pu < ct) 4156 w->worker_max_sys_qdepth_pu = ct; 4157 4158 do { 4159 old = new = itask->itask_flags; 4160 new |= ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE; 4161 if (task->task_mgmt_function) { 4162 tm = task->task_mgmt_function; 4163 if ((tm == TM_TARGET_RESET) || 4164 (tm == TM_TARGET_COLD_RESET) || 4165 (tm == TM_TARGET_WARM_RESET)) { 4166 new |= ITASK_DEFAULT_HANDLING; 4167 } 4168 } else if (task->task_cdb[0] == SCMD_REPORT_LUNS) { 4169 new |= ITASK_DEFAULT_HANDLING; 4170 } 4171 new &= ~ITASK_IN_TRANSITION; 4172 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4173 itask->itask_worker_next = NULL; 4174 if (w->worker_task_tail) { 4175 w->worker_task_tail->itask_worker_next = itask; 4176 } else { 4177 w->worker_task_head = itask; 4178 } 4179 w->worker_task_tail = itask; 4180 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) { 4181 w->worker_max_qdepth_pu = w->worker_queue_depth; 4182 } 4183 atomic_add_32(&w->worker_ref_count, 1); 4184 itask->itask_cmd_stack[0] = ITASK_CMD_NEW_TASK; 4185 itask->itask_ncmds = 1; 4186 if (dbuf) { 4187 itask->itask_allocated_buf_map = 1; 4188 itask->itask_dbufs[0] = dbuf; 4189 dbuf->db_handle = 0; 4190 } else { 4191 itask->itask_allocated_buf_map = 0; 4192 itask->itask_dbufs[0] = NULL; 4193 } 4194 4195 stmf_update_kstat_lu_q(task, kstat_waitq_enter); 4196 stmf_update_kstat_lport_q(task, kstat_waitq_enter); 4197 4198 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 4199 cv_signal(&w->worker_cv); 4200 mutex_exit(&w->worker_lock); 4201 4202 /* 4203 * This can only happen if ILU_RESET_ACTIVE was set during stmf_task_alloc(), 4204 * between the check of ILU_RESET_ACTIVE and the clearing of the 4205 * ITASK_IN_FREE_LIST flag. Take care of these "sneaked-in" tasks here. 4206 */ 4207 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 4208 stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ABORTED, NULL); 4209 } 4210 } 4211 4212 /* 4213 * ++++++++++++++ ABORT LOGIC ++++++++++++++++++++ 4214 * Once ITASK_BEING_ABORTED is set, ITASK_KNOWN_TO_LU may already have been 4215 * cleared, i.e. cleared before ITASK_BEING_ABORTED was set. If it was not, 4216 * it cannot be cleared until the LU explicitly calls stmf_task_lu_aborted(). 4217 * Of course the LU will make this call only if we call the LU's abort entry 4218 * point, and we will only call that entry point if ITASK_KNOWN_TO_LU was set. 4219 * 4220 * Same logic applies for the port. 4221 * 4222 * Also ITASK_BEING_ABORTED will not be allowed to be set if both KNOWN_TO_LU 4223 * and KNOWN_TO_TGT_PORT are reset.
4224 * 4225 * +++++++++++++++++++++++++++++++++++++++++++++++ 4226 */ 4227 4228 stmf_status_t 4229 stmf_xfer_data(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t ioflags) 4230 { 4231 stmf_status_t ret; 4232 4233 stmf_i_scsi_task_t *itask = 4234 (stmf_i_scsi_task_t *)task->task_stmf_private; 4235 4236 if (ioflags & STMF_IOF_LU_DONE) { 4237 uint32_t new, old; 4238 do { 4239 new = old = itask->itask_flags; 4240 if (new & ITASK_BEING_ABORTED) 4241 return (STMF_ABORTED); 4242 new &= ~ITASK_KNOWN_TO_LU; 4243 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4244 } 4245 if (itask->itask_flags & ITASK_BEING_ABORTED) 4246 return (STMF_ABORTED); 4247 #ifdef DEBUG 4248 if (stmf_drop_buf_counter > 0) { 4249 if (atomic_add_32_nv((uint32_t *)&stmf_drop_buf_counter, -1) == 4250 1) 4251 return (STMF_SUCCESS); 4252 } 4253 #endif 4254 4255 stmf_update_kstat_lu_io(task, dbuf); 4256 stmf_update_kstat_lport_io(task, dbuf); 4257 4258 DTRACE_PROBE2(scsi__xfer__start, scsi_task_t *, task, 4259 stmf_data_buf_t *, dbuf); 4260 ret = task->task_lport->lport_xfer_data(task, dbuf, ioflags); 4261 DTRACE_PROBE2(scsi__xfer__end, scsi_task_t *, task, 4262 stmf_data_buf_t *, dbuf); 4263 return (ret); 4264 } 4265 4266 void 4267 stmf_data_xfer_done(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t iof) 4268 { 4269 stmf_i_scsi_task_t *itask = 4270 (stmf_i_scsi_task_t *)task->task_stmf_private; 4271 stmf_worker_t *w = itask->itask_worker; 4272 uint32_t new, old; 4273 uint8_t update_queue_flags, free_it, queue_it, kstat_it; 4274 4275 mutex_enter(&w->worker_lock); 4276 do { 4277 new = old = itask->itask_flags; 4278 if (old & ITASK_BEING_ABORTED) { 4279 mutex_exit(&w->worker_lock); 4280 return; 4281 } 4282 free_it = 0; 4283 kstat_it = 0; 4284 if (iof & STMF_IOF_LPORT_DONE) { 4285 new &= ~ITASK_KNOWN_TO_TGT_PORT; 4286 task->task_completion_status = dbuf->db_xfer_status; 4287 free_it = 1; 4288 kstat_it = 1; 4289 } 4290 /* 4291 * If the task is known to LU then queue it. But if 4292 * it is already queued (multiple completions) then 4293 * just update the buffer information by grabbing the 4294 * worker lock. If the task is not known to LU, 4295 * completed/aborted, then see if we need to 4296 * free this task. 
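 * In short, the cases handled below are (summary only):
 *
 *	KNOWN_TO_LU set, not queued	-> queue the task, record the buffer
 *	KNOWN_TO_LU set, already queued	-> record the buffer only
 *	KNOWN_TO_LU clear		-> candidate for freeing, once no
 *					   other flags reference the task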
4297 */ 4298 if (old & ITASK_KNOWN_TO_LU) { 4299 free_it = 0; 4300 update_queue_flags = 1; 4301 if (old & ITASK_IN_WORKER_QUEUE) { 4302 queue_it = 0; 4303 } else { 4304 queue_it = 1; 4305 new |= ITASK_IN_WORKER_QUEUE; 4306 } 4307 } else { 4308 update_queue_flags = 0; 4309 queue_it = 0; 4310 } 4311 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4312 4313 if (kstat_it) { 4314 stmf_update_kstat_lu_q(task, kstat_runq_exit); 4315 stmf_update_kstat_lport_q(task, kstat_runq_exit); 4316 } 4317 if (update_queue_flags) { 4318 uint8_t cmd = (dbuf->db_handle << 5) | ITASK_CMD_DATA_XFER_DONE; 4319 4320 ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS); 4321 itask->itask_cmd_stack[itask->itask_ncmds++] = cmd; 4322 if (queue_it) { 4323 itask->itask_worker_next = NULL; 4324 if (w->worker_task_tail) { 4325 w->worker_task_tail->itask_worker_next = itask; 4326 } else { 4327 w->worker_task_head = itask; 4328 } 4329 w->worker_task_tail = itask; 4330 if (++(w->worker_queue_depth) > 4331 w->worker_max_qdepth_pu) { 4332 w->worker_max_qdepth_pu = w->worker_queue_depth; 4333 } 4334 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 4335 cv_signal(&w->worker_cv); 4336 } 4337 } 4338 mutex_exit(&w->worker_lock); 4339 4340 if (free_it) { 4341 if ((itask->itask_flags & (ITASK_KNOWN_TO_LU | 4342 ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE | 4343 ITASK_BEING_ABORTED)) == 0) { 4344 stmf_task_free(task); 4345 } 4346 } 4347 } 4348 4349 stmf_status_t 4350 stmf_send_scsi_status(scsi_task_t *task, uint32_t ioflags) 4351 { 4352 DTRACE_PROBE1(scsi__send__status, scsi_task_t *, task); 4353 4354 stmf_i_scsi_task_t *itask = 4355 (stmf_i_scsi_task_t *)task->task_stmf_private; 4356 if (ioflags & STMF_IOF_LU_DONE) { 4357 uint32_t new, old; 4358 do { 4359 new = old = itask->itask_flags; 4360 if (new & ITASK_BEING_ABORTED) 4361 return (STMF_ABORTED); 4362 new &= ~ITASK_KNOWN_TO_LU; 4363 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4364 } 4365 4366 if (!(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT)) { 4367 return (STMF_SUCCESS); 4368 } 4369 4370 if (itask->itask_flags & ITASK_BEING_ABORTED) 4371 return (STMF_ABORTED); 4372 4373 if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) { 4374 task->task_status_ctrl = 0; 4375 task->task_resid = 0; 4376 } else if (task->task_cmd_xfer_length > 4377 task->task_expected_xfer_length) { 4378 task->task_status_ctrl = TASK_SCTRL_OVER; 4379 task->task_resid = task->task_cmd_xfer_length - 4380 task->task_expected_xfer_length; 4381 } else if (task->task_nbytes_transferred < 4382 task->task_expected_xfer_length) { 4383 task->task_status_ctrl = TASK_SCTRL_UNDER; 4384 task->task_resid = task->task_expected_xfer_length - 4385 task->task_nbytes_transferred; 4386 } else { 4387 task->task_status_ctrl = 0; 4388 task->task_resid = 0; 4389 } 4390 return (task->task_lport->lport_send_status(task, ioflags)); 4391 } 4392 4393 void 4394 stmf_send_status_done(scsi_task_t *task, stmf_status_t s, uint32_t iof) 4395 { 4396 stmf_i_scsi_task_t *itask = 4397 (stmf_i_scsi_task_t *)task->task_stmf_private; 4398 stmf_worker_t *w = itask->itask_worker; 4399 uint32_t new, old; 4400 uint8_t free_it, queue_it, kstat_it; 4401 4402 mutex_enter(&w->worker_lock); 4403 do { 4404 new = old = itask->itask_flags; 4405 if (old & ITASK_BEING_ABORTED) { 4406 mutex_exit(&w->worker_lock); 4407 return; 4408 } 4409 free_it = 0; 4410 kstat_it = 0; 4411 if (iof & STMF_IOF_LPORT_DONE) { 4412 new &= ~ITASK_KNOWN_TO_TGT_PORT; 4413 free_it = 1; 4414 kstat_it = 1; 4415 } 4416 /* 4417 * If the task is known to 
LU then queue it. But if 4418 * it is already queued (multiple completions) then 4419 * just update the buffer information by grabbing the 4420 * worker lock. If the task is not known to LU, 4421 * completed/aborted, then see if we need to 4422 * free this task. 4423 */ 4424 if (old & ITASK_KNOWN_TO_LU) { 4425 free_it = 0; 4426 queue_it = 1; 4427 if (old & ITASK_IN_WORKER_QUEUE) { 4428 cmn_err(CE_PANIC, "status completion received" 4429 " when task is already in worker queue " 4430 " task = %p", (void *)task); 4431 } 4432 new |= ITASK_IN_WORKER_QUEUE; 4433 } else { 4434 queue_it = 0; 4435 } 4436 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4437 task->task_completion_status = s; 4438 4439 if (kstat_it) { 4440 stmf_update_kstat_lu_q(task, kstat_runq_exit); 4441 stmf_update_kstat_lport_q(task, kstat_runq_exit); 4442 } 4443 4444 if (queue_it) { 4445 ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS); 4446 itask->itask_cmd_stack[itask->itask_ncmds++] = 4447 ITASK_CMD_STATUS_DONE; 4448 itask->itask_worker_next = NULL; 4449 if (w->worker_task_tail) { 4450 w->worker_task_tail->itask_worker_next = itask; 4451 } else { 4452 w->worker_task_head = itask; 4453 } 4454 w->worker_task_tail = itask; 4455 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) { 4456 w->worker_max_qdepth_pu = w->worker_queue_depth; 4457 } 4458 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 4459 cv_signal(&w->worker_cv); 4460 } 4461 mutex_exit(&w->worker_lock); 4462 4463 if (free_it) { 4464 if ((itask->itask_flags & (ITASK_KNOWN_TO_LU | 4465 ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE | 4466 ITASK_BEING_ABORTED)) == 0) { 4467 stmf_task_free(task); 4468 } else { 4469 cmn_err(CE_PANIC, "LU is done with the task but LPORT " 4470 " is not done, itask %p itask_flags %x", 4471 (void *)itask, itask->itask_flags); 4472 } 4473 } 4474 } 4475 4476 void 4477 stmf_task_lu_done(scsi_task_t *task) 4478 { 4479 stmf_i_scsi_task_t *itask = 4480 (stmf_i_scsi_task_t *)task->task_stmf_private; 4481 stmf_worker_t *w = itask->itask_worker; 4482 uint32_t new, old; 4483 4484 mutex_enter(&w->worker_lock); 4485 do { 4486 new = old = itask->itask_flags; 4487 if (old & ITASK_BEING_ABORTED) { 4488 mutex_exit(&w->worker_lock); 4489 return; 4490 } 4491 if (old & ITASK_IN_WORKER_QUEUE) { 4492 cmn_err(CE_PANIC, "task_lu_done received" 4493 " when task is in worker queue " 4494 " task = %p", (void *)task); 4495 } 4496 new &= ~ITASK_KNOWN_TO_LU; 4497 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4498 4499 mutex_exit(&w->worker_lock); 4500 4501 if ((itask->itask_flags & (ITASK_KNOWN_TO_LU | 4502 ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE | 4503 ITASK_BEING_ABORTED)) == 0) { 4504 stmf_task_free(task); 4505 } else { 4506 cmn_err(CE_PANIC, "stmf_lu_done should be the last stage but " 4507 " the task is still not done, task = %p", (void *)task); 4508 } 4509 } 4510 4511 void 4512 stmf_queue_task_for_abort(scsi_task_t *task, stmf_status_t s) 4513 { 4514 stmf_i_scsi_task_t *itask = 4515 (stmf_i_scsi_task_t *)task->task_stmf_private; 4516 stmf_worker_t *w; 4517 uint32_t old, new; 4518 4519 do { 4520 old = new = itask->itask_flags; 4521 if ((old & ITASK_BEING_ABORTED) || 4522 ((old & (ITASK_KNOWN_TO_TGT_PORT | 4523 ITASK_KNOWN_TO_LU)) == 0)) { 4524 return; 4525 } 4526 new |= ITASK_BEING_ABORTED; 4527 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4528 task->task_completion_status = s; 4529 itask->itask_start_time = ddi_get_lbolt(); 4530 4531 if (((w = itask->itask_worker) == NULL) || 4532 (itask->itask_flags & 
ITASK_IN_TRANSITION)) { 4533 return; 4534 } 4535 4536 /* Queue it and get out */ 4537 mutex_enter(&w->worker_lock); 4538 if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) { 4539 mutex_exit(&w->worker_lock); 4540 return; 4541 } 4542 atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE); 4543 itask->itask_worker_next = NULL; 4544 if (w->worker_task_tail) { 4545 w->worker_task_tail->itask_worker_next = itask; 4546 } else { 4547 w->worker_task_head = itask; 4548 } 4549 w->worker_task_tail = itask; 4550 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) { 4551 w->worker_max_qdepth_pu = w->worker_queue_depth; 4552 } 4553 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 4554 cv_signal(&w->worker_cv); 4555 mutex_exit(&w->worker_lock); 4556 } 4557 4558 void 4559 stmf_abort(int abort_cmd, scsi_task_t *task, stmf_status_t s, void *arg) 4560 { 4561 stmf_i_scsi_task_t *itask = NULL; 4562 uint32_t old, new, f, rf; 4563 4564 DTRACE_PROBE2(scsi__task__abort, scsi_task_t *, task, 4565 stmf_status_t, s); 4566 4567 switch (abort_cmd) { 4568 case STMF_QUEUE_ABORT_LU: 4569 stmf_task_lu_killall((stmf_lu_t *)arg, task, s); 4570 return; 4571 case STMF_QUEUE_TASK_ABORT: 4572 stmf_queue_task_for_abort(task, s); 4573 return; 4574 case STMF_REQUEUE_TASK_ABORT_LPORT: 4575 rf = ITASK_TGT_PORT_ABORT_CALLED; 4576 f = ITASK_KNOWN_TO_TGT_PORT; 4577 break; 4578 case STMF_REQUEUE_TASK_ABORT_LU: 4579 rf = ITASK_LU_ABORT_CALLED; 4580 f = ITASK_KNOWN_TO_LU; 4581 break; 4582 default: 4583 return; 4584 } 4585 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 4586 f |= ITASK_BEING_ABORTED | rf; 4587 do { 4588 old = new = itask->itask_flags; 4589 if ((old & f) != f) { 4590 return; 4591 } 4592 new &= ~rf; 4593 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4594 } 4595 4596 void 4597 stmf_task_lu_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof) 4598 { 4599 char info[STMF_CHANGE_INFO_LEN]; 4600 stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task); 4601 unsigned long long st; 4602 4603 st = s; /* gcc fix */ 4604 if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) { 4605 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 4606 "task %p, lu failed to abort ret=%llx", (void *)task, st); 4607 } else if ((iof & STMF_IOF_LU_DONE) == 0) { 4608 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 4609 "Task aborted but LU is not finished, task =" 4610 "%p, s=%llx, iof=%x", (void *)task, st, iof); 4611 } else { 4612 /* 4613 * LU aborted successfully 4614 */ 4615 atomic_and_32(&itask->itask_flags, ~ITASK_KNOWN_TO_LU); 4616 return; 4617 } 4618 4619 info[STMF_CHANGE_INFO_LEN - 1] = 0; 4620 stmf_abort_task_offline(task, 1, info); 4621 } 4622 4623 void 4624 stmf_task_lport_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof) 4625 { 4626 char info[STMF_CHANGE_INFO_LEN]; 4627 stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task); 4628 unsigned long long st; 4629 uint32_t old, new; 4630 4631 st = s; 4632 if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) { 4633 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 4634 "task %p, tgt port failed to abort ret=%llx", (void *)task, 4635 st); 4636 } else if ((iof & STMF_IOF_LPORT_DONE) == 0) { 4637 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 4638 "Task aborted but tgt port is not finished, " 4639 "task=%p, s=%llx, iof=%x", (void *)task, st, iof); 4640 } else { 4641 /* 4642 * LPORT aborted successfully 4643 */ 4644 do { 4645 old = new = itask->itask_flags; 4646 if (!(old & ITASK_KNOWN_TO_TGT_PORT)) 4647 return; 4648 new &= ~ITASK_KNOWN_TO_TGT_PORT; 4649 } while (atomic_cas_32(&itask->itask_flags,
old, new) != old); 4650 4651 if (!(itask->itask_flags & ITASK_KSTAT_IN_RUNQ)) { 4652 stmf_update_kstat_lu_q(task, kstat_waitq_exit); 4653 stmf_update_kstat_lport_q(task, kstat_waitq_exit); 4654 } else { 4655 stmf_update_kstat_lu_q(task, kstat_runq_exit); 4656 stmf_update_kstat_lport_q(task, kstat_runq_exit); 4657 } 4658 return; 4659 } 4660 4661 info[STMF_CHANGE_INFO_LEN - 1] = 0; 4662 stmf_abort_task_offline(task, 0, info); 4663 } 4664 4665 stmf_status_t 4666 stmf_task_poll_lu(scsi_task_t *task, uint32_t timeout) 4667 { 4668 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *) 4669 task->task_stmf_private; 4670 stmf_worker_t *w = itask->itask_worker; 4671 int i; 4672 4673 ASSERT(itask->itask_flags & ITASK_KNOWN_TO_LU); 4674 mutex_enter(&w->worker_lock); 4675 if (itask->itask_ncmds >= ITASK_MAX_NCMDS) { 4676 mutex_exit(&w->worker_lock); 4677 return (STMF_BUSY); 4678 } 4679 for (i = 0; i < itask->itask_ncmds; i++) { 4680 if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LU) { 4681 mutex_exit(&w->worker_lock); 4682 return (STMF_SUCCESS); 4683 } 4684 } 4685 itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LU; 4686 if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) { 4687 itask->itask_poll_timeout = ddi_get_lbolt() + 1; 4688 } else { 4689 clock_t t = drv_usectohz(timeout * 1000); 4690 if (t == 0) 4691 t = 1; 4692 itask->itask_poll_timeout = ddi_get_lbolt() + t; 4693 } 4694 if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) { 4695 itask->itask_worker_next = NULL; 4696 if (w->worker_task_tail) { 4697 w->worker_task_tail->itask_worker_next = itask; 4698 } else { 4699 w->worker_task_head = itask; 4700 } 4701 w->worker_task_tail = itask; 4702 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) { 4703 w->worker_max_qdepth_pu = w->worker_queue_depth; 4704 } 4705 atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE); 4706 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 4707 cv_signal(&w->worker_cv); 4708 } 4709 mutex_exit(&w->worker_lock); 4710 return (STMF_SUCCESS); 4711 } 4712 4713 stmf_status_t 4714 stmf_task_poll_lport(scsi_task_t *task, uint32_t timeout) 4715 { 4716 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *) 4717 task->task_stmf_private; 4718 stmf_worker_t *w = itask->itask_worker; 4719 int i; 4720 4721 ASSERT(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT); 4722 mutex_enter(&w->worker_lock); 4723 if (itask->itask_ncmds >= ITASK_MAX_NCMDS) { 4724 mutex_exit(&w->worker_lock); 4725 return (STMF_BUSY); 4726 } 4727 for (i = 0; i < itask->itask_ncmds; i++) { 4728 if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LPORT) { 4729 mutex_exit(&w->worker_lock); 4730 return (STMF_SUCCESS); 4731 } 4732 } 4733 itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LPORT; 4734 if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) { 4735 itask->itask_poll_timeout = ddi_get_lbolt() + 1; 4736 } else { 4737 clock_t t = drv_usectohz(timeout * 1000); 4738 if (t == 0) 4739 t = 1; 4740 itask->itask_poll_timeout = ddi_get_lbolt() + t; 4741 } 4742 if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) { 4743 itask->itask_worker_next = NULL; 4744 if (w->worker_task_tail) { 4745 w->worker_task_tail->itask_worker_next = itask; 4746 } else { 4747 w->worker_task_head = itask; 4748 } 4749 w->worker_task_tail = itask; 4750 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) { 4751 w->worker_max_qdepth_pu = w->worker_queue_depth; 4752 } 4753 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 4754 cv_signal(&w->worker_cv); 4755 } 4756 mutex_exit(&w->worker_lock); 4757 return (STMF_SUCCESS); 4758 } 4759 4760 void 
4761 stmf_do_task_abort(scsi_task_t *task) 4762 { 4763 stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task); 4764 stmf_lu_t *lu; 4765 stmf_local_port_t *lport; 4766 unsigned long long ret; 4767 uint32_t old, new; 4768 uint8_t call_lu_abort, call_port_abort; 4769 char info[STMF_CHANGE_INFO_LEN]; 4770 4771 lu = task->task_lu; 4772 lport = task->task_lport; 4773 do { 4774 old = new = itask->itask_flags; 4775 if ((old & (ITASK_KNOWN_TO_LU | ITASK_LU_ABORT_CALLED)) == 4776 ITASK_KNOWN_TO_LU) { 4777 new |= ITASK_LU_ABORT_CALLED; 4778 call_lu_abort = 1; 4779 } else { 4780 call_lu_abort = 0; 4781 } 4782 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4783 4784 if (call_lu_abort) { 4785 if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0) { 4786 ret = lu->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0); 4787 } else { 4788 ret = dlun0->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0); 4789 } 4790 if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) { 4791 stmf_task_lu_aborted(task, ret, STMF_IOF_LU_DONE); 4792 } else if (ret == STMF_BUSY) { 4793 atomic_and_32(&itask->itask_flags, 4794 ~ITASK_LU_ABORT_CALLED); 4795 } else if (ret != STMF_SUCCESS) { 4796 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 4797 "Abort failed by LU %p, ret %llx", (void *)lu, ret); 4798 info[STMF_CHANGE_INFO_LEN - 1] = 0; 4799 stmf_abort_task_offline(task, 1, info); 4800 } 4801 } else if (itask->itask_flags & ITASK_KNOWN_TO_LU) { 4802 if (ddi_get_lbolt() > (itask->itask_start_time + 4803 STMF_SEC2TICK(lu->lu_abort_timeout? 4804 lu->lu_abort_timeout : ITASK_DEFAULT_ABORT_TIMEOUT))) { 4805 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 4806 "lu abort timed out"); 4807 info[STMF_CHANGE_INFO_LEN - 1] = 0; 4808 stmf_abort_task_offline(itask->itask_task, 1, info); 4809 } 4810 } 4811 4812 do { 4813 old = new = itask->itask_flags; 4814 if ((old & (ITASK_KNOWN_TO_TGT_PORT | 4815 ITASK_TGT_PORT_ABORT_CALLED)) == ITASK_KNOWN_TO_TGT_PORT) { 4816 new |= ITASK_TGT_PORT_ABORT_CALLED; 4817 call_port_abort = 1; 4818 } else { 4819 call_port_abort = 0; 4820 } 4821 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4822 if (call_port_abort) { 4823 ret = lport->lport_abort(lport, STMF_LPORT_ABORT_TASK, task, 0); 4824 if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) { 4825 stmf_task_lport_aborted(task, ret, STMF_IOF_LPORT_DONE); 4826 } else if (ret == STMF_BUSY) { 4827 atomic_and_32(&itask->itask_flags, 4828 ~ITASK_TGT_PORT_ABORT_CALLED); 4829 } else if (ret != STMF_SUCCESS) { 4830 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 4831 "Abort failed by tgt port %p ret %llx", 4832 (void *)lport, ret); 4833 info[STMF_CHANGE_INFO_LEN - 1] = 0; 4834 stmf_abort_task_offline(task, 0, info); 4835 } 4836 } else if (itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT) { 4837 if (ddi_get_lbolt() > (itask->itask_start_time + 4838 STMF_SEC2TICK(lport->lport_abort_timeout? 
4839 lport->lport_abort_timeout : 4840 ITASK_DEFAULT_ABORT_TIMEOUT))) { 4841 (void) snprintf(info, STMF_CHANGE_INFO_LEN, 4842 "lport abort timed out"); 4843 info[STMF_CHANGE_INFO_LEN - 1] = 0; 4844 stmf_abort_task_offline(itask->itask_task, 0, info); 4845 } 4846 } 4847 } 4848 4849 stmf_status_t 4850 stmf_ctl(int cmd, void *obj, void *arg) 4851 { 4852 stmf_status_t ret; 4853 stmf_i_lu_t *ilu; 4854 stmf_i_local_port_t *ilport; 4855 stmf_state_change_info_t *ssci = (stmf_state_change_info_t *)arg; 4856 4857 mutex_enter(&stmf_state.stmf_lock); 4858 ret = STMF_INVALID_ARG; 4859 if (cmd & STMF_CMD_LU_OP) { 4860 ilu = stmf_lookup_lu((stmf_lu_t *)obj); 4861 if (ilu == NULL) { 4862 goto stmf_ctl_lock_exit; 4863 } 4864 DTRACE_PROBE3(lu__state__change, 4865 stmf_lu_t *, ilu->ilu_lu, 4866 int, cmd, stmf_state_change_info_t *, ssci); 4867 } else if (cmd & STMF_CMD_LPORT_OP) { 4868 ilport = stmf_lookup_lport((stmf_local_port_t *)obj); 4869 if (ilport == NULL) { 4870 goto stmf_ctl_lock_exit; 4871 } 4872 DTRACE_PROBE3(lport__state__change, 4873 stmf_local_port_t *, ilport->ilport_lport, 4874 int, cmd, stmf_state_change_info_t *, ssci); 4875 } else { 4876 goto stmf_ctl_lock_exit; 4877 } 4878 4879 switch (cmd) { 4880 case STMF_CMD_LU_ONLINE: 4881 switch (ilu->ilu_state) { 4882 case STMF_STATE_OFFLINE: 4883 ret = STMF_SUCCESS; 4884 break; 4885 case STMF_STATE_ONLINE: 4886 case STMF_STATE_ONLINING: 4887 ret = STMF_ALREADY; 4888 break; 4889 case STMF_STATE_OFFLINING: 4890 ret = STMF_BUSY; 4891 break; 4892 default: 4893 ret = STMF_BADSTATE; 4894 break; 4895 } 4896 if (ret != STMF_SUCCESS) 4897 goto stmf_ctl_lock_exit; 4898 4899 ilu->ilu_state = STMF_STATE_ONLINING; 4900 mutex_exit(&stmf_state.stmf_lock); 4901 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg); 4902 break; 4903 4904 case STMF_CMD_LU_ONLINE_COMPLETE: 4905 if (ilu->ilu_state != STMF_STATE_ONLINING) { 4906 ret = STMF_BADSTATE; 4907 goto stmf_ctl_lock_exit; 4908 } 4909 if (((stmf_change_status_t *)arg)->st_completion_status == 4910 STMF_SUCCESS) { 4911 ilu->ilu_state = STMF_STATE_ONLINE; 4912 mutex_exit(&stmf_state.stmf_lock); 4913 ((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj, 4914 STMF_ACK_LU_ONLINE_COMPLETE, arg); 4915 mutex_enter(&stmf_state.stmf_lock); 4916 stmf_add_lu_to_active_sessions((stmf_lu_t *)obj); 4917 } else { 4918 /* XXX: should log a message and record more data */ 4919 ilu->ilu_state = STMF_STATE_OFFLINE; 4920 } 4921 ret = STMF_SUCCESS; 4922 goto stmf_ctl_lock_exit; 4923 4924 case STMF_CMD_LU_OFFLINE: 4925 switch (ilu->ilu_state) { 4926 case STMF_STATE_ONLINE: 4927 ret = STMF_SUCCESS; 4928 break; 4929 case STMF_STATE_OFFLINE: 4930 case STMF_STATE_OFFLINING: 4931 ret = STMF_ALREADY; 4932 break; 4933 case STMF_STATE_ONLINING: 4934 ret = STMF_BUSY; 4935 break; 4936 default: 4937 ret = STMF_BADSTATE; 4938 break; 4939 } 4940 if (ret != STMF_SUCCESS) 4941 goto stmf_ctl_lock_exit; 4942 ilu->ilu_state = STMF_STATE_OFFLINING; 4943 mutex_exit(&stmf_state.stmf_lock); 4944 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg); 4945 break; 4946 4947 case STMF_CMD_LU_OFFLINE_COMPLETE: 4948 if (ilu->ilu_state != STMF_STATE_OFFLINING) { 4949 ret = STMF_BADSTATE; 4950 goto stmf_ctl_lock_exit; 4951 } 4952 if (((stmf_change_status_t *)arg)->st_completion_status == 4953 STMF_SUCCESS) { 4954 ilu->ilu_state = STMF_STATE_OFFLINE; 4955 mutex_exit(&stmf_state.stmf_lock); 4956 ((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj, 4957 STMF_ACK_LU_OFFLINE_COMPLETE, arg); 4958 mutex_enter(&stmf_state.stmf_lock); 4959 } else { 4960 ilu->ilu_state =
STMF_STATE_ONLINE; 4961 stmf_add_lu_to_active_sessions((stmf_lu_t *)obj); 4962 } 4963 mutex_exit(&stmf_state.stmf_lock); 4964 break; 4965 4966 /* 4967 * LPORT_ONLINE/OFFLINE has nothing to do with link offline/online. 4968 * It's related to hardware disable/enable. 4969 */ 4970 case STMF_CMD_LPORT_ONLINE: 4971 switch (ilport->ilport_state) { 4972 case STMF_STATE_OFFLINE: 4973 ret = STMF_SUCCESS; 4974 break; 4975 case STMF_STATE_ONLINE: 4976 case STMF_STATE_ONLINING: 4977 ret = STMF_ALREADY; 4978 break; 4979 case STMF_STATE_OFFLINING: 4980 ret = STMF_BUSY; 4981 break; 4982 default: 4983 ret = STMF_BADSTATE; 4984 break; 4985 } 4986 if (ret != STMF_SUCCESS) 4987 goto stmf_ctl_lock_exit; 4988 4989 /* 4990 * Only a user request can recover the port from the 4991 * FORCED_OFFLINE state 4992 */ 4993 if (ilport->ilport_flags & ILPORT_FORCED_OFFLINE) { 4994 if (!(ssci->st_rflags & STMF_RFLAG_USER_REQUEST)) { 4995 ret = STMF_FAILURE; 4996 goto stmf_ctl_lock_exit; 4997 } 4998 } 4999 5000 /* 5001 * Avoid too frequent requests to online 5002 */ 5003 if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) { 5004 ilport->ilport_online_times = 0; 5005 ilport->ilport_avg_interval = 0; 5006 } 5007 if ((ilport->ilport_avg_interval < STMF_AVG_ONLINE_INTERVAL) && 5008 (ilport->ilport_online_times >= 4)) { 5009 ret = STMF_FAILURE; 5010 ilport->ilport_flags |= ILPORT_FORCED_OFFLINE; 5011 stmf_trace(NULL, "stmf_ctl: too frequent request to " 5012 "online the port"); 5013 cmn_err(CE_WARN, "stmf_ctl: too frequent request to " 5014 "online the port, set FORCED_OFFLINE now"); 5015 goto stmf_ctl_lock_exit; 5016 } 5017 if (ilport->ilport_online_times > 0) { 5018 if (ilport->ilport_online_times == 1) { 5019 ilport->ilport_avg_interval = ddi_get_lbolt() - 5020 ilport->ilport_last_online_clock; 5021 } else { 5022 ilport->ilport_avg_interval = 5023 (ilport->ilport_avg_interval + 5024 ddi_get_lbolt() - 5025 ilport->ilport_last_online_clock) >> 1; 5026 } 5027 } 5028 ilport->ilport_last_online_clock = ddi_get_lbolt(); 5029 ilport->ilport_online_times++; 5030 5031 /* 5032 * Submit online service request 5033 */ 5034 ilport->ilport_flags &= ~ILPORT_FORCED_OFFLINE; 5035 ilport->ilport_state = STMF_STATE_ONLINING; 5036 mutex_exit(&stmf_state.stmf_lock); 5037 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg); 5038 break; 5039 5040 case STMF_CMD_LPORT_ONLINE_COMPLETE: 5041 if (ilport->ilport_state != STMF_STATE_ONLINING) { 5042 ret = STMF_BADSTATE; 5043 goto stmf_ctl_lock_exit; 5044 } 5045 if (((stmf_change_status_t *)arg)->st_completion_status == 5046 STMF_SUCCESS) { 5047 ilport->ilport_state = STMF_STATE_ONLINE; 5048 mutex_exit(&stmf_state.stmf_lock); 5049 ((stmf_local_port_t *)obj)->lport_ctl( 5050 (stmf_local_port_t *)obj, 5051 STMF_ACK_LPORT_ONLINE_COMPLETE, arg); 5052 mutex_enter(&stmf_state.stmf_lock); 5053 } else { 5054 ilport->ilport_state = STMF_STATE_OFFLINE; 5055 } 5056 ret = STMF_SUCCESS; 5057 goto stmf_ctl_lock_exit; 5058 5059 case STMF_CMD_LPORT_OFFLINE: 5060 switch (ilport->ilport_state) { 5061 case STMF_STATE_ONLINE: 5062 ret = STMF_SUCCESS; 5063 break; 5064 case STMF_STATE_OFFLINE: 5065 case STMF_STATE_OFFLINING: 5066 ret = STMF_ALREADY; 5067 break; 5068 case STMF_STATE_ONLINING: 5069 ret = STMF_BUSY; 5070 break; 5071 default: 5072 ret = STMF_BADSTATE; 5073 break; 5074 } 5075 if (ret != STMF_SUCCESS) 5076 goto stmf_ctl_lock_exit; 5077 5078 ilport->ilport_state = STMF_STATE_OFFLINING; 5079 mutex_exit(&stmf_state.stmf_lock); 5080 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg); 5081 break; 5082 5083 case
STMF_CMD_LPORT_OFFLINE_COMPLETE: 5084 if (ilport->ilport_state != STMF_STATE_OFFLINING) { 5085 ret = STMF_BADSTATE; 5086 goto stmf_ctl_lock_exit; 5087 } 5088 if (((stmf_change_status_t *)arg)->st_completion_status == 5089 STMF_SUCCESS) { 5090 ilport->ilport_state = STMF_STATE_OFFLINE; 5091 mutex_exit(&stmf_state.stmf_lock); 5092 ((stmf_local_port_t *)obj)->lport_ctl( 5093 (stmf_local_port_t *)obj, 5094 STMF_ACK_LPORT_OFFLINE_COMPLETE, arg); 5095 mutex_enter(&stmf_state.stmf_lock); 5096 } else { 5097 ilport->ilport_state = STMF_STATE_ONLINE; 5098 } 5099 mutex_exit(&stmf_state.stmf_lock); 5100 break; 5101 5102 default: 5103 cmn_err(CE_WARN, "Invalid ctl cmd received %x", cmd); 5104 ret = STMF_INVALID_ARG; 5105 goto stmf_ctl_lock_exit; 5106 } 5107 5108 return (STMF_SUCCESS); 5109 5110 stmf_ctl_lock_exit:; 5111 mutex_exit(&stmf_state.stmf_lock); 5112 return (ret); 5113 } 5114 5115 /* ARGSUSED */ 5116 stmf_status_t 5117 stmf_info_impl(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf, 5118 uint32_t *bufsizep) 5119 { 5120 return (STMF_NOT_SUPPORTED); 5121 } 5122 5123 /* ARGSUSED */ 5124 stmf_status_t 5125 stmf_info(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf, 5126 uint32_t *bufsizep) 5127 { 5128 uint32_t cl = SI_GET_CLASS(cmd); 5129 5130 if (cl == SI_STMF) { 5131 return (stmf_info_impl(cmd, arg1, arg2, buf, bufsizep)); 5132 } 5133 if (cl == SI_LPORT) { 5134 return (((stmf_local_port_t *)arg1)->lport_info(cmd, arg1, 5135 arg2, buf, bufsizep)); 5136 } else if (cl == SI_LU) { 5137 return (((stmf_lu_t *)arg1)->lu_info(cmd, arg1, arg2, buf, 5138 bufsizep)); 5139 } 5140 5141 return (STMF_NOT_SUPPORTED); 5142 } 5143 5144 /* 5145 * Used by port providers. pwwn is 8 byte wwn, sdid is the devid used by 5146 * stmf to register local ports. The ident should have 20 bytes in buffer 5147 * space to convert the wwn to "wwn.xxxxxxxxxxxxxxxx" string. 
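 * Caller-side sketch (illustrative only, using the same allocation
 * pattern as stmf_scsilib_get_devid_desc() below; protocol_id is the
 * caller's choice):
 *
 *	sdid = kmem_zalloc(sizeof (scsi_devid_desc_t) - 1 + 20, KM_SLEEP);
 *	stmf_wwn_to_devid_desc(sdid, wwn, protocol_id);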
5148 */ 5149 void 5150 stmf_wwn_to_devid_desc(scsi_devid_desc_t *sdid, uint8_t *wwn, 5151 uint8_t protocol_id) 5152 { 5153 char wwn_str[20+1]; 5154 5155 sdid->protocol_id = protocol_id; 5156 sdid->piv = 1; 5157 sdid->code_set = CODE_SET_ASCII; 5158 sdid->association = ID_IS_TARGET_PORT; 5159 sdid->ident_length = 20; 5160 /* Convert wwn value to "wwn.XXXXXXXXXXXXXXXX" format */ 5161 (void) snprintf(wwn_str, sizeof (wwn_str), 5162 "wwn.%02X%02X%02X%02X%02X%02X%02X%02X", 5163 wwn[0], wwn[1], wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7]); 5164 bcopy(wwn_str, (char *)sdid->ident, 20); 5165 } 5166 5167 5168 stmf_xfer_data_t * 5169 stmf_prepare_tpgs_data(uint8_t ilu_alua) 5170 { 5171 stmf_xfer_data_t *xd; 5172 stmf_i_local_port_t *ilport; 5173 uint8_t *p; 5174 uint32_t sz, asz, nports = 0, nports_standby = 0; 5175 5176 mutex_enter(&stmf_state.stmf_lock); 5177 /* check if any ports are standby and create second group */ 5178 for (ilport = stmf_state.stmf_ilportlist; ilport; 5179 ilport = ilport->ilport_next) { 5180 if (ilport->ilport_standby == 1) { 5181 nports_standby++; 5182 } else { 5183 nports++; 5184 } 5185 } 5186 5187 /* The spec only allows for 255 ports to be reported per group */ 5188 nports = min(nports, 255); 5189 nports_standby = min(nports_standby, 255); 5190 sz = (nports * 4) + 12; 5191 if (nports_standby && ilu_alua) { 5192 sz += (nports_standby * 4) + 8; 5193 } 5194 asz = sz + sizeof (*xd) - 4; 5195 xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP); 5196 if (xd == NULL) { 5197 mutex_exit(&stmf_state.stmf_lock); 5198 return (NULL); 5199 } 5200 xd->alloc_size = asz; 5201 xd->size_left = sz; 5202 5203 p = xd->buf; 5204 5205 *((uint32_t *)p) = BE_32(sz - 4); 5206 p += 4; 5207 p[0] = 0x80; /* PREF */ 5208 p[1] = 5; /* AO_SUP, S_SUP */ 5209 if (stmf_state.stmf_alua_node == 1) { 5210 p[3] = 1; /* Group 1 */ 5211 } else { 5212 p[3] = 0; /* Group 0 */ 5213 } 5214 p[7] = nports & 0xff; 5215 p += 8; 5216 for (ilport = stmf_state.stmf_ilportlist; ilport; 5217 ilport = ilport->ilport_next) { 5218 if (ilport->ilport_standby == 1) { 5219 continue; 5220 } 5221 ((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid); 5222 p += 4; 5223 } 5224 if (nports_standby && ilu_alua) { 5225 p[0] = 0x02; /* Non PREF, Standby */ 5226 p[1] = 5; /* AO_SUP, S_SUP */ 5227 if (stmf_state.stmf_alua_node == 1) { 5228 p[3] = 0; /* Group 0 */ 5229 } else { 5230 p[3] = 1; /* Group 1 */ 5231 } 5232 p[7] = nports_standby & 0xff; 5233 p += 8; 5234 for (ilport = stmf_state.stmf_ilportlist; ilport; 5235 ilport = ilport->ilport_next) { 5236 if (ilport->ilport_standby == 0) { 5237 continue; 5238 } 5239 ((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid); 5240 p += 4; 5241 } 5242 } 5243 5244 mutex_exit(&stmf_state.stmf_lock); 5245 5246 return (xd); 5247 } 5248 5249 struct scsi_devid_desc * 5250 stmf_scsilib_get_devid_desc(uint16_t rtpid) 5251 { 5252 scsi_devid_desc_t *devid = NULL; 5253 stmf_i_local_port_t *ilport; 5254 5255 mutex_enter(&stmf_state.stmf_lock); 5256 5257 for (ilport = stmf_state.stmf_ilportlist; ilport; 5258 ilport = ilport->ilport_next) { 5259 if (ilport->ilport_rtpid == rtpid) { 5260 scsi_devid_desc_t *id = ilport->ilport_lport->lport_id; 5261 uint32_t id_sz = sizeof (scsi_devid_desc_t) - 1 + 5262 id->ident_length; 5263 devid = (scsi_devid_desc_t *)kmem_zalloc(id_sz, 5264 KM_NOSLEEP); 5265 if (devid != NULL) { 5266 bcopy(id, devid, id_sz); 5267 } 5268 break; 5269 } 5270 } 5271 5272 mutex_exit(&stmf_state.stmf_lock); 5273 return (devid); 5274 } 5275 5276 uint16_t 5277 stmf_scsilib_get_lport_rtid(struct 
scsi_devid_desc *devid) 5278 { 5279 stmf_i_local_port_t *ilport; 5280 scsi_devid_desc_t *id; 5281 uint16_t rtpid = 0; 5282 5283 mutex_enter(&stmf_state.stmf_lock); 5284 for (ilport = stmf_state.stmf_ilportlist; ilport; 5285 ilport = ilport->ilport_next) { 5286 id = ilport->ilport_lport->lport_id; 5287 if ((devid->ident_length == id->ident_length) && 5288 (memcmp(devid->ident, id->ident, id->ident_length) == 0)) { 5289 rtpid = ilport->ilport_rtpid; 5290 break; 5291 } 5292 } 5293 mutex_exit(&stmf_state.stmf_lock); 5294 return (rtpid); 5295 } 5296 5297 static uint16_t stmf_lu_id_gen_number = 0; 5298 5299 stmf_status_t 5300 stmf_scsilib_uniq_lu_id(uint32_t company_id, scsi_devid_desc_t *lu_id) 5301 { 5302 return (stmf_scsilib_uniq_lu_id2(company_id, 0, lu_id)); 5303 } 5304 5305 stmf_status_t 5306 stmf_scsilib_uniq_lu_id2(uint32_t company_id, uint32_t host_id, 5307 scsi_devid_desc_t *lu_id) 5308 { 5309 uint8_t *p; 5310 struct timeval32 timestamp32; 5311 uint32_t *t = (uint32_t *)&timestamp32; 5312 struct ether_addr mac; 5313 uint8_t *e = (uint8_t *)&mac; 5314 int hid = (int)host_id; 5315 5316 if (company_id == COMPANY_ID_NONE) 5317 company_id = COMPANY_ID_SUN; 5318 5319 if (lu_id->ident_length != 0x10) 5320 return (STMF_INVALID_ARG); 5321 5322 p = (uint8_t *)lu_id; 5323 5324 atomic_add_16(&stmf_lu_id_gen_number, 1); 5325 5326 p[0] = 0xf1; p[1] = 3; p[2] = 0; p[3] = 0x10; 5327 p[4] = ((company_id >> 20) & 0xf) | 0x60; 5328 p[5] = (company_id >> 12) & 0xff; 5329 p[6] = (company_id >> 4) & 0xff; 5330 p[7] = (company_id << 4) & 0xf0; 5331 if (hid == 0 && !localetheraddr((struct ether_addr *)NULL, &mac)) { 5332 hid = BE_32((int)zone_get_hostid(NULL)); 5333 } 5334 if (hid != 0) { 5335 e[0] = (hid >> 24) & 0xff; 5336 e[1] = (hid >> 16) & 0xff; 5337 e[2] = (hid >> 8) & 0xff; 5338 e[3] = hid & 0xff; 5339 e[4] = e[5] = 0; 5340 } 5341 bcopy(e, p+8, 6); 5342 uniqtime32(&timestamp32); 5343 *t = BE_32(*t); 5344 bcopy(t, p+14, 4); 5345 p[18] = (stmf_lu_id_gen_number >> 8) & 0xff; 5346 p[19] = stmf_lu_id_gen_number & 0xff; 5347 5348 return (STMF_SUCCESS); 5349 } 5350 5351 /* 5352 * saa is sense key, ASC, ASCQ 5353 */ 5354 void 5355 stmf_scsilib_send_status(scsi_task_t *task, uint8_t st, uint32_t saa) 5356 { 5357 uint8_t sd[18]; 5358 task->task_scsi_status = st; 5359 if (st == 2) { 5360 bzero(sd, 18); 5361 sd[0] = 0x70; 5362 sd[2] = (saa >> 16) & 0xf; 5363 sd[7] = 10; 5364 sd[12] = (saa >> 8) & 0xff; 5365 sd[13] = saa & 0xff; 5366 task->task_sense_data = sd; 5367 task->task_sense_length = 18; 5368 } else { 5369 task->task_sense_data = NULL; 5370 task->task_sense_length = 0; 5371 } 5372 (void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE); 5373 } 5374 5375 uint32_t 5376 stmf_scsilib_prepare_vpd_page83(scsi_task_t *task, uint8_t *page, 5377 uint32_t page_len, uint8_t byte0, uint32_t vpd_mask) 5378 { 5379 uint8_t *p = NULL; 5380 uint8_t small_buf[32]; 5381 uint32_t sz = 0; 5382 uint32_t n = 4; 5383 uint32_t m = 0; 5384 uint32_t last_bit = 0; 5385 5386 if (page_len < 4) 5387 return (0); 5388 if (page_len > 65535) 5389 page_len = 65535; 5390 5391 page[0] = byte0; 5392 page[1] = 0x83; 5393 5394 /* CONSTCOND */ 5395 while (1) { 5396 m += sz; 5397 if (sz && (page_len > n)) { 5398 uint32_t copysz; 5399 copysz = page_len > (n + sz) ?
sz : page_len - n; 5400 bcopy(p, page + n, copysz); 5401 n += copysz; 5402 } 5403 vpd_mask &= ~last_bit; 5404 if (vpd_mask == 0) 5405 break; 5406 5407 if (vpd_mask & STMF_VPD_LU_ID) { 5408 last_bit = STMF_VPD_LU_ID; 5409 sz = task->task_lu->lu_id->ident_length + 4; 5410 p = (uint8_t *)task->task_lu->lu_id; 5411 continue; 5412 } else if (vpd_mask & STMF_VPD_TARGET_ID) { 5413 last_bit = STMF_VPD_TARGET_ID; 5414 sz = task->task_lport->lport_id->ident_length + 4; 5415 p = (uint8_t *)task->task_lport->lport_id; 5416 continue; 5417 } else if (vpd_mask & STMF_VPD_TP_GROUP) { 5418 stmf_i_local_port_t *ilport; 5419 last_bit = STMF_VPD_TP_GROUP; 5420 p = small_buf; 5421 bzero(p, 8); 5422 p[0] = 1; 5423 p[1] = 0x15; 5424 p[3] = 4; 5425 ilport = (stmf_i_local_port_t *) 5426 task->task_lport->lport_stmf_private; 5427 /* 5428 * If we're in alua mode, group 1 contains all alua 5429 * participating ports and all standby ports with rtpid 5430 * > 255. In addition, any local ports (non standby/pppt) 5431 * are also in group 1 if this is alua node 1. Otherwise 5432 * the group is 0. 5433 */ 5434 if ((stmf_state.stmf_alua_state && 5435 (ilport->ilport_alua || ilport->ilport_standby) && 5436 ilport->ilport_rtpid > 255) || 5437 (stmf_state.stmf_alua_node == 1 && 5438 ilport->ilport_standby != 1)) { 5439 p[7] = 1; /* Group 1 */ 5440 } 5441 sz = 8; 5442 continue; 5443 } else if (vpd_mask & STMF_VPD_RELATIVE_TP_ID) { 5444 stmf_i_local_port_t *ilport; 5445 5446 last_bit = STMF_VPD_RELATIVE_TP_ID; 5447 p = small_buf; 5448 bzero(p, 8); 5449 p[0] = 1; 5450 p[1] = 0x14; 5451 p[3] = 4; 5452 ilport = (stmf_i_local_port_t *) 5453 task->task_lport->lport_stmf_private; 5454 p[6] = (ilport->ilport_rtpid >> 8) & 0xff; 5455 p[7] = ilport->ilport_rtpid & 0xff; 5456 sz = 8; 5457 continue; 5458 } else { 5459 cmn_err(CE_WARN, "Invalid vpd_mask"); 5460 break; 5461 } 5462 } 5463 5464 page[2] = (m >> 8) & 0xff; 5465 page[3] = m & 0xff; 5466 5467 return (n); 5468 } 5469 5470 void 5471 stmf_scsilib_handle_report_tpgs(scsi_task_t *task, stmf_data_buf_t *dbuf) 5472 { 5473 stmf_i_scsi_task_t *itask = 5474 (stmf_i_scsi_task_t *)task->task_stmf_private; 5475 stmf_i_lu_t *ilu = 5476 (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 5477 stmf_xfer_data_t *xd; 5478 uint32_t sz, minsz; 5479 5480 itask->itask_flags |= ITASK_DEFAULT_HANDLING; 5481 task->task_cmd_xfer_length = 5482 ((((uint32_t)task->task_cdb[6]) << 24) | 5483 (((uint32_t)task->task_cdb[7]) << 16) | 5484 (((uint32_t)task->task_cdb[8]) << 8) | 5485 ((uint32_t)task->task_cdb[9])); 5486 5487 if (task->task_additional_flags & 5488 TASK_AF_NO_EXPECTED_XFER_LENGTH) { 5489 task->task_expected_xfer_length = 5490 task->task_cmd_xfer_length; 5491 } 5492 5493 if (task->task_cmd_xfer_length == 0) { 5494 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 5495 return; 5496 } 5497 if (task->task_cmd_xfer_length < 4) { 5498 stmf_scsilib_send_status(task, STATUS_CHECK, 5499 STMF_SAA_INVALID_FIELD_IN_CDB); 5500 return; 5501 } 5502 5503 sz = min(task->task_expected_xfer_length, 5504 task->task_cmd_xfer_length); 5505 5506 xd = stmf_prepare_tpgs_data(ilu->ilu_alua); 5507 5508 if (xd == NULL) { 5509 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 5510 STMF_ALLOC_FAILURE, NULL); 5511 return; 5512 } 5513 5514 sz = min(sz, xd->size_left); 5515 xd->size_left = sz; 5516 minsz = min(512, sz); 5517 5518 if (dbuf == NULL) 5519 dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0); 5520 if (dbuf == NULL) { 5521 kmem_free(xd, xd->alloc_size); 5522 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 5523 STMF_ALLOC_FAILURE, NULL); 5524
return; 5525 } 5526 dbuf->db_lu_private = xd; 5527 stmf_xd_to_dbuf(dbuf); 5528 5529 dbuf->db_flags = DB_DIRECTION_TO_RPORT; 5530 (void) stmf_xfer_data(task, dbuf, 0); 5531 5532 } 5533 5534 void 5535 stmf_scsilib_handle_task_mgmt(scsi_task_t *task) 5536 { 5537 5538 switch (task->task_mgmt_function) { 5539 /* 5540 * For now we will abort all I/Os on the LU in case of ABORT_TASK_SET 5541 * and ABORT_TASK. But unlike LUN_RESET we will not reset LU state 5542 * in these cases. This needs to be changed to abort only the required 5543 * set. 5544 */ 5545 case TM_ABORT_TASK: 5546 case TM_ABORT_TASK_SET: 5547 case TM_CLEAR_TASK_SET: 5548 case TM_LUN_RESET: 5549 stmf_handle_lun_reset(task); 5550 /* issue the reset to the proxy node as well */ 5551 if (stmf_state.stmf_alua_state == 1) { 5552 (void) stmf_proxy_scsi_cmd(task, NULL); 5553 } 5554 return; 5555 case TM_TARGET_RESET: 5556 case TM_TARGET_COLD_RESET: 5557 case TM_TARGET_WARM_RESET: 5558 stmf_handle_target_reset(task); 5559 return; 5560 default: 5561 /* We don't support this task mgmt function */ 5562 stmf_scsilib_send_status(task, STATUS_CHECK, 5563 STMF_SAA_INVALID_FIELD_IN_CMD_IU); 5564 return; 5565 } 5566 } 5567 5568 void 5569 stmf_handle_lun_reset(scsi_task_t *task) 5570 { 5571 stmf_i_scsi_task_t *itask; 5572 stmf_i_lu_t *ilu; 5573 5574 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 5575 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 5576 5577 /* 5578 * To sync with target reset, grab this lock. The LU is not going 5579 * anywhere as there is at least one task pending (this task). 5580 */ 5581 mutex_enter(&stmf_state.stmf_lock); 5582 5583 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 5584 mutex_exit(&stmf_state.stmf_lock); 5585 stmf_scsilib_send_status(task, STATUS_CHECK, 5586 STMF_SAA_OPERATION_IN_PROGRESS); 5587 return; 5588 } 5589 atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE); 5590 mutex_exit(&stmf_state.stmf_lock); 5591 5592 /* 5593 * Mark this task as the one causing LU reset so that we know who 5594 * was responsible for setting the ILU_RESET_ACTIVE. In case this 5595 * task itself gets aborted, we will clear ILU_RESET_ACTIVE. 5596 */ 5597 itask->itask_flags |= ITASK_DEFAULT_HANDLING | ITASK_CAUSING_LU_RESET; 5598 5599 /* Initiate abort on all commands on this LU except this one */ 5600 stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED, task->task_lu); 5601 5602 /* Start polling on this task */ 5603 if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT) 5604 != STMF_SUCCESS) { 5605 stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE, 5606 NULL); 5607 return; 5608 } 5609 } 5610 5611 void 5612 stmf_handle_target_reset(scsi_task_t *task) 5613 { 5614 stmf_i_scsi_task_t *itask; 5615 stmf_i_lu_t *ilu; 5616 stmf_i_scsi_session_t *iss; 5617 stmf_lun_map_t *lm; 5618 stmf_lun_map_ent_t *lm_ent; 5619 int i, lf; 5620 5621 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 5622 iss = (stmf_i_scsi_session_t *)task->task_session->ss_stmf_private; 5623 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 5624 5625 /* 5626 * To sync with LUN reset, grab this lock. The session is not going 5627 * anywhere as there is at least one task pending (this task).
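 * In outline, the code below (summary only):
 *
 *	1. marks the session ISS_RESET_ACTIVE, failing if a reset is
 *	   already in progress;
 *	2. verifies that no LU mapped into this session is itself being
 *	   reset;
 *	3. marks every mapped LU ILU_RESET_ACTIVE;
 *	4. queues aborts for all tasks on those LUs; and
 *	5. polls this task until the aborts drain.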
5628 */ 5629 mutex_enter(&stmf_state.stmf_lock); 5630 5631 /* Grab the session lock as a writer to prevent any changes in it */ 5632 rw_enter(iss->iss_lockp, RW_WRITER); 5633 5634 if (iss->iss_flags & ISS_RESET_ACTIVE) { 5635 rw_exit(iss->iss_lockp); 5636 mutex_exit(&stmf_state.stmf_lock); 5637 stmf_scsilib_send_status(task, STATUS_CHECK, 5638 STMF_SAA_OPERATION_IN_PROGRESS); 5639 return; 5640 } 5641 atomic_or_32(&iss->iss_flags, ISS_RESET_ACTIVE); 5642 5643 /* 5644 * Now go through each LUN in this session and make sure all of them 5645 * can be reset. 5646 */ 5647 lm = iss->iss_sm; 5648 for (i = 0, lf = 0; i < lm->lm_nentries; i++) { 5649 if (lm->lm_plus[i] == NULL) 5650 continue; 5651 lf++; 5652 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 5653 ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private); 5654 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 5655 atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE); 5656 rw_exit(iss->iss_lockp); 5657 mutex_exit(&stmf_state.stmf_lock); 5658 stmf_scsilib_send_status(task, STATUS_CHECK, 5659 STMF_SAA_OPERATION_IN_PROGRESS); 5660 return; 5661 } 5662 } 5663 if (lf == 0) { 5664 /* No luns in this session */ 5665 atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE); 5666 rw_exit(iss->iss_lockp); 5667 mutex_exit(&stmf_state.stmf_lock); 5668 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 5669 return; 5670 } 5671 5672 /* ok, start the damage */ 5673 itask->itask_flags |= ITASK_DEFAULT_HANDLING | 5674 ITASK_CAUSING_TARGET_RESET; 5675 for (i = 0; i < lm->lm_nentries; i++) { 5676 if (lm->lm_plus[i] == NULL) 5677 continue; 5678 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 5679 ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private); 5680 atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE); 5681 } 5682 rw_exit(iss->iss_lockp); 5683 mutex_exit(&stmf_state.stmf_lock); 5684 5685 for (i = 0; i < lm->lm_nentries; i++) { 5686 if (lm->lm_plus[i] == NULL) 5687 continue; 5688 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 5689 stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED, 5690 lm_ent->ent_lu); 5691 } 5692 5693 /* Start polling on this task */ 5694 if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT) 5695 != STMF_SUCCESS) { 5696 stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE, 5697 NULL); 5698 return; 5699 } 5700 } 5701 5702 int 5703 stmf_handle_cmd_during_ic(stmf_i_scsi_task_t *itask) 5704 { 5705 scsi_task_t *task = itask->itask_task; 5706 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *) 5707 task->task_session->ss_stmf_private; 5708 5709 rw_enter(iss->iss_lockp, RW_WRITER); 5710 if (((iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) == 0) || 5711 (task->task_cdb[0] == SCMD_INQUIRY)) { 5712 rw_exit(iss->iss_lockp); 5713 return (0); 5714 } 5715 atomic_and_32(&iss->iss_flags, 5716 ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS)); 5717 rw_exit(iss->iss_lockp); 5718 5719 if (task->task_cdb[0] == SCMD_REPORT_LUNS) { 5720 return (0); 5721 } 5722 stmf_scsilib_send_status(task, STATUS_CHECK, 5723 STMF_SAA_REPORT_LUN_DATA_HAS_CHANGED); 5724 return (1); 5725 } 5726 5727 void 5728 stmf_worker_init() 5729 { 5730 uint32_t i; 5731 5732 /* Make local copy of global tunables */ 5733 stmf_i_max_nworkers = stmf_max_nworkers; 5734 stmf_i_min_nworkers = stmf_min_nworkers; 5735 5736 ASSERT(stmf_workers == NULL); 5737 if (stmf_i_min_nworkers < 4) { 5738 stmf_i_min_nworkers = 4; 5739 } 5740 if (stmf_i_max_nworkers < stmf_i_min_nworkers) { 5741 stmf_i_max_nworkers = stmf_i_min_nworkers; 5742 } 5743 stmf_workers = (stmf_worker_t *)kmem_zalloc( 5744 sizeof (stmf_worker_t) * 
stmf_i_max_nworkers, KM_SLEEP); 5745 for (i = 0; i < stmf_i_max_nworkers; i++) { 5746 stmf_worker_t *w = &stmf_workers[i]; 5747 mutex_init(&w->worker_lock, NULL, MUTEX_DRIVER, NULL); 5748 cv_init(&w->worker_cv, NULL, CV_DRIVER, NULL); 5749 } 5750 stmf_worker_mgmt_delay = drv_usectohz(20 * 1000); 5751 stmf_workers_state = STMF_WORKERS_ENABLED; 5752 5753 /* Workers will be started by stmf_worker_mgmt() */ 5754 5755 /* Let's wait for at least one worker to start */ 5756 while (stmf_nworkers_cur == 0) 5757 delay(drv_usectohz(20 * 1000)); 5758 stmf_worker_mgmt_delay = drv_usectohz(3 * 1000 * 1000); 5759 } 5760 5761 stmf_status_t 5762 stmf_worker_fini() 5763 { 5764 int i; 5765 clock_t sb; 5766 5767 if (stmf_workers_state == STMF_WORKERS_DISABLED) 5768 return (STMF_SUCCESS); 5769 ASSERT(stmf_workers); 5770 stmf_workers_state = STMF_WORKERS_DISABLED; 5771 stmf_worker_mgmt_delay = drv_usectohz(20 * 1000); 5772 cv_signal(&stmf_state.stmf_cv); 5773 5774 sb = ddi_get_lbolt() + drv_usectohz(10 * 1000 * 1000); 5775 /* Wait for all the threads to die */ 5776 while (stmf_nworkers_cur != 0) { 5777 if (ddi_get_lbolt() > sb) { 5778 stmf_workers_state = STMF_WORKERS_ENABLED; 5779 return (STMF_BUSY); 5780 } 5781 delay(drv_usectohz(100 * 1000)); 5782 } 5783 for (i = 0; i < stmf_i_max_nworkers; i++) { 5784 stmf_worker_t *w = &stmf_workers[i]; 5785 mutex_destroy(&w->worker_lock); 5786 cv_destroy(&w->worker_cv); 5787 } 5788 kmem_free(stmf_workers, sizeof (stmf_worker_t) * stmf_i_max_nworkers); 5789 stmf_workers = NULL; 5790 5791 return (STMF_SUCCESS); 5792 } 5793 5794 void 5795 stmf_worker_task(void *arg) 5796 { 5797 stmf_worker_t *w; 5798 stmf_i_scsi_session_t *iss; 5799 scsi_task_t *task; 5800 stmf_i_scsi_task_t *itask; 5801 stmf_data_buf_t *dbuf; 5802 stmf_lu_t *lu; 5803 clock_t wait_timer = 0; 5804 clock_t wait_ticks, wait_delta = 0; 5805 uint32_t old, new; 5806 uint8_t curcmd; 5807 uint8_t abort_free; 5808 uint8_t wait_queue; 5809 uint8_t dec_qdepth; 5810 5811 w = (stmf_worker_t *)arg; 5812 wait_ticks = drv_usectohz(10000); 5813 5814 mutex_enter(&w->worker_lock); 5815 w->worker_flags |= STMF_WORKER_STARTED | STMF_WORKER_ACTIVE; 5816 stmf_worker_loop:; 5817 if ((w->worker_ref_count == 0) && 5818 (w->worker_flags & STMF_WORKER_TERMINATE)) { 5819 w->worker_flags &= ~(STMF_WORKER_STARTED | 5820 STMF_WORKER_ACTIVE | STMF_WORKER_TERMINATE); 5821 w->worker_tid = NULL; 5822 mutex_exit(&w->worker_lock); 5823 thread_exit(); 5824 } 5825 /* CONSTCOND */ 5826 while (1) { 5827 dec_qdepth = 0; 5828 if (wait_timer && (ddi_get_lbolt() >= wait_timer)) { 5829 wait_timer = 0; 5830 wait_delta = 0; 5831 if (w->worker_wait_head) { 5832 ASSERT(w->worker_wait_tail); 5833 if (w->worker_task_head == NULL) 5834 w->worker_task_head = 5835 w->worker_wait_head; 5836 else 5837 w->worker_task_tail->itask_worker_next = 5838 w->worker_wait_head; 5839 w->worker_task_tail = w->worker_wait_tail; 5840 w->worker_wait_head = w->worker_wait_tail = 5841 NULL; 5842 } 5843 } 5844 if ((itask = w->worker_task_head) == NULL) { 5845 break; 5846 } 5847 task = itask->itask_task; 5848 w->worker_task_head = itask->itask_worker_next; 5849 if (w->worker_task_head == NULL) 5850 w->worker_task_tail = NULL; 5851 5852 wait_queue = 0; 5853 abort_free = 0; 5854 if (itask->itask_ncmds > 0) { 5855 curcmd = itask->itask_cmd_stack[itask->itask_ncmds - 1]; 5856 } else { 5857 ASSERT(itask->itask_flags & ITASK_BEING_ABORTED); 5858 } 5859 do { 5860 old = itask->itask_flags; 5861 if (old & ITASK_BEING_ABORTED) { 5862 itask->itask_ncmds = 1; 5863 curcmd = itask->itask_cmd_stack[0]
void
stmf_worker_task(void *arg)
{
	stmf_worker_t *w;
	stmf_i_scsi_session_t *iss;
	scsi_task_t *task;
	stmf_i_scsi_task_t *itask;
	stmf_data_buf_t *dbuf;
	stmf_lu_t *lu;
	clock_t wait_timer = 0;
	clock_t wait_ticks, wait_delta = 0;
	uint32_t old, new;
	uint8_t curcmd;
	uint8_t abort_free;
	uint8_t wait_queue;
	uint8_t dec_qdepth;

	w = (stmf_worker_t *)arg;
	wait_ticks = drv_usectohz(10000);

	mutex_enter(&w->worker_lock);
	w->worker_flags |= STMF_WORKER_STARTED | STMF_WORKER_ACTIVE;
stmf_worker_loop:;
	if ((w->worker_ref_count == 0) &&
	    (w->worker_flags & STMF_WORKER_TERMINATE)) {
		w->worker_flags &= ~(STMF_WORKER_STARTED |
		    STMF_WORKER_ACTIVE | STMF_WORKER_TERMINATE);
		w->worker_tid = NULL;
		mutex_exit(&w->worker_lock);
		thread_exit();
	}
	/* CONSTCOND */
	while (1) {
		dec_qdepth = 0;
		if (wait_timer && (ddi_get_lbolt() >= wait_timer)) {
			wait_timer = 0;
			wait_delta = 0;
			if (w->worker_wait_head) {
				ASSERT(w->worker_wait_tail);
				if (w->worker_task_head == NULL)
					w->worker_task_head =
					    w->worker_wait_head;
				else
					w->worker_task_tail->itask_worker_next =
					    w->worker_wait_head;
				w->worker_task_tail = w->worker_wait_tail;
				w->worker_wait_head = w->worker_wait_tail =
				    NULL;
			}
		}
		if ((itask = w->worker_task_head) == NULL) {
			break;
		}
		task = itask->itask_task;
		w->worker_task_head = itask->itask_worker_next;
		if (w->worker_task_head == NULL)
			w->worker_task_tail = NULL;

		wait_queue = 0;
		abort_free = 0;
		if (itask->itask_ncmds > 0) {
			curcmd = itask->itask_cmd_stack[itask->itask_ncmds - 1];
		} else {
			ASSERT(itask->itask_flags & ITASK_BEING_ABORTED);
		}
		do {
			old = itask->itask_flags;
			if (old & ITASK_BEING_ABORTED) {
				itask->itask_ncmds = 1;
				curcmd = itask->itask_cmd_stack[0] =
				    ITASK_CMD_ABORT;
				goto out_itask_flag_loop;
			} else if ((curcmd & ITASK_CMD_MASK) ==
			    ITASK_CMD_NEW_TASK) {
				/*
				 * Set ITASK_KSTAT_IN_RUNQ; this flag will
				 * not be reset until the task is completed.
				 */
				new = old | ITASK_KNOWN_TO_LU |
				    ITASK_KSTAT_IN_RUNQ;
			} else {
				goto out_itask_flag_loop;
			}
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

out_itask_flag_loop:

		/*
		 * Decide if this task needs to go to a queue and/or if
		 * we can decrement the itask_cmd_stack.
		 */
		if (curcmd == ITASK_CMD_ABORT) {
			if (itask->itask_flags & (ITASK_KNOWN_TO_LU |
			    ITASK_KNOWN_TO_TGT_PORT)) {
				wait_queue = 1;
			} else {
				abort_free = 1;
			}
		} else if ((curcmd & ITASK_CMD_POLL) &&
		    (itask->itask_poll_timeout > ddi_get_lbolt())) {
			wait_queue = 1;
		}

		if (wait_queue) {
			itask->itask_worker_next = NULL;
			if (w->worker_wait_tail) {
				w->worker_wait_tail->itask_worker_next = itask;
			} else {
				w->worker_wait_head = itask;
			}
			w->worker_wait_tail = itask;
			if (wait_timer == 0) {
				wait_timer = ddi_get_lbolt() + wait_ticks;
				wait_delta = wait_ticks;
			}
		} else if ((--(itask->itask_ncmds)) != 0) {
			itask->itask_worker_next = NULL;
			if (w->worker_task_tail) {
				w->worker_task_tail->itask_worker_next = itask;
			} else {
				w->worker_task_head = itask;
			}
			w->worker_task_tail = itask;
		} else {
			atomic_and_32(&itask->itask_flags,
			    ~ITASK_IN_WORKER_QUEUE);
			/*
			 * This is where the queue depth should go down by
			 * one but we delay that on purpose to account for
			 * the call into the provider. The actual decrement
			 * happens after the worker has done its job.
			 */
			dec_qdepth = 1;
		}

		/* Making it here means we are going to call into the LU */
		if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0)
			lu = task->task_lu;
		else
			lu = dlun0;
		dbuf = itask->itask_dbufs[ITASK_CMD_BUF_NDX(curcmd)];
		mutex_exit(&w->worker_lock);
		curcmd &= ITASK_CMD_MASK;
		switch (curcmd) {
		case ITASK_CMD_NEW_TASK:
			iss = (stmf_i_scsi_session_t *)
			    task->task_session->ss_stmf_private;
			stmf_update_kstat_lu_q(task, kstat_waitq_to_runq);
			stmf_update_kstat_lport_q(task, kstat_waitq_to_runq);
			if (iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) {
				if (stmf_handle_cmd_during_ic(itask))
					break;
			}
#ifdef	DEBUG
			if (stmf_drop_task_counter > 0) {
				if (atomic_add_32_nv(
				    (uint32_t *)&stmf_drop_task_counter,
				    -1) == 1) {
					break;
				}
			}
#endif
			DTRACE_PROBE1(scsi__task__start, scsi_task_t *, task);
			lu->lu_new_task(task, dbuf);
			break;
		case ITASK_CMD_DATA_XFER_DONE:
			lu->lu_dbuf_xfer_done(task, dbuf);
			break;
		case ITASK_CMD_STATUS_DONE:
			lu->lu_send_status_done(task);
			break;
		case ITASK_CMD_ABORT:
			if (abort_free) {
				stmf_task_free(task);
			} else {
				stmf_do_task_abort(task);
			}
			break;
		case ITASK_CMD_POLL_LU:
			if (!wait_queue) {
				lu->lu_task_poll(task);
			}
			break;
		case ITASK_CMD_POLL_LPORT:
			if (!wait_queue)
				task->task_lport->lport_task_poll(task);
			break;
		case ITASK_CMD_SEND_STATUS:
		/* case ITASK_CMD_XFER_DATA: */
			break;
		}
		mutex_enter(&w->worker_lock);
		if (dec_qdepth) {
			w->worker_queue_depth--;
		}
	}
	if ((w->worker_flags & STMF_WORKER_TERMINATE) && (wait_timer == 0)) {
		if (w->worker_ref_count == 0)
			goto stmf_worker_loop;
		else {
			wait_timer = ddi_get_lbolt() + 1;
			wait_delta = 1;
		}
	}
	w->worker_flags &= ~STMF_WORKER_ACTIVE;
	if (wait_timer) {
		(void) cv_reltimedwait(&w->worker_cv, &w->worker_lock,
		    wait_delta, TR_CLOCK_TICK);
	} else {
		cv_wait(&w->worker_cv, &w->worker_lock);
	}
	w->worker_flags |= STMF_WORKER_ACTIVE;
	goto stmf_worker_loop;
}
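/*
 * Worker pool management, called periodically from the stmf_svc() thread.
 * The pool is grown or shrunk between stmf_i_min_nworkers and
 * stmf_i_max_nworkers based on the peak per-worker queue depth observed
 * over the last sampling interval. Scale-down is deliberately damped:
 * the highest load seen during stmf_worker_scale_down_delay seconds
 * becomes the new target, so a momentary lull does not tear down threads.
 */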
void
stmf_worker_mgmt()
{
	int i;
	int workers_needed;
	uint32_t qd;
	clock_t tps, d = 0;
	uint32_t cur_max_ntasks = 0;
	stmf_worker_t *w;

	/* Check if we are trying to increase the # of threads */
	for (i = stmf_nworkers_cur; i < stmf_nworkers_needed; i++) {
		if (stmf_workers[i].worker_flags & STMF_WORKER_STARTED) {
			stmf_nworkers_cur++;
			stmf_nworkers_accepting_cmds++;
		} else {
			/* Wait for transition to complete */
			return;
		}
	}
	/* Check if we are trying to decrease the # of workers */
	for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) {
		if ((stmf_workers[i].worker_flags & STMF_WORKER_STARTED) == 0) {
			stmf_nworkers_cur--;
			/*
			 * stmf_nworkers_accepting_cmds has already been
			 * updated by the request to reduce the # of workers.
			 */
		} else {
			/* Wait for transition to complete */
			return;
		}
	}
	/* Check if we are being asked to quit */
	if (stmf_workers_state != STMF_WORKERS_ENABLED) {
		if (stmf_nworkers_cur) {
			workers_needed = 0;
			goto worker_mgmt_trigger_change;
		}
		return;
	}
	/* Check if we are starting */
	if (stmf_nworkers_cur < stmf_i_min_nworkers) {
		workers_needed = stmf_i_min_nworkers;
		goto worker_mgmt_trigger_change;
	}

	tps = drv_usectohz(1 * 1000 * 1000);
	if ((stmf_wm_last != 0) &&
	    ((d = ddi_get_lbolt() - stmf_wm_last) > tps)) {
		qd = 0;
		for (i = 0; i < stmf_nworkers_accepting_cmds; i++) {
			qd += stmf_workers[i].worker_max_qdepth_pu;
			stmf_workers[i].worker_max_qdepth_pu = 0;
			if (stmf_workers[i].worker_max_sys_qdepth_pu >
			    cur_max_ntasks) {
				cur_max_ntasks =
				    stmf_workers[i].worker_max_sys_qdepth_pu;
			}
			stmf_workers[i].worker_max_sys_qdepth_pu = 0;
		}
	}
	stmf_wm_last = ddi_get_lbolt();
	if (d <= tps) {
		/* still ramping up */
		return;
	}
	/* max qdepth cannot be more than max tasks */
	if (qd > cur_max_ntasks)
		qd = cur_max_ntasks;

	/* See if we have more workers than we need */
	if (qd < stmf_nworkers_accepting_cmds) {
		/*
		 * Since we don't reduce the worker count right away, monitor
		 * the highest load during the scale_down_delay.
		 */
		if (qd > stmf_worker_scale_down_qd)
			stmf_worker_scale_down_qd = qd;
		if (stmf_worker_scale_down_timer == 0) {
			stmf_worker_scale_down_timer = ddi_get_lbolt() +
			    drv_usectohz(stmf_worker_scale_down_delay *
			    1000 * 1000);
			return;
		}
		if (ddi_get_lbolt() < stmf_worker_scale_down_timer) {
			return;
		}
		/* It's time to reduce the workers */
		if (stmf_worker_scale_down_qd < stmf_i_min_nworkers)
			stmf_worker_scale_down_qd = stmf_i_min_nworkers;
		if (stmf_worker_scale_down_qd > stmf_i_max_nworkers)
			stmf_worker_scale_down_qd = stmf_i_max_nworkers;
		if (stmf_worker_scale_down_qd == stmf_nworkers_cur)
			return;
		workers_needed = stmf_worker_scale_down_qd;
		stmf_worker_scale_down_qd = 0;
		goto worker_mgmt_trigger_change;
	}
	stmf_worker_scale_down_qd = 0;
	stmf_worker_scale_down_timer = 0;
	if (qd > stmf_i_max_nworkers)
		qd = stmf_i_max_nworkers;
	if (qd < stmf_i_min_nworkers)
		qd = stmf_i_min_nworkers;
	if (qd == stmf_nworkers_cur)
		return;
	workers_needed = qd;
	goto worker_mgmt_trigger_change;

	/* NOTREACHED */
	return;

worker_mgmt_trigger_change:
	ASSERT(workers_needed != stmf_nworkers_cur);
	if (workers_needed > stmf_nworkers_cur) {
		stmf_nworkers_needed = workers_needed;
		for (i = stmf_nworkers_cur; i < workers_needed; i++) {
			w = &stmf_workers[i];
			w->worker_tid = thread_create(NULL, 0, stmf_worker_task,
			    (void *)&stmf_workers[i], 0, &p0, TS_RUN,
			    minclsyspri);
		}
		return;
	}
	/* At this point we know that we are decreasing the # of workers */
	stmf_nworkers_accepting_cmds = workers_needed;
	stmf_nworkers_needed = workers_needed;
	/* Signal the workers that it's time to quit */
	for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) {
		w = &stmf_workers[i];
		ASSERT(w && (w->worker_flags & STMF_WORKER_STARTED));
		mutex_enter(&w->worker_lock);
		w->worker_flags |= STMF_WORKER_TERMINATE;
		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
			cv_signal(&w->worker_cv);
		mutex_exit(&w->worker_lock);
	}
}
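/*
 * A typical use of stmf_xd_to_dbuf() (see the REPORT LUNS handling in
 * stmf_dlun0_new_task() below): allocate an stmf_xfer_data_t sized for
 * the response, stash it in dbuf->db_lu_private, call stmf_xd_to_dbuf()
 * to copy the next chunk into the dbuf's scatter-gather list, and start
 * the transfer with stmf_xfer_data(). If the xfer-done callback finds
 * db_lu_private still non-NULL, there is residual data and the same dbuf
 * is refilled and sent again.
 */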
/*
 * Fills out a dbuf from stmf_xfer_data_t (contained in the db_lu_private).
 * If all the data has been filled out, frees the xd and makes
 * db_lu_private NULL.
 */
void
stmf_xd_to_dbuf(stmf_data_buf_t *dbuf)
{
	stmf_xfer_data_t *xd;
	uint8_t *p;
	int i;
	uint32_t s;

	xd = (stmf_xfer_data_t *)dbuf->db_lu_private;
	dbuf->db_data_size = 0;
	dbuf->db_relative_offset = xd->size_done;
	for (i = 0; i < dbuf->db_sglist_length; i++) {
		s = min(xd->size_left, dbuf->db_sglist[i].seg_length);
		p = &xd->buf[xd->size_done];
		bcopy(p, dbuf->db_sglist[i].seg_addr, s);
		xd->size_left -= s;
		xd->size_done += s;
		dbuf->db_data_size += s;
		if (xd->size_left == 0) {
			kmem_free(xd, xd->alloc_size);
			dbuf->db_lu_private = NULL;
			return;
		}
	}
}
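/*
 * dlun0 is the default pseudo-LU. Tasks flagged ITASK_DEFAULT_HANDLING
 * are routed to it by the worker loop above; it provides the minimal
 * SCSI behavior (standard INQUIRY, REPORT LUNS and task management
 * handling) that STMF implements itself on behalf of LU providers.
 */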
/* ARGSUSED */
stmf_status_t
stmf_dlun0_task_alloc(scsi_task_t *task)
{
	return (STMF_SUCCESS);
}

void
stmf_dlun0_new_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	uint8_t *cdbp = (uint8_t *)&task->task_cdb[0];
	stmf_i_scsi_session_t *iss;
	uint32_t sz, minsz;
	uint8_t *p;
	stmf_xfer_data_t *xd;
	uint8_t inq_page_length = 31;

	if (task->task_mgmt_function) {
		stmf_scsilib_handle_task_mgmt(task);
		return;
	}

	switch (cdbp[0]) {
	case SCMD_INQUIRY:
		/*
		 * Basic protocol checks. In addition, only reply to
		 * standard inquiry. Otherwise, the LU provider needs
		 * to respond.
		 */
		if (cdbp[2] || (cdbp[1] & 1) || cdbp[5]) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		task->task_cmd_xfer_length =
		    (((uint32_t)cdbp[3]) << 8) | cdbp[4];

		if (task->task_additional_flags &
		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
			task->task_expected_xfer_length =
			    task->task_cmd_xfer_length;
		}

		sz = min(task->task_expected_xfer_length,
		    min(36, task->task_cmd_xfer_length));
		minsz = 36;

		if (sz == 0) {
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
			return;
		}

		if (dbuf && (dbuf->db_sglist[0].seg_length < 36)) {
			/*
			 * Ignore any preallocated dbuf if the size is less
			 * than 36. It will be freed during the task_free.
			 */
			dbuf = NULL;
		}
		if (dbuf == NULL)
			dbuf = stmf_alloc_dbuf(task, minsz, &minsz, 0);
		if ((dbuf == NULL) || (dbuf->db_sglist[0].seg_length < sz)) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		dbuf->db_lu_private = NULL;

		p = dbuf->db_sglist[0].seg_addr;

		/*
		 * Standard inquiry handling only.
		 */
		bzero(p, inq_page_length + 5);

		p[0] = DPQ_SUPPORTED | DTYPE_UNKNOWN;
		p[2] = 5;
		p[3] = 0x12;
		p[4] = inq_page_length;
		p[6] = 0x80;

		(void) strncpy((char *)p+8, "SUN     ", 8);
		(void) strncpy((char *)p+16, "COMSTAR         ", 16);
		(void) strncpy((char *)p+32, "1.0 ", 4);

		dbuf->db_data_size = sz;
		dbuf->db_relative_offset = 0;
		dbuf->db_flags = DB_DIRECTION_TO_RPORT;
		(void) stmf_xfer_data(task, dbuf, 0);

		return;

	case SCMD_REPORT_LUNS:
		task->task_cmd_xfer_length =
		    ((((uint32_t)task->task_cdb[6]) << 24) |
		    (((uint32_t)task->task_cdb[7]) << 16) |
		    (((uint32_t)task->task_cdb[8]) << 8) |
		    ((uint32_t)task->task_cdb[9]));

		if (task->task_additional_flags &
		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
			task->task_expected_xfer_length =
			    task->task_cmd_xfer_length;
		}

		sz = min(task->task_expected_xfer_length,
		    task->task_cmd_xfer_length);

		if (sz < 16) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		iss = (stmf_i_scsi_session_t *)
		    task->task_session->ss_stmf_private;
		rw_enter(iss->iss_lockp, RW_WRITER);
		xd = stmf_session_prepare_report_lun_data(iss->iss_sm);
		rw_exit(iss->iss_lockp);

		if (xd == NULL) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}

		sz = min(sz, xd->size_left);
		xd->size_left = sz;
		minsz = min(512, sz);

		if (dbuf == NULL)
			dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
		if (dbuf == NULL) {
			kmem_free(xd, xd->alloc_size);
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		dbuf->db_lu_private = xd;
		stmf_xd_to_dbuf(dbuf);

		atomic_and_32(&iss->iss_flags,
		    ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
		dbuf->db_flags = DB_DIRECTION_TO_RPORT;
		(void) stmf_xfer_data(task, dbuf, 0);
		return;
	}

	stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE);
}
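/*
 * Data transfer completion for dlun0. While db_lu_private still holds a
 * partially consumed stmf_xfer_data_t, the dbuf is refilled and re-sent;
 * otherwise the task completes with GOOD status (or, for proxied tasks,
 * is handed back to pppt to complete).
 */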
void
stmf_dlun0_dbuf_done(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}
	task->task_nbytes_transferred = dbuf->db_data_size;
	if (dbuf->db_lu_private) {
		/* There is more */
		stmf_xd_to_dbuf(dbuf);
		(void) stmf_xfer_data(task, dbuf, 0);
		return;
	}
	/*
	 * If this is a proxy task, it will need to be completed from the
	 * proxy port provider. This message lets pppt know that the xfer
	 * is complete. When we receive the status from pppt, we will
	 * then relay that status back to the lport.
	 */
	if (itask->itask_flags & ITASK_PROXY_TASK) {
		stmf_ic_msg_t *ic_xfer_done_msg = NULL;
		stmf_status_t ic_ret = STMF_FAILURE;
		uint64_t session_msg_id;
		mutex_enter(&stmf_state.stmf_lock);
		session_msg_id = stmf_proxy_msg_id++;
		mutex_exit(&stmf_state.stmf_lock);
		/* send xfer done status to pppt */
		ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
		    itask->itask_proxy_msg_id,
		    task->task_session->ss_session_id,
		    STMF_SUCCESS, session_msg_id);
		if (ic_xfer_done_msg) {
			ic_ret = ic_tx_msg(ic_xfer_done_msg);
			if (ic_ret != STMF_IC_MSG_SUCCESS) {
				cmn_err(CE_WARN, "unable to xmit session msg");
			}
		}
		/* task will be completed from pppt */
		return;
	}
	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
}

/* ARGSUSED */
void
stmf_dlun0_status_done(scsi_task_t *task)
{
}

/* ARGSUSED */
void
stmf_dlun0_task_free(scsi_task_t *task)
{
}
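/*
 * Abort entry point for dlun0. Two cases: a task management function that
 * was driving an LU or target reset has its reset state torn down; for
 * anything else, any stmf_xfer_data_t still attached to an allocated dbuf
 * is freed so nothing leaks when the task goes away.
 */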
/* ARGSUSED */
stmf_status_t
stmf_dlun0_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags)
{
	scsi_task_t *task = (scsi_task_t *)arg;
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	int i;
	uint8_t map;

	if ((task->task_mgmt_function) && (itask->itask_flags &
	    (ITASK_CAUSING_LU_RESET | ITASK_CAUSING_TARGET_RESET))) {
		switch (task->task_mgmt_function) {
		case TM_ABORT_TASK:
		case TM_ABORT_TASK_SET:
		case TM_CLEAR_TASK_SET:
		case TM_LUN_RESET:
			atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
			break;
		case TM_TARGET_RESET:
		case TM_TARGET_COLD_RESET:
		case TM_TARGET_WARM_RESET:
			stmf_abort_target_reset(task);
			break;
		}
		return (STMF_ABORT_SUCCESS);
	}

	/*
	 * OK, so it's not a task mgmt function. Make sure we free any xd
	 * sitting inside any dbuf.
	 */
	if ((map = itask->itask_allocated_buf_map) != 0) {
		for (i = 0; i < 4; i++) {
			if ((map & 1) &&
			    ((itask->itask_dbufs[i])->db_lu_private)) {
				stmf_xfer_data_t *xd;
				stmf_data_buf_t *dbuf;

				dbuf = itask->itask_dbufs[i];
				xd = (stmf_xfer_data_t *)dbuf->db_lu_private;
				dbuf->db_lu_private = NULL;
				kmem_free(xd, xd->alloc_size);
			}
			map >>= 1;
		}
	}
	return (STMF_ABORT_SUCCESS);
}

void
stmf_dlun0_task_poll(struct scsi_task *task)
{
	/* Right now we only do this for handling task management functions */
	ASSERT(task->task_mgmt_function);

	switch (task->task_mgmt_function) {
	case TM_ABORT_TASK:
	case TM_ABORT_TASK_SET:
	case TM_CLEAR_TASK_SET:
	case TM_LUN_RESET:
		(void) stmf_lun_reset_poll(task->task_lu, task, 0);
		return;
	case TM_TARGET_RESET:
	case TM_TARGET_COLD_RESET:
	case TM_TARGET_WARM_RESET:
		stmf_target_reset_poll(task);
		return;
	}
}

/* ARGSUSED */
void
stmf_dlun0_ctl(struct stmf_lu *lu, int cmd, void *arg)
{
	/* This function will never be called */
	cmn_err(CE_WARN, "stmf_dlun0_ctl called with cmd %x", cmd);
}

void
stmf_dlun_init()
{
	stmf_i_lu_t *ilu;

	dlun0 = stmf_alloc(STMF_STRUCT_STMF_LU, 0, 0);
	dlun0->lu_task_alloc = stmf_dlun0_task_alloc;
	dlun0->lu_new_task = stmf_dlun0_new_task;
	dlun0->lu_dbuf_xfer_done = stmf_dlun0_dbuf_done;
	dlun0->lu_send_status_done = stmf_dlun0_status_done;
	dlun0->lu_task_free = stmf_dlun0_task_free;
	dlun0->lu_abort = stmf_dlun0_abort;
	dlun0->lu_task_poll = stmf_dlun0_task_poll;
	dlun0->lu_ctl = stmf_dlun0_ctl;

	ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private;
	ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
}

stmf_status_t
stmf_dlun_fini()
{
	stmf_i_lu_t *ilu;

	ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private;

	ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free);
	if (ilu->ilu_ntasks) {
		stmf_i_scsi_task_t *itask, *nitask;

		nitask = ilu->ilu_tasks;
		do {
			itask = nitask;
			nitask = itask->itask_lu_next;
			dlun0->lu_task_free(itask->itask_task);
			stmf_free(itask->itask_task);
		} while (nitask != NULL);
	}
	stmf_free(dlun0);
	return (STMF_SUCCESS);
}

void
stmf_abort_target_reset(scsi_task_t *task)
{
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *lm_ent;
	stmf_i_lu_t *ilu;
	int i;

	rw_enter(iss->iss_lockp, RW_READER);
	lm = iss->iss_sm;
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private;
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
		}
	}
	atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
	rw_exit(iss->iss_lockp);
}
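/*
 * Reset polling protocol: stmf_lun_reset_poll() returns STMF_BUSY while
 * the LU still has tasks outstanding (other than the TM task itself) and
 * otherwise calls the LU's abort entry point with STMF_LU_RESET_STATE to
 * finish the reset. stmf_target_reset_poll() further down iterates this
 * over every LU in the session's map and completes the task only when no
 * LU reports busy.
 */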
/*
 * The return value is only used by the function managing the target reset.
 */
stmf_status_t
stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task, int target_reset)
{
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	int ntasks_pending;

	ntasks_pending = ilu->ilu_ntasks - ilu->ilu_ntasks_free;
	/*
	 * This function is also used during target reset. The idea is that
	 * once all the commands are aborted, call the LU's reset entry
	 * point (abort entry point with a reset flag). But if this task
	 * mgmt function is running on this LU, then not all the tasks can
	 * be aborted; one task (this task) will still be running, which
	 * is OK.
	 */
	if ((ntasks_pending == 0) || ((task->task_lu == lu) &&
	    (ntasks_pending == 1))) {
		stmf_status_t ret;

		if ((task->task_mgmt_function == TM_LUN_RESET) ||
		    (task->task_mgmt_function == TM_TARGET_RESET) ||
		    (task->task_mgmt_function == TM_TARGET_WARM_RESET) ||
		    (task->task_mgmt_function == TM_TARGET_COLD_RESET)) {
			ret = lu->lu_abort(lu, STMF_LU_RESET_STATE, task, 0);
		} else {
			ret = STMF_SUCCESS;
		}
		if (ret == STMF_SUCCESS) {
			atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
		}
		if (target_reset) {
			return (ret);
		}
		if (ret == STMF_SUCCESS) {
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
			return (ret);
		}
		if (ret != STMF_BUSY) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task, ret, NULL);
			return (ret);
		}
	}

	if (target_reset) {
		/* Tell target reset polling code that we are not done */
		return (STMF_BUSY);
	}

	if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
	    != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ALLOC_FAILURE, NULL);
		return (STMF_SUCCESS);
	}

	return (STMF_SUCCESS);
}

void
stmf_target_reset_poll(struct scsi_task *task)
{
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *lm_ent;
	stmf_i_lu_t *ilu;
	stmf_status_t ret;
	int i;
	int not_done = 0;

	ASSERT(iss->iss_flags & ISS_RESET_ACTIVE);

	rw_enter(iss->iss_lockp, RW_READER);
	lm = iss->iss_sm;
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private;
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			rw_exit(iss->iss_lockp);
			ret = stmf_lun_reset_poll(lm_ent->ent_lu, task, 1);
			rw_enter(iss->iss_lockp, RW_READER);
			if (ret == STMF_SUCCESS)
				continue;
			not_done = 1;
			if (ret != STMF_BUSY) {
				rw_exit(iss->iss_lockp);
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ABORTED, NULL);
				return;
			}
		}
	}
	rw_exit(iss->iss_lockp);

	if (not_done) {
		if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
		    != STMF_SUCCESS) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		return;
	}

	atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);

	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
}
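/*
 * Event registration helpers. Each LU and local port carries an event
 * handle recording which event ids are enabled; STMF_EVENT_ADD/REMOVE
 * toggle individual events, STMF_EVENT_ALL clears the whole set, and the
 * stmf_generate_*_event() routines invoke the provider's handler only
 * when the event is enabled and a handler is registered.
 */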
stmf_status_t
stmf_lu_add_event(stmf_lu_t *lu, int eventid)
{
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;

	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
		return (STMF_INVALID_ARG);
	}

	STMF_EVENT_ADD(ilu->ilu_event_hdl, eventid);
	return (STMF_SUCCESS);
}

stmf_status_t
stmf_lu_remove_event(stmf_lu_t *lu, int eventid)
{
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;

	if (eventid == STMF_EVENT_ALL) {
		STMF_EVENT_CLEAR_ALL(ilu->ilu_event_hdl);
		return (STMF_SUCCESS);
	}

	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
		return (STMF_INVALID_ARG);
	}

	STMF_EVENT_REMOVE(ilu->ilu_event_hdl, eventid);
	return (STMF_SUCCESS);
}

stmf_status_t
stmf_lport_add_event(stmf_local_port_t *lport, int eventid)
{
	stmf_i_local_port_t *ilport =
	    (stmf_i_local_port_t *)lport->lport_stmf_private;

	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
		return (STMF_INVALID_ARG);
	}

	STMF_EVENT_ADD(ilport->ilport_event_hdl, eventid);
	return (STMF_SUCCESS);
}

stmf_status_t
stmf_lport_remove_event(stmf_local_port_t *lport, int eventid)
{
	stmf_i_local_port_t *ilport =
	    (stmf_i_local_port_t *)lport->lport_stmf_private;

	if (eventid == STMF_EVENT_ALL) {
		STMF_EVENT_CLEAR_ALL(ilport->ilport_event_hdl);
		return (STMF_SUCCESS);
	}

	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
		return (STMF_INVALID_ARG);
	}

	STMF_EVENT_REMOVE(ilport->ilport_event_hdl, eventid);
	return (STMF_SUCCESS);
}

void
stmf_generate_lu_event(stmf_i_lu_t *ilu, int eventid, void *arg, uint32_t flags)
{
	if (STMF_EVENT_ENABLED(ilu->ilu_event_hdl, eventid) &&
	    (ilu->ilu_lu->lu_event_handler != NULL)) {
		ilu->ilu_lu->lu_event_handler(ilu->ilu_lu, eventid, arg, flags);
	}
}

void
stmf_generate_lport_event(stmf_i_local_port_t *ilport, int eventid, void *arg,
    uint32_t flags)
{
	if (STMF_EVENT_ENABLED(ilport->ilport_event_hdl, eventid) &&
	    (ilport->ilport_lport->lport_event_handler != NULL)) {
		ilport->ilport_lport->lport_event_handler(
		    ilport->ilport_lport, eventid, arg, flags);
	}
}

void
stmf_svc_init()
{
	if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
		return;
	stmf_state.stmf_svc_taskq = ddi_taskq_create(0, "STMF_SVC_TASKQ", 1,
	    TASKQ_DEFAULTPRI, 0);
	(void) ddi_taskq_dispatch(stmf_state.stmf_svc_taskq,
	    stmf_svc, 0, DDI_SLEEP);
}

stmf_status_t
stmf_svc_fini()
{
	uint32_t i;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED) {
		stmf_state.stmf_svc_flags |= STMF_SVC_TERMINATE;
		cv_signal(&stmf_state.stmf_cv);
	}
	mutex_exit(&stmf_state.stmf_lock);

	/* Wait for 5 seconds */
	for (i = 0; i < 500; i++) {
		if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
			delay(drv_usectohz(10000));
		else
			break;
	}
	if (i == 500)
		return (STMF_BUSY);

	ddi_taskq_destroy(stmf_state.stmf_svc_taskq);

	return (STMF_SUCCESS);
}
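/*
 * The STMF service thread. It drains the stmf_svc_active queue of
 * online/offline requests (LU offlines wait on the waiting list until
 * their tasks drain), then, while idle, runs the periodic jobs: ILU
 * task-timeout checks, free-task trimming, worker pool management and
 * delivery of "initial LUN mapped" events to newly usable sessions.
 */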
/* ARGSUSED */
void
stmf_svc(void *arg)
{
	stmf_svc_req_t *req, **preq;
	clock_t td;
	clock_t drain_start, drain_next = 0;
	clock_t timing_start, timing_next = 0;
	clock_t worker_delay = 0;
	int deq;
	stmf_lu_t *lu;
	stmf_i_lu_t *ilu;
	stmf_local_port_t *lport;
	stmf_i_local_port_t *ilport, *next_ilport;
	stmf_i_scsi_session_t *iss;

	td = drv_usectohz(20000);

	mutex_enter(&stmf_state.stmf_lock);
	stmf_state.stmf_svc_flags |= STMF_SVC_STARTED | STMF_SVC_ACTIVE;

stmf_svc_loop:
	if (stmf_state.stmf_svc_flags & STMF_SVC_TERMINATE) {
		stmf_state.stmf_svc_flags &=
		    ~(STMF_SVC_STARTED | STMF_SVC_ACTIVE);
		mutex_exit(&stmf_state.stmf_lock);
		return;
	}

	if (stmf_state.stmf_svc_active) {
		int waitq_add = 0;
		req = stmf_state.stmf_svc_active;
		stmf_state.stmf_svc_active = req->svc_next;

		switch (req->svc_cmd) {
		case STMF_CMD_LPORT_ONLINE:
			/* Fallthrough */
		case STMF_CMD_LPORT_OFFLINE:
			/* Fallthrough */
		case STMF_CMD_LU_ONLINE:
			/* Nothing to do */
			waitq_add = 1;
			break;

		case STMF_CMD_LU_OFFLINE:
			/* Remove all mappings of this LU */
			stmf_session_lu_unmapall((stmf_lu_t *)req->svc_obj);
			/* Kill all the pending I/Os for this LU */
			mutex_exit(&stmf_state.stmf_lock);
			stmf_task_lu_killall((stmf_lu_t *)req->svc_obj, NULL,
			    STMF_ABORTED);
			mutex_enter(&stmf_state.stmf_lock);
			waitq_add = 1;
			break;
		default:
			cmn_err(CE_PANIC, "stmf_svc: unknown cmd %d",
			    req->svc_cmd);
		}

		if (waitq_add) {
			/* Put it in the wait queue */
			req->svc_next = stmf_state.stmf_svc_waiting;
			stmf_state.stmf_svc_waiting = req;
		}
	}

	/* The waiting list is not going to be modified by anybody else */
	mutex_exit(&stmf_state.stmf_lock);

	for (preq = &stmf_state.stmf_svc_waiting; (*preq) != NULL; ) {
		req = *preq;
		deq = 0;

		switch (req->svc_cmd) {
		case STMF_CMD_LU_ONLINE:
			lu = (stmf_lu_t *)req->svc_obj;
			deq = 1;
			lu->lu_ctl(lu, req->svc_cmd, &req->svc_info);
			break;

		case STMF_CMD_LU_OFFLINE:
			lu = (stmf_lu_t *)req->svc_obj;
			ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
			if (ilu->ilu_ntasks != ilu->ilu_ntasks_free)
				break;
			deq = 1;
			lu->lu_ctl(lu, req->svc_cmd, &req->svc_info);
			break;

		case STMF_CMD_LPORT_OFFLINE:
			/* Fallthrough */
		case STMF_CMD_LPORT_ONLINE:
			lport = (stmf_local_port_t *)req->svc_obj;
			deq = 1;
			lport->lport_ctl(lport, req->svc_cmd, &req->svc_info);
			break;
		}
		if (deq) {
			*preq = req->svc_next;
			kmem_free(req, req->svc_req_alloc_size);
		} else {
			preq = &req->svc_next;
		}
	}

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_svc_active == NULL) {
		/* Do timeouts */
		if (stmf_state.stmf_nlus &&
		    ((!timing_next) || (ddi_get_lbolt() >= timing_next))) {
			if (!stmf_state.stmf_svc_ilu_timing) {
				/* we are starting a new round */
				stmf_state.stmf_svc_ilu_timing =
				    stmf_state.stmf_ilulist;
				timing_start = ddi_get_lbolt();
			}
			stmf_check_ilu_timing();
			if (!stmf_state.stmf_svc_ilu_timing) {
				/* we finished a complete round */
				timing_next =
				    timing_start + drv_usectohz(5*1000*1000);
			} else {
				/* we still have some ilu items to check */
				timing_next =
				    ddi_get_lbolt() + drv_usectohz(1*1000*1000);
			}
			if (stmf_state.stmf_svc_active)
				goto stmf_svc_loop;
		}
		/* Check if there are free tasks to clear */
		if (stmf_state.stmf_nlus &&
		    ((!drain_next) || (ddi_get_lbolt() >= drain_next))) {
			if (!stmf_state.stmf_svc_ilu_draining) {
				/* we are starting a new round */
				stmf_state.stmf_svc_ilu_draining =
				    stmf_state.stmf_ilulist;
				drain_start = ddi_get_lbolt();
			}
			stmf_check_freetask();
			if (!stmf_state.stmf_svc_ilu_draining) {
				/* we finished a complete round */
				drain_next =
				    drain_start + drv_usectohz(10*1000*1000);
			} else {
				/* we still have some ilu items to check */
				drain_next =
				    ddi_get_lbolt() + drv_usectohz(1*1000*1000);
			}
			if (stmf_state.stmf_svc_active)
				goto stmf_svc_loop;
		}

		/* Check if we need to run worker_mgmt */
		if (ddi_get_lbolt() > worker_delay) {
			stmf_worker_mgmt();
			worker_delay = ddi_get_lbolt() +
			    stmf_worker_mgmt_delay;
		}

		/* Check if any active session got its 1st LUN */
		if (stmf_state.stmf_process_initial_luns) {
			int stmf_level = 0;
			int port_level;
			for (ilport = stmf_state.stmf_ilportlist; ilport;
			    ilport = next_ilport) {
				int ilport_lock_held;
				next_ilport = ilport->ilport_next;
				if ((ilport->ilport_flags &
				    ILPORT_SS_GOT_INITIAL_LUNS) == 0) {
					continue;
				}
				port_level = 0;
				rw_enter(&ilport->ilport_lock, RW_READER);
				ilport_lock_held = 1;
				for (iss = ilport->ilport_ss_list; iss;
				    iss = iss->iss_next) {
					if ((iss->iss_flags &
					    ISS_GOT_INITIAL_LUNS) == 0) {
						continue;
					}
					port_level++;
					stmf_level++;
					atomic_and_32(&iss->iss_flags,
					    ~ISS_GOT_INITIAL_LUNS);
					atomic_or_32(&iss->iss_flags,
					    ISS_EVENT_ACTIVE);
					rw_exit(&ilport->ilport_lock);
					ilport_lock_held = 0;
					mutex_exit(&stmf_state.stmf_lock);
					stmf_generate_lport_event(ilport,
					    LPORT_EVENT_INITIAL_LUN_MAPPED,
					    iss->iss_ss, 0);
					atomic_and_32(&iss->iss_flags,
					    ~ISS_EVENT_ACTIVE);
					mutex_enter(&stmf_state.stmf_lock);
					/*
					 * Scan all the ilports again as the
					 * ilport list might have changed.
					 */
					next_ilport =
					    stmf_state.stmf_ilportlist;
					break;
				}
				if (port_level == 0) {
					atomic_and_32(&ilport->ilport_flags,
					    ~ILPORT_SS_GOT_INITIAL_LUNS);
				}
				/* drop the lock if we are holding it. */
				if (ilport_lock_held == 1)
					rw_exit(&ilport->ilport_lock);

				/* Max 4 sessions at a time */
				if (stmf_level >= 4) {
					break;
				}
			}
			if (stmf_level == 0) {
				stmf_state.stmf_process_initial_luns = 0;
			}
		}

		stmf_state.stmf_svc_flags &= ~STMF_SVC_ACTIVE;
		(void) cv_reltimedwait(&stmf_state.stmf_cv,
		    &stmf_state.stmf_lock, td, TR_CLOCK_TICK);
		stmf_state.stmf_svc_flags |= STMF_SVC_ACTIVE;
	}
	goto stmf_svc_loop;
}

void
stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info)
{
	stmf_svc_req_t *req;
	int s;

	ASSERT(!mutex_owned(&stmf_state.stmf_lock));
	s = sizeof (stmf_svc_req_t);
	if (info->st_additional_info) {
		s += strlen(info->st_additional_info) + 1;
	}
	req = kmem_zalloc(s, KM_SLEEP);

	req->svc_cmd = cmd;
	req->svc_obj = obj;
	req->svc_info.st_rflags = info->st_rflags;
	if (info->st_additional_info) {
		req->svc_info.st_additional_info = (char *)(GET_BYTE_OFFSET(req,
		    sizeof (stmf_svc_req_t)));
		(void) strcpy(req->svc_info.st_additional_info,
		    info->st_additional_info);
	}
	req->svc_req_alloc_size = s;

	mutex_enter(&stmf_state.stmf_lock);
	req->svc_next = stmf_state.stmf_svc_active;
	stmf_state.stmf_svc_active = req;
	if ((stmf_state.stmf_svc_flags & STMF_SVC_ACTIVE) == 0) {
		cv_signal(&stmf_state.stmf_cv);
	}
	mutex_exit(&stmf_state.stmf_lock);
}
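/*
 * Kernel tracing into a circular in-memory buffer. Each entry is a single
 * formatted line (at most 158 characters plus the newline and NUL);
 * trace_buf_curndx wraps back to the start once the remaining room drops
 * below 320 bytes, so the oldest entries are overwritten first.
 */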
void
stmf_trace(caddr_t ident, const char *fmt, ...)
{
	va_list args;
	char tbuf[160];
	int len;

	if (!stmf_trace_on)
		return;
	len = snprintf(tbuf, 158, "%s:%07lu: ", ident ? ident : "",
	    ddi_get_lbolt());
	va_start(args, fmt);
	len += vsnprintf(tbuf + len, 158 - len, fmt, args);
	va_end(args);

	if (len > 158) {
		len = 158;
	}
	tbuf[len++] = '\n';
	tbuf[len] = 0;

	mutex_enter(&trace_buf_lock);
	bcopy(tbuf, &stmf_trace_buf[trace_buf_curndx], len+1);
	trace_buf_curndx += len;
	if (trace_buf_curndx > (trace_buf_size - 320))
		trace_buf_curndx = 0;
	mutex_exit(&trace_buf_lock);
}

void
stmf_trace_clear()
{
	if (!stmf_trace_on)
		return;
	mutex_enter(&trace_buf_lock);
	trace_buf_curndx = 0;
	if (trace_buf_size > 0)
		stmf_trace_buf[0] = 0;
	mutex_exit(&trace_buf_lock);
}

static void
stmf_abort_task_offline(scsi_task_t *task, int offline_lu, char *info)
{
	stmf_state_change_info_t change_info;
	void *ctl_private;
	uint32_t ctl_cmd;
	int msg = 0;

	stmf_trace("FROM STMF", "abort_task_offline called for %s: %s",
	    offline_lu ? "LU" : "LPORT", info ? info : "no additional info");
	change_info.st_additional_info = info;
	if (offline_lu) {
		change_info.st_rflags = STMF_RFLAG_RESET |
		    STMF_RFLAG_LU_ABORT;
		ctl_private = task->task_lu;
		if (((stmf_i_lu_t *)
		    task->task_lu->lu_stmf_private)->ilu_state ==
		    STMF_STATE_ONLINE) {
			msg = 1;
		}
		ctl_cmd = STMF_CMD_LU_OFFLINE;
	} else {
		change_info.st_rflags = STMF_RFLAG_RESET |
		    STMF_RFLAG_LPORT_ABORT;
		ctl_private = task->task_lport;
		if (((stmf_i_local_port_t *)
		    task->task_lport->lport_stmf_private)->ilport_state ==
		    STMF_STATE_ONLINE) {
			msg = 1;
		}
		ctl_cmd = STMF_CMD_LPORT_OFFLINE;
	}

	if (msg) {
		stmf_trace(0, "Calling stmf_ctl to offline %s : %s",
		    offline_lu ? "LU" : "LPORT", info ? info :
		    "<no additional info>");
	}
	(void) stmf_ctl(ctl_cmd, ctl_private, &change_info);
}