1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 * 25 * Fibre Channel SCSI ULP Mapping driver 26 */ 27 28 #include <sys/scsi/scsi.h> 29 #include <sys/types.h> 30 #include <sys/varargs.h> 31 #include <sys/devctl.h> 32 #include <sys/thread.h> 33 #include <sys/thread.h> 34 #include <sys/open.h> 35 #include <sys/file.h> 36 #include <sys/sunndi.h> 37 #include <sys/console.h> 38 #include <sys/proc.h> 39 #include <sys/time.h> 40 #include <sys/utsname.h> 41 #include <sys/scsi/impl/scsi_reset_notify.h> 42 #include <sys/ndi_impldefs.h> 43 #include <sys/byteorder.h> 44 #include <sys/fs/dv_node.h> 45 #include <sys/ctype.h> 46 #include <sys/sunmdi.h> 47 48 #include <sys/fibre-channel/fc.h> 49 #include <sys/fibre-channel/impl/fc_ulpif.h> 50 #include <sys/fibre-channel/ulp/fcpvar.h> 51 52 /* 53 * Discovery Process 54 * ================= 55 * 56 * The discovery process is a major function of FCP. In order to help 57 * understand that function a flow diagram is given here. 
This diagram 58 * doesn't claim to cover all the cases and the events that can occur during 59 * the discovery process nor the subtleties of the code. The code paths shown 60 * are simplified. Its purpose is to help the reader (and potentially bug 61 * fixer) have an overall view of the logic of the code. For that reason the 62 * diagram covers the simple case of the line coming up cleanly or of a new 63 * port attaching to FCP the link being up. The reader must keep in mind 64 * that: 65 * 66 * - There are special cases where bringing devices online and offline 67 * is driven by Ioctl. 68 * 69 * - The behavior of the discovery process can be modified through the 70 * .conf file. 71 * 72 * - The line can go down and come back up at any time during the 73 * discovery process which explains some of the complexity of the code. 74 * 75 * ............................................................................ 76 * 77 * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP. 78 * 79 * 80 * +-------------------------+ 81 * fp/fctl module --->| fcp_port_attach | 82 * +-------------------------+ 83 * | | 84 * | | 85 * | v 86 * | +-------------------------+ 87 * | | fcp_handle_port_attach | 88 * | +-------------------------+ 89 * | | 90 * | | 91 * +--------------------+ | 92 * | | 93 * v v 94 * +-------------------------+ 95 * | fcp_statec_callback | 96 * +-------------------------+ 97 * | 98 * | 99 * v 100 * +-------------------------+ 101 * | fcp_handle_devices | 102 * +-------------------------+ 103 * | 104 * | 105 * v 106 * +-------------------------+ 107 * | fcp_handle_mapflags | 108 * +-------------------------+ 109 * | 110 * | 111 * v 112 * +-------------------------+ 113 * | fcp_send_els | 114 * | | 115 * | PLOGI or PRLI To all the| 116 * | reachable devices. | 117 * +-------------------------+ 118 * 119 * 120 * ............................................................................ 
121 * 122 * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during 123 * STEP 1 are called (it is actually the same function). 124 * 125 * 126 * +-------------------------+ 127 * | fcp_icmd_callback | 128 * fp/fctl module --->| | 129 * | callback for PLOGI and | 130 * | PRLI. | 131 * +-------------------------+ 132 * | 133 * | 134 * Received PLOGI Accept /-\ Received PRLI Accept 135 * _ _ _ _ _ _ / \_ _ _ _ _ _ 136 * | \ / | 137 * | \-/ | 138 * | | 139 * v v 140 * +-------------------------+ +-------------------------+ 141 * | fcp_send_els | | fcp_send_scsi | 142 * | | | | 143 * | PRLI | | REPORT_LUN | 144 * +-------------------------+ +-------------------------+ 145 * 146 * ............................................................................ 147 * 148 * STEP 3: The callback functions of the SCSI commands issued by FCP are called 149 * (It is actually the same function). 150 * 151 * 152 * +-------------------------+ 153 * fp/fctl module ------->| fcp_scsi_callback | 154 * +-------------------------+ 155 * | 156 * | 157 * | 158 * Receive REPORT_LUN reply /-\ Receive INQUIRY PAGE83 reply 159 * _ _ _ _ _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ _ _ _ _ 160 * | \ / | 161 * | \-/ | 162 * | | | 163 * | Receive INQUIRY reply| | 164 * | | | 165 * v v v 166 * +------------------------+ +----------------------+ +----------------------+ 167 * | fcp_handle_reportlun | | fcp_handle_inquiry | | fcp_handle_page83 | 168 * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) | 169 * +------------------------+ +----------------------+ +----------------------+ 170 * | | | 171 * | | | 172 * | | | 173 * v v | 174 * +-----------------+ +-----------------+ | 175 * | fcp_send_scsi | | fcp_send_scsi | | 176 * | | | | | 177 * | INQUIRY | | INQUIRY PAGE83 | | 178 * | (To each LUN) | +-----------------+ | 179 * +-----------------+ | 180 * | 181 * v 182 * +------------------------+ 183 * | fcp_call_finish_init | 184 * +------------------------+ 185 * | 
186 * v 187 * +-----------------------------+ 188 * | fcp_call_finish_init_held | 189 * +-----------------------------+ 190 * | 191 * | 192 * All LUNs scanned /-\ 193 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ / \ 194 * | \ / 195 * | \-/ 196 * v | 197 * +------------------+ | 198 * | fcp_finish_tgt | | 199 * +------------------+ | 200 * | Target Not Offline and | 201 * Target Not Offline and | not marked and tgt_node_state | 202 * marked /-\ not FCP_TGT_NODE_ON_DEMAND | 203 * _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ | 204 * | \ / | | 205 * | \-/ | | 206 * v v | 207 * +----------------------------+ +-------------------+ | 208 * | fcp_offline_target | | fcp_create_luns | | 209 * | | +-------------------+ | 210 * | A structure fcp_tgt_elem | | | 211 * | is created and queued in | v | 212 * | the FCP port list | +-------------------+ | 213 * | port_offline_tgts. It | | fcp_pass_to_hp | | 214 * | will be unqueued by the | | | | 215 * | watchdog timer. | | Called for each | | 216 * +----------------------------+ | LUN. Dispatches | | 217 * | | fcp_hp_task | | 218 * | +-------------------+ | 219 * | | | 220 * | | | 221 * | | | 222 * | +---------------->| 223 * | | 224 * +---------------------------------------------->| 225 * | 226 * | 227 * All the targets (devices) have been scanned /-\ 228 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ / \ 229 * | \ / 230 * | \-/ 231 * +-------------------------------------+ | 232 * | fcp_finish_init | | 233 * | | | 234 * | Signal broadcasts the condition | | 235 * | variable port_config_cv of the FCP | | 236 * | port. One potential code sequence | | 237 * | waiting on the condition variable | | 238 * | the code sequence handling | | 239 * | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER| | 240 * | The other is in the function | | 241 * | fcp_reconfig_wait which is called | | 242 * | in the transmit path preventing IOs | | 243 * | from going through till the disco- | | 244 * | very process is over. 
| | 245 * +-------------------------------------+ | 246 * | | 247 * | | 248 * +--------------------------------->| 249 * | 250 * v 251 * Return 252 * 253 * ............................................................................ 254 * 255 * STEP 4: The hot plug task is called (for each fcp_hp_elem). 256 * 257 * 258 * +-------------------------+ 259 * | fcp_hp_task | 260 * +-------------------------+ 261 * | 262 * | 263 * v 264 * +-------------------------+ 265 * | fcp_trigger_lun | 266 * +-------------------------+ 267 * | 268 * | 269 * v 270 * Bring offline /-\ Bring online 271 * _ _ _ _ _ _ _ _ _/ \_ _ _ _ _ _ _ _ _ _ 272 * | \ / | 273 * | \-/ | 274 * v v 275 * +---------------------+ +-----------------------+ 276 * | fcp_offline_child | | fcp_get_cip | 277 * +---------------------+ | | 278 * | Creates a dev_info_t | 279 * | or a mdi_pathinfo_t | 280 * | depending on whether | 281 * | mpxio is on or off. | 282 * +-----------------------+ 283 * | 284 * | 285 * v 286 * +-----------------------+ 287 * | fcp_online_child | 288 * | | 289 * | Set device online | 290 * | using NDI or MDI. | 291 * +-----------------------+ 292 * 293 * ............................................................................ 294 * 295 * STEP 5: The watchdog timer expires. The watch dog timer does much more that 296 * what is described here. We only show the target offline path. 
297 * 298 * 299 * +--------------------------+ 300 * | fcp_watch | 301 * +--------------------------+ 302 * | 303 * | 304 * v 305 * +--------------------------+ 306 * | fcp_scan_offline_tgts | 307 * +--------------------------+ 308 * | 309 * | 310 * v 311 * +--------------------------+ 312 * | fcp_offline_target_now | 313 * +--------------------------+ 314 * | 315 * | 316 * v 317 * +--------------------------+ 318 * | fcp_offline_tgt_luns | 319 * +--------------------------+ 320 * | 321 * | 322 * v 323 * +--------------------------+ 324 * | fcp_offline_lun | 325 * +--------------------------+ 326 * | 327 * | 328 * v 329 * +----------------------------------+ 330 * | fcp_offline_lun_now | 331 * | | 332 * | A request (or two if mpxio) is | 333 * | sent to the hot plug task using | 334 * | a fcp_hp_elem structure. | 335 * +----------------------------------+ 336 */ 337 338 /* 339 * Functions registered with DDI framework 340 */ 341 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd); 342 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd); 343 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp); 344 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp); 345 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, 346 cred_t *credp, int *rval); 347 348 /* 349 * Functions registered with FC Transport framework 350 */ 351 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo, 352 fc_attach_cmd_t cmd, uint32_t s_id); 353 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info, 354 fc_detach_cmd_t cmd); 355 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, 356 int cmd, intptr_t data, int mode, cred_t *credp, int *rval, 357 uint32_t claimed); 358 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle, 359 fc_unsol_buf_t *buf, uint32_t claimed); 360 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle, 361 fc_unsol_buf_t *buf, uint32_t claimed); 
362 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle, 363 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist, 364 uint32_t dev_cnt, uint32_t port_sid); 365 366 /* 367 * Functions registered with SCSA framework 368 */ 369 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 370 scsi_hba_tran_t *hba_tran, struct scsi_device *sd); 371 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 372 scsi_hba_tran_t *hba_tran, struct scsi_device *sd); 373 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip, 374 scsi_hba_tran_t *hba_tran, struct scsi_device *sd); 375 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt); 376 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt); 377 static int fcp_scsi_reset(struct scsi_address *ap, int level); 378 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom); 379 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, 380 int whom); 381 static void fcp_pkt_teardown(struct scsi_pkt *pkt); 382 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag, 383 void (*callback)(caddr_t), caddr_t arg); 384 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, 385 char *name, ddi_eventcookie_t *event_cookiep); 386 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip, 387 ddi_eventcookie_t eventid, void (*callback)(), void *arg, 388 ddi_callback_id_t *cb_id); 389 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi, 390 ddi_callback_id_t cb_id); 391 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip, 392 ddi_eventcookie_t eventid, void *impldata); 393 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag, 394 ddi_bus_config_op_t op, void *arg, dev_info_t **childp); 395 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag, 396 ddi_bus_config_op_t op, void *arg); 397 398 /* 399 * Internal functions 
400 */ 401 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, 402 int mode, int *rval); 403 404 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi, 405 int mode, int *rval); 406 static int fcp_copyin_scsi_cmd(caddr_t base_addr, 407 struct fcp_scsi_cmd *fscsi, int mode); 408 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, 409 caddr_t base_addr, int mode); 410 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi); 411 412 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr, 413 la_wwn_t *pwwn, int *ret_val, int *fc_status, int *fc_pkt_state, 414 int *fc_pkt_reason, int *fc_pkt_action); 415 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, 416 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action); 417 static int fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, 418 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action); 419 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd); 420 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd); 421 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt); 422 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd); 423 424 static void fcp_handle_devices(struct fcp_port *pptr, 425 fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt, 426 fcp_map_tag_t *map_tag, int cause); 427 static int fcp_handle_mapflags(struct fcp_port *pptr, 428 struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt, 429 int tgt_cnt, int cause); 430 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt, 431 struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause); 432 static void fcp_update_state(struct fcp_port *pptr, uint32_t state, 433 int cause); 434 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, 435 uint32_t state); 436 static struct fcp_port *fcp_get_port(opaque_t port_handle); 437 static void fcp_unsol_callback(fc_packet_t *fpkt); 438 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t 
*buf, 439 uchar_t r_ctl, uchar_t type); 440 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf); 441 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr, 442 struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len, 443 int nodma, int lcount, int tcount, int cause, uint32_t rscn_count); 444 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd); 445 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd, 446 int nodma, int flags); 447 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd); 448 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr, 449 uchar_t *wwn); 450 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr, 451 uint32_t d_id); 452 static void fcp_icmd_callback(fc_packet_t *fpkt); 453 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, 454 int len, int lcount, int tcount, int cause, uint32_t rscn_count); 455 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt); 456 static void fcp_scsi_callback(fc_packet_t *fpkt); 457 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt); 458 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd); 459 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd); 460 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt, 461 uint16_t lun_num); 462 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt, 463 int link_cnt, int tgt_cnt, int cause); 464 static void fcp_finish_init(struct fcp_port *pptr); 465 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, 466 int tgt_cnt, int cause); 467 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, 468 int online, int link_cnt, int tgt_cnt, int flags); 469 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt, 470 int link_cnt, int tgt_cnt, int nowait, int flags); 471 static void fcp_offline_target_now(struct fcp_port *pptr, 472 struct fcp_tgt *ptgt, int 
link_cnt, int tgt_cnt, int flags); 473 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, 474 int tgt_cnt, int flags); 475 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt, 476 int nowait, int flags); 477 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, 478 int tgt_cnt); 479 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, 480 int tgt_cnt, int flags); 481 static void fcp_scan_offline_luns(struct fcp_port *pptr); 482 static void fcp_scan_offline_tgts(struct fcp_port *pptr); 483 static void fcp_update_offline_flags(struct fcp_lun *plun); 484 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun); 485 static void fcp_abort_commands(struct fcp_pkt *head, struct 486 fcp_port *pptr); 487 static void fcp_cmd_callback(fc_packet_t *fpkt); 488 static void fcp_complete_pkt(fc_packet_t *fpkt); 489 static int fcp_validate_fcp_response(struct fcp_rsp *rsp, 490 struct fcp_port *pptr); 491 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt, 492 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause); 493 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt); 494 static void fcp_dealloc_lun(struct fcp_lun *plun); 495 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr, 496 fc_portmap_t *map_entry, int link_cnt); 497 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt); 498 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt); 499 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, 500 int internal); 501 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...); 502 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo, 503 uint32_t s_id, int instance); 504 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag, 505 int instance); 506 static void fcp_cleanup_port(struct fcp_port *pptr, int instance); 507 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *, 
508 int); 509 static void fcp_kmem_cache_destructor(struct scsi_pkt *, scsi_hba_tran_t *); 510 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t); 511 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, 512 int flags); 513 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt); 514 static int fcp_reset_target(struct scsi_address *ap, int level); 515 static int fcp_commoncap(struct scsi_address *ap, char *cap, 516 int val, int tgtonly, int doset); 517 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len); 518 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len); 519 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, 520 int sleep); 521 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo, 522 uint32_t s_id, fc_attach_cmd_t cmd, int instance); 523 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo); 524 static void fcp_process_elem(struct fcp_hp_elem *elem, int result); 525 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, 526 int lcount, int tcount); 527 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip); 528 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip); 529 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt, 530 int tgt_cnt); 531 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun, 532 dev_info_t *pdip, caddr_t name); 533 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip, 534 int lcount, int tcount, int flags, int *circ); 535 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, 536 int lcount, int tcount, int flags, int *circ); 537 static void fcp_remove_child(struct fcp_lun *plun); 538 static void fcp_watch(void *arg); 539 static void fcp_check_reset_delay(struct fcp_port *pptr); 540 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt, 541 struct fcp_lun 
*rlun, int tgt_cnt); 542 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr); 543 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr, 544 uchar_t *wwn, uint16_t lun); 545 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd, 546 struct fcp_lun *plun); 547 static void fcp_post_callback(struct fcp_pkt *cmd); 548 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd); 549 static struct fcp_port *fcp_dip2port(dev_info_t *dip); 550 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr, 551 child_info_t *cip); 552 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr, 553 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt, 554 int tgt_cnt, int flags); 555 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr, 556 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt, 557 int tgt_cnt, int flags, int wait); 558 static void fcp_retransport_cmd(struct fcp_port *pptr, 559 struct fcp_pkt *cmd); 560 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, 561 uint_t statistics); 562 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd); 563 static void fcp_update_targets(struct fcp_port *pptr, 564 fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause); 565 static int fcp_call_finish_init(struct fcp_port *pptr, 566 struct fcp_tgt *ptgt, int lcount, int tcount, int cause); 567 static int fcp_call_finish_init_held(struct fcp_port *pptr, 568 struct fcp_tgt *ptgt, int lcount, int tcount, int cause); 569 static void fcp_reconfigure_luns(void * tgt_handle); 570 static void fcp_free_targets(struct fcp_port *pptr); 571 static void fcp_free_target(struct fcp_tgt *ptgt); 572 static int fcp_is_retryable(struct fcp_ipkt *icmd); 573 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn); 574 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int); 575 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string); 576 static void 
fcp_print_error(fc_packet_t *fpkt); 577 static int fcp_handle_ipkt_errors(struct fcp_port *pptr, 578 struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op); 579 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt); 580 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr, 581 uint32_t *dev_cnt); 582 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause); 583 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval); 584 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *, 585 struct fcp_ioctl *, struct fcp_port **); 586 static char *fcp_get_lun_path(struct fcp_lun *plun); 587 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode, 588 int *rval); 589 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id); 590 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id); 591 static char *fcp_get_lun_path(struct fcp_lun *plun); 592 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode, 593 int *rval); 594 static void fcp_reconfig_wait(struct fcp_port *pptr); 595 596 /* 597 * New functions added for mpxio support 598 */ 599 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 600 scsi_hba_tran_t *hba_tran, struct scsi_device *sd); 601 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount, 602 int tcount); 603 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun, 604 dev_info_t *pdip); 605 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip); 606 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int); 607 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr); 608 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp); 609 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, 610 int what); 611 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt, 612 fc_packet_t *fpkt); 613 static int 
fcp_symmetric_device_probe(struct fcp_lun *plun); 614 615 /* 616 * New functions added for lun masking support 617 */ 618 static void fcp_read_blacklist(dev_info_t *dip, 619 struct fcp_black_list_entry **pplun_blacklist); 620 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun, 621 struct fcp_black_list_entry **pplun_blacklist); 622 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id, 623 struct fcp_black_list_entry **pplun_blacklist); 624 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id); 625 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist); 626 627 extern struct mod_ops mod_driverops; 628 /* 629 * This variable is defined in modctl.c and set to '1' after the root driver 630 * and fs are loaded. It serves as an indication that the root filesystem can 631 * be used. 632 */ 633 extern int modrootloaded; 634 /* 635 * This table contains strings associated with the SCSI sense key codes. It 636 * is used by FCP to print a clear explanation of the code returned in the 637 * sense information by a device. 638 */ 639 extern char *sense_keys[]; 640 /* 641 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is 642 * under this device that the paths to a physical device are created when 643 * MPxIO is used. 
644 */ 645 extern dev_info_t *scsi_vhci_dip; 646 647 /* 648 * Report lun processing 649 */ 650 #define FCP_LUN_ADDRESSING 0x80 651 #define FCP_PD_ADDRESSING 0x00 652 #define FCP_VOLUME_ADDRESSING 0x40 653 654 #define FCP_SVE_THROTTLE 0x28 /* Vicom */ 655 #define MAX_INT_DMA 0x7fffffff 656 #define FCP_MAX_SENSE_LEN 252 657 #define FCP_MAX_RESPONSE_LEN 0xffffff 658 /* 659 * Property definitions 660 */ 661 #define NODE_WWN_PROP (char *)fcp_node_wwn_prop 662 #define PORT_WWN_PROP (char *)fcp_port_wwn_prop 663 #define TARGET_PROP (char *)fcp_target_prop 664 #define LUN_PROP (char *)fcp_lun_prop 665 #define SAM_LUN_PROP (char *)fcp_sam_lun_prop 666 #define CONF_WWN_PROP (char *)fcp_conf_wwn_prop 667 #define OBP_BOOT_WWN (char *)fcp_obp_boot_wwn 668 #define MANUAL_CFG_ONLY (char *)fcp_manual_config_only 669 #define INIT_PORT_PROP (char *)fcp_init_port_prop 670 #define TGT_PORT_PROP (char *)fcp_tgt_port_prop 671 #define LUN_BLACKLIST_PROP (char *)fcp_lun_blacklist_prop 672 /* 673 * Short hand macros. 674 */ 675 #define LUN_PORT (plun->lun_tgt->tgt_port) 676 #define LUN_TGT (plun->lun_tgt) 677 678 /* 679 * Driver private macros 680 */ 681 #define FCP_ATOB(x) (((x) >= '0' && (x) <= '9') ? ((x) - '0') : \ 682 ((x) >= 'a' && (x) <= 'f') ? \ 683 ((x) - 'a' + 10) : ((x) - 'A' + 10)) 684 685 #define FCP_MAX(a, b) ((a) > (b) ? 
(a) : (b))

#define	FCP_N_NDI_EVENTS	\
	(sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))

/* True when the link has bounced since this internal packet was built. */
#define	FCP_LINK_STATE_CHANGED(p, c)	\
	((p)->port_link_cnt != (c)->ipkt_link_cnt)

/* True when the target generation has moved since the packet was built. */
#define	FCP_TGT_STATE_CHANGED(t, c)	\
	((t)->tgt_change_cnt != (c)->ipkt_change_cnt)

#define	FCP_STATE_CHANGED(p, t, c)	\
	(FCP_TGT_STATE_CHANGED(t, c))

/* Transient transport conditions that warrant a retry of the packet. */
#define	FCP_MUST_RETRY(fpkt)	\
	((fpkt)->pkt_state == FC_PKT_LOCAL_BSY || \
	(fpkt)->pkt_state == FC_PKT_LOCAL_RJT || \
	(fpkt)->pkt_state == FC_PKT_TRAN_BSY || \
	(fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS || \
	(fpkt)->pkt_state == FC_PKT_NPORT_BSY || \
	(fpkt)->pkt_state == FC_PKT_FABRIC_BSY || \
	(fpkt)->pkt_state == FC_PKT_PORT_OFFLINE || \
	(fpkt)->pkt_reason == FC_REASON_OFFLINE)

/* UNIT ATTENTION, ASC/ASCQ 3f/0e: REPORTED LUNS DATA HAS CHANGED. */
#define	FCP_SENSE_REPORTLUN_CHANGED(es)	\
	((es)->es_key == KEY_UNIT_ATTENTION && \
	(es)->es_add_code == 0x3f && \
	(es)->es_qual_code == 0x0e)

/* ILLEGAL REQUEST, ASC/ASCQ 25/00: LOGICAL UNIT NOT SUPPORTED. */
#define	FCP_SENSE_NO_LUN(es)	\
	((es)->es_key == KEY_ILLEGAL_REQUEST && \
	(es)->es_add_code == 0x25 && \
	(es)->es_qual_code == 0x0)

#define	FCP_VERSION		"1.186"
#define	FCP_NAME_VERSION	"SunFC FCP v" FCP_VERSION

#define	FCP_NUM_ELEMENTS(array)	\
	(sizeof (array) / sizeof ((array)[0]))

/*
 * Debugging, Error reporting, and tracing
 */
/* Parenthesized so the expansion is safe inside larger expressions. */
#define	FCP_LOG_SIZE		(1024 * 1024)

#define	FCP_LEVEL_1		0x00001		/* attach/detach PM CPR */
#define	FCP_LEVEL_2		0x00002		/* failures/Invalid data */
#define	FCP_LEVEL_3		0x00004		/* state change, discovery */
#define	FCP_LEVEL_4		0x00008		/* ULP messages */
#define	FCP_LEVEL_5		0x00010		/* ELS/SCSI cmds */
#define	FCP_LEVEL_6		0x00020		/* Transport failures */
#define	FCP_LEVEL_7		0x00040
#define	FCP_LEVEL_8		0x00080		/* I/O tracing */
#define	FCP_LEVEL_9		0x00100		/* I/O tracing */



/*
 * Log contents to system messages file
 */
#define	FCP_MSG_LEVEL_1	(FCP_LEVEL_1 | 
FC_TRACE_LOG_MSG) 746 #define FCP_MSG_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_MSG) 747 #define FCP_MSG_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_MSG) 748 #define FCP_MSG_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_MSG) 749 #define FCP_MSG_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_MSG) 750 #define FCP_MSG_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_MSG) 751 #define FCP_MSG_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_MSG) 752 #define FCP_MSG_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_MSG) 753 #define FCP_MSG_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_MSG) 754 755 756 /* 757 * Log contents to trace buffer 758 */ 759 #define FCP_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF) 760 #define FCP_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF) 761 #define FCP_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF) 762 #define FCP_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF) 763 #define FCP_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF) 764 #define FCP_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF) 765 #define FCP_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF) 766 #define FCP_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF) 767 #define FCP_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF) 768 769 770 /* 771 * Log contents to both system messages file and trace buffer 772 */ 773 #define FCP_MSG_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF | \ 774 FC_TRACE_LOG_MSG) 775 #define FCP_MSG_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF | \ 776 FC_TRACE_LOG_MSG) 777 #define FCP_MSG_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF | \ 778 FC_TRACE_LOG_MSG) 779 #define FCP_MSG_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF | \ 780 FC_TRACE_LOG_MSG) 781 #define FCP_MSG_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF | \ 782 FC_TRACE_LOG_MSG) 783 #define FCP_MSG_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF | \ 784 FC_TRACE_LOG_MSG) 785 #define FCP_MSG_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF | \ 786 FC_TRACE_LOG_MSG) 787 #define FCP_MSG_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF | \ 788 FC_TRACE_LOG_MSG) 789 #define FCP_MSG_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF | \ 790 
FC_TRACE_LOG_MSG)

/*
 * FCP_DTRACE expands to the trace routine only on DEBUG builds; on
 * non-DEBUG builds it expands to nothing, leaving the argument list as a
 * discarded comma expression.  FCP_TRACE always logs.
 */
#ifdef DEBUG
#define	FCP_DTRACE	fc_trace_debug
#else
#define	FCP_DTRACE
#endif

#define	FCP_TRACE	fc_trace_debug

/* Character/block driver entry points referenced by fcp_ops below. */
static struct cb_ops fcp_cb_ops = {
	fcp_open,			/* open */
	fcp_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	fcp_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};


/* DDI device operations; only attach/detach and cb_ops are provided. */
static struct dev_ops fcp_ops = {
	DEVO_REV,
	0,
	ddi_getinfo_1to1,
	nulldev,			/* identify */
	nulldev,			/* probe */
	fcp_attach,			/* attach and detach are mandatory */
	fcp_detach,
	nodev,				/* reset */
	&fcp_cb_ops,			/* cb_ops */
	NULL,				/* bus_ops */
	NULL,				/* power */
};


char *fcp_version = FCP_NAME_VERSION;

/* Kernel module linkage: a single driver module. */
static struct modldrv modldrv = {
	&mod_driverops,
	FCP_NAME_VERSION,
	&fcp_ops
};


static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};


/* Registration of this ULP (SCSI FCP) with the FC transport (fp/fctl). */
static fc_ulp_modinfo_t fcp_modinfo = {
	&fcp_modinfo,		/* ulp_handle */
	FCTL_ULP_MODREV_4,	/* ulp_rev */
	FC4_SCSI_FCP,		/* ulp_type */
	"fcp",			/* ulp_name */
	FCP_STATEC_MASK,	/* ulp_statec_mask */
	fcp_port_attach,	/* ulp_port_attach */
	fcp_port_detach,	/* ulp_port_detach */
	fcp_port_ioctl,		/* ulp_port_ioctl */
	fcp_els_callback,	/* ulp_els_callback */
	fcp_data_callback,	/* ulp_data_callback */
	fcp_statec_callback	/* ulp_statec_callback */
};

/*
 * NOTE(review): both branches of this conditional are currently identical;
 * presumably the DEBUG mask was meant to diverge — confirm before collapsing.
 */
#ifdef DEBUG
#define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
				FCP_LEVEL_2 | FCP_LEVEL_3 | \
				FCP_LEVEL_4 | FCP_LEVEL_5 | \
				FCP_LEVEL_6 | FCP_LEVEL_7)
#else
#define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 
| \ 873 FCP_LEVEL_2 | FCP_LEVEL_3 | \ 874 FCP_LEVEL_4 | FCP_LEVEL_5 | \ 875 FCP_LEVEL_6 | FCP_LEVEL_7) 876 #endif 877 878 /* FCP global variables */ 879 int fcp_bus_config_debug = 0; 880 static int fcp_log_size = FCP_LOG_SIZE; 881 static int fcp_trace = FCP_TRACE_DEFAULT; 882 static fc_trace_logq_t *fcp_logq = NULL; 883 static struct fcp_black_list_entry *fcp_lun_blacklist = NULL; 884 /* 885 * The auto-configuration is set by default. The only way of disabling it is 886 * through the property MANUAL_CFG_ONLY in the fcp.conf file. 887 */ 888 static int fcp_enable_auto_configuration = 1; 889 static int fcp_max_bus_config_retries = 4; 890 static int fcp_lun_ready_retry = 300; 891 /* 892 * The value assigned to the following variable has changed several times due 893 * to a problem with the data underruns reporting of some firmware(s). The 894 * current value of 50 gives a timeout value of 25 seconds for a max number 895 * of 256 LUNs. 896 */ 897 static int fcp_max_target_retries = 50; 898 /* 899 * Watchdog variables 900 * ------------------ 901 * 902 * fcp_watchdog_init 903 * 904 * Indicates if the watchdog timer is running or not. This is actually 905 * a counter of the number of Fibre Channel ports that attached. When 906 * the first port attaches the watchdog is started. When the last port 907 * detaches the watchdog timer is stopped. 908 * 909 * fcp_watchdog_time 910 * 911 * This is the watchdog clock counter. It is incremented by 912 * fcp_watchdog_time each time the watchdog timer expires. 913 * 914 * fcp_watchdog_timeout 915 * 916 * Increment value of the variable fcp_watchdog_time as well as the 917 * the timeout value of the watchdog timer. The unit is 1 second. It 918 * is strange that this is not a #define but a variable since the code 919 * never changes this value. 
The reason why it can be said that the 920 * unit is 1 second is because the number of ticks for the watchdog 921 * timer is determined like this: 922 * 923 * fcp_watchdog_tick = fcp_watchdog_timeout * 924 * drv_usectohz(1000000); 925 * 926 * The value 1000000 is hard coded in the code. 927 * 928 * fcp_watchdog_tick 929 * 930 * Watchdog timer value in ticks. 931 */ 932 static int fcp_watchdog_init = 0; 933 static int fcp_watchdog_time = 0; 934 static int fcp_watchdog_timeout = 1; 935 static int fcp_watchdog_tick; 936 937 /* 938 * fcp_offline_delay is a global variable to enable customisation of 939 * the timeout on link offlines or RSCNs. The default value is set 940 * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as 941 * specified in FCP4 Chapter 11 (see www.t10.org). 942 * 943 * The variable fcp_offline_delay is specified in SECONDS. 944 * 945 * If we made this a static var then the user would not be able to 946 * change it. This variable is set in fcp_attach(). 947 */ 948 unsigned int fcp_offline_delay = FCP_OFFLINE_DELAY; 949 950 static void *fcp_softstate = NULL; /* for soft state */ 951 static uchar_t fcp_oflag = FCP_IDLE; /* open flag */ 952 static kmutex_t fcp_global_mutex; 953 static kmutex_t fcp_ioctl_mutex; 954 static dev_info_t *fcp_global_dip = NULL; 955 static timeout_id_t fcp_watchdog_id; 956 const char *fcp_lun_prop = "lun"; 957 const char *fcp_sam_lun_prop = "sam-lun"; 958 const char *fcp_target_prop = "target"; 959 /* 960 * NOTE: consumers of "node-wwn" property include stmsboot in ON 961 * consolidation. 
962 */ 963 const char *fcp_node_wwn_prop = "node-wwn"; 964 const char *fcp_port_wwn_prop = "port-wwn"; 965 const char *fcp_conf_wwn_prop = "fc-port-wwn"; 966 const char *fcp_obp_boot_wwn = "fc-boot-dev-portwwn"; 967 const char *fcp_manual_config_only = "manual_configuration_only"; 968 const char *fcp_init_port_prop = "initiator-port"; 969 const char *fcp_tgt_port_prop = "target-port"; 970 const char *fcp_lun_blacklist_prop = "pwwn-lun-blacklist"; 971 972 static struct fcp_port *fcp_port_head = NULL; 973 static ddi_eventcookie_t fcp_insert_eid; 974 static ddi_eventcookie_t fcp_remove_eid; 975 976 static ndi_event_definition_t fcp_ndi_event_defs[] = { 977 { FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL }, 978 { FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT } 979 }; 980 981 /* 982 * List of valid commands for the scsi_ioctl call 983 */ 984 static uint8_t scsi_ioctl_list[] = { 985 SCMD_INQUIRY, 986 SCMD_REPORT_LUN, 987 SCMD_READ_CAPACITY 988 }; 989 990 /* 991 * this is used to dummy up a report lun response for cases 992 * where the target doesn't support it 993 */ 994 static uchar_t fcp_dummy_lun[] = { 995 0x00, /* MSB length (length = no of luns * 8) */ 996 0x00, 997 0x00, 998 0x08, /* LSB length */ 999 0x00, /* MSB reserved */ 1000 0x00, 1001 0x00, 1002 0x00, /* LSB reserved */ 1003 FCP_PD_ADDRESSING, 1004 0x00, /* LUN is ZERO at the first level */ 1005 0x00, 1006 0x00, /* second level is zero */ 1007 0x00, 1008 0x00, /* third level is zero */ 1009 0x00, 1010 0x00 /* fourth level is zero */ 1011 }; 1012 1013 static uchar_t fcp_alpa_to_switch[] = { 1014 0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00, 1015 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00, 1016 0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74, 1017 0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e, 1018 0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67, 1019 0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00, 1020 0x5f, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d, 1021 0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00, 1022 0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e, 1023 0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b, 1024 0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43, 1025 0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00, 1026 0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37, 1027 0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 1028 0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 1029 0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c, 1030 0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27, 1031 0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f, 1032 0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00, 1033 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15, 1034 0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e, 1035 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00, 1036 0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00, 1037 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1038 }; 1039 1040 static caddr_t pid = "SESS01 "; 1041 1042 #if !defined(lint) 1043 1044 _NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex, 1045 fcp_port::fcp_next fcp_watchdog_id)) 1046 1047 _NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time)) 1048 1049 _NOTE(SCHEME_PROTECTS_DATA("Unshared", 1050 fcp_insert_eid 1051 fcp_remove_eid 1052 fcp_watchdog_time)) 1053 1054 _NOTE(SCHEME_PROTECTS_DATA("Unshared", 1055 fcp_cb_ops 1056 fcp_ops 1057 callb_cpr)) 1058 1059 #endif /* lint */ 1060 1061 /* 1062 * This table is used to determine whether or not it's safe to copy in 1063 * the target node name for a lun. Since all luns behind the same target 1064 * have the same wwnn, only tagets that do not support multiple luns are 1065 * eligible to be enumerated under mpxio if they aren't page83 compliant. 
 */

/*
 * Vendor/product prefixes matched against INQUIRY data.
 * NOTE(review): vendor-id fields are normally blank-padded to 8 bytes;
 * interior padding may have been collapsed by extraction — confirm
 * against the original file.
 */
char *fcp_symmetric_disk_table[] = {
	"SEAGATE ST",
	"IBM DDYFT",
	"SUNW SUNWGS",	/* Daktari enclosure */
	"SUN SENA",	/* SES device */
	"SUN SESS01"	/* VICOM SVE box */
};

int fcp_symmetric_disk_table_size =
	sizeof (fcp_symmetric_disk_table)/sizeof (char *);

/*
 * The _init(9e) return value should be that of mod_install(9f). Under
 * some circumstances, a failure may not be related mod_install(9f) and
 * one would then require a return value to indicate the failure. Looking
 * at mod_install(9f), it is expected to return 0 for success and non-zero
 * for failure. mod_install(9f) for device drivers, further goes down the
 * calling chain and ends up in ddi_installdrv(), whose return values are
 * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
 * calling chain of mod_install(9f) which return values like EINVAL and
 * in some even return -1.
 *
 * To work around the vagaries of the mod_install() calling chain, return
 * either 0 or ENODEV depending on the success or failure of mod_install()
 */
int
_init(void)
{
	int rval;

	/*
	 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
	 * before registering with the transport first.
	 *
	 * NOTE(review): this early-exit returns EINVAL, which contradicts
	 * the 0-or-ENODEV contract described above — confirm intent.
	 */
	if (ddi_soft_state_init(&fcp_softstate,
	    sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
		return (EINVAL);
	}

	mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);

	/* Register with the FC transport before making the module visible. */
	if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
		cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
		mutex_destroy(&fcp_global_mutex);
		mutex_destroy(&fcp_ioctl_mutex);
		ddi_soft_state_fini(&fcp_softstate);
		return (ENODEV);
	}

	fcp_logq = fc_trace_alloc_logq(fcp_log_size);

	if ((rval = mod_install(&modlinkage)) != 0) {
		/* Undo everything in reverse order of acquisition. */
		fc_trace_free_logq(fcp_logq);
		(void) fc_ulp_remove(&fcp_modinfo);
		mutex_destroy(&fcp_global_mutex);
		mutex_destroy(&fcp_ioctl_mutex);
		ddi_soft_state_fini(&fcp_softstate);
		rval = ENODEV;
	}

	return (rval);
}


/*
 * the system is done with us as a driver, so clean up
 */
int
_fini(void)
{
	int rval;

	/*
	 * don't start cleaning up until we know that the module remove
	 * has worked -- if this works, then we know that each instance
	 * has successfully been DDI_DETACHed
	 */
	if ((rval = mod_remove(&modlinkage)) != 0) {
		return (rval);
	}

	(void) fc_ulp_remove(&fcp_modinfo);

	ddi_soft_state_fini(&fcp_softstate);
	mutex_destroy(&fcp_global_mutex);
	mutex_destroy(&fcp_ioctl_mutex);
	fc_trace_free_logq(fcp_logq);

	return (rval);
}


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}


/*
 * attach the module
 */
static int
fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int rval = DDI_SUCCESS;

	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);

	/* Only DDI_ATTACH needs work; other commands succeed trivially. */
	if (cmd == DDI_ATTACH) {
		/* The FCP pseudo device is created here. */
		mutex_enter(&fcp_global_mutex);
		fcp_global_dip = devi;
		mutex_exit(&fcp_global_mutex);

		if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
		    0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
			ddi_report_dev(fcp_global_dip);
		} else {
			cmn_err(CE_WARN, "FCP: Cannot create minor node");
			mutex_enter(&fcp_global_mutex);
			fcp_global_dip = NULL;
			mutex_exit(&fcp_global_mutex);

			rval = DDI_FAILURE;
		}
		/*
		 * We check the fcp_offline_delay property at this
		 * point. This variable is global for the driver,
		 * not specific to an instance.
		 *
		 * We do not recommend setting the value to less
		 * than 10 seconds (RA_TOV_els), or greater than
		 * 60 seconds.
		 */
		fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
		    devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
		    "fcp_offline_delay", FCP_OFFLINE_DELAY);
		/* Out-of-range values are warned about but still honored. */
		if ((fcp_offline_delay < 10) ||
		    (fcp_offline_delay > 60)) {
			cmn_err(CE_WARN, "Setting fcp_offline_delay "
			    "to %d second(s). This is outside the "
			    "recommended range of 10..60 seconds.",
			    fcp_offline_delay);
		}
	}

	return (rval);
}


/*ARGSUSED*/
static int
fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int res = DDI_SUCCESS;

	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_8, 0, "module detach: cmd=0x%x", cmd);

	if (cmd == DDI_DETACH) {
		/*
		 * Check if there are active ports/threads. If there
		 * are any, we will fail, else we will succeed (there
		 * should not be much to clean up)
		 */
		mutex_enter(&fcp_global_mutex);
		FCP_DTRACE(fcp_logq, "fcp",
		    fcp_trace, FCP_BUF_LEVEL_8, 0, "port_head=%p",
		    (void *) fcp_port_head);

		if (fcp_port_head == NULL) {
			ddi_remove_minor_node(fcp_global_dip, NULL);
			fcp_global_dip = NULL;
			mutex_exit(&fcp_global_mutex);
		} else {
			/* Ports still attached: refuse the detach. */
			mutex_exit(&fcp_global_mutex);
			res = DDI_FAILURE;
		}
	}
	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_8, 0, "module detach returning %d", res);

	return (res);
}


/* ARGSUSED */
static int
fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	/*
	 * Allow only root to talk;
	 */
	if (drv_priv(credp)) {
		return (EPERM);
	}

	/* fcp_oflag is the single shared open-state word for the node. */
	mutex_enter(&fcp_global_mutex);
	if (fcp_oflag & FCP_EXCL) {
		/* Already opened exclusively by someone else. */
		mutex_exit(&fcp_global_mutex);
		return (EBUSY);
	}

	if (flag & FEXCL) {
		if (fcp_oflag & FCP_OPEN) {
			/* Exclusive requested but non-exclusive opens exist. */
			mutex_exit(&fcp_global_mutex);
			return (EBUSY);
		}
		fcp_oflag |= FCP_EXCL;
	}
	fcp_oflag |= FCP_OPEN;
	mutex_exit(&fcp_global_mutex);

	return (0);
}


/* ARGSUSED */
static int
fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	mutex_enter(&fcp_global_mutex);
	if (!(fcp_oflag & FCP_OPEN)) {
		mutex_exit(&fcp_global_mutex);
		return (ENODEV);
	}
	/* Clear OPEN, EXCL and BUSY in one go. */
	fcp_oflag = FCP_IDLE;
	mutex_exit(&fcp_global_mutex);

	return (0);
}


/*
 * fcp_ioctl
 *	Entry point for the FCP ioctls
 *
 * Input:
 *	See ioctl(9E)
 *
 * Output:
 *	See ioctl(9E)
 *
 * Returns:
 *	See ioctl(9E)
 *
 * Context:
 *	Kernel context.
 */
/* ARGSUSED */
static int
fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
    int *rval)
{
	int ret = 0;

	mutex_enter(&fcp_global_mutex);
	if (!(fcp_oflag & FCP_OPEN)) {
		mutex_exit(&fcp_global_mutex);
		return (ENXIO);
	}
	mutex_exit(&fcp_global_mutex);

	switch (cmd) {
	case FCP_TGT_INQUIRY:
	case FCP_TGT_CREATE:
	case FCP_TGT_DELETE:
		ret = fcp_setup_device_data_ioctl(cmd,
		    (struct fcp_ioctl *)data, mode, rval);
		break;

	case FCP_TGT_SEND_SCSI:
		/* Passthru commands are serialized by fcp_ioctl_mutex. */
		mutex_enter(&fcp_ioctl_mutex);
		ret = fcp_setup_scsi_ioctl(
		    (struct fcp_scsi_cmd *)data, mode, rval);
		mutex_exit(&fcp_ioctl_mutex);
		break;

	case FCP_STATE_COUNT:
		ret = fcp_get_statec_count((struct fcp_ioctl *)data,
		    mode, rval);
		break;
	case FCP_GET_TARGET_MAPPINGS:
		ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
		    mode, rval);
		break;
	default:
		fcp_log(CE_WARN, NULL,
		    "!Invalid ioctl opcode = 0x%x", cmd);
		ret = EINVAL;
	}

	return (ret);
}


/*
 * fcp_setup_device_data_ioctl
 *	Setup handler for the "device data" style of
 *	ioctl for FCP.  See "fcp_util.h" for data structure
 *	definition.
 *
 * Input:
 *	cmd	= FCP ioctl command
 *	data	= ioctl data
 *	mode	= See ioctl(9E)
 *
 * Output:
 *	data	= ioctl data
 *	rval	= return value - see ioctl(9E)
 *
 * Returns:
 *	See ioctl(9E)
 *
 * Context:
 *	Kernel context.
1395 */ 1396 /* ARGSUSED */ 1397 static int 1398 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode, 1399 int *rval) 1400 { 1401 struct fcp_port *pptr; 1402 struct device_data *dev_data; 1403 uint32_t link_cnt; 1404 la_wwn_t *wwn_ptr = NULL; 1405 struct fcp_tgt *ptgt = NULL; 1406 struct fcp_lun *plun = NULL; 1407 int i, error; 1408 struct fcp_ioctl fioctl; 1409 1410 #ifdef _MULTI_DATAMODEL 1411 switch (ddi_model_convert_from(mode & FMODELS)) { 1412 case DDI_MODEL_ILP32: { 1413 struct fcp32_ioctl f32_ioctl; 1414 1415 if (ddi_copyin((void *)data, (void *)&f32_ioctl, 1416 sizeof (struct fcp32_ioctl), mode)) { 1417 return (EFAULT); 1418 } 1419 fioctl.fp_minor = f32_ioctl.fp_minor; 1420 fioctl.listlen = f32_ioctl.listlen; 1421 fioctl.list = (caddr_t)(long)f32_ioctl.list; 1422 break; 1423 } 1424 case DDI_MODEL_NONE: 1425 if (ddi_copyin((void *)data, (void *)&fioctl, 1426 sizeof (struct fcp_ioctl), mode)) { 1427 return (EFAULT); 1428 } 1429 break; 1430 } 1431 1432 #else /* _MULTI_DATAMODEL */ 1433 if (ddi_copyin((void *)data, (void *)&fioctl, 1434 sizeof (struct fcp_ioctl), mode)) { 1435 return (EFAULT); 1436 } 1437 #endif /* _MULTI_DATAMODEL */ 1438 1439 /* 1440 * Right now we can assume that the minor number matches with 1441 * this instance of fp. If this changes we will need to 1442 * revisit this logic. 
1443 */ 1444 mutex_enter(&fcp_global_mutex); 1445 pptr = fcp_port_head; 1446 while (pptr) { 1447 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) { 1448 break; 1449 } else { 1450 pptr = pptr->port_next; 1451 } 1452 } 1453 mutex_exit(&fcp_global_mutex); 1454 if (pptr == NULL) { 1455 return (ENXIO); 1456 } 1457 mutex_enter(&pptr->port_mutex); 1458 1459 1460 if ((dev_data = kmem_zalloc((sizeof (struct device_data)) * 1461 fioctl.listlen, KM_NOSLEEP)) == NULL) { 1462 mutex_exit(&pptr->port_mutex); 1463 return (ENOMEM); 1464 } 1465 1466 if (ddi_copyin(fioctl.list, dev_data, 1467 (sizeof (struct device_data)) * fioctl.listlen, mode)) { 1468 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen); 1469 mutex_exit(&pptr->port_mutex); 1470 return (EFAULT); 1471 } 1472 link_cnt = pptr->port_link_cnt; 1473 1474 if (cmd == FCP_TGT_INQUIRY) { 1475 wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn); 1476 if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn, 1477 sizeof (wwn_ptr->raw_wwn)) == 0) { 1478 /* This ioctl is requesting INQ info of local HBA */ 1479 mutex_exit(&pptr->port_mutex); 1480 dev_data[0].dev0_type = DTYPE_UNKNOWN; 1481 dev_data[0].dev_status = 0; 1482 if (ddi_copyout(dev_data, fioctl.list, 1483 (sizeof (struct device_data)) * fioctl.listlen, 1484 mode)) { 1485 kmem_free(dev_data, 1486 sizeof (*dev_data) * fioctl.listlen); 1487 return (EFAULT); 1488 } 1489 kmem_free(dev_data, 1490 sizeof (*dev_data) * fioctl.listlen); 1491 #ifdef _MULTI_DATAMODEL 1492 switch (ddi_model_convert_from(mode & FMODELS)) { 1493 case DDI_MODEL_ILP32: { 1494 struct fcp32_ioctl f32_ioctl; 1495 f32_ioctl.fp_minor = fioctl.fp_minor; 1496 f32_ioctl.listlen = fioctl.listlen; 1497 f32_ioctl.list = (caddr32_t)(long)fioctl.list; 1498 if (ddi_copyout((void *)&f32_ioctl, 1499 (void *)data, 1500 sizeof (struct fcp32_ioctl), mode)) { 1501 return (EFAULT); 1502 } 1503 break; 1504 } 1505 case DDI_MODEL_NONE: 1506 if (ddi_copyout((void *)&fioctl, (void *)data, 1507 sizeof (struct fcp_ioctl), mode)) { 
1508 return (EFAULT); 1509 } 1510 break; 1511 } 1512 #else /* _MULTI_DATAMODEL */ 1513 if (ddi_copyout((void *)&fioctl, (void *)data, 1514 sizeof (struct fcp_ioctl), mode)) { 1515 return (EFAULT); 1516 } 1517 #endif /* _MULTI_DATAMODEL */ 1518 return (0); 1519 } 1520 } 1521 1522 if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) { 1523 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen); 1524 mutex_exit(&pptr->port_mutex); 1525 return (ENXIO); 1526 } 1527 1528 for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt); 1529 i++) { 1530 wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn); 1531 1532 dev_data[i].dev0_type = DTYPE_UNKNOWN; 1533 1534 1535 dev_data[i].dev_status = ENXIO; 1536 1537 if ((ptgt = fcp_lookup_target(pptr, 1538 (uchar_t *)wwn_ptr)) == NULL) { 1539 mutex_exit(&pptr->port_mutex); 1540 if (fc_ulp_get_remote_port(pptr->port_fp_handle, 1541 wwn_ptr, &error, 0) == NULL) { 1542 dev_data[i].dev_status = ENODEV; 1543 mutex_enter(&pptr->port_mutex); 1544 continue; 1545 } else { 1546 1547 dev_data[i].dev_status = EAGAIN; 1548 1549 mutex_enter(&pptr->port_mutex); 1550 continue; 1551 } 1552 } else { 1553 mutex_enter(&ptgt->tgt_mutex); 1554 if (ptgt->tgt_state & (FCP_TGT_MARK | 1555 FCP_TGT_BUSY)) { 1556 dev_data[i].dev_status = EAGAIN; 1557 mutex_exit(&ptgt->tgt_mutex); 1558 continue; 1559 } 1560 1561 if (ptgt->tgt_state & FCP_TGT_OFFLINE) { 1562 if (ptgt->tgt_icap && !ptgt->tgt_tcap) { 1563 dev_data[i].dev_status = ENOTSUP; 1564 } else { 1565 dev_data[i].dev_status = ENXIO; 1566 } 1567 mutex_exit(&ptgt->tgt_mutex); 1568 continue; 1569 } 1570 1571 switch (cmd) { 1572 case FCP_TGT_INQUIRY: 1573 /* 1574 * The reason we give device type of 1575 * lun 0 only even though in some 1576 * cases(like maxstrat) lun 0 device 1577 * type may be 0x3f(invalid) is that 1578 * for bridge boxes target will appear 1579 * as luns and the first lun could be 1580 * a device that utility may not care 1581 * about (like a tape device). 
1582 */ 1583 dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt; 1584 dev_data[i].dev_status = 0; 1585 mutex_exit(&ptgt->tgt_mutex); 1586 1587 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) { 1588 dev_data[i].dev0_type = DTYPE_UNKNOWN; 1589 } else { 1590 dev_data[i].dev0_type = plun->lun_type; 1591 } 1592 mutex_enter(&ptgt->tgt_mutex); 1593 break; 1594 1595 case FCP_TGT_CREATE: 1596 mutex_exit(&ptgt->tgt_mutex); 1597 mutex_exit(&pptr->port_mutex); 1598 1599 /* 1600 * serialize state change call backs. 1601 * only one call back will be handled 1602 * at a time. 1603 */ 1604 mutex_enter(&fcp_global_mutex); 1605 if (fcp_oflag & FCP_BUSY) { 1606 mutex_exit(&fcp_global_mutex); 1607 if (dev_data) { 1608 kmem_free(dev_data, 1609 sizeof (*dev_data) * 1610 fioctl.listlen); 1611 } 1612 return (EBUSY); 1613 } 1614 fcp_oflag |= FCP_BUSY; 1615 mutex_exit(&fcp_global_mutex); 1616 1617 dev_data[i].dev_status = 1618 fcp_create_on_demand(pptr, 1619 wwn_ptr->raw_wwn); 1620 1621 if (dev_data[i].dev_status != 0) { 1622 char buf[25]; 1623 1624 for (i = 0; i < FC_WWN_SIZE; i++) { 1625 (void) sprintf(&buf[i << 1], 1626 "%02x", 1627 wwn_ptr->raw_wwn[i]); 1628 } 1629 1630 fcp_log(CE_WARN, pptr->port_dip, 1631 "!Failed to create nodes for" 1632 " pwwn=%s; error=%x", buf, 1633 dev_data[i].dev_status); 1634 } 1635 1636 /* allow state change call backs again */ 1637 mutex_enter(&fcp_global_mutex); 1638 fcp_oflag &= ~FCP_BUSY; 1639 mutex_exit(&fcp_global_mutex); 1640 1641 mutex_enter(&pptr->port_mutex); 1642 mutex_enter(&ptgt->tgt_mutex); 1643 1644 break; 1645 1646 case FCP_TGT_DELETE: 1647 break; 1648 1649 default: 1650 fcp_log(CE_WARN, pptr->port_dip, 1651 "!Invalid device data ioctl " 1652 "opcode = 0x%x", cmd); 1653 } 1654 mutex_exit(&ptgt->tgt_mutex); 1655 } 1656 } 1657 mutex_exit(&pptr->port_mutex); 1658 1659 if (ddi_copyout(dev_data, fioctl.list, 1660 (sizeof (struct device_data)) * fioctl.listlen, mode)) { 1661 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen); 1662 return (EFAULT); 1663 } 
1664 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen); 1665 1666 #ifdef _MULTI_DATAMODEL 1667 switch (ddi_model_convert_from(mode & FMODELS)) { 1668 case DDI_MODEL_ILP32: { 1669 struct fcp32_ioctl f32_ioctl; 1670 1671 f32_ioctl.fp_minor = fioctl.fp_minor; 1672 f32_ioctl.listlen = fioctl.listlen; 1673 f32_ioctl.list = (caddr32_t)(long)fioctl.list; 1674 if (ddi_copyout((void *)&f32_ioctl, (void *)data, 1675 sizeof (struct fcp32_ioctl), mode)) { 1676 return (EFAULT); 1677 } 1678 break; 1679 } 1680 case DDI_MODEL_NONE: 1681 if (ddi_copyout((void *)&fioctl, (void *)data, 1682 sizeof (struct fcp_ioctl), mode)) { 1683 return (EFAULT); 1684 } 1685 break; 1686 } 1687 #else /* _MULTI_DATAMODEL */ 1688 1689 if (ddi_copyout((void *)&fioctl, (void *)data, 1690 sizeof (struct fcp_ioctl), mode)) { 1691 return (EFAULT); 1692 } 1693 #endif /* _MULTI_DATAMODEL */ 1694 1695 return (0); 1696 } 1697 1698 /* 1699 * Fetch the target mappings (path, etc.) for all LUNs 1700 * on this port. 1701 */ 1702 /* ARGSUSED */ 1703 static int 1704 fcp_get_target_mappings(struct fcp_ioctl *data, 1705 int mode, int *rval) 1706 { 1707 struct fcp_port *pptr; 1708 fc_hba_target_mappings_t *mappings; 1709 fc_hba_mapping_entry_t *map; 1710 struct fcp_tgt *ptgt = NULL; 1711 struct fcp_lun *plun = NULL; 1712 int i, mapIndex, mappingSize; 1713 int listlen; 1714 struct fcp_ioctl fioctl; 1715 char *path; 1716 fcp_ent_addr_t sam_lun_addr; 1717 1718 #ifdef _MULTI_DATAMODEL 1719 switch (ddi_model_convert_from(mode & FMODELS)) { 1720 case DDI_MODEL_ILP32: { 1721 struct fcp32_ioctl f32_ioctl; 1722 1723 if (ddi_copyin((void *)data, (void *)&f32_ioctl, 1724 sizeof (struct fcp32_ioctl), mode)) { 1725 return (EFAULT); 1726 } 1727 fioctl.fp_minor = f32_ioctl.fp_minor; 1728 fioctl.listlen = f32_ioctl.listlen; 1729 fioctl.list = (caddr_t)(long)f32_ioctl.list; 1730 break; 1731 } 1732 case DDI_MODEL_NONE: 1733 if (ddi_copyin((void *)data, (void *)&fioctl, 1734 sizeof (struct fcp_ioctl), mode)) { 1735 return (EFAULT); 
1736 } 1737 break; 1738 } 1739 1740 #else /* _MULTI_DATAMODEL */ 1741 if (ddi_copyin((void *)data, (void *)&fioctl, 1742 sizeof (struct fcp_ioctl), mode)) { 1743 return (EFAULT); 1744 } 1745 #endif /* _MULTI_DATAMODEL */ 1746 1747 /* 1748 * Right now we can assume that the minor number matches with 1749 * this instance of fp. If this changes we will need to 1750 * revisit this logic. 1751 */ 1752 mutex_enter(&fcp_global_mutex); 1753 pptr = fcp_port_head; 1754 while (pptr) { 1755 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) { 1756 break; 1757 } else { 1758 pptr = pptr->port_next; 1759 } 1760 } 1761 mutex_exit(&fcp_global_mutex); 1762 if (pptr == NULL) { 1763 cmn_err(CE_NOTE, "target mappings: unknown instance number: %d", 1764 fioctl.fp_minor); 1765 return (ENXIO); 1766 } 1767 1768 1769 /* We use listlen to show the total buffer size */ 1770 mappingSize = fioctl.listlen; 1771 1772 /* Now calculate how many mapping entries will fit */ 1773 listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t) 1774 - sizeof (fc_hba_target_mappings_t); 1775 if (listlen <= 0) { 1776 cmn_err(CE_NOTE, "target mappings: Insufficient buffer"); 1777 return (ENXIO); 1778 } 1779 listlen = listlen / sizeof (fc_hba_mapping_entry_t); 1780 1781 if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) { 1782 return (ENOMEM); 1783 } 1784 mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION; 1785 1786 /* Now get to work */ 1787 mapIndex = 0; 1788 1789 mutex_enter(&pptr->port_mutex); 1790 /* Loop through all targets on this port */ 1791 for (i = 0; i < FCP_NUM_HASH; i++) { 1792 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL; 1793 ptgt = ptgt->tgt_next) { 1794 1795 1796 /* Loop through all LUNs on this target */ 1797 for (plun = ptgt->tgt_lun; plun != NULL; 1798 plun = plun->lun_next) { 1799 if (plun->lun_state & FCP_LUN_OFFLINE) { 1800 continue; 1801 } 1802 1803 path = fcp_get_lun_path(plun); 1804 if (path == NULL) { 1805 continue; 1806 } 1807 1808 if (mapIndex >= listlen) { 
1809 mapIndex ++; 1810 kmem_free(path, MAXPATHLEN); 1811 continue; 1812 } 1813 map = &mappings->entries[mapIndex++]; 1814 bcopy(path, map->targetDriver, 1815 sizeof (map->targetDriver)); 1816 map->d_id = ptgt->tgt_d_id; 1817 map->busNumber = 0; 1818 map->targetNumber = ptgt->tgt_d_id; 1819 map->osLUN = plun->lun_num; 1820 1821 /* 1822 * We had swapped lun when we stored it in 1823 * lun_addr. We need to swap it back before 1824 * returning it to user land 1825 */ 1826 1827 sam_lun_addr.ent_addr_0 = 1828 BE_16(plun->lun_addr.ent_addr_0); 1829 sam_lun_addr.ent_addr_1 = 1830 BE_16(plun->lun_addr.ent_addr_1); 1831 sam_lun_addr.ent_addr_2 = 1832 BE_16(plun->lun_addr.ent_addr_2); 1833 sam_lun_addr.ent_addr_3 = 1834 BE_16(plun->lun_addr.ent_addr_3); 1835 1836 bcopy(&sam_lun_addr, &map->samLUN, 1837 FCP_LUN_SIZE); 1838 bcopy(ptgt->tgt_node_wwn.raw_wwn, 1839 map->NodeWWN.raw_wwn, sizeof (la_wwn_t)); 1840 bcopy(ptgt->tgt_port_wwn.raw_wwn, 1841 map->PortWWN.raw_wwn, sizeof (la_wwn_t)); 1842 1843 if (plun->lun_guid) { 1844 1845 /* convert ascii wwn to bytes */ 1846 fcp_ascii_to_wwn(plun->lun_guid, 1847 map->guid, sizeof (map->guid)); 1848 1849 if ((sizeof (map->guid)) < 1850 plun->lun_guid_size / 2) { 1851 cmn_err(CE_WARN, 1852 "fcp_get_target_mappings:" 1853 "guid copy space " 1854 "insufficient." 
1855 "Copy Truncation - " 1856 "available %d; need %d", 1857 (int)sizeof (map->guid), 1858 (int) 1859 plun->lun_guid_size / 2); 1860 } 1861 } 1862 kmem_free(path, MAXPATHLEN); 1863 } 1864 } 1865 } 1866 mutex_exit(&pptr->port_mutex); 1867 mappings->numLuns = mapIndex; 1868 1869 if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) { 1870 kmem_free(mappings, mappingSize); 1871 return (EFAULT); 1872 } 1873 kmem_free(mappings, mappingSize); 1874 1875 #ifdef _MULTI_DATAMODEL 1876 switch (ddi_model_convert_from(mode & FMODELS)) { 1877 case DDI_MODEL_ILP32: { 1878 struct fcp32_ioctl f32_ioctl; 1879 1880 f32_ioctl.fp_minor = fioctl.fp_minor; 1881 f32_ioctl.listlen = fioctl.listlen; 1882 f32_ioctl.list = (caddr32_t)(long)fioctl.list; 1883 if (ddi_copyout((void *)&f32_ioctl, (void *)data, 1884 sizeof (struct fcp32_ioctl), mode)) { 1885 return (EFAULT); 1886 } 1887 break; 1888 } 1889 case DDI_MODEL_NONE: 1890 if (ddi_copyout((void *)&fioctl, (void *)data, 1891 sizeof (struct fcp_ioctl), mode)) { 1892 return (EFAULT); 1893 } 1894 break; 1895 } 1896 #else /* _MULTI_DATAMODEL */ 1897 1898 if (ddi_copyout((void *)&fioctl, (void *)data, 1899 sizeof (struct fcp_ioctl), mode)) { 1900 return (EFAULT); 1901 } 1902 #endif /* _MULTI_DATAMODEL */ 1903 1904 return (0); 1905 } 1906 1907 /* 1908 * fcp_setup_scsi_ioctl 1909 * Setup handler for the "scsi passthru" style of 1910 * ioctl for FCP. See "fcp_util.h" for data structure 1911 * definition. 1912 * 1913 * Input: 1914 * u_fscsi = ioctl data (user address space) 1915 * mode = See ioctl(9E) 1916 * 1917 * Output: 1918 * u_fscsi = ioctl data (user address space) 1919 * rval = return value - see ioctl(9E) 1920 * 1921 * Returns: 1922 * 0 = OK 1923 * EAGAIN = See errno.h 1924 * EBUSY = See errno.h 1925 * EFAULT = See errno.h 1926 * EINTR = See errno.h 1927 * EINVAL = See errno.h 1928 * EIO = See errno.h 1929 * ENOMEM = See errno.h 1930 * ENXIO = See errno.h 1931 * 1932 * Context: 1933 * Kernel context. 
 */
/* ARGSUSED */
static int
fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
    int mode, int *rval)
{
	int			ret = 0;
	int			temp_ret;
	caddr_t			k_cdbbufaddr = NULL;
	caddr_t			k_bufaddr = NULL;
	caddr_t			k_rqbufaddr = NULL;
	caddr_t			u_cdbbufaddr;
	caddr_t			u_bufaddr;
	caddr_t			u_rqbufaddr;
	struct fcp_scsi_cmd	k_fscsi;

	/*
	 * Get fcp_scsi_cmd array element from user address space
	 */
	if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
	    != 0) {
		return (ret);
	}


	/*
	 * Even though kmem_alloc() checks the validity of the
	 * buffer length, this check is needed when the
	 * kmem_flags set and the zero buffer length is passed.
	 */
	if ((k_fscsi.scsi_cdblen <= 0) ||
	    (k_fscsi.scsi_buflen <= 0) ||
	    (k_fscsi.scsi_buflen > FCP_MAX_RESPONSE_LEN) ||
	    (k_fscsi.scsi_rqlen <= 0) ||
	    (k_fscsi.scsi_rqlen > FCP_MAX_SENSE_LEN)) {
		return (EINVAL);
	}

	/*
	 * Allocate data for fcp_scsi_cmd pointer fields
	 */
	if (ret == 0) {
		k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
		k_bufaddr = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
		k_rqbufaddr = kmem_alloc(k_fscsi.scsi_rqlen, KM_NOSLEEP);

		/* Partial allocations are released in the common exit path. */
		if (k_cdbbufaddr == NULL ||
		    k_bufaddr == NULL ||
		    k_rqbufaddr == NULL) {
			ret = ENOMEM;
		}
	}

	/*
	 * Get fcp_scsi_cmd pointer fields from user
	 * address space
	 */
	if (ret == 0) {
		u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
		u_bufaddr = k_fscsi.scsi_bufaddr;
		u_rqbufaddr = k_fscsi.scsi_rqbufaddr;

		if (ddi_copyin(u_cdbbufaddr,
		    k_cdbbufaddr,
		    k_fscsi.scsi_cdblen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyin(u_bufaddr,
		    k_bufaddr,
		    k_fscsi.scsi_buflen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyin(u_rqbufaddr,
		    k_rqbufaddr,
		    k_fscsi.scsi_rqlen,
		    mode)) {
			ret = EFAULT;
		}
	}

	/*
	 * Send scsi command (blocking)
	 */
	if (ret == 0) {
		/*
		 * Prior to sending the scsi command, the
		 * fcp_scsi_cmd data structure must contain kernel,
		 * not user, addresses.
		 */
		k_fscsi.scsi_cdbbufaddr = k_cdbbufaddr;
		k_fscsi.scsi_bufaddr = k_bufaddr;
		k_fscsi.scsi_rqbufaddr = k_rqbufaddr;

		ret = fcp_send_scsi_ioctl(&k_fscsi);

		/*
		 * After sending the scsi command, the
		 * fcp_scsi_cmd data structure must contain user,
		 * not kernel, addresses.
		 */
		k_fscsi.scsi_cdbbufaddr = u_cdbbufaddr;
		k_fscsi.scsi_bufaddr = u_bufaddr;
		k_fscsi.scsi_rqbufaddr = u_rqbufaddr;
	}

	/*
	 * Put fcp_scsi_cmd pointer fields to user address space
	 */
	if (ret == 0) {
		if (ddi_copyout(k_cdbbufaddr,
		    u_cdbbufaddr,
		    k_fscsi.scsi_cdblen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyout(k_bufaddr,
		    u_bufaddr,
		    k_fscsi.scsi_buflen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyout(k_rqbufaddr,
		    u_rqbufaddr,
		    k_fscsi.scsi_rqlen,
		    mode)) {
			ret = EFAULT;
		}
	}

	/*
	 * Free data for fcp_scsi_cmd pointer fields
	 */
	if (k_cdbbufaddr != NULL) {
		kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
	}
	if (k_bufaddr != NULL) {
		kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
	}
	if (k_rqbufaddr != NULL) {
		kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
	}

	/*
	 * Put fcp_scsi_cmd array element to user address space
	 * (copied out even on failure so status fields reach the caller).
	 */
	temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
	if (temp_ret != 0) {
		ret = temp_ret;
	}

	/*
	 * Return status
	 */
	return (ret);
}


/*
 * fcp_copyin_scsi_cmd
 *	Copy in fcp_scsi_cmd data structure from user address space.
 *	The data may be in 32 bit or 64 bit modes.
2093 * 2094 * Input: 2095 * base_addr = from address (user address space) 2096 * mode = See ioctl(9E) and ddi_copyin(9F) 2097 * 2098 * Output: 2099 * fscsi = to address (kernel address space) 2100 * 2101 * Returns: 2102 * 0 = OK 2103 * EFAULT = Error 2104 * 2105 * Context: 2106 * Kernel context. 2107 */ 2108 static int 2109 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode) 2110 { 2111 #ifdef _MULTI_DATAMODEL 2112 struct fcp32_scsi_cmd f32scsi; 2113 2114 switch (ddi_model_convert_from(mode & FMODELS)) { 2115 case DDI_MODEL_ILP32: 2116 /* 2117 * Copy data from user address space 2118 */ 2119 if (ddi_copyin((void *)base_addr, 2120 &f32scsi, 2121 sizeof (struct fcp32_scsi_cmd), 2122 mode)) { 2123 return (EFAULT); 2124 } 2125 /* 2126 * Convert from 32 bit to 64 bit 2127 */ 2128 FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi); 2129 break; 2130 case DDI_MODEL_NONE: 2131 /* 2132 * Copy data from user address space 2133 */ 2134 if (ddi_copyin((void *)base_addr, 2135 fscsi, 2136 sizeof (struct fcp_scsi_cmd), 2137 mode)) { 2138 return (EFAULT); 2139 } 2140 break; 2141 } 2142 #else /* _MULTI_DATAMODEL */ 2143 /* 2144 * Copy data from user address space 2145 */ 2146 if (ddi_copyin((void *)base_addr, 2147 fscsi, 2148 sizeof (struct fcp_scsi_cmd), 2149 mode)) { 2150 return (EFAULT); 2151 } 2152 #endif /* _MULTI_DATAMODEL */ 2153 2154 return (0); 2155 } 2156 2157 2158 /* 2159 * fcp_copyout_scsi_cmd 2160 * Copy out fcp_scsi_cmd data structure to user address space. 2161 * The data may be in 32 bit or 64 bit modes. 2162 * 2163 * Input: 2164 * fscsi = to address (kernel address space) 2165 * mode = See ioctl(9E) and ddi_copyin(9F) 2166 * 2167 * Output: 2168 * base_addr = from address (user address space) 2169 * 2170 * Returns: 2171 * 0 = OK 2172 * EFAULT = Error 2173 * 2174 * Context: 2175 * Kernel context. 
2176 */ 2177 static int 2178 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode) 2179 { 2180 #ifdef _MULTI_DATAMODEL 2181 struct fcp32_scsi_cmd f32scsi; 2182 2183 switch (ddi_model_convert_from(mode & FMODELS)) { 2184 case DDI_MODEL_ILP32: 2185 /* 2186 * Convert from 64 bit to 32 bit 2187 */ 2188 FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi); 2189 /* 2190 * Copy data to user address space 2191 */ 2192 if (ddi_copyout(&f32scsi, 2193 (void *)base_addr, 2194 sizeof (struct fcp32_scsi_cmd), 2195 mode)) { 2196 return (EFAULT); 2197 } 2198 break; 2199 case DDI_MODEL_NONE: 2200 /* 2201 * Copy data to user address space 2202 */ 2203 if (ddi_copyout(fscsi, 2204 (void *)base_addr, 2205 sizeof (struct fcp_scsi_cmd), 2206 mode)) { 2207 return (EFAULT); 2208 } 2209 break; 2210 } 2211 #else /* _MULTI_DATAMODEL */ 2212 /* 2213 * Copy data to user address space 2214 */ 2215 if (ddi_copyout(fscsi, 2216 (void *)base_addr, 2217 sizeof (struct fcp_scsi_cmd), 2218 mode)) { 2219 return (EFAULT); 2220 } 2221 #endif /* _MULTI_DATAMODEL */ 2222 2223 return (0); 2224 } 2225 2226 2227 /* 2228 * fcp_send_scsi_ioctl 2229 * Sends the SCSI command in blocking mode. 2230 * 2231 * Input: 2232 * fscsi = SCSI command data structure 2233 * 2234 * Output: 2235 * fscsi = SCSI command data structure 2236 * 2237 * Returns: 2238 * 0 = OK 2239 * EAGAIN = See errno.h 2240 * EBUSY = See errno.h 2241 * EINTR = See errno.h 2242 * EINVAL = See errno.h 2243 * EIO = See errno.h 2244 * ENOMEM = See errno.h 2245 * ENXIO = See errno.h 2246 * 2247 * Context: 2248 * Kernel context. 
 */
static int
fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
{
	struct fcp_lun	*plun		= NULL;
	struct fcp_port	*pptr		= NULL;
	struct fcp_tgt	*ptgt		= NULL;
	fc_packet_t	*fpkt		= NULL;
	struct fcp_ipkt	*icmd		= NULL;
	int		target_created	= FALSE;
	fc_frame_hdr_t	*hp;
	struct fcp_cmd	fcp_cmd;
	struct fcp_cmd	*fcmd;
	union scsi_cdb	*scsi_cdb;
	la_wwn_t	*wwn_ptr;
	int		nodma;
	struct fcp_rsp	*rsp;
	struct fcp_rsp_info	*rsp_info;
	caddr_t		rsp_sense;
	int		buf_len;
	int		info_len;
	int		sense_len;
	struct scsi_extended_sense	*sense_to = NULL;
	timeout_id_t	tid;
	uint8_t		reconfig_lun = FALSE;
	uint8_t		reconfig_pending = FALSE;
	uint8_t		scsi_cmd;
	int		rsp_len;
	int		cmd_index;
	int		fc_status;
	int		pkt_state;
	int		pkt_action;
	int		pkt_reason;
	int		ret, xport_retval = ~FC_SUCCESS;
	int		lcount;
	int		tcount;
	int		reconfig_status;
	int		port_busy = FALSE;
	uchar_t		*lun_string;

	/*
	 * Check valid SCSI command: the opcode (first CDB byte) must be
	 * one of the commands whitelisted in scsi_ioctl_list[].
	 */
	scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
	ret = EINVAL;
	for (cmd_index = 0;
	    cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
	    ret != 0;
	    cmd_index++) {
		/*
		 * First byte of CDB is the SCSI command
		 */
		if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
			ret = 0;
		}
	}

	/*
	 * Check inputs: only the read direction is supported, and the
	 * CDB must fit in the fixed-size CDB field of struct fcp_cmd.
	 */
	if (fscsi->scsi_flags != FCP_SCSI_READ) {
		ret = EINVAL;
	} else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
		/* no larger than */
		ret = EINVAL;
	}


	/*
	 * Find FC port matching the caller-supplied port instance number.
	 */
	if (ret == 0) {
		/*
		 * Acquire global mutex
		 */
		mutex_enter(&fcp_global_mutex);

		pptr = fcp_port_head;
		while (pptr) {
			if (pptr->port_instance ==
			    (uint32_t)fscsi->scsi_fc_port_num) {
				break;
			} else {
				pptr = pptr->port_next;
			}
		}

		if (pptr == NULL) {
			ret = ENXIO;
		} else {
			/*
			 * fc_ulp_busy_port can raise power
			 * so, we must not hold any mutexes involved in PM
			 */
			mutex_exit(&fcp_global_mutex);
			ret = fc_ulp_busy_port(pptr->port_fp_handle);
		}

		/*
		 * NOTE(review): if fc_ulp_busy_port() fails above,
		 * fcp_global_mutex has already been dropped, yet the
		 * unconditional mutex_exit(&fcp_global_mutex) at the end
		 * of this outer block still runs — looks unbalanced on
		 * that path; confirm against fc_ulp_busy_port() failure
		 * semantics.
		 */
		if (ret == 0) {

			/* remember port is busy, so we will release later */
			port_busy = TRUE;

			/*
			 * If there is a reconfiguration in progress, wait
			 * for it to complete.
			 */

			fcp_reconfig_wait(pptr);

			/* reacquire mutexes in order */
			mutex_enter(&fcp_global_mutex);
			mutex_enter(&pptr->port_mutex);

			/*
			 * Will port accept DMA?
			 */
			nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
			    ? 1 : 0;

			/*
			 * If init or offline, device not known
			 *
			 * If we are discovering (onlining), we can
			 * NOT obviously provide reliable data about
			 * devices until it is complete
			 */
			if (pptr->port_state & (FCP_STATE_INIT |
			    FCP_STATE_OFFLINE)) {
				ret = ENXIO;
			} else if (pptr->port_state & FCP_STATE_ONLINING) {
				ret = EBUSY;
			} else {
				/*
				 * Find target from pwwn
				 *
				 * The wwn must be put into a local
				 * variable to ensure alignment.
				 */
				wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
				ptgt = fcp_lookup_target(pptr,
				    (uchar_t *)wwn_ptr);

				/*
				 * If target not found, create it on the
				 * caller's behalf (PLOGI/PRLI are issued
				 * by fcp_port_create_tgt).
				 */
				if (ptgt == NULL) {
					/*
					 * Note: Still have global &
					 * port mutexes
					 */
					mutex_exit(&pptr->port_mutex);
					ptgt = fcp_port_create_tgt(pptr,
					    wwn_ptr, &ret, &fc_status,
					    &pkt_state, &pkt_action,
					    &pkt_reason);
					mutex_enter(&pptr->port_mutex);

					/* report creation status to caller */
					fscsi->scsi_fc_status = fc_status;
					fscsi->scsi_pkt_state =
					    (uchar_t)pkt_state;
					fscsi->scsi_pkt_reason = pkt_reason;
					fscsi->scsi_pkt_action =
					    (uchar_t)pkt_action;

					if (ptgt != NULL) {
						target_created = TRUE;
					} else if (ret == 0) {
						ret = ENOMEM;
					}
				}

				if (ret == 0) {
					/*
					 * Acquire target
					 */
					mutex_enter(&ptgt->tgt_mutex);

					/*
					 * If target is mark or busy,
					 * then target can not be used
					 */
					if (ptgt->tgt_state &
					    (FCP_TGT_MARK |
					    FCP_TGT_BUSY)) {
						ret = EBUSY;
					} else {
						/*
						 * Mark target as busy
						 */
						ptgt->tgt_state |=
						    FCP_TGT_BUSY;
					}

					/*
					 * Release target.  Snapshot the
					 * link/change counts so the internal
					 * packet can later detect state
					 * changes that would invalidate it.
					 */
					lcount = pptr->port_link_cnt;
					tcount = ptgt->tgt_change_cnt;
					mutex_exit(&ptgt->tgt_mutex);
				}
			}

			/*
			 * Release port
			 */
			mutex_exit(&pptr->port_mutex);
		}

		/*
		 * Release global mutex
		 */
		mutex_exit(&fcp_global_mutex);
	}

	if (ret == 0) {
		uint64_t belun = BE_64(fscsi->scsi_lun);

		/*
		 * If it's a target device, find lun from pwwn
		 * The wwn must be put into a local
		 * variable to ensure alignment.
		 */
		mutex_enter(&pptr->port_mutex);
		wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
		if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
			/* this is not a target */
			fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
			ret = ENXIO;
		} else if ((belun << 16) != 0) {
			/*
			 * Since fcp only support PD and LU addressing method
			 * so far, the last 6 bytes of a valid LUN are expected
			 * to be filled with 00h.
			 */
			fscsi->scsi_fc_status = FC_INVALID_LUN;
			cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
			    " method 0x%02x with LUN number 0x%016" PRIx64,
			    (uint8_t)(belun >> 62), belun);
			ret = ENXIO;
		} else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
		    (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
			/*
			 * This is a SCSI target, but no LUN at this
			 * address.
			 *
			 * In the future, we may want to send this to
			 * the target, and let it respond
			 * appropriately
			 */
			ret = ENXIO;
		}
		mutex_exit(&pptr->port_mutex);
	}

	/*
	 * Finished grabbing external resources
	 * Allocate internal packet (icmd)
	 */
	if (ret == 0) {
		/*
		 * Calc rsp len assuming rsp info included
		 */
		rsp_len = sizeof (struct fcp_rsp) +
		    sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;

		icmd = fcp_icmd_alloc(pptr, ptgt,
		    sizeof (struct fcp_cmd),
		    rsp_len,
		    fscsi->scsi_buflen,
		    nodma,
		    lcount,			/* ipkt_link_cnt */
		    tcount,			/* ipkt_change_cnt */
		    0,				/* cause */
		    FC_INVALID_RSCN_COUNT);	/* invalidate the count */

		if (icmd == NULL) {
			ret = ENOMEM;
		} else {
			/*
			 * Setup internal packet as sema sync
			 */
			fcp_ipkt_sema_init(icmd);
		}
	}

	if (ret == 0) {
		/*
		 * Init fpkt pointer for use.
		 */

		fpkt = icmd->ipkt_fpkt;

		fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
		fpkt->pkt_tran_type = FC_PKT_FCP_READ; /* only rd for now */
		fpkt->pkt_timeout = fscsi->scsi_timeout;

		/*
		 * Init fcmd pointer for use by SCSI command.  With DVMA
		 * the command is built directly in the mapped buffer;
		 * otherwise a stack copy is built and copied out below.
		 */

		if (nodma) {
			fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
		} else {
			fcmd = &fcp_cmd;
		}
		bzero(fcmd, sizeof (struct fcp_cmd));
		ptgt = plun->lun_tgt;

		lun_string = (uchar_t *)&fscsi->scsi_lun;

		fcmd->fcp_ent_addr.ent_addr_0 =
		    BE_16(*(uint16_t *)&(lun_string[0]));
		fcmd->fcp_ent_addr.ent_addr_1 =
		    BE_16(*(uint16_t *)&(lun_string[2]));
		fcmd->fcp_ent_addr.ent_addr_2 =
		    BE_16(*(uint16_t *)&(lun_string[4]));
		fcmd->fcp_ent_addr.ent_addr_3 =
		    BE_16(*(uint16_t *)&(lun_string[6]));

		/*
		 * Setup internal packet(icmd)
		 */
		icmd->ipkt_lun = plun;
		icmd->ipkt_restart = 0;
		icmd->ipkt_retries = 0;
		icmd->ipkt_opcode = 0;

		/*
		 * Init the frame HEADER Pointer for use
		 */
		hp = &fpkt->pkt_cmd_fhdr;

		hp->s_id = pptr->port_id;
		hp->d_id = ptgt->tgt_d_id;
		hp->r_ctl = R_CTL_COMMAND;
		hp->type = FC_TYPE_SCSI_FCP;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->rsvd = 0;
		hp->seq_id = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;
		hp->rx_id = 0xffff;
		hp->ro = 0;

		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
		fcmd->fcp_cntl.cntl_read_data = 1;	/* only rd for now */
		fcmd->fcp_cntl.cntl_write_data = 0;
		fcmd->fcp_data_len = fscsi->scsi_buflen;

		scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
		bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
		    fscsi->scsi_cdblen);

		if (!nodma) {
			FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
			    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
		}

		/*
		 * Send SCSI command to FC transport, unless the target's
		 * state changed since the counts were snapshotted.
		 */

		if (ret == 0) {
			mutex_enter(&ptgt->tgt_mutex);

			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				mutex_exit(&ptgt->tgt_mutex);
				fscsi->scsi_fc_status = xport_retval =
				    fc_ulp_transport(pptr->port_fp_handle,
				    fpkt);
				if (fscsi->scsi_fc_status != FC_SUCCESS) {
					ret = EIO;
				}
			} else {
				mutex_exit(&ptgt->tgt_mutex);
				ret = EBUSY;
			}
		}
	}

	/*
	 * Wait for completion only if fc_ulp_transport was called and it
	 * returned a success. This is the only time callback will happen.
	 * Otherwise, there is no point in waiting
	 */
	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
		ret = fcp_ipkt_sema_wait(icmd);
	}

	/*
	 * Copy data to IOCTL data structures
	 */
	rsp = NULL;
	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
		rsp = (struct fcp_rsp *)fpkt->pkt_resp;

		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!SCSI command to d_id=0x%x lun=0x%x"
			    " failed, Bad FCP response values:"
			    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
			    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->reserved_0, rsp->reserved_1,
			    rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);

			ret = EIO;
		}
	}

	if ((ret == 0) && (rsp != NULL)) {
		/*
		 * Calc response lengths
		 */
		sense_len = 0;
		info_len = 0;

		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			info_len = rsp->fcp_response_len;
		}

		rsp_info = (struct fcp_rsp_info *)
		    ((uint8_t *)rsp + sizeof (struct fcp_rsp));

		/*
		 * Get SCSI status
		 */
		fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
		/*
		 * If a lun was just added or removed and the next command
		 * comes through this interface, we need to capture the check
		 * condition so we can discover the new topology.
		 */
		if (fscsi->scsi_bufstatus != STATUS_GOOD &&
		    rsp->fcp_u.fcp_status.sense_len_set) {
			sense_len = rsp->fcp_sense_len;
			rsp_sense = (caddr_t)((uint8_t *)rsp_info + info_len);
			sense_to = (struct scsi_extended_sense *)rsp_sense;
			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				reconfig_lun = TRUE;
			}
		}

		if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
		    (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
			if (reconfig_lun == FALSE) {
				reconfig_status =
				    fcp_is_reconfig_needed(ptgt, fpkt);
			}

			if ((reconfig_lun == TRUE) ||
			    (reconfig_status == TRUE)) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					/*
					 * Either we've been notified the
					 * REPORT_LUN data has changed, or
					 * we've determined on our own that
					 * we're out of date.  Kick off
					 * rediscovery.
					 */
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));

					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;
					ret = EBUSY;
					reconfig_pending = TRUE;
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
		}

		/*
		 * Calc residuals and buffer lengths
		 */

		if (ret == 0) {
			buf_len = fscsi->scsi_buflen;
			fscsi->scsi_bufresid = 0;
			if (rsp->fcp_u.fcp_status.resid_under) {
				if (rsp->fcp_resid <= fscsi->scsi_buflen) {
					fscsi->scsi_bufresid = rsp->fcp_resid;
				} else {
					cmn_err(CE_WARN, "fcp: bad residue %x "
					    "for txfer len %x", rsp->fcp_resid,
					    fscsi->scsi_buflen);
					fscsi->scsi_bufresid =
					    fscsi->scsi_buflen;
				}
				buf_len -= fscsi->scsi_bufresid;
			}
			if (rsp->fcp_u.fcp_status.resid_over) {
				fscsi->scsi_bufresid = -rsp->fcp_resid;
			}

			fscsi->scsi_rqresid = fscsi->scsi_rqlen - sense_len;
			if (fscsi->scsi_rqlen < sense_len) {
				sense_len = fscsi->scsi_rqlen;
			}

			fscsi->scsi_fc_rspcode = 0;
			if (rsp->fcp_u.fcp_status.rsp_len_set) {
				fscsi->scsi_fc_rspcode = rsp_info->rsp_code;
			}
			fscsi->scsi_pkt_state = fpkt->pkt_state;
			fscsi->scsi_pkt_action = fpkt->pkt_action;
			fscsi->scsi_pkt_reason = fpkt->pkt_reason;

			/*
			 * Copy data and request sense
			 *
			 * Data must be copied by using the FCP_CP_IN macro.
			 * This will ensure the proper byte order since the data
			 * is being copied directly from the memory mapped
			 * device register.
			 *
			 * The response (and request sense) will be in the
			 * correct byte order. No special copy is necessary.
			 *
			 * NOTE(review): when sense_len == 0 rsp_sense was
			 * never assigned; the bcopy below then copies zero
			 * bytes from an indeterminate pointer — confirm this
			 * is benign on all supported platforms.
			 */

			if (buf_len) {
				FCP_CP_IN(fpkt->pkt_data,
				    fscsi->scsi_bufaddr,
				    fpkt->pkt_data_acc,
				    buf_len);
			}
			bcopy((void *)rsp_sense,
			    (void *)fscsi->scsi_rqbufaddr,
			    sense_len);
		}
	}

	/*
	 * Cleanup transport data structures if icmd was alloc-ed
	 * So, cleanup happens in the same thread that icmd was alloc-ed
	 */
	if (icmd != NULL) {
		fcp_ipkt_sema_cleanup(icmd);
	}

	/* restore pm busy/idle status */
	if (port_busy) {
		fc_ulp_idle_port(pptr->port_fp_handle);
	}

	/*
	 * Cleanup target. if a reconfig is pending, don't clear the BUSY
	 * flag, it'll be cleared when the reconfig is complete.
	 */
	if ((ptgt != NULL) && !reconfig_pending) {
		/*
		 * If target was created,
		 */
		if (target_created) {
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_BUSY;
			mutex_exit(&ptgt->tgt_mutex);
		} else {
			/*
			 * De-mark target as busy
			 */
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_BUSY;
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
	return (ret);
}


/*
 * fcp_is_reconfig_needed
 *	Compares the REPORT_LUN payload in fpkt against the LUNs currently
 *	known on ptgt (skipping blacklisted/masked LUNs) and returns TRUE
 *	if the target's LUN configuration appears out of date.
 */
static int
fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
    fc_packet_t *fpkt)
{
	uchar_t				*lun_string;
	uint16_t			lun_num, i;
	int				num_luns;
	int				actual_luns;
	int				num_masked_luns;
	int				lun_buflen;
	struct fcp_lun			*plun = NULL;
	struct fcp_reportlun_resp	*report_lun;
	uint8_t				reconfig_needed = FALSE;
	uint8_t				lun_exists = FALSE;

	/* local copy of the device's REPORT_LUN response */
	report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);

	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
	    fpkt->pkt_datalen);

	/* get number of luns (which is supplied as LUNS * 8) */
	num_luns = BE_32(report_lun->num_lun) >> 3;

	/*
	 * Figure out exactly how many lun strings our response buffer
	 * can hold.
	 */
	lun_buflen = (fpkt->pkt_datalen -
	    2 * sizeof (uint32_t)) / sizeof (longlong_t);

	/*
	 * Is our response buffer full or not? We don't want to
	 * potentially walk beyond the number of luns we have.
	 */
	if (num_luns <= lun_buflen) {
		actual_luns = num_luns;
	} else {
		actual_luns = lun_buflen;
	}

	mutex_enter(&ptgt->tgt_mutex);

	/* Scan each lun to see if we have masked it. */
	num_masked_luns = 0;
	if (fcp_lun_blacklist != NULL) {
		for (i = 0; i < actual_luns; i++) {
			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
			switch (lun_string[0] & 0xC0) {
			case FCP_LUN_ADDRESSING:
			case FCP_PD_ADDRESSING:
				lun_num = ((lun_string[0] & 0x3F) << 8)
				    | lun_string[1];
				if (fcp_should_mask(&ptgt->tgt_port_wwn,
				    lun_num) == TRUE) {
					num_masked_luns++;
				}
				break;
			default:
				break;
			}
		}
	}

	/*
	 * The quick and easy check.  If the number of LUNs reported
	 * doesn't match the number we currently know about, we need
	 * to reconfigure.
	 */
	if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
		mutex_exit(&ptgt->tgt_mutex);
		kmem_free(report_lun, fpkt->pkt_datalen);
		return (TRUE);
	}

	/*
	 * If the quick and easy check doesn't turn up anything, we walk
	 * the list of luns from the REPORT_LUN response and look for
	 * any luns we don't know about.  If we find one, we know we need
	 * to reconfigure. We will skip LUNs that are masked because of the
	 * blacklist.
	 */
	for (i = 0; i < actual_luns; i++) {
		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
		lun_exists = FALSE;
		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];

			if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
			    &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
				lun_exists = TRUE;
				break;
			}

			for (plun = ptgt->tgt_lun; plun;
			    plun = plun->lun_next) {
				if (plun->lun_num == lun_num) {
					lun_exists = TRUE;
					break;
				}
			}
			break;
		default:
			break;
		}

		if (lun_exists == FALSE) {
			reconfig_needed = TRUE;
			break;
		}
	}

	mutex_exit(&ptgt->tgt_mutex);
	kmem_free(report_lun, fpkt->pkt_datalen);

	return (reconfig_needed);
}

/*
 * This function is called by fcp_handle_page83 and uses inquiry response data
 * stored in plun->lun_inq to determine whether or not a device is a member of
 * the table fcp_symmetric_disk_table_size. We return 0 if it is in the table,
 * otherwise 1.
 */
static int
fcp_symmetric_device_probe(struct fcp_lun *plun)
{
	struct scsi_inquiry	*stdinq = &plun->lun_inq;
	char			*devidptr;
	int			i, len;

	/* compare the INQUIRY vendor id against each table entry prefix */
	for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
		devidptr = fcp_symmetric_disk_table[i];
		len = (int)strlen(devidptr);

		if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
			return (0);
		}
	}
	return (1);
}


/*
 * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl
 * It basically returns the current count of # of state change callbacks
 * i.e the value of tgt_change_cnt.
 *
 * INPUT:
 *	fcp_ioctl.fp_minor -> The minor # of the fp port
 *	fcp_ioctl.listlen -> 1
 *	fcp_ioctl.list -> Pointer to a 32 bit integer
 */
/*ARGSUSED2*/
static int
fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
{
	int			ret;
	uint32_t		link_cnt;
	struct fcp_ioctl	fioctl;
	struct fcp_port		*pptr = NULL;

	/* copy in the ioctl header and resolve the port it names */
	if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
	    &pptr)) != 0) {
		return (ret);
	}

	ASSERT(pptr != NULL);

	/* caller must ask for exactly one 32-bit count */
	if (fioctl.listlen != 1) {
		return (EINVAL);
	}

	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & FCP_STATE_OFFLINE) {
		mutex_exit(&pptr->port_mutex);
		return (ENXIO);
	}

	/*
	 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
	 * When the fcp initially attaches to the port and there are nothing
	 * hanging out of the port or if there was a repeat offline state change
	 * callback (refer fcp_statec_callback() FC_STATE_OFFLINE case).
	 * In the latter case, port_tmp_cnt will be non-zero and that is how we
	 * will differentiate the 2 cases.
	 */
	if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
		mutex_exit(&pptr->port_mutex);
		return (ENXIO);
	}

	/* snapshot the link count under the port mutex */
	link_cnt = pptr->port_link_cnt;
	mutex_exit(&pptr->port_mutex);

	if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
		return (EFAULT);
	}

#ifdef _MULTI_DATAMODEL
	/* copy the (unchanged) ioctl header back in the caller's model */
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		f32_ioctl.fp_minor = fioctl.fp_minor;
		f32_ioctl.listlen = fioctl.listlen;
		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyout((void *)&fioctl, (void *)data,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
#else /* _MULTI_DATAMODEL */

	if (ddi_copyout((void *)&fioctl, (void *)data,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	return (0);
}

/*
 * This function copies the fcp_ioctl structure passed in from user land
 * into kernel land. Handles 32 bit applications.
3059 */ 3060 /*ARGSUSED*/ 3061 static int 3062 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval, 3063 struct fcp_ioctl *fioctl, struct fcp_port **pptr) 3064 { 3065 struct fcp_port *t_pptr; 3066 3067 #ifdef _MULTI_DATAMODEL 3068 switch (ddi_model_convert_from(mode & FMODELS)) { 3069 case DDI_MODEL_ILP32: { 3070 struct fcp32_ioctl f32_ioctl; 3071 3072 if (ddi_copyin((void *)data, (void *)&f32_ioctl, 3073 sizeof (struct fcp32_ioctl), mode)) { 3074 return (EFAULT); 3075 } 3076 fioctl->fp_minor = f32_ioctl.fp_minor; 3077 fioctl->listlen = f32_ioctl.listlen; 3078 fioctl->list = (caddr_t)(long)f32_ioctl.list; 3079 break; 3080 } 3081 case DDI_MODEL_NONE: 3082 if (ddi_copyin((void *)data, (void *)fioctl, 3083 sizeof (struct fcp_ioctl), mode)) { 3084 return (EFAULT); 3085 } 3086 break; 3087 } 3088 3089 #else /* _MULTI_DATAMODEL */ 3090 if (ddi_copyin((void *)data, (void *)fioctl, 3091 sizeof (struct fcp_ioctl), mode)) { 3092 return (EFAULT); 3093 } 3094 #endif /* _MULTI_DATAMODEL */ 3095 3096 /* 3097 * Right now we can assume that the minor number matches with 3098 * this instance of fp. If this changes we will need to 3099 * revisit this logic. 3100 */ 3101 mutex_enter(&fcp_global_mutex); 3102 t_pptr = fcp_port_head; 3103 while (t_pptr) { 3104 if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) { 3105 break; 3106 } else { 3107 t_pptr = t_pptr->port_next; 3108 } 3109 } 3110 *pptr = t_pptr; 3111 mutex_exit(&fcp_global_mutex); 3112 if (t_pptr == NULL) { 3113 return (ENXIO); 3114 } 3115 3116 return (0); 3117 } 3118 3119 /* 3120 * Function: fcp_port_create_tgt 3121 * 3122 * Description: As the name suggest this function creates the target context 3123 * specified by the the WWN provided by the caller. If the 3124 * creation goes well and the target is known by fp/fctl a PLOGI 3125 * followed by a PRLI are issued. 3126 * 3127 * Argument: pptr fcp port structure 3128 * pwwn WWN of the target 3129 * ret_val Address of the return code. 
 *				It could be:
 *				EIO, ENOMEM or 0.
 *		fc_status	PLOGI or PRLI status completion
 *		fc_pkt_state	PLOGI or PRLI state completion
 *		fc_pkt_reason	PLOGI or PRLI reason completion
 *		fc_pkt_action	PLOGI or PRLI action completion
 *
 * Return Value: NULL if it failed
 *		Target structure address if it succeeds
 */
static struct fcp_tgt *
fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
    int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_tgt	*ptgt = NULL;
	fc_portmap_t	devlist;
	int		lcount;
	int		error;

	*ret_val = 0;

	/*
	 * Check FC port device & get port map
	 */
	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
	    &error, 1) == NULL) {
		*ret_val = EIO;
	} else {
		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
		    &devlist) != FC_SUCCESS) {
			*ret_val = EIO;
		}
	}

	/* Set port map flags */
	devlist.map_type = PORT_DEVICE_USER_CREATE;

	/* Allocate target */
	if (*ret_val == 0) {
		lcount = pptr->port_link_cnt;
		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			*ret_val = ENOMEM;
		} else {
			/* Setup target: populate identity from the portmap */
			mutex_enter(&ptgt->tgt_mutex);

			ptgt->tgt_statec_cause = FCP_CAUSE_TGT_CHANGE;
			ptgt->tgt_tmp_cnt = 1;
			ptgt->tgt_d_id = devlist.map_did.port_id;
			ptgt->tgt_hard_addr =
			    devlist.map_hard_addr.hard_addr;
			ptgt->tgt_pd_handle = devlist.map_pd;
			ptgt->tgt_fca_dev = NULL;

			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
			    FC_WWN_SIZE);
			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
			    FC_WWN_SIZE);

			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	/*
	 * Release global mutex for PLOGI and PRLI.
	 * NOTE(review): this drops a lock the caller acquired (see the
	 * matching mutex_enter below) — callers must hold fcp_global_mutex
	 * on entry; confirm all call sites do.
	 */
	mutex_exit(&fcp_global_mutex);

	/* Send PLOGI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	/* Send PRLI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	mutex_enter(&fcp_global_mutex);

	return (ptgt);
}

/*
 * Function: fcp_tgt_send_plogi
 *
 * Description: This function sends a PLOGI to the target specified by the
 *		caller and waits till it completes.
 *
 * Argument:	ptgt		Target to send the plogi to.
 *		fc_status	Status returned by fp/fctl in the PLOGI request.
 *		fc_pkt_state	State returned by fp/fctl in the PLOGI request.
 *		fc_pkt_reason	Reason returned by fp/fctl in the PLOGI request.
 *		fc_pkt_action	Action returned by fp/fctl in the PLOGI request.
 *
 * Return Value: 0
 *		ENOMEM
 *		EIO
 *
 * Context:	User context.
 */
static int
fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_port		*pptr;
	struct fcp_ipkt		*icmd;
	struct fc_packet	*fpkt;
	fc_frame_hdr_t		*hp;
	struct la_els_logi	logi;
	int			tcount;
	int			lcount;
	int			ret, login_retval = ~FC_SUCCESS;

	ret = 0;

	pptr = ptgt->tgt_port;

	/* snapshot counts so the ipkt can detect link/target changes */
	lcount = pptr->port_link_cnt;
	tcount = ptgt->tgt_change_cnt;

	/* Alloc internal packet */
	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
	    sizeof (la_els_logi_t), 0, 0, lcount, tcount, 0,
	    FC_INVALID_RSCN_COUNT);

	if (icmd == NULL) {
		ret = ENOMEM;
	} else {
		/*
		 * Setup internal packet as sema sync
		 */
		fcp_ipkt_sema_init(icmd);

		/*
		 * Setup internal packet (icmd)
		 */
		icmd->ipkt_lun = NULL;
		icmd->ipkt_restart = 0;
		icmd->ipkt_retries = 0;
		icmd->ipkt_opcode = LA_ELS_PLOGI;

		/*
		 * Setup fc_packet
		 */
		fpkt = icmd->ipkt_fpkt;

		fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
		fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

		/*
		 * Setup FC frame header
		 */
		hp = &fpkt->pkt_cmd_fhdr;

		hp->s_id = pptr->port_id;	/* source ID */
		hp->d_id = ptgt->tgt_d_id;	/* dest ID */
		hp->r_ctl = R_CTL_ELS_REQ;
		hp->type = FC_TYPE_EXTENDED_LS;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->seq_id = 0;
		hp->rsvd = 0;
		hp->df_ctl = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;		/* i.e. none */
		hp->rx_id = 0xffff;		/* i.e. none */
		hp->ro = 0;

		/*
		 * Setup PLOGI
		 */
		bzero(&logi, sizeof (struct la_els_logi));
		logi.ls_code.ls_code = LA_ELS_PLOGI;

		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		/*
		 * Send PLOGI
		 */
		*fc_status = login_retval =
		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
		if (*fc_status != FC_SUCCESS) {
			ret = EIO;
		}
	}

	/*
	 * Wait for completion — only if fc_ulp_login() succeeded, since
	 * that is the only case in which the completion callback fires.
	 */
	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
		ret = fcp_ipkt_sema_wait(icmd);

		*fc_pkt_state = fpkt->pkt_state;
		*fc_pkt_reason = fpkt->pkt_reason;
		*fc_pkt_action = fpkt->pkt_action;
	}

	/*
	 * Cleanup transport data structures if icmd was alloc-ed AND if there
	 * is going to be no callback (i.e if fc_ulp_login() failed).
	 * Otherwise, cleanup happens in callback routine.
	 */
	if (icmd != NULL) {
		fcp_ipkt_sema_cleanup(icmd);
	}

	return (ret);
}

/*
 * Function: fcp_tgt_send_prli
 *
 * Description: Does nothing as of today.
 *
 * Argument:	ptgt		Target to send the prli to.
 *		fc_status	Status returned by fp/fctl in the PRLI request.
 *		fc_pkt_state	State returned by fp/fctl in the PRLI request.
 *		fc_pkt_reason	Reason returned by fp/fctl in the PRLI request.
 *		fc_pkt_action	Action returned by fp/fctl in the PRLI request.
 *
 * Return Value: 0
 */
/*ARGSUSED*/
static int
fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
	/* PRLI is currently a no-op; callers treat 0 as success. */
	return (0);
}

/*
 * Function: fcp_ipkt_sema_init
 *
 * Description: Initializes the semaphore contained in the internal packet.
 *
 * Argument:	icmd	Internal packet the semaphore of which must be
 *			initialized.
 *
 * Return Value: None
 *
 * Context:	User context only.
 */
static void
fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
{
	struct fc_packet	*fpkt;

	fpkt = icmd->ipkt_fpkt;

	/* Create semaphore for sync; starts at 0 so the waiter blocks. */
	sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);

	/* Setup the completion callback */
	fpkt->pkt_comp = fcp_ipkt_sema_callback;
}

/*
 * Function: fcp_ipkt_sema_wait
 *
 * Description: Wait on the semaphore embedded in the internal packet. The
 *		semaphore is released in the callback.
 *
 * Argument:	icmd	Internal packet to wait on for completion.
 *
 * Return Value: 0	Packet completed with FC_PKT_SUCCESS.
 *		EIO	Default for any completion state not mapped below.
 *		EBUSY	Busy-type completion states/reasons.
 *		EAGAIN	Timeout-type completion states/reasons.
 *
 * Context: User context only.
 *
 * This function does a conversion between the field pkt_state of the fc_packet
 * embedded in the internal packet (icmd) and the code it returns.
 */
static int
fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
{
	struct fc_packet	*fpkt;
	int			ret;

	/* EIO is the catch-all for states/reasons not handled below. */
	ret = EIO;
	fpkt = icmd->ipkt_fpkt;

	/*
	 * Wait on semaphore
	 */
	sema_p(&(icmd->ipkt_sema));

	/*
	 * Check the status of the FC packet
	 */
	switch (fpkt->pkt_state) {
	case FC_PKT_SUCCESS:
		ret = 0;
		break;
	case FC_PKT_LOCAL_RJT:
		/* Local reject: distinguish retryable timeouts from busy. */
		switch (fpkt->pkt_reason) {
		case FC_REASON_SEQ_TIMEOUT:
		case FC_REASON_RX_BUF_TIMEOUT:
			ret = EAGAIN;
			break;
		case FC_REASON_PKT_BUSY:
			ret = EBUSY;
			break;
		}
		break;
	case FC_PKT_TIMEOUT:
		ret = EAGAIN;
		break;
	case FC_PKT_LOCAL_BSY:
	case FC_PKT_TRAN_BSY:
	case FC_PKT_NPORT_BSY:
	case FC_PKT_FABRIC_BSY:
		ret = EBUSY;
		break;
	case FC_PKT_LS_RJT:
	case FC_PKT_BA_RJT:
		switch (fpkt->pkt_reason) {
		case FC_REASON_LOGICAL_BSY:
			ret = EBUSY;
			break;
		}
		break;
	case FC_PKT_FS_RJT:
		switch (fpkt->pkt_reason) {
		case FC_REASON_FS_LOGICAL_BUSY:
			ret = EBUSY;
			break;
		}
		break;
	}

	return (ret);
}

/*
 * Function: fcp_ipkt_sema_callback
 *
 * Description: Registered as the completion callback function for the FC
 *		transport when the ipkt semaphore is used for sync. This will
 *		cleanup the used data structures, if necessary and wake up
 *		the user thread to complete the transaction.
 *
 * Argument:	fpkt	FC packet (points to the icmd)
 *
 * Return Value: None
 *
 * Context: User context only
 */
static void
fcp_ipkt_sema_callback(struct fc_packet *fpkt)
{
	struct fcp_ipkt	*icmd;

	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;

	/*
	 * Wake up user thread.  No cleanup is done here; the waiter owns
	 * the icmd and performs cleanup via fcp_ipkt_sema_cleanup().
	 */
	sema_v(&(icmd->ipkt_sema));
}

/*
 * Function: fcp_ipkt_sema_cleanup
 *
 * Description: Called to cleanup (if necessary) the data structures used
 *		when ipkt sema is used for sync. This function will detect
 *		whether the caller is the last thread (via counter) and
 *		cleanup only if necessary.
 *
 * Argument:	icmd	Internal command packet
 *
 * Return Value: None
 *
 * Context: User context only
 */
static void
fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
{
	struct fcp_tgt	*ptgt;
	struct fcp_port	*pptr;

	ptgt = icmd->ipkt_tgt;
	pptr = icmd->ipkt_port;

	/*
	 * Acquire data structure
	 */
	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * Destroy semaphore
	 */
	sema_destroy(&(icmd->ipkt_sema));

	/*
	 * Cleanup internal packet.  The target mutex is dropped first:
	 * fcp_icmd_free() must not be called with tgt_mutex held.
	 */
	mutex_exit(&ptgt->tgt_mutex);
	fcp_icmd_free(pptr, icmd);
}

/*
 * Function: fcp_port_attach
 *
 * Description: Called by the transport framework to resume, suspend or
 *		attach a new port.
3541 * 3542 * Argument: ulph Port handle 3543 * *pinfo Port information 3544 * cmd Command 3545 * s_id Port ID 3546 * 3547 * Return Value: FC_FAILURE or FC_SUCCESS 3548 */ 3549 /*ARGSUSED*/ 3550 static int 3551 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo, 3552 fc_attach_cmd_t cmd, uint32_t s_id) 3553 { 3554 int instance; 3555 int res = FC_FAILURE; /* default result */ 3556 3557 ASSERT(pinfo != NULL); 3558 3559 instance = ddi_get_instance(pinfo->port_dip); 3560 3561 switch (cmd) { 3562 case FC_CMD_ATTACH: 3563 /* 3564 * this port instance attaching for the first time (or after 3565 * being detached before) 3566 */ 3567 if (fcp_handle_port_attach(ulph, pinfo, s_id, 3568 instance) == DDI_SUCCESS) { 3569 res = FC_SUCCESS; 3570 } else { 3571 ASSERT(ddi_get_soft_state(fcp_softstate, 3572 instance) == NULL); 3573 } 3574 break; 3575 3576 case FC_CMD_RESUME: 3577 case FC_CMD_POWER_UP: 3578 /* 3579 * this port instance was attached and the suspended and 3580 * will now be resumed 3581 */ 3582 if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd, 3583 instance) == DDI_SUCCESS) { 3584 res = FC_SUCCESS; 3585 } 3586 break; 3587 3588 default: 3589 /* shouldn't happen */ 3590 FCP_TRACE(fcp_logq, "fcp", 3591 fcp_trace, FCP_BUF_LEVEL_2, 0, 3592 "port_attach: unknown cmdcommand: %d", cmd); 3593 break; 3594 } 3595 3596 /* return result */ 3597 FCP_DTRACE(fcp_logq, "fcp", fcp_trace, 3598 FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res); 3599 3600 return (res); 3601 } 3602 3603 3604 /* 3605 * detach or suspend this port instance 3606 * 3607 * acquires and releases the global mutex 3608 * 3609 * acquires and releases the mutex for this port 3610 * 3611 * acquires and releases the hotplug mutex for this port 3612 */ 3613 /*ARGSUSED*/ 3614 static int 3615 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info, 3616 fc_detach_cmd_t cmd) 3617 { 3618 int flag; 3619 int instance; 3620 struct fcp_port *pptr; 3621 3622 instance = ddi_get_instance(info->port_dip); 3623 pptr = 
ddi_get_soft_state(fcp_softstate, instance); 3624 3625 switch (cmd) { 3626 case FC_CMD_SUSPEND: 3627 FCP_DTRACE(fcp_logq, "fcp", 3628 fcp_trace, FCP_BUF_LEVEL_8, 0, 3629 "port suspend called for port %d", instance); 3630 flag = FCP_STATE_SUSPENDED; 3631 break; 3632 3633 case FC_CMD_POWER_DOWN: 3634 FCP_DTRACE(fcp_logq, "fcp", 3635 fcp_trace, FCP_BUF_LEVEL_8, 0, 3636 "port power down called for port %d", instance); 3637 flag = FCP_STATE_POWER_DOWN; 3638 break; 3639 3640 case FC_CMD_DETACH: 3641 FCP_DTRACE(fcp_logq, "fcp", 3642 fcp_trace, FCP_BUF_LEVEL_8, 0, 3643 "port detach called for port %d", instance); 3644 flag = FCP_STATE_DETACHING; 3645 break; 3646 3647 default: 3648 /* shouldn't happen */ 3649 return (FC_FAILURE); 3650 } 3651 FCP_DTRACE(fcp_logq, "fcp", fcp_trace, 3652 FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning"); 3653 3654 return (fcp_handle_port_detach(pptr, flag, instance)); 3655 } 3656 3657 3658 /* 3659 * called for ioctls on the transport's devctl interface, and the transport 3660 * has passed it to us 3661 * 3662 * this will only be called for device control ioctls (i.e. 
hotplugging stuff)
 *
 * return FC_SUCCESS if we decide to claim the ioctl,
 * else return FC_UNCLAIMED
 *
 * *rval is set iff we decide to claim the ioctl
 */
/*ARGSUSED*/
static int
fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
    intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
{
	int			retval = FC_UNCLAIMED;	/* return value */
	struct fcp_port		*pptr = NULL;		/* our soft state */
	struct devctl_iocdata	*dcp = NULL;		/* for devctl */
	dev_info_t		*cdip;
	mdi_pathinfo_t		*pip = NULL;
	char			*ndi_nm;		/* NDI name */
	char			*ndi_addr;		/* NDI addr */
	int			is_mpxio, circ;
	int			devi_entered = 0;	/* ndi/mdi enter done */
	time_t			end_time;

	ASSERT(rval != NULL);

	FCP_DTRACE(fcp_logq, "fcp",
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);

	/* if already claimed then forget it */
	if (claimed) {
		/*
		 * for now, if this ioctl has already been claimed, then
		 * we just ignore it
		 */
		return (retval);
	}

	/* get our port info */
	if ((pptr = fcp_get_port(port_handle)) == NULL) {
		fcp_log(CE_WARN, NULL,
		    "!fcp:Invalid port handle handle in ioctl");
		*rval = ENXIO;
		return (retval);
	}
	is_mpxio = pptr->port_mpxio;

	/*
	 * First pass over the command: allocate the devctl handle and, for
	 * the per-device commands, resolve the child devinfo node (or the
	 * mpxio path-info node) named in the ioctl data.
	 */
	switch (cmd) {
	case DEVCTL_BUS_GETSTATE:
	case DEVCTL_BUS_QUIESCE:
	case DEVCTL_BUS_UNQUIESCE:
	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:

	case DEVCTL_BUS_DEV_CREATE:
		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
			return (retval);
		}
		break;

	case DEVCTL_DEVICE_GETSTATE:
	case DEVCTL_DEVICE_OFFLINE:
	case DEVCTL_DEVICE_ONLINE:
	case DEVCTL_DEVICE_REMOVE:
	case DEVCTL_DEVICE_RESET:
		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
			return (retval);
		}

		ASSERT(dcp != NULL);

		/* ensure we have a name and address */
		if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
		    ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "ioctl: can't get name (%s) or addr (%s)",
			    ndi_nm ? ndi_nm : "<null ptr>",
			    ndi_addr ? ndi_addr : "<null ptr>");
			ndi_dc_freehdl(dcp);
			return (retval);
		}


		/* get our child's DIP */
		ASSERT(pptr != NULL);
		if (is_mpxio) {
			mdi_devi_enter(pptr->port_dip, &circ);
		} else {
			ndi_devi_enter(pptr->port_dip, &circ);
		}
		devi_entered = 1;

		if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
		    ndi_addr)) == NULL) {
			/* Look for virtually enumerated devices. */
			pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
			if (pip == NULL ||
			    ((cdip = mdi_pi_get_client(pip)) == NULL)) {
				*rval = ENXIO;
				goto out;
			}
		}
		break;

	default:
		*rval = ENOTTY;
		return (retval);
	}

	/* this ioctl is ours -- process it */

	retval = FC_SUCCESS;		/* just means we claim the ioctl */

	/* we assume it will be a success; else we'll set error value */
	*rval = 0;


	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "ioctl: claiming this one");

	/* handle ioctls now */
	switch (cmd) {
	case DEVCTL_DEVICE_GETSTATE:
		ASSERT(cdip != NULL);
		ASSERT(dcp != NULL);
		if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
			*rval = EFAULT;
		}
		break;

	case DEVCTL_DEVICE_REMOVE:
	case DEVCTL_DEVICE_OFFLINE: {
		int		flag = 0;
		int		lcount;
		int		tcount;
		struct fcp_pkt	*head = NULL;
		struct fcp_lun	*plun;
		child_info_t	*cip = CIP(cdip);
		int		all = 1;	/* all LUNs now offline? */
		struct fcp_lun	*tplun;
		struct fcp_tgt	*ptgt;

		ASSERT(pptr != NULL);
		ASSERT(cdip != NULL);

		mutex_enter(&pptr->port_mutex);
		if (pip != NULL) {
			cip = CIP(pip);
		}
		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
			mutex_exit(&pptr->port_mutex);
			*rval = ENXIO;
			break;
		}

		/* Abort any commands still queued against this LUN. */
		head = fcp_scan_commands(plun);
		if (head != NULL) {
			fcp_abort_commands(head, LUN_PORT);
		}
		lcount = pptr->port_link_cnt;
		tcount = plun->lun_tgt->tgt_change_cnt;
		mutex_exit(&pptr->port_mutex);

		if (cmd == DEVCTL_DEVICE_REMOVE) {
			flag = NDI_DEVI_REMOVE;
		}

		/*
		 * Drop the devinfo lock before handing off to the hotplug
		 * thread, which needs it to offline the node.
		 */
		if (is_mpxio) {
			mdi_devi_exit(pptr->port_dip, circ);
		} else {
			ndi_devi_exit(pptr->port_dip, circ);
		}
		devi_entered = 0;

		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
		    FCP_OFFLINE, lcount, tcount, flag);

		if (*rval != NDI_SUCCESS) {
			*rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
			break;
		}

		fcp_update_offline_flags(plun);

		/* Check whether every LUN of this target is now offline. */
		ptgt = plun->lun_tgt;
		mutex_enter(&ptgt->tgt_mutex);
		for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
		    tplun->lun_next) {
			mutex_enter(&tplun->lun_mutex);
			if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
				all = 0;
			}
			mutex_exit(&tplun->lun_mutex);
		}

		if (all) {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
			/*
			 * The user is unconfiguring/offlining the device.
			 * If fabric and the auto configuration is set
			 * then make sure the user is the only one who
			 * can reconfigure the device.
			 */
			if (FC_TOP_EXTERNAL(pptr->port_topology) &&
			    fcp_enable_auto_configuration) {
				ptgt->tgt_manual_config_only = 1;
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
		break;
	}

	case DEVCTL_DEVICE_ONLINE: {
		int		lcount;
		int		tcount;
		struct fcp_lun	*plun;
		child_info_t	*cip = CIP(cdip);

		ASSERT(cdip != NULL);
		ASSERT(pptr != NULL);

		mutex_enter(&pptr->port_mutex);
		if (pip != NULL) {
			cip = CIP(pip);
		}
		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
			mutex_exit(&pptr->port_mutex);
			*rval = ENXIO;
			break;
		}
		lcount = pptr->port_link_cnt;
		tcount = plun->lun_tgt->tgt_change_cnt;
		mutex_exit(&pptr->port_mutex);

		/*
		 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
		 * to allow the device attach to occur when the device is
		 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
		 * from the scsi_probe()).
		 */
		mutex_enter(&LUN_TGT->tgt_mutex);
		plun->lun_state |= FCP_LUN_ONLINING;
		mutex_exit(&LUN_TGT->tgt_mutex);

		if (is_mpxio) {
			mdi_devi_exit(pptr->port_dip, circ);
		} else {
			ndi_devi_exit(pptr->port_dip, circ);
		}
		devi_entered = 0;

		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
		    FCP_ONLINE, lcount, tcount, 0);

		if (*rval != NDI_SUCCESS) {
			/* Reset the FCP_LUN_ONLINING bit */
			mutex_enter(&LUN_TGT->tgt_mutex);
			plun->lun_state &= ~FCP_LUN_ONLINING;
			mutex_exit(&LUN_TGT->tgt_mutex);
			*rval = EIO;
			break;
		}
		mutex_enter(&LUN_TGT->tgt_mutex);
		plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
		    FCP_LUN_ONLINING);
		mutex_exit(&LUN_TGT->tgt_mutex);
		break;
	}

	case DEVCTL_BUS_DEV_CREATE: {
		uchar_t		*bytes = NULL;
		uint_t		nbytes;
		struct fcp_tgt	*ptgt = NULL;
		struct fcp_lun	*plun = NULL;
		dev_info_t	*useless_dip = NULL;

		/*
		 * The devctl framework needs a devinfo node to carry the
		 * properties of the device to create; the node itself is
		 * discarded below once the WWN property has been read.
		 */
		*rval = ndi_dc_devi_create(dcp, pptr->port_dip,
		    DEVCTL_CONSTRUCT, &useless_dip);
		if (*rval != 0 || useless_dip == NULL) {
			break;
		}

		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
			*rval = EINVAL;
			(void) ndi_devi_free(useless_dip);
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			break;
		}

		*rval = fcp_create_on_demand(pptr, bytes);
		if (*rval == 0) {
			mutex_enter(&pptr->port_mutex);
			ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
			if (ptgt) {
				/*
				 * We now have a pointer to the target that
				 * was created. Lets point to the first LUN on
				 * this new target.
				 */
				mutex_enter(&ptgt->tgt_mutex);

				plun = ptgt->tgt_lun;
				/*
				 * There may be stale/offline LUN entries on
				 * this list (this is by design) and so we have
				 * to make sure we point to the first online
				 * LUN
				 */
				while (plun &&
				    plun->lun_state & FCP_LUN_OFFLINE) {
					plun = plun->lun_next;
				}

				mutex_exit(&ptgt->tgt_mutex);
			}
			mutex_exit(&pptr->port_mutex);
		}

		if (*rval == 0 && ptgt && plun) {
			mutex_enter(&plun->lun_mutex);
			/*
			 * Allow up to fcp_lun_ready_retry seconds to
			 * configure all the luns behind the target.
			 *
			 * The intent here is to allow targets with long
			 * reboot/reset-recovery times to become available
			 * while limiting the maximum wait time for an
			 * unresponsive target.
			 */
			end_time = ddi_get_lbolt() +
			    SEC_TO_TICK(fcp_lun_ready_retry);

			/*
			 * Poll (1 second at a time) until every online LUN
			 * behind the target has attached or the deadline
			 * expires.
			 */
			while (ddi_get_lbolt() < end_time) {
				retval = FC_SUCCESS;

				/*
				 * The new ndi interfaces for on-demand creation
				 * are inflexible, Do some more work to pass on
				 * a path name of some LUN (design is broken !)
				 */
				if (plun->lun_cip) {
					if (plun->lun_mpxio == 0) {
						cdip = DIP(plun->lun_cip);
					} else {
						cdip = mdi_pi_get_client(
						    PIP(plun->lun_cip));
					}
					if (cdip == NULL) {
						*rval = ENXIO;
						break;
					}

					if (!i_ddi_devi_attached(cdip)) {
						mutex_exit(&plun->lun_mutex);
						delay(drv_usectohz(1000000));
						mutex_enter(&plun->lun_mutex);
					} else {
						/*
						 * This Lun is ready, lets
						 * check the next one.
						 */
						mutex_exit(&plun->lun_mutex);
						plun = plun->lun_next;
						while (plun && (plun->lun_state
						    & FCP_LUN_OFFLINE)) {
							plun = plun->lun_next;
						}
						if (!plun) {
							break;
						}
						mutex_enter(&plun->lun_mutex);
					}
				} else {
					/*
					 * lun_cip field for a valid lun
					 * should never be NULL. Fail the
					 * command.
					 */
					*rval = ENXIO;
					break;
				}
			}
			if (plun) {
				/* Deadline hit or error: drop the LUN lock. */
				mutex_exit(&plun->lun_mutex);
			} else {
				/*
				 * All LUNs attached: hand the path name of
				 * the last attached node back to the caller.
				 */
				char devnm[MAXNAMELEN];
				int nmlen;

				nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
				    ddi_node_name(cdip),
				    ddi_get_name_addr(cdip));

				if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
				    0) {
					*rval = EFAULT;
				}
			}
		} else {
			int	i;
			char	buf[25];

			for (i = 0; i < FC_WWN_SIZE; i++) {
				(void) sprintf(&buf[i << 1], "%02x", bytes[i]);
			}

			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to create nodes for pwwn=%s; error=%x",
			    buf, *rval);
		}

		(void) ndi_devi_free(useless_dip);
		ddi_prop_free(bytes);
		break;
	}

	case DEVCTL_DEVICE_RESET: {
		struct fcp_lun		*plun;
		struct scsi_address	ap;
		child_info_t		*cip = CIP(cdip);

		ASSERT(cdip != NULL);
		ASSERT(pptr != NULL);
		mutex_enter(&pptr->port_mutex);
		if (pip != NULL) {
			cip = CIP(pip);
		}
		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
			mutex_exit(&pptr->port_mutex);
			*rval = ENXIO;
			break;
		}
		mutex_exit(&pptr->port_mutex);

		mutex_enter(&plun->lun_tgt->tgt_mutex);
		if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
			mutex_exit(&plun->lun_tgt->tgt_mutex);
			*rval = ENXIO;
			break;
		}
		ap.a_hba_tran = plun->lun_tran;
		ASSERT(pptr->port_tran != NULL);
		mutex_exit(&plun->lun_tgt->tgt_mutex);

		/*
		 * There is a chance lun_tran is NULL at this point. So check
		 * for it. If it is NULL, it basically means that the tgt has
		 * been freed. So, just return a "No such device or address"
		 * error.
		 */
		if (ap.a_hba_tran == NULL) {
			*rval = ENXIO;
			break;
		}

		/*
		 * set up ap so that fcp_reset can figure out
		 * which target to reset
		 */
		if (fcp_scsi_reset(&ap, RESET_TARGET) == FALSE) {
			*rval = EIO;
		}
		break;
	}

	case DEVCTL_BUS_GETSTATE:
		ASSERT(dcp != NULL);
		ASSERT(pptr != NULL);
		ASSERT(pptr->port_dip != NULL);
		if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
		    NDI_SUCCESS) {
			*rval = EFAULT;
		}
		break;

	case DEVCTL_BUS_QUIESCE:
	case DEVCTL_BUS_UNQUIESCE:
		*rval = ENOTSUP;
		break;

	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:
		ASSERT(pptr != NULL);
		(void) fcp_linkreset(pptr, NULL, KM_SLEEP);
		break;

	default:
		ASSERT(dcp != NULL);
		*rval = ENOTTY;
		break;
	}

	/* all done -- clean up and return */
out:	if (devi_entered) {
		if (is_mpxio) {
			mdi_devi_exit(pptr->port_dip, circ);
		} else {
			ndi_devi_exit(pptr->port_dip, circ);
		}
	}

	if (dcp != NULL) {
		ndi_dc_freehdl(dcp);
	}

	return (retval);
}


/*
 * Unsolicited-ELS callback from the transport: claim unsolicited PRLI
 * requests, leave everything else unclaimed.
 */
/*ARGSUSED*/
static int
fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
    uint32_t claimed)
{
	uchar_t		r_ctl;
	uchar_t		ls_code;
	struct fcp_port	*pptr;

	if ((pptr =
fcp_get_port(port_handle)) == NULL || claimed) {
		return (FC_UNCLAIMED);
	}

	/* Ignore unsolicited ELS while detaching, suspended or powered off. */
	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & (FCP_STATE_DETACHING |
	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
		mutex_exit(&pptr->port_mutex);
		return (FC_UNCLAIMED);
	}
	mutex_exit(&pptr->port_mutex);

	r_ctl = buf->ub_frame.r_ctl;

	switch (r_ctl & R_CTL_ROUTING) {
	case R_CTL_EXTENDED_SVC:
		if (r_ctl == R_CTL_ELS_REQ) {
			ls_code = buf->ub_buffer[0];

			switch (ls_code) {
			case LA_ELS_PRLI:
				/*
				 * We really don't care if something fails.
				 * If the PRLI was not sent out, then the
				 * other end will time it out.
				 */
				if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
					return (FC_SUCCESS);
				}
				return (FC_UNCLAIMED);
				/* NOTREACHED */

			default:
				break;
			}
		}
		/* FALLTHROUGH */

	default:
		return (FC_UNCLAIMED);
	}
}


/*
 * Unsolicited-data callback from the transport.  FCP claims no
 * unsolicited data frames.
 */
/*ARGSUSED*/
static int
fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
    uint32_t claimed)
{
	return (FC_UNCLAIMED);
}

/*
 * Function: fcp_statec_callback
 *
 * Description: The purpose of this function is to handle a port state change.
 *		It is called from fp/fctl and, in a few instances, internally.
 *
 * Argument:	ulph		fp/fctl port handle
 *		port_handle	fcp_port structure
 *		port_state	Physical state of the port
 *		port_top	Topology
 *		*devlist	Pointer to the first entry of a table
 *				containing the remote ports that can be
 *				reached.
 *		dev_cnt		Number of entries pointed by devlist.
 *		port_sid	Port ID of the local port.
 *
 * Return Value: None
 */
/*ARGSUSED*/
static void
fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
    uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
    uint32_t dev_cnt, uint32_t port_sid)
{
	uint32_t	link_count;
	int		map_len = 0;	/* bytes allocated for map_tag */
	struct fcp_port	*pptr;
	fcp_map_tag_t	*map_tag = NULL;

	if ((pptr = fcp_get_port(port_handle)) == NULL) {
		fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
		return;			/* nothing to work with! */
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_statec_callback: port state/dev_cnt/top ="
	    "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
	    dev_cnt, port_top);

	mutex_enter(&pptr->port_mutex);

	/*
	 * If a thread is in detach, don't do anything.
	 */
	if (pptr->port_state & (FCP_STATE_DETACHING |
	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
		mutex_exit(&pptr->port_mutex);
		return;
	}

	/*
	 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
	 * init_pkt is called, it knows whether or not the target's status
	 * (or pd) might be changing.
	 */

	if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
		pptr->port_state |= FCP_STATE_IN_CB_DEVC;
	}

	/*
	 * the transport doesn't allocate or probe unless being
	 * asked to by either the applications or ULPs
	 *
	 * in cases where the port is OFFLINE at the time of port
	 * attach callback and the link comes ONLINE later, for
	 * easier automatic node creation (i.e. without you having to
	 * go out and run the utility to perform LOGINs) the
	 * following conditional is helpful
	 */
	pptr->port_phys_state = port_state;

	if (dev_cnt) {
		/*
		 * Allocate one change-count tag per device in the map.
		 * The port mutex is dropped around the allocation; the
		 * KM_NOSLEEP failure path undoes the IN_CB_DEVC flag.
		 */
		mutex_exit(&pptr->port_mutex);

		map_len = sizeof (*map_tag) * dev_cnt;
		map_tag = kmem_alloc(map_len, KM_NOSLEEP);
		if (map_tag == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp%d: failed to allocate for map tags; "
			    " state change will not be processed",
			    pptr->port_instance);

			mutex_enter(&pptr->port_mutex);
			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
			mutex_exit(&pptr->port_mutex);

			return;
		}

		mutex_enter(&pptr->port_mutex);
	}

	if (pptr->port_id != port_sid) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
		    port_sid);
		/*
		 * The local port changed ID. It is the first time a port ID
		 * is assigned or something drastic happened.  We might have
		 * been unplugged and replugged on another loop or fabric port
		 * or somebody grabbed the AL_PA we had or somebody rezoned
		 * the fabric we were plugged into.
		 */
		pptr->port_id = port_sid;
	}

	switch (FC_PORT_STATE_MASK(port_state)) {
	case FC_STATE_OFFLINE:
	case FC_STATE_RESET_REQUESTED:
		/*
		 * link has gone from online to offline -- just update the
		 * state of this port to BUSY and MARKed to go offline
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "link went offline");
		if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
			/*
			 * We were offline a while ago and this one
			 * seems to indicate that the loop has gone
			 * dead forever.
			 */
			pptr->port_tmp_cnt += dev_cnt;
			pptr->port_state &= ~FCP_STATE_OFFLINE;
			pptr->port_state |= FCP_STATE_INIT;
			link_count = pptr->port_link_cnt;
			fcp_handle_devices(pptr, devlist, dev_cnt,
			    link_count, map_tag, FCP_CAUSE_LINK_DOWN);
		} else {
			pptr->port_link_cnt++;
			ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
			fcp_update_state(pptr, (FCP_LUN_BUSY |
			    FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
			if (pptr->port_mpxio) {
				fcp_update_mpxio_path_verifybusy(pptr);
			}
			pptr->port_state |= FCP_STATE_OFFLINE;
			pptr->port_state &=
			    ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
			pptr->port_tmp_cnt = 0;
		}
		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_ONLINE:
	case FC_STATE_LIP:
	case FC_STATE_LIP_LBIT_SET:
		/*
		 * link has gone from offline to online
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "link went online");

		pptr->port_link_cnt++;

		/*
		 * Drain outstanding internal packets (1-second polls)
		 * before processing the new map.
		 */
		while (pptr->port_ipkt_cnt) {
			mutex_exit(&pptr->port_mutex);
			delay(drv_usectohz(1000000));
			mutex_enter(&pptr->port_mutex);
		}

		pptr->port_topology = port_top;

		/*
		 * The state of the targets and luns accessible through this
		 * port is updated.
		 */
		fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
		    FCP_CAUSE_LINK_CHANGE);

		pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
		pptr->port_state |= FCP_STATE_ONLINING;
		pptr->port_tmp_cnt = dev_cnt;
		link_count = pptr->port_link_cnt;

		pptr->port_deadline = fcp_watchdog_time +
		    FCP_ICMD_DEADLINE;

		if (!dev_cnt) {
			/*
			 * We go directly to the online state if no remote
			 * ports were discovered.
			 */
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "No remote ports discovered");

			pptr->port_state &= ~FCP_STATE_ONLINING;
			pptr->port_state |= FCP_STATE_ONLINE;
		}

		switch (port_top) {
		case FC_TOP_FABRIC:
		case FC_TOP_PUBLIC_LOOP:
		case FC_TOP_PRIVATE_LOOP:
		case FC_TOP_PT_PT:

			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
				fcp_retry_ns_registry(pptr, port_sid);
			}

			fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
			    map_tag, FCP_CAUSE_LINK_CHANGE);
			break;

		default:
			/*
			 * We got here because we were provided with an unknown
			 * topology.
			 */
			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
				pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
			}

			pptr->port_tmp_cnt -= dev_cnt;
			fcp_log(CE_WARN, pptr->port_dip,
			    "!unknown/unsupported topology (0x%x)", port_top);
			break;
		}
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "Notify ssd of the reset to reinstate the reservations");

		scsi_hba_reset_notify_callback(&pptr->port_mutex,
		    &pptr->port_reset_notify_listf);

		mutex_exit(&pptr->port_mutex);

		break;

	case FC_STATE_RESET:
		ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "RESET state, waiting for Offline/Online state_cb");
		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_DEVICE_CHANGE:
		/*
		 * We come here when an application has requested
		 * Dynamic node creation/deletion in Fabric connectivity.
		 */
		if (pptr->port_state & (FCP_STATE_OFFLINE |
		    FCP_STATE_INIT)) {
			/*
			 * This case can happen when the FCTL is in the
			 * process of giving us on online and the host on
			 * the other side issues a PLOGI/PLOGO. Ideally
			 * the state changes should be serialized unless
			 * they are opposite (online-offline).
			 * The transport will give us a final state change
			 * so we can ignore this for the time being.
			 */
			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
			mutex_exit(&pptr->port_mutex);
			break;
		}

		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
			fcp_retry_ns_registry(pptr, port_sid);
		}

		/*
		 * Extend the deadline under steady state conditions
		 * to provide more time for the device-change-commands
		 */
		if (!pptr->port_ipkt_cnt) {
			pptr->port_deadline = fcp_watchdog_time +
			    FCP_ICMD_DEADLINE;
		}

		/*
		 * There is another race condition here, where if we were
		 * in ONLINEING state and a devices in the map logs out,
		 * fp will give another state change as DEVICE_CHANGE
		 * and OLD. This will result in that target being offlined.
		 * The pd_handle is freed. If from the first statec callback
		 * we were going to fire a PLOGI/PRLI, the system will
		 * panic in fc_ulp_transport with invalid pd_handle.
		 * The fix is to check for the link_cnt before issuing
		 * any command down.
		 */
		fcp_update_targets(pptr, devlist, dev_cnt,
		    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);

		link_count = pptr->port_link_cnt;

		fcp_handle_devices(pptr, devlist, dev_cnt,
		    link_count, map_tag, FCP_CAUSE_TGT_CHANGE);

		pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;

		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_TARGET_PORT_RESET:
		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
			fcp_retry_ns_registry(pptr, port_sid);
		}

		/* Do nothing else */
		mutex_exit(&pptr->port_mutex);
		break;

	default:
		fcp_log(CE_WARN, pptr->port_dip,
		    "!Invalid state change=0x%x", port_state);
		mutex_exit(&pptr->port_mutex);
		break;
	}

	/* map_len was sized from this callback's dev_cnt; free in kind. */
	if (map_tag) {
		kmem_free(map_tag, map_len);
	}
}

/*
 * Function: fcp_handle_devices
 *
 * Description: This function updates the devices currently known by
 *		walking the list provided by the caller. The list passed
 *		by the caller is supposed to be the list of reachable
 *		devices.
 *
 * Argument:	*pptr		Fcp port structure.
 *		*devlist	Pointer to the first entry of a table
 *				containing the remote ports that can be
 *				reached.
 *		dev_cnt		Number of entries pointed by devlist.
 *		link_cnt	Link state count.
 *		*map_tag	Array of fcp_map_tag_t structures.
 *		cause		What caused this function to be called.
 *
 * Return Value: None
 *
 * Notes: The pptr->port_mutex must be held.
4578 */ 4579 static void 4580 fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[], 4581 uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause) 4582 { 4583 int i; 4584 int check_finish_init = 0; 4585 fc_portmap_t *map_entry; 4586 struct fcp_tgt *ptgt = NULL; 4587 4588 FCP_TRACE(fcp_logq, pptr->port_instbuf, 4589 fcp_trace, FCP_BUF_LEVEL_3, 0, 4590 "fcp_handle_devices: called for %d dev(s)", dev_cnt); 4591 4592 if (dev_cnt) { 4593 ASSERT(map_tag != NULL); 4594 } 4595 4596 /* 4597 * The following code goes through the list of remote ports that are 4598 * accessible through this (pptr) local port (The list walked is the 4599 * one provided by the caller which is the list of the remote ports 4600 * currently reachable). It checks if any of them was already 4601 * known by looking for the corresponding target structure based on 4602 * the world wide name. If a target is part of the list it is tagged 4603 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED). 4604 * 4605 * Old comment 4606 * ----------- 4607 * Before we drop port mutex; we MUST get the tags updated; This 4608 * two step process is somewhat slow, but more reliable. 4609 */ 4610 for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) { 4611 map_entry = &(devlist[i]); 4612 4613 /* 4614 * get ptr to this map entry in our port's 4615 * list (if any) 4616 */ 4617 ptgt = fcp_lookup_target(pptr, 4618 (uchar_t *)&(map_entry->map_pwwn)); 4619 4620 if (ptgt) { 4621 map_tag[i] = ptgt->tgt_change_cnt; 4622 if (cause == FCP_CAUSE_LINK_CHANGE) { 4623 ptgt->tgt_aux_state = FCP_TGT_TAGGED; 4624 } 4625 } 4626 } 4627 4628 /* 4629 * At this point we know which devices of the new list were already 4630 * known (The field tgt_aux_state of the target structure has been 4631 * set to FCP_TGT_TAGGED). 4632 * 4633 * The following code goes through the list of targets currently known 4634 * by the local port (the list is actually a hashing table). 
If a 4635 * target is found and is not tagged, it means the target cannot 4636 * be reached anymore through the local port (pptr). It is offlined. 4637 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE. 4638 */ 4639 for (i = 0; i < FCP_NUM_HASH; i++) { 4640 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL; 4641 ptgt = ptgt->tgt_next) { 4642 mutex_enter(&ptgt->tgt_mutex); 4643 if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) && 4644 (cause == FCP_CAUSE_LINK_CHANGE) && 4645 !(ptgt->tgt_state & FCP_TGT_OFFLINE)) { 4646 fcp_offline_target_now(pptr, ptgt, 4647 link_cnt, ptgt->tgt_change_cnt, 0); 4648 } 4649 mutex_exit(&ptgt->tgt_mutex); 4650 } 4651 } 4652 4653 /* 4654 * At this point, the devices that were known but cannot be reached 4655 * anymore, have most likely been offlined. 4656 * 4657 * The following section of code seems to go through the list of 4658 * remote ports that can now be reached. For every single one it 4659 * checks if it is already known or if it is a new port. 4660 */ 4661 for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) { 4662 4663 if (check_finish_init) { 4664 ASSERT(i > 0); 4665 (void) fcp_call_finish_init_held(pptr, ptgt, link_cnt, 4666 map_tag[i - 1], cause); 4667 check_finish_init = 0; 4668 } 4669 4670 /* get a pointer to this map entry */ 4671 map_entry = &(devlist[i]); 4672 4673 /* 4674 * Check for the duplicate map entry flag. If we have marked 4675 * this entry as a duplicate we skip it since the correct 4676 * (perhaps even same) state change will be encountered 4677 * later in the list. 4678 */ 4679 if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) { 4680 continue; 4681 } 4682 4683 /* get ptr to this map entry in our port's list (if any) */ 4684 ptgt = fcp_lookup_target(pptr, 4685 (uchar_t *)&(map_entry->map_pwwn)); 4686 4687 if (ptgt) { 4688 /* 4689 * This device was already known. 
The field 4690 * tgt_aux_state is reset (was probably set to 4691 * FCP_TGT_TAGGED previously in this routine). 4692 */ 4693 ptgt->tgt_aux_state = 0; 4694 FCP_TRACE(fcp_logq, pptr->port_instbuf, 4695 fcp_trace, FCP_BUF_LEVEL_3, 0, 4696 "handle_devices: map did/state/type/flags = " 4697 "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, " 4698 "tgt_state=%d", 4699 map_entry->map_did.port_id, map_entry->map_state, 4700 map_entry->map_type, map_entry->map_flags, 4701 ptgt->tgt_d_id, ptgt->tgt_state); 4702 } 4703 4704 if (map_entry->map_type == PORT_DEVICE_OLD || 4705 map_entry->map_type == PORT_DEVICE_NEW || 4706 map_entry->map_type == PORT_DEVICE_CHANGED) { 4707 FCP_TRACE(fcp_logq, pptr->port_instbuf, 4708 fcp_trace, FCP_BUF_LEVEL_2, 0, 4709 "map_type=%x, did = %x", 4710 map_entry->map_type, 4711 map_entry->map_did.port_id); 4712 } 4713 4714 switch (map_entry->map_type) { 4715 case PORT_DEVICE_NOCHANGE: 4716 case PORT_DEVICE_USER_CREATE: 4717 case PORT_DEVICE_USER_LOGIN: 4718 case PORT_DEVICE_NEW: 4719 FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1); 4720 4721 if (fcp_handle_mapflags(pptr, ptgt, map_entry, 4722 link_cnt, (ptgt) ? 
map_tag[i] : 0, 4723 cause) == TRUE) { 4724 4725 FCP_TGT_TRACE(ptgt, map_tag[i], 4726 FCP_TGT_TRACE_2); 4727 check_finish_init++; 4728 } 4729 break; 4730 4731 case PORT_DEVICE_OLD: 4732 if (ptgt != NULL) { 4733 FCP_TGT_TRACE(ptgt, map_tag[i], 4734 FCP_TGT_TRACE_3); 4735 4736 mutex_enter(&ptgt->tgt_mutex); 4737 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) { 4738 /* 4739 * Must do an in-line wait for I/Os 4740 * to get drained 4741 */ 4742 mutex_exit(&ptgt->tgt_mutex); 4743 mutex_exit(&pptr->port_mutex); 4744 4745 mutex_enter(&ptgt->tgt_mutex); 4746 while (ptgt->tgt_ipkt_cnt || 4747 fcp_outstanding_lun_cmds(ptgt) 4748 == FC_SUCCESS) { 4749 mutex_exit(&ptgt->tgt_mutex); 4750 delay(drv_usectohz(1000000)); 4751 mutex_enter(&ptgt->tgt_mutex); 4752 } 4753 mutex_exit(&ptgt->tgt_mutex); 4754 4755 mutex_enter(&pptr->port_mutex); 4756 mutex_enter(&ptgt->tgt_mutex); 4757 4758 (void) fcp_offline_target(pptr, ptgt, 4759 link_cnt, map_tag[i], 0, 0); 4760 } 4761 mutex_exit(&ptgt->tgt_mutex); 4762 } 4763 check_finish_init++; 4764 break; 4765 4766 case PORT_DEVICE_USER_DELETE: 4767 case PORT_DEVICE_USER_LOGOUT: 4768 if (ptgt != NULL) { 4769 FCP_TGT_TRACE(ptgt, map_tag[i], 4770 FCP_TGT_TRACE_4); 4771 4772 mutex_enter(&ptgt->tgt_mutex); 4773 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) { 4774 (void) fcp_offline_target(pptr, ptgt, 4775 link_cnt, map_tag[i], 1, 0); 4776 } 4777 mutex_exit(&ptgt->tgt_mutex); 4778 } 4779 check_finish_init++; 4780 break; 4781 4782 case PORT_DEVICE_CHANGED: 4783 if (ptgt != NULL) { 4784 FCP_TGT_TRACE(ptgt, map_tag[i], 4785 FCP_TGT_TRACE_5); 4786 4787 if (fcp_device_changed(pptr, ptgt, 4788 map_entry, link_cnt, map_tag[i], 4789 cause) == TRUE) { 4790 check_finish_init++; 4791 } 4792 } else { 4793 if (fcp_handle_mapflags(pptr, ptgt, 4794 map_entry, link_cnt, 0, cause) == TRUE) { 4795 check_finish_init++; 4796 } 4797 } 4798 break; 4799 4800 default: 4801 fcp_log(CE_WARN, pptr->port_dip, 4802 "!Invalid map_type=0x%x", map_entry->map_type); 4803 check_finish_init++; 
4804 break; 4805 } 4806 } 4807 4808 if (check_finish_init && pptr->port_link_cnt == link_cnt) { 4809 ASSERT(i > 0); 4810 (void) fcp_call_finish_init_held(pptr, ptgt, link_cnt, 4811 map_tag[i-1], cause); 4812 } else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) { 4813 fcp_offline_all(pptr, link_cnt, cause); 4814 } 4815 } 4816 4817 /* 4818 * Function: fcp_handle_mapflags 4819 * 4820 * Description: This function creates a target structure if the ptgt passed 4821 * is NULL. It also kicks off the PLOGI if we are not logged 4822 * into the target yet or the PRLI if we are logged into the 4823 * target already. The rest of the treatment is done in the 4824 * callbacks of the PLOGI or PRLI. 4825 * 4826 * Argument: *pptr FCP Port structure. 4827 * *ptgt Target structure. 4828 * *map_entry Array of fc_portmap_t structures. 4829 * link_cnt Link state count. 4830 * tgt_cnt Target state count. 4831 * cause What caused this function to be called. 4832 * 4833 * Return Value: TRUE Failed 4834 * FALSE Succeeded 4835 * 4836 * Notes: pptr->port_mutex must be owned. 4837 */ 4838 static int 4839 fcp_handle_mapflags(struct fcp_port *pptr, struct fcp_tgt *ptgt, 4840 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause) 4841 { 4842 int lcount; 4843 int tcount; 4844 int ret = TRUE; 4845 int alloc; 4846 struct fcp_ipkt *icmd; 4847 struct fcp_lun *pseq_lun = NULL; 4848 uchar_t opcode; 4849 int valid_ptgt_was_passed = FALSE; 4850 4851 ASSERT(mutex_owned(&pptr->port_mutex)); 4852 4853 /* 4854 * This case is possible where the FCTL has come up and done discovery 4855 * before FCP was loaded and attached. FCTL would have discovered the 4856 * devices and later the ULP came online. In this case ULP's would get 4857 * PORT_DEVICE_NOCHANGE but target would be NULL. 
4858 */ 4859 if (ptgt == NULL) { 4860 /* don't already have a target */ 4861 mutex_exit(&pptr->port_mutex); 4862 ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt); 4863 mutex_enter(&pptr->port_mutex); 4864 4865 if (ptgt == NULL) { 4866 fcp_log(CE_WARN, pptr->port_dip, 4867 "!FC target allocation failed"); 4868 return (ret); 4869 } 4870 mutex_enter(&ptgt->tgt_mutex); 4871 ptgt->tgt_statec_cause = cause; 4872 ptgt->tgt_tmp_cnt = 1; 4873 mutex_exit(&ptgt->tgt_mutex); 4874 } else { 4875 valid_ptgt_was_passed = TRUE; 4876 } 4877 4878 /* 4879 * Copy in the target parameters 4880 */ 4881 mutex_enter(&ptgt->tgt_mutex); 4882 ptgt->tgt_d_id = map_entry->map_did.port_id; 4883 ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr; 4884 ptgt->tgt_pd_handle = map_entry->map_pd; 4885 ptgt->tgt_fca_dev = NULL; 4886 4887 /* Copy port and node WWNs */ 4888 bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0], 4889 FC_WWN_SIZE); 4890 bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0], 4891 FC_WWN_SIZE); 4892 4893 if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) && 4894 (map_entry->map_type == PORT_DEVICE_NOCHANGE) && 4895 (map_entry->map_state == PORT_DEVICE_LOGGED_IN) && 4896 valid_ptgt_was_passed) { 4897 /* 4898 * determine if there are any tape LUNs on this target 4899 */ 4900 for (pseq_lun = ptgt->tgt_lun; 4901 pseq_lun != NULL; 4902 pseq_lun = pseq_lun->lun_next) { 4903 if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) && 4904 !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) { 4905 fcp_update_tgt_state(ptgt, FCP_RESET, 4906 FCP_LUN_MARK); 4907 mutex_exit(&ptgt->tgt_mutex); 4908 return (ret); 4909 } 4910 } 4911 } 4912 4913 /* 4914 * If ptgt was NULL when this function was entered, then tgt_node_state 4915 * was never specifically initialized but zeroed out which means 4916 * FCP_TGT_NODE_NONE. 
4917 */ 4918 switch (ptgt->tgt_node_state) { 4919 case FCP_TGT_NODE_NONE: 4920 case FCP_TGT_NODE_ON_DEMAND: 4921 if (FC_TOP_EXTERNAL(pptr->port_topology) && 4922 !fcp_enable_auto_configuration && 4923 map_entry->map_type != PORT_DEVICE_USER_CREATE) { 4924 ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND; 4925 } else if (FC_TOP_EXTERNAL(pptr->port_topology) && 4926 fcp_enable_auto_configuration && 4927 (ptgt->tgt_manual_config_only == 1) && 4928 map_entry->map_type != PORT_DEVICE_USER_CREATE) { 4929 /* 4930 * If auto configuration is set and 4931 * the tgt_manual_config_only flag is set then 4932 * we only want the user to be able to change 4933 * the state through create_on_demand. 4934 */ 4935 ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND; 4936 } else { 4937 ptgt->tgt_node_state = FCP_TGT_NODE_NONE; 4938 } 4939 break; 4940 4941 case FCP_TGT_NODE_PRESENT: 4942 break; 4943 } 4944 /* 4945 * If we are booting from a fabric device, make sure we 4946 * mark the node state appropriately for this target to be 4947 * enumerated 4948 */ 4949 if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) { 4950 if (bcmp((caddr_t)pptr->port_boot_wwn, 4951 (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0], 4952 sizeof (ptgt->tgt_port_wwn)) == 0) { 4953 ptgt->tgt_node_state = FCP_TGT_NODE_NONE; 4954 } 4955 } 4956 mutex_exit(&ptgt->tgt_mutex); 4957 4958 FCP_TRACE(fcp_logq, pptr->port_instbuf, 4959 fcp_trace, FCP_BUF_LEVEL_3, 0, 4960 "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x", 4961 map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id, 4962 map_entry->map_rscn_info.ulp_rscn_count); 4963 4964 mutex_enter(&ptgt->tgt_mutex); 4965 4966 /* 4967 * Reset target OFFLINE state and mark the target BUSY 4968 */ 4969 ptgt->tgt_state &= ~FCP_TGT_OFFLINE; 4970 ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK); 4971 4972 tcount = tgt_cnt ? 
tgt_cnt : ptgt->tgt_change_cnt; 4973 lcount = link_cnt; 4974 4975 mutex_exit(&ptgt->tgt_mutex); 4976 mutex_exit(&pptr->port_mutex); 4977 4978 /* 4979 * if we are already logged in, then we do a PRLI, else 4980 * we do a PLOGI first (to get logged in) 4981 * 4982 * We will not check if we are the PLOGI initiator 4983 */ 4984 opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN && 4985 map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI; 4986 4987 alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t)); 4988 4989 icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0, 0, lcount, tcount, 4990 cause, map_entry->map_rscn_info.ulp_rscn_count); 4991 4992 if (icmd == NULL) { 4993 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29); 4994 /* 4995 * We've exited port_mutex before calling fcp_icmd_alloc, 4996 * we need to make sure we reacquire it before returning. 4997 */ 4998 mutex_enter(&pptr->port_mutex); 4999 return (FALSE); 5000 } 5001 5002 /* TRUE is only returned while target is intended skipped */ 5003 ret = FALSE; 5004 /* discover info about this target */ 5005 if ((fcp_send_els(pptr, ptgt, icmd, opcode, 5006 lcount, tcount, cause)) == DDI_SUCCESS) { 5007 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9); 5008 } else { 5009 fcp_icmd_free(pptr, icmd); 5010 ret = TRUE; 5011 } 5012 mutex_enter(&pptr->port_mutex); 5013 5014 return (ret); 5015 } 5016 5017 /* 5018 * Function: fcp_send_els 5019 * 5020 * Description: Sends an ELS to the target specified by the caller. Supports 5021 * PLOGI and PRLI. 5022 * 5023 * Argument: *pptr Fcp port. 5024 * *ptgt Target to send the ELS to. 
5025 * *icmd Internal packet 5026 * opcode ELS opcode 5027 * lcount Link state change counter 5028 * tcount Target state change counter 5029 * cause What caused the call 5030 * 5031 * Return Value: DDI_SUCCESS 5032 * Others 5033 */ 5034 static int 5035 fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt, 5036 struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause) 5037 { 5038 fc_packet_t *fpkt; 5039 fc_frame_hdr_t *hp; 5040 int internal = 0; 5041 int alloc; 5042 int cmd_len; 5043 int resp_len; 5044 int res = DDI_FAILURE; /* default result */ 5045 int rval = DDI_FAILURE; 5046 5047 ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI); 5048 ASSERT(ptgt->tgt_port == pptr); 5049 5050 FCP_TRACE(fcp_logq, pptr->port_instbuf, 5051 fcp_trace, FCP_BUF_LEVEL_5, 0, 5052 "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode, 5053 (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI"); 5054 5055 if (opcode == LA_ELS_PLOGI) { 5056 cmd_len = sizeof (la_els_logi_t); 5057 resp_len = sizeof (la_els_logi_t); 5058 } else { 5059 ASSERT(opcode == LA_ELS_PRLI); 5060 cmd_len = sizeof (la_els_prli_t); 5061 resp_len = sizeof (la_els_prli_t); 5062 } 5063 5064 if (icmd == NULL) { 5065 alloc = FCP_MAX(sizeof (la_els_logi_t), 5066 sizeof (la_els_prli_t)); 5067 icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0, 0, 5068 lcount, tcount, cause, FC_INVALID_RSCN_COUNT); 5069 if (icmd == NULL) { 5070 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10); 5071 return (res); 5072 } 5073 internal++; 5074 } 5075 fpkt = icmd->ipkt_fpkt; 5076 5077 fpkt->pkt_cmdlen = cmd_len; 5078 fpkt->pkt_rsplen = resp_len; 5079 fpkt->pkt_datalen = 0; 5080 icmd->ipkt_retries = 0; 5081 5082 /* fill in fpkt info */ 5083 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR; 5084 fpkt->pkt_tran_type = FC_PKT_EXCHANGE; 5085 fpkt->pkt_timeout = FCP_ELS_TIMEOUT; 5086 5087 /* get ptr to frame hdr in fpkt */ 5088 hp = &fpkt->pkt_cmd_fhdr; 5089 5090 /* 5091 * fill in frame hdr 5092 */ 5093 hp->r_ctl = R_CTL_ELS_REQ; 
5094 hp->s_id = pptr->port_id; /* source ID */ 5095 hp->d_id = ptgt->tgt_d_id; /* dest ID */ 5096 hp->type = FC_TYPE_EXTENDED_LS; 5097 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ; 5098 hp->seq_id = 0; 5099 hp->rsvd = 0; 5100 hp->df_ctl = 0; 5101 hp->seq_cnt = 0; 5102 hp->ox_id = 0xffff; /* i.e. none */ 5103 hp->rx_id = 0xffff; /* i.e. none */ 5104 hp->ro = 0; 5105 5106 /* 5107 * at this point we have a filled in cmd pkt 5108 * 5109 * fill in the respective info, then use the transport to send 5110 * the packet 5111 * 5112 * for a PLOGI call fc_ulp_login(), and 5113 * for a PRLI call fc_ulp_issue_els() 5114 */ 5115 switch (opcode) { 5116 case LA_ELS_PLOGI: { 5117 struct la_els_logi logi; 5118 5119 bzero(&logi, sizeof (struct la_els_logi)); 5120 5121 hp = &fpkt->pkt_cmd_fhdr; 5122 hp->r_ctl = R_CTL_ELS_REQ; 5123 logi.ls_code.ls_code = LA_ELS_PLOGI; 5124 logi.ls_code.mbz = 0; 5125 5126 FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd, 5127 fpkt->pkt_cmd_acc, sizeof (struct la_els_logi)); 5128 5129 icmd->ipkt_opcode = LA_ELS_PLOGI; 5130 5131 mutex_enter(&pptr->port_mutex); 5132 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) { 5133 5134 mutex_exit(&pptr->port_mutex); 5135 5136 rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1); 5137 if (rval == FC_SUCCESS) { 5138 res = DDI_SUCCESS; 5139 break; 5140 } 5141 5142 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11); 5143 5144 res = fcp_handle_ipkt_errors(pptr, ptgt, icmd, 5145 rval, "PLOGI"); 5146 } else { 5147 FCP_TRACE(fcp_logq, pptr->port_instbuf, 5148 fcp_trace, FCP_BUF_LEVEL_5, 0, 5149 "fcp_send_els1: state change occured" 5150 " for D_ID=0x%x", ptgt->tgt_d_id); 5151 mutex_exit(&pptr->port_mutex); 5152 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12); 5153 } 5154 break; 5155 } 5156 5157 case LA_ELS_PRLI: { 5158 struct la_els_prli prli; 5159 struct fcp_prli *fprli; 5160 5161 bzero(&prli, sizeof (struct la_els_prli)); 5162 5163 hp = &fpkt->pkt_cmd_fhdr; 5164 hp->r_ctl = R_CTL_ELS_REQ; 5165 5166 /* fill in PRLI cmd ELS fields */ 5167 
prli.ls_code = LA_ELS_PRLI; 5168 prli.page_length = 0x10; /* huh? */ 5169 prli.payload_length = sizeof (struct la_els_prli); 5170 5171 icmd->ipkt_opcode = LA_ELS_PRLI; 5172 5173 /* get ptr to PRLI service params */ 5174 fprli = (struct fcp_prli *)prli.service_params; 5175 5176 /* fill in service params */ 5177 fprli->type = 0x08; 5178 fprli->resvd1 = 0; 5179 fprli->orig_process_assoc_valid = 0; 5180 fprli->resp_process_assoc_valid = 0; 5181 fprli->establish_image_pair = 1; 5182 fprli->resvd2 = 0; 5183 fprli->resvd3 = 0; 5184 fprli->obsolete_1 = 0; 5185 fprli->obsolete_2 = 0; 5186 fprli->data_overlay_allowed = 0; 5187 fprli->initiator_fn = 1; 5188 fprli->confirmed_compl_allowed = 1; 5189 5190 if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) { 5191 fprli->target_fn = 1; 5192 } else { 5193 fprli->target_fn = 0; 5194 } 5195 5196 fprli->retry = 1; 5197 fprli->read_xfer_rdy_disabled = 1; 5198 fprli->write_xfer_rdy_disabled = 0; 5199 5200 FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd, 5201 fpkt->pkt_cmd_acc, sizeof (struct la_els_prli)); 5202 5203 /* issue the PRLI request */ 5204 5205 mutex_enter(&pptr->port_mutex); 5206 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) { 5207 5208 mutex_exit(&pptr->port_mutex); 5209 5210 rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt); 5211 if (rval == FC_SUCCESS) { 5212 res = DDI_SUCCESS; 5213 break; 5214 } 5215 5216 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13); 5217 5218 res = fcp_handle_ipkt_errors(pptr, ptgt, icmd, 5219 rval, "PRLI"); 5220 } else { 5221 mutex_exit(&pptr->port_mutex); 5222 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14); 5223 } 5224 break; 5225 } 5226 5227 default: 5228 fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode); 5229 break; 5230 } 5231 5232 FCP_TRACE(fcp_logq, pptr->port_instbuf, 5233 fcp_trace, FCP_BUF_LEVEL_5, 0, 5234 "fcp_send_els: returning %d", res); 5235 5236 if (res != DDI_SUCCESS) { 5237 if (internal) { 5238 fcp_icmd_free(pptr, icmd); 5239 } 5240 } 5241 5242 return (res); 5243 } 5244 5245 5246 /* 5247 
* called internally update the state of all of the tgts and each LUN 5248 * for this port (i.e. each target known to be attached to this port) 5249 * if they are not already offline 5250 * 5251 * must be called with the port mutex owned 5252 * 5253 * acquires and releases the target mutexes for each target attached 5254 * to this port 5255 */ 5256 void 5257 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause) 5258 { 5259 int i; 5260 struct fcp_tgt *ptgt; 5261 5262 ASSERT(mutex_owned(&pptr->port_mutex)); 5263 5264 for (i = 0; i < FCP_NUM_HASH; i++) { 5265 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL; 5266 ptgt = ptgt->tgt_next) { 5267 mutex_enter(&ptgt->tgt_mutex); 5268 fcp_update_tgt_state(ptgt, FCP_SET, state); 5269 ptgt->tgt_change_cnt++; 5270 ptgt->tgt_statec_cause = cause; 5271 ptgt->tgt_tmp_cnt = 1; 5272 ptgt->tgt_done = 0; 5273 mutex_exit(&ptgt->tgt_mutex); 5274 } 5275 } 5276 } 5277 5278 5279 static void 5280 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause) 5281 { 5282 int i; 5283 int ndevs; 5284 struct fcp_tgt *ptgt; 5285 5286 ASSERT(mutex_owned(&pptr->port_mutex)); 5287 5288 for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) { 5289 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL; 5290 ptgt = ptgt->tgt_next) { 5291 ndevs++; 5292 } 5293 } 5294 5295 if (ndevs == 0) { 5296 return; 5297 } 5298 pptr->port_tmp_cnt = ndevs; 5299 5300 for (i = 0; i < FCP_NUM_HASH; i++) { 5301 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL; 5302 ptgt = ptgt->tgt_next) { 5303 (void) fcp_call_finish_init_held(pptr, ptgt, 5304 lcount, ptgt->tgt_change_cnt, cause); 5305 } 5306 } 5307 } 5308 5309 /* 5310 * Function: fcp_update_tgt_state 5311 * 5312 * Description: This function updates the field tgt_state of a target. That 5313 * field is a bitmap and which bit can be set or reset 5314 * individually. The action applied to the target state is also 5315 * applied to all the LUNs belonging to the target (provided the 5316 * LUN is not offline). 
A side effect of applying the state 5317 * modification to the target and the LUNs is the field tgt_trace 5318 * of the target and lun_trace of the LUNs is set to zero. 5319 * 5320 * 5321 * Argument: *ptgt Target structure. 5322 * flag Flag indication what action to apply (set/reset). 5323 * state State bits to update. 5324 * 5325 * Return Value: None 5326 * 5327 * Context: Interrupt, Kernel or User context. 5328 * The mutex of the target (ptgt->tgt_mutex) must be owned when 5329 * calling this function. 5330 */ 5331 void 5332 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state) 5333 { 5334 struct fcp_lun *plun; 5335 5336 ASSERT(mutex_owned(&ptgt->tgt_mutex)); 5337 5338 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) { 5339 /* The target is not offline. */ 5340 if (flag == FCP_SET) { 5341 ptgt->tgt_state |= state; 5342 ptgt->tgt_trace = 0; 5343 } else { 5344 ptgt->tgt_state &= ~state; 5345 } 5346 5347 for (plun = ptgt->tgt_lun; plun != NULL; 5348 plun = plun->lun_next) { 5349 if (!(plun->lun_state & FCP_LUN_OFFLINE)) { 5350 /* The LUN is not offline. */ 5351 if (flag == FCP_SET) { 5352 plun->lun_state |= state; 5353 plun->lun_trace = 0; 5354 } else { 5355 plun->lun_state &= ~state; 5356 } 5357 } 5358 } 5359 } 5360 } 5361 5362 /* 5363 * Function: fcp_update_tgt_state 5364 * 5365 * Description: This function updates the field lun_state of a LUN. That 5366 * field is a bitmap and which bit can be set or reset 5367 * individually. 5368 * 5369 * Argument: *plun LUN structure. 5370 * flag Flag indication what action to apply (set/reset). 5371 * state State bits to update. 5372 * 5373 * Return Value: None 5374 * 5375 * Context: Interrupt, Kernel or User context. 5376 * The mutex of the target (ptgt->tgt_mutex) must be owned when 5377 * calling this function. 
5378 */ 5379 void 5380 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state) 5381 { 5382 struct fcp_tgt *ptgt = plun->lun_tgt; 5383 5384 ASSERT(mutex_owned(&ptgt->tgt_mutex)); 5385 5386 if (!(plun->lun_state & FCP_TGT_OFFLINE)) { 5387 if (flag == FCP_SET) { 5388 plun->lun_state |= state; 5389 } else { 5390 plun->lun_state &= ~state; 5391 } 5392 } 5393 } 5394 5395 /* 5396 * Function: fcp_get_port 5397 * 5398 * Description: This function returns the fcp_port structure from the opaque 5399 * handle passed by the caller. That opaque handle is the handle 5400 * used by fp/fctl to identify a particular local port. That 5401 * handle has been stored in the corresponding fcp_port 5402 * structure. This function is going to walk the global list of 5403 * fcp_port structures till one has a port_fp_handle that matches 5404 * the handle passed by the caller. This function enters the 5405 * mutex fcp_global_mutex while walking the global list and then 5406 * releases it. 5407 * 5408 * Argument: port_handle Opaque handle that fp/fctl uses to identify a 5409 * particular port. 5410 * 5411 * Return Value: NULL Not found. 5412 * Not NULL Pointer to the fcp_port structure. 5413 * 5414 * Context: Interrupt, Kernel or User context. 
5415 */ 5416 static struct fcp_port * 5417 fcp_get_port(opaque_t port_handle) 5418 { 5419 struct fcp_port *pptr; 5420 5421 ASSERT(port_handle != NULL); 5422 5423 mutex_enter(&fcp_global_mutex); 5424 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) { 5425 if (pptr->port_fp_handle == port_handle) { 5426 break; 5427 } 5428 } 5429 mutex_exit(&fcp_global_mutex); 5430 5431 return (pptr); 5432 } 5433 5434 5435 static void 5436 fcp_unsol_callback(fc_packet_t *fpkt) 5437 { 5438 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private; 5439 struct fcp_port *pptr = icmd->ipkt_port; 5440 5441 if (fpkt->pkt_state != FC_PKT_SUCCESS) { 5442 caddr_t state, reason, action, expln; 5443 5444 (void) fc_ulp_pkt_error(fpkt, &state, &reason, 5445 &action, &expln); 5446 5447 fcp_log(CE_WARN, pptr->port_dip, 5448 "!couldn't post response to unsolicited request: " 5449 " state=%s reason=%s rx_id=%x ox_id=%x", 5450 state, reason, fpkt->pkt_cmd_fhdr.ox_id, 5451 fpkt->pkt_cmd_fhdr.rx_id); 5452 } 5453 fcp_icmd_free(pptr, icmd); 5454 } 5455 5456 5457 /* 5458 * Perform general purpose preparation of a response to an unsolicited request 5459 */ 5460 static void 5461 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf, 5462 uchar_t r_ctl, uchar_t type) 5463 { 5464 pkt->pkt_cmd_fhdr.r_ctl = r_ctl; 5465 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id; 5466 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id; 5467 pkt->pkt_cmd_fhdr.type = type; 5468 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT; 5469 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id; 5470 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl; 5471 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt; 5472 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id; 5473 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id; 5474 pkt->pkt_cmd_fhdr.ro = 0; 5475 pkt->pkt_cmd_fhdr.rsvd = 0; 5476 pkt->pkt_comp = fcp_unsol_callback; 5477 pkt->pkt_pd = NULL; 5478 } 5479 5480 5481 /*ARGSUSED*/ 5482 static int 5483 fcp_unsol_prli(struct 
fcp_port *pptr, fc_unsol_buf_t *buf) 5484 { 5485 fc_packet_t *fpkt; 5486 struct la_els_prli prli; 5487 struct fcp_prli *fprli; 5488 struct fcp_ipkt *icmd; 5489 struct la_els_prli *from; 5490 struct fcp_prli *orig; 5491 struct fcp_tgt *ptgt; 5492 int tcount = 0; 5493 int lcount; 5494 5495 from = (struct la_els_prli *)buf->ub_buffer; 5496 orig = (struct fcp_prli *)from->service_params; 5497 5498 if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) != 5499 NULL) { 5500 mutex_enter(&ptgt->tgt_mutex); 5501 tcount = ptgt->tgt_change_cnt; 5502 mutex_exit(&ptgt->tgt_mutex); 5503 } 5504 mutex_enter(&pptr->port_mutex); 5505 lcount = pptr->port_link_cnt; 5506 mutex_exit(&pptr->port_mutex); 5507 5508 if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t), 5509 sizeof (la_els_prli_t), 0, 0, lcount, tcount, 0, 5510 FC_INVALID_RSCN_COUNT)) == NULL) { 5511 return (FC_FAILURE); 5512 } 5513 fpkt = icmd->ipkt_fpkt; 5514 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR; 5515 fpkt->pkt_tran_type = FC_PKT_OUTBOUND; 5516 fpkt->pkt_timeout = FCP_ELS_TIMEOUT; 5517 fpkt->pkt_cmdlen = sizeof (la_els_prli_t); 5518 fpkt->pkt_rsplen = 0; 5519 fpkt->pkt_datalen = 0; 5520 5521 icmd->ipkt_opcode = LA_ELS_PRLI; 5522 5523 bzero(&prli, sizeof (struct la_els_prli)); 5524 fprli = (struct fcp_prli *)prli.service_params; 5525 prli.ls_code = LA_ELS_ACC; 5526 prli.page_length = 0x10; 5527 prli.payload_length = sizeof (struct la_els_prli); 5528 5529 /* fill in service params */ 5530 fprli->type = 0x08; 5531 fprli->resvd1 = 0; 5532 fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid; 5533 fprli->orig_process_associator = orig->orig_process_associator; 5534 fprli->resp_process_assoc_valid = 0; 5535 fprli->establish_image_pair = 1; 5536 fprli->resvd2 = 0; 5537 fprli->resvd3 = 0; 5538 fprli->obsolete_1 = 0; 5539 fprli->obsolete_2 = 0; 5540 fprli->data_overlay_allowed = 0; 5541 fprli->initiator_fn = 1; 5542 fprli->confirmed_compl_allowed = 1; 5543 5544 if 
(fc_ulp_is_name_present("ltct") == FC_SUCCESS) { 5545 fprli->target_fn = 1; 5546 } else { 5547 fprli->target_fn = 0; 5548 } 5549 5550 fprli->retry = 1; 5551 fprli->read_xfer_rdy_disabled = 1; 5552 fprli->write_xfer_rdy_disabled = 0; 5553 5554 /* save the unsol prli payload first */ 5555 FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp, 5556 fpkt->pkt_resp_acc, sizeof (struct la_els_prli)); 5557 5558 FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd, 5559 fpkt->pkt_cmd_acc, sizeof (struct la_els_prli)); 5560 5561 fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 5562 5563 mutex_enter(&pptr->port_mutex); 5564 if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) { 5565 int rval; 5566 mutex_exit(&pptr->port_mutex); 5567 5568 if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) != 5569 FC_SUCCESS) { 5570 if (rval == FC_STATEC_BUSY || rval == FC_OFFLINE) { 5571 fcp_queue_ipkt(pptr, fpkt); 5572 return (FC_SUCCESS); 5573 } 5574 /* Let it timeout */ 5575 fcp_icmd_free(pptr, icmd); 5576 return (FC_FAILURE); 5577 } 5578 } else { 5579 mutex_exit(&pptr->port_mutex); 5580 fcp_icmd_free(pptr, icmd); 5581 return (FC_FAILURE); 5582 } 5583 5584 (void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token); 5585 5586 return (FC_SUCCESS); 5587 } 5588 5589 /* 5590 * Function: fcp_icmd_alloc 5591 * 5592 * Description: This function allocated a fcp_ipkt structure. The pkt_comp 5593 * field is initialized to fcp_icmd_callback. Sometimes it is 5594 * modified by the caller (such as fcp_send_scsi). The 5595 * structure is also tied to the state of the line and of the 5596 * target at a particular time. That link is established by 5597 * setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount 5598 * and tcount which came respectively from pptr->link_cnt and 5599 * ptgt->tgt_change_cnt. 5600 * 5601 * Argument: *pptr Fcp port. 5602 * *ptgt Target (destination of the command). 5603 * cmd_len Length of the command. 5604 * resp_len Length of the expected response. 
 *		data_len	Length of the data.
 *		nodma		Indicates whether the command and response
 *				will be transferred through DMA or not.
 *		lcount		Link state change counter.
 *		tcount		Target state change counter.
 *		cause		Reason that led to this call.
 *		rscn_count	RSCN count known to the caller, or
 *				FC_INVALID_RSCN_COUNT when not applicable.
 *
 * Return Value: NULL		Failed.
 *		 Not NULL	Internal packet address.
 */
static struct fcp_ipkt *
fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
    int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
    uint32_t rscn_count)
{
	int dma_setup = 0;
	fc_packet_t *fpkt;
	struct fcp_ipkt *icmd = NULL;

	/*
	 * One contiguous allocation: the fcp_ipkt itself, then the DMA
	 * cookie array (port_dmacookie_sz), then the FCA private area
	 * (port_priv_pkt_len).  The pointer arithmetic below for
	 * pkt_fca_private and pkt_data_cookie relies on this layout, as
	 * does the matching kmem_free() here and in fcp_icmd_free().
	 */
	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
	    KM_NOSLEEP);
	if (icmd == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!internal packet allocation failed");
		return (NULL);
	}

	/*
	 * initialize the allocated packet
	 */
	icmd->ipkt_nodma = nodma;
	icmd->ipkt_next = icmd->ipkt_prev = NULL;
	icmd->ipkt_lun = NULL;

	/* Tie the packet to the link/target state seen by the caller. */
	icmd->ipkt_link_cnt = lcount;
	icmd->ipkt_change_cnt = tcount;
	icmd->ipkt_cause = cause;

	mutex_enter(&pptr->port_mutex);
	icmd->ipkt_port = pptr;
	mutex_exit(&pptr->port_mutex);

	/* keep track of amt of data to be sent in pkt */
	icmd->ipkt_cmdlen = cmd_len;
	icmd->ipkt_resplen = resp_len;
	icmd->ipkt_datalen = data_len;

	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);

	/* set pkt's private ptr to point to cmd pkt */
	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;

	/* set FCA private ptr to memory just beyond */
	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
	    ((char *)icmd + sizeof (struct fcp_ipkt) +
	    pptr->port_dmacookie_sz);

	/* get ptr to fpkt substruct and fill it in */
	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
	    sizeof (struct fcp_ipkt));

	if (ptgt != NULL) {
		icmd->ipkt_tgt = ptgt;
		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
	}

	/* Default completion; callers such as fcp_send_scsi() override it. */
	fpkt->pkt_comp = fcp_icmd_callback;
	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = data_len;

	/*
	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
	 * rscn_count as fcp knows down to the transport. If a valid count was
	 * passed into this function, we allocate memory to actually pass down
	 * this info.
	 *
	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
	 * basically mean that fcp will not be able to help transport
	 * distinguish if a new RSCN has come after fcp was last informed about
	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
	 * 5068068 where the device might end up going offline in case of RSCN
	 * storms.
	 */
	fpkt->pkt_ulp_rscn_infop = NULL;
	if (rscn_count != FC_INVALID_RSCN_COUNT) {
		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
		if (fpkt->pkt_ulp_rscn_infop == NULL) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "Failed to alloc memory to pass rscn info");
		}
	}

	if (fpkt->pkt_ulp_rscn_infop != NULL) {
		fc_ulp_rscn_info_t *rscnp;

		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
		rscnp->ulp_rscn_count = rscn_count;
	}

	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
		goto fail;
	}
	dma_setup++;

	/*
	 * Must hold target mutex across setting of pkt_pd and call to
	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
	 * away while we're not looking.
	 */
	if (ptgt != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		fpkt->pkt_pd = ptgt->tgt_pd_handle;

		/* ask transport to do its initialization on this pkt */
		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
		    != FC_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fc_ulp_init_packet failed");
			mutex_exit(&ptgt->tgt_mutex);
			goto fail;
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
		    != FC_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fc_ulp_init_packet failed");
			goto fail;
		}
	}

	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & (FCP_STATE_DETACHING |
	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
		int rval;

		mutex_exit(&pptr->port_mutex);

		/* Port is going away: undo the transport initialization. */
		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
		ASSERT(rval == FC_SUCCESS);

		goto fail;
	}

	if (ptgt != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_ipkt_cnt++;
		mutex_exit(&ptgt->tgt_mutex);
	}

	pptr->port_ipkt_cnt++;

	mutex_exit(&pptr->port_mutex);

	return (icmd);

fail:
	/* Unwind in reverse order of setup. */
	if (fpkt->pkt_ulp_rscn_infop != NULL) {
		kmem_free(fpkt->pkt_ulp_rscn_infop,
		    sizeof (fc_ulp_rscn_info_t));
		fpkt->pkt_ulp_rscn_infop = NULL;
	}

	if (dma_setup) {
		fcp_free_dma(pptr, icmd);
	}
	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
	    (size_t)pptr->port_dmacookie_sz);

	return (NULL);
}

/*
 * Function:	fcp_icmd_free
 *
 * Description:	Frees the internal command passed by the caller.
 *
 * Argument:	*pptr		Fcp port.
 *		*icmd		Internal packet to free.
 *
 * Return Value:	None
 */
static void
fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
{
	struct fcp_tgt *ptgt = icmd->ipkt_tgt;

	/* Let the underlying layers do their cleanup. */
	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
	    icmd->ipkt_fpkt);

	/* Release the RSCN info block if fcp_icmd_alloc() attached one. */
	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
		kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
		    sizeof (fc_ulp_rscn_info_t));
	}

	fcp_free_dma(pptr, icmd);

	/* Size must mirror the kmem_zalloc() in fcp_icmd_alloc(). */
	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
	    (size_t)pptr->port_dmacookie_sz);

	/* Drop the outstanding-packet counts taken in fcp_icmd_alloc(). */
	mutex_enter(&pptr->port_mutex);

	if (ptgt) {
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_ipkt_cnt--;
		mutex_exit(&ptgt->tgt_mutex);
	}

	pptr->port_ipkt_cnt--;
	mutex_exit(&pptr->port_mutex);
}

/*
 * Function:	fcp_alloc_dma
 *
 * Description:	Allocates the DMA resources required for the internal
 *		packet.
 *
 * Argument:	*pptr		FCP port.
 *		*icmd		Internal FCP packet.
 *		nodma		Indicates if the Cmd and Resp will be DMAed.
 *		flags		Allocation flags (Sleep or NoSleep).
 *
 * Return Value:	FC_SUCCESS
 *			FC_NOMEM
 */
static int
fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
    int nodma, int flags)
{
	int		rval;
	size_t		real_size;
	uint_t		ccount;
	int		bound = 0;
	int		cmd_resp = 0;
	fc_packet_t	*fpkt;
	ddi_dma_cookie_t	pkt_data_cookie;
	ddi_dma_cookie_t	*cp;
	uint32_t	cnt;

	fpkt = &icmd->ipkt_fc_packet;

	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
	    fpkt->pkt_resp_dma == NULL);

	icmd->ipkt_nodma = nodma;

	if (nodma) {
		/* No DVMA: cmd and resp buffers are plain kernel memory. */
		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
		if (fpkt->pkt_cmd == NULL) {
			goto fail;
		}

		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
		if (fpkt->pkt_resp == NULL) {
			goto fail;
		}
	} else {
		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);

		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
		if (rval == FC_FAILURE) {
			ASSERT(fpkt->pkt_cmd_dma == NULL &&
			    fpkt->pkt_resp_dma == NULL);
			goto fail;
		}
		cmd_resp++;
	}

	if (fpkt->pkt_datalen != 0) {
		/*
		 * set up DMA handle and memory for the data in this packet
		 */
		if (ddi_dma_alloc_handle(pptr->port_dip,
		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
			goto fail;
		}

		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
			goto fail;
		}

		/* was DMA mem size gotten < size asked for/needed ?? */
		if (real_size < fpkt->pkt_datalen) {
			goto fail;
		}

		/* bind DMA address and handle together */
		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
			goto fail;
		}
		bound++;

		/* More S/G segments than the FCA supports: give up. */
		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
			goto fail;
		}

		fpkt->pkt_data_cookie_cnt = ccount;

		/*
		 * Copy the first cookie returned by the bind, then pull
		 * the remaining ones, into the cookie array that
		 * fcp_icmd_alloc() placed just past the fcp_ipkt.
		 */
		cp = fpkt->pkt_data_cookie;
		*cp = pkt_data_cookie;
		cp++;

		for (cnt = 1; cnt < ccount; cnt++, cp++) {
			ddi_dma_nextcookie(fpkt->pkt_data_dma,
			    &pkt_data_cookie);
			*cp = pkt_data_cookie;
		}

	}

	return (FC_SUCCESS);

fail:
	/* Unwind whatever was set up, in reverse order of allocation. */
	if (bound) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
	}

	if (fpkt->pkt_data_dma) {
		if (fpkt->pkt_data) {
			ddi_dma_mem_free(&fpkt->pkt_data_acc);
		}
		ddi_dma_free_handle(&fpkt->pkt_data_dma);
	}

	if (nodma) {
		if (fpkt->pkt_cmd) {
			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
		}
		if (fpkt->pkt_resp) {
			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
		}
	} else {
		if (cmd_resp) {
			fcp_free_cmd_resp(pptr, fpkt);
		}
	}

	return (FC_NOMEM);
}


/*
 * Frees the data DMA resources and the cmd/resp buffers attached to the
 * internal packet; the inverse of fcp_alloc_dma().
 */
static void
fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
{
	fc_packet_t *fpkt = icmd->ipkt_fpkt;

	if (fpkt->pkt_data_dma) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
		if (fpkt->pkt_data) {
			ddi_dma_mem_free(&fpkt->pkt_data_acc);
		}
		ddi_dma_free_handle(&fpkt->pkt_data_dma);
	}

	if (icmd->ipkt_nodma) {
		if (fpkt->pkt_cmd) {
			kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
		}
		if (fpkt->pkt_resp) {
			kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
		}
	} else {
		ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma !=
		    NULL);

		fcp_free_cmd_resp(pptr, fpkt);
	}
}

/*
 * Function:	fcp_lookup_target
 *
 * Description:	Finds a target given a WWN.
 *
 * Argument:	*pptr		FCP port.
 *		*wwn		World Wide Name of the device to look for.
 *
 * Return Value: NULL		No target found
 *		 Not NULL	Target structure
 *
 * Context:	Interrupt context.
 *		The mutex pptr->port_mutex must be owned.
 */
/* ARGSUSED */
static struct fcp_tgt *
fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
{
	int		hash;
	struct fcp_tgt	*ptgt;

	ASSERT(mutex_owned(&pptr->port_mutex));

	hash = FCP_HASH(wwn);

	/* Walk the hash chain; targets marked ORPHAN are skipped. */
	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
	    ptgt = ptgt->tgt_next) {
		if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
		    bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
		    sizeof (ptgt->tgt_port_wwn)) == 0) {
			break;
		}
	}

	return (ptgt);
}


/*
 * Find target structure given a port identifier
 */
static struct fcp_tgt *
fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
{
	fc_portid_t	port_id;
	la_wwn_t	pwwn;
	struct fcp_tgt	*ptgt = NULL;

	/* Translate the D_ID to a port WWN, then look that WWN up. */
	port_id.priv_lilp_posit = 0;
	port_id.port_id = d_id;
	if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
	    &pwwn) == FC_SUCCESS) {
		mutex_enter(&pptr->port_mutex);
		ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
		mutex_exit(&pptr->port_mutex);
	}

	return (ptgt);
}


/*
 * the packet completion callback routine for info cmd pkts
 *
 * this means fpkt pts to a response to either a PLOGI or a PRLI
 *
 * if there is an error an attempt is made to call a routine to resend
 * the command that failed
 */
static void
fcp_icmd_callback(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd;
	struct fcp_port	*pptr;
	struct fcp_tgt
			*ptgt;
	struct la_els_prli	*prli;
	struct la_els_prli	prli_s;
	struct fcp_prli		*fprli;
	struct fcp_lun	*plun;
	int		free_pkt = 1;	/* cleared once icmd is handed off */
	int		rval;
	ls_code_t	resp;
	uchar_t		prli_acc = 0;
	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
	int		lun0_newalloc;

	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;

	/* get ptrs to the port and target structs for the cmd */
	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;

	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));

	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
		/*
		 * For PRLI, also look at the ls_code in the command buffer;
		 * it being LA_ELS_ACC counts as success below.
		 */
		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
		    sizeof (prli_s));
		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
	    ptgt->tgt_d_id);

	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {

		mutex_enter(&ptgt->tgt_mutex);
		if (ptgt->tgt_pd_handle == NULL) {
			/*
			 * in a fabric environment the port device handles
			 * get created only after successful LOGIN into the
			 * transport, so the transport makes this port
			 * device (pd) handle available in this packet, so
			 * save it now
			 */
			ASSERT(fpkt->pkt_pd != NULL);
			ptgt->tgt_pd_handle = fpkt->pkt_pd;
		}
		mutex_exit(&ptgt->tgt_mutex);

		/* which ELS cmd is this response for ?? */
		switch (icmd->ipkt_opcode) {
		case LA_ELS_PLOGI:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
			    ptgt->tgt_d_id,
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));

			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
			    FCP_TGT_TRACE_15);

			/* Note that we are not allocating a new icmd */
			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause) != DDI_SUCCESS) {
				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_16);
				goto fail;
			}
			break;

		case LA_ELS_PRLI:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);

			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
			    FCP_TGT_TRACE_17);

			prli = &prli_s;

			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
			    sizeof (prli_s));

			fprli = (struct fcp_prli *)prli->service_params;

			/* Record capabilities advertised by the device. */
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_icap = fprli->initiator_fn;
			ptgt->tgt_tcap = fprli->target_fn;
			mutex_exit(&ptgt->tgt_mutex);

			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
				/*
				 * this FCP device does not support target mode
				 */
				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_18);
				goto fail;
			}
			if (fprli->retry == 1) {
				fc_ulp_disable_relogin(pptr->port_fp_handle,
				    &ptgt->tgt_port_wwn);
			}

			/* target is no longer offline */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
				    FCP_TGT_MARK);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,1: state change "
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				goto fail;
			}
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			/*
			 * lun 0 should always respond to inquiry, so
			 * get the LUN struct for LUN 0
			 *
			 * Currently we deal with first level of addressing.
			 * If / when we start supporting 0x device types
			 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
			 * this logic will need revisiting.
			 */
			lun0_newalloc = 0;
			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
				/*
				 * no LUN struct for LUN 0 yet exists,
				 * so create one
				 */
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Failed to allocate lun 0 for"
					    " D_ID=%x", ptgt->tgt_d_id);
					goto fail;
				}
				lun0_newalloc = 1;
			}

			/* fill in LUN info */
			mutex_enter(&ptgt->tgt_mutex);
			/*
			 * consider lun 0 as device not connected if it is
			 * offlined or newly allocated
			 */
			if ((plun->lun_state & FCP_LUN_OFFLINE) ||
			    lun0_newalloc) {
				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			ptgt->tgt_lun_cnt = 1;
			ptgt->tgt_report_lun_cnt = 0;
			mutex_exit(&ptgt->tgt_mutex);

			/* Retrieve the rscn count (if a valid one exists) */
			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
				    ->ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			/* send Report Lun request to target */
			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
			    sizeof (struct fcp_reportlun_resp),
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Failed to send REPORT LUN to"
					    " D_ID=%x", ptgt->tgt_d_id);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_icmd_callback,2:state change"
					    " occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&pptr->port_mutex);

				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_19);

				goto fail;
			} else {
				/* REPORT LUN owns its own icmd; free ours. */
				free_pkt = 0;
				fcp_icmd_free(pptr, icmd);
			}
			break;

		default:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp_icmd_callback Invalid opcode");
			goto fail;
		}

		return;
	}


	/*
	 * Other PLOGI failures are not retried as the
	 * transport does it already
	 */
	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
		if (fcp_is_retryable(icmd) &&
		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {

			if (FCP_MUST_RETRY(fpkt)) {
				fcp_queue_ipkt(pptr, fpkt);
				return;
			}

			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
			    fpkt->pkt_reason);

			/*
			 * Retry by recalling the routine that
			 * originally queued this packet
			 */
			mutex_enter(&pptr->port_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				caddr_t msg;

				mutex_exit(&pptr->port_mutex);

				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);

				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
					fpkt->pkt_timeout +=
					    FCP_TIMEOUT_DELTA;
				}

				rval = fc_ulp_issue_els(pptr->port_fp_handle,
				    fpkt);
				if (rval == FC_SUCCESS) {
					return;
				}

				if (rval == FC_STATEC_BUSY ||
				    rval == FC_OFFLINE) {
					fcp_queue_ipkt(pptr, fpkt);
					return;
				}
				(void) fc_ulp_error(rval, &msg);

				fcp_log(CE_NOTE, pptr->port_dip,
				    "!ELS 0x%x failed to d_id=0x%x;"
				    " %s", icmd->ipkt_opcode,
				    ptgt->tgt_d_id, msg);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,3: state change "
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&pptr->port_mutex);
			}
		}
	} else {
		if (fcp_is_retryable(icmd) &&
		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
			if (FCP_MUST_RETRY(fpkt)) {
				fcp_queue_ipkt(pptr, fpkt);
				return;
			}
		}
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
			mutex_exit(&pptr->port_mutex);
			fcp_print_error(fpkt);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_icmd_callback,4: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
		}
	}

fail:
	if (free_pkt) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
	}
}


/*
 * called internally to send an info cmd using the transport
 *
 * sends either an INQ or a REPORT_LUN
 *
 * when the packet is completed fcp_scsi_callback is called
 */
static int
fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
    int lcount, int tcount, int cause, uint32_t rscn_count)
{
	int		nodma;
	struct fcp_ipkt	*icmd;
	struct fcp_tgt	*ptgt;
	struct fcp_port	*pptr;
	fc_frame_hdr_t	*hp;
	fc_packet_t	*fpkt;
	struct fcp_cmd	fcp_cmd;
	struct fcp_cmd	*fcmd;
	union scsi_cdb	*scsi_cdb;

	ASSERT(plun != NULL);

	ptgt = plun->lun_tgt;
	ASSERT(ptgt != NULL);

	pptr = ptgt->tgt_port;
	ASSERT(pptr != NULL);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id,
	    opcode);

	nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;

	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
	    FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
	    rscn_count);

	if (icmd == NULL) {
		return (DDI_FAILURE);
	}

	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	icmd->ipkt_retries = 0;
	icmd->ipkt_opcode = opcode;
	icmd->ipkt_lun = plun;

	/*
	 * With DVMA, build the command locally and copy it out below;
	 * without, write it directly into the kmem command buffer.
	 */
	if (nodma) {
		fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
	} else {
		fcmd = &fcp_cmd;
	}
	bzero(fcmd, sizeof (struct fcp_cmd));

	fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;

	/* Fill in the FC frame header for the outbound command. */
	hp = &fpkt->pkt_cmd_fhdr;

	hp->s_id = pptr->port_id;
	hp->d_id = ptgt->tgt_d_id;
	hp->r_ctl = R_CTL_COMMAND;
	hp->type = FC_TYPE_SCSI_FCP;
	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	hp->rsvd = 0;
	hp->seq_id = 0;
	hp->seq_cnt = 0;
	hp->ox_id = 0xffff;
	hp->rx_id = 0xffff;
	hp->ro = 0;

	bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);

	/*
	 * Request SCSI target for expedited processing
	 */

	/*
	 * Set up for untagged queuing because we do not
	 * know if the fibre device supports queuing.
	 */
	fcmd->fcp_cntl.cntl_reserved_0 = 0;
	fcmd->fcp_cntl.cntl_reserved_1 = 0;
	fcmd->fcp_cntl.cntl_reserved_2 = 0;
	fcmd->fcp_cntl.cntl_reserved_3 = 0;
	fcmd->fcp_cntl.cntl_reserved_4 = 0;
	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;

	switch (opcode) {
	case SCMD_INQUIRY_PAGE83:
		/*
		 * Prepare to get the Inquiry VPD page 83 information
		 */
		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fcmd->fcp_data_len = alloc_len;

		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		fpkt->pkt_comp = fcp_scsi_callback;

		scsi_cdb->scc_cmd = SCMD_INQUIRY;
		scsi_cdb->g0_addr2 = 0x01;	/* EVPD bit */
		scsi_cdb->g0_addr1 = 0x83;	/* page code */
		scsi_cdb->g0_count0 = (uchar_t)alloc_len;
		break;

	case SCMD_INQUIRY:
		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fcmd->fcp_data_len = alloc_len;

		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		fpkt->pkt_comp = fcp_scsi_callback;

		scsi_cdb->scc_cmd = SCMD_INQUIRY;
		scsi_cdb->g0_count0 = SUN_INQSIZE;
		break;

	case SCMD_REPORT_LUN: {
		fc_portid_t	d_id;
		opaque_t	fca_dev;

		ASSERT(alloc_len >= 16);

		/* Refresh the cached FCA device handle for this target. */
		d_id.priv_lilp_posit = 0;
		d_id.port_id = ptgt->tgt_d_id;

		fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);

		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_fca_dev = fca_dev;
		mutex_exit(&ptgt->tgt_mutex);

		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fcmd->fcp_data_len = alloc_len;

		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		fpkt->pkt_comp = fcp_scsi_callback;

		/* Allocation length split across the four CDB count bytes. */
		scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
		scsi_cdb->scc5_count0 = alloc_len & 0xff;
		scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
		scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
		scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
		break;
	}

	default:
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp_send_scsi Invalid opcode");
		break;
	}

	if (!nodma) {
		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
	}

	mutex_enter(&pptr->port_mutex);
	if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

		mutex_exit(&pptr->port_mutex);
		if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
		    FC_SUCCESS) {
			fcp_icmd_free(pptr, icmd);
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	} else {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_send_scsi,1: state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&pptr->port_mutex);
		fcp_icmd_free(pptr, icmd);
		return (DDI_FAILURE);
	}
}


/*
 * called by fcp_scsi_callback to check to handle the case where
 * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
 */
static int
fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
{
	uchar_t	rqlen;
	int	rval = DDI_FAILURE;
	struct scsi_extended_sense	sense_info, *sense;
	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
	struct fcp_port	*pptr = ptgt->tgt_port;

	ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);

	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
		/*
		 * SCSI-II Reserve Release support. Some older FC drives return
		 * Reservation conflict for Report Luns command.
6574 */ 6575 if (icmd->ipkt_nodma) { 6576 rsp->fcp_u.fcp_status.rsp_len_set = 0; 6577 rsp->fcp_u.fcp_status.sense_len_set = 0; 6578 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD; 6579 } else { 6580 fcp_rsp_t new_resp; 6581 6582 FCP_CP_IN(fpkt->pkt_resp, &new_resp, 6583 fpkt->pkt_resp_acc, sizeof (new_resp)); 6584 6585 new_resp.fcp_u.fcp_status.rsp_len_set = 0; 6586 new_resp.fcp_u.fcp_status.sense_len_set = 0; 6587 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD; 6588 6589 FCP_CP_OUT(&new_resp, fpkt->pkt_resp, 6590 fpkt->pkt_resp_acc, sizeof (new_resp)); 6591 } 6592 6593 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data, 6594 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun)); 6595 6596 return (DDI_SUCCESS); 6597 } 6598 6599 sense = &sense_info; 6600 if (!rsp->fcp_u.fcp_status.sense_len_set) { 6601 /* no need to continue if sense length is not set */ 6602 return (rval); 6603 } 6604 6605 /* casting 64-bit integer to 8-bit */ 6606 rqlen = (uchar_t)min(rsp->fcp_sense_len, 6607 sizeof (struct scsi_extended_sense)); 6608 6609 if (rqlen < 14) { 6610 /* no need to continue if request length isn't long enough */ 6611 return (rval); 6612 } 6613 6614 if (icmd->ipkt_nodma) { 6615 /* 6616 * We can safely use fcp_response_len here since the 6617 * only path that calls fcp_check_reportlun, 6618 * fcp_scsi_callback, has already called 6619 * fcp_validate_fcp_response. 
6620 */ 6621 sense = (struct scsi_extended_sense *)(fpkt->pkt_resp + 6622 sizeof (struct fcp_rsp) + rsp->fcp_response_len); 6623 } else { 6624 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) + 6625 rsp->fcp_response_len, sense, fpkt->pkt_resp_acc, 6626 sizeof (struct scsi_extended_sense)); 6627 } 6628 6629 if (!FCP_SENSE_NO_LUN(sense)) { 6630 mutex_enter(&ptgt->tgt_mutex); 6631 /* clear the flag if any */ 6632 ptgt->tgt_state &= ~FCP_TGT_ILLREQ; 6633 mutex_exit(&ptgt->tgt_mutex); 6634 } 6635 6636 if ((sense->es_key == KEY_ILLEGAL_REQUEST) && 6637 (sense->es_add_code == 0x20)) { 6638 if (icmd->ipkt_nodma) { 6639 rsp->fcp_u.fcp_status.rsp_len_set = 0; 6640 rsp->fcp_u.fcp_status.sense_len_set = 0; 6641 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD; 6642 } else { 6643 fcp_rsp_t new_resp; 6644 6645 FCP_CP_IN(fpkt->pkt_resp, &new_resp, 6646 fpkt->pkt_resp_acc, sizeof (new_resp)); 6647 6648 new_resp.fcp_u.fcp_status.rsp_len_set = 0; 6649 new_resp.fcp_u.fcp_status.sense_len_set = 0; 6650 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD; 6651 6652 FCP_CP_OUT(&new_resp, fpkt->pkt_resp, 6653 fpkt->pkt_resp_acc, sizeof (new_resp)); 6654 } 6655 6656 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data, 6657 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun)); 6658 6659 return (DDI_SUCCESS); 6660 } 6661 6662 /* 6663 * This is for the STK library which returns a check condition, 6664 * to indicate device is not ready, manual assistance needed. 6665 * This is to a report lun command when the door is open. 
6666 */ 6667 if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) { 6668 if (icmd->ipkt_nodma) { 6669 rsp->fcp_u.fcp_status.rsp_len_set = 0; 6670 rsp->fcp_u.fcp_status.sense_len_set = 0; 6671 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD; 6672 } else { 6673 fcp_rsp_t new_resp; 6674 6675 FCP_CP_IN(fpkt->pkt_resp, &new_resp, 6676 fpkt->pkt_resp_acc, sizeof (new_resp)); 6677 6678 new_resp.fcp_u.fcp_status.rsp_len_set = 0; 6679 new_resp.fcp_u.fcp_status.sense_len_set = 0; 6680 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD; 6681 6682 FCP_CP_OUT(&new_resp, fpkt->pkt_resp, 6683 fpkt->pkt_resp_acc, sizeof (new_resp)); 6684 } 6685 6686 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data, 6687 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun)); 6688 6689 return (DDI_SUCCESS); 6690 } 6691 6692 if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) || 6693 (FCP_SENSE_NO_LUN(sense))) { 6694 mutex_enter(&ptgt->tgt_mutex); 6695 if ((FCP_SENSE_NO_LUN(sense)) && 6696 (ptgt->tgt_state & FCP_TGT_ILLREQ)) { 6697 ptgt->tgt_state &= ~FCP_TGT_ILLREQ; 6698 mutex_exit(&ptgt->tgt_mutex); 6699 /* 6700 * reconfig was triggred by ILLEGAL REQUEST but 6701 * got ILLEGAL REQUEST again 6702 */ 6703 FCP_TRACE(fcp_logq, pptr->port_instbuf, 6704 fcp_trace, FCP_BUF_LEVEL_3, 0, 6705 "!FCP: Unable to obtain Report Lun data" 6706 " target=%x", ptgt->tgt_d_id); 6707 } else { 6708 if (ptgt->tgt_tid == NULL) { 6709 timeout_id_t tid; 6710 /* 6711 * REPORT LUN data has changed. 
Kick off 6712 * rediscovery 6713 */ 6714 tid = timeout(fcp_reconfigure_luns, 6715 (caddr_t)ptgt, (clock_t)drv_usectohz(1)); 6716 6717 ptgt->tgt_tid = tid; 6718 ptgt->tgt_state |= FCP_TGT_BUSY; 6719 } 6720 if (FCP_SENSE_NO_LUN(sense)) { 6721 ptgt->tgt_state |= FCP_TGT_ILLREQ; 6722 } 6723 mutex_exit(&ptgt->tgt_mutex); 6724 if (FCP_SENSE_REPORTLUN_CHANGED(sense)) { 6725 FCP_TRACE(fcp_logq, pptr->port_instbuf, 6726 fcp_trace, FCP_BUF_LEVEL_3, 0, 6727 "!FCP:Report Lun Has Changed" 6728 " target=%x", ptgt->tgt_d_id); 6729 } else if (FCP_SENSE_NO_LUN(sense)) { 6730 FCP_TRACE(fcp_logq, pptr->port_instbuf, 6731 fcp_trace, FCP_BUF_LEVEL_3, 0, 6732 "!FCP:LU Not Supported" 6733 " target=%x", ptgt->tgt_d_id); 6734 } 6735 } 6736 rval = DDI_SUCCESS; 6737 } 6738 6739 FCP_TRACE(fcp_logq, pptr->port_instbuf, 6740 fcp_trace, FCP_BUF_LEVEL_5, 0, 6741 "D_ID=%x, sense=%x, status=%x", 6742 fpkt->pkt_cmd_fhdr.d_id, sense->es_key, 6743 rsp->fcp_u.fcp_status.scsi_status); 6744 6745 return (rval); 6746 } 6747 6748 /* 6749 * Function: fcp_scsi_callback 6750 * 6751 * Description: This is the callback routine set by fcp_send_scsi() after 6752 * it calls fcp_icmd_alloc(). The SCSI command completed here 6753 * and autogenerated by FCP are: REPORT_LUN, INQUIRY and 6754 * INQUIRY_PAGE83. 
 *
 * Argument:	*fpkt	FC packet used to convey the command
 *
 * Return Value: None
 */
static void
fcp_scsi_callback(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_rsp_info	fcp_rsp_err, *bep;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	struct fcp_rsp	response, *rsp;

	/*
	 * With ipkt_nodma set the FCP response is directly addressable;
	 * otherwise it must be copied out of DMA-mapped memory via
	 * FCP_CP_IN before it can be examined.
	 */
	if (icmd->ipkt_nodma) {
		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
	} else {
		rsp = &response;
		FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
		    sizeof (struct fcp_rsp));
	}

	ptgt = icmd->ipkt_tgt;
	pptr = ptgt->tgt_port;
	plun = icmd->ipkt_lun;

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "SCSI callback state=0x%x for %x, op_code=0x%x, "
	    "status=%x, lun num=%x",
	    fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
	    rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);

	/*
	 * Pre-init LUN GUID with NWWN if it is not a device that
	 * supports multiple luns and we know it's not page83
	 * compliant.  Although using a NWWN is not lun unique,
	 * we will be fine since there is only one lun behind the target
	 * in this case.
	 */
	if ((plun->lun_guid_size == 0) &&
	    (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
	    (fcp_symmetric_device_probe(plun) == 0)) {

		char ascii_wwn[FC_WWN_SIZE*2+1];
		fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
		(void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
	}

	/*
	 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
	 * when they have more data than what is asked in CDB. An overrun
	 * is really when FCP_DL is smaller than the data length in CDB.
	 * In the case here we know that REPORT LUN command we formed within
	 * this binary has correct FCP_DL. So this OVERRUN is due to bad device
	 * behavior. In reality this is FC_SUCCESS.
	 */
	if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
	    (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
	    (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
		fpkt->pkt_state = FC_PKT_SUCCESS;
	}

	/* Transport-level failure: retry, hand off to page83, or give up. */
	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "icmd failed with state=0x%x for %x", fpkt->pkt_state,
		    ptgt->tgt_d_id);

		if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
			/*
			 * Inquiry VPD page command on A5K SES devices would
			 * result in data CRC errors.
			 */
			if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
				(void) fcp_handle_page83(fpkt, icmd, 1);
				return;
			}
		}
		if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
		    FCP_MUST_RETRY(fpkt)) {
			/* Give the next attempt a longer timeout. */
			fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
			fcp_retry_scsi_cmd(fpkt);
			return;
		}

		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
		    FCP_TGT_TRACE_20);

		/* Lock order is port_mutex then tgt_mutex throughout. */
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
			fcp_print_error(fpkt);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_scsi_callback,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
		}
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);

	/* Bail out if a link reset/target change raced with this command. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_scsi_callback,2: state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/* The rsp_info block immediately follows the fcp_rsp structure. */
	if (icmd->ipkt_nodma) {
		bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
		    sizeof (struct fcp_rsp));
	} else {
		bep = &fcp_rsp_err;
		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
		    fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
	}

	if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
		fcp_retry_scsi_cmd(fpkt);
		return;
	}

	/* Retry commands that completed with an FCP-level failure code. */
	if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
	    FCP_NO_FAILURE) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "rsp_code=0x%x, rsp_len_set=0x%x",
		    bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
		fcp_retry_scsi_cmd(fpkt);
		return;
	}

	/*
	 * Device queue full or busy: re-queue the internal packet
	 * (fcp_queue_ipkt presumably re-issues it later).
	 */
	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
	    rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
		fcp_queue_ipkt(pptr, fpkt);
		return;
	}

	/*
	 * Devices that do not support INQUIRY_PAGE83, return check condition
	 * with illegal request as per SCSI spec.
	 * Crossbridge is one such device and Daktari's SES node is another.
	 * We want to ideally enumerate these devices as a non-mpxio devices.
	 * SES nodes (Daktari only currently) are an exception to this.
	 */
	if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
	    (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
		    "check condition. May enumerate as non-mpxio device",
		    ptgt->tgt_d_id, plun->lun_type);

		/*
		 * If we let Daktari's SES be enumerated as a non-mpxio
		 * device, there will be a discrepency in that the other
		 * internal FC disks will get enumerated as mpxio devices.
		 * Applications like luxadm expect this to be consistent.
		 *
		 * So, we put in a hack here to check if this is an SES device
		 * and handle it here.
		 */
		if (plun->lun_type == DTYPE_ESI) {
			/*
			 * Since, pkt_state is actually FC_PKT_SUCCESS
			 * at this stage, we fake a failure here so that
			 * fcp_handle_page83 will create a device path using
			 * the WWN instead of the GUID which is not there anyway
			 */
			fpkt->pkt_state = FC_PKT_LOCAL_RJT;
			(void) fcp_handle_page83(fpkt, icmd, 1);
			return;
		}

		/* Non-SES device: bring the LUN out of its transient states. */
		mutex_enter(&ptgt->tgt_mutex);
		plun->lun_state &= ~(FCP_LUN_OFFLINE |
		    FCP_LUN_MARK | FCP_LUN_BUSY);
		mutex_exit(&ptgt->tgt_mutex);

		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
		int rval = DDI_FAILURE;

		/*
		 * handle cases where report lun isn't supported
		 * by faking up our own REPORT_LUN response or
		 * UNIT ATTENTION
		 */
		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
			rval = fcp_check_reportlun(rsp, fpkt);

			/*
			 * fcp_check_reportlun might have modified the
			 * FCP response. Copy it in again to get an updated
			 * FCP response
			 */
			if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
				rsp = &response;

				FCP_CP_IN(fpkt->pkt_resp, rsp,
				    fpkt->pkt_resp_acc,
				    sizeof (struct fcp_rsp));
			}
		}

		if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
			if (rval == DDI_SUCCESS) {
				/* Faked response handled; just clean up. */
				(void) fcp_call_finish_init(pptr, ptgt,
				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
				    icmd->ipkt_cause);
				fcp_icmd_free(pptr, icmd);
			} else {
				fcp_retry_scsi_cmd(fpkt);
			}

			return;
		}
	} else {
		/* A clean REPORT_LUN clears the pending ILLEGAL REQUEST flag. */
		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);

	/* Make the DMA'd data visible to the CPU before parsing it. */
	(void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0, DDI_DMA_SYNC_FORCPU);

	/* Dispatch to the opcode-specific response handler. */
	switch (icmd->ipkt_opcode) {
	case SCMD_INQUIRY:
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
		fcp_handle_inquiry(fpkt, icmd);
		break;

	case SCMD_REPORT_LUN:
		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
		    FCP_TGT_TRACE_22);
		fcp_handle_reportlun(fpkt, icmd);
		break;

	case SCMD_INQUIRY_PAGE83:
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
		(void) fcp_handle_page83(fpkt, icmd, 0);
		break;

	default:
		fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		break;
	}
}


/*
 * Retry (or finally fail) an internal SCSI command whose completion was
 * unsatisfactory.  Retries are bounded by FCP_MAX_RETRIES and by
 * fcp_is_retryable(); a command overtaken by a target state change is
 * abandoned instead of retried.
 */
static void
fcp_retry_scsi_cmd(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
	struct fcp_port	*pptr = ptgt->tgt_port;

	if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
	    fcp_is_retryable(icmd)) {
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
			mutex_exit(&pptr->port_mutex);
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "Retrying %s to %x; state=%x, reason=%x",
			    (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
			    "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
			    fpkt->pkt_state, fpkt->pkt_reason);

			fcp_queue_ipkt(pptr, fpkt);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_retry_scsi_cmd,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
			(void) fcp_call_finish_init(pptr, ptgt,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause);
			fcp_icmd_free(pptr, icmd);
		}
	} else {
		/* Out of retries (or not retryable): report and give up. */
		fcp_print_error(fpkt);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
	}
}

/*
 * Function: fcp_handle_page83
 *
 * Description: Treats the response to INQUIRY_PAGE83.
 *
 * Argument:	*fpkt	FC packet used to convey the command.
 *		*icmd	Original fcp_ipkt structure.
7089 * ignore_page83_data 7090 * if it's 1, that means it's a special devices's 7091 * page83 response, it should be enumerated under mpxio 7092 * 7093 * Return Value: None 7094 */ 7095 static void 7096 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd, 7097 int ignore_page83_data) 7098 { 7099 struct fcp_port *pptr; 7100 struct fcp_lun *plun; 7101 struct fcp_tgt *ptgt; 7102 uchar_t dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE]; 7103 int fail = 0; 7104 ddi_devid_t devid; 7105 char *guid = NULL; 7106 int ret; 7107 7108 ASSERT(icmd != NULL && fpkt != NULL); 7109 7110 pptr = icmd->ipkt_port; 7111 ptgt = icmd->ipkt_tgt; 7112 plun = icmd->ipkt_lun; 7113 7114 if (fpkt->pkt_state == FC_PKT_SUCCESS) { 7115 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7); 7116 7117 FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc, 7118 SCMD_MAX_INQUIRY_PAGE83_SIZE); 7119 7120 FCP_TRACE(fcp_logq, pptr->port_instbuf, 7121 fcp_trace, FCP_BUF_LEVEL_5, 0, 7122 "fcp_handle_page83: port=%d, tgt D_ID=0x%x, " 7123 "dtype=0x%x, lun num=%x", 7124 pptr->port_instance, ptgt->tgt_d_id, 7125 dev_id_page[0], plun->lun_num); 7126 7127 ret = ddi_devid_scsi_encode( 7128 DEVID_SCSI_ENCODE_VERSION_LATEST, 7129 NULL, /* driver name */ 7130 (unsigned char *) &plun->lun_inq, /* standard inquiry */ 7131 sizeof (plun->lun_inq), /* size of standard inquiry */ 7132 NULL, /* page 80 data */ 7133 0, /* page 80 len */ 7134 dev_id_page, /* page 83 data */ 7135 SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */ 7136 &devid); 7137 7138 if (ret == DDI_SUCCESS) { 7139 7140 guid = ddi_devid_to_guid(devid); 7141 7142 if (guid) { 7143 /* 7144 * Check our current guid. If it's non null 7145 * and it has changed, we need to copy it into 7146 * lun_old_guid since we might still need it. 7147 */ 7148 if (plun->lun_guid && 7149 strcmp(guid, plun->lun_guid)) { 7150 unsigned int len; 7151 7152 /* 7153 * If the guid of the LUN changes, 7154 * reconfiguration should be triggered 7155 * to reflect the changes. 7156 * i.e. 
we should offline the LUN with 7157 * the old guid, and online the LUN with 7158 * the new guid. 7159 */ 7160 plun->lun_state |= FCP_LUN_CHANGED; 7161 7162 if (plun->lun_old_guid) { 7163 kmem_free(plun->lun_old_guid, 7164 plun->lun_old_guid_size); 7165 } 7166 7167 len = plun->lun_guid_size; 7168 plun->lun_old_guid_size = len; 7169 7170 plun->lun_old_guid = kmem_zalloc(len, 7171 KM_NOSLEEP); 7172 7173 if (plun->lun_old_guid) { 7174 /* 7175 * The alloc was successful then 7176 * let's do the copy. 7177 */ 7178 bcopy(plun->lun_guid, 7179 plun->lun_old_guid, len); 7180 } else { 7181 fail = 1; 7182 plun->lun_old_guid_size = 0; 7183 } 7184 } 7185 if (!fail) { 7186 if (fcp_copy_guid_2_lun_block( 7187 plun, guid)) { 7188 fail = 1; 7189 } 7190 } 7191 ddi_devid_free_guid(guid); 7192 7193 } else { 7194 FCP_TRACE(fcp_logq, pptr->port_instbuf, 7195 fcp_trace, FCP_BUF_LEVEL_2, 0, 7196 "fcp_handle_page83: unable to create " 7197 "GUID"); 7198 7199 /* couldn't create good guid from devid */ 7200 fail = 1; 7201 } 7202 ddi_devid_free(devid); 7203 7204 } else if (ret == DDI_NOT_WELL_FORMED) { 7205 /* NULL filled data for page 83 */ 7206 FCP_TRACE(fcp_logq, pptr->port_instbuf, 7207 fcp_trace, FCP_BUF_LEVEL_2, 0, 7208 "fcp_handle_page83: retry GUID"); 7209 7210 icmd->ipkt_retries = 0; 7211 fcp_retry_scsi_cmd(fpkt); 7212 return; 7213 } else { 7214 FCP_TRACE(fcp_logq, pptr->port_instbuf, 7215 fcp_trace, FCP_BUF_LEVEL_2, 0, 7216 "fcp_handle_page83: bad ddi_devid_scsi_encode %x", 7217 ret); 7218 /* 7219 * Since the page83 validation 7220 * introduced late, we are being 7221 * tolerant to the existing devices 7222 * that already found to be working 7223 * under mpxio, like A5200's SES device, 7224 * its page83 response will not be standard-compliant, 7225 * but we still want it to be enumerated under mpxio. 
7226 */ 7227 if (fcp_symmetric_device_probe(plun) != 0) { 7228 fail = 1; 7229 } 7230 } 7231 7232 } else { 7233 /* bad packet state */ 7234 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8); 7235 7236 /* 7237 * For some special devices (A5K SES and Daktari's SES devices), 7238 * they should be enumerated under mpxio 7239 * or "luxadm dis" will fail 7240 */ 7241 if (ignore_page83_data) { 7242 fail = 0; 7243 } else { 7244 fail = 1; 7245 } 7246 FCP_TRACE(fcp_logq, pptr->port_instbuf, 7247 fcp_trace, FCP_BUF_LEVEL_2, 0, 7248 "!Devid page cmd failed. " 7249 "fpkt_state: %x fpkt_reason: %x", 7250 "ignore_page83: %d", 7251 fpkt->pkt_state, fpkt->pkt_reason, 7252 ignore_page83_data); 7253 } 7254 7255 mutex_enter(&pptr->port_mutex); 7256 mutex_enter(&plun->lun_mutex); 7257 /* 7258 * If lun_cip is not NULL, then we needn't update lun_mpxio to avoid 7259 * mismatch between lun_cip and lun_mpxio. 7260 */ 7261 if (plun->lun_cip == NULL) { 7262 /* 7263 * If we don't have a guid for this lun it's because we were 7264 * unable to glean one from the page 83 response. Set the 7265 * control flag to 0 here to make sure that we don't attempt to 7266 * enumerate it under mpxio. 7267 */ 7268 if (fail || pptr->port_mpxio == 0) { 7269 plun->lun_mpxio = 0; 7270 } else { 7271 plun->lun_mpxio = 1; 7272 } 7273 } 7274 mutex_exit(&plun->lun_mutex); 7275 mutex_exit(&pptr->port_mutex); 7276 7277 mutex_enter(&ptgt->tgt_mutex); 7278 plun->lun_state &= 7279 ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY); 7280 mutex_exit(&ptgt->tgt_mutex); 7281 7282 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt, 7283 icmd->ipkt_change_cnt, icmd->ipkt_cause); 7284 7285 fcp_icmd_free(pptr, icmd); 7286 } 7287 7288 /* 7289 * Function: fcp_handle_inquiry 7290 * 7291 * Description: Called by fcp_scsi_callback to handle the response to an 7292 * INQUIRY request. 7293 * 7294 * Argument: *fpkt FC packet used to convey the command. 7295 * *icmd Original fcp_ipkt structure. 
 *
 * Return Value: None
 */
static void
fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
{
	struct fcp_port	*pptr;
	struct fcp_lun	*plun;
	struct fcp_tgt	*ptgt;
	uchar_t		dtype;
	uchar_t		pqual;
	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;

	ASSERT(icmd != NULL && fpkt != NULL);

	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;
	plun = icmd->ipkt_lun;

	/* Cache the standard INQUIRY data in the LUN structure. */
	FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
	    sizeof (struct scsi_inquiry));

	/* Split inq_dtype into device type (low 5 bits) and qualifier. */
	dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
	pqual = plun->lun_inq.inq_dtype >> 5;

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
	    "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
	    plun->lun_num, dtype, pqual);

	if (pqual != 0) {
		/*
		 * Non-zero peripheral qualifier: the LUN is not a usable
		 * device; log it and stop discovery of this LUN.
		 */
		fcp_log(CE_CONT, pptr->port_dip,
		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
		    "Device type=0x%x Peripheral qual=0x%x\n",
		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
		    "Device type=0x%x Peripheral qual=0x%x\n",
		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);

		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);

		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * If the device is already initialized, check the dtype
	 * for a change. If it has changed then update the flags
	 * so the create_luns will offline the old device and
	 * create the new device. Refer to bug: 4764752
	 */
	if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
		plun->lun_state |= FCP_LUN_CHANGED;
	}
	plun->lun_type = plun->lun_inq.inq_dtype;

	/*
	 * This code is setting/initializing the throttling in the FCA
	 * driver.
	 */
	mutex_enter(&pptr->port_mutex);
	if (!pptr->port_notify) {
		if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
			uint32_t cmd = 0;
			/*
			 * NOTE(review): the masking terms below are no-ops
			 * because cmd is 0 at this point, so the expression
			 * reduces to FC_NOTIFY_THROTTLE |
			 * (FCP_SVE_THROTTLE << 8) regardless of the unusual
			 * operator precedence (>> binds tighter than &).
			 * Left as-is to preserve behavior.
			 */
			cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
			    ((cmd & 0xFFFFFF00 >> 8) |
			    FCP_SVE_THROTTLE << 8));
			pptr->port_notify = 1;
			/* Drop the port mutex across the upcall. */
			mutex_exit(&pptr->port_mutex);
			(void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
			mutex_enter(&pptr->port_mutex);
		}
	}

	if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_handle_inquiry,1:state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&pptr->port_mutex);

		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
	mutex_exit(&pptr->port_mutex);

	/* Retrieve the rscn count (if a valid one exists) */
	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
		rscn_count = ((fc_ulp_rscn_info_t *)
		    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
	} else {
		rscn_count = FC_INVALID_RSCN_COUNT;
	}

	/*
	 * Issue INQUIRY VPD page 0x83 to uniquely identify this
	 * logical unit.
	 */
	if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
	    SCMD_MAX_INQUIRY_PAGE83_SIZE,
	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
		fcp_log(CE_WARN, NULL, "!failed to send page 83");
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
	}

	/*
	 * The page 0x83 request was issued above; the original INQUIRY
	 * packet is no longer needed either way.
	 */
	fcp_icmd_free(pptr, icmd);
}

/*
 * Function: fcp_handle_reportlun
 *
 * Description: Called by fcp_scsi_callback to handle the response to a
 *		REPORT_LUN request.
 *
 * Argument:	*fpkt	FC packet used to convey the command.
 *		*icmd	Original fcp_ipkt structure.
 *
 * Return Value: None
 */
static void
fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
{
	int				i;
	int				nluns_claimed;
	int				nluns_bufmax;
	int				len;
	uint16_t			lun_num;
	uint32_t			rscn_count = FC_INVALID_RSCN_COUNT;
	struct fcp_port			*pptr;
	struct fcp_tgt			*ptgt;
	struct fcp_lun			*plun;
	struct fcp_reportlun_resp	*report_lun;

	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;
	len = fpkt->pkt_datalen;

	/* Need at least a REPORT LUNS header and a scratch copy of the data. */
	if ((len < FCP_LUN_HEADER) ||
	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
	    fpkt->pkt_datalen);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
	    pptr->port_instance, ptgt->tgt_d_id);

	/*
	 * Get the number of luns (which is supplied as LUNS * 8) the
	 * device claims it has.
	 */
	nluns_claimed = BE_32(report_lun->num_lun) >> 3;

	/*
	 * Get the maximum number of luns the buffer submitted can hold.
	 */
	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;

	/*
	 * Due to limitations of certain hardware, we support only 16 bit LUNs
	 */
	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
		kmem_free(report_lun, len);

		fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
		    " 0x%x number of LUNs for target=%x", nluns_claimed,
		    ptgt->tgt_d_id);

		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * If there are more LUNs than we have allocated memory for,
	 * allocate more space and send down yet another report lun if
	 * the maximum number of attempts hasn't been reached.
	 */
	mutex_enter(&ptgt->tgt_mutex);

	if ((nluns_claimed > nluns_bufmax) &&
	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {

		struct fcp_lun *plun;

		ptgt->tgt_report_lun_cnt++;
		plun = ptgt->tgt_lun;
		ASSERT(plun != NULL);
		mutex_exit(&ptgt->tgt_mutex);

		kmem_free(report_lun, len);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "!Dynamically discovered %d LUNs for D_ID=%x",
		    nluns_claimed, ptgt->tgt_d_id);

		/* Retrieve the rscn count (if a valid one exists) */
		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
			rscn_count = ((fc_ulp_rscn_info_t *)
			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
			    ulp_rscn_count;
		} else {
			rscn_count = FC_INVALID_RSCN_COUNT;
		}

		/* Re-issue REPORT LUNS with a buffer sized for the claim. */
		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
			(void) fcp_call_finish_init(pptr, ptgt,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause);
		}

		fcp_icmd_free(pptr, icmd);
		return;
	}

	/* Retry budget exhausted: process what fits and log the shortfall. */
	if (nluns_claimed > nluns_bufmax) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
		    " Number of LUNs lost=%x",
		    ptgt->tgt_port_wwn.raw_wwn[0],
		    ptgt->tgt_port_wwn.raw_wwn[1],
		    ptgt->tgt_port_wwn.raw_wwn[2],
		    ptgt->tgt_port_wwn.raw_wwn[3],
		    ptgt->tgt_port_wwn.raw_wwn[4],
		    ptgt->tgt_port_wwn.raw_wwn[5],
		    ptgt->tgt_port_wwn.raw_wwn[6],
		    ptgt->tgt_port_wwn.raw_wwn[7],
		    nluns_claimed - nluns_bufmax);

		nluns_claimed = nluns_bufmax;
	}
	ptgt->tgt_lun_cnt = nluns_claimed;

	/*
	 * Identify missing LUNs and print warning messages
	 */
	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
		int offline;
		int exists = 0;

		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;

		/* Search the response for this known LUN number. */
		for (i = 0; i < nluns_claimed && exists == 0; i++) {
			uchar_t *lun_string;

			lun_string = (uchar_t *)&(report_lun->lun_string[i]);

			switch (lun_string[0] & 0xC0) {
			case FCP_LUN_ADDRESSING:
			case FCP_PD_ADDRESSING:
				/* 14-bit LUN: low 6 bits of byte 0 + byte 1. */
				lun_num = ((lun_string[0] & 0x3F) << 8) |
				    lun_string[1];
				if (plun->lun_num == lun_num) {
					exists++;
					break;
				}
				break;

			default:
				break;
			}
		}

		if (!exists && !offline) {
			mutex_exit(&ptgt->tgt_mutex);

			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
				/*
				 * set disappear flag when device was connected
				 */
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					plun->lun_state |= FCP_LUN_DISAPPEARED;
				}
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x disappeared",
					    plun->lun_num, ptgt->tgt_d_id);
				}
				mutex_enter(&ptgt->tgt_mutex);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_5, 0,
				    "fcp_handle_reportlun,1: state change"
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				kmem_free(report_lun, len);
				(void) fcp_call_finish_init(pptr, ptgt,
				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
				    icmd->ipkt_cause);
				fcp_icmd_free(pptr, icmd);
				return;
			}
		} else if (exists) {
			/*
			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
			 * actually exists in REPORT_LUN response
			 */
			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
				plun->lun_state &=
				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			if (offline || plun->lun_num == 0) {
				if (plun->lun_state & FCP_LUN_DISAPPEARED) {
					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
					mutex_exit(&ptgt->tgt_mutex);
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x reappeared",
					    plun->lun_num, ptgt->tgt_d_id);
					mutex_enter(&ptgt->tgt_mutex);
				}
			}
		}
	}

	/* tmp_cnt counts outstanding per-LUN INQUIRYs (at least 1). */
	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);

	/* scan each lun */
	for (i = 0; i < nluns_claimed; i++) {
		uchar_t	*lun_string;

		lun_string = (uchar_t *)&(report_lun->lun_string[i]);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
		    lun_string[0]);

		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];

			/* We will skip masked LUNs because of the blacklist. */
			if (fcp_lun_blacklist != NULL) {
				mutex_enter(&ptgt->tgt_mutex);
				if (fcp_should_mask(&ptgt->tgt_port_wwn,
				    lun_num) == TRUE) {
					ptgt->tgt_lun_cnt--;
					mutex_exit(&ptgt->tgt_mutex);
					break;
				}
				mutex_exit(&ptgt->tgt_mutex);
			}

			/* see if this LUN is already allocated */
			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun allocation failed"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, lun_num);
					break;
				}
			}

			mutex_enter(&plun->lun_tgt->tgt_mutex);
			/* convert to LUN */
			plun->lun_addr.ent_addr_0 =
			    BE_16(*(uint16_t *)&(lun_string[0]));
			plun->lun_addr.ent_addr_1 =
			    BE_16(*(uint16_t *)&(lun_string[2]));
			plun->lun_addr.ent_addr_2 =
			    BE_16(*(uint16_t *)&(lun_string[4]));
			plun->lun_addr.ent_addr_3 =
			    BE_16(*(uint16_t *)&(lun_string[6]));

			plun->lun_num = lun_num;
			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			mutex_exit(&plun->lun_tgt->tgt_mutex);

			/* Retrieve the rscn count (if a valid one exists) */
			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
				    ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_tgt->tgt_mutex);
				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!failed to send INQUIRY"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, plun->lun_num);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_handle_reportlun,2: state"
					    " change occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&plun->lun_tgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
			} else {
				/* INQUIRY issued; its callback decrements. */
				continue;
			}
			break;

		case FCP_VOLUME_ADDRESSING:
			/* FALLTHROUGH */
		default:
			fcp_log(CE_WARN, NULL,
			    "!Unsupported LUN Addressing method %x "
			    "in response to REPORT_LUN", lun_string[0]);
			break;
		}

		/*
		 * each time through this loop we should decrement
		 * the tmp_cnt by one -- since we go through this loop
		 * one time for each LUN, the tmp_cnt should never be <=0
		 */
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	if (i == 0) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	kmem_free(report_lun, len);
	fcp_icmd_free(pptr, icmd);
}


/*
 * called internally to return a LUN given a target and a LUN number
 *
 * Returns the matching fcp_lun from the target's LUN list, or NULL if
 * no LUN with that number exists.  Acquires and releases tgt_mutex.
 */
static struct fcp_lun *
fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
{
	struct fcp_lun	*plun;

	mutex_enter(&ptgt->tgt_mutex);
	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
		if (plun->lun_num == lun_num) {
			mutex_exit(&ptgt->tgt_mutex);
			return (plun);
		}
	}
	mutex_exit(&ptgt->tgt_mutex);

	return (NULL);
}


/*
 * handle finishing one target for fcp_finish_init
 *
 * return true (non-zero) if we want finish_init to continue with the
 * next target
 *
 * called with the port mutex held
 */
/*ARGSUSED*/
static int
fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int cause)
{
	int	rval = 1;
	ASSERT(pptr != NULL);
	ASSERT(ptgt != NULL);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
	    ptgt->tgt_state);

	ASSERT(mutex_owned(&pptr->port_mutex));

	if ((pptr->port_link_cnt != link_cnt) ||
	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
		/*
		 * oh oh -- another link reset or target change
		 * must have occurred while we are in here
		 */
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);

		/* Tell the caller not to continue with the next target. */
		return (0);
	} else {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
	}

	mutex_enter(&ptgt->tgt_mutex);

	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
		/*
		 * tgt is not offline -- is it marked (i.e. needs
		 * to be offlined) ??
		 */
		if (ptgt->tgt_state & FCP_TGT_MARK) {
			/*
			 * this target not offline *and*
			 * marked
			 */
			ptgt->tgt_state &= ~FCP_TGT_MARK;
			rval = fcp_offline_target(pptr, ptgt, link_cnt,
			    tgt_cnt, 0, 0);
		} else {
			ptgt->tgt_state &= ~FCP_TGT_BUSY;

			/* create the LUNs */
			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
				    cause);
				ptgt->tgt_device_created = 1;
			} else {
				/* On-demand node: just refresh LUN state. */
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_BUSY);
			}
		}
	}

	mutex_exit(&ptgt->tgt_mutex);

	return (rval);
}


/*
 * this routine is called to finish port initialization
 *
 * Each port has a "temp" counter -- when a state change happens (e.g.
 * port online), the temp count is set to the number of devices in the map.
 * Then, as each device gets "discovered", the temp counter is decremented
 * by one. When this count reaches zero we know that all of the devices
 * in the map have been discovered (or an error has occurred), so we can
 * then finish initialization -- which is done by this routine (well, this
 * and fcp_finish_tgt())
 *
 * acquires and releases the global mutex
 *
 * called with the port mutex owned
 */
static void
fcp_finish_init(struct fcp_port *pptr)
{
#ifdef	DEBUG
	/* Record the call stack of the finisher for post-mortem debugging. */
	bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
	pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
	    FCP_STACK_DEPTH);
#endif /* DEBUG */

	ASSERT(mutex_owned(&pptr->port_mutex));

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
	    " entering; ipkt count=%d", pptr->port_ipkt_cnt);

	/*
	 * Promote ONLINING to ONLINE unless the port is being suspended,
	 * detached, or powered down in the meantime.
	 */
	if ((pptr->port_state & FCP_STATE_ONLINING) &&
	    !(pptr->port_state & (FCP_STATE_SUSPENDED |
	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
		pptr->port_state &= ~FCP_STATE_ONLINING;
		pptr->port_state |= FCP_STATE_ONLINE;
	}

	/* Wake up threads waiting on config done */
	cv_broadcast(&pptr->port_config_cv);
}


/*
 * called from fcp_finish_init to create the LUNs for a target
 *
 * called with the port mutex owned
 */
static void
fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
{
	struct fcp_lun	*plun;
	struct fcp_port	*pptr;
	child_info_t	*cip = NULL;

	ASSERT(ptgt != NULL);
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	pptr = ptgt->tgt_port;

	ASSERT(pptr != NULL);

	/* scan all LUNs for this target */
	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
		if (plun->lun_state & FCP_LUN_OFFLINE) {
			continue;
		}

		if (plun->lun_state & FCP_LUN_MARK) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns:
offlining marked LUN!"); 7942 fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0); 7943 continue; 7944 } 7945 7946 plun->lun_state &= ~FCP_LUN_BUSY; 7947 7948 /* 7949 * There are conditions in which FCP_LUN_INIT flag is cleared 7950 * but we have a valid plun->lun_cip. To cover this case also 7951 * CLEAR_BUSY whenever we have a valid lun_cip. 7952 */ 7953 if (plun->lun_mpxio && plun->lun_cip && 7954 (!fcp_pass_to_hp(pptr, plun, plun->lun_cip, 7955 FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt, 7956 0, 0))) { 7957 FCP_TRACE(fcp_logq, pptr->port_instbuf, 7958 fcp_trace, FCP_BUF_LEVEL_2, 0, 7959 "fcp_create_luns: enable lun %p failed!", 7960 plun); 7961 } 7962 7963 if (plun->lun_state & FCP_LUN_INIT && 7964 !(plun->lun_state & FCP_LUN_CHANGED)) { 7965 continue; 7966 } 7967 7968 if (cause == FCP_CAUSE_USER_CREATE) { 7969 continue; 7970 } 7971 7972 FCP_TRACE(fcp_logq, pptr->port_instbuf, 7973 fcp_trace, FCP_BUF_LEVEL_6, 0, 7974 "create_luns: passing ONLINE elem to HP thread"); 7975 7976 /* 7977 * If lun has changed, prepare for offlining the old path. 7978 * Do not offline the old path right now, since it may be 7979 * still opened. 7980 */ 7981 if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) { 7982 fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt); 7983 } 7984 7985 /* pass an ONLINE element to the hotplug thread */ 7986 if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE, 7987 link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) { 7988 7989 /* 7990 * We can not synchronous attach (i.e pass 7991 * NDI_ONLINE_ATTACH) here as we might be 7992 * coming from an interrupt or callback 7993 * thread. 
7994 */ 7995 if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE, 7996 link_cnt, tgt_cnt, 0, 0)) { 7997 fcp_log(CE_CONT, pptr->port_dip, 7998 "Can not ONLINE LUN; D_ID=%x, LUN=%x\n", 7999 plun->lun_tgt->tgt_d_id, plun->lun_num); 8000 } 8001 } 8002 } 8003 } 8004 8005 8006 /* 8007 * function to online/offline devices 8008 */ 8009 static int 8010 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int online, 8011 int lcount, int tcount, int flags) 8012 { 8013 int rval = NDI_FAILURE; 8014 int circ; 8015 child_info_t *ccip; 8016 struct fcp_port *pptr = plun->lun_tgt->tgt_port; 8017 int is_mpxio = pptr->port_mpxio; 8018 dev_info_t *cdip, *pdip; 8019 char *devname; 8020 8021 FCP_TRACE(fcp_logq, pptr->port_instbuf, 8022 fcp_trace, FCP_BUF_LEVEL_2, 0, 8023 "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x " 8024 "flags=%x mpxio=%x\n", 8025 plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags, 8026 plun->lun_mpxio); 8027 8028 /* 8029 * lun_mpxio needs checking here because we can end up in a race 8030 * condition where this task has been dispatched while lun_mpxio is 8031 * set, but an earlier FCP_ONLINE task for the same LUN tried to 8032 * enable MPXIO for the LUN, but was unable to, and hence cleared 8033 * the flag. We rely on the serialization of the tasks here. We return 8034 * NDI_SUCCESS so any callers continue without reporting spurious 8035 * errors, and the still think we're an MPXIO LUN. 8036 */ 8037 8038 if (online == FCP_MPXIO_PATH_CLEAR_BUSY || 8039 online == FCP_MPXIO_PATH_SET_BUSY) { 8040 if (plun->lun_mpxio) { 8041 rval = fcp_update_mpxio_path(plun, cip, online); 8042 } else { 8043 rval = NDI_SUCCESS; 8044 } 8045 return (rval); 8046 } 8047 8048 /* 8049 * Explicit devfs_clean() due to ndi_devi_offline() not 8050 * executing devfs_clean() if parent lock is held. 
8051 */ 8052 ASSERT(!servicing_interrupt()); 8053 if (online == FCP_OFFLINE) { 8054 if (plun->lun_mpxio == 0) { 8055 if (plun->lun_cip == cip) { 8056 cdip = DIP(plun->lun_cip); 8057 } else { 8058 cdip = DIP(cip); 8059 } 8060 } else if ((plun->lun_cip == cip) && plun->lun_cip) { 8061 cdip = mdi_pi_get_client(PIP(plun->lun_cip)); 8062 } else if ((plun->lun_cip != cip) && cip) { 8063 /* 8064 * This means a DTYPE/GUID change, we shall get the 8065 * dip of the old cip instead of the current lun_cip. 8066 */ 8067 cdip = mdi_pi_get_client(PIP(cip)); 8068 } 8069 if (cdip) { 8070 if (i_ddi_devi_attached(cdip)) { 8071 pdip = ddi_get_parent(cdip); 8072 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP); 8073 ndi_devi_enter(pdip, &circ); 8074 (void) ddi_deviname(cdip, devname); 8075 ndi_devi_exit(pdip, circ); 8076 /* 8077 * Release parent lock before calling 8078 * devfs_clean(). 8079 */ 8080 rval = devfs_clean(pdip, devname + 1, 8081 DV_CLEAN_FORCE); 8082 kmem_free(devname, MAXNAMELEN + 1); 8083 /* 8084 * Return if devfs_clean() fails for 8085 * non-MPXIO case. 8086 * For MPXIO case, another path could be 8087 * offlined. 
8088 */ 8089 if (rval && plun->lun_mpxio == 0) { 8090 FCP_TRACE(fcp_logq, pptr->port_instbuf, 8091 fcp_trace, FCP_BUF_LEVEL_3, 0, 8092 "fcp_trigger_lun: devfs_clean " 8093 "failed rval=%x dip=%p", 8094 rval, pdip); 8095 return (NDI_FAILURE); 8096 } 8097 } 8098 } 8099 } 8100 8101 if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) { 8102 return (NDI_FAILURE); 8103 } 8104 8105 if (is_mpxio) { 8106 mdi_devi_enter(pptr->port_dip, &circ); 8107 } else { 8108 ndi_devi_enter(pptr->port_dip, &circ); 8109 } 8110 8111 mutex_enter(&pptr->port_mutex); 8112 mutex_enter(&plun->lun_mutex); 8113 8114 if (online == FCP_ONLINE) { 8115 ccip = fcp_get_cip(plun, cip, lcount, tcount); 8116 if (ccip == NULL) { 8117 goto fail; 8118 } 8119 } else { 8120 if (fcp_is_child_present(plun, cip) != FC_SUCCESS) { 8121 goto fail; 8122 } 8123 ccip = cip; 8124 } 8125 8126 if (online == FCP_ONLINE) { 8127 rval = fcp_online_child(plun, ccip, lcount, tcount, flags, 8128 &circ); 8129 fc_ulp_log_device_event(pptr->port_fp_handle, 8130 FC_ULP_DEVICE_ONLINE); 8131 } else { 8132 rval = fcp_offline_child(plun, ccip, lcount, tcount, flags, 8133 &circ); 8134 fc_ulp_log_device_event(pptr->port_fp_handle, 8135 FC_ULP_DEVICE_OFFLINE); 8136 } 8137 8138 fail: mutex_exit(&plun->lun_mutex); 8139 mutex_exit(&pptr->port_mutex); 8140 8141 if (is_mpxio) { 8142 mdi_devi_exit(pptr->port_dip, circ); 8143 } else { 8144 ndi_devi_exit(pptr->port_dip, circ); 8145 } 8146 8147 fc_ulp_idle_port(pptr->port_fp_handle); 8148 8149 return (rval); 8150 } 8151 8152 8153 /* 8154 * take a target offline by taking all of its LUNs offline 8155 */ 8156 /*ARGSUSED*/ 8157 static int 8158 fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt, 8159 int link_cnt, int tgt_cnt, int nowait, int flags) 8160 { 8161 struct fcp_tgt_elem *elem; 8162 8163 ASSERT(mutex_owned(&pptr->port_mutex)); 8164 ASSERT(mutex_owned(&ptgt->tgt_mutex)); 8165 8166 ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE)); 8167 8168 if (link_cnt != pptr->port_link_cnt || (tgt_cnt && 
tgt_cnt != 8169 ptgt->tgt_change_cnt)) { 8170 mutex_exit(&ptgt->tgt_mutex); 8171 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25); 8172 mutex_enter(&ptgt->tgt_mutex); 8173 8174 return (0); 8175 } 8176 8177 ptgt->tgt_pd_handle = NULL; 8178 mutex_exit(&ptgt->tgt_mutex); 8179 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26); 8180 mutex_enter(&ptgt->tgt_mutex); 8181 8182 tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt; 8183 8184 if (ptgt->tgt_tcap && 8185 (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) { 8186 elem->flags = flags; 8187 elem->time = fcp_watchdog_time; 8188 if (nowait == 0) { 8189 elem->time += fcp_offline_delay; 8190 } 8191 elem->ptgt = ptgt; 8192 elem->link_cnt = link_cnt; 8193 elem->tgt_cnt = tgt_cnt; 8194 elem->next = pptr->port_offline_tgts; 8195 pptr->port_offline_tgts = elem; 8196 } else { 8197 fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags); 8198 } 8199 8200 return (1); 8201 } 8202 8203 8204 static void 8205 fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt, 8206 int link_cnt, int tgt_cnt, int flags) 8207 { 8208 ASSERT(mutex_owned(&pptr->port_mutex)); 8209 ASSERT(mutex_owned(&ptgt->tgt_mutex)); 8210 8211 fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn); 8212 ptgt->tgt_state = FCP_TGT_OFFLINE; 8213 ptgt->tgt_pd_handle = NULL; 8214 fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags); 8215 } 8216 8217 8218 static void 8219 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, 8220 int flags) 8221 { 8222 struct fcp_lun *plun; 8223 8224 ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex)); 8225 ASSERT(mutex_owned(&ptgt->tgt_mutex)); 8226 8227 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) { 8228 if (!(plun->lun_state & FCP_LUN_OFFLINE)) { 8229 fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags); 8230 } 8231 } 8232 } 8233 8234 8235 /* 8236 * take a LUN offline 8237 * 8238 * enters and leaves with the target mutex held, releasing it in the process 8239 * 8240 * allocates 
memory in non-sleep mode 8241 */ 8242 static void 8243 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt, 8244 int nowait, int flags) 8245 { 8246 struct fcp_port *pptr = plun->lun_tgt->tgt_port; 8247 struct fcp_lun_elem *elem; 8248 8249 ASSERT(plun != NULL); 8250 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex)); 8251 8252 if (nowait) { 8253 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags); 8254 return; 8255 } 8256 8257 if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) { 8258 elem->flags = flags; 8259 elem->time = fcp_watchdog_time; 8260 if (nowait == 0) { 8261 elem->time += fcp_offline_delay; 8262 } 8263 elem->plun = plun; 8264 elem->link_cnt = link_cnt; 8265 elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt; 8266 elem->next = pptr->port_offline_luns; 8267 pptr->port_offline_luns = elem; 8268 } else { 8269 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags); 8270 } 8271 } 8272 8273 8274 static void 8275 fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt) 8276 { 8277 struct fcp_pkt *head = NULL; 8278 8279 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex)); 8280 8281 mutex_exit(&LUN_TGT->tgt_mutex); 8282 8283 head = fcp_scan_commands(plun); 8284 if (head != NULL) { 8285 fcp_abort_commands(head, LUN_PORT); 8286 } 8287 8288 mutex_enter(&LUN_TGT->tgt_mutex); 8289 8290 if (plun->lun_cip && plun->lun_mpxio) { 8291 /* 8292 * Intimate MPxIO lun busy is cleared 8293 */ 8294 if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, 8295 FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt, 8296 0, 0)) { 8297 fcp_log(CE_NOTE, LUN_PORT->port_dip, 8298 "Can not ENABLE LUN; D_ID=%x, LUN=%x", 8299 LUN_TGT->tgt_d_id, plun->lun_num); 8300 } 8301 /* 8302 * Intimate MPxIO that the lun is now marked for offline 8303 */ 8304 mutex_exit(&LUN_TGT->tgt_mutex); 8305 (void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE); 8306 mutex_enter(&LUN_TGT->tgt_mutex); 8307 } 8308 } 8309 8310 static void 8311 fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt, 
8312 int flags) 8313 { 8314 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex)); 8315 8316 mutex_exit(&LUN_TGT->tgt_mutex); 8317 fcp_update_offline_flags(plun); 8318 mutex_enter(&LUN_TGT->tgt_mutex); 8319 8320 fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt); 8321 8322 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf, 8323 fcp_trace, FCP_BUF_LEVEL_4, 0, 8324 "offline_lun: passing OFFLINE elem to HP thread"); 8325 8326 if (plun->lun_cip) { 8327 fcp_log(CE_NOTE, LUN_PORT->port_dip, 8328 "!offlining lun=%x (trace=%x), target=%x (trace=%x)", 8329 plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id, 8330 LUN_TGT->tgt_trace); 8331 8332 if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE, 8333 link_cnt, tgt_cnt, flags, 0)) { 8334 fcp_log(CE_CONT, LUN_PORT->port_dip, 8335 "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n", 8336 LUN_TGT->tgt_d_id, plun->lun_num); 8337 } 8338 } 8339 } 8340 8341 static void 8342 fcp_scan_offline_luns(struct fcp_port *pptr) 8343 { 8344 struct fcp_lun_elem *elem; 8345 struct fcp_lun_elem *prev; 8346 struct fcp_lun_elem *next; 8347 8348 ASSERT(MUTEX_HELD(&pptr->port_mutex)); 8349 8350 prev = NULL; 8351 elem = pptr->port_offline_luns; 8352 while (elem) { 8353 next = elem->next; 8354 if (elem->time <= fcp_watchdog_time) { 8355 int changed = 1; 8356 struct fcp_tgt *ptgt = elem->plun->lun_tgt; 8357 8358 mutex_enter(&ptgt->tgt_mutex); 8359 if (pptr->port_link_cnt == elem->link_cnt && 8360 ptgt->tgt_change_cnt == elem->tgt_cnt) { 8361 changed = 0; 8362 } 8363 8364 if (!changed && 8365 !(elem->plun->lun_state & FCP_TGT_OFFLINE)) { 8366 fcp_offline_lun_now(elem->plun, 8367 elem->link_cnt, elem->tgt_cnt, elem->flags); 8368 } 8369 mutex_exit(&ptgt->tgt_mutex); 8370 8371 kmem_free(elem, sizeof (*elem)); 8372 8373 if (prev) { 8374 prev->next = next; 8375 } else { 8376 pptr->port_offline_luns = next; 8377 } 8378 } else { 8379 prev = elem; 8380 } 8381 elem = next; 8382 } 8383 } 8384 8385 8386 static void 8387 fcp_scan_offline_tgts(struct fcp_port *pptr) 8388 { 8389 struct 
fcp_tgt_elem *elem; 8390 struct fcp_tgt_elem *prev; 8391 struct fcp_tgt_elem *next; 8392 8393 ASSERT(MUTEX_HELD(&pptr->port_mutex)); 8394 8395 prev = NULL; 8396 elem = pptr->port_offline_tgts; 8397 while (elem) { 8398 next = elem->next; 8399 if (elem->time <= fcp_watchdog_time) { 8400 int changed = 1; 8401 struct fcp_tgt *ptgt = elem->ptgt; 8402 8403 if (ptgt->tgt_change_cnt == elem->tgt_cnt) { 8404 changed = 0; 8405 } 8406 8407 mutex_enter(&ptgt->tgt_mutex); 8408 if (!changed && !(ptgt->tgt_state & 8409 FCP_TGT_OFFLINE)) { 8410 fcp_offline_target_now(pptr, 8411 ptgt, elem->link_cnt, elem->tgt_cnt, 8412 elem->flags); 8413 } 8414 mutex_exit(&ptgt->tgt_mutex); 8415 8416 kmem_free(elem, sizeof (*elem)); 8417 8418 if (prev) { 8419 prev->next = next; 8420 } else { 8421 pptr->port_offline_tgts = next; 8422 } 8423 } else { 8424 prev = elem; 8425 } 8426 elem = next; 8427 } 8428 } 8429 8430 8431 static void 8432 fcp_update_offline_flags(struct fcp_lun *plun) 8433 { 8434 struct fcp_port *pptr = LUN_PORT; 8435 ASSERT(plun != NULL); 8436 8437 mutex_enter(&LUN_TGT->tgt_mutex); 8438 plun->lun_state |= FCP_LUN_OFFLINE; 8439 plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK); 8440 8441 mutex_enter(&plun->lun_mutex); 8442 if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) { 8443 dev_info_t *cdip = NULL; 8444 8445 mutex_exit(&LUN_TGT->tgt_mutex); 8446 8447 if (plun->lun_mpxio == 0) { 8448 cdip = DIP(plun->lun_cip); 8449 } else if (plun->lun_cip) { 8450 cdip = mdi_pi_get_client(PIP(plun->lun_cip)); 8451 } 8452 8453 mutex_exit(&plun->lun_mutex); 8454 if (cdip) { 8455 (void) ndi_event_retrieve_cookie( 8456 pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT, 8457 &fcp_remove_eid, NDI_EVENT_NOPASS); 8458 (void) ndi_event_run_callbacks( 8459 pptr->port_ndi_event_hdl, cdip, 8460 fcp_remove_eid, NULL); 8461 } 8462 } else { 8463 mutex_exit(&plun->lun_mutex); 8464 mutex_exit(&LUN_TGT->tgt_mutex); 8465 } 8466 } 8467 8468 8469 /* 8470 * Scan all of the command pkts for 
this port, moving pkts that 8471 * match our LUN onto our own list (headed by "head") 8472 */ 8473 static struct fcp_pkt * 8474 fcp_scan_commands(struct fcp_lun *plun) 8475 { 8476 struct fcp_port *pptr = LUN_PORT; 8477 8478 struct fcp_pkt *cmd = NULL; /* pkt cmd ptr */ 8479 struct fcp_pkt *ncmd = NULL; /* next pkt ptr */ 8480 struct fcp_pkt *pcmd = NULL; /* the previous command */ 8481 8482 struct fcp_pkt *head = NULL; /* head of our list */ 8483 struct fcp_pkt *tail = NULL; /* tail of our list */ 8484 8485 int cmds_found = 0; 8486 8487 mutex_enter(&pptr->port_pkt_mutex); 8488 for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) { 8489 struct fcp_lun *tlun = 8490 ADDR2LUN(&cmd->cmd_pkt->pkt_address); 8491 8492 ncmd = cmd->cmd_next; /* set next command */ 8493 8494 /* 8495 * if this pkt is for a different LUN or the 8496 * command is sent down, skip it. 8497 */ 8498 if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED || 8499 (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) { 8500 pcmd = cmd; 8501 continue; 8502 } 8503 cmds_found++; 8504 if (pcmd != NULL) { 8505 ASSERT(pptr->port_pkt_head != cmd); 8506 pcmd->cmd_next = cmd->cmd_next; 8507 } else { 8508 ASSERT(cmd == pptr->port_pkt_head); 8509 pptr->port_pkt_head = cmd->cmd_next; 8510 } 8511 8512 if (cmd == pptr->port_pkt_tail) { 8513 pptr->port_pkt_tail = pcmd; 8514 if (pcmd) { 8515 pcmd->cmd_next = NULL; 8516 } 8517 } 8518 8519 if (head == NULL) { 8520 head = tail = cmd; 8521 } else { 8522 ASSERT(tail != NULL); 8523 8524 tail->cmd_next = cmd; 8525 tail = cmd; 8526 } 8527 cmd->cmd_next = NULL; 8528 } 8529 mutex_exit(&pptr->port_pkt_mutex); 8530 8531 FCP_DTRACE(fcp_logq, pptr->port_instbuf, 8532 fcp_trace, FCP_BUF_LEVEL_8, 0, 8533 "scan commands: %d cmd(s) found", cmds_found); 8534 8535 return (head); 8536 } 8537 8538 8539 /* 8540 * Abort all the commands in the command queue 8541 */ 8542 static void 8543 fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr) 8544 { 8545 struct fcp_pkt *cmd = NULL; /* pkt cmd 
ptr */ 8546 struct fcp_pkt *ncmd = NULL; /* next pkt ptr */ 8547 8548 ASSERT(mutex_owned(&pptr->port_mutex)); 8549 8550 /* scan through the pkts and invalid them */ 8551 for (cmd = head; cmd != NULL; cmd = ncmd) { 8552 struct scsi_pkt *pkt = cmd->cmd_pkt; 8553 8554 ncmd = cmd->cmd_next; 8555 ASSERT(pkt != NULL); 8556 8557 /* 8558 * The lun is going to be marked offline. Indicate 8559 * the target driver not to requeue or retry this command 8560 * as the device is going to be offlined pretty soon. 8561 */ 8562 pkt->pkt_reason = CMD_DEV_GONE; 8563 pkt->pkt_statistics = 0; 8564 pkt->pkt_state = 0; 8565 8566 /* reset cmd flags/state */ 8567 cmd->cmd_flags &= ~CFLAG_IN_QUEUE; 8568 cmd->cmd_state = FCP_PKT_IDLE; 8569 8570 /* 8571 * ensure we have a packet completion routine, 8572 * then call it. 8573 */ 8574 ASSERT(pkt->pkt_comp != NULL); 8575 8576 mutex_exit(&pptr->port_mutex); 8577 fcp_post_callback(cmd); 8578 mutex_enter(&pptr->port_mutex); 8579 } 8580 } 8581 8582 8583 /* 8584 * the pkt_comp callback for command packets 8585 */ 8586 static void 8587 fcp_cmd_callback(fc_packet_t *fpkt) 8588 { 8589 struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private; 8590 struct scsi_pkt *pkt = cmd->cmd_pkt; 8591 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address); 8592 8593 ASSERT(cmd->cmd_state != FCP_PKT_IDLE); 8594 8595 if (cmd->cmd_state == FCP_PKT_IDLE) { 8596 cmn_err(CE_PANIC, "Packet already completed %p", 8597 (void *)cmd); 8598 } 8599 8600 /* 8601 * Watch thread should be freeing the packet, ignore the pkt. 
8602 */ 8603 if (cmd->cmd_state == FCP_PKT_ABORTING) { 8604 fcp_log(CE_CONT, pptr->port_dip, 8605 "!FCP: Pkt completed while aborting\n"); 8606 return; 8607 } 8608 cmd->cmd_state = FCP_PKT_IDLE; 8609 8610 fcp_complete_pkt(fpkt); 8611 8612 #ifdef DEBUG 8613 mutex_enter(&pptr->port_pkt_mutex); 8614 pptr->port_npkts--; 8615 mutex_exit(&pptr->port_pkt_mutex); 8616 #endif /* DEBUG */ 8617 8618 fcp_post_callback(cmd); 8619 } 8620 8621 8622 static void 8623 fcp_complete_pkt(fc_packet_t *fpkt) 8624 { 8625 int error = 0; 8626 struct fcp_pkt *cmd = (struct fcp_pkt *) 8627 fpkt->pkt_ulp_private; 8628 struct scsi_pkt *pkt = cmd->cmd_pkt; 8629 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address); 8630 struct fcp_lun *plun; 8631 struct fcp_tgt *ptgt; 8632 struct fcp_rsp *rsp; 8633 struct scsi_address save; 8634 8635 #ifdef DEBUG 8636 save = pkt->pkt_address; 8637 #endif /* DEBUG */ 8638 8639 rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp; 8640 8641 if (fpkt->pkt_state == FC_PKT_SUCCESS) { 8642 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) { 8643 FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc, 8644 sizeof (struct fcp_rsp)); 8645 } 8646 8647 pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET | 8648 STATE_SENT_CMD | STATE_GOT_STATUS; 8649 8650 pkt->pkt_resid = 0; 8651 8652 if (cmd->cmd_pkt->pkt_numcookies) { 8653 pkt->pkt_state |= STATE_XFERRED_DATA; 8654 if (fpkt->pkt_data_resid) { 8655 error++; 8656 } 8657 } 8658 8659 if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) = 8660 rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) { 8661 /* 8662 * The next two checks make sure that if there 8663 * is no sense data or a valid response and 8664 * the command came back with check condition, 8665 * the command should be retried. 
8666 */ 8667 if (!rsp->fcp_u.fcp_status.rsp_len_set && 8668 !rsp->fcp_u.fcp_status.sense_len_set) { 8669 pkt->pkt_state &= ~STATE_XFERRED_DATA; 8670 pkt->pkt_resid = cmd->cmd_dmacount; 8671 } 8672 } 8673 8674 if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) { 8675 return; 8676 } 8677 8678 plun = ADDR2LUN(&pkt->pkt_address); 8679 ptgt = plun->lun_tgt; 8680 ASSERT(ptgt != NULL); 8681 8682 /* 8683 * Update the transfer resid, if appropriate 8684 */ 8685 if (rsp->fcp_u.fcp_status.resid_over || 8686 rsp->fcp_u.fcp_status.resid_under) { 8687 pkt->pkt_resid = rsp->fcp_resid; 8688 } 8689 8690 /* 8691 * First see if we got a FCP protocol error. 8692 */ 8693 if (rsp->fcp_u.fcp_status.rsp_len_set) { 8694 struct fcp_rsp_info *bep; 8695 bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp + 8696 sizeof (struct fcp_rsp)); 8697 8698 if (fcp_validate_fcp_response(rsp, pptr) != 8699 FC_SUCCESS) { 8700 pkt->pkt_reason = CMD_CMPLT; 8701 *(pkt->pkt_scbp) = STATUS_CHECK; 8702 8703 fcp_log(CE_WARN, pptr->port_dip, 8704 "!SCSI command to d_id=0x%x lun=0x%x" 8705 " failed, Bad FCP response values:" 8706 " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x," 8707 " sts-rsvd2=%x, rsplen=%x, senselen=%x", 8708 ptgt->tgt_d_id, plun->lun_num, 8709 rsp->reserved_0, rsp->reserved_1, 8710 rsp->fcp_u.fcp_status.reserved_0, 8711 rsp->fcp_u.fcp_status.reserved_1, 8712 rsp->fcp_response_len, rsp->fcp_sense_len); 8713 8714 return; 8715 } 8716 8717 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) { 8718 FCP_CP_IN(fpkt->pkt_resp + 8719 sizeof (struct fcp_rsp), bep, 8720 fpkt->pkt_resp_acc, 8721 sizeof (struct fcp_rsp_info)); 8722 } 8723 8724 if (bep->rsp_code != FCP_NO_FAILURE) { 8725 child_info_t *cip; 8726 8727 pkt->pkt_reason = CMD_TRAN_ERR; 8728 8729 mutex_enter(&plun->lun_mutex); 8730 cip = plun->lun_cip; 8731 mutex_exit(&plun->lun_mutex); 8732 8733 FCP_TRACE(fcp_logq, pptr->port_instbuf, 8734 fcp_trace, FCP_BUF_LEVEL_2, 0, 8735 "FCP response error on cmd=%p" 8736 " target=0x%x, cip=%p", cmd, 8737 ptgt->tgt_d_id, 
cip); 8738 } 8739 } 8740 8741 /* 8742 * See if we got a SCSI error with sense data 8743 */ 8744 if (rsp->fcp_u.fcp_status.sense_len_set) { 8745 uchar_t rqlen; 8746 caddr_t sense_from; 8747 child_info_t *cip; 8748 timeout_id_t tid; 8749 struct scsi_arq_status *arq; 8750 struct scsi_extended_sense *sense_to; 8751 8752 arq = (struct scsi_arq_status *)pkt->pkt_scbp; 8753 sense_to = &arq->sts_sensedata; 8754 8755 rqlen = (uchar_t)min(rsp->fcp_sense_len, 8756 sizeof (struct scsi_extended_sense)); 8757 8758 sense_from = (caddr_t)fpkt->pkt_resp + 8759 sizeof (struct fcp_rsp) + rsp->fcp_response_len; 8760 8761 if (fcp_validate_fcp_response(rsp, pptr) != 8762 FC_SUCCESS) { 8763 pkt->pkt_reason = CMD_CMPLT; 8764 *(pkt->pkt_scbp) = STATUS_CHECK; 8765 8766 fcp_log(CE_WARN, pptr->port_dip, 8767 "!SCSI command to d_id=0x%x lun=0x%x" 8768 " failed, Bad FCP response values:" 8769 " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x," 8770 " sts-rsvd2=%x, rsplen=%x, senselen=%x", 8771 ptgt->tgt_d_id, plun->lun_num, 8772 rsp->reserved_0, rsp->reserved_1, 8773 rsp->fcp_u.fcp_status.reserved_0, 8774 rsp->fcp_u.fcp_status.reserved_1, 8775 rsp->fcp_response_len, rsp->fcp_sense_len); 8776 8777 return; 8778 } 8779 8780 /* 8781 * copy in sense information 8782 */ 8783 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) { 8784 FCP_CP_IN(sense_from, sense_to, 8785 fpkt->pkt_resp_acc, rqlen); 8786 } else { 8787 bcopy(sense_from, sense_to, rqlen); 8788 } 8789 8790 if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) || 8791 (FCP_SENSE_NO_LUN(sense_to))) { 8792 mutex_enter(&ptgt->tgt_mutex); 8793 if (ptgt->tgt_tid == NULL) { 8794 /* 8795 * Kick off rediscovery 8796 */ 8797 tid = timeout(fcp_reconfigure_luns, 8798 (caddr_t)ptgt, drv_usectohz(1)); 8799 8800 ptgt->tgt_tid = tid; 8801 ptgt->tgt_state |= FCP_TGT_BUSY; 8802 } 8803 mutex_exit(&ptgt->tgt_mutex); 8804 if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) { 8805 FCP_TRACE(fcp_logq, pptr->port_instbuf, 8806 fcp_trace, FCP_BUF_LEVEL_3, 0, 8807 "!FCP: Report Lun Has Changed" 8808 " 
target=%x", ptgt->tgt_d_id); 8809 } else if (FCP_SENSE_NO_LUN(sense_to)) { 8810 FCP_TRACE(fcp_logq, pptr->port_instbuf, 8811 fcp_trace, FCP_BUF_LEVEL_3, 0, 8812 "!FCP: LU Not Supported" 8813 " target=%x", ptgt->tgt_d_id); 8814 } 8815 } 8816 ASSERT(pkt->pkt_scbp != NULL); 8817 8818 pkt->pkt_state |= STATE_ARQ_DONE; 8819 8820 arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen; 8821 8822 *((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD; 8823 arq->sts_rqpkt_reason = 0; 8824 arq->sts_rqpkt_statistics = 0; 8825 8826 arq->sts_rqpkt_state = STATE_GOT_BUS | 8827 STATE_GOT_TARGET | STATE_SENT_CMD | 8828 STATE_GOT_STATUS | STATE_ARQ_DONE | 8829 STATE_XFERRED_DATA; 8830 8831 mutex_enter(&plun->lun_mutex); 8832 cip = plun->lun_cip; 8833 mutex_exit(&plun->lun_mutex); 8834 8835 FCP_DTRACE(fcp_logq, pptr->port_instbuf, 8836 fcp_trace, FCP_BUF_LEVEL_8, 0, 8837 "SCSI Check condition on cmd=%p target=0x%x" 8838 " LUN=%p, cmd=%x SCSI status=%x, es key=%x" 8839 " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip, 8840 cmd->cmd_fcp_cmd.fcp_cdb[0], 8841 rsp->fcp_u.fcp_status.scsi_status, 8842 sense_to->es_key, sense_to->es_add_code, 8843 sense_to->es_qual_code); 8844 } 8845 } else { 8846 plun = ADDR2LUN(&pkt->pkt_address); 8847 ptgt = plun->lun_tgt; 8848 ASSERT(ptgt != NULL); 8849 8850 /* 8851 * Work harder to translate errors into target driver 8852 * understandable ones. Note with despair that the target 8853 * drivers don't decode pkt_state and pkt_reason exhaustively 8854 * They resort to using the big hammer most often, which 8855 * may not get fixed in the life time of this driver. 
8856 */ 8857 pkt->pkt_state = 0; 8858 pkt->pkt_statistics = 0; 8859 8860 switch (fpkt->pkt_state) { 8861 case FC_PKT_TRAN_ERROR: 8862 switch (fpkt->pkt_reason) { 8863 case FC_REASON_OVERRUN: 8864 pkt->pkt_reason = CMD_CMD_OVR; 8865 pkt->pkt_statistics |= STAT_ABORTED; 8866 break; 8867 8868 case FC_REASON_XCHG_BSY: { 8869 caddr_t ptr; 8870 8871 pkt->pkt_reason = CMD_CMPLT; /* Lie */ 8872 8873 ptr = (caddr_t)pkt->pkt_scbp; 8874 if (ptr) { 8875 *ptr = STATUS_BUSY; 8876 } 8877 break; 8878 } 8879 8880 case FC_REASON_ABORTED: 8881 pkt->pkt_reason = CMD_TRAN_ERR; 8882 pkt->pkt_statistics |= STAT_ABORTED; 8883 break; 8884 8885 case FC_REASON_ABORT_FAILED: 8886 pkt->pkt_reason = CMD_ABORT_FAIL; 8887 break; 8888 8889 case FC_REASON_NO_SEQ_INIT: 8890 case FC_REASON_CRC_ERROR: 8891 pkt->pkt_reason = CMD_TRAN_ERR; 8892 pkt->pkt_statistics |= STAT_ABORTED; 8893 break; 8894 default: 8895 pkt->pkt_reason = CMD_TRAN_ERR; 8896 break; 8897 } 8898 break; 8899 8900 case FC_PKT_PORT_OFFLINE: { 8901 dev_info_t *cdip = NULL; 8902 caddr_t ptr; 8903 8904 if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) { 8905 FCP_DTRACE(fcp_logq, pptr->port_instbuf, 8906 fcp_trace, FCP_BUF_LEVEL_8, 0, 8907 "SCSI cmd; LOGIN REQUIRED from FCA for %x", 8908 ptgt->tgt_d_id); 8909 } 8910 8911 mutex_enter(&plun->lun_mutex); 8912 if (plun->lun_mpxio == 0) { 8913 cdip = DIP(plun->lun_cip); 8914 } else if (plun->lun_cip) { 8915 cdip = mdi_pi_get_client(PIP(plun->lun_cip)); 8916 } 8917 8918 mutex_exit(&plun->lun_mutex); 8919 8920 if (cdip) { 8921 (void) ndi_event_retrieve_cookie( 8922 pptr->port_ndi_event_hdl, cdip, 8923 FCAL_REMOVE_EVENT, &fcp_remove_eid, 8924 NDI_EVENT_NOPASS); 8925 (void) ndi_event_run_callbacks( 8926 pptr->port_ndi_event_hdl, cdip, 8927 fcp_remove_eid, NULL); 8928 } 8929 8930 /* 8931 * If the link goes off-line for a lip, 8932 * this will cause a error to the ST SG 8933 * SGEN drivers. By setting BUSY we will 8934 * give the drivers the chance to retry 8935 * before it blows of the job. 
ST will 8936 * remember how many times it has retried. 8937 */ 8938 8939 if ((plun->lun_type == DTYPE_SEQUENTIAL) || 8940 (plun->lun_type == DTYPE_CHANGER)) { 8941 pkt->pkt_reason = CMD_CMPLT; /* Lie */ 8942 ptr = (caddr_t)pkt->pkt_scbp; 8943 if (ptr) { 8944 *ptr = STATUS_BUSY; 8945 } 8946 } else { 8947 pkt->pkt_reason = CMD_TRAN_ERR; 8948 pkt->pkt_statistics |= STAT_BUS_RESET; 8949 } 8950 break; 8951 } 8952 8953 case FC_PKT_TRAN_BSY: 8954 /* 8955 * Use the ssd Qfull handling here. 8956 */ 8957 *pkt->pkt_scbp = STATUS_INTERMEDIATE; 8958 pkt->pkt_state = STATE_GOT_BUS; 8959 break; 8960 8961 case FC_PKT_TIMEOUT: 8962 pkt->pkt_reason = CMD_TIMEOUT; 8963 if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) { 8964 pkt->pkt_statistics |= STAT_TIMEOUT; 8965 } else { 8966 pkt->pkt_statistics |= STAT_ABORTED; 8967 } 8968 break; 8969 8970 case FC_PKT_LOCAL_RJT: 8971 switch (fpkt->pkt_reason) { 8972 case FC_REASON_OFFLINE: { 8973 dev_info_t *cdip = NULL; 8974 8975 mutex_enter(&plun->lun_mutex); 8976 if (plun->lun_mpxio == 0) { 8977 cdip = DIP(plun->lun_cip); 8978 } else if (plun->lun_cip) { 8979 cdip = mdi_pi_get_client( 8980 PIP(plun->lun_cip)); 8981 } 8982 mutex_exit(&plun->lun_mutex); 8983 8984 if (cdip) { 8985 (void) ndi_event_retrieve_cookie( 8986 pptr->port_ndi_event_hdl, cdip, 8987 FCAL_REMOVE_EVENT, 8988 &fcp_remove_eid, 8989 NDI_EVENT_NOPASS); 8990 (void) ndi_event_run_callbacks( 8991 pptr->port_ndi_event_hdl, 8992 cdip, fcp_remove_eid, NULL); 8993 } 8994 8995 pkt->pkt_reason = CMD_TRAN_ERR; 8996 pkt->pkt_statistics |= STAT_BUS_RESET; 8997 8998 break; 8999 } 9000 9001 case FC_REASON_NOMEM: 9002 case FC_REASON_QFULL: { 9003 caddr_t ptr; 9004 9005 pkt->pkt_reason = CMD_CMPLT; /* Lie */ 9006 ptr = (caddr_t)pkt->pkt_scbp; 9007 if (ptr) { 9008 *ptr = STATUS_BUSY; 9009 } 9010 break; 9011 } 9012 9013 case FC_REASON_DMA_ERROR: 9014 pkt->pkt_reason = CMD_DMA_DERR; 9015 pkt->pkt_statistics |= STAT_ABORTED; 9016 break; 9017 9018 case FC_REASON_CRC_ERROR: 9019 case 
FC_REASON_UNDERRUN: { 9020 uchar_t status; 9021 /* 9022 * Work around for Bugid: 4240945. 9023 * IB on A5k doesn't set the Underrun bit 9024 * in the fcp status, when it is transferring 9025 * less than requested amount of data. Work 9026 * around the ses problem to keep luxadm 9027 * happy till ibfirmware is fixed. 9028 */ 9029 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) { 9030 FCP_CP_IN(fpkt->pkt_resp, rsp, 9031 fpkt->pkt_resp_acc, 9032 sizeof (struct fcp_rsp)); 9033 } 9034 status = rsp->fcp_u.fcp_status.scsi_status; 9035 if (((plun->lun_type & DTYPE_MASK) == 9036 DTYPE_ESI) && (status == STATUS_GOOD)) { 9037 pkt->pkt_reason = CMD_CMPLT; 9038 *pkt->pkt_scbp = status; 9039 pkt->pkt_resid = 0; 9040 } else { 9041 pkt->pkt_reason = CMD_TRAN_ERR; 9042 pkt->pkt_statistics |= STAT_ABORTED; 9043 } 9044 break; 9045 } 9046 9047 case FC_REASON_NO_CONNECTION: 9048 case FC_REASON_UNSUPPORTED: 9049 case FC_REASON_ILLEGAL_REQ: 9050 case FC_REASON_BAD_SID: 9051 case FC_REASON_DIAG_BUSY: 9052 case FC_REASON_FCAL_OPN_FAIL: 9053 case FC_REASON_BAD_XID: 9054 default: 9055 pkt->pkt_reason = CMD_TRAN_ERR; 9056 pkt->pkt_statistics |= STAT_ABORTED; 9057 break; 9058 9059 } 9060 break; 9061 9062 case FC_PKT_NPORT_RJT: 9063 case FC_PKT_FABRIC_RJT: 9064 case FC_PKT_NPORT_BSY: 9065 case FC_PKT_FABRIC_BSY: 9066 default: 9067 FCP_DTRACE(fcp_logq, pptr->port_instbuf, 9068 fcp_trace, FCP_BUF_LEVEL_8, 0, 9069 "FC Status 0x%x, reason 0x%x", 9070 fpkt->pkt_state, fpkt->pkt_reason); 9071 pkt->pkt_reason = CMD_TRAN_ERR; 9072 pkt->pkt_statistics |= STAT_ABORTED; 9073 break; 9074 } 9075 9076 FCP_DTRACE(fcp_logq, pptr->port_instbuf, 9077 fcp_trace, FCP_BUF_LEVEL_9, 0, 9078 "!FC error on cmd=%p target=0x%x: pkt state=0x%x " 9079 " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state, 9080 fpkt->pkt_reason); 9081 } 9082 9083 ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran); 9084 } 9085 9086 9087 static int 9088 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr) 9089 { 9090 if 
(rsp->reserved_0 || rsp->reserved_1 || 9091 rsp->fcp_u.fcp_status.reserved_0 || 9092 rsp->fcp_u.fcp_status.reserved_1) { 9093 /* 9094 * These reserved fields should ideally be zero. FCP-2 does say 9095 * that the recipient need not check for reserved fields to be 9096 * zero. If they are not zero, we will not make a fuss about it 9097 * - just log it (in debug to both trace buffer and messages 9098 * file and to trace buffer only in non-debug) and move on. 9099 * 9100 * Non-zero reserved fields were seen with minnows. 9101 * 9102 * qlc takes care of some of this but we cannot assume that all 9103 * FCAs will do so. 9104 */ 9105 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace, 9106 FCP_BUF_LEVEL_5, 0, 9107 "Got fcp response packet with non-zero reserved fields " 9108 "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, " 9109 "status.reserved_0:0x%x, status.reserved_1:0x%x", 9110 rsp->reserved_0, rsp->reserved_1, 9111 rsp->fcp_u.fcp_status.reserved_0, 9112 rsp->fcp_u.fcp_status.reserved_1); 9113 } 9114 9115 if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len > 9116 (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) { 9117 return (FC_FAILURE); 9118 } 9119 9120 if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len > 9121 (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len - 9122 sizeof (struct fcp_rsp))) { 9123 return (FC_FAILURE); 9124 } 9125 9126 return (FC_SUCCESS); 9127 } 9128 9129 9130 /* 9131 * This is called when there is a change the in device state. The case we're 9132 * handling here is, if the d_id s does not match, offline this tgt and online 9133 * a new tgt with the new d_id. called from fcp_handle_devices with 9134 * port_mutex held. 
9135 */ 9136 static int 9137 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt, 9138 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause) 9139 { 9140 ASSERT(mutex_owned(&pptr->port_mutex)); 9141 9142 FCP_TRACE(fcp_logq, pptr->port_instbuf, 9143 fcp_trace, FCP_BUF_LEVEL_3, 0, 9144 "Starting fcp_device_changed..."); 9145 9146 /* 9147 * The two cases where the port_device_changed is called is 9148 * either it changes it's d_id or it's hard address. 9149 */ 9150 if ((ptgt->tgt_d_id != map_entry->map_did.port_id) || 9151 (FC_TOP_EXTERNAL(pptr->port_topology) && 9152 (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) { 9153 9154 /* offline this target */ 9155 mutex_enter(&ptgt->tgt_mutex); 9156 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) { 9157 (void) fcp_offline_target(pptr, ptgt, link_cnt, 9158 0, 1, NDI_DEVI_REMOVE); 9159 } 9160 mutex_exit(&ptgt->tgt_mutex); 9161 9162 fcp_log(CE_NOTE, pptr->port_dip, 9163 "Change in target properties: Old D_ID=%x New D_ID=%x" 9164 " Old HA=%x New HA=%x", ptgt->tgt_d_id, 9165 map_entry->map_did.port_id, ptgt->tgt_hard_addr, 9166 map_entry->map_hard_addr.hard_addr); 9167 } 9168 9169 return (fcp_handle_mapflags(pptr, ptgt, map_entry, 9170 link_cnt, tgt_cnt, cause)); 9171 } 9172 9173 /* 9174 * Function: fcp_alloc_lun 9175 * 9176 * Description: Creates a new lun structure and adds it to the list 9177 * of luns of the target. 9178 * 9179 * Argument: ptgt Target the lun will belong to. 9180 * 9181 * Return Value: NULL Failed 9182 * Not NULL Succeeded 9183 * 9184 * Context: Kernel context 9185 */ 9186 static struct fcp_lun * 9187 fcp_alloc_lun(struct fcp_tgt *ptgt) 9188 { 9189 struct fcp_lun *plun; 9190 9191 plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP); 9192 if (plun != NULL) { 9193 /* 9194 * Initialize the mutex before putting in the target list 9195 * especially before releasing the target mutex. 
9196 */ 9197 mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL); 9198 plun->lun_tgt = ptgt; 9199 9200 mutex_enter(&ptgt->tgt_mutex); 9201 plun->lun_next = ptgt->tgt_lun; 9202 ptgt->tgt_lun = plun; 9203 plun->lun_old_guid = NULL; 9204 plun->lun_old_guid_size = 0; 9205 mutex_exit(&ptgt->tgt_mutex); 9206 } 9207 9208 return (plun); 9209 } 9210 9211 /* 9212 * Function: fcp_dealloc_lun 9213 * 9214 * Description: Frees the LUN structure passed by the caller. 9215 * 9216 * Argument: plun LUN structure to free. 9217 * 9218 * Return Value: None 9219 * 9220 * Context: Kernel context. 9221 */ 9222 static void 9223 fcp_dealloc_lun(struct fcp_lun *plun) 9224 { 9225 mutex_enter(&plun->lun_mutex); 9226 if (plun->lun_cip) { 9227 fcp_remove_child(plun); 9228 } 9229 mutex_exit(&plun->lun_mutex); 9230 9231 mutex_destroy(&plun->lun_mutex); 9232 if (plun->lun_guid) { 9233 kmem_free(plun->lun_guid, plun->lun_guid_size); 9234 } 9235 if (plun->lun_old_guid) { 9236 kmem_free(plun->lun_old_guid, plun->lun_old_guid_size); 9237 } 9238 kmem_free(plun, sizeof (*plun)); 9239 } 9240 9241 /* 9242 * Function: fcp_alloc_tgt 9243 * 9244 * Description: Creates a new target structure and adds it to the port 9245 * hash list. 9246 * 9247 * Argument: pptr fcp port structure 9248 * *map_entry entry describing the target to create 9249 * link_cnt Link state change counter 9250 * 9251 * Return Value: NULL Failed 9252 * Not NULL Succeeded 9253 * 9254 * Context: Kernel context. 
9255 */ 9256 static struct fcp_tgt * 9257 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt) 9258 { 9259 int hash; 9260 uchar_t *wwn; 9261 struct fcp_tgt *ptgt; 9262 9263 ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP); 9264 if (ptgt != NULL) { 9265 mutex_enter(&pptr->port_mutex); 9266 if (link_cnt != pptr->port_link_cnt) { 9267 /* 9268 * oh oh -- another link reset 9269 * in progress -- give up 9270 */ 9271 mutex_exit(&pptr->port_mutex); 9272 kmem_free(ptgt, sizeof (*ptgt)); 9273 ptgt = NULL; 9274 } else { 9275 /* 9276 * initialize the mutex before putting in the port 9277 * wwn list, especially before releasing the port 9278 * mutex. 9279 */ 9280 mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL); 9281 9282 /* add new target entry to the port's hash list */ 9283 wwn = (uchar_t *)&map_entry->map_pwwn; 9284 hash = FCP_HASH(wwn); 9285 9286 ptgt->tgt_next = pptr->port_tgt_hash_table[hash]; 9287 pptr->port_tgt_hash_table[hash] = ptgt; 9288 9289 /* save cross-ptr */ 9290 ptgt->tgt_port = pptr; 9291 9292 ptgt->tgt_change_cnt = 1; 9293 9294 /* initialize the target manual_config_only flag */ 9295 if (fcp_enable_auto_configuration) { 9296 ptgt->tgt_manual_config_only = 0; 9297 } else { 9298 ptgt->tgt_manual_config_only = 1; 9299 } 9300 9301 mutex_exit(&pptr->port_mutex); 9302 } 9303 } 9304 9305 return (ptgt); 9306 } 9307 9308 /* 9309 * Function: fcp_dealloc_tgt 9310 * 9311 * Description: Frees the target structure passed by the caller. 9312 * 9313 * Argument: ptgt Target structure to free. 9314 * 9315 * Return Value: None 9316 * 9317 * Context: Kernel context. 
9318 */ 9319 static void 9320 fcp_dealloc_tgt(struct fcp_tgt *ptgt) 9321 { 9322 mutex_destroy(&ptgt->tgt_mutex); 9323 kmem_free(ptgt, sizeof (*ptgt)); 9324 } 9325 9326 9327 /* 9328 * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry 9329 * 9330 * Device discovery commands will not be retried for-ever as 9331 * this will have repercussions on other devices that need to 9332 * be submitted to the hotplug thread. After a quick glance 9333 * at the SCSI-3 spec, it was found that the spec doesn't 9334 * mandate a forever retry, rather recommends a delayed retry. 9335 * 9336 * Since Photon IB is single threaded, STATUS_BUSY is common 9337 * in a 4+initiator environment. Make sure the total time 9338 * spent on retries (including command timeout) does not 9339 * 60 seconds 9340 */ 9341 static void 9342 fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt) 9343 { 9344 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private; 9345 struct fcp_tgt *ptgt = icmd->ipkt_tgt; 9346 9347 mutex_enter(&pptr->port_mutex); 9348 mutex_enter(&ptgt->tgt_mutex); 9349 if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) { 9350 FCP_TRACE(fcp_logq, pptr->port_instbuf, 9351 fcp_trace, FCP_BUF_LEVEL_2, 0, 9352 "fcp_queue_ipkt,1:state change occured" 9353 " for D_ID=0x%x", ptgt->tgt_d_id); 9354 mutex_exit(&ptgt->tgt_mutex); 9355 mutex_exit(&pptr->port_mutex); 9356 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt, 9357 icmd->ipkt_change_cnt, icmd->ipkt_cause); 9358 fcp_icmd_free(pptr, icmd); 9359 return; 9360 } 9361 mutex_exit(&ptgt->tgt_mutex); 9362 9363 icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++; 9364 9365 if (pptr->port_ipkt_list != NULL) { 9366 /* add pkt to front of doubly-linked list */ 9367 pptr->port_ipkt_list->ipkt_prev = icmd; 9368 icmd->ipkt_next = pptr->port_ipkt_list; 9369 pptr->port_ipkt_list = icmd; 9370 icmd->ipkt_prev = NULL; 9371 } else { 9372 /* this is the first/only pkt on the list */ 9373 pptr->port_ipkt_list = icmd; 9374 
icmd->ipkt_next = NULL; 9375 icmd->ipkt_prev = NULL; 9376 } 9377 mutex_exit(&pptr->port_mutex); 9378 } 9379 9380 /* 9381 * Function: fcp_transport 9382 * 9383 * Description: This function submits the Fibre Channel packet to the transort 9384 * layer by calling fc_ulp_transport(). If fc_ulp_transport() 9385 * fails the submission, the treatment depends on the value of 9386 * the variable internal. 9387 * 9388 * Argument: port_handle fp/fctl port handle. 9389 * *fpkt Packet to submit to the transport layer. 9390 * internal Not zero when it's an internal packet. 9391 * 9392 * Return Value: FC_TRAN_BUSY 9393 * FC_STATEC_BUSY 9394 * FC_OFFLINE 9395 * FC_LOGINREQ 9396 * FC_DEVICE_BUSY 9397 * FC_SUCCESS 9398 */ 9399 static int 9400 fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal) 9401 { 9402 int rval; 9403 9404 rval = fc_ulp_transport(port_handle, fpkt); 9405 if (rval == FC_SUCCESS) { 9406 return (rval); 9407 } 9408 9409 /* 9410 * LUN isn't marked BUSY or OFFLINE, so we got here to transport 9411 * a command, if the underlying modules see that there is a state 9412 * change, or if a port is OFFLINE, that means, that state change 9413 * hasn't reached FCP yet, so re-queue the command for deferred 9414 * submission. 9415 */ 9416 if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) || 9417 (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) || 9418 (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) { 9419 /* 9420 * Defer packet re-submission. Life hang is possible on 9421 * internal commands if the port driver sends FC_STATEC_BUSY 9422 * for ever, but that shouldn't happen in a good environment. 9423 * Limiting re-transport for internal commands is probably a 9424 * good idea.. 9425 * A race condition can happen when a port sees barrage of 9426 * link transitions offline to online. If the FCTL has 9427 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the 9428 * internal commands should be queued to do the discovery. 
9429 * The race condition is when an online comes and FCP starts 9430 * its internal discovery and the link goes offline. It is 9431 * possible that the statec_callback has not reached FCP 9432 * and FCP is carrying on with its internal discovery. 9433 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication 9434 * that the link has gone offline. At this point FCP should 9435 * drop all the internal commands and wait for the 9436 * statec_callback. It will be facilitated by incrementing 9437 * port_link_cnt. 9438 * 9439 * For external commands, the (FC)pkt_timeout is decremented 9440 * by the QUEUE Delay added by our driver, Care is taken to 9441 * ensure that it doesn't become zero (zero means no timeout) 9442 * If the time expires right inside driver queue itself, 9443 * the watch thread will return it to the original caller 9444 * indicating that the command has timed-out. 9445 */ 9446 if (internal) { 9447 char *op; 9448 struct fcp_ipkt *icmd; 9449 9450 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private; 9451 switch (icmd->ipkt_opcode) { 9452 case SCMD_REPORT_LUN: 9453 op = "REPORT LUN"; 9454 break; 9455 9456 case SCMD_INQUIRY: 9457 op = "INQUIRY"; 9458 break; 9459 9460 case SCMD_INQUIRY_PAGE83: 9461 op = "INQUIRY-83"; 9462 break; 9463 9464 default: 9465 op = "Internal SCSI COMMAND"; 9466 break; 9467 } 9468 9469 if (fcp_handle_ipkt_errors(icmd->ipkt_port, 9470 icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) { 9471 rval = FC_SUCCESS; 9472 } 9473 } else { 9474 struct fcp_pkt *cmd; 9475 struct fcp_port *pptr; 9476 9477 cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private; 9478 cmd->cmd_state = FCP_PKT_IDLE; 9479 pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address); 9480 9481 if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) { 9482 FCP_DTRACE(fcp_logq, pptr->port_instbuf, 9483 fcp_trace, FCP_BUF_LEVEL_9, 0, 9484 "fcp_transport: xport busy for pkt %p", 9485 cmd->cmd_pkt); 9486 rval = FC_TRAN_BUSY; 9487 } else { 9488 fcp_queue_pkt(pptr, cmd); 9489 rval = FC_SUCCESS; 9490 } 9491 } 9492 } 
9493 9494 return (rval); 9495 } 9496 9497 /*VARARGS3*/ 9498 static void 9499 fcp_log(int level, dev_info_t *dip, const char *fmt, ...) 9500 { 9501 char buf[256]; 9502 va_list ap; 9503 9504 if (dip == NULL) { 9505 dip = fcp_global_dip; 9506 } 9507 9508 va_start(ap, fmt); 9509 (void) vsprintf(buf, fmt, ap); 9510 va_end(ap); 9511 9512 scsi_log(dip, "fcp", level, buf); 9513 } 9514 9515 /* 9516 * This function retries NS registry of FC4 type. 9517 * It assumes that fcp_mutex is held. 9518 * The function does nothing if topology is not fabric 9519 * So, the topology has to be set before this function can be called 9520 */ 9521 static void 9522 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id) 9523 { 9524 int rval; 9525 9526 ASSERT(MUTEX_HELD(&pptr->port_mutex)); 9527 9528 if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) || 9529 ((pptr->port_topology != FC_TOP_FABRIC) && 9530 (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) { 9531 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) { 9532 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED; 9533 } 9534 return; 9535 } 9536 mutex_exit(&pptr->port_mutex); 9537 rval = fcp_do_ns_registry(pptr, s_id); 9538 mutex_enter(&pptr->port_mutex); 9539 9540 if (rval == 0) { 9541 /* Registry successful. Reset flag */ 9542 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED); 9543 } 9544 } 9545 9546 /* 9547 * This function registers the ULP with the switch by calling transport i/f 9548 */ 9549 static int 9550 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id) 9551 { 9552 fc_ns_cmd_t ns_cmd; 9553 ns_rfc_type_t rfc; 9554 uint32_t types[8]; 9555 9556 /* 9557 * Prepare the Name server structure to 9558 * register with the transport in case of 9559 * Fabric configuration. 
9560 */ 9561 bzero(&rfc, sizeof (rfc)); 9562 bzero(types, sizeof (types)); 9563 9564 types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] = 9565 (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP)); 9566 9567 rfc.rfc_port_id.port_id = s_id; 9568 bcopy(types, rfc.rfc_types, sizeof (types)); 9569 9570 ns_cmd.ns_flags = 0; 9571 ns_cmd.ns_cmd = NS_RFT_ID; 9572 ns_cmd.ns_req_len = sizeof (rfc); 9573 ns_cmd.ns_req_payload = (caddr_t)&rfc; 9574 ns_cmd.ns_resp_len = 0; 9575 ns_cmd.ns_resp_payload = NULL; 9576 9577 /* 9578 * Perform the Name Server Registration for SCSI_FCP FC4 Type. 9579 */ 9580 if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) { 9581 fcp_log(CE_WARN, pptr->port_dip, 9582 "!ns_registry: failed name server registration"); 9583 return (1); 9584 } 9585 9586 return (0); 9587 } 9588 9589 /* 9590 * Function: fcp_handle_port_attach 9591 * 9592 * Description: This function is called from fcp_port_attach() to attach a 9593 * new port. This routine does the following: 9594 * 9595 * 1) Allocates an fcp_port structure and initializes it. 9596 * 2) Tries to register the new FC-4 (FCP) capablity with the name 9597 * server. 9598 * 3) Kicks off the enumeration of the targets/luns visible 9599 * through this new port. That is done by calling 9600 * fcp_statec_callback() if the port is online. 9601 * 9602 * Argument: ulph fp/fctl port handle. 9603 * *pinfo Port information. 9604 * s_id Port ID. 9605 * instance Device instance number for the local port 9606 * (returned by ddi_get_instance()). 9607 * 9608 * Return Value: DDI_SUCCESS 9609 * DDI_FAILURE 9610 * 9611 * Context: User and Kernel context. 
9612 */ 9613 /*ARGSUSED*/ 9614 int 9615 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo, 9616 uint32_t s_id, int instance) 9617 { 9618 int res = DDI_FAILURE; 9619 scsi_hba_tran_t *tran; 9620 int mutex_initted = FALSE; 9621 int hba_attached = FALSE; 9622 int soft_state_linked = FALSE; 9623 int event_bind = FALSE; 9624 struct fcp_port *pptr; 9625 fc_portmap_t *tmp_list = NULL; 9626 uint32_t max_cnt, alloc_cnt; 9627 uchar_t *boot_wwn = NULL; 9628 uint_t nbytes; 9629 int manual_cfg; 9630 9631 /* 9632 * this port instance attaching for the first time (or after 9633 * being detached before) 9634 */ 9635 FCP_TRACE(fcp_logq, "fcp", fcp_trace, 9636 FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance); 9637 9638 if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) { 9639 cmn_err(CE_WARN, "fcp: Softstate struct alloc failed" 9640 "parent dip: %p; instance: %d", (void *)pinfo->port_dip, 9641 instance); 9642 return (res); 9643 } 9644 9645 if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) { 9646 /* this shouldn't happen */ 9647 ddi_soft_state_free(fcp_softstate, instance); 9648 cmn_err(CE_WARN, "fcp: bad soft state"); 9649 return (res); 9650 } 9651 9652 (void) sprintf(pptr->port_instbuf, "fcp(%d)", instance); 9653 9654 /* 9655 * Make a copy of ulp_port_info as fctl allocates 9656 * a temp struct. 9657 */ 9658 (void) fcp_cp_pinfo(pptr, pinfo); 9659 9660 /* 9661 * Check for manual_configuration_only property. 9662 * Enable manual configurtion if the property is 9663 * set to 1, otherwise disable manual configuration. 
9664 */ 9665 if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip, 9666 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 9667 MANUAL_CFG_ONLY, 9668 -1)) != -1) { 9669 if (manual_cfg == 1) { 9670 char *pathname; 9671 pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 9672 (void) ddi_pathname(pptr->port_dip, pathname); 9673 cmn_err(CE_NOTE, 9674 "%s (%s%d) %s is enabled via %s.conf.", 9675 pathname, 9676 ddi_driver_name(pptr->port_dip), 9677 ddi_get_instance(pptr->port_dip), 9678 MANUAL_CFG_ONLY, 9679 ddi_driver_name(pptr->port_dip)); 9680 fcp_enable_auto_configuration = 0; 9681 kmem_free(pathname, MAXPATHLEN); 9682 } 9683 } 9684 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt)) 9685 pptr->port_link_cnt = 1; 9686 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt)) 9687 pptr->port_id = s_id; 9688 pptr->port_instance = instance; 9689 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state)) 9690 pptr->port_state = FCP_STATE_INIT; 9691 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state)) 9692 9693 pptr->port_dmacookie_sz = (pptr->port_data_dma_attr.dma_attr_sgllen * 9694 sizeof (ddi_dma_cookie_t)); 9695 9696 /* 9697 * The two mutexes of fcp_port are initialized. The variable 9698 * mutex_initted is incremented to remember that fact. That variable 9699 * is checked when the routine fails and the mutexes have to be 9700 * destroyed. 9701 */ 9702 mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL); 9703 mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL); 9704 mutex_initted++; 9705 9706 /* 9707 * The SCSI tran structure is allocate and initialized now. 
9708 */ 9709 if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) { 9710 fcp_log(CE_WARN, pptr->port_dip, 9711 "!fcp%d: scsi_hba_tran_alloc failed", instance); 9712 goto fail; 9713 } 9714 9715 /* link in the transport structure then fill it in */ 9716 pptr->port_tran = tran; 9717 tran->tran_hba_private = pptr; 9718 tran->tran_tgt_private = NULL; 9719 tran->tran_tgt_init = fcp_scsi_tgt_init; 9720 tran->tran_tgt_probe = NULL; 9721 tran->tran_tgt_free = fcp_scsi_tgt_free; 9722 tran->tran_start = fcp_scsi_start; 9723 tran->tran_reset = fcp_scsi_reset; 9724 tran->tran_abort = fcp_scsi_abort; 9725 tran->tran_getcap = fcp_scsi_getcap; 9726 tran->tran_setcap = fcp_scsi_setcap; 9727 tran->tran_init_pkt = NULL; 9728 tran->tran_destroy_pkt = NULL; 9729 tran->tran_dmafree = NULL; 9730 tran->tran_sync_pkt = NULL; 9731 tran->tran_reset_notify = fcp_scsi_reset_notify; 9732 tran->tran_get_bus_addr = fcp_scsi_get_bus_addr; 9733 tran->tran_get_name = fcp_scsi_get_name; 9734 tran->tran_clear_aca = NULL; 9735 tran->tran_clear_task_set = NULL; 9736 tran->tran_terminate_task = NULL; 9737 tran->tran_get_eventcookie = fcp_scsi_bus_get_eventcookie; 9738 tran->tran_add_eventcall = fcp_scsi_bus_add_eventcall; 9739 tran->tran_remove_eventcall = fcp_scsi_bus_remove_eventcall; 9740 tran->tran_post_event = fcp_scsi_bus_post_event; 9741 tran->tran_quiesce = NULL; 9742 tran->tran_unquiesce = NULL; 9743 tran->tran_bus_reset = NULL; 9744 tran->tran_bus_config = fcp_scsi_bus_config; 9745 tran->tran_bus_unconfig = fcp_scsi_bus_unconfig; 9746 tran->tran_bus_power = NULL; 9747 tran->tran_interconnect_type = INTERCONNECT_FABRIC; 9748 9749 tran->tran_pkt_constructor = fcp_kmem_cache_constructor; 9750 tran->tran_pkt_destructor = fcp_kmem_cache_destructor; 9751 tran->tran_setup_pkt = fcp_pkt_setup; 9752 tran->tran_teardown_pkt = fcp_pkt_teardown; 9753 tran->tran_hba_len = pptr->port_priv_pkt_len + 9754 sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz; 9755 9756 /* 9757 * Allocate an ndi event 
handle 9758 */ 9759 pptr->port_ndi_event_defs = (ndi_event_definition_t *) 9760 kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP); 9761 9762 bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs, 9763 sizeof (fcp_ndi_event_defs)); 9764 9765 (void) ndi_event_alloc_hdl(pptr->port_dip, NULL, 9766 &pptr->port_ndi_event_hdl, NDI_SLEEP); 9767 9768 pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1; 9769 pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS; 9770 pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs; 9771 9772 if (DEVI_IS_ATTACHING(pptr->port_dip) && 9773 (ndi_event_bind_set(pptr->port_ndi_event_hdl, 9774 &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) { 9775 goto fail; 9776 } 9777 event_bind++; /* Checked in fail case */ 9778 9779 if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr, 9780 tran, SCSI_HBA_TRAN_CLONE | SCSI_HBA_TRAN_SCB) 9781 != DDI_SUCCESS) { 9782 fcp_log(CE_WARN, pptr->port_dip, 9783 "!fcp%d: scsi_hba_attach_setup failed", instance); 9784 goto fail; 9785 } 9786 hba_attached++; /* Checked in fail case */ 9787 9788 pptr->port_mpxio = 0; 9789 if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) == 9790 MDI_SUCCESS) { 9791 pptr->port_mpxio++; 9792 } 9793 9794 /* 9795 * The following code is putting the new port structure in the global 9796 * list of ports and, if it is the first port to attach, it start the 9797 * fcp_watchdog_tick. 9798 * 9799 * Why put this new port in the global before we are done attaching it? 9800 * We are actually making the structure globally known before we are 9801 * done attaching it. The reason for that is: because of the code that 9802 * follows. At this point the resources to handle the port are 9803 * allocated. This function is now going to do the following: 9804 * 9805 * 1) It is going to try to register with the name server advertizing 9806 * the new FCP capability of the port. 
9807 * 2) It is going to play the role of the fp/fctl layer by building 9808 * a list of worlwide names reachable through this port and call 9809 * itself on fcp_statec_callback(). That requires the port to 9810 * be part of the global list. 9811 */ 9812 mutex_enter(&fcp_global_mutex); 9813 if (fcp_port_head == NULL) { 9814 fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist); 9815 } 9816 pptr->port_next = fcp_port_head; 9817 fcp_port_head = pptr; 9818 soft_state_linked++; 9819 9820 if (fcp_watchdog_init++ == 0) { 9821 fcp_watchdog_tick = fcp_watchdog_timeout * 9822 drv_usectohz(1000000); 9823 fcp_watchdog_id = timeout(fcp_watch, NULL, 9824 fcp_watchdog_tick); 9825 } 9826 mutex_exit(&fcp_global_mutex); 9827 9828 /* 9829 * Here an attempt is made to register with the name server, the new 9830 * FCP capability. That is done using an RTF_ID to the name server. 9831 * It is done synchronously. The function fcp_do_ns_registry() 9832 * doesn't return till the name server responded. 9833 * On failures, just ignore it for now and it will get retried during 9834 * state change callbacks. We'll set a flag to show this failure 9835 */ 9836 if (fcp_do_ns_registry(pptr, s_id)) { 9837 mutex_enter(&pptr->port_mutex); 9838 pptr->port_state |= FCP_STATE_NS_REG_FAILED; 9839 mutex_exit(&pptr->port_mutex); 9840 } else { 9841 mutex_enter(&pptr->port_mutex); 9842 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED); 9843 mutex_exit(&pptr->port_mutex); 9844 } 9845 9846 /* 9847 * Lookup for boot WWN property 9848 */ 9849 if (modrootloaded != 1) { 9850 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, 9851 ddi_get_parent(pinfo->port_dip), 9852 DDI_PROP_DONTPASS, OBP_BOOT_WWN, 9853 &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) && 9854 (nbytes == FC_WWN_SIZE)) { 9855 bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE); 9856 } 9857 if (boot_wwn) { 9858 ddi_prop_free(boot_wwn); 9859 } 9860 } 9861 9862 /* 9863 * Handle various topologies and link states. 
9864 */ 9865 switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) { 9866 case FC_STATE_OFFLINE: 9867 9868 /* 9869 * we're attaching a port where the link is offline 9870 * 9871 * Wait for ONLINE, at which time a state 9872 * change will cause a statec_callback 9873 * 9874 * in the mean time, do not do anything 9875 */ 9876 res = DDI_SUCCESS; 9877 pptr->port_state |= FCP_STATE_OFFLINE; 9878 break; 9879 9880 case FC_STATE_ONLINE: { 9881 if (pptr->port_topology == FC_TOP_UNKNOWN) { 9882 (void) fcp_linkreset(pptr, NULL, KM_NOSLEEP); 9883 res = DDI_SUCCESS; 9884 break; 9885 } 9886 /* 9887 * discover devices and create nodes (a private 9888 * loop or point-to-point) 9889 */ 9890 ASSERT(pptr->port_topology != FC_TOP_UNKNOWN); 9891 9892 /* 9893 * At this point we are going to build a list of all the ports 9894 * that can be reached through this local port. It looks like 9895 * we cannot handle more than FCP_MAX_DEVICES per local port 9896 * (128). 9897 */ 9898 if ((tmp_list = (fc_portmap_t *)kmem_zalloc( 9899 sizeof (fc_portmap_t) * FCP_MAX_DEVICES, 9900 KM_NOSLEEP)) == NULL) { 9901 fcp_log(CE_WARN, pptr->port_dip, 9902 "!fcp%d: failed to allocate portmap", 9903 instance); 9904 goto fail; 9905 } 9906 9907 /* 9908 * fc_ulp_getportmap() is going to provide us with the list of 9909 * remote ports in the buffer we just allocated. The way the 9910 * list is going to be retrieved depends on the topology. 9911 * However, if we are connected to a Fabric, a name server 9912 * request may be sent to get the list of FCP capable ports. 9913 * It should be noted that is the case the request is 9914 * synchronous. This means we are stuck here till the name 9915 * server replies. A lot of things can change during that time 9916 * and including, may be, being called on 9917 * fcp_statec_callback() for different reasons. I'm not sure 9918 * the code can handle that. 
9919 */ 9920 max_cnt = FCP_MAX_DEVICES; 9921 alloc_cnt = FCP_MAX_DEVICES; 9922 if ((res = fc_ulp_getportmap(pptr->port_fp_handle, 9923 &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) != 9924 FC_SUCCESS) { 9925 caddr_t msg; 9926 9927 (void) fc_ulp_error(res, &msg); 9928 9929 /* 9930 * this just means the transport is 9931 * busy perhaps building a portmap so, 9932 * for now, succeed this port attach 9933 * when the transport has a new map, 9934 * it'll send us a state change then 9935 */ 9936 fcp_log(CE_WARN, pptr->port_dip, 9937 "!failed to get port map : %s", msg); 9938 9939 res = DDI_SUCCESS; 9940 break; /* go return result */ 9941 } 9942 if (max_cnt > alloc_cnt) { 9943 alloc_cnt = max_cnt; 9944 } 9945 9946 /* 9947 * We are now going to call fcp_statec_callback() ourselves. 9948 * By issuing this call we are trying to kick off the enumera- 9949 * tion process. 9950 */ 9951 /* 9952 * let the state change callback do the SCSI device 9953 * discovery and create the devinfos 9954 */ 9955 fcp_statec_callback(ulph, pptr->port_fp_handle, 9956 pptr->port_phys_state, pptr->port_topology, tmp_list, 9957 max_cnt, pptr->port_id); 9958 9959 res = DDI_SUCCESS; 9960 break; 9961 } 9962 9963 default: 9964 /* unknown port state */ 9965 fcp_log(CE_WARN, pptr->port_dip, 9966 "!fcp%d: invalid port state at attach=0x%x", 9967 instance, pptr->port_phys_state); 9968 9969 mutex_enter(&pptr->port_mutex); 9970 pptr->port_phys_state = FCP_STATE_OFFLINE; 9971 mutex_exit(&pptr->port_mutex); 9972 9973 res = DDI_SUCCESS; 9974 break; 9975 } 9976 9977 /* free temp list if used */ 9978 if (tmp_list != NULL) { 9979 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt); 9980 } 9981 9982 /* note the attach time */ 9983 pptr->port_attach_time = lbolt64; 9984 9985 /* all done */ 9986 return (res); 9987 9988 /* a failure we have to clean up after */ 9989 fail: 9990 fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port"); 9991 9992 if (soft_state_linked) { 9993 /* remove this fcp_port from the linked 
list */ 9994 (void) fcp_soft_state_unlink(pptr); 9995 } 9996 9997 /* unbind and free event set */ 9998 if (pptr->port_ndi_event_hdl) { 9999 if (event_bind) { 10000 (void) ndi_event_unbind_set(pptr->port_ndi_event_hdl, 10001 &pptr->port_ndi_events, NDI_SLEEP); 10002 } 10003 (void) ndi_event_free_hdl(pptr->port_ndi_event_hdl); 10004 } 10005 10006 if (pptr->port_ndi_event_defs) { 10007 (void) kmem_free(pptr->port_ndi_event_defs, 10008 sizeof (fcp_ndi_event_defs)); 10009 } 10010 10011 /* 10012 * Clean up mpxio stuff 10013 */ 10014 if (pptr->port_mpxio) { 10015 (void) mdi_phci_unregister(pptr->port_dip, 0); 10016 pptr->port_mpxio--; 10017 } 10018 10019 /* undo SCSI HBA setup */ 10020 if (hba_attached) { 10021 (void) scsi_hba_detach(pptr->port_dip); 10022 } 10023 if (pptr->port_tran != NULL) { 10024 scsi_hba_tran_free(pptr->port_tran); 10025 } 10026 10027 mutex_enter(&fcp_global_mutex); 10028 10029 /* 10030 * We check soft_state_linked, because it is incremented right before 10031 * we call increment fcp_watchdog_init. Therefore, we know if 10032 * soft_state_linked is still FALSE, we do not want to decrement 10033 * fcp_watchdog_init or possibly call untimeout. 
10034 */ 10035 10036 if (soft_state_linked) { 10037 if (--fcp_watchdog_init == 0) { 10038 timeout_id_t tid = fcp_watchdog_id; 10039 10040 mutex_exit(&fcp_global_mutex); 10041 (void) untimeout(tid); 10042 } else { 10043 mutex_exit(&fcp_global_mutex); 10044 } 10045 } else { 10046 mutex_exit(&fcp_global_mutex); 10047 } 10048 10049 if (mutex_initted) { 10050 mutex_destroy(&pptr->port_mutex); 10051 mutex_destroy(&pptr->port_pkt_mutex); 10052 } 10053 10054 if (tmp_list != NULL) { 10055 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt); 10056 } 10057 10058 /* this makes pptr invalid */ 10059 ddi_soft_state_free(fcp_softstate, instance); 10060 10061 return (DDI_FAILURE); 10062 } 10063 10064 10065 static int 10066 fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance) 10067 { 10068 int count = 0; 10069 10070 mutex_enter(&pptr->port_mutex); 10071 10072 /* 10073 * if the port is powered down or suspended, nothing else 10074 * to do; just return. 10075 */ 10076 if (flag != FCP_STATE_DETACHING) { 10077 if (pptr->port_state & (FCP_STATE_POWER_DOWN | 10078 FCP_STATE_SUSPENDED)) { 10079 pptr->port_state |= flag; 10080 mutex_exit(&pptr->port_mutex); 10081 return (FC_SUCCESS); 10082 } 10083 } 10084 10085 if (pptr->port_state & FCP_STATE_IN_MDI) { 10086 mutex_exit(&pptr->port_mutex); 10087 return (FC_FAILURE); 10088 } 10089 10090 FCP_TRACE(fcp_logq, pptr->port_instbuf, 10091 fcp_trace, FCP_BUF_LEVEL_2, 0, 10092 "fcp_handle_port_detach: port is detaching"); 10093 10094 pptr->port_state |= flag; 10095 10096 /* 10097 * Wait for any ongoing reconfig/ipkt to complete, that 10098 * ensures the freeing to targets/luns is safe. 10099 * No more ref to this port should happen from statec/ioctl 10100 * after that as it was removed from the global port list. 10101 */ 10102 while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt || 10103 (pptr->port_state & FCP_STATE_IN_WATCHDOG)) { 10104 /* 10105 * Let's give sufficient time for reconfig/ipkt 10106 * to complete. 
10107 */ 10108 if (count++ >= FCP_ICMD_DEADLINE) { 10109 break; 10110 } 10111 mutex_exit(&pptr->port_mutex); 10112 delay(drv_usectohz(1000000)); 10113 mutex_enter(&pptr->port_mutex); 10114 } 10115 10116 /* 10117 * if the driver is still busy then fail to 10118 * suspend/power down. 10119 */ 10120 if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt || 10121 (pptr->port_state & FCP_STATE_IN_WATCHDOG)) { 10122 pptr->port_state &= ~flag; 10123 mutex_exit(&pptr->port_mutex); 10124 return (FC_FAILURE); 10125 } 10126 10127 if (flag == FCP_STATE_DETACHING) { 10128 pptr = fcp_soft_state_unlink(pptr); 10129 ASSERT(pptr != NULL); 10130 } 10131 10132 pptr->port_link_cnt++; 10133 pptr->port_state |= FCP_STATE_OFFLINE; 10134 pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE); 10135 10136 fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK), 10137 FCP_CAUSE_LINK_DOWN); 10138 mutex_exit(&pptr->port_mutex); 10139 10140 /* kill watch dog timer if we're the last */ 10141 mutex_enter(&fcp_global_mutex); 10142 if (--fcp_watchdog_init == 0) { 10143 timeout_id_t tid = fcp_watchdog_id; 10144 mutex_exit(&fcp_global_mutex); 10145 (void) untimeout(tid); 10146 } else { 10147 mutex_exit(&fcp_global_mutex); 10148 } 10149 10150 /* clean up the port structures */ 10151 if (flag == FCP_STATE_DETACHING) { 10152 fcp_cleanup_port(pptr, instance); 10153 } 10154 10155 return (FC_SUCCESS); 10156 } 10157 10158 10159 static void 10160 fcp_cleanup_port(struct fcp_port *pptr, int instance) 10161 { 10162 ASSERT(pptr != NULL); 10163 10164 /* unbind and free event set */ 10165 if (pptr->port_ndi_event_hdl) { 10166 (void) ndi_event_unbind_set(pptr->port_ndi_event_hdl, 10167 &pptr->port_ndi_events, NDI_SLEEP); 10168 (void) ndi_event_free_hdl(pptr->port_ndi_event_hdl); 10169 } 10170 10171 if (pptr->port_ndi_event_defs) { 10172 (void) kmem_free(pptr->port_ndi_event_defs, 10173 sizeof (fcp_ndi_event_defs)); 10174 } 10175 10176 /* free the lun/target structures and devinfos */ 10177 fcp_free_targets(pptr); 10178 
	/*
	 * Clean up mpxio stuff: if this port registered itself as an mpxio
	 * pHCI, unregister it and drop the reference count.
	 */
	if (pptr->port_mpxio) {
		(void) mdi_phci_unregister(pptr->port_dip, 0);
		pptr->port_mpxio--;
	}

	/* clean up SCSA stuff */
	(void) scsi_hba_detach(pptr->port_dip);
	if (pptr->port_tran != NULL) {
		scsi_hba_tran_free(pptr->port_tran);
	}

#ifdef	KSTATS_CODE
	/* clean up kstats */
	if (pptr->fcp_ksp != NULL) {
		kstat_delete(pptr->fcp_ksp);
	}
#endif

	/* clean up soft state mutexes/condition variables */
	mutex_destroy(&pptr->port_mutex);
	mutex_destroy(&pptr->port_pkt_mutex);

	/* all done with soft state */
	ddi_soft_state_free(fcp_softstate, instance);
}

/*
 * Function: fcp_kmem_cache_constructor
 *
 * Description: This function allocates and initializes the resources required
 *		to build a scsi_pkt structure for the target driver. The
 *		result of the allocation and initialization will be cached in
 *		the memory cache. As DMA resources may be allocated here,
 *		that means DMA resources will be tied up in the cache manager.
 *		This is a tradeoff that has been made for performance reasons.
 *
 * Argument:	*buf		Memory to preinitialize.
 *		*arg		FCP port structure (fcp_port).
 *		kmflags		Value passed to kmem_cache_alloc() and
 *				propagated to the constructor.
 *
 * Return Value: 0	Allocation/Initialization was successful.
 *		-1	Allocation or Initialization failed.
10225 * 10226 * 10227 * If the returned value is 0, the buffer is initialized like this: 10228 * 10229 * +================================+ 10230 * +----> | struct scsi_pkt | 10231 * | | | 10232 * | +--- | pkt_ha_private | 10233 * | | | | 10234 * | | +================================+ 10235 * | | 10236 * | | +================================+ 10237 * | +--> | struct fcp_pkt | <---------+ 10238 * | | | | 10239 * +----- | cmd_pkt | | 10240 * | cmd_fp_pkt | ---+ | 10241 * +-------->| cmd_fcp_rsp[] | | | 10242 * | +--->| cmd_fcp_cmd[] | | | 10243 * | | |--------------------------------| | | 10244 * | | | struct fc_packet | <--+ | 10245 * | | | | | 10246 * | | | pkt_ulp_private | ----------+ 10247 * | | | pkt_fca_private | -----+ 10248 * | | | pkt_data_cookie | ---+ | 10249 * | | | pkt_cmdlen | | | 10250 * | |(a) | pkt_rsplen | | | 10251 * | +----| .......... pkt_cmd ........... | ---|-|---------------+ 10252 * | (b) | pkt_cmd_cookie | ---|-|----------+ | 10253 * +---------| .......... pkt_resp .......... 
| ---|-|------+ | | 10254 * | pkt_resp_cookie | ---|-|--+ | | | 10255 * | pkt_cmd_dma | | | | | | | 10256 * | pkt_cmd_acc | | | | | | | 10257 * +================================+ | | | | | | 10258 * | dma_cookies | <--+ | | | | | 10259 * | | | | | | | 10260 * +================================+ | | | | | 10261 * | fca_private | <----+ | | | | 10262 * | | | | | | 10263 * +================================+ | | | | 10264 * | | | | 10265 * | | | | 10266 * +================================+ (d) | | | | 10267 * | fcp_resp cookies | <-------+ | | | 10268 * | | | | | 10269 * +================================+ | | | 10270 * | | | 10271 * +================================+ (d) | | | 10272 * | fcp_resp | <-----------+ | | 10273 * | (DMA resources associated) | | | 10274 * +================================+ | | 10275 * | | 10276 * | | 10277 * | | 10278 * +================================+ (c) | | 10279 * | fcp_cmd cookies | <---------------+ | 10280 * | | | 10281 * +================================+ | 10282 * | 10283 * +================================+ (c) | 10284 * | fcp_cmd | <--------------------+ 10285 * | (DMA resources associated) | 10286 * +================================+ 10287 * 10288 * (a) Only if DMA is NOT used for the FCP_CMD buffer. 10289 * (b) Only if DMA is NOT used for the FCP_RESP buffer 10290 * (c) Only if DMA is used for the FCP_CMD buffer. 
 * (d) Only if DMA is used for the FCP_RESP buffer
 */
static int
fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
    int kmflags)
{
	struct fcp_pkt	*cmd;
	struct fcp_port	*pptr;
	fc_packet_t	*fpkt;

	pptr = (struct fcp_port *)tran->tran_hba_private;
	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
	/* Start from a known state; tran_hba_len covers the whole fcp_pkt. */
	bzero(cmd, tran->tran_hba_len);

	/* Cross-link the scsi_pkt, the fcp_pkt and the fc_packet. */
	cmd->cmd_pkt = pkt;
	pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
	fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
	cmd->cmd_fp_pkt = fpkt;

	cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
	cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
	/*
	 * The FCA private area follows the fcp_pkt and the data cookie
	 * array (port_dmacookie_sz bytes) in the same allocation.
	 */
	cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);

	/* The data cookies live immediately after the fcp_pkt structure. */
	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
	    sizeof (struct fcp_pkt));

	fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
	fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;

	if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
		/*
		 * The underlying HBA doesn't want to DMA the fcp_cmd or
		 * fcp_resp. The transfer of information will be done by
		 * bcopy.
		 * The naming of the flags (that is actually a value) is
		 * unfortunate. FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
		 * DMA" but instead "NO DMA".
		 */
		fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
		fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
		fpkt->pkt_resp = cmd->cmd_fcp_rsp;
	} else {
		/*
		 * The underlying HBA will dma the fcp_cmd buffer and fcp_resp
		 * buffer. A buffer is allocated for each one through the
		 * ddi_dma_* interfaces.
		 */
		if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
			return (-1);
		}
	}

	return (0);
}

/*
 * Function: fcp_kmem_cache_destructor
 *
 * Description: Called by the destructor of the cache managed by SCSA.
 *		All the resources pre-allocated in fcp_pkt_constructor
 *		and the data also pre-initialized in fcp_pkt_constructor
 *		are freed and uninitialized here.
 *
 * Argument:	*buf		Memory to uninitialize.
 *		*arg		FCP port structure (fcp_port).
 *
 * Return Value: None
 *
 * Context:	kernel
 */
static void
fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
{
	struct fcp_pkt	*cmd;
	struct fcp_port	*pptr;

	pptr = (struct fcp_port *)(tran->tran_hba_private);
	cmd = pkt->pkt_ha_private;

	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
		/*
		 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
		 * buffer and DMA resources allocated to do so are released.
		 */
		fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
	}
}

/*
 * Function: fcp_alloc_cmd_resp
 *
 * Description: This function allocates an FCP_CMD and FCP_RESP buffer that
 *		will be DMAed by the HBA. The buffer is allocated applying
 *		the DMA requirements for the HBA. The buffers allocated will
 *		also be bound. DMA resources are allocated in the process.
 *		They will be released by fcp_free_cmd_resp().
 *
 * Argument:	*pptr	FCP port.
 *		*fpkt	fc packet for which the cmd and resp packet should be
 *			allocated.
 *		flags	Allocation flags.
 *
 * Return Value: FC_FAILURE
 *		FC_SUCCESS
 *
 * Context:	User or Kernel context only if flags == KM_SLEEP.
 *		Interrupt context if the KM_SLEEP is not specified.
10399 */ 10400 static int 10401 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags) 10402 { 10403 int rval; 10404 int cmd_len; 10405 int resp_len; 10406 ulong_t real_len; 10407 int (*cb) (caddr_t); 10408 ddi_dma_cookie_t pkt_cookie; 10409 ddi_dma_cookie_t *cp; 10410 uint32_t cnt; 10411 10412 cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT; 10413 10414 cmd_len = fpkt->pkt_cmdlen; 10415 resp_len = fpkt->pkt_rsplen; 10416 10417 ASSERT(fpkt->pkt_cmd_dma == NULL); 10418 10419 /* Allocation of a DMA handle used in subsequent calls. */ 10420 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr, 10421 cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) { 10422 return (FC_FAILURE); 10423 } 10424 10425 /* A buffer is allocated that satisfies the DMA requirements. */ 10426 rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len, 10427 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL, 10428 (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc); 10429 10430 if (rval != DDI_SUCCESS) { 10431 ddi_dma_free_handle(&fpkt->pkt_cmd_dma); 10432 return (FC_FAILURE); 10433 } 10434 10435 if (real_len < cmd_len) { 10436 ddi_dma_mem_free(&fpkt->pkt_cmd_acc); 10437 ddi_dma_free_handle(&fpkt->pkt_cmd_dma); 10438 return (FC_FAILURE); 10439 } 10440 10441 /* The buffer allocated is DMA bound. 
*/ 10442 rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL, 10443 fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT, 10444 cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt); 10445 10446 if (rval != DDI_DMA_MAPPED) { 10447 ddi_dma_mem_free(&fpkt->pkt_cmd_acc); 10448 ddi_dma_free_handle(&fpkt->pkt_cmd_dma); 10449 return (FC_FAILURE); 10450 } 10451 10452 if (fpkt->pkt_cmd_cookie_cnt > 10453 pptr->port_cmd_dma_attr.dma_attr_sgllen) { 10454 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma); 10455 ddi_dma_mem_free(&fpkt->pkt_cmd_acc); 10456 ddi_dma_free_handle(&fpkt->pkt_cmd_dma); 10457 return (FC_FAILURE); 10458 } 10459 10460 ASSERT(fpkt->pkt_cmd_cookie_cnt != 0); 10461 10462 /* 10463 * The buffer where the scatter/gather list is going to be built is 10464 * allocated. 10465 */ 10466 cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc( 10467 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie), 10468 KM_NOSLEEP); 10469 10470 if (cp == NULL) { 10471 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma); 10472 ddi_dma_mem_free(&fpkt->pkt_cmd_acc); 10473 ddi_dma_free_handle(&fpkt->pkt_cmd_dma); 10474 return (FC_FAILURE); 10475 } 10476 10477 /* 10478 * The scatter/gather list for the buffer we just allocated is built 10479 * here. 
10480 */ 10481 *cp = pkt_cookie; 10482 cp++; 10483 10484 for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) { 10485 ddi_dma_nextcookie(fpkt->pkt_cmd_dma, 10486 &pkt_cookie); 10487 *cp = pkt_cookie; 10488 } 10489 10490 ASSERT(fpkt->pkt_resp_dma == NULL); 10491 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr, 10492 cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) { 10493 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma); 10494 ddi_dma_mem_free(&fpkt->pkt_cmd_acc); 10495 ddi_dma_free_handle(&fpkt->pkt_cmd_dma); 10496 return (FC_FAILURE); 10497 } 10498 10499 rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len, 10500 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL, 10501 (caddr_t *)&fpkt->pkt_resp, &real_len, 10502 &fpkt->pkt_resp_acc); 10503 10504 if (rval != DDI_SUCCESS) { 10505 ddi_dma_free_handle(&fpkt->pkt_resp_dma); 10506 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma); 10507 ddi_dma_mem_free(&fpkt->pkt_cmd_acc); 10508 ddi_dma_free_handle(&fpkt->pkt_cmd_dma); 10509 kmem_free(fpkt->pkt_cmd_cookie, 10510 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie)); 10511 return (FC_FAILURE); 10512 } 10513 10514 if (real_len < resp_len) { 10515 ddi_dma_mem_free(&fpkt->pkt_resp_acc); 10516 ddi_dma_free_handle(&fpkt->pkt_resp_dma); 10517 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma); 10518 ddi_dma_mem_free(&fpkt->pkt_cmd_acc); 10519 ddi_dma_free_handle(&fpkt->pkt_cmd_dma); 10520 kmem_free(fpkt->pkt_cmd_cookie, 10521 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie)); 10522 return (FC_FAILURE); 10523 } 10524 10525 rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL, 10526 fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT, 10527 cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt); 10528 10529 if (rval != DDI_DMA_MAPPED) { 10530 ddi_dma_mem_free(&fpkt->pkt_resp_acc); 10531 ddi_dma_free_handle(&fpkt->pkt_resp_dma); 10532 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma); 10533 ddi_dma_mem_free(&fpkt->pkt_cmd_acc); 10534 
ddi_dma_free_handle(&fpkt->pkt_cmd_dma); 10535 kmem_free(fpkt->pkt_cmd_cookie, 10536 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie)); 10537 return (FC_FAILURE); 10538 } 10539 10540 if (fpkt->pkt_resp_cookie_cnt > 10541 pptr->port_resp_dma_attr.dma_attr_sgllen) { 10542 ddi_dma_mem_free(&fpkt->pkt_resp_acc); 10543 ddi_dma_free_handle(&fpkt->pkt_resp_dma); 10544 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma); 10545 ddi_dma_mem_free(&fpkt->pkt_cmd_acc); 10546 ddi_dma_free_handle(&fpkt->pkt_cmd_dma); 10547 kmem_free(fpkt->pkt_cmd_cookie, 10548 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie)); 10549 return (FC_FAILURE); 10550 } 10551 10552 ASSERT(fpkt->pkt_resp_cookie_cnt != 0); 10553 10554 cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc( 10555 fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie), 10556 KM_NOSLEEP); 10557 10558 if (cp == NULL) { 10559 ddi_dma_mem_free(&fpkt->pkt_resp_acc); 10560 ddi_dma_free_handle(&fpkt->pkt_resp_dma); 10561 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma); 10562 ddi_dma_mem_free(&fpkt->pkt_cmd_acc); 10563 ddi_dma_free_handle(&fpkt->pkt_cmd_dma); 10564 kmem_free(fpkt->pkt_cmd_cookie, 10565 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie)); 10566 return (FC_FAILURE); 10567 } 10568 10569 *cp = pkt_cookie; 10570 cp++; 10571 10572 for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) { 10573 ddi_dma_nextcookie(fpkt->pkt_resp_dma, 10574 &pkt_cookie); 10575 *cp = pkt_cookie; 10576 } 10577 10578 return (FC_SUCCESS); 10579 } 10580 10581 /* 10582 * Function: fcp_free_cmd_resp 10583 * 10584 * Description: This function releases the FCP_CMD and FCP_RESP buffer 10585 * allocated by fcp_alloc_cmd_resp() and all the resources 10586 * associated with them. That includes the DMA resources and the 10587 * buffer allocated for the cookies of each one of them. 10588 * 10589 * Argument: *pptr FCP port context. 10590 * *fpkt fc packet containing the cmd and resp packet 10591 * to be released. 
 *
 * Return Value: None
 *
 * Context:	Interrupt, User and Kernel context.
 */
/* ARGSUSED */
static void
fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
{
	/* Both buffers are expected to have been allocated and bound. */
	ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);

	if (fpkt->pkt_resp_dma) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
	}

	if (fpkt->pkt_resp_cookie) {
		kmem_free(fpkt->pkt_resp_cookie,
		    fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
		fpkt->pkt_resp_cookie = NULL;
	}

	if (fpkt->pkt_cmd_dma) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
	}

	if (fpkt->pkt_cmd_cookie) {
		kmem_free(fpkt->pkt_cmd_cookie,
		    fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
		fpkt->pkt_cmd_cookie = NULL;
	}
}


/*
 * called by the transport to do our own target initialization
 *
 * can acquire and release the global mutex
 */
/* ARGSUSED */
static int
fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	int			*words;
	uchar_t			*bytes;
	uint_t			nbytes;
	uint_t			nwords;
	struct fcp_tgt		*ptgt;
	struct fcp_lun		*plun;
	struct fcp_port		*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
	    FCP_BUF_LEVEL_8, 0,
	    "fcp_phys_tgt_init: called for %s (instance %d)",
	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));

	/* get our port WWN property */
	bytes = NULL;
	if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
	    DDI_PROP_DONTPASS |
    DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
		/* no port WWN property */
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d): bytes=%p nbytes=%x",
		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
		    nbytes);

		if (bytes != NULL) {
			ddi_prop_free(bytes);
		}

		return (DDI_NOT_WELL_FORMED);
	}

	/* get the LUN number property; bytes must be freed on every exit */
	words = NULL;
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, tgt_dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    LUN_PROP, &words, &nwords) != DDI_PROP_SUCCESS) {
		ASSERT(bytes != NULL);

		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		ddi_prop_free(bytes);

		return (DDI_NOT_WELL_FORMED);
	}

	if (nwords == 0) {
		ddi_prop_free(bytes);
		ddi_prop_free(words);
		return (DDI_NOT_WELL_FORMED);
	}

	ASSERT(bytes != NULL && words != NULL);

	/* look up the LUN by port WWN and LUN number */
	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, *words)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		ddi_prop_free(bytes);
		ddi_prop_free(words);

		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == (uint16_t)*words);

	ddi_prop_free(bytes);
	ddi_prop_free(words);

	ptgt = plun->lun_tgt;

	/* bind the LUN to this tran instance */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	hba_tran->tran_tgt_private = plun;
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_tran = hba_tran;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}

/*
 * Target-init path for mpxio-managed (virtual) children. Properties come
 * from the mdi path info node rather than the devinfo node.
 */
/*ARGSUSED*/
static int
fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	int			words;
	uchar_t			*bytes;
	uint_t			nbytes;
	struct fcp_tgt		*ptgt;
	struct fcp_lun		*plun;
	struct fcp_port		*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;
	child_info_t		*cip;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);

	cip = (child_info_t *)sd->sd_private;
	if (cip == NULL) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		return (DDI_NOT_WELL_FORMED);
	}

	/* get our port WWN property */
	bytes = NULL;
	if ((mdi_prop_lookup_byte_array(PIP(cip), PORT_WWN_PROP, &bytes,
	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
		if (bytes) {
			(void) mdi_prop_free(bytes);
		}
		return (DDI_NOT_WELL_FORMED);
	}

	words = 0;
	if (mdi_prop_lookup_int(PIP(cip), LUN_PROP, &words) !=
	    DDI_PROP_SUCCESS) {
		ASSERT(bytes != NULL);

		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		(void) mdi_prop_free(bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	ASSERT(bytes != NULL);

	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, words)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		(void) mdi_prop_free(bytes);
		(void) mdi_prop_free(&words);

		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == (uint16_t)words);

	(void) mdi_prop_free(bytes);
	(void) mdi_prop_free(&words);

	ptgt = plun->lun_tgt;

	/* bind the LUN to this tran instance */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	hba_tran->tran_tgt_private = plun;
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_tran = hba_tran;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}


/*
 * called by the transport to do our own target initialization
 *
 * can acquire and release the global mutex
 */
/* ARGSUSED */
static int
fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct fcp_port	*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;
	int		rval;

	ASSERT(pptr != NULL);

	/*
	 * Child node is getting initialized. Look at the mpxio component
	 * type on the child device to see if this device is mpxio managed
	 * or not.
	 */
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
	} else {
		rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
	}

	return (rval);
}


/* ARGSUSED */
static void
fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct fcp_lun	*plun = hba_tran->tran_tgt_private;
	struct fcp_tgt	*ptgt;

	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));

	if (plun == NULL) {
		return;
	}
	ptgt = plun->lun_tgt;

	ASSERT(ptgt != NULL);

	/* undo the binding done by fcp_{phys,virt}_tgt_init() */
	mutex_enter(&ptgt->tgt_mutex);
	ASSERT(plun->lun_tgt_count > 0);

	if (--plun->lun_tgt_count == 0) {
		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
	}
	plun->lun_tran = NULL;
	mutex_exit(&ptgt->tgt_mutex);
}

/*
 * Function: fcp_scsi_start
 *
 * Description: This function is called by the target driver to request a
 *		command to be sent.
 *
 * Argument:	*ap		SCSI address of the device.
 *		*pkt		SCSI packet containing the cmd to send.
 *
 * Return Value: TRAN_ACCEPT
 *		TRAN_BUSY
 *		TRAN_BADPKT
 *		TRAN_FATAL_ERROR
 */
static int
fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct fcp_port	*pptr = ADDR2FCP(ap);
	struct fcp_lun	*plun = ADDR2LUN(ap);
	struct fcp_pkt	*cmd = PKT2CMD(pkt);
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	int		rval;

	/* ensure command isn't already issued */
	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_9, 0,
	    "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);

	/*
	 * It is strange that we enter the fcp_port mutex and the target
	 * mutex to check the lun state (which has a mutex of its own).
	 */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * If the device is offline and is not in the process of coming
	 * online, fail the request.
	 */

	if ((plun->lun_state & FCP_LUN_OFFLINE) &&
	    !(plun->lun_state & FCP_LUN_ONLINING)) {
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
			pkt->pkt_reason = CMD_DEV_GONE;
		}

		return (TRAN_FATAL_ERROR);
	}
	cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;

	/*
	 * If we are suspended, kernel is trying to dump, so don't
	 * block, fail or defer requests - send them down right away.
	 * NOTE: If we are in panic (i.e. trying to dump), we can't
	 * assume we have been suspended. There is hardware such as
	 * the v880 that doesn't do PM. Thus, the check for
	 * ddi_in_panic.
	 *
	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
	 * of changing. So, if we can queue the packet, do it. Eventually,
	 * either the device will have gone away or changed and we can fail
	 * the request, or we can proceed if the device didn't change.
	 *
	 * If the pd in the target or the packet is NULL it's probably
	 * because the device has gone away, we allow the request to be
	 * put on the internal queue here in case the device comes back within
	 * the offline timeout. fctl will fix up the pd's if the tgt_pd_handle
	 * has gone NULL, while fcp deals cases where pkt_pd is NULL. pkt_pd
	 * could be NULL because the device was disappearing during or since
	 * packet initialization.
	 */

	if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
	    FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
	    (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
	    (ptgt->tgt_pd_handle == NULL) ||
	    (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
		/*
		 * If ((LUN is busy AND
		 *	LUN not suspended AND
		 *	The system is not in panic state) OR
		 *	(The port is coming up))
		 *
		 * We check to see if the any of the flags FLAG_NOINTR or
		 * FLAG_NOQUEUE is set. If one of them is set the value
		 * returned will be TRAN_BUSY. If not, the request is queued.
		 */
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		/* see if using interrupts is allowed (so queueing'll work) */
		if (pkt->pkt_flags & FLAG_NOINTR) {
			pkt->pkt_resid = 0;
			return (TRAN_BUSY);
		}
		if (pkt->pkt_flags & FLAG_NOQUEUE) {
			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_9, 0,
			    "fcp_scsi_start: lun busy for pkt %p", pkt);
			return (TRAN_BUSY);
		}
#ifdef	DEBUG
		mutex_enter(&pptr->port_pkt_mutex);
		pptr->port_npkts++;
		mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

		/* got queue up the pkt for later */
		fcp_queue_pkt(pptr, cmd);
		return (TRAN_ACCEPT);
	}
	cmd->cmd_state = FCP_PKT_ISSUED;

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * Now that we released the mutexes, what was protected by them can
	 * change.
	 */

	/*
	 * If there is a reconfiguration in progress, wait for it to complete.
	 */
	fcp_reconfig_wait(pptr);

	/* absolute deadline for the watchdog; 0 means no timeout */
	cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
	    pkt->pkt_time : 0;

	/* prepare the packet */

	fcp_prepare_pkt(pptr, cmd, plun);

	if (cmd->cmd_pkt->pkt_time) {
		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
	} else {
		/* default timeout in seconds (5 * 60 * 60 = 5 hours) */
		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
	}

	/*
	 * if interrupts aren't allowed (e.g. at dump time) then we'll
	 * have to do polled I/O
	 */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		cmd->cmd_state &= ~FCP_PKT_ISSUED;
		return (fcp_dopoll(pptr, cmd));
	}

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts++;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
	if (rval == FC_SUCCESS) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_9, 0,
		    "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
		return (TRAN_ACCEPT);
	}

	cmd->cmd_state = FCP_PKT_IDLE;

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	/*
	 * For lack of clearer definitions, choose
	 * between TRAN_BUSY and TRAN_FATAL_ERROR.
	 */

	if (rval == FC_TRAN_BUSY) {
		pkt->pkt_resid = 0;
		rval = TRAN_BUSY;
	} else {
		mutex_enter(&ptgt->tgt_mutex);
		if (plun->lun_state & FCP_LUN_OFFLINE) {
			child_info_t	*cip;

			mutex_enter(&plun->lun_mutex);
			cip = plun->lun_cip;
			mutex_exit(&plun->lun_mutex);

			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fcp_transport failed 2 for %x: %x; dip=%p",
			    plun->lun_tgt->tgt_d_id, rval, cip);

			rval = TRAN_FATAL_ERROR;
		} else {
			if (pkt->pkt_flags & FLAG_NOQUEUE) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_9, 0,
				    "fcp_scsi_start: FC_BUSY for pkt %p",
				    pkt);
				rval = TRAN_BUSY;
			} else {
				/* retry later: queue on the internal queue */
				rval = TRAN_ACCEPT;
				fcp_queue_pkt(pptr, cmd);
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
	}

	return (rval);
}

/*
 * called by the transport to abort a packet
 */
/*ARGSUSED*/
static int 11114 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 11115 { 11116 int tgt_cnt; 11117 struct fcp_port *pptr = ADDR2FCP(ap); 11118 struct fcp_lun *plun = ADDR2LUN(ap); 11119 struct fcp_tgt *ptgt = plun->lun_tgt; 11120 11121 if (pkt == NULL) { 11122 if (ptgt) { 11123 mutex_enter(&ptgt->tgt_mutex); 11124 tgt_cnt = ptgt->tgt_change_cnt; 11125 mutex_exit(&ptgt->tgt_mutex); 11126 fcp_abort_all(pptr, ptgt, plun, tgt_cnt); 11127 return (TRUE); 11128 } 11129 } 11130 return (FALSE); 11131 } 11132 11133 11134 /* 11135 * Perform reset 11136 */ 11137 int 11138 fcp_scsi_reset(struct scsi_address *ap, int level) 11139 { 11140 int rval = 0; 11141 struct fcp_port *pptr = ADDR2FCP(ap); 11142 struct fcp_lun *plun = ADDR2LUN(ap); 11143 struct fcp_tgt *ptgt = plun->lun_tgt; 11144 11145 if (level == RESET_ALL) { 11146 if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) { 11147 rval = 1; 11148 } 11149 } else if (level == RESET_TARGET || level == RESET_LUN) { 11150 /* 11151 * If we are in the middle of discovery, return 11152 * SUCCESS as this target will be rediscovered 11153 * anyway 11154 */ 11155 mutex_enter(&ptgt->tgt_mutex); 11156 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) { 11157 mutex_exit(&ptgt->tgt_mutex); 11158 return (1); 11159 } 11160 mutex_exit(&ptgt->tgt_mutex); 11161 11162 if (fcp_reset_target(ap, level) == FC_SUCCESS) { 11163 rval = 1; 11164 } 11165 } 11166 return (rval); 11167 } 11168 11169 11170 /* 11171 * called by the framework to get a SCSI capability 11172 */ 11173 static int 11174 fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom) 11175 { 11176 return (fcp_commoncap(ap, cap, 0, whom, 0)); 11177 } 11178 11179 11180 /* 11181 * called by the framework to set a SCSI capability 11182 */ 11183 static int 11184 fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom) 11185 { 11186 return (fcp_commoncap(ap, cap, value, whom, 1)); 11187 } 11188 11189 /* 11190 * Function: fcp_pkt_setup 11191 * 11192 * 
Description: This function sets up the scsi_pkt structure passed by the 11193 * caller. This function assumes fcp_pkt_constructor has been 11194 * called previously for the packet passed by the caller. If 11195 * successful this call will have the following results: 11196 * 11197 * - The resources needed that will be constant through out 11198 * the whole transaction are allocated. 11199 * - The fields that will be constant through out the whole 11200 * transaction are initialized. 11201 * - The scsi packet will be linked to the LUN structure 11202 * addressed by the transaction. 11203 * 11204 * Argument: 11205 * *pkt Pointer to a scsi_pkt structure. 11206 * callback 11207 * arg 11208 * 11209 * Return Value: 0 Success 11210 * !0 Failure 11211 * 11212 * Context: Kernel context or interrupt context 11213 */ 11214 /* ARGSUSED */ 11215 static int 11216 fcp_pkt_setup(struct scsi_pkt *pkt, 11217 int (*callback)(caddr_t arg), 11218 caddr_t arg) 11219 { 11220 struct fcp_pkt *cmd; 11221 struct fcp_port *pptr; 11222 struct fcp_lun *plun; 11223 struct fcp_tgt *ptgt; 11224 int kf; 11225 fc_packet_t *fpkt; 11226 fc_frame_hdr_t *hp; 11227 11228 pptr = ADDR2FCP(&pkt->pkt_address); 11229 plun = ADDR2LUN(&pkt->pkt_address); 11230 ptgt = plun->lun_tgt; 11231 11232 cmd = (struct fcp_pkt *)pkt->pkt_ha_private; 11233 fpkt = cmd->cmd_fp_pkt; 11234 11235 /* 11236 * this request is for dma allocation only 11237 */ 11238 /* 11239 * First step of fcp_scsi_init_pkt: pkt allocation 11240 * We determine if the caller is willing to wait for the 11241 * resources. 11242 */ 11243 kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP; 11244 11245 /* 11246 * Selective zeroing of the pkt. 
11247 */ 11248 cmd->cmd_back = NULL; 11249 cmd->cmd_next = NULL; 11250 11251 /* 11252 * Zero out fcp command 11253 */ 11254 bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd)); 11255 11256 cmd->cmd_state = FCP_PKT_IDLE; 11257 11258 fpkt = cmd->cmd_fp_pkt; 11259 fpkt->pkt_data_acc = NULL; 11260 11261 mutex_enter(&ptgt->tgt_mutex); 11262 fpkt->pkt_pd = ptgt->tgt_pd_handle; 11263 11264 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf) 11265 != FC_SUCCESS) { 11266 mutex_exit(&ptgt->tgt_mutex); 11267 return (-1); 11268 } 11269 11270 mutex_exit(&ptgt->tgt_mutex); 11271 11272 /* Fill in the Fabric Channel Header */ 11273 hp = &fpkt->pkt_cmd_fhdr; 11274 hp->r_ctl = R_CTL_COMMAND; 11275 hp->rsvd = 0; 11276 hp->type = FC_TYPE_SCSI_FCP; 11277 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ; 11278 hp->seq_id = 0; 11279 hp->df_ctl = 0; 11280 hp->seq_cnt = 0; 11281 hp->ox_id = 0xffff; 11282 hp->rx_id = 0xffff; 11283 hp->ro = 0; 11284 11285 /* 11286 * A doubly linked list (cmd_forw, cmd_back) is built 11287 * out of every allocated packet on a per-lun basis 11288 * 11289 * The packets are maintained in the list so as to satisfy 11290 * scsi_abort() requests. At present (which is unlikely to 11291 * change in the future) nobody performs a real scsi_abort 11292 * in the SCSI target drivers (as they don't keep the packets 11293 * after doing scsi_transport - so they don't know how to 11294 * abort a packet other than sending a NULL to abort all 11295 * outstanding packets) 11296 */ 11297 mutex_enter(&plun->lun_mutex); 11298 if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) { 11299 plun->lun_pkt_head->cmd_back = cmd; 11300 } else { 11301 plun->lun_pkt_tail = cmd; 11302 } 11303 plun->lun_pkt_head = cmd; 11304 mutex_exit(&plun->lun_mutex); 11305 return (0); 11306 } 11307 11308 /* 11309 * Function: fcp_pkt_teardown 11310 * 11311 * Description: This function releases a scsi_pkt structure and all the 11312 * resources attached to it. 
 *
 * Argument:	*pkt		Pointer to a scsi_pkt structure.
 *
 * Return Value: None
 *
 * Context:	User, Kernel or Interrupt context.
 */
static void
fcp_pkt_teardown(struct scsi_pkt *pkt)
{
	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
	struct fcp_lun *plun = ADDR2LUN(&pkt->pkt_address);
	struct fcp_pkt *cmd = (struct fcp_pkt *)pkt->pkt_ha_private;

	/*
	 * Remove the packet from the per-lun list
	 * (the doubly linked list built in fcp_pkt_setup).
	 */
	mutex_enter(&plun->lun_mutex);
	if (cmd->cmd_back) {
		/* somewhere in the middle or at the tail: unlink backward */
		ASSERT(cmd != plun->lun_pkt_head);
		cmd->cmd_back->cmd_forw = cmd->cmd_forw;
	} else {
		/* no predecessor: this packet is the list head */
		ASSERT(cmd == plun->lun_pkt_head);
		plun->lun_pkt_head = cmd->cmd_forw;
	}

	if (cmd->cmd_forw) {
		/* unlink forward */
		cmd->cmd_forw->cmd_back = cmd->cmd_back;
	} else {
		/* no successor: this packet is the list tail */
		ASSERT(cmd == plun->lun_pkt_tail);
		plun->lun_pkt_tail = cmd->cmd_back;
	}

	mutex_exit(&plun->lun_mutex);

	/* release the transport-layer resources bound by fc_ulp_init_packet */
	(void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
}

/*
 * Routine for reset notification setup, to register or cancel.
 * This function is called by SCSA
 */
/*ARGSUSED*/
static int
fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	struct fcp_port *pptr = ADDR2FCP(ap);

	/* Delegate list management to the common SCSA helper. */
	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &pptr->port_mutex, &pptr->port_reset_notify_listf));
}


/*
 * Called by the framework to look up the NDI event cookie for "name"
 * on this port's event handle.
 */
static int
fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
	struct fcp_port *pptr = fcp_dip2port(dip);

	if (pptr == NULL) {
		return (DDI_FAILURE);
	}

	return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
	    event_cookiep, NDI_EVENT_NOPASS));
}


/*
 * Called by the framework to register an event callback for rdip.
 */
static int
fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
    ddi_eventcookie_t eventid, void (*callback)(), void *arg,
    ddi_callback_id_t *cb_id)
{
	struct fcp_port *pptr = fcp_dip2port(dip);

	if (pptr == NULL) {
		return (DDI_FAILURE);
	}

	return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
	    eventid, callback, arg, NDI_SLEEP, cb_id));
}


/*
 * Called by the framework to remove a previously registered event callback.
 */
static int
fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
{

	struct fcp_port *pptr = fcp_dip2port(dip);

	if (pptr == NULL) {
		return (DDI_FAILURE);
	}
	return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
}


/*
 * called by the transport to post an event
 */
static int
fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
    ddi_eventcookie_t eventid, void *impldata)
{
	struct fcp_port *pptr = fcp_dip2port(dip);

	if (pptr == NULL) {
		return (DDI_FAILURE);
	}

	return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
	    eventid, impldata));
}


/*
 * A target in many cases in Fibre Channel has a one to one relation
 * with a port identifier (which is also known as D_ID and also as AL_PA
 * in private Loop). On Fibre Channel-to-SCSI bridge boxes a target reset
 * will most likely result in resetting all LUNs (which means a reset will
 * occur on all the SCSI devices connected at the other end of the bridge).
 * That is the latest favorite topic for discussion, for, one can debate as
 * hot as one likes and come up with arguably a best solution to one's
 * satisfaction
 *
 * To stay on track and not digress much, here are the problems stated
 * briefly:
 *
 *	SCSA doesn't define RESET_LUN, It defines RESET_TARGET, but the
 *	target drivers use RESET_TARGET even if their instance is on a
 *	LUN. Doesn't that sound a bit broken ?
 *
 *	FCP SCSI (the current spec) only defines RESET TARGET in the
 *	control fields of an FCP_CMND structure. It should have been
 *	fixed right there, giving flexibility to the initiators to
 *	minimize havoc that could be caused by resetting a target.
11450 */ 11451 static int 11452 fcp_reset_target(struct scsi_address *ap, int level) 11453 { 11454 int rval = FC_FAILURE; 11455 char lun_id[25]; 11456 struct fcp_port *pptr = ADDR2FCP(ap); 11457 struct fcp_lun *plun = ADDR2LUN(ap); 11458 struct fcp_tgt *ptgt = plun->lun_tgt; 11459 struct scsi_pkt *pkt; 11460 struct fcp_pkt *cmd; 11461 struct fcp_rsp *rsp; 11462 uint32_t tgt_cnt; 11463 struct fcp_rsp_info *rsp_info; 11464 struct fcp_reset_elem *p; 11465 int bval; 11466 11467 if ((p = kmem_alloc(sizeof (struct fcp_reset_elem), 11468 KM_NOSLEEP)) == NULL) { 11469 return (rval); 11470 } 11471 11472 mutex_enter(&ptgt->tgt_mutex); 11473 if (level == RESET_TARGET) { 11474 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) { 11475 mutex_exit(&ptgt->tgt_mutex); 11476 kmem_free(p, sizeof (struct fcp_reset_elem)); 11477 return (rval); 11478 } 11479 fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY); 11480 (void) strcpy(lun_id, " "); 11481 } else { 11482 if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) { 11483 mutex_exit(&ptgt->tgt_mutex); 11484 kmem_free(p, sizeof (struct fcp_reset_elem)); 11485 return (rval); 11486 } 11487 fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY); 11488 11489 (void) sprintf(lun_id, ", LUN=%d", plun->lun_num); 11490 } 11491 tgt_cnt = ptgt->tgt_change_cnt; 11492 11493 mutex_exit(&ptgt->tgt_mutex); 11494 11495 if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0, 11496 0, 0, NULL, 0)) == NULL) { 11497 kmem_free(p, sizeof (struct fcp_reset_elem)); 11498 mutex_enter(&ptgt->tgt_mutex); 11499 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY); 11500 mutex_exit(&ptgt->tgt_mutex); 11501 return (rval); 11502 } 11503 pkt->pkt_time = FCP_POLL_TIMEOUT; 11504 11505 /* fill in cmd part of packet */ 11506 cmd = PKT2CMD(pkt); 11507 if (level == RESET_TARGET) { 11508 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1; 11509 } else { 11510 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1; 11511 } 11512 cmd->cmd_fp_pkt->pkt_comp = NULL; 11513 cmd->cmd_pkt->pkt_flags |= 
FLAG_NOINTR; 11514 11515 /* prepare a packet for transport */ 11516 fcp_prepare_pkt(pptr, cmd, plun); 11517 11518 if (cmd->cmd_pkt->pkt_time) { 11519 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time; 11520 } else { 11521 cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60; 11522 } 11523 11524 (void) fc_ulp_busy_port(pptr->port_fp_handle); 11525 bval = fcp_dopoll(pptr, cmd); 11526 fc_ulp_idle_port(pptr->port_fp_handle); 11527 11528 /* submit the packet */ 11529 if (bval == TRAN_ACCEPT) { 11530 int error = 3; 11531 11532 rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp; 11533 rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp + 11534 sizeof (struct fcp_rsp)); 11535 11536 if (rsp->fcp_u.fcp_status.rsp_len_set) { 11537 if (fcp_validate_fcp_response(rsp, pptr) == 11538 FC_SUCCESS) { 11539 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) { 11540 FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp + 11541 sizeof (struct fcp_rsp), rsp_info, 11542 cmd->cmd_fp_pkt->pkt_resp_acc, 11543 sizeof (struct fcp_rsp_info)); 11544 } 11545 if (rsp_info->rsp_code == FCP_NO_FAILURE) { 11546 rval = FC_SUCCESS; 11547 error = 0; 11548 } else { 11549 error = 1; 11550 } 11551 } else { 11552 error = 2; 11553 } 11554 } 11555 11556 switch (error) { 11557 case 0: 11558 fcp_log(CE_WARN, pptr->port_dip, 11559 "!FCP: WWN 0x%08x%08x %s reset successfully", 11560 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]), 11561 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id); 11562 break; 11563 11564 case 1: 11565 fcp_log(CE_WARN, pptr->port_dip, 11566 "!FCP: Reset to WWN 0x%08x%08x %s failed," 11567 " response code=%x", 11568 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]), 11569 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id, 11570 rsp_info->rsp_code); 11571 break; 11572 11573 case 2: 11574 fcp_log(CE_WARN, pptr->port_dip, 11575 "!FCP: Reset to WWN 0x%08x%08x %s failed," 11576 " Bad FCP response values: rsvd1=%x," 11577 " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x," 11578 " rsplen=%x, senselen=%x", 11579 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]), 11580 
*((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id, 11581 rsp->reserved_0, rsp->reserved_1, 11582 rsp->fcp_u.fcp_status.reserved_0, 11583 rsp->fcp_u.fcp_status.reserved_1, 11584 rsp->fcp_response_len, rsp->fcp_sense_len); 11585 break; 11586 11587 default: 11588 fcp_log(CE_WARN, pptr->port_dip, 11589 "!FCP: Reset to WWN 0x%08x%08x %s failed", 11590 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]), 11591 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id); 11592 break; 11593 } 11594 } 11595 scsi_destroy_pkt(pkt); 11596 11597 if (rval == FC_FAILURE) { 11598 mutex_enter(&ptgt->tgt_mutex); 11599 if (level == RESET_TARGET) { 11600 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY); 11601 } else { 11602 fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY); 11603 } 11604 mutex_exit(&ptgt->tgt_mutex); 11605 kmem_free(p, sizeof (struct fcp_reset_elem)); 11606 return (rval); 11607 } 11608 11609 mutex_enter(&pptr->port_mutex); 11610 if (level == RESET_TARGET) { 11611 p->tgt = ptgt; 11612 p->lun = NULL; 11613 } else { 11614 p->tgt = NULL; 11615 p->lun = plun; 11616 } 11617 p->tgt = ptgt; 11618 p->tgt_cnt = tgt_cnt; 11619 p->timeout = fcp_watchdog_time + FCP_RESET_DELAY; 11620 p->next = pptr->port_reset_list; 11621 pptr->port_reset_list = p; 11622 11623 FCP_TRACE(fcp_logq, pptr->port_instbuf, 11624 fcp_trace, FCP_BUF_LEVEL_3, 0, 11625 "Notify ssd of the reset to reinstate the reservations"); 11626 11627 scsi_hba_reset_notify_callback(&pptr->port_mutex, 11628 &pptr->port_reset_notify_listf); 11629 11630 mutex_exit(&pptr->port_mutex); 11631 11632 return (rval); 11633 } 11634 11635 11636 /* 11637 * called by fcp_getcap and fcp_setcap to get and set (respectively) 11638 * SCSI capabilities 11639 */ 11640 /* ARGSUSED */ 11641 static int 11642 fcp_commoncap(struct scsi_address *ap, char *cap, 11643 int val, int tgtonly, int doset) 11644 { 11645 struct fcp_port *pptr = ADDR2FCP(ap); 11646 struct fcp_lun *plun = ADDR2LUN(ap); 11647 struct fcp_tgt *ptgt = plun->lun_tgt; 11648 int cidx; 11649 int 
rval = FALSE; 11650 11651 if (cap == (char *)0) { 11652 FCP_TRACE(fcp_logq, pptr->port_instbuf, 11653 fcp_trace, FCP_BUF_LEVEL_3, 0, 11654 "fcp_commoncap: invalid arg"); 11655 return (rval); 11656 } 11657 11658 if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) { 11659 return (UNDEFINED); 11660 } 11661 11662 /* 11663 * Process setcap request. 11664 */ 11665 if (doset) { 11666 /* 11667 * At present, we can only set binary (0/1) values 11668 */ 11669 switch (cidx) { 11670 case SCSI_CAP_ARQ: 11671 if (val == 0) { 11672 rval = FALSE; 11673 } else { 11674 rval = TRUE; 11675 } 11676 break; 11677 11678 case SCSI_CAP_LUN_RESET: 11679 if (val) { 11680 plun->lun_cap |= FCP_LUN_CAP_RESET; 11681 } else { 11682 plun->lun_cap &= ~FCP_LUN_CAP_RESET; 11683 } 11684 rval = TRUE; 11685 break; 11686 11687 case SCSI_CAP_SECTOR_SIZE: 11688 rval = TRUE; 11689 break; 11690 default: 11691 FCP_TRACE(fcp_logq, pptr->port_instbuf, 11692 fcp_trace, FCP_BUF_LEVEL_4, 0, 11693 "fcp_setcap: unsupported %d", cidx); 11694 rval = UNDEFINED; 11695 break; 11696 } 11697 11698 FCP_TRACE(fcp_logq, pptr->port_instbuf, 11699 fcp_trace, FCP_BUF_LEVEL_5, 0, 11700 "set cap: cap=%s, val/tgtonly/doset/rval = " 11701 "0x%x/0x%x/0x%x/%d", 11702 cap, val, tgtonly, doset, rval); 11703 11704 } else { 11705 /* 11706 * Process getcap request. 11707 */ 11708 switch (cidx) { 11709 case SCSI_CAP_DMA_MAX: 11710 rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer; 11711 11712 /* 11713 * Need to make an adjustment qlc is uint_t 64 11714 * st is int, so we will make the adjustment here 11715 * being as nobody wants to touch this. 11716 * It still leaves the max single block length 11717 * of 2 gig. This should last . 
11718 */ 11719 11720 if (rval == -1) { 11721 rval = MAX_INT_DMA; 11722 } 11723 11724 break; 11725 11726 case SCSI_CAP_INITIATOR_ID: 11727 rval = pptr->port_id; 11728 break; 11729 11730 case SCSI_CAP_ARQ: 11731 case SCSI_CAP_RESET_NOTIFICATION: 11732 case SCSI_CAP_TAGGED_QING: 11733 rval = TRUE; 11734 break; 11735 11736 case SCSI_CAP_SCSI_VERSION: 11737 rval = 3; 11738 break; 11739 11740 case SCSI_CAP_INTERCONNECT_TYPE: 11741 if (FC_TOP_EXTERNAL(pptr->port_topology) || 11742 (ptgt->tgt_hard_addr == 0)) { 11743 rval = INTERCONNECT_FABRIC; 11744 } else { 11745 rval = INTERCONNECT_FIBRE; 11746 } 11747 break; 11748 11749 case SCSI_CAP_LUN_RESET: 11750 rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ? 11751 TRUE : FALSE; 11752 break; 11753 11754 default: 11755 FCP_TRACE(fcp_logq, pptr->port_instbuf, 11756 fcp_trace, FCP_BUF_LEVEL_4, 0, 11757 "fcp_getcap: unsupported %d", cidx); 11758 rval = UNDEFINED; 11759 break; 11760 } 11761 11762 FCP_TRACE(fcp_logq, pptr->port_instbuf, 11763 fcp_trace, FCP_BUF_LEVEL_8, 0, 11764 "get cap: cap=%s, val/tgtonly/doset/rval = " 11765 "0x%x/0x%x/0x%x/%d", 11766 cap, val, tgtonly, doset, rval); 11767 } 11768 11769 return (rval); 11770 } 11771 11772 /* 11773 * called by the transport to get the port-wwn and lun 11774 * properties of this device, and to create a "name" based on them 11775 * 11776 * these properties don't exist on sun4m 11777 * 11778 * return 1 for success else return 0 11779 */ 11780 /* ARGSUSED */ 11781 static int 11782 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len) 11783 { 11784 int i; 11785 int *lun; 11786 int numChars; 11787 uint_t nlun; 11788 uint_t count; 11789 uint_t nbytes; 11790 uchar_t *bytes; 11791 uint16_t lun_num; 11792 uint32_t tgt_id; 11793 char **conf_wwn; 11794 char tbuf[(FC_WWN_SIZE << 1) + 1]; 11795 uchar_t barray[FC_WWN_SIZE]; 11796 dev_info_t *tgt_dip; 11797 struct fcp_tgt *ptgt; 11798 struct fcp_port *pptr; 11799 struct fcp_lun *plun; 11800 11801 ASSERT(sd != NULL); 11802 ASSERT(name != 
NULL); 11803 11804 tgt_dip = sd->sd_dev; 11805 pptr = ddi_get_soft_state(fcp_softstate, 11806 ddi_get_instance(ddi_get_parent(tgt_dip))); 11807 if (pptr == NULL) { 11808 return (0); 11809 } 11810 11811 ASSERT(tgt_dip != NULL); 11812 11813 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev, 11814 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 11815 LUN_PROP, &lun, &nlun) != DDI_SUCCESS) { 11816 name[0] = '\0'; 11817 return (0); 11818 } 11819 11820 if (nlun == 0) { 11821 ddi_prop_free(lun); 11822 return (0); 11823 } 11824 11825 lun_num = lun[0]; 11826 ddi_prop_free(lun); 11827 11828 /* 11829 * Lookup for .conf WWN property 11830 */ 11831 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip, 11832 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP, 11833 &conf_wwn, &count) == DDI_PROP_SUCCESS) { 11834 ASSERT(count >= 1); 11835 11836 fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE); 11837 ddi_prop_free(conf_wwn); 11838 mutex_enter(&pptr->port_mutex); 11839 if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) { 11840 mutex_exit(&pptr->port_mutex); 11841 return (0); 11842 } 11843 ptgt = plun->lun_tgt; 11844 mutex_exit(&pptr->port_mutex); 11845 11846 (void) ndi_prop_update_byte_array(DDI_DEV_T_NONE, 11847 tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE); 11848 11849 if (!FC_TOP_EXTERNAL(pptr->port_topology) && 11850 ptgt->tgt_hard_addr != 0) { 11851 tgt_id = (uint32_t)fcp_alpa_to_switch[ 11852 ptgt->tgt_hard_addr]; 11853 } else { 11854 tgt_id = ptgt->tgt_d_id; 11855 } 11856 11857 (void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip, 11858 TARGET_PROP, tgt_id); 11859 } 11860 11861 /* get the our port-wwn property */ 11862 bytes = NULL; 11863 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip, 11864 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes, 11865 &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) { 11866 if (bytes != NULL) { 11867 ddi_prop_free(bytes); 11868 } 11869 return (0); 11870 } 11871 11872 for (i = 0; i < FC_WWN_SIZE; i++) { 11873 
(void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i)); 11874 } 11875 11876 /* Stick in the address of the form "wWWN,LUN" */ 11877 numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num); 11878 11879 ASSERT(numChars < len); 11880 if (numChars >= len) { 11881 fcp_log(CE_WARN, pptr->port_dip, 11882 "!fcp_scsi_get_name: " 11883 "name parameter length too small, it needs to be %d", 11884 numChars+1); 11885 } 11886 11887 ddi_prop_free(bytes); 11888 11889 return (1); 11890 } 11891 11892 11893 /* 11894 * called by the transport to get the SCSI target id value, returning 11895 * it in "name" 11896 * 11897 * this isn't needed/used on sun4m 11898 * 11899 * return 1 for success else return 0 11900 */ 11901 /* ARGSUSED */ 11902 static int 11903 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len) 11904 { 11905 struct fcp_lun *plun = ADDR2LUN(&sd->sd_address); 11906 struct fcp_tgt *ptgt; 11907 int numChars; 11908 11909 if (plun == NULL) { 11910 return (0); 11911 } 11912 11913 if ((ptgt = plun->lun_tgt) == NULL) { 11914 return (0); 11915 } 11916 11917 numChars = snprintf(name, len, "%x", ptgt->tgt_d_id); 11918 11919 ASSERT(numChars < len); 11920 if (numChars >= len) { 11921 fcp_log(CE_WARN, NULL, 11922 "!fcp_scsi_get_bus_addr: " 11923 "name parameter length too small, it needs to be %d", 11924 numChars+1); 11925 } 11926 11927 return (1); 11928 } 11929 11930 11931 /* 11932 * called internally to reset the link where the specified port lives 11933 */ 11934 static int 11935 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep) 11936 { 11937 la_wwn_t wwn; 11938 struct fcp_lun *plun; 11939 struct fcp_tgt *ptgt; 11940 11941 /* disable restart of lip if we're suspended */ 11942 mutex_enter(&pptr->port_mutex); 11943 11944 if (pptr->port_state & (FCP_STATE_SUSPENDED | 11945 FCP_STATE_POWER_DOWN)) { 11946 mutex_exit(&pptr->port_mutex); 11947 FCP_TRACE(fcp_logq, pptr->port_instbuf, 11948 fcp_trace, FCP_BUF_LEVEL_2, 0, 11949 "fcp_linkreset, fcp%d: link reset " 
11950 "disabled due to DDI_SUSPEND", 11951 ddi_get_instance(pptr->port_dip)); 11952 return (FC_FAILURE); 11953 } 11954 11955 if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) { 11956 mutex_exit(&pptr->port_mutex); 11957 return (FC_SUCCESS); 11958 } 11959 11960 FCP_DTRACE(fcp_logq, pptr->port_instbuf, 11961 fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset"); 11962 11963 /* 11964 * If ap == NULL assume local link reset. 11965 */ 11966 if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) { 11967 plun = ADDR2LUN(ap); 11968 ptgt = plun->lun_tgt; 11969 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn)); 11970 } else { 11971 bzero((caddr_t)&wwn, sizeof (wwn)); 11972 } 11973 mutex_exit(&pptr->port_mutex); 11974 11975 return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep)); 11976 } 11977 11978 11979 /* 11980 * called from fcp_port_attach() to resume a port 11981 * return DDI_* success/failure status 11982 * acquires and releases the global mutex 11983 * acquires and releases the port mutex 11984 */ 11985 /*ARGSUSED*/ 11986 11987 static int 11988 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo, 11989 uint32_t s_id, fc_attach_cmd_t cmd, int instance) 11990 { 11991 int res = DDI_FAILURE; /* default result */ 11992 struct fcp_port *pptr; /* port state ptr */ 11993 uint32_t alloc_cnt; 11994 uint32_t max_cnt; 11995 fc_portmap_t *tmp_list = NULL; 11996 11997 FCP_DTRACE(fcp_logq, "fcp", fcp_trace, 11998 FCP_BUF_LEVEL_8, 0, "port resume: for port %d", 11999 instance); 12000 12001 if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) { 12002 cmn_err(CE_WARN, "fcp: bad soft state"); 12003 return (res); 12004 } 12005 12006 mutex_enter(&pptr->port_mutex); 12007 switch (cmd) { 12008 case FC_CMD_RESUME: 12009 ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0); 12010 pptr->port_state &= ~FCP_STATE_SUSPENDED; 12011 break; 12012 12013 case FC_CMD_POWER_UP: 12014 /* 12015 * If the port is DDI_SUSPENded, defer rediscovery 12016 
		 * until DDI_RESUME occurs
		 */
		if (pptr->port_state & FCP_STATE_SUSPENDED) {
			pptr->port_state &= ~FCP_STATE_POWER_DOWN;
			mutex_exit(&pptr->port_mutex);
			return (DDI_SUCCESS);
		}
		pptr->port_state &= ~FCP_STATE_POWER_DOWN;
	}
	pptr->port_id = s_id;
	pptr->port_state = FCP_STATE_INIT;
	mutex_exit(&pptr->port_mutex);

	/*
	 * Make a copy of ulp_port_info as fctl allocates
	 * a temp struct.
	 */
	(void) fcp_cp_pinfo(pptr, pinfo);

	/* start the watchdog on the first resumed/attached port */
	mutex_enter(&fcp_global_mutex);
	if (fcp_watchdog_init++ == 0) {
		fcp_watchdog_tick = fcp_watchdog_timeout *
		    drv_usectohz(1000000);
		fcp_watchdog_id = timeout(fcp_watch,
		    NULL, fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);

	/*
	 * Handle various topologies and link states.
	 */
	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
	case FC_STATE_OFFLINE:

		/*
		 * Wait for ONLINE, at which time a state
		 * change will cause a statec_callback
		 */
		res = DDI_SUCCESS;
		break;

	case FC_STATE_ONLINE:

		if (pptr->port_topology == FC_TOP_UNKNOWN) {
			/* force a LIP so the topology gets discovered */
			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
			res = DDI_SUCCESS;
			break;
		}

		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    !fcp_enable_auto_configuration) {
			/* fabric, manual config: map comes from .conf */
			tmp_list = fcp_construct_map(pptr, &alloc_cnt);
			if (tmp_list == NULL) {
				if (!alloc_cnt) {
					res = DDI_SUCCESS;
				}
				break;
			}
			max_cnt = alloc_cnt;
		} else {
			ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);

			alloc_cnt = FCP_MAX_DEVICES;

			if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
			    (sizeof (fc_portmap_t)) * alloc_cnt,
			    KM_NOSLEEP)) == NULL) {
				fcp_log(CE_WARN, pptr->port_dip,
				    "!fcp%d: failed to allocate portmap",
				    instance);
				break;
			}

			max_cnt = alloc_cnt;
			if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
			    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
			    FC_SUCCESS) {
				caddr_t msg;

				(void) fc_ulp_error(res, &msg);

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "resume failed getportmap: reason=0x%x",
				    res);

				fcp_log(CE_WARN, pptr->port_dip,
				    "!failed to get port map : %s", msg);
				break;
			}
			if (max_cnt > alloc_cnt) {
				/* getportmap may reallocate a bigger list */
				alloc_cnt = max_cnt;
			}
		}

		/*
		 * do the SCSI device discovery and create
		 * the devinfos
		 */
		fcp_statec_callback(ulph, pptr->port_fp_handle,
		    pptr->port_phys_state, pptr->port_topology, tmp_list,
		    max_cnt, pptr->port_id);

		res = DDI_SUCCESS;
		break;

	default:
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: invalid port state at attach=0x%x",
		    instance, pptr->port_phys_state);

		/*
		 * NOTE(review): this stores an FCP_STATE_* constant into
		 * port_phys_state, which elsewhere holds FC_STATE_* values
		 * — looks suspicious; confirm intent before changing.
		 */
		mutex_enter(&pptr->port_mutex);
		pptr->port_phys_state = FCP_STATE_OFFLINE;
		mutex_exit(&pptr->port_mutex);
		res = DDI_SUCCESS;

		break;
	}

	if (tmp_list != NULL) {
		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
	}

	return (res);
}


/*
 * Copy the transport-supplied ulp_port_info into our per-port soft state
 * (fctl passes a temporary structure that must not be retained).
 */
static void
fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
{
	pptr->port_fp_modlinkage = *pinfo->port_linkage;
	pptr->port_dip = pinfo->port_dip;
	pptr->port_fp_handle = pinfo->port_handle;
	pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
	pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
	pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
	pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
	pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
	pptr->port_max_exch = pinfo->port_fca_max_exch;
	pptr->port_phys_state = pinfo->port_state;
	/* NOTE(review): topology is delivered in port_flags — confirm */
	pptr->port_topology = pinfo->port_flags;
	pptr->port_reset_action = pinfo->port_reset_action;
	pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
	pptr->port_fcp_dma = pinfo->port_fcp_dma;
	bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
	bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
}

/*
 * If the elements wait field is set to 1 then
 * another thread is waiting for the operation to complete. Once
 * it is complete, the waiting thread is signaled and the element is
 * freed by the waiting thread. If the elements wait field is set to 0
 * the element is freed.
 */
static void
fcp_process_elem(struct fcp_hp_elem *elem, int result)
{
	ASSERT(elem != NULL);
	mutex_enter(&elem->mutex);
	elem->result = result;
	if (elem->wait) {
		/* hand the element back to the waiter; it will free it */
		elem->wait = 0;
		cv_signal(&elem->cv);
		mutex_exit(&elem->mutex);
	} else {
		/* nobody is waiting: tear the element down here */
		mutex_exit(&elem->mutex);
		cv_destroy(&elem->cv);
		mutex_destroy(&elem->mutex);
		kmem_free(elem, sizeof (struct fcp_hp_elem));
	}
}

/*
 * This function is invoked from the taskq thread to allocate
 * devinfo nodes and to online/offline them.
 */
static void
fcp_hp_task(void *arg)
{
	struct fcp_hp_elem *elem = (struct fcp_hp_elem *)arg;
	struct fcp_lun *plun = elem->lun;
	struct fcp_port *pptr = elem->port;
	int result;

	ASSERT(elem->what == FCP_ONLINE ||
	    elem->what == FCP_OFFLINE ||
	    elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
	    elem->what == FCP_MPXIO_PATH_SET_BUSY);

	/*
	 * Bail out if the LUN saw another event since this work item was
	 * queued (stale online/offline request) or if the port is going
	 * away / suspended / powered down.
	 */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);
	if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
	    plun->lun_event_count != elem->event_cnt) ||
	    pptr->port_state & (FCP_STATE_SUSPENDED |
	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);
		fcp_process_elem(elem, NDI_FAILURE);
		return;
	}
	mutex_exit(&plun->lun_mutex);
	mutex_exit(&pptr->port_mutex);

	/* perform the actual online/offline/path-state work */
	result = fcp_trigger_lun(plun, elem->cip, elem->what,
	    elem->link_cnt, elem->tgt_cnt, elem->flags);
	fcp_process_elem(elem, result);
}


/*
 * Return (creating it first if necessary) the child devinfo/path-info for
 * the given LUN.  Caller holds both the port and LUN mutexes.
 */
static child_info_t *
fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount)
{
	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
		struct fcp_port *pptr = plun->lun_tgt->tgt_port;

		ASSERT(MUTEX_HELD(&pptr->port_mutex));
		/*
		 * Child has not been created yet. Create the child device
		 * based on the per-Lun flags.
12238 */ 12239 if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) { 12240 plun->lun_cip = 12241 CIP(fcp_create_dip(plun, lcount, tcount)); 12242 plun->lun_mpxio = 0; 12243 } else { 12244 plun->lun_cip = 12245 CIP(fcp_create_pip(plun, lcount, tcount)); 12246 plun->lun_mpxio = 1; 12247 } 12248 } else { 12249 plun->lun_cip = cip; 12250 } 12251 12252 return (plun->lun_cip); 12253 } 12254 12255 12256 static int 12257 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip) 12258 { 12259 int rval = FC_FAILURE; 12260 dev_info_t *pdip; 12261 struct dev_info *dip; 12262 int circular; 12263 12264 ASSERT(MUTEX_HELD(&plun->lun_mutex)); 12265 12266 pdip = plun->lun_tgt->tgt_port->port_dip; 12267 12268 if (plun->lun_cip == NULL) { 12269 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf, 12270 fcp_trace, FCP_BUF_LEVEL_3, 0, 12271 "fcp_is_dip_present: plun->lun_cip is NULL: " 12272 "plun: %p lun state: %x num: %d target state: %x", 12273 plun, plun->lun_state, plun->lun_num, 12274 plun->lun_tgt->tgt_port->port_state); 12275 return (rval); 12276 } 12277 ndi_devi_enter(pdip, &circular); 12278 dip = DEVI(pdip)->devi_child; 12279 while (dip) { 12280 if (dip == DEVI(cdip)) { 12281 rval = FC_SUCCESS; 12282 break; 12283 } 12284 dip = dip->devi_sibling; 12285 } 12286 ndi_devi_exit(pdip, circular); 12287 return (rval); 12288 } 12289 12290 static int 12291 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip) 12292 { 12293 int rval = FC_FAILURE; 12294 12295 ASSERT(plun != NULL); 12296 ASSERT(MUTEX_HELD(&plun->lun_mutex)); 12297 12298 if (plun->lun_mpxio == 0) { 12299 rval = fcp_is_dip_present(plun, DIP(cip)); 12300 } else { 12301 rval = fcp_is_pip_present(plun, PIP(cip)); 12302 } 12303 12304 return (rval); 12305 } 12306 12307 /* 12308 * Function: fcp_create_dip 12309 * 12310 * Description: Creates a dev_info_t structure for the LUN specified by the 12311 * caller. 12312 * 12313 * Argument: plun Lun structure 12314 * link_cnt Link state count. 12315 * tgt_cnt Target state change count. 
 *
 * Return Value: NULL if it failed
 *		dev_info_t structure address if it succeeded
 *
 * Context: Kernel context
 */
static dev_info_t *
fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
{
	int			failure = 0;
	uint32_t		tgt_id;
	uint64_t		sam_lun;
	struct fcp_tgt		*ptgt = plun->lun_tgt;
	struct fcp_port		*pptr = ptgt->tgt_port;
	dev_info_t		*pdip = pptr->port_dip;
	dev_info_t		*cdip = NULL;
	dev_info_t		*old_dip = DIP(plun->lun_cip);
	char			*nname = NULL;
	char			**compatible = NULL;
	int			ncompatible;
	char			*scsi_binding_set;
	char			t_pwwn[17];

	ASSERT(MUTEX_HELD(&plun->lun_mutex));
	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	/* get the 'scsi-binding-set' property */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
	    &scsi_binding_set) != DDI_PROP_SUCCESS) {
		scsi_binding_set = NULL;
	}

	/* determine the node name and compatible */
	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
	if (scsi_binding_set) {
		ddi_prop_free(scsi_binding_set);
	}

	if (nname == NULL) {
#ifdef DEBUG
		cmn_err(CE_WARN, "%s%d: no driver for "
		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
		    " compatible: %s",
		    ddi_driver_name(pdip), ddi_get_instance(pdip),
		    ptgt->tgt_port_wwn.raw_wwn[0],
		    ptgt->tgt_port_wwn.raw_wwn[1],
		    ptgt->tgt_port_wwn.raw_wwn[2],
		    ptgt->tgt_port_wwn.raw_wwn[3],
		    ptgt->tgt_port_wwn.raw_wwn[4],
		    ptgt->tgt_port_wwn.raw_wwn[5],
		    ptgt->tgt_port_wwn.raw_wwn[6],
		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
		    *compatible);
#endif	/* DEBUG */
		failure++;
		goto end_of_fcp_create_dip;
	}

	cdip = fcp_find_existing_dip(plun, pdip, nname);

	/*
	 * if the old_dip does not match the cdip, that means there is
	 * some property change. since we'll be using the cdip, we need
	 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
	 * then the dtype for the device has been updated. Offline the
	 * the old device and create a new device with the new device type
	 * Refer to bug: 4764752
	 */
	if (old_dip && (cdip != old_dip ||
	    plun->lun_state & FCP_LUN_CHANGED)) {
		plun->lun_state &= ~(FCP_LUN_INIT);
		/*
		 * Both mutexes are dropped here so the hotplug path can
		 * take them; they are reacquired below in the same order
		 * (port_mutex before lun_mutex).
		 */
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		mutex_enter(&ptgt->tgt_mutex);
		(void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
		    link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
		mutex_exit(&ptgt->tgt_mutex);

#ifdef DEBUG
		if (cdip != NULL) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "Old dip=%p; New dip=%p don't match", old_dip,
			    cdip);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "Old dip=%p; New dip=NULL don't match", old_dip);
		}
#endif

		mutex_enter(&pptr->port_mutex);
		mutex_enter(&plun->lun_mutex);
	}

	if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
		plun->lun_state &= ~(FCP_LUN_CHANGED);
		if (ndi_devi_alloc(pptr->port_dip, nname,
		    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
			failure++;
			goto end_of_fcp_create_dip;
		}
	}

	/*
	 * Previously all the properties for the devinfo were destroyed here
	 * with a call to ndi_prop_remove_all(). Since this may cause loss of
	 * the devid property (and other properties established by the target
	 * driver or framework) which the code does not always recreate, this
	 * call was removed.
	 * This opens a theoretical possibility that we may return with a
	 * stale devid on the node if the scsi entity behind the fibre channel
	 * lun has changed.
	 */

	/* decorate the node with compatible */
	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	/* ASCII port WWN (16 hex digits + NUL) for the target-port property */
	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
	t_pwwn[16] = '\0';
	if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
	    != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	/*
	 * If there is no hard address - We might have to deal with
	 * that by using WWN - Having said that it is important to
	 * recognize this problem early so ssd can be informed of
	 * the right interconnect type.
	 */
	if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
		tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
	} else {
		tgt_id = ptgt->tgt_d_id;
	}

	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
	    tgt_id) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
	    (int)plun->lun_num) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}
	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
	if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
	    sam_lun) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

end_of_fcp_create_dip:
	scsi_hba_nodename_compatible_free(nname, compatible);

	/* On any failure, undo the partially-decorated node. */
	if (cdip != NULL && failure) {
		(void) ndi_prop_remove_all(cdip);
		(void) ndi_devi_free(cdip);
		cdip = NULL;
	}

	return (cdip);
}

/*
 * Function: fcp_create_pip
 *
 * Description: Creates a Path Id for the LUN specified by the caller.
 *
 * Argument:	plun		Lun structure
 *		link_cnt	Link state count.
 *		tgt_cnt		Target state count.
12511 * 12512 * Return Value: NULL if it failed 12513 * mdi_pathinfo_t structure address if it succeeded 12514 * 12515 * Context: Kernel context 12516 */ 12517 static mdi_pathinfo_t * 12518 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount) 12519 { 12520 int i; 12521 char buf[MAXNAMELEN]; 12522 char uaddr[MAXNAMELEN]; 12523 int failure = 0; 12524 uint32_t tgt_id; 12525 uint64_t sam_lun; 12526 struct fcp_tgt *ptgt = plun->lun_tgt; 12527 struct fcp_port *pptr = ptgt->tgt_port; 12528 dev_info_t *pdip = pptr->port_dip; 12529 mdi_pathinfo_t *pip = NULL; 12530 mdi_pathinfo_t *old_pip = PIP(plun->lun_cip); 12531 char *nname = NULL; 12532 char **compatible = NULL; 12533 int ncompatible; 12534 char *scsi_binding_set; 12535 char t_pwwn[17]; 12536 12537 ASSERT(MUTEX_HELD(&plun->lun_mutex)); 12538 ASSERT(MUTEX_HELD(&pptr->port_mutex)); 12539 12540 scsi_binding_set = "vhci"; 12541 12542 /* determine the node name and compatible */ 12543 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set, 12544 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible); 12545 12546 if (nname == NULL) { 12547 #ifdef DEBUG 12548 cmn_err(CE_WARN, "fcp_create_dip: %s%d: no driver for " 12549 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:" 12550 " compatible: %s", 12551 ddi_driver_name(pdip), ddi_get_instance(pdip), 12552 ptgt->tgt_port_wwn.raw_wwn[0], 12553 ptgt->tgt_port_wwn.raw_wwn[1], 12554 ptgt->tgt_port_wwn.raw_wwn[2], 12555 ptgt->tgt_port_wwn.raw_wwn[3], 12556 ptgt->tgt_port_wwn.raw_wwn[4], 12557 ptgt->tgt_port_wwn.raw_wwn[5], 12558 ptgt->tgt_port_wwn.raw_wwn[6], 12559 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num, 12560 *compatible); 12561 #endif /* DEBUG */ 12562 failure++; 12563 goto end_of_fcp_create_pip; 12564 } 12565 12566 pip = fcp_find_existing_pip(plun, pdip); 12567 12568 /* 12569 * if the old_dip does not match the cdip, that means there is 12570 * some property change. since we'll be using the cdip, we need 12571 * to offline the old_dip. 
If the state contains FCP_LUN_CHANGED 12572 * then the dtype for the device has been updated. Offline the 12573 * the old device and create a new device with the new device type 12574 * Refer to bug: 4764752 12575 */ 12576 if (old_pip && (pip != old_pip || 12577 plun->lun_state & FCP_LUN_CHANGED)) { 12578 plun->lun_state &= ~(FCP_LUN_INIT); 12579 mutex_exit(&plun->lun_mutex); 12580 mutex_exit(&pptr->port_mutex); 12581 12582 mutex_enter(&ptgt->tgt_mutex); 12583 (void) fcp_pass_to_hp(pptr, plun, CIP(old_pip), 12584 FCP_OFFLINE, lcount, tcount, 12585 NDI_DEVI_REMOVE, 0); 12586 mutex_exit(&ptgt->tgt_mutex); 12587 12588 if (pip != NULL) { 12589 FCP_TRACE(fcp_logq, pptr->port_instbuf, 12590 fcp_trace, FCP_BUF_LEVEL_2, 0, 12591 "Old pip=%p; New pip=%p don't match", 12592 old_pip, pip); 12593 } else { 12594 FCP_TRACE(fcp_logq, pptr->port_instbuf, 12595 fcp_trace, FCP_BUF_LEVEL_2, 0, 12596 "Old pip=%p; New pip=NULL don't match", 12597 old_pip); 12598 } 12599 12600 mutex_enter(&pptr->port_mutex); 12601 mutex_enter(&plun->lun_mutex); 12602 } 12603 12604 /* 12605 * Since FC_WWN_SIZE is 8 bytes and its not like the 12606 * lun_guid_size which is dependent on the target, I don't 12607 * believe the same trancation happens here UNLESS the standards 12608 * change the FC_WWN_SIZE value to something larger than 12609 * MAXNAMELEN(currently 255 bytes). 12610 */ 12611 12612 for (i = 0; i < FC_WWN_SIZE; i++) { 12613 (void) sprintf(&buf[i << 1], "%02x", 12614 ptgt->tgt_port_wwn.raw_wwn[i]); 12615 } 12616 12617 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", 12618 buf, plun->lun_num); 12619 12620 if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) { 12621 /* 12622 * Release the locks before calling into 12623 * mdi_pi_alloc_compatible() since this can result in a 12624 * callback into fcp which can result in a deadlock 12625 * (see bug # 4870272). 
12626 * 12627 * Basically, what we are trying to avoid is the scenario where 12628 * one thread does ndi_devi_enter() and tries to grab 12629 * fcp_mutex and another does it the other way round. 12630 * 12631 * But before we do that, make sure that nobody releases the 12632 * port in the meantime. We can do this by setting a flag. 12633 */ 12634 plun->lun_state &= ~(FCP_LUN_CHANGED); 12635 pptr->port_state |= FCP_STATE_IN_MDI; 12636 mutex_exit(&plun->lun_mutex); 12637 mutex_exit(&pptr->port_mutex); 12638 if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid, 12639 uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) { 12640 fcp_log(CE_WARN, pptr->port_dip, 12641 "!path alloc failed:0x%x", plun); 12642 mutex_enter(&pptr->port_mutex); 12643 mutex_enter(&plun->lun_mutex); 12644 pptr->port_state &= ~FCP_STATE_IN_MDI; 12645 failure++; 12646 goto end_of_fcp_create_pip; 12647 } 12648 mutex_enter(&pptr->port_mutex); 12649 mutex_enter(&plun->lun_mutex); 12650 pptr->port_state &= ~FCP_STATE_IN_MDI; 12651 } else { 12652 (void) mdi_prop_remove(pip, NULL); 12653 } 12654 12655 mdi_pi_set_phci_private(pip, (caddr_t)plun); 12656 12657 if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP, 12658 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) 12659 != DDI_PROP_SUCCESS) { 12660 failure++; 12661 goto end_of_fcp_create_pip; 12662 } 12663 12664 if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP, 12665 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) 12666 != DDI_PROP_SUCCESS) { 12667 failure++; 12668 goto end_of_fcp_create_pip; 12669 } 12670 12671 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn); 12672 t_pwwn[16] = '\0'; 12673 if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn) 12674 != DDI_PROP_SUCCESS) { 12675 failure++; 12676 goto end_of_fcp_create_pip; 12677 } 12678 12679 /* 12680 * If there is no hard address - We might have to deal with 12681 * that by using WWN - Having said that it is important to 12682 * recognize this problem early so ssd can be informed of 12683 * the right 
interconnect type. 12684 */ 12685 if (!FC_TOP_EXTERNAL(pptr->port_topology) && 12686 ptgt->tgt_hard_addr != 0) { 12687 tgt_id = (uint32_t) 12688 fcp_alpa_to_switch[ptgt->tgt_hard_addr]; 12689 } else { 12690 tgt_id = ptgt->tgt_d_id; 12691 } 12692 12693 if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id) 12694 != DDI_PROP_SUCCESS) { 12695 failure++; 12696 goto end_of_fcp_create_pip; 12697 } 12698 12699 if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num) 12700 != DDI_PROP_SUCCESS) { 12701 failure++; 12702 goto end_of_fcp_create_pip; 12703 } 12704 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE); 12705 if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun) 12706 != DDI_PROP_SUCCESS) { 12707 failure++; 12708 goto end_of_fcp_create_pip; 12709 } 12710 12711 end_of_fcp_create_pip: 12712 scsi_hba_nodename_compatible_free(nname, compatible); 12713 12714 if (pip != NULL && failure) { 12715 (void) mdi_prop_remove(pip, NULL); 12716 mutex_exit(&plun->lun_mutex); 12717 mutex_exit(&pptr->port_mutex); 12718 (void) mdi_pi_free(pip, 0); 12719 mutex_enter(&pptr->port_mutex); 12720 mutex_enter(&plun->lun_mutex); 12721 pip = NULL; 12722 } 12723 12724 return (pip); 12725 } 12726 12727 static dev_info_t * 12728 fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name) 12729 { 12730 uint_t nbytes; 12731 uchar_t *bytes; 12732 uint_t nwords; 12733 uint32_t tgt_id; 12734 int *words; 12735 dev_info_t *cdip; 12736 dev_info_t *ndip; 12737 struct fcp_tgt *ptgt = plun->lun_tgt; 12738 struct fcp_port *pptr = ptgt->tgt_port; 12739 int circular; 12740 12741 ndi_devi_enter(pdip, &circular); 12742 12743 ndip = (dev_info_t *)DEVI(pdip)->devi_child; 12744 while ((cdip = ndip) != NULL) { 12745 ndip = (dev_info_t *)DEVI(cdip)->devi_sibling; 12746 12747 if (strcmp(DEVI(cdip)->devi_node_name, name)) { 12748 continue; 12749 } 12750 12751 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip, 12752 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes, 12753 &nbytes) != DDI_PROP_SUCCESS) 
{ 12754 continue; 12755 } 12756 12757 if (nbytes != FC_WWN_SIZE || bytes == NULL) { 12758 if (bytes != NULL) { 12759 ddi_prop_free(bytes); 12760 } 12761 continue; 12762 } 12763 ASSERT(bytes != NULL); 12764 12765 if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) { 12766 ddi_prop_free(bytes); 12767 continue; 12768 } 12769 12770 ddi_prop_free(bytes); 12771 12772 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip, 12773 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes, 12774 &nbytes) != DDI_PROP_SUCCESS) { 12775 continue; 12776 } 12777 12778 if (nbytes != FC_WWN_SIZE || bytes == NULL) { 12779 if (bytes != NULL) { 12780 ddi_prop_free(bytes); 12781 } 12782 continue; 12783 } 12784 ASSERT(bytes != NULL); 12785 12786 if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) { 12787 ddi_prop_free(bytes); 12788 continue; 12789 } 12790 12791 ddi_prop_free(bytes); 12792 12793 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip, 12794 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words, 12795 &nwords) != DDI_PROP_SUCCESS) { 12796 continue; 12797 } 12798 12799 if (nwords != 1 || words == NULL) { 12800 if (words != NULL) { 12801 ddi_prop_free(words); 12802 } 12803 continue; 12804 } 12805 ASSERT(words != NULL); 12806 12807 /* 12808 * If there is no hard address - We might have to deal with 12809 * that by using WWN - Having said that it is important to 12810 * recognize this problem early so ssd can be informed of 12811 * the right interconnect type. 
12812 */ 12813 if (!FC_TOP_EXTERNAL(pptr->port_topology) && 12814 ptgt->tgt_hard_addr != 0) { 12815 tgt_id = 12816 (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr]; 12817 } else { 12818 tgt_id = ptgt->tgt_d_id; 12819 } 12820 12821 if (tgt_id != (uint32_t)*words) { 12822 ddi_prop_free(words); 12823 continue; 12824 } 12825 ddi_prop_free(words); 12826 12827 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip, 12828 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words, 12829 &nwords) != DDI_PROP_SUCCESS) { 12830 continue; 12831 } 12832 12833 if (nwords != 1 || words == NULL) { 12834 if (words != NULL) { 12835 ddi_prop_free(words); 12836 } 12837 continue; 12838 } 12839 ASSERT(words != NULL); 12840 12841 if (plun->lun_num == (uint16_t)*words) { 12842 ddi_prop_free(words); 12843 break; 12844 } 12845 ddi_prop_free(words); 12846 } 12847 ndi_devi_exit(pdip, circular); 12848 12849 return (cdip); 12850 } 12851 12852 12853 static int 12854 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip) 12855 { 12856 dev_info_t *pdip; 12857 char buf[MAXNAMELEN]; 12858 char uaddr[MAXNAMELEN]; 12859 int rval = FC_FAILURE; 12860 12861 ASSERT(MUTEX_HELD(&plun->lun_mutex)); 12862 12863 pdip = plun->lun_tgt->tgt_port->port_dip; 12864 12865 /* 12866 * Check if pip (and not plun->lun_cip) is NULL. plun->lun_cip can be 12867 * non-NULL even when the LUN is not there as in the case when a LUN is 12868 * configured and then deleted on the device end (for T3/T4 case). In 12869 * such cases, pip will be NULL. 12870 * 12871 * If the device generates an RSCN, it will end up getting offlined when 12872 * it disappeared and a new LUN will get created when it is rediscovered 12873 * on the device. If we check for lun_cip here, the LUN will not end 12874 * up getting onlined since this function will end up returning a 12875 * FC_SUCCESS. 12876 * 12877 * The behavior is different on other devices. 
For instance, on a HDS,
	 * there was no RSCN generated by the device but the next I/O generated
	 * a check condition and rediscovery got triggered that way. So, in
	 * such cases, this path will not be exercised
	 */
	if (pip == NULL) {
		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_4, 0,
		    "fcp_is_pip_present: plun->lun_cip is NULL: "
		    "plun: %p lun state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (rval);
	}

	fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);

	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);

	/* Look the path up under the old GUID if one is still recorded. */
	if (plun->lun_old_guid) {
		if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
			rval = FC_SUCCESS;
		}
	} else {
		if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
			rval = FC_SUCCESS;
		}
	}
	return (rval);
}

/*
 * Function: fcp_find_existing_pip
 *
 * Description: Looks up the MDI pathinfo node for this LUN's unit address
 *		("w<port-wwn>,<lun>") and GUID under the given parent.
 *
 * Argument:	plun	Lun structure.
 *		pdip	Parent (port) devinfo node.
 *
 * Return Value: The matching mdi_pathinfo_t, or NULL if none exists.
 */
static mdi_pathinfo_t *
fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
{
	char			buf[MAXNAMELEN];
	char			uaddr[MAXNAMELEN];
	mdi_pathinfo_t		*pip;
	struct fcp_tgt		*ptgt = plun->lun_tgt;
	struct fcp_port		*pptr = ptgt->tgt_port;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);

	pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);

	return (pip);
}


/*
 * Function: fcp_online_child
 *
 * Description: Brings the LUN's child node online: ndi_devi_online() (or
 *		ndi_devi_bind_driver() when the parent is not yet attached)
 *		for a plain devinfo child, or mdi_pi_online() for an MPxIO
 *		path.  If MDI reports MDI_NOT_SUPPORTED the LUN falls back
 *		to legacy (non-MPxIO) enumeration via fcp_create_dip() and
 *		retries.  Both port_mutex and lun_mutex are dropped around
 *		the framework calls and reacquired before returning.
 *
 * Argument:	plun	Lun structure (lun_mutex and port_mutex held).
 *		cip	Child handle (devinfo or pathinfo).
 *		lcount	Link state count.
 *		tcount	Target state change count.
 *		flags	Flags passed to the NDI/MDI online call.
 *		circ	Circular-dependency token for phci enter/exit.
 *
 * Return Value: NDI_SUCCESS or NDI_FAILURE.
 */
static int
fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int			rval;
	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt		*ptgt = plun->lun_tgt;
	dev_info_t		*cdip = NULL;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));
	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_online_child: plun->lun_cip is NULL: "
		    "plun: %p state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}
again:
	if (plun->lun_mpxio == 0) {
		cdip = DIP(cip);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

		/*
		 * We could check for FCP_LUN_INIT here but chances
		 * of getting here when it's already in FCP_LUN_INIT
		 * is rare and a duplicate ndi_devi_online wouldn't
		 * hurt either (as the node would already have been
		 * in CF2)
		 */
		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
			rval = ndi_devi_bind_driver(cdip, flags);
		} else {
			rval = ndi_devi_online(cdip, flags);
		}

		/*
		 * We log the message into trace buffer if the device
		 * is "ses" and into syslog for any other device
		 * type. This is to prevent the ndi_devi_online failure
		 * message that appears for V880/A5K ses devices.
		 */
		if (rval == NDI_SUCCESS) {
			mutex_enter(&ptgt->tgt_mutex);
			plun->lun_state |= FCP_LUN_INIT;
			mutex_exit(&ptgt->tgt_mutex);
		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
			fcp_log(CE_NOTE, pptr->port_dip,
			    "!ndi_devi_online:"
			    " failed for %s: target=%x lun=%x %x",
			    ddi_get_name(cdip), ptgt->tgt_d_id,
			    plun->lun_num, rval);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    " !ndi_devi_online:"
			    " failed for %s: target=%x lun=%x %x",
			    ddi_get_name(cdip), ptgt->tgt_d_id,
			    plun->lun_num, rval);
		}
	} else {
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

		/*
		 * Hold path and exit phci to avoid deadlock with power
		 * management code during mdi_pi_online.
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		rval = mdi_pi_online(PIP(cip), flags);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		if (rval == MDI_SUCCESS) {
			mutex_enter(&ptgt->tgt_mutex);
			plun->lun_state |= FCP_LUN_INIT;
			mutex_exit(&ptgt->tgt_mutex);

			/*
			 * Clear MPxIO path permanent disable in case
			 * fcp hotplug dropped the offline event.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

		} else if (rval == MDI_NOT_SUPPORTED) {
			child_info_t	*old_cip = cip;

			/*
			 * MPxIO does not support this device yet.
			 * Enumerate in legacy mode.
			 */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&plun->lun_mutex);
			plun->lun_mpxio = 0;
			plun->lun_cip = NULL;
			cdip = fcp_create_dip(plun, lcount, tcount);
			plun->lun_cip = cip = CIP(cdip);
			if (cip == NULL) {
				fcp_log(CE_WARN, pptr->port_dip,
				    "!fcp_online_child: "
				    "Create devinfo failed for LU=%p", plun);
				mutex_exit(&plun->lun_mutex);

				mutex_enter(&ptgt->tgt_mutex);
				plun->lun_state |= FCP_LUN_OFFLINE;
				mutex_exit(&ptgt->tgt_mutex);

				mutex_exit(&pptr->port_mutex);

				/*
				 * free the mdi_pathinfo node
				 */
				(void) mdi_pi_free(PIP(old_cip), 0);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_3, 0,
				    "fcp_online_child: creating devinfo "
				    "node 0x%p for plun 0x%p",
				    cip, plun);
				mutex_exit(&plun->lun_mutex);
				mutex_exit(&pptr->port_mutex);
				/*
				 * free the mdi_pathinfo node
				 */
				(void) mdi_pi_free(PIP(old_cip), 0);
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_mutex);
				/* Retry the online through the legacy path. */
				goto again;
			}
		} else {
			if (cdip) {
				fcp_log(CE_NOTE, pptr->port_dip,
				    "!fcp_online_child: mdi_pi_online:"
				    " failed for %s: target=%x lun=%x %x",
				    ddi_get_name(cdip), ptgt->tgt_d_id,
				    plun->lun_num, rval);
			}
		}
		/* Map the MDI result onto the NDI result space. */
		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	if (rval == NDI_SUCCESS) {
		if (cdip) {
			/* Fire the FCAL insert event for the new child. */
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
			    &fcp_insert_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
			    cdip, fcp_insert_eid, NULL);
		}
	}
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);
	return (rval);
}

/*
 * Function: fcp_offline_child
 *
 * Description: Takes the LUN's child node offline via ndi_devi_offline()
 *		or mdi_pi_offline() (with the phci exited around the MDI
 *		call to avoid deadlock with power management).  On success
 *		with NDI_DEVI_REMOVE, the pathinfo node is freed and the
 *		LUN's reference to the child is dropped.  Both mutexes are
 *		dropped around the framework calls and reacquired before
 *		returning.
 *
 * Argument:	plun	Lun structure (lun_mutex and port_mutex held).
 *		cip	Child handle (devinfo or pathinfo).
 *		lcount	Link state count (unused).
 *		tcount	Target state change count (unused).
 *		flags	Flags passed to the NDI/MDI offline call.
 *		circ	Circular-dependency token for phci enter/exit.
 *
 * Return Value: NDI_SUCCESS or NDI_FAILURE.
 */
/* ARGSUSED */
static int
fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int			rval;
	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt		*ptgt = plun->lun_tgt;
	dev_info_t		*cdip;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));
	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_offline_child: plun->lun_cip is NULL: "
		    "plun: %p lun state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}

	if (plun->lun_mpxio == 0) {
		cdip = DIP(cip);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);
		rval = ndi_devi_offline(DIP(cip), flags);
		if (rval != NDI_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: ndi_devi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
	} else {
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		/*
		 * Exit phci to avoid deadlock with power management code
		 * during mdi_pi_offline
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		rval = mdi_pi_offline(PIP(cip), flags);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		if (rval == MDI_SUCCESS) {
			/*
			 * Clear MPxIO path permanent disable as the path is
			 * already offlined.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

			if (flags & NDI_DEVI_REMOVE) {
				(void) mdi_pi_free(PIP(cip), 0);
			}
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: mdi_pi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
		/* Map the MDI result onto the NDI result space. */
		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_state &= ~FCP_LUN_INIT;
	mutex_exit(&ptgt->tgt_mutex);

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);

	if (rval == NDI_SUCCESS) {
		cdip = NULL;
		if (flags & NDI_DEVI_REMOVE) {
			/*
			 * If the guid of the LUN changes, lun_cip will not
			 * equal to cip, and after offlining the LUN with the
			 * old guid, we should keep lun_cip since it's the cip
			 * of the LUN with the new guid.
			 * Otherwise remove our reference to child node.
 */
			if (plun->lun_cip == cip) {
				plun->lun_cip = NULL;
			}
			/* The old GUID is no longer needed once offlined. */
			if (plun->lun_old_guid) {
				kmem_free(plun->lun_old_guid,
				    plun->lun_old_guid_size);
				plun->lun_old_guid = NULL;
				plun->lun_old_guid_size = 0;
			}
		}
	}

	if (cdip) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
		    " target=%x lun=%x", "ndi_offline",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
	}

	return (rval);
}

/*
 * Function: fcp_remove_child
 *
 * Description: Frees the LUN's child node (devinfo or pathinfo) if it is
 *		still present, then clears plun->lun_cip.  In the MPxIO
 *		case the lun, tgt and port mutexes are dropped around the
 *		MDI free and reacquired in the reverse (port, tgt, lun)
 *		order.
 *
 * Argument:	plun	Lun structure (lun_mutex must be held; in the MPxIO
 *			case the tgt and port mutexes are evidently held by
 *			the caller as well, since they are dropped here).
 *
 * Context: Kernel context.
 */
static void
fcp_remove_child(struct fcp_lun *plun)
{
	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
		if (plun->lun_mpxio == 0) {
			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
			(void) ndi_devi_free(DIP(plun->lun_cip));
		} else {
			mutex_exit(&plun->lun_mutex);
			mutex_exit(&plun->lun_tgt->tgt_mutex);
			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);
			FCP_TRACE(fcp_logq,
			    plun->lun_tgt->tgt_port->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "lun=%p pip freed %p", plun, plun->lun_cip);
			(void) mdi_prop_remove(PIP(plun->lun_cip), NULL);
			(void) mdi_pi_free(PIP(plun->lun_cip), 0);
			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
			mutex_enter(&plun->lun_tgt->tgt_mutex);
			mutex_enter(&plun->lun_mutex);
		}
	}

	plun->lun_cip = NULL;
}

/*
 * called when a timeout occurs
 *
 * can be scheduled during an attach or resume (if not already running)
 *
 * one timeout is set up for all ports
 *
 * acquires and releases the global mutex
 */
/*ARGSUSED*/
static void
fcp_watch(void *arg)
{
	struct fcp_port		*pptr;
	struct fcp_ipkt		*icmd;
	struct fcp_ipkt		*nicmd;
	struct fcp_pkt		*cmd;
	struct fcp_pkt		*ncmd;
	struct fcp_pkt		*tail;
	struct
fcp_pkt *pcmd; 13257 struct fcp_pkt *save_head; 13258 struct fcp_port *save_port; 13259 13260 /* increment global watchdog time */ 13261 fcp_watchdog_time += fcp_watchdog_timeout; 13262 13263 mutex_enter(&fcp_global_mutex); 13264 13265 /* scan each port in our list */ 13266 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) { 13267 save_port = fcp_port_head; 13268 pptr->port_state |= FCP_STATE_IN_WATCHDOG; 13269 mutex_exit(&fcp_global_mutex); 13270 13271 mutex_enter(&pptr->port_mutex); 13272 if (pptr->port_ipkt_list == NULL && 13273 (pptr->port_state & (FCP_STATE_SUSPENDED | 13274 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) { 13275 pptr->port_state &= ~FCP_STATE_IN_WATCHDOG; 13276 mutex_exit(&pptr->port_mutex); 13277 mutex_enter(&fcp_global_mutex); 13278 goto end_of_watchdog; 13279 } 13280 13281 /* 13282 * We check if a list of targets need to be offlined. 13283 */ 13284 if (pptr->port_offline_tgts) { 13285 fcp_scan_offline_tgts(pptr); 13286 } 13287 13288 /* 13289 * We check if a list of luns need to be offlined. 13290 */ 13291 if (pptr->port_offline_luns) { 13292 fcp_scan_offline_luns(pptr); 13293 } 13294 13295 /* 13296 * We check if a list of targets or luns need to be reset. 13297 */ 13298 if (pptr->port_reset_list) { 13299 fcp_check_reset_delay(pptr); 13300 } 13301 13302 mutex_exit(&pptr->port_mutex); 13303 13304 /* 13305 * This is where the pending commands (pkt) are checked for 13306 * timeout. 13307 */ 13308 mutex_enter(&pptr->port_pkt_mutex); 13309 tail = pptr->port_pkt_tail; 13310 13311 for (pcmd = NULL, cmd = pptr->port_pkt_head; 13312 cmd != NULL; cmd = ncmd) { 13313 ncmd = cmd->cmd_next; 13314 /* 13315 * If a command is in this queue the bit CFLAG_IN_QUEUE 13316 * must be set. 13317 */ 13318 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE); 13319 /* 13320 * FCP_INVALID_TIMEOUT will be set for those 13321 * command that need to be failed. Mostly those 13322 * cmds that could not be queued down for the 13323 * "timeout" value. 
cmd->cmd_timeout is used 13324 * to try and requeue the command regularly. 13325 */ 13326 if (cmd->cmd_timeout >= fcp_watchdog_time) { 13327 /* 13328 * This command hasn't timed out yet. Let's 13329 * go to the next one. 13330 */ 13331 pcmd = cmd; 13332 goto end_of_loop; 13333 } 13334 13335 if (cmd == pptr->port_pkt_head) { 13336 ASSERT(pcmd == NULL); 13337 pptr->port_pkt_head = cmd->cmd_next; 13338 } else { 13339 ASSERT(pcmd != NULL); 13340 pcmd->cmd_next = cmd->cmd_next; 13341 } 13342 13343 if (cmd == pptr->port_pkt_tail) { 13344 ASSERT(cmd->cmd_next == NULL); 13345 pptr->port_pkt_tail = pcmd; 13346 if (pcmd) { 13347 pcmd->cmd_next = NULL; 13348 } 13349 } 13350 cmd->cmd_next = NULL; 13351 13352 /* 13353 * save the current head before dropping the 13354 * mutex - If the head doesn't remain the 13355 * same after re acquiring the mutex, just 13356 * bail out and revisit on next tick. 13357 * 13358 * PS: The tail pointer can change as the commands 13359 * get requeued after failure to retransport 13360 */ 13361 save_head = pptr->port_pkt_head; 13362 mutex_exit(&pptr->port_pkt_mutex); 13363 13364 if (cmd->cmd_fp_pkt->pkt_timeout == 13365 FCP_INVALID_TIMEOUT) { 13366 struct scsi_pkt *pkt = cmd->cmd_pkt; 13367 struct fcp_lun *plun; 13368 struct fcp_tgt *ptgt; 13369 13370 plun = ADDR2LUN(&pkt->pkt_address); 13371 ptgt = plun->lun_tgt; 13372 13373 FCP_TRACE(fcp_logq, pptr->port_instbuf, 13374 fcp_trace, FCP_BUF_LEVEL_2, 0, 13375 "SCSI cmd 0x%x to D_ID=%x timed out", 13376 pkt->pkt_cdbp[0], ptgt->tgt_d_id); 13377 13378 cmd->cmd_state == FCP_PKT_ABORTING ? 
13379 fcp_fail_cmd(cmd, CMD_RESET, 13380 STAT_DEV_RESET) : fcp_fail_cmd(cmd, 13381 CMD_TIMEOUT, STAT_ABORTED); 13382 } else { 13383 fcp_retransport_cmd(pptr, cmd); 13384 } 13385 mutex_enter(&pptr->port_pkt_mutex); 13386 if (save_head && save_head != pptr->port_pkt_head) { 13387 /* 13388 * Looks like linked list got changed (mostly 13389 * happens when an an OFFLINE LUN code starts 13390 * returning overflow queue commands in 13391 * parallel. So bail out and revisit during 13392 * next tick 13393 */ 13394 break; 13395 } 13396 end_of_loop: 13397 /* 13398 * Scan only upto the previously known tail pointer 13399 * to avoid excessive processing - lots of new packets 13400 * could have been added to the tail or the old ones 13401 * re-queued. 13402 */ 13403 if (cmd == tail) { 13404 break; 13405 } 13406 } 13407 mutex_exit(&pptr->port_pkt_mutex); 13408 13409 mutex_enter(&pptr->port_mutex); 13410 for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) { 13411 struct fcp_tgt *ptgt = icmd->ipkt_tgt; 13412 13413 nicmd = icmd->ipkt_next; 13414 if ((icmd->ipkt_restart != 0) && 13415 (icmd->ipkt_restart >= fcp_watchdog_time)) { 13416 /* packet has not timed out */ 13417 continue; 13418 } 13419 13420 /* time for packet re-transport */ 13421 if (icmd == pptr->port_ipkt_list) { 13422 pptr->port_ipkt_list = icmd->ipkt_next; 13423 if (pptr->port_ipkt_list) { 13424 pptr->port_ipkt_list->ipkt_prev = 13425 NULL; 13426 } 13427 } else { 13428 icmd->ipkt_prev->ipkt_next = icmd->ipkt_next; 13429 if (icmd->ipkt_next) { 13430 icmd->ipkt_next->ipkt_prev = 13431 icmd->ipkt_prev; 13432 } 13433 } 13434 icmd->ipkt_next = NULL; 13435 icmd->ipkt_prev = NULL; 13436 mutex_exit(&pptr->port_mutex); 13437 13438 if (fcp_is_retryable(icmd)) { 13439 fc_ulp_rscn_info_t *rscnp = 13440 (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt-> 13441 pkt_ulp_rscn_infop; 13442 13443 FCP_TRACE(fcp_logq, pptr->port_instbuf, 13444 fcp_trace, FCP_BUF_LEVEL_2, 0, 13445 "%x to D_ID=%x Retrying..", 13446 icmd->ipkt_opcode, 13447 
icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id); 13448 13449 /* 13450 * Update the RSCN count in the packet 13451 * before resending. 13452 */ 13453 13454 if (rscnp != NULL) { 13455 rscnp->ulp_rscn_count = 13456 fc_ulp_get_rscn_count(pptr-> 13457 port_fp_handle); 13458 } 13459 13460 mutex_enter(&pptr->port_mutex); 13461 mutex_enter(&ptgt->tgt_mutex); 13462 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) { 13463 mutex_exit(&ptgt->tgt_mutex); 13464 mutex_exit(&pptr->port_mutex); 13465 switch (icmd->ipkt_opcode) { 13466 int rval; 13467 case LA_ELS_PLOGI: 13468 if ((rval = fc_ulp_login( 13469 pptr->port_fp_handle, 13470 &icmd->ipkt_fpkt, 1)) == 13471 FC_SUCCESS) { 13472 mutex_enter( 13473 &pptr->port_mutex); 13474 continue; 13475 } 13476 if (fcp_handle_ipkt_errors( 13477 pptr, ptgt, icmd, rval, 13478 "PLOGI") == DDI_SUCCESS) { 13479 mutex_enter( 13480 &pptr->port_mutex); 13481 continue; 13482 } 13483 break; 13484 13485 case LA_ELS_PRLI: 13486 if ((rval = fc_ulp_issue_els( 13487 pptr->port_fp_handle, 13488 icmd->ipkt_fpkt)) == 13489 FC_SUCCESS) { 13490 mutex_enter( 13491 &pptr->port_mutex); 13492 continue; 13493 } 13494 if (fcp_handle_ipkt_errors( 13495 pptr, ptgt, icmd, rval, 13496 "PRLI") == DDI_SUCCESS) { 13497 mutex_enter( 13498 &pptr->port_mutex); 13499 continue; 13500 } 13501 break; 13502 13503 default: 13504 if ((rval = fcp_transport( 13505 pptr->port_fp_handle, 13506 icmd->ipkt_fpkt, 1)) == 13507 FC_SUCCESS) { 13508 mutex_enter( 13509 &pptr->port_mutex); 13510 continue; 13511 } 13512 if (fcp_handle_ipkt_errors( 13513 pptr, ptgt, icmd, rval, 13514 "PRLI") == DDI_SUCCESS) { 13515 mutex_enter( 13516 &pptr->port_mutex); 13517 continue; 13518 } 13519 break; 13520 } 13521 } else { 13522 mutex_exit(&ptgt->tgt_mutex); 13523 mutex_exit(&pptr->port_mutex); 13524 } 13525 } else { 13526 fcp_print_error(icmd->ipkt_fpkt); 13527 } 13528 13529 (void) fcp_call_finish_init(pptr, ptgt, 13530 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt, 13531 icmd->ipkt_cause); 13532 fcp_icmd_free(pptr, icmd); 13533 
mutex_enter(&pptr->port_mutex); 13534 } 13535 13536 pptr->port_state &= ~FCP_STATE_IN_WATCHDOG; 13537 mutex_exit(&pptr->port_mutex); 13538 mutex_enter(&fcp_global_mutex); 13539 13540 end_of_watchdog: 13541 /* 13542 * Bail out early before getting into trouble 13543 */ 13544 if (save_port != fcp_port_head) { 13545 break; 13546 } 13547 } 13548 13549 if (fcp_watchdog_init > 0) { 13550 /* reschedule timeout to go again */ 13551 fcp_watchdog_id = 13552 timeout(fcp_watch, NULL, fcp_watchdog_tick); 13553 } 13554 mutex_exit(&fcp_global_mutex); 13555 } 13556 13557 13558 static void 13559 fcp_check_reset_delay(struct fcp_port *pptr) 13560 { 13561 uint32_t tgt_cnt; 13562 int level; 13563 struct fcp_tgt *ptgt; 13564 struct fcp_lun *plun; 13565 struct fcp_reset_elem *cur = NULL; 13566 struct fcp_reset_elem *next = NULL; 13567 struct fcp_reset_elem *prev = NULL; 13568 13569 ASSERT(mutex_owned(&pptr->port_mutex)); 13570 13571 next = pptr->port_reset_list; 13572 while ((cur = next) != NULL) { 13573 next = cur->next; 13574 13575 if (cur->timeout < fcp_watchdog_time) { 13576 prev = cur; 13577 continue; 13578 } 13579 13580 ptgt = cur->tgt; 13581 plun = cur->lun; 13582 tgt_cnt = cur->tgt_cnt; 13583 13584 if (ptgt) { 13585 level = RESET_TARGET; 13586 } else { 13587 ASSERT(plun != NULL); 13588 level = RESET_LUN; 13589 ptgt = plun->lun_tgt; 13590 } 13591 if (prev) { 13592 prev->next = next; 13593 } else { 13594 /* 13595 * Because we drop port mutex while doing aborts for 13596 * packets, we can't rely on reset_list pointing to 13597 * our head 13598 */ 13599 if (cur == pptr->port_reset_list) { 13600 pptr->port_reset_list = next; 13601 } else { 13602 struct fcp_reset_elem *which; 13603 13604 which = pptr->port_reset_list; 13605 while (which && which->next != cur) { 13606 which = which->next; 13607 } 13608 ASSERT(which != NULL); 13609 13610 which->next = next; 13611 prev = which; 13612 } 13613 } 13614 13615 kmem_free(cur, sizeof (*cur)); 13616 13617 if (tgt_cnt == ptgt->tgt_change_cnt) { 
			mutex_enter(&ptgt->tgt_mutex);
			if (level == RESET_TARGET) {
				fcp_update_tgt_state(ptgt,
				    FCP_RESET, FCP_LUN_BUSY);
			} else {
				fcp_update_lun_state(plun,
				    FCP_RESET, FCP_LUN_BUSY);
			}
			mutex_exit(&ptgt->tgt_mutex);

			/* abort with port_mutex dropped; see note above */
			mutex_exit(&pptr->port_mutex);
			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
			mutex_enter(&pptr->port_mutex);
		}
	}
}


/*
 * Function: fcp_abort_all
 *
 * Description: Completes (with CMD_RESET/STAT_DEV_RESET) every command on
 *		the port overflow queue that belongs to the given target or
 *		LUN, then - unless the FCA returns all queued commands
 *		itself (FC_RESET_RETURN_ALL) - walks the LUN packet lists
 *		and aborts every FCP_PKT_ISSUED command via fc_ulp_abort().
 *		Commands whose abort fails are marked FCP_PKT_ABORTING and
 *		parked on the overflow queue for fcp_watch() to time out.
 *
 * Argument:	*pptr		FCP port.
 *		*ttgt		Target being reset (NULL for a LUN reset).
 *		*rlun		LUN being reset (NULL for a target reset).
 *		tgt_cnt		Target generation count at reset time.
 *
 * Context:	Kernel context; called with no FCP mutexes held.
 */
static void
fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
    struct fcp_lun *rlun, int tgt_cnt)
{
	int rval;
	struct fcp_lun	*tlun, *nlun;
	struct fcp_pkt	*pcmd = NULL, *ncmd = NULL,
	    *cmd = NULL, *head = NULL,
	    *tail = NULL;

	/*
	 * Pass 1: pull every matching command off the overflow queue onto
	 * a private list (head/tail) while holding port_pkt_mutex.
	 */
	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
		struct fcp_tgt *ptgt = plun->lun_tgt;

		ncmd = cmd->cmd_next;

		if (ptgt != ttgt && plun != rlun) {
			pcmd = cmd;
			continue;
		}

		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = ncmd;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = ncmd;
		}
		if (pptr->port_pkt_tail == cmd) {
			ASSERT(cmd->cmd_next == NULL);
			pptr->port_pkt_tail = pcmd;
			if (pcmd != NULL) {
				pcmd->cmd_next = NULL;
			}
		}

		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);
			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	/*
	 * Pass 2: complete the collected commands with a reset indication,
	 * but only while the target generation still matches tgt_cnt.
	 */
	for (cmd = head; cmd != NULL; cmd = ncmd) {
		struct scsi_pkt *pkt = cmd->cmd_pkt;

		ncmd = cmd->cmd_next;
		ASSERT(pkt != NULL);

		mutex_enter(&pptr->port_mutex);
		if (ttgt->tgt_change_cnt == tgt_cnt) {
			mutex_exit(&pptr->port_mutex);
			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
			pkt->pkt_reason = CMD_RESET;
			pkt->pkt_statistics |= STAT_DEV_RESET;
			cmd->cmd_state = FCP_PKT_IDLE;
			fcp_post_callback(cmd);
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}

	/*
	 * If the FCA will return all the commands in its queue then our
	 * work is easy, just return.
	 */

	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
		return;
	}

	/*
	 * For RESET_LUN get hold of target pointer
	 */
	if (ttgt == NULL) {
		ASSERT(rlun != NULL);

		ttgt = rlun->lun_tgt;

		ASSERT(ttgt != NULL);
	}

	/*
	 * There are some severe race conditions here.
	 * While we are trying to abort the pkt, it might be completing
	 * so mark it aborted and if the abort does not succeed then
	 * handle it in the watch thread.
	 */
	mutex_enter(&ttgt->tgt_mutex);
	nlun = ttgt->tgt_lun;
	mutex_exit(&ttgt->tgt_mutex);
	while ((tlun = nlun) != NULL) {
		int restart = 0;
		if (rlun && rlun != tlun) {
			mutex_enter(&ttgt->tgt_mutex);
			nlun = tlun->lun_next;
			mutex_exit(&ttgt->tgt_mutex);
			continue;
		}
		mutex_enter(&tlun->lun_mutex);
		cmd = tlun->lun_pkt_head;
		while (cmd != NULL) {
			if (cmd->cmd_state == FCP_PKT_ISSUED) {
				struct scsi_pkt *pkt;

				restart = 1;
				cmd->cmd_state = FCP_PKT_ABORTING;
				mutex_exit(&tlun->lun_mutex);
				rval = fc_ulp_abort(pptr->port_fp_handle,
				    cmd->cmd_fp_pkt, KM_SLEEP);
				if (rval == FC_SUCCESS) {
					pkt = cmd->cmd_pkt;
					pkt->pkt_reason = CMD_RESET;
					pkt->pkt_statistics |= STAT_DEV_RESET;
					cmd->cmd_state = FCP_PKT_IDLE;
					fcp_post_callback(cmd);
				} else {
					caddr_t msg;

					(void) fc_ulp_error(rval, &msg);

					/*
					 * This part is tricky. The abort
					 * failed and now the command could
					 * be completing. The cmd_state ==
					 * FCP_PKT_ABORTING should save
					 * us in fcp_cmd_callback. If we
					 * are already aborting ignore the
					 * command in fcp_cmd_callback.
					 * Here we leave this packet for 20
					 * sec to be aborted in the
					 * fcp_watch thread.
					 */
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Abort failed after reset %s",
					    msg);

					cmd->cmd_timeout =
					    fcp_watchdog_time +
					    cmd->cmd_pkt->pkt_time +
					    FCP_FAILED_DELAY;

					cmd->cmd_fp_pkt->pkt_timeout =
					    FCP_INVALID_TIMEOUT;
					/*
					 * This is a hack, cmd is put in the
					 * overflow queue so that it can be
					 * timed out finally
					 */
					cmd->cmd_flags |= CFLAG_IN_QUEUE;

					mutex_enter(&pptr->port_pkt_mutex);
					if (pptr->port_pkt_head) {
						ASSERT(pptr->port_pkt_tail
						    != NULL);
						pptr->port_pkt_tail->cmd_next
						    = cmd;
						pptr->port_pkt_tail = cmd;
					} else {
						ASSERT(pptr->port_pkt_tail
						    == NULL);
						pptr->port_pkt_head =
						    pptr->port_pkt_tail
						    = cmd;
					}
					cmd->cmd_next = NULL;
					mutex_exit(&pptr->port_pkt_mutex);
				}
				/*
				 * The lun mutex was dropped for the abort;
				 * rescan the list from its head.
				 */
				mutex_enter(&tlun->lun_mutex);
				cmd = tlun->lun_pkt_head;
			} else {
				cmd = cmd->cmd_forw;
			}
		}
		mutex_exit(&tlun->lun_mutex);

		/* if any abort was started, rescan all LUNs of the target */
		mutex_enter(&ttgt->tgt_mutex);
		restart == 1 ?
		    (nlun = ttgt->tgt_lun) : (nlun = tlun->lun_next);
		mutex_exit(&ttgt->tgt_mutex);

		mutex_enter(&pptr->port_mutex);
		if (tgt_cnt != ttgt->tgt_change_cnt) {
			/* target generation changed; give up */
			mutex_exit(&pptr->port_mutex);
			return;
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}
}


/*
 * unlink the soft state, returning the soft state found (if any)
 *
 * acquires and releases the global mutex
 */
struct fcp_port *
fcp_soft_state_unlink(struct fcp_port *pptr)
{
	struct fcp_port	*hptr;		/* ptr index */
	struct fcp_port	*tptr;		/* prev hptr */

	mutex_enter(&fcp_global_mutex);
	for (hptr = fcp_port_head, tptr = NULL;
	    hptr != NULL;
	    tptr = hptr, hptr = hptr->port_next) {
		if (hptr == pptr) {
			/* we found a match -- remove this item */
			if (tptr == NULL) {
				/* we're at the head of the list */
				fcp_port_head = hptr->port_next;
			} else {
				tptr->port_next = hptr->port_next;
			}
			break;	/* success */
		}
	}
	/* last port gone: release the LUN blacklist as well */
	if (fcp_port_head == NULL) {
		fcp_cleanup_blacklist(&fcp_lun_blacklist);
	}
	mutex_exit(&fcp_global_mutex);
	return (hptr);
}


/*
 * called by fcp_scsi_hba_tgt_init to find a LUN given a
 * WWN and a LUN number
 */
/* ARGSUSED */
static struct fcp_lun *
fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
{
	int hash;
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;

	ASSERT(mutex_owned(&pptr->port_mutex));

	/* hash on the port WWN to find the target, then scan its LUNs */
	hash = FCP_HASH(wwn);
	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
	    ptgt = ptgt->tgt_next) {
		if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
		    sizeof (ptgt->tgt_port_wwn)) == 0) {
			mutex_enter(&ptgt->tgt_mutex);
			for (plun = ptgt->tgt_lun;
			    plun != NULL;
			    plun = plun->lun_next) {
				if (plun->lun_num == lun) {
					mutex_exit(&ptgt->tgt_mutex);
					return (plun);
				}
			}
			mutex_exit(&ptgt->tgt_mutex);
			return (NULL);
		}
	}
	return (NULL);
}

/*
 * Function: fcp_prepare_pkt
 *
 * Description: This function prepares the SCSI cmd pkt, passed by the caller,
 *		for fcp_start(). It binds the data or partially maps it.
 *		Builds the FCP header and starts the initialization of the
 *		Fibre Channel header.
 *
 * Argument:	*pptr		FCP port.
 *		*cmd		FCP packet.
 *		*plun		LUN the command will be sent to.
 *
 * Context:	User, Kernel and Interrupt context.
 */
static void
fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
    struct fcp_lun *plun)
{
	fc_packet_t	*fpkt = cmd->cmd_fp_pkt;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	struct fcp_cmd	*fcmd = &cmd->cmd_fcp_cmd;

	ASSERT(cmd->cmd_pkt->pkt_comp ||
	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));

	if (cmd->cmd_pkt->pkt_numcookies) {
		/* data transfer: derive direction from the DMA flags */
		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
			fcmd->fcp_cntl.cntl_read_data = 1;
			fcmd->fcp_cntl.cntl_write_data = 0;
			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		} else {
			fcmd->fcp_cntl.cntl_read_data = 0;
			fcmd->fcp_cntl.cntl_write_data = 1;
			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
		}

		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;

		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
		ASSERT(fpkt->pkt_data_cookie_cnt <=
		    pptr->port_data_dma_attr.dma_attr_sgllen);

		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;

		/* FCA needs pkt_datalen to be set */
		fpkt->pkt_datalen = cmd->cmd_dmacount;
		fcmd->fcp_data_len = cmd->cmd_dmacount;
	} else {
		/* no data phase */
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
		fpkt->pkt_datalen = 0;
		fcmd->fcp_data_len = 0;
	}

	/* set up the Tagged Queuing type */
	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	fcmd->fcp_ent_addr = plun->lun_addr;

	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
	} else {
		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
	}

	/* reset the completion fields for this (re)issue */
	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
	cmd->cmd_pkt->pkt_state = 0;
	cmd->cmd_pkt->pkt_statistics = 0;
	cmd->cmd_pkt->pkt_resid = 0;

	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;

	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
		fpkt->pkt_comp = NULL;
	} else {
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
		fpkt->pkt_comp = fcp_cmd_callback;
	}

	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & FCP_STATE_SUSPENDED) {
		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
	}
	mutex_exit(&pptr->port_mutex);

	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;

	/*
	 * Save a few kernel cycles here
	 */
#ifndef	__lock_lint
	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
#endif /* __lock_lint */
}

/*
 * Function: fcp_post_callback
 *
 * Description: Invokes the SCSI packet completion routine, if one was set.
 */
static void
fcp_post_callback(struct fcp_pkt *cmd)
{
	if (cmd->cmd_pkt->pkt_comp) {
		(*cmd->cmd_pkt->pkt_comp) (cmd->cmd_pkt);
	}
}


/*
 * called to do polled I/O by fcp_start()
 *
 * return a transport status value, i.e. TRAN_ACCECPT for success
 */
static int
fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
	int rval;

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts++;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	/* default the FC timeout when the caller supplied none */
	if (cmd->cmd_fp_pkt->pkt_timeout) {
		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
	} else {
		cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
	}

	/* polled packets must not have a completion routine */
	ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);

	cmd->cmd_state = FCP_PKT_ISSUED;

	rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	cmd->cmd_state = FCP_PKT_IDLE;

	/* map the fc_ulp_transport() result to a SCSA TRAN_* status */
	switch (rval) {
	case FC_SUCCESS:
		if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
			fcp_complete_pkt(cmd->cmd_fp_pkt);
			rval = TRAN_ACCEPT;
		} else {
			rval = TRAN_FATAL_ERROR;
		}
		break;

	case FC_TRAN_BUSY:
		rval = TRAN_BUSY;
		cmd->cmd_pkt->pkt_resid = 0;
		break;

	case FC_BADPACKET:
		rval = TRAN_BADPKT;
		break;

	default:
		rval = TRAN_FATAL_ERROR;
		break;
	}

	return (rval);
}


/*
 * called by some of the following transport-called routines to convert
 * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
 */
static struct fcp_port *
fcp_dip2port(dev_info_t *dip)
{
	int instance;

	instance = ddi_get_instance(dip);
	return (ddi_get_soft_state(fcp_softstate, instance));
}


/*
 * called internally to return a LUN given a dip
 */
struct fcp_lun *
fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
{
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	int		i;


	ASSERT(mutex_owned(&pptr->port_mutex));

	/* exhaustive scan: every hash bucket, target and LUN */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i];
		    ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			mutex_enter(&ptgt->tgt_mutex);
			for (plun = ptgt->tgt_lun; plun != NULL;
			    plun = plun->lun_next) {
				mutex_enter(&plun->lun_mutex);
				if (plun->lun_cip == cip) {
					mutex_exit(&plun->lun_mutex);
					mutex_exit(&ptgt->tgt_mutex);
					return (plun);	/* match found */
				}
				mutex_exit(&plun->lun_mutex);
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
	return (NULL);	/* no LUN found */
}

/*
 * pass an element to the hotplug list, kick the hotplug thread
 * and wait for the element to get processed by the hotplug thread.
 * on return the element is freed.
 *
 * return zero success and non-zero on failure
 *
 * acquires/releases the target mutex
 *
 */
static int
fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
    child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
{
	struct fcp_hp_elem	*elem;
	int			rval;

	mutex_enter(&plun->lun_tgt->tgt_mutex);
	if ((elem = fcp_pass_to_hp(pptr, plun, cip,
	    what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
		mutex_exit(&plun->lun_tgt->tgt_mutex);
		fcp_log(CE_CONT, pptr->port_dip,
		    "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
		    what, plun->lun_tgt->tgt_d_id, plun->lun_num);
		return (NDI_FAILURE);
	}
	mutex_exit(&plun->lun_tgt->tgt_mutex);
	mutex_enter(&elem->mutex);
	/* block until the hotplug daemon clears elem->wait */
	if (elem->wait) {
		while (elem->wait) {
			cv_wait(&elem->cv, &elem->mutex);
		}
	}
	rval = (elem->result);
	mutex_exit(&elem->mutex);
	/* wait == 1, so the caller (us) owns and frees the element */
	mutex_destroy(&elem->mutex);
	cv_destroy(&elem->cv);
	kmem_free(elem, sizeof (struct fcp_hp_elem));
	return (rval);
}

/*
 * pass an element to the hotplug list, and then
 * kick the hotplug thread
 *
 * return Boolean success, i.e. non-zero if all goes well, else zero on error
 *
 * acquires/releases the hotplug mutex
 *
 * called with the target mutex owned
 *
 * memory acquired in NOSLEEP mode
 * NOTE: if wait is set to 1 then the caller is responsible for waiting on
 * for the hp daemon to process the request and is responsible for
 * freeing the element
 */
static struct fcp_hp_elem *
fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
    child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
{
	struct fcp_hp_elem	*elem;
	dev_info_t		*pdip;

	ASSERT(pptr != NULL);
	ASSERT(plun != NULL);
	ASSERT(plun->lun_tgt != NULL);
	ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));

	/* create space for a hotplug element */
	if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
	    == NULL) {
		fcp_log(CE_WARN, NULL,
		    "!can't allocate memory for hotplug element");
		return (NULL);
	}

	/* fill in hotplug element */
	elem->port = pptr;
	elem->lun = plun;
	elem->cip = cip;
	elem->what = what;
	elem->flags = flags;
	elem->link_cnt = link_cnt;
	elem->tgt_cnt = tgt_cnt;
	elem->wait = wait;
	mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&elem->cv, NULL, CV_DRIVER, NULL);

	/* schedule the hotplug task */
	pdip = pptr->port_dip;
	mutex_enter(&plun->lun_mutex);
	if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
		plun->lun_event_count++;
		elem->event_cnt = plun->lun_event_count;
	}
	mutex_exit(&plun->lun_mutex);
	if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
	    (void *)elem, KM_NOSLEEP) == NULL) {
		/* dispatch failed: undo the event count bump */
		mutex_enter(&plun->lun_mutex);
		if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
			plun->lun_event_count--;
		}
		mutex_exit(&plun->lun_mutex);
kmem_free(elem, sizeof (*elem)); 14236 return (0); 14237 } 14238 14239 return (elem); 14240 } 14241 14242 14243 static void 14244 fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd) 14245 { 14246 int rval; 14247 struct scsi_address *ap; 14248 struct fcp_lun *plun; 14249 struct fcp_tgt *ptgt; 14250 fc_packet_t *fpkt; 14251 14252 ap = &cmd->cmd_pkt->pkt_address; 14253 plun = ADDR2LUN(ap); 14254 ptgt = plun->lun_tgt; 14255 14256 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE); 14257 14258 cmd->cmd_state = FCP_PKT_IDLE; 14259 14260 mutex_enter(&pptr->port_mutex); 14261 mutex_enter(&ptgt->tgt_mutex); 14262 if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) && 14263 (!(pptr->port_state & FCP_STATE_ONLINING))) { 14264 fc_ulp_rscn_info_t *rscnp; 14265 14266 cmd->cmd_state = FCP_PKT_ISSUED; 14267 14268 /* 14269 * It is possible for pkt_pd to be NULL if tgt_pd_handle was 14270 * originally NULL, hence we try to set it to the pd pointed 14271 * to by the SCSI device we're trying to get to. 14272 */ 14273 14274 fpkt = cmd->cmd_fp_pkt; 14275 if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) { 14276 fpkt->pkt_pd = ptgt->tgt_pd_handle; 14277 /* 14278 * We need to notify the transport that we now have a 14279 * reference to the remote port handle. 14280 */ 14281 fc_ulp_hold_remote_port(ptgt->tgt_pd_handle); 14282 } 14283 14284 mutex_exit(&ptgt->tgt_mutex); 14285 mutex_exit(&pptr->port_mutex); 14286 14287 ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0); 14288 14289 /* prepare the packet */ 14290 14291 fcp_prepare_pkt(pptr, cmd, plun); 14292 14293 rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt-> 14294 pkt_ulp_rscn_infop; 14295 14296 cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ? 
14297 fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0; 14298 14299 if (rscnp != NULL) { 14300 rscnp->ulp_rscn_count = 14301 fc_ulp_get_rscn_count(pptr-> 14302 port_fp_handle); 14303 } 14304 14305 rval = fcp_transport(pptr->port_fp_handle, 14306 cmd->cmd_fp_pkt, 0); 14307 14308 if (rval == FC_SUCCESS) { 14309 return; 14310 } 14311 cmd->cmd_state &= ~FCP_PKT_ISSUED; 14312 } else { 14313 mutex_exit(&ptgt->tgt_mutex); 14314 mutex_exit(&pptr->port_mutex); 14315 } 14316 14317 fcp_queue_pkt(pptr, cmd); 14318 } 14319 14320 14321 static void 14322 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics) 14323 { 14324 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE); 14325 14326 cmd->cmd_flags &= ~CFLAG_IN_QUEUE; 14327 cmd->cmd_state = FCP_PKT_IDLE; 14328 14329 cmd->cmd_pkt->pkt_reason = reason; 14330 cmd->cmd_pkt->pkt_state = 0; 14331 cmd->cmd_pkt->pkt_statistics = statistics; 14332 14333 fcp_post_callback(cmd); 14334 } 14335 14336 /* 14337 * Function: fcp_queue_pkt 14338 * 14339 * Description: This function queues the packet passed by the caller into 14340 * the list of packets of the FCP port. 14341 * 14342 * Argument: *pptr FCP port. 14343 * *cmd FCP packet to queue. 14344 * 14345 * Return Value: None 14346 * 14347 * Context: User, Kernel and Interrupt context. 
14348 */ 14349 static void 14350 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd) 14351 { 14352 ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == NULL); 14353 14354 mutex_enter(&pptr->port_pkt_mutex); 14355 cmd->cmd_flags |= CFLAG_IN_QUEUE; 14356 ASSERT(cmd->cmd_state != FCP_PKT_ISSUED); 14357 cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY; 14358 14359 /* 14360 * zero pkt_time means hang around for ever 14361 */ 14362 if (cmd->cmd_pkt->pkt_time) { 14363 if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) { 14364 cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY; 14365 } else { 14366 /* 14367 * Indicate the watch thread to fail the 14368 * command by setting it to highest value 14369 */ 14370 cmd->cmd_timeout = fcp_watchdog_time; 14371 cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT; 14372 } 14373 } 14374 14375 if (pptr->port_pkt_head) { 14376 ASSERT(pptr->port_pkt_tail != NULL); 14377 14378 pptr->port_pkt_tail->cmd_next = cmd; 14379 pptr->port_pkt_tail = cmd; 14380 } else { 14381 ASSERT(pptr->port_pkt_tail == NULL); 14382 14383 pptr->port_pkt_head = pptr->port_pkt_tail = cmd; 14384 } 14385 cmd->cmd_next = NULL; 14386 mutex_exit(&pptr->port_pkt_mutex); 14387 } 14388 14389 /* 14390 * Function: fcp_update_targets 14391 * 14392 * Description: This function applies the specified change of state to all 14393 * the targets listed. The operation applied is 'set'. 14394 * 14395 * Argument: *pptr FCP port. 14396 * *dev_list Array of fc_portmap_t structures. 14397 * count Length of dev_list. 14398 * state State bits to update. 14399 * cause Reason for the update. 14400 * 14401 * Return Value: None 14402 * 14403 * Context: User, Kernel and Interrupt context. 14404 * The mutex pptr->port_mutex must be held. 
 */
static void
fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
    uint32_t count, uint32_t state, int cause)
{
	fc_portmap_t	*map_entry;
	struct fcp_tgt	*ptgt;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	while (count--) {
		map_entry = &(dev_list[count]);
		ptgt = fcp_lookup_target(pptr,
		    (uchar_t *)&(map_entry->map_pwwn));
		if (ptgt == NULL) {
			continue;
		}

		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_trace = 0;
		/* bump generation so in-flight work for this target aborts */
		ptgt->tgt_change_cnt++;
		ptgt->tgt_statec_cause = cause;
		ptgt->tgt_tmp_cnt = 1;
		fcp_update_tgt_state(ptgt, FCP_SET, state);
		mutex_exit(&ptgt->tgt_mutex);
	}
}

/*
 * Function: fcp_call_finish_init
 *
 * Description: Convenience wrapper that takes pptr->port_mutex around
 *		fcp_call_finish_init_held().
 */
static int
fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int lcount, int tcount, int cause)
{
	int rval;

	mutex_enter(&pptr->port_mutex);
	rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
	mutex_exit(&pptr->port_mutex);

	return (rval);
}


/*
 * Function: fcp_call_finish_init_held
 *
 * Description: Decrements the per-target and per-port pending work counts
 *		and triggers fcp_finish_tgt()/fcp_finish_init() when the
 *		last reference for the current link/target generation is
 *		dropped.
 *
 * Argument:	*pptr		FCP port.
 *		*ptgt		Target (may be NULL).
 *		lcount		Link count snapshot of the caller.
 *		tcount		Target change count snapshot of the caller.
 *		cause		FCP_CAUSE_* reason for the callback.
 *
 * Return Value: FCP_NO_CHANGE or FCP_DEV_CHANGE (generation moved on).
 *
 * Context:	Called with pptr->port_mutex held.
 */
static int
fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int lcount, int tcount, int cause)
{
	int		finish_init = 0;
	int		finish_tgt = 0;
	int		do_finish_init = 0;
	int		rval = FCP_NO_CHANGE;

	if (cause == FCP_CAUSE_LINK_CHANGE ||
	    cause == FCP_CAUSE_LINK_DOWN) {
		do_finish_init = 1;
	}

	if (ptgt != NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
		    " cause = %d, d_id = 0x%x, tgt_done = %d",
		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
		    ptgt->tgt_d_id, ptgt->tgt_done);

		mutex_enter(&ptgt->tgt_mutex);

		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
			/* caller's snapshot is stale; report the change */
			rval = FCP_DEV_CHANGE;
			if (do_finish_init && ptgt->tgt_done == 0) {
				ptgt->tgt_done++;
				finish_init = 1;
			}
		} else {
			if (--ptgt->tgt_tmp_cnt <= 0) {
				ptgt->tgt_tmp_cnt = 0;
				finish_tgt = 1;

				if (do_finish_init) {
					finish_init = 1;
				}
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "Call Finish Init for NO target");

		if (do_finish_init) {
			finish_init = 1;
		}
	}

	if (finish_tgt) {
		ASSERT(ptgt != NULL);

		mutex_enter(&ptgt->tgt_mutex);
#ifdef	DEBUG
		/* record the stack that dropped the last tgt_tmp_cnt ref */
		bzero(ptgt->tgt_tmp_cnt_stack,
		    sizeof (ptgt->tgt_tmp_cnt_stack));

		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
		    FCP_STACK_DEPTH);
#endif /* DEBUG */
		mutex_exit(&ptgt->tgt_mutex);

		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
	}

	if (finish_init && lcount == pptr->port_link_cnt) {
		ASSERT(pptr->port_tmp_cnt > 0);
		if (--pptr->port_tmp_cnt == 0) {
			fcp_finish_init(pptr);
		}
	} else if (lcount != pptr->port_link_cnt) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_call_finish_init_held,1: state change occured"
		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
	}

	return (rval);
}


/*
 * Function: fcp_reconfigure_luns
 *
 * Description: Timeout callback that fabricates a one-entry port map for
 *		the target and replays it through fcp_statec_callback() as
 *		an FC_STATE_DEVICE_CHANGE, forcing LUN rediscovery.
 *
 * Argument:	tgt_handle	Opaque pointer to the struct fcp_tgt.
 *
 * Context:	Kernel context (timeout(9F) callback).
 */
static void
fcp_reconfigure_luns(void * tgt_handle)
{
	uint32_t	dev_cnt;
	fc_portmap_t	*devlist;
	struct fcp_tgt	*ptgt = (struct fcp_tgt *)tgt_handle;
	struct fcp_port	*pptr = ptgt->tgt_port;

	/*
	 * If the timer that fires this off got canceled too late, the
	 * target could have been destroyed.
	 */

	if (ptgt->tgt_tid == NULL) {
		return;
	}

	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
	if (devlist == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap",
		    pptr->port_instance);
		return;
	}

	/* build a single-entry map describing this (logged-in) target */
	dev_cnt = 1;
	devlist->map_pd = ptgt->tgt_pd_handle;
	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
	devlist->map_did.port_id = ptgt->tgt_d_id;

	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);

	devlist->map_state = PORT_DEVICE_LOGGED_IN;
	devlist->map_type = PORT_DEVICE_NEW;
	devlist->map_flags = 0;

	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);

	/*
	 * Clear the tgt_tid after no more references to
	 * the fcp_tgt
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_tid = NULL;
	mutex_exit(&ptgt->tgt_mutex);

	kmem_free(devlist, sizeof (*devlist));
}


/*
 * Function: fcp_free_targets
 *
 * Description: Frees every target (and, transitively, every LUN) in the
 *		port's target hash table. Used on port teardown.
 *
 * Context:	Kernel context; takes pptr->port_mutex.
 */
static void
fcp_free_targets(struct fcp_port *pptr)
{
	int		i;
	struct fcp_tgt	*ptgt;

	mutex_enter(&pptr->port_mutex);
	for (i = 0; i < FCP_NUM_HASH; i++) {
		ptgt = pptr->port_tgt_hash_table[i];
		while (ptgt != NULL) {
			/* grab the link before the node is freed */
			struct fcp_tgt *next_tgt = ptgt->tgt_next;

			fcp_free_target(ptgt);
			ptgt = next_tgt;
		}
	}
	mutex_exit(&pptr->port_mutex);
}


/*
 * Function: fcp_free_target
 *
 * Description: Cancels any pending fcp_reconfigure_luns() timeout, frees
 *		all LUNs of the target and finally the target itself.
 *
 * Argument:	*ptgt		Target to free.
 *
 * Context:	Kernel context.
 */
static void
fcp_free_target(struct fcp_tgt *ptgt)
{
	struct fcp_lun	*plun;
	timeout_id_t	tid;

	mutex_enter(&ptgt->tgt_mutex);
	tid = ptgt->tgt_tid;

	/*
	 * Cancel any pending timeouts for this target.
	 */

	if (tid != NULL) {
		/*
		 * Set tgt_tid to NULL first to avoid a race in the callback.
		 * If tgt_tid is NULL, the callback will simply return.
		 */
		ptgt->tgt_tid = NULL;
		mutex_exit(&ptgt->tgt_mutex);
		(void) untimeout(tid);
		mutex_enter(&ptgt->tgt_mutex);
	}

	plun = ptgt->tgt_lun;
	while (plun != NULL) {
		struct fcp_lun *next_lun = plun->lun_next;

		fcp_dealloc_lun(plun);
		plun = next_lun;
	}

	mutex_exit(&ptgt->tgt_mutex);
	fcp_dealloc_tgt(ptgt);
}

/*
 * Function: fcp_is_retryable
 *
 * Description: Indicates if the internal packet is retryable.
 *
 * Argument:	*icmd		FCP internal packet.
 *
 * Return Value: 0	Not retryable
 *		 1	Retryable
 *
 * Context:	User, Kernel and Interrupt context
 */
static int
fcp_is_retryable(struct fcp_ipkt *icmd)
{
	/* never retry on a port that is suspended or going away */
	if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
		return (0);
	}

	/* retryable only if another attempt can finish before the deadline */
	return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
	    icmd->ipkt_port->port_deadline) ? 1 : 0);
}

/*
 * Function: fcp_create_on_demand
 *
 * Argument:	*pptr		FCP port.
 *		*pwwn		Port WWN.
 *
 * Return Value: 0	Success
 *		 EIO
 *		 ENOMEM
 *		 EBUSY
 *		 EINVAL
 *
 * Context:	User and Kernel context
 */
static int
fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
{
	int			wait_ms;
	int			tcount;
	int			lcount;
	int			ret;
	int			error;
	int			rval = EIO;
	int			ntries;
	fc_portmap_t		*devlist;
	opaque_t		pd;
	struct fcp_lun		*plun;
	struct fcp_tgt		*ptgt;
	int			old_manual = 0;

	/* Allocates the fc_portmap_t structure.
*/ 14694 devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP); 14695 14696 /* 14697 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown 14698 * in the commented statement below: 14699 * 14700 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT; 14701 * 14702 * Below, the deadline for the discovery process is set. 14703 */ 14704 mutex_enter(&pptr->port_mutex); 14705 pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE; 14706 mutex_exit(&pptr->port_mutex); 14707 14708 /* 14709 * We try to find the remote port based on the WWN provided by the 14710 * caller. We actually ask fp/fctl if it has it. 14711 */ 14712 pd = fc_ulp_get_remote_port(pptr->port_fp_handle, 14713 (la_wwn_t *)pwwn, &error, 1); 14714 14715 if (pd == NULL) { 14716 kmem_free(devlist, sizeof (*devlist)); 14717 return (rval); 14718 } 14719 14720 /* 14721 * The remote port was found. We ask fp/fctl to update our 14722 * fc_portmap_t structure. 14723 */ 14724 ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, 14725 (la_wwn_t *)pwwn, devlist); 14726 if (ret != FC_SUCCESS) { 14727 kmem_free(devlist, sizeof (*devlist)); 14728 return (rval); 14729 } 14730 14731 /* 14732 * The map flag field is set to indicates that the creation is being 14733 * done at the user request (Ioclt probably luxadm or cfgadm). 14734 */ 14735 devlist->map_type = PORT_DEVICE_USER_CREATE; 14736 14737 mutex_enter(&pptr->port_mutex); 14738 14739 /* 14740 * We check to see if fcp already has a target that describes the 14741 * device being created. If not it is created. 
14742 */ 14743 ptgt = fcp_lookup_target(pptr, pwwn); 14744 if (ptgt == NULL) { 14745 lcount = pptr->port_link_cnt; 14746 mutex_exit(&pptr->port_mutex); 14747 14748 ptgt = fcp_alloc_tgt(pptr, devlist, lcount); 14749 if (ptgt == NULL) { 14750 fcp_log(CE_WARN, pptr->port_dip, 14751 "!FC target allocation failed"); 14752 return (ENOMEM); 14753 } 14754 14755 mutex_enter(&pptr->port_mutex); 14756 } 14757 14758 mutex_enter(&ptgt->tgt_mutex); 14759 ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE; 14760 ptgt->tgt_tmp_cnt = 1; 14761 ptgt->tgt_device_created = 0; 14762 /* 14763 * If fabric and auto config is set but the target was 14764 * manually unconfigured then reset to the manual_config_only to 14765 * 0 so the device will get configured. 14766 */ 14767 if (FC_TOP_EXTERNAL(pptr->port_topology) && 14768 fcp_enable_auto_configuration && 14769 ptgt->tgt_manual_config_only == 1) { 14770 old_manual = 1; 14771 ptgt->tgt_manual_config_only = 0; 14772 } 14773 mutex_exit(&ptgt->tgt_mutex); 14774 14775 fcp_update_targets(pptr, devlist, 1, 14776 FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE); 14777 14778 lcount = pptr->port_link_cnt; 14779 tcount = ptgt->tgt_change_cnt; 14780 14781 if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount, 14782 tcount, FCP_CAUSE_USER_CREATE) == TRUE) { 14783 if (FC_TOP_EXTERNAL(pptr->port_topology) && 14784 fcp_enable_auto_configuration && old_manual) { 14785 mutex_enter(&ptgt->tgt_mutex); 14786 ptgt->tgt_manual_config_only = 1; 14787 mutex_exit(&ptgt->tgt_mutex); 14788 } 14789 14790 if (pptr->port_link_cnt != lcount || 14791 ptgt->tgt_change_cnt != tcount) { 14792 rval = EBUSY; 14793 } 14794 mutex_exit(&pptr->port_mutex); 14795 14796 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace, 14797 FCP_BUF_LEVEL_3, 0, 14798 "fcp_create_on_demand: mapflags ptgt=%x, " 14799 "lcount=%x::port_link_cnt=%x, " 14800 "tcount=%x: tgt_change_cnt=%x, rval=%x", 14801 ptgt, lcount, pptr->port_link_cnt, 14802 tcount, ptgt->tgt_change_cnt, rval); 14803 return (rval); 
14804 } 14805 14806 /* 14807 * Due to lack of synchronization mechanisms, we perform 14808 * periodic monitoring of our request; Because requests 14809 * get dropped when another one supercedes (either because 14810 * of a link change or a target change), it is difficult to 14811 * provide a clean synchronization mechanism (such as a 14812 * semaphore or a conditional variable) without exhaustively 14813 * rewriting the mainline discovery code of this driver. 14814 */ 14815 wait_ms = 500; 14816 14817 ntries = fcp_max_target_retries; 14818 14819 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace, 14820 FCP_BUF_LEVEL_3, 0, 14821 "fcp_create_on_demand(1): ntries=%x, ptgt=%x, " 14822 "lcount=%x::port_link_cnt=%x, " 14823 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x " 14824 "tgt_tmp_cnt =%x", 14825 ntries, ptgt, lcount, pptr->port_link_cnt, 14826 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created, 14827 ptgt->tgt_tmp_cnt); 14828 14829 mutex_enter(&ptgt->tgt_mutex); 14830 while (ntries-- != 0 && pptr->port_link_cnt == lcount && 14831 ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) { 14832 mutex_exit(&ptgt->tgt_mutex); 14833 mutex_exit(&pptr->port_mutex); 14834 14835 delay(drv_usectohz(wait_ms * 1000)); 14836 14837 mutex_enter(&pptr->port_mutex); 14838 mutex_enter(&ptgt->tgt_mutex); 14839 } 14840 14841 14842 if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) { 14843 rval = EBUSY; 14844 } else { 14845 if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state == 14846 FCP_TGT_NODE_PRESENT) { 14847 rval = 0; 14848 } 14849 } 14850 14851 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace, 14852 FCP_BUF_LEVEL_3, 0, 14853 "fcp_create_on_demand(2): ntries=%x, ptgt=%x, " 14854 "lcount=%x::port_link_cnt=%x, " 14855 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x " 14856 "tgt_tmp_cnt =%x", 14857 ntries, ptgt, lcount, pptr->port_link_cnt, 14858 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created, 14859 
ptgt->tgt_tmp_cnt); 14860 14861 if (rval) { 14862 if (FC_TOP_EXTERNAL(pptr->port_topology) && 14863 fcp_enable_auto_configuration && old_manual) { 14864 ptgt->tgt_manual_config_only = 1; 14865 } 14866 mutex_exit(&ptgt->tgt_mutex); 14867 mutex_exit(&pptr->port_mutex); 14868 kmem_free(devlist, sizeof (*devlist)); 14869 14870 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace, 14871 FCP_BUF_LEVEL_3, 0, 14872 "fcp_create_on_demand(3): ntries=%x, ptgt=%x, " 14873 "lcount=%x::port_link_cnt=%x, " 14874 "tcount=%x::tgt_change_cnt=%x, rval=%x, " 14875 "tgt_device_created=%x, tgt D_ID=%x", 14876 ntries, ptgt, lcount, pptr->port_link_cnt, 14877 tcount, ptgt->tgt_change_cnt, rval, 14878 ptgt->tgt_device_created, ptgt->tgt_d_id); 14879 return (rval); 14880 } 14881 14882 if ((plun = ptgt->tgt_lun) != NULL) { 14883 tcount = plun->lun_tgt->tgt_change_cnt; 14884 } else { 14885 rval = EINVAL; 14886 } 14887 lcount = pptr->port_link_cnt; 14888 14889 /* 14890 * Configuring the target with no LUNs will fail. We 14891 * should reset the node state so that it is not 14892 * automatically configured when the LUNs are added 14893 * to this target. 
14894 */ 14895 if (ptgt->tgt_lun_cnt == 0) { 14896 ptgt->tgt_node_state = FCP_TGT_NODE_NONE; 14897 } 14898 mutex_exit(&ptgt->tgt_mutex); 14899 mutex_exit(&pptr->port_mutex); 14900 14901 while (plun) { 14902 child_info_t *cip; 14903 14904 mutex_enter(&plun->lun_mutex); 14905 cip = plun->lun_cip; 14906 mutex_exit(&plun->lun_mutex); 14907 14908 mutex_enter(&ptgt->tgt_mutex); 14909 if (!(plun->lun_state & FCP_LUN_OFFLINE)) { 14910 mutex_exit(&ptgt->tgt_mutex); 14911 14912 rval = fcp_pass_to_hp_and_wait(pptr, plun, cip, 14913 FCP_ONLINE, lcount, tcount, 14914 NDI_ONLINE_ATTACH); 14915 if (rval != NDI_SUCCESS) { 14916 FCP_TRACE(fcp_logq, 14917 pptr->port_instbuf, fcp_trace, 14918 FCP_BUF_LEVEL_3, 0, 14919 "fcp_create_on_demand: " 14920 "pass_to_hp_and_wait failed " 14921 "rval=%x", rval); 14922 rval = EIO; 14923 } else { 14924 mutex_enter(&LUN_TGT->tgt_mutex); 14925 plun->lun_state &= ~(FCP_LUN_OFFLINE | 14926 FCP_LUN_BUSY); 14927 mutex_exit(&LUN_TGT->tgt_mutex); 14928 } 14929 mutex_enter(&ptgt->tgt_mutex); 14930 } 14931 14932 plun = plun->lun_next; 14933 mutex_exit(&ptgt->tgt_mutex); 14934 } 14935 14936 kmem_free(devlist, sizeof (*devlist)); 14937 14938 if (FC_TOP_EXTERNAL(pptr->port_topology) && 14939 fcp_enable_auto_configuration && old_manual) { 14940 mutex_enter(&ptgt->tgt_mutex); 14941 /* if successful then set manual to 0 */ 14942 if (rval == 0) { 14943 ptgt->tgt_manual_config_only = 0; 14944 } else { 14945 /* reset to 1 so the user has to do the config */ 14946 ptgt->tgt_manual_config_only = 1; 14947 } 14948 mutex_exit(&ptgt->tgt_mutex); 14949 } 14950 14951 return (rval); 14952 } 14953 14954 14955 static void 14956 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len) 14957 { 14958 int count; 14959 uchar_t byte; 14960 14961 count = 0; 14962 while (*string) { 14963 byte = FCP_ATOB(*string); string++; 14964 byte = byte << 4 | FCP_ATOB(*string); string++; 14965 bytes[count++] = byte; 14966 14967 if (count >= byte_len) { 14968 break; 14969 } 14970 
	}
}

/*
 * Converts a binary WWN (FC_WWN_SIZE bytes) into lower-case hexadecimal
 * ASCII.  The caller must supply a buffer of at least
 * FC_WWN_SIZE * 2 + 1 bytes.
 */
static void
fcp_wwn_to_ascii(uchar_t wwn[], char *string)
{
	int	i;

	for (i = 0; i < FC_WWN_SIZE; i++) {
		(void) sprintf(string + (i * 2),
		    "%02x", wwn[i]);
	}

}

/*
 * Logs a human-readable description of a failed internal packet.  The
 * message is assembled in a scratch buffer as a *format string* for
 * fcp_log() — the doubled "%%" sequences below become literal conversion
 * specifications that fcp_log() expands with the trailing arguments.
 */
static void
fcp_print_error(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	caddr_t		buf;
	int		scsi_cmd = 0;

	ptgt = icmd->ipkt_tgt;
	plun = icmd->ipkt_lun;
	pptr = ptgt->tgt_port;

	/* Best-effort: silently skip logging if memory is tight. */
	buf = kmem_zalloc(256, KM_NOSLEEP);
	if (buf == NULL) {
		return;
	}

	switch (icmd->ipkt_opcode) {
	case SCMD_REPORT_LUN:
		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY_PAGE83:
		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY:
		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case LA_ELS_PLOGI:
		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
		break;

	case LA_ELS_PRLI:
		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
		break;
	}
	/*
	 * NOTE(review): an opcode outside the five cases above leaves buf
	 * empty, which would misalign the fcp_log() arguments appended
	 * below — presumably only these opcodes reach here; confirm at the
	 * call sites.
	 */

	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
		/* Transport OK; decode the FCP response/sense payload. */
		struct fcp_rsp		response, *rsp;
		uchar_t			asc, ascq;
		caddr_t			sense_key = NULL;
		struct fcp_rsp_info	fcp_rsp_err, *bep;

		if (icmd->ipkt_nodma) {
			/* Response is directly addressable. */
			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
			    sizeof (struct fcp_rsp));
		} else {
			/* Copy the response out of the DMA buffer. */
			rsp = &response;
			bep = &fcp_rsp_err;

			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));

			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
			    bep, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp_info));
		}


		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			(void) sprintf(buf + strlen(buf),
			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
			    " senselen=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);

			kmem_free(buf, 256);
			return;
		}

		if (rsp->fcp_u.fcp_status.rsp_len_set &&
		    bep->rsp_code != FCP_NO_FAILURE) {
			(void) sprintf(buf + strlen(buf),
			    " FCP Response code = 0x%x", bep->rsp_code);
		}

		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
			/* CHECK CONDITION: decode the sense data. */
			struct scsi_extended_sense	sense_info, *sense_ptr;

			if (icmd->ipkt_nodma) {
				sense_ptr = (struct scsi_extended_sense *)
				    ((caddr_t)fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len);
			} else {
				sense_ptr = &sense_info;

				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len, &sense_info,
				    fpkt->pkt_resp_acc,
				    sizeof (struct scsi_extended_sense));
			}

			if (sense_ptr->es_key < NUM_SENSE_KEYS +
			    NUM_IMPL_SENSE_KEYS) {
				sense_key = sense_keys[sense_ptr->es_key];
			} else {
				sense_key = "Undefined";
			}

			asc = sense_ptr->es_add_code;
			ascq = sense_ptr->es_qual_code;

			(void) sprintf(buf + strlen(buf),
			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
			    " Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, sense_key,
			    asc, ascq);
		} else {
			(void) sprintf(buf + strlen(buf),
			    " : SCSI status=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->fcp_u.fcp_status.scsi_status);
		}
	} else {
		/* Transport-level failure: report fp/fctl's diagnosis. */
		caddr_t state, reason, action, expln;

		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
		    &action, &expln);

		(void) sprintf(buf + strlen(buf), ": State:%%s,"
		    " Reason:%%s. Giving up");

		if (scsi_cmd) {
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, state, reason);
		} else {
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, state, reason);
		}
	}

	kmem_free(buf, 256);
}


/*
 * Dispatches on the transport error code of a failed internal packet and
 * decides whether to retry it.  Returns DDI_SUCCESS if the packet was
 * requeued (or resent as a PLOGI), DDI_FAILURE otherwise; op names the
 * failed operation for logging.
 */
static int
fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, int rval, caddr_t op)
{
	int	ret = DDI_FAILURE;
	char	*error;

	switch (rval) {
	case FC_DEVICE_BUSY_NEW_RSCN:
		/*
		 * This means that there was a new RSCN that the transport
		 * knows about (which the ULP *may* know about too) but the
		 * pkt that was sent down was related to an older RSCN. So, we
		 * are just going to reset the retry count and deadline and
		 * continue to retry. The idea is that transport is currently
		 * working on the new RSCN and will soon let the ULPs know
		 * about it and when it does the existing logic will kick in
		 * where it will change the tcount to indicate that something
		 * changed on the target. So, rediscovery will start and there
		 * will not be an infinite retry.
		 *
		 * For a full flow of how the RSCN info is transferred back and
		 * forth, see fp.c
		 */
		icmd->ipkt_retries = 0;
		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
		    FCP_ICMD_DEADLINE;

		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		/* FALLTHROUGH */

	case FC_STATEC_BUSY:
	case FC_DEVICE_BUSY:
	case FC_PBUSY:
	case FC_FBUSY:
	case FC_TRAN_BUSY:
	case FC_OFFLINE:
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		/* Transient busy conditions: requeue if still retryable. */
		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
		    fcp_is_retryable(icmd)) {
			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
			ret = DDI_SUCCESS;
		}
		break;

	case FC_LOGINREQ:
		/*
		 * FC_LOGINREQ used to be handled just like all the cases
		 * above. It has been changed to handled a PRLI that fails
		 * with FC_LOGINREQ different than other ipkts that fail
		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
		 * a simple matter to turn it into a PLOGI instead, so that's
		 * exactly what we do here.
		 */
		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
			    FCP_BUF_LEVEL_3, 0,
			    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
			    rval, ptgt->tgt_d_id);
			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
			    fcp_is_retryable(icmd)) {
				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
				ret = DDI_SUCCESS;
			}
		}
		break;

	default:
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
			/* Genuine failure: report it. */
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			(void) fc_ulp_error(rval, &error);
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to send %s to D_ID=%x error=%s",
			    op, ptgt->tgt_d_id, error);
		} else {
			/* A state change made this packet stale; just trace. */
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_handle_ipkt_errors,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
		}
		break;
	}

	return (ret);
}


/*
 * Check of outstanding commands on any LUN for this target.
 * Returns FC_SUCCESS as soon as one command in FCP_PKT_ISSUED state is
 * found on any LUN, FC_FAILURE if none are outstanding.
 */
static int
fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
{
	struct fcp_lun	*plun;
	struct fcp_pkt	*cmd;

	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
		mutex_enter(&plun->lun_mutex);
		for (cmd = plun->lun_pkt_head; cmd != NULL;
		    cmd = cmd->cmd_forw) {
			if (cmd->cmd_state == FCP_PKT_ISSUED) {
				mutex_exit(&plun->lun_mutex);
				return (FC_SUCCESS);
			}
		}
		mutex_exit(&plun->lun_mutex);
	}

	return (FC_FAILURE);
}

/*
 * Builds a port map covering every non-orphan target on the port; the
 * entry count is returned through dev_cnt.  Entries fp/fctl no longer
 * knows about are marked PORT_DEVICE_INVALID/OLD by hand.  Returns NULL
 * if the map cannot be allocated; the caller frees the returned map.
 */
static fc_portmap_t *
fcp_construct_map(struct fcp_port *pptr,
    uint32_t *dev_cnt)
{
	int		i;
	fc_portmap_t	*devlist;
	fc_portmap_t	*devptr = NULL;
	struct fcp_tgt	*ptgt;

	mutex_enter(&pptr->port_mutex);
	/* First pass: count non-orphan targets to size the map. */
	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				++*dev_cnt;
			}
		}
	}

	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
	    KM_NOSLEEP);
	if (devlist == NULL) {
		mutex_exit(&pptr->port_mutex);
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap for construct map",
		    pptr->port_instance);
		return (devptr);
	}

	/* Second pass: fill one map entry per non-orphan target. */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				int ret;

				ret = fc_ulp_pwwn_to_portmap(
				    pptr->port_fp_handle,
				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
				    devlist);

				if (ret == FC_SUCCESS) {
					devlist++;
					continue;
				}

				/*
				 * fp/fctl no longer knows this device;
				 * construct an INVALID/OLD entry by hand.
				 */
				devlist->map_pd = NULL;
				devlist->map_did.port_id = ptgt->tgt_d_id;
				devlist->map_hard_addr.hard_addr =
				    ptgt->tgt_hard_addr;

				devlist->map_state = PORT_DEVICE_INVALID;
				devlist->map_type = PORT_DEVICE_OLD;

				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
				    &devlist->map_nwwn, FC_WWN_SIZE);

				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
				    &devlist->map_pwwn, FC_WWN_SIZE);

				devlist++;
			}
		}
	}

	mutex_exit(&pptr->port_mutex);

	return (devptr);
}
/*
 * Inform MPxIO that the lun is busy and cannot accept regular IO
 */
static void
fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
{
	int		i;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;

	/* Walk every LUN of every target on the port. */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			mutex_enter(&ptgt->tgt_mutex);
			for (plun = ptgt->tgt_lun; plun != NULL;
			    plun = plun->lun_next) {
				if (plun->lun_mpxio &&
				    plun->lun_state & FCP_LUN_BUSY) {
					/* Ask the hotplug path to mark the
					 * MPxIO path busy. */
					if (!fcp_pass_to_hp(pptr, plun,
					    plun->lun_cip,
					    FCP_MPXIO_PATH_SET_BUSY,
					    pptr->port_link_cnt,
					    ptgt->tgt_change_cnt, 0, 0)) {
						FCP_TRACE(fcp_logq,
						    pptr->port_instbuf,
						    fcp_trace,
						    FCP_BUF_LEVEL_2, 0,
						    "path_verifybusy: "
						    "disable lun %p failed!",
						    plun);
					}
				}
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
}

/*
 * Enables (FCP_MPXIO_PATH_CLEAR_BUSY) or disables (any other value of
 * "what") the MPxIO path for the given LUN's child.  Returns NDI_FAILURE
 * if the child node is not present, NDI_SUCCESS otherwise.
 */
static int
fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
{
	dev_info_t	*cdip = NULL;
	dev_info_t	*pdip = NULL;

	ASSERT(plun);

	mutex_enter(&plun->lun_mutex);
	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
		mutex_exit(&plun->lun_mutex);
		return (NDI_FAILURE);
	}
	mutex_exit(&plun->lun_mutex);
	cdip = mdi_pi_get_client(PIP(cip));
	pdip = mdi_pi_get_phci(PIP(cip));

	ASSERT(cdip != NULL);
	ASSERT(pdip != NULL);

	if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
		/* LUN ready for IO */
		(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
	} else {
		/* LUN busy to accept IO */
		(void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
	}
	return (NDI_SUCCESS);
}

/*
 * Caller must free the returned string of MAXPATHLEN len
 * If the device is offline (-1 instance number) NULL
 * will be returned.
 */
static char *
fcp_get_lun_path(struct fcp_lun *plun) {
	dev_info_t	*dip = NULL;
	char		*path = NULL;
	if (plun == NULL) {
		return (NULL);
	}
	/* For MPxIO LUNs the devinfo node is the MDI client. */
	if (plun->lun_mpxio == 0) {
		dip = DIP(plun->lun_cip);
	} else {
		dip = mdi_pi_get_client(PIP(plun->lun_cip));
	}
	if (dip == NULL) {
		return (NULL);
	}
	if (ddi_get_instance(dip) < 0) {
		/* Device is offline. */
		return (NULL);
	}
	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	/* kmem_alloc(KM_SLEEP) cannot fail; this check is defensive only. */
	if (path == NULL) {
		return (NULL);
	}

	(void) ddi_pathname(dip, path);
	/*
	 * In reality, the user wants a fully valid path (one they can open)
	 * but this string is lacking the mount point, and the minor node.
	 * It would be nice if we could "figure these out" somehow
	 * and fill them in. Otherwise, the userland code has to understand
	 * driver specific details of which minor node is the "best" or
	 * "right" one to expose. (Ex: which slice is the whole disk, or
	 * which tape doesn't rewind)
	 */
	return (path);
}

/*
 * Bus config entry point.  For BUS_CONFIG_ONE the operation is retried
 * (then the taskq drained) so fabric devices needed for root are found;
 * for BUS_CONFIG_DRIVER/ALL it waits for discovery to settle first.
 */
static int
fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int64_t		reset_delay;
	int		rval, retry = 0;
	struct fcp_port	*pptr = fcp_dip2port(parent);

	/* Remaining portion of the post-attach settle window, in ticks. */
	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
	    (lbolt64 - pptr->port_attach_time);
	if (reset_delay < 0) {
		reset_delay = 0;
	}

	if (fcp_bus_config_debug) {
		flag |= NDI_DEVI_DEBUG;
	}

	switch (op) {
	case BUS_CONFIG_ONE:
		/*
		 * Retry the command since we need to ensure
		 * the fabric devices are available for root
		 */
		while (retry++ < fcp_max_bus_config_retries) {
			rval = (ndi_busop_bus_config(parent,
			    flag | NDI_MDI_FALLBACK, op,
			    arg, childp, (clock_t)reset_delay));
			if (rval == 0) {
				return (rval);
15476 } 15477 } 15478 15479 /* 15480 * drain taskq to make sure nodes are created and then 15481 * try again. 15482 */ 15483 taskq_wait(DEVI(parent)->devi_taskq); 15484 return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK, 15485 op, arg, childp, 0)); 15486 15487 case BUS_CONFIG_DRIVER: 15488 case BUS_CONFIG_ALL: { 15489 /* 15490 * delay till all devices report in (port_tmp_cnt == 0) 15491 * or FCP_INIT_WAIT_TIMEOUT 15492 */ 15493 mutex_enter(&pptr->port_mutex); 15494 while ((reset_delay > 0) && pptr->port_tmp_cnt) { 15495 (void) cv_timedwait(&pptr->port_config_cv, 15496 &pptr->port_mutex, 15497 ddi_get_lbolt() + (clock_t)reset_delay); 15498 reset_delay = 15499 (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) - 15500 (lbolt64 - pptr->port_attach_time); 15501 } 15502 mutex_exit(&pptr->port_mutex); 15503 /* drain taskq to make sure nodes are created */ 15504 taskq_wait(DEVI(parent)->devi_taskq); 15505 return (ndi_busop_bus_config(parent, flag, op, 15506 arg, childp, 0)); 15507 } 15508 15509 default: 15510 return (NDI_FAILURE); 15511 } 15512 /*NOTREACHED*/ 15513 } 15514 15515 static int 15516 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag, 15517 ddi_bus_config_op_t op, void *arg) 15518 { 15519 if (fcp_bus_config_debug) { 15520 flag |= NDI_DEVI_DEBUG; 15521 } 15522 15523 return (ndi_busop_bus_unconfig(parent, flag, op, arg)); 15524 } 15525 15526 15527 /* 15528 * Routine to copy GUID into the lun structure. 15529 * returns 0 if copy was successful and 1 if encountered a 15530 * failure and did not copy the guid. 15531 */ 15532 static int 15533 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp) 15534 { 15535 15536 int retval = 0; 15537 15538 /* add one for the null terminator */ 15539 const unsigned int len = strlen(guidp) + 1; 15540 15541 if ((guidp == NULL) || (plun == NULL)) { 15542 return (1); 15543 } 15544 15545 /* 15546 * if the plun->lun_guid already has been allocated, 15547 * then check the size. 
if the size is exact, reuse 15548 * it....if not free it an allocate the required size. 15549 * The reallocation should NOT typically happen 15550 * unless the GUIDs reported changes between passes. 15551 * We free up and alloc again even if the 15552 * size was more than required. This is due to the 15553 * fact that the field lun_guid_size - serves 15554 * dual role of indicating the size of the wwn 15555 * size and ALSO the allocation size. 15556 */ 15557 if (plun->lun_guid) { 15558 if (plun->lun_guid_size != len) { 15559 /* 15560 * free the allocated memory and 15561 * initialize the field 15562 * lun_guid_size to 0. 15563 */ 15564 kmem_free(plun->lun_guid, plun->lun_guid_size); 15565 plun->lun_guid = NULL; 15566 plun->lun_guid_size = 0; 15567 } 15568 } 15569 /* 15570 * alloc only if not already done. 15571 */ 15572 if (plun->lun_guid == NULL) { 15573 plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP); 15574 if (plun->lun_guid == NULL) { 15575 cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block:" 15576 "Unable to allocate" 15577 "Memory for GUID!!! size %d", len); 15578 retval = 1; 15579 } else { 15580 plun->lun_guid_size = len; 15581 } 15582 } 15583 if (plun->lun_guid) { 15584 /* 15585 * now copy the GUID 15586 */ 15587 bcopy(guidp, plun->lun_guid, plun->lun_guid_size); 15588 } 15589 return (retval); 15590 } 15591 15592 /* 15593 * fcp_reconfig_wait 15594 * 15595 * Wait for a rediscovery/reconfiguration to complete before continuing. 15596 */ 15597 15598 static void 15599 fcp_reconfig_wait(struct fcp_port *pptr) 15600 { 15601 clock_t reconfig_start, wait_timeout; 15602 15603 /* 15604 * Quick check. If pptr->port_tmp_cnt is 0, there is no 15605 * reconfiguration in progress. 
15606 */ 15607 15608 mutex_enter(&pptr->port_mutex); 15609 if (pptr->port_tmp_cnt == 0) { 15610 mutex_exit(&pptr->port_mutex); 15611 return; 15612 } 15613 mutex_exit(&pptr->port_mutex); 15614 15615 /* 15616 * If we cause a reconfig by raising power, delay until all devices 15617 * report in (port_tmp_cnt returns to 0) 15618 */ 15619 15620 reconfig_start = ddi_get_lbolt(); 15621 wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT); 15622 15623 mutex_enter(&pptr->port_mutex); 15624 15625 while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) && 15626 pptr->port_tmp_cnt) { 15627 15628 (void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex, 15629 reconfig_start + wait_timeout); 15630 } 15631 15632 mutex_exit(&pptr->port_mutex); 15633 15634 /* 15635 * Even if fcp_tmp_count isn't 0, continue without error. The port 15636 * we want may still be ok. If not, it will error out later 15637 */ 15638 } 15639 15640 /* 15641 * Read masking info from fp.conf and construct the global fcp_lun_blacklist. 15642 * We rely on the fcp_global_mutex to provide protection against changes to 15643 * the fcp_lun_blacklist. 15644 * 15645 * You can describe a list of target port WWNs and LUN numbers which will 15646 * not be configured. LUN numbers will be interpreted as decimal. White 15647 * spaces and ',' can be used in the list of LUN numbers. 
 *
 * To prevent LUNs 1 and 2 from being configured for target
 * port 510000f010fd92a1 and target port 510000e012079df1, set:
 *
 * pwwn-lun-blacklist=
 *	"510000f010fd92a1,1,2",
 *	"510000e012079df1,1,2";
 */
static void
fcp_read_blacklist(dev_info_t *dip,
    struct fcp_black_list_entry **pplun_blacklist) {
	char **prop_array = NULL;
	char *curr_pwwn = NULL;
	char *curr_lun = NULL;
	uint32_t prop_item = 0;
	int idx = 0;
	int len = 0;

	/* The blacklist is protected by fcp_global_mutex. */
	ASSERT(mutex_owned(&fcp_global_mutex));
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
		/* Property absent or unreadable: nothing to mask. */
		return;
	}

	/*
	 * Each property string has the form "<16-hex-digit WWN><sep><luns>"
	 * where <sep> is a space or a comma (sizeof (la_wwn_t) * 2 == 16
	 * ASCII hex characters).  Malformed entries are logged and
	 * skipped; valid ones are handed to fcp_mask_pwwn_lun().
	 */
	for (idx = 0; idx < prop_item; idx++) {

		curr_pwwn = prop_array[idx];
		/* Skip leading spaces before the WWN. */
		while (*curr_pwwn == ' ') {
			curr_pwwn++;
		}
		/* Must be longer than the WWN plus its separator. */
		if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
			    ", please check.", curr_pwwn);
			continue;
		}
		/* The character right after the WWN must be ' ' or ','. */
		if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
		    (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
			    ", please check.", curr_pwwn);
			continue;
		}
		/*
		 * All 16 WWN characters must be hex digits.
		 * NOTE(review): comparing isxdigit() against TRUE assumes
		 * the kernel ctype macro yields exactly 0/1 — confirm
		 * against sys/ctype.h.
		 */
		for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
			if (isxdigit(curr_pwwn[len]) != TRUE) {
				fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
				    "blacklist, please check.", curr_pwwn);
				break;
			}
		}
		if (len != sizeof (la_wwn_t) * 2) {
			/* Bailed out of the hex-digit scan above. */
			continue;
		}

		/*
		 * Split the string in place: NUL-terminate the WWN and
		 * point curr_lun at the LUN-number list that follows it.
		 */
		curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
		*(curr_lun - 1) = '\0';
		fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
	}

	ddi_prop_free(prop_array);
}

/*
 * Get the masking info about one remote target port designated by wwn.
 * Lun ids could be separated by ',' or white spaces.
 */
static void
fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
    struct fcp_black_list_entry **pplun_blacklist) {
	int idx = 0;
	uint32_t offset = 0;
	unsigned long lun_id = 0;
	char lunid_buf[16];
	char *pend = NULL;
	int illegal_digit = 0;

	/*
	 * Tokenize curr_lun: idx scans forward from offset to the next
	 * ',' / ' ' / '\0' delimiter; each token is expected to be a
	 * decimal LUN number.
	 */
	while (offset < strlen(curr_lun)) {
		while ((curr_lun[offset + idx] != ',') &&
		    (curr_lun[offset + idx] != '\0') &&
		    (curr_lun[offset + idx] != ' ')) {
			if (isdigit(curr_lun[offset + idx]) == 0) {
				illegal_digit++;
			}
			idx++;
		}
		if (illegal_digit > 0) {
			/* Token held a non-digit: log it and skip it. */
			offset += (idx+1);	/* To the start of next lun */
			idx = 0;
			illegal_digit = 0;
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check digits.",
			    curr_lun, curr_pwwn);
			continue;
		}
		/* Token must fit in lunid_buf together with its '\0'. */
		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check the length of LUN#.",
			    curr_lun, curr_pwwn);
			break;
		}
		if (idx == 0) {	/* ignore ' ' or ',' or '\0' */
			offset++;
			continue;
		}

		/* Copy the token out and convert it (base 10). */
		bcopy(curr_lun + offset, lunid_buf, idx);
		lunid_buf[idx] = '\0';
		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
			/*
			 * NOTE(review): lun_id is unsigned long but
			 * fcp_add_one_mask() takes uint32_t, so values
			 * beyond 32 bits would be truncated — confirm the
			 * supported LUN range.
			 */
			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
		} else {
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check %s.",
			    curr_lun, curr_pwwn, lunid_buf);
		}
		offset += (idx+1);	/* To the start of next lun */
		idx = 0;
	}
}

/*
 * Add one masking record for (curr_pwwn, lun_id) to the head of the
 * blacklist, unless an identical record is already present.
 */
static void
fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
    struct fcp_black_list_entry **pplun_blacklist) {
	struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
	struct fcp_black_list_entry *new_entry = NULL;
	la_wwn_t wwn;

	/* Convert the ASCII WWN to binary form for comparisons. */
	fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
	/* Duplicate check: same WWN and same LUN means nothing to do. */
	while (tmp_entry) {
		if ((bcmp(&tmp_entry->wwn, &wwn,
		    sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
			return;
		}

		tmp_entry = tmp_entry->next;
	}

	/* add to black list (prepend; KM_SLEEP sleeps until memory frees) */
	new_entry = (struct fcp_black_list_entry *)kmem_zalloc
	    (sizeof (struct fcp_black_list_entry), KM_SLEEP);
	bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
	new_entry->lun = lun_id;
	new_entry->masked = 0;	/* count of times this entry has matched */
	new_entry->next = *pplun_blacklist;
	*pplun_blacklist = new_entry;
}

/*
 * Check if we should mask the specified lun of this fcp_tgt.
 * Returns TRUE when (wwn, lun_id) is on the global fcp_lun_blacklist,
 * FALSE otherwise.  The "masked" notice is logged only on the first
 * match of each entry (entry->masked == 1).
 */
static int
fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id) {
	struct fcp_black_list_entry *remote_port;

	remote_port = fcp_lun_blacklist;
	while (remote_port != NULL) {
		if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
			if (remote_port->lun == lun_id) {
				remote_port->masked++;
				if (remote_port->masked == 1) {
					fcp_log(CE_NOTE, NULL, "LUN %d of port "
					    "%02x%02x%02x%02x%02x%02x%02x%02x "
					    "is masked due to black listing.\n",
					    lun_id, wwn->raw_wwn[0],
					    wwn->raw_wwn[1], wwn->raw_wwn[2],
					    wwn->raw_wwn[3], wwn->raw_wwn[4],
					    wwn->raw_wwn[5], wwn->raw_wwn[6],
					    wwn->raw_wwn[7]);
				}
				return (TRUE);
			}
		}
		remote_port = remote_port->next;
	}
	return (FALSE);
}

/*
 * Release all allocated resources
 */
static void
fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist) {
	struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
	struct fcp_black_list_entry *current_entry = NULL;

	/* Caller must hold fcp_global_mutex while tearing down the list. */
	ASSERT(mutex_owned(&fcp_global_mutex));
	/*
	 * Traverse all luns, grabbing the next pointer before the node
	 * that holds it is freed.
	 */
	while (tmp_entry) {
		current_entry = tmp_entry;
		tmp_entry = tmp_entry->next;
		kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
	}
	*pplun_blacklist = NULL;
}