1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23 *
24 * NOT a DDI compliant Sun Fibre Channel port driver(fp)
25 *
26 */
27
28 #include <sys/types.h>
29 #include <sys/varargs.h>
30 #include <sys/param.h>
31 #include <sys/errno.h>
32 #include <sys/uio.h>
33 #include <sys/buf.h>
34 #include <sys/modctl.h>
35 #include <sys/open.h>
36 #include <sys/file.h>
37 #include <sys/kmem.h>
38 #include <sys/poll.h>
39 #include <sys/conf.h>
40 #include <sys/thread.h>
41 #include <sys/var.h>
42 #include <sys/cmn_err.h>
43 #include <sys/stat.h>
44 #include <sys/ddi.h>
45 #include <sys/sunddi.h>
46 #include <sys/promif.h>
47 #include <sys/nvpair.h>
48 #include <sys/byteorder.h>
49 #include <sys/scsi/scsi.h>
50 #include <sys/fibre-channel/fc.h>
51 #include <sys/fibre-channel/impl/fc_ulpif.h>
52 #include <sys/fibre-channel/impl/fc_fcaif.h>
53 #include <sys/fibre-channel/impl/fctl_private.h>
54 #include <sys/fibre-channel/impl/fc_portif.h>
55 #include <sys/fibre-channel/impl/fp.h>
56
57 /* These are defined in fctl.c! */
58 extern int did_table_size;
59 extern int pwwn_table_size;
60
61 static struct cb_ops fp_cb_ops = {
62 fp_open, /* open */
63 fp_close, /* close */
64 nodev, /* strategy */
65 nodev, /* print */
66 nodev, /* dump */
67 nodev, /* read */
68 nodev, /* write */
69 fp_ioctl, /* ioctl */
70 nodev, /* devmap */
71 nodev, /* mmap */
72 nodev, /* segmap */
73 nochpoll, /* chpoll */
74 ddi_prop_op, /* cb_prop_op */
75 0, /* streamtab */
76 D_NEW | D_MP | D_HOTPLUG, /* cb_flag */
77 CB_REV, /* rev */
78 nodev, /* aread */
79 nodev /* awrite */
80 };
81
82 static struct dev_ops fp_ops = {
83 DEVO_REV, /* build revision */
84 0, /* reference count */
85 fp_getinfo, /* getinfo */
86 nulldev, /* identify - Obsoleted */
87 nulldev, /* probe */
88 fp_attach, /* attach */
89 fp_detach, /* detach */
90 nodev, /* reset */
91 &fp_cb_ops, /* cb_ops */
92 NULL, /* bus_ops */
93 fp_power, /* power */
94 ddi_quiesce_not_needed /* quiesce */
95 };
96
97 #define FP_VERSION "20091123-1.101"
98 #define FP_NAME_VERSION "SunFC Port v" FP_VERSION
99
100 char *fp_version = FP_NAME_VERSION;
101
102 static struct modldrv modldrv = {
103 &mod_driverops, /* Type of Module */
104 FP_NAME_VERSION, /* Name/Version of fp */
105 &fp_ops /* driver ops */
106 };
107
108 static struct modlinkage modlinkage = {
109 MODREV_1, /* Rev of the loadable modules system */
110 &modldrv, /* NULL terminated list of */
111 NULL /* Linkage structures */
112 };
113
114
115
116 static uint16_t ns_reg_cmds[] = {
117 NS_RPN_ID,
118 NS_RNN_ID,
119 NS_RCS_ID,
120 NS_RFT_ID,
121 NS_RPT_ID,
122 NS_RSPN_ID,
123 NS_RSNN_NN
124 };
125
126 struct fp_xlat {
127 uchar_t xlat_state;
128 int xlat_rval;
129 } fp_xlat [] = {
130 { FC_PKT_SUCCESS, FC_SUCCESS },
131 { FC_PKT_REMOTE_STOP, FC_FAILURE },
132 { FC_PKT_LOCAL_RJT, FC_FAILURE },
133 { FC_PKT_NPORT_RJT, FC_ELS_PREJECT },
134 { FC_PKT_FABRIC_RJT, FC_ELS_FREJECT },
135 { FC_PKT_LOCAL_BSY, FC_TRAN_BUSY },
136 { FC_PKT_TRAN_BSY, FC_TRAN_BUSY },
137 { FC_PKT_NPORT_BSY, FC_PBUSY },
138 { FC_PKT_FABRIC_BSY, FC_FBUSY },
139 { FC_PKT_LS_RJT, FC_FAILURE },
140 { FC_PKT_BA_RJT, FC_FAILURE },
141 { FC_PKT_TIMEOUT, FC_FAILURE },
142 { FC_PKT_TRAN_ERROR, FC_TRANSPORT_ERROR },
143 { FC_PKT_FAILURE, FC_FAILURE },
144 { FC_PKT_PORT_OFFLINE, FC_OFFLINE }
145 };
146
147 static uchar_t fp_valid_alpas[] = {
148 0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, 0x17, 0x18, 0x1B,
149 0x1D, 0x1E, 0x1F, 0x23, 0x25, 0x26, 0x27, 0x29, 0x2A,
150 0x2B, 0x2C, 0x2D, 0x2E, 0x31, 0x32, 0x33, 0x34, 0x35,
151 0x36, 0x39, 0x3A, 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49,
152 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54,
153 0x55, 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67,
154 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, 0x73,
155 0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, 0x81, 0x82,
156 0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, 0x9B, 0x9D, 0x9E,
157 0x9F, 0xA3, 0xA5, 0xA6, 0xA7, 0xA9, 0xAA, 0xAB, 0xAC,
158 0xAD, 0xAE, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB9,
159 0xBA, 0xBC, 0xC3, 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB,
160 0xCC, 0xCD, 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
161 0xD9, 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF
162 };
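/*
 * Note (hedged): the table above appears to enumerate, in ascending order,
 * the 126 valid NL_Port AL_PA values defined by FC-AL; 0x00, which is
 * reserved for the FL_Port, is presumably excluded on purpose.
 */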
163
164 static struct fp_perms {
165 uint16_t fp_ioctl_cmd;
166 uchar_t fp_open_flag;
167 } fp_perm_list [] = {
168 { FCIO_GET_NUM_DEVS, FP_OPEN },
169 { FCIO_GET_DEV_LIST, FP_OPEN },
170 { FCIO_GET_SYM_PNAME, FP_OPEN },
171 { FCIO_GET_SYM_NNAME, FP_OPEN },
172 { FCIO_SET_SYM_PNAME, FP_EXCL },
173 { FCIO_SET_SYM_NNAME, FP_EXCL },
174 { FCIO_GET_LOGI_PARAMS, FP_OPEN },
175 { FCIO_DEV_LOGIN, FP_EXCL },
176 { FCIO_DEV_LOGOUT, FP_EXCL },
177 { FCIO_GET_STATE, FP_OPEN },
178 { FCIO_DEV_REMOVE, FP_EXCL },
179 { FCIO_GET_FCODE_REV, FP_OPEN },
180 { FCIO_GET_FW_REV, FP_OPEN },
181 { FCIO_GET_DUMP_SIZE, FP_OPEN },
182 { FCIO_FORCE_DUMP, FP_EXCL },
183 { FCIO_GET_DUMP, FP_OPEN },
184 { FCIO_GET_TOPOLOGY, FP_OPEN },
185 { FCIO_RESET_LINK, FP_EXCL },
186 { FCIO_RESET_HARD, FP_EXCL },
187 { FCIO_RESET_HARD_CORE, FP_EXCL },
188 { FCIO_DIAG, FP_OPEN },
189 { FCIO_NS, FP_EXCL },
190 { FCIO_DOWNLOAD_FW, FP_EXCL },
191 { FCIO_DOWNLOAD_FCODE, FP_EXCL },
192 { FCIO_LINK_STATUS, FP_OPEN },
193 { FCIO_GET_HOST_PARAMS, FP_OPEN },
194 { FCIO_GET_NODE_ID, FP_OPEN },
195 { FCIO_SET_NODE_ID, FP_EXCL },
196 { FCIO_SEND_NODE_ID, FP_OPEN },
197 { FCIO_GET_ADAPTER_ATTRIBUTES, FP_OPEN },
198 { FCIO_GET_OTHER_ADAPTER_PORTS, FP_OPEN },
199 { FCIO_GET_ADAPTER_PORT_ATTRIBUTES, FP_OPEN },
200 { FCIO_GET_DISCOVERED_PORT_ATTRIBUTES, FP_OPEN },
201 { FCIO_GET_PORT_ATTRIBUTES, FP_OPEN },
202 { FCIO_GET_ADAPTER_PORT_STATS, FP_OPEN },
203 { FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES, FP_OPEN },
204 { FCIO_GET_NPIV_PORT_LIST, FP_OPEN },
205 { FCIO_DELETE_NPIV_PORT, FP_OPEN },
206 { FCIO_GET_NPIV_ATTRIBUTES, FP_OPEN },
207 { FCIO_CREATE_NPIV_PORT, FP_OPEN },
208 { FCIO_NPIV_GET_ADAPTER_ATTRIBUTES, FP_OPEN }
209 };
210
211 static char *fp_pm_comps[] = {
212 "NAME=FC Port",
213 "0=Port Down",
214 "1=Port Up"
215 };
216
217
218 #ifdef _LITTLE_ENDIAN
219 #define MAKE_BE_32(x) { \
220 uint32_t *ptr1, i; \
221 ptr1 = (uint32_t *)(x); \
222 for (i = 0; i < sizeof (*(x)) / sizeof (uint32_t); i++) { \
223 *ptr1 = BE_32(*ptr1); \
224 ptr1++; \
225 } \
226 }
227 #else
228 #define MAKE_BE_32(x)
229 #endif
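/*
 * Usage sketch (illustrative only, not part of the original source):
 *
 *	la_els_logi_t payload;
 *	...
 *	MAKE_BE_32(&payload);
 *
 * The macro walks sizeof (*(x)) / sizeof (uint32_t) 32-bit words of the
 * object, byte-swapping each word to big-endian (FC wire order) on
 * little-endian hosts; on big-endian hosts it compiles to nothing, so it
 * is only meaningful for structures laid out as a sequence of 32-bit fields.
 */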
230
231 static uchar_t fp_verbosity = (FP_WARNING_MESSAGES | FP_FATAL_MESSAGES);
232 static uint32_t fp_options = 0;
233
234 static int fp_cmd_wait_cnt = FP_CMDWAIT_DELAY;
235 static int fp_retry_delay = FP_RETRY_DELAY; /* retry after this delay */
236 static int fp_retry_count = FP_RETRY_COUNT; /* number of retries */
237 unsigned int fp_offline_ticker; /* seconds */
238
239 /*
240 * Driver global variable to anchor the list of soft state structs for
241 * all fp driver instances. Used with the Solaris DDI soft state functions.
242 */
243 static void *fp_driver_softstate;
244
245 static clock_t fp_retry_ticks;
246 static clock_t fp_offline_ticks;
247
248 static int fp_retry_ticker;
249 static uint32_t fp_unsol_buf_count = FP_UNSOL_BUF_COUNT;
250 static uint32_t fp_unsol_buf_size = FP_UNSOL_BUF_SIZE;
251
252 static int fp_log_size = FP_LOG_SIZE;
253 static int fp_trace = FP_TRACE_DEFAULT;
254 static fc_trace_logq_t *fp_logq = NULL;
255
256 int fp_get_adapter_paths(char *pathList, int count);
257 static void fp_log_port_event(fc_local_port_t *port, char *subclass);
258 static void fp_log_target_event(fc_local_port_t *port, char *subclass,
259 la_wwn_t tgt_pwwn, uint32_t port_id);
260 static uint32_t fp_map_remote_port_state(uint32_t rm_state);
261 static void fp_init_symbolic_names(fc_local_port_t *port);
262
263
264 /*
265 * Perform global initialization
266 */
267 int
268 _init(void)

269 {
270 int ret;
271
272 if ((ret = ddi_soft_state_init(&fp_driver_softstate,
273 sizeof (struct fc_local_port), 8)) != 0) {
274 return (ret);
275 }
276
277 if ((ret = scsi_hba_init(&modlinkage)) != 0) {
278 ddi_soft_state_fini(&fp_driver_softstate);
279 return (ret);
280 }
281
282 fp_logq = fc_trace_alloc_logq(fp_log_size);
283
284 if ((ret = mod_install(&modlinkage)) != 0) {
285 fc_trace_free_logq(fp_logq);
286 ddi_soft_state_fini(&fp_driver_softstate);
287 scsi_hba_fini(&modlinkage);
288 }
289
290 return (ret);
291 }
292
293
294 /*
295 * Prepare for driver unload
296 */
297 int
298 _fini(void)
299 {
300 int ret;
301
302 if ((ret = mod_remove(&modlinkage)) == 0) {
303 fc_trace_free_logq(fp_logq);
304 ddi_soft_state_fini(&fp_driver_softstate);
305 scsi_hba_fini(&modlinkage);
306 }
307
308 return (ret);
309 }
310
311
312 /*
313 * Request mod_info() to handle all cases
314 */
315 int
316 _info(struct modinfo *modinfo)
317 {
318 return (mod_info(&modlinkage, modinfo));
319 }
320
321
322 /*
323 * fp_attach:
324 *
325 * The respective cmd handlers take care of performing
326 * ULP related invocations
327 */
328 static int
329 fp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
330 {
331 int rval;
332
333 /*
334 * We check the value of fp_offline_ticker at this
335 * point. The variable is global for the driver and
336 * not specific to an instance.
337 *
338 * If there is no user-defined value found in /etc/system
339 * or fp.conf, then we use 90 seconds (FP_OFFLINE_TICKER).
340 * The minimum setting for this offline timeout according
341 * to the FC-FS2 standard (Fibre Channel Framing and
342 * Signalling-2, see www.t11.org) is R_T_TOV == 100msec.
343 *
344 * We do not recommend setting the value to less than 10
345 * seconds (RA_TOV) or more than 90 seconds. If this
346 * variable is greater than 90 seconds then drivers above
347 * fp (fcp, sd, scsi_vhci, vxdmp et al) might complain.
348 */
349
350 fp_offline_ticker = ddi_prop_get_int(DDI_DEV_T_ANY,
351 dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fp_offline_ticker",
352 FP_OFFLINE_TICKER);
353
354 if ((fp_offline_ticker < 10) ||
355 (fp_offline_ticker > 90)) {
356 cmn_err(CE_WARN, "Setting fp_offline_ticker to "
357 "%d second(s). This is outside the "
358 "recommended range of 10..90 seconds",
359 fp_offline_ticker);
360 }
361
362 /*
363 * Tick every second when there are commands to retry.
364 * It should tick at the least granular value of pkt_timeout
365 * (which is one second)
366 */
367 fp_retry_ticker = 1;
368
369 fp_retry_ticks = drv_usectohz(fp_retry_ticker * 1000 * 1000);
370 fp_offline_ticks = drv_usectohz(fp_offline_ticker * 1000 * 1000);
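	/*
	 * Illustrative arithmetic (not from the original source): with the
	 * default FP_OFFLINE_TICKER of 90 seconds, the line above evaluates
	 * drv_usectohz(90 * 1000 * 1000), i.e. the number of clock ticks in
	 * 90 seconds (9000 ticks at a 100Hz system clock).
	 */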
371
372 switch (cmd) {
373 case DDI_ATTACH:
374 rval = fp_attach_handler(dip);
375 break;
376
377 case DDI_RESUME:
378 rval = fp_resume_handler(dip);
379 break;
380
381 default:
382 rval = DDI_FAILURE;
383 break;
384 }
385 return (rval);
386 }
387
388
389 /*
390 * fp_detach:
391 *
392  * If a ULP fails to handle the cmd request, the converse of
393  * cmd is invoked for the ULPs that previously succeeded the
394  * cmd request.
395 */
396 static int
397 fp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
398 {
399 int rval = DDI_FAILURE;
400 fc_local_port_t *port;
401 fc_attach_cmd_t converse;
402 uint8_t cnt;
403
404 if ((port = ddi_get_soft_state(fp_driver_softstate,
405 ddi_get_instance(dip))) == NULL) {
406 return (DDI_FAILURE);
407 }
408
409 mutex_enter(&port->fp_mutex);
410
411 if (port->fp_ulp_attach) {
412 mutex_exit(&port->fp_mutex);
413 return (DDI_FAILURE);
414 }
415
416 switch (cmd) {
417 case DDI_DETACH:
418 if (port->fp_task != FP_TASK_IDLE) {
419 mutex_exit(&port->fp_mutex);
420 return (DDI_FAILURE);
421 }
422
423 /* Let's attempt to quit the job handler gracefully */
424 port->fp_soft_state |= FP_DETACH_INPROGRESS;
425
426 mutex_exit(&port->fp_mutex);
427 converse = FC_CMD_ATTACH;
428 if (fctl_detach_ulps(port, FC_CMD_DETACH,
429 &modlinkage) != FC_SUCCESS) {
430 mutex_enter(&port->fp_mutex);
431 port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
432 mutex_exit(&port->fp_mutex);
433 rval = DDI_FAILURE;
434 break;
435 }
436
437 mutex_enter(&port->fp_mutex);
438 for (cnt = 0; (port->fp_job_head) && (cnt < fp_cmd_wait_cnt);
439 cnt++) {
440 mutex_exit(&port->fp_mutex);
441 delay(drv_usectohz(1000000));
442 mutex_enter(&port->fp_mutex);
443 }
444
445 if (port->fp_job_head) {
446 mutex_exit(&port->fp_mutex);
447 rval = DDI_FAILURE;
448 break;
449 }
450 mutex_exit(&port->fp_mutex);
451
452 rval = fp_detach_handler(port);
453 break;
454
455 case DDI_SUSPEND:
456 mutex_exit(&port->fp_mutex);
457 converse = FC_CMD_RESUME;
458 if (fctl_detach_ulps(port, FC_CMD_SUSPEND,
459 &modlinkage) != FC_SUCCESS) {
460 rval = DDI_FAILURE;
461 break;
462 }
463 if ((rval = fp_suspend_handler(port)) != DDI_SUCCESS) {
464 (void) callb_generic_cpr(&port->fp_cpr_info,
465 CB_CODE_CPR_RESUME);
466 }
467 break;
468
469 default:
470 mutex_exit(&port->fp_mutex);
471 break;
472 }
473
474 /*
475 * Use softint to perform reattach. Mark fp_ulp_attach so we
476 * don't attempt to do this repeatedly on behalf of some persistent
477 * caller.
478 */
479 if (rval != DDI_SUCCESS) {
480 mutex_enter(&port->fp_mutex);
481 port->fp_ulp_attach = 1;
482
483 /*
484 		 * If the port is in low power mode, then there is a
485 		 * possibility that the fca could be in low power mode too.
486 * Try to raise the power before calling attach ulps.
487 */
488
489 if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
490 (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
491 mutex_exit(&port->fp_mutex);
492 (void) pm_raise_power(port->fp_port_dip,
493 FP_PM_COMPONENT, FP_PM_PORT_UP);
494 } else {
495 mutex_exit(&port->fp_mutex);
496 }
497
498
499 fp_attach_ulps(port, converse);
500
501 mutex_enter(&port->fp_mutex);
502 while (port->fp_ulp_attach) {
503 cv_wait(&port->fp_attach_cv, &port->fp_mutex);
504 }
505
506 port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
507
508 /*
509 * Mark state as detach failed so asynchronous ULP attach
510 * events (downstream, not the ones we're initiating with
511 * the call to fp_attach_ulps) are not honored. We're
512 * really still in pending detach.
513 */
514 port->fp_soft_state |= FP_DETACH_FAILED;
515
516 mutex_exit(&port->fp_mutex);
517 }
518
519 return (rval);
520 }
521
522
523 /*
524 * fp_getinfo:
525 * Given the device number, return either the
526 * dev_info_t pointer or the instance number.
527 */
528
529 /* ARGSUSED */
530 static int
531 fp_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
532 {
533 int rval;
534 minor_t instance;
535 fc_local_port_t *port;
536
537 rval = DDI_SUCCESS;
538 instance = getminor((dev_t)arg);
539
540 switch (cmd) {
541 case DDI_INFO_DEVT2DEVINFO:
542 if ((port = ddi_get_soft_state(fp_driver_softstate,
543 instance)) == NULL) {
544 rval = DDI_FAILURE;
545 break;
546 }
547 *result = (void *)port->fp_port_dip;
548 break;
549
550 case DDI_INFO_DEVT2INSTANCE:
551 *result = (void *)(uintptr_t)instance;
552 break;
553
554 default:
555 rval = DDI_FAILURE;
556 break;
557 }
558
559 return (rval);
560 }
561
562
563 /*
564 * Entry point for power up and power down request from kernel
565 */
566 static int
567 fp_power(dev_info_t *dip, int comp, int level)
568 {
569 int rval = DDI_FAILURE;
570 fc_local_port_t *port;
571
572 port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));
573 if (port == NULL || comp != FP_PM_COMPONENT) {
574 return (rval);
575 }
576
577 switch (level) {
578 case FP_PM_PORT_UP:
579 rval = DDI_SUCCESS;
580
581 /*
582 * If the port is DDI_SUSPENDed, let the DDI_RESUME
583 * code complete the rediscovery.
584 */
585 mutex_enter(&port->fp_mutex);
586 if (port->fp_soft_state & FP_SOFT_SUSPEND) {
587 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
588 port->fp_pm_level = FP_PM_PORT_UP;
589 mutex_exit(&port->fp_mutex);
590 fctl_attach_ulps(port, FC_CMD_POWER_UP, &modlinkage);
591 break;
592 }
593
594 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
595 ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);
596
597 port->fp_pm_level = FP_PM_PORT_UP;
598 rval = fp_power_up(port);
599 if (rval != DDI_SUCCESS) {
600 port->fp_pm_level = FP_PM_PORT_DOWN;
601 }
602 } else {
603 port->fp_pm_level = FP_PM_PORT_UP;
604 }
605 mutex_exit(&port->fp_mutex);
606 break;
607
608 case FP_PM_PORT_DOWN:
609 mutex_enter(&port->fp_mutex);
610
611 ASSERT(!(port->fp_soft_state & FP_SOFT_NO_PMCOMP));
612 if (port->fp_soft_state & FP_SOFT_NO_PMCOMP) {
613 /*
614 			 * The PM framework goofed up. We don't
615 			 * have any PM components. Let's never go down.
616 */
617 mutex_exit(&port->fp_mutex);
618 break;
619
620 }
621
622 if (port->fp_ulp_attach) {
623 /* We shouldn't let the power go down */
624 mutex_exit(&port->fp_mutex);
625 break;
626 }
627
628 /*
629 * Not a whole lot to do if we are detaching
630 */
631 if (port->fp_soft_state & FP_SOFT_IN_DETACH) {
632 port->fp_pm_level = FP_PM_PORT_DOWN;
633 mutex_exit(&port->fp_mutex);
634 rval = DDI_SUCCESS;
635 break;
636 }
637
638 if (!port->fp_pm_busy && !port->fp_pm_busy_nocomp) {
639 port->fp_pm_level = FP_PM_PORT_DOWN;
640
641 rval = fp_power_down(port);
642 if (rval != DDI_SUCCESS) {
643 port->fp_pm_level = FP_PM_PORT_UP;
644 ASSERT(!(port->fp_soft_state &
645 FP_SOFT_POWER_DOWN));
646 } else {
647 ASSERT(port->fp_soft_state &
648 FP_SOFT_POWER_DOWN);
649 }
650 }
651 mutex_exit(&port->fp_mutex);
652 break;
653
654 default:
655 break;
656 }
657
658 return (rval);
659 }
660
661
662 /*
663 * Open FC port devctl node
664 */
665 static int
666 fp_open(dev_t *devp, int flag, int otype, cred_t *credp)
667 {
668 int instance;
669 fc_local_port_t *port;
670
671 if (otype != OTYP_CHR) {
672 return (EINVAL);
673 }
674
675 /*
676 * This is not a toy to play with. Allow only powerful
677 * users (hopefully knowledgeable) to access the port
678 * (A hacker potentially could download a sick binary
679 * file into FCA)
680 */
681 if (drv_priv(credp)) {
682 return (EPERM);
683 }
684
685 instance = (int)getminor(*devp);
686
687 port = ddi_get_soft_state(fp_driver_softstate, instance);
688 if (port == NULL) {
689 return (ENXIO);
690 }
691
692 mutex_enter(&port->fp_mutex);
693 if (port->fp_flag & FP_EXCL) {
694 /*
695 * It is already open for exclusive access.
696 * So shut the door on this caller.
697 */
698 mutex_exit(&port->fp_mutex);
699 return (EBUSY);
700 }
701
702 if (flag & FEXCL) {
703 if (port->fp_flag & FP_OPEN) {
704 /*
705 * Exclusive operation not possible
706 * as it is already opened
707 */
708 mutex_exit(&port->fp_mutex);
709 return (EBUSY);
710 }
711 port->fp_flag |= FP_EXCL;
712 }
713 port->fp_flag |= FP_OPEN;
714 mutex_exit(&port->fp_mutex);
715
716 return (0);
717 }
718
719
720 /*
721 * The driver close entry point is called on the last close()
722 * of a device. So it is perfectly alright to just clobber the
723 * open flag and reset it to idle (instead of having to reset
724  * each flag bit). For any confusion, check out close(9E).
725 */
726
727 /* ARGSUSED */
728 static int
729 fp_close(dev_t dev, int flag, int otype, cred_t *credp)
730 {
731 int instance;
732 fc_local_port_t *port;
733
734 if (otype != OTYP_CHR) {
735 return (EINVAL);
736 }
737
738 instance = (int)getminor(dev);
739
740 port = ddi_get_soft_state(fp_driver_softstate, instance);
741 if (port == NULL) {
742 return (ENXIO);
743 }
744
745 mutex_enter(&port->fp_mutex);
746 if ((port->fp_flag & FP_OPEN) == 0) {
747 mutex_exit(&port->fp_mutex);
748 return (ENODEV);
749 }
750 port->fp_flag = FP_IDLE;
751 mutex_exit(&port->fp_mutex);
752
753 return (0);
754 }
755
756 /*
757 * Handle IOCTL requests
758 */
759
760 /* ARGSUSED */
761 static int
762 fp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
763 {
764 int instance;
765 int ret = 0;
766 fcio_t fcio;
767 fc_local_port_t *port;
768
769 instance = (int)getminor(dev);
770
771 port = ddi_get_soft_state(fp_driver_softstate, instance);
772 if (port == NULL) {
773 return (ENXIO);
774 }
775
776 mutex_enter(&port->fp_mutex);
777 if ((port->fp_flag & FP_OPEN) == 0) {
778 mutex_exit(&port->fp_mutex);
779 return (ENXIO);
780 }
781
782 if (port->fp_soft_state & FP_SOFT_SUSPEND) {
783 mutex_exit(&port->fp_mutex);
784 return (ENXIO);
785 }
786
787 mutex_exit(&port->fp_mutex);
788
789 /* this will raise power if necessary */
790 ret = fctl_busy_port(port);
791 if (ret != 0) {
792 return (ret);
793 }
794
795 ASSERT(port->fp_pm_level == FP_PM_PORT_UP);
796
797
798 switch (cmd) {
799 case FCIO_CMD: {
800 #ifdef _MULTI_DATAMODEL
801 switch (ddi_model_convert_from(mode & FMODELS)) {
802 case DDI_MODEL_ILP32: {
803 struct fcio32 fcio32;
804
805 if (ddi_copyin((void *)data, (void *)&fcio32,
806 sizeof (struct fcio32), mode)) {
807 ret = EFAULT;
808 break;
809 }
810 fcio.fcio_xfer = fcio32.fcio_xfer;
811 fcio.fcio_cmd = fcio32.fcio_cmd;
812 fcio.fcio_flags = fcio32.fcio_flags;
813 fcio.fcio_cmd_flags = fcio32.fcio_cmd_flags;
814 fcio.fcio_ilen = (size_t)fcio32.fcio_ilen;
815 fcio.fcio_ibuf =
816 (caddr_t)(uintptr_t)fcio32.fcio_ibuf;
817 fcio.fcio_olen = (size_t)fcio32.fcio_olen;
818 fcio.fcio_obuf =
819 (caddr_t)(uintptr_t)fcio32.fcio_obuf;
820 fcio.fcio_alen = (size_t)fcio32.fcio_alen;
821 fcio.fcio_abuf =
822 (caddr_t)(uintptr_t)fcio32.fcio_abuf;
823 fcio.fcio_errno = fcio32.fcio_errno;
824 break;
825 }
826
827 case DDI_MODEL_NONE:
828 if (ddi_copyin((void *)data, (void *)&fcio,
829 sizeof (fcio_t), mode)) {
830 ret = EFAULT;
831 }
832 break;
833 }
834 #else /* _MULTI_DATAMODEL */
835 if (ddi_copyin((void *)data, (void *)&fcio,
836 sizeof (fcio_t), mode)) {
837 ret = EFAULT;
838 break;
839 }
840 #endif /* _MULTI_DATAMODEL */
841 if (!ret) {
842 ret = fp_fciocmd(port, data, mode, &fcio);
843 }
844 break;
845 }
846
847 default:
848 ret = fctl_ulp_port_ioctl(port, dev, cmd, data,
849 mode, credp, rval);
850 }
851
852 fctl_idle_port(port);
853
854 return (ret);
855 }
856
857
858 /*
859 * Init Symbolic Port Name and Node Name
860  * LV will try to get symbolic names from the FCA driver
861  * and register them with the name server;
862  * if LV fails to get these,
863  * LV will register its default symbolic names with the name server.
864  * The default symbolic node name format is:
865  *	<hostname>:<hba driver name>(instance)
866  * The default symbolic port name format is:
867 * <fp path name>
868 */
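/*
 * Hypothetical examples of the resulting defaults (values are illustrative
 * only, not taken from the original source):
 *	symbolic node name:	"myhost:qlc0"
 *	symbolic port name:	"/pci@1f,700000/SUNW,qlc@2/fp@0,0"
 */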
869 static void
870 fp_init_symbolic_names(fc_local_port_t *port)
871 {
872 const char *vendorname = ddi_driver_name(port->fp_fca_dip);
873 char *sym_name;
874 char fcaname[50] = {0};
875 int hostnlen, fcanlen;
876
877 if (port->fp_sym_node_namelen == 0) {
878 hostnlen = strlen(utsname.nodename);
879 (void) snprintf(fcaname, sizeof (fcaname),
880 "%s%d", vendorname, ddi_get_instance(port->fp_fca_dip));
881 fcanlen = strlen(fcaname);
882
883 sym_name = kmem_zalloc(hostnlen + fcanlen + 2, KM_SLEEP);
884 (void) sprintf(sym_name, "%s:%s", utsname.nodename, fcaname);
885 port->fp_sym_node_namelen = strlen(sym_name);
886 if (port->fp_sym_node_namelen >= FCHBA_SYMB_NAME_LEN) {
887 port->fp_sym_node_namelen = FCHBA_SYMB_NAME_LEN;
888 }
889 (void) strncpy(port->fp_sym_node_name, sym_name,
890 port->fp_sym_node_namelen);
891 kmem_free(sym_name, hostnlen + fcanlen + 2);
892 }
893
894 if (port->fp_sym_port_namelen == 0) {
895 char *pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
896
897 (void) ddi_pathname(port->fp_port_dip, pathname);
898 port->fp_sym_port_namelen = strlen(pathname);
899 if (port->fp_sym_port_namelen >= FCHBA_SYMB_NAME_LEN) {
900 port->fp_sym_port_namelen = FCHBA_SYMB_NAME_LEN;
901 }
902 (void) strncpy(port->fp_sym_port_name, pathname,
903 port->fp_sym_port_namelen);
904 kmem_free(pathname, MAXPATHLEN);
905 }
906 }
907
908
909 /*
910 * Perform port attach
911 */
912 static int
913 fp_attach_handler(dev_info_t *dip)
914 {
915 int rval;
916 int instance;
917 int port_num;
918 int port_len;
919 char name[30];
920 char i_pwwn[17];
921 fp_cmd_t *pkt;
922 uint32_t ub_count;
923 fc_local_port_t *port;
924 job_request_t *job;
925 fc_local_port_t *phyport = NULL;
926 int portpro1;
927 char pwwn[17], nwwn[17];
928
929 instance = ddi_get_instance(dip);
930 port_len = sizeof (port_num);
931 rval = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
932 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port",
933 (caddr_t)&port_num, &port_len);
934 if (rval != DDI_SUCCESS) {
935 cmn_err(CE_WARN, "fp(%d): No port property in devinfo",
936 instance);
937 return (DDI_FAILURE);
938 }
939
940 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance,
941 DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
942 cmn_err(CE_WARN, "fp(%d): failed to create devctl minor node",
943 instance);
944 return (DDI_FAILURE);
945 }
946
947 if (ddi_create_minor_node(dip, "fc", S_IFCHR, instance,
948 DDI_NT_FC_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
949 cmn_err(CE_WARN, "fp(%d): failed to create fc attachment"
950 " point minor node", instance);
951 ddi_remove_minor_node(dip, NULL);
952 return (DDI_FAILURE);
953 }
954
955 if (ddi_soft_state_zalloc(fp_driver_softstate, instance)
956 != DDI_SUCCESS) {
957 cmn_err(CE_WARN, "fp(%d): failed to alloc soft state",
958 instance);
959 ddi_remove_minor_node(dip, NULL);
960 return (DDI_FAILURE);
961 }
962 port = ddi_get_soft_state(fp_driver_softstate, instance);
963
964 (void) sprintf(port->fp_ibuf, "fp(%d)", instance);
965
966 port->fp_instance = instance;
967 port->fp_ulp_attach = 1;
968 port->fp_port_num = port_num;
969 port->fp_verbose = fp_verbosity;
970 port->fp_options = fp_options;
971
972 port->fp_fca_dip = ddi_get_parent(dip);
973 port->fp_port_dip = dip;
974 port->fp_fca_tran = (fc_fca_tran_t *)
975 ddi_get_driver_private(port->fp_fca_dip);
976
977 port->fp_task = port->fp_last_task = FP_TASK_IDLE;
978
979 /*
980 * Init the starting value of fp_rscn_count. Note that if
981 * FC_INVALID_RSCN_COUNT is 0 (which is what it currently is), the
982 * actual # of RSCNs will be (fp_rscn_count - 1)
983 */
984 port->fp_rscn_count = FC_INVALID_RSCN_COUNT + 1;
985
986 mutex_init(&port->fp_mutex, NULL, MUTEX_DRIVER, NULL);
987 cv_init(&port->fp_cv, NULL, CV_DRIVER, NULL);
988 cv_init(&port->fp_attach_cv, NULL, CV_DRIVER, NULL);
989
990 (void) sprintf(name, "fp%d_cache", instance);
991
992 if ((portpro1 = ddi_prop_get_int(DDI_DEV_T_ANY,
993 dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
994 "phyport-instance", -1)) != -1) {
995 phyport = ddi_get_soft_state(fp_driver_softstate, portpro1);
996 fc_wwn_to_str(&phyport->fp_service_params.nport_ww_name, pwwn);
997 fc_wwn_to_str(&phyport->fp_service_params.node_ww_name, nwwn);
998 port->fp_npiv_type = FC_NPIV_PORT;
999 }
1000
1001 /*
1002 * Allocate the pool of fc_packet_t structs to be used with
1003 * this fp instance.
1004 */
1005 port->fp_pkt_cache = kmem_cache_create(name,
1006 (port->fp_fca_tran->fca_pkt_size) + sizeof (fp_cmd_t), 8,
1007 fp_cache_constructor, fp_cache_destructor, NULL, (void *)port,
1008 NULL, 0);
1009 port->fp_out_fpcmds = 0;
1010 if (port->fp_pkt_cache == NULL) {
1011 goto cache_alloc_failed;
1012 }
1013
1014
1015 /*
1016 * Allocate the d_id and pwwn hash tables for all remote ports
1017 * connected to this local port.
1018 */
1019 port->fp_did_table = kmem_zalloc(did_table_size *
1020 sizeof (struct d_id_hash), KM_SLEEP);
1021
1022 port->fp_pwwn_table = kmem_zalloc(pwwn_table_size *
1023 sizeof (struct pwwn_hash), KM_SLEEP);
1024
1025 port->fp_taskq = taskq_create("fp_ulp_callback", 1,
1026 MINCLSYSPRI, 1, 16, 0);
1027
1028 	/* Indicate that we don't have the pm components yet */
1029 port->fp_soft_state |= FP_SOFT_NO_PMCOMP;
1030
1031 /*
1032 * Bind the callbacks with the FCA driver. This will open the gate
1033 * for asynchronous callbacks, so after this call the fp_mutex
1034 * must be held when updating the fc_local_port_t struct.
1035 *
1036 * This is done _before_ setting up the job thread so we can avoid
1037 * cleaning up after the thread_create() in the error path. This
1038 * also means fp will be operating with fp_els_resp_pkt set to NULL.
1039 */
1040 if (fp_bind_callbacks(port) != DDI_SUCCESS) {
1041 goto bind_callbacks_failed;
1042 }
1043
1044 if (phyport) {
1045 mutex_enter(&phyport->fp_mutex);
1046 if (phyport->fp_port_next) {
1047 phyport->fp_port_next->fp_port_prev = port;
1048 port->fp_port_next = phyport->fp_port_next;
1049 phyport->fp_port_next = port;
1050 port->fp_port_prev = phyport;
1051 } else {
1052 phyport->fp_port_next = port;
1053 phyport->fp_port_prev = port;
1054 port->fp_port_next = phyport;
1055 port->fp_port_prev = phyport;
1056 }
1057 mutex_exit(&phyport->fp_mutex);
1058 }
1059
1060 /*
1061 * Init Symbolic Names
1062 */
1063 fp_init_symbolic_names(port);
1064
1065 pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t), sizeof (la_els_logi_t),
1066 KM_SLEEP, NULL);
1067
1068 if (pkt == NULL) {
1069 cmn_err(CE_WARN, "fp(%d): failed to allocate ELS packet",
1070 instance);
1071 goto alloc_els_packet_failed;
1072 }
1073
1074 (void) thread_create(NULL, 0, fp_job_handler, port, 0, &p0, TS_RUN,
1075 v.v_maxsyspri - 2);
1076
1077 fc_wwn_to_str(&port->fp_service_params.nport_ww_name, i_pwwn);
1078 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-port",
1079 i_pwwn) != DDI_PROP_SUCCESS) {
1080 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
1081 "fp(%d): Updating 'initiator-port' property"
1082 " on fp dev_info node failed", instance);
1083 }
1084
1085 fc_wwn_to_str(&port->fp_service_params.node_ww_name, i_pwwn);
1086 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-node",
1087 i_pwwn) != DDI_PROP_SUCCESS) {
1088 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
1089 "fp(%d): Updating 'initiator-node' property"
1090 " on fp dev_info node failed", instance);
1091 }
1092
1093 mutex_enter(&port->fp_mutex);
1094 port->fp_els_resp_pkt = pkt;
1095 mutex_exit(&port->fp_mutex);
1096
1097 /*
1098 * Determine the count of unsolicited buffers this FCA can support
1099 */
1100 fp_retrieve_caps(port);
1101
1102 /*
1103 * Allocate unsolicited buffer tokens
1104 */
1105 if (port->fp_ub_count) {
1106 ub_count = port->fp_ub_count;
1107 port->fp_ub_tokens = kmem_zalloc(ub_count *
1108 sizeof (*port->fp_ub_tokens), KM_SLEEP);
1109 /*
1110 * Do not fail the attach if unsolicited buffer allocation
1111 * fails; Just try to get along with whatever the FCA can do.
1112 */
1113 if (fc_ulp_uballoc(port, &ub_count, fp_unsol_buf_size,
1114 FC_TYPE_EXTENDED_LS, port->fp_ub_tokens) !=
1115 FC_SUCCESS || ub_count != port->fp_ub_count) {
1116 cmn_err(CE_WARN, "fp(%d): failed to allocate "
1117 " Unsolicited buffers. proceeding with attach...",
1118 instance);
1119 kmem_free(port->fp_ub_tokens,
1120 sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
1121 port->fp_ub_tokens = NULL;
1122 }
1123 }
1124
1125 fp_load_ulp_modules(dip, port);
1126
1127 /*
1128 * Enable DDI_SUSPEND and DDI_RESUME for this instance.
1129 */
1130 (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
1131 "pm-hardware-state", "needs-suspend-resume",
1132 strlen("needs-suspend-resume") + 1);
1133
1134 /*
1135 * fctl maintains a list of all port handles, so
1136 * help fctl add this one to its list now.
1137 */
1138 mutex_enter(&port->fp_mutex);
1139 fctl_add_port(port);
1140
1141 /*
1142 	 * If a state change is already in progress, set the bind state to
1143 * OFFLINE as well, so further state change callbacks into ULPs
1144 * will pass the appropriate states
1145 */
1146 if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE ||
1147 port->fp_statec_busy) {
1148 port->fp_bind_state = FC_STATE_OFFLINE;
1149 mutex_exit(&port->fp_mutex);
1150
1151 fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
1152 } else {
1153 /*
1154 * Without dropping the mutex, ensure that the port
1155 * startup happens ahead of state change callback
1156 * processing
1157 */
1158 ASSERT(port->fp_job_tail == NULL && port->fp_job_head == NULL);
1159
1160 port->fp_last_task = port->fp_task;
1161 port->fp_task = FP_TASK_PORT_STARTUP;
1162
1163 job = fctl_alloc_job(JOB_PORT_STARTUP, JOB_TYPE_FCTL_ASYNC,
1164 fp_startup_done, (opaque_t)port, KM_SLEEP);
1165
1166 port->fp_job_head = port->fp_job_tail = job;
1167
1168 cv_signal(&port->fp_cv);
1169
1170 mutex_exit(&port->fp_mutex);
1171 }
1172
1173 mutex_enter(&port->fp_mutex);
1174 while (port->fp_ulp_attach) {
1175 cv_wait(&port->fp_attach_cv, &port->fp_mutex);
1176 }
1177 mutex_exit(&port->fp_mutex);
1178
1179 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
1180 "pm-components", fp_pm_comps,
1181 sizeof (fp_pm_comps) / sizeof (fp_pm_comps[0])) !=
1182 DDI_PROP_SUCCESS) {
1183 FP_TRACE(FP_NHEAD2(9, 0), "Failed to create PM"
1184 " components property, PM disabled on this port.");
1185 mutex_enter(&port->fp_mutex);
1186 port->fp_pm_level = FP_PM_PORT_UP;
1187 mutex_exit(&port->fp_mutex);
1188 } else {
1189 if (pm_raise_power(dip, FP_PM_COMPONENT,
1190 FP_PM_PORT_UP) != DDI_SUCCESS) {
1191 FP_TRACE(FP_NHEAD2(9, 0), "Failed to raise"
1192 " power level");
1193 mutex_enter(&port->fp_mutex);
1194 port->fp_pm_level = FP_PM_PORT_UP;
1195 mutex_exit(&port->fp_mutex);
1196 }
1197
1198 /*
1199 * Don't unset the FP_SOFT_NO_PMCOMP flag until after
1200 * the call to pm_raise_power. The PM framework can't
1201 * handle multiple threads calling into it during attach.
1202 */
1203
1204 mutex_enter(&port->fp_mutex);
1205 port->fp_soft_state &= ~FP_SOFT_NO_PMCOMP;
1206 mutex_exit(&port->fp_mutex);
1207 }
1208
1209 ddi_report_dev(dip);
1210
1211 fp_log_port_event(port, ESC_SUNFC_PORT_ATTACH);
1212
1213 return (DDI_SUCCESS);
1214
1215 /*
1216  * Unwind any/all preceding allocations in the event of an error.
1217 */
1218
1219 alloc_els_packet_failed:
1220
1221 if (port->fp_fca_handle != NULL) {
1222 port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
1223 port->fp_fca_handle = NULL;
1224 }
1225
1226 if (port->fp_ub_tokens != NULL) {
1227 (void) fc_ulp_ubfree(port, port->fp_ub_count,
1228 port->fp_ub_tokens);
1229 kmem_free(port->fp_ub_tokens,
1230 port->fp_ub_count * sizeof (*port->fp_ub_tokens));
1231 port->fp_ub_tokens = NULL;
1232 }
1233
1234 if (port->fp_els_resp_pkt != NULL) {
1235 fp_free_pkt(port->fp_els_resp_pkt);
1236 port->fp_els_resp_pkt = NULL;
1237 }
1238
1239 bind_callbacks_failed:
1240
1241 if (port->fp_taskq != NULL) {
1242 taskq_destroy(port->fp_taskq);
1243 }
1244
1245 if (port->fp_pwwn_table != NULL) {
1246 kmem_free(port->fp_pwwn_table,
1247 pwwn_table_size * sizeof (struct pwwn_hash));
1248 port->fp_pwwn_table = NULL;
1249 }
1250
1251 if (port->fp_did_table != NULL) {
1252 kmem_free(port->fp_did_table,
1253 did_table_size * sizeof (struct d_id_hash));
1254 port->fp_did_table = NULL;
1255 }
1256
1257 if (port->fp_pkt_cache != NULL) {
1258 kmem_cache_destroy(port->fp_pkt_cache);
1259 port->fp_pkt_cache = NULL;
1260 }
1261
1262 cache_alloc_failed:
1263
1264 cv_destroy(&port->fp_attach_cv);
1265 cv_destroy(&port->fp_cv);
1266 mutex_destroy(&port->fp_mutex);
1267 ddi_remove_minor_node(port->fp_port_dip, NULL);
1268 ddi_soft_state_free(fp_driver_softstate, instance);
1269 ddi_prop_remove_all(dip);
1270
1271 return (DDI_FAILURE);
1272 }
1273
1274
1275 /*
1276 * Handle DDI_RESUME request
1277 */
1278 static int
1279 fp_resume_handler(dev_info_t *dip)
1280 {
1281 int rval;
1282 fc_local_port_t *port;
1283
1284 port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));
1285
1286 ASSERT(port != NULL);
1287
1288 #ifdef DEBUG
1289 mutex_enter(&port->fp_mutex);
1290 ASSERT(port->fp_soft_state & FP_SOFT_SUSPEND);
1291 mutex_exit(&port->fp_mutex);
1292 #endif
1293
1294 /*
1295 * If the port was power suspended, raise the power level
1296 */
1297 mutex_enter(&port->fp_mutex);
1298 if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
1299 (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
1300 ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);
1301
1302 mutex_exit(&port->fp_mutex);
1303 if (pm_raise_power(dip, FP_PM_COMPONENT,
1304 FP_PM_PORT_UP) != DDI_SUCCESS) {
1305 FP_TRACE(FP_NHEAD2(9, 0),
1306 "Failed to raise the power level");
1307 return (DDI_FAILURE);
1308 }
1309 mutex_enter(&port->fp_mutex);
1310 }
1311 port->fp_soft_state &= ~FP_SOFT_SUSPEND;
1312 mutex_exit(&port->fp_mutex);
1313
1314 /*
1315 	 * All the discovery is initiated and handled by the per-port thread.
1316 	 * Further, all the discovery is done in callback mode
1317 	 * (not polled mode); in a specific case such as this, the discovery
1318 	 * is required to happen in polled mode. The easiest way out is
1319 	 * to bail out the port thread and get started. Come back and fix this
1320 	 * to do on-demand discovery initiated by ULPs. ULPs such as FCP
1321 * will do on-demand discovery during pre-power-up busctl handling
1322 * which will only be possible when SCSA provides a new HBA vector
1323 * for sending down the PM busctl requests.
1324 */
1325 (void) callb_generic_cpr(&port->fp_cpr_info, CB_CODE_CPR_RESUME);
1326
1327 rval = fp_resume_all(port, FC_CMD_RESUME);
1328 if (rval != DDI_SUCCESS) {
1329 mutex_enter(&port->fp_mutex);
1330 port->fp_soft_state |= FP_SOFT_SUSPEND;
1331 mutex_exit(&port->fp_mutex);
1332 (void) callb_generic_cpr(&port->fp_cpr_info,
1333 CB_CODE_CPR_CHKPT);
1334 }
1335
1336 return (rval);
1337 }
1338
1339 /*
1340 * Perform FC Port power on initialization
1341 */
1342 static int
1343 fp_power_up(fc_local_port_t *port)
1344 {
1345 int rval;
1346
1347 ASSERT(MUTEX_HELD(&port->fp_mutex));
1348
1349 ASSERT((port->fp_soft_state & FP_SOFT_SUSPEND) == 0);
1350 ASSERT(port->fp_soft_state & FP_SOFT_POWER_DOWN);
1351
1352 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
1353
1354 mutex_exit(&port->fp_mutex);
1355
1356 rval = fp_resume_all(port, FC_CMD_POWER_UP);
1357 if (rval != DDI_SUCCESS) {
1358 mutex_enter(&port->fp_mutex);
1359 port->fp_soft_state |= FP_SOFT_POWER_DOWN;
1360 } else {
1361 mutex_enter(&port->fp_mutex);
1362 }
1363
1364 return (rval);
1365 }
1366
1367
1368 /*
1369 * It is important to note that the power may possibly be removed between
1370 * SUSPEND and the ensuing RESUME operation. In such a context the underlying
1371 * FC port hardware would have gone through an OFFLINE to ONLINE transition
1372 * (hardware state). In this case, the port driver may need to rediscover the
1373 * topology, perform LOGINs, register with the name server again and perform
1374 * any such port initialization procedures. To perform LOGINs, the driver could
1375 * use the port device handle to see if a LOGIN needs to be performed and use
1376 * the D_ID and WWN in it. The LOGINs may fail (if the hardware is reconfigured
1377 * or removed) which will be reflected in the map the ULPs will see.
1378 */
1379 static int
1380 fp_resume_all(fc_local_port_t *port, fc_attach_cmd_t cmd)
1381 {
1382
1383 ASSERT(!MUTEX_HELD(&port->fp_mutex));
1384
1385 if (fp_bind_callbacks(port) != DDI_SUCCESS) {
1386 return (DDI_FAILURE);
1387 }
1388
1389 mutex_enter(&port->fp_mutex);
1390
1391 /*
1392 	 * If there are commands queued for delayed retry, just flush them
1393 	 * out instead of working the hard way to figure out which ones are
1394 	 * good for restart and which ones are not (ELSs are definitely not
1395 	 * good, as the port will have to go through a new spin of rediscovery
1396 	 * now).
1397 */
1398 if (port->fp_restore & FP_RESTORE_WAIT_TIMEOUT) {
1399 fp_cmd_t *cmd;
1400
1401 port->fp_restore &= ~FP_RESTORE_WAIT_TIMEOUT;
1402
1403 mutex_exit(&port->fp_mutex);
1404 while ((cmd = fp_deque_cmd(port)) != NULL) {
1405 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
1406 fp_iodone(cmd);
1407 }
1408 mutex_enter(&port->fp_mutex);
1409 }
1410
1411 if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE) {
1412 if ((port->fp_restore & FP_RESTORE_OFFLINE_TIMEOUT) ||
1413 port->fp_dev_count) {
1414 port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
1415 port->fp_offline_tid = timeout(fp_offline_timeout,
1416 (caddr_t)port, fp_offline_ticks);
1417 }
1418 if (port->fp_job_head) {
1419 cv_signal(&port->fp_cv);
1420 }
1421 mutex_exit(&port->fp_mutex);
1422 fctl_attach_ulps(port, cmd, &modlinkage);
1423 } else {
1424 struct job_request *job;
1425
1426 /*
1427 * If an OFFLINE timer was running at the time of
1428 * suspending, there is no need to restart it as
1429 * the port is ONLINE now.
1430 */
1431 port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
1432 if (port->fp_statec_busy == 0) {
1433 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
1434 }
1435 port->fp_statec_busy++;
1436 mutex_exit(&port->fp_mutex);
1437
1438 job = fctl_alloc_job(JOB_PORT_ONLINE,
1439 JOB_CANCEL_ULP_NOTIFICATION, NULL, NULL, KM_SLEEP);
1440 fctl_enque_job(port, job);
1441
1442 fctl_jobwait(job);
1443 fctl_remove_oldies(port);
1444
1445 fctl_attach_ulps(port, cmd, &modlinkage);
1446 fctl_dealloc_job(job);
1447 }
1448
1449 return (DDI_SUCCESS);
1450 }
1451
1452
1453 /*
1454 * At this time, there shouldn't be any I/O requests on this port.
1455 * But the unsolicited callbacks from the underlying FCA port need
1456 * to be handled very carefully. The steps followed to handle the
1457 * DDI_DETACH are:
1458 * + Grab the port driver mutex, check if the unsolicited
1459  *	callback is currently being processed. If true, fail
1460  *	the DDI_DETACH request by printing a message; if false,
1461  *	mark the DDI_DETACH as in progress, so that any
1462 * further unsolicited callbacks get bounced.
1463 * + Perform PRLO/LOGO if necessary, cleanup all the data
1464 * structures.
1465 * + Get the job_handler thread to gracefully exit.
1466 * + Unregister callbacks with the FCA port.
1467 * + Now that some peace is found, notify all the ULPs of
1468 * DDI_DETACH request (using ulp_port_detach entry point)
1469 * + Free all mutexes, semaphores, conditional variables.
1470 * + Free the soft state, return success.
1471 *
1472 * Important considerations:
1473 * Port driver de-registers state change and unsolicited
1474 * callbacks before taking up the task of notifying ULPs
1475 * and performing PRLO and LOGOs.
1476 *
1477 * A port may go offline at the time PRLO/LOGO is being
1478 * requested. It is expected of all FCA drivers to fail
1479 * such requests either immediately with a FC_OFFLINE
1480 * return code to fc_fca_transport() or return the packet
1481 * asynchronously with pkt state set to FC_PKT_PORT_OFFLINE
1482 */
1483 static int
1484 fp_detach_handler(fc_local_port_t *port)
1485 {
1486 job_request_t *job;
1487 uint32_t delay_count;
1488 fc_orphan_t *orp, *tmporp;
1489
1490 /*
1491 * In a Fabric topology with many host ports connected to
1492 * a switch, another detaching instance of fp might have
1493 * triggered a LOGO (which is an unsolicited request to
1494 * this instance). So in order to be able to successfully
1495 * detach by taking care of such cases a delay of about
1496 * 30 seconds is introduced.
1497 */
1498 delay_count = 0;
1499 mutex_enter(&port->fp_mutex);
1500 if (port->fp_out_fpcmds != 0) {
1501 /*
1502 * At this time we can only check fp internal commands, because
1503 		 * sd/ssd/scsi_vhci should have finished all their commands,
1504 * fcp/fcip/fcsm should have finished all their commands.
1505 *
1506 * It seems that all fp internal commands are asynchronous now.
1507 */
1508 port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
1509 mutex_exit(&port->fp_mutex);
1510
1511 cmn_err(CE_WARN, "fp(%d): %d fp_cmd(s) is/are in progress"
1512 " Failing detach", port->fp_instance, port->fp_out_fpcmds);
1513 return (DDI_FAILURE);
1514 }
1515
1516 while ((port->fp_soft_state &
1517 (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) &&
1518 (delay_count < 30)) {
1519 mutex_exit(&port->fp_mutex);
1520 delay_count++;
1521 delay(drv_usectohz(1000000));
1522 mutex_enter(&port->fp_mutex);
1523 }
1524
1525 if (port->fp_soft_state &
1526 (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) {
1527 port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
1528 mutex_exit(&port->fp_mutex);
1529
1530 cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
1531 " Failing detach", port->fp_instance);
1532 return (DDI_FAILURE);
1533 }
1534
1535 port->fp_soft_state |= FP_SOFT_IN_DETACH;
1536 port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
1537 mutex_exit(&port->fp_mutex);
1538
1539 /*
1540 * If we're powered down, we need to raise power prior to submitting
1541 * the JOB_PORT_SHUTDOWN job. Otherwise, the job handler will never
1542 * process the shutdown job.
1543 */
1544 if (fctl_busy_port(port) != 0) {
1545 cmn_err(CE_WARN, "fp(%d): fctl_busy_port failed",
1546 port->fp_instance);
1547 mutex_enter(&port->fp_mutex);
1548 port->fp_soft_state &= ~FP_SOFT_IN_DETACH;
1549 mutex_exit(&port->fp_mutex);
1550 return (DDI_FAILURE);
1551 }
1552
1553 /*
1554 * This will deallocate data structs and cause the "job" thread
1555 * to exit, in preparation for DDI_DETACH on the instance.
1556 * This can sleep for an arbitrary duration, since it waits for
1557 * commands over the wire, timeout(9F) callbacks, etc.
1558 *
1559 * CAUTION: There is still a race here, where the "job" thread
1560 	 * can still be executing code even though the fctl_jobwait() call
1561 	 * below has returned to us. In theory the fp driver could even be
1562 	 * modunloaded even though the job thread isn't done executing.
1563 * without creating the race condition.
1564 */
1565 job = fctl_alloc_job(JOB_PORT_SHUTDOWN, 0, NULL,
1566 (opaque_t)port, KM_SLEEP);
1567 fctl_enque_job(port, job);
1568 fctl_jobwait(job);
1569 fctl_dealloc_job(job);
1570
1571
1572 (void) pm_lower_power(port->fp_port_dip, FP_PM_COMPONENT,
1573 FP_PM_PORT_DOWN);
1574
1575 if (port->fp_taskq) {
1576 taskq_destroy(port->fp_taskq);
1577 }
1578
1579 ddi_prop_remove_all(port->fp_port_dip);
1580
1581 ddi_remove_minor_node(port->fp_port_dip, NULL);
1582
1583 fctl_remove_port(port);
1584
1585 fp_free_pkt(port->fp_els_resp_pkt);
1586
1587 if (port->fp_ub_tokens) {
1588 if (fc_ulp_ubfree(port, port->fp_ub_count,
1589 port->fp_ub_tokens) != FC_SUCCESS) {
1590 cmn_err(CE_WARN, "fp(%d): couldn't free "
1591 " unsolicited buffers", port->fp_instance);
1592 }
1593 kmem_free(port->fp_ub_tokens,
1594 sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
1595 port->fp_ub_tokens = NULL;
1596 }
1597
1598 if (port->fp_pkt_cache != NULL) {
1599 kmem_cache_destroy(port->fp_pkt_cache);
1600 }
1601
1602 port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
1603
1604 mutex_enter(&port->fp_mutex);
1605 if (port->fp_did_table) {
1606 kmem_free(port->fp_did_table, did_table_size *
1607 sizeof (struct d_id_hash));
1608 }
1609
1610 if (port->fp_pwwn_table) {
1611 kmem_free(port->fp_pwwn_table, pwwn_table_size *
1612 sizeof (struct pwwn_hash));
1613 }
1614 orp = port->fp_orphan_list;
1615 while (orp) {
1616 tmporp = orp;
1617 orp = orp->orp_next;
1618 kmem_free(tmporp, sizeof (*orp));
1619 }
1620
1621 mutex_exit(&port->fp_mutex);
1622
1623 fp_log_port_event(port, ESC_SUNFC_PORT_DETACH);
1624
1625 mutex_destroy(&port->fp_mutex);
1626 cv_destroy(&port->fp_attach_cv);
1627 cv_destroy(&port->fp_cv);
1628 ddi_soft_state_free(fp_driver_softstate, port->fp_instance);
1629
1630 return (DDI_SUCCESS);
1631 }
1632
1633
1634 /*
1635 * Steps to perform DDI_SUSPEND operation on a FC port
1636 *
1637 * - If already suspended return DDI_FAILURE
1638 * - If already power-suspended return DDI_SUCCESS
1639  * - If an unsolicited callback or state change handling is
1640  *	in progress, throw a warning message, return DDI_FAILURE
1641 * - Cancel timeouts
1642 * - SUSPEND the job_handler thread (means do nothing as it is
1643 * taken care of by the CPR frame work)
1644 */
1645 static int
1646 fp_suspend_handler(fc_local_port_t *port)
1647 {
1648 uint32_t delay_count;
1649
1650 mutex_enter(&port->fp_mutex);
1651
1652 /*
1653 * The following should never happen, but
1654 * let the driver be more defensive here
1655 */
1656 if (port->fp_soft_state & FP_SOFT_SUSPEND) {
1657 mutex_exit(&port->fp_mutex);
1658 return (DDI_FAILURE);
1659 }
1660
1661 /*
1662 * If the port is already power suspended, there
1663 	 * is nothing else to do, so return DDI_SUCCESS,
1664 * but mark the SUSPEND bit in the soft state
1665 * before leaving.
1666 */
1667 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
1668 port->fp_soft_state |= FP_SOFT_SUSPEND;
1669 mutex_exit(&port->fp_mutex);
1670 return (DDI_SUCCESS);
1671 }
1672
1673 /*
1674 * Check if an unsolicited callback or state change handling is
1675 * in progress. If true, fail the suspend operation; also throw
1676 * a warning message notifying the failure. Note that Sun PCI
1677 * hotplug spec recommends messages in cases of failure (but
1678 * not flooding the console)
1679 *
1680 * Busy waiting for a short interval (500 millisecond ?) to see
1681 * if the callback processing completes may be another idea. Since
1682 * most of the callback processing involves a lot of work, it
1683 * is safe to just fail the SUSPEND operation. It is definitely
1684 * not bad to fail the SUSPEND operation if the driver is busy.
1685 */
1686 delay_count = 0;
1687 while ((port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
1688 FP_SOFT_IN_UNSOL_CB)) && (delay_count < 30)) {
1689 mutex_exit(&port->fp_mutex);
1690 delay_count++;
1691 delay(drv_usectohz(1000000));
1692 mutex_enter(&port->fp_mutex);
1693 }
1694
1695 if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
1696 FP_SOFT_IN_UNSOL_CB)) {
1697 mutex_exit(&port->fp_mutex);
1698 cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
1699 " Failing suspend", port->fp_instance);
1700 return (DDI_FAILURE);
1701 }
1702
1703 /*
1704 	 * Check if the FC port thread is busy
1705 */
1706 if (port->fp_job_head) {
1707 mutex_exit(&port->fp_mutex);
1708 FP_TRACE(FP_NHEAD2(9, 0),
1709 "FC port thread is busy: Failing suspend");
1710 return (DDI_FAILURE);
1711 }
1712 port->fp_soft_state |= FP_SOFT_SUSPEND;
1713
1714 fp_suspend_all(port);
1715 mutex_exit(&port->fp_mutex);
1716
1717 return (DDI_SUCCESS);
1718 }
1719
1720
1721 /*
1722 * Prepare for graceful power down of a FC port
1723 */
1724 static int
1725 fp_power_down(fc_local_port_t *port)
1726 {
1727 ASSERT(MUTEX_HELD(&port->fp_mutex));
1728
1729 /*
1730 * Power down request followed by a DDI_SUSPEND should
1731 * never happen; If it does return DDI_SUCCESS
1732 */
1733 if (port->fp_soft_state & FP_SOFT_SUSPEND) {
1734 port->fp_soft_state |= FP_SOFT_POWER_DOWN;
1735 return (DDI_SUCCESS);
1736 }
1737
1738 /*
1739 * If the port is already power suspended, there
1740 	 * is nothing else to do, so return DDI_SUCCESS.
1741 */
1742 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
1743 return (DDI_SUCCESS);
1744 }
1745
1746 /*
1747 * Check if an unsolicited callback or state change handling
1748 * is in progress. If true, fail the PM suspend operation.
1749 * But don't print a message unless the verbosity of the
1750 * driver desires otherwise.
1751 */
1752 if ((port->fp_soft_state & FP_SOFT_IN_STATEC_CB) ||
1753 (port->fp_soft_state & FP_SOFT_IN_UNSOL_CB)) {
1754 FP_TRACE(FP_NHEAD2(9, 0),
1755 "Unsolicited callback in progress: Failing power down");
1756 return (DDI_FAILURE);
1757 }
1758
1759 /*
1760 	 * Check if the FC port thread is busy
1761 */
1762 if (port->fp_job_head) {
1763 FP_TRACE(FP_NHEAD2(9, 0),
1764 "FC port thread is busy: Failing power down");
1765 return (DDI_FAILURE);
1766 }
1767 port->fp_soft_state |= FP_SOFT_POWER_DOWN;
1768
1769 /*
1770 * check if the ULPs are ready for power down
1771 */
1772 mutex_exit(&port->fp_mutex);
1773 if (fctl_detach_ulps(port, FC_CMD_POWER_DOWN,
1774 &modlinkage) != FC_SUCCESS) {
1775 mutex_enter(&port->fp_mutex);
1776 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
1777 mutex_exit(&port->fp_mutex);
1778
1779 /*
1780 * Power back up the obedient ULPs that went down
1781 */
1782 fp_attach_ulps(port, FC_CMD_POWER_UP);
1783
1784 FP_TRACE(FP_NHEAD2(9, 0),
1785 "ULP(s) busy, detach_ulps failed. Failing power down");
1786 mutex_enter(&port->fp_mutex);
1787 return (DDI_FAILURE);
1788 }
1789 mutex_enter(&port->fp_mutex);
1790
1791 fp_suspend_all(port);
1792
1793 return (DDI_SUCCESS);
1794 }
1795
1796
1797 /*
1798 * Suspend the entire FC port
1799 */
1800 static void
1801 fp_suspend_all(fc_local_port_t *port)
1802 {
1803 int index;
1804 struct pwwn_hash *head;
1805 fc_remote_port_t *pd;
1806
1807 ASSERT(MUTEX_HELD(&port->fp_mutex));
1808
1809 if (port->fp_wait_tid != 0) {
1810 timeout_id_t tid;
1811
1812 tid = port->fp_wait_tid;
1813 port->fp_wait_tid = (timeout_id_t)NULL;
1814 mutex_exit(&port->fp_mutex);
1815 (void) untimeout(tid);
1816 mutex_enter(&port->fp_mutex);
1817 port->fp_restore |= FP_RESTORE_WAIT_TIMEOUT;
1818 }
1819
1820 if (port->fp_offline_tid) {
1821 timeout_id_t tid;
1822
1823 tid = port->fp_offline_tid;
1824 port->fp_offline_tid = (timeout_id_t)NULL;
1825 mutex_exit(&port->fp_mutex);
1826 (void) untimeout(tid);
1827 mutex_enter(&port->fp_mutex);
1828 port->fp_restore |= FP_RESTORE_OFFLINE_TIMEOUT;
1829 }
1830 mutex_exit(&port->fp_mutex);
1831 port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
1832 mutex_enter(&port->fp_mutex);
1833
1834 /*
1835 * Mark all devices as OLD, and reset the LOGIN state as well
1836 * (this will force the ULPs to perform a LOGIN after calling
1837 * fc_portgetmap() during RESUME/PM_RESUME)
1838 */
1839 for (index = 0; index < pwwn_table_size; index++) {
1840 head = &port->fp_pwwn_table[index];
1841 pd = head->pwwn_head;
1842 while (pd != NULL) {
1843 mutex_enter(&pd->pd_mutex);
1844 fp_remote_port_offline(pd);
1845 fctl_delist_did_table(port, pd);
1846 pd->pd_state = PORT_DEVICE_VALID;
1847 pd->pd_login_count = 0;
1848 mutex_exit(&pd->pd_mutex);
1849 pd = pd->pd_wwn_hnext;
1850 }
1851 }
1852 }
1853
1854
1855 /*
1856 * fp_cache_constructor: Constructor function for kmem_cache_create(9F).
1857  *	Performs initializations for fc_packet_t structs.
1858 * Returns 0 for success or -1 for failure.
1859 *
1860 * This function allocates DMA handles for both command and responses.
1861 * Most of the ELSs used have both command and responses so it is strongly
1862 * desired to move them to cache constructor routine.
1863 *
1864 * Context: Can sleep iff called with KM_SLEEP flag.
1865 */
1866 static int
1867 fp_cache_constructor(void *buf, void *cdarg, int kmflags)
1868 {
1869 int (*cb) (caddr_t);
1870 fc_packet_t *pkt;
1871 fp_cmd_t *cmd = (fp_cmd_t *)buf;
1872 fc_local_port_t *port = (fc_local_port_t *)cdarg;
1873
1874 cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
1875
1876 cmd->cmd_next = NULL;
1877 cmd->cmd_flags = 0;
1878 cmd->cmd_dflags = 0;
1879 cmd->cmd_job = NULL;
1880 cmd->cmd_port = port;
1881 pkt = &cmd->cmd_pkt;
1882
1883 if (!(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) {
1884 if (ddi_dma_alloc_handle(port->fp_fca_dip,
1885 port->fp_fca_tran->fca_dma_attr, cb, NULL,
1886 &pkt->pkt_cmd_dma) != DDI_SUCCESS) {
1887 return (-1);
1888 }
1889
1890 if (ddi_dma_alloc_handle(port->fp_fca_dip,
1891 port->fp_fca_tran->fca_dma_attr, cb, NULL,
1892 &pkt->pkt_resp_dma) != DDI_SUCCESS) {
1893 ddi_dma_free_handle(&pkt->pkt_cmd_dma);
1894 return (-1);
1895 }
1896 } else {
1897 pkt->pkt_cmd_dma = 0;
1898 pkt->pkt_resp_dma = 0;
1899 }
1900
1901 pkt->pkt_cmd_acc = pkt->pkt_resp_acc = NULL;
1902 pkt->pkt_cmd_cookie_cnt = pkt->pkt_resp_cookie_cnt =
1903 pkt->pkt_data_cookie_cnt = 0;
1904 pkt->pkt_cmd_cookie = pkt->pkt_resp_cookie =
1905 pkt->pkt_data_cookie = NULL;
1906 pkt->pkt_fca_private = (caddr_t)buf + sizeof (fp_cmd_t);
1907
1908 return (0);
1909 }
1910
1911
1912 /*
1913 * fp_cache_destructor: Destructor function for kmem_cache_create().
1914  *	Performs un-initializations for fc_packet_t structs.
1915 */
1916 /* ARGSUSED */
1917 static void
1918 fp_cache_destructor(void *buf, void *cdarg)
1919 {
1920 fp_cmd_t *cmd = (fp_cmd_t *)buf;
1921 fc_packet_t *pkt;
1922
1923 pkt = &cmd->cmd_pkt;
1924 if (pkt->pkt_cmd_dma) {
1925 ddi_dma_free_handle(&pkt->pkt_cmd_dma);
1926 }
1927
1928 if (pkt->pkt_resp_dma) {
1929 ddi_dma_free_handle(&pkt->pkt_resp_dma);
1930 }
1931 }
1932
1933
1934 /*
1935 * Packet allocation for ELS and any other port driver commands
1936 *
1937  * Some ELSs like FLOGI and PLOGI are critical for topology and
1938  * device discovery, and a system's inability to allocate memory
1939  * or DVMA resources while performing some of these critical ELSs
1940  * causes a lot of problems. While memory allocation failures are
1941  * rare, DVMA resource failures are common as applications become
1942  * more and more demanding on large servers. So it is desirable to
1943  * have framework support for reserving a fragment of DVMA. Until
1944  * that is implemented the correct way, the impact is severe
1945  * whenever a LIP happens at a time when DVMA resources are
1946  * completely drained - so an attempt is made to use KM_SLEEP when
1947  * requesting these resources, in the hope that the requests won't
1948  * hang forever.
1949 *
1950 * The fc_remote_port_t argument is stored into the pkt_pd field in the
1951 * fc_packet_t struct prior to the fc_ulp_init_packet() call. This
1952 * ensures that the pd_ref_count for the fc_remote_port_t is valid.
1953 * If there is no fc_remote_port_t associated with the fc_packet_t, then
1954 * fp_alloc_pkt() must be called with pd set to NULL.
1955 *
1956  * fp/fctl will reuse fp_cmd_t in places and change pkt_cmdlen/rsplen;
1957  * this is really a design fault. It causes no problem for physical FCAs,
1958  * but it can cause a memory leak or panic for virtual FCAs like fcoei.
1959 *
1960 * For FCAs that don't support DMA, such as fcoei, we will use
1961 * pkt_fctl_rsvd1/rsvd2 to keep the real cmd_len/resp_len.
1962 */
1963
1964 static fp_cmd_t *
1965 fp_alloc_pkt(fc_local_port_t *port, int cmd_len, int resp_len, int kmflags,
1966 fc_remote_port_t *pd)
1967 {
1968 int rval;
1969 ulong_t real_len;
1970 fp_cmd_t *cmd;
1971 fc_packet_t *pkt;
1972 int (*cb) (caddr_t);
1973 ddi_dma_cookie_t pkt_cookie;
1974 ddi_dma_cookie_t *cp;
1975 uint32_t cnt;
1976
1977 ASSERT(!MUTEX_HELD(&port->fp_mutex));
1978
1979 cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
1980
1981 cmd = (fp_cmd_t *)kmem_cache_alloc(port->fp_pkt_cache, kmflags);
1982 if (cmd == NULL) {
1983 return (cmd);
1984 }
1985
1986 cmd->cmd_ulp_pkt = NULL;
1987 cmd->cmd_flags = 0;
1988 pkt = &cmd->cmd_pkt;
1989 ASSERT(cmd->cmd_dflags == 0);
1990
1991 pkt->pkt_datalen = 0;
1992 pkt->pkt_data = NULL;
1993 pkt->pkt_state = 0;
1994 pkt->pkt_action = 0;
1995 pkt->pkt_reason = 0;
1996 pkt->pkt_expln = 0;
1997 pkt->pkt_cmd = NULL;
1998 pkt->pkt_resp = NULL;
1999 pkt->pkt_fctl_rsvd1 = NULL;
2000 pkt->pkt_fctl_rsvd2 = NULL;
2001
2002 /*
2003 * Init pkt_pd with the given pointer; this must be done _before_
2004 * the call to fc_ulp_init_packet().
2005 */
2006 pkt->pkt_pd = pd;
2007
2008 /* Now call the FCA driver to init its private, per-packet fields */
2009 if (fc_ulp_init_packet((opaque_t)port, pkt, kmflags) != FC_SUCCESS) {
2010 goto alloc_pkt_failed;
2011 }
2012
2013 if (cmd_len && !(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) {
2014 ASSERT(pkt->pkt_cmd_dma != NULL);
2015
2016 rval = ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmd_len,
2017 port->fp_fca_tran->fca_acc_attr, DDI_DMA_CONSISTENT,
2018 cb, NULL, (caddr_t *)&pkt->pkt_cmd, &real_len,
2019 &pkt->pkt_cmd_acc);
2020
2021 if (rval != DDI_SUCCESS) {
2022 goto alloc_pkt_failed;
2023 }
2024 cmd->cmd_dflags |= FP_CMD_VALID_DMA_MEM;
2025
2026 if (real_len < cmd_len) {
2027 goto alloc_pkt_failed;
2028 }
2029
2030 rval = ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL,
2031 pkt->pkt_cmd, real_len, DDI_DMA_WRITE |
2032 DDI_DMA_CONSISTENT, cb, NULL,
2033 &pkt_cookie, &pkt->pkt_cmd_cookie_cnt);
2034
2035 if (rval != DDI_DMA_MAPPED) {
2036 goto alloc_pkt_failed;
2037 }
2038
2039 cmd->cmd_dflags |= FP_CMD_VALID_DMA_BIND;
2040
2041 if (pkt->pkt_cmd_cookie_cnt >
2042 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) {
2043 goto alloc_pkt_failed;
2044 }
2045
2046 ASSERT(pkt->pkt_cmd_cookie_cnt != 0);
2047
2048 cp = pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
2049 pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
2050 KM_NOSLEEP);
2051
2052 if (cp == NULL) {
2053 goto alloc_pkt_failed;
2054 }
2055
2056 *cp = pkt_cookie;
2057 cp++;
2058 for (cnt = 1; cnt < pkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
2059 ddi_dma_nextcookie(pkt->pkt_cmd_dma, &pkt_cookie);
2060 *cp = pkt_cookie;
2061 }
2062 } else if (cmd_len != 0) {
2063 pkt->pkt_cmd = kmem_alloc(cmd_len, KM_SLEEP);
2064 pkt->pkt_fctl_rsvd1 = (opaque_t)(uintptr_t)cmd_len;
2065 }
2066
2067 if (resp_len && !(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) {
2068 ASSERT(pkt->pkt_resp_dma != NULL);
2069
2070 rval = ddi_dma_mem_alloc(pkt->pkt_resp_dma, resp_len,
2071 port->fp_fca_tran->fca_acc_attr,
2072 DDI_DMA_CONSISTENT, cb, NULL,
2073 (caddr_t *)&pkt->pkt_resp, &real_len,
2074 &pkt->pkt_resp_acc);
2075
2076 if (rval != DDI_SUCCESS) {
2077 goto alloc_pkt_failed;
2078 }
2079 cmd->cmd_dflags |= FP_RESP_VALID_DMA_MEM;
2080
2081 if (real_len < resp_len) {
2082 goto alloc_pkt_failed;
2083 }
2084
2085 rval = ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL,
2086 pkt->pkt_resp, real_len, DDI_DMA_READ |
2087 DDI_DMA_CONSISTENT, cb, NULL,
2088 &pkt_cookie, &pkt->pkt_resp_cookie_cnt);
2089
2090 if (rval != DDI_DMA_MAPPED) {
2091 goto alloc_pkt_failed;
2092 }
2093
2094 cmd->cmd_dflags |= FP_RESP_VALID_DMA_BIND;
2095
2096 if (pkt->pkt_resp_cookie_cnt >
2097 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) {
2098 goto alloc_pkt_failed;
2099 }
2100
2101 		ASSERT(pkt->pkt_resp_cookie_cnt != 0);
2102
2103 cp = pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
2104 pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
2105 KM_NOSLEEP);
2106
2107 if (cp == NULL) {
2108 goto alloc_pkt_failed;
2109 }
2110
2111 *cp = pkt_cookie;
2112 cp++;
2113 for (cnt = 1; cnt < pkt->pkt_resp_cookie_cnt; cnt++, cp++) {
2114 ddi_dma_nextcookie(pkt->pkt_resp_dma, &pkt_cookie);
2115 *cp = pkt_cookie;
2116 }
2117 } else if (resp_len != 0) {
2118 pkt->pkt_resp = kmem_alloc(resp_len, KM_SLEEP);
2119 pkt->pkt_fctl_rsvd2 = (opaque_t)(uintptr_t)resp_len;
2120 }
2121
2122 pkt->pkt_cmdlen = cmd_len;
2123 pkt->pkt_rsplen = resp_len;
2124 pkt->pkt_ulp_private = cmd;
2125
2126 return (cmd);
2127
2128 alloc_pkt_failed:
2129
2130 fp_free_dma(cmd);
2131
2132 if (pkt->pkt_cmd_cookie != NULL) {
2133 kmem_free(pkt->pkt_cmd_cookie,
2134 pkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
2135 pkt->pkt_cmd_cookie = NULL;
2136 }
2137
2138 if (pkt->pkt_resp_cookie != NULL) {
2139 kmem_free(pkt->pkt_resp_cookie,
2140 pkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
2141 pkt->pkt_resp_cookie = NULL;
2142 }
2143
2144 if (port->fp_soft_state & FP_SOFT_FCA_IS_NODMA) {
2145 if (pkt->pkt_cmd) {
2146 kmem_free(pkt->pkt_cmd, cmd_len);
2147 }
2148
2149 if (pkt->pkt_resp) {
2150 kmem_free(pkt->pkt_resp, resp_len);
2151 }
2152 }
2153
2154 kmem_cache_free(port->fp_pkt_cache, cmd);
2155
2156 return (NULL);
2157 }
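
/*
 * Illustrative sketch (not part of this driver) of the typical caller
 * pattern around fp_alloc_pkt(), modeled on fp_fabric_login() further
 * below; the sizes used here are placeholders:
 *
 *	cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t),
 *	    sizeof (la_els_logi_t), KM_SLEEP, NULL);
 *	if (cmd == NULL)
 *		return (FC_NOMEM);
 *	cmd->cmd_retry_count = fp_retry_count;
 *	if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS)
 *		fp_free_pkt(cmd);
 *
 * On success the packet is freed from the command's completion path
 * (see fp_iodone() below).
 */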
2158
2159
2160 /*
2161 * Free FC packet
2162 */
2163 static void
2164 fp_free_pkt(fp_cmd_t *cmd)
2165 {
2166 fc_local_port_t *port;
2167 fc_packet_t *pkt;
2168
2169 ASSERT(!MUTEX_HELD(&cmd->cmd_port->fp_mutex));
2170
2171 cmd->cmd_next = NULL;
2172 cmd->cmd_job = NULL;
2173 pkt = &cmd->cmd_pkt;
2174 pkt->pkt_ulp_private = 0;
2175 pkt->pkt_tran_flags = 0;
2176 pkt->pkt_tran_type = 0;
2177 port = cmd->cmd_port;
2178
2179 if (pkt->pkt_cmd_cookie != NULL) {
2180 kmem_free(pkt->pkt_cmd_cookie, pkt->pkt_cmd_cookie_cnt *
2181 sizeof (ddi_dma_cookie_t));
2182 pkt->pkt_cmd_cookie = NULL;
2183 }
2184
2185 if (pkt->pkt_resp_cookie != NULL) {
2186 kmem_free(pkt->pkt_resp_cookie, pkt->pkt_resp_cookie_cnt *
2187 sizeof (ddi_dma_cookie_t));
2188 pkt->pkt_resp_cookie = NULL;
2189 }
2190
2191 if (port->fp_soft_state & FP_SOFT_FCA_IS_NODMA) {
2192 if (pkt->pkt_cmd) {
2193 kmem_free(pkt->pkt_cmd,
2194 (uint32_t)(uintptr_t)pkt->pkt_fctl_rsvd1);
2195 }
2196
2197 if (pkt->pkt_resp) {
2198 kmem_free(pkt->pkt_resp,
2199 (uint32_t)(uintptr_t)pkt->pkt_fctl_rsvd2);
2200 }
2201 }
2202
2203 fp_free_dma(cmd);
2204 (void) fc_ulp_uninit_packet((opaque_t)port, pkt);
2205 kmem_cache_free(port->fp_pkt_cache, (void *)cmd);
2206 }
2207
2208
2209 /*
2210 * Release DVMA resources
2211 */
2212 static void
2213 fp_free_dma(fp_cmd_t *cmd)
2214 {
2215 fc_packet_t *pkt = &cmd->cmd_pkt;
2216
2217 pkt->pkt_cmdlen = 0;
2218 pkt->pkt_rsplen = 0;
2219 pkt->pkt_tran_type = 0;
2220 pkt->pkt_tran_flags = 0;
2221
2222 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_BIND) {
2223 (void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma);
2224 }
2225
2226 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_MEM) {
2227 if (pkt->pkt_cmd_acc) {
2228 ddi_dma_mem_free(&pkt->pkt_cmd_acc);
2229 }
2230 }
2231
2232 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_BIND) {
2233 (void) ddi_dma_unbind_handle(pkt->pkt_resp_dma);
2234 }
2235
2236 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_MEM) {
2237 if (pkt->pkt_resp_acc) {
2238 ddi_dma_mem_free(&pkt->pkt_resp_acc);
2239 }
2240 }
2241 cmd->cmd_dflags = 0;
2242 }
2243
2244
2245 /*
2246 * Dedicated thread to perform various activities. One thread for
2247 * each fc_local_port_t (driver soft state) instance.
2248 * Note, this effectively works out to one thread for each local
2249 * port, but there are also some Solaris taskq threads in use on a per-local
2250 * port basis; these also need to be taken into consideration.
2251 */
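/*
 * Illustrative sketch (not part of this driver): the per-port job thread
 * is normally spun up at attach time with thread_create(), roughly as
 * shown here; the soft-state field name and the priority chosen are
 * assumptions for the example.
 *
 *	port->fp_thread = thread_create(NULL, 0, fp_job_handler,
 *	    (caddr_t)port, 0, &p0, TS_RUN, v.v_maxsyspri - 2);
 */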
2252 static void
2253 fp_job_handler(fc_local_port_t *port)
2254 {
2255 int rval;
2256 uint32_t *d_id;
2257 fc_remote_port_t *pd;
2258 job_request_t *job;
2259
2260 #ifndef __lock_lint
2261 /*
2262 * Solaris-internal stuff for proper operation of kernel threads
2263 * with Solaris CPR.
2264 */
2265 CALLB_CPR_INIT(&port->fp_cpr_info, &port->fp_mutex,
2266 callb_generic_cpr, "fp_job_handler");
2267 #endif
2268
2269
2270 /* Loop forever waiting for work to do */
2271 for (;;) {
2272
2273 mutex_enter(&port->fp_mutex);
2274
2275 /*
2276 * Sleep if no work to do right now, or if we want
2277 * to suspend or power-down.
2278 */
2279 while (port->fp_job_head == NULL ||
2280 (port->fp_soft_state & (FP_SOFT_POWER_DOWN |
2281 FP_SOFT_SUSPEND))) {
2282 CALLB_CPR_SAFE_BEGIN(&port->fp_cpr_info);
2283 cv_wait(&port->fp_cv, &port->fp_mutex);
2284 CALLB_CPR_SAFE_END(&port->fp_cpr_info, &port->fp_mutex);
2285 }
2286
2287 /*
2288 * OK, we've just been woken up, so retrieve the next entry
2289 * from the head of the job queue for this local port.
2290 */
2291 job = fctl_deque_job(port);
2292
2293 /*
2294 * Handle all the fp driver's supported job codes here
2295 * in this big honkin' switch.
2296 */
2297 switch (job->job_code) {
2298 case JOB_PORT_SHUTDOWN:
2299 /*
2300 * fp_port_shutdown() is only called from here. This
2301 * will prepare the local port instance (softstate)
2302 * for detaching. This cancels timeout callbacks,
2303 * executes LOGOs with remote ports, cleans up tables,
2304 * and deallocates data structs.
2305 */
2306 fp_port_shutdown(port, job);
2307
2308 /*
2309 * This will exit the job thread.
2310 */
2311 #ifndef __lock_lint
2312 CALLB_CPR_EXIT(&(port->fp_cpr_info));
2313 #else
2314 mutex_exit(&port->fp_mutex);
2315 #endif
2316 fctl_jobdone(job);
2317 thread_exit();
2318
2319 /* NOTREACHED */
2320
2321 case JOB_ATTACH_ULP: {
2322 /*
2323 * This job is spawned in response to a ULP calling
2324 * fc_ulp_add().
2325 */
2326
2327 boolean_t do_attach_ulps = B_TRUE;
2328
2329 /*
2330 * If fp is detaching, we don't want to call
2331 * fp_startup_done as this asynchronous
2332 * notification may interfere with the re-attach.
2333 */
2334
2335 if (port->fp_soft_state & (FP_DETACH_INPROGRESS |
2336 FP_SOFT_IN_DETACH | FP_DETACH_FAILED)) {
2337 do_attach_ulps = B_FALSE;
2338 } else {
2339 /*
2340 * We are going to force the transport
2341 * to attach to the ULPs, so set
2342 * fp_ulp_attach. This will keep any
2343 * potential detach from occurring until
2344 * we are done.
2345 */
2346 port->fp_ulp_attach = 1;
2347 }
2348
2349 mutex_exit(&port->fp_mutex);
2350
2351 /*
2352 * NOTE: Since we just dropped the mutex, there is now
2353 * a race window where the fp_soft_state check above
2354 * could change here. This race is covered because an
2355 * additional check was added in the functions hidden
2356 * under fp_startup_done().
2357 */
2358 if (do_attach_ulps == B_TRUE) {
2359 /*
2360 				 * This goes through a bit of a convoluted call
2361 * chain before spawning off a DDI taskq
2362 * request to perform the actual attach
2363 * operations. Blocking can occur at a number
2364 * of points.
2365 */
2366 fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
2367 }
2368 job->job_result = FC_SUCCESS;
2369 fctl_jobdone(job);
2370 break;
2371 }
2372
2373 case JOB_ULP_NOTIFY: {
2374 /*
2375 * Pass state change notifications up to any/all
2376 * registered ULPs.
2377 */
2378 uint32_t statec;
2379
2380 statec = job->job_ulp_listlen;
2381 if (statec == FC_STATE_RESET_REQUESTED) {
2382 port->fp_last_task = port->fp_task;
2383 port->fp_task = FP_TASK_OFFLINE;
2384 fp_port_offline(port, 0);
2385 port->fp_task = port->fp_last_task;
2386 port->fp_last_task = FP_TASK_IDLE;
2387 }
2388
2389 if (--port->fp_statec_busy == 0) {
2390 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
2391 }
2392
2393 mutex_exit(&port->fp_mutex);
2394
2395 job->job_result = fp_ulp_notify(port, statec, KM_SLEEP);
2396 fctl_jobdone(job);
2397 break;
2398 }
2399
2400 case JOB_PLOGI_ONE:
2401 /*
2402 * Issue a PLOGI to a single remote port. Multiple
2403 * PLOGIs to different remote ports may occur in
2404 * parallel.
2405 * This can create the fc_remote_port_t if it does not
2406 * already exist.
2407 */
2408
2409 mutex_exit(&port->fp_mutex);
2410 d_id = (uint32_t *)job->job_private;
2411 pd = fctl_get_remote_port_by_did(port, *d_id);
2412
2413 if (pd) {
2414 mutex_enter(&pd->pd_mutex);
2415 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
2416 pd->pd_login_count++;
2417 mutex_exit(&pd->pd_mutex);
2418 job->job_result = FC_SUCCESS;
2419 fctl_jobdone(job);
2420 break;
2421 }
2422 mutex_exit(&pd->pd_mutex);
2423 } else {
2424 mutex_enter(&port->fp_mutex);
2425 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2426 mutex_exit(&port->fp_mutex);
2427 pd = fp_create_remote_port_by_ns(port,
2428 *d_id, KM_SLEEP);
2429 if (pd == NULL) {
2430 job->job_result = FC_FAILURE;
2431 fctl_jobdone(job);
2432 break;
2433 }
2434 } else {
2435 mutex_exit(&port->fp_mutex);
2436 }
2437 }
2438
2439 job->job_flags |= JOB_TYPE_FP_ASYNC;
2440 job->job_counter = 1;
2441
2442 rval = fp_port_login(port, *d_id, job,
2443 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL);
2444
2445 if (rval != FC_SUCCESS) {
2446 job->job_result = rval;
2447 fctl_jobdone(job);
2448 }
2449 break;
2450
2451 case JOB_LOGO_ONE: {
2452 /*
2453 * Issue a PLOGO to a single remote port. Multiple
2454 * PLOGOs to different remote ports may occur in
2455 * parallel.
2456 */
2457 fc_remote_port_t *pd;
2458
2459 #ifndef __lock_lint
2460 ASSERT(job->job_counter > 0);
2461 #endif
2462
2463 pd = (fc_remote_port_t *)job->job_ulp_pkts;
2464
2465 mutex_enter(&pd->pd_mutex);
2466 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
2467 mutex_exit(&pd->pd_mutex);
2468 job->job_result = FC_LOGINREQ;
2469 mutex_exit(&port->fp_mutex);
2470 fctl_jobdone(job);
2471 break;
2472 }
2473 if (pd->pd_login_count > 1) {
2474 pd->pd_login_count--;
2475 mutex_exit(&pd->pd_mutex);
2476 job->job_result = FC_SUCCESS;
2477 mutex_exit(&port->fp_mutex);
2478 fctl_jobdone(job);
2479 break;
2480 }
2481 mutex_exit(&pd->pd_mutex);
2482 mutex_exit(&port->fp_mutex);
2483 job->job_flags |= JOB_TYPE_FP_ASYNC;
2484 (void) fp_logout(port, pd, job);
2485 break;
2486 }
2487
2488 case JOB_FCIO_LOGIN:
2489 /*
2490 * PLOGI initiated at ioctl request.
2491 */
2492 mutex_exit(&port->fp_mutex);
2493 job->job_result =
2494 fp_fcio_login(port, job->job_private, job);
2495 fctl_jobdone(job);
2496 break;
2497
2498 case JOB_FCIO_LOGOUT:
2499 /*
2500 * PLOGO initiated at ioctl request.
2501 */
2502 mutex_exit(&port->fp_mutex);
2503 job->job_result =
2504 fp_fcio_logout(port, job->job_private, job);
2505 fctl_jobdone(job);
2506 break;
2507
2508 case JOB_PORT_GETMAP:
2509 case JOB_PORT_GETMAP_PLOGI_ALL: {
2510 port->fp_last_task = port->fp_task;
2511 port->fp_task = FP_TASK_GETMAP;
2512
2513 switch (port->fp_topology) {
2514 case FC_TOP_PRIVATE_LOOP:
2515 job->job_counter = 1;
2516
2517 fp_get_loopmap(port, job);
2518 mutex_exit(&port->fp_mutex);
2519 fp_jobwait(job);
2520 fctl_fillout_map(port,
2521 (fc_portmap_t **)job->job_private,
2522 (uint32_t *)job->job_arg, 1, 0, 0);
2523 fctl_jobdone(job);
2524 mutex_enter(&port->fp_mutex);
2525 break;
2526
2527 case FC_TOP_PUBLIC_LOOP:
2528 case FC_TOP_FABRIC:
2529 mutex_exit(&port->fp_mutex);
2530 job->job_counter = 1;
2531
2532 job->job_result = fp_ns_getmap(port,
2533 job, (fc_portmap_t **)job->job_private,
2534 (uint32_t *)job->job_arg,
2535 FCTL_GAN_START_ID);
2536 fctl_jobdone(job);
2537 mutex_enter(&port->fp_mutex);
2538 break;
2539
2540 case FC_TOP_PT_PT:
2541 mutex_exit(&port->fp_mutex);
2542 fctl_fillout_map(port,
2543 (fc_portmap_t **)job->job_private,
2544 (uint32_t *)job->job_arg, 1, 0, 0);
2545 fctl_jobdone(job);
2546 mutex_enter(&port->fp_mutex);
2547 break;
2548
2549 default:
2550 mutex_exit(&port->fp_mutex);
2551 fctl_jobdone(job);
2552 mutex_enter(&port->fp_mutex);
2553 break;
2554 }
2555 port->fp_task = port->fp_last_task;
2556 port->fp_last_task = FP_TASK_IDLE;
2557 mutex_exit(&port->fp_mutex);
2558 break;
2559 }
2560
2561 case JOB_PORT_OFFLINE: {
2562 fp_log_port_event(port, ESC_SUNFC_PORT_OFFLINE);
2563
2564 port->fp_last_task = port->fp_task;
2565 port->fp_task = FP_TASK_OFFLINE;
2566
2567 if (port->fp_statec_busy > 2) {
2568 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
2569 fp_port_offline(port, 0);
2570 if (--port->fp_statec_busy == 0) {
2571 port->fp_soft_state &=
2572 ~FP_SOFT_IN_STATEC_CB;
2573 }
2574 } else {
2575 fp_port_offline(port, 1);
2576 }
2577
2578 port->fp_task = port->fp_last_task;
2579 port->fp_last_task = FP_TASK_IDLE;
2580
2581 mutex_exit(&port->fp_mutex);
2582
2583 fctl_jobdone(job);
2584 break;
2585 }
2586
2587 case JOB_PORT_STARTUP: {
2588 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) {
2589 if (port->fp_statec_busy > 1) {
2590 mutex_exit(&port->fp_mutex);
2591 break;
2592 }
2593 mutex_exit(&port->fp_mutex);
2594
2595 FP_TRACE(FP_NHEAD2(9, rval),
2596 "Topology discovery failed");
2597 break;
2598 }
2599
2600 /*
2601 * Attempt building device handles in case
2602 * of private Loop.
2603 */
2604 if (port->fp_topology == FC_TOP_PRIVATE_LOOP) {
2605 job->job_counter = 1;
2606
2607 fp_get_loopmap(port, job);
2608 mutex_exit(&port->fp_mutex);
2609 fp_jobwait(job);
2610 mutex_enter(&port->fp_mutex);
2611 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) {
2612 ASSERT(port->fp_total_devices == 0);
2613 port->fp_total_devices =
2614 port->fp_dev_count;
2615 }
2616 } else if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2617 /*
2618 * Hack to avoid state changes going up early
2619 */
2620 port->fp_statec_busy++;
2621 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
2622
2623 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
2624 fp_fabric_online(port, job);
2625 job->job_flags &= ~JOB_CANCEL_ULP_NOTIFICATION;
2626 }
2627 mutex_exit(&port->fp_mutex);
2628 fctl_jobdone(job);
2629 break;
2630 }
2631
2632 case JOB_PORT_ONLINE: {
2633 char *newtop;
2634 char *oldtop;
2635 uint32_t old_top;
2636
2637 fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE);
2638
2639 /*
2640 * Bail out early if there are a lot of
2641 * state changes in the pipeline
2642 */
2643 if (port->fp_statec_busy > 1) {
2644 --port->fp_statec_busy;
2645 mutex_exit(&port->fp_mutex);
2646 fctl_jobdone(job);
2647 break;
2648 }
2649
2650 switch (old_top = port->fp_topology) {
2651 case FC_TOP_PRIVATE_LOOP:
2652 oldtop = "Private Loop";
2653 break;
2654
2655 case FC_TOP_PUBLIC_LOOP:
2656 oldtop = "Public Loop";
2657 break;
2658
2659 case FC_TOP_PT_PT:
2660 oldtop = "Point to Point";
2661 break;
2662
2663 case FC_TOP_FABRIC:
2664 oldtop = "Fabric";
2665 break;
2666
2667 default:
2668 oldtop = NULL;
2669 break;
2670 }
2671
2672 port->fp_last_task = port->fp_task;
2673 port->fp_task = FP_TASK_ONLINE;
2674
2675 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) {
2676
2677 port->fp_task = port->fp_last_task;
2678 port->fp_last_task = FP_TASK_IDLE;
2679
2680 if (port->fp_statec_busy > 1) {
2681 --port->fp_statec_busy;
2682 mutex_exit(&port->fp_mutex);
2683 break;
2684 }
2685
2686 port->fp_state = FC_STATE_OFFLINE;
2687
2688 FP_TRACE(FP_NHEAD2(9, rval),
2689 "Topology discovery failed");
2690
2691 if (--port->fp_statec_busy == 0) {
2692 port->fp_soft_state &=
2693 ~FP_SOFT_IN_STATEC_CB;
2694 }
2695
2696 if (port->fp_offline_tid == NULL) {
2697 port->fp_offline_tid =
2698 timeout(fp_offline_timeout,
2699 (caddr_t)port, fp_offline_ticks);
2700 }
2701
2702 mutex_exit(&port->fp_mutex);
2703 break;
2704 }
2705
2706 switch (port->fp_topology) {
2707 case FC_TOP_PRIVATE_LOOP:
2708 newtop = "Private Loop";
2709 break;
2710
2711 case FC_TOP_PUBLIC_LOOP:
2712 newtop = "Public Loop";
2713 break;
2714
2715 case FC_TOP_PT_PT:
2716 newtop = "Point to Point";
2717 break;
2718
2719 case FC_TOP_FABRIC:
2720 newtop = "Fabric";
2721 break;
2722
2723 default:
2724 newtop = NULL;
2725 break;
2726 }
2727
2728 if (oldtop && newtop && strcmp(oldtop, newtop)) {
2729 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
2730 "Change in FC Topology old = %s new = %s",
2731 oldtop, newtop);
2732 }
2733
2734 switch (port->fp_topology) {
2735 case FC_TOP_PRIVATE_LOOP: {
2736 int orphan = (old_top == FC_TOP_FABRIC ||
2737 old_top == FC_TOP_PUBLIC_LOOP) ? 1 : 0;
2738
2739 mutex_exit(&port->fp_mutex);
2740 fp_loop_online(port, job, orphan);
2741 break;
2742 }
2743
2744 case FC_TOP_PUBLIC_LOOP:
2745 /* FALLTHROUGH */
2746 case FC_TOP_FABRIC:
2747 fp_fabric_online(port, job);
2748 mutex_exit(&port->fp_mutex);
2749 break;
2750
2751 case FC_TOP_PT_PT:
2752 fp_p2p_online(port, job);
2753 mutex_exit(&port->fp_mutex);
2754 break;
2755
2756 default:
2757 if (--port->fp_statec_busy != 0) {
2758 /*
2759 				 * Wait and watch what the next
2760 				 * state transition does.
2761 */
2762 mutex_exit(&port->fp_mutex);
2763 break;
2764 }
2765
2766 FP_TRACE(FP_NHEAD2(9, 0),
2767 "Topology Unknown, Offlining the port..");
2768
2769 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
2770 port->fp_state = FC_STATE_OFFLINE;
2771
2772 if (port->fp_offline_tid == NULL) {
2773 port->fp_offline_tid =
2774 timeout(fp_offline_timeout,
2775 (caddr_t)port, fp_offline_ticks);
2776 }
2777 mutex_exit(&port->fp_mutex);
2778 break;
2779 }
2780
2781 mutex_enter(&port->fp_mutex);
2782
2783 port->fp_task = port->fp_last_task;
2784 port->fp_last_task = FP_TASK_IDLE;
2785
2786 mutex_exit(&port->fp_mutex);
2787
2788 fctl_jobdone(job);
2789 break;
2790 }
2791
2792 case JOB_PLOGI_GROUP: {
2793 mutex_exit(&port->fp_mutex);
2794 fp_plogi_group(port, job);
2795 break;
2796 }
2797
2798 case JOB_UNSOL_REQUEST: {
2799 mutex_exit(&port->fp_mutex);
2800 fp_handle_unsol_buf(port,
2801 (fc_unsol_buf_t *)job->job_private, job);
2802 fctl_dealloc_job(job);
2803 break;
2804 }
2805
2806 case JOB_NS_CMD: {
2807 fctl_ns_req_t *ns_cmd;
2808
2809 mutex_exit(&port->fp_mutex);
2810
2811 job->job_flags |= JOB_TYPE_FP_ASYNC;
2812 ns_cmd = (fctl_ns_req_t *)job->job_private;
2813 if (ns_cmd->ns_cmd_code < NS_GA_NXT ||
2814 ns_cmd->ns_cmd_code > NS_DA_ID) {
2815 job->job_result = FC_BADCMD;
2816 fctl_jobdone(job);
2817 break;
2818 }
2819
2820 if (FC_IS_CMD_A_REG(ns_cmd->ns_cmd_code)) {
2821 if (ns_cmd->ns_pd != NULL) {
2822 job->job_result = FC_BADOBJECT;
2823 fctl_jobdone(job);
2824 break;
2825 }
2826
2827 job->job_counter = 1;
2828
2829 rval = fp_ns_reg(port, ns_cmd->ns_pd,
2830 ns_cmd->ns_cmd_code, job, 0, KM_SLEEP);
2831
2832 if (rval != FC_SUCCESS) {
2833 job->job_result = rval;
2834 fctl_jobdone(job);
2835 }
2836 break;
2837 }
2838 job->job_result = FC_SUCCESS;
2839 job->job_counter = 1;
2840
2841 rval = fp_ns_query(port, ns_cmd, job, 0, KM_SLEEP);
2842 if (rval != FC_SUCCESS) {
2843 fctl_jobdone(job);
2844 }
2845 break;
2846 }
2847
2848 case JOB_LINK_RESET: {
2849 la_wwn_t *pwwn;
2850 uint32_t topology;
2851
2852 pwwn = (la_wwn_t *)job->job_private;
2853 ASSERT(pwwn != NULL);
2854
2855 topology = port->fp_topology;
2856 mutex_exit(&port->fp_mutex);
2857
2858 if (fctl_is_wwn_zero(pwwn) == FC_SUCCESS ||
2859 topology == FC_TOP_PRIVATE_LOOP) {
2860 job->job_flags |= JOB_TYPE_FP_ASYNC;
2861 rval = port->fp_fca_tran->fca_reset(
2862 port->fp_fca_handle, FC_FCA_LINK_RESET);
2863 job->job_result = rval;
2864 fp_jobdone(job);
2865 } else {
2866 ASSERT((job->job_flags &
2867 JOB_TYPE_FP_ASYNC) == 0);
2868
2869 if (FC_IS_TOP_SWITCH(topology)) {
2870 rval = fp_remote_lip(port, pwwn,
2871 KM_SLEEP, job);
2872 } else {
2873 rval = FC_FAILURE;
2874 }
2875 if (rval != FC_SUCCESS) {
2876 job->job_result = rval;
2877 }
2878 fctl_jobdone(job);
2879 }
2880 break;
2881 }
2882
2883 default:
2884 mutex_exit(&port->fp_mutex);
2885 job->job_result = FC_BADCMD;
2886 fctl_jobdone(job);
2887 break;
2888 }
2889 }
2890 /* NOTREACHED */
2891 }
2892
2893
2894 /*
2895 * Perform FC port bring up initialization
2896 */
2897 static int
2898 fp_port_startup(fc_local_port_t *port, job_request_t *job)
2899 {
2900 int rval;
2901 uint32_t state;
2902 uint32_t src_id;
2903 fc_lilpmap_t *lilp_map;
2904
2905 ASSERT(MUTEX_HELD(&port->fp_mutex));
2906 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
2907
2908 FP_DTRACE(FP_NHEAD1(2, 0), "Entering fp_port_startup;"
2909 " port=%p, job=%p", port, job);
2910
2911 port->fp_topology = FC_TOP_UNKNOWN;
2912 port->fp_port_id.port_id = 0;
2913 state = FC_PORT_STATE_MASK(port->fp_state);
2914
2915 if (state == FC_STATE_OFFLINE) {
2916 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
2917 job->job_result = FC_OFFLINE;
2918 mutex_exit(&port->fp_mutex);
2919 fctl_jobdone(job);
2920 mutex_enter(&port->fp_mutex);
2921 return (FC_OFFLINE);
2922 }
2923
2924 if (state == FC_STATE_LOOP) {
2925 port->fp_port_type.port_type = FC_NS_PORT_NL;
2926 mutex_exit(&port->fp_mutex);
2927
2928 lilp_map = &port->fp_lilp_map;
2929 if ((rval = fp_get_lilpmap(port, lilp_map)) != FC_SUCCESS) {
2930 job->job_result = FC_FAILURE;
2931 fctl_jobdone(job);
2932
2933 FP_TRACE(FP_NHEAD1(9, rval),
2934 "LILP map Invalid or not present");
2935 mutex_enter(&port->fp_mutex);
2936 return (FC_FAILURE);
2937 }
2938
2939 if (lilp_map->lilp_length == 0) {
2940 job->job_result = FC_NO_MAP;
2941 fctl_jobdone(job);
2942 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
2943 "LILP map length zero");
2944 mutex_enter(&port->fp_mutex);
2945 return (FC_NO_MAP);
2946 }
2947 src_id = lilp_map->lilp_myalpa & 0xFF;
2948 } else {
2949 fc_remote_port_t *pd;
2950 fc_fca_pm_t pm;
2951 fc_fca_p2p_info_t p2p_info;
2952 int pd_recepient;
2953
2954 /*
2955 * Get P2P remote port info if possible
2956 */
2957 bzero((caddr_t)&pm, sizeof (pm));
2958
2959 pm.pm_cmd_flags = FC_FCA_PM_READ;
2960 pm.pm_cmd_code = FC_PORT_GET_P2P_INFO;
2961 pm.pm_data_len = sizeof (fc_fca_p2p_info_t);
2962 pm.pm_data_buf = (caddr_t)&p2p_info;
2963
2964 rval = port->fp_fca_tran->fca_port_manage(
2965 port->fp_fca_handle, &pm);
2966
2967 if (rval == FC_SUCCESS) {
2968 port->fp_port_id.port_id = p2p_info.fca_d_id;
2969 port->fp_port_type.port_type = FC_NS_PORT_N;
2970 port->fp_topology = FC_TOP_PT_PT;
2971 port->fp_total_devices = 1;
2972 pd_recepient = fctl_wwn_cmp(
2973 &port->fp_service_params.nport_ww_name,
2974 &p2p_info.pwwn) < 0 ?
2975 PD_PLOGI_RECEPIENT : PD_PLOGI_INITIATOR;
2976 mutex_exit(&port->fp_mutex);
2977 pd = fctl_create_remote_port(port,
2978 &p2p_info.nwwn,
2979 &p2p_info.pwwn,
2980 p2p_info.d_id,
2981 pd_recepient, KM_NOSLEEP);
2982 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup;"
2983 " P2P port=%p pd=%p fp %x pd %x", port, pd,
2984 port->fp_port_id.port_id, p2p_info.d_id);
2985 mutex_enter(&port->fp_mutex);
2986 return (FC_SUCCESS);
2987 }
2988 port->fp_port_type.port_type = FC_NS_PORT_N;
2989 mutex_exit(&port->fp_mutex);
2990 src_id = 0;
2991 }
2992
2993 job->job_counter = 1;
2994 job->job_result = FC_SUCCESS;
2995
2996 if ((rval = fp_fabric_login(port, src_id, job, FP_CMD_PLOGI_DONT_CARE,
2997 KM_SLEEP)) != FC_SUCCESS) {
2998 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
2999 job->job_result = FC_FAILURE;
3000 fctl_jobdone(job);
3001
3002 mutex_enter(&port->fp_mutex);
3003 if (port->fp_statec_busy <= 1) {
3004 mutex_exit(&port->fp_mutex);
3005 fp_printf(port, CE_NOTE, FP_LOG_ONLY, rval, NULL,
3006 "Couldn't transport FLOGI");
3007 mutex_enter(&port->fp_mutex);
3008 }
3009 return (FC_FAILURE);
3010 }
3011
3012 fp_jobwait(job);
3013
3014 mutex_enter(&port->fp_mutex);
3015 if (job->job_result == FC_SUCCESS) {
3016 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
3017 mutex_exit(&port->fp_mutex);
3018 fp_ns_init(port, job, KM_SLEEP);
3019 mutex_enter(&port->fp_mutex);
3020 }
3021 } else {
3022 if (state == FC_STATE_LOOP) {
3023 port->fp_topology = FC_TOP_PRIVATE_LOOP;
3024 port->fp_port_id.port_id =
3025 port->fp_lilp_map.lilp_myalpa & 0xFF;
3026 }
3027 }
3028
3029 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup; port=%p, job=%p",
3030 port, job);
3031
3032 return (FC_SUCCESS);
3033 }
3034
3035
3036 /*
3037 * Perform ULP invocations following FC port startup
3038 */
3039 /* ARGSUSED */
3040 static void
3041 fp_startup_done(opaque_t arg, uchar_t result)
3042 {
3043 fc_local_port_t *port = arg;
3044
3045 fp_attach_ulps(port, FC_CMD_ATTACH);
3046
3047 FP_DTRACE(FP_NHEAD1(2, 0), "fp_startup almost complete; port=%p", port);
3048 }
3049
3050
3051 /*
3052 * Perform ULP port attach
3053 */
3054 static void
3055 fp_ulp_port_attach(void *arg)
3056 {
3057 fp_soft_attach_t *att = (fp_soft_attach_t *)arg;
3058 fc_local_port_t *port = att->att_port;
3059
3060 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3061 " ULPs begin; port=%p, cmd=%x", port, att->att_cmd);
3062
3063 fctl_attach_ulps(att->att_port, att->att_cmd, &modlinkage);
3064
3065 if (att->att_need_pm_idle == B_TRUE) {
3066 fctl_idle_port(port);
3067 }
3068
3069 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3070 " ULPs end; port=%p, cmd=%x", port, att->att_cmd);
3071
3072 mutex_enter(&att->att_port->fp_mutex);
3073 att->att_port->fp_ulp_attach = 0;
3074
3075 port->fp_task = port->fp_last_task;
3076 port->fp_last_task = FP_TASK_IDLE;
3077
3078 cv_signal(&att->att_port->fp_attach_cv);
3079
3080 mutex_exit(&att->att_port->fp_mutex);
3081
3082 kmem_free(att, sizeof (fp_soft_attach_t));
3083 }
3084
3085 /*
3086 * Entry point to funnel all requests down to FCAs
3087 */
3088 static int
3089 fp_sendcmd(fc_local_port_t *port, fp_cmd_t *cmd, opaque_t fca_handle)
3090 {
3091 int rval;
3092
3093 mutex_enter(&port->fp_mutex);
3094 if (port->fp_statec_busy > 1 || (cmd->cmd_ulp_pkt != NULL &&
3095 (port->fp_statec_busy || FC_PORT_STATE_MASK(port->fp_state) ==
3096 FC_STATE_OFFLINE))) {
3097 /*
3098 		 * This means there is more than one state change pending
3099 		 * at this point in time - since they are processed
3100 		 * serially, processing of the current one should be
3101 		 * failed so that we can move on to the next one
3102 */
3103 cmd->cmd_pkt.pkt_state = FC_PKT_ELS_IN_PROGRESS;
3104 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
3105 if (cmd->cmd_job) {
3106 /*
3107 * A state change that is going to be invalidated
3108 * by another one already in the port driver's queue
3109 * need not go up to all ULPs. This will minimize
3110 * needless processing and ripples in ULP modules
3111 */
3112 cmd->cmd_job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
3113 }
3114 mutex_exit(&port->fp_mutex);
3115 return (FC_STATEC_BUSY);
3116 }
3117
3118 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
3119 cmd->cmd_pkt.pkt_state = FC_PKT_PORT_OFFLINE;
3120 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
3121 mutex_exit(&port->fp_mutex);
3122
3123 return (FC_OFFLINE);
3124 }
3125 mutex_exit(&port->fp_mutex);
3126
3127 rval = cmd->cmd_transport(fca_handle, &cmd->cmd_pkt);
3128 if (rval != FC_SUCCESS) {
3129 if (rval == FC_TRAN_BUSY) {
3130 cmd->cmd_retry_interval = fp_retry_delay;
3131 rval = fp_retry_cmd(&cmd->cmd_pkt);
3132 if (rval == FC_FAILURE) {
3133 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_BSY;
3134 }
3135 }
3136 } else {
3137 mutex_enter(&port->fp_mutex);
3138 port->fp_out_fpcmds++;
3139 mutex_exit(&port->fp_mutex);
3140 }
3141
3142 return (rval);
3143 }
3144
3145
3146 /*
3147  * Each time a timeout kicks in, walk the wait queue and decrement
3148  * the retry_interval; when the retry_interval becomes less than
3149  * or equal to zero, re-transport the command. If the re-transport
3150  * fails with BUSY, enqueue the command in the wait queue again.
3151 *
3152 * In order to prevent looping forever because of commands enqueued
3153 * from within this function itself, save the current tail pointer
3154 * (in cur_tail) and exit the loop after serving this command.
3155 */
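/*
 * For example (illustrative, not normative): a command queued with
 * cmd_retry_interval set to fp_retry_delay is left on the queue by each
 * fp_resendcmd() pass (one pass every fp_retry_ticks) until the repeated
 * subtraction of fp_retry_ticker drives the interval to zero or below,
 * at which point the command is re-transported.
 */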
3156 static void
3157 fp_resendcmd(void *port_handle)
3158 {
3159 int rval;
3160 fc_local_port_t *port;
3161 fp_cmd_t *cmd;
3162 fp_cmd_t *cur_tail;
3163
3164 port = port_handle;
3165 mutex_enter(&port->fp_mutex);
3166 cur_tail = port->fp_wait_tail;
3167 mutex_exit(&port->fp_mutex);
3168
3169 while ((cmd = fp_deque_cmd(port)) != NULL) {
3170 cmd->cmd_retry_interval -= fp_retry_ticker;
3171 /* Check if we are detaching */
3172 if (port->fp_soft_state &
3173 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS)) {
3174 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
3175 cmd->cmd_pkt.pkt_reason = 0;
3176 fp_iodone(cmd);
3177 } else if (cmd->cmd_retry_interval <= 0) {
3178 rval = cmd->cmd_transport(port->fp_fca_handle,
3179 &cmd->cmd_pkt);
3180
3181 if (rval != FC_SUCCESS) {
3182 if (cmd->cmd_pkt.pkt_state == FC_PKT_TRAN_BSY) {
3183 if (--cmd->cmd_retry_count) {
3184 fp_enque_cmd(port, cmd);
3185 if (cmd == cur_tail) {
3186 break;
3187 }
3188 continue;
3189 }
3190 cmd->cmd_pkt.pkt_state =
3191 FC_PKT_TRAN_BSY;
3192 } else {
3193 cmd->cmd_pkt.pkt_state =
3194 FC_PKT_TRAN_ERROR;
3195 }
3196 cmd->cmd_pkt.pkt_reason = 0;
3197 fp_iodone(cmd);
3198 } else {
3199 mutex_enter(&port->fp_mutex);
3200 port->fp_out_fpcmds++;
3201 mutex_exit(&port->fp_mutex);
3202 }
3203 } else {
3204 fp_enque_cmd(port, cmd);
3205 }
3206
3207 if (cmd == cur_tail) {
3208 break;
3209 }
3210 }
3211
3212 mutex_enter(&port->fp_mutex);
3213 if (port->fp_wait_head) {
3214 timeout_id_t tid;
3215
3216 mutex_exit(&port->fp_mutex);
3217 tid = timeout(fp_resendcmd, (caddr_t)port,
3218 fp_retry_ticks);
3219 mutex_enter(&port->fp_mutex);
3220 port->fp_wait_tid = tid;
3221 } else {
3222 port->fp_wait_tid = NULL;
3223 }
3224 mutex_exit(&port->fp_mutex);
3225 }
3226
3227
3228 /*
3229 * Handle Local, Fabric, N_Port, Transport (whatever that means) BUSY here.
3230 *
3231  * Yes, as you can see below, cmd_retry_count is used here too. That means
3232  * fewer retries are left for BUSY if there were transport failures (transport
3233  * failure means fca_transport failure). The goal is not to exceed the overall
3234  * retry count set in cmd_retry_count (whatever the reason for a retry may be).
3235 *
3236 * Return Values:
3237 * FC_SUCCESS
3238 * FC_FAILURE
3239 */
3240 static int
3241 fp_retry_cmd(fc_packet_t *pkt)
3242 {
3243 fp_cmd_t *cmd;
3244
3245 cmd = pkt->pkt_ulp_private;
3246
3247 if (--cmd->cmd_retry_count) {
3248 fp_enque_cmd(cmd->cmd_port, cmd);
3249 return (FC_SUCCESS);
3250 } else {
3251 return (FC_FAILURE);
3252 }
3253 }
3254
3255
3256 /*
3257 * Queue up FC packet for deferred retry
3258 */
3259 static void
3260 fp_enque_cmd(fc_local_port_t *port, fp_cmd_t *cmd)
3261 {
3262 timeout_id_t tid;
3263
3264 ASSERT(!MUTEX_HELD(&port->fp_mutex));
3265
3266 #ifdef DEBUG
3267 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, &cmd->cmd_pkt,
3268 "Retrying ELS for %x", cmd->cmd_pkt.pkt_cmd_fhdr.d_id);
3269 #endif
3270
3271 mutex_enter(&port->fp_mutex);
3272 if (port->fp_wait_tail) {
3273 port->fp_wait_tail->cmd_next = cmd;
3274 port->fp_wait_tail = cmd;
3275 } else {
3276 ASSERT(port->fp_wait_head == NULL);
3277 port->fp_wait_head = port->fp_wait_tail = cmd;
3278 if (port->fp_wait_tid == NULL) {
3279 mutex_exit(&port->fp_mutex);
3280 tid = timeout(fp_resendcmd, (caddr_t)port,
3281 fp_retry_ticks);
3282 mutex_enter(&port->fp_mutex);
3283 port->fp_wait_tid = tid;
3284 }
3285 }
3286 mutex_exit(&port->fp_mutex);
3287 }
3288
3289
3290 /*
3291 * Handle all RJT codes
3292 */
3293 static int
3294 fp_handle_reject(fc_packet_t *pkt)
3295 {
3296 int rval = FC_FAILURE;
3297 uchar_t next_class;
3298 fp_cmd_t *cmd;
3299 fc_local_port_t *port;
3300
3301 cmd = pkt->pkt_ulp_private;
3302 port = cmd->cmd_port;
3303
3304 switch (pkt->pkt_state) {
3305 case FC_PKT_FABRIC_RJT:
3306 case FC_PKT_NPORT_RJT:
3307 if (pkt->pkt_reason == FC_REASON_CLASS_NOT_SUPP) {
3308 next_class = fp_get_nextclass(cmd->cmd_port,
3309 FC_TRAN_CLASS(pkt->pkt_tran_flags));
3310
3311 if (next_class == FC_TRAN_CLASS_INVALID) {
3312 return (rval);
3313 }
3314 pkt->pkt_tran_flags = FC_TRAN_INTR | next_class;
3315 pkt->pkt_tran_type = FC_PKT_EXCHANGE;
3316
3317 rval = fp_sendcmd(cmd->cmd_port, cmd,
3318 cmd->cmd_port->fp_fca_handle);
3319
3320 if (rval != FC_SUCCESS) {
3321 pkt->pkt_state = FC_PKT_TRAN_ERROR;
3322 }
3323 }
3324 break;
3325
3326 case FC_PKT_LS_RJT:
3327 case FC_PKT_BA_RJT:
3328 if ((pkt->pkt_reason == FC_REASON_LOGICAL_ERROR) ||
3329 (pkt->pkt_reason == FC_REASON_LOGICAL_BSY)) {
3330 cmd->cmd_retry_interval = fp_retry_delay;
3331 rval = fp_retry_cmd(pkt);
3332 }
3333 break;
3334
3335 case FC_PKT_FS_RJT:
3336 if ((pkt->pkt_reason == FC_REASON_FS_LOGICAL_BUSY) ||
3337 ((pkt->pkt_reason == FC_REASON_FS_CMD_UNABLE) &&
3338 (pkt->pkt_expln == 0x00))) {
3339 cmd->cmd_retry_interval = fp_retry_delay;
3340 rval = fp_retry_cmd(pkt);
3341 }
3342 break;
3343
3344 case FC_PKT_LOCAL_RJT:
3345 if (pkt->pkt_reason == FC_REASON_QFULL) {
3346 cmd->cmd_retry_interval = fp_retry_delay;
3347 rval = fp_retry_cmd(pkt);
3348 }
3349 break;
3350
3351 default:
3352 FP_TRACE(FP_NHEAD1(1, 0),
3353 "fp_handle_reject(): Invalid pkt_state");
3354 break;
3355 }
3356
3357 return (rval);
3358 }
3359
3360
3361 /*
3362 * Return the next class of service supported by the FCA
3363 */
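/*
 * For example, on a port whose fp_cos advertises only Class 2 and Class 3
 * service, successive calls walk FC_TRAN_CLASS_INVALID -> FC_TRAN_CLASS2 ->
 * FC_TRAN_CLASS3 -> FC_TRAN_CLASS_INVALID.
 */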
3364 static uchar_t
3365 fp_get_nextclass(fc_local_port_t *port, uchar_t cur_class)
3366 {
3367 uchar_t next_class;
3368
3369 ASSERT(!MUTEX_HELD(&port->fp_mutex));
3370
3371 switch (cur_class) {
3372 case FC_TRAN_CLASS_INVALID:
3373 if (port->fp_cos & FC_NS_CLASS1) {
3374 next_class = FC_TRAN_CLASS1;
3375 break;
3376 }
3377 /* FALLTHROUGH */
3378
3379 case FC_TRAN_CLASS1:
3380 if (port->fp_cos & FC_NS_CLASS2) {
3381 next_class = FC_TRAN_CLASS2;
3382 break;
3383 }
3384 /* FALLTHROUGH */
3385
3386 case FC_TRAN_CLASS2:
3387 if (port->fp_cos & FC_NS_CLASS3) {
3388 next_class = FC_TRAN_CLASS3;
3389 break;
3390 }
3391 /* FALLTHROUGH */
3392
3393 case FC_TRAN_CLASS3:
3394 default:
3395 next_class = FC_TRAN_CLASS_INVALID;
3396 break;
3397 }
3398
3399 return (next_class);
3400 }
3401
3402
3403 /*
3404 * Determine if a class of service is supported by the FCA
3405 */
3406 static int
3407 fp_is_class_supported(uint32_t cos, uchar_t tran_class)
3408 {
3409 int rval;
3410
3411 switch (tran_class) {
3412 case FC_TRAN_CLASS1:
3413 if (cos & FC_NS_CLASS1) {
3414 rval = FC_SUCCESS;
3415 } else {
3416 rval = FC_FAILURE;
3417 }
3418 break;
3419
3420 case FC_TRAN_CLASS2:
3421 if (cos & FC_NS_CLASS2) {
3422 rval = FC_SUCCESS;
3423 } else {
3424 rval = FC_FAILURE;
3425 }
3426 break;
3427
3428 case FC_TRAN_CLASS3:
3429 if (cos & FC_NS_CLASS3) {
3430 rval = FC_SUCCESS;
3431 } else {
3432 rval = FC_FAILURE;
3433 }
3434 break;
3435
3436 default:
3437 rval = FC_FAILURE;
3438 break;
3439 }
3440
3441 return (rval);
3442 }
3443
3444
3445 /*
3446 * Dequeue FC packet for retry
3447 */
3448 static fp_cmd_t *
3449 fp_deque_cmd(fc_local_port_t *port)
3450 {
3451 fp_cmd_t *cmd;
3452
3453 ASSERT(!MUTEX_HELD(&port->fp_mutex));
3454
3455 mutex_enter(&port->fp_mutex);
3456
3457 if (port->fp_wait_head == NULL) {
3458 /*
3459 * To avoid races, NULL the fp_wait_tid as
3460 * we are about to exit the timeout thread.
3461 */
3462 port->fp_wait_tid = NULL;
3463 mutex_exit(&port->fp_mutex);
3464 return (NULL);
3465 }
3466
3467 cmd = port->fp_wait_head;
3468 port->fp_wait_head = cmd->cmd_next;
3469 cmd->cmd_next = NULL;
3470
3471 if (port->fp_wait_head == NULL) {
3472 port->fp_wait_tail = NULL;
3473 }
3474 mutex_exit(&port->fp_mutex);
3475
3476 return (cmd);
3477 }
3478
3479
3480 /*
3481 * Wait for job completion
3482 */
3483 static void
3484 fp_jobwait(job_request_t *job)
3485 {
3486 sema_p(&job->job_port_sema);
3487 }
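
/*
 * Illustrative sketch (not part of this driver) of the synchronous job
 * pattern used throughout this file (see fp_port_startup() above): the
 * issuer sets job_counter to the number of outstanding commands, fires
 * them off, and then blocks in fp_jobwait() until fp_jobdone() has been
 * called that many times and posts the semaphore.
 *
 *	job->job_counter = 1;
 *	if (fp_fabric_login(port, src_id, job, FP_CMD_PLOGI_DONT_CARE,
 *	    KM_SLEEP) != FC_SUCCESS)
 *		return (FC_FAILURE);
 *	fp_jobwait(job);
 */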
3488
3489
3490 /*
3491 * Convert FC packet state to FC errno
3492 */
3493 int
3494 fp_state_to_rval(uchar_t state)
3495 {
3496 int count;
3497
3498 for (count = 0; count < sizeof (fp_xlat) /
3499 sizeof (fp_xlat[0]); count++) {
3500 if (fp_xlat[count].xlat_state == state) {
3501 return (fp_xlat[count].xlat_rval);
3502 }
3503 }
3504
3505 return (FC_FAILURE);
3506 }
3507
3508
3509 /*
3510  * For synchronous I/O requests, the caller is
3511  * expected to do fctl_jobdone() (if necessary).
3512 *
3513 * We want to preserve at least one failure in the
3514 * job_result if it happens.
3515 *
3516 */
3517 static void
3518 fp_iodone(fp_cmd_t *cmd)
3519 {
3520 fc_packet_t *ulp_pkt = cmd->cmd_ulp_pkt;
3521 job_request_t *job = cmd->cmd_job;
3522 fc_remote_port_t *pd = cmd->cmd_pkt.pkt_pd;
3523
3524 ASSERT(job != NULL);
3525 ASSERT(cmd->cmd_port != NULL);
3526 ASSERT(&cmd->cmd_pkt != NULL);
3527
3528 mutex_enter(&job->job_mutex);
3529 if (job->job_result == FC_SUCCESS) {
3530 job->job_result = fp_state_to_rval(cmd->cmd_pkt.pkt_state);
3531 }
3532 mutex_exit(&job->job_mutex);
3533
3534 if (pd) {
3535 mutex_enter(&pd->pd_mutex);
3536 pd->pd_flags = PD_IDLE;
3537 mutex_exit(&pd->pd_mutex);
3538 }
3539
3540 if (ulp_pkt) {
3541 if (pd && cmd->cmd_flags & FP_CMD_DELDEV_ON_ERROR &&
3542 FP_IS_PKT_ERROR(ulp_pkt)) {
3543 fc_local_port_t *port;
3544 fc_remote_node_t *node;
3545
3546 port = cmd->cmd_port;
3547
3548 mutex_enter(&pd->pd_mutex);
3549 pd->pd_state = PORT_DEVICE_INVALID;
3550 pd->pd_ref_count--;
3551 node = pd->pd_remote_nodep;
3552 mutex_exit(&pd->pd_mutex);
3553
3554 ASSERT(node != NULL);
3555 ASSERT(port != NULL);
3556
3557 if (fctl_destroy_remote_port(port, pd) == 0) {
3558 fctl_destroy_remote_node(node);
3559 }
3560
3561 ulp_pkt->pkt_pd = NULL;
3562 }
3563
3564 ulp_pkt->pkt_comp(ulp_pkt);
3565 }
3566
3567 fp_free_pkt(cmd);
3568 fp_jobdone(job);
3569 }
3570
3571
3572 /*
3573 * Job completion handler
3574 */
3575 static void
3576 fp_jobdone(job_request_t *job)
3577 {
3578 mutex_enter(&job->job_mutex);
3579 ASSERT(job->job_counter > 0);
3580
3581 if (--job->job_counter != 0) {
3582 mutex_exit(&job->job_mutex);
3583 return;
3584 }
3585
3586 if (job->job_ulp_pkts) {
3587 ASSERT(job->job_ulp_listlen > 0);
3588 kmem_free(job->job_ulp_pkts,
3589 sizeof (fc_packet_t *) * job->job_ulp_listlen);
3590 }
3591
3592 if (job->job_flags & JOB_TYPE_FP_ASYNC) {
3593 mutex_exit(&job->job_mutex);
3594 fctl_jobdone(job);
3595 } else {
3596 mutex_exit(&job->job_mutex);
3597 sema_v(&job->job_port_sema);
3598 }
3599 }
3600
3601
3602 /*
3603 * Try to perform shutdown of a port during a detach. No return
3604 * value since the detach should not fail because the port shutdown
3605 * failed.
3606 */
3607 static void
3608 fp_port_shutdown(fc_local_port_t *port, job_request_t *job)
3609 {
3610 int index;
3611 int count;
3612 int flags;
3613 fp_cmd_t *cmd;
3614 struct pwwn_hash *head;
3615 fc_remote_port_t *pd;
3616
3617 ASSERT(MUTEX_HELD(&port->fp_mutex));
3618
3619 job->job_result = FC_SUCCESS;
3620
3621 if (port->fp_taskq) {
3622 /*
3623 * We must release the mutex here to ensure that other
3624 * potential jobs can complete their processing. Many
3625 * also need this mutex.
3626 */
3627 mutex_exit(&port->fp_mutex);
3628 taskq_wait(port->fp_taskq);
3629 mutex_enter(&port->fp_mutex);
3630 }
3631
3632 if (port->fp_offline_tid) {
3633 timeout_id_t tid;
3634
3635 tid = port->fp_offline_tid;
3636 port->fp_offline_tid = NULL;
3637 mutex_exit(&port->fp_mutex);
3638 (void) untimeout(tid);
3639 mutex_enter(&port->fp_mutex);
3640 }
3641
3642 if (port->fp_wait_tid) {
3643 timeout_id_t tid;
3644
3645 tid = port->fp_wait_tid;
3646 port->fp_wait_tid = NULL;
3647 mutex_exit(&port->fp_mutex);
3648 (void) untimeout(tid);
3649 } else {
3650 mutex_exit(&port->fp_mutex);
3651 }
3652
3653 /*
3654 * While we cancel the timeout, let's also return the
3655 	 * outstanding requests back to the callers.
3656 */
3657 while ((cmd = fp_deque_cmd(port)) != NULL) {
3658 ASSERT(cmd->cmd_job != NULL);
3659 cmd->cmd_job->job_result = FC_OFFLINE;
3660 fp_iodone(cmd);
3661 }
3662
3663 /*
3664 * Gracefully LOGO with all the devices logged in.
3665 */
3666 mutex_enter(&port->fp_mutex);
3667
3668 for (count = index = 0; index < pwwn_table_size; index++) {
3669 head = &port->fp_pwwn_table[index];
3670 pd = head->pwwn_head;
3671 while (pd != NULL) {
3672 mutex_enter(&pd->pd_mutex);
3673 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
3674 count++;
3675 }
3676 mutex_exit(&pd->pd_mutex);
3677 pd = pd->pd_wwn_hnext;
3678 }
3679 }
3680
3681 if (job->job_flags & JOB_TYPE_FP_ASYNC) {
3682 flags = job->job_flags;
3683 job->job_flags &= ~JOB_TYPE_FP_ASYNC;
3684 } else {
3685 flags = 0;
3686 }
3687 if (count) {
3688 job->job_counter = count;
3689
3690 for (index = 0; index < pwwn_table_size; index++) {
3691 head = &port->fp_pwwn_table[index];
3692 pd = head->pwwn_head;
3693 while (pd != NULL) {
3694 mutex_enter(&pd->pd_mutex);
3695 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
3696 ASSERT(pd->pd_login_count > 0);
3697 /*
3698 * Force the counter to ONE in order
3699 * for us to really send LOGO els.
3700 */
3701 pd->pd_login_count = 1;
3702 mutex_exit(&pd->pd_mutex);
3703 mutex_exit(&port->fp_mutex);
3704 (void) fp_logout(port, pd, job);
3705 mutex_enter(&port->fp_mutex);
3706 } else {
3707 mutex_exit(&pd->pd_mutex);
3708 }
3709 pd = pd->pd_wwn_hnext;
3710 }
3711 }
3712 mutex_exit(&port->fp_mutex);
3713 fp_jobwait(job);
3714 } else {
3715 mutex_exit(&port->fp_mutex);
3716 }
3717
3718 if (job->job_result != FC_SUCCESS) {
3719 FP_TRACE(FP_NHEAD1(9, 0),
3720 "Can't logout all devices. Proceeding with"
3721 " port shutdown");
3722 job->job_result = FC_SUCCESS;
3723 }
3724
3725 fctl_destroy_all_remote_ports(port);
3726
3727 mutex_enter(&port->fp_mutex);
3728 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
3729 mutex_exit(&port->fp_mutex);
3730 fp_ns_fini(port, job);
3731 } else {
3732 mutex_exit(&port->fp_mutex);
3733 }
3734
3735 if (flags) {
3736 job->job_flags = flags;
3737 }
3738
3739 mutex_enter(&port->fp_mutex);
3740
3741 }
3742
3743
3744 /*
3745 * Build the port driver's data structures based on the AL_PA list
3746 */
3747 static void
3748 fp_get_loopmap(fc_local_port_t *port, job_request_t *job)
3749 {
3750 int rval;
3751 int flag;
3752 int count;
3753 uint32_t d_id;
3754 fc_remote_port_t *pd;
3755 fc_lilpmap_t *lilp_map;
3756
3757 ASSERT(MUTEX_HELD(&port->fp_mutex));
3758
3759 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
3760 job->job_result = FC_OFFLINE;
3761 mutex_exit(&port->fp_mutex);
3762 fp_jobdone(job);
3763 mutex_enter(&port->fp_mutex);
3764 return;
3765 }
3766
3767 if (port->fp_lilp_map.lilp_length == 0) {
3768 mutex_exit(&port->fp_mutex);
3769 job->job_result = FC_NO_MAP;
3770 fp_jobdone(job);
3771 mutex_enter(&port->fp_mutex);
3772 return;
3773 }
3774 mutex_exit(&port->fp_mutex);
3775
3776 lilp_map = &port->fp_lilp_map;
3777 job->job_counter = lilp_map->lilp_length;
3778
3779 if (job->job_code == JOB_PORT_GETMAP_PLOGI_ALL) {
3780 flag = FP_CMD_PLOGI_RETAIN;
3781 } else {
3782 flag = FP_CMD_PLOGI_DONT_CARE;
3783 }
3784
3785 for (count = 0; count < lilp_map->lilp_length; count++) {
3786 d_id = lilp_map->lilp_alpalist[count];
3787
3788 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) {
3789 fp_jobdone(job);
3790 continue;
3791 }
3792
3793 pd = fctl_get_remote_port_by_did(port, d_id);
3794 if (pd) {
3795 mutex_enter(&pd->pd_mutex);
3796 if (flag == FP_CMD_PLOGI_DONT_CARE ||
3797 pd->pd_state == PORT_DEVICE_LOGGED_IN) {
3798 mutex_exit(&pd->pd_mutex);
3799 fp_jobdone(job);
3800 continue;
3801 }
3802 mutex_exit(&pd->pd_mutex);
3803 }
3804
3805 rval = fp_port_login(port, d_id, job, flag,
3806 KM_SLEEP, pd, NULL);
3807 if (rval != FC_SUCCESS) {
3808 fp_jobdone(job);
3809 }
3810 }
3811
3812 mutex_enter(&port->fp_mutex);
3813 }
3814
3815
3816 /*
3817 * Perform loop ONLINE processing
3818 */
3819 static void
3820 fp_loop_online(fc_local_port_t *port, job_request_t *job, int orphan)
3821 {
3822 int count;
3823 int rval;
3824 uint32_t d_id;
3825 uint32_t listlen;
3826 fc_lilpmap_t *lilp_map;
3827 fc_remote_port_t *pd;
3828 fc_portmap_t *changelist;
3829
3830 ASSERT(!MUTEX_HELD(&port->fp_mutex));
3831
3832 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online begin; port=%p, job=%p",
3833 port, job);
3834
3835 lilp_map = &port->fp_lilp_map;
3836
3837 if (lilp_map->lilp_length) {
3838 mutex_enter(&port->fp_mutex);
3839 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) {
3840 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET;
3841 mutex_exit(&port->fp_mutex);
3842 delay(drv_usectohz(PLDA_RR_TOV * 1000 * 1000));
3843 } else {
3844 mutex_exit(&port->fp_mutex);
3845 }
3846
3847 job->job_counter = lilp_map->lilp_length;
3848
3849 for (count = 0; count < lilp_map->lilp_length; count++) {
3850 d_id = lilp_map->lilp_alpalist[count];
3851
3852 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) {
3853 fp_jobdone(job);
3854 continue;
3855 }
3856
3857 pd = fctl_get_remote_port_by_did(port, d_id);
3858 if (pd != NULL) {
3859 #ifdef DEBUG
3860 mutex_enter(&pd->pd_mutex);
3861 if (pd->pd_recepient == PD_PLOGI_INITIATOR) {
3862 ASSERT(pd->pd_type != PORT_DEVICE_OLD);
3863 }
3864 mutex_exit(&pd->pd_mutex);
3865 #endif
3866 fp_jobdone(job);
3867 continue;
3868 }
3869
3870 rval = fp_port_login(port, d_id, job,
3871 FP_CMD_PLOGI_DONT_CARE, KM_SLEEP, pd, NULL);
3872
3873 if (rval != FC_SUCCESS) {
3874 fp_jobdone(job);
3875 }
3876 }
3877 fp_jobwait(job);
3878 }
3879 listlen = 0;
3880 changelist = NULL;
3881
3882 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) {
3883 mutex_enter(&port->fp_mutex);
3884 ASSERT(port->fp_statec_busy > 0);
3885 if (port->fp_statec_busy == 1) {
3886 mutex_exit(&port->fp_mutex);
3887 fctl_fillout_map(port, &changelist, &listlen,
3888 1, 0, orphan);
3889
3890 mutex_enter(&port->fp_mutex);
3891 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) {
3892 ASSERT(port->fp_total_devices == 0);
3893 port->fp_total_devices = port->fp_dev_count;
3894 }
3895 } else {
3896 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
3897 }
3898 mutex_exit(&port->fp_mutex);
3899 }
3900
3901 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) {
3902 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist,
3903 listlen, listlen, KM_SLEEP);
3904 } else {
3905 mutex_enter(&port->fp_mutex);
3906 if (--port->fp_statec_busy == 0) {
3907 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
3908 }
3909 ASSERT(changelist == NULL && listlen == 0);
3910 mutex_exit(&port->fp_mutex);
3911 }
3912
3913 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online end; port=%p, job=%p",
3914 port, job);
3915 }
3916
3917
3918 /*
3919 * Get an Arbitrated Loop map from the underlying FCA
3920 */
3921 static int
3922 fp_get_lilpmap(fc_local_port_t *port, fc_lilpmap_t *lilp_map)
3923 {
3924 int rval;
3925
3926 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap Begin; port=%p, map=%p",
3927 port, lilp_map);
3928
3929 bzero((caddr_t)lilp_map, sizeof (fc_lilpmap_t));
3930 rval = port->fp_fca_tran->fca_getmap(port->fp_fca_handle, lilp_map);
3931 lilp_map->lilp_magic &= 0xFF; /* Ignore upper byte */
3932
3933 if (rval != FC_SUCCESS) {
3934 rval = FC_NO_MAP;
3935 } else if (lilp_map->lilp_length == 0 &&
3936 (lilp_map->lilp_magic >= MAGIC_LISM &&
3937 lilp_map->lilp_magic < MAGIC_LIRP)) {
3938 uchar_t lilp_length;
3939
3940 /*
3941 * Since the map length is zero, provide all
3942 * the valid AL_PAs for NL_ports discovery.
3943 */
3944 lilp_length = sizeof (fp_valid_alpas) /
3945 sizeof (fp_valid_alpas[0]);
3946 lilp_map->lilp_length = lilp_length;
3947 bcopy(fp_valid_alpas, lilp_map->lilp_alpalist,
3948 lilp_length);
3949 } else {
3950 rval = fp_validate_lilp_map(lilp_map);
3951
3952 if (rval == FC_SUCCESS) {
3953 mutex_enter(&port->fp_mutex);
3954 port->fp_total_devices = lilp_map->lilp_length - 1;
3955 mutex_exit(&port->fp_mutex);
3956 }
3957 }
3958
3959 mutex_enter(&port->fp_mutex);
3960 if (rval != FC_SUCCESS && !(port->fp_soft_state & FP_SOFT_BAD_LINK)) {
3961 port->fp_soft_state |= FP_SOFT_BAD_LINK;
3962 mutex_exit(&port->fp_mutex);
3963
3964 if (port->fp_fca_tran->fca_reset(port->fp_fca_handle,
3965 FC_FCA_RESET_CORE) != FC_SUCCESS) {
3966 FP_TRACE(FP_NHEAD1(9, 0),
3967 "FCA reset failed after LILP map was found"
3968 " to be invalid");
3969 }
3970 } else if (rval == FC_SUCCESS) {
3971 port->fp_soft_state &= ~FP_SOFT_BAD_LINK;
3972 mutex_exit(&port->fp_mutex);
3973 } else {
3974 mutex_exit(&port->fp_mutex);
3975 }
3976
3977 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap End; port=%p, map=%p", port,
3978 lilp_map);
3979
3980 return (rval);
3981 }
3982
3983
3984 /*
3985 * Perform Fabric Login:
3986 *
3987 * Return Values:
3988 * FC_SUCCESS
3989 * FC_FAILURE
3990 * FC_NOMEM
3991 * FC_TRANSPORT_ERROR
3992 * and a lot others defined in fc_error.h
3993 */
3994 static int
3995 fp_fabric_login(fc_local_port_t *port, uint32_t s_id, job_request_t *job,
3996 int flag, int sleep)
3997 {
3998 int rval;
3999 fp_cmd_t *cmd;
4000 uchar_t class;
4001
4002 ASSERT(!MUTEX_HELD(&port->fp_mutex));
4003
4004 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login Begin; port=%p, job=%p",
4005 port, job);
4006
4007 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID);
4008 if (class == FC_TRAN_CLASS_INVALID) {
4009 return (FC_ELS_BAD);
4010 }
4011
4012 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t),
4013 sizeof (la_els_logi_t), sleep, NULL);
4014 if (cmd == NULL) {
4015 return (FC_NOMEM);
4016 }
4017
4018 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
4019 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
4020 cmd->cmd_flags = flag;
4021 cmd->cmd_retry_count = fp_retry_count;
4022 cmd->cmd_ulp_pkt = NULL;
4023
4024 fp_xlogi_init(port, cmd, s_id, 0xFFFFFE, fp_flogi_intr,
4025 job, LA_ELS_FLOGI);
4026
4027 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
4028 if (rval != FC_SUCCESS) {
4029 fp_free_pkt(cmd);
4030 }
4031
4032 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login End; port=%p, job=%p",
4033 port, job);
4034
4035 return (rval);
4036 }
4037
4038
4039 /*
4040  * In some scenarios, such as the private loop device discovery period,
4041  * the fc_remote_port_t data structure isn't allocated. The allocation
4042  * is done when the PLOGI is successful. In other scenarios, such as
4043  * Fabric topology, the fc_remote_port_t is already created and
4044  * initialized with appropriate values (as the Name Server provides
4045  * them).
4046 */
4047 static int
4048 fp_port_login(fc_local_port_t *port, uint32_t d_id, job_request_t *job,
4049 int cmd_flag, int sleep, fc_remote_port_t *pd, fc_packet_t *ulp_pkt)
4050 {
4051 uchar_t class;
4052 fp_cmd_t *cmd;
4053 uint32_t src_id;
4054 fc_remote_port_t *tmp_pd;
4055 int relogin;
4056 int found = 0;
4057
4058 #ifdef DEBUG
4059 if (pd == NULL) {
4060 ASSERT(fctl_get_remote_port_by_did(port, d_id) == NULL);
4061 }
4062 #endif
4063 ASSERT(job->job_counter > 0);
4064
4065 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID);
4066 if (class == FC_TRAN_CLASS_INVALID) {
4067 return (FC_ELS_BAD);
4068 }
4069
4070 mutex_enter(&port->fp_mutex);
4071 tmp_pd = fctl_lookup_pd_by_did(port, d_id);
4072 mutex_exit(&port->fp_mutex);
4073
4074 relogin = 1;
4075 if (tmp_pd) {
4076 mutex_enter(&tmp_pd->pd_mutex);
4077 if ((tmp_pd->pd_aux_flags & PD_DISABLE_RELOGIN) &&
4078 !(tmp_pd->pd_aux_flags & PD_LOGGED_OUT)) {
4079 tmp_pd->pd_state = PORT_DEVICE_LOGGED_IN;
4080 relogin = 0;
4081 }
4082 mutex_exit(&tmp_pd->pd_mutex);
4083 }
4084
4085 if (!relogin) {
4086 mutex_enter(&tmp_pd->pd_mutex);
4087 if (tmp_pd->pd_state == PORT_DEVICE_LOGGED_IN) {
4088 cmd_flag |= FP_CMD_PLOGI_RETAIN;
4089 }
4090 mutex_exit(&tmp_pd->pd_mutex);
4091
4092 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t),
4093 sizeof (la_els_adisc_t), sleep, tmp_pd);
4094 if (cmd == NULL) {
4095 return (FC_NOMEM);
4096 }
4097
4098 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
4099 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
4100 cmd->cmd_flags = cmd_flag;
4101 cmd->cmd_retry_count = fp_retry_count;
4102 cmd->cmd_ulp_pkt = ulp_pkt;
4103
4104 mutex_enter(&port->fp_mutex);
4105 mutex_enter(&tmp_pd->pd_mutex);
4106 fp_adisc_init(cmd, job);
4107 mutex_exit(&tmp_pd->pd_mutex);
4108 mutex_exit(&port->fp_mutex);
4109
4110 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_adisc_t);
4111 cmd->cmd_pkt.pkt_rsplen = sizeof (la_els_adisc_t);
4112
4113 } else {
4114 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t),
4115 sizeof (la_els_logi_t), sleep, pd);
4116 if (cmd == NULL) {
4117 return (FC_NOMEM);
4118 }
4119
4120 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
4121 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
4122 cmd->cmd_flags = cmd_flag;
4123 cmd->cmd_retry_count = fp_retry_count;
4124 cmd->cmd_ulp_pkt = ulp_pkt;
4125
4126 mutex_enter(&port->fp_mutex);
4127 src_id = port->fp_port_id.port_id;
4128 mutex_exit(&port->fp_mutex);
4129
4130 fp_xlogi_init(port, cmd, src_id, d_id, fp_plogi_intr,
4131 job, LA_ELS_PLOGI);
4132 }
4133
4134 if (pd) {
4135 mutex_enter(&pd->pd_mutex);
4136 pd->pd_flags = PD_ELS_IN_PROGRESS;
4137 mutex_exit(&pd->pd_mutex);
4138 }
4139
4140 	/* NPIV check to make sure we don't log into ourselves */
4141 if (relogin &&
4142 ((port->fp_npiv_type == FC_NPIV_PORT) ||
4143 (port->fp_npiv_flag == FC_NPIV_ENABLE))) {
4144 if ((d_id & 0xffff00) ==
4145 (port->fp_port_id.port_id & 0xffff00)) {
4146 found = 1;
4147 }
4148 }
4149
4150 if (found ||
4151 (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS)) {
4152 if (found) {
4153 fc_packet_t *pkt = &cmd->cmd_pkt;
4154 pkt->pkt_state = FC_PKT_NPORT_RJT;
4155 }
4156 if (pd) {
4157 mutex_enter(&pd->pd_mutex);
4158 pd->pd_flags = PD_IDLE;
4159 mutex_exit(&pd->pd_mutex);
4160 }
4161
4162 if (ulp_pkt) {
4163 fc_packet_t *pkt = &cmd->cmd_pkt;
4164
4165 ulp_pkt->pkt_state = pkt->pkt_state;
4166 ulp_pkt->pkt_reason = pkt->pkt_reason;
4167 ulp_pkt->pkt_action = pkt->pkt_action;
4168 ulp_pkt->pkt_expln = pkt->pkt_expln;
4169 }
4170
4171 fp_iodone(cmd);
4172 }
4173
4174 return (FC_SUCCESS);
4175 }
4176
4177
4178 /*
4179 * Register the LOGIN parameters with a port device
4180 */
4181 static void
4182 fp_register_login(ddi_acc_handle_t *handle, fc_remote_port_t *pd,
4183 la_els_logi_t *acc, uchar_t class)
4184 {
4185 fc_remote_node_t *node;
4186
4187 ASSERT(pd != NULL);
4188
4189 mutex_enter(&pd->pd_mutex);
4190 node = pd->pd_remote_nodep;
4191 if (pd->pd_login_count == 0) {
4192 pd->pd_login_count++;
4193 }
4194
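	/*
	 * Copy the common and class service parameters out of the ACC
	 * payload; use the DMA access handle when one is supplied,
	 * otherwise copy the in-memory structures directly.
	 */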
4195 if (handle) {
4196 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_csp,
4197 (uint8_t *)&acc->common_service,
4198 sizeof (acc->common_service), DDI_DEV_AUTOINCR);
4199 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_clsp1,
4200 (uint8_t *)&acc->class_1, sizeof (acc->class_1),
4201 DDI_DEV_AUTOINCR);
4202 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_clsp2,
4203 (uint8_t *)&acc->class_2, sizeof (acc->class_2),
4204 DDI_DEV_AUTOINCR);
4205 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_clsp3,
4206 (uint8_t *)&acc->class_3, sizeof (acc->class_3),
4207 DDI_DEV_AUTOINCR);
4208 } else {
4209 pd->pd_csp = acc->common_service;
4210 pd->pd_clsp1 = acc->class_1;
4211 pd->pd_clsp2 = acc->class_2;
4212 pd->pd_clsp3 = acc->class_3;
4213 }
4214
4215 pd->pd_state = PORT_DEVICE_LOGGED_IN;
4216 pd->pd_login_class = class;
4217 mutex_exit(&pd->pd_mutex);
4218
4219 #ifndef __lock_lint
4220 ASSERT(fctl_get_remote_port_by_did(pd->pd_port,
4221 pd->pd_port_id.port_id) == pd);
4222 #endif
4223
4224 mutex_enter(&node->fd_mutex);
4225 if (handle) {
4226 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)node->fd_vv,
4227 (uint8_t *)acc->vendor_version, sizeof (node->fd_vv),
4228 DDI_DEV_AUTOINCR);
4229 } else {
4230 bcopy(acc->vendor_version, node->fd_vv, sizeof (node->fd_vv));
4231 }
4232 mutex_exit(&node->fd_mutex);
4233 }
4234
4235
4236 /*
4237 * Mark the remote port as OFFLINE
4238 */
4239 static void
4240 fp_remote_port_offline(fc_remote_port_t *pd)
4241 {
4242 ASSERT(MUTEX_HELD(&pd->pd_mutex));
4243 if (pd->pd_login_count &&
4244 ((pd->pd_aux_flags & PD_DISABLE_RELOGIN) == 0)) {
4245 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service));
4246 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param));
4247 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param));
4248 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param));
4249 pd->pd_login_class = 0;
4250 }
4251 pd->pd_type = PORT_DEVICE_OLD;
4252 pd->pd_flags = PD_IDLE;
4253 fctl_tc_reset(&pd->pd_logo_tc);
4254 }
4255
4256
4257 /*
4258 * Deregistration of a port device
4259 */
4260 static void
4261 fp_unregister_login(fc_remote_port_t *pd)
4262 {
4263 fc_remote_node_t *node;
4264
4265 ASSERT(pd != NULL);
4266
4267 mutex_enter(&pd->pd_mutex);
4268 pd->pd_login_count = 0;
4269 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service));
4270 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param));
4271 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param));
4272 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param));
4273
4274 pd->pd_state = PORT_DEVICE_VALID;
4275 pd->pd_login_class = 0;
4276 node = pd->pd_remote_nodep;
4277 mutex_exit(&pd->pd_mutex);
4278
4279 mutex_enter(&node->fd_mutex);
4280 bzero(node->fd_vv, sizeof (node->fd_vv));
4281 mutex_exit(&node->fd_mutex);
4282 }
4283
4284
4285 /*
4286 * Handle OFFLINE state of an FCA port
4287 */
4288 static void
4289 fp_port_offline(fc_local_port_t *port, int notify)
4290 {
4291 int index;
4292 int statec;
4293 timeout_id_t tid;
4294 struct pwwn_hash *head;
4295 fc_remote_port_t *pd;
4296
4297 ASSERT(MUTEX_HELD(&port->fp_mutex));
4298
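	/*
	 * Walk the PWWN hash table, mark each remote port offline and
	 * remove it from the D_ID table.
	 */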
4299 for (index = 0; index < pwwn_table_size; index++) {
4300 head = &port->fp_pwwn_table[index];
4301 pd = head->pwwn_head;
4302 while (pd != NULL) {
4303 mutex_enter(&pd->pd_mutex);
4304 fp_remote_port_offline(pd);
4305 fctl_delist_did_table(port, pd);
4306 mutex_exit(&pd->pd_mutex);
4307 pd = pd->pd_wwn_hnext;
4308 }
4309 }
4310 port->fp_total_devices = 0;
4311
4312 statec = 0;
4313 if (notify) {
4314 /*
4315 * Decrement the statec busy counter as we
4316 * are almost done with handling the state
4317 * change
4318 */
4319 ASSERT(port->fp_statec_busy > 0);
4320 if (--port->fp_statec_busy == 0) {
4321 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
4322 }
4323 mutex_exit(&port->fp_mutex);
4324 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, NULL,
4325 0, 0, KM_SLEEP);
4326 mutex_enter(&port->fp_mutex);
4327
4328 if (port->fp_statec_busy) {
4329 statec++;
4330 }
4331 } else if (port->fp_statec_busy > 1) {
4332 statec++;
4333 }
4334
4335 if ((tid = port->fp_offline_tid) != NULL) {
4336 mutex_exit(&port->fp_mutex);
4337 (void) untimeout(tid);
4338 mutex_enter(&port->fp_mutex);
4339 }
4340
4341 if (!statec) {
4342 port->fp_offline_tid = timeout(fp_offline_timeout,
4343 (caddr_t)port, fp_offline_ticks);
4344 }
4345 }
4346
4347
4348 /*
4349 * Offline devices and send up a state change notification to ULPs
4350 */
4351 static void
4352 fp_offline_timeout(void *port_handle)
4353 {
4354 int ret;
4355 fc_local_port_t *port = port_handle;
4356 uint32_t listlen = 0;
4357 fc_portmap_t *changelist = NULL;
4358
4359 mutex_enter(&port->fp_mutex);
4360
4361 if ((FC_PORT_STATE_MASK(port->fp_state) != FC_STATE_OFFLINE) ||
4362 (port->fp_soft_state &
4363 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) ||
4364 port->fp_dev_count == 0 || port->fp_statec_busy) {
4365 port->fp_offline_tid = NULL;
4366 mutex_exit(&port->fp_mutex);
4367 return;
4368 }
4369
4370 mutex_exit(&port->fp_mutex);
4371
4372 FP_TRACE(FP_NHEAD2(9, 0), "OFFLINE timeout");
4373
4374 if (port->fp_options & FP_CORE_ON_OFFLINE_TIMEOUT) {
4375 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle,
4376 FC_FCA_CORE)) != FC_SUCCESS) {
4377 FP_TRACE(FP_NHEAD1(9, ret),
4378 "Failed to force adapter dump");
4379 } else {
4380 FP_TRACE(FP_NHEAD1(9, 0),
4381 "Forced adapter dump successfully");
4382 }
4383 } else if (port->fp_options & FP_RESET_CORE_ON_OFFLINE_TIMEOUT) {
4384 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle,
4385 FC_FCA_RESET_CORE)) != FC_SUCCESS) {
4386 FP_TRACE(FP_NHEAD1(9, ret),
4387 "Failed to force adapter dump and reset");
4388 } else {
4389 FP_TRACE(FP_NHEAD1(9, 0),
4390 "Forced adapter dump and reset successfully");
4391 }
4392 }
4393
4394 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0);
4395 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, changelist,
4396 listlen, listlen, KM_SLEEP);
4397
4398 mutex_enter(&port->fp_mutex);
4399 port->fp_offline_tid = NULL;
4400 mutex_exit(&port->fp_mutex);
4401 }
4402
4403
4404 /*
4405 * Perform general purpose ELS request initialization
4406 */
4407 static void
4408 fp_els_init(fp_cmd_t *cmd, uint32_t s_id, uint32_t d_id,
4409 void (*comp) (), job_request_t *job)
4410 {
4411 fc_packet_t *pkt;
4412
4413 pkt = &cmd->cmd_pkt;
4414 cmd->cmd_job = job;
4415
4416 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_ELS_REQ;
4417 pkt->pkt_cmd_fhdr.d_id = d_id;
4418 pkt->pkt_cmd_fhdr.s_id = s_id;
4419 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
4420 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
4421 pkt->pkt_cmd_fhdr.seq_id = 0;
4422 pkt->pkt_cmd_fhdr.df_ctl = 0;
4423 pkt->pkt_cmd_fhdr.seq_cnt = 0;
4424 pkt->pkt_cmd_fhdr.ox_id = 0xffff;
4425 pkt->pkt_cmd_fhdr.rx_id = 0xffff;
4426 pkt->pkt_cmd_fhdr.ro = 0;
4427 pkt->pkt_cmd_fhdr.rsvd = 0;
4428 pkt->pkt_comp = comp;
4429 pkt->pkt_timeout = FP_ELS_TIMEOUT;
4430 }
4431
4432
4433 /*
4434 * Initialize PLOGI/FLOGI ELS request
4435 */
4436 static void
4437 fp_xlogi_init(fc_local_port_t *port, fp_cmd_t *cmd, uint32_t s_id,
4438 uint32_t d_id, void (*intr) (), job_request_t *job, uchar_t ls_code)
4439 {
4440 ls_code_t payload;
4441
4442 fp_els_init(cmd, s_id, d_id, intr, job);
4443 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
4444
4445 payload.ls_code = ls_code;
4446 payload.mbz = 0;
4447
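	/*
	 * Copy the full login payload (the port's service parameters)
	 * into the command buffer, then overwrite its first word with
	 * the requested ELS code (PLOGI or FLOGI).
	 */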
4448 FC_SET_CMD(port, cmd->cmd_pkt.pkt_cmd_acc,
4449 (uint8_t *)&port->fp_service_params,
4450 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (port->fp_service_params),
4451 DDI_DEV_AUTOINCR);
4452
4453 FC_SET_CMD(port, cmd->cmd_pkt.pkt_cmd_acc, (uint8_t *)&payload,
4454 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (payload),
4455 DDI_DEV_AUTOINCR);
4456 }
4457
4458
4459 /*
4460 * Initialize LOGO ELS request
4461 */
4462 static void
4463 fp_logo_init(fc_remote_port_t *pd, fp_cmd_t *cmd, job_request_t *job)
4464 {
4465 fc_local_port_t *port;
4466 fc_packet_t *pkt;
4467 la_els_logo_t payload;
4468
4469 port = pd->pd_port;
4470 pkt = &cmd->cmd_pkt;
4471 ASSERT(MUTEX_HELD(&port->fp_mutex));
4472 ASSERT(MUTEX_HELD(&pd->pd_mutex));
4473
4474 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id,
4475 fp_logo_intr, job);
4476
4477 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
4478
4479 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
4480 pkt->pkt_tran_type = FC_PKT_EXCHANGE;
4481
4482 payload.ls_code.ls_code = LA_ELS_LOGO;
4483 payload.ls_code.mbz = 0;
4484 payload.nport_ww_name = port->fp_service_params.nport_ww_name;
4485 payload.nport_id = port->fp_port_id;
4486
4487 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
4488 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
4489 }
4490
4491 /*
4492 * Initialize RNID ELS request
4493 */
4494 static void
4495 fp_rnid_init(fp_cmd_t *cmd, uint16_t flag, job_request_t *job)
4496 {
4497 fc_local_port_t *port;
4498 fc_packet_t *pkt;
4499 la_els_rnid_t payload;
4500 fc_remote_port_t *pd;
4501
4502 pkt = &cmd->cmd_pkt;
4503 pd = pkt->pkt_pd;
4504 port = pd->pd_port;
4505
4506 ASSERT(MUTEX_HELD(&port->fp_mutex));
4507 ASSERT(MUTEX_HELD(&pd->pd_mutex));
4508
4509 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id,
4510 fp_rnid_intr, job);
4511
4512 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
4513 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
4514 pkt->pkt_tran_type = FC_PKT_EXCHANGE;
4515
4516 payload.ls_code.ls_code = LA_ELS_RNID;
4517 payload.ls_code.mbz = 0;
4518 payload.data_format = flag;
4519
4520 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
4521 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
4522 }
4523
4524 /*
4525 * Initialize RLS ELS request
4526 */
4527 static void
4528 fp_rls_init(fp_cmd_t *cmd, job_request_t *job)
4529 {
4530 fc_local_port_t *port;
4531 fc_packet_t *pkt;
4532 la_els_rls_t payload;
4533 fc_remote_port_t *pd;
4534
4535 pkt = &cmd->cmd_pkt;
4536 pd = pkt->pkt_pd;
4537 port = pd->pd_port;
4538
4539 ASSERT(MUTEX_HELD(&port->fp_mutex));
4540 ASSERT(MUTEX_HELD(&pd->pd_mutex));
4541
4542 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id,
4543 fp_rls_intr, job);
4544
4545 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
4546 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
4547 pkt->pkt_tran_type = FC_PKT_EXCHANGE;
4548
4549 payload.ls_code.ls_code = LA_ELS_RLS;
4550 payload.ls_code.mbz = 0;
4551 payload.rls_portid = port->fp_port_id;
4552
4553 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
4554 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
4555 }
4556
4557
4558 /*
4559 * Initialize an ADISC ELS request
4560 */
4561 static void
4562 fp_adisc_init(fp_cmd_t *cmd, job_request_t *job)
4563 {
4564 fc_local_port_t *port;
4565 fc_packet_t *pkt;
4566 la_els_adisc_t payload;
4567 fc_remote_port_t *pd;
4568
4569 pkt = &cmd->cmd_pkt;
4570 pd = pkt->pkt_pd;
4571 port = pd->pd_port;
4572
4573 ASSERT(MUTEX_HELD(&pd->pd_mutex));
4574 ASSERT(MUTEX_HELD(&pd->pd_port->fp_mutex));
4575
4576 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id,
4577 fp_adisc_intr, job);
4578
4579 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
4580 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
4581 pkt->pkt_tran_type = FC_PKT_EXCHANGE;
4582
4583 payload.ls_code.ls_code = LA_ELS_ADISC;
4584 payload.ls_code.mbz = 0;
4585 payload.nport_id = port->fp_port_id;
4586 payload.port_wwn = port->fp_service_params.nport_ww_name;
4587 payload.node_wwn = port->fp_service_params.node_ww_name;
4588 payload.hard_addr = port->fp_hard_addr;
4589
4590 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
4591 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
4592 }
4593
4594
4595 /*
4596 * Send up a state change notification to ULPs.
4597 * Spawns a call to fctl_ulp_statec_cb in a taskq thread.
4598 */
4599 static int
4600 fp_ulp_statec_cb(fc_local_port_t *port, uint32_t state,
4601 fc_portmap_t *changelist, uint32_t listlen, uint32_t alloc_len, int sleep)
4602 {
4603 fc_port_clist_t *clist;
4604 fc_remote_port_t *pd;
4605 int count;
4606
4607 ASSERT(!MUTEX_HELD(&port->fp_mutex));
4608
4609 clist = kmem_zalloc(sizeof (*clist), sleep);
4610 if (clist == NULL) {
4611 kmem_free(changelist, alloc_len * sizeof (*changelist));
4612 return (FC_NOMEM);
4613 }
4614
4615 clist->clist_state = state;
4616
4617 mutex_enter(&port->fp_mutex);
4618 clist->clist_flags = port->fp_topology;
4619 mutex_exit(&port->fp_mutex);
4620
4621 clist->clist_port = (opaque_t)port;
4622 clist->clist_len = listlen;
4623 clist->clist_size = alloc_len;
4624 clist->clist_map = changelist;
4625
4626 /*
4627 * Bump the reference count of each fc_remote_port_t in this changelist.
4628 * This is necessary since these devices will be sitting in a taskq
4629 * and referenced later. When the state change notification is
4630 * complete, the reference counts will be decremented.
4631 */
4632 for (count = 0; count < clist->clist_len; count++) {
4633 pd = clist->clist_map[count].map_pd;
4634
4635 if (pd != NULL) {
4636 mutex_enter(&pd->pd_mutex);
4637 ASSERT((pd->pd_ref_count >= 0) ||
4638 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS));
4639 pd->pd_ref_count++;
4640
4641 if (clist->clist_map[count].map_state !=
4642 PORT_DEVICE_INVALID) {
4643 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS;
4644 }
4645
4646 mutex_exit(&pd->pd_mutex);
4647 }
4648 }
4649
4650 #ifdef DEBUG
4651 /*
4652 * Sanity check for presence of OLD devices in the hash lists
4653 */
4654 if (clist->clist_size) {
4655 ASSERT(clist->clist_map != NULL);
4656 for (count = 0; count < clist->clist_len; count++) {
4657 if (clist->clist_map[count].map_state ==
4658 PORT_DEVICE_INVALID) {
4659 la_wwn_t pwwn;
4660 fc_portid_t d_id;
4661
4662 pd = clist->clist_map[count].map_pd;
4663 ASSERT(pd != NULL);
4664
4665 mutex_enter(&pd->pd_mutex);
4666 pwwn = pd->pd_port_name;
4667 d_id = pd->pd_port_id;
4668 mutex_exit(&pd->pd_mutex);
4669
4670 pd = fctl_get_remote_port_by_pwwn(port, &pwwn);
4671 ASSERT(pd != clist->clist_map[count].map_pd);
4672
4673 pd = fctl_get_remote_port_by_did(port,
4674 d_id.port_id);
4675 ASSERT(pd != clist->clist_map[count].map_pd);
4676 }
4677 }
4678 }
4679 #endif
4680
4681 mutex_enter(&port->fp_mutex);
4682
4683 if (state == FC_STATE_ONLINE) {
4684 if (--port->fp_statec_busy == 0) {
4685 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
4686 }
4687 }
4688 mutex_exit(&port->fp_mutex);
4689
4690 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb,
4691 clist, KM_SLEEP);
4692
4693 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_statec fired; Port=%p,"
4694 "state=%x, len=%d", port, state, listlen);
4695
4696 return (FC_SUCCESS);
4697 }
4698
4699
4700 /*
4701 * Send up a FC_STATE_DEVICE_CHANGE state notification to ULPs
4702 */
4703 static int
4704 fp_ulp_devc_cb(fc_local_port_t *port, fc_portmap_t *changelist,
4705 uint32_t listlen, uint32_t alloc_len, int sleep, int sync)
4706 {
4707 int ret;
4708 fc_port_clist_t *clist;
4709
4710 ASSERT(!MUTEX_HELD(&port->fp_mutex));
4711
4712 clist = kmem_zalloc(sizeof (*clist), sleep);
4713 if (clist == NULL) {
4714 kmem_free(changelist, alloc_len * sizeof (*changelist));
4715 return (FC_NOMEM);
4716 }
4717
4718 clist->clist_state = FC_STATE_DEVICE_CHANGE;
4719
4720 mutex_enter(&port->fp_mutex);
4721 clist->clist_flags = port->fp_topology;
4722 mutex_exit(&port->fp_mutex);
4723
4724 clist->clist_port = (opaque_t)port;
4725 clist->clist_len = listlen;
4726 clist->clist_size = alloc_len;
4727 clist->clist_map = changelist;
4728
4729 /* Send sysevents for target state changes */
4730
4731 if (clist->clist_size) {
4732 int count;
4733 fc_remote_port_t *pd;
4734
4735 ASSERT(clist->clist_map != NULL);
4736 for (count = 0; count < clist->clist_len; count++) {
4737 pd = clist->clist_map[count].map_pd;
4738
4739 /*
4740 * Bump reference counts on all fc_remote_port_t
4741 * structs in this list. We don't know when the task
4742 * will fire, and we don't need these fc_remote_port_t
4743 * structs going away behind our back.
4744 */
4745 if (pd) {
4746 mutex_enter(&pd->pd_mutex);
4747 ASSERT((pd->pd_ref_count >= 0) ||
4748 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS));
4749 pd->pd_ref_count++;
4750 mutex_exit(&pd->pd_mutex);
4751 }
4752
4753 if (clist->clist_map[count].map_state ==
4754 PORT_DEVICE_VALID) {
4755 if (clist->clist_map[count].map_type ==
4756 PORT_DEVICE_NEW) {
4757 /* Update our state change counter */
4758 mutex_enter(&port->fp_mutex);
4759 port->fp_last_change++;
4760 mutex_exit(&port->fp_mutex);
4761
4762 /* Additions */
4763 fp_log_target_event(port,
4764 ESC_SUNFC_TARGET_ADD,
4765 clist->clist_map[count].map_pwwn,
4766 clist->clist_map[count].map_did.
4767 port_id);
4768 }
4769
4770 } else if ((clist->clist_map[count].map_type ==
4771 PORT_DEVICE_OLD) &&
4772 (clist->clist_map[count].map_state ==
4773 PORT_DEVICE_INVALID)) {
4774 /* Update our state change counter */
4775 mutex_enter(&port->fp_mutex);
4776 port->fp_last_change++;
4777 mutex_exit(&port->fp_mutex);
4778
4779 /*
4780 * For removals, we don't decrement
4781 * pd_ref_count until after the ULP's
4782 * state change callback function has
4783 * completed.
4784 */
4785
4786 /* Removals */
4787 fp_log_target_event(port,
4788 ESC_SUNFC_TARGET_REMOVE,
4789 clist->clist_map[count].map_pwwn,
4790 clist->clist_map[count].map_did.port_id);
4791 }
4792
4793 if (clist->clist_map[count].map_state !=
4794 PORT_DEVICE_INVALID) {
4795 /*
4796 * Indicate that the ULPs are now aware of
4797 * this device.
4798 */
4799
4800 mutex_enter(&pd->pd_mutex);
4801 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS;
4802 mutex_exit(&pd->pd_mutex);
4803 }
4804
4805 #ifdef DEBUG
4806 /*
4807 * Sanity check for OLD devices in the hash lists
4808 */
4809 if (pd && clist->clist_map[count].map_state ==
4810 PORT_DEVICE_INVALID) {
4811 la_wwn_t pwwn;
4812 fc_portid_t d_id;
4813
4814 mutex_enter(&pd->pd_mutex);
4815 pwwn = pd->pd_port_name;
4816 d_id = pd->pd_port_id;
4817 mutex_exit(&pd->pd_mutex);
4818
4819 /*
4820 * This overwrites the 'pd' local variable.
4821 * Beware of this if 'pd' ever gets
4822 * referenced below this block.
4823 */
4824 pd = fctl_get_remote_port_by_pwwn(port, &pwwn);
4825 ASSERT(pd != clist->clist_map[count].map_pd);
4826
4827 pd = fctl_get_remote_port_by_did(port,
4828 d_id.port_id);
4829 ASSERT(pd != clist->clist_map[count].map_pd);
4830 }
4831 #endif
4832 }
4833 }
4834
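	/*
	 * For synchronous callers, set up a mutex/CV pair so we can block
	 * below until the taskq callback signals completion.
	 */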
4835 if (sync) {
4836 clist->clist_wait = 1;
4837 mutex_init(&clist->clist_mutex, NULL, MUTEX_DRIVER, NULL);
4838 cv_init(&clist->clist_cv, NULL, CV_DRIVER, NULL);
4839 }
4840
4841 ret = taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, clist, sleep);
4842 if (sync && ret) {
4843 mutex_enter(&clist->clist_mutex);
4844 while (clist->clist_wait) {
4845 cv_wait(&clist->clist_cv, &clist->clist_mutex);
4846 }
4847 mutex_exit(&clist->clist_mutex);
4848
4849 mutex_destroy(&clist->clist_mutex);
4850 cv_destroy(&clist->clist_cv);
4851 kmem_free(clist, sizeof (*clist));
4852 }
4853
4854 if (!ret) {
4855 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc dispatch failed; "
4856 "port=%p", port);
4857 kmem_free(clist->clist_map,
4858 sizeof (*(clist->clist_map)) * clist->clist_size);
4859 kmem_free(clist, sizeof (*clist));
4860 } else {
4861 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc fired; port=%p, len=%d",
4862 port, listlen);
4863 }
4864
4865 return (FC_SUCCESS);
4866 }
4867
4868
4869 /*
4870 * Perform PLOGI to the group of devices for ULPs
4871 */
4872 static void
4873 fp_plogi_group(fc_local_port_t *port, job_request_t *job)
4874 {
4875 int offline;
4876 int count;
4877 int rval;
4878 uint32_t listlen;
4879 uint32_t done;
4880 uint32_t d_id;
4881 fc_remote_node_t *node;
4882 fc_remote_port_t *pd;
4883 fc_remote_port_t *tmp_pd;
4884 fc_packet_t *ulp_pkt;
4885 la_els_logi_t *els_data;
4886 ls_code_t ls_code;
4887
4888 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group begin; port=%p, job=%p",
4889 port, job);
4890
4891 done = 0;
4892 listlen = job->job_ulp_listlen;
4893 job->job_counter = job->job_ulp_listlen;
4894
4895 mutex_enter(&port->fp_mutex);
4896 offline = (port->fp_statec_busy ||
4897 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) ? 1 : 0;
4898 mutex_exit(&port->fp_mutex);
4899
4900 for (count = 0; count < listlen; count++) {
4901 ASSERT(job->job_ulp_pkts[count]->pkt_rsplen >=
4902 sizeof (la_els_logi_t));
4903
4904 ulp_pkt = job->job_ulp_pkts[count];
4905 pd = ulp_pkt->pkt_pd;
4906 d_id = ulp_pkt->pkt_cmd_fhdr.d_id;
4907
4908 if (offline) {
4909 done++;
4910
4911 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE;
4912 ulp_pkt->pkt_reason = FC_REASON_OFFLINE;
4913 ulp_pkt->pkt_pd = NULL;
4914 ulp_pkt->pkt_comp(ulp_pkt);
4915
4916 job->job_ulp_pkts[count] = NULL;
4917
4918 fp_jobdone(job);
4919 continue;
4920 }
4921
4922 if (pd == NULL) {
4923 pd = fctl_get_remote_port_by_did(port, d_id);
4924 if (pd == NULL) {
4925 /* reset later */
4926 ulp_pkt->pkt_state = FC_PKT_FAILURE;
4927 continue;
4928 }
4929 mutex_enter(&pd->pd_mutex);
4930 if (pd->pd_flags == PD_ELS_IN_PROGRESS) {
4931 mutex_exit(&pd->pd_mutex);
4932 ulp_pkt->pkt_state = FC_PKT_ELS_IN_PROGRESS;
4933 done++;
4934 ulp_pkt->pkt_comp(ulp_pkt);
4935 job->job_ulp_pkts[count] = NULL;
4936 fp_jobdone(job);
4937 } else {
4938 ulp_pkt->pkt_state = FC_PKT_FAILURE;
4939 mutex_exit(&pd->pd_mutex);
4940 }
4941 continue;
4942 }
4943
4944 switch (ulp_pkt->pkt_state) {
4945 case FC_PKT_ELS_IN_PROGRESS:
4946 ulp_pkt->pkt_reason = FC_REASON_OFFLINE;
4947 /* FALLTHRU */
4948 case FC_PKT_LOCAL_RJT:
4949 done++;
4950 ulp_pkt->pkt_comp(ulp_pkt);
4951 job->job_ulp_pkts[count] = NULL;
4952 fp_jobdone(job);
4953 continue;
4954 default:
4955 break;
4956 }
4957
4958 /*
4959 * Validate the pd corresponding to the d_id passed
4960 * by the ULPs
4961 */
4962 tmp_pd = fctl_get_remote_port_by_did(port, d_id);
4963 if ((tmp_pd == NULL) || (pd != tmp_pd)) {
4964 done++;
4965 ulp_pkt->pkt_state = FC_PKT_FAILURE;
4966 ulp_pkt->pkt_reason = FC_REASON_NO_CONNECTION;
4967 ulp_pkt->pkt_pd = NULL;
4968 ulp_pkt->pkt_comp(ulp_pkt);
4969 job->job_ulp_pkts[count] = NULL;
4970 fp_jobdone(job);
4971 continue;
4972 }
4973
4974 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group contd; "
4975 "port=%p, pd=%p", port, pd);
4976
4977 mutex_enter(&pd->pd_mutex);
4978
4979 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
4980 done++;
4981 els_data = (la_els_logi_t *)ulp_pkt->pkt_resp;
4982
4983 ls_code.ls_code = LA_ELS_ACC;
4984 ls_code.mbz = 0;
4985
4986 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
4987 (uint8_t *)&ls_code, (uint8_t *)&els_data->ls_code,
4988 sizeof (ls_code_t), DDI_DEV_AUTOINCR);
4989
4990 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
4991 (uint8_t *)&pd->pd_csp,
4992 (uint8_t *)&els_data->common_service,
4993 sizeof (pd->pd_csp), DDI_DEV_AUTOINCR);
4994
4995 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
4996 (uint8_t *)&pd->pd_port_name,
4997 (uint8_t *)&els_data->nport_ww_name,
4998 sizeof (pd->pd_port_name), DDI_DEV_AUTOINCR);
4999
5000 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
5001 (uint8_t *)&pd->pd_clsp1,
5002 (uint8_t *)&els_data->class_1,
5003 sizeof (pd->pd_clsp1), DDI_DEV_AUTOINCR);
5004
5005 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
5006 (uint8_t *)&pd->pd_clsp2,
5007 (uint8_t *)&els_data->class_2,
5008 sizeof (pd->pd_clsp2), DDI_DEV_AUTOINCR);
5009
5010 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
5011 (uint8_t *)&pd->pd_clsp3,
5012 (uint8_t *)&els_data->class_3,
5013 sizeof (pd->pd_clsp3), DDI_DEV_AUTOINCR);
5014
5015 node = pd->pd_remote_nodep;
5016 pd->pd_login_count++;
5017 pd->pd_flags = PD_IDLE;
5018 ulp_pkt->pkt_pd = pd;
5019 mutex_exit(&pd->pd_mutex);
5020
5021 mutex_enter(&node->fd_mutex);
5022 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
5023 (uint8_t *)&node->fd_node_name,
5024 (uint8_t *)(&els_data->node_ww_name),
5025 sizeof (node->fd_node_name), DDI_DEV_AUTOINCR);
5026
5027 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
5028 (uint8_t *)&node->fd_vv,
5029 (uint8_t *)(&els_data->vendor_version),
5030 sizeof (node->fd_vv), DDI_DEV_AUTOINCR);
5031
5032 mutex_exit(&node->fd_mutex);
5033 ulp_pkt->pkt_state = FC_PKT_SUCCESS;
5034 } else {
5035
5036 ulp_pkt->pkt_state = FC_PKT_FAILURE; /* reset later */
5037 mutex_exit(&pd->pd_mutex);
5038 }
5039
5040 if (ulp_pkt->pkt_state != FC_PKT_FAILURE) {
5041 ulp_pkt->pkt_comp(ulp_pkt);
5042 job->job_ulp_pkts[count] = NULL;
5043 fp_jobdone(job);
5044 }
5045 }
5046
5047 if (done == listlen) {
5048 fp_jobwait(job);
5049 fctl_jobdone(job);
5050 return;
5051 }
5052
5053 job->job_counter = listlen - done;
5054
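	/*
	 * Second pass: send PLOGI for the packets that could not be
	 * completed above.
	 */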
5055 for (count = 0; count < listlen; count++) {
5056 int cmd_flags;
5057
5058 if ((ulp_pkt = job->job_ulp_pkts[count]) == NULL) {
5059 continue;
5060 }
5061
5062 ASSERT(ulp_pkt->pkt_state == FC_PKT_FAILURE);
5063
5064 cmd_flags = FP_CMD_PLOGI_RETAIN;
5065
5066 d_id = ulp_pkt->pkt_cmd_fhdr.d_id;
5067 ASSERT(d_id != 0);
5068
5069 pd = fctl_get_remote_port_by_did(port, d_id);
5070
5071 /*
5072		 * We need to properly adjust the port device
5073		 * reference counter before we assign the pd
5074		 * to the ULP packet's port device pointer.
5075 */
5076 if (pd != NULL && ulp_pkt->pkt_pd == NULL) {
5077 mutex_enter(&pd->pd_mutex);
5078 pd->pd_ref_count++;
5079 mutex_exit(&pd->pd_mutex);
5080 FP_TRACE(FP_NHEAD1(3, 0),
5081 "fp_plogi_group: DID = 0x%x using new pd %p \
5082 old pd NULL\n", d_id, pd);
5083 } else if (pd != NULL && ulp_pkt->pkt_pd != NULL &&
5084 ulp_pkt->pkt_pd != pd) {
5085 mutex_enter(&pd->pd_mutex);
5086 pd->pd_ref_count++;
5087 mutex_exit(&pd->pd_mutex);
5088 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex);
5089 ulp_pkt->pkt_pd->pd_ref_count--;
5090 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex);
5091 FP_TRACE(FP_NHEAD1(3, 0),
5092 "fp_plogi_group: DID = 0x%x pkt_pd %p != pd %p\n",
5093 d_id, ulp_pkt->pkt_pd, pd);
5094 } else if (pd == NULL && ulp_pkt->pkt_pd != NULL) {
5095 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex);
5096 ulp_pkt->pkt_pd->pd_ref_count--;
5097 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex);
5098 FP_TRACE(FP_NHEAD1(3, 0),
5099 "fp_plogi_group: DID = 0x%x pd is NULL and \
5100 pkt_pd = %p\n", d_id, ulp_pkt->pkt_pd);
5101 }
5102
5103 ulp_pkt->pkt_pd = pd;
5104
5105 if (pd != NULL) {
5106 mutex_enter(&pd->pd_mutex);
5107 d_id = pd->pd_port_id.port_id;
5108 pd->pd_flags = PD_ELS_IN_PROGRESS;
5109 mutex_exit(&pd->pd_mutex);
5110 } else {
5111 d_id = ulp_pkt->pkt_cmd_fhdr.d_id;
5112 #ifdef DEBUG
5113 pd = fctl_get_remote_port_by_did(port, d_id);
5114 ASSERT(pd == NULL);
5115 #endif
5116 /*
5117			 * In Fabric topology, use the NS to create the
5118			 * port device; if that fails, still try PLOGI,
5119			 * which will make another attempt to create it
5120			 * after a successful PLOGI.
5121 */
5122 mutex_enter(&port->fp_mutex);
5123 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
5124 mutex_exit(&port->fp_mutex);
5125 pd = fp_create_remote_port_by_ns(port,
5126 d_id, KM_SLEEP);
5127 if (pd) {
5128 cmd_flags |= FP_CMD_DELDEV_ON_ERROR;
5129
5130 mutex_enter(&pd->pd_mutex);
5131 pd->pd_flags = PD_ELS_IN_PROGRESS;
5132 mutex_exit(&pd->pd_mutex);
5133
5134 FP_TRACE(FP_NHEAD1(3, 0),
5135 "fp_plogi_group;"
5136 " NS created PD port=%p, job=%p,"
5137 " pd=%p", port, job, pd);
5138 }
5139 } else {
5140 mutex_exit(&port->fp_mutex);
5141 }
5142 if ((ulp_pkt->pkt_pd == NULL) && (pd != NULL)) {
5143 FP_TRACE(FP_NHEAD1(3, 0),
5144 "fp_plogi_group;"
5145 "ulp_pkt's pd is NULL, get a pd %p",
5146 pd);
5147 mutex_enter(&pd->pd_mutex);
5148 pd->pd_ref_count++;
5149 mutex_exit(&pd->pd_mutex);
5150 }
5151 ulp_pkt->pkt_pd = pd;
5152 }
5153
5154 rval = fp_port_login(port, d_id, job, cmd_flags,
5155 KM_SLEEP, pd, ulp_pkt);
5156
5157 if (rval == FC_SUCCESS) {
5158 continue;
5159 }
5160
5161 if (rval == FC_STATEC_BUSY) {
5162 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE;
5163 ulp_pkt->pkt_reason = FC_REASON_OFFLINE;
5164 } else {
5165 ulp_pkt->pkt_state = FC_PKT_FAILURE;
5166 }
5167
5168 if (pd) {
5169 mutex_enter(&pd->pd_mutex);
5170 pd->pd_flags = PD_IDLE;
5171 mutex_exit(&pd->pd_mutex);
5172 }
5173
5174 if (cmd_flags & FP_CMD_DELDEV_ON_ERROR) {
5175 ASSERT(pd != NULL);
5176
5177 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group: NS created,"
5178 " PD removed; port=%p, job=%p", port, job);
5179
5180 mutex_enter(&pd->pd_mutex);
5181 pd->pd_ref_count--;
5182 node = pd->pd_remote_nodep;
5183 mutex_exit(&pd->pd_mutex);
5184
5185 ASSERT(node != NULL);
5186
5187 if (fctl_destroy_remote_port(port, pd) == 0) {
5188 fctl_destroy_remote_node(node);
5189 }
5190 ulp_pkt->pkt_pd = NULL;
5191 }
5192 ulp_pkt->pkt_comp(ulp_pkt);
5193 fp_jobdone(job);
5194 }
5195
5196 fp_jobwait(job);
5197 fctl_jobdone(job);
5198
5199 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group end: port=%p, job=%p",
5200 port, job);
5201 }
5202
5203
5204 /*
5205 * Name server request initialization
5206 */
5207 static void
5208 fp_ns_init(fc_local_port_t *port, job_request_t *job, int sleep)
5209 {
5210 int rval;
5211 int count;
5212 int size;
5213
5214 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
5215
5216 job->job_counter = 1;
5217 job->job_result = FC_SUCCESS;
5218
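	/*
	 * Log into the well-known Directory (Name) Server address 0xFFFFFC
	 * and retain the login.
	 */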
5219 rval = fp_port_login(port, 0xFFFFFC, job, FP_CMD_PLOGI_RETAIN,
5220 KM_SLEEP, NULL, NULL);
5221
5222 if (rval != FC_SUCCESS) {
5223 mutex_enter(&port->fp_mutex);
5224 port->fp_topology = FC_TOP_NO_NS;
5225 mutex_exit(&port->fp_mutex);
5226 return;
5227 }
5228
5229 fp_jobwait(job);
5230
5231 if (job->job_result != FC_SUCCESS) {
5232 mutex_enter(&port->fp_mutex);
5233 port->fp_topology = FC_TOP_NO_NS;
5234 mutex_exit(&port->fp_mutex);
5235 return;
5236 }
5237
5238 /*
5239 * At this time, we'll do NS registration for objects in the
5240 * ns_reg_cmds (see top of this file) array.
5241 *
5242	 * Each time a ULP module registers with the transport, the
5243	 * appropriate bit is set in the FC-4 types and registered with
5244	 * the NS to advertise that support. ULPs and FC admin utilities
5245	 * may also register objects such as the IP address, symbolic
5246	 * port/node name, and initial process associator at run time.
5247 */
5248 size = sizeof (ns_reg_cmds) / sizeof (ns_reg_cmds[0]);
5249 job->job_counter = size;
5250 job->job_result = FC_SUCCESS;
5251
5252 for (count = 0; count < size; count++) {
5253 if (fp_ns_reg(port, NULL, ns_reg_cmds[count],
5254 job, 0, sleep) != FC_SUCCESS) {
5255 fp_jobdone(job);
5256 }
5257 }
5258 if (size) {
5259 fp_jobwait(job);
5260 }
5261
5262 job->job_result = FC_SUCCESS;
5263
5264 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP);
5265
5266 if (port->fp_dev_count < FP_MAX_DEVICES) {
5267 (void) fp_ns_get_devcount(port, job, 1, KM_SLEEP);
5268 }
5269
5270 job->job_counter = 1;
5271
5272 if (fp_ns_scr(port, job, FC_SCR_FULL_REGISTRATION,
5273 sleep) == FC_SUCCESS) {
5274 fp_jobwait(job);
5275 }
5276 }
5277
5278
5279 /*
5280 * Name server finish:
5281 * Unregister for RSCNs
5282 * Unregister all the host port objects in the Name Server
5283 * Perform LOGO with the NS;
5284 */
5285 static void
5286 fp_ns_fini(fc_local_port_t *port, job_request_t *job)
5287 {
5288 fp_cmd_t *cmd;
5289 uchar_t class;
5290 uint32_t s_id;
5291 fc_packet_t *pkt;
5292 la_els_logo_t payload;
5293
5294 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
5295
5296 job->job_counter = 1;
5297
5298 if (fp_ns_scr(port, job, FC_SCR_CLEAR_REGISTRATION, KM_SLEEP) !=
5299 FC_SUCCESS) {
5300 fp_jobdone(job);
5301 }
5302 fp_jobwait(job);
5303
5304 job->job_counter = 1;
5305
5306 if (fp_ns_reg(port, NULL, NS_DA_ID, job, 0, KM_SLEEP) != FC_SUCCESS) {
5307 fp_jobdone(job);
5308 }
5309 fp_jobwait(job);
5310
5311 job->job_counter = 1;
5312
5313 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t),
5314 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, NULL);
5315 pkt = &cmd->cmd_pkt;
5316
5317 mutex_enter(&port->fp_mutex);
5318 class = port->fp_ns_login_class;
5319 s_id = port->fp_port_id.port_id;
5320 payload.nport_id = port->fp_port_id;
5321 mutex_exit(&port->fp_mutex);
5322
5323 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
5324 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
5325 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE;
5326 cmd->cmd_retry_count = 1;
5327 cmd->cmd_ulp_pkt = NULL;
5328
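	/*
	 * An NPIV port sends its LOGO to the F_Port controller (0xFFFFFE);
	 * otherwise the LOGO goes to the name server (0xFFFFFC).
	 */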
5329 if (port->fp_npiv_type == FC_NPIV_PORT) {
5330 fp_els_init(cmd, s_id, 0xFFFFFE, fp_logo_intr, job);
5331 } else {
5332 fp_els_init(cmd, s_id, 0xFFFFFC, fp_logo_intr, job);
5333 }
5334
5335 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
5336
5337 payload.ls_code.ls_code = LA_ELS_LOGO;
5338 payload.ls_code.mbz = 0;
5339 payload.nport_ww_name = port->fp_service_params.nport_ww_name;
5340
5341 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
5342 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
5343
5344 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
5345 fp_iodone(cmd);
5346 }
5347 fp_jobwait(job);
5348 }
5349
5350
5351 /*
5352 * NS Registration function.
5353 *
5354  * Note that FC-GS-2 currently doesn't support Object Registration
5355  * by a D_ID other than the owner of the object. What we aim for
5356  * here is to at least allow Symbolic Node/Port Name registration
5357  * for any N_Port Identifier by the host software.
5358  *
5359  * If the second argument (fc_remote_port_t *) is NULL, this
5360  * function treats the request as a Host NS Object.
5361 */
5362 static int
5363 fp_ns_reg(fc_local_port_t *port, fc_remote_port_t *pd, uint16_t cmd_code,
5364 job_request_t *job, int polled, int sleep)
5365 {
5366 int rval;
5367 fc_portid_t s_id;
5368 fc_packet_t *pkt;
5369 fp_cmd_t *cmd;
5370
5371 if (pd == NULL) {
5372 mutex_enter(&port->fp_mutex);
5373 s_id = port->fp_port_id;
5374 mutex_exit(&port->fp_mutex);
5375 } else {
5376 mutex_enter(&pd->pd_mutex);
5377 s_id = pd->pd_port_id;
5378 mutex_exit(&pd->pd_mutex);
5379 }
5380
5381 if (polled) {
5382 job->job_counter = 1;
5383 }
5384
5385 switch (cmd_code) {
5386 case NS_RPN_ID:
5387 case NS_RNN_ID: {
5388 ns_rxn_req_t rxn;
5389
5390 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5391 sizeof (ns_rxn_req_t), sizeof (fc_reg_resp_t), sleep, NULL);
5392 if (cmd == NULL) {
5393 return (FC_NOMEM);
5394 }
5395 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5396 pkt = &cmd->cmd_pkt;
5397
5398 if (pd == NULL) {
5399 rxn.rxn_xname = ((cmd_code == NS_RPN_ID) ?
5400 (port->fp_service_params.nport_ww_name) :
5401 (port->fp_service_params.node_ww_name));
5402 } else {
5403 if (cmd_code == NS_RPN_ID) {
5404 mutex_enter(&pd->pd_mutex);
5405 rxn.rxn_xname = pd->pd_port_name;
5406 mutex_exit(&pd->pd_mutex);
5407 } else {
5408 fc_remote_node_t *node;
5409
5410 mutex_enter(&pd->pd_mutex);
5411 node = pd->pd_remote_nodep;
5412 mutex_exit(&pd->pd_mutex);
5413
5414 mutex_enter(&node->fd_mutex);
5415 rxn.rxn_xname = node->fd_node_name;
5416 mutex_exit(&node->fd_mutex);
5417 }
5418 }
5419 rxn.rxn_port_id = s_id;
5420
5421 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rxn,
5422 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5423 sizeof (rxn), DDI_DEV_AUTOINCR);
5424
5425 break;
5426 }
5427
5428 case NS_RCS_ID: {
5429 ns_rcos_t rcos;
5430
5431 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5432 sizeof (ns_rcos_t), sizeof (fc_reg_resp_t), sleep, NULL);
5433 if (cmd == NULL) {
5434 return (FC_NOMEM);
5435 }
5436 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5437 pkt = &cmd->cmd_pkt;
5438
5439 if (pd == NULL) {
5440 rcos.rcos_cos = port->fp_cos;
5441 } else {
5442 mutex_enter(&pd->pd_mutex);
5443 rcos.rcos_cos = pd->pd_cos;
5444 mutex_exit(&pd->pd_mutex);
5445 }
5446 rcos.rcos_port_id = s_id;
5447
5448 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rcos,
5449 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5450 sizeof (rcos), DDI_DEV_AUTOINCR);
5451
5452 break;
5453 }
5454
5455 case NS_RFT_ID: {
5456 ns_rfc_type_t rfc;
5457
5458 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5459 sizeof (ns_rfc_type_t), sizeof (fc_reg_resp_t), sleep,
5460 NULL);
5461 if (cmd == NULL) {
5462 return (FC_NOMEM);
5463 }
5464 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5465 pkt = &cmd->cmd_pkt;
5466
5467 if (pd == NULL) {
5468 mutex_enter(&port->fp_mutex);
5469 bcopy(port->fp_fc4_types, rfc.rfc_types,
5470 sizeof (port->fp_fc4_types));
5471 mutex_exit(&port->fp_mutex);
5472 } else {
5473 mutex_enter(&pd->pd_mutex);
5474 bcopy(pd->pd_fc4types, rfc.rfc_types,
5475 sizeof (pd->pd_fc4types));
5476 mutex_exit(&pd->pd_mutex);
5477 }
5478 rfc.rfc_port_id = s_id;
5479
5480 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rfc,
5481 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5482 sizeof (rfc), DDI_DEV_AUTOINCR);
5483
5484 break;
5485 }
5486
5487 case NS_RSPN_ID: {
5488 uchar_t name_len;
5489 int pl_size;
5490 fc_portid_t spn;
5491
5492 if (pd == NULL) {
5493 mutex_enter(&port->fp_mutex);
5494 name_len = port->fp_sym_port_namelen;
5495 mutex_exit(&port->fp_mutex);
5496 } else {
5497 mutex_enter(&pd->pd_mutex);
5498 name_len = pd->pd_spn_len;
5499 mutex_exit(&pd->pd_mutex);
5500 }
5501
5502 pl_size = sizeof (fc_portid_t) + name_len + 1;
5503
5504 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + pl_size,
5505 sizeof (fc_reg_resp_t), sleep, NULL);
5506 if (cmd == NULL) {
5507 return (FC_NOMEM);
5508 }
5509
5510 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5511
5512 pkt = &cmd->cmd_pkt;
5513
5514 spn = s_id;
5515
5516 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&spn, (uint8_t *)
5517 (pkt->pkt_cmd + sizeof (fc_ct_header_t)), sizeof (spn),
5518 DDI_DEV_AUTOINCR);
5519 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&name_len,
5520 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)
5521 + sizeof (fc_portid_t)), 1, DDI_DEV_AUTOINCR);
5522
5523 if (pd == NULL) {
5524 mutex_enter(&port->fp_mutex);
5525 FC_SET_CMD(port, pkt->pkt_cmd_acc,
5526 (uint8_t *)port->fp_sym_port_name, (uint8_t *)
5527 (pkt->pkt_cmd + sizeof (fc_ct_header_t) +
5528 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR);
5529 mutex_exit(&port->fp_mutex);
5530 } else {
5531 mutex_enter(&pd->pd_mutex);
5532 FC_SET_CMD(port, pkt->pkt_cmd_acc,
5533 (uint8_t *)pd->pd_spn,
5534 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) +
5535 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR);
5536 mutex_exit(&pd->pd_mutex);
5537 }
5538 break;
5539 }
5540
5541 case NS_RPT_ID: {
5542 ns_rpt_t rpt;
5543
5544 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5545 sizeof (ns_rpt_t), sizeof (fc_reg_resp_t), sleep, NULL);
5546 if (cmd == NULL) {
5547 return (FC_NOMEM);
5548 }
5549 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5550 pkt = &cmd->cmd_pkt;
5551
5552 if (pd == NULL) {
5553 rpt.rpt_type = port->fp_port_type;
5554 } else {
5555 mutex_enter(&pd->pd_mutex);
5556 rpt.rpt_type = pd->pd_porttype;
5557 mutex_exit(&pd->pd_mutex);
5558 }
5559 rpt.rpt_port_id = s_id;
5560
5561 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rpt,
5562 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5563 sizeof (rpt), DDI_DEV_AUTOINCR);
5564
5565 break;
5566 }
5567
5568 case NS_RIP_NN: {
5569 ns_rip_t rip;
5570
5571 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5572 sizeof (ns_rip_t), sizeof (fc_reg_resp_t), sleep, NULL);
5573 if (cmd == NULL) {
5574 return (FC_NOMEM);
5575 }
5576 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5577 pkt = &cmd->cmd_pkt;
5578
5579 if (pd == NULL) {
5580 rip.rip_node_name =
5581 port->fp_service_params.node_ww_name;
5582 bcopy(port->fp_ip_addr, rip.rip_ip_addr,
5583 sizeof (port->fp_ip_addr));
5584 } else {
5585 fc_remote_node_t *node;
5586
5587 /*
5588			 * The most correct implementation would keep the IP
5589			 * address in the fc_remote_node_t structure; Node WWN
5590			 * and IP address should have a one-to-one
5591			 * correlation (though this is changing in the latest
5592			 * FC-GS-2 draft).
5593 */
5594 mutex_enter(&pd->pd_mutex);
5595 node = pd->pd_remote_nodep;
5596 bcopy(pd->pd_ip_addr, rip.rip_ip_addr,
5597 sizeof (pd->pd_ip_addr));
5598 mutex_exit(&pd->pd_mutex);
5599
5600 mutex_enter(&node->fd_mutex);
5601 rip.rip_node_name = node->fd_node_name;
5602 mutex_exit(&node->fd_mutex);
5603 }
5604
5605 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rip,
5606 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5607 sizeof (rip), DDI_DEV_AUTOINCR);
5608
5609 break;
5610 }
5611
5612 case NS_RIPA_NN: {
5613 ns_ipa_t ipa;
5614
5615 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5616 sizeof (ns_ipa_t), sizeof (fc_reg_resp_t), sleep, NULL);
5617 if (cmd == NULL) {
5618 return (FC_NOMEM);
5619 }
5620 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5621 pkt = &cmd->cmd_pkt;
5622
5623 if (pd == NULL) {
5624 ipa.ipa_node_name =
5625 port->fp_service_params.node_ww_name;
5626 bcopy(port->fp_ipa, ipa.ipa_value,
5627 sizeof (port->fp_ipa));
5628 } else {
5629 fc_remote_node_t *node;
5630
5631 mutex_enter(&pd->pd_mutex);
5632 node = pd->pd_remote_nodep;
5633 mutex_exit(&pd->pd_mutex);
5634
5635 mutex_enter(&node->fd_mutex);
5636 ipa.ipa_node_name = node->fd_node_name;
5637 bcopy(node->fd_ipa, ipa.ipa_value,
5638 sizeof (node->fd_ipa));
5639 mutex_exit(&node->fd_mutex);
5640 }
5641
5642 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&ipa,
5643 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5644 sizeof (ipa), DDI_DEV_AUTOINCR);
5645
5646 break;
5647 }
5648
5649 case NS_RSNN_NN: {
5650 uchar_t name_len;
5651 int pl_size;
5652 la_wwn_t snn;
5653 fc_remote_node_t *node = NULL;
5654
5655 if (pd == NULL) {
5656 mutex_enter(&port->fp_mutex);
5657 name_len = port->fp_sym_node_namelen;
5658 mutex_exit(&port->fp_mutex);
5659 } else {
5660 mutex_enter(&pd->pd_mutex);
5661 node = pd->pd_remote_nodep;
5662 mutex_exit(&pd->pd_mutex);
5663
5664 mutex_enter(&node->fd_mutex);
5665 name_len = node->fd_snn_len;
5666 mutex_exit(&node->fd_mutex);
5667 }
5668
5669 pl_size = sizeof (la_wwn_t) + name_len + 1;
5670
5671 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5672 pl_size, sizeof (fc_reg_resp_t), sleep, NULL);
5673 if (cmd == NULL) {
5674 return (FC_NOMEM);
5675 }
5676 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5677
5678 pkt = &cmd->cmd_pkt;
5679
5680 bcopy(&port->fp_service_params.node_ww_name,
5681 &snn, sizeof (la_wwn_t));
5682
5683 if (pd == NULL) {
5684 mutex_enter(&port->fp_mutex);
5685 FC_SET_CMD(port, pkt->pkt_cmd_acc,
5686 (uint8_t *)port->fp_sym_node_name, (uint8_t *)
5687 (pkt->pkt_cmd + sizeof (fc_ct_header_t) +
5688 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR);
5689 mutex_exit(&port->fp_mutex);
5690 } else {
5691 ASSERT(node != NULL);
5692 mutex_enter(&node->fd_mutex);
5693 FC_SET_CMD(port, pkt->pkt_cmd_acc,
5694 (uint8_t *)node->fd_snn,
5695 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) +
5696 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR);
5697 mutex_exit(&node->fd_mutex);
5698 }
5699
5700 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&snn,
5701 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5702 sizeof (snn), DDI_DEV_AUTOINCR);
5703 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&name_len,
5704 (uint8_t *)(pkt->pkt_cmd
5705 + sizeof (fc_ct_header_t) + sizeof (snn)),
5706 1, DDI_DEV_AUTOINCR);
5707
5708 break;
5709 }
5710
5711 case NS_DA_ID: {
5712 ns_remall_t rall;
5713 char tmp[4] = {0};
5714 char *ptr;
5715
5716 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5717 sizeof (ns_remall_t), sizeof (fc_reg_resp_t), sleep, NULL);
5718
5719 if (cmd == NULL) {
5720 return (FC_NOMEM);
5721 }
5722
5723 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5724 pkt = &cmd->cmd_pkt;
5725
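		/*
		 * Build the DA_ID payload in wire (big-endian) byte order;
		 * on little-endian hosts the port ID bytes are swapped here.
		 */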
5726 ptr = (char *)(&s_id);
5727 tmp[3] = *ptr++;
5728 tmp[2] = *ptr++;
5729 tmp[1] = *ptr++;
5730 tmp[0] = *ptr;
5731 #if defined(_BIT_FIELDS_LTOH)
5732 bcopy((caddr_t)tmp, (caddr_t)(&rall.rem_port_id), 4);
5733 #else
5734 rall.rem_port_id = s_id;
5735 #endif
5736 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rall,
5737 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5738 sizeof (rall), DDI_DEV_AUTOINCR);
5739
5740 break;
5741 }
5742
5743 default:
5744 return (FC_FAILURE);
5745 }
5746
5747 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
5748
5749 if (rval != FC_SUCCESS) {
5750 job->job_result = rval;
5751 fp_iodone(cmd);
5752 }
5753
5754 if (polled) {
5755 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
5756 fp_jobwait(job);
5757 } else {
5758 rval = FC_SUCCESS;
5759 }
5760
5761 return (rval);
5762 }
5763
5764
5765 /*
5766 * Common interrupt handler
5767 */
5768 static int
5769 fp_common_intr(fc_packet_t *pkt, int iodone)
5770 {
5771 int rval = FC_FAILURE;
5772 fp_cmd_t *cmd;
5773 fc_local_port_t *port;
5774
5775 cmd = pkt->pkt_ulp_private;
5776 port = cmd->cmd_port;
5777
5778 /*
5779	 * Fail the upper layer requests fast if a state
5780	 * change has occurred in the meantime.
5781 */
5782 mutex_enter(&port->fp_mutex);
5783 if (cmd->cmd_ulp_pkt != NULL && port->fp_statec_busy) {
5784 mutex_exit(&port->fp_mutex);
5785 cmd->cmd_ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE;
5786 cmd->cmd_ulp_pkt->pkt_reason = FC_REASON_OFFLINE;
5787 } else if (!(port->fp_soft_state &
5788 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS))) {
5789 mutex_exit(&port->fp_mutex);
5790
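		/*
		 * Busy and timeout completions are retried here; reject
		 * completions are passed to the reject handler.
		 */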
5791 switch (pkt->pkt_state) {
5792 case FC_PKT_LOCAL_BSY:
5793 case FC_PKT_FABRIC_BSY:
5794 case FC_PKT_NPORT_BSY:
5795 case FC_PKT_TIMEOUT:
5796 cmd->cmd_retry_interval = (pkt->pkt_state ==
5797 FC_PKT_TIMEOUT) ? 0 : fp_retry_delay;
5798 rval = fp_retry_cmd(pkt);
5799 break;
5800
5801 case FC_PKT_FABRIC_RJT:
5802 case FC_PKT_NPORT_RJT:
5803 case FC_PKT_LOCAL_RJT:
5804 case FC_PKT_LS_RJT:
5805 case FC_PKT_FS_RJT:
5806 case FC_PKT_BA_RJT:
5807 rval = fp_handle_reject(pkt);
5808 break;
5809
5810 default:
5811 if (pkt->pkt_resp_resid) {
5812 cmd->cmd_retry_interval = 0;
5813 rval = fp_retry_cmd(pkt);
5814 }
5815 break;
5816 }
5817 } else {
5818 mutex_exit(&port->fp_mutex);
5819 }
5820
5821 if (rval != FC_SUCCESS && iodone) {
5822 fp_iodone(cmd);
5823 rval = FC_SUCCESS;
5824 }
5825
5826 return (rval);
5827 }
5828
5829
5830 /*
5831  * A brief bit of theory on point-to-point topology:
5832 *
5833 * In the ACC payload, if the D_ID is ZERO and the common service
5834 * parameters indicate N_Port, then the topology is POINT TO POINT.
5835 *
5836 * In a point to point topology with an N_Port, during Fabric Login,
5837 * the destination N_Port will check with our WWN and decide if it
5838 * needs to issue PLOGI or not. That means, FLOGI could potentially
5839 * trigger an unsolicited PLOGI from an N_Port. The Unsolicited
5840 * PLOGI creates the device handles.
5841 *
5842  * If the host port WWN is greater than the other N_Port's WWN, we
5843  * become the master (note that this isn't the term used in the FC
5844  * standards) and initiate the PLOGI.
5845 *
5846 */
5847 static void
5848 fp_flogi_intr(fc_packet_t *pkt)
5849 {
5850 int state;
5851 int f_port;
5852 uint32_t s_id;
5853 uint32_t d_id;
5854 fp_cmd_t *cmd;
5855 fc_local_port_t *port;
5856 la_wwn_t *swwn;
5857 la_wwn_t dwwn;
5858 la_wwn_t nwwn;
5859 fc_remote_port_t *pd;
5860 la_els_logi_t *acc;
5861 com_svc_t csp;
5862 ls_code_t resp;
5863
5864 cmd = pkt->pkt_ulp_private;
5865 port = cmd->cmd_port;
5866
5867 mutex_enter(&port->fp_mutex);
5868 port->fp_out_fpcmds--;
5869 mutex_exit(&port->fp_mutex);
5870
5871 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr; port=%p, pkt=%p, state=%x",
5872 port, pkt, pkt->pkt_state);
5873
5874 if (FP_IS_PKT_ERROR(pkt)) {
5875 (void) fp_common_intr(pkt, 1);
5876 return;
5877 }
5878
5879 /*
5880	 * Currently, we don't need to swap bytes here because qlc fakes the
5881	 * response for us, so endianness is already taken care of. This
5882	 * should be fixed and generalized at some point.
5883 */
5884 acc = (la_els_logi_t *)pkt->pkt_resp;
5885
5886 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc,
5887 sizeof (resp), DDI_DEV_AUTOINCR);
5888
5889 ASSERT(resp.ls_code == LA_ELS_ACC);
5890 if (resp.ls_code != LA_ELS_ACC) {
5891 (void) fp_common_intr(pkt, 1);
5892 return;
5893 }
5894
5895 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&csp,
5896 (uint8_t *)&acc->common_service, sizeof (csp), DDI_DEV_AUTOINCR);
5897
5898 f_port = FP_IS_F_PORT(csp.cmn_features) ? 1 : 0;
5899
5900 ASSERT(!MUTEX_HELD(&port->fp_mutex));
5901
5902 mutex_enter(&port->fp_mutex);
5903 state = FC_PORT_STATE_MASK(port->fp_state);
5904 mutex_exit(&port->fp_mutex);
5905
5906 if (f_port == 0) {
5907 if (state != FC_STATE_LOOP) {
5908 swwn = &port->fp_service_params.nport_ww_name;
5909
5910 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&dwwn,
5911 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t),
5912 DDI_DEV_AUTOINCR);
5913
5914 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&nwwn,
5915 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t),
5916 DDI_DEV_AUTOINCR);
5917
5918 mutex_enter(&port->fp_mutex);
5919
5920 port->fp_topology = FC_TOP_PT_PT;
5921 port->fp_total_devices = 1;
5922 if (fctl_wwn_cmp(swwn, &dwwn) >= 0) {
5923 port->fp_ptpt_master = 1;
5924 /*
5925				 * Pick provisional values for S_ID and D_ID;
5926				 * they should work, and if not they will be
5927				 * changed later.
5928 */
5929 s_id = port->fp_instance + FP_DEFAULT_SID;
5930 d_id = port->fp_instance + FP_DEFAULT_DID;
5931 port->fp_port_id.port_id = s_id;
5932 mutex_exit(&port->fp_mutex);
5933
5934 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr: fp %x"
5935 "pd %x", port->fp_port_id.port_id, d_id);
5936 pd = fctl_create_remote_port(port,
5937 &nwwn, &dwwn, d_id, PD_PLOGI_INITIATOR,
5938 KM_NOSLEEP);
5939 if (pd == NULL) {
5940 fp_printf(port, CE_NOTE, FP_LOG_ONLY,
5941 0, NULL, "couldn't create device"
5942 " d_id=%X", d_id);
5943 fp_iodone(cmd);
5944 return;
5945 }
5946
5947 cmd->cmd_pkt.pkt_tran_flags =
5948 pkt->pkt_tran_flags;
5949 cmd->cmd_pkt.pkt_tran_type = pkt->pkt_tran_type;
5950 cmd->cmd_flags = FP_CMD_PLOGI_RETAIN;
5951 cmd->cmd_retry_count = fp_retry_count;
5952
5953 fp_xlogi_init(port, cmd, s_id, d_id,
5954 fp_plogi_intr, cmd->cmd_job, LA_ELS_PLOGI);
5955
5956 (&cmd->cmd_pkt)->pkt_pd = pd;
5957
5958 /*
5959 * We've just created this fc_remote_port_t, and
5960 * we're about to use it to send a PLOGI, so
5961 * bump the reference count right now. When
5962 * the packet is freed, the reference count will
5963 * be decremented. The ULP may also start using
5964 * it, so mark it as given away as well.
5965 */
5966 pd->pd_ref_count++;
5967 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS;
5968
5969 if (fp_sendcmd(port, cmd,
5970 port->fp_fca_handle) == FC_SUCCESS) {
5971 return;
5972 }
5973 } else {
5974 /*
5975 * The device handles will be created when the
5976 * unsolicited PLOGI is completed successfully
5977 */
5978 port->fp_ptpt_master = 0;
5979 mutex_exit(&port->fp_mutex);
5980 }
5981 }
5982 pkt->pkt_state = FC_PKT_FAILURE;
5983 } else {
5984 if (f_port) {
5985 mutex_enter(&port->fp_mutex);
5986 if (state == FC_STATE_LOOP) {
5987 port->fp_topology = FC_TOP_PUBLIC_LOOP;
5988 } else {
5989 port->fp_topology = FC_TOP_FABRIC;
5990
5991 FC_GET_RSP(port, pkt->pkt_resp_acc,
5992 (uint8_t *)&port->fp_fabric_name,
5993 (uint8_t *)&acc->node_ww_name,
5994 sizeof (la_wwn_t),
5995 DDI_DEV_AUTOINCR);
5996 }
5997 port->fp_port_id.port_id = pkt->pkt_resp_fhdr.d_id;
5998 mutex_exit(&port->fp_mutex);
5999 } else {
6000 pkt->pkt_state = FC_PKT_FAILURE;
6001 }
6002 }
6003 fp_iodone(cmd);
6004 }
6005
6006
6007 /*
6008 * Handle solicited PLOGI response
6009 */
6010 static void
6011 fp_plogi_intr(fc_packet_t *pkt)
6012 {
6013 int nl_port;
6014 int bailout;
6015 uint32_t d_id;
6016 fp_cmd_t *cmd;
6017 la_els_logi_t *acc;
6018 fc_local_port_t *port;
6019 fc_remote_port_t *pd;
6020 la_wwn_t nwwn;
6021 la_wwn_t pwwn;
6022 ls_code_t resp;
6023
6024 nl_port = 0;
6025 cmd = pkt->pkt_ulp_private;
6026 port = cmd->cmd_port;
6027 d_id = pkt->pkt_cmd_fhdr.d_id;
6028
6029 #ifndef __lock_lint
6030 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter);
6031 #endif
6032
6033 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: port=%p, job=%p, d_id=%x,"
6034 " jcount=%d pkt=%p, state=%x", port, cmd->cmd_job, d_id,
6035 cmd->cmd_job->job_counter, pkt, pkt->pkt_state);
6036
6037 /*
6038 * Bail out early on ULP initiated requests if the
6039 * state change has occurred
6040 */
6041 mutex_enter(&port->fp_mutex);
6042 port->fp_out_fpcmds--;
6043 bailout = ((port->fp_statec_busy ||
6044 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) &&
6045 cmd->cmd_ulp_pkt) ? 1 : 0;
6046 mutex_exit(&port->fp_mutex);
6047
6048 if (FP_IS_PKT_ERROR(pkt) || bailout) {
6049 int skip_msg = 0;
6050 int giveup = 0;
6051
6052 if (cmd->cmd_ulp_pkt) {
6053 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state;
6054 cmd->cmd_ulp_pkt->pkt_reason = pkt->pkt_reason;
6055 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action;
6056 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln;
6057 }
6058
6059 /*
6060		 * If an unsolicited cross login already created a
6061		 * device, speed up discovery by not retrying the
6062		 * command mindlessly.
6063 */
6064 if (pkt->pkt_pd == NULL &&
6065 fctl_get_remote_port_by_did(port, d_id) != NULL) {
6066 fp_iodone(cmd);
6067 return;
6068 }
6069
6070 if (pkt->pkt_pd != NULL) {
6071 giveup = (pkt->pkt_pd->pd_recepient ==
6072 PD_PLOGI_RECEPIENT) ? 1 : 0;
6073 if (giveup) {
6074 /*
6075 				 * This pd is marked as a PLOGI
6076 				 * recipient; stop retrying.
6077 */
6078 FP_TRACE(FP_NHEAD1(3, 0),
6079 "fp_plogi_intr: stop retry as"
6080 " a cross login was accepted"
6081 " from d_id=%x, port=%p.",
6082 d_id, port);
6083 fp_iodone(cmd);
6084 return;
6085 }
6086 }
6087
6088 if (fp_common_intr(pkt, 0) == FC_SUCCESS) {
6089 return;
6090 }
6091
6092 if ((pd = fctl_get_remote_port_by_did(port, d_id)) != NULL) {
6093 mutex_enter(&pd->pd_mutex);
6094 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
6095 skip_msg++;
6096 }
6097 mutex_exit(&pd->pd_mutex);
6098 }
6099
6100 mutex_enter(&port->fp_mutex);
6101 if (!bailout && !(skip_msg && port->fp_statec_busy) &&
6102 port->fp_statec_busy <= 1 &&
6103 pkt->pkt_reason != FC_REASON_FCAL_OPN_FAIL) {
6104 mutex_exit(&port->fp_mutex);
6105 /*
6106 			 * In case of login collisions, JNI HBAs return the
6107 			 * FC pkt back to the initiator with the state set to
6108 			 * FC_PKT_LS_RJT and the reason set to FC_REASON_LOGICAL_ERROR.
6109 			 * QLC HBAs handle such cases in the FW and do not
6110 			 * return the LS_RJT with a logical error when a
6111 			 * login collision happens.
6112 */
6113 if ((pkt->pkt_state != FC_PKT_LS_RJT) ||
6114 (pkt->pkt_reason != FC_REASON_LOGICAL_ERROR)) {
6115 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt,
6116 "PLOGI to %x failed", d_id);
6117 }
6118 FP_TRACE(FP_NHEAD2(9, 0),
6119 "PLOGI to %x failed. state=%x reason=%x.",
6120 d_id, pkt->pkt_state, pkt->pkt_reason);
6121 } else {
6122 mutex_exit(&port->fp_mutex);
6123 }
6124
6125 fp_iodone(cmd);
6126 return;
6127 }
6128
6129 acc = (la_els_logi_t *)pkt->pkt_resp;
6130
6131 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc,
6132 sizeof (resp), DDI_DEV_AUTOINCR);
6133
6134 ASSERT(resp.ls_code == LA_ELS_ACC);
6135 if (resp.ls_code != LA_ELS_ACC) {
6136 (void) fp_common_intr(pkt, 1);
6137 return;
6138 }
6139
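	/*
	 * Logins to the well-known Name Server or Fabric Controller
	 * addresses don't create a remote port handle; just remember the
	 * class of service the login was accepted on and complete the job.
	 */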
6140 if (d_id == FS_NAME_SERVER || d_id == FS_FABRIC_CONTROLLER) {
6141 mutex_enter(&port->fp_mutex);
6142 port->fp_ns_login_class = FC_TRAN_CLASS(pkt->pkt_tran_flags);
6143 mutex_exit(&port->fp_mutex);
6144 fp_iodone(cmd);
6145 return;
6146 }
6147
6148 ASSERT(acc == (la_els_logi_t *)pkt->pkt_resp);
6149
6150 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&pwwn,
6151 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t),
6152 DDI_DEV_AUTOINCR);
6153
6154 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&nwwn,
6155 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t),
6156 DDI_DEV_AUTOINCR);
6157
6158 ASSERT(fctl_is_wwn_zero(&pwwn) == FC_FAILURE);
6159 ASSERT(fctl_is_wwn_zero(&nwwn) == FC_FAILURE);
6160
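	/*
	 * Bind the response to a remote port handle: if the packet didn't
	 * carry one, look the device up by port WWN and create a fresh
	 * handle if none exists yet.
	 */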
6161 if ((pd = pkt->pkt_pd) == NULL) {
6162 pd = fctl_get_remote_port_by_pwwn(port, &pwwn);
6163 if (pd == NULL) {
6164 FP_TRACE(FP_NHEAD2(1, 0), "fp_plogi_intr: fp %x pd %x",
6165 port->fp_port_id.port_id, d_id);
6166 pd = fctl_create_remote_port(port, &nwwn, &pwwn, d_id,
6167 PD_PLOGI_INITIATOR, KM_NOSLEEP);
6168 if (pd == NULL) {
6169 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
6170 "couldn't create port device handles"
6171 " d_id=%x", d_id);
6172 fp_iodone(cmd);
6173 return;
6174 }
6175 } else {
6176 fc_remote_port_t *tmp_pd;
6177
6178 tmp_pd = fctl_get_remote_port_by_did(port, d_id);
6179 if (tmp_pd != NULL) {
6180 fp_iodone(cmd);
6181 return;
6182 }
6183
6184 mutex_enter(&port->fp_mutex);
6185 mutex_enter(&pd->pd_mutex);
6186 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN) ||
6187 (pd->pd_aux_flags & PD_LOGGED_OUT)) {
6188 cmd->cmd_flags |= FP_CMD_PLOGI_RETAIN;
6189 }
6190
6191 if (pd->pd_type == PORT_DEVICE_OLD) {
6192 if (pd->pd_port_id.port_id != d_id) {
6193 fctl_delist_did_table(port, pd);
6194 pd->pd_type = PORT_DEVICE_CHANGED;
6195 pd->pd_port_id.port_id = d_id;
6196 } else {
6197 pd->pd_type = PORT_DEVICE_NOCHANGE;
6198 }
6199 }
6200
6201 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) {
6202 char ww_name[17];
6203
6204 fc_wwn_to_str(&pd->pd_port_name, ww_name);
6205
6206 mutex_exit(&pd->pd_mutex);
6207 mutex_exit(&port->fp_mutex);
6208 FP_TRACE(FP_NHEAD2(9, 0),
6209 "Possible Duplicate name or address"
6210 " identifiers in the PLOGI response"
6211 " D_ID=%x, PWWN=%s: Please check the"
6212 " configuration", d_id, ww_name);
6213 fp_iodone(cmd);
6214 return;
6215 }
6216 fctl_enlist_did_table(port, pd);
6217 pd->pd_aux_flags &= ~PD_LOGGED_OUT;
6218 mutex_exit(&pd->pd_mutex);
6219 mutex_exit(&port->fp_mutex);
6220 }
6221 } else {
6222 fc_remote_port_t *tmp_pd, *new_wwn_pd;
6223
6224 tmp_pd = fctl_get_remote_port_by_did(port, d_id);
6225 new_wwn_pd = fctl_get_remote_port_by_pwwn(port, &pwwn);
6226
6227 mutex_enter(&port->fp_mutex);
6228 mutex_enter(&pd->pd_mutex);
6229 if (fctl_wwn_cmp(&pd->pd_port_name, &pwwn) == 0) {
6230 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: d_id=%x,"
6231 " pd_state=%x pd_type=%x", d_id, pd->pd_state,
6232 pd->pd_type);
6233 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN &&
6234 pd->pd_type == PORT_DEVICE_OLD) ||
6235 (pd->pd_aux_flags & PD_LOGGED_OUT)) {
6236 pd->pd_type = PORT_DEVICE_NOCHANGE;
6237 } else if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
6238 pd->pd_type = PORT_DEVICE_NEW;
6239 }
6240 } else {
6241 char old_name[17];
6242 char new_name[17];
6243
6244 fc_wwn_to_str(&pd->pd_port_name, old_name);
6245 fc_wwn_to_str(&pwwn, new_name);
6246
6247 FP_TRACE(FP_NHEAD1(9, 0),
6248 "fp_plogi_intr: PWWN of a device with D_ID=%x "
6249 "changed. New PWWN = %s, OLD PWWN = %s ; tmp_pd:%p "
6250 "pd:%p new_wwn_pd:%p, cmd_ulp_pkt:%p, bailout:0x%x",
6251 d_id, new_name, old_name, tmp_pd, pd, new_wwn_pd,
6252 cmd->cmd_ulp_pkt, bailout);
6253
6254 FP_TRACE(FP_NHEAD2(9, 0),
6255 "PWWN of a device with D_ID=%x changed."
6256 " New PWWN = %s, OLD PWWN = %s", d_id,
6257 new_name, old_name);
6258
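			/*
			 * The PLOGI response carries a different port WWN for
			 * this D_ID. Rework the pwwn/did tables to reflect the
			 * new identity and tell the ULPs about every handle
			 * that changed.
			 */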
6259 if (cmd->cmd_ulp_pkt && !bailout) {
6260 fc_remote_node_t *rnodep;
6261 fc_portmap_t *changelist;
6262 fc_portmap_t *listptr;
6263 int len = 1;
6264 /* # entries in changelist */
6265
6266 fctl_delist_pwwn_table(port, pd);
6267
6268 /*
6269 				 * Let's now check whether there already is a pd with
6270 				 * this new WWN in the table. If so, we'll mark
6271 				 * it as invalid.
6272 */
6273
6274 if (new_wwn_pd) {
6275 /*
6276 					 * There is another pd in the pwwn
6277 					 * table with the same WWN that we got
6278 					 * in the PLOGI payload. We have to get
6279 					 * it out of the pwwn table, update the
6280 					 * pd's state (fp_fillout_old_map does
6281 					 * this for us) and add it to the
6282 					 * changelist that goes up to ULPs.
6283 					 *
6284 					 * len is the length of the changelist,
6285 					 * so increment it.
6286 */
6287 len++;
6288
6289 if (tmp_pd != pd) {
6290 /*
6291 						 * Odd case where the pwwn and did
6292 						 * tables are out of sync, but
6293 						 * we will handle that too. See
6294 						 * more comments below.
6295 						 *
6296 						 * One more device that ULPs
6297 						 * should know about, so len
6298 						 * gets incremented again.
6299 */
6300 len++;
6301 }
6302
6303 listptr = changelist = kmem_zalloc(len *
6304 sizeof (*changelist), KM_SLEEP);
6305
6306 mutex_enter(&new_wwn_pd->pd_mutex);
6307 rnodep = new_wwn_pd->pd_remote_nodep;
6308 mutex_exit(&new_wwn_pd->pd_mutex);
6309
6310 /*
6311 * Hold the fd_mutex since
6312 * fctl_copy_portmap_held expects it.
6313 * Preserve lock hierarchy by grabbing
6314 * fd_mutex before pd_mutex
6315 */
6316 if (rnodep) {
6317 mutex_enter(&rnodep->fd_mutex);
6318 }
6319 mutex_enter(&new_wwn_pd->pd_mutex);
6320 fp_fillout_old_map_held(listptr++,
6321 new_wwn_pd, 0);
6322 mutex_exit(&new_wwn_pd->pd_mutex);
6323 if (rnodep) {
6324 mutex_exit(&rnodep->fd_mutex);
6325 }
6326
6327 /*
6328 					 * Safety check:
6329 					 * Let's ensure that the pwwn and did
6330 * tables are in sync. Ideally, we
6331 * should not find that these two pd's
6332 * are different.
6333 */
6334 if (tmp_pd != pd) {
6335 mutex_enter(&tmp_pd->pd_mutex);
6336 rnodep =
6337 tmp_pd->pd_remote_nodep;
6338 mutex_exit(&tmp_pd->pd_mutex);
6339
6340 /* As above grab fd_mutex */
6341 if (rnodep) {
6342 mutex_enter(&rnodep->
6343 fd_mutex);
6344 }
6345 mutex_enter(&tmp_pd->pd_mutex);
6346
6347 fp_fillout_old_map_held(
6348 listptr++, tmp_pd, 0);
6349
6350 mutex_exit(&tmp_pd->pd_mutex);
6351 if (rnodep) {
6352 mutex_exit(&rnodep->
6353 fd_mutex);
6354 }
6355
6356 /*
6357 * Now add "pd" (not tmp_pd)
6358 * to fp_did_table to sync it up
6359 * with fp_pwwn_table
6360 *
6361 * pd->pd_mutex is already held
6362 * at this point
6363 */
6364 fctl_enlist_did_table(port, pd);
6365 }
6366 } else {
6367 listptr = changelist = kmem_zalloc(
6368 sizeof (*changelist), KM_SLEEP);
6369 }
6370
6371 ASSERT(changelist != NULL);
6372
6373 fp_fillout_changed_map(listptr, pd, &d_id,
6374 &pwwn);
6375 fctl_enlist_pwwn_table(port, pd);
6376
6377 mutex_exit(&pd->pd_mutex);
6378 mutex_exit(&port->fp_mutex);
6379
6380 fp_iodone(cmd);
6381
6382 (void) fp_ulp_devc_cb(port, changelist, len,
6383 len, KM_NOSLEEP, 0);
6384
6385 return;
6386 }
6387 }
6388
6389 if (pd->pd_porttype.port_type == FC_NS_PORT_NL) {
6390 nl_port = 1;
6391 }
6392 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) {
6393 pd->pd_aux_flags &= ~PD_LOGGED_OUT;
6394 }
6395
6396 mutex_exit(&pd->pd_mutex);
6397 mutex_exit(&port->fp_mutex);
6398
6399 if (tmp_pd == NULL) {
6400 mutex_enter(&port->fp_mutex);
6401 mutex_enter(&pd->pd_mutex);
6402 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) {
6403 char ww_name[17];
6404
6405 fc_wwn_to_str(&pd->pd_port_name, ww_name);
6406 mutex_exit(&pd->pd_mutex);
6407 mutex_exit(&port->fp_mutex);
6408 FP_TRACE(FP_NHEAD2(9, 0),
6409 "Possible Duplicate name or address"
6410 " identifiers in the PLOGI response"
6411 " D_ID=%x, PWWN=%s: Please check the"
6412 " configuration", d_id, ww_name);
6413 fp_iodone(cmd);
6414 return;
6415 }
6416 fctl_enlist_did_table(port, pd);
6417 pd->pd_aux_flags &= ~PD_LOGGED_OUT;
6418 mutex_exit(&pd->pd_mutex);
6419 mutex_exit(&port->fp_mutex);
6420 }
6421 }
6422 fp_register_login(&pkt->pkt_resp_acc, pd, acc,
6423 FC_TRAN_CLASS(pkt->pkt_tran_flags));
6424
6425 if (cmd->cmd_ulp_pkt) {
6426 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state;
6427 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action;
6428 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln;
6429 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) {
6430 if (pd != NULL) {
6431 FP_TRACE(FP_NHEAD1(9, 0),
6432 				    "fp_plogi_intr: "
6433 "ulp_pkt's pd is NULL, get a pd %p",
6434 pd);
6435 mutex_enter(&pd->pd_mutex);
6436 pd->pd_ref_count++;
6437 mutex_exit(&pd->pd_mutex);
6438 }
6439 cmd->cmd_ulp_pkt->pkt_pd = pd;
6440 }
6441 bcopy((caddr_t)&pkt->pkt_resp_fhdr,
6442 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr,
6443 sizeof (fc_frame_hdr_t));
6444 bcopy((caddr_t)pkt->pkt_resp,
6445 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp,
6446 sizeof (la_els_logi_t));
6447 }
6448
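	/*
	 * On a private loop (or for an NL_Port), follow the PLOGI with an
	 * ADISC so the hard address can be validated; the same fp_cmd_t is
	 * reused for the ADISC exchange.
	 */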
6449 mutex_enter(&port->fp_mutex);
6450 if (port->fp_topology == FC_TOP_PRIVATE_LOOP || nl_port) {
6451 mutex_enter(&pd->pd_mutex);
6452
6453 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
6454 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
6455 cmd->cmd_retry_count = fp_retry_count;
6456
6457 /*
6458 * If the fc_remote_port_t pointer is not set in the given
6459 * fc_packet_t, then this fc_remote_port_t must have just
6460 * been created. Save the pointer and also increment the
6461 * fc_remote_port_t reference count.
6462 */
6463 if (pkt->pkt_pd == NULL) {
6464 pkt->pkt_pd = pd;
6465 pd->pd_ref_count++; /* It's in use! */
6466 }
6467
6468 fp_adisc_init(cmd, cmd->cmd_job);
6469
6470 pkt->pkt_cmdlen = sizeof (la_els_adisc_t);
6471 pkt->pkt_rsplen = sizeof (la_els_adisc_t);
6472
6473 mutex_exit(&pd->pd_mutex);
6474 mutex_exit(&port->fp_mutex);
6475
6476 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) {
6477 return;
6478 }
6479 } else {
6480 mutex_exit(&port->fp_mutex);
6481 }
6482
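	/*
	 * The login isn't being retained, so reuse the same command to send
	 * an explicit LOGO and undo the PLOGI that just completed.
	 */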
6483 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) {
6484 mutex_enter(&port->fp_mutex);
6485 mutex_enter(&pd->pd_mutex);
6486
6487 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
6488 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
6489 cmd->cmd_retry_count = fp_retry_count;
6490
6491 fp_logo_init(pd, cmd, cmd->cmd_job);
6492
6493 pkt->pkt_cmdlen = sizeof (la_els_logo_t);
6494 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN;
6495
6496 mutex_exit(&pd->pd_mutex);
6497 mutex_exit(&port->fp_mutex);
6498
6499 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) {
6500 return;
6501 }
6502
6503 }
6504 fp_iodone(cmd);
6505 }
6506
6507
6508 /*
6509 * Handle solicited ADISC response
6510 */
6511 static void
6512 fp_adisc_intr(fc_packet_t *pkt)
6513 {
6514 int rval;
6515 int bailout;
6516 fp_cmd_t *cmd, *logi_cmd;
6517 fc_local_port_t *port;
6518 fc_remote_port_t *pd;
6519 la_els_adisc_t *acc;
6520 ls_code_t resp;
6521 fc_hardaddr_t ha;
6522 fc_portmap_t *changelist;
6523 int initiator, adiscfail = 0;
6524
6525 pd = pkt->pkt_pd;
6526 cmd = pkt->pkt_ulp_private;
6527 port = cmd->cmd_port;
6528
6529 #ifndef __lock_lint
6530 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter);
6531 #endif
6532
6533 ASSERT(pd != NULL && port != NULL && cmd != NULL);
6534
6535 mutex_enter(&port->fp_mutex);
6536 port->fp_out_fpcmds--;
6537 bailout = ((port->fp_statec_busy ||
6538 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) &&
6539 cmd->cmd_ulp_pkt) ? 1 : 0;
6540 mutex_exit(&port->fp_mutex);
6541
6542 if (bailout) {
6543 fp_iodone(cmd);
6544 return;
6545 }
6546
6547 if (pkt->pkt_state == FC_PKT_SUCCESS && pkt->pkt_resp_resid == 0) {
6548 acc = (la_els_adisc_t *)pkt->pkt_resp;
6549
6550 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp,
6551 (uint8_t *)acc, sizeof (resp), DDI_DEV_AUTOINCR);
6552
6553 if (resp.ls_code == LA_ELS_ACC) {
6554 int is_private;
6555
6556 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&ha,
6557 (uint8_t *)&acc->hard_addr, sizeof (ha),
6558 DDI_DEV_AUTOINCR);
6559
6560 mutex_enter(&port->fp_mutex);
6561
6562 is_private =
6563 (port->fp_topology == FC_TOP_PRIVATE_LOOP) ? 1 : 0;
6564
6565 mutex_enter(&pd->pd_mutex);
6566 if ((pd->pd_aux_flags & PD_IN_DID_QUEUE) == 0) {
6567 fctl_enlist_did_table(port, pd);
6568 }
6569 mutex_exit(&pd->pd_mutex);
6570
6571 mutex_exit(&port->fp_mutex);
6572
6573 mutex_enter(&pd->pd_mutex);
6574 if (pd->pd_type != PORT_DEVICE_NEW) {
6575 if (is_private && (pd->pd_hard_addr.hard_addr !=
6576 ha.hard_addr)) {
6577 pd->pd_type = PORT_DEVICE_CHANGED;
6578 } else {
6579 pd->pd_type = PORT_DEVICE_NOCHANGE;
6580 }
6581 }
6582
6583 if (is_private && (ha.hard_addr &&
6584 pd->pd_port_id.port_id != ha.hard_addr)) {
6585 char ww_name[17];
6586
6587 fc_wwn_to_str(&pd->pd_port_name, ww_name);
6588
6589 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
6590 "NL_Port Identifier %x doesn't match"
6591 " with Hard Address %x, Will use Port"
6592 " WWN %s", pd->pd_port_id.port_id,
6593 ha.hard_addr, ww_name);
6594
6595 pd->pd_hard_addr.hard_addr = 0;
6596 } else {
6597 pd->pd_hard_addr.hard_addr = ha.hard_addr;
6598 }
6599 mutex_exit(&pd->pd_mutex);
6600 } else {
6601 if (fp_common_intr(pkt, 0) == FC_SUCCESS) {
6602 return;
6603 }
6604 }
6605 } else {
6606 if (fp_common_intr(pkt, 0) == FC_SUCCESS) {
6607 return;
6608 }
6609
6610 mutex_enter(&port->fp_mutex);
6611 if (port->fp_statec_busy <= 1) {
6612 mutex_exit(&port->fp_mutex);
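			/*
			 * An LS_RJT with reason "command unable" typically
			 * means the remote port no longer holds our login;
			 * allocate a fresh PLOGI and try to log in again
			 * before giving up on the device.
			 */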
6613 if (pkt->pkt_state == FC_PKT_LS_RJT &&
6614 pkt->pkt_reason == FC_REASON_CMD_UNABLE) {
6615 uchar_t class;
6616 int cmd_flag;
6617 uint32_t src_id;
6618
6619 class = fp_get_nextclass(port,
6620 FC_TRAN_CLASS_INVALID);
6621 if (class == FC_TRAN_CLASS_INVALID) {
6622 fp_iodone(cmd);
6623 return;
6624 }
6625
6626 FP_TRACE(FP_NHEAD1(1, 0), "ADISC re-login; "
6627 "fp_state=0x%x, pkt_state=0x%x, "
6628 "reason=0x%x, class=0x%x",
6629 port->fp_state, pkt->pkt_state,
6630 pkt->pkt_reason, class);
6631 cmd_flag = FP_CMD_PLOGI_RETAIN;
6632
6633 logi_cmd = fp_alloc_pkt(port,
6634 sizeof (la_els_logi_t),
6635 sizeof (la_els_logi_t), KM_SLEEP, pd);
6636 if (logi_cmd == NULL) {
6637 fp_iodone(cmd);
6638 return;
6639 }
6640
6641 logi_cmd->cmd_pkt.pkt_tran_flags =
6642 FC_TRAN_INTR | class;
6643 logi_cmd->cmd_pkt.pkt_tran_type =
6644 FC_PKT_EXCHANGE;
6645 logi_cmd->cmd_flags = cmd_flag;
6646 logi_cmd->cmd_retry_count = fp_retry_count;
6647 logi_cmd->cmd_ulp_pkt = NULL;
6648
6649 mutex_enter(&port->fp_mutex);
6650 src_id = port->fp_port_id.port_id;
6651 mutex_exit(&port->fp_mutex);
6652
6653 fp_xlogi_init(port, logi_cmd, src_id,
6654 pkt->pkt_cmd_fhdr.d_id, fp_plogi_intr,
6655 cmd->cmd_job, LA_ELS_PLOGI);
6656 if (pd) {
6657 mutex_enter(&pd->pd_mutex);
6658 pd->pd_flags = PD_ELS_IN_PROGRESS;
6659 mutex_exit(&pd->pd_mutex);
6660 }
6661
6662 if (fp_sendcmd(port, logi_cmd,
6663 port->fp_fca_handle) == FC_SUCCESS) {
6664 fp_free_pkt(cmd);
6665 return;
6666 } else {
6667 fp_free_pkt(logi_cmd);
6668 }
6669 } else {
6670 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt,
6671 "ADISC to %x failed, cmd_flags=%x",
6672 pkt->pkt_cmd_fhdr.d_id, cmd->cmd_flags);
6673 cmd->cmd_flags &= ~FP_CMD_PLOGI_RETAIN;
6674 adiscfail = 1;
6675 }
6676 } else {
6677 mutex_exit(&port->fp_mutex);
6678 }
6679 }
6680
6681 if (cmd->cmd_ulp_pkt) {
6682 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state;
6683 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action;
6684 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln;
6685 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) {
6686 cmd->cmd_ulp_pkt->pkt_pd = pd;
6687 FP_TRACE(FP_NHEAD1(9, 0),
6688 			    "fp_adisc_intr: "
6689 "ulp_pkt's pd is NULL, get a pd %p",
6690 pd);
6691
6692 }
6693 bcopy((caddr_t)&pkt->pkt_resp_fhdr,
6694 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr,
6695 sizeof (fc_frame_hdr_t));
6696 bcopy((caddr_t)pkt->pkt_resp,
6697 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp,
6698 sizeof (la_els_adisc_t));
6699 }
6700
6701 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) {
6702 FP_TRACE(FP_NHEAD1(9, 0),
6703 "fp_adisc_intr: Perform LOGO.cmd_flags=%x, "
6704 "fp_retry_count=%x, ulp_pkt=%p",
6705 cmd->cmd_flags, fp_retry_count, cmd->cmd_ulp_pkt);
6706
6707 mutex_enter(&port->fp_mutex);
6708 mutex_enter(&pd->pd_mutex);
6709
6710 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
6711 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
6712 cmd->cmd_retry_count = fp_retry_count;
6713
6714 fp_logo_init(pd, cmd, cmd->cmd_job);
6715
6716 pkt->pkt_cmdlen = sizeof (la_els_logo_t);
6717 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN;
6718
6719 mutex_exit(&pd->pd_mutex);
6720 mutex_exit(&port->fp_mutex);
6721
6722 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
6723 if (adiscfail) {
6724 mutex_enter(&pd->pd_mutex);
6725 initiator =
6726 ((pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0);
6727 pd->pd_state = PORT_DEVICE_VALID;
6728 pd->pd_aux_flags |= PD_LOGGED_OUT;
6729 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) {
6730 pd->pd_type = PORT_DEVICE_NEW;
6731 } else {
6732 pd->pd_type = PORT_DEVICE_NOCHANGE;
6733 }
6734 mutex_exit(&pd->pd_mutex);
6735
6736 changelist =
6737 kmem_zalloc(sizeof (*changelist), KM_SLEEP);
6738
6739 if (initiator) {
6740 fp_unregister_login(pd);
6741 fctl_copy_portmap(changelist, pd);
6742 } else {
6743 fp_fillout_old_map(changelist, pd, 0);
6744 }
6745
6746 FP_TRACE(FP_NHEAD1(9, 0),
6747 "fp_adisc_intr: Dev change notification "
6748 "to ULP port=%p, pd=%p, map_type=%x map_state=%x "
6749 "map_flags=%x initiator=%d", port, pd,
6750 changelist->map_type, changelist->map_state,
6751 changelist->map_flags, initiator);
6752
6753 (void) fp_ulp_devc_cb(port, changelist,
6754 1, 1, KM_SLEEP, 0);
6755 }
6756 if (rval == FC_SUCCESS) {
6757 return;
6758 }
6759 }
6760 fp_iodone(cmd);
6761 }
6762
6763
6764 /*
6765 * Handle solicited LOGO response
6766 */
6767 static void
6768 fp_logo_intr(fc_packet_t *pkt)
6769 {
6770 ls_code_t resp;
6771 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port;
6772
6773 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex);
6774 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--;
6775 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex);
6776
6777 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp,
6778 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR);
6779
6780 if (FP_IS_PKT_ERROR(pkt)) {
6781 (void) fp_common_intr(pkt, 1);
6782 return;
6783 }
6784
6785 ASSERT(resp.ls_code == LA_ELS_ACC);
6786 if (resp.ls_code != LA_ELS_ACC) {
6787 (void) fp_common_intr(pkt, 1);
6788 return;
6789 }
6790
6791 if (pkt->pkt_pd != NULL) {
6792 fp_unregister_login(pkt->pkt_pd);
6793 }
6794
6795 fp_iodone(pkt->pkt_ulp_private);
6796 }
6797
6798
6799 /*
6800 * Handle solicited RNID response
6801 */
6802 static void
6803 fp_rnid_intr(fc_packet_t *pkt)
6804 {
6805 ls_code_t resp;
6806 job_request_t *job;
6807 fp_cmd_t *cmd;
6808 la_els_rnid_acc_t *acc;
6809 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port;
6810
6811 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp,
6812 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR);
6813 cmd = pkt->pkt_ulp_private;
6814
6815 mutex_enter(&cmd->cmd_port->fp_mutex);
6816 cmd->cmd_port->fp_out_fpcmds--;
6817 mutex_exit(&cmd->cmd_port->fp_mutex);
6818
6819 job = cmd->cmd_job;
6820 ASSERT(job->job_private != NULL);
6821
6822 /* If failure or LS_RJT then retry the packet, if needed */
6823 if (pkt->pkt_state != FC_PKT_SUCCESS || resp.ls_code != LA_ELS_ACC) {
6824 (void) fp_common_intr(pkt, 1);
6825 return;
6826 }
6827
6828 	/* Save the node_id response into memory allocated in the ioctl code */
6829 acc = (la_els_rnid_acc_t *)pkt->pkt_resp;
6830
6831 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)job->job_private,
6832 (uint8_t *)acc, sizeof (la_els_rnid_acc_t), DDI_DEV_AUTOINCR);
6833
6834 /* wakeup the ioctl thread and free the pkt */
6835 fp_iodone(cmd);
6836 }
6837
6838
6839 /*
6840 * Handle solicited RLS response
6841 */
6842 static void
6843 fp_rls_intr(fc_packet_t *pkt)
6844 {
6845 ls_code_t resp;
6846 job_request_t *job;
6847 fp_cmd_t *cmd;
6848 la_els_rls_acc_t *acc;
6849 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port;
6850
6851 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp,
6852 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR);
6853 cmd = pkt->pkt_ulp_private;
6854
6855 mutex_enter(&cmd->cmd_port->fp_mutex);
6856 cmd->cmd_port->fp_out_fpcmds--;
6857 mutex_exit(&cmd->cmd_port->fp_mutex);
6858
6859 job = cmd->cmd_job;
6860 ASSERT(job->job_private != NULL);
6861
6862 /* If failure or LS_RJT then retry the packet, if needed */
6863 if (FP_IS_PKT_ERROR(pkt) || resp.ls_code != LA_ELS_ACC) {
6864 (void) fp_common_intr(pkt, 1);
6865 return;
6866 }
6867
6868 /* Save link error status block in memory allocated in ioctl code */
6869 acc = (la_els_rls_acc_t *)pkt->pkt_resp;
6870
6871 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)job->job_private,
6872 (uint8_t *)&acc->rls_link_params, sizeof (fc_rls_acc_t),
6873 DDI_DEV_AUTOINCR);
6874
6875 /* wakeup the ioctl thread and free the pkt */
6876 fp_iodone(cmd);
6877 }
6878
6879
6880 /*
6881 * A solicited command completion interrupt (mostly for commands
6882 * that require almost no post processing such as SCR ELS)
6883 */
6884 static void
6885 fp_intr(fc_packet_t *pkt)
6886 {
6887 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex);
6888 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--;
6889 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex);
6890
6891 if (FP_IS_PKT_ERROR(pkt)) {
6892 (void) fp_common_intr(pkt, 1);
6893 return;
6894 }
6895 fp_iodone(pkt->pkt_ulp_private);
6896 }
6897
6898
6899 /*
6900 * Handle the underlying port's state change
6901 */
6902 static void
6903 fp_statec_cb(opaque_t port_handle, uint32_t state)
6904 {
6905 fc_local_port_t *port = port_handle;
6906 job_request_t *job;
6907
6908 /*
6909 	 * If it is not possible to process the callbacks,
6910 	 * just drop the callback on the floor; don't bother
6911 	 * to do something that isn't safe at this time.
6912 */
6913 mutex_enter(&port->fp_mutex);
6914 if ((port->fp_soft_state &
6915 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) ||
6916 (FC_PORT_STATE_MASK(port->fp_state) == FC_PORT_STATE_MASK(state))) {
6917 mutex_exit(&port->fp_mutex);
6918 return;
6919 }
6920
6921 if (port->fp_statec_busy == 0) {
6922 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
6923 #ifdef DEBUG
6924 } else {
6925 ASSERT(port->fp_soft_state & FP_SOFT_IN_STATEC_CB);
6926 #endif
6927 }
6928
6929 port->fp_statec_busy++;
6930
6931 /*
6932 * For now, force the trusted method of device authentication (by
6933 	 * PLOGI) when LIPs do not involve an OFFLINE to ONLINE transition.
6934 */
6935 if (FC_PORT_STATE_MASK(state) == FC_STATE_LIP ||
6936 FC_PORT_STATE_MASK(state) == FC_STATE_LIP_LBIT_SET) {
6937 state = FC_PORT_SPEED_MASK(port->fp_state) | FC_STATE_LOOP;
6938 fp_port_offline(port, 0);
6939 }
6940 mutex_exit(&port->fp_mutex);
6941
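	/*
	 * Dispatch the state change to the job thread: OFFLINE and
	 * ONLINE/LOOP transitions get their own asynchronous jobs, RESET
	 * transitions are queued as ULP-notify jobs, and unrecognized
	 * states simply drop the statec-busy hold taken above.
	 */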
6942 switch (FC_PORT_STATE_MASK(state)) {
6943 case FC_STATE_OFFLINE:
6944 job = fctl_alloc_job(JOB_PORT_OFFLINE,
6945 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP);
6946 if (job == NULL) {
6947 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
6948 			    "fp_statec_cb() couldn't submit a job "
6949 			    "to the thread: failing..");
6950 mutex_enter(&port->fp_mutex);
6951 if (--port->fp_statec_busy == 0) {
6952 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
6953 }
6954 mutex_exit(&port->fp_mutex);
6955 return;
6956 }
6957 mutex_enter(&port->fp_mutex);
6958 /*
6959 * Zero out this field so that we do not retain
6960 		 * the fabric name as it's no longer valid
6961 */
6962 bzero(&port->fp_fabric_name, sizeof (la_wwn_t));
6963 port->fp_state = state;
6964 mutex_exit(&port->fp_mutex);
6965
6966 fctl_enque_job(port, job);
6967 break;
6968
6969 case FC_STATE_ONLINE:
6970 case FC_STATE_LOOP:
6971 mutex_enter(&port->fp_mutex);
6972 port->fp_state = state;
6973
6974 if (port->fp_offline_tid) {
6975 timeout_id_t tid;
6976
6977 tid = port->fp_offline_tid;
6978 port->fp_offline_tid = NULL;
6979 mutex_exit(&port->fp_mutex);
6980 (void) untimeout(tid);
6981 } else {
6982 mutex_exit(&port->fp_mutex);
6983 }
6984
6985 job = fctl_alloc_job(JOB_PORT_ONLINE,
6986 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP);
6987 if (job == NULL) {
6988 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
6989 "fp_statec_cb() couldn't submit a job "
6990 "to the thread: failing..");
6991
6992 mutex_enter(&port->fp_mutex);
6993 if (--port->fp_statec_busy == 0) {
6994 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
6995 }
6996 mutex_exit(&port->fp_mutex);
6997 return;
6998 }
6999 fctl_enque_job(port, job);
7000 break;
7001
7002 case FC_STATE_RESET_REQUESTED:
7003 mutex_enter(&port->fp_mutex);
7004 port->fp_state = FC_STATE_OFFLINE;
7005 port->fp_soft_state |= FP_SOFT_IN_FCA_RESET;
7006 mutex_exit(&port->fp_mutex);
7007 /* FALLTHROUGH */
7008
7009 case FC_STATE_RESET:
7010 job = fctl_alloc_job(JOB_ULP_NOTIFY,
7011 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP);
7012 if (job == NULL) {
7013 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
7014 "fp_statec_cb() couldn't submit a job"
7015 " to the thread: failing..");
7016
7017 mutex_enter(&port->fp_mutex);
7018 if (--port->fp_statec_busy == 0) {
7019 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
7020 }
7021 mutex_exit(&port->fp_mutex);
7022 return;
7023 }
7024
7025 /* squeeze into some field in the job structure */
7026 job->job_ulp_listlen = FC_PORT_STATE_MASK(state);
7027 fctl_enque_job(port, job);
7028 break;
7029
7030 case FC_STATE_TARGET_PORT_RESET:
7031 (void) fp_ulp_notify(port, state, KM_NOSLEEP);
7032 /* FALLTHROUGH */
7033
7034 case FC_STATE_NAMESERVICE:
7035 /* FALLTHROUGH */
7036
7037 default:
7038 mutex_enter(&port->fp_mutex);
7039 if (--port->fp_statec_busy == 0) {
7040 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
7041 }
7042 mutex_exit(&port->fp_mutex);
7043 break;
7044 }
7045 }
7046
7047
7048 /*
7049 * Register with the Name Server for RSCNs
7050 */
7051 static int
7052 fp_ns_scr(fc_local_port_t *port, job_request_t *job, uchar_t scr_func,
7053 int sleep)
7054 {
7055 uint32_t s_id;
7056 uchar_t class;
7057 fc_scr_req_t payload;
7058 fp_cmd_t *cmd;
7059 fc_packet_t *pkt;
7060
7061 mutex_enter(&port->fp_mutex);
7062 s_id = port->fp_port_id.port_id;
7063 class = port->fp_ns_login_class;
7064 mutex_exit(&port->fp_mutex);
7065
7066 cmd = fp_alloc_pkt(port, sizeof (fc_scr_req_t),
7067 sizeof (fc_scr_resp_t), sleep, NULL);
7068 if (cmd == NULL) {
7069 return (FC_NOMEM);
7070 }
7071
7072 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
7073 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
7074 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
7075 cmd->cmd_retry_count = fp_retry_count;
7076 cmd->cmd_ulp_pkt = NULL;
7077
7078 pkt = &cmd->cmd_pkt;
7079 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
7080
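	/*
	 * SCR is addressed to the Fabric Controller at well-known address
	 * 0xFFFFFD.
	 */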
7081 fp_els_init(cmd, s_id, 0xFFFFFD, fp_intr, job);
7082
7083 payload.ls_code.ls_code = LA_ELS_SCR;
7084 payload.ls_code.mbz = 0;
7085 payload.scr_rsvd = 0;
7086 payload.scr_func = scr_func;
7087
7088 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
7089 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
7090
7091 job->job_counter = 1;
7092
7093 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
7094 fp_iodone(cmd);
7095 }
7096
7097 return (FC_SUCCESS);
7098 }
7099
7100
7101 /*
7102 * There are basically two methods to determine the total number of
7103  * devices out in the NS database; reading the details of the two
7104 * methods described below, it shouldn't be hard to identify which
7105 * of the two methods is better.
7106 *
7107 * Method 1.
7108  * Iteratively issue GANs until all port identifiers are walked
7109 *
7110 * Method 2.
7111  * Issue GID_PT (Get Port Identifiers) with the Maximum Residual
7112 * field in the request CT HEADER set to accommodate only the
7113 * CT HEADER in the response frame. And if FC-GS2 has been
7114 * carefully read, the NS here has a chance to FS_ACC the
7115 * request and indicate the residual size in the FS_ACC.
7116 *
7117 * Method 2 is wonderful, although it's not mandatory for the NS
7118 * to update the Maximum/Residual Field as can be seen in 4.3.1.6
7119 * (note with particular care the use of the auxiliary verb 'may')
7120 *
7121 */
7122 static int
7123 fp_ns_get_devcount(fc_local_port_t *port, job_request_t *job, int create,
7124 int sleep)
7125 {
7126 int flags;
7127 int rval;
7128 uint32_t src_id;
7129 fctl_ns_req_t *ns_cmd;
7130
7131 ASSERT(!MUTEX_HELD(&port->fp_mutex));
7132
7133 mutex_enter(&port->fp_mutex);
7134 src_id = port->fp_port_id.port_id;
7135 mutex_exit(&port->fp_mutex);
7136
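	/*
	 * When allowed, use the GID_PT "smart count" shortcut to get the
	 * device count in a single query; otherwise fall back to walking
	 * the Name Server with GA_NXT, optionally creating device handles
	 * as entries are discovered.
	 */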
7137 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) {
7138 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pt_t),
7139 sizeof (ns_resp_gid_pt_t), 0,
7140 (FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF), sleep);
7141
7142 if (ns_cmd == NULL) {
7143 return (FC_NOMEM);
7144 }
7145
7146 ns_cmd->ns_cmd_code = NS_GID_PT;
7147 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.port_type
7148 = FC_NS_PORT_NX; /* All port types */
7149 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.rsvd = 0;
7150
7151 } else {
7152 uint32_t ns_flags;
7153
7154 ns_flags = FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF;
7155 if (create) {
7156 ns_flags |= FCTL_NS_CREATE_DEVICE;
7157 }
7158 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t),
7159 sizeof (ns_resp_gan_t), sizeof (int), ns_flags, sleep);
7160
7161 if (ns_cmd == NULL) {
7162 return (FC_NOMEM);
7163 }
7164 ns_cmd->ns_gan_index = 0;
7165 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID;
7166 ns_cmd->ns_cmd_code = NS_GA_NXT;
7167 ns_cmd->ns_gan_max = 0xFFFF;
7168
7169 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = src_id;
7170 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0;
7171 }
7172
7173 flags = job->job_flags;
7174 job->job_flags &= ~JOB_TYPE_FP_ASYNC;
7175 job->job_counter = 1;
7176
7177 rval = fp_ns_query(port, ns_cmd, job, 1, sleep);
7178 job->job_flags = flags;
7179
7180 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) {
7181 uint16_t max_resid;
7182
7183 /*
7184 * Revert to scanning the NS if NS_GID_PT isn't
7185 		 * helping us figure out the total number of devices.
7186 */
7187 if (job->job_result != FC_SUCCESS ||
7188 ns_cmd->ns_resp_hdr.ct_cmdrsp != FS_ACC_IU) {
7189 mutex_enter(&port->fp_mutex);
7190 port->fp_options &= ~FP_NS_SMART_COUNT;
7191 mutex_exit(&port->fp_mutex);
7192
7193 fctl_free_ns_cmd(ns_cmd);
7194 return (fp_ns_get_devcount(port, job, create, sleep));
7195 }
7196
7197 mutex_enter(&port->fp_mutex);
7198 port->fp_total_devices = 1;
7199 max_resid = ns_cmd->ns_resp_hdr.ct_aiusize;
7200 if (max_resid) {
7201 /*
7202 			 * Since a port identifier is 4 bytes and max_resid
7203 * is also in WORDS, max_resid simply indicates
7204 * the total number of port identifiers not
7205 * transferred
7206 */
7207 port->fp_total_devices += max_resid;
7208 }
7209 mutex_exit(&port->fp_mutex);
7210 }
7211 mutex_enter(&port->fp_mutex);
7212 port->fp_total_devices = *((int *)ns_cmd->ns_data_buf);
7213 mutex_exit(&port->fp_mutex);
7214 fctl_free_ns_cmd(ns_cmd);
7215
7216 return (rval);
7217 }
7218
7219 /*
7220 * One heck of a function to serve userland.
7221 */
7222 static int
7223 fp_fciocmd(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio)
7224 {
7225 int rval = 0;
7226 int jcode;
7227 uint32_t ret;
7228 uchar_t open_flag;
7229 fcio_t *kfcio;
7230 job_request_t *job;
7231 boolean_t use32 = B_FALSE;
7232
7233 #ifdef _MULTI_DATAMODEL
7234 switch (ddi_model_convert_from(mode & FMODELS)) {
7235 case DDI_MODEL_ILP32:
7236 use32 = B_TRUE;
7237 break;
7238
7239 case DDI_MODEL_NONE:
7240 default:
7241 break;
7242 }
7243 #endif
7244
7245 mutex_enter(&port->fp_mutex);
7246 if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
7247 FP_SOFT_IN_UNSOL_CB)) {
7248 fcio->fcio_errno = FC_STATEC_BUSY;
7249 mutex_exit(&port->fp_mutex);
7250 rval = EAGAIN;
7251 if (fp_fcio_copyout(fcio, data, mode)) {
7252 rval = EFAULT;
7253 }
7254 return (rval);
7255 }
7256 open_flag = port->fp_flag;
7257 mutex_exit(&port->fp_mutex);
7258
7259 if (fp_check_perms(open_flag, fcio->fcio_cmd) != FC_SUCCESS) {
7260 fcio->fcio_errno = FC_FAILURE;
7261 rval = EACCES;
7262 if (fp_fcio_copyout(fcio, data, mode)) {
7263 rval = EFAULT;
7264 }
7265 return (rval);
7266 }
7267
7268 /*
7269 * If an exclusive open was demanded during open, don't let
7270 	 * either innocuous or devil threads share the file
7271 	 * descriptor and fire down exclusive-access commands.
7272 */
7273 mutex_enter(&port->fp_mutex);
7274 if (port->fp_flag & FP_EXCL) {
7275 if (port->fp_flag & FP_EXCL_BUSY) {
7276 mutex_exit(&port->fp_mutex);
7277 fcio->fcio_errno = FC_FAILURE;
7278 return (EBUSY);
7279 }
7280 port->fp_flag |= FP_EXCL_BUSY;
7281 }
7282 mutex_exit(&port->fp_mutex);
7283
7284 fcio->fcio_errno = FC_SUCCESS;
7285
7286 switch (fcio->fcio_cmd) {
7287 case FCIO_GET_HOST_PARAMS: {
7288 fc_port_dev_t *val;
7289 fc_port_dev32_t *val32;
7290 int index;
7291 int lilp_device_count;
7292 fc_lilpmap_t *lilp_map;
7293 uchar_t *alpa_list;
7294
7295 if (use32 == B_TRUE) {
7296 if (fcio->fcio_olen != sizeof (*val32) ||
7297 fcio->fcio_xfer != FCIO_XFER_READ) {
7298 rval = EINVAL;
7299 break;
7300 }
7301 } else {
7302 if (fcio->fcio_olen != sizeof (*val) ||
7303 fcio->fcio_xfer != FCIO_XFER_READ) {
7304 rval = EINVAL;
7305 break;
7306 }
7307 }
7308
7309 val = kmem_zalloc(sizeof (*val), KM_SLEEP);
7310
7311 mutex_enter(&port->fp_mutex);
7312 val->dev_did = port->fp_port_id;
7313 val->dev_hard_addr = port->fp_hard_addr;
7314 val->dev_pwwn = port->fp_service_params.nport_ww_name;
7315 val->dev_nwwn = port->fp_service_params.node_ww_name;
7316 val->dev_state = port->fp_state;
7317
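		/*
		 * Derive this port's LILP position by scanning the AL_PA
		 * list for our own port identifier.
		 */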
7318 lilp_map = &port->fp_lilp_map;
7319 alpa_list = &lilp_map->lilp_alpalist[0];
7320 lilp_device_count = lilp_map->lilp_length;
7321 for (index = 0; index < lilp_device_count; index++) {
7322 uint32_t d_id;
7323
7324 d_id = alpa_list[index];
7325 if (d_id == port->fp_port_id.port_id) {
7326 break;
7327 }
7328 }
7329 val->dev_did.priv_lilp_posit = (uint8_t)(index & 0xff);
7330
7331 bcopy(port->fp_fc4_types, val->dev_type,
7332 sizeof (port->fp_fc4_types));
7333 mutex_exit(&port->fp_mutex);
7334
7335 if (use32 == B_TRUE) {
7336 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP);
7337
7338 val32->dev_did = val->dev_did;
7339 val32->dev_hard_addr = val->dev_hard_addr;
7340 val32->dev_pwwn = val->dev_pwwn;
7341 val32->dev_nwwn = val->dev_nwwn;
7342 val32->dev_state = val->dev_state;
7343 val32->dev_did.priv_lilp_posit =
7344 val->dev_did.priv_lilp_posit;
7345
7346 bcopy(val->dev_type, val32->dev_type,
7347 sizeof (port->fp_fc4_types));
7348
7349 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf,
7350 fcio->fcio_olen, mode) == 0) {
7351 if (fp_fcio_copyout(fcio, data, mode)) {
7352 rval = EFAULT;
7353 }
7354 } else {
7355 rval = EFAULT;
7356 }
7357
7358 kmem_free(val32, sizeof (*val32));
7359 } else {
7360 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf,
7361 fcio->fcio_olen, mode) == 0) {
7362 if (fp_fcio_copyout(fcio, data, mode)) {
7363 rval = EFAULT;
7364 }
7365 } else {
7366 rval = EFAULT;
7367 }
7368 }
7369
7370 /* need to free "val" here */
7371 kmem_free(val, sizeof (*val));
7372 break;
7373 }
7374
7375 case FCIO_GET_OTHER_ADAPTER_PORTS: {
7376 uint32_t index;
7377 char *tmpPath;
7378 fc_local_port_t *tmpPort;
7379
7380 if (fcio->fcio_olen < MAXPATHLEN ||
7381 fcio->fcio_ilen != sizeof (uint32_t)) {
7382 rval = EINVAL;
7383 break;
7384 }
7385 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) {
7386 rval = EFAULT;
7387 break;
7388 }
7389
7390 tmpPort = fctl_get_adapter_port_by_index(port, index);
7391 if (tmpPort == NULL) {
7392 FP_TRACE(FP_NHEAD1(9, 0),
7393 "User supplied index out of range");
7394 fcio->fcio_errno = FC_BADPORT;
7395 rval = EFAULT;
7396 if (fp_fcio_copyout(fcio, data, mode)) {
7397 rval = EFAULT;
7398 }
7399 break;
7400 }
7401
7402 tmpPath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
7403 (void) ddi_pathname(tmpPort->fp_port_dip, tmpPath);
7404 if (fp_copyout((void *)tmpPath, (void *)fcio->fcio_obuf,
7405 MAXPATHLEN, mode) == 0) {
7406 if (fp_fcio_copyout(fcio, data, mode)) {
7407 rval = EFAULT;
7408 }
7409 } else {
7410 rval = EFAULT;
7411 }
7412 kmem_free(tmpPath, MAXPATHLEN);
7413 break;
7414 }
7415
7416 case FCIO_NPIV_GET_ADAPTER_ATTRIBUTES:
7417 case FCIO_GET_ADAPTER_ATTRIBUTES: {
7418 fc_hba_adapter_attributes_t *val;
7419 fc_hba_adapter_attributes32_t *val32;
7420
7421 if (use32 == B_TRUE) {
7422 if (fcio->fcio_olen < sizeof (*val32) ||
7423 fcio->fcio_xfer != FCIO_XFER_READ) {
7424 rval = EINVAL;
7425 break;
7426 }
7427 } else {
7428 if (fcio->fcio_olen < sizeof (*val) ||
7429 fcio->fcio_xfer != FCIO_XFER_READ) {
7430 rval = EINVAL;
7431 break;
7432 }
7433 }
7434
7435 val = kmem_zalloc(sizeof (*val), KM_SLEEP);
7436 val->version = FC_HBA_ADAPTER_ATTRIBUTES_VERSION;
7437 mutex_enter(&port->fp_mutex);
7438 bcopy(port->fp_hba_port_attrs.manufacturer,
7439 val->Manufacturer,
7440 sizeof (val->Manufacturer));
7441 bcopy(port->fp_hba_port_attrs.serial_number,
7442 val->SerialNumber,
7443 sizeof (val->SerialNumber));
7444 bcopy(port->fp_hba_port_attrs.model,
7445 val->Model,
7446 sizeof (val->Model));
7447 bcopy(port->fp_hba_port_attrs.model_description,
7448 val->ModelDescription,
7449 sizeof (val->ModelDescription));
7450 bcopy(port->fp_sym_node_name, val->NodeSymbolicName,
7451 port->fp_sym_node_namelen);
7452 bcopy(port->fp_hba_port_attrs.hardware_version,
7453 val->HardwareVersion,
7454 sizeof (val->HardwareVersion));
7455 bcopy(port->fp_hba_port_attrs.option_rom_version,
7456 val->OptionROMVersion,
7457 sizeof (val->OptionROMVersion));
7458 bcopy(port->fp_hba_port_attrs.firmware_version,
7459 val->FirmwareVersion,
7460 sizeof (val->FirmwareVersion));
7461 val->VendorSpecificID =
7462 port->fp_hba_port_attrs.vendor_specific_id;
7463 bcopy(&port->fp_service_params.node_ww_name.raw_wwn,
7464 &val->NodeWWN.raw_wwn,
7465 sizeof (val->NodeWWN.raw_wwn));
7466
7467
7468 bcopy(port->fp_hba_port_attrs.driver_name,
7469 val->DriverName,
7470 sizeof (val->DriverName));
7471 bcopy(port->fp_hba_port_attrs.driver_version,
7472 val->DriverVersion,
7473 sizeof (val->DriverVersion));
7474 mutex_exit(&port->fp_mutex);
7475
7476 if (fcio->fcio_cmd == FCIO_GET_ADAPTER_ATTRIBUTES) {
7477 val->NumberOfPorts = fctl_count_fru_ports(port, 0);
7478 } else {
7479 val->NumberOfPorts = fctl_count_fru_ports(port, 1);
7480 }
7481
7482 if (use32 == B_TRUE) {
7483 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP);
7484 val32->version = val->version;
7485 bcopy(val->Manufacturer, val32->Manufacturer,
7486 sizeof (val->Manufacturer));
7487 bcopy(val->SerialNumber, val32->SerialNumber,
7488 sizeof (val->SerialNumber));
7489 bcopy(val->Model, val32->Model,
7490 sizeof (val->Model));
7491 bcopy(val->ModelDescription, val32->ModelDescription,
7492 sizeof (val->ModelDescription));
7493 bcopy(val->NodeSymbolicName, val32->NodeSymbolicName,
7494 sizeof (val->NodeSymbolicName));
7495 bcopy(val->HardwareVersion, val32->HardwareVersion,
7496 sizeof (val->HardwareVersion));
7497 bcopy(val->OptionROMVersion, val32->OptionROMVersion,
7498 sizeof (val->OptionROMVersion));
7499 bcopy(val->FirmwareVersion, val32->FirmwareVersion,
7500 sizeof (val->FirmwareVersion));
7501 val32->VendorSpecificID = val->VendorSpecificID;
7502 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn,
7503 sizeof (val->NodeWWN.raw_wwn));
7504 bcopy(val->DriverName, val32->DriverName,
7505 sizeof (val->DriverName));
7506 bcopy(val->DriverVersion, val32->DriverVersion,
7507 sizeof (val->DriverVersion));
7508
7509 val32->NumberOfPorts = val->NumberOfPorts;
7510
7511 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf,
7512 fcio->fcio_olen, mode) == 0) {
7513 if (fp_fcio_copyout(fcio, data, mode)) {
7514 rval = EFAULT;
7515 }
7516 } else {
7517 rval = EFAULT;
7518 }
7519
7520 kmem_free(val32, sizeof (*val32));
7521 } else {
7522 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf,
7523 fcio->fcio_olen, mode) == 0) {
7524 if (fp_fcio_copyout(fcio, data, mode)) {
7525 rval = EFAULT;
7526 }
7527 } else {
7528 rval = EFAULT;
7529 }
7530 }
7531
7532 kmem_free(val, sizeof (*val));
7533 break;
7534 }
7535
7536 case FCIO_GET_NPIV_ATTRIBUTES: {
7537 fc_hba_npiv_attributes_t *attrs;
7538
7539 attrs = kmem_zalloc(sizeof (*attrs), KM_SLEEP);
7540 mutex_enter(&port->fp_mutex);
7541 bcopy(&port->fp_service_params.node_ww_name.raw_wwn,
7542 &attrs->NodeWWN.raw_wwn,
7543 sizeof (attrs->NodeWWN.raw_wwn));
7544 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn,
7545 &attrs->PortWWN.raw_wwn,
7546 sizeof (attrs->PortWWN.raw_wwn));
7547 mutex_exit(&port->fp_mutex);
7548 if (fp_copyout((void *)attrs, (void *)fcio->fcio_obuf,
7549 fcio->fcio_olen, mode) == 0) {
7550 if (fp_fcio_copyout(fcio, data, mode)) {
7551 rval = EFAULT;
7552 }
7553 } else {
7554 rval = EFAULT;
7555 }
7556 kmem_free(attrs, sizeof (*attrs));
7557 break;
7558 }
7559
7560 case FCIO_DELETE_NPIV_PORT: {
7561 fc_local_port_t *tmpport;
7562 char ww_pname[17];
7563 la_wwn_t vwwn[1];
7564
7565 FP_TRACE(FP_NHEAD1(1, 0), "Delete NPIV Port");
7566 if (ddi_copyin(fcio->fcio_ibuf,
7567 &vwwn, sizeof (la_wwn_t), mode)) {
7568 rval = EFAULT;
7569 break;
7570 }
7571
7572 fc_wwn_to_str(&vwwn[0], ww_pname);
7573 FP_TRACE(FP_NHEAD1(3, 0),
7574 "Delete NPIV Port %s", ww_pname);
7575 tmpport = fc_delete_npiv_port(port, &vwwn[0]);
7576 if (tmpport == NULL) {
7577 FP_TRACE(FP_NHEAD1(3, 0),
7578 			    "Delete NPIV Port: not found");
7579 rval = EFAULT;
7580 } else {
7581 fc_local_port_t *nextport = tmpport->fp_port_next;
7582 fc_local_port_t *prevport = tmpport->fp_port_prev;
7583 int portlen, portindex, ret;
7584
7585 portlen = sizeof (portindex);
7586 ret = ddi_prop_op(DDI_DEV_T_ANY,
7587 tmpport->fp_port_dip, PROP_LEN_AND_VAL_BUF,
7588 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port",
7589 (caddr_t)&portindex, &portlen);
7590 if (ret != DDI_SUCCESS) {
7591 rval = EFAULT;
7592 break;
7593 }
7594 if (ndi_devi_offline(tmpport->fp_port_dip,
7595 NDI_DEVI_REMOVE) != DDI_SUCCESS) {
7596 FP_TRACE(FP_NHEAD1(1, 0),
7597 "Delete NPIV Port failed");
7598 mutex_enter(&port->fp_mutex);
7599 tmpport->fp_npiv_state = 0;
7600 mutex_exit(&port->fp_mutex);
7601 rval = EFAULT;
7602 } else {
7603 mutex_enter(&port->fp_mutex);
7604 nextport->fp_port_prev = prevport;
7605 prevport->fp_port_next = nextport;
7606 if (port == port->fp_port_next) {
7607 port->fp_port_next =
7608 port->fp_port_prev = NULL;
7609 }
7610 port->fp_npiv_portnum--;
7611 FP_TRACE(FP_NHEAD1(3, 0),
7612 "Delete NPIV Port %d", portindex);
7613 port->fp_npiv_portindex[portindex-1] = 0;
7614 mutex_exit(&port->fp_mutex);
7615 }
7616 }
7617 break;
7618 }
7619
7620 case FCIO_CREATE_NPIV_PORT: {
7621 char ww_nname[17], ww_pname[17];
7622 la_npiv_create_entry_t entrybuf;
7623 uint32_t vportindex = 0;
7624 int npiv_ret = 0;
7625 char *portname, *fcaname;
7626
7627 portname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
7628 (void) ddi_pathname(port->fp_port_dip, portname);
7629 fcaname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
7630 (void) ddi_pathname(port->fp_fca_dip, fcaname);
7631 FP_TRACE(FP_NHEAD1(1, 0),
7632 "Create NPIV port %s %s %s", portname, fcaname,
7633 ddi_driver_name(port->fp_fca_dip));
7634 kmem_free(portname, MAXPATHLEN);
7635 kmem_free(fcaname, MAXPATHLEN);
7636 if (ddi_copyin(fcio->fcio_ibuf,
7637 &entrybuf, sizeof (la_npiv_create_entry_t), mode)) {
7638 rval = EFAULT;
7639 break;
7640 }
7641
7642 fc_wwn_to_str(&entrybuf.VNodeWWN, ww_nname);
7643 fc_wwn_to_str(&entrybuf.VPortWWN, ww_pname);
7644 vportindex = entrybuf.vindex;
7645 FP_TRACE(FP_NHEAD1(3, 0),
7646 "Create NPIV Port %s %s %d",
7647 ww_nname, ww_pname, vportindex);
7648
7649 if (fc_get_npiv_port(port, &entrybuf.VPortWWN)) {
7650 rval = EFAULT;
7651 break;
7652 }
7653 npiv_ret = fctl_fca_create_npivport(port->fp_fca_dip,
7654 port->fp_port_dip, ww_nname, ww_pname, &vportindex);
7655 if (npiv_ret == NDI_SUCCESS) {
7656 mutex_enter(&port->fp_mutex);
7657 port->fp_npiv_portnum++;
7658 mutex_exit(&port->fp_mutex);
7659 if (fp_copyout((void *)&vportindex,
7660 (void *)fcio->fcio_obuf,
7661 fcio->fcio_olen, mode) == 0) {
7662 if (fp_fcio_copyout(fcio, data, mode)) {
7663 rval = EFAULT;
7664 }
7665 } else {
7666 rval = EFAULT;
7667 }
7668 } else {
7669 rval = EFAULT;
7670 }
7671 FP_TRACE(FP_NHEAD1(3, 0),
7672 "Create NPIV Port %d %d", npiv_ret, vportindex);
7673 break;
7674 }
7675
7676 case FCIO_GET_NPIV_PORT_LIST: {
7677 fc_hba_npiv_port_list_t *list;
7678 int count;
7679
7680 if ((fcio->fcio_xfer != FCIO_XFER_READ) ||
7681 (fcio->fcio_olen == 0) || (fcio->fcio_obuf == 0)) {
7682 rval = EINVAL;
7683 break;
7684 }
7685
7686 list = kmem_zalloc(fcio->fcio_olen, KM_SLEEP);
7687 list->version = FC_HBA_LIST_VERSION;
7688
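		/*
		 * Work out how many MAXPATHLEN-sized path slots the caller's
		 * buffer can hold. If there are more NPIV ports than that,
		 * just report the required count so the caller can retry
		 * with a larger buffer.
		 */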
7689 count = (fcio->fcio_olen -
7690 (int)sizeof (fc_hba_npiv_port_list_t))/MAXPATHLEN + 1;
7691 if (port->fp_npiv_portnum > count) {
7692 list->numAdapters = port->fp_npiv_portnum;
7693 } else {
7694 /* build npiv port list */
7695 count = fc_ulp_get_npiv_port_list(port,
7696 (char *)list->hbaPaths);
7697 if (count < 0) {
7698 rval = ENXIO;
7699 FP_TRACE(FP_NHEAD1(1, 0),
7700 "Build NPIV Port List error");
7701 kmem_free(list, fcio->fcio_olen);
7702 break;
7703 }
7704 list->numAdapters = count;
7705 }
7706
7707 if (fp_copyout((void *)list, (void *)fcio->fcio_obuf,
7708 fcio->fcio_olen, mode) == 0) {
7709 if (fp_fcio_copyout(fcio, data, mode)) {
7710 FP_TRACE(FP_NHEAD1(1, 0),
7711 "Copy NPIV Port data error");
7712 rval = EFAULT;
7713 }
7714 } else {
7715 FP_TRACE(FP_NHEAD1(1, 0), "Copy NPIV Port List error");
7716 rval = EFAULT;
7717 }
7718 kmem_free(list, fcio->fcio_olen);
7719 break;
7720 }
7721
7722 case FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES: {
7723 fc_hba_port_npiv_attributes_t *val;
7724
7725 val = kmem_zalloc(sizeof (*val), KM_SLEEP);
7726 val->version = FC_HBA_PORT_NPIV_ATTRIBUTES_VERSION;
7727
7728 mutex_enter(&port->fp_mutex);
7729 val->npivflag = port->fp_npiv_flag;
7730 val->lastChange = port->fp_last_change;
7731 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn,
7732 &val->PortWWN.raw_wwn,
7733 sizeof (val->PortWWN.raw_wwn));
7734 bcopy(&port->fp_service_params.node_ww_name.raw_wwn,
7735 &val->NodeWWN.raw_wwn,
7736 sizeof (val->NodeWWN.raw_wwn));
7737 mutex_exit(&port->fp_mutex);
7738
7739 val->NumberOfNPIVPorts = fc_ulp_get_npiv_port_num(port);
7740 if (port->fp_npiv_type != FC_NPIV_PORT) {
7741 val->MaxNumberOfNPIVPorts =
7742 port->fp_fca_tran->fca_num_npivports;
7743 } else {
7744 val->MaxNumberOfNPIVPorts = 0;
7745 }
7746
7747 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf,
7748 fcio->fcio_olen, mode) == 0) {
7749 if (fp_fcio_copyout(fcio, data, mode)) {
7750 rval = EFAULT;
7751 }
7752 } else {
7753 rval = EFAULT;
7754 }
7755 kmem_free(val, sizeof (*val));
7756 break;
7757 }
7758
7759 case FCIO_GET_ADAPTER_PORT_ATTRIBUTES: {
7760 fc_hba_port_attributes_t *val;
7761 fc_hba_port_attributes32_t *val32;
7762
7763 if (use32 == B_TRUE) {
7764 if (fcio->fcio_olen < sizeof (*val32) ||
7765 fcio->fcio_xfer != FCIO_XFER_READ) {
7766 rval = EINVAL;
7767 break;
7768 }
7769 } else {
7770 if (fcio->fcio_olen < sizeof (*val) ||
7771 fcio->fcio_xfer != FCIO_XFER_READ) {
7772 rval = EINVAL;
7773 break;
7774 }
7775 }
7776
7777 val = kmem_zalloc(sizeof (*val), KM_SLEEP);
7778 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION;
7779 mutex_enter(&port->fp_mutex);
7780 val->lastChange = port->fp_last_change;
7781 val->fp_minor = port->fp_instance;
7782
7783 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn,
7784 &val->PortWWN.raw_wwn,
7785 sizeof (val->PortWWN.raw_wwn));
7786 bcopy(&port->fp_service_params.node_ww_name.raw_wwn,
7787 &val->NodeWWN.raw_wwn,
7788 sizeof (val->NodeWWN.raw_wwn));
7789 bcopy(&port->fp_fabric_name, &val->FabricName.raw_wwn,
7790 sizeof (val->FabricName.raw_wwn));
7791
7792 val->PortFcId = port->fp_port_id.port_id;
7793
7794 switch (FC_PORT_STATE_MASK(port->fp_state)) {
7795 case FC_STATE_OFFLINE:
7796 val->PortState = FC_HBA_PORTSTATE_OFFLINE;
7797 break;
7798 case FC_STATE_ONLINE:
7799 case FC_STATE_LOOP:
7800 case FC_STATE_NAMESERVICE:
7801 val->PortState = FC_HBA_PORTSTATE_ONLINE;
7802 break;
7803 default:
7804 val->PortState = FC_HBA_PORTSTATE_UNKNOWN;
7805 break;
7806 }
7807
7808 /* Translate from LV to FC-HBA port type codes */
7809 switch (port->fp_port_type.port_type) {
7810 case FC_NS_PORT_N:
7811 val->PortType = FC_HBA_PORTTYPE_NPORT;
7812 break;
7813 case FC_NS_PORT_NL:
7814 /* Actually means loop for us */
7815 val->PortType = FC_HBA_PORTTYPE_LPORT;
7816 break;
7817 case FC_NS_PORT_F:
7818 val->PortType = FC_HBA_PORTTYPE_FPORT;
7819 break;
7820 case FC_NS_PORT_FL:
7821 val->PortType = FC_HBA_PORTTYPE_FLPORT;
7822 break;
7823 case FC_NS_PORT_E:
7824 val->PortType = FC_HBA_PORTTYPE_EPORT;
7825 break;
7826 default:
7827 val->PortType = FC_HBA_PORTTYPE_OTHER;
7828 break;
7829 }
7830
7831
7832 /*
7833 * If fp has decided that the topology is public loop,
7834 * we will indicate that using the appropriate
7835 * FC HBA API constant.
7836 */
7837 switch (port->fp_topology) {
7838 case FC_TOP_PUBLIC_LOOP:
7839 val->PortType = FC_HBA_PORTTYPE_NLPORT;
7840 break;
7841
7842 case FC_TOP_PT_PT:
7843 val->PortType = FC_HBA_PORTTYPE_PTP;
7844 break;
7845
7846 case FC_TOP_UNKNOWN:
7847 /*
7848 * This should cover the case where nothing is connected
7849 			 * to the port. Crystal+ is probably an exception here.
7850 			 * For Crystal+, port 0 will come up as private loop
7851 			 * (i.e., fp_bind_state will be FC_STATE_LOOP) even when
7852 * nothing is connected to it.
7853 * Current plan is to let userland handle this.
7854 */
7855 if (port->fp_bind_state == FC_STATE_OFFLINE) {
7856 val->PortType = FC_HBA_PORTTYPE_UNKNOWN;
7857 }
7858 break;
7859
7860 default:
7861 /*
7862 * Do Nothing.
7863 * Unused:
7864 * val->PortType = FC_HBA_PORTTYPE_GPORT;
7865 */
7866 break;
7867 }
7868
7869 val->PortSupportedClassofService =
7870 port->fp_hba_port_attrs.supported_cos;
7871 val->PortSupportedFc4Types[0] = 0;
7872 bcopy(port->fp_fc4_types, val->PortActiveFc4Types,
7873 sizeof (val->PortActiveFc4Types));
7874 bcopy(port->fp_sym_port_name, val->PortSymbolicName,
7875 port->fp_sym_port_namelen);
7876 val->PortSupportedSpeed =
7877 port->fp_hba_port_attrs.supported_speed;
7878
7879 switch (FC_PORT_SPEED_MASK(port->fp_state)) {
7880 case FC_STATE_1GBIT_SPEED:
7881 val->PortSpeed = FC_HBA_PORTSPEED_1GBIT;
7882 break;
7883 case FC_STATE_2GBIT_SPEED:
7884 val->PortSpeed = FC_HBA_PORTSPEED_2GBIT;
7885 break;
7886 case FC_STATE_4GBIT_SPEED:
7887 val->PortSpeed = FC_HBA_PORTSPEED_4GBIT;
7888 break;
7889 case FC_STATE_8GBIT_SPEED:
7890 val->PortSpeed = FC_HBA_PORTSPEED_8GBIT;
7891 break;
7892 case FC_STATE_10GBIT_SPEED:
7893 val->PortSpeed = FC_HBA_PORTSPEED_10GBIT;
7894 break;
7895 case FC_STATE_16GBIT_SPEED:
7896 val->PortSpeed = FC_HBA_PORTSPEED_16GBIT;
7897 break;
7898 default:
7899 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN;
7900 break;
7901 }
7902 val->PortMaxFrameSize = port->fp_hba_port_attrs.max_frame_size;
7903 val->NumberofDiscoveredPorts = port->fp_dev_count;
7904 mutex_exit(&port->fp_mutex);
7905
7906 if (use32 == B_TRUE) {
7907 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP);
7908 val32->version = val->version;
7909 val32->lastChange = val->lastChange;
7910 val32->fp_minor = val->fp_minor;
7911
7912 bcopy(&val->PortWWN.raw_wwn, &val32->PortWWN.raw_wwn,
7913 sizeof (val->PortWWN.raw_wwn));
7914 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn,
7915 sizeof (val->NodeWWN.raw_wwn));
7916 val32->PortFcId = val->PortFcId;
7917 val32->PortState = val->PortState;
7918 val32->PortType = val->PortType;
7919
7920 val32->PortSupportedClassofService =
7921 val->PortSupportedClassofService;
7922 bcopy(val->PortActiveFc4Types,
7923 val32->PortActiveFc4Types,
7924 sizeof (val->PortActiveFc4Types));
7925 bcopy(val->PortSymbolicName, val32->PortSymbolicName,
7926 sizeof (val->PortSymbolicName));
7927 bcopy(&val->FabricName, &val32->FabricName,
7928 sizeof (val->FabricName.raw_wwn));
7929 val32->PortSupportedSpeed = val->PortSupportedSpeed;
7930 val32->PortSpeed = val->PortSpeed;
7931
7932 val32->PortMaxFrameSize = val->PortMaxFrameSize;
7933 val32->NumberofDiscoveredPorts =
7934 val->NumberofDiscoveredPorts;
7935
7936 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf,
7937 fcio->fcio_olen, mode) == 0) {
7938 if (fp_fcio_copyout(fcio, data, mode)) {
7939 rval = EFAULT;
7940 }
7941 } else {
7942 rval = EFAULT;
7943 }
7944
7945 kmem_free(val32, sizeof (*val32));
7946 } else {
7947 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf,
7948 fcio->fcio_olen, mode) == 0) {
7949 if (fp_fcio_copyout(fcio, data, mode)) {
7950 rval = EFAULT;
7951 }
7952 } else {
7953 rval = EFAULT;
7954 }
7955 }
7956
7957 kmem_free(val, sizeof (*val));
7958 break;
7959 }
7960
7961 case FCIO_GET_DISCOVERED_PORT_ATTRIBUTES: {
7962 fc_hba_port_attributes_t *val;
7963 fc_hba_port_attributes32_t *val32;
7964 uint32_t index = 0;
7965 fc_remote_port_t *tmp_pd;
7966
7967 if (use32 == B_TRUE) {
7968 if (fcio->fcio_olen < sizeof (*val32) ||
7969 fcio->fcio_xfer != FCIO_XFER_READ) {
7970 rval = EINVAL;
7971 break;
7972 }
7973 } else {
7974 if (fcio->fcio_olen < sizeof (*val) ||
7975 fcio->fcio_xfer != FCIO_XFER_READ) {
7976 rval = EINVAL;
7977 break;
7978 }
7979 }
7980
7981 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) {
7982 rval = EFAULT;
7983 break;
7984 }
7985
7986 if (index >= port->fp_dev_count) {
7987 FP_TRACE(FP_NHEAD1(9, 0),
7988 "User supplied index out of range");
7989 fcio->fcio_errno = FC_OUTOFBOUNDS;
7990 rval = EINVAL;
7991 if (fp_fcio_copyout(fcio, data, mode)) {
7992 rval = EFAULT;
7993 }
7994 break;
7995 }
7996
7997 val = kmem_zalloc(sizeof (*val), KM_SLEEP);
7998 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION;
7999
8000 mutex_enter(&port->fp_mutex);
8001 tmp_pd = fctl_lookup_pd_by_index(port, index);
8002
8003 if (tmp_pd == NULL) {
8004 fcio->fcio_errno = FC_BADPORT;
8005 rval = EINVAL;
8006 } else {
8007 val->lastChange = port->fp_last_change;
8008 val->fp_minor = port->fp_instance;
8009
8010 mutex_enter(&tmp_pd->pd_mutex);
8011 bcopy(&tmp_pd->pd_port_name.raw_wwn,
8012 &val->PortWWN.raw_wwn,
8013 sizeof (val->PortWWN.raw_wwn));
8014 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn,
8015 &val->NodeWWN.raw_wwn,
8016 sizeof (val->NodeWWN.raw_wwn));
8017 val->PortFcId = tmp_pd->pd_port_id.port_id;
8018 bcopy(tmp_pd->pd_spn, val->PortSymbolicName,
8019 tmp_pd->pd_spn_len);
8020 val->PortSupportedClassofService = tmp_pd->pd_cos;
8021 /*
8022 			 * We will assume the sizes of these pd_fc4types and
8023 			 * PortActiveFc4Types will remain the same. We could
8024 			 * add in a check for it, but we decided it was unneeded.
8025 */
8026 bcopy((caddr_t)tmp_pd->pd_fc4types,
8027 val->PortActiveFc4Types,
8028 sizeof (tmp_pd->pd_fc4types));
8029 val->PortState =
8030 fp_map_remote_port_state(tmp_pd->pd_state);
8031 mutex_exit(&tmp_pd->pd_mutex);
8032
8033 val->PortType = FC_HBA_PORTTYPE_UNKNOWN;
8034 val->PortSupportedFc4Types[0] = 0;
8035 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN;
8036 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN;
8037 val->PortMaxFrameSize = 0;
8038 val->NumberofDiscoveredPorts = 0;
8039
8040 if (use32 == B_TRUE) {
8041 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP);
8042 val32->version = val->version;
8043 val32->lastChange = val->lastChange;
8044 val32->fp_minor = val->fp_minor;
8045
8046 bcopy(&val->PortWWN.raw_wwn,
8047 &val32->PortWWN.raw_wwn,
8048 sizeof (val->PortWWN.raw_wwn));
8049 bcopy(&val->NodeWWN.raw_wwn,
8050 &val32->NodeWWN.raw_wwn,
8051 sizeof (val->NodeWWN.raw_wwn));
8052 val32->PortFcId = val->PortFcId;
8053 bcopy(val->PortSymbolicName,
8054 val32->PortSymbolicName,
8055 sizeof (val->PortSymbolicName));
8056 val32->PortSupportedClassofService =
8057 val->PortSupportedClassofService;
8058 bcopy(val->PortActiveFc4Types,
8059 val32->PortActiveFc4Types,
8060 sizeof (tmp_pd->pd_fc4types));
8061
8062 val32->PortType = val->PortType;
8063 val32->PortState = val->PortState;
8064 val32->PortSupportedFc4Types[0] =
8065 val->PortSupportedFc4Types[0];
8066 val32->PortSupportedSpeed =
8067 val->PortSupportedSpeed;
8068 val32->PortSpeed = val->PortSpeed;
8069 val32->PortMaxFrameSize =
8070 val->PortMaxFrameSize;
8071 val32->NumberofDiscoveredPorts =
8072 val->NumberofDiscoveredPorts;
8073
8074 if (fp_copyout((void *)val32,
8075 (void *)fcio->fcio_obuf,
8076 fcio->fcio_olen, mode) == 0) {
8077 if (fp_fcio_copyout(fcio,
8078 data, mode)) {
8079 rval = EFAULT;
8080 }
8081 } else {
8082 rval = EFAULT;
8083 }
8084
8085 kmem_free(val32, sizeof (*val32));
8086 } else {
8087 if (fp_copyout((void *)val,
8088 (void *)fcio->fcio_obuf,
8089 fcio->fcio_olen, mode) == 0) {
8090 if (fp_fcio_copyout(fcio, data, mode)) {
8091 rval = EFAULT;
8092 }
8093 } else {
8094 rval = EFAULT;
8095 }
8096 }
8097 }
8098
8099 mutex_exit(&port->fp_mutex);
8100 kmem_free(val, sizeof (*val));
8101 break;
8102 }
8103
8104 case FCIO_GET_PORT_ATTRIBUTES: {
8105 fc_hba_port_attributes_t *val;
8106 fc_hba_port_attributes32_t *val32;
8107 la_wwn_t wwn;
8108 fc_remote_port_t *tmp_pd;
8109
8110 if (use32 == B_TRUE) {
8111 if (fcio->fcio_olen < sizeof (*val32) ||
8112 fcio->fcio_xfer != FCIO_XFER_READ) {
8113 rval = EINVAL;
8114 break;
8115 }
8116 } else {
8117 if (fcio->fcio_olen < sizeof (*val) ||
8118 fcio->fcio_xfer != FCIO_XFER_READ) {
8119 rval = EINVAL;
8120 break;
8121 }
8122 }
8123
8124 if (ddi_copyin(fcio->fcio_ibuf, &wwn, sizeof (wwn), mode)) {
8125 rval = EFAULT;
8126 break;
8127 }
8128
8129 val = kmem_zalloc(sizeof (*val), KM_SLEEP);
8130 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION;
8131
8132 mutex_enter(&port->fp_mutex);
8133 tmp_pd = fctl_lookup_pd_by_wwn(port, wwn);
8134 val->lastChange = port->fp_last_change;
8135 val->fp_minor = port->fp_instance;
8136 mutex_exit(&port->fp_mutex);
8137
8138 if (tmp_pd == NULL) {
8139 fcio->fcio_errno = FC_BADWWN;
8140 rval = EINVAL;
8141 } else {
8142 mutex_enter(&tmp_pd->pd_mutex);
8143 bcopy(&tmp_pd->pd_port_name.raw_wwn,
8144 &val->PortWWN.raw_wwn,
8145 sizeof (val->PortWWN.raw_wwn));
8146 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn,
8147 &val->NodeWWN.raw_wwn,
8148 sizeof (val->NodeWWN.raw_wwn));
8149 val->PortFcId = tmp_pd->pd_port_id.port_id;
8150 bcopy(tmp_pd->pd_spn, val->PortSymbolicName,
8151 tmp_pd->pd_spn_len);
8152 val->PortSupportedClassofService = tmp_pd->pd_cos;
8153 val->PortType = FC_HBA_PORTTYPE_UNKNOWN;
8154 val->PortState =
8155 fp_map_remote_port_state(tmp_pd->pd_state);
8156 val->PortSupportedFc4Types[0] = 0;
8157 			/*
8158 			 * We assume the sizes of pd_fc4types and
8159 			 * PortActiveFc4Types remain the same; a check
8160 			 * could be added here, but it was deemed unnecessary.
8161 			 */
8162 bcopy((caddr_t)tmp_pd->pd_fc4types,
8163 val->PortActiveFc4Types,
8164 sizeof (tmp_pd->pd_fc4types));
8165 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN;
8166 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN;
8167 val->PortMaxFrameSize = 0;
8168 val->NumberofDiscoveredPorts = 0;
8169 mutex_exit(&tmp_pd->pd_mutex);
8170
8171 if (use32 == B_TRUE) {
8172 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP);
8173 val32->version = val->version;
8174 val32->lastChange = val->lastChange;
8175 val32->fp_minor = val->fp_minor;
8176 bcopy(&val->PortWWN.raw_wwn,
8177 &val32->PortWWN.raw_wwn,
8178 sizeof (val->PortWWN.raw_wwn));
8179 bcopy(&val->NodeWWN.raw_wwn,
8180 &val32->NodeWWN.raw_wwn,
8181 sizeof (val->NodeWWN.raw_wwn));
8182 val32->PortFcId = val->PortFcId;
8183 bcopy(val->PortSymbolicName,
8184 val32->PortSymbolicName,
8185 sizeof (val->PortSymbolicName));
8186 val32->PortSupportedClassofService =
8187 val->PortSupportedClassofService;
8188 val32->PortType = val->PortType;
8189 val32->PortState = val->PortState;
8190 val32->PortSupportedFc4Types[0] =
8191 val->PortSupportedFc4Types[0];
8192 bcopy(val->PortActiveFc4Types,
8193 val32->PortActiveFc4Types,
8194 sizeof (tmp_pd->pd_fc4types));
8195 val32->PortSupportedSpeed =
8196 val->PortSupportedSpeed;
8197 val32->PortSpeed = val->PortSpeed;
8198 val32->PortMaxFrameSize = val->PortMaxFrameSize;
8199 val32->NumberofDiscoveredPorts =
8200 val->NumberofDiscoveredPorts;
8201
8202 if (fp_copyout((void *)val32,
8203 (void *)fcio->fcio_obuf,
8204 fcio->fcio_olen, mode) == 0) {
8205 if (fp_fcio_copyout(fcio, data, mode)) {
8206 rval = EFAULT;
8207 }
8208 } else {
8209 rval = EFAULT;
8210 }
8211
8212 kmem_free(val32, sizeof (*val32));
8213 } else {
8214 if (fp_copyout((void *)val,
8215 (void *)fcio->fcio_obuf,
8216 fcio->fcio_olen, mode) == 0) {
8217 if (fp_fcio_copyout(fcio, data, mode)) {
8218 rval = EFAULT;
8219 }
8220 } else {
8221 rval = EFAULT;
8222 }
8223 }
8224 }
8225 kmem_free(val, sizeof (*val));
8226 break;
8227 }
8228
8229 case FCIO_GET_NUM_DEVS: {
8230 int num_devices;
8231
8232 if (fcio->fcio_olen != sizeof (num_devices) ||
8233 fcio->fcio_xfer != FCIO_XFER_READ) {
8234 rval = EINVAL;
8235 break;
8236 }
8237
8238 mutex_enter(&port->fp_mutex);
8239 switch (port->fp_topology) {
8240 case FC_TOP_PRIVATE_LOOP:
8241 case FC_TOP_PT_PT:
8242 num_devices = port->fp_total_devices;
8243 fcio->fcio_errno = FC_SUCCESS;
8244 break;
8245
8246 case FC_TOP_PUBLIC_LOOP:
8247 case FC_TOP_FABRIC:
8248 mutex_exit(&port->fp_mutex);
8249 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL,
8250 NULL, KM_SLEEP);
8251 ASSERT(job != NULL);
8252
8253 			/*
8254 			 * In FC-GS-2 the Name Server doesn't send out
8255 			 * RSCNs for Name Server database updates.  Once
8256 			 * that is fixed, the probe below is no longer
8257 			 * needed and should be removed.
8258 			 */
8259 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP);
8260 fctl_dealloc_job(job);
8261
8262 mutex_enter(&port->fp_mutex);
8263 num_devices = port->fp_total_devices;
8264 fcio->fcio_errno = FC_SUCCESS;
8265 break;
8266
8267 case FC_TOP_NO_NS:
8268 /* FALLTHROUGH */
8269 case FC_TOP_UNKNOWN:
8270 /* FALLTHROUGH */
8271 default:
8272 num_devices = 0;
8273 fcio->fcio_errno = FC_SUCCESS;
8274 break;
8275 }
8276 mutex_exit(&port->fp_mutex);
8277
8278 if (fp_copyout((void *)&num_devices,
8279 (void *)fcio->fcio_obuf, fcio->fcio_olen,
8280 mode) == 0) {
8281 if (fp_fcio_copyout(fcio, data, mode)) {
8282 rval = EFAULT;
8283 }
8284 } else {
8285 rval = EFAULT;
8286 }
8287 break;
8288 }
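
	/*
	 * Hedged usage sketch (illustration only, not part of the driver):
	 * how a userland caller could fill in the fcio_t for
	 * FCIO_GET_NUM_DEVS, based on the validation above (fcio_olen must
	 * equal sizeof (int) and fcio_xfer must be FCIO_XFER_READ).  The
	 * open path and the FCIO_CMD ioctl entry point are assumptions
	 * about the usual fcio plumbing, not something this case enforces.
	 *
	 *	int	num = 0;
	 *	fcio_t	fcio;
	 *
	 *	bzero(&fcio, sizeof (fcio));
	 *	fcio.fcio_cmd = FCIO_GET_NUM_DEVS;
	 *	fcio.fcio_xfer = FCIO_XFER_READ;
	 *	fcio.fcio_olen = sizeof (num);
	 *	fcio.fcio_obuf = (caddr_t)&num;
	 *	(void) ioctl(fd, FCIO_CMD, &fcio);	(assumed entry point)
	 */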
8289
8290 case FCIO_GET_DEV_LIST: {
8291 int num_devices;
8292 int new_count;
8293 int map_size;
8294
8295 if (fcio->fcio_xfer != FCIO_XFER_READ ||
8296 fcio->fcio_alen != sizeof (new_count)) {
8297 rval = EINVAL;
8298 break;
8299 }
8300
8301 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t);
8302
8303 mutex_enter(&port->fp_mutex);
8304 if (num_devices < port->fp_total_devices) {
8305 fcio->fcio_errno = FC_TOOMANY;
8306 new_count = port->fp_total_devices;
8307 mutex_exit(&port->fp_mutex);
8308
8309 if (fp_copyout((void *)&new_count,
8310 (void *)fcio->fcio_abuf,
8311 sizeof (new_count), mode)) {
8312 rval = EFAULT;
8313 break;
8314 }
8315
8316 if (fp_fcio_copyout(fcio, data, mode)) {
8317 rval = EFAULT;
8318 break;
8319 }
8320 rval = EINVAL;
8321 break;
8322 }
8323
8324 if (port->fp_total_devices <= 0) {
8325 fcio->fcio_errno = FC_NO_MAP;
8326 new_count = port->fp_total_devices;
8327 mutex_exit(&port->fp_mutex);
8328
8329 if (fp_copyout((void *)&new_count,
8330 (void *)fcio->fcio_abuf,
8331 sizeof (new_count), mode)) {
8332 rval = EFAULT;
8333 break;
8334 }
8335
8336 if (fp_fcio_copyout(fcio, data, mode)) {
8337 rval = EFAULT;
8338 break;
8339 }
8340 rval = EINVAL;
8341 break;
8342 }
8343
8344 switch (port->fp_topology) {
8345 case FC_TOP_PRIVATE_LOOP:
8346 if (fp_fillout_loopmap(port, fcio,
8347 mode) != FC_SUCCESS) {
8348 rval = EFAULT;
8349 break;
8350 }
8351 if (fp_fcio_copyout(fcio, data, mode)) {
8352 rval = EFAULT;
8353 }
8354 break;
8355
8356 case FC_TOP_PT_PT:
8357 if (fp_fillout_p2pmap(port, fcio,
8358 mode) != FC_SUCCESS) {
8359 rval = EFAULT;
8360 break;
8361 }
8362 if (fp_fcio_copyout(fcio, data, mode)) {
8363 rval = EFAULT;
8364 }
8365 break;
8366
8367 case FC_TOP_PUBLIC_LOOP:
8368 case FC_TOP_FABRIC: {
8369 fctl_ns_req_t *ns_cmd;
8370
8371 map_size =
8372 sizeof (fc_port_dev_t) * port->fp_total_devices;
8373
8374 mutex_exit(&port->fp_mutex);
8375
8376 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t),
8377 sizeof (ns_resp_gan_t), map_size,
8378 (FCTL_NS_FILL_NS_MAP | FCTL_NS_BUF_IS_USERLAND),
8379 KM_SLEEP);
8380 ASSERT(ns_cmd != NULL);
8381
8382 ns_cmd->ns_gan_index = 0;
8383 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID;
8384 ns_cmd->ns_cmd_code = NS_GA_NXT;
8385 ns_cmd->ns_gan_max = map_size / sizeof (fc_port_dev_t);
8386
8387 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL,
8388 NULL, KM_SLEEP);
8389 ASSERT(job != NULL);
8390
8391 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
8392
8393 if (ret != FC_SUCCESS ||
8394 job->job_result != FC_SUCCESS) {
8395 fctl_free_ns_cmd(ns_cmd);
8396
8397 fcio->fcio_errno = job->job_result;
8398 new_count = 0;
8399 if (fp_copyout((void *)&new_count,
8400 (void *)fcio->fcio_abuf,
8401 sizeof (new_count), mode)) {
8402 fctl_dealloc_job(job);
8403 mutex_enter(&port->fp_mutex);
8404 rval = EFAULT;
8405 break;
8406 }
8407
8408 if (fp_fcio_copyout(fcio, data, mode)) {
8409 fctl_dealloc_job(job);
8410 mutex_enter(&port->fp_mutex);
8411 rval = EFAULT;
8412 break;
8413 }
8414 rval = EIO;
8415 mutex_enter(&port->fp_mutex);
8416 break;
8417 }
8418 fctl_dealloc_job(job);
8419
8420 new_count = ns_cmd->ns_gan_index;
8421 if (fp_copyout((void *)&new_count,
8422 (void *)fcio->fcio_abuf, sizeof (new_count),
8423 mode)) {
8424 rval = EFAULT;
8425 fctl_free_ns_cmd(ns_cmd);
8426 mutex_enter(&port->fp_mutex);
8427 break;
8428 }
8429
8430 if (fp_copyout((void *)ns_cmd->ns_data_buf,
8431 (void *)fcio->fcio_obuf, sizeof (fc_port_dev_t) *
8432 ns_cmd->ns_gan_index, mode)) {
8433 rval = EFAULT;
8434 fctl_free_ns_cmd(ns_cmd);
8435 mutex_enter(&port->fp_mutex);
8436 break;
8437 }
8438 fctl_free_ns_cmd(ns_cmd);
8439
8440 if (fp_fcio_copyout(fcio, data, mode)) {
8441 rval = EFAULT;
8442 }
8443 mutex_enter(&port->fp_mutex);
8444 break;
8445 }
8446
8447 case FC_TOP_NO_NS:
8448 /* FALLTHROUGH */
8449 case FC_TOP_UNKNOWN:
8450 /* FALLTHROUGH */
8451 default:
8452 fcio->fcio_errno = FC_NO_MAP;
8453 			num_devices = new_count = port->fp_total_devices;
8454
8455 if (fp_copyout((void *)&new_count,
8456 (void *)fcio->fcio_abuf,
8457 sizeof (new_count), mode)) {
8458 rval = EFAULT;
8459 break;
8460 }
8461
8462 if (fp_fcio_copyout(fcio, data, mode)) {
8463 rval = EFAULT;
8464 break;
8465 }
8466 rval = EINVAL;
8467 break;
8468 }
8469 mutex_exit(&port->fp_mutex);
8470 break;
8471 }
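
	/*
	 * Hedged sketch (illustration only) of the FCIO_GET_DEV_LIST buffer
	 * contract as coded above: fcio_obuf is an array of fc_port_dev_t
	 * whose capacity is implied by fcio_olen, and fcio_abuf points to
	 * an int that receives the device count.  If the array is too
	 * small, fcio_errno is set to FC_TOOMANY and the required count is
	 * still written back through fcio_abuf.
	 *
	 *	fc_port_dev_t	devs[16];	(capacity picked by caller)
	 *	int		count;
	 *
	 *	fcio.fcio_cmd = FCIO_GET_DEV_LIST;
	 *	fcio.fcio_xfer = FCIO_XFER_READ;
	 *	fcio.fcio_obuf = (caddr_t)devs;
	 *	fcio.fcio_olen = sizeof (devs);
	 *	fcio.fcio_abuf = (caddr_t)&count;
	 *	fcio.fcio_alen = sizeof (count);
	 */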
8472
8473 case FCIO_GET_SYM_PNAME: {
8474 rval = ENOTSUP;
8475 break;
8476 }
8477
8478 case FCIO_GET_SYM_NNAME: {
8479 rval = ENOTSUP;
8480 break;
8481 }
8482
8483 case FCIO_SET_SYM_PNAME: {
8484 rval = ENOTSUP;
8485 break;
8486 }
8487
8488 case FCIO_SET_SYM_NNAME: {
8489 rval = ENOTSUP;
8490 break;
8491 }
8492
8493 case FCIO_GET_LOGI_PARAMS: {
8494 la_wwn_t pwwn;
8495 la_wwn_t *my_pwwn;
8496 la_els_logi_t *params;
8497 la_els_logi32_t *params32;
8498 fc_remote_node_t *node;
8499 fc_remote_port_t *pd;
8500
8501 if (fcio->fcio_ilen != sizeof (la_wwn_t) ||
8502 (fcio->fcio_xfer & FCIO_XFER_READ) == 0 ||
8503 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0) {
8504 rval = EINVAL;
8505 break;
8506 }
8507
8508 if (use32 == B_TRUE) {
8509 if (fcio->fcio_olen != sizeof (la_els_logi32_t)) {
8510 rval = EINVAL;
8511 break;
8512 }
8513 } else {
8514 if (fcio->fcio_olen != sizeof (la_els_logi_t)) {
8515 rval = EINVAL;
8516 break;
8517 }
8518 }
8519
8520 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) {
8521 rval = EFAULT;
8522 break;
8523 }
8524
8525 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
8526 if (pd == NULL) {
8527 mutex_enter(&port->fp_mutex);
8528 my_pwwn = &port->fp_service_params.nport_ww_name;
8529 mutex_exit(&port->fp_mutex);
8530
8531 if (fctl_wwn_cmp(&pwwn, my_pwwn) != 0) {
8532 rval = ENXIO;
8533 break;
8534 }
8535
8536 params = kmem_zalloc(sizeof (*params), KM_SLEEP);
8537 mutex_enter(&port->fp_mutex);
8538 *params = port->fp_service_params;
8539 mutex_exit(&port->fp_mutex);
8540 } else {
8541 params = kmem_zalloc(sizeof (*params), KM_SLEEP);
8542
8543 mutex_enter(&pd->pd_mutex);
8544 params->ls_code.mbz = params->ls_code.ls_code = 0;
8545 params->common_service = pd->pd_csp;
8546 params->nport_ww_name = pd->pd_port_name;
8547 params->class_1 = pd->pd_clsp1;
8548 params->class_2 = pd->pd_clsp2;
8549 params->class_3 = pd->pd_clsp3;
8550 node = pd->pd_remote_nodep;
8551 mutex_exit(&pd->pd_mutex);
8552
8553 bzero(params->reserved, sizeof (params->reserved));
8554
8555 mutex_enter(&node->fd_mutex);
8556 bcopy(node->fd_vv, params->vendor_version,
8557 sizeof (node->fd_vv));
8558 params->node_ww_name = node->fd_node_name;
8559 mutex_exit(&node->fd_mutex);
8560
8561 fctl_release_remote_port(pd);
8562 }
8563
8564 if (use32 == B_TRUE) {
8565 params32 = kmem_zalloc(sizeof (*params32), KM_SLEEP);
8566
8567 params32->ls_code.mbz = params->ls_code.mbz;
8568 params32->common_service = params->common_service;
8569 params32->nport_ww_name = params->nport_ww_name;
8570 params32->class_1 = params->class_1;
8571 params32->class_2 = params->class_2;
8572 params32->class_3 = params->class_3;
8573 bzero(params32->reserved, sizeof (params32->reserved));
8574 bcopy(params->vendor_version, params32->vendor_version,
8575 sizeof (node->fd_vv));
8576 params32->node_ww_name = params->node_ww_name;
8577
8578 if (ddi_copyout((void *)params32,
8579 (void *)fcio->fcio_obuf,
8580 sizeof (*params32), mode)) {
8581 rval = EFAULT;
8582 }
8583
8584 kmem_free(params32, sizeof (*params32));
8585 } else {
8586 if (ddi_copyout((void *)params, (void *)fcio->fcio_obuf,
8587 sizeof (*params), mode)) {
8588 rval = EFAULT;
8589 }
8590 }
8591
8592 kmem_free(params, sizeof (*params));
8593 if (fp_fcio_copyout(fcio, data, mode)) {
8594 rval = EFAULT;
8595 }
8596 break;
8597 }
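
	/*
	 * Hedged sketch (illustration only): FCIO_GET_LOGI_PARAMS takes a
	 * port WWN in fcio_ibuf and returns an la_els_logi_t (or the
	 * la_els_logi32_t form for ILP32 callers) in fcio_obuf; both the
	 * READ and WRITE bits must be set in fcio_xfer, as checked above.
	 * If the WWN matches no remote port but is the local port's own
	 * WWN, the local service parameters are returned instead.  The
	 * use of FCIO_XFER_RW below assumes it is the combined READ|WRITE
	 * value also used by the FCIO_LINK_STATUS case later in this file.
	 *
	 *	la_wwn_t	pwwn;		(WWN of interest)
	 *	la_els_logi_t	logi;
	 *
	 *	fcio.fcio_cmd = FCIO_GET_LOGI_PARAMS;
	 *	fcio.fcio_xfer = FCIO_XFER_RW;
	 *	fcio.fcio_ibuf = (caddr_t)&pwwn;
	 *	fcio.fcio_ilen = sizeof (pwwn);
	 *	fcio.fcio_obuf = (caddr_t)&logi;
	 *	fcio.fcio_olen = sizeof (logi);
	 */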
8598
8599 case FCIO_DEV_LOGOUT:
8600 case FCIO_DEV_LOGIN:
8601 if (fcio->fcio_ilen != sizeof (la_wwn_t) ||
8602 fcio->fcio_xfer != FCIO_XFER_WRITE) {
8603 rval = EINVAL;
8604
8605 if (fp_fcio_copyout(fcio, data, mode)) {
8606 rval = EFAULT;
8607 }
8608 break;
8609 }
8610
8611 if (fcio->fcio_cmd == FCIO_DEV_LOGIN) {
8612 jcode = JOB_FCIO_LOGIN;
8613 } else {
8614 jcode = JOB_FCIO_LOGOUT;
8615 }
8616
8617 kfcio = kmem_zalloc(sizeof (*kfcio), KM_SLEEP);
8618 bcopy(fcio, kfcio, sizeof (*fcio));
8619
8620 if (kfcio->fcio_ilen) {
8621 kfcio->fcio_ibuf = kmem_zalloc(kfcio->fcio_ilen,
8622 KM_SLEEP);
8623
8624 if (ddi_copyin((void *)fcio->fcio_ibuf,
8625 (void *)kfcio->fcio_ibuf, kfcio->fcio_ilen,
8626 mode)) {
8627 rval = EFAULT;
8628
8629 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen);
8630 kmem_free(kfcio, sizeof (*kfcio));
8631 fcio->fcio_errno = job->job_result;
8632 if (fp_fcio_copyout(fcio, data, mode)) {
8633 rval = EFAULT;
8634 }
8635 break;
8636 }
8637 }
8638
8639 job = fctl_alloc_job(jcode, 0, NULL, NULL, KM_SLEEP);
8640 job->job_private = kfcio;
8641
8642 fctl_enque_job(port, job);
8643 fctl_jobwait(job);
8644
8645 rval = job->job_result;
8646
8647 fcio->fcio_errno = kfcio->fcio_errno;
8648 if (fp_fcio_copyout(fcio, data, mode)) {
8649 rval = EFAULT;
8650 }
8651
8652 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen);
8653 kmem_free(kfcio, sizeof (*kfcio));
8654 fctl_dealloc_job(job);
8655 break;
8656
8657 case FCIO_GET_STATE: {
8658 la_wwn_t pwwn;
8659 uint32_t state;
8660 fc_remote_port_t *pd;
8661 fctl_ns_req_t *ns_cmd;
8662
8663 if (fcio->fcio_ilen != sizeof (la_wwn_t) ||
8664 fcio->fcio_olen != sizeof (state) ||
8665 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0 ||
8666 (fcio->fcio_xfer & FCIO_XFER_READ) == 0) {
8667 rval = EINVAL;
8668 break;
8669 }
8670
8671 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) {
8672 rval = EFAULT;
8673 break;
8674 }
8675 fcio->fcio_errno = 0;
8676
8677 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
8678 if (pd == NULL) {
8679 mutex_enter(&port->fp_mutex);
8680 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
8681 mutex_exit(&port->fp_mutex);
8682 job = fctl_alloc_job(JOB_PLOGI_ONE, 0,
8683 NULL, NULL, KM_SLEEP);
8684
8685 job->job_counter = 1;
8686 job->job_result = FC_SUCCESS;
8687
8688 ns_cmd = fctl_alloc_ns_cmd(
8689 sizeof (ns_req_gid_pn_t),
8690 sizeof (ns_resp_gid_pn_t),
8691 sizeof (ns_resp_gid_pn_t),
8692 FCTL_NS_BUF_IS_USERLAND, KM_SLEEP);
8693 ASSERT(ns_cmd != NULL);
8694
8695 ns_cmd->ns_cmd_code = NS_GID_PN;
8696 ((ns_req_gid_pn_t *)
8697 (ns_cmd->ns_cmd_buf))->pwwn = pwwn;
8698
8699 ret = fp_ns_query(port, ns_cmd, job,
8700 1, KM_SLEEP);
8701
8702 if (ret != FC_SUCCESS || job->job_result !=
8703 FC_SUCCESS) {
8704 if (ret != FC_SUCCESS) {
8705 fcio->fcio_errno = ret;
8706 } else {
8707 fcio->fcio_errno =
8708 job->job_result;
8709 }
8710 rval = EIO;
8711 } else {
8712 state = PORT_DEVICE_INVALID;
8713 }
8714 fctl_free_ns_cmd(ns_cmd);
8715 fctl_dealloc_job(job);
8716 } else {
8717 mutex_exit(&port->fp_mutex);
8718 fcio->fcio_errno = FC_BADWWN;
8719 rval = ENXIO;
8720 }
8721 } else {
8722 mutex_enter(&pd->pd_mutex);
8723 state = pd->pd_state;
8724 mutex_exit(&pd->pd_mutex);
8725
8726 fctl_release_remote_port(pd);
8727 }
8728
8729 if (!rval) {
8730 if (ddi_copyout((void *)&state,
8731 (void *)fcio->fcio_obuf, sizeof (state),
8732 mode)) {
8733 rval = EFAULT;
8734 }
8735 }
8736 if (fp_fcio_copyout(fcio, data, mode)) {
8737 rval = EFAULT;
8738 }
8739 break;
8740 }
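
	/*
	 * Hedged sketch (illustration only): FCIO_GET_STATE maps a remote
	 * port WWN (fcio_ibuf) to a uint32_t login state written to
	 * fcio_obuf, with both READ and WRITE set in fcio_xfer.  When the
	 * WWN is not known locally and the topology has a switch, the name
	 * server is queried with GID_PN first; a port known only to the
	 * name server is reported as PORT_DEVICE_INVALID, as coded above.
	 *
	 *	la_wwn_t	pwwn;
	 *	uint32_t	state;
	 *
	 *	fcio.fcio_cmd = FCIO_GET_STATE;
	 *	fcio.fcio_ibuf = (caddr_t)&pwwn;
	 *	fcio.fcio_ilen = sizeof (pwwn);
	 *	fcio.fcio_obuf = (caddr_t)&state;
	 *	fcio.fcio_olen = sizeof (state);
	 */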
8741
8742 case FCIO_DEV_REMOVE: {
8743 la_wwn_t pwwn;
8744 fc_portmap_t *changelist;
8745 fc_remote_port_t *pd;
8746
8747 if (fcio->fcio_ilen != sizeof (la_wwn_t) ||
8748 fcio->fcio_xfer != FCIO_XFER_WRITE) {
8749 rval = EINVAL;
8750 break;
8751 }
8752
8753 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) {
8754 rval = EFAULT;
8755 break;
8756 }
8757
8758 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
8759 if (pd == NULL) {
8760 rval = ENXIO;
8761 fcio->fcio_errno = FC_BADWWN;
8762 if (fp_fcio_copyout(fcio, data, mode)) {
8763 rval = EFAULT;
8764 }
8765 break;
8766 }
8767
8768 mutex_enter(&pd->pd_mutex);
8769 if (pd->pd_ref_count > 1) {
8770 mutex_exit(&pd->pd_mutex);
8771
8772 rval = EBUSY;
8773 fcio->fcio_errno = FC_FAILURE;
8774 fctl_release_remote_port(pd);
8775
8776 if (fp_fcio_copyout(fcio, data, mode)) {
8777 rval = EFAULT;
8778 }
8779 break;
8780 }
8781 mutex_exit(&pd->pd_mutex);
8782
8783 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);
8784
8785 fctl_copy_portmap(changelist, pd);
8786 changelist->map_type = PORT_DEVICE_USER_LOGOUT;
8787 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);
8788
8789 fctl_release_remote_port(pd);
8790 break;
8791 }
8792
8793 case FCIO_GET_FCODE_REV: {
8794 caddr_t fcode_rev;
8795 fc_fca_pm_t pm;
8796
8797 if (fcio->fcio_olen < FC_FCODE_REV_SIZE ||
8798 fcio->fcio_xfer != FCIO_XFER_READ) {
8799 rval = EINVAL;
8800 break;
8801 }
8802 bzero((caddr_t)&pm, sizeof (pm));
8803
8804 fcode_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP);
8805
8806 pm.pm_cmd_flags = FC_FCA_PM_READ;
8807 pm.pm_cmd_code = FC_PORT_GET_FCODE_REV;
8808 pm.pm_data_len = fcio->fcio_olen;
8809 pm.pm_data_buf = fcode_rev;
8810
8811 ret = port->fp_fca_tran->fca_port_manage(
8812 port->fp_fca_handle, &pm);
8813
8814 if (ret == FC_SUCCESS) {
8815 if (ddi_copyout((void *)fcode_rev,
8816 (void *)fcio->fcio_obuf,
8817 fcio->fcio_olen, mode) == 0) {
8818 if (fp_fcio_copyout(fcio, data, mode)) {
8819 rval = EFAULT;
8820 }
8821 } else {
8822 rval = EFAULT;
8823 }
8824 } else {
8825 			/*
8826 			 * Check whether the buffer was too small to hold
8827 			 * the FCODE version.
8828 			 */
8829 if (pm.pm_data_len > fcio->fcio_olen) {
8830 rval = ENOMEM;
8831 } else {
8832 rval = EIO;
8833 }
8834 fcio->fcio_errno = ret;
8835 if (fp_fcio_copyout(fcio, data, mode)) {
8836 rval = EFAULT;
8837 }
8838 }
8839 kmem_free(fcode_rev, fcio->fcio_olen);
8840 break;
8841 }
8842
8843 case FCIO_GET_FW_REV: {
8844 caddr_t fw_rev;
8845 fc_fca_pm_t pm;
8846
8847 if (fcio->fcio_olen < FC_FW_REV_SIZE ||
8848 fcio->fcio_xfer != FCIO_XFER_READ) {
8849 rval = EINVAL;
8850 break;
8851 }
8852 bzero((caddr_t)&pm, sizeof (pm));
8853
8854 fw_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP);
8855
8856 pm.pm_cmd_flags = FC_FCA_PM_READ;
8857 pm.pm_cmd_code = FC_PORT_GET_FW_REV;
8858 pm.pm_data_len = fcio->fcio_olen;
8859 pm.pm_data_buf = fw_rev;
8860
8861 ret = port->fp_fca_tran->fca_port_manage(
8862 port->fp_fca_handle, &pm);
8863
8864 if (ret == FC_SUCCESS) {
8865 if (ddi_copyout((void *)fw_rev,
8866 (void *)fcio->fcio_obuf,
8867 fcio->fcio_olen, mode) == 0) {
8868 if (fp_fcio_copyout(fcio, data, mode)) {
8869 rval = EFAULT;
8870 }
8871 } else {
8872 rval = EFAULT;
8873 }
8874 } else {
8875 if (fp_fcio_copyout(fcio, data, mode)) {
8876 rval = EFAULT;
8877 }
8878 rval = EIO;
8879 }
8880 kmem_free(fw_rev, fcio->fcio_olen);
8881 break;
8882 }
8883
8884 case FCIO_GET_DUMP_SIZE: {
8885 uint32_t dump_size;
8886 fc_fca_pm_t pm;
8887
8888 if (fcio->fcio_olen != sizeof (dump_size) ||
8889 fcio->fcio_xfer != FCIO_XFER_READ) {
8890 rval = EINVAL;
8891 break;
8892 }
8893 bzero((caddr_t)&pm, sizeof (pm));
8894 pm.pm_cmd_flags = FC_FCA_PM_READ;
8895 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE;
8896 pm.pm_data_len = sizeof (dump_size);
8897 pm.pm_data_buf = (caddr_t)&dump_size;
8898
8899 ret = port->fp_fca_tran->fca_port_manage(
8900 port->fp_fca_handle, &pm);
8901
8902 if (ret == FC_SUCCESS) {
8903 if (ddi_copyout((void *)&dump_size,
8904 (void *)fcio->fcio_obuf, sizeof (dump_size),
8905 mode) == 0) {
8906 if (fp_fcio_copyout(fcio, data, mode)) {
8907 rval = EFAULT;
8908 }
8909 } else {
8910 rval = EFAULT;
8911 }
8912 } else {
8913 fcio->fcio_errno = ret;
8914 rval = EIO;
8915 if (fp_fcio_copyout(fcio, data, mode)) {
8916 rval = EFAULT;
8917 }
8918 }
8919 break;
8920 }
8921
8922 case FCIO_DOWNLOAD_FW: {
8923 caddr_t firmware;
8924 fc_fca_pm_t pm;
8925
8926 if (fcio->fcio_ilen <= 0 ||
8927 fcio->fcio_xfer != FCIO_XFER_WRITE) {
8928 rval = EINVAL;
8929 break;
8930 }
8931
8932 firmware = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP);
8933 if (ddi_copyin(fcio->fcio_ibuf, firmware,
8934 fcio->fcio_ilen, mode)) {
8935 rval = EFAULT;
8936 kmem_free(firmware, fcio->fcio_ilen);
8937 break;
8938 }
8939
8940 bzero((caddr_t)&pm, sizeof (pm));
8941 pm.pm_cmd_flags = FC_FCA_PM_WRITE;
8942 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FW;
8943 pm.pm_data_len = fcio->fcio_ilen;
8944 pm.pm_data_buf = firmware;
8945
8946 ret = port->fp_fca_tran->fca_port_manage(
8947 port->fp_fca_handle, &pm);
8948
8949 kmem_free(firmware, fcio->fcio_ilen);
8950
8951 if (ret != FC_SUCCESS) {
8952 fcio->fcio_errno = ret;
8953 rval = EIO;
8954 if (fp_fcio_copyout(fcio, data, mode)) {
8955 rval = EFAULT;
8956 }
8957 }
8958 break;
8959 }
8960
8961 case FCIO_DOWNLOAD_FCODE: {
8962 caddr_t fcode;
8963 fc_fca_pm_t pm;
8964
8965 if (fcio->fcio_ilen <= 0 ||
8966 fcio->fcio_xfer != FCIO_XFER_WRITE) {
8967 rval = EINVAL;
8968 break;
8969 }
8970
8971 fcode = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP);
8972 if (ddi_copyin(fcio->fcio_ibuf, fcode,
8973 fcio->fcio_ilen, mode)) {
8974 rval = EFAULT;
8975 kmem_free(fcode, fcio->fcio_ilen);
8976 break;
8977 }
8978
8979 bzero((caddr_t)&pm, sizeof (pm));
8980 pm.pm_cmd_flags = FC_FCA_PM_WRITE;
8981 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FCODE;
8982 pm.pm_data_len = fcio->fcio_ilen;
8983 pm.pm_data_buf = fcode;
8984
8985 ret = port->fp_fca_tran->fca_port_manage(
8986 port->fp_fca_handle, &pm);
8987
8988 kmem_free(fcode, fcio->fcio_ilen);
8989
8990 if (ret != FC_SUCCESS) {
8991 fcio->fcio_errno = ret;
8992 rval = EIO;
8993 if (fp_fcio_copyout(fcio, data, mode)) {
8994 rval = EFAULT;
8995 }
8996 }
8997 break;
8998 }
8999
9000 case FCIO_FORCE_DUMP:
9001 ret = port->fp_fca_tran->fca_reset(
9002 port->fp_fca_handle, FC_FCA_CORE);
9003
9004 if (ret != FC_SUCCESS) {
9005 fcio->fcio_errno = ret;
9006 rval = EIO;
9007 if (fp_fcio_copyout(fcio, data, mode)) {
9008 rval = EFAULT;
9009 }
9010 }
9011 break;
9012
9013 case FCIO_GET_DUMP: {
9014 caddr_t dump;
9015 uint32_t dump_size;
9016 fc_fca_pm_t pm;
9017
9018 if (fcio->fcio_xfer != FCIO_XFER_READ) {
9019 rval = EINVAL;
9020 break;
9021 }
9022 bzero((caddr_t)&pm, sizeof (pm));
9023
9024 pm.pm_cmd_flags = FC_FCA_PM_READ;
9025 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE;
9026 pm.pm_data_len = sizeof (dump_size);
9027 pm.pm_data_buf = (caddr_t)&dump_size;
9028
9029 ret = port->fp_fca_tran->fca_port_manage(
9030 port->fp_fca_handle, &pm);
9031
9032 if (ret != FC_SUCCESS) {
9033 fcio->fcio_errno = ret;
9034 rval = EIO;
9035 if (fp_fcio_copyout(fcio, data, mode)) {
9036 rval = EFAULT;
9037 }
9038 break;
9039 }
9040 if (fcio->fcio_olen != dump_size) {
9041 fcio->fcio_errno = FC_NOMEM;
9042 rval = EINVAL;
9043 if (fp_fcio_copyout(fcio, data, mode)) {
9044 rval = EFAULT;
9045 }
9046 break;
9047 }
9048
9049 dump = kmem_zalloc(dump_size, KM_SLEEP);
9050
9051 bzero((caddr_t)&pm, sizeof (pm));
9052 pm.pm_cmd_flags = FC_FCA_PM_READ;
9053 pm.pm_cmd_code = FC_PORT_GET_DUMP;
9054 pm.pm_data_len = dump_size;
9055 pm.pm_data_buf = dump;
9056
9057 ret = port->fp_fca_tran->fca_port_manage(
9058 port->fp_fca_handle, &pm);
9059
9060 if (ret == FC_SUCCESS) {
9061 if (ddi_copyout((void *)dump, (void *)fcio->fcio_obuf,
9062 dump_size, mode) == 0) {
9063 if (fp_fcio_copyout(fcio, data, mode)) {
9064 rval = EFAULT;
9065 }
9066 } else {
9067 rval = EFAULT;
9068 }
9069 } else {
9070 fcio->fcio_errno = ret;
9071 rval = EIO;
9072 if (fp_fcio_copyout(fcio, data, mode)) {
9073 rval = EFAULT;
9074 }
9075 }
9076 kmem_free(dump, dump_size);
9077 break;
9078 }
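
	/*
	 * Hedged sketch (illustration only) of the two-step dump retrieval
	 * implied by the cases above: FCIO_GET_DUMP requires fcio_olen to
	 * be exactly the size reported by FC_PORT_GET_DUMP_SIZE, so a
	 * caller is expected to issue FCIO_GET_DUMP_SIZE first and size
	 * the buffer from the result.
	 *
	 *	uint32_t	size;
	 *	caddr_t		buf;
	 *
	 *	(1) FCIO_GET_DUMP_SIZE with fcio_obuf = &size and
	 *	    fcio_olen = sizeof (size);
	 *	(2) buf = malloc(size);		(userland allocation)
	 *	(3) FCIO_GET_DUMP with fcio_obuf = buf, fcio_olen = size.
	 */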
9079
9080 case FCIO_GET_TOPOLOGY: {
9081 uint32_t user_topology;
9082
9083 if (fcio->fcio_xfer != FCIO_XFER_READ ||
9084 fcio->fcio_olen != sizeof (user_topology)) {
9085 rval = EINVAL;
9086 break;
9087 }
9088
9089 mutex_enter(&port->fp_mutex);
9090 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
9091 user_topology = FC_TOP_UNKNOWN;
9092 } else {
9093 user_topology = port->fp_topology;
9094 }
9095 mutex_exit(&port->fp_mutex);
9096
9097 if (ddi_copyout((void *)&user_topology,
9098 (void *)fcio->fcio_obuf, sizeof (user_topology),
9099 mode)) {
9100 rval = EFAULT;
9101 }
9102 break;
9103 }
9104
9105 case FCIO_RESET_LINK: {
9106 la_wwn_t pwwn;
9107
9108 		/*
9109 		 * Look at the WWN passed in through fcio_ibuf; if it
9110 		 * is zero then attempt to reset the local link/loop.
9111 		 * If the WWN names a remote port, see if it's an
9112 		 * NL_Port, and if yes, determine the LFA and reset
9113 		 * the remote loop via the LINIT ELS.
9114 		 */
9115
9116 if (fcio->fcio_xfer != FCIO_XFER_WRITE ||
9117 fcio->fcio_ilen != sizeof (pwwn)) {
9118 rval = EINVAL;
9119 break;
9120 }
9121
9122 if (ddi_copyin(fcio->fcio_ibuf, &pwwn,
9123 sizeof (pwwn), mode)) {
9124 rval = EFAULT;
9125 break;
9126 }
9127
9128 mutex_enter(&port->fp_mutex);
9129 if (port->fp_soft_state & FP_SOFT_IN_LINK_RESET) {
9130 mutex_exit(&port->fp_mutex);
9131 break;
9132 }
9133 port->fp_soft_state |= FP_SOFT_IN_LINK_RESET;
9134 mutex_exit(&port->fp_mutex);
9135
9136 job = fctl_alloc_job(JOB_LINK_RESET, 0, NULL, NULL, KM_SLEEP);
9137 if (job == NULL) {
9138 rval = ENOMEM;
9139 break;
9140 }
9141 job->job_counter = 1;
9142 job->job_private = (void *)&pwwn;
9143
9144 fctl_enque_job(port, job);
9145 fctl_jobwait(job);
9146
9147 mutex_enter(&port->fp_mutex);
9148 port->fp_soft_state &= ~FP_SOFT_IN_LINK_RESET;
9149 mutex_exit(&port->fp_mutex);
9150
9151 if (job->job_result != FC_SUCCESS) {
9152 fcio->fcio_errno = job->job_result;
9153 rval = EIO;
9154 if (fp_fcio_copyout(fcio, data, mode)) {
9155 rval = EFAULT;
9156 }
9157 }
9158 fctl_dealloc_job(job);
9159 break;
9160 }
9161
9162 case FCIO_RESET_HARD:
9163 ret = port->fp_fca_tran->fca_reset(
9164 port->fp_fca_handle, FC_FCA_RESET);
9165 if (ret != FC_SUCCESS) {
9166 fcio->fcio_errno = ret;
9167 rval = EIO;
9168 if (fp_fcio_copyout(fcio, data, mode)) {
9169 rval = EFAULT;
9170 }
9171 }
9172 break;
9173
9174 case FCIO_RESET_HARD_CORE:
9175 ret = port->fp_fca_tran->fca_reset(
9176 port->fp_fca_handle, FC_FCA_RESET_CORE);
9177 if (ret != FC_SUCCESS) {
9178 rval = EIO;
9179 fcio->fcio_errno = ret;
9180 if (fp_fcio_copyout(fcio, data, mode)) {
9181 rval = EFAULT;
9182 }
9183 }
9184 break;
9185
9186 case FCIO_DIAG: {
9187 fc_fca_pm_t pm;
9188
9189 bzero((caddr_t)&pm, sizeof (fc_fca_pm_t));
9190
9191 /* Validate user buffer from ioctl call. */
9192 if (((fcio->fcio_ilen > 0) && (fcio->fcio_ibuf == NULL)) ||
9193 ((fcio->fcio_ilen <= 0) && (fcio->fcio_ibuf != NULL)) ||
9194 ((fcio->fcio_alen > 0) && (fcio->fcio_abuf == NULL)) ||
9195 ((fcio->fcio_alen <= 0) && (fcio->fcio_abuf != NULL)) ||
9196 ((fcio->fcio_olen > 0) && (fcio->fcio_obuf == NULL)) ||
9197 ((fcio->fcio_olen <= 0) && (fcio->fcio_obuf != NULL))) {
9198 rval = EFAULT;
9199 break;
9200 }
9201
9202 if ((pm.pm_cmd_len = fcio->fcio_ilen) > 0) {
9203 pm.pm_cmd_buf = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP);
9204 if (ddi_copyin(fcio->fcio_ibuf, pm.pm_cmd_buf,
9205 fcio->fcio_ilen, mode)) {
9206 rval = EFAULT;
9207 goto fp_fcio_diag_cleanup;
9208 }
9209 }
9210
9211 if ((pm.pm_data_len = fcio->fcio_alen) > 0) {
9212 pm.pm_data_buf = kmem_zalloc(fcio->fcio_alen, KM_SLEEP);
9213 if (ddi_copyin(fcio->fcio_abuf, pm.pm_data_buf,
9214 fcio->fcio_alen, mode)) {
9215 rval = EFAULT;
9216 goto fp_fcio_diag_cleanup;
9217 }
9218 }
9219
9220 if ((pm.pm_stat_len = fcio->fcio_olen) > 0) {
9221 pm.pm_stat_buf = kmem_zalloc(fcio->fcio_olen, KM_SLEEP);
9222 }
9223
9224 pm.pm_cmd_code = FC_PORT_DIAG;
9225 pm.pm_cmd_flags = fcio->fcio_cmd_flags;
9226
9227 ret = port->fp_fca_tran->fca_port_manage(
9228 port->fp_fca_handle, &pm);
9229
9230 if (ret != FC_SUCCESS) {
9231 if (ret == FC_INVALID_REQUEST) {
9232 rval = ENOTTY;
9233 } else {
9234 rval = EIO;
9235 }
9236
9237 fcio->fcio_errno = ret;
9238 if (fp_fcio_copyout(fcio, data, mode)) {
9239 rval = EFAULT;
9240 }
9241 goto fp_fcio_diag_cleanup;
9242 }
9243
9244 		/*
9245 		 * pm_stat_len will contain the number of status bytes
9246 		 * an FCA driver requires to return the complete status
9247 		 * of the requested diag operation.  If the user buffer
9248 		 * is not large enough to hold the entire status, we
9249 		 * copy only the portion of data that fits in the buffer
9250 		 * and return ENOMEM to the user application.
9251 		 */
9252 if (pm.pm_stat_len > fcio->fcio_olen) {
9253 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
9254 "fp:FCIO_DIAG:status buffer too small\n");
9255
9256 rval = ENOMEM;
9257 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf,
9258 fcio->fcio_olen, mode)) {
9259 rval = EFAULT;
9260 goto fp_fcio_diag_cleanup;
9261 }
9262 } else {
9263 			/*
9264 			 * Copy only pm_stat_len bytes of data
9265 			 */
9266 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf,
9267 pm.pm_stat_len, mode)) {
9268 rval = EFAULT;
9269 goto fp_fcio_diag_cleanup;
9270 }
9271 }
9272
9273 if (fp_fcio_copyout(fcio, data, mode)) {
9274 rval = EFAULT;
9275 }
9276
9277 fp_fcio_diag_cleanup:
9278 if (pm.pm_cmd_buf != NULL) {
9279 kmem_free(pm.pm_cmd_buf, fcio->fcio_ilen);
9280 }
9281 if (pm.pm_data_buf != NULL) {
9282 kmem_free(pm.pm_data_buf, fcio->fcio_alen);
9283 }
9284 if (pm.pm_stat_buf != NULL) {
9285 kmem_free(pm.pm_stat_buf, fcio->fcio_olen);
9286 }
9287
9288 break;
9289 }
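
	/*
	 * Hedged summary sketch (illustration only) of the FCIO_DIAG
	 * buffer usage as coded above; fcio_cmd_flags is passed through
	 * to the FCA as pm_cmd_flags:
	 *
	 *	pm.pm_cmd_buf/len  <- fcio_ibuf/ilen	(diag command)
	 *	pm.pm_data_buf/len <- fcio_abuf/alen	(additional data)
	 *	pm.pm_stat_buf/len <- fcio_obuf/olen	(status to caller)
	 *
	 * If the FCA reports more status bytes than fcio_olen can hold,
	 * only fcio_olen bytes are copied out and ENOMEM is returned, so
	 * callers should be prepared to retry with a larger status buffer.
	 */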
9290
9291 case FCIO_GET_NODE_ID: {
9292 /* validate parameters */
9293 if (fcio->fcio_xfer != FCIO_XFER_READ ||
9294 fcio->fcio_olen < sizeof (fc_rnid_t)) {
9295 rval = EINVAL;
9296 break;
9297 }
9298
9299 rval = fp_get_rnid(port, data, mode, fcio);
9300
9301 /* ioctl handling is over */
9302 break;
9303 }
9304
9305 case FCIO_SEND_NODE_ID: {
9306 la_wwn_t pwwn;
9307
9308 /* validate parameters */
9309 if (fcio->fcio_ilen != sizeof (la_wwn_t) ||
9310 fcio->fcio_xfer != FCIO_XFER_READ) {
9311 rval = EINVAL;
9312 break;
9313 }
9314
9315 if (ddi_copyin(fcio->fcio_ibuf, &pwwn,
9316 sizeof (la_wwn_t), mode)) {
9317 rval = EFAULT;
9318 break;
9319 }
9320
9321 rval = fp_send_rnid(port, data, mode, fcio, &pwwn);
9322
9323 /* ioctl handling is over */
9324 break;
9325 }
9326
9327 case FCIO_SET_NODE_ID: {
9328 if (fcio->fcio_ilen != sizeof (fc_rnid_t) ||
9329 (fcio->fcio_xfer != FCIO_XFER_WRITE)) {
9330 rval = EINVAL;
9331 break;
9332 }
9333
9334 rval = fp_set_rnid(port, data, mode, fcio);
9335 break;
9336 }
9337
9338 case FCIO_LINK_STATUS: {
9339 fc_portid_t rls_req;
9340 fc_rls_acc_t *rls_acc;
9341 fc_fca_pm_t pm;
9342 uint32_t dest, src_id;
9343 fp_cmd_t *cmd;
9344 fc_remote_port_t *pd;
9345 uchar_t pd_flags;
9346
9347 /* validate parameters */
9348 if (fcio->fcio_ilen != sizeof (fc_portid_t) ||
9349 fcio->fcio_olen != sizeof (fc_rls_acc_t) ||
9350 fcio->fcio_xfer != FCIO_XFER_RW) {
9351 rval = EINVAL;
9352 break;
9353 }
9354
9355 if ((fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_FPORT) &&
9356 (fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_NPORT)) {
9357 rval = EINVAL;
9358 break;
9359 }
9360
9361 if (ddi_copyin((void *)fcio->fcio_ibuf, (void *)&rls_req,
9362 sizeof (fc_portid_t), mode)) {
9363 rval = EFAULT;
9364 break;
9365 }
9366
9367
9368 /* Determine the destination of the RLS frame */
9369 if (fcio->fcio_cmd_flags == FCIO_CFLAGS_RLS_DEST_FPORT) {
9370 dest = FS_FABRIC_F_PORT;
9371 } else {
9372 dest = rls_req.port_id;
9373 }
9374
9375 mutex_enter(&port->fp_mutex);
9376 src_id = port->fp_port_id.port_id;
9377 mutex_exit(&port->fp_mutex);
9378
9379 /* If dest is zero OR same as FCA ID, then use port_manage() */
9380 if (dest == 0 || dest == src_id) {
9381
9382 /* Allocate memory for link error status block */
9383 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
9384 ASSERT(rls_acc != NULL);
9385
9386 /* Prepare the port management structure */
9387 bzero((caddr_t)&pm, sizeof (pm));
9388
9389 pm.pm_cmd_flags = FC_FCA_PM_READ;
9390 pm.pm_cmd_code = FC_PORT_RLS;
9391 pm.pm_data_len = sizeof (*rls_acc);
9392 pm.pm_data_buf = (caddr_t)rls_acc;
9393
9394 /* Get the adapter's link error status block */
9395 ret = port->fp_fca_tran->fca_port_manage(
9396 port->fp_fca_handle, &pm);
9397
9398 if (ret == FC_SUCCESS) {
9399 /* xfer link status block to userland */
9400 if (ddi_copyout((void *)rls_acc,
9401 (void *)fcio->fcio_obuf,
9402 sizeof (*rls_acc), mode) == 0) {
9403 if (fp_fcio_copyout(fcio, data,
9404 mode)) {
9405 rval = EFAULT;
9406 }
9407 } else {
9408 rval = EFAULT;
9409 }
9410 } else {
9411 rval = EIO;
9412 fcio->fcio_errno = ret;
9413 if (fp_fcio_copyout(fcio, data, mode)) {
9414 rval = EFAULT;
9415 }
9416 }
9417
9418 kmem_free(rls_acc, sizeof (*rls_acc));
9419
9420 /* ioctl handling is over */
9421 break;
9422 }
9423
9424 		/*
9425 		 * Send RLS to the destination port.  Directing the RLS
9426 		 * frame to the F_Port is not yet supported; it will be
9427 		 * implemented in the future if needed.  The call below
9428 		 * to get "pd" will fail if dest is the F_Port.
9429 		 */
9430 pd = fctl_hold_remote_port_by_did(port, dest);
9431 if (pd == NULL) {
9432 fcio->fcio_errno = FC_BADOBJECT;
9433 rval = ENXIO;
9434 if (fp_fcio_copyout(fcio, data, mode)) {
9435 rval = EFAULT;
9436 }
9437 break;
9438 }
9439
9440 mutex_enter(&pd->pd_mutex);
9441 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
9442 mutex_exit(&pd->pd_mutex);
9443 fctl_release_remote_port(pd);
9444
9445 fcio->fcio_errno = FC_LOGINREQ;
9446 rval = EINVAL;
9447 if (fp_fcio_copyout(fcio, data, mode)) {
9448 rval = EFAULT;
9449 }
9450 break;
9451 }
9452 ASSERT(pd->pd_login_count >= 1);
9453 mutex_exit(&pd->pd_mutex);
9454
9455 /*
9456 * Allocate job structure and set job_code as DUMMY,
9457 * because we will not go through the job thread.
9458 * Instead fp_sendcmd() is called directly here.
9459 */
9460 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC,
9461 NULL, NULL, KM_SLEEP);
9462 ASSERT(job != NULL);
9463
9464 job->job_counter = 1;
9465
9466 cmd = fp_alloc_pkt(port, sizeof (la_els_rls_t),
9467 sizeof (la_els_rls_acc_t), KM_SLEEP, pd);
9468 if (cmd == NULL) {
9469 fcio->fcio_errno = FC_NOMEM;
9470 rval = ENOMEM;
9471
9472 fctl_release_remote_port(pd);
9473
9474 fctl_dealloc_job(job);
9475 if (fp_fcio_copyout(fcio, data, mode)) {
9476 rval = EFAULT;
9477 }
9478 break;
9479 }
9480
9481 /* Allocate memory for link error status block */
9482 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
9483
9484 mutex_enter(&port->fp_mutex);
9485 mutex_enter(&pd->pd_mutex);
9486
9487 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
9488 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
9489 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
9490 cmd->cmd_retry_count = 1;
9491 cmd->cmd_ulp_pkt = NULL;
9492
9493 fp_rls_init(cmd, job);
9494
9495 job->job_private = (void *)rls_acc;
9496
9497 pd_flags = pd->pd_flags;
9498 pd->pd_flags = PD_ELS_IN_PROGRESS;
9499
9500 mutex_exit(&pd->pd_mutex);
9501 mutex_exit(&port->fp_mutex);
9502
9503 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) {
9504 fctl_jobwait(job);
9505
9506 fcio->fcio_errno = job->job_result;
9507 if (job->job_result == FC_SUCCESS) {
9508 ASSERT(pd != NULL);
9509 /*
9510 * link error status block is now available.
9511 * Copy it to userland
9512 */
9513 ASSERT(job->job_private == (void *)rls_acc);
9514 if (ddi_copyout((void *)rls_acc,
9515 (void *)fcio->fcio_obuf,
9516 sizeof (*rls_acc), mode) == 0) {
9517 if (fp_fcio_copyout(fcio, data,
9518 mode)) {
9519 rval = EFAULT;
9520 }
9521 } else {
9522 rval = EFAULT;
9523 }
9524 } else {
9525 rval = EIO;
9526 }
9527 } else {
9528 rval = EIO;
9529 fp_free_pkt(cmd);
9530 }
9531
9532 if (rval) {
9533 mutex_enter(&port->fp_mutex);
9534 mutex_enter(&pd->pd_mutex);
9535 if (pd->pd_flags == PD_ELS_IN_PROGRESS) {
9536 pd->pd_flags = pd_flags;
9537 }
9538 mutex_exit(&pd->pd_mutex);
9539 mutex_exit(&port->fp_mutex);
9540 }
9541
9542 fctl_release_remote_port(pd);
9543 fctl_dealloc_job(job);
9544 kmem_free(rls_acc, sizeof (*rls_acc));
9545
9546 if (fp_fcio_copyout(fcio, data, mode)) {
9547 rval = EFAULT;
9548 }
9549 break;
9550 }
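
	/*
	 * Hedged sketch (illustration only) of FCIO_LINK_STATUS as coded
	 * above: fcio_ibuf holds an fc_portid_t naming the destination,
	 * fcio_obuf receives an fc_rls_acc_t, and fcio_cmd_flags selects
	 * FCIO_CFLAGS_RLS_DEST_NPORT or FCIO_CFLAGS_RLS_DEST_FPORT.  When
	 * the destination is 0 or the local port's own D_ID, the link
	 * error status block is fetched from the FCA via
	 * fca_port_manage(FC_PORT_RLS); otherwise an RLS ELS is sent to
	 * the (already logged-in) remote port.
	 *
	 *	fc_portid_t	dest = { 0 };		(0 = local port)
	 *	fc_rls_acc_t	rls;
	 *
	 *	fcio.fcio_cmd = FCIO_LINK_STATUS;
	 *	fcio.fcio_xfer = FCIO_XFER_RW;
	 *	fcio.fcio_cmd_flags = FCIO_CFLAGS_RLS_DEST_NPORT;
	 *	fcio.fcio_ibuf = (caddr_t)&dest;
	 *	fcio.fcio_ilen = sizeof (dest);
	 *	fcio.fcio_obuf = (caddr_t)&rls;
	 *	fcio.fcio_olen = sizeof (rls);
	 */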
9551
9552 case FCIO_NS: {
9553 fc_ns_cmd_t *ns_req;
9554 fc_ns_cmd32_t *ns_req32;
9555 fctl_ns_req_t *ns_cmd;
9556
9557 if (use32 == B_TRUE) {
9558 if (fcio->fcio_ilen != sizeof (*ns_req32)) {
9559 rval = EINVAL;
9560 break;
9561 }
9562
9563 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP);
9564 ns_req32 = kmem_zalloc(sizeof (*ns_req32), KM_SLEEP);
9565
9566 if (ddi_copyin(fcio->fcio_ibuf, ns_req32,
9567 sizeof (*ns_req32), mode)) {
9568 rval = EFAULT;
9569 kmem_free(ns_req, sizeof (*ns_req));
9570 kmem_free(ns_req32, sizeof (*ns_req32));
9571 break;
9572 }
9573
9574 ns_req->ns_flags = ns_req32->ns_flags;
9575 ns_req->ns_cmd = ns_req32->ns_cmd;
9576 ns_req->ns_req_len = ns_req32->ns_req_len;
9577 ns_req->ns_req_payload = ns_req32->ns_req_payload;
9578 ns_req->ns_resp_len = ns_req32->ns_resp_len;
9579 ns_req->ns_resp_payload = ns_req32->ns_resp_payload;
9580 ns_req->ns_fctl_private = ns_req32->ns_fctl_private;
9581 ns_req->ns_resp_hdr = ns_req32->ns_resp_hdr;
9582
9583 kmem_free(ns_req32, sizeof (*ns_req32));
9584 } else {
9585 if (fcio->fcio_ilen != sizeof (*ns_req)) {
9586 rval = EINVAL;
9587 break;
9588 }
9589
9590 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP);
9591
9592 if (ddi_copyin(fcio->fcio_ibuf, ns_req,
9593 sizeof (fc_ns_cmd_t), mode)) {
9594 rval = EFAULT;
9595 kmem_free(ns_req, sizeof (*ns_req));
9596 break;
9597 }
9598 }
9599
9600 if (ns_req->ns_req_len <= 0) {
9601 rval = EINVAL;
9602 kmem_free(ns_req, sizeof (*ns_req));
9603 break;
9604 }
9605
9606 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, NULL, KM_SLEEP);
9607 ASSERT(job != NULL);
9608
9609 ns_cmd = fctl_alloc_ns_cmd(ns_req->ns_req_len,
9610 ns_req->ns_resp_len, ns_req->ns_resp_len,
9611 FCTL_NS_FILL_NS_MAP, KM_SLEEP);
9612 ASSERT(ns_cmd != NULL);
9613 ns_cmd->ns_cmd_code = ns_req->ns_cmd;
9614
9615 if (ns_cmd->ns_cmd_code == NS_GA_NXT) {
9616 ns_cmd->ns_gan_max = 1;
9617 ns_cmd->ns_gan_index = 0;
9618 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID;
9619 }
9620
9621 if (ddi_copyin(ns_req->ns_req_payload,
9622 ns_cmd->ns_cmd_buf, ns_req->ns_req_len, mode)) {
9623 rval = EFAULT;
9624 fctl_free_ns_cmd(ns_cmd);
9625 fctl_dealloc_job(job);
9626 kmem_free(ns_req, sizeof (*ns_req));
9627 break;
9628 }
9629
9630 job->job_private = (void *)ns_cmd;
9631 fctl_enque_job(port, job);
9632 fctl_jobwait(job);
9633 rval = job->job_result;
9634
9635 if (rval == FC_SUCCESS) {
9636 if (ns_req->ns_resp_len) {
9637 if (ddi_copyout(ns_cmd->ns_data_buf,
9638 ns_req->ns_resp_payload,
9639 ns_cmd->ns_data_len, mode)) {
9640 rval = EFAULT;
9641 fctl_free_ns_cmd(ns_cmd);
9642 fctl_dealloc_job(job);
9643 kmem_free(ns_req, sizeof (*ns_req));
9644 break;
9645 }
9646 }
9647 } else {
9648 rval = EIO;
9649 }
9650 ns_req->ns_resp_hdr = ns_cmd->ns_resp_hdr;
9651 fctl_free_ns_cmd(ns_cmd);
9652 fctl_dealloc_job(job);
9653 kmem_free(ns_req, sizeof (*ns_req));
9654
9655 if (fp_fcio_copyout(fcio, data, mode)) {
9656 rval = EFAULT;
9657 }
9658 break;
9659 }
9660
9661 default:
9662 rval = ENOTTY;
9663 break;
9664 }
9665
9666 	/*
9667 	 * If set, clear the EXCL busy bit so that other
9668 	 * exclusive-access commands can be received.
9669 	 */
9670 mutex_enter(&port->fp_mutex);
9671 if (port->fp_flag & FP_EXCL_BUSY) {
9672 port->fp_flag &= ~FP_EXCL_BUSY;
9673 }
9674 mutex_exit(&port->fp_mutex);
9675
9676 return (rval);
9677 }
9678
9679
9680 /*
9681  * This function assumes that the response length
9682  * is the same regardless of the data model (ILP32
9683  * or LP64), which is true for all the ioctls
9684  * currently supported.
9685  */
9686 static int
9687 fp_copyout(void *from, void *to, size_t len, int mode)
9688 {
9689 return (ddi_copyout(from, to, len, mode));
9690 }
9691
9692 /*
9693  * This function sets the RNID (node identification data) parameters
9694  */
9695 static int
9696 fp_set_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio)
9697 {
9698 int rval = 0;
9699 fc_rnid_t *rnid;
9700 fc_fca_pm_t pm;
9701
9702 /* Allocate memory for node id block */
9703 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP);
9704
9705 if (ddi_copyin(fcio->fcio_ibuf, rnid, sizeof (fc_rnid_t), mode)) {
9706 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", EFAULT);
9707 kmem_free(rnid, sizeof (fc_rnid_t));
9708 return (EFAULT);
9709 }
9710
9711 /* Prepare the port management structure */
9712 bzero((caddr_t)&pm, sizeof (pm));
9713
9714 pm.pm_cmd_flags = FC_FCA_PM_WRITE;
9715 pm.pm_cmd_code = FC_PORT_SET_NODE_ID;
9716 pm.pm_data_len = sizeof (*rnid);
9717 pm.pm_data_buf = (caddr_t)rnid;
9718
9719 	/* Set the adapter's node ID data */
9720 rval = port->fp_fca_tran->fca_port_manage(
9721 port->fp_fca_handle, &pm);
9722
9723 if (rval != FC_SUCCESS) {
9724 fcio->fcio_errno = rval;
9725 rval = EIO;
9726 if (fp_fcio_copyout(fcio, data, mode)) {
9727 rval = EFAULT;
9728 }
9729 } else {
9730 mutex_enter(&port->fp_mutex);
9731 /* copy to the port structure */
9732 bcopy(rnid, &port->fp_rnid_params,
9733 sizeof (port->fp_rnid_params));
9734 mutex_exit(&port->fp_mutex);
9735 }
9736
9737 kmem_free(rnid, sizeof (fc_rnid_t));
9738
9739 if (rval != FC_SUCCESS) {
9740 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", rval);
9741 }
9742
9743 return (rval);
9744 }
9745
9746 /*
9747  * This function gets the RNID (node identification data) for the local port
9748  */
9749 static int
9750 fp_get_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio)
9751 {
9752 fc_rnid_t *rnid;
9753 fc_fca_pm_t pm;
9754 int rval = 0;
9755 uint32_t ret;
9756
9757 /* Allocate memory for rnid data block */
9758 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP);
9759
9760 mutex_enter(&port->fp_mutex);
9761 if (port->fp_rnid_init == 1) {
9762 bcopy(&port->fp_rnid_params, rnid, sizeof (fc_rnid_t));
9763 mutex_exit(&port->fp_mutex);
9764 /* xfer node info to userland */
9765 if (ddi_copyout((void *)rnid, (void *)fcio->fcio_obuf,
9766 sizeof (*rnid), mode) == 0) {
9767 if (fp_fcio_copyout(fcio, data, mode)) {
9768 rval = EFAULT;
9769 }
9770 } else {
9771 rval = EFAULT;
9772 }
9773
9774 kmem_free(rnid, sizeof (fc_rnid_t));
9775
9776 if (rval != FC_SUCCESS) {
9777 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d",
9778 rval);
9779 }
9780
9781 return (rval);
9782 }
9783 mutex_exit(&port->fp_mutex);
9784
9785 /* Prepare the port management structure */
9786 bzero((caddr_t)&pm, sizeof (pm));
9787
9788 pm.pm_cmd_flags = FC_FCA_PM_READ;
9789 pm.pm_cmd_code = FC_PORT_GET_NODE_ID;
9790 pm.pm_data_len = sizeof (fc_rnid_t);
9791 pm.pm_data_buf = (caddr_t)rnid;
9792
9793 /* Get the adapter's node data */
9794 ret = port->fp_fca_tran->fca_port_manage(
9795 port->fp_fca_handle,
9796 &pm);
9797
9798 if (ret == FC_SUCCESS) {
9799 /* initialize in the port_info */
9800 mutex_enter(&port->fp_mutex);
9801 port->fp_rnid_init = 1;
9802 bcopy(rnid, &port->fp_rnid_params, sizeof (*rnid));
9803 mutex_exit(&port->fp_mutex);
9804
9805 /* xfer node info to userland */
9806 if (ddi_copyout((void *)rnid,
9807 (void *)fcio->fcio_obuf,
9808 sizeof (*rnid), mode) == 0) {
9809 if (fp_fcio_copyout(fcio, data,
9810 mode)) {
9811 rval = EFAULT;
9812 }
9813 } else {
9814 rval = EFAULT;
9815 }
9816 } else {
9817 rval = EIO;
9818 fcio->fcio_errno = ret;
9819 if (fp_fcio_copyout(fcio, data, mode)) {
9820 rval = EFAULT;
9821 }
9822 }
9823
9824 kmem_free(rnid, sizeof (fc_rnid_t));
9825
9826 if (rval != FC_SUCCESS) {
9827 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", rval);
9828 }
9829
9830 return (rval);
9831 }
9832
9833 static int
9834 fp_send_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio,
9835 la_wwn_t *pwwn)
9836 {
9837 int rval = 0;
9838 fc_remote_port_t *pd;
9839 fp_cmd_t *cmd;
9840 job_request_t *job;
9841 la_els_rnid_acc_t *rnid_acc;
9842
9843 pd = fctl_get_remote_port_by_pwwn(port, pwwn);
9844 if (pd == NULL) {
9845 		/*
9846 		 * We can safely assume that the destination port is
9847 		 * logged in: either userland explicitly logs in before
9848 		 * issuing the RNID ioctl, or the device has already
9849 		 * been configured, meaning it is already logged in.
9850 		 */
9851
9852 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", ENXIO);
9853
9854 return (ENXIO);
9855 }
9856 	/*
9857 	 * Allocate a job structure and set job_code as DUMMY,
9858 	 * because we will not go through the job thread.
9859 	 * Instead fp_sendcmd() is called directly here.
9860 	 */
9861 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC,
9862 NULL, NULL, KM_SLEEP);
9863
9864 ASSERT(job != NULL);
9865
9866 job->job_counter = 1;
9867
9868 cmd = fp_alloc_pkt(port, sizeof (la_els_rnid_t),
9869 sizeof (la_els_rnid_acc_t), KM_SLEEP, pd);
9870 if (cmd == NULL) {
9871 fcio->fcio_errno = FC_NOMEM;
9872 rval = ENOMEM;
9873
9874 fctl_dealloc_job(job);
9875 if (fp_fcio_copyout(fcio, data, mode)) {
9876 rval = EFAULT;
9877 }
9878
9879 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval);
9880
9881 return (rval);
9882 }
9883
9884 /* Allocate memory for node id accept block */
9885 rnid_acc = kmem_zalloc(sizeof (la_els_rnid_acc_t), KM_SLEEP);
9886
9887 mutex_enter(&port->fp_mutex);
9888 mutex_enter(&pd->pd_mutex);
9889
9890 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
9891 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
9892 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
9893 cmd->cmd_retry_count = 1;
9894 cmd->cmd_ulp_pkt = NULL;
9895
9896 fp_rnid_init(cmd, fcio->fcio_cmd_flags, job);
9897
9898 job->job_private = (void *)rnid_acc;
9899
9900 pd->pd_flags = PD_ELS_IN_PROGRESS;
9901
9902 mutex_exit(&pd->pd_mutex);
9903 mutex_exit(&port->fp_mutex);
9904
9905 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) {
9906 fctl_jobwait(job);
9907 fcio->fcio_errno = job->job_result;
9908 if (job->job_result == FC_SUCCESS) {
9909 int rnid_cnt;
9910 ASSERT(pd != NULL);
9911 /*
9912 * node id block is now available.
9913 * Copy it to userland
9914 */
9915 ASSERT(job->job_private == (void *)rnid_acc);
9916
9917 /* get the response length */
9918 rnid_cnt = sizeof (ls_code_t) + sizeof (fc_rnid_hdr_t) +
9919 rnid_acc->hdr.cmn_len +
9920 rnid_acc->hdr.specific_len;
9921
9922 if (fcio->fcio_olen < rnid_cnt) {
9923 rval = EINVAL;
9924 } else if (ddi_copyout((void *)rnid_acc,
9925 (void *)fcio->fcio_obuf,
9926 rnid_cnt, mode) == 0) {
9927 if (fp_fcio_copyout(fcio, data,
9928 mode)) {
9929 rval = EFAULT;
9930 }
9931 } else {
9932 rval = EFAULT;
9933 }
9934 } else {
9935 rval = EIO;
9936 }
9937 } else {
9938 rval = EIO;
9939 if (pd) {
9940 mutex_enter(&pd->pd_mutex);
9941 pd->pd_flags = PD_IDLE;
9942 mutex_exit(&pd->pd_mutex);
9943 }
9944 fp_free_pkt(cmd);
9945 }
9946
9947 fctl_dealloc_job(job);
9948 kmem_free(rnid_acc, sizeof (la_els_rnid_acc_t));
9949
9950 if (fp_fcio_copyout(fcio, data, mode)) {
9951 rval = EFAULT;
9952 }
9953
9954 if (rval != FC_SUCCESS) {
9955 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval);
9956 }
9957
9958 return (rval);
9959 }
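
/*
 * Worked note on the RNID accept sizing above (added for clarity, not new
 * behavior): the number of bytes copied back to the caller is
 *
 *	rnid_cnt = sizeof (ls_code_t) + sizeof (fc_rnid_hdr_t) +
 *	    rnid_acc->hdr.cmn_len + rnid_acc->hdr.specific_len;
 *
 * i.e. the fixed LS_ACC/RNID header plus the common and format-specific
 * identification data lengths reported by the remote port.  fcio_olen must
 * be at least rnid_cnt or EINVAL is returned.
 */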
9960
9961 /*
9962 * Copy out to userland
9963 */
9964 static int
9965 fp_fcio_copyout(fcio_t *fcio, intptr_t data, int mode)
9966 {
9967 int rval;
9968
9969 #ifdef _MULTI_DATAMODEL
9970 switch (ddi_model_convert_from(mode & FMODELS)) {
9971 case DDI_MODEL_ILP32: {
9972 struct fcio32 fcio32;
9973
9974 fcio32.fcio_xfer = fcio->fcio_xfer;
9975 fcio32.fcio_cmd = fcio->fcio_cmd;
9976 fcio32.fcio_flags = fcio->fcio_flags;
9977 fcio32.fcio_cmd_flags = fcio->fcio_cmd_flags;
9978 fcio32.fcio_ilen = fcio->fcio_ilen;
9979 fcio32.fcio_ibuf =
9980 (caddr32_t)(uintptr_t)fcio->fcio_ibuf;
9981 fcio32.fcio_olen = fcio->fcio_olen;
9982 fcio32.fcio_obuf =
9983 (caddr32_t)(uintptr_t)fcio->fcio_obuf;
9984 fcio32.fcio_alen = fcio->fcio_alen;
9985 fcio32.fcio_abuf =
9986 (caddr32_t)(uintptr_t)fcio->fcio_abuf;
9987 fcio32.fcio_errno = fcio->fcio_errno;
9988
9989 rval = ddi_copyout((void *)&fcio32, (void *)data,
9990 sizeof (struct fcio32), mode);
9991 break;
9992 }
9993 case DDI_MODEL_NONE:
9994 rval = ddi_copyout((void *)fcio, (void *)data,
9995 sizeof (fcio_t), mode);
9996 break;
9997 }
9998 #else
9999 rval = ddi_copyout((void *)fcio, (void *)data, sizeof (fcio_t), mode);
10000 #endif
10001
10002 return (rval);
10003 }
10004
10005
10006 static void
10007 fp_p2p_online(fc_local_port_t *port, job_request_t *job)
10008 {
10009 uint32_t listlen;
10010 fc_portmap_t *changelist;
10011
10012 ASSERT(MUTEX_HELD(&port->fp_mutex));
10013 ASSERT(port->fp_topology == FC_TOP_PT_PT);
10014 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
10015
10016 listlen = 0;
10017 changelist = NULL;
10018
10019 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) {
10020 if (port->fp_statec_busy > 1) {
10021 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
10022 }
10023 }
10024 mutex_exit(&port->fp_mutex);
10025
10026 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) {
10027 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0);
10028 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist,
10029 listlen, listlen, KM_SLEEP);
10030
10031 mutex_enter(&port->fp_mutex);
10032 } else {
10033 ASSERT(changelist == NULL && listlen == 0);
10034 mutex_enter(&port->fp_mutex);
10035 if (--port->fp_statec_busy == 0) {
10036 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
10037 }
10038 }
10039 }
10040
10041 static int
10042 fp_fillout_p2pmap(fc_local_port_t *port, fcio_t *fcio, int mode)
10043 {
10044 int rval;
10045 int count;
10046 int index;
10047 int num_devices;
10048 fc_remote_node_t *node;
10049 fc_port_dev_t *devlist;
10050 struct pwwn_hash *head;
10051 fc_remote_port_t *pd;
10052
10053 ASSERT(MUTEX_HELD(&port->fp_mutex));
10054
10055 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t);
10056
10057 devlist = kmem_zalloc(sizeof (fc_port_dev_t) * num_devices, KM_SLEEP);
10058
10059 for (count = index = 0; index < pwwn_table_size; index++) {
10060 head = &port->fp_pwwn_table[index];
10061 pd = head->pwwn_head;
10062 while (pd != NULL) {
10063 mutex_enter(&pd->pd_mutex);
10064 if (pd->pd_state == PORT_DEVICE_INVALID) {
10065 mutex_exit(&pd->pd_mutex);
10066 pd = pd->pd_wwn_hnext;
10067 continue;
10068 }
10069
10070 devlist[count].dev_state = pd->pd_state;
10071 devlist[count].dev_hard_addr = pd->pd_hard_addr;
10072 devlist[count].dev_did = pd->pd_port_id;
10073 devlist[count].dev_did.priv_lilp_posit =
10074 (uint8_t)(index & 0xff);
10075 bcopy((caddr_t)pd->pd_fc4types,
10076 (caddr_t)devlist[count].dev_type,
10077 sizeof (pd->pd_fc4types));
10078
10079 bcopy((caddr_t)&pd->pd_port_name,
10080 (caddr_t)&devlist[count].dev_pwwn,
10081 sizeof (la_wwn_t));
10082
10083 node = pd->pd_remote_nodep;
10084 mutex_exit(&pd->pd_mutex);
10085
10086 if (node) {
10087 mutex_enter(&node->fd_mutex);
10088 bcopy((caddr_t)&node->fd_node_name,
10089 (caddr_t)&devlist[count].dev_nwwn,
10090 sizeof (la_wwn_t));
10091 mutex_exit(&node->fd_mutex);
10092 }
10093 count++;
10094 if (count >= num_devices) {
10095 goto found;
10096 }
10097 }
10098 }
10099 found:
10100 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf,
10101 sizeof (count), mode)) {
10102 rval = FC_FAILURE;
10103 } else if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf,
10104 sizeof (fc_port_dev_t) * num_devices, mode)) {
10105 rval = FC_FAILURE;
10106 } else {
10107 rval = FC_SUCCESS;
10108 }
10109
10110 kmem_free(devlist, sizeof (fc_port_dev_t) * num_devices);
10111
10112 return (rval);
10113 }
10114
10115
10116 /*
10117 * Handle Fabric ONLINE
10118 */
10119 static void
10120 fp_fabric_online(fc_local_port_t *port, job_request_t *job)
10121 {
10122 int index;
10123 int rval;
10124 int dbg_count;
10125 int count = 0;
10126 char ww_name[17];
10127 uint32_t d_id;
10128 uint32_t listlen;
10129 fctl_ns_req_t *ns_cmd;
10130 struct pwwn_hash *head;
10131 fc_remote_port_t *pd;
10132 fc_remote_port_t *npd;
10133 fc_portmap_t *changelist;
10134
10135 ASSERT(MUTEX_HELD(&port->fp_mutex));
10136 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology));
10137 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
10138
10139 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
10140 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
10141 0, KM_SLEEP);
10142
10143 ASSERT(ns_cmd != NULL);
10144
10145 ns_cmd->ns_cmd_code = NS_GID_PN;
10146
10147 /*
10148 * Check if orphans are showing up now
10149 */
10150 if (port->fp_orphan_count) {
10151 fc_orphan_t *orp;
10152 fc_orphan_t *norp = NULL;
10153 fc_orphan_t *prev = NULL;
10154
10155 for (orp = port->fp_orphan_list; orp; orp = norp) {
10156 norp = orp->orp_next;
10157 mutex_exit(&port->fp_mutex);
10158 orp->orp_nscan++;
10159
10160 job->job_counter = 1;
10161 job->job_result = FC_SUCCESS;
10162
10163 ((ns_req_gid_pn_t *)
10164 (ns_cmd->ns_cmd_buf))->pwwn = orp->orp_pwwn;
10165 ((ns_resp_gid_pn_t *)
10166 ns_cmd->ns_data_buf)->pid.port_id = 0;
10167 ((ns_resp_gid_pn_t *)
10168 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0;
10169
10170 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
10171 if (rval == FC_SUCCESS) {
10172 d_id =
10173 BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
10174 pd = fp_create_remote_port_by_ns(port,
10175 d_id, KM_SLEEP);
10176
10177 if (pd != NULL) {
10178 fc_wwn_to_str(&orp->orp_pwwn, ww_name);
10179
10180 fp_printf(port, CE_WARN, FP_LOG_ONLY,
10181 0, NULL, "N_x Port with D_ID=%x,"
10182 " PWWN=%s reappeared in fabric",
10183 d_id, ww_name);
10184
10185 mutex_enter(&port->fp_mutex);
10186 if (prev) {
10187 prev->orp_next = orp->orp_next;
10188 } else {
10189 ASSERT(orp ==
10190 port->fp_orphan_list);
10191 port->fp_orphan_list =
10192 orp->orp_next;
10193 }
10194 port->fp_orphan_count--;
10195 mutex_exit(&port->fp_mutex);
10196 kmem_free(orp, sizeof (*orp));
10197 count++;
10198
10199 mutex_enter(&pd->pd_mutex);
10200 pd->pd_flags = PD_ELS_MARK;
10201
10202 mutex_exit(&pd->pd_mutex);
10203 } else {
10204 prev = orp;
10205 }
10206 } else {
10207 if (orp->orp_nscan == FC_ORPHAN_SCAN_LIMIT) {
10208 fc_wwn_to_str(&orp->orp_pwwn, ww_name);
10209
10210 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0,
10211 NULL,
10212 " Port WWN %s removed from orphan"
10213 " list after %d scans", ww_name,
10214 orp->orp_nscan);
10215
10216 mutex_enter(&port->fp_mutex);
10217 if (prev) {
10218 prev->orp_next = orp->orp_next;
10219 } else {
10220 ASSERT(orp ==
10221 port->fp_orphan_list);
10222 port->fp_orphan_list =
10223 orp->orp_next;
10224 }
10225 port->fp_orphan_count--;
10226 mutex_exit(&port->fp_mutex);
10227
10228 kmem_free(orp, sizeof (*orp));
10229 } else {
10230 prev = orp;
10231 }
10232 }
10233 mutex_enter(&port->fp_mutex);
10234 }
10235 }
10236
10237 /*
10238 * Walk the Port WWN hash table and re-establish LOGIN
10239 * if a LOGIN was already performed on a particular
10240 * device; any failure to LOGIN should mark the
10241 * port device OLD.
10242 */
10243 for (index = 0; index < pwwn_table_size; index++) {
10244 head = &port->fp_pwwn_table[index];
10245 npd = head->pwwn_head;
10246
10247 while ((pd = npd) != NULL) {
10248 la_wwn_t *pwwn;
10249
10250 npd = pd->pd_wwn_hnext;
10251
10252 /*
10253 * Don't count the port devices that are new
10254 * unless the total number of devices visible
10255 * through this port is less than FP_MAX_DEVICES
10256 */
10257 mutex_enter(&pd->pd_mutex);
10258 if (port->fp_dev_count >= FP_MAX_DEVICES ||
10259 (port->fp_options & FP_TARGET_MODE)) {
10260 if (pd->pd_type == PORT_DEVICE_NEW ||
10261 pd->pd_flags == PD_ELS_MARK ||
10262 pd->pd_recepient != PD_PLOGI_INITIATOR) {
10263 mutex_exit(&pd->pd_mutex);
10264 continue;
10265 }
10266 } else {
10267 if (pd->pd_flags == PD_ELS_MARK ||
10268 pd->pd_recepient != PD_PLOGI_INITIATOR) {
10269 mutex_exit(&pd->pd_mutex);
10270 continue;
10271 }
10272 pd->pd_type = PORT_DEVICE_OLD;
10273 }
10274 count++;
10275
10276 /*
10277 * Consult with the name server about D_ID changes
10278 */
10279 job->job_counter = 1;
10280 job->job_result = FC_SUCCESS;
10281
10282 ((ns_req_gid_pn_t *)
10283 (ns_cmd->ns_cmd_buf))->pwwn = pd->pd_port_name;
10284 ((ns_resp_gid_pn_t *)
10285 ns_cmd->ns_data_buf)->pid.port_id = 0;
10286
10287 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->
10288 pid.priv_lilp_posit = 0;
10289
10290 pwwn = &pd->pd_port_name;
10291 pd->pd_flags = PD_ELS_MARK;
10292
10293 mutex_exit(&pd->pd_mutex);
10294 mutex_exit(&port->fp_mutex);
10295
10296 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
10297 if (rval != FC_SUCCESS) {
10298 fc_wwn_to_str(pwwn, ww_name);
10299
10300 mutex_enter(&pd->pd_mutex);
10301 d_id = pd->pd_port_id.port_id;
10302 pd->pd_type = PORT_DEVICE_DELETE;
10303 mutex_exit(&pd->pd_mutex);
10304
10305 FP_TRACE(FP_NHEAD1(3, 0),
10306 "fp_fabric_online: PD "
10307 "disappeared; d_id=%x, PWWN=%s",
10308 d_id, ww_name);
10309
10310 FP_TRACE(FP_NHEAD2(9, 0),
10311 "N_x Port with D_ID=%x, PWWN=%s"
10312 " disappeared from fabric", d_id,
10313 ww_name);
10314
10315 mutex_enter(&port->fp_mutex);
10316 continue;
10317 }
10318
10319 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
10320
10321 mutex_enter(&port->fp_mutex);
10322 mutex_enter(&pd->pd_mutex);
10323 if (d_id != pd->pd_port_id.port_id) {
10324 fctl_delist_did_table(port, pd);
10325 fc_wwn_to_str(pwwn, ww_name);
10326
10327 FP_TRACE(FP_NHEAD2(9, 0),
10328 "D_ID of a device with PWWN %s changed."
10329 " New D_ID = %x, OLD D_ID = %x", ww_name,
10330 d_id, pd->pd_port_id.port_id);
10331
10332 pd->pd_port_id.port_id = BE_32(d_id);
10333 pd->pd_type = PORT_DEVICE_CHANGED;
10334 fctl_enlist_did_table(port, pd);
10335 }
10336 mutex_exit(&pd->pd_mutex);
10337
10338 }
10339 }
10340
10341 if (ns_cmd) {
10342 fctl_free_ns_cmd(ns_cmd);
10343 }
10344
10345 listlen = 0;
10346 changelist = NULL;
10347 if (count) {
10348 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) {
10349 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET;
10350 mutex_exit(&port->fp_mutex);
10351 delay(drv_usectohz(FLA_RR_TOV * 1000 * 1000));
10352 mutex_enter(&port->fp_mutex);
10353 }
10354
10355 dbg_count = 0;
10356
10357 job->job_counter = count;
10358
10359 for (index = 0; index < pwwn_table_size; index++) {
10360 head = &port->fp_pwwn_table[index];
10361 npd = head->pwwn_head;
10362
10363 while ((pd = npd) != NULL) {
10364 npd = pd->pd_wwn_hnext;
10365
10366 mutex_enter(&pd->pd_mutex);
10367 if (pd->pd_flags != PD_ELS_MARK) {
10368 mutex_exit(&pd->pd_mutex);
10369 continue;
10370 }
10371
10372 dbg_count++;
10373
10374 /*
10375 * If it is already marked deletion, nothing
10376 * else to do.
10377 */
10378 if (pd->pd_type == PORT_DEVICE_DELETE) {
10379 pd->pd_type = PORT_DEVICE_OLD;
10380
10381 mutex_exit(&pd->pd_mutex);
10382 mutex_exit(&port->fp_mutex);
10383 fp_jobdone(job);
10384 mutex_enter(&port->fp_mutex);
10385
10386 continue;
10387 }
10388
10389 /*
10390 * If it is freshly discovered out of
10391 * the orphan list, nothing else to do
10392 */
10393 if (pd->pd_type == PORT_DEVICE_NEW) {
10394 pd->pd_flags = PD_IDLE;
10395
10396 mutex_exit(&pd->pd_mutex);
10397 mutex_exit(&port->fp_mutex);
10398 fp_jobdone(job);
10399 mutex_enter(&port->fp_mutex);
10400
10401 continue;
10402 }
10403
10404 pd->pd_flags = PD_IDLE;
10405 d_id = pd->pd_port_id.port_id;
10406
10407 /*
10408 * Explicitly mark all devices OLD; successful
10409 * PLOGI should reset this to either NO_CHANGE
10410 * or CHANGED.
10411 */
10412 if (pd->pd_type != PORT_DEVICE_CHANGED) {
10413 pd->pd_type = PORT_DEVICE_OLD;
10414 }
10415
10416 mutex_exit(&pd->pd_mutex);
10417 mutex_exit(&port->fp_mutex);
10418
10419 rval = fp_port_login(port, d_id, job,
10420 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL);
10421
10422 if (rval != FC_SUCCESS) {
10423 fp_jobdone(job);
10424 }
10425 mutex_enter(&port->fp_mutex);
10426 }
10427 }
10428 mutex_exit(&port->fp_mutex);
10429
10430 ASSERT(dbg_count == count);
10431 fp_jobwait(job);
10432
10433 mutex_enter(&port->fp_mutex);
10434
10435 ASSERT(port->fp_statec_busy > 0);
10436 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) {
10437 if (port->fp_statec_busy > 1) {
10438 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
10439 }
10440 }
10441 mutex_exit(&port->fp_mutex);
10442 } else {
10443 ASSERT(port->fp_statec_busy > 0);
10444 if (port->fp_statec_busy > 1) {
10445 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
10446 }
10447 mutex_exit(&port->fp_mutex);
10448 }
10449
10450 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) {
10451 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0);
10452
10453 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist,
10454 listlen, listlen, KM_SLEEP);
10455
10456 mutex_enter(&port->fp_mutex);
10457 } else {
10458 ASSERT(changelist == NULL && listlen == 0);
10459 mutex_enter(&port->fp_mutex);
10460 if (--port->fp_statec_busy == 0) {
10461 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
10462 }
10463 }
10464 }
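/*
 * For reference, the name server GID_PN round trip used repeatedly in
 * fp_fabric_online() above reduces to the following sketch (error handling
 * and locking elided; this is illustrative only, not a separate code path):
 *
 *	ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
 *	    sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
 *	    0, KM_SLEEP);
 *	ns_cmd->ns_cmd_code = NS_GID_PN;
 *	((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = pwwn;
 *	((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0;
 *
 *	job->job_counter = 1;
 *	if (fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP) == FC_SUCCESS)
 *		d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
 *
 *	fctl_free_ns_cmd(ns_cmd);
 */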
10465
10466
10467 /*
10468 * Fill out device list for userland ioctl in private loop
10469 */
10470 static int
10471 fp_fillout_loopmap(fc_local_port_t *port, fcio_t *fcio, int mode)
10472 {
10473 int rval;
10474 int count;
10475 int index;
10476 int num_devices;
10477 fc_remote_node_t *node;
10478 fc_port_dev_t *devlist;
10479 int lilp_device_count;
10480 fc_lilpmap_t *lilp_map;
10481 uchar_t *alpa_list;
10482
10483 ASSERT(MUTEX_HELD(&port->fp_mutex));
10484
10485 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t);
10486 if (port->fp_total_devices > port->fp_dev_count &&
10487 num_devices >= port->fp_total_devices) {
10488 job_request_t *job;
10489
10490 mutex_exit(&port->fp_mutex);
10491 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, NULL, KM_SLEEP);
10492 job->job_counter = 1;
10493
10494 mutex_enter(&port->fp_mutex);
10495 fp_get_loopmap(port, job);
10496 mutex_exit(&port->fp_mutex);
10497
10498 fp_jobwait(job);
10499 fctl_dealloc_job(job);
10500 } else {
10501 mutex_exit(&port->fp_mutex);
10502 }
10503 devlist = kmem_zalloc(sizeof (*devlist) * num_devices, KM_SLEEP);
10504
10505 mutex_enter(&port->fp_mutex);
10506
10507 /*
10508 * Applications are accustomed to getting the device list in
10509 * LILP map order. The HBA firmware usually returns the device
10510 * map in the LILP map order, and diagnostic applications
10511 * prefer to receive the device list in that order too
10512 */
10513 lilp_map = &port->fp_lilp_map;
10514 alpa_list = &lilp_map->lilp_alpalist[0];
10515
10516 /*
10517 * the length field corresponds to the offset in the LILP frame
10518 * which begins with 1. The thing to note here is that the
10519 * lilp_device_count is 1 more than fp->fp_total_devices since
10520 * the host adapter's alpa also shows up in the lilp map. We
10521 * don't however return details of the host adapter since
10522 * fctl_get_remote_port_by_did fails for the host adapter's ALPA
10523 * and applications are required to issue the FCIO_GET_HOST_PARAMS
10524 * ioctl to obtain details about the host adapter port.
10525 */
10526 lilp_device_count = lilp_map->lilp_length;
10527
10528 for (count = index = 0; index < lilp_device_count &&
10529 count < num_devices; index++) {
10530 uint32_t d_id;
10531 fc_remote_port_t *pd;
10532
10533 d_id = alpa_list[index];
10534
10535 mutex_exit(&port->fp_mutex);
10536 pd = fctl_get_remote_port_by_did(port, d_id);
10537 mutex_enter(&port->fp_mutex);
10538
10539 if (pd != NULL) {
10540 mutex_enter(&pd->pd_mutex);
10541
10542 if (pd->pd_state == PORT_DEVICE_INVALID) {
10543 mutex_exit(&pd->pd_mutex);
10544 continue;
10545 }
10546
10547 devlist[count].dev_state = pd->pd_state;
10548 devlist[count].dev_hard_addr = pd->pd_hard_addr;
10549 devlist[count].dev_did = pd->pd_port_id;
10550 devlist[count].dev_did.priv_lilp_posit =
10551 (uint8_t)(index & 0xff);
10552 bcopy((caddr_t)pd->pd_fc4types,
10553 (caddr_t)devlist[count].dev_type,
10554 sizeof (pd->pd_fc4types));
10555
10556 bcopy((caddr_t)&pd->pd_port_name,
10557 (caddr_t)&devlist[count].dev_pwwn,
10558 sizeof (la_wwn_t));
10559
10560 node = pd->pd_remote_nodep;
10561 mutex_exit(&pd->pd_mutex);
10562
10563 if (node) {
10564 mutex_enter(&node->fd_mutex);
10565 bcopy((caddr_t)&node->fd_node_name,
10566 (caddr_t)&devlist[count].dev_nwwn,
10567 sizeof (la_wwn_t));
10568 mutex_exit(&node->fd_mutex);
10569 }
10570 count++;
10571 }
10572 }
10573
10574 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf,
10575 sizeof (count), mode)) {
10576 rval = FC_FAILURE;
10577 } else if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf,
10578 sizeof (fc_port_dev_t) * num_devices, mode)) {
10581 rval = FC_FAILURE;
10582 } else {
10583 rval = FC_SUCCESS;
10584 }
10585
10586 kmem_free(devlist, sizeof (*devlist) * num_devices);
10587 ASSERT(MUTEX_HELD(&port->fp_mutex));
10588
10589 return (rval);
10590 }
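/*
 * Worked example of the LILP indexing above, using the relationship stated
 * in the comment (illustrative numbers): with this host adapter plus three
 * remote devices on the loop, lilp_length is 4 while fp_total_devices is 3.
 * Each lilp_alpalist[] entry is an AL_PA, which on a private loop doubles as
 * the D_ID handed to fctl_get_remote_port_by_did(); the index at which a
 * device was found is preserved in dev_did.priv_lilp_posit so diagnostic
 * applications see the list in LILP map order.  The host adapter's own AL_PA
 * contributes to lilp_length but is skipped, since no remote port exists
 * for it.
 */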
10591
10592
10593 /*
10594 * Completion function for responses to unsolicited commands
10595 */
10596 static void
10597 fp_unsol_intr(fc_packet_t *pkt)
10598 {
10599 fp_cmd_t *cmd;
10600 fc_local_port_t *port;
10601
10602 cmd = pkt->pkt_ulp_private;
10603 port = cmd->cmd_port;
10604
10605 mutex_enter(&port->fp_mutex);
10606 port->fp_out_fpcmds--;
10607 mutex_exit(&port->fp_mutex);
10608
10609 if (pkt->pkt_state != FC_PKT_SUCCESS) {
10610 fp_printf(port, CE_WARN, FP_LOG_ONLY, 0, pkt,
10611 "couldn't post response to unsolicited request;"
10612 " ox_id=%x rx_id=%x", pkt->pkt_cmd_fhdr.ox_id,
10613 pkt->pkt_resp_fhdr.rx_id);
10614 }
10615
10616 if (cmd == port->fp_els_resp_pkt) {
10617 mutex_enter(&port->fp_mutex);
10618 port->fp_els_resp_pkt_busy = 0;
10619 mutex_exit(&port->fp_mutex);
10620 return;
10621 }
10622
10623 fp_free_pkt(cmd);
10624 }
10625
10626
10627 /*
10628 * solicited LINIT ELS completion function
10629 */
10630 static void
10631 fp_linit_intr(fc_packet_t *pkt)
10632 {
10633 fp_cmd_t *cmd;
10634 job_request_t *job;
10635 fc_linit_resp_t acc;
10636 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port;
10637
10638 cmd = (fp_cmd_t *)pkt->pkt_ulp_private;
10639
10640 mutex_enter(&cmd->cmd_port->fp_mutex);
10641 cmd->cmd_port->fp_out_fpcmds--;
10642 mutex_exit(&cmd->cmd_port->fp_mutex);
10643
10644 if (FP_IS_PKT_ERROR(pkt)) {
10645 (void) fp_common_intr(pkt, 1);
10646 return;
10647 }
10648
10649 job = cmd->cmd_job;
10650
10651 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&acc,
10652 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
10653 if (acc.status != FC_LINIT_SUCCESS) {
10654 job->job_result = FC_FAILURE;
10655 } else {
10656 job->job_result = FC_SUCCESS;
10657 }
10658
10659 fp_iodone(cmd);
10660 }
10661
10662
10663 /*
10664 * Decode the unsolicited request; For FC-4 Device and Link data frames
10665 * notify the registered ULP of this FC-4 type right here. For Unsolicited
10666 * ELS requests, submit a request to the job_handler thread to work on it.
10667 * The intent is to act quickly on the FC-4 unsolicited link and data frames,
10668 * and to save interrupt time by handing the processing of unsolicited ELS
10669 * requests off to the job_handler thread.
10670 */
10671 static void
10672 fp_unsol_cb(opaque_t port_handle, fc_unsol_buf_t *buf, uint32_t type)
10673 {
10674 uchar_t r_ctl;
10675 uchar_t ls_code;
10676 uint32_t s_id;
10677 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
10678 uint32_t cb_arg;
10679 fp_cmd_t *cmd;
10680 fc_local_port_t *port;
10681 job_request_t *job;
10682 fc_remote_port_t *pd;
10683
10684 port = port_handle;
10685
10686 FP_TRACE(FP_NHEAD1(1, 0), "fp_unsol_cb: s_id=%x,"
10687 " d_id=%x, type=%x, r_ctl=%x, f_ctl=%x"
10688 " seq_id=%x, df_ctl=%x, seq_cnt=%x, ox_id=%x, rx_id=%x"
10689 " ro=%x, buffer[0]:%x", buf->ub_frame.s_id, buf->ub_frame.d_id,
10690 buf->ub_frame.type, buf->ub_frame.r_ctl, buf->ub_frame.f_ctl,
10691 buf->ub_frame.seq_id, buf->ub_frame.df_ctl, buf->ub_frame.seq_cnt,
10692 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro,
10693 buf->ub_buffer[0]);
10694
10695 if (type & 0x80000000) {
10696 /*
10697 * Huh ? Nothing much can be done without
10698 * a valid buffer. So just exit.
10699 */
10700 return;
10701 }
10702 /*
10703 * If unsolicited interrupts arrive while it isn't
10704 * safe to handle unsolicited callbacks, drop them; yes,
10705 * drop them on the floor
10706 */
10707 mutex_enter(&port->fp_mutex);
10708 port->fp_active_ubs++;
10709 if ((port->fp_soft_state &
10710 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) ||
10711 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
10712
10713 FP_TRACE(FP_NHEAD1(3, 0), "fp_unsol_cb: port state is "
10714 "not ONLINE. s_id=%x, d_id=%x, type=%x, "
10715 "seq_id=%x, ox_id=%x, rx_id=%x"
10716 "ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id,
10717 buf->ub_frame.type, buf->ub_frame.seq_id,
10718 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro);
10719
10720 ASSERT(port->fp_active_ubs > 0);
10721 if (--(port->fp_active_ubs) == 0) {
10722 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB;
10723 }
10724
10725 mutex_exit(&port->fp_mutex);
10726
10727 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle,
10728 1, &buf->ub_token);
10729
10730 return;
10731 }
10732
10733 r_ctl = buf->ub_frame.r_ctl;
10734 s_id = buf->ub_frame.s_id;
10735 if (port->fp_active_ubs == 1) {
10736 port->fp_soft_state |= FP_SOFT_IN_UNSOL_CB;
10737 }
10738
10739 if (r_ctl == R_CTL_ELS_REQ && buf->ub_buffer[0] == LA_ELS_LOGO &&
10740 port->fp_statec_busy) {
10741 mutex_exit(&port->fp_mutex);
10742 pd = fctl_get_remote_port_by_did(port, s_id);
10743 if (pd) {
10744 mutex_enter(&pd->pd_mutex);
10745 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
10746 FP_TRACE(FP_NHEAD1(3, 0),
10747 "LOGO for LOGGED IN D_ID %x",
10748 buf->ub_frame.s_id);
10749 pd->pd_state = PORT_DEVICE_VALID;
10750 }
10751 mutex_exit(&pd->pd_mutex);
10752 }
10753
10754 mutex_enter(&port->fp_mutex);
10755 ASSERT(port->fp_active_ubs > 0);
10756 if (--(port->fp_active_ubs) == 0) {
10757 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB;
10758 }
10759 mutex_exit(&port->fp_mutex);
10760
10761 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle,
10762 1, &buf->ub_token);
10763
10764 FP_TRACE(FP_NHEAD1(3, 0),
10765 "fp_unsol_cb() bailing out LOGO for D_ID %x",
10766 buf->ub_frame.s_id);
10767 return;
10768 }
10769
10770 if (port->fp_els_resp_pkt_busy == 0) {
10771 if (r_ctl == R_CTL_ELS_REQ) {
10772 ls_code = buf->ub_buffer[0];
10773
10774 switch (ls_code) {
10775 case LA_ELS_PLOGI:
10776 case LA_ELS_FLOGI:
10777 port->fp_els_resp_pkt_busy = 1;
10778 mutex_exit(&port->fp_mutex);
10779 fp_i_handle_unsol_els(port, buf);
10780
10781 mutex_enter(&port->fp_mutex);
10782 ASSERT(port->fp_active_ubs > 0);
10783 if (--(port->fp_active_ubs) == 0) {
10784 port->fp_soft_state &=
10785 ~FP_SOFT_IN_UNSOL_CB;
10786 }
10787 mutex_exit(&port->fp_mutex);
10788 port->fp_fca_tran->fca_ub_release(
10789 port->fp_fca_handle, 1, &buf->ub_token);
10790
10791 return;
10792 case LA_ELS_RSCN:
10793 if (++(port)->fp_rscn_count ==
10794 FC_INVALID_RSCN_COUNT) {
10795 ++(port)->fp_rscn_count;
10796 }
10797 rscn_count = port->fp_rscn_count;
10798 break;
10799
10800 default:
10801 break;
10802 }
10803 }
10804 } else if ((r_ctl == R_CTL_ELS_REQ) &&
10805 (buf->ub_buffer[0] == LA_ELS_RSCN)) {
10806 if (++port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
10807 ++port->fp_rscn_count;
10808 }
10809 rscn_count = port->fp_rscn_count;
10810 }
10811
10812 mutex_exit(&port->fp_mutex);
10813
10814 switch (r_ctl & R_CTL_ROUTING) {
10815 case R_CTL_DEVICE_DATA:
10816 /*
10817 * If the unsolicited buffer is a CT IU,
10818 * have the job_handler thread work on it.
10819 */
10820 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) {
10821 break;
10822 }
10823 /* FALLTHROUGH */
10824
10825 case R_CTL_FC4_SVC: {
10826 int sendup = 0;
10827
10828 /*
10829 * If a LOGIN wasn't performed before this request,
10830 * shut the door on this port with a reply that a
10831 * LOGIN is required. We make an exception however
10832 * for IP broadcast packets and pass them through
10833 * to the IP ULP(s) to handle broadcast requests.
10834 * This is not a problem for private loop devices
10835 * but for fabric topologies we don't log into the
10836 * remote ports during port initialization and
10837 * the ULPs need to log into requesting ports on
10838 * demand.
10839 */
10840 pd = fctl_get_remote_port_by_did(port, s_id);
10841 if (pd) {
10842 mutex_enter(&pd->pd_mutex);
10843 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
10844 sendup++;
10845 }
10846 mutex_exit(&pd->pd_mutex);
10847 } else if ((pd == NULL) &&
10848 (buf->ub_frame.type == FC_TYPE_IS8802_SNAP) &&
10849 (buf->ub_frame.d_id == 0xffffff ||
10850 buf->ub_frame.d_id == 0x00)) {
10851 /* broadcast IP frame - so send up via the job thread */
10852 break;
10853 }
10854
10855 /*
10856 * Send all FC4 services via job thread too
10857 */
10858 if ((r_ctl & R_CTL_ROUTING) == R_CTL_FC4_SVC) {
10859 break;
10860 }
10861
10862 if (sendup || !FC_IS_REAL_DEVICE(s_id)) {
10863 fctl_ulp_unsol_cb(port, buf, buf->ub_frame.type);
10864 return;
10865 }
10866
10867 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
10868 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
10869 0, KM_NOSLEEP, pd);
10870 if (cmd != NULL) {
10871 fp_els_rjt_init(port, cmd, buf,
10872 FC_ACTION_NON_RETRYABLE,
10873 FC_REASON_LOGIN_REQUIRED, NULL);
10874
10875 if (fp_sendcmd(port, cmd,
10876 port->fp_fca_handle) != FC_SUCCESS) {
10877 fp_free_pkt(cmd);
10878 }
10879 }
10880 }
10881
10882 mutex_enter(&port->fp_mutex);
10883 ASSERT(port->fp_active_ubs > 0);
10884 if (--(port->fp_active_ubs) == 0) {
10885 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB;
10886 }
10887 mutex_exit(&port->fp_mutex);
10888 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle,
10889 1, &buf->ub_token);
10890
10891 return;
10892 }
10893
10894 default:
10895 break;
10896 }
10897
10898 /*
10899 * Submit a Request to the job_handler thread to work
10900 * on the unsolicited request. The potential side effect
10901 * of this is that the unsolicited buffer takes a little
10902 * longer to get released but we save interrupt time in
10903 * the bargain.
10904 */
10905 cb_arg = (rscn_count == FC_INVALID_RSCN_COUNT) ? 0 : rscn_count;
10906
10907 /*
10908 * One way that the rscn_count will get used is described below :
10909 *
10910 * 1. fp_unsol_cb() gets an RSCN and updates fp_rscn_count.
10911 * 2. Before mutex is released, a copy of it is stored in rscn_count.
10912 * 3. The count is passed to job thread as JOB_UNSOL_REQUEST (below)
10913 * by overloading the job_cb_arg to pass the rscn_count
10914 * 4. When one of the routines processing the RSCN picks it up (ex:
10915 * fp_validate_rscn_page()), it passes this count in the map
10916 * structure (as part of the map_rscn_info structure member) to the
10917 * ULPs.
10918 * 5. When ULPs make calls back to the transport (example interfaces for
10919 * this are fc_ulp_transport(), fc_ulp_login(), fc_issue_els()), they
10920 * can now pass back this count as part of the fc_packet's
10921 * pkt_ulp_rscn_count member. fcp does this currently.
10922 * 6. When transport gets a call to transport a command on the wire, it
10923 * will check to see if there is a valid pkt_ulp_rsvd1 field in the
10924 * fc_packet. If there is, it will match that info with the current
10925 * rscn_count on that instance of the port. If they don't match up
10926 * then there was a newer RSCN. The ULP gets back an error code which
10927 * informs it about it - FC_DEVICE_BUSY_NEW_RSCN.
10928 * 7. At this point the ULP is free to make up its own mind as to how to
10929 * handle this. Currently, fcp will reset its retry counters and keep
10930 * retrying the operation it was doing in anticipation of getting a
10931 * new state change call back for the new RSCN.
10932 */
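/*
 * A minimal sketch of the ULP side of the flow described above, using only
 * the names mentioned in that comment (the exact member layout inside
 * map_rscn_info is assumed for illustration):
 *
 *	pkt->pkt_ulp_rscn_count = map->map_rscn_info.ulp_rscn_count;
 *	rval = fc_ulp_transport(ulp_handle, pkt);
 *	if (rval == FC_DEVICE_BUSY_NEW_RSCN) {
 *		a newer RSCN has arrived; reset the retry counters and
 *		wait for the state change callback, as fcp does today
 *	}
 */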
10933 job = fctl_alloc_job(JOB_UNSOL_REQUEST, 0, NULL,
10934 (opaque_t)(uintptr_t)cb_arg, KM_NOSLEEP);
10935 if (job == NULL) {
10936 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, "fp_unsol_cb() "
10937 "couldn't submit a job to the thread, failing..");
10938
10939 mutex_enter(&port->fp_mutex);
10940
10941 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
10942 --port->fp_rscn_count;
10943 }
10944
10945 ASSERT(port->fp_active_ubs > 0);
10946 if (--(port->fp_active_ubs) == 0) {
10947 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB;
10948 }
10949
10950 mutex_exit(&port->fp_mutex);
10951 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle,
10952 1, &buf->ub_token);
10953
10954 return;
10955 }
10956 job->job_private = (void *)buf;
10957 fctl_enque_job(port, job);
10958 }
10959
10960
10961 /*
10962 * Handle unsolicited requests
10963 */
10964 static void
10965 fp_handle_unsol_buf(fc_local_port_t *port, fc_unsol_buf_t *buf,
10966 job_request_t *job)
10967 {
10968 uchar_t r_ctl;
10969 uchar_t ls_code;
10970 uint32_t s_id;
10971 fp_cmd_t *cmd;
10972 fc_remote_port_t *pd;
10973 fp_unsol_spec_t *ub_spec;
10974
10975 r_ctl = buf->ub_frame.r_ctl;
10976 s_id = buf->ub_frame.s_id;
10977
10978 switch (r_ctl & R_CTL_ROUTING) {
10979 case R_CTL_EXTENDED_SVC:
10980 if (r_ctl != R_CTL_ELS_REQ) {
10981 break;
10982 }
10983
10984 ls_code = buf->ub_buffer[0];
10985 switch (ls_code) {
10986 case LA_ELS_LOGO:
10987 case LA_ELS_ADISC:
10988 case LA_ELS_PRLO:
10989 pd = fctl_get_remote_port_by_did(port, s_id);
10990 if (pd == NULL) {
10991 if (!FC_IS_REAL_DEVICE(s_id)) {
10992 break;
10993 }
10994 if (!FP_IS_CLASS_1_OR_2(buf->ub_class)) {
10995 break;
10996 }
10997 if ((cmd = fp_alloc_pkt(port,
10998 sizeof (la_els_rjt_t), 0, KM_SLEEP,
10999 NULL)) == NULL) {
11000 /*
11001 * Can this actually fail when
11002 * given KM_SLEEP? (Could be used
11003 * this way in a number of places.)
11004 */
11005 break;
11006 }
11007
11008 fp_els_rjt_init(port, cmd, buf,
11009 FC_ACTION_NON_RETRYABLE,
11010 FC_REASON_INVALID_LINK_CTRL, job);
11011
11012 if (fp_sendcmd(port, cmd,
11013 port->fp_fca_handle) != FC_SUCCESS) {
11014 fp_free_pkt(cmd);
11015 }
11016
11017 break;
11018 }
11019 if (ls_code == LA_ELS_LOGO) {
11020 fp_handle_unsol_logo(port, buf, pd, job);
11021 } else if (ls_code == LA_ELS_ADISC) {
11022 fp_handle_unsol_adisc(port, buf, pd, job);
11023 } else {
11024 fp_handle_unsol_prlo(port, buf, pd, job);
11025 }
11026 break;
11027
11028 case LA_ELS_PLOGI:
11029 fp_handle_unsol_plogi(port, buf, job, KM_SLEEP);
11030 break;
11031
11032 case LA_ELS_FLOGI:
11033 fp_handle_unsol_flogi(port, buf, job, KM_SLEEP);
11034 break;
11035
11036 case LA_ELS_RSCN:
11037 fp_handle_unsol_rscn(port, buf, job, KM_SLEEP);
11038 break;
11039
11040 default:
11041 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP);
11042 ub_spec->port = port;
11043 ub_spec->buf = buf;
11044
11045 (void) taskq_dispatch(port->fp_taskq,
11046 fp_ulp_unsol_cb, ub_spec, KM_SLEEP);
11047 return;
11048 }
11049 break;
11050
11051 case R_CTL_BASIC_SVC:
11052 /*
11053 * The unsolicited basic link services could be ABTS
11054 * and RMC (or even a NOP). Just BA_RJT them until
11055 * such time as a need arises to handle them more
11056 * carefully.
11057 */
11058 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11059 cmd = fp_alloc_pkt(port, sizeof (la_ba_rjt_t),
11060 0, KM_SLEEP, NULL);
11061 if (cmd != NULL) {
11062 fp_ba_rjt_init(port, cmd, buf, job);
11063 if (fp_sendcmd(port, cmd,
11064 port->fp_fca_handle) != FC_SUCCESS) {
11065 fp_free_pkt(cmd);
11066 }
11067 }
11068 }
11069 break;
11070
11071 case R_CTL_DEVICE_DATA:
11072 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) {
11073 /*
11074 * Mostly this is of type FC_TYPE_FC_SERVICES.
11075 * As we don't like any Unsolicited FC services
11076 * requests, we would do well to RJT them as
11077 * well.
11078 */
11079 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11080 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
11081 0, KM_SLEEP, NULL);
11082 if (cmd != NULL) {
11083 fp_els_rjt_init(port, cmd, buf,
11084 FC_ACTION_NON_RETRYABLE,
11085 FC_REASON_INVALID_LINK_CTRL, job);
11086
11087 if (fp_sendcmd(port, cmd,
11088 port->fp_fca_handle) !=
11089 FC_SUCCESS) {
11090 fp_free_pkt(cmd);
11091 }
11092 }
11093 }
11094 break;
11095 }
11096 /* FALLTHROUGH */
11097
11098 case R_CTL_FC4_SVC:
11099 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP);
11100 ub_spec->port = port;
11101 ub_spec->buf = buf;
11102
11103 (void) taskq_dispatch(port->fp_taskq,
11104 fp_ulp_unsol_cb, ub_spec, KM_SLEEP);
11105 return;
11106
11107 case R_CTL_LINK_CTL:
11108 /*
11109 * Turn deaf ear on unsolicited link control frames.
11110 * Typical unsolicited link control Frame is an LCR
11111 * (to reset End to End credit to the default login
11112 * value and abort current sequences for all classes)
11113 * An intelligent microcode/firmware should handle
11114 * this transparently at its level and not pass all
11115 * the way up here.
11116 *
11117 * Possible responses to LCR are R_RDY, F_RJT, P_RJT
11118 * or F_BSY. P_RJT is chosen to be the most appropriate
11119 * at this time.
11120 */
11121 /* FALLTHROUGH */
11122
11123 default:
11124 /*
11125 * Just reject everything else as an invalid request.
11126 */
11127 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11128 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
11129 0, KM_SLEEP, NULL);
11130 if (cmd != NULL) {
11131 fp_els_rjt_init(port, cmd, buf,
11132 FC_ACTION_NON_RETRYABLE,
11133 FC_REASON_INVALID_LINK_CTRL, job);
11134
11135 if (fp_sendcmd(port, cmd,
11136 port->fp_fca_handle) != FC_SUCCESS) {
11137 fp_free_pkt(cmd);
11138 }
11139 }
11140 }
11141 break;
11142 }
11143
11144 mutex_enter(&port->fp_mutex);
11145 ASSERT(port->fp_active_ubs > 0);
11146 if (--(port->fp_active_ubs) == 0) {
11147 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB;
11148 }
11149 mutex_exit(&port->fp_mutex);
11150 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle,
11151 1, &buf->ub_token);
11152 }
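/*
 * Quick reference for the dispatch above, keyed on (r_ctl & R_CTL_ROUTING):
 *
 *	R_CTL_EXTENDED_SVC	known ELS codes handled here or by helpers;
 *				anything else goes to the ULPs via the taskq
 *	R_CTL_BASIC_SVC		BA_RJT (class 1/2 only)
 *	R_CTL_DEVICE_DATA	FC services frames get an LS_RJT; other
 *				device data falls through to R_CTL_FC4_SVC
 *	R_CTL_FC4_SVC		handed to the ULPs via the taskq
 *	everything else		LS_RJT (class 1/2 only)
 *
 * Note that the taskq paths return early and hand buffer ownership to
 * fp_ulp_unsol_cb(); the other paths release the unsolicited buffer at the
 * bottom of this function.
 */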
11153
11154
11155 /*
11156 * Prepare a BA_RJT and send it over.
11157 */
11158 static void
11159 fp_ba_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf,
11160 job_request_t *job)
11161 {
11162 fc_packet_t *pkt;
11163 la_ba_rjt_t payload;
11164
11165 ASSERT(!MUTEX_HELD(&port->fp_mutex));
11166
11167 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
11168 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
11169 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
11170 cmd->cmd_retry_count = 1;
11171 cmd->cmd_ulp_pkt = NULL;
11172
11173 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
11174 cmd->cmd_job = job;
11175
11176 pkt = &cmd->cmd_pkt;
11177
11178 fp_unsol_resp_init(pkt, buf, R_CTL_LS_BA_RJT, FC_TYPE_BASIC_LS);
11179
11180 payload.reserved = 0;
11181 payload.reason_code = FC_REASON_CMD_UNSUPPORTED;
11182 payload.explanation = FC_EXPLN_NONE;
11183 payload.vendor = 0;
11184
11185 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
11186 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
11187 }
11188
11189
11190 /*
11191 * Prepare an LS_RJT and send it over
11192 */
11193 static void
11194 fp_els_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf,
11195 uchar_t action, uchar_t reason, job_request_t *job)
11196 {
11197 fc_packet_t *pkt;
11198 la_els_rjt_t payload;
11199
11200 ASSERT(!MUTEX_HELD(&port->fp_mutex));
11201
11202 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
11203 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
11204 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
11205 cmd->cmd_retry_count = 1;
11206 cmd->cmd_ulp_pkt = NULL;
11207
11208 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
11209 cmd->cmd_job = job;
11210
11211 pkt = &cmd->cmd_pkt;
11212
11213 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
11214
11215 payload.ls_code.ls_code = LA_ELS_RJT;
11216 payload.ls_code.mbz = 0;
11217 payload.action = action;
11218 payload.reason = reason;
11219 payload.reserved = 0;
11220 payload.vu = 0;
11221
11222 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
11223 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
11224 }
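/*
 * Typical usage of fp_els_rjt_init(), as seen throughout this file: size the
 * packet for an LS_RJT, fill it in, send it, and free it only if the send
 * itself fails (a successful send is completed and freed by fp_unsol_intr()).
 *
 *	cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 0, KM_SLEEP, NULL);
 *	if (cmd != NULL) {
 *		fp_els_rjt_init(port, cmd, buf, FC_ACTION_NON_RETRYABLE,
 *		    FC_REASON_INVALID_LINK_CTRL, job);
 *		if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS)
 *			fp_free_pkt(cmd);
 *	}
 */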
11225
11226 /*
11227 * Function: fp_prlo_acc_init
11228 *
11229 * Description: Initializes a Link Service Accept for a PRLO.
11230 *
11231 * Arguments: *port Local port through which the PRLO was
11232 * received.
11233 * *pd Remote port that sent the PRLO.
11234 * *buf Unsolicited buffer containing the PRLO
11235 * request.
11236 * job Job request.
11237 * sleep Allocation mode.
11238 *
11239 * Return Value: *cmd Command containing the response.
11240 *
11241 * Context: Depends on the parameter sleep.
11242 */
11243 fp_cmd_t *
11244 fp_prlo_acc_init(fc_local_port_t *port, fc_remote_port_t *pd,
11245 fc_unsol_buf_t *buf, job_request_t *job, int sleep)
11246 {
11247 fp_cmd_t *cmd;
11248 fc_packet_t *pkt;
11249 la_els_prlo_t *req;
11250 size_t len;
11251 uint16_t flags;
11252
11253 req = (la_els_prlo_t *)buf->ub_buffer;
11254 len = (size_t)ntohs(req->payload_length);
11255
11256 /*
11257 * The payload of the accept to a PRLO has to exactly match the
11258 * payload of the request (with the exception of the command code).
11259 */
11260 cmd = fp_alloc_pkt(port, (int)len, 0, sleep, pd);
11261
11262 if (cmd) {
11263 /*
11264 * The fp command was successfully allocated.
11265 */
11266 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
11267 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
11268 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
11269 cmd->cmd_retry_count = 1;
11270 cmd->cmd_ulp_pkt = NULL;
11271
11272 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
11273 cmd->cmd_job = job;
11274
11275 pkt = &cmd->cmd_pkt;
11276
11277 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP,
11278 FC_TYPE_EXTENDED_LS);
11279
11280 /* The code is overwritten for the copy. */
11281 req->ls_code = LA_ELS_ACC;
11282 /* Response code is set. */
11283 flags = ntohs(req->flags);
11284 flags &= ~SP_RESP_CODE_MASK;
11285 flags |= SP_RESP_CODE_REQ_EXECUTED;
11286 req->flags = htons(flags);
11287
11288 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)req,
11289 (uint8_t *)pkt->pkt_cmd, len, DDI_DEV_AUTOINCR);
11290 }
11291 return (cmd);
11292 }
11293
11294 /*
11295 * Prepare an ACC response to an ELS request
11296 */
11297 static void
11298 fp_els_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf,
11299 job_request_t *job)
11300 {
11301 fc_packet_t *pkt;
11302 ls_code_t payload;
11303
11304 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
11305 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
11306 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
11307 cmd->cmd_retry_count = 1;
11308 cmd->cmd_ulp_pkt = NULL;
11309
11310 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
11311 cmd->cmd_job = job;
11312
11313 pkt = &cmd->cmd_pkt;
11314
11315 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
11316
11317 payload.ls_code = LA_ELS_ACC;
11318 payload.mbz = 0;
11319
11320 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
11321 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
11322 }
11323
11324 /*
11325 * Unsolicited PRLO handler
11326 *
11327 * A Process Logout should be handled by the ULP that established it. However,
11328 * some devices send a PRLO to trigger a PLOGI followed by a PRLI. This happens
11329 * when a device implicitly logs out an initiator (for whatever reason) and
11330 * tries to get that initiator to re-establish the connection (PLOGI and PRLI).
11331 * The logical thing to do for the device would be to send a LOGO in response
11332 * to any FC4 frame sent by the initiator. Some devices choose, however, to send
11333 * a PRLO instead.
11334 *
11335 * From a Fibre Channel standpoint a PRLO calls for a PRLI. There's no reason to
11336 * think that the Port Login has been lost. If we follow the Fibre Channel
11337 * protocol to the letter a PRLI should be sent after accepting the PRLO. If
11338 * the Port Login has also been lost, the remote port will reject the PRLI
11339 * indicating that we must PLOGI first. The initiator will then turn around and
11340 * send a PLOGI. The way Leadville is layered and the way the ULP interface
11341 * is defined doesn't allow this scenario to be followed easily. If FCP were to
11342 * handle the PRLO and attempt the PRLI, the reject indicating that a PLOGI is
11343 * needed would be received by FCP. FCP would have, then, to tell the transport
11344 * (fp) to PLOGI. The problem is, the transport would still think the Port
11345 * Login is valid and there is no way for FCP to tell the transport: "PLOGI even
11346 * if you think it's not necessary". To work around that difficulty, the PRLO
11347 * is treated by the transport as a LOGO. The downside to it is a Port Login
11348 * may be disrupted (if a PLOGI wasn't actually needed) and another ULP (that
11349 * has nothing to do with the PRLO) may be impacted. However, this is a
11350 * scenario very unlikely to happen. As of today the only ULP in Leadville
11351 * using PRLI/PRLOs is FCP. For a PRLO to disrupt another ULP (that would be
11352 * FCIP), a SCSI target would have to be running FCP and FCIP (which is very
11353 * unlikely).
11354 */
11355 static void
11356 fp_handle_unsol_prlo(fc_local_port_t *port, fc_unsol_buf_t *buf,
11357 fc_remote_port_t *pd, job_request_t *job)
11358 {
11359 int busy;
11360 int rval;
11361 int retain;
11362 fp_cmd_t *cmd;
11363 fc_portmap_t *listptr;
11364 boolean_t tolerance;
11365 la_els_prlo_t *req;
11366
11367 req = (la_els_prlo_t *)buf->ub_buffer;
11368
11369 if ((ntohs(req->payload_length) !=
11370 (sizeof (service_parameter_page_t) + sizeof (ls_code_t))) ||
11371 (req->page_length != sizeof (service_parameter_page_t))) {
11372 /*
11373 * We are being very restrictive: only one page per
11374 * payload. If that is not the case we reject the ELS,
11375 * although we should really reply indicating that we
11376 * handle only a single page per PRLO.
11377 */
11378 goto fp_reject_prlo;
11379 }
11380
11381 if (ntohs(req->payload_length) > buf->ub_bufsize) {
11382 /*
11383 * This is in case the payload advertises a size bigger than
11384 * what it really is.
11385 */
11386 goto fp_reject_prlo;
11387 }
11388
11389 mutex_enter(&port->fp_mutex);
11390 busy = port->fp_statec_busy;
11391 mutex_exit(&port->fp_mutex);
11392
11393 mutex_enter(&pd->pd_mutex);
11394 tolerance = fctl_tc_increment(&pd->pd_logo_tc);
11395 if (!busy) {
11396 if (pd->pd_state != PORT_DEVICE_LOGGED_IN ||
11397 pd->pd_state == PORT_DEVICE_INVALID ||
11398 pd->pd_flags == PD_ELS_IN_PROGRESS ||
11399 pd->pd_type == PORT_DEVICE_OLD) {
11400 busy++;
11401 }
11402 }
11403
11404 if (busy) {
11405 mutex_exit(&pd->pd_mutex);
11406
11407 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x,"
11408 "pd=%p - busy",
11409 pd->pd_port_id.port_id, pd);
11410
11411 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11412 goto fp_reject_prlo;
11413 }
11414 } else {
11415 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0;
11416
11417 if (tolerance) {
11418 fctl_tc_reset(&pd->pd_logo_tc);
11419 retain = 0;
11420 pd->pd_state = PORT_DEVICE_INVALID;
11421 }
11422
11423 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p,"
11424 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd,
11425 tolerance, retain);
11426
11427 pd->pd_aux_flags |= PD_LOGGED_OUT;
11428 mutex_exit(&pd->pd_mutex);
11429
11430 cmd = fp_prlo_acc_init(port, pd, buf, job, KM_SLEEP);
11431 if (cmd == NULL) {
11432 return;
11433 }
11434
11435 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
11436 if (rval != FC_SUCCESS) {
11437 fp_free_pkt(cmd);
11438 return;
11439 }
11440
11441 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP);
11442
11443 if (retain) {
11444 fp_unregister_login(pd);
11445 fctl_copy_portmap(listptr, pd);
11446 } else {
11447 uint32_t d_id;
11448 char ww_name[17];
11449
11450 mutex_enter(&pd->pd_mutex);
11451 d_id = pd->pd_port_id.port_id;
11452 fc_wwn_to_str(&pd->pd_port_name, ww_name);
11453 mutex_exit(&pd->pd_mutex);
11454
11455 FP_TRACE(FP_NHEAD2(9, 0),
11456 "N_x Port with D_ID=%x, PWWN=%s logged out"
11457 " %d times in %d us; Giving up", d_id, ww_name,
11458 FC_LOGO_TOLERANCE_LIMIT,
11459 FC_LOGO_TOLERANCE_TIME_LIMIT);
11460
11461 fp_fillout_old_map(listptr, pd, 0);
11462 listptr->map_type = PORT_DEVICE_OLD;
11463 }
11464
11465 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0);
11466 return;
11467 }
11468
11469 fp_reject_prlo:
11470
11471 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 0, KM_SLEEP, pd);
11472 if (cmd != NULL) {
11473 fp_els_rjt_init(port, cmd, buf, FC_ACTION_NON_RETRYABLE,
11474 FC_REASON_INVALID_LINK_CTRL, job);
11475
11476 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
11477 fp_free_pkt(cmd);
11478 }
11479 }
11480 }
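/*
 * The single-page check at the top of fp_handle_unsol_prlo(), as arithmetic:
 * a one-page PRLO payload is the 4-byte ls_code_t header plus exactly one
 * service_parameter_page_t.  Illustrating with a 16-byte page (the size is
 * an assumption for this example), payload_length must be 4 + 16 = 20 and
 * page_length must be 16; anything else is rejected with an LS_RJT.
 */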
11481
11482 /*
11483 * Unsolicited LOGO handler
11484 */
11485 static void
11486 fp_handle_unsol_logo(fc_local_port_t *port, fc_unsol_buf_t *buf,
11487 fc_remote_port_t *pd, job_request_t *job)
11488 {
11489 int busy;
11490 int rval;
11491 int retain;
11492 fp_cmd_t *cmd;
11493 fc_portmap_t *listptr;
11494 boolean_t tolerance;
11495
11496 mutex_enter(&port->fp_mutex);
11497 busy = port->fp_statec_busy;
11498 mutex_exit(&port->fp_mutex);
11499
11500 mutex_enter(&pd->pd_mutex);
11501 tolerance = fctl_tc_increment(&pd->pd_logo_tc);
11502 if (!busy) {
11503 if (pd->pd_state != PORT_DEVICE_LOGGED_IN ||
11504 pd->pd_state == PORT_DEVICE_INVALID ||
11505 pd->pd_flags == PD_ELS_IN_PROGRESS ||
11506 pd->pd_type == PORT_DEVICE_OLD) {
11507 busy++;
11508 }
11509 }
11510
11511 if (busy) {
11512 mutex_exit(&pd->pd_mutex);
11513
11514 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x,"
11515 "pd=%p - busy",
11516 pd->pd_port_id.port_id, pd);
11517
11518 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11519 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
11520 0, KM_SLEEP, pd);
11521 if (cmd != NULL) {
11522 fp_els_rjt_init(port, cmd, buf,
11523 FC_ACTION_NON_RETRYABLE,
11524 FC_REASON_INVALID_LINK_CTRL, job);
11525
11526 if (fp_sendcmd(port, cmd,
11527 port->fp_fca_handle) != FC_SUCCESS) {
11528 fp_free_pkt(cmd);
11529 }
11530 }
11531 }
11532 } else {
11533 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0;
11534
11535 if (tolerance) {
11536 fctl_tc_reset(&pd->pd_logo_tc);
11537 retain = 0;
11538 pd->pd_state = PORT_DEVICE_INVALID;
11539 }
11540
11541 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p,"
11542 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd,
11543 tolerance, retain);
11544
11545 pd->pd_aux_flags |= PD_LOGGED_OUT;
11546 mutex_exit(&pd->pd_mutex);
11547
11548 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0,
11549 KM_SLEEP, pd);
11550 if (cmd == NULL) {
11551 return;
11552 }
11553
11554 fp_els_acc_init(port, cmd, buf, job);
11555
11556 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
11557 if (rval != FC_SUCCESS) {
11558 fp_free_pkt(cmd);
11559 return;
11560 }
11561
11562 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP);
11563
11564 if (retain) {
11565 job_request_t *job;
11566 fctl_ns_req_t *ns_cmd;
11567
11568 /*
11569 * When we get a LOGO, first try to get the port ID from the
11570 * name server; if that fails, there is no need to send a
11571 * PLOGI to that remote port.
11572 */
11573 job = fctl_alloc_job(
11574 JOB_NS_CMD, 0, NULL, (opaque_t)port, KM_SLEEP);
11575
11576 if (job != NULL) {
11577 ns_cmd = fctl_alloc_ns_cmd(
11578 sizeof (ns_req_gid_pn_t),
11579 sizeof (ns_resp_gid_pn_t),
11580 sizeof (ns_resp_gid_pn_t),
11581 0, KM_SLEEP);
11582 if (ns_cmd != NULL) {
11583 int ret;
11584 job->job_result = FC_SUCCESS;
11585 ns_cmd->ns_cmd_code = NS_GID_PN;
11586 ((ns_req_gid_pn_t *)
11587 (ns_cmd->ns_cmd_buf))->pwwn =
11588 pd->pd_port_name;
11589 ret = fp_ns_query(
11590 port, ns_cmd, job, 1, KM_SLEEP);
11591 if ((ret != FC_SUCCESS) ||
11592 (job->job_result != FC_SUCCESS)) {
11593 fctl_free_ns_cmd(ns_cmd);
11594 fctl_dealloc_job(job);
11595 FP_TRACE(FP_NHEAD2(9, 0),
11596 "NS query failed, delete pd");
11598 goto delete_pd;
11599 }
11600 fctl_free_ns_cmd(ns_cmd);
11601 }
11602 fctl_dealloc_job(job);
11603 }
11604 fp_unregister_login(pd);
11605 fctl_copy_portmap(listptr, pd);
11606 } else {
11607 uint32_t d_id;
11608 char ww_name[17];
11609
11610 delete_pd:
11611 mutex_enter(&pd->pd_mutex);
11612 d_id = pd->pd_port_id.port_id;
11613 fc_wwn_to_str(&pd->pd_port_name, ww_name);
11614 mutex_exit(&pd->pd_mutex);
11615
11616 FP_TRACE(FP_NHEAD2(9, 0),
11617 "N_x Port with D_ID=%x, PWWN=%s logged out"
11618 " %d times in %d us; Giving up", d_id, ww_name,
11619 FC_LOGO_TOLERANCE_LIMIT,
11620 FC_LOGO_TOLERANCE_TIME_LIMIT);
11621
11622 fp_fillout_old_map(listptr, pd, 0);
11623 listptr->map_type = PORT_DEVICE_OLD;
11624 }
11625
11626 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0);
11627 }
11628 }
11629
11630
11631 /*
11632 * Perform general purpose preparation of a response to an unsolicited request
11633 */
11634 static void
11635 fp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
11636 uchar_t r_ctl, uchar_t type)
11637 {
11638 pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
11639 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
11640 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
11641 pkt->pkt_cmd_fhdr.type = type;
11642 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
11643 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
11644 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl;
11645 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
11646 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
11647 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
11648 pkt->pkt_cmd_fhdr.ro = 0;
11649 pkt->pkt_cmd_fhdr.rsvd = 0;
11650 pkt->pkt_comp = fp_unsol_intr;
11651 pkt->pkt_timeout = FP_ELS_TIMEOUT;
11652 pkt->pkt_ub_resp_token = (opaque_t)buf;
11653 }
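/*
 * Worked example of the header derivation above: for an unsolicited frame
 * received with s_id=0x10100, d_id=0xef, ox_id=0x12 and rx_id=0x34, the
 * response header comes out with d_id=0x10100 and s_id=0xef (addresses
 * swapped so the reply goes back to the originator), the same seq_id,
 * seq_cnt, ox_id and rx_id (so it stays within the originator's exchange),
 * and f_ctl marked as last sequence with the exchange context flipped to
 * responder.  The numeric values here are made up purely for illustration.
 */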
11654
11655 /*
11656 * Immediate handling of unsolicited FLOGI and PLOGI requests. In the
11657 * early development days of public loop soc+ firmware, numerous problems
11658 * were encountered (the details are undocumented and history now) which
11659 * led to the birth of this function.
11660 *
11661 * If a pre-allocated unsolicited response packet is free, send out an
11662 * immediate response, otherwise submit the request to the port thread
11663 * to do the deferred processing.
11664 */
11665 static void
11666 fp_i_handle_unsol_els(fc_local_port_t *port, fc_unsol_buf_t *buf)
11667 {
11668 int sent;
11669 int f_port;
11670 int do_acc;
11671 fp_cmd_t *cmd;
11672 la_els_logi_t *payload;
11673 fc_remote_port_t *pd;
11674 char dww_name[17];
11675
11676 ASSERT(!MUTEX_HELD(&port->fp_mutex));
11677
11678 cmd = port->fp_els_resp_pkt;
11679
11680 mutex_enter(&port->fp_mutex);
11681 do_acc = (port->fp_statec_busy == 0) ? 1 : 0;
11682 mutex_exit(&port->fp_mutex);
11683
11684 switch (buf->ub_buffer[0]) {
11685 case LA_ELS_PLOGI: {
11686 int small;
11687
11688 payload = (la_els_logi_t *)buf->ub_buffer;
11689
11690 f_port = FP_IS_F_PORT(payload->
11691 common_service.cmn_features) ? 1 : 0;
11692
11693 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name,
11694 &payload->nport_ww_name);
11695 pd = fctl_get_remote_port_by_pwwn(port,
11696 &payload->nport_ww_name);
11697 if (pd) {
11698 mutex_enter(&pd->pd_mutex);
11699 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0;
11700 /*
11701 * Most likely this means a cross login is in
11702 * progress or a device is about to be yanked out.
11703 * Only accept the plogi if my wwn is smaller.
11704 */
11705 if (pd->pd_type == PORT_DEVICE_OLD) {
11706 sent = 1;
11707 }
11708 /*
11709 * Stop any PLOGI attempt from the local side
11710 * to speed up discovery progress.
11712 * Mark the pd as PD_PLOGI_RECEPIENT.
11713 */
11714 if (f_port == 0 && small < 0) {
11715 pd->pd_recepient = PD_PLOGI_RECEPIENT;
11716 }
11717 fc_wwn_to_str(&pd->pd_port_name, dww_name);
11718
11719 mutex_exit(&pd->pd_mutex);
11720
11721 FP_TRACE(FP_NHEAD1(3, 0), "fp_i_handle_unsol_els: "
11722 "Unsol PLOGI received. PD still exists in the "
11723 "PWWN list. pd=%p PWWN=%s, sent=%x",
11724 pd, dww_name, sent);
11725
11726 if (f_port == 0 && small < 0) {
11727 FP_TRACE(FP_NHEAD1(3, 0),
11728 "fp_i_handle_unsol_els: Mark the pd"
11729 " as plogi recipient, pd=%p, PWWN=%s"
11730 ", sent=%x",
11731 pd, dww_name, sent);
11732 }
11733 } else {
11734 sent = 0;
11735 }
11736
11737 /*
11738 * To avoid Login collisions, accept only if my WWN
11739 * is smaller than the requester's (a curious side note
11740 * would be that this rule may not satisfy the PLOGIs
11741 * initiated by the switch from not-so-well known
11742 * ports such as 0xFFFC41)
11743 */
11744 if ((f_port == 0 && small < 0) ||
11745 (((small > 0 && do_acc) ||
11746 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) {
11747 if (fp_is_class_supported(port->fp_cos,
11748 buf->ub_class) == FC_FAILURE) {
11749 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11750 cmd->cmd_pkt.pkt_cmdlen =
11751 sizeof (la_els_rjt_t);
11752 cmd->cmd_pkt.pkt_rsplen = 0;
11753 fp_els_rjt_init(port, cmd, buf,
11754 FC_ACTION_NON_RETRYABLE,
11755 FC_REASON_CLASS_NOT_SUPP, NULL);
11756 FP_TRACE(FP_NHEAD1(3, 0),
11757 "fp_i_handle_unsol_els: "
11758 "Unsupported class. "
11759 "Rejecting PLOGI");
11760
11761 } else {
11762 mutex_enter(&port->fp_mutex);
11763 port->fp_els_resp_pkt_busy = 0;
11764 mutex_exit(&port->fp_mutex);
11765 return;
11766 }
11767 } else {
11768 cmd->cmd_pkt.pkt_cmdlen =
11769 sizeof (la_els_logi_t);
11770 cmd->cmd_pkt.pkt_rsplen = 0;
11771
11772 /*
11773 * If fp_port_id is zero and topology is
11774 * Point-to-Point, get the local port id from
11775 * the d_id in the PLOGI request.
11776 * If the outgoing FLOGI hasn't been accepted,
11777 * the topology will be unknown here. But it's
11778 * still safe to save the d_id to fp_port_id,
11779 * just because it will be overwritten later
11780 * if the topology is not Point-to-Point.
11781 */
11782 mutex_enter(&port->fp_mutex);
11783 if ((port->fp_port_id.port_id == 0) &&
11784 (port->fp_topology == FC_TOP_PT_PT ||
11785 port->fp_topology == FC_TOP_UNKNOWN)) {
11786 port->fp_port_id.port_id =
11787 buf->ub_frame.d_id;
11788 }
11789 mutex_exit(&port->fp_mutex);
11790
11791 /*
11792 * Sometime later, we should validate
11793 * the service parameters instead of
11794 * just accepting it.
11795 */
11796 fp_login_acc_init(port, cmd, buf, NULL,
11797 KM_NOSLEEP);
11798 FP_TRACE(FP_NHEAD1(3, 0),
11799 "fp_i_handle_unsol_els: Accepting PLOGI,"
11800 " f_port=%d, small=%d, do_acc=%d,"
11801 " sent=%d.", f_port, small, do_acc,
11802 sent);
11803 }
11804 } else {
11805 if (FP_IS_CLASS_1_OR_2(buf->ub_class) ||
11806 port->fp_options & FP_SEND_RJT) {
11807 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t);
11808 cmd->cmd_pkt.pkt_rsplen = 0;
11809 fp_els_rjt_init(port, cmd, buf,
11810 FC_ACTION_NON_RETRYABLE,
11811 FC_REASON_LOGICAL_BSY, NULL);
11812 FP_TRACE(FP_NHEAD1(3, 0),
11813 "fp_i_handle_unsol_els: "
11814 "Rejecting PLOGI with Logical Busy."
11815 "Possible Login collision.");
11816 } else {
11817 mutex_enter(&port->fp_mutex);
11818 port->fp_els_resp_pkt_busy = 0;
11819 mutex_exit(&port->fp_mutex);
11820 return;
11821 }
11822 }
11823 break;
11824 }
11825
11826 case LA_ELS_FLOGI:
11827 if (fp_is_class_supported(port->fp_cos,
11828 buf->ub_class) == FC_FAILURE) {
11829 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11830 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t);
11831 cmd->cmd_pkt.pkt_rsplen = 0;
11832 fp_els_rjt_init(port, cmd, buf,
11833 FC_ACTION_NON_RETRYABLE,
11834 FC_REASON_CLASS_NOT_SUPP, NULL);
11835 FP_TRACE(FP_NHEAD1(3, 0),
11836 "fp_i_handle_unsol_els: "
11837 "Unsupported Class. Rejecting FLOGI.");
11838 } else {
11839 mutex_enter(&port->fp_mutex);
11840 port->fp_els_resp_pkt_busy = 0;
11841 mutex_exit(&port->fp_mutex);
11842 return;
11843 }
11844 } else {
11845 mutex_enter(&port->fp_mutex);
11846 if (FC_PORT_STATE_MASK(port->fp_state) !=
11847 FC_STATE_ONLINE || (port->fp_port_id.port_id &&
11848 buf->ub_frame.s_id == port->fp_port_id.port_id)) {
11849 mutex_exit(&port->fp_mutex);
11850 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11851 cmd->cmd_pkt.pkt_cmdlen =
11852 sizeof (la_els_rjt_t);
11853 cmd->cmd_pkt.pkt_rsplen = 0;
11854 fp_els_rjt_init(port, cmd, buf,
11855 FC_ACTION_NON_RETRYABLE,
11856 FC_REASON_INVALID_LINK_CTRL,
11857 NULL);
11858 FP_TRACE(FP_NHEAD1(3, 0),
11859 "fp_i_handle_unsol_els: "
11860 "Invalid Link Ctrl. "
11861 "Rejecting FLOGI.");
11862 } else {
11863 mutex_enter(&port->fp_mutex);
11864 port->fp_els_resp_pkt_busy = 0;
11865 mutex_exit(&port->fp_mutex);
11866 return;
11867 }
11868 } else {
11869 mutex_exit(&port->fp_mutex);
11870 cmd->cmd_pkt.pkt_cmdlen =
11871 sizeof (la_els_logi_t);
11872 cmd->cmd_pkt.pkt_rsplen = 0;
11873 /*
11874 * Let's not aggressively validate the N_Port's
11875 * service parameters until PLOGI. Suffice it
11876 * to give a hint that we are an N_Port and we
11877 * are game to some serious stuff here.
11878 */
11879 fp_login_acc_init(port, cmd, buf,
11880 NULL, KM_NOSLEEP);
11881 FP_TRACE(FP_NHEAD1(3, 0),
11882 "fp_i_handle_unsol_els: "
11883 "Accepting FLOGI.");
11884 }
11885 }
11886 break;
11887
11888 default:
11889 return;
11890 }
11891
11892 if ((fp_sendcmd(port, cmd, port->fp_fca_handle)) != FC_SUCCESS) {
11893 mutex_enter(&port->fp_mutex);
11894 port->fp_els_resp_pkt_busy = 0;
11895 mutex_exit(&port->fp_mutex);
11896 }
11897 }
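/*
 * Lifecycle note on the pre-allocated response packet used above: the caller
 * (fp_unsol_cb) sets fp_els_resp_pkt_busy before dispatching here,
 * fp_unsol_intr() clears it when the response completes, and the early
 * return paths above clear it themselves when no response is sent or
 * fp_sendcmd() fails.  A rough sketch of the claim/release pattern:
 *
 *	mutex_enter(&port->fp_mutex);
 *	if (port->fp_els_resp_pkt_busy == 0) {
 *		port->fp_els_resp_pkt_busy = 1;	    claim the packet
 *		mutex_exit(&port->fp_mutex);
 *		fp_i_handle_unsol_els(port, buf);   responds or releases
 *	} else {
 *		mutex_exit(&port->fp_mutex);
 *		defer to the job_handler thread instead
 *	}
 */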
11898
11899
11900 /*
11901 * Handle unsolicited PLOGI request
11902 */
11903 static void
11904 fp_handle_unsol_plogi(fc_local_port_t *port, fc_unsol_buf_t *buf,
11905 job_request_t *job, int sleep)
11906 {
11907 int sent;
11908 int small;
11909 int f_port;
11910 int do_acc;
11911 fp_cmd_t *cmd;
11912 la_wwn_t *swwn;
11913 la_wwn_t *dwwn;
11914 la_els_logi_t *payload;
11915 fc_remote_port_t *pd;
11916 char dww_name[17];
11917
11918 payload = (la_els_logi_t *)buf->ub_buffer;
11919 f_port = FP_IS_F_PORT(payload->common_service.cmn_features) ? 1 : 0;
11920
11921 mutex_enter(&port->fp_mutex);
11922 do_acc = (port->fp_statec_busy == 0) ? 1 : 0;
11923 mutex_exit(&port->fp_mutex);
11924
11925 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: s_id=%x, d_id=%x,"
11926 "type=%x, f_ctl=%x"
11927 " seq_id=%x, ox_id=%x, rx_id=%x"
11928 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id,
11929 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id,
11930 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro);
11931
11932 swwn = &port->fp_service_params.nport_ww_name;
11933 dwwn = &payload->nport_ww_name;
11934 small = fctl_wwn_cmp(swwn, dwwn);
11935 pd = fctl_get_remote_port_by_pwwn(port, dwwn);
11936 if (pd) {
11937 mutex_enter(&pd->pd_mutex);
11938 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0;
11939 /*
11940 * Most likely this means a cross login is in
11941 * progress or a device is about to be yanked out.
11942 * Only accept the plogi if my wwn is smaller.
11943 */
11944
11945 if (pd->pd_type == PORT_DEVICE_OLD) {
11946 sent = 1;
11947 }
11948 /*
11949 * Stop any PLOGI attempt from the local side
11950 * to speed up discovery progress.
11952 * Mark the pd as PD_PLOGI_RECEPIENT.
11953 */
11954 if (f_port == 0 && small < 0) {
11955 pd->pd_recepient = PD_PLOGI_RECEPIENT;
11956 }
11957 fc_wwn_to_str(&pd->pd_port_name, dww_name);
11958
11959 mutex_exit(&pd->pd_mutex);
11960
11961 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: Unsol PLOGI"
11962 " received. PD still exists in the PWWN list. pd=%p "
11963 "PWWN=%s, sent=%x", pd, dww_name, sent);
11964
11965 if (f_port == 0 && small < 0) {
11966 FP_TRACE(FP_NHEAD1(3, 0),
11967 "fp_handle_unsol_plogi: Mark the pd"
11968 " as plogi recipient, pd=%p, PWWN=%s"
11969 ", sent=%x",
11970 pd, dww_name, sent);
11971 }
11972 } else {
11973 sent = 0;
11974 }
11975
11976 /*
11977 * Avoid Login collisions by accepting only if my WWN is smaller.
11978 *
11979 * A side note: There is no need to start a PLOGI from this end in
11980 * this context if login isn't going to be accepted for the
11981 * above reason as either a LIP (in private loop), RSCN (in
11982 * fabric topology), or an FLOGI (in point to point - Huh ?
11983 * check FC-PH) would normally drive the PLOGI from this end.
11984 * At this point of time there is no need for an inbound PLOGI
11985 * to kick an outbound PLOGI when it is going to be rejected
11986 * for the reason of WWN being smaller. However it isn't hard
11987 * to do that either (when such a need arises, start a timer
11988 * for a duration that extends beyond a normal device discovery
11989 * time and check if an outbound PLOGI did go before that, if
11990 * none fire one)
11991 *
11992 * Unfortunately, as it turned out, during booting, it is possible
11993 * to miss another initiator in the same loop as port driver
11994 * instances are serially attached. While preserving the above
11995 * comments for belly laughs, please kick an outbound PLOGI in
11996 * a non-switch environment (which is a pt pt between N_Ports or
11997 * a private loop)
11998 *
11999 * While preserving the above comments for amusement, send an
12000 * ACC if the PLOGI is going to be rejected for WWN being smaller
12001 * when no discovery is in progress at this end. Turn around
12002 * and make the port device as the PLOGI initiator, so that
12003 * during subsequent link/loop initialization, this end drives
12004 * the PLOGI (In fact both ends do in this particular case, but
12005 * only one wins)
12006 *
12007 * Make sure the PLOGIs initiated by the switch from not-so-well-known
12008 * ports (such as 0xFFFC41) are accepted too.
12009 */
12010 if ((f_port == 0 && small < 0) || (((small > 0 && do_acc) ||
12011 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) {
12012 if (fp_is_class_supported(port->fp_cos,
12013 buf->ub_class) == FC_FAILURE) {
12014 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
12015 cmd = fp_alloc_pkt(port,
12016 sizeof (la_els_logi_t), 0, sleep, pd);
12017 if (cmd == NULL) {
12018 return;
12019 }
12020 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t);
12021 cmd->cmd_pkt.pkt_rsplen = 0;
12022 fp_els_rjt_init(port, cmd, buf,
12023 FC_ACTION_NON_RETRYABLE,
12024 FC_REASON_CLASS_NOT_SUPP, job);
12025 FP_TRACE(FP_NHEAD1(3, 0),
12026 "fp_handle_unsol_plogi: "
12027 "Unsupported class. rejecting PLOGI");
12028 }
12029 } else {
12030 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t),
12031 0, sleep, pd);
12032 if (cmd == NULL) {
12033 return;
12034 }
12035 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_logi_t);
12036 cmd->cmd_pkt.pkt_rsplen = 0;
12037
12038 /*
12039 * Sometime later, we should validate the service
12040 * parameters instead of just accepting it.
12041 */
12042 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP);
12043 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: "
12044 "Accepting PLOGI, f_port=%d, small=%d, "
12045 "do_acc=%d, sent=%d.", f_port, small, do_acc,
12046 sent);
12047
12048 /*
12049 * If fp_port_id is zero and topology is
12050 * Point-to-Point, get the local port id from
12051 * the d_id in the PLOGI request.
12052 * If the outgoing FLOGI hasn't been accepted,
12053 * the topology will be unknown here. But it's
12054 * still safe to save the d_id to fp_port_id,
12055 * just because it will be overwritten later
12056 * if the topology is not Point-to-Point.
12057 */
12058 mutex_enter(&port->fp_mutex);
12059 if ((port->fp_port_id.port_id == 0) &&
12060 (port->fp_topology == FC_TOP_PT_PT ||
12061 port->fp_topology == FC_TOP_UNKNOWN)) {
12062 port->fp_port_id.port_id =
12063 buf->ub_frame.d_id;
12064 }
12065 mutex_exit(&port->fp_mutex);
12066 }
12067 } else {
12068 if (FP_IS_CLASS_1_OR_2(buf->ub_class) ||
12069 port->fp_options & FP_SEND_RJT) {
12070 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t),
12071 0, sleep, pd);
12072 if (cmd == NULL) {
12073 return;
12074 }
12075 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t);
12076 cmd->cmd_pkt.pkt_rsplen = 0;
12077 /*
12078 * Send out Logical busy to indicate
12079 * the detection of PLOGI collision
12080 */
12081 fp_els_rjt_init(port, cmd, buf,
12082 FC_ACTION_NON_RETRYABLE,
12083 FC_REASON_LOGICAL_BSY, job);
12084
12085 fc_wwn_to_str(dwwn, dww_name);
12086 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: "
12087 "Rejecting Unsol PLOGI with Logical Busy."
12088 "possible PLOGI collision. PWWN=%s, sent=%x",
12089 dww_name, sent);
12090 } else {
12091 return;
12092 }
12093 }
12094
12095 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
12096 fp_free_pkt(cmd);
12097 }
12098 }
12099
12100
12101 /*
12102 * Handle mischievous turning over of our own FLOGI requests back to
12103 * us by the SOC+ microcode. In other words, look at the class of such
12104  * bone-headed requests; if it is 1 or 2, bluntly P_RJT them; if 3,
12105  * drop them on the floor.
12106 */
12107 static void
12108 fp_handle_unsol_flogi(fc_local_port_t *port, fc_unsol_buf_t *buf,
12109 job_request_t *job, int sleep)
12110 {
12111 uint32_t state;
12112 uint32_t s_id;
12113 fp_cmd_t *cmd;
12114
12115 if (fp_is_class_supported(port->fp_cos, buf->ub_class) == FC_FAILURE) {
12116 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
12117 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
12118 0, sleep, NULL);
12119 if (cmd == NULL) {
12120 return;
12121 }
12122 fp_els_rjt_init(port, cmd, buf,
12123 FC_ACTION_NON_RETRYABLE,
12124 FC_REASON_CLASS_NOT_SUPP, job);
12125 } else {
12126 return;
12127 }
12128 } else {
12129
12130 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi:"
12131 " s_id=%x, d_id=%x, type=%x, f_ctl=%x"
12132 " seq_id=%x, ox_id=%x, rx_id=%x, ro=%x",
12133 buf->ub_frame.s_id, buf->ub_frame.d_id,
12134 buf->ub_frame.type, buf->ub_frame.f_ctl,
12135 buf->ub_frame.seq_id, buf->ub_frame.ox_id,
12136 buf->ub_frame.rx_id, buf->ub_frame.ro);
12137
12138 mutex_enter(&port->fp_mutex);
12139 state = FC_PORT_STATE_MASK(port->fp_state);
12140 s_id = port->fp_port_id.port_id;
12141 mutex_exit(&port->fp_mutex);
12142
12143 if (state != FC_STATE_ONLINE ||
12144 (s_id && buf->ub_frame.s_id == s_id)) {
12145 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
12146 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
12147 0, sleep, NULL);
12148 if (cmd == NULL) {
12149 return;
12150 }
12151 fp_els_rjt_init(port, cmd, buf,
12152 FC_ACTION_NON_RETRYABLE,
12153 FC_REASON_INVALID_LINK_CTRL, job);
12154 FP_TRACE(FP_NHEAD1(3, 0),
12155 "fp_handle_unsol_flogi: "
12156 "Rejecting PLOGI. Invalid Link CTRL");
12157 } else {
12158 return;
12159 }
12160 } else {
12161 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t),
12162 0, sleep, NULL);
12163 if (cmd == NULL) {
12164 return;
12165 }
12166 /*
12167 * Let's not aggressively validate the N_Port's
12168 * service parameters until PLOGI. Suffice it
12169 * to give a hint that we are an N_Port and we
12170 * are game to some serious stuff here.
12171 */
12172 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP);
12173 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi: "
12174 "Accepting PLOGI");
12175 }
12176 }
12177
12178 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
12179 fp_free_pkt(cmd);
12180 }
12181 }
12182
12183
12184 /*
12185 * Perform PLOGI accept
12186 */
12187 static void
12188 fp_login_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf,
12189 job_request_t *job, int sleep)
12190 {
12191 fc_packet_t *pkt;
12192 fc_portmap_t *listptr;
12193 la_els_logi_t payload;
12194
12195 ASSERT(buf != NULL);
12196
12197 /*
12198 * If we are sending ACC to PLOGI and we haven't already
12199 	 * created port and node device handles, let's create them
12200 * here.
12201 */
12202 if (buf->ub_buffer[0] == LA_ELS_PLOGI &&
12203 FC_IS_REAL_DEVICE(buf->ub_frame.s_id)) {
12204 int small;
12205 int do_acc;
12206 fc_remote_port_t *pd;
12207 la_els_logi_t *req;
12208
12209 req = (la_els_logi_t *)buf->ub_buffer;
12210 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name,
12211 &req->nport_ww_name);
12212
12213 mutex_enter(&port->fp_mutex);
12214 do_acc = (port->fp_statec_busy == 0) ? 1 : 0;
12215 mutex_exit(&port->fp_mutex);
12216
12217 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_acc_init fp %x, pd %x",
12218 port->fp_port_id.port_id, buf->ub_frame.s_id);
12219 pd = fctl_create_remote_port(port, &req->node_ww_name,
12220 &req->nport_ww_name, buf->ub_frame.s_id,
12221 PD_PLOGI_RECEPIENT, sleep);
12222 if (pd == NULL) {
12223 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: "
12224 "Couldn't create port device for d_id:0x%x",
12225 buf->ub_frame.s_id);
12226
12227 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
12228 "couldn't create port device d_id=%x",
12229 buf->ub_frame.s_id);
12230 } else {
12231 /*
12232 * usoc currently returns PLOGIs inline and
12233 * the maximum buffer size is 60 bytes or so.
12234 * So attempt not to look beyond what is in
12235 * the unsolicited buffer
12236 *
12237 * JNI also traverses this path sometimes
12238 */
12239 if (buf->ub_bufsize >= sizeof (la_els_logi_t)) {
12240 fp_register_login(NULL, pd, req, buf->ub_class);
12241 } else {
12242 mutex_enter(&pd->pd_mutex);
12243 if (pd->pd_login_count == 0) {
12244 pd->pd_login_count++;
12245 }
12246 pd->pd_state = PORT_DEVICE_LOGGED_IN;
12247 pd->pd_login_class = buf->ub_class;
12248 mutex_exit(&pd->pd_mutex);
12249 }
12250
12251 listptr = kmem_zalloc(sizeof (fc_portmap_t), sleep);
12252 if (listptr != NULL) {
12253 fctl_copy_portmap(listptr, pd);
12254 (void) fp_ulp_devc_cb(port, listptr,
12255 1, 1, sleep, 0);
12256 }
12257
12258 if (small > 0 && do_acc) {
12259 mutex_enter(&pd->pd_mutex);
12260 pd->pd_recepient = PD_PLOGI_INITIATOR;
12261 mutex_exit(&pd->pd_mutex);
12262 }
12263 }
12264 }
12265
12266 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
12267 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
12268 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
12269 cmd->cmd_retry_count = 1;
12270 cmd->cmd_ulp_pkt = NULL;
12271
12272 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
12273 cmd->cmd_job = job;
12274
12275 pkt = &cmd->cmd_pkt;
12276
12277 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
12278
12279 payload = port->fp_service_params;
12280 payload.ls_code.ls_code = LA_ELS_ACC;
12281
12282 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
12283 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
12284
12285 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: ELS:0x%x d_id:0x%x "
12286 "bufsize:0x%x sizeof (la_els_logi):0x%x "
12287 "port's wwn:0x%01x%03x%04x%08x requestor's wwn:0x%01x%03x%04x%08x "
12288 "statec_busy:0x%x", buf->ub_buffer[0], buf->ub_frame.s_id,
12289 buf->ub_bufsize, sizeof (la_els_logi_t),
12290 port->fp_service_params.nport_ww_name.w.naa_id,
12291 port->fp_service_params.nport_ww_name.w.nport_id,
12292 port->fp_service_params.nport_ww_name.w.wwn_hi,
12293 port->fp_service_params.nport_ww_name.w.wwn_lo,
12294 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.naa_id,
12295 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.nport_id,
12296 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_hi,
12297 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_lo,
12298 port->fp_statec_busy);
12299 }
12300
12301
12302 #define RSCN_EVENT_NAME_LEN 256
12303
12304 /*
12305 * Handle RSCNs
12306 */
12307 static void
12308 fp_handle_unsol_rscn(fc_local_port_t *port, fc_unsol_buf_t *buf,
12309 job_request_t *job, int sleep)
12310 {
12311 uint32_t mask;
12312 fp_cmd_t *cmd;
12313 uint32_t count;
12314 int listindex;
12315 int16_t len;
12316 fc_rscn_t *payload;
12317 fc_portmap_t *listptr;
12318 fctl_ns_req_t *ns_cmd;
12319 fc_affected_id_t *page;
12320 caddr_t nvname;
12321 nvlist_t *attr_list = NULL;
12322
12323 mutex_enter(&port->fp_mutex);
12324 if (!FC_IS_TOP_SWITCH(port->fp_topology)) {
12325 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
12326 --port->fp_rscn_count;
12327 }
12328 mutex_exit(&port->fp_mutex);
12329 return;
12330 }
12331 mutex_exit(&port->fp_mutex);
12332
12333 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, sleep, NULL);
12334 if (cmd != NULL) {
12335 fp_els_acc_init(port, cmd, buf, job);
12336 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
12337 fp_free_pkt(cmd);
12338 }
12339 }
12340
12341 payload = (fc_rscn_t *)buf->ub_buffer;
12342 ASSERT(payload->rscn_code == LA_ELS_RSCN);
12343 ASSERT(payload->rscn_len == FP_PORT_IDENTIFIER_LEN);
12344
12345 len = payload->rscn_payload_len - FP_PORT_IDENTIFIER_LEN;
12346
12347 if (len <= 0) {
12348 mutex_enter(&port->fp_mutex);
12349 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
12350 --port->fp_rscn_count;
12351 }
12352 mutex_exit(&port->fp_mutex);
12353
12354 return;
12355 }
12356
12357 	ASSERT((len & 0x3) == 0);	/* Must be a multiple of 4 */
12358 count = (len >> 2) << 1; /* number of pages multiplied by 2 */
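	/*
	 * Worked example (illustrative numbers): an RSCN whose
	 * rscn_payload_len is 16 carries 16 - 4 = 12 bytes of affected
	 * ID pages, i.e. 12 / 4 = 3 pages, so count = 3 * 2 = 6 portmap
	 * entries are allocated (twice the page count).
	 */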
12359
12360 listptr = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep);
12361 page = (fc_affected_id_t *)(buf->ub_buffer + sizeof (fc_rscn_t));
12362
12363 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
12364
12365 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpn_id_t),
12366 sizeof (ns_resp_gpn_id_t), sizeof (ns_resp_gpn_id_t),
12367 0, sleep);
12368 if (ns_cmd == NULL) {
12369 kmem_free(listptr, sizeof (fc_portmap_t) * count);
12370
12371 mutex_enter(&port->fp_mutex);
12372 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
12373 --port->fp_rscn_count;
12374 }
12375 mutex_exit(&port->fp_mutex);
12376
12377 return;
12378 }
12379
12380 ns_cmd->ns_cmd_code = NS_GPN_ID;
12381
12382 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_rscn: s_id=%x, d_id=%x,"
12383 "type=%x, f_ctl=%x seq_id=%x, ox_id=%x, rx_id=%x"
12384 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id,
12385 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id,
12386 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro);
12387
12388 /* Only proceed if we can allocate nvname and the nvlist */
12389 if ((nvname = kmem_zalloc(RSCN_EVENT_NAME_LEN, KM_NOSLEEP)) != NULL &&
12390 nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
12391 KM_NOSLEEP) == DDI_SUCCESS) {
12392 if (!(attr_list && nvlist_add_uint32(attr_list, "instance",
12393 port->fp_instance) == DDI_SUCCESS &&
12394 nvlist_add_byte_array(attr_list, "port-wwn",
12395 port->fp_service_params.nport_ww_name.raw_wwn,
12396 sizeof (la_wwn_t)) == DDI_SUCCESS)) {
12397 nvlist_free(attr_list);
12398 attr_list = NULL;
12399 }
12400 }
12401
12402 for (listindex = 0; len; len -= FP_PORT_IDENTIFIER_LEN, page++) {
12403 /* Add affected page to the event payload */
12404 if (attr_list != NULL) {
12405 (void) snprintf(nvname, RSCN_EVENT_NAME_LEN,
12406 "affected_page_%d", listindex);
12407 if (attr_list && nvlist_add_uint32(attr_list, nvname,
12408 ntohl(*(uint32_t *)page)) != DDI_SUCCESS) {
12409 /* We don't send a partial event, so dump it */
12410 nvlist_free(attr_list);
12411 attr_list = NULL;
12412 }
12413 }
12414 /*
12415 * Query the NS to get the Port WWN for this
12416 * affected D_ID.
12417 */
12418 mask = 0;
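		/*
		 * Address formats handled below (descriptive only): a
		 * port-address page names a single D_ID, an area page
		 * covers aff_d_id & 0xFFFF00, a domain page covers
		 * aff_d_id & 0xFF0000, and a fabric page forces
		 * rediscovery of every device visible through this port.
		 */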
12419 switch (page->aff_format & FC_RSCN_ADDRESS_MASK) {
12420 case FC_RSCN_PORT_ADDRESS:
12421 fp_validate_rscn_page(port, page, job, ns_cmd,
12422 listptr, &listindex, sleep);
12423
12424 if (listindex == 0) {
12425 /*
12426 * We essentially did not process this RSCN. So,
12427 * ULPs are not going to be called and so we
12428 * decrement the rscn_count
12429 */
12430 mutex_enter(&port->fp_mutex);
12431 if (--port->fp_rscn_count ==
12432 FC_INVALID_RSCN_COUNT) {
12433 --port->fp_rscn_count;
12434 }
12435 mutex_exit(&port->fp_mutex);
12436 }
12437 break;
12438
12439 case FC_RSCN_AREA_ADDRESS:
12440 mask = 0xFFFF00;
12441 /* FALLTHROUGH */
12442
12443 case FC_RSCN_DOMAIN_ADDRESS:
12444 if (!mask) {
12445 mask = 0xFF0000;
12446 }
12447 fp_validate_area_domain(port, page->aff_d_id, mask,
12448 job, sleep);
12449 break;
12450
12451 case FC_RSCN_FABRIC_ADDRESS:
12452 /*
12453 * We need to discover all the devices on this
12454 * port.
12455 */
12456 fp_validate_area_domain(port, 0, 0, job, sleep);
12457 break;
12458
12459 default:
12460 break;
12461 }
12462 }
12463 if (attr_list != NULL) {
12464 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW,
12465 EC_SUNFC, ESC_SUNFC_PORT_RSCN, attr_list,
12466 NULL, DDI_SLEEP);
12467 nvlist_free(attr_list);
12468 } else {
12469 FP_TRACE(FP_NHEAD1(9, 0),
12470 "RSCN handled, but event not sent to userland");
12471 }
12472 if (nvname != NULL) {
12473 kmem_free(nvname, RSCN_EVENT_NAME_LEN);
12474 }
12475
12476 if (ns_cmd) {
12477 fctl_free_ns_cmd(ns_cmd);
12478 }
12479
12480 if (listindex) {
12481 #ifdef DEBUG
12482 page = (fc_affected_id_t *)(buf->ub_buffer +
12483 sizeof (fc_rscn_t));
12484
12485 if (listptr->map_did.port_id != page->aff_d_id) {
12486 FP_TRACE(FP_NHEAD1(9, 0),
12487 "PORT RSCN: processed=%x, reporting=%x",
12488 listptr->map_did.port_id, page->aff_d_id);
12489 }
12490 #endif
12491
12492 (void) fp_ulp_devc_cb(port, listptr, listindex, count,
12493 sleep, 0);
12494 } else {
12495 kmem_free(listptr, sizeof (fc_portmap_t) * count);
12496 }
12497 }
12498
12499
12500 /*
12501 * Fill out old map for ULPs with fp_mutex, fd_mutex and pd_mutex held
12502 */
12503 static void
12504 fp_fillout_old_map_held(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag)
12505 {
12506 int is_switch;
12507 int initiator;
12508 fc_local_port_t *port;
12509
12510 port = pd->pd_port;
12511
12512 /* This function has the following bunch of assumptions */
12513 ASSERT(port != NULL);
12514 ASSERT(MUTEX_HELD(&port->fp_mutex));
12515 ASSERT(MUTEX_HELD(&pd->pd_remote_nodep->fd_mutex));
12516 ASSERT(MUTEX_HELD(&pd->pd_mutex));
12517
12518 pd->pd_state = PORT_DEVICE_INVALID;
12519 pd->pd_type = PORT_DEVICE_OLD;
12520 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0;
12521 is_switch = FC_IS_TOP_SWITCH(port->fp_topology);
12522
12523 fctl_delist_did_table(port, pd);
12524 fctl_delist_pwwn_table(port, pd);
12525
12526 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map_held: port=%p, d_id=%x"
12527 " removed the PD=%p from DID and PWWN tables",
12528 port, pd->pd_port_id.port_id, pd);
12529
12530 if ((!flag) && port && initiator && is_switch) {
12531 (void) fctl_add_orphan_held(port, pd);
12532 }
12533 fctl_copy_portmap_held(map, pd);
12534 map->map_pd = pd;
12535 }
12536
12537 /*
12538 * Fill out old map for ULPs
12539 */
12540 static void
12541 fp_fillout_old_map(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag)
12542 {
12543 int is_switch;
12544 int initiator;
12545 fc_local_port_t *port;
12546
12547 mutex_enter(&pd->pd_mutex);
12548 port = pd->pd_port;
12549 mutex_exit(&pd->pd_mutex);
12550
12551 mutex_enter(&port->fp_mutex);
12552 mutex_enter(&pd->pd_mutex);
12553
12554 pd->pd_state = PORT_DEVICE_INVALID;
12555 pd->pd_type = PORT_DEVICE_OLD;
12556 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0;
12557 is_switch = FC_IS_TOP_SWITCH(port->fp_topology);
12558
12559 fctl_delist_did_table(port, pd);
12560 fctl_delist_pwwn_table(port, pd);
12561
12562 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map: port=%p, d_id=%x"
12563 " removed the PD=%p from DID and PWWN tables",
12564 port, pd->pd_port_id.port_id, pd);
12565
12566 mutex_exit(&pd->pd_mutex);
12567 mutex_exit(&port->fp_mutex);
12568
12569 ASSERT(port != NULL);
12570 if ((!flag) && port && initiator && is_switch) {
12571 (void) fctl_add_orphan(port, pd, KM_NOSLEEP);
12572 }
12573 fctl_copy_portmap(map, pd);
12574 map->map_pd = pd;
12575 }
12576
12577
12578 /*
12579 * Fillout Changed Map for ULPs
12580 */
12581 static void
12582 fp_fillout_changed_map(fc_portmap_t *map, fc_remote_port_t *pd,
12583 uint32_t *new_did, la_wwn_t *new_pwwn)
12584 {
12585 ASSERT(MUTEX_HELD(&pd->pd_mutex));
12586
12587 pd->pd_type = PORT_DEVICE_CHANGED;
12588 if (new_did) {
12589 pd->pd_port_id.port_id = *new_did;
12590 }
12591 if (new_pwwn) {
12592 pd->pd_port_name = *new_pwwn;
12593 }
12594 mutex_exit(&pd->pd_mutex);
12595
12596 fctl_copy_portmap(map, pd);
12597
12598 mutex_enter(&pd->pd_mutex);
12599 pd->pd_type = PORT_DEVICE_NOCHANGE;
12600 }
12601
12602
12603 /*
12604 * Fillout New Name Server map
12605 */
12606 static void
12607 fp_fillout_new_nsmap(fc_local_port_t *port, ddi_acc_handle_t *handle,
12608 fc_portmap_t *port_map, ns_resp_gan_t *gan_resp, uint32_t d_id)
12609 {
12610 ASSERT(!MUTEX_HELD(&port->fp_mutex));
12611
12612 if (handle) {
12613 FC_GET_RSP(port, *handle, (uint8_t *)&port_map->map_pwwn,
12614 (uint8_t *)&gan_resp->gan_pwwn, sizeof (gan_resp->gan_pwwn),
12615 DDI_DEV_AUTOINCR);
12616 FC_GET_RSP(port, *handle, (uint8_t *)&port_map->map_nwwn,
12617 (uint8_t *)&gan_resp->gan_nwwn, sizeof (gan_resp->gan_nwwn),
12618 DDI_DEV_AUTOINCR);
12619 FC_GET_RSP(port, *handle, (uint8_t *)port_map->map_fc4_types,
12620 (uint8_t *)gan_resp->gan_fc4types,
12621 sizeof (gan_resp->gan_fc4types), DDI_DEV_AUTOINCR);
12622 } else {
12623 bcopy(&gan_resp->gan_pwwn, &port_map->map_pwwn,
12624 sizeof (gan_resp->gan_pwwn));
12625 bcopy(&gan_resp->gan_nwwn, &port_map->map_nwwn,
12626 sizeof (gan_resp->gan_nwwn));
12627 bcopy(gan_resp->gan_fc4types, port_map->map_fc4_types,
12628 sizeof (gan_resp->gan_fc4types));
12629 }
12630 port_map->map_did.port_id = d_id;
12631 port_map->map_did.priv_lilp_posit = 0;
12632 port_map->map_hard_addr.hard_addr = 0;
12633 port_map->map_hard_addr.rsvd = 0;
12634 port_map->map_state = PORT_DEVICE_INVALID;
12635 port_map->map_type = PORT_DEVICE_NEW;
12636 port_map->map_flags = 0;
12637 port_map->map_pd = NULL;
12638
12639 (void) fctl_remove_if_orphan(port, &port_map->map_pwwn);
12640
12641 ASSERT(port != NULL);
12642 }
12643
12644
12645 /*
12646 * Perform LINIT ELS
12647 */
12648 static int
12649 fp_remote_lip(fc_local_port_t *port, la_wwn_t *pwwn, int sleep,
12650 job_request_t *job)
12651 {
12652 int rval;
12653 uint32_t d_id;
12654 uint32_t s_id;
12655 uint32_t lfa;
12656 uchar_t class;
12657 uint32_t ret;
12658 fp_cmd_t *cmd;
12659 fc_porttype_t ptype;
12660 fc_packet_t *pkt;
12661 fc_linit_req_t payload;
12662 fc_remote_port_t *pd;
12663
12664 rval = 0;
12665
12666 ASSERT(job != NULL);
12667 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
12668
12669 pd = fctl_get_remote_port_by_pwwn(port, pwwn);
12670 if (pd == NULL) {
12671 fctl_ns_req_t *ns_cmd;
12672
12673 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
12674 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
12675 0, sleep);
12676
12677 if (ns_cmd == NULL) {
12678 return (FC_NOMEM);
12679 }
12680 job->job_result = FC_SUCCESS;
12681 ns_cmd->ns_cmd_code = NS_GID_PN;
12682 ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = *pwwn;
12683
12684 ret = fp_ns_query(port, ns_cmd, job, 1, sleep);
12685 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) {
12686 fctl_free_ns_cmd(ns_cmd);
12687 return (FC_FAILURE);
12688 }
12689 bcopy(ns_cmd->ns_data_buf, (caddr_t)&d_id, sizeof (d_id));
12690 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
12691
12692 fctl_free_ns_cmd(ns_cmd);
12693 lfa = d_id & 0xFFFF00;
12694
12695 /*
12696 * Given this D_ID, get the port type to see if
12697 * we can do LINIT on the LFA
12698 */
12699 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpt_id_t),
12700 sizeof (ns_resp_gpt_id_t), sizeof (ns_resp_gpt_id_t),
12701 0, sleep);
12702
12703 if (ns_cmd == NULL) {
12704 return (FC_NOMEM);
12705 }
12706
12707 job->job_result = FC_SUCCESS;
12708 ns_cmd->ns_cmd_code = NS_GPT_ID;
12709
12710 ((ns_req_gpt_id_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id;
12711 ((ns_req_gpt_id_t *)
12712 (ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0;
12713
12714 ret = fp_ns_query(port, ns_cmd, job, 1, sleep);
12715 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) {
12716 fctl_free_ns_cmd(ns_cmd);
12717 return (FC_FAILURE);
12718 }
12719 bcopy(ns_cmd->ns_data_buf, (caddr_t)&ptype, sizeof (ptype));
12720
12721 fctl_free_ns_cmd(ns_cmd);
12722
12723 switch (ptype.port_type) {
12724 case FC_NS_PORT_NL:
12725 case FC_NS_PORT_F_NL:
12726 case FC_NS_PORT_FL:
12727 break;
12728
12729 default:
12730 return (FC_FAILURE);
12731 }
12732 } else {
12733 mutex_enter(&pd->pd_mutex);
12734 ptype = pd->pd_porttype;
12735
12736 switch (pd->pd_porttype.port_type) {
12737 case FC_NS_PORT_NL:
12738 case FC_NS_PORT_F_NL:
12739 case FC_NS_PORT_FL:
12740 lfa = pd->pd_port_id.port_id & 0xFFFF00;
12741 break;
12742
12743 default:
12744 mutex_exit(&pd->pd_mutex);
12745 return (FC_FAILURE);
12746 }
12747 mutex_exit(&pd->pd_mutex);
12748 }
12749
12750 mutex_enter(&port->fp_mutex);
12751 s_id = port->fp_port_id.port_id;
12752 class = port->fp_ns_login_class;
12753 mutex_exit(&port->fp_mutex);
12754
12755 cmd = fp_alloc_pkt(port, sizeof (fc_linit_req_t),
12756 sizeof (fc_linit_resp_t), sleep, pd);
12757 if (cmd == NULL) {
12758 return (FC_NOMEM);
12759 }
12760
12761 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
12762 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
12763 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
12764 cmd->cmd_retry_count = fp_retry_count;
12765 cmd->cmd_ulp_pkt = NULL;
12766
12767 pkt = &cmd->cmd_pkt;
12768 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
12769
12770 fp_els_init(cmd, s_id, lfa, fp_linit_intr, job);
12771
12772 /*
12773 * How does LIP work by the way ?
12774 * If the L_Port receives three consecutive identical ordered
12775 * sets whose first two characters (fully decoded) are equal to
12776 * the values shown in Table 3 of FC-AL-2 then the L_Port shall
12777 * recognize a Loop Initialization Primitive sequence. The
12778 	 * third character determines the type of LIP:
12779 * LIP(F7) Normal LIP
12780 * LIP(F8) Loop Failure LIP
12781 *
12782 	 * The possible combinations for the 3rd and 4th bytes are:
12783 * F7, F7 Normal Lip - No valid AL_PA
12784 * F8, F8 Loop Failure - No valid AL_PA
12785 * F7, AL_PS Normal Lip - Valid source AL_PA
12786 * F8, AL_PS Loop Failure - Valid source AL_PA
12787 * AL_PD AL_PS Loop reset of AL_PD originated by AL_PS
12788 * And Normal Lip for all other loop members
12789 * 0xFF AL_PS Vendor specific reset of all loop members
12790 *
12791 	 * Now, we at the source may not always have an AL_PS (the AL_PA
12792 	 * of the source) to put in the 4th character slot, so we choose
12793 	 * (Normal LIP, no valid AL_PA); that means that in the LINIT
12794 	 * payload we are going to set:
12795 * lip_b3 = 0xF7; Normal LIP
12796 * lip_b4 = 0xF7; No valid source AL_PA
12797 */
12798 payload.ls_code.ls_code = LA_ELS_LINIT;
12799 payload.ls_code.mbz = 0;
12800 payload.rsvd = 0;
12801 payload.func = 0; /* Let Fabric determine the best way */
12802 payload.lip_b3 = 0xF7; /* Normal LIP */
12803 payload.lip_b4 = 0xF7; /* No valid source AL_PA */
12804
12805 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
12806 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
12807
12808 job->job_counter = 1;
12809
12810 ret = fp_sendcmd(port, cmd, port->fp_fca_handle);
12811 if (ret == FC_SUCCESS) {
12812 fp_jobwait(job);
12813 rval = job->job_result;
12814 } else {
12815 rval = FC_FAILURE;
12816 fp_free_pkt(cmd);
12817 }
12818
12819 return (rval);
12820 }
12821
12822
12823 /*
12824 * Fill out the device handles with GAN response
12825 */
12826 static void
12827 fp_stuff_device_with_gan(ddi_acc_handle_t *handle, fc_remote_port_t *pd,
12828 ns_resp_gan_t *gan_resp)
12829 {
12830 fc_remote_node_t *node;
12831 fc_porttype_t type;
12832 fc_local_port_t *port;
12833
12834 ASSERT(pd != NULL);
12835 ASSERT(handle != NULL);
12836
12837 port = pd->pd_port;
12838
12839 FP_TRACE(FP_NHEAD1(1, 0), "GAN PD stuffing; pd=%p,"
12840 " port_id=%x, sym_len=%d fc4-type=%x",
12841 pd, gan_resp->gan_type_id.rsvd,
12842 gan_resp->gan_spnlen, gan_resp->gan_fc4types[0]);
12843
12844 mutex_enter(&pd->pd_mutex);
12845
12846 FC_GET_RSP(port, *handle, (uint8_t *)&type,
12847 (uint8_t *)&gan_resp->gan_type_id, sizeof (type), DDI_DEV_AUTOINCR);
12848
12849 pd->pd_porttype.port_type = type.port_type;
12850 pd->pd_porttype.rsvd = 0;
12851
12852 pd->pd_spn_len = gan_resp->gan_spnlen;
12853 if (pd->pd_spn_len) {
12854 FC_GET_RSP(port, *handle, (uint8_t *)pd->pd_spn,
12855 (uint8_t *)gan_resp->gan_spname, pd->pd_spn_len,
12856 DDI_DEV_AUTOINCR);
12857 }
12858
12859 FC_GET_RSP(port, *handle, (uint8_t *)pd->pd_ip_addr,
12860 (uint8_t *)gan_resp->gan_ip, sizeof (pd->pd_ip_addr),
12861 DDI_DEV_AUTOINCR);
12862 FC_GET_RSP(port, *handle, (uint8_t *)&pd->pd_cos,
12863 (uint8_t *)&gan_resp->gan_cos, sizeof (pd->pd_cos),
12864 DDI_DEV_AUTOINCR);
12865 FC_GET_RSP(port, *handle, (uint8_t *)pd->pd_fc4types,
12866 (uint8_t *)gan_resp->gan_fc4types, sizeof (pd->pd_fc4types),
12867 DDI_DEV_AUTOINCR);
12868
12869 node = pd->pd_remote_nodep;
12870 mutex_exit(&pd->pd_mutex);
12871
12872 mutex_enter(&node->fd_mutex);
12873
12874 FC_GET_RSP(port, *handle, (uint8_t *)node->fd_ipa,
12875 (uint8_t *)gan_resp->gan_ipa, sizeof (node->fd_ipa),
12876 DDI_DEV_AUTOINCR);
12877
12878 node->fd_snn_len = gan_resp->gan_snnlen;
12879 if (node->fd_snn_len) {
12880 FC_GET_RSP(port, *handle, (uint8_t *)node->fd_snn,
12881 (uint8_t *)gan_resp->gan_snname, node->fd_snn_len,
12882 DDI_DEV_AUTOINCR);
12883 }
12884
12885 mutex_exit(&node->fd_mutex);
12886 }
12887
12888
12889 /*
12890 * Handles all NS Queries (also means that this function
12891 * doesn't handle NS object registration)
12892 */
12893 static int
12894 fp_ns_query(fc_local_port_t *port, fctl_ns_req_t *ns_cmd, job_request_t *job,
12895 int polled, int sleep)
12896 {
12897 int rval;
12898 fp_cmd_t *cmd;
12899
12900 ASSERT(!MUTEX_HELD(&port->fp_mutex));
12901
12902 if (ns_cmd->ns_cmd_code == NS_GA_NXT) {
12903 FP_TRACE(FP_NHEAD1(1, 0), "fp_ns_query GA_NXT fp %x pd %x",
12904 port->fp_port_id.port_id, ns_cmd->ns_gan_sid);
12905 }
12906
12907 if (ns_cmd->ns_cmd_size == 0) {
12908 return (FC_FAILURE);
12909 }
12910
12911 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
12912 ns_cmd->ns_cmd_size, sizeof (fc_ct_header_t) +
12913 ns_cmd->ns_resp_size, sleep, NULL);
12914 if (cmd == NULL) {
12915 return (FC_NOMEM);
12916 }
12917
12918 fp_ct_init(port, cmd, ns_cmd, ns_cmd->ns_cmd_code, ns_cmd->ns_cmd_buf,
12919 ns_cmd->ns_cmd_size, ns_cmd->ns_resp_size, job);
12920
12921 if (polled) {
12922 job->job_counter = 1;
12923 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
12924 }
12925 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
12926 if (rval != FC_SUCCESS) {
12927 job->job_result = rval;
12928 fp_iodone(cmd);
12929 if (polled == 0) {
12930 /*
12931 * Return FC_SUCCESS to indicate that
12932 			 * fp_iodone has already been performed.
12933 */
12934 rval = FC_SUCCESS;
12935 }
12936 }
12937
12938 if (polled) {
12939 fp_jobwait(job);
12940 rval = job->job_result;
12941 }
12942
12943 return (rval);
12944 }
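
/*
 * Caller-side sketch of fp_ns_query() (illustrative; it mirrors the GID_PN
 * lookup done in fp_remote_lip() above and assumes the same polled,
 * non-async job context):
 *
 *	fctl_ns_req_t *ns_cmd;
 *
 *	ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
 *	    sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
 *	    0, sleep);
 *	ns_cmd->ns_cmd_code = NS_GID_PN;
 *	((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = *pwwn;
 *	job->job_result = FC_SUCCESS;
 *	if (fp_ns_query(port, ns_cmd, job, 1, sleep) == FC_SUCCESS &&
 *	    job->job_result == FC_SUCCESS) {
 *		d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
 *	}
 *	fctl_free_ns_cmd(ns_cmd);
 */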
12945
12946
12947 /*
12948 * Initialize Common Transport request
12949 */
12950 static void
12951 fp_ct_init(fc_local_port_t *port, fp_cmd_t *cmd, fctl_ns_req_t *ns_cmd,
12952 uint16_t cmd_code, caddr_t cmd_buf, uint16_t cmd_len,
12953 uint16_t resp_len, job_request_t *job)
12954 {
12955 uint32_t s_id;
12956 uchar_t class;
12957 fc_packet_t *pkt;
12958 fc_ct_header_t ct;
12959
12960 ASSERT(!MUTEX_HELD(&port->fp_mutex));
12961
12962 mutex_enter(&port->fp_mutex);
12963 s_id = port->fp_port_id.port_id;
12964 class = port->fp_ns_login_class;
12965 mutex_exit(&port->fp_mutex);
12966
12967 cmd->cmd_job = job;
12968 cmd->cmd_private = ns_cmd;
12969 pkt = &cmd->cmd_pkt;
12970
12971 ct.ct_rev = CT_REV;
12972 ct.ct_inid = 0;
12973 ct.ct_fcstype = FCSTYPE_DIRECTORY;
12974 ct.ct_fcssubtype = FCSSUB_DS_NAME_SERVER;
12975 ct.ct_options = 0;
12976 ct.ct_reserved1 = 0;
12977 ct.ct_cmdrsp = cmd_code;
12978 ct.ct_aiusize = resp_len >> 2;
12979 ct.ct_reserved2 = 0;
12980 ct.ct_reason = 0;
12981 ct.ct_expln = 0;
12982 ct.ct_vendor = 0;
12983
12984 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&ct,
12985 (uint8_t *)pkt->pkt_cmd, sizeof (ct), DDI_DEV_AUTOINCR);
12986
12987 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_UNSOL_CONTROL;
12988 	pkt->pkt_cmd_fhdr.d_id = 0xFFFFFC;	/* directory (name) server */
12989 pkt->pkt_cmd_fhdr.s_id = s_id;
12990 pkt->pkt_cmd_fhdr.type = FC_TYPE_FC_SERVICES;
12991 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE |
12992 F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
12993 pkt->pkt_cmd_fhdr.seq_id = 0;
12994 pkt->pkt_cmd_fhdr.df_ctl = 0;
12995 pkt->pkt_cmd_fhdr.seq_cnt = 0;
12996 pkt->pkt_cmd_fhdr.ox_id = 0xffff;
12997 pkt->pkt_cmd_fhdr.rx_id = 0xffff;
12998 pkt->pkt_cmd_fhdr.ro = 0;
12999 pkt->pkt_cmd_fhdr.rsvd = 0;
13000
13001 pkt->pkt_comp = fp_ns_intr;
13002 pkt->pkt_ulp_private = (opaque_t)cmd;
13003 pkt->pkt_timeout = FP_NS_TIMEOUT;
13004
13005 if (cmd_buf) {
13006 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)cmd_buf,
13007 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
13008 cmd_len, DDI_DEV_AUTOINCR);
13009 }
13010
13011 cmd->cmd_transport = port->fp_fca_tran->fca_transport;
13012
13013 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
13014 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
13015 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE;
13016 cmd->cmd_retry_count = fp_retry_count;
13017 cmd->cmd_ulp_pkt = NULL;
13018 }
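
/*
 * Shape of the request fp_ct_init() builds (descriptive summary of the code
 * above): an fc_ct_header_t with ct_cmdrsp set to the NS command code and
 * ct_aiusize expressed in 4-byte words of expected response, followed by the
 * command payload; the frame is addressed to the directory/name server
 * well-known address 0xFFFFFC as an unsolicited control frame of type
 * FC_TYPE_FC_SERVICES.
 */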
13019
13020
13021 /*
13022 * Name Server request interrupt routine
13023 */
13024 static void
13025 fp_ns_intr(fc_packet_t *pkt)
13026 {
13027 fp_cmd_t *cmd;
13028 fc_local_port_t *port;
13029 fc_ct_header_t resp_hdr;
13030 fc_ct_header_t cmd_hdr;
13031 fctl_ns_req_t *ns_cmd;
13032
13033 cmd = pkt->pkt_ulp_private;
13034 port = cmd->cmd_port;
13035
13036 mutex_enter(&port->fp_mutex);
13037 port->fp_out_fpcmds--;
13038 mutex_exit(&port->fp_mutex);
13039
13040 FC_GET_RSP(port, pkt->pkt_cmd_acc, (uint8_t *)&cmd_hdr,
13041 (uint8_t *)pkt->pkt_cmd, sizeof (cmd_hdr), DDI_DEV_AUTOINCR);
13042 ns_cmd = (fctl_ns_req_t *)
13043 (((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private);
13044 if (!FP_IS_PKT_ERROR(pkt)) {
13045 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp_hdr,
13046 (uint8_t *)pkt->pkt_resp, sizeof (resp_hdr),
13047 DDI_DEV_AUTOINCR);
13048
13049 /*
13050 * On x86 architectures, make sure the resp_hdr is big endian.
13051 		 * This macro is a no-op on sparc architectures, which are
13052 		 * big endian already, so byte swapping there would only
13053 		 * waste time for the same end result.
13054 */
13055 MAKE_BE_32(&resp_hdr);
13056
13057 if (ns_cmd) {
13058 /*
13059 * Always copy out the response CT_HDR
13060 */
13061 bcopy(&resp_hdr, &ns_cmd->ns_resp_hdr,
13062 sizeof (resp_hdr));
13063 }
13064
13065 if (resp_hdr.ct_cmdrsp == FS_RJT_IU) {
13066 pkt->pkt_state = FC_PKT_FS_RJT;
13067 pkt->pkt_reason = resp_hdr.ct_reason;
13068 pkt->pkt_expln = resp_hdr.ct_expln;
13069 }
13070 }
13071
13072 if (FP_IS_PKT_ERROR(pkt)) {
13073 if (ns_cmd) {
13074 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) {
13075 ASSERT(ns_cmd->ns_pd != NULL);
13076
13077 /* Mark it OLD if not already done */
13078 mutex_enter(&ns_cmd->ns_pd->pd_mutex);
13079 ns_cmd->ns_pd->pd_type = PORT_DEVICE_OLD;
13080 mutex_exit(&ns_cmd->ns_pd->pd_mutex);
13081 }
13082
13083 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) {
13084 fctl_free_ns_cmd(ns_cmd);
13085 ((fp_cmd_t *)
13086 (pkt->pkt_ulp_private))->cmd_private = NULL;
13087 }
13088
13089 }
13090
13091 FP_TRACE(FP_NHEAD2(1, 0), "%x NS failure pkt state=%x "
13092 "reason=%x, expln=%x, NSCMD=%04X, NSRSP=%04X",
13093 port->fp_port_id.port_id, pkt->pkt_state,
13094 pkt->pkt_reason, pkt->pkt_expln,
13095 cmd_hdr.ct_cmdrsp, resp_hdr.ct_cmdrsp);
13096
13097 (void) fp_common_intr(pkt, 1);
13098
13099 return;
13100 }
13101
13102 if (resp_hdr.ct_cmdrsp != FS_ACC_IU) {
13103 uint32_t d_id;
13104 fc_local_port_t *port;
13105 fp_cmd_t *cmd;
13106
13107 d_id = pkt->pkt_cmd_fhdr.d_id;
13108 cmd = pkt->pkt_ulp_private;
13109 port = cmd->cmd_port;
13110 FP_TRACE(FP_NHEAD2(9, 0),
13111 "Bogus NS response received for D_ID=%x", d_id);
13112 }
13113
13114 if (cmd_hdr.ct_cmdrsp == NS_GA_NXT) {
13115 fp_gan_handler(pkt, ns_cmd);
13116 return;
13117 }
13118
13119 if (cmd_hdr.ct_cmdrsp >= NS_GPN_ID &&
13120 cmd_hdr.ct_cmdrsp <= NS_GID_PT) {
13121 if (ns_cmd) {
13122 if ((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0) {
13123 fp_ns_query_handler(pkt, ns_cmd);
13124 return;
13125 }
13126 }
13127 }
13128
13129 fp_iodone(pkt->pkt_ulp_private);
13130 }
13131
13132
13133 /*
13134 * Process NS_GAN response
13135 */
13136 static void
13137 fp_gan_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd)
13138 {
13139 int my_did;
13140 fc_portid_t d_id;
13141 fp_cmd_t *cmd;
13142 fc_local_port_t *port;
13143 fc_remote_port_t *pd;
13144 ns_req_gan_t gan_req;
13145 ns_resp_gan_t *gan_resp;
13146
13147 ASSERT(ns_cmd != NULL);
13148
13149 cmd = pkt->pkt_ulp_private;
13150 port = cmd->cmd_port;
13151
13152 gan_resp = (ns_resp_gan_t *)(pkt->pkt_resp + sizeof (fc_ct_header_t));
13153
13154 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&d_id,
13155 (uint8_t *)&gan_resp->gan_type_id, sizeof (d_id), DDI_DEV_AUTOINCR);
13156
13157 *(uint32_t *)&d_id = BE_32(*(uint32_t *)&d_id);
13158
13159 /*
13160 	 * In this case the priv_lilp_posit field actually
13161 	 * represents the relative position on a private loop.
13162 * So zero it while dealing with Port Identifiers.
13163 */
13164 d_id.priv_lilp_posit = 0;
13165 pd = fctl_get_remote_port_by_did(port, d_id.port_id);
13166 if (ns_cmd->ns_gan_sid == d_id.port_id) {
13167 /*
13168 		 * We've come full circle; time to get out.
13169 */
13170 fp_iodone(cmd);
13171 return;
13172 }
13173
13174 if (ns_cmd->ns_gan_sid == FCTL_GAN_START_ID) {
13175 ns_cmd->ns_gan_sid = d_id.port_id;
13176 }
13177
13178 mutex_enter(&port->fp_mutex);
13179 my_did = (d_id.port_id == port->fp_port_id.port_id) ? 1 : 0;
13180 mutex_exit(&port->fp_mutex);
13181
13182 FP_TRACE(FP_NHEAD1(1, 0), "GAN response; port=%p, fp %x pd %x", port,
13183 port->fp_port_id.port_id, d_id.port_id);
13184 if (my_did == 0) {
13185 la_wwn_t pwwn;
13186 la_wwn_t nwwn;
13187
13188 FP_TRACE(FP_NHEAD1(1, 0), "GAN response details; "
13189 "port=%p, d_id=%x, type_id=%x, "
13190 "pwwn=%x %x %x %x %x %x %x %x, "
13191 "nwwn=%x %x %x %x %x %x %x %x",
13192 port, d_id.port_id, gan_resp->gan_type_id,
13193
13194 gan_resp->gan_pwwn.raw_wwn[0],
13195 gan_resp->gan_pwwn.raw_wwn[1],
13196 gan_resp->gan_pwwn.raw_wwn[2],
13197 gan_resp->gan_pwwn.raw_wwn[3],
13198 gan_resp->gan_pwwn.raw_wwn[4],
13199 gan_resp->gan_pwwn.raw_wwn[5],
13200 gan_resp->gan_pwwn.raw_wwn[6],
13201 gan_resp->gan_pwwn.raw_wwn[7],
13202
13203 gan_resp->gan_nwwn.raw_wwn[0],
13204 gan_resp->gan_nwwn.raw_wwn[1],
13205 gan_resp->gan_nwwn.raw_wwn[2],
13206 gan_resp->gan_nwwn.raw_wwn[3],
13207 gan_resp->gan_nwwn.raw_wwn[4],
13208 gan_resp->gan_nwwn.raw_wwn[5],
13209 gan_resp->gan_nwwn.raw_wwn[6],
13210 gan_resp->gan_nwwn.raw_wwn[7]);
13211
13212 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&nwwn,
13213 (uint8_t *)&gan_resp->gan_nwwn, sizeof (nwwn),
13214 DDI_DEV_AUTOINCR);
13215
13216 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&pwwn,
13217 (uint8_t *)&gan_resp->gan_pwwn, sizeof (pwwn),
13218 DDI_DEV_AUTOINCR);
13219
13220 if (ns_cmd->ns_flags & FCTL_NS_CREATE_DEVICE && pd == NULL) {
13221 FP_TRACE(FP_NHEAD1(1, 0), "fp %x gan_hander create"
13222 "pd %x", port->fp_port_id.port_id, d_id.port_id);
13223 pd = fctl_create_remote_port(port, &nwwn, &pwwn,
13224 d_id.port_id, PD_PLOGI_INITIATOR, KM_NOSLEEP);
13225 }
13226 if (pd != NULL) {
13227 fp_stuff_device_with_gan(&pkt->pkt_resp_acc,
13228 pd, gan_resp);
13229 }
13230
13231 if (ns_cmd->ns_flags & FCTL_NS_GET_DEV_COUNT) {
13232 *((int *)ns_cmd->ns_data_buf) += 1;
13233 }
13234
13235 if (ns_cmd->ns_flags & FCTL_NS_FILL_NS_MAP) {
13236 ASSERT((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0);
13237
13238 if (ns_cmd->ns_flags & FCTL_NS_BUF_IS_USERLAND) {
13239 fc_port_dev_t *userbuf;
13240
13241 userbuf = ((fc_port_dev_t *)
13242 ns_cmd->ns_data_buf) +
13243 ns_cmd->ns_gan_index++;
13244
13245 userbuf->dev_did = d_id;
13246
13247 FC_GET_RSP(port, pkt->pkt_resp_acc,
13248 (uint8_t *)userbuf->dev_type,
13249 (uint8_t *)gan_resp->gan_fc4types,
13250 sizeof (userbuf->dev_type),
13251 DDI_DEV_AUTOINCR);
13252
13253 userbuf->dev_nwwn = nwwn;
13254 userbuf->dev_pwwn = pwwn;
13255
13256 if (pd != NULL) {
13257 mutex_enter(&pd->pd_mutex);
13258 userbuf->dev_state = pd->pd_state;
13259 userbuf->dev_hard_addr =
13260 pd->pd_hard_addr;
13261 mutex_exit(&pd->pd_mutex);
13262 } else {
13263 userbuf->dev_state =
13264 PORT_DEVICE_INVALID;
13265 }
13266 } else if (ns_cmd->ns_flags &
13267 FCTL_NS_BUF_IS_FC_PORTMAP) {
13268 fc_portmap_t *map;
13269
13270 map = ((fc_portmap_t *)
13271 ns_cmd->ns_data_buf) +
13272 ns_cmd->ns_gan_index++;
13273
13274 /*
13275 * First fill it like any new map
13276 * and update the port device info
13277 * below.
13278 */
13279 fp_fillout_new_nsmap(port, &pkt->pkt_resp_acc,
13280 map, gan_resp, d_id.port_id);
13281 if (pd != NULL) {
13282 fctl_copy_portmap(map, pd);
13283 } else {
13284 map->map_state = PORT_DEVICE_INVALID;
13285 map->map_type = PORT_DEVICE_NOCHANGE;
13286 }
13287 } else {
13288 caddr_t dst_ptr;
13289
13290 dst_ptr = ns_cmd->ns_data_buf +
13291 (NS_GAN_RESP_LEN) * ns_cmd->ns_gan_index++;
13292
13293 FC_GET_RSP(port, pkt->pkt_resp_acc,
13294 (uint8_t *)dst_ptr, (uint8_t *)gan_resp,
13295 NS_GAN_RESP_LEN, DDI_DEV_AUTOINCR);
13296 }
13297 } else {
13298 ns_cmd->ns_gan_index++;
13299 }
13300 if (ns_cmd->ns_gan_index >= ns_cmd->ns_gan_max) {
13301 fp_iodone(cmd);
13302 return;
13303 }
13304 }
13305
13306 gan_req.pid = d_id;
13307
13308 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&gan_req,
13309 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
13310 sizeof (gan_req), DDI_DEV_AUTOINCR);
13311
13312 if (cmd->cmd_transport(port->fp_fca_handle, pkt) != FC_SUCCESS) {
13313 pkt->pkt_state = FC_PKT_TRAN_ERROR;
13314 fp_iodone(cmd);
13315 } else {
13316 mutex_enter(&port->fp_mutex);
13317 port->fp_out_fpcmds++;
13318 mutex_exit(&port->fp_mutex);
13319 }
13320 }
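
/*
 * Shape of the GA_NXT walk driven above (illustrative pseudo-code using the
 * names from this file; callers such as fp_create_remote_port_by_ns() and
 * fp_ns_getmap() seed the first request):
 *
 *	ns_cmd->ns_gan_sid = FCTL_GAN_START_ID (or a caller-chosen S_ID);
 *	send GA_NXT;
 *	on each accepted response:
 *		d_id = port ID carried in the response;
 *		if (d_id == ns_cmd->ns_gan_sid)
 *			stop;				// wrapped around
 *		if (ns_cmd->ns_gan_sid == FCTL_GAN_START_ID)
 *			ns_cmd->ns_gan_sid = d_id;	// remember the start
 *		record or create the PD (unless d_id is our own port) and
 *		    bump ns_gan_index;
 *		if (ns_cmd->ns_gan_index >= ns_cmd->ns_gan_max)
 *			stop;
 *		resend GA_NXT with pid = d_id;		// ask for the next one
 */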
13321
13322
13323 /*
13324 * Handle NS Query interrupt
13325 */
13326 static void
13327 fp_ns_query_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd)
13328 {
13329 fp_cmd_t *cmd;
13330 fc_local_port_t *port;
13331 caddr_t src_ptr;
13332 uint32_t xfer_len;
13333
13334 cmd = pkt->pkt_ulp_private;
13335 port = cmd->cmd_port;
13336
13337 xfer_len = ns_cmd->ns_resp_size;
13338
13339 FP_TRACE(FP_NHEAD1(1, 0), "NS Query response, cmd_code=%x, xfer_len=%x",
13340 ns_cmd->ns_cmd_code, xfer_len);
13341
13342 if (ns_cmd->ns_cmd_code == NS_GPN_ID) {
13343 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t);
13344
13345 FP_TRACE(FP_NHEAD1(6, 0), "GPN_ID results; %x %x %x %x %x",
13346 src_ptr[0], src_ptr[1], src_ptr[2], src_ptr[3], src_ptr[4]);
13347 }
13348
13349 if (xfer_len <= ns_cmd->ns_data_len) {
13350 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t);
13351 FC_GET_RSP(port, pkt->pkt_resp_acc,
13352 (uint8_t *)ns_cmd->ns_data_buf,
13353 (uint8_t *)src_ptr, xfer_len, DDI_DEV_AUTOINCR);
13354 }
13355
13356 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) {
13357 ASSERT(ns_cmd->ns_pd != NULL);
13358
13359 mutex_enter(&ns_cmd->ns_pd->pd_mutex);
13360 if (ns_cmd->ns_pd->pd_type == PORT_DEVICE_OLD) {
13361 ns_cmd->ns_pd->pd_type = PORT_DEVICE_NOCHANGE;
13362 }
13363 mutex_exit(&ns_cmd->ns_pd->pd_mutex);
13364 }
13365
13366 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) {
13367 fctl_free_ns_cmd(ns_cmd);
13368 ((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private = NULL;
13369 }
13370 fp_iodone(cmd);
13371 }
13372
13373
13374 /*
13375 * Handle unsolicited ADISC ELS request
13376 */
13377 static void
13378 fp_handle_unsol_adisc(fc_local_port_t *port, fc_unsol_buf_t *buf,
13379 fc_remote_port_t *pd, job_request_t *job)
13380 {
13381 int rval;
13382 fp_cmd_t *cmd;
13383
13384 FP_TRACE(FP_NHEAD1(5, 0), "ADISC; port=%p, D_ID=%x state=%x, pd=%p",
13385 port, pd->pd_port_id.port_id, pd->pd_state, pd);
13386 mutex_enter(&pd->pd_mutex);
13387 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
13388 mutex_exit(&pd->pd_mutex);
13389 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
13390 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
13391 0, KM_SLEEP, pd);
13392 if (cmd != NULL) {
13393 fp_els_rjt_init(port, cmd, buf,
13394 FC_ACTION_NON_RETRYABLE,
13395 FC_REASON_INVALID_LINK_CTRL, job);
13396
13397 if (fp_sendcmd(port, cmd,
13398 port->fp_fca_handle) != FC_SUCCESS) {
13399 fp_free_pkt(cmd);
13400 }
13401 }
13402 }
13403 } else {
13404 mutex_exit(&pd->pd_mutex);
13405 /*
13406 * Yes, yes, we don't have a hard address. But we
13407 		 * should still respond. Huh ? Visit 21.19.2
13408 * of FC-PH-2 which essentially says that if an
13409 * NL_Port doesn't have a hard address, or if a port
13410 * does not have FC-AL capability, it shall report
13411 * zeroes in this field.
13412 */
13413 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t),
13414 0, KM_SLEEP, pd);
13415 if (cmd == NULL) {
13416 return;
13417 }
13418 fp_adisc_acc_init(port, cmd, buf, job);
13419 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
13420 if (rval != FC_SUCCESS) {
13421 fp_free_pkt(cmd);
13422 }
13423 }
13424 }
13425
13426
13427 /*
13428 * Initialize ADISC response.
13429 */
13430 static void
13431 fp_adisc_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf,
13432 job_request_t *job)
13433 {
13434 fc_packet_t *pkt;
13435 la_els_adisc_t payload;
13436
13437 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
13438 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
13439 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
13440 cmd->cmd_retry_count = 1;
13441 cmd->cmd_ulp_pkt = NULL;
13442
13443 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
13444 cmd->cmd_job = job;
13445
13446 pkt = &cmd->cmd_pkt;
13447
13448 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
13449
13450 payload.ls_code.ls_code = LA_ELS_ACC;
13451 payload.ls_code.mbz = 0;
13452
13453 mutex_enter(&port->fp_mutex);
13454 payload.nport_id = port->fp_port_id;
13455 payload.hard_addr = port->fp_hard_addr;
13456 mutex_exit(&port->fp_mutex);
13457
13458 payload.port_wwn = port->fp_service_params.nport_ww_name;
13459 payload.node_wwn = port->fp_service_params.node_ww_name;
13460
13461 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
13462 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
13463 }
13464
13465
13466 /*
13467 * Hold and Install the requested ULP drivers
13468 */
13469 static void
13470 fp_load_ulp_modules(dev_info_t *dip, fc_local_port_t *port)
13471 {
13472 int len;
13473 int count;
13474 int data_len;
13475 major_t ulp_major;
13476 caddr_t ulp_name;
13477 caddr_t data_ptr;
13478 caddr_t data_buf;
13479
13480 ASSERT(!MUTEX_HELD(&port->fp_mutex));
13481
13482 data_buf = NULL;
13483 if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
13484 DDI_PROP_DONTPASS, "load-ulp-list",
13485 (caddr_t)&data_buf, &data_len) != DDI_PROP_SUCCESS) {
13486 return;
13487 }
13488
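	/*
	 * Illustrative layout (hypothetical driver names): a property of
	 * the form "load-ulp-list" = "2", "fcp", "fcip" yields
	 * fp_ulp_nload = 2, and the loop below attempts
	 * modload("drv", ...) for each of the two names.
	 */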
13489 len = strlen(data_buf);
13490 port->fp_ulp_nload = fctl_atoi(data_buf, 10);
13491
13492 data_ptr = data_buf + len + 1;
13493 for (count = 0; count < port->fp_ulp_nload; count++) {
13494 len = strlen(data_ptr) + 1;
13495 ulp_name = kmem_zalloc(len, KM_SLEEP);
13496 bcopy(data_ptr, ulp_name, len);
13497
13498 ulp_major = ddi_name_to_major(ulp_name);
13499
13500 if (ulp_major != (major_t)-1) {
13501 if (modload("drv", ulp_name) < 0) {
13502 fp_printf(port, CE_NOTE, FP_LOG_ONLY,
13503 0, NULL, "failed to load %s",
13504 ulp_name);
13505 }
13506 } else {
13507 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
13508 "%s isn't a valid driver", ulp_name);
13509 }
13510
13511 kmem_free(ulp_name, len);
13512 data_ptr += len; /* Skip to next field */
13513 }
13514
13515 /*
13516 * Free the memory allocated by DDI
13517 */
13518 if (data_buf != NULL) {
13519 kmem_free(data_buf, data_len);
13520 }
13521 }
13522
13523
13524 /*
13525 * Perform LOGO operation
13526 */
13527 static int
13528 fp_logout(fc_local_port_t *port, fc_remote_port_t *pd, job_request_t *job)
13529 {
13530 int rval;
13531 fp_cmd_t *cmd;
13532
13533 ASSERT(!MUTEX_HELD(&port->fp_mutex));
13534 ASSERT(!MUTEX_HELD(&pd->pd_mutex));
13535
13536 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t),
13537 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd);
13538
13539 mutex_enter(&port->fp_mutex);
13540 mutex_enter(&pd->pd_mutex);
13541
13542 ASSERT(pd->pd_state == PORT_DEVICE_LOGGED_IN);
13543 ASSERT(pd->pd_login_count == 1);
13544
13545 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
13546 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
13547 cmd->cmd_flags = 0;
13548 cmd->cmd_retry_count = 1;
13549 cmd->cmd_ulp_pkt = NULL;
13550
13551 fp_logo_init(pd, cmd, job);
13552
13553 mutex_exit(&pd->pd_mutex);
13554 mutex_exit(&port->fp_mutex);
13555
13556 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
13557 if (rval != FC_SUCCESS) {
13558 fp_iodone(cmd);
13559 }
13560
13561 return (rval);
13562 }
13563
13564
13565 /*
13566 * Perform Port attach callbacks to registered ULPs
13567 */
13568 static void
13569 fp_attach_ulps(fc_local_port_t *port, fc_attach_cmd_t cmd)
13570 {
13571 fp_soft_attach_t *att;
13572
13573 att = kmem_zalloc(sizeof (*att), KM_SLEEP);
13574 att->att_cmd = cmd;
13575 att->att_port = port;
13576
13577 /*
13578 * We need to remember whether or not fctl_busy_port
13579 * succeeded so we know whether or not to call
13580 * fctl_idle_port when the task is complete.
13581 */
13582
13583 if (fctl_busy_port(port) == 0) {
13584 att->att_need_pm_idle = B_TRUE;
13585 } else {
13586 att->att_need_pm_idle = B_FALSE;
13587 }
13588
13589 (void) taskq_dispatch(port->fp_taskq, fp_ulp_port_attach,
13590 att, KM_SLEEP);
13591 }
13592
13593
13594 /*
13595 * Forward state change notifications on to interested ULPs.
13596 * Spawns a call to fctl_ulp_statec_cb() in a taskq thread to do all the
13597 * real work.
13598 */
13599 static int
13600 fp_ulp_notify(fc_local_port_t *port, uint32_t statec, int sleep)
13601 {
13602 fc_port_clist_t *clist;
13603
13604 clist = kmem_zalloc(sizeof (*clist), sleep);
13605 if (clist == NULL) {
13606 return (FC_NOMEM);
13607 }
13608
13609 clist->clist_state = statec;
13610
13611 mutex_enter(&port->fp_mutex);
13612 clist->clist_flags = port->fp_topology;
13613 mutex_exit(&port->fp_mutex);
13614
13615 clist->clist_port = (opaque_t)port;
13616 clist->clist_len = 0;
13617 clist->clist_size = 0;
13618 clist->clist_map = NULL;
13619
13620 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb,
13621 clist, KM_SLEEP);
13622
13623 return (FC_SUCCESS);
13624 }
13625
13626
13627 /*
13628 * Get name server map
13629 */
13630 static int
13631 fp_ns_getmap(fc_local_port_t *port, job_request_t *job, fc_portmap_t **map,
13632 uint32_t *len, uint32_t sid)
13633 {
13634 int ret;
13635 fctl_ns_req_t *ns_cmd;
13636
13637 /*
13638 * Don't let the allocator do anything for response;
13639 	 * we have a buffer ready to fill out.
13640 */
13641 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t),
13642 sizeof (ns_resp_gan_t), 0, (FCTL_NS_FILL_NS_MAP |
13643 FCTL_NS_BUF_IS_FC_PORTMAP), KM_SLEEP);
13644
13645 	ASSERT(ns_cmd != NULL);
13646 
13647 	ns_cmd->ns_data_len = sizeof (**map) * (*len);
13648 	ns_cmd->ns_data_buf = (caddr_t)*map;
13649
13650 ns_cmd->ns_gan_index = 0;
13651 ns_cmd->ns_gan_sid = sid;
13652 ns_cmd->ns_cmd_code = NS_GA_NXT;
13653 ns_cmd->ns_gan_max = *len;
13654
13655 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
13656
13657 if (ns_cmd->ns_gan_index != *len) {
13658 *len = ns_cmd->ns_gan_index;
13659 }
13660 ns_cmd->ns_data_len = 0;
13661 ns_cmd->ns_data_buf = NULL;
13662 fctl_free_ns_cmd(ns_cmd);
13663
13664 return (ret);
13665 }
13666
13667
13668 /*
13669 * Create a remote port in Fabric topology by using NS services
13670 */
13671 static fc_remote_port_t *
13672 fp_create_remote_port_by_ns(fc_local_port_t *port, uint32_t d_id, int sleep)
13673 {
13674 int rval;
13675 job_request_t *job;
13676 fctl_ns_req_t *ns_cmd;
13677 fc_remote_port_t *pd;
13678
13679 ASSERT(!MUTEX_HELD(&port->fp_mutex));
13680
13681 FP_TRACE(FP_NHEAD1(1, 0), "PD creation begin; port=%p, d_id=%x",
13682 port, d_id);
13683
13684 #ifdef DEBUG
13685 mutex_enter(&port->fp_mutex);
13686 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology));
13687 mutex_exit(&port->fp_mutex);
13688 #endif
13689
13690 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, (opaque_t)port, sleep);
13691 if (job == NULL) {
13692 return (NULL);
13693 }
13694
13695 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t),
13696 sizeof (ns_resp_gan_t), 0, (FCTL_NS_CREATE_DEVICE |
13697 FCTL_NS_NO_DATA_BUF), sleep);
13698 if (ns_cmd == NULL) {
13699 return (NULL);
13700 }
13701
13702 job->job_result = FC_SUCCESS;
13703 ns_cmd->ns_gan_max = 1;
13704 ns_cmd->ns_cmd_code = NS_GA_NXT;
13705 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID;
13706 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id - 1;
13707 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0;
13708
13709 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
13710 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
13711 fctl_free_ns_cmd(ns_cmd);
13712
13713 if (rval != FC_SUCCESS || job->job_result != FC_SUCCESS) {
13714 fctl_dealloc_job(job);
13715 return (NULL);
13716 }
13717 fctl_dealloc_job(job);
13718
13719 pd = fctl_get_remote_port_by_did(port, d_id);
13720
13721 FP_TRACE(FP_NHEAD1(1, 0), "PD creation end; port=%p, d_id=%x, pd=%p",
13722 port, d_id, pd);
13723
13724 return (pd);
13725 }
13726
13727
13728 /*
13729  * Check the permissions on an ioctl command. If it requires an EXCLUSIVE
13730  * open that was not performed, return FAILURE to shut the door on it. If
13731  * the ioctl command isn't in the list built, shut the door on that too.
13732  *
13733  * Certain ioctls perform hardware accesses in FCA drivers, and it must be
13734  * made sure that users open the port for exclusive access while
13735  * performing those operations.
13736 *
13737 * This can prevent a casual user from inflicting damage on the port by
13738 * sending these ioctls from multiple processes/threads (there is no good
13739 * reason why one would need to do that) without actually realizing how
13740 * expensive such commands could turn out to be.
13741 *
13742 * It is also important to note that, even with an exclusive access,
13743 * multiple threads can share the same file descriptor and fire down
13744 * commands in parallel. To prevent that the driver needs to make sure
13745 * that such commands aren't in progress already. This is taken care of
13746 * in the FP_EXCL_BUSY bit of fp_flag.
13747 */
13748 static int
13749 fp_check_perms(uchar_t open_flag, uint16_t ioctl_cmd)
13750 {
13751 int ret = FC_FAILURE;
13752 int count;
13753
13754 for (count = 0;
13755 count < sizeof (fp_perm_list) / sizeof (fp_perm_list[0]);
13756 count++) {
13757 if (fp_perm_list[count].fp_ioctl_cmd == ioctl_cmd) {
13758 if (fp_perm_list[count].fp_open_flag & open_flag) {
13759 ret = FC_SUCCESS;
13760 }
13761 break;
13762 }
13763 }
13764
13765 return (ret);
13766 }
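
/*
 * Conceptual sketch of the table scanned above (hypothetical entries; the
 * real fp_perm_list is defined elsewhere): each element carries an
 * fp_ioctl_cmd and the fp_open_flag bits required to issue it, e.g.
 *
 *	fp_ioctl_cmd = <ioctl that touches FCA hardware>,
 *	fp_open_flag = <exclusive-open flag>
 *
 * so an ioctl issued without the matching open mode gets FC_FAILURE back.
 */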
13767
13768
13769 /*
13770 * Bind Port driver's unsolicited, state change callbacks
13771 */
13772 static int
13773 fp_bind_callbacks(fc_local_port_t *port)
13774 {
13775 fc_fca_bind_info_t bind_info = {0};
13776 fc_fca_port_info_t *port_info;
13777 int rval = DDI_SUCCESS;
13778 uint16_t class;
13779 int node_namelen, port_namelen;
13780 char *nname = NULL, *pname = NULL;
13781
13782 ASSERT(!MUTEX_HELD(&port->fp_mutex));
13783
13784 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip,
13785 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
13786 "node-name", &nname) != DDI_PROP_SUCCESS) {
13787 FP_TRACE(FP_NHEAD1(1, 0),
13788 "fp_bind_callback fail to get node-name");
13789 }
13790 if (nname) {
13791 fc_str_to_wwn(nname, &(bind_info.port_nwwn));
13792 }
13793
13794 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip,
13795 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
13796 "port-name", &pname) != DDI_PROP_SUCCESS) {
13797 FP_TRACE(FP_NHEAD1(1, 0),
13798 "fp_bind_callback fail to get port-name");
13799 }
13800 if (pname) {
13801 fc_str_to_wwn(pname, &(bind_info.port_pwwn));
13802 }
13803
13804 if (port->fp_npiv_type == FC_NPIV_PORT) {
13805 bind_info.port_npiv = 1;
13806 }
13807
13808 /*
13809 * fca_bind_port returns the FCA driver's handle for the local
13810 * port instance. If the port number isn't supported it returns NULL.
13811 * It also sets up callback in the FCA for various
13812 * things like state change, ELS etc..
13813 */
13814 bind_info.port_statec_cb = fp_statec_cb;
13815 bind_info.port_unsol_cb = fp_unsol_cb;
13816 bind_info.port_num = port->fp_port_num;
13817 bind_info.port_handle = (opaque_t)port;
13818
13819 port_info = kmem_zalloc(sizeof (*port_info), KM_SLEEP);
13820
13821 /*
13822 * Hold the port driver mutex as the callbacks are bound until the
13823 * service parameters are properly filled in (in order to be able to
13824 * properly respond to unsolicited ELS requests)
13825 */
13826 mutex_enter(&port->fp_mutex);
13827
13828 port->fp_fca_handle = port->fp_fca_tran->fca_bind_port(
13829 port->fp_fca_dip, port_info, &bind_info);
13830
13831 if (port->fp_fca_handle == NULL) {
13832 rval = DDI_FAILURE;
13833 goto exit;
13834 }
13835
13836 /*
13837 * Only fcoei will set this bit
13838 */
13839 if (port_info->pi_port_state & FC_STATE_FCA_IS_NODMA) {
13840 port->fp_soft_state |= FP_SOFT_FCA_IS_NODMA;
13841 port_info->pi_port_state &= ~(FC_STATE_FCA_IS_NODMA);
13842 }
13843
13844 port->fp_bind_state = port->fp_state = port_info->pi_port_state;
13845 port->fp_service_params = port_info->pi_login_params;
13846 port->fp_hard_addr = port_info->pi_hard_addr;
13847
13848 /* Copy from the FCA structure to the FP structure */
13849 port->fp_hba_port_attrs = port_info->pi_attrs;
13850
13851 if (port_info->pi_rnid_params.status == FC_SUCCESS) {
13852 port->fp_rnid_init = 1;
13853 bcopy(&port_info->pi_rnid_params.params,
13854 &port->fp_rnid_params,
13855 sizeof (port->fp_rnid_params));
13856 } else {
13857 port->fp_rnid_init = 0;
13858 }
13859
13860 node_namelen = strlen((char *)&port_info->pi_attrs.sym_node_name);
13861 if (node_namelen) {
13862 bcopy(&port_info->pi_attrs.sym_node_name,
13863 &port->fp_sym_node_name,
13864 node_namelen);
13865 port->fp_sym_node_namelen = node_namelen;
13866 }
13867 port_namelen = strlen((char *)&port_info->pi_attrs.sym_port_name);
13868 if (port_namelen) {
13869 bcopy(&port_info->pi_attrs.sym_port_name,
13870 &port->fp_sym_port_name,
13871 port_namelen);
13872 port->fp_sym_port_namelen = port_namelen;
13873 }
13874
13875 /* zero out the normally unused fields right away */
13876 port->fp_service_params.ls_code.mbz = 0;
13877 port->fp_service_params.ls_code.ls_code = 0;
13878 bzero(&port->fp_service_params.reserved,
13879 sizeof (port->fp_service_params.reserved));
13880
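/*
 * Record the supported classes of service; the high-order bit of
 * each class options word indicates that the class is valid.
 */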
13881 class = port_info->pi_login_params.class_1.class_opt;
13882 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS1 : 0;
13883
13884 class = port_info->pi_login_params.class_2.class_opt;
13885 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS2 : 0;
13886
13887 class = port_info->pi_login_params.class_3.class_opt;
13888 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS3 : 0;
13889
13890 exit:
13891 if (nname) {
13892 ddi_prop_free(nname);
13893 }
13894 if (pname) {
13895 ddi_prop_free(pname);
13896 }
13897 mutex_exit(&port->fp_mutex);
13898 kmem_free(port_info, sizeof (*port_info));
13899
13900 return (rval);
13901 }
13902
13903
13904 /*
13905 * Retrieve FCA capabilities
13906 */
13907 static void
13908 fp_retrieve_caps(fc_local_port_t *port)
13909 {
13910 int rval;
13911 int ub_count;
13912 fc_fcp_dma_t fcp_dma;
13913 fc_reset_action_t action;
13914 fc_dma_behavior_t dma_behavior;
13915
13916 ASSERT(!MUTEX_HELD(&port->fp_mutex));
13917
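/* How many unsolicited buffers can this port use? */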
13918 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle,
13919 FC_CAP_UNSOL_BUF, &ub_count);
13920
13921 switch (rval) {
13922 case FC_CAP_FOUND:
13923 case FC_CAP_SETTABLE:
13924 switch (ub_count) {
13925 case 0:
13926 break;
13927
13928 case -1:
13929 ub_count = fp_unsol_buf_count;
13930 break;
13931
13932 default:
13933 /* 1/4th of total buffers is my share */
13934 ub_count =
13935 (ub_count / port->fp_fca_tran->fca_numports) >> 2;
13936 break;
13937 }
13938 break;
13939
13940 default:
13941 ub_count = 0;
13942 break;
13943 }
13944
13945 mutex_enter(&port->fp_mutex);
13946 port->fp_ub_count = ub_count;
13947 mutex_exit(&port->fp_mutex);
13948
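/* How does the FCA return outstanding packets after a reset? */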
13949 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle,
13950 FC_CAP_POST_RESET_BEHAVIOR, &action);
13951
13952 switch (rval) {
13953 case FC_CAP_FOUND:
13954 case FC_CAP_SETTABLE:
13955 switch (action) {
13956 case FC_RESET_RETURN_NONE:
13957 case FC_RESET_RETURN_ALL:
13958 case FC_RESET_RETURN_OUTSTANDING:
13959 break;
13960
13961 default:
13962 action = FC_RESET_RETURN_NONE;
13963 break;
13964 }
13965 break;
13966
13967 default:
13968 action = FC_RESET_RETURN_NONE;
13969 break;
13970 }
13971 mutex_enter(&port->fp_mutex);
13972 port->fp_reset_action = action;
13973 mutex_exit(&port->fp_mutex);
13974
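/* Is DMA streaming allowed on unaligned buffers? */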
13975 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle,
13976 FC_CAP_NOSTREAM_ON_UNALIGN_BUF, &dma_behavior);
13977
13978 switch (rval) {
13979 case FC_CAP_FOUND:
13980 switch (dma_behavior) {
13981 case FC_ALLOW_STREAMING:
13982 /* FALLTHROUGH */
13983 case FC_NO_STREAMING:
13984 break;
13985
13986 default:
13987 /*
13988 * If capability was found and the value
13989 * was incorrect, assume the worst
13990 */
13991 dma_behavior = FC_NO_STREAMING;
13992 break;
13993 }
13994 break;
13995
13996 default:
13997 /*
13998 * If capability was not defined - allow streaming; existing
13999 * FCAs should not be affected.
14000 */
14001 dma_behavior = FC_ALLOW_STREAMING;
14002 break;
14003 }
14004 mutex_enter(&port->fp_mutex);
14005 port->fp_dma_behavior = dma_behavior;
14006 mutex_exit(&port->fp_mutex);
14007
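/* Determine the FCP DMA behavior (FC_DVMA_SPACE or FC_NO_DVMA_SPACE). */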
14008 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle,
14009 FC_CAP_FCP_DMA, &fcp_dma);
14010
14011 if (rval != FC_CAP_FOUND || (fcp_dma != FC_NO_DVMA_SPACE &&
14012 fcp_dma != FC_DVMA_SPACE)) {
14013 fcp_dma = FC_DVMA_SPACE;
14014 }
14015
14016 mutex_enter(&port->fp_mutex);
14017 port->fp_fcp_dma = fcp_dma;
14018 mutex_exit(&port->fp_mutex);
14019 }
14020
14021
14022 /*
14023 * Handle Domain, Area changes in the Fabric.
14024 */
14025 static void
14026 fp_validate_area_domain(fc_local_port_t *port, uint32_t id, uint32_t mask,
14027 job_request_t *job, int sleep)
14028 {
14029 #ifdef DEBUG
14030 uint32_t dcnt;
14031 #endif
14032 int rval;
14033 int send;
14034 int index;
14035 int listindex;
14036 int login;
14037 int job_flags;
14038 char ww_name[17];
14039 uint32_t d_id;
14040 uint32_t count;
14041 fctl_ns_req_t *ns_cmd;
14042 fc_portmap_t *list;
14043 fc_orphan_t *orp;
14044 fc_orphan_t *norp;
14045 fc_orphan_t *prev;
14046 fc_remote_port_t *pd;
14047 fc_remote_port_t *npd;
14048 struct pwwn_hash *head;
14049
14050 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
14051 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
14052 0, sleep);
14053 if (ns_cmd == NULL) {
14054 mutex_enter(&port->fp_mutex);
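/*
 * Drop the RSCN count for this event, skipping over the
 * FC_INVALID_RSCN_COUNT sentinel if the decrement lands on it.
 */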
14055 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
14056 --port->fp_rscn_count;
14057 }
14058 mutex_exit(&port->fp_mutex);
14059
14060 return;
14061 }
14062 ns_cmd->ns_cmd_code = NS_GID_PN;
14063
14064 /*
14065 * We need to get a new count of devices from the
14066 * name server, which will also create any new devices
14067 * as needed.
14068 */
14069
14070 (void) fp_ns_get_devcount(port, job, 1, sleep);
14071
14072 FP_TRACE(FP_NHEAD1(3, 0),
14073 "fp_validate_area_domain: get_devcount found %d devices",
14074 port->fp_total_devices);
14075
14076 mutex_enter(&port->fp_mutex);
14077
14078 for (count = index = 0; index < pwwn_table_size; index++) {
14079 head = &port->fp_pwwn_table[index];
14080 pd = head->pwwn_head;
14081 while (pd != NULL) {
14082 mutex_enter(&pd->pd_mutex);
14083 if (pd->pd_flags != PD_ELS_IN_PROGRESS) {
14084 if ((pd->pd_port_id.port_id & mask) == id &&
14085 pd->pd_recepient == PD_PLOGI_INITIATOR) {
14086 count++;
14087 pd->pd_type = PORT_DEVICE_OLD;
14088 pd->pd_flags = PD_ELS_MARK;
14089 }
14090 }
14091 mutex_exit(&pd->pd_mutex);
14092 pd = pd->pd_wwn_hnext;
14093 }
14094 }
14095
14096 #ifdef DEBUG
14097 dcnt = count;
14098 #endif /* DEBUG */
14099
14100 /*
14101 * Since port->fp_orphan_count is declared an 'int' it is
14102 * theoretically possible that the count could go negative.
14103 *
14104 * This would be bad and if that happens we really do want
14105 * to know.
14106 */
14107
14108 ASSERT(port->fp_orphan_count >= 0);
14109
14110 count += port->fp_orphan_count;
14111
14112 /*
14113 * We add the port->fp_total_devices value to the count when the
14114 * count is still zero, e.g. when our port is newly attached: no
14115 * discovery has been done yet and there are no orphans in the
14116 * port's orphan list. Without this addition we would not
14117 * allocate enough memory to do discovery with.
14119 */
14120
14121 if (count == 0) {
14122 count += port->fp_total_devices;
14123 FP_TRACE(FP_NHEAD1(3, 0), "fp_validate_area_domain: "
14124 "0x%x orphans found, using 0x%x",
14125 port->fp_orphan_count, count);
14126 }
14127
14128 mutex_exit(&port->fp_mutex);
14129
14130 /*
14131 * Allocate the change list
14132 */
14133
14134 list = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep);
14135 if (list == NULL) {
14136 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
14137 " Not enough memory to service RSCNs"
14138 " for %d ports, continuing...", count);
14139
14140 fctl_free_ns_cmd(ns_cmd);
14141
14142 mutex_enter(&port->fp_mutex);
14143 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
14144 --port->fp_rscn_count;
14145 }
14146 mutex_exit(&port->fp_mutex);
14147
14148 return;
14149 }
14150
14151 /*
14152 * Attempt to validate or invalidate the devices that were
14153 * already in the pwwn hash table.
14154 */
14155
14156 mutex_enter(&port->fp_mutex);
14157 for (listindex = 0, index = 0; index < pwwn_table_size; index++) {
14158 head = &port->fp_pwwn_table[index];
14159 npd = head->pwwn_head;
14160
14161 while ((pd = npd) != NULL) {
14162 npd = pd->pd_wwn_hnext;
14163
14164 mutex_enter(&pd->pd_mutex);
14165 if ((pd->pd_port_id.port_id & mask) == id &&
14166 pd->pd_flags == PD_ELS_MARK) {
14167 la_wwn_t *pwwn;
14168
14169 job->job_result = FC_SUCCESS;
14170
14171 ((ns_req_gid_pn_t *)
14172 (ns_cmd->ns_cmd_buf))->pwwn =
14173 pd->pd_port_name;
14174
14175 pwwn = &pd->pd_port_name;
14176 d_id = pd->pd_port_id.port_id;
14177
14178 mutex_exit(&pd->pd_mutex);
14179 mutex_exit(&port->fp_mutex);
14180
14181 rval = fp_ns_query(port, ns_cmd, job, 1,
14182 sleep);
14183 if (rval != FC_SUCCESS) {
14184 fc_wwn_to_str(pwwn, ww_name);
14185
14186 FP_TRACE(FP_NHEAD1(3, 0),
14187 "AREA RSCN: PD disappeared; "
14188 "d_id=%x, PWWN=%s", d_id, ww_name);
14189
14190 FP_TRACE(FP_NHEAD2(9, 0),
14191 "N_x Port with D_ID=%x,"
14192 " PWWN=%s disappeared from fabric",
14193 d_id, ww_name);
14194
14195 fp_fillout_old_map(list + listindex++,
14196 pd, 1);
14197 } else {
14198 fctl_copy_portmap(list + listindex++,
14199 pd);
14200
14201 mutex_enter(&pd->pd_mutex);
14202 pd->pd_flags = PD_ELS_IN_PROGRESS;
14203 mutex_exit(&pd->pd_mutex);
14204 }
14205
14206 mutex_enter(&port->fp_mutex);
14207 } else {
14208 mutex_exit(&pd->pd_mutex);
14209 }
14210 }
14211 }
14212
14213 mutex_exit(&port->fp_mutex);
14214
14215 ASSERT(listindex == dcnt);
14216
14217 job->job_counter = listindex;
14218 job_flags = job->job_flags;
14219 job->job_flags |= JOB_TYPE_FP_ASYNC;
14220
14221 /*
14222 * Log in to (if we were the initiator) or validate the devices
14223 * in the port map.
14224 */
14225
14226 for (index = 0; index < listindex; index++) {
14227 pd = list[index].map_pd;
14228
14229 mutex_enter(&pd->pd_mutex);
14230 ASSERT((pd->pd_port_id.port_id & mask) == id);
14231
14232 if (pd->pd_flags != PD_ELS_IN_PROGRESS) {
14233 ASSERT(pd->pd_type == PORT_DEVICE_OLD);
14234 mutex_exit(&pd->pd_mutex);
14235 fp_jobdone(job);
14236 continue;
14237 }
14238
14239 login = (pd->pd_state == PORT_DEVICE_LOGGED_IN) ? 1 : 0;
14240 send = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0;
14241 d_id = pd->pd_port_id.port_id;
14242 mutex_exit(&pd->pd_mutex);
14243
14244 if ((d_id & mask) == id && send) {
14245 if (login) {
14246 FP_TRACE(FP_NHEAD1(6, 0),
14247 "RSCN and PLOGI request;"
14248 " pd=%p, job=%p d_id=%x, index=%d", pd,
14249 job, d_id, index);
14250
14251 rval = fp_port_login(port, d_id, job,
14252 FP_CMD_PLOGI_RETAIN, sleep, pd, NULL);
14253 if (rval != FC_SUCCESS) {
14254 mutex_enter(&pd->pd_mutex);
14255 pd->pd_flags = PD_IDLE;
14256 mutex_exit(&pd->pd_mutex);
14257
14258 job->job_result = rval;
14259 fp_jobdone(job);
14260 }
14261 FP_TRACE(FP_NHEAD1(1, 0),
14262 "PLOGI succeeded:no skip(1) for "
14263 "D_ID %x", d_id);
14264 list[index].map_flags |=
14265 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY;
14266 } else {
14267 FP_TRACE(FP_NHEAD1(6, 0), "RSCN and NS request;"
14268 " pd=%p, job=%p d_id=%x, index=%d", pd,
14269 job, d_id, index);
14270
14271 rval = fp_ns_validate_device(port, pd, job,
14272 0, sleep);
14273 if (rval != FC_SUCCESS) {
14274 fp_jobdone(job);
14275 }
14276 mutex_enter(&pd->pd_mutex);
14277 pd->pd_flags = PD_IDLE;
14278 mutex_exit(&pd->pd_mutex);
14279 }
14280 } else {
14281 FP_TRACE(FP_NHEAD1(6, 0),
14282 "RSCN and NO request sent; pd=%p,"
14283 " d_id=%x, index=%d", pd, d_id, index);
14284
14285 mutex_enter(&pd->pd_mutex);
14286 pd->pd_flags = PD_IDLE;
14287 mutex_exit(&pd->pd_mutex);
14288
14289 fp_jobdone(job);
14290 }
14291 }
14292
14293 if (listindex) {
14294 fctl_jobwait(job);
14295 }
14296 job->job_flags = job_flags;
14297
14298 /*
14299 * Orphan list validation.
14300 */
14301 mutex_enter(&port->fp_mutex);
14302 for (prev = NULL, orp = port->fp_orphan_list; port->fp_orphan_count &&
14303 orp != NULL; orp = norp) {
14304 norp = orp->orp_next;
14305 mutex_exit(&port->fp_mutex);
14306
14307 job->job_counter = 1;
14308 job->job_result = FC_SUCCESS;
14309 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
14310
14311 ((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = orp->orp_pwwn;
14312
14313 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0;
14314 ((ns_resp_gid_pn_t *)
14315 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0;
14316
14317 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
14318 if (rval == FC_SUCCESS) {
14319 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
14320 pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP);
14321 if (pd != NULL) {
14322 fc_wwn_to_str(&orp->orp_pwwn, ww_name);
14323
14324 FP_TRACE(FP_NHEAD1(6, 0),
14325 "RSCN and ORPHAN list "
14326 "success; d_id=%x, PWWN=%s", d_id, ww_name);
14327
14328 FP_TRACE(FP_NHEAD2(6, 0),
14329 "N_x Port with D_ID=%x, PWWN=%s reappeared"
14330 " in fabric", d_id, ww_name);
14331
14332 mutex_enter(&port->fp_mutex);
14333 if (prev) {
14334 prev->orp_next = orp->orp_next;
14335 } else {
14336 ASSERT(orp == port->fp_orphan_list);
14337 port->fp_orphan_list = orp->orp_next;
14338 }
14339 port->fp_orphan_count--;
14340 mutex_exit(&port->fp_mutex);
14341
14342 kmem_free(orp, sizeof (*orp));
14343 fctl_copy_portmap(list + listindex++, pd);
14344 } else {
14345 prev = orp;
14346 }
14347 } else {
14348 prev = orp;
14349 }
14350 mutex_enter(&port->fp_mutex);
14351 }
14352 mutex_exit(&port->fp_mutex);
14353
14354 /*
14355 * One more pass through the list to delist old devices from
14356 * the d_id and pwwn tables and possibly add to the orphan list.
14357 */
14358
14359 for (index = 0; index < listindex; index++) {
14360 pd = list[index].map_pd;
14361 ASSERT(pd != NULL);
14362
14363 /*
14364 * Update the PLOGI results; for NS validation of the orphan
14365 * list this is redundant.
14366 *
14367 * Take care to preserve PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY if
14368 * appropriate as fctl_copy_portmap() will clear map_flags.
14369 */
14370 if (list[index].map_flags &
14371 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) {
14372 fctl_copy_portmap(list + index, pd);
14373 list[index].map_flags |=
14374 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY;
14375 } else {
14376 fctl_copy_portmap(list + index, pd);
14377 }
14378
14379 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN "
14380 "results; pd=%p, d_id=%x pwwn=%x %x %x %x %x %x %x %x",
14381 pd, pd->pd_port_id.port_id,
14382 pd->pd_port_name.raw_wwn[0],
14383 pd->pd_port_name.raw_wwn[1],
14384 pd->pd_port_name.raw_wwn[2],
14385 pd->pd_port_name.raw_wwn[3],
14386 pd->pd_port_name.raw_wwn[4],
14387 pd->pd_port_name.raw_wwn[5],
14388 pd->pd_port_name.raw_wwn[6],
14389 pd->pd_port_name.raw_wwn[7]);
14390
14391 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN "
14392 "results continued, pd=%p type=%x, flags=%x, state=%x",
14393 pd, pd->pd_type, pd->pd_flags, pd->pd_state);
14394
14395 mutex_enter(&pd->pd_mutex);
14396 if (pd->pd_type == PORT_DEVICE_OLD) {
14397 int initiator;
14398
14399 pd->pd_flags = PD_IDLE;
14400 initiator = (pd->pd_recepient ==
14401 PD_PLOGI_INITIATOR) ? 1 : 0;
14402
14403 mutex_exit(&pd->pd_mutex);
14404
14405 mutex_enter(&port->fp_mutex);
14406 mutex_enter(&pd->pd_mutex);
14407
14408 pd->pd_state = PORT_DEVICE_INVALID;
14409 fctl_delist_did_table(port, pd);
14410 fctl_delist_pwwn_table(port, pd);
14411
14412 mutex_exit(&pd->pd_mutex);
14413 mutex_exit(&port->fp_mutex);
14414
14415 if (initiator) {
14416 (void) fctl_add_orphan(port, pd, sleep);
14417 }
14418 list[index].map_pd = pd;
14419 } else {
14420 ASSERT(pd->pd_flags == PD_IDLE);
14421 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
14422 /*
14423 * Reset LOGO tolerance to zero
14424 */
14425 fctl_tc_reset(&pd->pd_logo_tc);
14426 }
14427 mutex_exit(&pd->pd_mutex);
14428 }
14429 }
14430
14431 if (ns_cmd) {
14432 fctl_free_ns_cmd(ns_cmd);
14433 }
14434 if (listindex) {
14435 (void) fp_ulp_devc_cb(port, list, listindex, count,
14436 sleep, 0);
14437 } else {
14438 kmem_free(list, sizeof (*list) * count);
14439
14440 mutex_enter(&port->fp_mutex);
14441 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
14442 --port->fp_rscn_count;
14443 }
14444 mutex_exit(&port->fp_mutex);
14445 }
14446 }
14447
14448
14449 /*
14450 * Work hard to make sense out of an RSCN page.
14451 */
14452 static void
14453 fp_validate_rscn_page(fc_local_port_t *port, fc_affected_id_t *page,
14454 job_request_t *job, fctl_ns_req_t *ns_cmd, fc_portmap_t *listptr,
14455 int *listindex, int sleep)
14456 {
14457 int rval;
14458 char ww_name[17];
14459 la_wwn_t *pwwn;
14460 fc_remote_port_t *pwwn_pd;
14461 fc_remote_port_t *did_pd;
14462
14463 did_pd = fctl_get_remote_port_by_did(port, page->aff_d_id);
14464
14465 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; "
14466 "port=%p, d_id=%x, pd=%p, rscn_count:0x%x", port, page->aff_d_id,
14467 did_pd, (uint32_t)(uintptr_t)job->job_cb_arg);
14468
14469 if (did_pd != NULL) {
14470 mutex_enter(&did_pd->pd_mutex);
14471 if (did_pd->pd_flags != PD_IDLE) {
14472 mutex_exit(&did_pd->pd_mutex);
14473 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page: "
14474 "PD is BUSY; port=%p, d_id=%x, pd=%p",
14475 port, page->aff_d_id, did_pd);
14476 return;
14477 }
14478 did_pd->pd_flags = PD_ELS_IN_PROGRESS;
14479 mutex_exit(&did_pd->pd_mutex);
14480 }
14481
14482 job->job_counter = 1;
14483
14484 pwwn = &((ns_resp_gpn_id_t *)ns_cmd->ns_data_buf)->pwwn;
14485
14486 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.port_id = page->aff_d_id;
14487 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.priv_lilp_posit = 0;
14488
14489 bzero(ns_cmd->ns_data_buf, sizeof (la_wwn_t));
14490 rval = fp_ns_query(port, ns_cmd, job, 1, sleep);
14491
14492 FP_TRACE(FP_NHEAD1(1, 0), "NS Query Response for D_ID page; rev=%x,"
14493 " in_id=%x, cmdrsp=%x, reason=%x, expln=%x",
14494 ns_cmd->ns_resp_hdr.ct_rev, ns_cmd->ns_resp_hdr.ct_inid,
14495 ns_cmd->ns_resp_hdr.ct_cmdrsp, ns_cmd->ns_resp_hdr.ct_reason,
14496 ns_cmd->ns_resp_hdr.ct_expln);
14497
14498 job->job_counter = 1;
14499
14500 if (rval != FC_SUCCESS || fctl_is_wwn_zero(pwwn) == FC_SUCCESS) {
14501 /*
14502 * What this means is that the D_ID
14503 * disappeared from the Fabric.
14504 */
14505 if (did_pd == NULL) {
14506 FP_TRACE(FP_NHEAD1(1, 0), "RSCN with D_ID page;"
14507 " NULL PD disappeared, rval=%x", rval);
14508 return;
14509 }
14510
14511 fc_wwn_to_str(&did_pd->pd_port_name, ww_name);
14512
14513 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14514 (uint32_t)(uintptr_t)job->job_cb_arg;
14515
14516 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0);
14517
14518 FP_TRACE(FP_NHEAD1(3, 0), "RSCN: PD disappeared; "
14519 "d_id=%x, PWWN=%s", page->aff_d_id, ww_name);
14520
14521 FP_TRACE(FP_NHEAD2(9, 0),
14522 "GPN_ID for D_ID=%x failed", page->aff_d_id);
14523
14524 FP_TRACE(FP_NHEAD2(9, 0),
14525 "N_x Port with D_ID=%x, PWWN=%s disappeared from"
14526 " fabric", page->aff_d_id, ww_name);
14527
14528 mutex_enter(&did_pd->pd_mutex);
14529 did_pd->pd_flags = PD_IDLE;
14530 mutex_exit(&did_pd->pd_mutex);
14531
14532 FP_TRACE(FP_NHEAD1(3, 0), "RSCN with D_ID (%x) page; "
14533 "PD disappeared, pd=%p", page->aff_d_id, did_pd);
14534
14535 return;
14536 }
14537
14538 pwwn_pd = fctl_get_remote_port_by_pwwn(port, pwwn);
14539
14540 if (did_pd != NULL && pwwn_pd != NULL && did_pd == pwwn_pd) {
14541 /*
14542 * There is no change. Do PLOGI again, add it to the
14543 * ULP portmap baggage, and return. Note: when RSCNs
14544 * arrive with per-page states, the need for PLOGI
14545 * can be determined correctly.
14546 */
14547 mutex_enter(&pwwn_pd->pd_mutex);
14548 pwwn_pd->pd_type = PORT_DEVICE_NOCHANGE;
14549 mutex_exit(&pwwn_pd->pd_mutex);
14550
14551 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14552 (uint32_t)(uintptr_t)job->job_cb_arg;
14553
14554 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd);
14555
14556 mutex_enter(&pwwn_pd->pd_mutex);
14557 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) ||
14558 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) {
14559 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name);
14560 mutex_exit(&pwwn_pd->pd_mutex);
14561
14562 rval = fp_port_login(port, page->aff_d_id, job,
14563 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL);
14564 if (rval == FC_SUCCESS) {
14565 fp_jobwait(job);
14566 rval = job->job_result;
14567
14568 /*
14569 * Reset LOGO tolerance to zero
14570 * Also we are the PLOGI initiator now.
14571 */
14572 mutex_enter(&pwwn_pd->pd_mutex);
14573 fctl_tc_reset(&pwwn_pd->pd_logo_tc);
14574 pwwn_pd->pd_recepient = PD_PLOGI_INITIATOR;
14575 mutex_exit(&pwwn_pd->pd_mutex);
14576 }
14577
14578 if (rval == FC_SUCCESS) {
14579 struct fc_portmap *map =
14580 listptr + *listindex - 1;
14581
14582 FP_TRACE(FP_NHEAD1(1, 0),
14583 "PLOGI succeeded: no skip(2)"
14584 " for D_ID %x", page->aff_d_id);
14585 map->map_flags |=
14586 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY;
14587 } else {
14588 FP_TRACE(FP_NHEAD2(9, rval),
14589 "PLOGI to D_ID=%x failed", page->aff_d_id);
14590
14591 FP_TRACE(FP_NHEAD2(9, 0),
14592 "N_x Port with D_ID=%x, PWWN=%s"
14593 " disappeared from fabric",
14594 page->aff_d_id, ww_name);
14595
14596 fp_fillout_old_map(listptr +
14597 *listindex - 1, pwwn_pd, 0);
14598 }
14599 } else {
14600 mutex_exit(&pwwn_pd->pd_mutex);
14601 }
14602
14603 mutex_enter(&did_pd->pd_mutex);
14604 did_pd->pd_flags = PD_IDLE;
14605 mutex_exit(&did_pd->pd_mutex);
14606
14607 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; "
14608 "Case ONE, rval=%x, result=%x pd=%p", page->aff_d_id, rval,
14609 job->job_result, pwwn_pd);
14610
14611 return;
14612 }
14613
14614 if (did_pd == NULL && pwwn_pd == NULL) {
14615
14616 fc_orphan_t *orp = NULL;
14617 fc_orphan_t *norp = NULL;
14618 fc_orphan_t *prev = NULL;
14619
14620 /*
14621 * Hunt down the orphan list before giving up.
14622 */
14623
14624 mutex_enter(&port->fp_mutex);
14625 if (port->fp_orphan_count) {
14626
14627 for (orp = port->fp_orphan_list; orp; orp = norp) {
14628 norp = orp->orp_next;
14629
14630 if (fctl_wwn_cmp(&orp->orp_pwwn, pwwn) != 0) {
14631 prev = orp;
14632 continue;
14633 }
14634
14635 if (prev) {
14636 prev->orp_next = orp->orp_next;
14637 } else {
14638 ASSERT(orp ==
14639 port->fp_orphan_list);
14640 port->fp_orphan_list =
14641 orp->orp_next;
14642 }
14643 port->fp_orphan_count--;
14644 break;
14645 }
14646 }
14647
14648 mutex_exit(&port->fp_mutex);
14649 pwwn_pd = fp_create_remote_port_by_ns(port,
14650 page->aff_d_id, sleep);
14651
14652 if (pwwn_pd != NULL) {
14653
14654 if (orp) {
14655 fc_wwn_to_str(&orp->orp_pwwn,
14656 ww_name);
14657
14658 FP_TRACE(FP_NHEAD2(9, 0),
14659 "N_x Port with D_ID=%x,"
14660 " PWWN=%s reappeared in fabric",
14661 page->aff_d_id, ww_name);
14662
14663 kmem_free(orp, sizeof (*orp));
14664 }
14665
14666 (listptr + *listindex)->
14667 map_rscn_info.ulp_rscn_count =
14668 (uint32_t)(uintptr_t)job->job_cb_arg;
14669
14670 fctl_copy_portmap(listptr +
14671 (*listindex)++, pwwn_pd);
14672 }
14673
14674 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; "
14675 "Case TWO", page->aff_d_id);
14676
14677 return;
14678 }
14679
14680 if (pwwn_pd != NULL && did_pd == NULL) {
14681 uint32_t old_d_id;
14682 uint32_t d_id = page->aff_d_id;
14683
14684 /*
14685 * What this means is there is a new D_ID for this
14686 * Port WWN. Take the port device off the D_ID
14687 * list and put it back with the new D_ID. Perform
14688 * PLOGI if already logged in.
14689 */
14690 mutex_enter(&port->fp_mutex);
14691 mutex_enter(&pwwn_pd->pd_mutex);
14692
14693 old_d_id = pwwn_pd->pd_port_id.port_id;
14694
14695 fctl_delist_did_table(port, pwwn_pd);
14696
14697 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14698 (uint32_t)(uintptr_t)job->job_cb_arg;
14699
14700 fp_fillout_changed_map(listptr + (*listindex)++, pwwn_pd,
14701 &d_id, NULL);
14702 fctl_enlist_did_table(port, pwwn_pd);
14703
14704 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page;"
14705 " Case THREE, pd=%p,"
14706 " state=%x", pwwn_pd, pwwn_pd->pd_state);
14707
14708 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) ||
14709 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) {
14710 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name);
14711
14712 mutex_exit(&pwwn_pd->pd_mutex);
14713 mutex_exit(&port->fp_mutex);
14714
14715 FP_TRACE(FP_NHEAD2(9, 0),
14716 "N_x Port with D_ID=%x, PWWN=%s has a new"
14717 " D_ID=%x now", old_d_id, ww_name, d_id);
14718
14719 rval = fp_port_login(port, page->aff_d_id, job,
14720 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL);
14721 if (rval == FC_SUCCESS) {
14722 fp_jobwait(job);
14723 rval = job->job_result;
14724 }
14725
14726 if (rval != FC_SUCCESS) {
14727 fp_fillout_old_map(listptr +
14728 *listindex - 1, pwwn_pd, 0);
14729 }
14730 } else {
14731 mutex_exit(&pwwn_pd->pd_mutex);
14732 mutex_exit(&port->fp_mutex);
14733 }
14734
14735 return;
14736 }
14737
14738 if (pwwn_pd == NULL && did_pd != NULL) {
14739 fc_portmap_t *ptr;
14740 uint32_t len = 1;
14741 char old_ww_name[17];
14742
14743 mutex_enter(&did_pd->pd_mutex);
14744 fc_wwn_to_str(&did_pd->pd_port_name, old_ww_name);
14745 mutex_exit(&did_pd->pd_mutex);
14746
14747 fc_wwn_to_str(pwwn, ww_name);
14748
14749 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14750 (uint32_t)(uintptr_t)job->job_cb_arg;
14751
14752 /*
14753 * What this means is that there is a new Port WWN for
14754 * this D_ID; Mark the Port device as old and provide
14755 * the new PWWN and D_ID combination as new.
14756 */
14757 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0);
14758
14759 FP_TRACE(FP_NHEAD2(9, 0),
14760 "N_x Port with D_ID=%x, PWWN=%s has a new PWWN=%s now",
14761 page->aff_d_id, old_ww_name, ww_name);
14762
14763 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14764 (uint32_t)(uintptr_t)job->job_cb_arg;
14765
14766 ptr = listptr + (*listindex)++;
14767
14768 job->job_counter = 1;
14769
14770 if (fp_ns_getmap(port, job, &ptr, &len,
14771 page->aff_d_id - 1) != FC_SUCCESS) {
14772 (*listindex)--;
14773 }
14774
14775 mutex_enter(&did_pd->pd_mutex);
14776 did_pd->pd_flags = PD_IDLE;
14777 mutex_exit(&did_pd->pd_mutex);
14778
14779 return;
14780 }
14781
14782 /*
14783 * A weird case: both the Port WWN and the D_ID exist, but they do
14784 * not match up. Trust your instincts - take the port device handle
14785 * off the Port WWN list, fix it up with the new Port WWN and put it
14786 * back. In the meantime, mark the port device corresponding to the
14787 * old Port WWN as OLD.
14788 */
14789 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; Case WEIRD, pwwn_pd=%p,"
14790 " did_pd=%p", pwwn_pd, did_pd);
14791
14792 mutex_enter(&port->fp_mutex);
14793 mutex_enter(&pwwn_pd->pd_mutex);
14794
14795 pwwn_pd->pd_type = PORT_DEVICE_OLD;
14796 pwwn_pd->pd_state = PORT_DEVICE_INVALID;
14797 fctl_delist_did_table(port, pwwn_pd);
14798 fctl_delist_pwwn_table(port, pwwn_pd);
14799
14800 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued,"
14801 " pwwn-d_id=%x pwwn-wwn=%x %x %x %x %x %x %x %x",
14802 pwwn_pd->pd_port_id.port_id,
14803
14804 pwwn_pd->pd_port_name.raw_wwn[0],
14805 pwwn_pd->pd_port_name.raw_wwn[1],
14806 pwwn_pd->pd_port_name.raw_wwn[2],
14807 pwwn_pd->pd_port_name.raw_wwn[3],
14808 pwwn_pd->pd_port_name.raw_wwn[4],
14809 pwwn_pd->pd_port_name.raw_wwn[5],
14810 pwwn_pd->pd_port_name.raw_wwn[6],
14811 pwwn_pd->pd_port_name.raw_wwn[7]);
14812
14813 mutex_exit(&pwwn_pd->pd_mutex);
14814 mutex_exit(&port->fp_mutex);
14815
14816 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14817 (uint32_t)(uintptr_t)job->job_cb_arg;
14818
14819 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd);
14820
14821 mutex_enter(&port->fp_mutex);
14822 mutex_enter(&did_pd->pd_mutex);
14823
14824 fctl_delist_pwwn_table(port, did_pd);
14825
14826 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14827 (uint32_t)(uintptr_t)job->job_cb_arg;
14828
14829 fp_fillout_changed_map(listptr + (*listindex)++, did_pd, NULL, pwwn);
14830 fctl_enlist_pwwn_table(port, did_pd);
14831
14832 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued,"
14833 " d_id=%x, state=%x, did-wwn=%x %x %x %x %x %x %x %x",
14834 did_pd->pd_port_id.port_id, did_pd->pd_state,
14835
14836 did_pd->pd_port_name.raw_wwn[0],
14837 did_pd->pd_port_name.raw_wwn[1],
14838 did_pd->pd_port_name.raw_wwn[2],
14839 did_pd->pd_port_name.raw_wwn[3],
14840 did_pd->pd_port_name.raw_wwn[4],
14841 did_pd->pd_port_name.raw_wwn[5],
14842 did_pd->pd_port_name.raw_wwn[6],
14843 did_pd->pd_port_name.raw_wwn[7]);
14844
14845 if ((did_pd->pd_state == PORT_DEVICE_LOGGED_IN) ||
14846 (did_pd->pd_aux_flags & PD_LOGGED_OUT)) {
14847 mutex_exit(&did_pd->pd_mutex);
14848 mutex_exit(&port->fp_mutex);
14849
14850 rval = fp_port_login(port, page->aff_d_id, job,
14851 FP_CMD_PLOGI_RETAIN, sleep, did_pd, NULL);
14852 if (rval == FC_SUCCESS) {
14853 fp_jobwait(job);
14854 if (job->job_result != FC_SUCCESS) {
14855 fp_fillout_old_map(listptr +
14856 *listindex - 1, did_pd, 0);
14857 }
14858 } else {
14859 fp_fillout_old_map(listptr + *listindex - 1, did_pd, 0);
14860 }
14861 } else {
14862 mutex_exit(&did_pd->pd_mutex);
14863 mutex_exit(&port->fp_mutex);
14864 }
14865
14866 mutex_enter(&did_pd->pd_mutex);
14867 did_pd->pd_flags = PD_IDLE;
14868 mutex_exit(&did_pd->pd_mutex);
14869 }
14870
14871
14872 /*
14873 * Check with NS for the presence of this port WWN
14874 */
14875 static int
14876 fp_ns_validate_device(fc_local_port_t *port, fc_remote_port_t *pd,
14877 job_request_t *job, int polled, int sleep)
14878 {
14879 la_wwn_t pwwn;
14880 uint32_t flags;
14881 fctl_ns_req_t *ns_cmd;
14882
14883 flags = FCTL_NS_VALIDATE_PD | ((polled) ? 0: FCTL_NS_ASYNC_REQUEST);
14884 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
14885 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
14886 flags, sleep);
14887 if (ns_cmd == NULL) {
14888 return (FC_NOMEM);
14889 }
14890
14891 mutex_enter(&pd->pd_mutex);
14892 pwwn = pd->pd_port_name;
14893 mutex_exit(&pd->pd_mutex);
14894
14895 ns_cmd->ns_cmd_code = NS_GID_PN;
14896 ns_cmd->ns_pd = pd;
14897 ((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = pwwn;
14898 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0;
14899 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0;
14900
14901 return (fp_ns_query(port, ns_cmd, job, polled, sleep));
14902 }
14903
14904
14905 /*
14906 * Sanity check the LILP map returned by FCA
14907 */
14908 static int
14909 fp_validate_lilp_map(fc_lilpmap_t *lilp_map)
14910 {
14911 int count;
14912
14913 if (lilp_map->lilp_length == 0) {
14914 return (FC_FAILURE);
14915 }
14916
14917 for (count = 0; count < lilp_map->lilp_length; count++) {
14918 if (fp_is_valid_alpa(lilp_map->lilp_alpalist[count]) !=
14919 FC_SUCCESS) {
14920 return (FC_FAILURE);
14921 }
14922 }
14923
14924 return (FC_SUCCESS);
14925 }
14926
14927
14928 /*
14929 * Sanity check if the AL_PA is a valid address
14930 */
14931 static int
14932 fp_is_valid_alpa(uchar_t al_pa)
14933 {
14934 int count;
14935
14936 for (count = 0; count < sizeof (fp_valid_alpas); count++) {
14937 if (al_pa == fp_valid_alpas[count] || al_pa == 0) {
14938 return (FC_SUCCESS);
14939 }
14940 }
14941
14942 return (FC_FAILURE);
14943 }
14944
14945
14946 /*
14947 * Post unsolicited callbacks to ULPs
14948 */
14949 static void
14950 fp_ulp_unsol_cb(void *arg)
14951 {
14952 fp_unsol_spec_t *ub_spec = (fp_unsol_spec_t *)arg;
14953
14954 fctl_ulp_unsol_cb(ub_spec->port, ub_spec->buf,
14955 ub_spec->buf->ub_frame.type);
14956 kmem_free(ub_spec, sizeof (*ub_spec));
14957 }
14958
14959
14960 /*
14961 * Perform message reporting in a consistent manner. Unless there is
14962 * a strong reason NOT to use this function (which is very very rare)
14963 * all message reporting should go through this.
14964 */
14965 static void
14966 fp_printf(fc_local_port_t *port, int level, fp_mesg_dest_t dest, int fc_errno,
14967 fc_packet_t *pkt, const char *fmt, ...)
14968 {
14969 caddr_t buf;
14970 va_list ap;
14971
14972 switch (level) {
14973 case CE_NOTE:
14974 if ((port->fp_verbose & FP_WARNING_MESSAGES) == 0) {
14975 return;
14976 }
14977 break;
14978
14979 case CE_WARN:
14980 if ((port->fp_verbose & FP_FATAL_MESSAGES) == 0) {
14981 return;
14982 }
14983 break;
14984 }
14985
14986 buf = kmem_zalloc(256, KM_NOSLEEP);
14987 if (buf == NULL) {
14988 return;
14989 }
14990
14991 (void) sprintf(buf, "fp(%d): ", port->fp_instance);
14992
14993 va_start(ap, fmt);
14994 (void) vsprintf(buf + strlen(buf), fmt, ap);
14995 va_end(ap);
14996
14997 if (fc_errno) {
14998 char *errmsg;
14999
15000 (void) fc_ulp_error(fc_errno, &errmsg);
15001 (void) sprintf(buf + strlen(buf), " FC Error=%s", errmsg);
15002 } else {
15003 if (pkt) {
15004 caddr_t state, reason, action, expln;
15005
15006 (void) fc_ulp_pkt_error(pkt, &state, &reason,
15007 &action, &expln);
15008
15009 (void) sprintf(buf + strlen(buf),
15010 " state=%s, reason=%s", state, reason);
15011
15012 if (pkt->pkt_resp_resid) {
15013 (void) sprintf(buf + strlen(buf),
15014 " resp resid=%x\n", pkt->pkt_resp_resid);
15015 }
15016 }
15017 }
15018
15019 switch (dest) {
15020 case FP_CONSOLE_ONLY:
15021 cmn_err(level, "^%s", buf);
15022 break;
15023
15024 case FP_LOG_ONLY:
15025 cmn_err(level, "!%s", buf);
15026 break;
15027
15028 default:
15029 cmn_err(level, "%s", buf);
15030 break;
15031 }
15032
15033 kmem_free(buf, 256);
15034 }
15035
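/*
 * Log in to the remote port whose Port WWN is passed in via the fcio
 * request. On a fabric the D_ID is looked up through the name server
 * (GID_PN); otherwise the already known remote port entry is used. A
 * successful login is reported to the ULPs as PORT_DEVICE_USER_LOGIN.
 */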
15036 static int
15037 fp_fcio_login(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
15038 {
15039 int ret;
15040 uint32_t d_id;
15041 la_wwn_t pwwn;
15042 fc_remote_port_t *pd = NULL;
15043 fc_remote_port_t *held_pd = NULL;
15044 fctl_ns_req_t *ns_cmd;
15045 fc_portmap_t *changelist;
15046
15047 bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));
15048
15049 mutex_enter(&port->fp_mutex);
15050 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
15051 mutex_exit(&port->fp_mutex);
15052 job->job_counter = 1;
15053
15054 job->job_result = FC_SUCCESS;
15055
15056 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
15057 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
15058 FCTL_NS_BUF_IS_USERLAND, KM_SLEEP);
15059
15060 ASSERT(ns_cmd != NULL);
15061
15062 ns_cmd->ns_cmd_code = NS_GID_PN;
15063 ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = pwwn;
15064
15065 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
15066
15067 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) {
15068 if (ret != FC_SUCCESS) {
15069 fcio->fcio_errno = ret;
15070 } else {
15071 fcio->fcio_errno = job->job_result;
15072 }
15073 fctl_free_ns_cmd(ns_cmd);
15074 return (EIO);
15075 }
15076 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
15077 fctl_free_ns_cmd(ns_cmd);
15078 } else {
15079 mutex_exit(&port->fp_mutex);
15080
15081 held_pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
15082 if (held_pd == NULL) {
15083 fcio->fcio_errno = FC_BADWWN;
15084 return (EIO);
15085 }
15086 pd = held_pd;
15087
15088 mutex_enter(&pd->pd_mutex);
15089 d_id = pd->pd_port_id.port_id;
15090 mutex_exit(&pd->pd_mutex);
15091 }
15092
15093 job->job_counter = 1;
15094
15095 pd = fctl_get_remote_port_by_did(port, d_id);
15096
15097 if (pd) {
15098 mutex_enter(&pd->pd_mutex);
15099 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
15100 pd->pd_login_count++;
15101 mutex_exit(&pd->pd_mutex);
15102
15103 fcio->fcio_errno = FC_SUCCESS;
15104 if (held_pd) {
15105 fctl_release_remote_port(held_pd);
15106 }
15107
15108 return (0);
15109 }
15110 mutex_exit(&pd->pd_mutex);
15111 } else {
15112 mutex_enter(&port->fp_mutex);
15113 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
15114 mutex_exit(&port->fp_mutex);
15115 pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP);
15116 if (pd == NULL) {
15117 fcio->fcio_errno = FC_FAILURE;
15118 if (held_pd) {
15119 fctl_release_remote_port(held_pd);
15120 }
15121 return (EIO);
15122 }
15123 } else {
15124 mutex_exit(&port->fp_mutex);
15125 }
15126 }
15127
15128 job->job_flags &= ~JOB_TYPE_FP_ASYNC;
15129 job->job_counter = 1;
15130
15131 ret = fp_port_login(port, d_id, job, FP_CMD_PLOGI_RETAIN,
15132 KM_SLEEP, pd, NULL);
15133
15134 if (ret != FC_SUCCESS) {
15135 fcio->fcio_errno = ret;
15136 if (held_pd) {
15137 fctl_release_remote_port(held_pd);
15138 }
15139 return (EIO);
15140 }
15141 fp_jobwait(job);
15142
15143 fcio->fcio_errno = job->job_result;
15144
15145 if (held_pd) {
15146 fctl_release_remote_port(held_pd);
15147 }
15148
15149 if (job->job_result != FC_SUCCESS) {
15150 return (EIO);
15151 }
15152
15153 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
15154 if (pd == NULL) {
15155 fcio->fcio_errno = FC_BADDEV;
15156 return (ENODEV);
15157 }
15158
15159 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);
15160
15161 fctl_copy_portmap(changelist, pd);
15162 changelist->map_type = PORT_DEVICE_USER_LOGIN;
15163
15164 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);
15165
15166 mutex_enter(&pd->pd_mutex);
15167 pd->pd_type = PORT_DEVICE_NOCHANGE;
15168 mutex_exit(&pd->pd_mutex);
15169
15170 fctl_release_remote_port(pd);
15171
15172 return (0);
15173 }
15174
15175
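/*
 * Log out of the remote port whose Port WWN is passed in via the fcio
 * request. If more than one login is outstanding only the login count
 * is decremented; otherwise a LOGO ELS is sent, the remote port is
 * delisted and the change is reported to the ULPs as
 * PORT_DEVICE_USER_LOGOUT.
 */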
15176 static int
15177 fp_fcio_logout(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
15178 {
15179 la_wwn_t pwwn;
15180 fp_cmd_t *cmd;
15181 fc_portmap_t *changelist;
15182 fc_remote_port_t *pd;
15183
15184 bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));
15185
15186 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
15187 if (pd == NULL) {
15188 fcio->fcio_errno = FC_BADWWN;
15189 return (ENXIO);
15190 }
15191
15192 mutex_enter(&pd->pd_mutex);
15193 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
15194 fcio->fcio_errno = FC_LOGINREQ;
15195 mutex_exit(&pd->pd_mutex);
15196
15197 fctl_release_remote_port(pd);
15198
15199 return (EINVAL);
15200 }
15201
15202 ASSERT(pd->pd_login_count >= 1);
15203
15204 if (pd->pd_flags == PD_ELS_IN_PROGRESS) {
15205 fcio->fcio_errno = FC_FAILURE;
15206 mutex_exit(&pd->pd_mutex);
15207
15208 fctl_release_remote_port(pd);
15209
15210 return (EBUSY);
15211 }
15212
15213 if (pd->pd_login_count > 1) {
15214 pd->pd_login_count--;
15215 fcio->fcio_errno = FC_SUCCESS;
15216 mutex_exit(&pd->pd_mutex);
15217
15218 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);
15219
15220 fctl_copy_portmap(changelist, pd);
15221 changelist->map_type = PORT_DEVICE_USER_LOGOUT;
15222
15223 fctl_release_remote_port(pd);
15224
15225 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);
15226
15227 return (0);
15228 }
15229
15230 pd->pd_flags = PD_ELS_IN_PROGRESS;
15231 mutex_exit(&pd->pd_mutex);
15232
15233 job->job_counter = 1;
15234
15235 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t),
15236 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd);
15237 if (cmd == NULL) {
15238 fcio->fcio_errno = FC_NOMEM;
15239 fctl_release_remote_port(pd);
15240
15241 mutex_enter(&pd->pd_mutex);
15242 pd->pd_flags = PD_IDLE;
15243 mutex_exit(&pd->pd_mutex);
15244
15245 return (ENOMEM);
15246 }
15247
15248 mutex_enter(&port->fp_mutex);
15249 mutex_enter(&pd->pd_mutex);
15250
15251 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
15252 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
15253 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE;
15254 cmd->cmd_retry_count = 1;
15255 cmd->cmd_ulp_pkt = NULL;
15256
15257 fp_logo_init(pd, cmd, job);
15258
15259 mutex_exit(&pd->pd_mutex);
15260 mutex_exit(&port->fp_mutex);
15261
15262 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
15263 mutex_enter(&pd->pd_mutex);
15264 pd->pd_flags = PD_IDLE;
15265 mutex_exit(&pd->pd_mutex);
15266
15267 fp_free_pkt(cmd);
15268 fctl_release_remote_port(pd);
15269
15270 return (EIO);
15271 }
15272
15273 fp_jobwait(job);
15274
15275 fcio->fcio_errno = job->job_result;
15276 if (job->job_result != FC_SUCCESS) {
15277 mutex_enter(&pd->pd_mutex);
15278 pd->pd_flags = PD_IDLE;
15279 mutex_exit(&pd->pd_mutex);
15280
15281 fctl_release_remote_port(pd);
15282
15283 return (EIO);
15284 }
15285
15286 ASSERT(pd != NULL);
15287
15288 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);
15289
15290 fctl_copy_portmap(changelist, pd);
15291 changelist->map_type = PORT_DEVICE_USER_LOGOUT;
15292 changelist->map_state = PORT_DEVICE_INVALID;
15293
15294 mutex_enter(&port->fp_mutex);
15295 mutex_enter(&pd->pd_mutex);
15296
15297 fctl_delist_did_table(port, pd);
15298 fctl_delist_pwwn_table(port, pd);
15299 pd->pd_flags = PD_IDLE;
15300
15301 mutex_exit(&pd->pd_mutex);
15302 mutex_exit(&port->fp_mutex);
15303
15304 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);
15305
15306 fctl_release_remote_port(pd);
15307
15308 return (0);
15309 }
15310
15311
15312
15313 /*
15314 * Send a syslog event for adapter port level events.
15315 */
15316 static void
15317 fp_log_port_event(fc_local_port_t *port, char *subclass)
15318 {
15319 nvlist_t *attr_list;
15320
15321 if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
15322 KM_SLEEP) != DDI_SUCCESS) {
15323 goto alloc_failed;
15324 }
15325
15326 if (nvlist_add_uint32(attr_list, "instance",
15327 port->fp_instance) != DDI_SUCCESS) {
15328 goto error;
15329 }
15330
15331 if (nvlist_add_byte_array(attr_list, "port-wwn",
15332 port->fp_service_params.nport_ww_name.raw_wwn,
15333 sizeof (la_wwn_t)) != DDI_SUCCESS) {
15334 goto error;
15335 }
15336
15337 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
15338 subclass, attr_list, NULL, DDI_SLEEP);
15339
15340 nvlist_free(attr_list);
15341 return;
15342
15343 error:
15344 nvlist_free(attr_list);
15345 alloc_failed:
15346 FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
15347 }
15348
15349
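/*
 * Send a syslog event for target port level events.
 */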
15350 static void
15351 fp_log_target_event(fc_local_port_t *port, char *subclass, la_wwn_t tgt_pwwn,
15352 uint32_t port_id)
15353 {
15354 nvlist_t *attr_list;
15355
15356 if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
15357 KM_SLEEP) != DDI_SUCCESS) {
15358 goto alloc_failed;
15359 }
15360
15361 if (nvlist_add_uint32(attr_list, "instance",
15362 port->fp_instance) != DDI_SUCCESS) {
15363 goto error;
15364 }
15365
15366 if (nvlist_add_byte_array(attr_list, "port-wwn",
15367 port->fp_service_params.nport_ww_name.raw_wwn,
15368 sizeof (la_wwn_t)) != DDI_SUCCESS) {
15369 goto error;
15370 }
15371
15372 if (nvlist_add_byte_array(attr_list, "target-port-wwn",
15373 tgt_pwwn.raw_wwn, sizeof (la_wwn_t)) != DDI_SUCCESS) {
15374 goto error;
15375 }
15376
15377 if (nvlist_add_uint32(attr_list, "target-port-id",
15378 port_id) != DDI_SUCCESS) {
15379 goto error;
15380 }
15381
15382 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
15383 subclass, attr_list, NULL, DDI_SLEEP);
15384
15385 nvlist_free(attr_list);
15386 return;
15387
15388 error:
15389 nvlist_free(attr_list);
15390 alloc_failed:
15391 FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
15392 }
15393
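/*
 * Map a remote port state to the corresponding FC-HBA port state.
 */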
15394 static uint32_t
15395 fp_map_remote_port_state(uint32_t rm_state)
15396 {
15397 switch (rm_state) {
15398 case PORT_DEVICE_LOGGED_IN:
15399 return (FC_HBA_PORTSTATE_ONLINE);
15400 case PORT_DEVICE_VALID:
15401 case PORT_DEVICE_INVALID:
15402 default:
15403 return (FC_HBA_PORTSTATE_UNKNOWN);
15404 }
15405 }
15406