1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2020 RackTop Systems, Inc.
24 *
25 * NOT a DDI compliant Sun Fibre Channel port driver(fp)
26 *
27 */
28
29 #include <sys/types.h>
30 #include <sys/varargs.h>
31 #include <sys/param.h>
32 #include <sys/errno.h>
33 #include <sys/uio.h>
34 #include <sys/buf.h>
35 #include <sys/modctl.h>
36 #include <sys/open.h>
37 #include <sys/file.h>
38 #include <sys/kmem.h>
39 #include <sys/poll.h>
40 #include <sys/conf.h>
41 #include <sys/thread.h>
42 #include <sys/var.h>
43 #include <sys/cmn_err.h>
44 #include <sys/stat.h>
45 #include <sys/ddi.h>
46 #include <sys/sunddi.h>
47 #include <sys/promif.h>
48 #include <sys/nvpair.h>
49 #include <sys/byteorder.h>
50 #include <sys/scsi/scsi.h>
51 #include <sys/fibre-channel/fc.h>
52 #include <sys/fibre-channel/impl/fc_ulpif.h>
53 #include <sys/fibre-channel/impl/fc_fcaif.h>
54 #include <sys/fibre-channel/impl/fctl_private.h>
55 #include <sys/fibre-channel/impl/fc_portif.h>
56 #include <sys/fibre-channel/impl/fp.h>
57
58 /* These are defined in fctl.c! */
59 extern int did_table_size;
60 extern int pwwn_table_size;
61
62 static struct cb_ops fp_cb_ops = {
63 fp_open, /* open */
64 fp_close, /* close */
65 nodev, /* strategy */
66 nodev, /* print */
67 nodev, /* dump */
68 nodev, /* read */
69 nodev, /* write */
70 fp_ioctl, /* ioctl */
71 nodev, /* devmap */
72 nodev, /* mmap */
73 nodev, /* segmap */
74 nochpoll, /* chpoll */
75 ddi_prop_op, /* cb_prop_op */
76 0, /* streamtab */
77 D_NEW | D_MP | D_HOTPLUG, /* cb_flag */
78 CB_REV, /* rev */
79 nodev, /* aread */
80 nodev /* awrite */
81 };
82
83 static struct dev_ops fp_ops = {
84 DEVO_REV, /* build revision */
85 0, /* reference count */
86 fp_getinfo, /* getinfo */
87 nulldev, /* identify - Obsoleted */
88 nulldev, /* probe */
89 fp_attach, /* attach */
90 fp_detach, /* detach */
91 nodev, /* reset */
92 &fp_cb_ops, /* cb_ops */
93 NULL, /* bus_ops */
94 fp_power, /* power */
95 ddi_quiesce_not_needed /* quiesce */
96 };
97
98 #define FP_VERSION "20091123-1.101"
99 #define FP_NAME_VERSION "SunFC Port v" FP_VERSION
100
101 char *fp_version = FP_NAME_VERSION;
102
103 static struct modldrv modldrv = {
104 &mod_driverops, /* Type of Module */
105 FP_NAME_VERSION, /* Name/Version of fp */
106 &fp_ops /* driver ops */
107 };
108
109 static struct modlinkage modlinkage = {
110 MODREV_1, /* Rev of the loadable modules system */
111 &modldrv, /* NULL terminated list of */
112 NULL /* Linkage structures */
113 };
114
115
116
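/*
 * Name server registration requests issued for the local port, in the
 * order listed below: port name (RPN_ID), node name (RNN_ID), class of
 * service (RCS_ID), FC-4 types (RFT_ID), port type (RPT_ID), symbolic
 * port name (RSPN_ID) and symbolic node name (RSNN_NN).
 */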
117 static uint16_t ns_reg_cmds[] = {
118 NS_RPN_ID,
119 NS_RNN_ID,
120 NS_RCS_ID,
121 NS_RFT_ID,
122 NS_RPT_ID,
123 NS_RSPN_ID,
124 NS_RSNN_NN
125 };
126
127 struct fp_xlat {
128 uchar_t xlat_state;
129 int xlat_rval;
130 } fp_xlat [] = {
131 { FC_PKT_SUCCESS, FC_SUCCESS },
132 { FC_PKT_REMOTE_STOP, FC_FAILURE },
133 { FC_PKT_LOCAL_RJT, FC_FAILURE },
134 { FC_PKT_NPORT_RJT, FC_ELS_PREJECT },
135 { FC_PKT_FABRIC_RJT, FC_ELS_FREJECT },
136 { FC_PKT_LOCAL_BSY, FC_TRAN_BUSY },
137 { FC_PKT_TRAN_BSY, FC_TRAN_BUSY },
138 { FC_PKT_NPORT_BSY, FC_PBUSY },
139 { FC_PKT_FABRIC_BSY, FC_FBUSY },
140 { FC_PKT_LS_RJT, FC_FAILURE },
141 { FC_PKT_BA_RJT, FC_FAILURE },
142 { FC_PKT_TIMEOUT, FC_FAILURE },
143 { FC_PKT_TRAN_ERROR, FC_TRANSPORT_ERROR },
144 { FC_PKT_FAILURE, FC_FAILURE },
145 { FC_PKT_PORT_OFFLINE, FC_OFFLINE }
146 };
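/*
 * fp_xlat maps FCA packet completion states (pkt_state) to the FC_*
 * return codes handed back to callers. A minimal sketch of how such a
 * table can be scanned (illustrative only, not the driver's actual
 * lookup helper; "state" stands for the packet's pkt_state):
 *
 *	for (i = 0; i < sizeof (fp_xlat) / sizeof (fp_xlat[0]); i++) {
 *		if (fp_xlat[i].xlat_state == state)
 *			return (fp_xlat[i].xlat_rval);
 *	}
 *	return (FC_FAILURE);
 */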
147
148 static uchar_t fp_valid_alpas[] = {
149 0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, 0x17, 0x18, 0x1B,
150 0x1D, 0x1E, 0x1F, 0x23, 0x25, 0x26, 0x27, 0x29, 0x2A,
151 0x2B, 0x2C, 0x2D, 0x2E, 0x31, 0x32, 0x33, 0x34, 0x35,
152 0x36, 0x39, 0x3A, 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49,
153 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54,
154 0x55, 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67,
155 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, 0x73,
156 0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, 0x81, 0x82,
157 0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, 0x9B, 0x9D, 0x9E,
158 0x9F, 0xA3, 0xA5, 0xA6, 0xA7, 0xA9, 0xAA, 0xAB, 0xAC,
159 0xAD, 0xAE, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB9,
160 0xBA, 0xBC, 0xC3, 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB,
161 0xCC, 0xCD, 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
162 0xD9, 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF
163 };
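/*
 * fp_valid_alpas lists, in ascending order, the 126 arbitrated loop
 * physical addresses (AL_PAs) that an NL_Port may take on a loop;
 * AL_PA 0x00 is reserved for the FL_Port and is not included here.
 */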
164
165 static struct fp_perms {
166 uint16_t fp_ioctl_cmd;
167 uchar_t fp_open_flag;
168 } fp_perm_list [] = {
169 { FCIO_GET_NUM_DEVS, FP_OPEN },
170 { FCIO_GET_DEV_LIST, FP_OPEN },
171 { FCIO_GET_SYM_PNAME, FP_OPEN },
172 { FCIO_GET_SYM_NNAME, FP_OPEN },
173 { FCIO_SET_SYM_PNAME, FP_EXCL },
174 { FCIO_SET_SYM_NNAME, FP_EXCL },
175 { FCIO_GET_LOGI_PARAMS, FP_OPEN },
176 { FCIO_DEV_LOGIN, FP_EXCL },
177 { FCIO_DEV_LOGOUT, FP_EXCL },
178 { FCIO_GET_STATE, FP_OPEN },
179 { FCIO_DEV_REMOVE, FP_EXCL },
180 { FCIO_GET_FCODE_REV, FP_OPEN },
181 { FCIO_GET_FW_REV, FP_OPEN },
182 { FCIO_GET_DUMP_SIZE, FP_OPEN },
183 { FCIO_FORCE_DUMP, FP_EXCL },
184 { FCIO_GET_DUMP, FP_OPEN },
185 { FCIO_GET_TOPOLOGY, FP_OPEN },
186 { FCIO_RESET_LINK, FP_EXCL },
187 { FCIO_RESET_HARD, FP_EXCL },
188 { FCIO_RESET_HARD_CORE, FP_EXCL },
189 { FCIO_DIAG, FP_OPEN },
190 { FCIO_NS, FP_EXCL },
191 { FCIO_DOWNLOAD_FW, FP_EXCL },
192 { FCIO_DOWNLOAD_FCODE, FP_EXCL },
193 { FCIO_LINK_STATUS, FP_OPEN },
194 { FCIO_GET_HOST_PARAMS, FP_OPEN },
195 { FCIO_GET_NODE_ID, FP_OPEN },
196 { FCIO_SET_NODE_ID, FP_EXCL },
197 { FCIO_SEND_NODE_ID, FP_OPEN },
198 { FCIO_GET_ADAPTER_ATTRIBUTES, FP_OPEN },
199 { FCIO_GET_OTHER_ADAPTER_PORTS, FP_OPEN },
200 { FCIO_GET_ADAPTER_PORT_ATTRIBUTES, FP_OPEN },
201 { FCIO_GET_DISCOVERED_PORT_ATTRIBUTES, FP_OPEN },
202 { FCIO_GET_PORT_ATTRIBUTES, FP_OPEN },
203 { FCIO_GET_ADAPTER_PORT_STATS, FP_OPEN },
204 { FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES, FP_OPEN },
205 { FCIO_GET_NPIV_PORT_LIST, FP_OPEN },
206 { FCIO_DELETE_NPIV_PORT, FP_OPEN },
207 { FCIO_GET_NPIV_ATTRIBUTES, FP_OPEN },
208 { FCIO_CREATE_NPIV_PORT, FP_OPEN },
209 { FCIO_NPIV_GET_ADAPTER_ATTRIBUTES, FP_OPEN }
210 };
211
212 static char *fp_pm_comps[] = {
213 "NAME=FC Port",
214 "0=Port Down",
215 "1=Port Up"
216 };
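/*
 * These strings are published as the "pm-components" property during
 * attach (see fp_attach_handler), describing a single power-manageable
 * component with two power levels: 0 (port down) and 1 (port up).
 */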
217
218
219 #ifdef _LITTLE_ENDIAN
220 #define MAKE_BE_32(x) { \
221 uint32_t *ptr1, i; \
222 ptr1 = (uint32_t *)(x); \
223 for (i = 0; i < sizeof (*(x)) / sizeof (uint32_t); i++) { \
224 *ptr1 = BE_32(*ptr1); \
225 ptr1++; \
226 } \
227 }
228 #else
229 #define MAKE_BE_32(x)
230 #endif
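/*
 * MAKE_BE_32 byte-swaps an object in place, one 32-bit word at a time,
 * on little-endian hosts (it is a no-op on big-endian hosts). A hedged
 * usage sketch, assuming a payload made up of 32-bit aligned fields:
 *
 *	la_els_logi_t payload;
 *	... fill in payload in host byte order ...
 *	MAKE_BE_32(&payload);
 *
 * Note that the object's size must be a multiple of sizeof (uint32_t)
 * for every word to be converted.
 */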
231
232 static uchar_t fp_verbosity = (FP_WARNING_MESSAGES | FP_FATAL_MESSAGES);
233 static uint32_t fp_options = 0;
234
235 static int fp_cmd_wait_cnt = FP_CMDWAIT_DELAY;
236 static int fp_retry_delay = FP_RETRY_DELAY; /* retry after this delay */
237 static int fp_retry_count = FP_RETRY_COUNT; /* number of retries */
238 unsigned int fp_offline_ticker; /* seconds */
239
240 /*
241 * Driver global variable to anchor the list of soft state structs for
242 * all fp driver instances. Used with the Solaris DDI soft state functions.
243 */
244 static void *fp_driver_softstate;
245
246 static clock_t fp_retry_ticks;
247 static clock_t fp_offline_ticks;
248
249 static int fp_retry_ticker;
250 static uint32_t fp_unsol_buf_count = FP_UNSOL_BUF_COUNT;
251 static uint32_t fp_unsol_buf_size = FP_UNSOL_BUF_SIZE;
252
253 static int fp_log_size = FP_LOG_SIZE;
254 static int fp_trace = FP_TRACE_DEFAULT;
255 static fc_trace_logq_t *fp_logq = NULL;
256
257 int fp_get_adapter_paths(char *pathList, int count);
258 static void fp_log_port_event(fc_local_port_t *port, char *subclass);
259 static void fp_log_target_event(fc_local_port_t *port, char *subclass,
260 la_wwn_t tgt_pwwn, uint32_t port_id);
261 static uint32_t fp_map_remote_port_state(uint32_t rm_state);
262 static void fp_init_symbolic_names(fc_local_port_t *port);
263
264
265 /*
266 * Perform global initialization
267 */
268 int
269 _init(void)
270 {
271 int ret;
272
273 if ((ret = ddi_soft_state_init(&fp_driver_softstate,
274 sizeof (struct fc_local_port), 8)) != 0) {
275 return (ret);
276 }
277
278 if ((ret = scsi_hba_init(&modlinkage)) != 0) {
279 ddi_soft_state_fini(&fp_driver_softstate);
280 return (ret);
281 }
282
283 fp_logq = fc_trace_alloc_logq(fp_log_size);
284
285 if ((ret = mod_install(&modlinkage)) != 0) {
286 fc_trace_free_logq(fp_logq);
287 ddi_soft_state_fini(&fp_driver_softstate);
288 scsi_hba_fini(&modlinkage);
289 }
290
291 return (ret);
292 }
293
294
295 /*
296 * Prepare for driver unload
297 */
298 int
299 _fini(void)
300 {
301 int ret;
302
303 if ((ret = mod_remove(&modlinkage)) == 0) {
304 fc_trace_free_logq(fp_logq);
305 ddi_soft_state_fini(&fp_driver_softstate);
306 scsi_hba_fini(&modlinkage);
307 }
308
309 return (ret);
310 }
311
312
313 /*
314 * Request mod_info() to handle all cases
315 */
316 int
317 _info(struct modinfo *modinfo)
318 {
319 return (mod_info(&modlinkage, modinfo));
320 }
321
322
323 /*
324 * fp_attach:
325 *
326 * The respective cmd handlers take care of performing
327 * ULP related invocations
328 */
329 static int
330 fp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
331 {
332 int rval;
333
334 /*
335 * We check the value of fp_offline_ticker at this
336 * point. The variable is global for the driver and
337 * not specific to an instance.
338 *
339 * If there is no user-defined value found in /etc/system
340 * or fp.conf, then we use 90 seconds (FP_OFFLINE_TICKER).
341 * The minimum setting for this offline timeout according
342 * to the FC-FS2 standard (Fibre Channel Framing and
343 * Signalling-2, see www.t11.org) is R_T_TOV == 100msec.
344 *
345 * We do not recommend setting the value to less than 10
346 * seconds (RA_TOV) or more than 90 seconds. If this
347 * variable is greater than 90 seconds then drivers above
348 * fp (fcp, sd, scsi_vhci, vxdmp et al) might complain.
349 */
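	/*
	 * For example, an administrator could override the default with a
	 * driver.conf(5) entry in fp.conf such as the following
	 * (hypothetical value shown):
	 *
	 *	fp_offline_ticker=45;
	 */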
350
351 fp_offline_ticker = ddi_prop_get_int(DDI_DEV_T_ANY,
352 dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fp_offline_ticker",
353 FP_OFFLINE_TICKER);
354
355 if ((fp_offline_ticker < 10) ||
356 (fp_offline_ticker > 90)) {
357 cmn_err(CE_WARN, "Setting fp_offline_ticker to "
358 "%d second(s). This is outside the "
359 "recommended range of 10..90 seconds",
360 fp_offline_ticker);
361 }
362
363 /*
364 * Tick every second when there are commands to retry.
365 * It should tick at the least granular value of pkt_timeout
366 * (which is one second)
367 */
368 fp_retry_ticker = 1;
369
370 fp_retry_ticks = drv_usectohz(fp_retry_ticker * 1000 * 1000);
371 fp_offline_ticks = drv_usectohz(fp_offline_ticker * 1000 * 1000);
372
373 switch (cmd) {
374 case DDI_ATTACH:
375 rval = fp_attach_handler(dip);
376 break;
377
378 case DDI_RESUME:
379 rval = fp_resume_handler(dip);
380 break;
381
382 default:
383 rval = DDI_FAILURE;
384 break;
385 }
386 return (rval);
387 }
388
389
390 /*
391 * fp_detach:
392 *
393 * If a ULP fails to handle the cmd request, the converse of
394 * cmd is invoked for the ULPs that previously succeeded with
395 * the cmd request.
396 */
397 static int
398 fp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
399 {
400 int rval = DDI_FAILURE;
401 fc_local_port_t *port;
402 fc_attach_cmd_t converse;
403 uint8_t cnt;
404
405 if ((port = ddi_get_soft_state(fp_driver_softstate,
406 ddi_get_instance(dip))) == NULL) {
407 return (DDI_FAILURE);
408 }
409
410 mutex_enter(&port->fp_mutex);
411
412 if (port->fp_ulp_attach) {
413 mutex_exit(&port->fp_mutex);
414 return (DDI_FAILURE);
415 }
416
417 switch (cmd) {
418 case DDI_DETACH:
419 if (port->fp_task != FP_TASK_IDLE) {
420 mutex_exit(&port->fp_mutex);
421 return (DDI_FAILURE);
422 }
423
424 /* Let's attempt to quit the job handler gracefully */
425 port->fp_soft_state |= FP_DETACH_INPROGRESS;
426
427 mutex_exit(&port->fp_mutex);
428 converse = FC_CMD_ATTACH;
429 if (fctl_detach_ulps(port, FC_CMD_DETACH,
430 &modlinkage) != FC_SUCCESS) {
431 mutex_enter(&port->fp_mutex);
432 port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
433 mutex_exit(&port->fp_mutex);
434 rval = DDI_FAILURE;
435 break;
436 }
437
438 mutex_enter(&port->fp_mutex);
439 for (cnt = 0; (port->fp_job_head) && (cnt < fp_cmd_wait_cnt);
440 cnt++) {
441 mutex_exit(&port->fp_mutex);
442 delay(drv_usectohz(1000000));
443 mutex_enter(&port->fp_mutex);
444 }
445
446 if (port->fp_job_head) {
447 mutex_exit(&port->fp_mutex);
448 rval = DDI_FAILURE;
449 break;
450 }
451 mutex_exit(&port->fp_mutex);
452
453 rval = fp_detach_handler(port);
454 break;
455
456 case DDI_SUSPEND:
457 mutex_exit(&port->fp_mutex);
458 converse = FC_CMD_RESUME;
459 if (fctl_detach_ulps(port, FC_CMD_SUSPEND,
460 &modlinkage) != FC_SUCCESS) {
461 rval = DDI_FAILURE;
462 break;
463 }
464 if ((rval = fp_suspend_handler(port)) != DDI_SUCCESS) {
465 (void) callb_generic_cpr(&port->fp_cpr_info,
466 CB_CODE_CPR_RESUME);
467 }
468 break;
469
470 default:
471 mutex_exit(&port->fp_mutex);
472 break;
473 }
474
475 /*
476 * Use softint to perform reattach. Mark fp_ulp_attach so we
477 * don't attempt to do this repeatedly on behalf of some persistent
478 * caller.
479 */
480 if (rval != DDI_SUCCESS) {
481 mutex_enter(&port->fp_mutex);
482 port->fp_ulp_attach = 1;
483
484 /*
485 * If the port is in the low power mode then there is a
486 * possibility that the FCA too could be in low power mode.
487 * Try to raise the power before calling attach ulps.
488 */
489
490 if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
491 (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
492 mutex_exit(&port->fp_mutex);
493 (void) pm_raise_power(port->fp_port_dip,
494 FP_PM_COMPONENT, FP_PM_PORT_UP);
495 } else {
496 mutex_exit(&port->fp_mutex);
497 }
498
499
500 fp_attach_ulps(port, converse);
501
502 mutex_enter(&port->fp_mutex);
503 while (port->fp_ulp_attach) {
504 cv_wait(&port->fp_attach_cv, &port->fp_mutex);
505 }
506
507 port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
508
509 /*
510 * Mark state as detach failed so asynchronous ULP attach
511 * events (downstream, not the ones we're initiating with
512 * the call to fp_attach_ulps) are not honored. We're
513 * really still in pending detach.
514 */
515 port->fp_soft_state |= FP_DETACH_FAILED;
516
517 mutex_exit(&port->fp_mutex);
518 }
519
520 return (rval);
521 }
522
523
524 /*
525 * fp_getinfo:
526 * Given the device number, return either the
527 * dev_info_t pointer or the instance number.
528 */
529
530 /* ARGSUSED */
531 static int
532 fp_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
533 {
534 int rval;
535 minor_t instance;
536 fc_local_port_t *port;
537
538 rval = DDI_SUCCESS;
539 instance = getminor((dev_t)arg);
540
541 switch (cmd) {
542 case DDI_INFO_DEVT2DEVINFO:
543 if ((port = ddi_get_soft_state(fp_driver_softstate,
544 instance)) == NULL) {
545 rval = DDI_FAILURE;
546 break;
547 }
548 *result = (void *)port->fp_port_dip;
549 break;
550
551 case DDI_INFO_DEVT2INSTANCE:
552 *result = (void *)(uintptr_t)instance;
553 break;
554
555 default:
556 rval = DDI_FAILURE;
557 break;
558 }
559
560 return (rval);
561 }
562
563
564 /*
565 * Entry point for power up and power down request from kernel
566 */
567 static int
568 fp_power(dev_info_t *dip, int comp, int level)
569 {
570 int rval = DDI_FAILURE;
571 fc_local_port_t *port;
572
573 port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));
574 if (port == NULL || comp != FP_PM_COMPONENT) {
575 return (rval);
576 }
577
578 switch (level) {
579 case FP_PM_PORT_UP:
580 rval = DDI_SUCCESS;
581
582 /*
583 * If the port is DDI_SUSPENDed, let the DDI_RESUME
584 * code complete the rediscovery.
585 */
586 mutex_enter(&port->fp_mutex);
587 if (port->fp_soft_state & FP_SOFT_SUSPEND) {
588 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
589 port->fp_pm_level = FP_PM_PORT_UP;
590 mutex_exit(&port->fp_mutex);
591 fctl_attach_ulps(port, FC_CMD_POWER_UP, &modlinkage);
592 break;
593 }
594
595 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
596 ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);
597
598 port->fp_pm_level = FP_PM_PORT_UP;
599 rval = fp_power_up(port);
600 if (rval != DDI_SUCCESS) {
601 port->fp_pm_level = FP_PM_PORT_DOWN;
602 }
603 } else {
604 port->fp_pm_level = FP_PM_PORT_UP;
605 }
606 mutex_exit(&port->fp_mutex);
607 break;
608
609 case FP_PM_PORT_DOWN:
610 mutex_enter(&port->fp_mutex);
611
612 ASSERT(!(port->fp_soft_state & FP_SOFT_NO_PMCOMP));
613 if (port->fp_soft_state & FP_SOFT_NO_PMCOMP) {
614 /*
615 * PM framework goofed up. We don't
616 * have any PM components. Let's never go down.
617 */
618 mutex_exit(&port->fp_mutex);
619 break;
620
621 }
622
623 if (port->fp_ulp_attach) {
624 /* We shouldn't let the power go down */
625 mutex_exit(&port->fp_mutex);
626 break;
627 }
628
629 /*
630 * Not a whole lot to do if we are detaching
631 */
632 if (port->fp_soft_state & FP_SOFT_IN_DETACH) {
633 port->fp_pm_level = FP_PM_PORT_DOWN;
634 mutex_exit(&port->fp_mutex);
635 rval = DDI_SUCCESS;
636 break;
637 }
638
639 if (!port->fp_pm_busy && !port->fp_pm_busy_nocomp) {
640 port->fp_pm_level = FP_PM_PORT_DOWN;
641
642 rval = fp_power_down(port);
643 if (rval != DDI_SUCCESS) {
644 port->fp_pm_level = FP_PM_PORT_UP;
645 ASSERT(!(port->fp_soft_state &
646 FP_SOFT_POWER_DOWN));
647 } else {
648 ASSERT(port->fp_soft_state &
649 FP_SOFT_POWER_DOWN);
650 }
651 }
652 mutex_exit(&port->fp_mutex);
653 break;
654
655 default:
656 break;
657 }
658
659 return (rval);
660 }
661
662
663 /*
664 * Open FC port devctl node
665 */
666 static int
667 fp_open(dev_t *devp, int flag, int otype, cred_t *credp)
668 {
669 int instance;
670 fc_local_port_t *port;
671
672 if (otype != OTYP_CHR) {
673 return (EINVAL);
674 }
675
676 /*
677 * This is not a toy to play with. Allow only powerful
678 * users (hopefully knowledgeable) to access the port
679 * (A hacker potentially could download a sick binary
680 * file into FCA)
681 */
682 if (drv_priv(credp)) {
683 return (EPERM);
684 }
685
686 instance = (int)getminor(*devp);
687
688 port = ddi_get_soft_state(fp_driver_softstate, instance);
689 if (port == NULL) {
690 return (ENXIO);
691 }
692
693 mutex_enter(&port->fp_mutex);
694 if (port->fp_flag & FP_EXCL) {
695 /*
696 * It is already open for exclusive access.
697 * So shut the door on this caller.
698 */
699 mutex_exit(&port->fp_mutex);
700 return (EBUSY);
701 }
702
703 if (flag & FEXCL) {
704 if (port->fp_flag & FP_OPEN) {
705 /*
706 * Exclusive operation not possible
707 * as it is already opened
708 */
709 mutex_exit(&port->fp_mutex);
710 return (EBUSY);
711 }
712 port->fp_flag |= FP_EXCL;
713 }
714 port->fp_flag |= FP_OPEN;
715 mutex_exit(&port->fp_mutex);
716
717 return (0);
718 }
719
720
721 /*
722 * The driver close entry point is called on the last close()
723 * of a device. So it is perfectly alright to just clobber the
724 * open flag and reset it to idle (instead of having to reset
725 * each flag bit). For any confusion, check out close(9E).
726 */
727
728 /* ARGSUSED */
729 static int
730 fp_close(dev_t dev, int flag, int otype, cred_t *credp)
731 {
732 int instance;
733 fc_local_port_t *port;
734
735 if (otype != OTYP_CHR) {
736 return (EINVAL);
737 }
738
739 instance = (int)getminor(dev);
740
741 port = ddi_get_soft_state(fp_driver_softstate, instance);
742 if (port == NULL) {
743 return (ENXIO);
744 }
745
746 mutex_enter(&port->fp_mutex);
747 if ((port->fp_flag & FP_OPEN) == 0) {
748 mutex_exit(&port->fp_mutex);
749 return (ENODEV);
750 }
751 port->fp_flag = FP_IDLE;
752 mutex_exit(&port->fp_mutex);
753
754 return (0);
755 }
756
757 /*
758 * Handle IOCTL requests
759 */
760
761 /* ARGSUSED */
762 static int
763 fp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
764 {
765 int instance;
766 int ret = 0;
767 fcio_t fcio;
768 fc_local_port_t *port;
769
770 instance = (int)getminor(dev);
771
772 port = ddi_get_soft_state(fp_driver_softstate, instance);
773 if (port == NULL) {
774 return (ENXIO);
775 }
776
777 mutex_enter(&port->fp_mutex);
778 if ((port->fp_flag & FP_OPEN) == 0) {
779 mutex_exit(&port->fp_mutex);
780 return (ENXIO);
781 }
782
783 if (port->fp_soft_state & FP_SOFT_SUSPEND) {
784 mutex_exit(&port->fp_mutex);
785 return (ENXIO);
786 }
787
788 mutex_exit(&port->fp_mutex);
789
790 /* this will raise power if necessary */
791 ret = fctl_busy_port(port);
792 if (ret != 0) {
793 return (ret);
794 }
795
796 ASSERT(port->fp_pm_level == FP_PM_PORT_UP);
797
798
799 switch (cmd) {
800 case FCIO_CMD: {
801 #ifdef _MULTI_DATAMODEL
802 switch (ddi_model_convert_from(mode & FMODELS)) {
803 case DDI_MODEL_ILP32: {
804 struct fcio32 fcio32;
805
806 if (ddi_copyin((void *)data, (void *)&fcio32,
807 sizeof (struct fcio32), mode)) {
808 ret = EFAULT;
809 break;
810 }
811 fcio.fcio_xfer = fcio32.fcio_xfer;
812 fcio.fcio_cmd = fcio32.fcio_cmd;
813 fcio.fcio_flags = fcio32.fcio_flags;
814 fcio.fcio_cmd_flags = fcio32.fcio_cmd_flags;
815 fcio.fcio_ilen = (size_t)fcio32.fcio_ilen;
816 fcio.fcio_ibuf =
817 (caddr_t)(uintptr_t)fcio32.fcio_ibuf;
818 fcio.fcio_olen = (size_t)fcio32.fcio_olen;
819 fcio.fcio_obuf =
820 (caddr_t)(uintptr_t)fcio32.fcio_obuf;
821 fcio.fcio_alen = (size_t)fcio32.fcio_alen;
822 fcio.fcio_abuf =
823 (caddr_t)(uintptr_t)fcio32.fcio_abuf;
824 fcio.fcio_errno = fcio32.fcio_errno;
825 break;
826 }
827
828 case DDI_MODEL_NONE:
829 if (ddi_copyin((void *)data, (void *)&fcio,
830 sizeof (fcio_t), mode)) {
831 ret = EFAULT;
832 }
833 break;
834 }
835 #else /* _MULTI_DATAMODEL */
836 if (ddi_copyin((void *)data, (void *)&fcio,
837 sizeof (fcio_t), mode)) {
838 ret = EFAULT;
839 break;
840 }
841 #endif /* _MULTI_DATAMODEL */
842 if (!ret) {
843 ret = fp_fciocmd(port, data, mode, &fcio);
844 }
845 break;
846 }
847
848 default:
849 ret = fctl_ulp_port_ioctl(port, dev, cmd, data,
850 mode, credp, rval);
851 }
852
853 fctl_idle_port(port);
854
855 return (ret);
856 }
857
858
859 /*
860 * Init Symbolic Port Name and Node Name
861 * LV will try to get the symbolic names from the FCA driver
862 * and register them with the name server;
863 * if LV fails to get them,
864 * LV will register its default symbolic names with the name server.
865 * The default symbolic node name format is:
866 * <hostname>:<hba driver name><instance>
867 * The default symbolic port name format is:
868 * <fp path name>
869 */
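/*
 * Hypothetical examples of the resulting defaults: a symbolic node name
 * such as "host1:qlc0" and a symbolic port name such as
 * "/pci@0,0/pci10df,fe00@1/fp@0,0" (both illustrative only).
 */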
870 static void
871 fp_init_symbolic_names(fc_local_port_t *port)
872 {
873 const char *vendorname = ddi_driver_name(port->fp_fca_dip);
874 char *sym_name;
875 char fcaname[50] = {0};
876 int hostnlen, fcanlen;
877
878 if (port->fp_sym_node_namelen == 0) {
879 hostnlen = strlen(utsname.nodename);
880 (void) snprintf(fcaname, sizeof (fcaname),
881 "%s%d", vendorname, ddi_get_instance(port->fp_fca_dip));
882 fcanlen = strlen(fcaname);
883
884 sym_name = kmem_zalloc(hostnlen + fcanlen + 2, KM_SLEEP);
885 (void) sprintf(sym_name, "%s:%s", utsname.nodename, fcaname);
886 port->fp_sym_node_namelen = strlen(sym_name);
887 if (port->fp_sym_node_namelen >= FCHBA_SYMB_NAME_LEN) {
888 port->fp_sym_node_namelen = FCHBA_SYMB_NAME_LEN;
889 }
890 (void) strncpy(port->fp_sym_node_name, sym_name,
891 port->fp_sym_node_namelen);
892 kmem_free(sym_name, hostnlen + fcanlen + 2);
893 }
894
895 if (port->fp_sym_port_namelen == 0) {
896 char *pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
897
898 (void) ddi_pathname(port->fp_port_dip, pathname);
899 port->fp_sym_port_namelen = strlen(pathname);
900 if (port->fp_sym_port_namelen >= FCHBA_SYMB_NAME_LEN) {
901 port->fp_sym_port_namelen = FCHBA_SYMB_NAME_LEN;
902 }
903 (void) strncpy(port->fp_sym_port_name, pathname,
904 port->fp_sym_port_namelen);
905 kmem_free(pathname, MAXPATHLEN);
906 }
907 }
908
909
910 /*
911 * Perform port attach
912 */
913 static int
914 fp_attach_handler(dev_info_t *dip)
915 {
916 int rval;
917 int instance;
918 int port_num;
919 int port_len;
920 char name[30];
921 char i_pwwn[17];
922 fp_cmd_t *pkt;
923 uint32_t ub_count;
924 fc_local_port_t *port;
925 job_request_t *job;
926 fc_local_port_t *phyport = NULL;
927 int portpro1;
928 char pwwn[17], nwwn[17];
929
930 instance = ddi_get_instance(dip);
931 port_len = sizeof (port_num);
932 rval = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
933 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port",
934 (caddr_t)&port_num, &port_len);
935 if (rval != DDI_SUCCESS) {
936 cmn_err(CE_WARN, "fp(%d): No port property in devinfo",
937 instance);
938 return (DDI_FAILURE);
939 }
940
941 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance,
942 DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
943 cmn_err(CE_WARN, "fp(%d): failed to create devctl minor node",
944 instance);
945 return (DDI_FAILURE);
946 }
947
948 if (ddi_create_minor_node(dip, "fc", S_IFCHR, instance,
949 DDI_NT_FC_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
950 cmn_err(CE_WARN, "fp(%d): failed to create fc attachment"
951 " point minor node", instance);
952 ddi_remove_minor_node(dip, NULL);
953 return (DDI_FAILURE);
954 }
955
956 if (ddi_soft_state_zalloc(fp_driver_softstate, instance)
957 != DDI_SUCCESS) {
958 cmn_err(CE_WARN, "fp(%d): failed to alloc soft state",
959 instance);
960 ddi_remove_minor_node(dip, NULL);
961 return (DDI_FAILURE);
962 }
963 port = ddi_get_soft_state(fp_driver_softstate, instance);
964
965 (void) sprintf(port->fp_ibuf, "fp(%d)", instance);
966
967 port->fp_instance = instance;
968 port->fp_ulp_attach = 1;
969 port->fp_port_num = port_num;
970 port->fp_verbose = fp_verbosity;
971 port->fp_options = fp_options;
972
973 port->fp_fca_dip = ddi_get_parent(dip);
974 port->fp_port_dip = dip;
975 port->fp_fca_tran = (fc_fca_tran_t *)
976 ddi_get_driver_private(port->fp_fca_dip);
977
978 port->fp_task = port->fp_last_task = FP_TASK_IDLE;
979
980 /*
981 * Init the starting value of fp_rscn_count. Note that if
982 * FC_INVALID_RSCN_COUNT is 0 (which is what it currently is), the
983 * actual # of RSCNs will be (fp_rscn_count - 1)
984 */
985 port->fp_rscn_count = FC_INVALID_RSCN_COUNT + 1;
986
987 mutex_init(&port->fp_mutex, NULL, MUTEX_DRIVER, NULL);
988 cv_init(&port->fp_cv, NULL, CV_DRIVER, NULL);
989 cv_init(&port->fp_attach_cv, NULL, CV_DRIVER, NULL);
990
991 (void) sprintf(name, "fp%d_cache", instance);
992
993 if ((portpro1 = ddi_prop_get_int(DDI_DEV_T_ANY,
994 dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
995 "phyport-instance", -1)) != -1) {
996 phyport = ddi_get_soft_state(fp_driver_softstate, portpro1);
997 fc_wwn_to_str(&phyport->fp_service_params.nport_ww_name, pwwn);
998 fc_wwn_to_str(&phyport->fp_service_params.node_ww_name, nwwn);
999 port->fp_npiv_type = FC_NPIV_PORT;
1000 }
1001
1002 /*
1003 * Allocate the pool of fc_packet_t structs to be used with
1004 * this fp instance.
1005 */
1006 port->fp_pkt_cache = kmem_cache_create(name,
1007 (port->fp_fca_tran->fca_pkt_size) + sizeof (fp_cmd_t), 8,
1008 fp_cache_constructor, fp_cache_destructor, NULL, (void *)port,
1009 NULL, 0);
1010 port->fp_out_fpcmds = 0;
1011 if (port->fp_pkt_cache == NULL) {
1012 goto cache_alloc_failed;
1013 }
1014
1015
1016 /*
1017 * Allocate the d_id and pwwn hash tables for all remote ports
1018 * connected to this local port.
1019 */
1020 port->fp_did_table = kmem_zalloc(did_table_size *
1021 sizeof (struct d_id_hash), KM_SLEEP);
1022
1023 port->fp_pwwn_table = kmem_zalloc(pwwn_table_size *
1024 sizeof (struct pwwn_hash), KM_SLEEP);
1025
1026 port->fp_taskq = taskq_create("fp_ulp_callback", 1,
1027 MINCLSYSPRI, 1, 16, 0);
1028
1029 /* Indicate that we don't have the PM components yet */
1030 port->fp_soft_state |= FP_SOFT_NO_PMCOMP;
1031
1032 /*
1033 * Bind the callbacks with the FCA driver. This will open the gate
1034 * for asynchronous callbacks, so after this call the fp_mutex
1035 * must be held when updating the fc_local_port_t struct.
1036 *
1037 * This is done _before_ setting up the job thread so we can avoid
1038 * cleaning up after the thread_create() in the error path. This
1039 * also means fp will be operating with fp_els_resp_pkt set to NULL.
1040 */
1041 if (fp_bind_callbacks(port) != DDI_SUCCESS) {
1042 goto bind_callbacks_failed;
1043 }
1044
1045 if (phyport) {
1046 mutex_enter(&phyport->fp_mutex);
1047 if (phyport->fp_port_next) {
1048 phyport->fp_port_next->fp_port_prev = port;
1049 port->fp_port_next = phyport->fp_port_next;
1050 phyport->fp_port_next = port;
1051 port->fp_port_prev = phyport;
1052 } else {
1053 phyport->fp_port_next = port;
1054 phyport->fp_port_prev = port;
1055 port->fp_port_next = phyport;
1056 port->fp_port_prev = phyport;
1057 }
1058 mutex_exit(&phyport->fp_mutex);
1059 }
1060
1061 /*
1062 * Init Symbolic Names
1063 */
1064 fp_init_symbolic_names(port);
1065
1066 pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t), sizeof (la_els_logi_t),
1067 KM_SLEEP, NULL);
1068
1069 if (pkt == NULL) {
1070 cmn_err(CE_WARN, "fp(%d): failed to allocate ELS packet",
1071 instance);
1072 goto alloc_els_packet_failed;
1073 }
1074
1075 (void) thread_create(NULL, 0, fp_job_handler, port, 0, &p0, TS_RUN,
1076 v.v_maxsyspri - 2);
1077
1078 fc_wwn_to_str(&port->fp_service_params.nport_ww_name, i_pwwn);
1079 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-port",
1080 i_pwwn) != DDI_PROP_SUCCESS) {
1081 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
1082 "fp(%d): Updating 'initiator-port' property"
1083 " on fp dev_info node failed", instance);
1084 }
1085
1086 fc_wwn_to_str(&port->fp_service_params.node_ww_name, i_pwwn);
1087 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-node",
1088 i_pwwn) != DDI_PROP_SUCCESS) {
1089 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
1090 "fp(%d): Updating 'initiator-node' property"
1091 " on fp dev_info node failed", instance);
1092 }
1093
1094 mutex_enter(&port->fp_mutex);
1095 port->fp_els_resp_pkt = pkt;
1096 mutex_exit(&port->fp_mutex);
1097
1098 /*
1099 * Determine the count of unsolicited buffers this FCA can support
1100 */
1101 fp_retrieve_caps(port);
1102
1103 /*
1104 * Allocate unsolicited buffer tokens
1105 */
1106 if (port->fp_ub_count) {
1107 ub_count = port->fp_ub_count;
1108 port->fp_ub_tokens = kmem_zalloc(ub_count *
1109 sizeof (*port->fp_ub_tokens), KM_SLEEP);
1110 /*
1111 * Do not fail the attach if unsolicited buffer allocation
1112 * fails; Just try to get along with whatever the FCA can do.
1113 */
1114 if (fc_ulp_uballoc(port, &ub_count, fp_unsol_buf_size,
1115 FC_TYPE_EXTENDED_LS, port->fp_ub_tokens) !=
1116 FC_SUCCESS || ub_count != port->fp_ub_count) {
1117 cmn_err(CE_WARN, "fp(%d): failed to allocate "
1118 " Unsolicited buffers. proceeding with attach...",
1119 instance);
1120 kmem_free(port->fp_ub_tokens,
1121 sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
1122 port->fp_ub_tokens = NULL;
1123 }
1124 }
1125
1126 fp_load_ulp_modules(dip, port);
1127
1128 /*
1129 * Enable DDI_SUSPEND and DDI_RESUME for this instance.
1130 */
1131 (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
1132 "pm-hardware-state", "needs-suspend-resume",
1133 strlen("needs-suspend-resume") + 1);
1134
1135 /*
1136 * fctl maintains a list of all port handles, so
1137 * help fctl add this one to its list now.
1138 */
1139 mutex_enter(&port->fp_mutex);
1140 fctl_add_port(port);
1141
1142 /*
1143 * If a state change is already in progress, set the bind state to
1144 * OFFLINE as well, so further state change callbacks into ULPs
1145 * will pass the appropriate states
1146 */
1147 if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE ||
1148 port->fp_statec_busy) {
1149 port->fp_bind_state = FC_STATE_OFFLINE;
1150 mutex_exit(&port->fp_mutex);
1151
1152 fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
1153 } else {
1154 /*
1155 * Without dropping the mutex, ensure that the port
1156 * startup happens ahead of state change callback
1157 * processing
1158 */
1159 ASSERT(port->fp_job_tail == NULL && port->fp_job_head == NULL);
1160
1161 port->fp_last_task = port->fp_task;
1162 port->fp_task = FP_TASK_PORT_STARTUP;
1163
1164 job = fctl_alloc_job(JOB_PORT_STARTUP, JOB_TYPE_FCTL_ASYNC,
1165 fp_startup_done, (opaque_t)port, KM_SLEEP);
1166
1167 port->fp_job_head = port->fp_job_tail = job;
1168
1169 cv_signal(&port->fp_cv);
1170
1171 mutex_exit(&port->fp_mutex);
1172 }
1173
1174 mutex_enter(&port->fp_mutex);
1175 while (port->fp_ulp_attach) {
1176 cv_wait(&port->fp_attach_cv, &port->fp_mutex);
1177 }
1178 mutex_exit(&port->fp_mutex);
1179
1180 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
1181 "pm-components", fp_pm_comps,
1182 sizeof (fp_pm_comps) / sizeof (fp_pm_comps[0])) !=
1183 DDI_PROP_SUCCESS) {
1184 FP_TRACE(FP_NHEAD2(9, 0), "Failed to create PM"
1185 " components property, PM disabled on this port.");
1186 mutex_enter(&port->fp_mutex);
1187 port->fp_pm_level = FP_PM_PORT_UP;
1188 mutex_exit(&port->fp_mutex);
1189 } else {
1190 if (pm_raise_power(dip, FP_PM_COMPONENT,
1191 FP_PM_PORT_UP) != DDI_SUCCESS) {
1192 FP_TRACE(FP_NHEAD2(9, 0), "Failed to raise"
1193 " power level");
1194 mutex_enter(&port->fp_mutex);
1195 port->fp_pm_level = FP_PM_PORT_UP;
1196 mutex_exit(&port->fp_mutex);
1197 }
1198
1199 /*
1200 * Don't unset the FP_SOFT_NO_PMCOMP flag until after
1201 * the call to pm_raise_power. The PM framework can't
1202 * handle multiple threads calling into it during attach.
1203 */
1204
1205 mutex_enter(&port->fp_mutex);
1206 port->fp_soft_state &= ~FP_SOFT_NO_PMCOMP;
1207 mutex_exit(&port->fp_mutex);
1208 }
1209
1210 ddi_report_dev(dip);
1211
1212 fp_log_port_event(port, ESC_SUNFC_PORT_ATTACH);
1213
1214 return (DDI_SUCCESS);
1215
1216 /*
1217 * Unwind any/all preceding allocations in the event of an error.
1218 */
1219
1220 alloc_els_packet_failed:
1221
1222 if (port->fp_fca_handle != NULL) {
1223 port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
1224 port->fp_fca_handle = NULL;
1225 }
1226
1227 if (port->fp_ub_tokens != NULL) {
1228 (void) fc_ulp_ubfree(port, port->fp_ub_count,
1229 port->fp_ub_tokens);
1230 kmem_free(port->fp_ub_tokens,
1231 port->fp_ub_count * sizeof (*port->fp_ub_tokens));
1232 port->fp_ub_tokens = NULL;
1233 }
1234
1235 if (port->fp_els_resp_pkt != NULL) {
1236 fp_free_pkt(port->fp_els_resp_pkt);
1237 port->fp_els_resp_pkt = NULL;
1238 }
1239
1240 bind_callbacks_failed:
1241
1242 if (port->fp_taskq != NULL) {
1243 taskq_destroy(port->fp_taskq);
1244 }
1245
1246 if (port->fp_pwwn_table != NULL) {
1247 kmem_free(port->fp_pwwn_table,
1248 pwwn_table_size * sizeof (struct pwwn_hash));
1249 port->fp_pwwn_table = NULL;
1250 }
1251
1252 if (port->fp_did_table != NULL) {
1253 kmem_free(port->fp_did_table,
1254 did_table_size * sizeof (struct d_id_hash));
1255 port->fp_did_table = NULL;
1256 }
1257
1258 if (port->fp_pkt_cache != NULL) {
1259 kmem_cache_destroy(port->fp_pkt_cache);
1260 port->fp_pkt_cache = NULL;
1261 }
1262
1263 cache_alloc_failed:
1264
1265 cv_destroy(&port->fp_attach_cv);
1266 cv_destroy(&port->fp_cv);
1267 mutex_destroy(&port->fp_mutex);
1268 ddi_remove_minor_node(port->fp_port_dip, NULL);
1269 ddi_soft_state_free(fp_driver_softstate, instance);
1270 ddi_prop_remove_all(dip);
1271
1272 return (DDI_FAILURE);
1273 }
1274
1275
1276 /*
1277 * Handle DDI_RESUME request
1278 */
1279 static int
1280 fp_resume_handler(dev_info_t *dip)
1281 {
1282 int rval;
1283 fc_local_port_t *port;
1284
1285 port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));
1286
1287 ASSERT(port != NULL);
1288
1289 #ifdef DEBUG
1290 mutex_enter(&port->fp_mutex);
1291 ASSERT(port->fp_soft_state & FP_SOFT_SUSPEND);
1292 mutex_exit(&port->fp_mutex);
1293 #endif
1294
1295 /*
1296 * If the port was power suspended, raise the power level
1297 */
1298 mutex_enter(&port->fp_mutex);
1299 if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
1300 (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
1301 ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);
1302
1303 mutex_exit(&port->fp_mutex);
1304 if (pm_raise_power(dip, FP_PM_COMPONENT,
1305 FP_PM_PORT_UP) != DDI_SUCCESS) {
1306 FP_TRACE(FP_NHEAD2(9, 0),
1307 "Failed to raise the power level");
1308 return (DDI_FAILURE);
1309 }
1310 mutex_enter(&port->fp_mutex);
1311 }
1312 port->fp_soft_state &= ~FP_SOFT_SUSPEND;
1313 mutex_exit(&port->fp_mutex);
1314
1315 /*
1316 * All the discovery is initiated and handled by the per-port thread.
1317 * Further, all the discovery is done in callback mode
1318 * (not polled mode); in a specific case such as this, the discovery
1319 * is required to happen in polled mode. The easiest way out is
1320 * to bail out port thread and get started. Come back and fix this
1321 * to do on demand discovery initiated by ULPs. ULPs such as FCP
1322 * will do on-demand discovery during pre-power-up busctl handling
1323 * which will only be possible when SCSA provides a new HBA vector
1324 * for sending down the PM busctl requests.
1325 */
1326 (void) callb_generic_cpr(&port->fp_cpr_info, CB_CODE_CPR_RESUME);
1327
1328 rval = fp_resume_all(port, FC_CMD_RESUME);
1329 if (rval != DDI_SUCCESS) {
1330 mutex_enter(&port->fp_mutex);
1331 port->fp_soft_state |= FP_SOFT_SUSPEND;
1332 mutex_exit(&port->fp_mutex);
1333 (void) callb_generic_cpr(&port->fp_cpr_info,
1334 CB_CODE_CPR_CHKPT);
1335 }
1336
1337 return (rval);
1338 }
1339
1340 /*
1341 * Perform FC Port power on initialization
1342 */
1343 static int
1344 fp_power_up(fc_local_port_t *port)
1345 {
1346 int rval;
1347
1348 ASSERT(MUTEX_HELD(&port->fp_mutex));
1349
1350 ASSERT((port->fp_soft_state & FP_SOFT_SUSPEND) == 0);
1351 ASSERT(port->fp_soft_state & FP_SOFT_POWER_DOWN);
1352
1353 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
1354
1355 mutex_exit(&port->fp_mutex);
1356
1357 rval = fp_resume_all(port, FC_CMD_POWER_UP);
1358 if (rval != DDI_SUCCESS) {
1359 mutex_enter(&port->fp_mutex);
1360 port->fp_soft_state |= FP_SOFT_POWER_DOWN;
1361 } else {
1362 mutex_enter(&port->fp_mutex);
1363 }
1364
1365 return (rval);
1366 }
1367
1368
1369 /*
1370 * It is important to note that the power may possibly be removed between
1371 * SUSPEND and the ensuing RESUME operation. In such a context the underlying
1372 * FC port hardware would have gone through an OFFLINE to ONLINE transition
1373 * (hardware state). In this case, the port driver may need to rediscover the
1374 * topology, perform LOGINs, register with the name server again and perform
1375 * any such port initialization procedures. To perform LOGINs, the driver could
1376 * use the port device handle to see if a LOGIN needs to be performed and use
1377 * the D_ID and WWN in it. The LOGINs may fail (if the hardware is reconfigured
1378 * or removed) which will be reflected in the map the ULPs will see.
1379 */
1380 static int
1381 fp_resume_all(fc_local_port_t *port, fc_attach_cmd_t cmd)
1382 {
1383
1384 ASSERT(!MUTEX_HELD(&port->fp_mutex));
1385
1386 if (fp_bind_callbacks(port) != DDI_SUCCESS) {
1387 return (DDI_FAILURE);
1388 }
1389
1390 mutex_enter(&port->fp_mutex);
1391
1392 /*
1393 * If there are commands queued for delayed retry, rather than
1394 * working out the hard way which ones are good for
1395 * restart and which ones are not (ELSs are definitely not good,
1396 * as the port will have to go through a new spin of rediscovery
1397 * now), just flush them all out.
1398 */
1399 if (port->fp_restore & FP_RESTORE_WAIT_TIMEOUT) {
1400 fp_cmd_t *cmd;
1401
1402 port->fp_restore &= ~FP_RESTORE_WAIT_TIMEOUT;
1403
1404 mutex_exit(&port->fp_mutex);
1405 while ((cmd = fp_deque_cmd(port)) != NULL) {
1406 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
1407 fp_iodone(cmd);
1408 }
1409 mutex_enter(&port->fp_mutex);
1410 }
1411
1412 if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE) {
1413 if ((port->fp_restore & FP_RESTORE_OFFLINE_TIMEOUT) ||
1414 port->fp_dev_count) {
1415 port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
1416 port->fp_offline_tid = timeout(fp_offline_timeout,
1417 (caddr_t)port, fp_offline_ticks);
1418 }
1419 if (port->fp_job_head) {
1420 cv_signal(&port->fp_cv);
1421 }
1422 mutex_exit(&port->fp_mutex);
1423 fctl_attach_ulps(port, cmd, &modlinkage);
1424 } else {
1425 struct job_request *job;
1426
1427 /*
1428 * If an OFFLINE timer was running at the time of
1429 * suspending, there is no need to restart it as
1430 * the port is ONLINE now.
1431 */
1432 port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
1433 if (port->fp_statec_busy == 0) {
1434 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
1435 }
1436 port->fp_statec_busy++;
1437 mutex_exit(&port->fp_mutex);
1438
1439 job = fctl_alloc_job(JOB_PORT_ONLINE,
1440 JOB_CANCEL_ULP_NOTIFICATION, NULL, NULL, KM_SLEEP);
1441 fctl_enque_job(port, job);
1442
1443 fctl_jobwait(job);
1444 fctl_remove_oldies(port);
1445
1446 fctl_attach_ulps(port, cmd, &modlinkage);
1447 fctl_dealloc_job(job);
1448 }
1449
1450 return (DDI_SUCCESS);
1451 }
1452
1453
1454 /*
1455 * At this time, there shouldn't be any I/O requests on this port.
1456 * But the unsolicited callbacks from the underlying FCA port need
1457 * to be handled very carefully. The steps followed to handle the
1458 * DDI_DETACH are:
1459 * + Grab the port driver mutex, check if the unsolicited
1460 * callback is currently under processing. If true, fail
1461 * the DDI_DETACH request by printing a message; If false
1462 * mark the DDI_DETACH as under progress, so that any
1463 * further unsolicited callbacks get bounced.
1464 * + Perform PRLO/LOGO if necessary, cleanup all the data
1465 * structures.
1466 * + Get the job_handler thread to gracefully exit.
1467 * + Unregister callbacks with the FCA port.
1468 * + Now that some peace is found, notify all the ULPs of
1469 * DDI_DETACH request (using ulp_port_detach entry point)
1470 * + Free all mutexes, semaphores, conditional variables.
1471 * + Free the soft state, return success.
1472 *
1473 * Important considerations:
1474 * Port driver de-registers state change and unsolicited
1475 * callbacks before taking up the task of notifying ULPs
1476 * and performing PRLO and LOGOs.
1477 *
1478 * A port may go offline at the time PRLO/LOGO is being
1479 * requested. It is expected of all FCA drivers to fail
1480 * such requests either immediately with a FC_OFFLINE
1481 * return code to fc_fca_transport() or return the packet
1482 * asynchronously with pkt state set to FC_PKT_PORT_OFFLINE
1483 */
1484 static int
1485 fp_detach_handler(fc_local_port_t *port)
1486 {
1487 job_request_t *job;
1488 uint32_t delay_count;
1489 fc_orphan_t *orp, *tmporp;
1490
1491 /*
1492 * In a Fabric topology with many host ports connected to
1493 * a switch, another detaching instance of fp might have
1494 * triggered a LOGO (which is an unsolicited request to
1495 * this instance). So, in order to be able to successfully
1496 * detach in such cases, a delay of up to about
1497 * 30 seconds is introduced.
1498 */
1499 delay_count = 0;
1500 mutex_enter(&port->fp_mutex);
1501 if (port->fp_out_fpcmds != 0) {
1502 /*
1503 * At this time we can only check fp internal commands, because
1504 * sd/ssd/scsi_vhci should have finished all their commands,
1505 * fcp/fcip/fcsm should have finished all their commands.
1506 *
1507 * It seems that all fp internal commands are asynchronous now.
1508 */
1509 port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
1510 mutex_exit(&port->fp_mutex);
1511
1512 cmn_err(CE_WARN, "fp(%d): %d fp_cmd(s) is/are in progress"
1513 " Failing detach", port->fp_instance, port->fp_out_fpcmds);
1514 return (DDI_FAILURE);
1515 }
1516
1517 while ((port->fp_soft_state &
1518 (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) &&
1519 (delay_count < 30)) {
1520 mutex_exit(&port->fp_mutex);
1521 delay_count++;
1522 delay(drv_usectohz(1000000));
1523 mutex_enter(&port->fp_mutex);
1524 }
1525
1526 if (port->fp_soft_state &
1527 (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) {
1528 port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
1529 mutex_exit(&port->fp_mutex);
1530
1531 cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
1532 " Failing detach", port->fp_instance);
1533 return (DDI_FAILURE);
1534 }
1535
1536 port->fp_soft_state |= FP_SOFT_IN_DETACH;
1537 port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
1538 mutex_exit(&port->fp_mutex);
1539
1540 /*
1541 * If we're powered down, we need to raise power prior to submitting
1542 * the JOB_PORT_SHUTDOWN job. Otherwise, the job handler will never
1543 * process the shutdown job.
1544 */
1545 if (fctl_busy_port(port) != 0) {
1546 cmn_err(CE_WARN, "fp(%d): fctl_busy_port failed",
1547 port->fp_instance);
1548 mutex_enter(&port->fp_mutex);
1549 port->fp_soft_state &= ~FP_SOFT_IN_DETACH;
1550 mutex_exit(&port->fp_mutex);
1551 return (DDI_FAILURE);
1552 }
1553
1554 /*
1555 * This will deallocate data structs and cause the "job" thread
1556 * to exit, in preparation for DDI_DETACH on the instance.
1557 * This can sleep for an arbitrary duration, since it waits for
1558 * commands over the wire, timeout(9F) callbacks, etc.
1559 *
1560 * CAUTION: There is still a race here, where the "job" thread
1561 * can still be executing code even though the fctl_jobwait() call
1562 * below has returned to us. In theory the fp driver could even be
1563 * modunloaded even though the job thread isn't done executing;
1564 * there is no obvious way to wait for it here without creating a race.
1565 */
1566 job = fctl_alloc_job(JOB_PORT_SHUTDOWN, 0, NULL,
1567 (opaque_t)port, KM_SLEEP);
1568 fctl_enque_job(port, job);
1569 fctl_jobwait(job);
1570 fctl_dealloc_job(job);
1571
1572
1573 (void) pm_lower_power(port->fp_port_dip, FP_PM_COMPONENT,
1574 FP_PM_PORT_DOWN);
1575
1576 if (port->fp_taskq) {
1577 taskq_destroy(port->fp_taskq);
1578 }
1579
1580 ddi_prop_remove_all(port->fp_port_dip);
1581
1582 ddi_remove_minor_node(port->fp_port_dip, NULL);
1583
1584 fctl_remove_port(port);
1585
1586 fp_free_pkt(port->fp_els_resp_pkt);
1587
1588 if (port->fp_ub_tokens) {
1589 if (fc_ulp_ubfree(port, port->fp_ub_count,
1590 port->fp_ub_tokens) != FC_SUCCESS) {
1591 cmn_err(CE_WARN, "fp(%d): couldn't free "
1592 " unsolicited buffers", port->fp_instance);
1593 }
1594 kmem_free(port->fp_ub_tokens,
1595 sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
1596 port->fp_ub_tokens = NULL;
1597 }
1598
1599 if (port->fp_pkt_cache != NULL) {
1600 kmem_cache_destroy(port->fp_pkt_cache);
1601 }
1602
1603 port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
1604
1605 mutex_enter(&port->fp_mutex);
1606 if (port->fp_did_table) {
1607 kmem_free(port->fp_did_table, did_table_size *
1608 sizeof (struct d_id_hash));
1609 }
1610
1611 if (port->fp_pwwn_table) {
1612 kmem_free(port->fp_pwwn_table, pwwn_table_size *
1613 sizeof (struct pwwn_hash));
1614 }
1615 orp = port->fp_orphan_list;
1616 while (orp) {
1617 tmporp = orp;
1618 orp = orp->orp_next;
1619 kmem_free(tmporp, sizeof (*orp));
1620 }
1621
1622 mutex_exit(&port->fp_mutex);
1623
1624 fp_log_port_event(port, ESC_SUNFC_PORT_DETACH);
1625
1626 mutex_destroy(&port->fp_mutex);
1627 cv_destroy(&port->fp_attach_cv);
1628 cv_destroy(&port->fp_cv);
1629 ddi_soft_state_free(fp_driver_softstate, port->fp_instance);
1630
1631 return (DDI_SUCCESS);
1632 }
1633
1634
1635 /*
1636 * Steps to perform DDI_SUSPEND operation on a FC port
1637 *
1638 * - If already suspended return DDI_FAILURE
1639 * - If already power-suspended return DDI_SUCCESS
1640 * - If an unsolicited callback or state change handling is in
1641 * progress, throw a warning message, return DDI_FAILURE
1642 * - Cancel timeouts
1643 * - SUSPEND the job_handler thread (means do nothing as it is
1644 * taken care of by the CPR framework)
1645 */
1646 static int
1647 fp_suspend_handler(fc_local_port_t *port)
1648 {
1649 uint32_t delay_count;
1650
1651 mutex_enter(&port->fp_mutex);
1652
1653 /*
1654 * The following should never happen, but
1655 * let the driver be more defensive here
1656 */
1657 if (port->fp_soft_state & FP_SOFT_SUSPEND) {
1658 mutex_exit(&port->fp_mutex);
1659 return (DDI_FAILURE);
1660 }
1661
1662 /*
1663 * If the port is already power suspended, there
1664 * is nothing else to do, so return DDI_SUCCESS,
1665 * but mark the SUSPEND bit in the soft state
1666 * before leaving.
1667 */
1668 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
1669 port->fp_soft_state |= FP_SOFT_SUSPEND;
1670 mutex_exit(&port->fp_mutex);
1671 return (DDI_SUCCESS);
1672 }
1673
1674 /*
1675 * Check if an unsolicited callback or state change handling is
1676 * in progress. If true, fail the suspend operation; also throw
1677 * a warning message notifying the failure. Note that Sun PCI
1678 * hotplug spec recommends messages in cases of failure (but
1679 * not flooding the console)
1680 *
1681 * Busy waiting for a short interval (500 millisecond ?) to see
1682 * if the callback processing completes may be another idea. Since
1683 * most of the callback processing involves a lot of work, it
1684 * is safe to just fail the SUSPEND operation. It is definitely
1685 * not bad to fail the SUSPEND operation if the driver is busy.
1686 */
1687 delay_count = 0;
1688 while ((port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
1689 FP_SOFT_IN_UNSOL_CB)) && (delay_count < 30)) {
1690 mutex_exit(&port->fp_mutex);
1691 delay_count++;
1692 delay(drv_usectohz(1000000));
1693 mutex_enter(&port->fp_mutex);
1694 }
1695
1696 if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
1697 FP_SOFT_IN_UNSOL_CB)) {
1698 mutex_exit(&port->fp_mutex);
1699 cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
1700 " Failing suspend", port->fp_instance);
1701 return (DDI_FAILURE);
1702 }
1703
1704 /*
1705 * Check if the FC port thread is busy
1706 */
1707 if (port->fp_job_head) {
1708 mutex_exit(&port->fp_mutex);
1709 FP_TRACE(FP_NHEAD2(9, 0),
1710 "FC port thread is busy: Failing suspend");
1711 return (DDI_FAILURE);
1712 }
1713 port->fp_soft_state |= FP_SOFT_SUSPEND;
1714
1715 fp_suspend_all(port);
1716 mutex_exit(&port->fp_mutex);
1717
1718 return (DDI_SUCCESS);
1719 }
1720
1721
1722 /*
1723 * Prepare for graceful power down of a FC port
1724 */
1725 static int
1726 fp_power_down(fc_local_port_t *port)
1727 {
1728 ASSERT(MUTEX_HELD(&port->fp_mutex));
1729
1730 /*
1731 * Power down request followed by a DDI_SUSPEND should
1732 * never happen; if it does, return DDI_SUCCESS
1733 */
1734 if (port->fp_soft_state & FP_SOFT_SUSPEND) {
1735 port->fp_soft_state |= FP_SOFT_POWER_DOWN;
1736 return (DDI_SUCCESS);
1737 }
1738
1739 /*
1740 * If the port is already power suspended, there
1741 * is nothing else to do, so just return DDI_SUCCESS.
1742 */
1743 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
1744 return (DDI_SUCCESS);
1745 }
1746
1747 /*
1748 * Check if an unsolicited callback or state change handling
1749 * is in progress. If true, fail the PM suspend operation.
1750 * But don't print a message unless the verbosity of the
1751 * driver desires otherwise.
1752 */
1753 if ((port->fp_soft_state & FP_SOFT_IN_STATEC_CB) ||
1754 (port->fp_soft_state & FP_SOFT_IN_UNSOL_CB)) {
1755 FP_TRACE(FP_NHEAD2(9, 0),
1756 "Unsolicited callback in progress: Failing power down");
1757 return (DDI_FAILURE);
1758 }
1759
1760 /*
1761 * Check if the FC port thread is busy
1762 */
1763 if (port->fp_job_head) {
1764 FP_TRACE(FP_NHEAD2(9, 0),
1765 "FC port thread is busy: Failing power down");
1766 return (DDI_FAILURE);
1767 }
1768 port->fp_soft_state |= FP_SOFT_POWER_DOWN;
1769
1770 /*
1771 * check if the ULPs are ready for power down
1772 */
1773 mutex_exit(&port->fp_mutex);
1774 if (fctl_detach_ulps(port, FC_CMD_POWER_DOWN,
1775 &modlinkage) != FC_SUCCESS) {
1776 mutex_enter(&port->fp_mutex);
1777 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
1778 mutex_exit(&port->fp_mutex);
1779
1780 /*
1781 * Power back up the obedient ULPs that went down
1782 */
1783 fp_attach_ulps(port, FC_CMD_POWER_UP);
1784
1785 FP_TRACE(FP_NHEAD2(9, 0),
1786 "ULP(s) busy, detach_ulps failed. Failing power down");
1787 mutex_enter(&port->fp_mutex);
1788 return (DDI_FAILURE);
1789 }
1790 mutex_enter(&port->fp_mutex);
1791
1792 fp_suspend_all(port);
1793
1794 return (DDI_SUCCESS);
1795 }
1796
1797
1798 /*
1799 * Suspend the entire FC port
1800 */
1801 static void
1802 fp_suspend_all(fc_local_port_t *port)
1803 {
1804 int index;
1805 struct pwwn_hash *head;
1806 fc_remote_port_t *pd;
1807
1808 ASSERT(MUTEX_HELD(&port->fp_mutex));
1809
1810 if (port->fp_wait_tid != 0) {
1811 timeout_id_t tid;
1812
1813 tid = port->fp_wait_tid;
1814 port->fp_wait_tid = (timeout_id_t)NULL;
1815 mutex_exit(&port->fp_mutex);
1816 (void) untimeout(tid);
1817 mutex_enter(&port->fp_mutex);
1818 port->fp_restore |= FP_RESTORE_WAIT_TIMEOUT;
1819 }
1820
1821 if (port->fp_offline_tid) {
1822 timeout_id_t tid;
1823
1824 tid = port->fp_offline_tid;
1825 port->fp_offline_tid = (timeout_id_t)NULL;
1826 mutex_exit(&port->fp_mutex);
1827 (void) untimeout(tid);
1828 mutex_enter(&port->fp_mutex);
1829 port->fp_restore |= FP_RESTORE_OFFLINE_TIMEOUT;
1830 }
1831 mutex_exit(&port->fp_mutex);
1832 port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
1833 mutex_enter(&port->fp_mutex);
1834
1835 /*
1836 * Mark all devices as OLD, and reset the LOGIN state as well
1837 * (this will force the ULPs to perform a LOGIN after calling
1838 * fc_portgetmap() during RESUME/PM_RESUME)
1839 */
1840 for (index = 0; index < pwwn_table_size; index++) {
1841 head = &port->fp_pwwn_table[index];
1842 pd = head->pwwn_head;
1843 while (pd != NULL) {
1844 mutex_enter(&pd->pd_mutex);
1845 fp_remote_port_offline(pd);
1846 fctl_delist_did_table(port, pd);
1847 pd->pd_state = PORT_DEVICE_VALID;
1848 pd->pd_login_count = 0;
1849 mutex_exit(&pd->pd_mutex);
1850 pd = pd->pd_wwn_hnext;
1851 }
1852 }
1853 }
1854
1855
1856 /*
1857 * fp_cache_constructor: Constructor function for kmem_cache_create(9F).
1858 * Performs initializations for fc_packet_t structs.
1859 * Returns 0 for success or -1 for failure.
1860 *
1861 * This function allocates DMA handles for both commands and responses.
1862 * Most of the ELSs used have both a command and a response, so it is
1863 * strongly desirable to do these allocations in the cache constructor.
1864 *
1865 * Context: Can sleep iff called with KM_SLEEP flag.
1866 */
1867 static int
1868 fp_cache_constructor(void *buf, void *cdarg, int kmflags)
1869 {
1870 int (*cb) (caddr_t);
1871 fc_packet_t *pkt;
1872 fp_cmd_t *cmd = (fp_cmd_t *)buf;
1873 fc_local_port_t *port = (fc_local_port_t *)cdarg;
1874
1875 cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
1876
1877 cmd->cmd_next = NULL;
1878 cmd->cmd_flags = 0;
1879 cmd->cmd_dflags = 0;
1880 cmd->cmd_job = NULL;
1881 cmd->cmd_port = port;
1882 pkt = &cmd->cmd_pkt;
1883
1884 if (!(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) {
1885 if (ddi_dma_alloc_handle(port->fp_fca_dip,
1886 port->fp_fca_tran->fca_dma_attr, cb, NULL,
1887 &pkt->pkt_cmd_dma) != DDI_SUCCESS) {
1888 return (-1);
1889 }
1890
1891 if (ddi_dma_alloc_handle(port->fp_fca_dip,
1892 port->fp_fca_tran->fca_dma_attr, cb, NULL,
1893 &pkt->pkt_resp_dma) != DDI_SUCCESS) {
1894 ddi_dma_free_handle(&pkt->pkt_cmd_dma);
1895 return (-1);
1896 }
1897 } else {
1898 pkt->pkt_cmd_dma = 0;
1899 pkt->pkt_resp_dma = 0;
1900 }
1901
1902 pkt->pkt_cmd_acc = pkt->pkt_resp_acc = NULL;
1903 pkt->pkt_cmd_cookie_cnt = pkt->pkt_resp_cookie_cnt =
1904 pkt->pkt_data_cookie_cnt = 0;
1905 pkt->pkt_cmd_cookie = pkt->pkt_resp_cookie =
1906 pkt->pkt_data_cookie = NULL;
1907 pkt->pkt_fca_private = (caddr_t)buf + sizeof (fp_cmd_t);
1908
1909 return (0);
1910 }
1911
1912
1913 /*
1914 * fp_cache_destructor: Destructor function for kmem_cache_create().
1915 * Performs un-initialization of fc_packet_t structs.
1916 */
1917 /* ARGSUSED */
1918 static void
1919 fp_cache_destructor(void *buf, void *cdarg)
1920 {
1921 fp_cmd_t *cmd = (fp_cmd_t *)buf;
1922 fc_packet_t *pkt;
1923
1924 pkt = &cmd->cmd_pkt;
1925 if (pkt->pkt_cmd_dma) {
1926 ddi_dma_free_handle(&pkt->pkt_cmd_dma);
1927 }
1928
1929 if (pkt->pkt_resp_dma) {
1930 ddi_dma_free_handle(&pkt->pkt_resp_dma);
1931 }
1932 }
1933
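/*
 * Illustrative sketch only (not part of the driver): the constructor and
 * destructor above are meant to be handed to kmem_cache_create(9F) when
 * the per-port packet cache is built during attach.  The cache name, the
 * alignment and the buffer-size calculation shown here are assumptions
 * for illustration; the real values come from the attach path and the
 * FCA's fc_fca_tran_t.
 *
 *	port->fp_pkt_cache = kmem_cache_create("fp_pkt_cache",
 *	    sizeof (fp_cmd_t) + port->fp_fca_tran->fca_pkt_size,
 *	    8, fp_cache_constructor, fp_cache_destructor,
 *	    NULL, (void *)port, NULL, 0);
 */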
1934
1935 /*
1936 * Packet allocation for ELS and any other port driver commands
1937 *
1938 * Some ELSs like FLOGI and PLOGI are critical for topology and
1939 * device discovery, and a system's inability to allocate memory
1940 * or DVMA resources while performing one of these critical ELSs
1941 * causes serious problems. While memory allocation failures are
1942 * rare, DVMA resource failures are common as applications become
1943 * more and more demanding on large servers. So it is desirable
1944 * to have framework support for reserving a fragment of DVMA.
1945 * Until this is fixed the correct way, the impact is severe
1946 * whenever a LIP happens at a time when DVMA resources are
1947 * completely drained - so an attempt is made to KM_SLEEP while
1948 * requesting these resources, in the hope that the requests
1949 * won't hang forever.
1950 *
1951 * The fc_remote_port_t argument is stored into the pkt_pd field in the
1952 * fc_packet_t struct prior to the fc_ulp_init_packet() call. This
1953 * ensures that the pd_ref_count for the fc_remote_port_t is valid.
1954 * If there is no fc_remote_port_t associated with the fc_packet_t, then
1955 * fp_alloc_pkt() must be called with pd set to NULL.
1956 *
1957 * fp/fctl will reuse the fp_cmd_t in places and change pkt_cmdlen/rsplen;
1958 * this is really a design fault. It causes no problem for physical FCAs,
1959 * but it can cause a memory leak or a panic for virtual FCAs like fcoei.
1960 *
1961 * For FCAs that don't support DMA, such as fcoei, we will use
1962 * pkt_fctl_rsvd1/rsvd2 to keep the real cmd_len/resp_len.
1963 */
1964
1965 static fp_cmd_t *
1966 fp_alloc_pkt(fc_local_port_t *port, int cmd_len, int resp_len, int kmflags,
1967 fc_remote_port_t *pd)
1968 {
1969 int rval;
1970 ulong_t real_len;
1971 fp_cmd_t *cmd;
1972 fc_packet_t *pkt;
1973 int (*cb) (caddr_t);
1974 ddi_dma_cookie_t pkt_cookie;
1975 ddi_dma_cookie_t *cp;
1976 uint32_t cnt;
1977
1978 ASSERT(!MUTEX_HELD(&port->fp_mutex));
1979
1980 cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
1981
1982 cmd = (fp_cmd_t *)kmem_cache_alloc(port->fp_pkt_cache, kmflags);
1983 if (cmd == NULL) {
1984 return (cmd);
1985 }
1986
1987 cmd->cmd_ulp_pkt = NULL;
1988 cmd->cmd_flags = 0;
1989 pkt = &cmd->cmd_pkt;
1990 ASSERT(cmd->cmd_dflags == 0);
1991
1992 pkt->pkt_datalen = 0;
1993 pkt->pkt_data = NULL;
1994 pkt->pkt_state = 0;
1995 pkt->pkt_action = 0;
1996 pkt->pkt_reason = 0;
1997 pkt->pkt_expln = 0;
1998 pkt->pkt_cmd = NULL;
1999 pkt->pkt_resp = NULL;
2000 pkt->pkt_fctl_rsvd1 = NULL;
2001 pkt->pkt_fctl_rsvd2 = NULL;
2002
2003 /*
2004 * Init pkt_pd with the given pointer; this must be done _before_
2005 * the call to fc_ulp_init_packet().
2006 */
2007 pkt->pkt_pd = pd;
2008
2009 /* Now call the FCA driver to init its private, per-packet fields */
2010 if (fc_ulp_init_packet((opaque_t)port, pkt, kmflags) != FC_SUCCESS) {
2011 goto alloc_pkt_failed;
2012 }
2013
2014 if (cmd_len && !(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) {
2015 ASSERT(pkt->pkt_cmd_dma != NULL);
2016
2017 rval = ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmd_len,
2018 port->fp_fca_tran->fca_acc_attr, DDI_DMA_CONSISTENT,
2019 cb, NULL, (caddr_t *)&pkt->pkt_cmd, &real_len,
2020 &pkt->pkt_cmd_acc);
2021
2022 if (rval != DDI_SUCCESS) {
2023 goto alloc_pkt_failed;
2024 }
2025 cmd->cmd_dflags |= FP_CMD_VALID_DMA_MEM;
2026
2027 if (real_len < cmd_len) {
2028 goto alloc_pkt_failed;
2029 }
2030
2031 rval = ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL,
2032 pkt->pkt_cmd, real_len, DDI_DMA_WRITE |
2033 DDI_DMA_CONSISTENT, cb, NULL,
2034 &pkt_cookie, &pkt->pkt_cmd_cookie_cnt);
2035
2036 if (rval != DDI_DMA_MAPPED) {
2037 goto alloc_pkt_failed;
2038 }
2039
2040 cmd->cmd_dflags |= FP_CMD_VALID_DMA_BIND;
2041
2042 if (pkt->pkt_cmd_cookie_cnt >
2043 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) {
2044 goto alloc_pkt_failed;
2045 }
2046
2047 ASSERT(pkt->pkt_cmd_cookie_cnt != 0);
2048
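/*
 * Flatten the DMA cookies into an array for the FCA: the first
 * cookie was returned by the bind above, and the remaining ones
 * are fetched with ddi_dma_nextcookie(9F) in the loop below.
 */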
2049 cp = pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
2050 pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
2051 KM_NOSLEEP);
2052
2053 if (cp == NULL) {
2054 goto alloc_pkt_failed;
2055 }
2056
2057 *cp = pkt_cookie;
2058 cp++;
2059 for (cnt = 1; cnt < pkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
2060 ddi_dma_nextcookie(pkt->pkt_cmd_dma, &pkt_cookie);
2061 *cp = pkt_cookie;
2062 }
2063 } else if (cmd_len != 0) {
2064 pkt->pkt_cmd = kmem_alloc(cmd_len, KM_SLEEP);
2065 pkt->pkt_fctl_rsvd1 = (opaque_t)(uintptr_t)cmd_len;
2066 }
2067
2068 if (resp_len && !(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) {
2069 ASSERT(pkt->pkt_resp_dma != NULL);
2070
2071 rval = ddi_dma_mem_alloc(pkt->pkt_resp_dma, resp_len,
2072 port->fp_fca_tran->fca_acc_attr,
2073 DDI_DMA_CONSISTENT, cb, NULL,
2074 (caddr_t *)&pkt->pkt_resp, &real_len,
2075 &pkt->pkt_resp_acc);
2076
2077 if (rval != DDI_SUCCESS) {
2078 goto alloc_pkt_failed;
2079 }
2080 cmd->cmd_dflags |= FP_RESP_VALID_DMA_MEM;
2081
2082 if (real_len < resp_len) {
2083 goto alloc_pkt_failed;
2084 }
2085
2086 rval = ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL,
2087 pkt->pkt_resp, real_len, DDI_DMA_READ |
2088 DDI_DMA_CONSISTENT, cb, NULL,
2089 &pkt_cookie, &pkt->pkt_resp_cookie_cnt);
2090
2091 if (rval != DDI_DMA_MAPPED) {
2092 goto alloc_pkt_failed;
2093 }
2094
2095 cmd->cmd_dflags |= FP_RESP_VALID_DMA_BIND;
2096
2097 if (pkt->pkt_resp_cookie_cnt >
2098 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) {
2099 goto alloc_pkt_failed;
2100 }
2101
2102 ASSERT(pkt->pkt_resp_cookie_cnt != 0);
2103
2104 cp = pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
2105 pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
2106 KM_NOSLEEP);
2107
2108 if (cp == NULL) {
2109 goto alloc_pkt_failed;
2110 }
2111
2112 *cp = pkt_cookie;
2113 cp++;
2114 for (cnt = 1; cnt < pkt->pkt_resp_cookie_cnt; cnt++, cp++) {
2115 ddi_dma_nextcookie(pkt->pkt_resp_dma, &pkt_cookie);
2116 *cp = pkt_cookie;
2117 }
2118 } else if (resp_len != 0) {
2119 pkt->pkt_resp = kmem_alloc(resp_len, KM_SLEEP);
2120 pkt->pkt_fctl_rsvd2 = (opaque_t)(uintptr_t)resp_len;
2121 }
2122
2123 pkt->pkt_cmdlen = cmd_len;
2124 pkt->pkt_rsplen = resp_len;
2125 pkt->pkt_ulp_private = cmd;
2126
2127 return (cmd);
2128
2129 alloc_pkt_failed:
2130
2131 fp_free_dma(cmd);
2132
2133 if (pkt->pkt_cmd_cookie != NULL) {
2134 kmem_free(pkt->pkt_cmd_cookie,
2135 pkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
2136 pkt->pkt_cmd_cookie = NULL;
2137 }
2138
2139 if (pkt->pkt_resp_cookie != NULL) {
2140 kmem_free(pkt->pkt_resp_cookie,
2141 pkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
2142 pkt->pkt_resp_cookie = NULL;
2143 }
2144
2145 if (port->fp_soft_state & FP_SOFT_FCA_IS_NODMA) {
2146 if (pkt->pkt_cmd) {
2147 kmem_free(pkt->pkt_cmd, cmd_len);
2148 }
2149
2150 if (pkt->pkt_resp) {
2151 kmem_free(pkt->pkt_resp, resp_len);
2152 }
2153 }
2154
2155 kmem_cache_free(port->fp_pkt_cache, cmd);
2156
2157 return (NULL);
2158 }
2159
2160
2161 /*
2162 * Free FC packet
2163 */
2164 static void
2165 fp_free_pkt(fp_cmd_t *cmd)
2166 {
2167 fc_local_port_t *port;
2168 fc_packet_t *pkt;
2169
2170 ASSERT(!MUTEX_HELD(&cmd->cmd_port->fp_mutex));
2171
2172 cmd->cmd_next = NULL;
2173 cmd->cmd_job = NULL;
2174 pkt = &cmd->cmd_pkt;
2175 pkt->pkt_ulp_private = 0;
2176 pkt->pkt_tran_flags = 0;
2177 pkt->pkt_tran_type = 0;
2178 port = cmd->cmd_port;
2179
2180 if (pkt->pkt_cmd_cookie != NULL) {
2181 kmem_free(pkt->pkt_cmd_cookie, pkt->pkt_cmd_cookie_cnt *
2182 sizeof (ddi_dma_cookie_t));
2183 pkt->pkt_cmd_cookie = NULL;
2184 }
2185
2186 if (pkt->pkt_resp_cookie != NULL) {
2187 kmem_free(pkt->pkt_resp_cookie, pkt->pkt_resp_cookie_cnt *
2188 sizeof (ddi_dma_cookie_t));
2189 pkt->pkt_resp_cookie = NULL;
2190 }
2191
2192 if (port->fp_soft_state & FP_SOFT_FCA_IS_NODMA) {
2193 if (pkt->pkt_cmd) {
2194 kmem_free(pkt->pkt_cmd,
2195 (uint32_t)(uintptr_t)pkt->pkt_fctl_rsvd1);
2196 }
2197
2198 if (pkt->pkt_resp) {
2199 kmem_free(pkt->pkt_resp,
2200 (uint32_t)(uintptr_t)pkt->pkt_fctl_rsvd2);
2201 }
2202 }
2203
2204 fp_free_dma(cmd);
2205 (void) fc_ulp_uninit_packet((opaque_t)port, pkt);
2206 kmem_cache_free(port->fp_pkt_cache, (void *)cmd);
2207 }
2208
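/*
 * Usage sketch (illustration only, nothing here is executed): callers in
 * this file pair fp_alloc_pkt() with fp_free_pkt() roughly as follows;
 * the PLOGI-sized lengths are just an example.
 *
 *	fp_cmd_t *cmd;
 *
 *	cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t),
 *	    sizeof (la_els_logi_t), KM_SLEEP, NULL);
 *	if (cmd == NULL)
 *		return (FC_NOMEM);
 *
 *	(initialize cmd->cmd_pkt, hand it to fp_sendcmd(), and call
 *	 fp_free_pkt(cmd) if the send fails)
 */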
2209
2210 /*
2211 * Release DVMA resources
2212 */
2213 static void
2214 fp_free_dma(fp_cmd_t *cmd)
2215 {
2216 fc_packet_t *pkt = &cmd->cmd_pkt;
2217
2218 pkt->pkt_cmdlen = 0;
2219 pkt->pkt_rsplen = 0;
2220 pkt->pkt_tran_type = 0;
2221 pkt->pkt_tran_flags = 0;
2222
2223 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_BIND) {
2224 (void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma);
2225 }
2226
2227 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_MEM) {
2228 if (pkt->pkt_cmd_acc) {
2229 ddi_dma_mem_free(&pkt->pkt_cmd_acc);
2230 }
2231 }
2232
2233 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_BIND) {
2234 (void) ddi_dma_unbind_handle(pkt->pkt_resp_dma);
2235 }
2236
2237 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_MEM) {
2238 if (pkt->pkt_resp_acc) {
2239 ddi_dma_mem_free(&pkt->pkt_resp_acc);
2240 }
2241 }
2242 cmd->cmd_dflags = 0;
2243 }
2244
2245
2246 /*
2247 * Dedicated thread to perform various activities. One thread for
2248 * each fc_local_port_t (driver soft state) instance.
2249 * Note that in addition to this dedicated thread, some Solaris taskq
2250 * threads are also used on a per-local-port basis; those need to be
2251 * taken into consideration as well.
2252 */
2253 static void
2254 fp_job_handler(fc_local_port_t *port)
2255 {
2256 int rval;
2257 uint32_t *d_id;
2258 fc_remote_port_t *pd;
2259 job_request_t *job;
2260
2261 #ifndef __lock_lint
2262 /*
2263 * Solaris-internal stuff for proper operation of kernel threads
2264 * with Solaris CPR.
2265 */
2266 CALLB_CPR_INIT(&port->fp_cpr_info, &port->fp_mutex,
2267 callb_generic_cpr, "fp_job_handler");
2268 #endif
2269
2270
2271 /* Loop forever waiting for work to do */
2272 for (;;) {
2273
2274 mutex_enter(&port->fp_mutex);
2275
2276 /*
2277 * Sleep if no work to do right now, or if we want
2278 * to suspend or power-down.
2279 */
2280 while (port->fp_job_head == NULL ||
2281 (port->fp_soft_state & (FP_SOFT_POWER_DOWN |
2282 FP_SOFT_SUSPEND))) {
2283 CALLB_CPR_SAFE_BEGIN(&port->fp_cpr_info);
2284 cv_wait(&port->fp_cv, &port->fp_mutex);
2285 CALLB_CPR_SAFE_END(&port->fp_cpr_info, &port->fp_mutex);
2286 }
2287
2288 /*
2289 * OK, we've just been woken up, so retrieve the next entry
2290 * from the head of the job queue for this local port.
2291 */
2292 job = fctl_deque_job(port);
2293
2294 /*
2295 * Handle all the fp driver's supported job codes here
2296 * in this big honkin' switch.
2297 */
2298 switch (job->job_code) {
2299 case JOB_PORT_SHUTDOWN:
2300 /*
2301 * fp_port_shutdown() is only called from here. This
2302 * will prepare the local port instance (softstate)
2303 * for detaching. This cancels timeout callbacks,
2304 * executes LOGOs with remote ports, cleans up tables,
2305 * and deallocates data structs.
2306 */
2307 fp_port_shutdown(port, job);
2308
2309 /*
2310 * This will exit the job thread.
2311 */
2312 #ifndef __lock_lint
2313 CALLB_CPR_EXIT(&(port->fp_cpr_info));
2314 #else
2315 mutex_exit(&port->fp_mutex);
2316 #endif
2317 fctl_jobdone(job);
2318 thread_exit();
2319
2320 /* NOTREACHED */
2321
2322 case JOB_ATTACH_ULP: {
2323 /*
2324 * This job is spawned in response to a ULP calling
2325 * fc_ulp_add().
2326 */
2327
2328 boolean_t do_attach_ulps = B_TRUE;
2329
2330 /*
2331 * If fp is detaching, we don't want to call
2332 * fp_startup_done as this asynchronous
2333 * notification may interfere with the re-attach.
2334 */
2335
2336 if (port->fp_soft_state & (FP_DETACH_INPROGRESS |
2337 FP_SOFT_IN_DETACH | FP_DETACH_FAILED)) {
2338 do_attach_ulps = B_FALSE;
2339 } else {
2340 /*
2341 * We are going to force the transport
2342 * to attach to the ULPs, so set
2343 * fp_ulp_attach. This will keep any
2344 * potential detach from occurring until
2345 * we are done.
2346 */
2347 port->fp_ulp_attach = 1;
2348 }
2349
2350 mutex_exit(&port->fp_mutex);
2351
2352 /*
2353 * NOTE: Since we just dropped the mutex, there is now
2354 * a race window where the fp_soft_state check above
2355 * could change here. This race is covered because an
2356 * additional check was added in the functions hidden
2357 * under fp_startup_done().
2358 */
2359 if (do_attach_ulps == B_TRUE) {
2360 /*
2361 * This goes thru a bit of a convoluted call
2362 * chain before spawning off a DDI taskq
2363 * request to perform the actual attach
2364 * operations. Blocking can occur at a number
2365 * of points.
2366 */
2367 fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
2368 }
2369 job->job_result = FC_SUCCESS;
2370 fctl_jobdone(job);
2371 break;
2372 }
2373
2374 case JOB_ULP_NOTIFY: {
2375 /*
2376 * Pass state change notifications up to any/all
2377 * registered ULPs.
2378 */
2379 uint32_t statec;
2380
2381 statec = job->job_ulp_listlen;
2382 if (statec == FC_STATE_RESET_REQUESTED) {
2383 port->fp_last_task = port->fp_task;
2384 port->fp_task = FP_TASK_OFFLINE;
2385 fp_port_offline(port, 0);
2386 port->fp_task = port->fp_last_task;
2387 port->fp_last_task = FP_TASK_IDLE;
2388 }
2389
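/*
 * fp_statec_busy counts state-change notifications that are still
 * being processed; clear the IN_STATEC_CB flag once the last one
 * drains.
 */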
2390 if (--port->fp_statec_busy == 0) {
2391 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
2392 }
2393
2394 mutex_exit(&port->fp_mutex);
2395
2396 job->job_result = fp_ulp_notify(port, statec, KM_SLEEP);
2397 fctl_jobdone(job);
2398 break;
2399 }
2400
2401 case JOB_PLOGI_ONE:
2402 /*
2403 * Issue a PLOGI to a single remote port. Multiple
2404 * PLOGIs to different remote ports may occur in
2405 * parallel.
2406 * This can create the fc_remote_port_t if it does not
2407 * already exist.
2408 */
2409
2410 mutex_exit(&port->fp_mutex);
2411 d_id = (uint32_t *)job->job_private;
2412 pd = fctl_get_remote_port_by_did(port, *d_id);
2413
2414 if (pd) {
2415 mutex_enter(&pd->pd_mutex);
2416 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
2417 pd->pd_login_count++;
2418 mutex_exit(&pd->pd_mutex);
2419 job->job_result = FC_SUCCESS;
2420 fctl_jobdone(job);
2421 break;
2422 }
2423 mutex_exit(&pd->pd_mutex);
2424 } else {
2425 mutex_enter(&port->fp_mutex);
2426 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2427 mutex_exit(&port->fp_mutex);
2428 pd = fp_create_remote_port_by_ns(port,
2429 *d_id, KM_SLEEP);
2430 if (pd == NULL) {
2431 job->job_result = FC_FAILURE;
2432 fctl_jobdone(job);
2433 break;
2434 }
2435 } else {
2436 mutex_exit(&port->fp_mutex);
2437 }
2438 }
2439
2440 job->job_flags |= JOB_TYPE_FP_ASYNC;
2441 job->job_counter = 1;
2442
2443 rval = fp_port_login(port, *d_id, job,
2444 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL);
2445
2446 if (rval != FC_SUCCESS) {
2447 job->job_result = rval;
2448 fctl_jobdone(job);
2449 }
2450 break;
2451
2452 case JOB_LOGO_ONE: {
2453 /*
2454 * Issue a PLOGO to a single remote port. Multiple
2455 * PLOGOs to different remote ports may occur in
2456 * parallel.
2457 */
2458 fc_remote_port_t *pd;
2459
2460 #ifndef __lock_lint
2461 ASSERT(job->job_counter > 0);
2462 #endif
2463
2464 pd = (fc_remote_port_t *)job->job_ulp_pkts;
2465
2466 mutex_enter(&pd->pd_mutex);
2467 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
2468 mutex_exit(&pd->pd_mutex);
2469 job->job_result = FC_LOGINREQ;
2470 mutex_exit(&port->fp_mutex);
2471 fctl_jobdone(job);
2472 break;
2473 }
2474 if (pd->pd_login_count > 1) {
2475 pd->pd_login_count--;
2476 mutex_exit(&pd->pd_mutex);
2477 job->job_result = FC_SUCCESS;
2478 mutex_exit(&port->fp_mutex);
2479 fctl_jobdone(job);
2480 break;
2481 }
2482 mutex_exit(&pd->pd_mutex);
2483 mutex_exit(&port->fp_mutex);
2484 job->job_flags |= JOB_TYPE_FP_ASYNC;
2485 (void) fp_logout(port, pd, job);
2486 break;
2487 }
2488
2489 case JOB_FCIO_LOGIN:
2490 /*
2491 * PLOGI initiated at ioctl request.
2492 */
2493 mutex_exit(&port->fp_mutex);
2494 job->job_result =
2495 fp_fcio_login(port, job->job_private, job);
2496 fctl_jobdone(job);
2497 break;
2498
2499 case JOB_FCIO_LOGOUT:
2500 /*
2501 * PLOGO initiated at ioctl request.
2502 */
2503 mutex_exit(&port->fp_mutex);
2504 job->job_result =
2505 fp_fcio_logout(port, job->job_private, job);
2506 fctl_jobdone(job);
2507 break;
2508
2509 case JOB_PORT_GETMAP:
2510 case JOB_PORT_GETMAP_PLOGI_ALL: {
2511 port->fp_last_task = port->fp_task;
2512 port->fp_task = FP_TASK_GETMAP;
2513
2514 switch (port->fp_topology) {
2515 case FC_TOP_PRIVATE_LOOP:
2516 job->job_counter = 1;
2517
2518 fp_get_loopmap(port, job);
2519 mutex_exit(&port->fp_mutex);
2520 fp_jobwait(job);
2521 fctl_fillout_map(port,
2522 (fc_portmap_t **)job->job_private,
2523 (uint32_t *)job->job_arg, 1, 0, 0);
2524 fctl_jobdone(job);
2525 mutex_enter(&port->fp_mutex);
2526 break;
2527
2528 case FC_TOP_PUBLIC_LOOP:
2529 case FC_TOP_FABRIC:
2530 mutex_exit(&port->fp_mutex);
2531 job->job_counter = 1;
2532
2533 job->job_result = fp_ns_getmap(port,
2534 job, (fc_portmap_t **)job->job_private,
2535 (uint32_t *)job->job_arg,
2536 FCTL_GAN_START_ID);
2537 fctl_jobdone(job);
2538 mutex_enter(&port->fp_mutex);
2539 break;
2540
2541 case FC_TOP_PT_PT:
2542 mutex_exit(&port->fp_mutex);
2543 fctl_fillout_map(port,
2544 (fc_portmap_t **)job->job_private,
2545 (uint32_t *)job->job_arg, 1, 0, 0);
2546 fctl_jobdone(job);
2547 mutex_enter(&port->fp_mutex);
2548 break;
2549
2550 default:
2551 mutex_exit(&port->fp_mutex);
2552 fctl_jobdone(job);
2553 mutex_enter(&port->fp_mutex);
2554 break;
2555 }
2556 port->fp_task = port->fp_last_task;
2557 port->fp_last_task = FP_TASK_IDLE;
2558 mutex_exit(&port->fp_mutex);
2559 break;
2560 }
2561
2562 case JOB_PORT_OFFLINE: {
2563 fp_log_port_event(port, ESC_SUNFC_PORT_OFFLINE);
2564
2565 port->fp_last_task = port->fp_task;
2566 port->fp_task = FP_TASK_OFFLINE;
2567
2568 if (port->fp_statec_busy > 2) {
2569 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
2570 fp_port_offline(port, 0);
2571 if (--port->fp_statec_busy == 0) {
2572 port->fp_soft_state &=
2573 ~FP_SOFT_IN_STATEC_CB;
2574 }
2575 } else {
2576 fp_port_offline(port, 1);
2577 }
2578
2579 port->fp_task = port->fp_last_task;
2580 port->fp_last_task = FP_TASK_IDLE;
2581
2582 mutex_exit(&port->fp_mutex);
2583
2584 fctl_jobdone(job);
2585 break;
2586 }
2587
2588 case JOB_PORT_STARTUP: {
2589 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) {
2590 if (port->fp_statec_busy > 1) {
2591 mutex_exit(&port->fp_mutex);
2592 break;
2593 }
2594 mutex_exit(&port->fp_mutex);
2595
2596 FP_TRACE(FP_NHEAD2(9, rval),
2597 "Topology discovery failed");
2598 break;
2599 }
2600
2601 /*
2602 * Attempt building device handles in case
2603 * of private Loop.
2604 */
2605 if (port->fp_topology == FC_TOP_PRIVATE_LOOP) {
2606 job->job_counter = 1;
2607
2608 fp_get_loopmap(port, job);
2609 mutex_exit(&port->fp_mutex);
2610 fp_jobwait(job);
2611 mutex_enter(&port->fp_mutex);
2612 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) {
2613 ASSERT(port->fp_total_devices == 0);
2614 port->fp_total_devices =
2615 port->fp_dev_count;
2616 }
2617 } else if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2618 /*
2619 * Hack to avoid state changes going up early
2620 */
2621 port->fp_statec_busy++;
2622 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
2623
2624 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
2625 fp_fabric_online(port, job);
2626 job->job_flags &= ~JOB_CANCEL_ULP_NOTIFICATION;
2627 }
2628 mutex_exit(&port->fp_mutex);
2629 fctl_jobdone(job);
2630 break;
2631 }
2632
2633 case JOB_PORT_ONLINE: {
2634 char *newtop;
2635 char *oldtop;
2636 uint32_t old_top;
2637
2638 fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE);
2639
2640 /*
2641 * Bail out early if there are a lot of
2642 * state changes in the pipeline
2643 */
2644 if (port->fp_statec_busy > 1) {
2645 --port->fp_statec_busy;
2646 mutex_exit(&port->fp_mutex);
2647 fctl_jobdone(job);
2648 break;
2649 }
2650
2651 switch (old_top = port->fp_topology) {
2652 case FC_TOP_PRIVATE_LOOP:
2653 oldtop = "Private Loop";
2654 break;
2655
2656 case FC_TOP_PUBLIC_LOOP:
2657 oldtop = "Public Loop";
2658 break;
2659
2660 case FC_TOP_PT_PT:
2661 oldtop = "Point to Point";
2662 break;
2663
2664 case FC_TOP_FABRIC:
2665 oldtop = "Fabric";
2666 break;
2667
2668 default:
2669 oldtop = NULL;
2670 break;
2671 }
2672
2673 port->fp_last_task = port->fp_task;
2674 port->fp_task = FP_TASK_ONLINE;
2675
2676 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) {
2677
2678 port->fp_task = port->fp_last_task;
2679 port->fp_last_task = FP_TASK_IDLE;
2680
2681 if (port->fp_statec_busy > 1) {
2682 --port->fp_statec_busy;
2683 mutex_exit(&port->fp_mutex);
2684 break;
2685 }
2686
2687 port->fp_state = FC_STATE_OFFLINE;
2688
2689 FP_TRACE(FP_NHEAD2(9, rval),
2690 "Topology discovery failed");
2691
2692 if (--port->fp_statec_busy == 0) {
2693 port->fp_soft_state &=
2694 ~FP_SOFT_IN_STATEC_CB;
2695 }
2696
2697 if (port->fp_offline_tid == NULL) {
2698 port->fp_offline_tid =
2699 timeout(fp_offline_timeout,
2700 (caddr_t)port, fp_offline_ticks);
2701 }
2702
2703 mutex_exit(&port->fp_mutex);
2704 break;
2705 }
2706
2707 switch (port->fp_topology) {
2708 case FC_TOP_PRIVATE_LOOP:
2709 newtop = "Private Loop";
2710 break;
2711
2712 case FC_TOP_PUBLIC_LOOP:
2713 newtop = "Public Loop";
2714 break;
2715
2716 case FC_TOP_PT_PT:
2717 newtop = "Point to Point";
2718 break;
2719
2720 case FC_TOP_FABRIC:
2721 newtop = "Fabric";
2722 break;
2723
2724 default:
2725 newtop = NULL;
2726 break;
2727 }
2728
2729 if (oldtop && newtop && strcmp(oldtop, newtop)) {
2730 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
2731 "Change in FC Topology old = %s new = %s",
2732 oldtop, newtop);
2733 }
2734
2735 switch (port->fp_topology) {
2736 case FC_TOP_PRIVATE_LOOP: {
2737 int orphan = (old_top == FC_TOP_FABRIC ||
2738 old_top == FC_TOP_PUBLIC_LOOP) ? 1 : 0;
2739
2740 mutex_exit(&port->fp_mutex);
2741 fp_loop_online(port, job, orphan);
2742 break;
2743 }
2744
2745 case FC_TOP_PUBLIC_LOOP:
2746 /* FALLTHROUGH */
2747 case FC_TOP_FABRIC:
2748 fp_fabric_online(port, job);
2749 mutex_exit(&port->fp_mutex);
2750 break;
2751
2752 case FC_TOP_PT_PT:
2753 fp_p2p_online(port, job);
2754 mutex_exit(&port->fp_mutex);
2755 break;
2756
2757 default:
2758 if (--port->fp_statec_busy != 0) {
2759 /*
2760 * Wait and see what the next
2761 * state transition brings.
2762 */
2763 mutex_exit(&port->fp_mutex);
2764 break;
2765 }
2766
2767 FP_TRACE(FP_NHEAD2(9, 0),
2768 "Topology Unknown, Offlining the port..");
2769
2770 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
2771 port->fp_state = FC_STATE_OFFLINE;
2772
2773 if (port->fp_offline_tid == NULL) {
2774 port->fp_offline_tid =
2775 timeout(fp_offline_timeout,
2776 (caddr_t)port, fp_offline_ticks);
2777 }
2778 mutex_exit(&port->fp_mutex);
2779 break;
2780 }
2781
2782 mutex_enter(&port->fp_mutex);
2783
2784 port->fp_task = port->fp_last_task;
2785 port->fp_last_task = FP_TASK_IDLE;
2786
2787 mutex_exit(&port->fp_mutex);
2788
2789 fctl_jobdone(job);
2790 break;
2791 }
2792
2793 case JOB_PLOGI_GROUP: {
2794 mutex_exit(&port->fp_mutex);
2795 fp_plogi_group(port, job);
2796 break;
2797 }
2798
2799 case JOB_UNSOL_REQUEST: {
2800 mutex_exit(&port->fp_mutex);
2801 fp_handle_unsol_buf(port,
2802 (fc_unsol_buf_t *)job->job_private, job);
2803 fctl_dealloc_job(job);
2804 break;
2805 }
2806
2807 case JOB_NS_CMD: {
2808 fctl_ns_req_t *ns_cmd;
2809
2810 mutex_exit(&port->fp_mutex);
2811
2812 job->job_flags |= JOB_TYPE_FP_ASYNC;
2813 ns_cmd = (fctl_ns_req_t *)job->job_private;
2814 if (ns_cmd->ns_cmd_code < NS_GA_NXT ||
2815 ns_cmd->ns_cmd_code > NS_DA_ID) {
2816 job->job_result = FC_BADCMD;
2817 fctl_jobdone(job);
2818 break;
2819 }
2820
2821 if (FC_IS_CMD_A_REG(ns_cmd->ns_cmd_code)) {
2822 if (ns_cmd->ns_pd != NULL) {
2823 job->job_result = FC_BADOBJECT;
2824 fctl_jobdone(job);
2825 break;
2826 }
2827
2828 job->job_counter = 1;
2829
2830 rval = fp_ns_reg(port, ns_cmd->ns_pd,
2831 ns_cmd->ns_cmd_code, job, 0, KM_SLEEP);
2832
2833 if (rval != FC_SUCCESS) {
2834 job->job_result = rval;
2835 fctl_jobdone(job);
2836 }
2837 break;
2838 }
2839 job->job_result = FC_SUCCESS;
2840 job->job_counter = 1;
2841
2842 rval = fp_ns_query(port, ns_cmd, job, 0, KM_SLEEP);
2843 if (rval != FC_SUCCESS) {
2844 fctl_jobdone(job);
2845 }
2846 break;
2847 }
2848
2849 case JOB_LINK_RESET: {
2850 la_wwn_t *pwwn;
2851 uint32_t topology;
2852
2853 pwwn = (la_wwn_t *)job->job_private;
2854 ASSERT(pwwn != NULL);
2855
2856 topology = port->fp_topology;
2857 mutex_exit(&port->fp_mutex);
2858
2859 if (fctl_is_wwn_zero(pwwn) == FC_SUCCESS ||
2860 topology == FC_TOP_PRIVATE_LOOP) {
2861 job->job_flags |= JOB_TYPE_FP_ASYNC;
2862 rval = port->fp_fca_tran->fca_reset(
2863 port->fp_fca_handle, FC_FCA_LINK_RESET);
2864 job->job_result = rval;
2865 fp_jobdone(job);
2866 } else {
2867 ASSERT((job->job_flags &
2868 JOB_TYPE_FP_ASYNC) == 0);
2869
2870 if (FC_IS_TOP_SWITCH(topology)) {
2871 rval = fp_remote_lip(port, pwwn,
2872 KM_SLEEP, job);
2873 } else {
2874 rval = FC_FAILURE;
2875 }
2876 if (rval != FC_SUCCESS) {
2877 job->job_result = rval;
2878 }
2879 fctl_jobdone(job);
2880 }
2881 break;
2882 }
2883
2884 default:
2885 mutex_exit(&port->fp_mutex);
2886 job->job_result = FC_BADCMD;
2887 fctl_jobdone(job);
2888 break;
2889 }
2890 }
2891 /* NOTREACHED */
2892 }
2893
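/*
 * How work reaches the job thread above (sketch only; fctl_enque_job()
 * lives in fctl.c and its exact prototype is assumed here): a caller
 * fills in a job_request_t, queues it on the port, and either waits on
 * the job semaphore or, for JOB_TYPE_FP_ASYNC jobs, is notified through
 * fctl_jobdone().
 *
 *	job->job_code = JOB_PORT_GETMAP;
 *	job->job_private = (void *)&map;
 *	job->job_arg = (void *)&listlen;
 *	fctl_enque_job(port, job);	(wakes fp_job_handler via fp_cv)
 *	fp_jobwait(job);		(returns when the job completes)
 */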
2894
2895 /*
2896 * Perform FC port bring up initialization
2897 */
2898 static int
2899 fp_port_startup(fc_local_port_t *port, job_request_t *job)
2900 {
2901 int rval;
2902 uint32_t state;
2903 uint32_t src_id;
2904 fc_lilpmap_t *lilp_map;
2905
2906 ASSERT(MUTEX_HELD(&port->fp_mutex));
2907 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
2908
2909 FP_DTRACE(FP_NHEAD1(2, 0), "Entering fp_port_startup;"
2910 " port=%p, job=%p", port, job);
2911
2912 port->fp_topology = FC_TOP_UNKNOWN;
2913 port->fp_port_id.port_id = 0;
2914 state = FC_PORT_STATE_MASK(port->fp_state);
2915
2916 if (state == FC_STATE_OFFLINE) {
2917 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
2918 job->job_result = FC_OFFLINE;
2919 mutex_exit(&port->fp_mutex);
2920 fctl_jobdone(job);
2921 mutex_enter(&port->fp_mutex);
2922 return (FC_OFFLINE);
2923 }
2924
2925 if (state == FC_STATE_LOOP) {
2926 port->fp_port_type.port_type = FC_NS_PORT_NL;
2927 mutex_exit(&port->fp_mutex);
2928
2929 lilp_map = &port->fp_lilp_map;
2930 if ((rval = fp_get_lilpmap(port, lilp_map)) != FC_SUCCESS) {
2931 job->job_result = FC_FAILURE;
2932 fctl_jobdone(job);
2933
2934 FP_TRACE(FP_NHEAD1(9, rval),
2935 "LILP map Invalid or not present");
2936 mutex_enter(&port->fp_mutex);
2937 return (FC_FAILURE);
2938 }
2939
2940 if (lilp_map->lilp_length == 0) {
2941 job->job_result = FC_NO_MAP;
2942 fctl_jobdone(job);
2943 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
2944 "LILP map length zero");
2945 mutex_enter(&port->fp_mutex);
2946 return (FC_NO_MAP);
2947 }
2948 src_id = lilp_map->lilp_myalpa & 0xFF;
2949 } else {
2950 fc_remote_port_t *pd;
2951 fc_fca_pm_t pm;
2952 fc_fca_p2p_info_t p2p_info;
2953 int pd_recepient;
2954
2955 /*
2956 * Get P2P remote port info if possible
2957 */
2958 bzero((caddr_t)&pm, sizeof (pm));
2959
2960 pm.pm_cmd_flags = FC_FCA_PM_READ;
2961 pm.pm_cmd_code = FC_PORT_GET_P2P_INFO;
2962 pm.pm_data_len = sizeof (fc_fca_p2p_info_t);
2963 pm.pm_data_buf = (caddr_t)&p2p_info;
2964
2965 rval = port->fp_fca_tran->fca_port_manage(
2966 port->fp_fca_handle, &pm);
2967
2968 if (rval == FC_SUCCESS) {
2969 port->fp_port_id.port_id = p2p_info.fca_d_id;
2970 port->fp_port_type.port_type = FC_NS_PORT_N;
2971 port->fp_topology = FC_TOP_PT_PT;
2972 port->fp_total_devices = 1;
2973 pd_recepient = fctl_wwn_cmp(
2974 &port->fp_service_params.nport_ww_name,
2975 &p2p_info.pwwn) < 0 ?
2976 PD_PLOGI_RECEPIENT : PD_PLOGI_INITIATOR;
2977 mutex_exit(&port->fp_mutex);
2978 pd = fctl_create_remote_port(port,
2979 &p2p_info.nwwn,
2980 &p2p_info.pwwn,
2981 p2p_info.d_id,
2982 pd_recepient, KM_NOSLEEP);
2983 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup;"
2984 " P2P port=%p pd=%p fp %x pd %x", port, pd,
2985 port->fp_port_id.port_id, p2p_info.d_id);
2986 mutex_enter(&port->fp_mutex);
2987 return (FC_SUCCESS);
2988 }
2989 port->fp_port_type.port_type = FC_NS_PORT_N;
2990 mutex_exit(&port->fp_mutex);
2991 src_id = 0;
2992 }
2993
2994 job->job_counter = 1;
2995 job->job_result = FC_SUCCESS;
2996
2997 if ((rval = fp_fabric_login(port, src_id, job, FP_CMD_PLOGI_DONT_CARE,
2998 KM_SLEEP)) != FC_SUCCESS) {
2999 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
3000 job->job_result = FC_FAILURE;
3001 fctl_jobdone(job);
3002
3003 mutex_enter(&port->fp_mutex);
3004 if (port->fp_statec_busy <= 1) {
3005 mutex_exit(&port->fp_mutex);
3006 fp_printf(port, CE_NOTE, FP_LOG_ONLY, rval, NULL,
3007 "Couldn't transport FLOGI");
3008 mutex_enter(&port->fp_mutex);
3009 }
3010 return (FC_FAILURE);
3011 }
3012
3013 fp_jobwait(job);
3014
3015 mutex_enter(&port->fp_mutex);
3016 if (job->job_result == FC_SUCCESS) {
3017 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
3018 mutex_exit(&port->fp_mutex);
3019 fp_ns_init(port, job, KM_SLEEP);
3020 mutex_enter(&port->fp_mutex);
3021 }
3022 } else {
3023 if (state == FC_STATE_LOOP) {
3024 port->fp_topology = FC_TOP_PRIVATE_LOOP;
3025 port->fp_port_id.port_id =
3026 port->fp_lilp_map.lilp_myalpa & 0xFF;
3027 }
3028 }
3029
3030 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup; port=%p, job=%p",
3031 port, job);
3032
3033 return (FC_SUCCESS);
3034 }
3035
3036
3037 /*
3038 * Perform ULP invocations following FC port startup
3039 */
3040 /* ARGSUSED */
3041 static void
3042 fp_startup_done(opaque_t arg, uchar_t result)
3043 {
3044 fc_local_port_t *port = arg;
3045
3046 fp_attach_ulps(port, FC_CMD_ATTACH);
3047
3048 FP_DTRACE(FP_NHEAD1(2, 0), "fp_startup almost complete; port=%p", port);
3049 }
3050
3051
3052 /*
3053 * Perform ULP port attach
3054 */
3055 static void
3056 fp_ulp_port_attach(void *arg)
3057 {
3058 fp_soft_attach_t *att = (fp_soft_attach_t *)arg;
3059 fc_local_port_t *port = att->att_port;
3060
3061 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3062 " ULPs begin; port=%p, cmd=%x", port, att->att_cmd);
3063
3064 fctl_attach_ulps(att->att_port, att->att_cmd, &modlinkage);
3065
3066 if (att->att_need_pm_idle == B_TRUE) {
3067 fctl_idle_port(port);
3068 }
3069
3070 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3071 " ULPs end; port=%p, cmd=%x", port, att->att_cmd);
3072
3073 mutex_enter(&att->att_port->fp_mutex);
3074 att->att_port->fp_ulp_attach = 0;
3075
3076 port->fp_task = port->fp_last_task;
3077 port->fp_last_task = FP_TASK_IDLE;
3078
3079 cv_signal(&att->att_port->fp_attach_cv);
3080
3081 mutex_exit(&att->att_port->fp_mutex);
3082
3083 kmem_free(att, sizeof (fp_soft_attach_t));
3084 }
3085
3086 /*
3087 * Entry point to funnel all requests down to FCAs
3088 */
3089 static int
3090 fp_sendcmd(fc_local_port_t *port, fp_cmd_t *cmd, opaque_t fca_handle)
3091 {
3092 int rval;
3093
3094 mutex_enter(&port->fp_mutex);
3095 if (port->fp_statec_busy > 1 || (cmd->cmd_ulp_pkt != NULL &&
3096 (port->fp_statec_busy || FC_PORT_STATE_MASK(port->fp_state) ==
3097 FC_STATE_OFFLINE))) {
3098 /*
3099 * This means more than one state change is pending
3100 * at this point in time - since they are processed
3101 * serially, processing of the current one should be
3102 * failed so that we can move on to the next one.
3103 */
3104 cmd->cmd_pkt.pkt_state = FC_PKT_ELS_IN_PROGRESS;
3105 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
3106 if (cmd->cmd_job) {
3107 /*
3108 * A state change that is going to be invalidated
3109 * by another one already in the port driver's queue
3110 * need not go up to all ULPs. This will minimize
3111 * needless processing and ripples in ULP modules
3112 */
3113 cmd->cmd_job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
3114 }
3115 mutex_exit(&port->fp_mutex);
3116 return (FC_STATEC_BUSY);
3117 }
3118
3119 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
3120 cmd->cmd_pkt.pkt_state = FC_PKT_PORT_OFFLINE;
3121 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
3122 mutex_exit(&port->fp_mutex);
3123
3124 return (FC_OFFLINE);
3125 }
3126 mutex_exit(&port->fp_mutex);
3127
3128 rval = cmd->cmd_transport(fca_handle, &cmd->cmd_pkt);
3129 if (rval != FC_SUCCESS) {
3130 if (rval == FC_TRAN_BUSY) {
3131 cmd->cmd_retry_interval = fp_retry_delay;
3132 rval = fp_retry_cmd(&cmd->cmd_pkt);
3133 if (rval == FC_FAILURE) {
3134 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_BSY;
3135 }
3136 }
3137 } else {
3138 mutex_enter(&port->fp_mutex);
3139 port->fp_out_fpcmds++;
3140 mutex_exit(&port->fp_mutex);
3141 }
3142
3143 return (rval);
3144 }
3145
3146
3147 /*
3148 * Each time a timeout kicks in, walk the wait queue and decrement
3149 * the retry_interval; when the retry_interval becomes less than
3150 * or equal to zero, re-transport the command. If the re-transport
3151 * fails with BUSY, enqueue the command in the wait queue.
3152 *
3153 * In order to prevent looping forever because of commands enqueued
3154 * from within this function itself, save the current tail pointer
3155 * (in cur_tail) and exit the loop after serving this command.
3156 */
3157 static void
3158 fp_resendcmd(void *port_handle)
3159 {
3160 int rval;
3161 fc_local_port_t *port;
3162 fp_cmd_t *cmd;
3163 fp_cmd_t *cur_tail;
3164
3165 port = port_handle;
3166 mutex_enter(&port->fp_mutex);
3167 cur_tail = port->fp_wait_tail;
3168 mutex_exit(&port->fp_mutex);
3169
3170 while ((cmd = fp_deque_cmd(port)) != NULL) {
3171 cmd->cmd_retry_interval -= fp_retry_ticker;
3172 /* Check if we are detaching */
3173 if (port->fp_soft_state &
3174 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS)) {
3175 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
3176 cmd->cmd_pkt.pkt_reason = 0;
3177 fp_iodone(cmd);
3178 } else if (cmd->cmd_retry_interval <= 0) {
3179 rval = cmd->cmd_transport(port->fp_fca_handle,
3180 &cmd->cmd_pkt);
3181
3182 if (rval != FC_SUCCESS) {
3183 if (cmd->cmd_pkt.pkt_state == FC_PKT_TRAN_BSY) {
3184 if (--cmd->cmd_retry_count) {
3185 fp_enque_cmd(port, cmd);
3186 if (cmd == cur_tail) {
3187 break;
3188 }
3189 continue;
3190 }
3191 cmd->cmd_pkt.pkt_state =
3192 FC_PKT_TRAN_BSY;
3193 } else {
3194 cmd->cmd_pkt.pkt_state =
3195 FC_PKT_TRAN_ERROR;
3196 }
3197 cmd->cmd_pkt.pkt_reason = 0;
3198 fp_iodone(cmd);
3199 } else {
3200 mutex_enter(&port->fp_mutex);
3201 port->fp_out_fpcmds++;
3202 mutex_exit(&port->fp_mutex);
3203 }
3204 } else {
3205 fp_enque_cmd(port, cmd);
3206 }
3207
3208 if (cmd == cur_tail) {
3209 break;
3210 }
3211 }
3212
3213 mutex_enter(&port->fp_mutex);
3214 if (port->fp_wait_head) {
3215 timeout_id_t tid;
3216
3217 mutex_exit(&port->fp_mutex);
3218 tid = timeout(fp_resendcmd, (caddr_t)port,
3219 fp_retry_ticks);
3220 mutex_enter(&port->fp_mutex);
3221 port->fp_wait_tid = tid;
3222 } else {
3223 port->fp_wait_tid = NULL;
3224 }
3225 mutex_exit(&port->fp_mutex);
3226 }
3227
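/*
 * Retry timing, in brief (fp_retry_delay, fp_retry_ticker and
 * fp_retry_ticks are driver tunables defined elsewhere in this file):
 * a queued command starts with cmd_retry_interval = fp_retry_delay;
 * each time the timeout above fires (every fp_retry_ticks), it subtracts
 * fp_retry_ticker from the interval and re-transports the command once
 * the interval drops to zero or below.  A command therefore sits on the
 * wait queue for roughly fp_retry_delay / fp_retry_ticker timer passes.
 */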
3228
3229 /*
3230 * Handle Local, Fabric, N_Port, Transport (whatever that means) BUSY here.
3231 *
3232 * Yes, as you can see below, cmd_retry_count is used here too. That means
3233 * fewer retries are left for BUSY if there were transport failures (transport
3234 * failure means an fca_transport failure). The goal is not to exceed the
3235 * overall retry limit set in cmd_retry_count, whatever the reason for retry.
3236 *
3237 * Return Values:
3238 * FC_SUCCESS
3239 * FC_FAILURE
3240 */
3241 static int
3242 fp_retry_cmd(fc_packet_t *pkt)
3243 {
3244 fp_cmd_t *cmd;
3245
3246 cmd = pkt->pkt_ulp_private;
3247
3248 if (--cmd->cmd_retry_count) {
3249 fp_enque_cmd(cmd->cmd_port, cmd);
3250 return (FC_SUCCESS);
3251 } else {
3252 return (FC_FAILURE);
3253 }
3254 }
3255
3256
3257 /*
3258 * Queue up FC packet for deferred retry
3259 */
3260 static void
3261 fp_enque_cmd(fc_local_port_t *port, fp_cmd_t *cmd)
3262 {
3263 timeout_id_t tid;
3264
3265 ASSERT(!MUTEX_HELD(&port->fp_mutex));
3266
3267 #ifdef DEBUG
3268 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, &cmd->cmd_pkt,
3269 "Retrying ELS for %x", cmd->cmd_pkt.pkt_cmd_fhdr.d_id);
3270 #endif
3271
3272 mutex_enter(&port->fp_mutex);
3273 if (port->fp_wait_tail) {
3274 port->fp_wait_tail->cmd_next = cmd;
3275 port->fp_wait_tail = cmd;
3276 } else {
3277 ASSERT(port->fp_wait_head == NULL);
3278 port->fp_wait_head = port->fp_wait_tail = cmd;
3279 if (port->fp_wait_tid == NULL) {
3280 mutex_exit(&port->fp_mutex);
3281 tid = timeout(fp_resendcmd, (caddr_t)port,
3282 fp_retry_ticks);
3283 mutex_enter(&port->fp_mutex);
3284 port->fp_wait_tid = tid;
3285 }
3286 }
3287 mutex_exit(&port->fp_mutex);
3288 }
3289
3290
3291 /*
3292 * Handle all RJT codes
3293 */
3294 static int
3295 fp_handle_reject(fc_packet_t *pkt)
3296 {
3297 int rval = FC_FAILURE;
3298 uchar_t next_class;
3299 fp_cmd_t *cmd;
3300 fc_local_port_t *port;
3301
3302 cmd = pkt->pkt_ulp_private;
3303 port = cmd->cmd_port;
3304
3305 switch (pkt->pkt_state) {
3306 case FC_PKT_FABRIC_RJT:
3307 case FC_PKT_NPORT_RJT:
3308 if (pkt->pkt_reason == FC_REASON_CLASS_NOT_SUPP) {
3309 next_class = fp_get_nextclass(cmd->cmd_port,
3310 FC_TRAN_CLASS(pkt->pkt_tran_flags));
3311
3312 if (next_class == FC_TRAN_CLASS_INVALID) {
3313 return (rval);
3314 }
3315 pkt->pkt_tran_flags = FC_TRAN_INTR | next_class;
3316 pkt->pkt_tran_type = FC_PKT_EXCHANGE;
3317
3318 rval = fp_sendcmd(cmd->cmd_port, cmd,
3319 cmd->cmd_port->fp_fca_handle);
3320
3321 if (rval != FC_SUCCESS) {
3322 pkt->pkt_state = FC_PKT_TRAN_ERROR;
3323 }
3324 }
3325 break;
3326
3327 case FC_PKT_LS_RJT:
3328 case FC_PKT_BA_RJT:
3329 if ((pkt->pkt_reason == FC_REASON_LOGICAL_ERROR) ||
3330 (pkt->pkt_reason == FC_REASON_LOGICAL_BSY)) {
3331 cmd->cmd_retry_interval = fp_retry_delay;
3332 rval = fp_retry_cmd(pkt);
3333 }
3334 break;
3335
3336 case FC_PKT_FS_RJT:
3337 if ((pkt->pkt_reason == FC_REASON_FS_LOGICAL_BUSY) ||
3338 ((pkt->pkt_reason == FC_REASON_FS_CMD_UNABLE) &&
3339 (pkt->pkt_expln == 0x00))) {
3340 cmd->cmd_retry_interval = fp_retry_delay;
3341 rval = fp_retry_cmd(pkt);
3342 }
3343 break;
3344
3345 case FC_PKT_LOCAL_RJT:
3346 if (pkt->pkt_reason == FC_REASON_QFULL) {
3347 cmd->cmd_retry_interval = fp_retry_delay;
3348 rval = fp_retry_cmd(pkt);
3349 }
3350 break;
3351
3352 default:
3353 FP_TRACE(FP_NHEAD1(1, 0),
3354 "fp_handle_reject(): Invalid pkt_state");
3355 break;
3356 }
3357
3358 return (rval);
3359 }
3360
3361
3362 /*
3363 * Return the next class of service supported by the FCA
3364 */
3365 static uchar_t
3366 fp_get_nextclass(fc_local_port_t *port, uchar_t cur_class)
3367 {
3368 uchar_t next_class;
3369
3370 ASSERT(!MUTEX_HELD(&port->fp_mutex));
3371
3372 switch (cur_class) {
3373 case FC_TRAN_CLASS_INVALID:
3374 if (port->fp_cos & FC_NS_CLASS1) {
3375 next_class = FC_TRAN_CLASS1;
3376 break;
3377 }
3378 /* FALLTHROUGH */
3379
3380 case FC_TRAN_CLASS1:
3381 if (port->fp_cos & FC_NS_CLASS2) {
3382 next_class = FC_TRAN_CLASS2;
3383 break;
3384 }
3385 /* FALLTHROUGH */
3386
3387 case FC_TRAN_CLASS2:
3388 if (port->fp_cos & FC_NS_CLASS3) {
3389 next_class = FC_TRAN_CLASS3;
3390 break;
3391 }
3392 /* FALLTHROUGH */
3393
3394 case FC_TRAN_CLASS3:
3395 default:
3396 next_class = FC_TRAN_CLASS_INVALID;
3397 break;
3398 }
3399
3400 return (next_class);
3401 }
3402
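/*
 * Worked example: if the FCA reports fp_cos = FC_NS_CLASS2 | FC_NS_CLASS3,
 * the walk above yields FC_TRAN_CLASS_INVALID -> FC_TRAN_CLASS2,
 * FC_TRAN_CLASS2 -> FC_TRAN_CLASS3 and FC_TRAN_CLASS3 ->
 * FC_TRAN_CLASS_INVALID; that is, Class 2 is tried first and Class 3 is
 * the last resort before giving up.
 */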
3403
3404 /*
3405 * Determine if a class of service is supported by the FCA
3406 */
3407 static int
3408 fp_is_class_supported(uint32_t cos, uchar_t tran_class)
3409 {
3410 int rval;
3411
3412 switch (tran_class) {
3413 case FC_TRAN_CLASS1:
3414 if (cos & FC_NS_CLASS1) {
3415 rval = FC_SUCCESS;
3416 } else {
3417 rval = FC_FAILURE;
3418 }
3419 break;
3420
3421 case FC_TRAN_CLASS2:
3422 if (cos & FC_NS_CLASS2) {
3423 rval = FC_SUCCESS;
3424 } else {
3425 rval = FC_FAILURE;
3426 }
3427 break;
3428
3429 case FC_TRAN_CLASS3:
3430 if (cos & FC_NS_CLASS3) {
3431 rval = FC_SUCCESS;
3432 } else {
3433 rval = FC_FAILURE;
3434 }
3435 break;
3436
3437 default:
3438 rval = FC_FAILURE;
3439 break;
3440 }
3441
3442 return (rval);
3443 }
3444
3445
3446 /*
3447 * Dequeue FC packet for retry
3448 */
3449 static fp_cmd_t *
3450 fp_deque_cmd(fc_local_port_t *port)
3451 {
3452 fp_cmd_t *cmd;
3453
3454 ASSERT(!MUTEX_HELD(&port->fp_mutex));
3455
3456 mutex_enter(&port->fp_mutex);
3457
3458 if (port->fp_wait_head == NULL) {
3459 /*
3460 * To avoid races, NULL the fp_wait_tid as
3461 * we are about to exit the timeout thread.
3462 */
3463 port->fp_wait_tid = NULL;
3464 mutex_exit(&port->fp_mutex);
3465 return (NULL);
3466 }
3467
3468 cmd = port->fp_wait_head;
3469 port->fp_wait_head = cmd->cmd_next;
3470 cmd->cmd_next = NULL;
3471
3472 if (port->fp_wait_head == NULL) {
3473 port->fp_wait_tail = NULL;
3474 }
3475 mutex_exit(&port->fp_mutex);
3476
3477 return (cmd);
3478 }
3479
3480
3481 /*
3482 * Wait for job completion
3483 */
3484 static void
3485 fp_jobwait(job_request_t *job)
3486 {
3487 sema_p(&job->job_port_sema);
3488 }
3489
3490
3491 /*
3492 * Convert FC packet state to FC errno
3493 */
3494 int
3495 fp_state_to_rval(uchar_t state)
3496 {
3497 int count;
3498
3499 for (count = 0; count < sizeof (fp_xlat) /
3500 sizeof (fp_xlat[0]); count++) {
3501 if (fp_xlat[count].xlat_state == state) {
3502 return (fp_xlat[count].xlat_rval);
3503 }
3504 }
3505
3506 return (FC_FAILURE);
3507 }
3508
3509
3510 /*
3511 * For synchronous I/O requests, the caller is
3512 * expected to call fctl_jobdone() (if necessary).
3513 *
3514 * We want to preserve at least one failure in the
3515 * job_result if it happens.
3516 *
3517 */
3518 static void
3519 fp_iodone(fp_cmd_t *cmd)
3520 {
3521 fc_packet_t *ulp_pkt = cmd->cmd_ulp_pkt;
3522 job_request_t *job = cmd->cmd_job;
3523 fc_remote_port_t *pd = cmd->cmd_pkt.pkt_pd;
3524
3525 ASSERT(job != NULL);
3526 ASSERT(cmd->cmd_port != NULL);
3527 ASSERT(&cmd->cmd_pkt != NULL);
3528
3529 mutex_enter(&job->job_mutex);
3530 if (job->job_result == FC_SUCCESS) {
3531 job->job_result = fp_state_to_rval(cmd->cmd_pkt.pkt_state);
3532 }
3533 mutex_exit(&job->job_mutex);
3534
3535 if (pd) {
3536 mutex_enter(&pd->pd_mutex);
3537 pd->pd_flags = PD_IDLE;
3538 mutex_exit(&pd->pd_mutex);
3539 }
3540
3541 if (ulp_pkt) {
3542 if (pd && cmd->cmd_flags & FP_CMD_DELDEV_ON_ERROR &&
3543 FP_IS_PKT_ERROR(ulp_pkt)) {
3544 fc_local_port_t *port;
3545 fc_remote_node_t *node;
3546
3547 port = cmd->cmd_port;
3548
3549 mutex_enter(&pd->pd_mutex);
3550 pd->pd_state = PORT_DEVICE_INVALID;
3551 pd->pd_ref_count--;
3552 node = pd->pd_remote_nodep;
3553 mutex_exit(&pd->pd_mutex);
3554
3555 ASSERT(node != NULL);
3556 ASSERT(port != NULL);
3557
3558 if (fctl_destroy_remote_port(port, pd) == 0) {
3559 fctl_destroy_remote_node(node);
3560 }
3561
3562 ulp_pkt->pkt_pd = NULL;
3563 }
3564
3565 ulp_pkt->pkt_comp(ulp_pkt);
3566 }
3567
3568 fp_free_pkt(cmd);
3569 fp_jobdone(job);
3570 }
3571
3572
3573 /*
3574 * Job completion handler
3575 */
3576 static void
3577 fp_jobdone(job_request_t *job)
3578 {
3579 mutex_enter(&job->job_mutex);
3580 ASSERT(job->job_counter > 0);
3581
3582 if (--job->job_counter != 0) {
3583 mutex_exit(&job->job_mutex);
3584 return;
3585 }
3586
3587 if (job->job_ulp_pkts) {
3588 ASSERT(job->job_ulp_listlen > 0);
3589 kmem_free(job->job_ulp_pkts,
3590 sizeof (fc_packet_t *) * job->job_ulp_listlen);
3591 }
3592
3593 if (job->job_flags & JOB_TYPE_FP_ASYNC) {
3594 mutex_exit(&job->job_mutex);
3595 fctl_jobdone(job);
3596 } else {
3597 mutex_exit(&job->job_mutex);
3598 sema_v(&job->job_port_sema);
3599 }
3600 }
3601
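/*
 * Counting protocol sketch (mirrors fp_get_loopmap() and fp_loop_online()
 * later in this file): the issuer sets job_counter to the number of
 * expected completions, fires off that many commands and then blocks in
 * fp_jobwait(); every completion (or early failure) calls fp_jobdone(),
 * and only the last one posts the semaphore.
 *
 *	job->job_counter = n;
 *	for (i = 0; i < n; i++) {
 *		if (fp_port_login(port, d_id[i], job, flag,
 *		    KM_SLEEP, NULL, NULL) != FC_SUCCESS)
 *			fp_jobdone(job);
 *	}
 *	fp_jobwait(job);
 */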
3602
3603 /*
3604 * Try to perform shutdown of a port during a detach. No return
3605 * value since the detach should not fail because the port shutdown
3606 * failed.
3607 */
3608 static void
3609 fp_port_shutdown(fc_local_port_t *port, job_request_t *job)
3610 {
3611 int index;
3612 int count;
3613 int flags;
3614 fp_cmd_t *cmd;
3615 struct pwwn_hash *head;
3616 fc_remote_port_t *pd;
3617
3618 ASSERT(MUTEX_HELD(&port->fp_mutex));
3619
3620 job->job_result = FC_SUCCESS;
3621
3622 if (port->fp_taskq) {
3623 /*
3624 * We must release the mutex here to ensure that other
3625 * potential jobs can complete their processing. Many
3626 * also need this mutex.
3627 */
3628 mutex_exit(&port->fp_mutex);
3629 taskq_wait(port->fp_taskq);
3630 mutex_enter(&port->fp_mutex);
3631 }
3632
3633 if (port->fp_offline_tid) {
3634 timeout_id_t tid;
3635
3636 tid = port->fp_offline_tid;
3637 port->fp_offline_tid = NULL;
3638 mutex_exit(&port->fp_mutex);
3639 (void) untimeout(tid);
3640 mutex_enter(&port->fp_mutex);
3641 }
3642
3643 if (port->fp_wait_tid) {
3644 timeout_id_t tid;
3645
3646 tid = port->fp_wait_tid;
3647 port->fp_wait_tid = NULL;
3648 mutex_exit(&port->fp_mutex);
3649 (void) untimeout(tid);
3650 } else {
3651 mutex_exit(&port->fp_mutex);
3652 }
3653
3654 /*
3655 * While we are cancelling the timeouts, let's also return
3656 * the outstanding requests to their callers.
3657 */
3658 while ((cmd = fp_deque_cmd(port)) != NULL) {
3659 ASSERT(cmd->cmd_job != NULL);
3660 cmd->cmd_job->job_result = FC_OFFLINE;
3661 fp_iodone(cmd);
3662 }
3663
3664 /*
3665 * Gracefully LOGO with all the devices logged in.
3666 */
3667 mutex_enter(&port->fp_mutex);
3668
3669 for (count = index = 0; index < pwwn_table_size; index++) {
3670 head = &port->fp_pwwn_table[index];
3671 pd = head->pwwn_head;
3672 while (pd != NULL) {
3673 mutex_enter(&pd->pd_mutex);
3674 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
3675 count++;
3676 }
3677 mutex_exit(&pd->pd_mutex);
3678 pd = pd->pd_wwn_hnext;
3679 }
3680 }
3681
3682 if (job->job_flags & JOB_TYPE_FP_ASYNC) {
3683 flags = job->job_flags;
3684 job->job_flags &= ~JOB_TYPE_FP_ASYNC;
3685 } else {
3686 flags = 0;
3687 }
3688 if (count) {
3689 job->job_counter = count;
3690
3691 for (index = 0; index < pwwn_table_size; index++) {
3692 head = &port->fp_pwwn_table[index];
3693 pd = head->pwwn_head;
3694 while (pd != NULL) {
3695 mutex_enter(&pd->pd_mutex);
3696 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
3697 ASSERT(pd->pd_login_count > 0);
3698 /*
3699 * Force the counter to ONE in order
3700 * for us to really send the LOGO ELS.
3701 */
3702 pd->pd_login_count = 1;
3703 mutex_exit(&pd->pd_mutex);
3704 mutex_exit(&port->fp_mutex);
3705 (void) fp_logout(port, pd, job);
3706 mutex_enter(&port->fp_mutex);
3707 } else {
3708 mutex_exit(&pd->pd_mutex);
3709 }
3710 pd = pd->pd_wwn_hnext;
3711 }
3712 }
3713 mutex_exit(&port->fp_mutex);
3714 fp_jobwait(job);
3715 } else {
3716 mutex_exit(&port->fp_mutex);
3717 }
3718
3719 if (job->job_result != FC_SUCCESS) {
3720 FP_TRACE(FP_NHEAD1(9, 0),
3721 "Can't logout all devices. Proceeding with"
3722 " port shutdown");
3723 job->job_result = FC_SUCCESS;
3724 }
3725
3726 fctl_destroy_all_remote_ports(port);
3727
3728 mutex_enter(&port->fp_mutex);
3729 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
3730 mutex_exit(&port->fp_mutex);
3731 fp_ns_fini(port, job);
3732 } else {
3733 mutex_exit(&port->fp_mutex);
3734 }
3735
3736 if (flags) {
3737 job->job_flags = flags;
3738 }
3739
3740 mutex_enter(&port->fp_mutex);
3741
3742 }
3743
3744
3745 /*
3746 * Build the port driver's data structures based on the AL_PA list
3747 */
3748 static void
3749 fp_get_loopmap(fc_local_port_t *port, job_request_t *job)
3750 {
3751 int rval;
3752 int flag;
3753 int count;
3754 uint32_t d_id;
3755 fc_remote_port_t *pd;
3756 fc_lilpmap_t *lilp_map;
3757
3758 ASSERT(MUTEX_HELD(&port->fp_mutex));
3759
3760 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
3761 job->job_result = FC_OFFLINE;
3762 mutex_exit(&port->fp_mutex);
3763 fp_jobdone(job);
3764 mutex_enter(&port->fp_mutex);
3765 return;
3766 }
3767
3768 if (port->fp_lilp_map.lilp_length == 0) {
3769 mutex_exit(&port->fp_mutex);
3770 job->job_result = FC_NO_MAP;
3771 fp_jobdone(job);
3772 mutex_enter(&port->fp_mutex);
3773 return;
3774 }
3775 mutex_exit(&port->fp_mutex);
3776
3777 lilp_map = &port->fp_lilp_map;
3778 job->job_counter = lilp_map->lilp_length;
3779
3780 if (job->job_code == JOB_PORT_GETMAP_PLOGI_ALL) {
3781 flag = FP_CMD_PLOGI_RETAIN;
3782 } else {
3783 flag = FP_CMD_PLOGI_DONT_CARE;
3784 }
3785
3786 for (count = 0; count < lilp_map->lilp_length; count++) {
3787 d_id = lilp_map->lilp_alpalist[count];
3788
3789 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) {
3790 fp_jobdone(job);
3791 continue;
3792 }
3793
3794 pd = fctl_get_remote_port_by_did(port, d_id);
3795 if (pd) {
3796 mutex_enter(&pd->pd_mutex);
3797 if (flag == FP_CMD_PLOGI_DONT_CARE ||
3798 pd->pd_state == PORT_DEVICE_LOGGED_IN) {
3799 mutex_exit(&pd->pd_mutex);
3800 fp_jobdone(job);
3801 continue;
3802 }
3803 mutex_exit(&pd->pd_mutex);
3804 }
3805
3806 rval = fp_port_login(port, d_id, job, flag,
3807 KM_SLEEP, pd, NULL);
3808 if (rval != FC_SUCCESS) {
3809 fp_jobdone(job);
3810 }
3811 }
3812
3813 mutex_enter(&port->fp_mutex);
3814 }
3815
3816
3817 /*
3818 * Perform loop ONLINE processing
3819 */
3820 static void
3821 fp_loop_online(fc_local_port_t *port, job_request_t *job, int orphan)
3822 {
3823 int count;
3824 int rval;
3825 uint32_t d_id;
3826 uint32_t listlen;
3827 fc_lilpmap_t *lilp_map;
3828 fc_remote_port_t *pd;
3829 fc_portmap_t *changelist;
3830
3831 ASSERT(!MUTEX_HELD(&port->fp_mutex));
3832
3833 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online begin; port=%p, job=%p",
3834 port, job);
3835
3836 lilp_map = &port->fp_lilp_map;
3837
3838 if (lilp_map->lilp_length) {
3839 mutex_enter(&port->fp_mutex);
3840 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) {
3841 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET;
3842 mutex_exit(&port->fp_mutex);
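/*
 * After an FCA core reset, wait out one resource recovery
 * timeout (PLDA_RR_TOV is in seconds) before sending any
 * PLOGIs on the loop.
 */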
3843 delay(drv_usectohz(PLDA_RR_TOV * 1000 * 1000));
3844 } else {
3845 mutex_exit(&port->fp_mutex);
3846 }
3847
3848 job->job_counter = lilp_map->lilp_length;
3849
3850 for (count = 0; count < lilp_map->lilp_length; count++) {
3851 d_id = lilp_map->lilp_alpalist[count];
3852
3853 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) {
3854 fp_jobdone(job);
3855 continue;
3856 }
3857
3858 pd = fctl_get_remote_port_by_did(port, d_id);
3859 if (pd != NULL) {
3860 #ifdef DEBUG
3861 mutex_enter(&pd->pd_mutex);
3862 if (pd->pd_recepient == PD_PLOGI_INITIATOR) {
3863 ASSERT(pd->pd_type != PORT_DEVICE_OLD);
3864 }
3865 mutex_exit(&pd->pd_mutex);
3866 #endif
3867 fp_jobdone(job);
3868 continue;
3869 }
3870
3871 rval = fp_port_login(port, d_id, job,
3872 FP_CMD_PLOGI_DONT_CARE, KM_SLEEP, pd, NULL);
3873
3874 if (rval != FC_SUCCESS) {
3875 fp_jobdone(job);
3876 }
3877 }
3878 fp_jobwait(job);
3879 }
3880 listlen = 0;
3881 changelist = NULL;
3882
3883 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) {
3884 mutex_enter(&port->fp_mutex);
3885 ASSERT(port->fp_statec_busy > 0);
3886 if (port->fp_statec_busy == 1) {
3887 mutex_exit(&port->fp_mutex);
3888 fctl_fillout_map(port, &changelist, &listlen,
3889 1, 0, orphan);
3890
3891 mutex_enter(&port->fp_mutex);
3892 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) {
3893 ASSERT(port->fp_total_devices == 0);
3894 port->fp_total_devices = port->fp_dev_count;
3895 }
3896 } else {
3897 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
3898 }
3899 mutex_exit(&port->fp_mutex);
3900 }
3901
3902 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) {
3903 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist,
3904 listlen, listlen, KM_SLEEP);
3905 } else {
3906 mutex_enter(&port->fp_mutex);
3907 if (--port->fp_statec_busy == 0) {
3908 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
3909 }
3910 ASSERT(changelist == NULL && listlen == 0);
3911 mutex_exit(&port->fp_mutex);
3912 }
3913
3914 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online end; port=%p, job=%p",
3915 port, job);
3916 }
3917
3918
3919 /*
3920 * Get an Arbitrated Loop map from the underlying FCA
3921 */
3922 static int
3923 fp_get_lilpmap(fc_local_port_t *port, fc_lilpmap_t *lilp_map)
3924 {
3925 int rval;
3926
3927 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap Begin; port=%p, map=%p",
3928 port, lilp_map);
3929
3930 bzero((caddr_t)lilp_map, sizeof (fc_lilpmap_t));
3931 rval = port->fp_fca_tran->fca_getmap(port->fp_fca_handle, lilp_map);
3932 lilp_map->lilp_magic &= 0xFF; /* Ignore upper byte */
3933
3934 if (rval != FC_SUCCESS) {
3935 rval = FC_NO_MAP;
3936 } else if (lilp_map->lilp_length == 0 &&
3937 (lilp_map->lilp_magic >= MAGIC_LISM &&
3938 lilp_map->lilp_magic < MAGIC_LIRP)) {
3939 uchar_t lilp_length;
3940
3941 /*
3942 * Since the map length is zero, provide all
3943 * the valid AL_PAs for NL_ports discovery.
3944 */
3945 lilp_length = sizeof (fp_valid_alpas) /
3946 sizeof (fp_valid_alpas[0]);
3947 lilp_map->lilp_length = lilp_length;
3948 bcopy(fp_valid_alpas, lilp_map->lilp_alpalist,
3949 lilp_length);
3950 } else {
3951 rval = fp_validate_lilp_map(lilp_map);
3952
3953 if (rval == FC_SUCCESS) {
3954 mutex_enter(&port->fp_mutex);
3955 port->fp_total_devices = lilp_map->lilp_length - 1;
3956 mutex_exit(&port->fp_mutex);
3957 }
3958 }
3959
3960 mutex_enter(&port->fp_mutex);
3961 if (rval != FC_SUCCESS && !(port->fp_soft_state & FP_SOFT_BAD_LINK)) {
3962 port->fp_soft_state |= FP_SOFT_BAD_LINK;
3963 mutex_exit(&port->fp_mutex);
3964
3965 if (port->fp_fca_tran->fca_reset(port->fp_fca_handle,
3966 FC_FCA_RESET_CORE) != FC_SUCCESS) {
3967 FP_TRACE(FP_NHEAD1(9, 0),
3968 "FCA reset failed after LILP map was found"
3969 " to be invalid");
3970 }
3971 } else if (rval == FC_SUCCESS) {
3972 port->fp_soft_state &= ~FP_SOFT_BAD_LINK;
3973 mutex_exit(&port->fp_mutex);
3974 } else {
3975 mutex_exit(&port->fp_mutex);
3976 }
3977
3978 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap End; port=%p, map=%p", port,
3979 lilp_map);
3980
3981 return (rval);
3982 }
3983
3984
3985 /*
3986 * Perform Fabric Login:
3987 *
3988 * Return Values:
3989 * FC_SUCCESS
3990 * FC_FAILURE
3991 * FC_NOMEM
3992 * FC_TRANSPORT_ERROR
3993 * and a lot others defined in fc_error.h
3994 */
3995 static int
3996 fp_fabric_login(fc_local_port_t *port, uint32_t s_id, job_request_t *job,
3997 int flag, int sleep)
3998 {
3999 int rval;
4000 fp_cmd_t *cmd;
4001 uchar_t class;
4002
4003 ASSERT(!MUTEX_HELD(&port->fp_mutex));
4004
4005 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login Begin; port=%p, job=%p",
4006 port, job);
4007
4008 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID);
4009 if (class == FC_TRAN_CLASS_INVALID) {
4010 return (FC_ELS_BAD);
4011 }
4012
4013 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t),
4014 sizeof (la_els_logi_t), sleep, NULL);
4015 if (cmd == NULL) {
4016 return (FC_NOMEM);
4017 }
4018
4019 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
4020 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
4021 cmd->cmd_flags = flag;
4022 cmd->cmd_retry_count = fp_retry_count;
4023 cmd->cmd_ulp_pkt = NULL;
4024
4025 fp_xlogi_init(port, cmd, s_id, 0xFFFFFE, fp_flogi_intr,
4026 job, LA_ELS_FLOGI);
4027
4028 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
4029 if (rval != FC_SUCCESS) {
4030 fp_free_pkt(cmd);
4031 }
4032
4033 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login End; port=%p, job=%p",
4034 port, job);
4035
4036 return (rval);
4037 }
4038
4039
4040 /*
4041 * In some scenarios, such as during private loop device discovery,
4042 * the fc_remote_port_t data structure isn't allocated yet; the
4043 * allocation is done when the PLOGI succeeds. In other scenarios,
4044 * such as Fabric topology, the fc_remote_port_t is already created
4045 * and initialized with appropriate values (as the NS provides
4046 * them).
4047 */
4048 static int
4049 fp_port_login(fc_local_port_t *port, uint32_t d_id, job_request_t *job,
4050 int cmd_flag, int sleep, fc_remote_port_t *pd, fc_packet_t *ulp_pkt)
4051 {
4052 uchar_t class;
4053 fp_cmd_t *cmd;
4054 uint32_t src_id;
4055 fc_remote_port_t *tmp_pd;
4056 int relogin;
4057 int found = 0;
4058
4059 #ifdef DEBUG
4060 if (pd == NULL) {
4061 ASSERT(fctl_get_remote_port_by_did(port, d_id) == NULL);
4062 }
4063 #endif
4064 ASSERT(job->job_counter > 0);
4065
4066 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID);
4067 if (class == FC_TRAN_CLASS_INVALID) {
4068 return (FC_ELS_BAD);
4069 }
4070
4071 mutex_enter(&port->fp_mutex);
4072 tmp_pd = fctl_lookup_pd_by_did(port, d_id);
4073 mutex_exit(&port->fp_mutex);
4074
4075 relogin = 1;
4076 if (tmp_pd) {
4077 mutex_enter(&tmp_pd->pd_mutex);
4078 if ((tmp_pd->pd_aux_flags & PD_DISABLE_RELOGIN) &&
4079 !(tmp_pd->pd_aux_flags & PD_LOGGED_OUT)) {
4080 tmp_pd->pd_state = PORT_DEVICE_LOGGED_IN;
4081 relogin = 0;
4082 }
4083 mutex_exit(&tmp_pd->pd_mutex);
4084 }
4085
4086 if (!relogin) {
4087 mutex_enter(&tmp_pd->pd_mutex);
4088 if (tmp_pd->pd_state == PORT_DEVICE_LOGGED_IN) {
4089 cmd_flag |= FP_CMD_PLOGI_RETAIN;
4090 }
4091 mutex_exit(&tmp_pd->pd_mutex);
4092
4093 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t),
4094 sizeof (la_els_adisc_t), sleep, tmp_pd);
4095 if (cmd == NULL) {
4096 return (FC_NOMEM);
4097 }
4098
4099 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
4100 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
4101 cmd->cmd_flags = cmd_flag;
4102 cmd->cmd_retry_count = fp_retry_count;
4103 cmd->cmd_ulp_pkt = ulp_pkt;
4104
4105 mutex_enter(&port->fp_mutex);
4106 mutex_enter(&tmp_pd->pd_mutex);
4107 fp_adisc_init(cmd, job);
4108 mutex_exit(&tmp_pd->pd_mutex);
4109 mutex_exit(&port->fp_mutex);
4110
4111 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_adisc_t);
4112 cmd->cmd_pkt.pkt_rsplen = sizeof (la_els_adisc_t);
4113
4114 } else {
4115 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t),
4116 sizeof (la_els_logi_t), sleep, pd);
4117 if (cmd == NULL) {
4118 return (FC_NOMEM);
4119 }
4120
4121 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
4122 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
4123 cmd->cmd_flags = cmd_flag;
4124 cmd->cmd_retry_count = fp_retry_count;
4125 cmd->cmd_ulp_pkt = ulp_pkt;
4126
4127 mutex_enter(&port->fp_mutex);
4128 src_id = port->fp_port_id.port_id;
4129 mutex_exit(&port->fp_mutex);
4130
4131 fp_xlogi_init(port, cmd, src_id, d_id, fp_plogi_intr,
4132 job, LA_ELS_PLOGI);
4133 }
4134
4135 if (pd) {
4136 mutex_enter(&pd->pd_mutex);
4137 pd->pd_flags = PD_ELS_IN_PROGRESS;
4138 mutex_exit(&pd->pd_mutex);
4139 }
4140
4141	/* NPIV check to make sure we don't log into ourselves */
4142 if (relogin &&
4143 ((port->fp_npiv_type == FC_NPIV_PORT) ||
4144 (port->fp_npiv_flag == FC_NPIV_ENABLE))) {
4145 if ((d_id & 0xffff00) ==
4146 (port->fp_port_id.port_id & 0xffff00)) {
4147 found = 1;
4148 }
4149 }
4150
4151 if (found ||
4152 (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS)) {
4153 if (found) {
4154 fc_packet_t *pkt = &cmd->cmd_pkt;
4155 pkt->pkt_state = FC_PKT_NPORT_RJT;
4156 }
4157 if (pd) {
4158 mutex_enter(&pd->pd_mutex);
4159 pd->pd_flags = PD_IDLE;
4160 mutex_exit(&pd->pd_mutex);
4161 }
4162
4163 if (ulp_pkt) {
4164 fc_packet_t *pkt = &cmd->cmd_pkt;
4165
4166 ulp_pkt->pkt_state = pkt->pkt_state;
4167 ulp_pkt->pkt_reason = pkt->pkt_reason;
4168 ulp_pkt->pkt_action = pkt->pkt_action;
4169 ulp_pkt->pkt_expln = pkt->pkt_expln;
4170 }
4171
4172 fp_iodone(cmd);
4173 }
4174
4175 return (FC_SUCCESS);
4176 }
4177
4178
4179 /*
4180 * Register the LOGIN parameters with a port device
4181 */
4182 static void
4183 fp_register_login(ddi_acc_handle_t *handle, fc_remote_port_t *pd,
4184 la_els_logi_t *acc, uchar_t class)
4185 {
4186 fc_remote_node_t *node;
4187
4188 ASSERT(pd != NULL);
4189
4190 mutex_enter(&pd->pd_mutex);
4191 node = pd->pd_remote_nodep;
4192 if (pd->pd_login_count == 0) {
4193 pd->pd_login_count++;
4194 }
4195
4196 if (handle) {
4197 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_csp,
4198 (uint8_t *)&acc->common_service,
4199 sizeof (acc->common_service), DDI_DEV_AUTOINCR);
4200 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_clsp1,
4201 (uint8_t *)&acc->class_1, sizeof (acc->class_1),
4202 DDI_DEV_AUTOINCR);
4203 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_clsp2,
4204 (uint8_t *)&acc->class_2, sizeof (acc->class_2),
4205 DDI_DEV_AUTOINCR);
4206 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_clsp3,
4207 (uint8_t *)&acc->class_3, sizeof (acc->class_3),
4208 DDI_DEV_AUTOINCR);
4209 } else {
4210 pd->pd_csp = acc->common_service;
4211 pd->pd_clsp1 = acc->class_1;
4212 pd->pd_clsp2 = acc->class_2;
4213 pd->pd_clsp3 = acc->class_3;
4214 }
4215
4216 pd->pd_state = PORT_DEVICE_LOGGED_IN;
4217 pd->pd_login_class = class;
4218 mutex_exit(&pd->pd_mutex);
4219
4220 #ifndef __lock_lint
4221 ASSERT(fctl_get_remote_port_by_did(pd->pd_port,
4222 pd->pd_port_id.port_id) == pd);
4223 #endif
4224
4225 mutex_enter(&node->fd_mutex);
4226 if (handle) {
4227 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)node->fd_vv,
4228 (uint8_t *)acc->vendor_version, sizeof (node->fd_vv),
4229 DDI_DEV_AUTOINCR);
4230 } else {
4231 bcopy(acc->vendor_version, node->fd_vv, sizeof (node->fd_vv));
4232 }
4233 mutex_exit(&node->fd_mutex);
4234 }
4235
4236
4237 /*
4238 * Mark the remote port as OFFLINE
4239 */
4240 static void
4241 fp_remote_port_offline(fc_remote_port_t *pd)
4242 {
4243 ASSERT(MUTEX_HELD(&pd->pd_mutex));
4244 if (pd->pd_login_count &&
4245 ((pd->pd_aux_flags & PD_DISABLE_RELOGIN) == 0)) {
4246 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service));
4247 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param));
4248 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param));
4249 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param));
4250 pd->pd_login_class = 0;
4251 }
4252 pd->pd_type = PORT_DEVICE_OLD;
4253 pd->pd_flags = PD_IDLE;
4254 fctl_tc_reset(&pd->pd_logo_tc);
4255 }
4256
4257
4258 /*
4259 * Deregistration of a port device
4260 */
4261 static void
4262 fp_unregister_login(fc_remote_port_t *pd)
4263 {
4264 fc_remote_node_t *node;
4265
4266 ASSERT(pd != NULL);
4267
4268 mutex_enter(&pd->pd_mutex);
4269 pd->pd_login_count = 0;
4270 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service));
4271 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param));
4272 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param));
4273 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param));
4274
4275 pd->pd_state = PORT_DEVICE_VALID;
4276 pd->pd_login_class = 0;
4277 node = pd->pd_remote_nodep;
4278 mutex_exit(&pd->pd_mutex);
4279
4280 mutex_enter(&node->fd_mutex);
4281 bzero(node->fd_vv, sizeof (node->fd_vv));
4282 mutex_exit(&node->fd_mutex);
4283 }
4284
4285
4286 /*
4287 * Handle OFFLINE state of an FCA port
4288 */
4289 static void
4290 fp_port_offline(fc_local_port_t *port, int notify)
4291 {
4292 int index;
4293 int statec;
4294 timeout_id_t tid;
4295 struct pwwn_hash *head;
4296 fc_remote_port_t *pd;
4297
4298 ASSERT(MUTEX_HELD(&port->fp_mutex));
4299
4300 for (index = 0; index < pwwn_table_size; index++) {
4301 head = &port->fp_pwwn_table[index];
4302 pd = head->pwwn_head;
4303 while (pd != NULL) {
4304 mutex_enter(&pd->pd_mutex);
4305 fp_remote_port_offline(pd);
4306 fctl_delist_did_table(port, pd);
4307 mutex_exit(&pd->pd_mutex);
4308 pd = pd->pd_wwn_hnext;
4309 }
4310 }
4311 port->fp_total_devices = 0;
4312
4313 statec = 0;
4314 if (notify) {
4315 /*
4316 * Decrement the statec busy counter as we
4317 * are almost done with handling the state
4318 * change
4319 */
4320 ASSERT(port->fp_statec_busy > 0);
4321 if (--port->fp_statec_busy == 0) {
4322 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
4323 }
4324 mutex_exit(&port->fp_mutex);
4325 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, NULL,
4326 0, 0, KM_SLEEP);
4327 mutex_enter(&port->fp_mutex);
4328
4329 if (port->fp_statec_busy) {
4330 statec++;
4331 }
4332 } else if (port->fp_statec_busy > 1) {
4333 statec++;
4334 }
4335
4336 if ((tid = port->fp_offline_tid) != NULL) {
4337 mutex_exit(&port->fp_mutex);
4338 (void) untimeout(tid);
4339 mutex_enter(&port->fp_mutex);
4340 }
4341
4342 if (!statec) {
4343 port->fp_offline_tid = timeout(fp_offline_timeout,
4344 (caddr_t)port, fp_offline_ticks);
4345 }
4346 }
4347
4348
4349 /*
4350 * Offline devices and send up a state change notification to ULPs
4351 */
4352 static void
4353 fp_offline_timeout(void *port_handle)
4354 {
4355 int ret;
4356 fc_local_port_t *port = port_handle;
4357 uint32_t listlen = 0;
4358 fc_portmap_t *changelist = NULL;
4359
4360 mutex_enter(&port->fp_mutex);
4361
4362 if ((FC_PORT_STATE_MASK(port->fp_state) != FC_STATE_OFFLINE) ||
4363 (port->fp_soft_state &
4364 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) ||
4365 port->fp_dev_count == 0 || port->fp_statec_busy) {
4366 port->fp_offline_tid = NULL;
4367 mutex_exit(&port->fp_mutex);
4368 return;
4369 }
4370
4371 mutex_exit(&port->fp_mutex);
4372
4373 FP_TRACE(FP_NHEAD2(9, 0), "OFFLINE timeout");
4374
4375 if (port->fp_options & FP_CORE_ON_OFFLINE_TIMEOUT) {
4376 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle,
4377 FC_FCA_CORE)) != FC_SUCCESS) {
4378 FP_TRACE(FP_NHEAD1(9, ret),
4379 "Failed to force adapter dump");
4380 } else {
4381 FP_TRACE(FP_NHEAD1(9, 0),
4382 "Forced adapter dump successfully");
4383 }
4384 } else if (port->fp_options & FP_RESET_CORE_ON_OFFLINE_TIMEOUT) {
4385 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle,
4386 FC_FCA_RESET_CORE)) != FC_SUCCESS) {
4387 FP_TRACE(FP_NHEAD1(9, ret),
4388 "Failed to force adapter dump and reset");
4389 } else {
4390 FP_TRACE(FP_NHEAD1(9, 0),
4391 "Forced adapter dump and reset successfully");
4392 }
4393 }
4394
4395 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0);
4396 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, changelist,
4397 listlen, listlen, KM_SLEEP);
4398
4399 mutex_enter(&port->fp_mutex);
4400 port->fp_offline_tid = NULL;
4401 mutex_exit(&port->fp_mutex);
4402 }
4403
4404
4405 /*
4406 * Perform general purpose ELS request initialization
4407 */
4408 static void
4409 fp_els_init(fp_cmd_t *cmd, uint32_t s_id, uint32_t d_id,
4410 void (*comp) (), job_request_t *job)
4411 {
4412 fc_packet_t *pkt;
4413
4414 pkt = &cmd->cmd_pkt;
4415 cmd->cmd_job = job;
4416
4417 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_ELS_REQ;
4418 pkt->pkt_cmd_fhdr.d_id = d_id;
4419 pkt->pkt_cmd_fhdr.s_id = s_id;
4420 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
4421 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
4422 pkt->pkt_cmd_fhdr.seq_id = 0;
4423 pkt->pkt_cmd_fhdr.df_ctl = 0;
4424 pkt->pkt_cmd_fhdr.seq_cnt = 0;
4425 pkt->pkt_cmd_fhdr.ox_id = 0xffff;
4426 pkt->pkt_cmd_fhdr.rx_id = 0xffff;
4427 pkt->pkt_cmd_fhdr.ro = 0;
4428 pkt->pkt_cmd_fhdr.rsvd = 0;
4429 pkt->pkt_comp = comp;
4430 pkt->pkt_timeout = FP_ELS_TIMEOUT;
4431 }
4432
4433
4434 /*
4435 * Initialize PLOGI/FLOGI ELS request
4436 */
4437 static void
4438 fp_xlogi_init(fc_local_port_t *port, fp_cmd_t *cmd, uint32_t s_id,
4439 uint32_t d_id, void (*intr) (), job_request_t *job, uchar_t ls_code)
4440 {
4441 ls_code_t payload;
4442
4443 fp_els_init(cmd, s_id, d_id, intr, job);
4444 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
4445
4446 payload.ls_code = ls_code;
4447 payload.mbz = 0;
4448
4449 FC_SET_CMD(port, cmd->cmd_pkt.pkt_cmd_acc,
4450 (uint8_t *)&port->fp_service_params,
4451 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (port->fp_service_params),
4452 DDI_DEV_AUTOINCR);
4453
4454 FC_SET_CMD(port, cmd->cmd_pkt.pkt_cmd_acc, (uint8_t *)&payload,
4455 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (payload),
4456 DDI_DEV_AUTOINCR);
4457 }
4458
4459
4460 /*
4461 * Initialize LOGO ELS request
4462 */
4463 static void
4464 fp_logo_init(fc_remote_port_t *pd, fp_cmd_t *cmd, job_request_t *job)
4465 {
4466 fc_local_port_t *port;
4467 fc_packet_t *pkt;
4468 la_els_logo_t payload;
4469
4470 port = pd->pd_port;
4471 pkt = &cmd->cmd_pkt;
4472 ASSERT(MUTEX_HELD(&port->fp_mutex));
4473 ASSERT(MUTEX_HELD(&pd->pd_mutex));
4474
4475 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id,
4476 fp_logo_intr, job);
4477
4478 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
4479
4480 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
4481 pkt->pkt_tran_type = FC_PKT_EXCHANGE;
4482
4483 payload.ls_code.ls_code = LA_ELS_LOGO;
4484 payload.ls_code.mbz = 0;
4485 payload.nport_ww_name = port->fp_service_params.nport_ww_name;
4486 payload.nport_id = port->fp_port_id;
4487
4488 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
4489 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
4490 }
4491
4492 /*
4493 * Initialize RNID ELS request
4494 */
4495 static void
4496 fp_rnid_init(fp_cmd_t *cmd, uint16_t flag, job_request_t *job)
4497 {
4498 fc_local_port_t *port;
4499 fc_packet_t *pkt;
4500 la_els_rnid_t payload;
4501 fc_remote_port_t *pd;
4502
4503 pkt = &cmd->cmd_pkt;
4504 pd = pkt->pkt_pd;
4505 port = pd->pd_port;
4506
4507 ASSERT(MUTEX_HELD(&port->fp_mutex));
4508 ASSERT(MUTEX_HELD(&pd->pd_mutex));
4509
4510 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id,
4511 fp_rnid_intr, job);
4512
4513 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
4514 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
4515 pkt->pkt_tran_type = FC_PKT_EXCHANGE;
4516
4517 payload.ls_code.ls_code = LA_ELS_RNID;
4518 payload.ls_code.mbz = 0;
4519 payload.data_format = flag;
4520
4521 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
4522 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
4523 }
4524
4525 /*
4526 * Initialize RLS ELS request
4527 */
4528 static void
4529 fp_rls_init(fp_cmd_t *cmd, job_request_t *job)
4530 {
4531 fc_local_port_t *port;
4532 fc_packet_t *pkt;
4533 la_els_rls_t payload;
4534 fc_remote_port_t *pd;
4535
4536 pkt = &cmd->cmd_pkt;
4537 pd = pkt->pkt_pd;
4538 port = pd->pd_port;
4539
4540 ASSERT(MUTEX_HELD(&port->fp_mutex));
4541 ASSERT(MUTEX_HELD(&pd->pd_mutex));
4542
4543 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id,
4544 fp_rls_intr, job);
4545
4546 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
4547 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
4548 pkt->pkt_tran_type = FC_PKT_EXCHANGE;
4549
4550 payload.ls_code.ls_code = LA_ELS_RLS;
4551 payload.ls_code.mbz = 0;
4552 payload.rls_portid = port->fp_port_id;
4553
4554 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
4555 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
4556 }
4557
4558
4559 /*
4560 * Initialize an ADISC ELS request
4561 */
4562 static void
4563 fp_adisc_init(fp_cmd_t *cmd, job_request_t *job)
4564 {
4565 fc_local_port_t *port;
4566 fc_packet_t *pkt;
4567 la_els_adisc_t payload;
4568 fc_remote_port_t *pd;
4569
4570 pkt = &cmd->cmd_pkt;
4571 pd = pkt->pkt_pd;
4572 port = pd->pd_port;
4573
4574 ASSERT(MUTEX_HELD(&pd->pd_mutex));
4575 ASSERT(MUTEX_HELD(&pd->pd_port->fp_mutex));
4576
4577 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id,
4578 fp_adisc_intr, job);
4579
4580 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
4581 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
4582 pkt->pkt_tran_type = FC_PKT_EXCHANGE;
4583
4584 payload.ls_code.ls_code = LA_ELS_ADISC;
4585 payload.ls_code.mbz = 0;
4586 payload.nport_id = port->fp_port_id;
4587 payload.port_wwn = port->fp_service_params.nport_ww_name;
4588 payload.node_wwn = port->fp_service_params.node_ww_name;
4589 payload.hard_addr = port->fp_hard_addr;
4590
4591 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
4592 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
4593 }
4594
4595
4596 /*
4597 * Send up a state change notification to ULPs.
4598 * Spawns a call to fctl_ulp_statec_cb in a taskq thread.
4599 */
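/*
 * Caller-side sketch: a changelist is typically built with
 * fctl_fillout_map() and handed off here, as fp_offline_timeout() does:
 *
 *	fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0);
 *	(void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, changelist,
 *	    listlen, listlen, KM_SLEEP);
 *
 * Ownership of the changelist passes to this routine; if the clist
 * allocation fails, the changelist is freed here and FC_NOMEM is
 * returned.
 */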
4600 static int
4601 fp_ulp_statec_cb(fc_local_port_t *port, uint32_t state,
4602 fc_portmap_t *changelist, uint32_t listlen, uint32_t alloc_len, int sleep)
4603 {
4604 fc_port_clist_t *clist;
4605 fc_remote_port_t *pd;
4606 int count;
4607
4608 ASSERT(!MUTEX_HELD(&port->fp_mutex));
4609
4610 clist = kmem_zalloc(sizeof (*clist), sleep);
4611 if (clist == NULL) {
4612 kmem_free(changelist, alloc_len * sizeof (*changelist));
4613 return (FC_NOMEM);
4614 }
4615
4616 clist->clist_state = state;
4617
4618 mutex_enter(&port->fp_mutex);
4619 clist->clist_flags = port->fp_topology;
4620 mutex_exit(&port->fp_mutex);
4621
4622 clist->clist_port = (opaque_t)port;
4623 clist->clist_len = listlen;
4624 clist->clist_size = alloc_len;
4625 clist->clist_map = changelist;
4626
4627 /*
4628 * Bump the reference count of each fc_remote_port_t in this changelist.
4629 * This is necessary since these devices will be sitting in a taskq
4630 * and referenced later. When the state change notification is
4631 * complete, the reference counts will be decremented.
4632 */
4633 for (count = 0; count < clist->clist_len; count++) {
4634 pd = clist->clist_map[count].map_pd;
4635
4636 if (pd != NULL) {
4637 mutex_enter(&pd->pd_mutex);
4638 ASSERT((pd->pd_ref_count >= 0) ||
4639 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS));
4640 pd->pd_ref_count++;
4641
4642 if (clist->clist_map[count].map_state !=
4643 PORT_DEVICE_INVALID) {
4644 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS;
4645 }
4646
4647 mutex_exit(&pd->pd_mutex);
4648 }
4649 }
4650
4651 #ifdef DEBUG
4652 /*
4653 * Sanity check for presence of OLD devices in the hash lists
4654 */
4655 if (clist->clist_size) {
4656 ASSERT(clist->clist_map != NULL);
4657 for (count = 0; count < clist->clist_len; count++) {
4658 if (clist->clist_map[count].map_state ==
4659 PORT_DEVICE_INVALID) {
4660 la_wwn_t pwwn;
4661 fc_portid_t d_id;
4662
4663 pd = clist->clist_map[count].map_pd;
4664 ASSERT(pd != NULL);
4665
4666 mutex_enter(&pd->pd_mutex);
4667 pwwn = pd->pd_port_name;
4668 d_id = pd->pd_port_id;
4669 mutex_exit(&pd->pd_mutex);
4670
4671 pd = fctl_get_remote_port_by_pwwn(port, &pwwn);
4672 ASSERT(pd != clist->clist_map[count].map_pd);
4673
4674 pd = fctl_get_remote_port_by_did(port,
4675 d_id.port_id);
4676 ASSERT(pd != clist->clist_map[count].map_pd);
4677 }
4678 }
4679 }
4680 #endif
4681
4682 mutex_enter(&port->fp_mutex);
4683
4684 if (state == FC_STATE_ONLINE) {
4685 if (--port->fp_statec_busy == 0) {
4686 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
4687 }
4688 }
4689 mutex_exit(&port->fp_mutex);
4690
4691 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb,
4692 clist, KM_SLEEP);
4693
4694 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_statec fired; Port=%p,"
4695 "state=%x, len=%d", port, state, listlen);
4696
4697 return (FC_SUCCESS);
4698 }
4699
4700
4701 /*
4702 * Send up a FC_STATE_DEVICE_CHANGE state notification to ULPs
4703 */
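/*
 * Note on the 'sync' flag: when set, this routine initializes
 * clist_mutex/clist_cv, waits for the taskq callback to clear
 * clist_wait, and then tears the clist down itself. If the taskq
 * dispatch fails, the changelist map and the clist are freed here and
 * FC_SUCCESS is still returned to the caller.
 */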
4704 static int
4705 fp_ulp_devc_cb(fc_local_port_t *port, fc_portmap_t *changelist,
4706 uint32_t listlen, uint32_t alloc_len, int sleep, int sync)
4707 {
4708 int ret;
4709 fc_port_clist_t *clist;
4710
4711 ASSERT(!MUTEX_HELD(&port->fp_mutex));
4712
4713 clist = kmem_zalloc(sizeof (*clist), sleep);
4714 if (clist == NULL) {
4715 kmem_free(changelist, alloc_len * sizeof (*changelist));
4716 return (FC_NOMEM);
4717 }
4718
4719 clist->clist_state = FC_STATE_DEVICE_CHANGE;
4720
4721 mutex_enter(&port->fp_mutex);
4722 clist->clist_flags = port->fp_topology;
4723 mutex_exit(&port->fp_mutex);
4724
4725 clist->clist_port = (opaque_t)port;
4726 clist->clist_len = listlen;
4727 clist->clist_size = alloc_len;
4728 clist->clist_map = changelist;
4729
4730 /* Send sysevents for target state changes */
4731
4732 if (clist->clist_size) {
4733 int count;
4734 fc_remote_port_t *pd;
4735
4736 ASSERT(clist->clist_map != NULL);
4737 for (count = 0; count < clist->clist_len; count++) {
4738 pd = clist->clist_map[count].map_pd;
4739
4740 /*
4741 * Bump reference counts on all fc_remote_port_t
4742 * structs in this list. We don't know when the task
4743 * will fire, and we don't need these fc_remote_port_t
4744 * structs going away behind our back.
4745 */
4746 if (pd) {
4747 mutex_enter(&pd->pd_mutex);
4748 ASSERT((pd->pd_ref_count >= 0) ||
4749 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS));
4750 pd->pd_ref_count++;
4751 mutex_exit(&pd->pd_mutex);
4752 }
4753
4754 if (clist->clist_map[count].map_state ==
4755 PORT_DEVICE_VALID) {
4756 if (clist->clist_map[count].map_type ==
4757 PORT_DEVICE_NEW) {
4758 /* Update our state change counter */
4759 mutex_enter(&port->fp_mutex);
4760 port->fp_last_change++;
4761 mutex_exit(&port->fp_mutex);
4762
4763 /* Additions */
4764 fp_log_target_event(port,
4765 ESC_SUNFC_TARGET_ADD,
4766 clist->clist_map[count].map_pwwn,
4767 clist->clist_map[count].map_did.
4768 port_id);
4769 }
4770
4771 } else if ((clist->clist_map[count].map_type ==
4772 PORT_DEVICE_OLD) &&
4773 (clist->clist_map[count].map_state ==
4774 PORT_DEVICE_INVALID)) {
4775 /* Update our state change counter */
4776 mutex_enter(&port->fp_mutex);
4777 port->fp_last_change++;
4778 mutex_exit(&port->fp_mutex);
4779
4780 /*
4781 * For removals, we don't decrement
4782 * pd_ref_count until after the ULP's
4783 * state change callback function has
4784 * completed.
4785 */
4786
4787 /* Removals */
4788 fp_log_target_event(port,
4789 ESC_SUNFC_TARGET_REMOVE,
4790 clist->clist_map[count].map_pwwn,
4791 clist->clist_map[count].map_did.port_id);
4792 }
4793
4794 if (clist->clist_map[count].map_state !=
4795 PORT_DEVICE_INVALID) {
4796 /*
4797 * Indicate that the ULPs are now aware of
4798 * this device.
4799 */
4800
4801 mutex_enter(&pd->pd_mutex);
4802 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS;
4803 mutex_exit(&pd->pd_mutex);
4804 }
4805
4806 #ifdef DEBUG
4807 /*
4808 * Sanity check for OLD devices in the hash lists
4809 */
4810 if (pd && clist->clist_map[count].map_state ==
4811 PORT_DEVICE_INVALID) {
4812 la_wwn_t pwwn;
4813 fc_portid_t d_id;
4814
4815 mutex_enter(&pd->pd_mutex);
4816 pwwn = pd->pd_port_name;
4817 d_id = pd->pd_port_id;
4818 mutex_exit(&pd->pd_mutex);
4819
4820 /*
4821 * This overwrites the 'pd' local variable.
4822 * Beware of this if 'pd' ever gets
4823 * referenced below this block.
4824 */
4825 pd = fctl_get_remote_port_by_pwwn(port, &pwwn);
4826 ASSERT(pd != clist->clist_map[count].map_pd);
4827
4828 pd = fctl_get_remote_port_by_did(port,
4829 d_id.port_id);
4830 ASSERT(pd != clist->clist_map[count].map_pd);
4831 }
4832 #endif
4833 }
4834 }
4835
4836 if (sync) {
4837 clist->clist_wait = 1;
4838 mutex_init(&clist->clist_mutex, NULL, MUTEX_DRIVER, NULL);
4839 cv_init(&clist->clist_cv, NULL, CV_DRIVER, NULL);
4840 }
4841
4842 ret = taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, clist, sleep);
4843 if (sync && ret != TASKQID_INVALID) {
4844 mutex_enter(&clist->clist_mutex);
4845 while (clist->clist_wait) {
4846 cv_wait(&clist->clist_cv, &clist->clist_mutex);
4847 }
4848 mutex_exit(&clist->clist_mutex);
4849
4850 mutex_destroy(&clist->clist_mutex);
4851 cv_destroy(&clist->clist_cv);
4852 kmem_free(clist, sizeof (*clist));
4853 }
4854
4855 if (!ret) {
4856 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc dispatch failed; "
4857 "port=%p", port);
4858 kmem_free(clist->clist_map,
4859 sizeof (*(clist->clist_map)) * clist->clist_size);
4860 kmem_free(clist, sizeof (*clist));
4861 } else {
4862 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc fired; port=%p, len=%d",
4863 port, listlen);
4864 }
4865
4866 return (FC_SUCCESS);
4867 }
4868
4869
4870 /*
4871 * Perform PLOGI to the group of devices for ULPs
4872 */
4873 static void
4874 fp_plogi_group(fc_local_port_t *port, job_request_t *job)
4875 {
4876 int offline;
4877 int count;
4878 int rval;
4879 uint32_t listlen;
4880 uint32_t done;
4881 uint32_t d_id;
4882 fc_remote_node_t *node;
4883 fc_remote_port_t *pd;
4884 fc_remote_port_t *tmp_pd;
4885 fc_packet_t *ulp_pkt;
4886 la_els_logi_t *els_data;
4887 ls_code_t ls_code;
4888
4889 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group begin; port=%p, job=%p",
4890 port, job);
4891
4892 done = 0;
4893 listlen = job->job_ulp_listlen;
4894 job->job_counter = job->job_ulp_listlen;
4895
4896 mutex_enter(&port->fp_mutex);
4897 offline = (port->fp_statec_busy ||
4898 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) ? 1 : 0;
4899 mutex_exit(&port->fp_mutex);
4900
4901 for (count = 0; count < listlen; count++) {
4902 ASSERT(job->job_ulp_pkts[count]->pkt_rsplen >=
4903 sizeof (la_els_logi_t));
4904
4905 ulp_pkt = job->job_ulp_pkts[count];
4906 pd = ulp_pkt->pkt_pd;
4907 d_id = ulp_pkt->pkt_cmd_fhdr.d_id;
4908
4909 if (offline) {
4910 done++;
4911
4912 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE;
4913 ulp_pkt->pkt_reason = FC_REASON_OFFLINE;
4914 ulp_pkt->pkt_pd = NULL;
4915 ulp_pkt->pkt_comp(ulp_pkt);
4916
4917 job->job_ulp_pkts[count] = NULL;
4918
4919 fp_jobdone(job);
4920 continue;
4921 }
4922
4923 if (pd == NULL) {
4924 pd = fctl_get_remote_port_by_did(port, d_id);
4925 if (pd == NULL) {
4926 /* reset later */
4927 ulp_pkt->pkt_state = FC_PKT_FAILURE;
4928 continue;
4929 }
4930 mutex_enter(&pd->pd_mutex);
4931 if (pd->pd_flags == PD_ELS_IN_PROGRESS) {
4932 mutex_exit(&pd->pd_mutex);
4933 ulp_pkt->pkt_state = FC_PKT_ELS_IN_PROGRESS;
4934 done++;
4935 ulp_pkt->pkt_comp(ulp_pkt);
4936 job->job_ulp_pkts[count] = NULL;
4937 fp_jobdone(job);
4938 } else {
4939 ulp_pkt->pkt_state = FC_PKT_FAILURE;
4940 mutex_exit(&pd->pd_mutex);
4941 }
4942 continue;
4943 }
4944
4945 switch (ulp_pkt->pkt_state) {
4946 case FC_PKT_ELS_IN_PROGRESS:
4947 ulp_pkt->pkt_reason = FC_REASON_OFFLINE;
4948 /* FALLTHRU */
4949 case FC_PKT_LOCAL_RJT:
4950 done++;
4951 ulp_pkt->pkt_comp(ulp_pkt);
4952 job->job_ulp_pkts[count] = NULL;
4953 fp_jobdone(job);
4954 continue;
4955 default:
4956 break;
4957 }
4958
4959 /*
4960 * Validate the pd corresponding to the d_id passed
4961 * by the ULPs
4962 */
4963 tmp_pd = fctl_get_remote_port_by_did(port, d_id);
4964 if ((tmp_pd == NULL) || (pd != tmp_pd)) {
4965 done++;
4966 ulp_pkt->pkt_state = FC_PKT_FAILURE;
4967 ulp_pkt->pkt_reason = FC_REASON_NO_CONNECTION;
4968 ulp_pkt->pkt_pd = NULL;
4969 ulp_pkt->pkt_comp(ulp_pkt);
4970 job->job_ulp_pkts[count] = NULL;
4971 fp_jobdone(job);
4972 continue;
4973 }
4974
4975 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group contd; "
4976 "port=%p, pd=%p", port, pd);
4977
4978 mutex_enter(&pd->pd_mutex);
4979
4980 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
4981 done++;
4982 els_data = (la_els_logi_t *)ulp_pkt->pkt_resp;
4983
4984 ls_code.ls_code = LA_ELS_ACC;
4985 ls_code.mbz = 0;
4986
4987 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
4988 (uint8_t *)&ls_code, (uint8_t *)&els_data->ls_code,
4989 sizeof (ls_code_t), DDI_DEV_AUTOINCR);
4990
4991 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
4992 (uint8_t *)&pd->pd_csp,
4993 (uint8_t *)&els_data->common_service,
4994 sizeof (pd->pd_csp), DDI_DEV_AUTOINCR);
4995
4996 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
4997 (uint8_t *)&pd->pd_port_name,
4998 (uint8_t *)&els_data->nport_ww_name,
4999 sizeof (pd->pd_port_name), DDI_DEV_AUTOINCR);
5000
5001 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
5002 (uint8_t *)&pd->pd_clsp1,
5003 (uint8_t *)&els_data->class_1,
5004 sizeof (pd->pd_clsp1), DDI_DEV_AUTOINCR);
5005
5006 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
5007 (uint8_t *)&pd->pd_clsp2,
5008 (uint8_t *)&els_data->class_2,
5009 sizeof (pd->pd_clsp2), DDI_DEV_AUTOINCR);
5010
5011 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
5012 (uint8_t *)&pd->pd_clsp3,
5013 (uint8_t *)&els_data->class_3,
5014 sizeof (pd->pd_clsp3), DDI_DEV_AUTOINCR);
5015
5016 node = pd->pd_remote_nodep;
5017 pd->pd_login_count++;
5018 pd->pd_flags = PD_IDLE;
5019 ulp_pkt->pkt_pd = pd;
5020 mutex_exit(&pd->pd_mutex);
5021
5022 mutex_enter(&node->fd_mutex);
5023 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
5024 (uint8_t *)&node->fd_node_name,
5025 (uint8_t *)(&els_data->node_ww_name),
5026 sizeof (node->fd_node_name), DDI_DEV_AUTOINCR);
5027
5028 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
5029 (uint8_t *)&node->fd_vv,
5030 (uint8_t *)(&els_data->vendor_version),
5031 sizeof (node->fd_vv), DDI_DEV_AUTOINCR);
5032
5033 mutex_exit(&node->fd_mutex);
5034 ulp_pkt->pkt_state = FC_PKT_SUCCESS;
5035 } else {
5036
5037 ulp_pkt->pkt_state = FC_PKT_FAILURE; /* reset later */
5038 mutex_exit(&pd->pd_mutex);
5039 }
5040
5041 if (ulp_pkt->pkt_state != FC_PKT_FAILURE) {
5042 ulp_pkt->pkt_comp(ulp_pkt);
5043 job->job_ulp_pkts[count] = NULL;
5044 fp_jobdone(job);
5045 }
5046 }
5047
5048 if (done == listlen) {
5049 fp_jobwait(job);
5050 fctl_jobdone(job);
5051 return;
5052 }
5053
5054 job->job_counter = listlen - done;
5055
5056 for (count = 0; count < listlen; count++) {
5057 int cmd_flags;
5058
5059 if ((ulp_pkt = job->job_ulp_pkts[count]) == NULL) {
5060 continue;
5061 }
5062
5063 ASSERT(ulp_pkt->pkt_state == FC_PKT_FAILURE);
5064
5065 cmd_flags = FP_CMD_PLOGI_RETAIN;
5066
5067 d_id = ulp_pkt->pkt_cmd_fhdr.d_id;
5068 ASSERT(d_id != 0);
5069
5070 pd = fctl_get_remote_port_by_did(port, d_id);
5071
5072 /*
5073 * We need to properly adjust the port device
5074 * reference counter before we assign the pd
5075		 * to the ULP packet's port device pointer.
5076 */
5077 if (pd != NULL && ulp_pkt->pkt_pd == NULL) {
5078 mutex_enter(&pd->pd_mutex);
5079 pd->pd_ref_count++;
5080 mutex_exit(&pd->pd_mutex);
5081 FP_TRACE(FP_NHEAD1(3, 0),
5082 "fp_plogi_group: DID = 0x%x using new pd %p \
5083 old pd NULL\n", d_id, pd);
5084 } else if (pd != NULL && ulp_pkt->pkt_pd != NULL &&
5085 ulp_pkt->pkt_pd != pd) {
5086 mutex_enter(&pd->pd_mutex);
5087 pd->pd_ref_count++;
5088 mutex_exit(&pd->pd_mutex);
5089 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex);
5090 ulp_pkt->pkt_pd->pd_ref_count--;
5091 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex);
5092 FP_TRACE(FP_NHEAD1(3, 0),
5093 "fp_plogi_group: DID = 0x%x pkt_pd %p != pd %p\n",
5094 d_id, ulp_pkt->pkt_pd, pd);
5095 } else if (pd == NULL && ulp_pkt->pkt_pd != NULL) {
5096 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex);
5097 ulp_pkt->pkt_pd->pd_ref_count--;
5098 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex);
5099 FP_TRACE(FP_NHEAD1(3, 0),
5100 "fp_plogi_group: DID = 0x%x pd is NULL and \
5101 pkt_pd = %p\n", d_id, ulp_pkt->pkt_pd);
5102 }
5103
5104 ulp_pkt->pkt_pd = pd;
5105
5106 if (pd != NULL) {
5107 mutex_enter(&pd->pd_mutex);
5108 d_id = pd->pd_port_id.port_id;
5109 pd->pd_flags = PD_ELS_IN_PROGRESS;
5110 mutex_exit(&pd->pd_mutex);
5111 } else {
5112 d_id = ulp_pkt->pkt_cmd_fhdr.d_id;
5113 #ifdef DEBUG
5114 pd = fctl_get_remote_port_by_did(port, d_id);
5115 ASSERT(pd == NULL);
5116 #endif
5117 /*
5118			 * In Fabric topology, use the NS to create the
5119			 * port device; if that fails, still try PLOGI,
5120			 * which will make yet another attempt to create
5121			 * it after a successful PLOGI.
5122 */
5123 mutex_enter(&port->fp_mutex);
5124 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
5125 mutex_exit(&port->fp_mutex);
5126 pd = fp_create_remote_port_by_ns(port,
5127 d_id, KM_SLEEP);
5128 if (pd) {
5129 cmd_flags |= FP_CMD_DELDEV_ON_ERROR;
5130
5131 mutex_enter(&pd->pd_mutex);
5132 pd->pd_flags = PD_ELS_IN_PROGRESS;
5133 mutex_exit(&pd->pd_mutex);
5134
5135 FP_TRACE(FP_NHEAD1(3, 0),
5136 "fp_plogi_group;"
5137 " NS created PD port=%p, job=%p,"
5138 " pd=%p", port, job, pd);
5139 }
5140 } else {
5141 mutex_exit(&port->fp_mutex);
5142 }
5143 if ((ulp_pkt->pkt_pd == NULL) && (pd != NULL)) {
5144 FP_TRACE(FP_NHEAD1(3, 0),
5145 "fp_plogi_group;"
5146 "ulp_pkt's pd is NULL, get a pd %p",
5147 pd);
5148 mutex_enter(&pd->pd_mutex);
5149 pd->pd_ref_count++;
5150 mutex_exit(&pd->pd_mutex);
5151 }
5152 ulp_pkt->pkt_pd = pd;
5153 }
5154
5155 rval = fp_port_login(port, d_id, job, cmd_flags,
5156 KM_SLEEP, pd, ulp_pkt);
5157
5158 if (rval == FC_SUCCESS) {
5159 continue;
5160 }
5161
5162 if (rval == FC_STATEC_BUSY) {
5163 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE;
5164 ulp_pkt->pkt_reason = FC_REASON_OFFLINE;
5165 } else {
5166 ulp_pkt->pkt_state = FC_PKT_FAILURE;
5167 }
5168
5169 if (pd) {
5170 mutex_enter(&pd->pd_mutex);
5171 pd->pd_flags = PD_IDLE;
5172 mutex_exit(&pd->pd_mutex);
5173 }
5174
5175 if (cmd_flags & FP_CMD_DELDEV_ON_ERROR) {
5176 ASSERT(pd != NULL);
5177
5178 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group: NS created,"
5179 " PD removed; port=%p, job=%p", port, job);
5180
5181 mutex_enter(&pd->pd_mutex);
5182 pd->pd_ref_count--;
5183 node = pd->pd_remote_nodep;
5184 mutex_exit(&pd->pd_mutex);
5185
5186 ASSERT(node != NULL);
5187
5188 if (fctl_destroy_remote_port(port, pd) == 0) {
5189 fctl_destroy_remote_node(node);
5190 }
5191 ulp_pkt->pkt_pd = NULL;
5192 }
5193 ulp_pkt->pkt_comp(ulp_pkt);
5194 fp_jobdone(job);
5195 }
5196
5197 fp_jobwait(job);
5198 fctl_jobdone(job);
5199
5200 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group end: port=%p, job=%p",
5201 port, job);
5202 }
5203
5204
5205 /*
5206 * Name server request initialization
5207 */
5208 static void
5209 fp_ns_init(fc_local_port_t *port, job_request_t *job, int sleep)
5210 {
5211 int rval;
5212 int count;
5213 int size;
5214
5215 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
5216
5217 job->job_counter = 1;
5218 job->job_result = FC_SUCCESS;
5219
5220 rval = fp_port_login(port, 0xFFFFFC, job, FP_CMD_PLOGI_RETAIN,
5221 KM_SLEEP, NULL, NULL);
5222
5223 if (rval != FC_SUCCESS) {
5224 mutex_enter(&port->fp_mutex);
5225 port->fp_topology = FC_TOP_NO_NS;
5226 mutex_exit(&port->fp_mutex);
5227 return;
5228 }
5229
5230 fp_jobwait(job);
5231
5232 if (job->job_result != FC_SUCCESS) {
5233 mutex_enter(&port->fp_mutex);
5234 port->fp_topology = FC_TOP_NO_NS;
5235 mutex_exit(&port->fp_mutex);
5236 return;
5237 }
5238
5239 /*
5240 * At this time, we'll do NS registration for objects in the
5241 * ns_reg_cmds (see top of this file) array.
5242 *
5243  * Each time a ULP module registers with the transport, the
5244  * appropriate bit is set in the FC-4 types and registered with
5245  * the NS to advertise that support. Also, ULPs and FC admin
5246  * utilities may register objects such as the IP address, symbolic
5247  * port/node names, and the Initial Process Associator at run time.
5248 */
5249 size = sizeof (ns_reg_cmds) / sizeof (ns_reg_cmds[0]);
5250 job->job_counter = size;
5251 job->job_result = FC_SUCCESS;
5252
5253 for (count = 0; count < size; count++) {
5254 if (fp_ns_reg(port, NULL, ns_reg_cmds[count],
5255 job, 0, sleep) != FC_SUCCESS) {
5256 fp_jobdone(job);
5257 }
5258 }
5259 if (size) {
5260 fp_jobwait(job);
5261 }
5262
5263 job->job_result = FC_SUCCESS;
5264
5265 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP);
5266
5267 if (port->fp_dev_count < FP_MAX_DEVICES) {
5268 (void) fp_ns_get_devcount(port, job, 1, KM_SLEEP);
5269 }
5270
5271 job->job_counter = 1;
5272
5273 if (fp_ns_scr(port, job, FC_SCR_FULL_REGISTRATION,
5274 sleep) == FC_SUCCESS) {
5275 fp_jobwait(job);
5276 }
5277 }
5278
5279
5280 /*
5281 * Name server finish:
5282 * Unregister for RSCNs
5283 * Unregister all the host port objects in the Name Server
5284 * Perform LOGO with the NS;
5285 */
5286 static void
5287 fp_ns_fini(fc_local_port_t *port, job_request_t *job)
5288 {
5289 fp_cmd_t *cmd;
5290 uchar_t class;
5291 uint32_t s_id;
5292 fc_packet_t *pkt;
5293 la_els_logo_t payload;
5294
5295 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
5296
5297 job->job_counter = 1;
5298
5299 if (fp_ns_scr(port, job, FC_SCR_CLEAR_REGISTRATION, KM_SLEEP) !=
5300 FC_SUCCESS) {
5301 fp_jobdone(job);
5302 }
5303 fp_jobwait(job);
5304
5305 job->job_counter = 1;
5306
5307 if (fp_ns_reg(port, NULL, NS_DA_ID, job, 0, KM_SLEEP) != FC_SUCCESS) {
5308 fp_jobdone(job);
5309 }
5310 fp_jobwait(job);
5311
5312 job->job_counter = 1;
5313
5314 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t),
5315 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, NULL);
5316 pkt = &cmd->cmd_pkt;
5317
5318 mutex_enter(&port->fp_mutex);
5319 class = port->fp_ns_login_class;
5320 s_id = port->fp_port_id.port_id;
5321 payload.nport_id = port->fp_port_id;
5322 mutex_exit(&port->fp_mutex);
5323
5324 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
5325 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
5326 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE;
5327 cmd->cmd_retry_count = 1;
5328 cmd->cmd_ulp_pkt = NULL;
5329
5330 if (port->fp_npiv_type == FC_NPIV_PORT) {
5331 fp_els_init(cmd, s_id, 0xFFFFFE, fp_logo_intr, job);
5332 } else {
5333 fp_els_init(cmd, s_id, 0xFFFFFC, fp_logo_intr, job);
5334 }
5335
5336 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
5337
5338 payload.ls_code.ls_code = LA_ELS_LOGO;
5339 payload.ls_code.mbz = 0;
5340 payload.nport_ww_name = port->fp_service_params.nport_ww_name;
5341
5342 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
5343 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
5344
5345 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
5346 fp_iodone(cmd);
5347 }
5348 fp_jobwait(job);
5349 }
5350
5351
5352 /*
5353 * NS Registration function.
5354 *
5355  * Note that FC-GS-2 currently doesn't support Object Registration by
5356  * a D_ID other than the owner of the object. What we aim for at this
5357  * point is to at least allow Symbolic Node/Port Name registration for
5358  * any N_Port Identifier by the host software.
5359  *
5360  * If the second argument (fc_remote_port_t *) is NULL, this
5361  * function treats the request as a host NS object.
5362 */
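/*
 * Usage sketch: host NS object registration is driven from fp_ns_init()
 * with a NULL pd and one job counter per request:
 *
 *	if (fp_ns_reg(port, NULL, ns_reg_cmds[count],
 *	    job, 0, sleep) != FC_SUCCESS) {
 *		fp_jobdone(job);
 *	}
 *
 * With 'polled' set, the routine sets job_counter to 1 and waits for
 * completion itself via fp_jobwait().
 */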
5363 static int
5364 fp_ns_reg(fc_local_port_t *port, fc_remote_port_t *pd, uint16_t cmd_code,
5365 job_request_t *job, int polled, int sleep)
5366 {
5367 int rval;
5368 fc_portid_t s_id;
5369 fc_packet_t *pkt;
5370 fp_cmd_t *cmd;
5371
5372 if (pd == NULL) {
5373 mutex_enter(&port->fp_mutex);
5374 s_id = port->fp_port_id;
5375 mutex_exit(&port->fp_mutex);
5376 } else {
5377 mutex_enter(&pd->pd_mutex);
5378 s_id = pd->pd_port_id;
5379 mutex_exit(&pd->pd_mutex);
5380 }
5381
5382 if (polled) {
5383 job->job_counter = 1;
5384 }
5385
5386 switch (cmd_code) {
5387 case NS_RPN_ID:
5388 case NS_RNN_ID: {
5389 ns_rxn_req_t rxn;
5390
5391 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5392 sizeof (ns_rxn_req_t), sizeof (fc_reg_resp_t), sleep, NULL);
5393 if (cmd == NULL) {
5394 return (FC_NOMEM);
5395 }
5396 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5397 pkt = &cmd->cmd_pkt;
5398
5399 if (pd == NULL) {
5400 rxn.rxn_xname = ((cmd_code == NS_RPN_ID) ?
5401 (port->fp_service_params.nport_ww_name) :
5402 (port->fp_service_params.node_ww_name));
5403 } else {
5404 if (cmd_code == NS_RPN_ID) {
5405 mutex_enter(&pd->pd_mutex);
5406 rxn.rxn_xname = pd->pd_port_name;
5407 mutex_exit(&pd->pd_mutex);
5408 } else {
5409 fc_remote_node_t *node;
5410
5411 mutex_enter(&pd->pd_mutex);
5412 node = pd->pd_remote_nodep;
5413 mutex_exit(&pd->pd_mutex);
5414
5415 mutex_enter(&node->fd_mutex);
5416 rxn.rxn_xname = node->fd_node_name;
5417 mutex_exit(&node->fd_mutex);
5418 }
5419 }
5420 rxn.rxn_port_id = s_id;
5421
5422 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rxn,
5423 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5424 sizeof (rxn), DDI_DEV_AUTOINCR);
5425
5426 break;
5427 }
5428
5429 case NS_RCS_ID: {
5430 ns_rcos_t rcos;
5431
5432 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5433 sizeof (ns_rcos_t), sizeof (fc_reg_resp_t), sleep, NULL);
5434 if (cmd == NULL) {
5435 return (FC_NOMEM);
5436 }
5437 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5438 pkt = &cmd->cmd_pkt;
5439
5440 if (pd == NULL) {
5441 rcos.rcos_cos = port->fp_cos;
5442 } else {
5443 mutex_enter(&pd->pd_mutex);
5444 rcos.rcos_cos = pd->pd_cos;
5445 mutex_exit(&pd->pd_mutex);
5446 }
5447 rcos.rcos_port_id = s_id;
5448
5449 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rcos,
5450 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5451 sizeof (rcos), DDI_DEV_AUTOINCR);
5452
5453 break;
5454 }
5455
5456 case NS_RFT_ID: {
5457 ns_rfc_type_t rfc;
5458
5459 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5460 sizeof (ns_rfc_type_t), sizeof (fc_reg_resp_t), sleep,
5461 NULL);
5462 if (cmd == NULL) {
5463 return (FC_NOMEM);
5464 }
5465 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5466 pkt = &cmd->cmd_pkt;
5467
5468 if (pd == NULL) {
5469 mutex_enter(&port->fp_mutex);
5470 bcopy(port->fp_fc4_types, rfc.rfc_types,
5471 sizeof (port->fp_fc4_types));
5472 mutex_exit(&port->fp_mutex);
5473 } else {
5474 mutex_enter(&pd->pd_mutex);
5475 bcopy(pd->pd_fc4types, rfc.rfc_types,
5476 sizeof (pd->pd_fc4types));
5477 mutex_exit(&pd->pd_mutex);
5478 }
5479 rfc.rfc_port_id = s_id;
5480
5481 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rfc,
5482 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5483 sizeof (rfc), DDI_DEV_AUTOINCR);
5484
5485 break;
5486 }
5487
5488 case NS_RSPN_ID: {
5489 uchar_t name_len;
5490 int pl_size;
5491 fc_portid_t spn;
5492
5493 if (pd == NULL) {
5494 mutex_enter(&port->fp_mutex);
5495 name_len = port->fp_sym_port_namelen;
5496 mutex_exit(&port->fp_mutex);
5497 } else {
5498 mutex_enter(&pd->pd_mutex);
5499 name_len = pd->pd_spn_len;
5500 mutex_exit(&pd->pd_mutex);
5501 }
5502
5503 pl_size = sizeof (fc_portid_t) + name_len + 1;
5504
5505 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + pl_size,
5506 sizeof (fc_reg_resp_t), sleep, NULL);
5507 if (cmd == NULL) {
5508 return (FC_NOMEM);
5509 }
5510
5511 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5512
5513 pkt = &cmd->cmd_pkt;
5514
5515 spn = s_id;
5516
5517 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&spn, (uint8_t *)
5518 (pkt->pkt_cmd + sizeof (fc_ct_header_t)), sizeof (spn),
5519 DDI_DEV_AUTOINCR);
5520 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&name_len,
5521 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)
5522 + sizeof (fc_portid_t)), 1, DDI_DEV_AUTOINCR);
5523
5524 if (pd == NULL) {
5525 mutex_enter(&port->fp_mutex);
5526 FC_SET_CMD(port, pkt->pkt_cmd_acc,
5527 (uint8_t *)port->fp_sym_port_name, (uint8_t *)
5528 (pkt->pkt_cmd + sizeof (fc_ct_header_t) +
5529 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR);
5530 mutex_exit(&port->fp_mutex);
5531 } else {
5532 mutex_enter(&pd->pd_mutex);
5533 FC_SET_CMD(port, pkt->pkt_cmd_acc,
5534 (uint8_t *)pd->pd_spn,
5535 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) +
5536 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR);
5537 mutex_exit(&pd->pd_mutex);
5538 }
5539 break;
5540 }
5541
5542 case NS_RPT_ID: {
5543 ns_rpt_t rpt;
5544
5545 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5546 sizeof (ns_rpt_t), sizeof (fc_reg_resp_t), sleep, NULL);
5547 if (cmd == NULL) {
5548 return (FC_NOMEM);
5549 }
5550 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5551 pkt = &cmd->cmd_pkt;
5552
5553 if (pd == NULL) {
5554 rpt.rpt_type = port->fp_port_type;
5555 } else {
5556 mutex_enter(&pd->pd_mutex);
5557 rpt.rpt_type = pd->pd_porttype;
5558 mutex_exit(&pd->pd_mutex);
5559 }
5560 rpt.rpt_port_id = s_id;
5561
5562 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rpt,
5563 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5564 sizeof (rpt), DDI_DEV_AUTOINCR);
5565
5566 break;
5567 }
5568
5569 case NS_RIP_NN: {
5570 ns_rip_t rip;
5571
5572 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5573 sizeof (ns_rip_t), sizeof (fc_reg_resp_t), sleep, NULL);
5574 if (cmd == NULL) {
5575 return (FC_NOMEM);
5576 }
5577 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5578 pkt = &cmd->cmd_pkt;
5579
5580 if (pd == NULL) {
5581 rip.rip_node_name =
5582 port->fp_service_params.node_ww_name;
5583 bcopy(port->fp_ip_addr, rip.rip_ip_addr,
5584 sizeof (port->fp_ip_addr));
5585 } else {
5586 fc_remote_node_t *node;
5587
5588 /*
5589			 * A more correct implementation would keep the IP
5590			 * address in the fc_remote_node_t structure; Node
5591			 * WWN and IP address should have a one-to-one
5592			 * correlation (though this is changing in the latest
5593			 * FC-GS-2 draft).
5594 */
5595 mutex_enter(&pd->pd_mutex);
5596 node = pd->pd_remote_nodep;
5597 bcopy(pd->pd_ip_addr, rip.rip_ip_addr,
5598 sizeof (pd->pd_ip_addr));
5599 mutex_exit(&pd->pd_mutex);
5600
5601 mutex_enter(&node->fd_mutex);
5602 rip.rip_node_name = node->fd_node_name;
5603 mutex_exit(&node->fd_mutex);
5604 }
5605
5606 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rip,
5607 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5608 sizeof (rip), DDI_DEV_AUTOINCR);
5609
5610 break;
5611 }
5612
5613 case NS_RIPA_NN: {
5614 ns_ipa_t ipa;
5615
5616 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5617 sizeof (ns_ipa_t), sizeof (fc_reg_resp_t), sleep, NULL);
5618 if (cmd == NULL) {
5619 return (FC_NOMEM);
5620 }
5621 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5622 pkt = &cmd->cmd_pkt;
5623
5624 if (pd == NULL) {
5625 ipa.ipa_node_name =
5626 port->fp_service_params.node_ww_name;
5627 bcopy(port->fp_ipa, ipa.ipa_value,
5628 sizeof (port->fp_ipa));
5629 } else {
5630 fc_remote_node_t *node;
5631
5632 mutex_enter(&pd->pd_mutex);
5633 node = pd->pd_remote_nodep;
5634 mutex_exit(&pd->pd_mutex);
5635
5636 mutex_enter(&node->fd_mutex);
5637 ipa.ipa_node_name = node->fd_node_name;
5638 bcopy(node->fd_ipa, ipa.ipa_value,
5639 sizeof (node->fd_ipa));
5640 mutex_exit(&node->fd_mutex);
5641 }
5642
5643 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&ipa,
5644 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5645 sizeof (ipa), DDI_DEV_AUTOINCR);
5646
5647 break;
5648 }
5649
5650 case NS_RSNN_NN: {
5651 uchar_t name_len;
5652 int pl_size;
5653 la_wwn_t snn;
5654 fc_remote_node_t *node = NULL;
5655
5656 if (pd == NULL) {
5657 mutex_enter(&port->fp_mutex);
5658 name_len = port->fp_sym_node_namelen;
5659 mutex_exit(&port->fp_mutex);
5660 } else {
5661 mutex_enter(&pd->pd_mutex);
5662 node = pd->pd_remote_nodep;
5663 mutex_exit(&pd->pd_mutex);
5664
5665 mutex_enter(&node->fd_mutex);
5666 name_len = node->fd_snn_len;
5667 mutex_exit(&node->fd_mutex);
5668 }
5669
5670 pl_size = sizeof (la_wwn_t) + name_len + 1;
5671
5672 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5673 pl_size, sizeof (fc_reg_resp_t), sleep, NULL);
5674 if (cmd == NULL) {
5675 return (FC_NOMEM);
5676 }
5677 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5678
5679 pkt = &cmd->cmd_pkt;
5680
5681 bcopy(&port->fp_service_params.node_ww_name,
5682 &snn, sizeof (la_wwn_t));
5683
5684 if (pd == NULL) {
5685 mutex_enter(&port->fp_mutex);
5686 FC_SET_CMD(port, pkt->pkt_cmd_acc,
5687 (uint8_t *)port->fp_sym_node_name, (uint8_t *)
5688 (pkt->pkt_cmd + sizeof (fc_ct_header_t) +
5689 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR);
5690 mutex_exit(&port->fp_mutex);
5691 } else {
5692 ASSERT(node != NULL);
5693 mutex_enter(&node->fd_mutex);
5694 FC_SET_CMD(port, pkt->pkt_cmd_acc,
5695 (uint8_t *)node->fd_snn,
5696 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) +
5697 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR);
5698 mutex_exit(&node->fd_mutex);
5699 }
5700
5701 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&snn,
5702 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5703 sizeof (snn), DDI_DEV_AUTOINCR);
5704 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&name_len,
5705 (uint8_t *)(pkt->pkt_cmd
5706 + sizeof (fc_ct_header_t) + sizeof (snn)),
5707 1, DDI_DEV_AUTOINCR);
5708
5709 break;
5710 }
5711
5712 case NS_DA_ID: {
5713 ns_remall_t rall;
5714 char tmp[4] = {0};
5715 char *ptr;
5716
5717 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5718 sizeof (ns_remall_t), sizeof (fc_reg_resp_t), sleep, NULL);
5719
5720 if (cmd == NULL) {
5721 return (FC_NOMEM);
5722 }
5723
5724 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5725 pkt = &cmd->cmd_pkt;
5726
5727 ptr = (char *)(&s_id);
5728 tmp[3] = *ptr++;
5729 tmp[2] = *ptr++;
5730 tmp[1] = *ptr++;
5731 tmp[0] = *ptr;
5732 #if defined(_BIT_FIELDS_LTOH)
5733 bcopy((caddr_t)tmp, (caddr_t)(&rall.rem_port_id), 4);
5734 #else
5735 rall.rem_port_id = s_id;
5736 #endif
5737 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rall,
5738 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5739 sizeof (rall), DDI_DEV_AUTOINCR);
5740
5741 break;
5742 }
5743
5744 default:
5745 return (FC_FAILURE);
5746 }
5747
5748 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
5749
5750 if (rval != FC_SUCCESS) {
5751 job->job_result = rval;
5752 fp_iodone(cmd);
5753 }
5754
5755 if (polled) {
5756 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
5757 fp_jobwait(job);
5758 } else {
5759 rval = FC_SUCCESS;
5760 }
5761
5762 return (rval);
5763 }
5764
5765
5766 /*
5767 * Common interrupt handler
5768 */
5769 static int
5770 fp_common_intr(fc_packet_t *pkt, int iodone)
5771 {
5772 int rval = FC_FAILURE;
5773 fp_cmd_t *cmd;
5774 fc_local_port_t *port;
5775
5776 cmd = pkt->pkt_ulp_private;
5777 port = cmd->cmd_port;
5778
5779 /*
5780	 * Fail the upper layer requests quickly if
5781	 * a state change has occurred in the meantime.
5782 */
5783 mutex_enter(&port->fp_mutex);
5784 if (cmd->cmd_ulp_pkt != NULL && port->fp_statec_busy) {
5785 mutex_exit(&port->fp_mutex);
5786 cmd->cmd_ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE;
5787 cmd->cmd_ulp_pkt->pkt_reason = FC_REASON_OFFLINE;
5788 } else if (!(port->fp_soft_state &
5789 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS))) {
5790 mutex_exit(&port->fp_mutex);
5791
5792 switch (pkt->pkt_state) {
5793 case FC_PKT_LOCAL_BSY:
5794 case FC_PKT_FABRIC_BSY:
5795 case FC_PKT_NPORT_BSY:
5796 case FC_PKT_TIMEOUT:
5797 cmd->cmd_retry_interval = (pkt->pkt_state ==
5798 FC_PKT_TIMEOUT) ? 0 : fp_retry_delay;
5799 rval = fp_retry_cmd(pkt);
5800 break;
5801
5802 case FC_PKT_FABRIC_RJT:
5803 case FC_PKT_NPORT_RJT:
5804 case FC_PKT_LOCAL_RJT:
5805 case FC_PKT_LS_RJT:
5806 case FC_PKT_FS_RJT:
5807 case FC_PKT_BA_RJT:
5808 rval = fp_handle_reject(pkt);
5809 break;
5810
5811 default:
5812 if (pkt->pkt_resp_resid) {
5813 cmd->cmd_retry_interval = 0;
5814 rval = fp_retry_cmd(pkt);
5815 }
5816 break;
5817 }
5818 } else {
5819 mutex_exit(&port->fp_mutex);
5820 }
5821
5822 if (rval != FC_SUCCESS && iodone) {
5823 fp_iodone(cmd);
5824 rval = FC_SUCCESS;
5825 }
5826
5827 return (rval);
5828 }
5829
5830
5831 /*
5832  * A brief note on point to point topology:
5833  *
5834  * In the ACC payload, if the D_ID is ZERO and the common service
5835  * parameters indicate N_Port, then the topology is POINT TO POINT.
5836  *
5837  * In a point to point topology with an N_Port, during Fabric Login,
5838  * the destination N_Port compares our WWN against its own and decides
5839  * whether it needs to issue a PLOGI. That means FLOGI could
5840  * potentially trigger an unsolicited PLOGI from an N_Port. The
5841  * unsolicited PLOGI creates the device handles.
5842  *
5843  * If the host port WWN is greater than the other N_Port's WWN, we
5844  * become the master (not the term used in the FC standards) and
5845  * initiate the PLOGI.
5846 *
5847 */
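/*
 * Sketch of the master decision made below: fctl_wwn_cmp() is assumed
 * to compare the two WWNs with the higher WWN winning. If the host
 * port wins, fp_ptpt_master is set and we initiate the PLOGI;
 * otherwise fp_ptpt_master is cleared and we wait for the unsolicited
 * PLOGI from the other N_Port to create the device handles.
 */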
5848 static void
5849 fp_flogi_intr(fc_packet_t *pkt)
5850 {
5851 int state;
5852 int f_port;
5853 uint32_t s_id;
5854 uint32_t d_id;
5855 fp_cmd_t *cmd;
5856 fc_local_port_t *port;
5857 la_wwn_t *swwn;
5858 la_wwn_t dwwn;
5859 la_wwn_t nwwn;
5860 fc_remote_port_t *pd;
5861 la_els_logi_t *acc;
5862 com_svc_t csp;
5863 ls_code_t resp;
5864
5865 cmd = pkt->pkt_ulp_private;
5866 port = cmd->cmd_port;
5867
5868 mutex_enter(&port->fp_mutex);
5869 port->fp_out_fpcmds--;
5870 mutex_exit(&port->fp_mutex);
5871
5872 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr; port=%p, pkt=%p, state=%x",
5873 port, pkt, pkt->pkt_state);
5874
5875 if (FP_IS_PKT_ERROR(pkt)) {
5876 (void) fp_common_intr(pkt, 1);
5877 return;
5878 }
5879
5880 /*
5881	 * Currently, we don't need to swap bytes here because qlc fakes the
5882	 * response for us, so endianness is already taken care of. This
5883	 * needs to be fixed and generalized at some point.
5884 */
5885 acc = (la_els_logi_t *)pkt->pkt_resp;
5886
5887 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc,
5888 sizeof (resp), DDI_DEV_AUTOINCR);
5889
5890 ASSERT(resp.ls_code == LA_ELS_ACC);
5891 if (resp.ls_code != LA_ELS_ACC) {
5892 (void) fp_common_intr(pkt, 1);
5893 return;
5894 }
5895
5896 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&csp,
5897 (uint8_t *)&acc->common_service, sizeof (csp), DDI_DEV_AUTOINCR);
5898
5899 f_port = FP_IS_F_PORT(csp.cmn_features) ? 1 : 0;
5900
5901 ASSERT(!MUTEX_HELD(&port->fp_mutex));
5902
5903 mutex_enter(&port->fp_mutex);
5904 state = FC_PORT_STATE_MASK(port->fp_state);
5905 mutex_exit(&port->fp_mutex);
5906
5907 if (f_port == 0) {
5908 if (state != FC_STATE_LOOP) {
5909 swwn = &port->fp_service_params.nport_ww_name;
5910
5911 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&dwwn,
5912 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t),
5913 DDI_DEV_AUTOINCR);
5914
5915 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&nwwn,
5916 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t),
5917 DDI_DEV_AUTOINCR);
5918
5919 mutex_enter(&port->fp_mutex);
5920
5921 port->fp_topology = FC_TOP_PT_PT;
5922 port->fp_total_devices = 1;
5923 if (fctl_wwn_cmp(swwn, &dwwn) >= 0) {
5924 port->fp_ptpt_master = 1;
5925 /*
5926				 * Choose 'X' as S_ID and 'Y' as D_ID;
5927				 * hopefully that will work. If not,
5928				 * it will get changed later.
5929 */
5930 s_id = port->fp_instance + FP_DEFAULT_SID;
5931 d_id = port->fp_instance + FP_DEFAULT_DID;
5932 port->fp_port_id.port_id = s_id;
5933 mutex_exit(&port->fp_mutex);
5934
5935 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr: fp %x"
5936 "pd %x", port->fp_port_id.port_id, d_id);
5937 pd = fctl_create_remote_port(port,
5938 &nwwn, &dwwn, d_id, PD_PLOGI_INITIATOR,
5939 KM_NOSLEEP);
5940 if (pd == NULL) {
5941 fp_printf(port, CE_NOTE, FP_LOG_ONLY,
5942 0, NULL, "couldn't create device"
5943 " d_id=%X", d_id);
5944 fp_iodone(cmd);
5945 return;
5946 }
5947
5948 cmd->cmd_pkt.pkt_tran_flags =
5949 pkt->pkt_tran_flags;
5950 cmd->cmd_pkt.pkt_tran_type = pkt->pkt_tran_type;
5951 cmd->cmd_flags = FP_CMD_PLOGI_RETAIN;
5952 cmd->cmd_retry_count = fp_retry_count;
5953
5954 fp_xlogi_init(port, cmd, s_id, d_id,
5955 fp_plogi_intr, cmd->cmd_job, LA_ELS_PLOGI);
5956
5957 (&cmd->cmd_pkt)->pkt_pd = pd;
5958
5959 /*
5960 * We've just created this fc_remote_port_t, and
5961 * we're about to use it to send a PLOGI, so
5962 * bump the reference count right now. When
5963 * the packet is freed, the reference count will
5964 * be decremented. The ULP may also start using
5965 * it, so mark it as given away as well.
5966 */
5967 pd->pd_ref_count++;
5968 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS;
5969
5970 if (fp_sendcmd(port, cmd,
5971 port->fp_fca_handle) == FC_SUCCESS) {
5972 return;
5973 }
5974 } else {
5975 /*
5976 * The device handles will be created when the
5977 * unsolicited PLOGI is completed successfully
5978 */
5979 port->fp_ptpt_master = 0;
5980 mutex_exit(&port->fp_mutex);
5981 }
5982 }
5983 pkt->pkt_state = FC_PKT_FAILURE;
5984 } else {
5985 if (f_port) {
5986 mutex_enter(&port->fp_mutex);
5987 if (state == FC_STATE_LOOP) {
5988 port->fp_topology = FC_TOP_PUBLIC_LOOP;
5989 } else {
5990 port->fp_topology = FC_TOP_FABRIC;
5991
5992 FC_GET_RSP(port, pkt->pkt_resp_acc,
5993 (uint8_t *)&port->fp_fabric_name,
5994 (uint8_t *)&acc->node_ww_name,
5995 sizeof (la_wwn_t),
5996 DDI_DEV_AUTOINCR);
5997 }
5998 port->fp_port_id.port_id = pkt->pkt_resp_fhdr.d_id;
5999 mutex_exit(&port->fp_mutex);
6000 } else {
6001 pkt->pkt_state = FC_PKT_FAILURE;
6002 }
6003 }
6004 fp_iodone(cmd);
6005 }
6006
6007
6008 /*
6009 * Handle solicited PLOGI response
6010 */
6011 static void
6012 fp_plogi_intr(fc_packet_t *pkt)
6013 {
6014 int nl_port;
6015 int bailout;
6016 uint32_t d_id;
6017 fp_cmd_t *cmd;
6018 la_els_logi_t *acc;
6019 fc_local_port_t *port;
6020 fc_remote_port_t *pd;
6021 la_wwn_t nwwn;
6022 la_wwn_t pwwn;
6023 ls_code_t resp;
6024
6025 nl_port = 0;
6026 cmd = pkt->pkt_ulp_private;
6027 port = cmd->cmd_port;
6028 d_id = pkt->pkt_cmd_fhdr.d_id;
6029
6030 #ifndef __lock_lint
6031 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter);
6032 #endif
6033
6034 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: port=%p, job=%p, d_id=%x,"
6035 " jcount=%d pkt=%p, state=%x", port, cmd->cmd_job, d_id,
6036 cmd->cmd_job->job_counter, pkt, pkt->pkt_state);
6037
6038 /*
6039 * Bail out early on ULP initiated requests if the
6040 * state change has occurred
6041 */
6042 mutex_enter(&port->fp_mutex);
6043 port->fp_out_fpcmds--;
6044 bailout = ((port->fp_statec_busy ||
6045 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) &&
6046 cmd->cmd_ulp_pkt) ? 1 : 0;
6047 mutex_exit(&port->fp_mutex);
6048
6049 if (FP_IS_PKT_ERROR(pkt) || bailout) {
6050 int skip_msg = 0;
6051 int giveup = 0;
6052
6053 if (cmd->cmd_ulp_pkt) {
6054 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state;
6055 cmd->cmd_ulp_pkt->pkt_reason = pkt->pkt_reason;
6056 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action;
6057 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln;
6058 }
6059
6060 /*
6061		 * If an unsolicited cross login already created
6062		 * a device, speed up the discovery by not retrying
6063		 * the command mindlessly.
6064 */
6065 if (pkt->pkt_pd == NULL &&
6066 fctl_get_remote_port_by_did(port, d_id) != NULL) {
6067 fp_iodone(cmd);
6068 return;
6069 }
6070
6071 if (pkt->pkt_pd != NULL) {
6072 giveup = (pkt->pkt_pd->pd_recepient ==
6073 PD_PLOGI_RECEPIENT) ? 1 : 0;
6074 if (giveup) {
6075 /*
6076 * This pd is marked as a PLOGI
6077 * recipient; stop retrying
6078 */
6079 FP_TRACE(FP_NHEAD1(3, 0),
6080 "fp_plogi_intr: stop retry as"
6081 " a cross login was accepted"
6082 " from d_id=%x, port=%p.",
6083 d_id, port);
6084 fp_iodone(cmd);
6085 return;
6086 }
6087 }
6088
6089 if (fp_common_intr(pkt, 0) == FC_SUCCESS) {
6090 return;
6091 }
6092
6093 if ((pd = fctl_get_remote_port_by_did(port, d_id)) != NULL) {
6094 mutex_enter(&pd->pd_mutex);
6095 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
6096 skip_msg++;
6097 }
6098 mutex_exit(&pd->pd_mutex);
6099 }
6100
6101 mutex_enter(&port->fp_mutex);
6102 if (!bailout && !(skip_msg && port->fp_statec_busy) &&
6103 port->fp_statec_busy <= 1 &&
6104 pkt->pkt_reason != FC_REASON_FCAL_OPN_FAIL) {
6105 mutex_exit(&port->fp_mutex);
6106 /*
6107 * In case of login collisions, JNI HBAs return the
6108 * FC pkt back to the initiator with the state set to
6109 * FC_PKT_LS_RJT and the reason to FC_REASON_LOGICAL_ERROR.
6110 * QLC HBAs handle such cases in the firmware and do not
6111 * return the LS_RJT with a logical error when a
6112 * login collision happens.
6113 */
6114 if ((pkt->pkt_state != FC_PKT_LS_RJT) ||
6115 (pkt->pkt_reason != FC_REASON_LOGICAL_ERROR)) {
6116 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt,
6117 "PLOGI to %x failed", d_id);
6118 }
6119 FP_TRACE(FP_NHEAD2(9, 0),
6120 "PLOGI to %x failed. state=%x reason=%x.",
6121 d_id, pkt->pkt_state, pkt->pkt_reason);
6122 } else {
6123 mutex_exit(&port->fp_mutex);
6124 }
6125
6126 fp_iodone(cmd);
6127 return;
6128 }
6129
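/*
 * The PLOGI completed successfully; pull the LS code out of the
 * response payload and make sure it really is an ACC before trusting
 * the WWNs in it.
 */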
6130 acc = (la_els_logi_t *)pkt->pkt_resp;
6131
6132 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc,
6133 sizeof (resp), DDI_DEV_AUTOINCR);
6134
6135 ASSERT(resp.ls_code == LA_ELS_ACC);
6136 if (resp.ls_code != LA_ELS_ACC) {
6137 (void) fp_common_intr(pkt, 1);
6138 return;
6139 }
6140
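/*
 * Logins to the well-known Name Server and Fabric Controller addresses
 * need no remote port bookkeeping; just remember which class of
 * service the login went out on.
 */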
6141 if (d_id == FS_NAME_SERVER || d_id == FS_FABRIC_CONTROLLER) {
6142 mutex_enter(&port->fp_mutex);
6143 port->fp_ns_login_class = FC_TRAN_CLASS(pkt->pkt_tran_flags);
6144 mutex_exit(&port->fp_mutex);
6145 fp_iodone(cmd);
6146 return;
6147 }
6148
6149 ASSERT(acc == (la_els_logi_t *)pkt->pkt_resp);
6150
6151 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&pwwn,
6152 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t),
6153 DDI_DEV_AUTOINCR);
6154
6155 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&nwwn,
6156 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t),
6157 DDI_DEV_AUTOINCR);
6158
6159 ASSERT(fctl_is_wwn_zero(&pwwn) == FC_FAILURE);
6160 ASSERT(fctl_is_wwn_zero(&nwwn) == FC_FAILURE);
6161
6162 if ((pd = pkt->pkt_pd) == NULL) {
6163 pd = fctl_get_remote_port_by_pwwn(port, &pwwn);
6164 if (pd == NULL) {
6165 FP_TRACE(FP_NHEAD2(1, 0), "fp_plogi_intr: fp %x pd %x",
6166 port->fp_port_id.port_id, d_id);
6167 pd = fctl_create_remote_port(port, &nwwn, &pwwn, d_id,
6168 PD_PLOGI_INITIATOR, KM_NOSLEEP);
6169 if (pd == NULL) {
6170 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
6171 "couldn't create port device handles"
6172 " d_id=%x", d_id);
6173 fp_iodone(cmd);
6174 return;
6175 }
6176 } else {
6177 fc_remote_port_t *tmp_pd;
6178
6179 tmp_pd = fctl_get_remote_port_by_did(port, d_id);
6180 if (tmp_pd != NULL) {
6181 fp_iodone(cmd);
6182 return;
6183 }
6184
6185 mutex_enter(&port->fp_mutex);
6186 mutex_enter(&pd->pd_mutex);
6187 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN) ||
6188 (pd->pd_aux_flags & PD_LOGGED_OUT)) {
6189 cmd->cmd_flags |= FP_CMD_PLOGI_RETAIN;
6190 }
6191
6192 if (pd->pd_type == PORT_DEVICE_OLD) {
6193 if (pd->pd_port_id.port_id != d_id) {
6194 fctl_delist_did_table(port, pd);
6195 pd->pd_type = PORT_DEVICE_CHANGED;
6196 pd->pd_port_id.port_id = d_id;
6197 } else {
6198 pd->pd_type = PORT_DEVICE_NOCHANGE;
6199 }
6200 }
6201
6202 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) {
6203 char ww_name[17];
6204
6205 fc_wwn_to_str(&pd->pd_port_name, ww_name);
6206
6207 mutex_exit(&pd->pd_mutex);
6208 mutex_exit(&port->fp_mutex);
6209 FP_TRACE(FP_NHEAD2(9, 0),
6210 "Possible Duplicate name or address"
6211 " identifiers in the PLOGI response"
6212 " D_ID=%x, PWWN=%s: Please check the"
6213 " configuration", d_id, ww_name);
6214 fp_iodone(cmd);
6215 return;
6216 }
6217 fctl_enlist_did_table(port, pd);
6218 pd->pd_aux_flags &= ~PD_LOGGED_OUT;
6219 mutex_exit(&pd->pd_mutex);
6220 mutex_exit(&port->fp_mutex);
6221 }
6222 } else {
6223 fc_remote_port_t *tmp_pd, *new_wwn_pd;
6224
6225 tmp_pd = fctl_get_remote_port_by_did(port, d_id);
6226 new_wwn_pd = fctl_get_remote_port_by_pwwn(port, &pwwn);
6227
6228 mutex_enter(&port->fp_mutex);
6229 mutex_enter(&pd->pd_mutex);
6230 if (fctl_wwn_cmp(&pd->pd_port_name, &pwwn) == 0) {
6231 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: d_id=%x,"
6232 " pd_state=%x pd_type=%x", d_id, pd->pd_state,
6233 pd->pd_type);
6234 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN &&
6235 pd->pd_type == PORT_DEVICE_OLD) ||
6236 (pd->pd_aux_flags & PD_LOGGED_OUT)) {
6237 pd->pd_type = PORT_DEVICE_NOCHANGE;
6238 } else if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
6239 pd->pd_type = PORT_DEVICE_NEW;
6240 }
6241 } else {
6242 char old_name[17];
6243 char new_name[17];
6244
6245 fc_wwn_to_str(&pd->pd_port_name, old_name);
6246 fc_wwn_to_str(&pwwn, new_name);
6247
6248 FP_TRACE(FP_NHEAD1(9, 0),
6249 "fp_plogi_intr: PWWN of a device with D_ID=%x "
6250 "changed. New PWWN = %s, OLD PWWN = %s ; tmp_pd:%p "
6251 "pd:%p new_wwn_pd:%p, cmd_ulp_pkt:%p, bailout:0x%x",
6252 d_id, new_name, old_name, tmp_pd, pd, new_wwn_pd,
6253 cmd->cmd_ulp_pkt, bailout);
6254
6255 FP_TRACE(FP_NHEAD2(9, 0),
6256 "PWWN of a device with D_ID=%x changed."
6257 " New PWWN = %s, OLD PWWN = %s", d_id,
6258 new_name, old_name);
6259
6260 if (cmd->cmd_ulp_pkt && !bailout) {
6261 fc_remote_node_t *rnodep;
6262 fc_portmap_t *changelist;
6263 fc_portmap_t *listptr;
6264 int len = 1;
6265 /* # entries in changelist */
6266
6267 fctl_delist_pwwn_table(port, pd);
6268
6269 /*
6270 * Let's now check if there already is a pd with
6271 * this new WWN in the table. If so, we'll mark
6272 * it as invalid.
6273 */
6274
6275 if (new_wwn_pd) {
6276 /*
6277 * There is another pd in the pwwn
6278 * table with the same WWN that we got
6279 * in the PLOGI payload. We have to get
6280 * it out of the pwwn table, update the
6281 * pd's state (fp_fillout_old_map does
6282 * this for us) and add it to the
6283 * changelist that goes up to ULPs.
6284 *
6285 * len is length of changelist and so
6286 * increment it.
6287 */
6288 len++;
6289
6290 if (tmp_pd != pd) {
6291 /*
6292 * Odd case where pwwn and did
6293 * tables are out of sync but
6294 * we will handle that too. See
6295 * more comments below.
6296 *
6297 * One more device that ULPs
6298 * should know about and so len
6299 * gets incremented again.
6300 */
6301 len++;
6302 }
6303
6304 listptr = changelist = kmem_zalloc(len *
6305 sizeof (*changelist), KM_SLEEP);
6306
6307 mutex_enter(&new_wwn_pd->pd_mutex);
6308 rnodep = new_wwn_pd->pd_remote_nodep;
6309 mutex_exit(&new_wwn_pd->pd_mutex);
6310
6311 /*
6312 * Hold the fd_mutex since
6313 * fctl_copy_portmap_held expects it.
6314 * Preserve lock hierarchy by grabbing
6315 * fd_mutex before pd_mutex
6316 */
6317 if (rnodep) {
6318 mutex_enter(&rnodep->fd_mutex);
6319 }
6320 mutex_enter(&new_wwn_pd->pd_mutex);
6321 fp_fillout_old_map_held(listptr++,
6322 new_wwn_pd, 0);
6323 mutex_exit(&new_wwn_pd->pd_mutex);
6324 if (rnodep) {
6325 mutex_exit(&rnodep->fd_mutex);
6326 }
6327
6328 /*
6329 * Safety check:
6330 * Let's ensure that the pwwn and did
6331 * tables are in sync. Ideally, we
6332 * should not find that these two pd's
6333 * are different.
6334 */
6335 if (tmp_pd != pd) {
6336 mutex_enter(&tmp_pd->pd_mutex);
6337 rnodep =
6338 tmp_pd->pd_remote_nodep;
6339 mutex_exit(&tmp_pd->pd_mutex);
6340
6341 /* As above grab fd_mutex */
6342 if (rnodep) {
6343 mutex_enter(&rnodep->
6344 fd_mutex);
6345 }
6346 mutex_enter(&tmp_pd->pd_mutex);
6347
6348 fp_fillout_old_map_held(
6349 listptr++, tmp_pd, 0);
6350
6351 mutex_exit(&tmp_pd->pd_mutex);
6352 if (rnodep) {
6353 mutex_exit(&rnodep->
6354 fd_mutex);
6355 }
6356
6357 /*
6358 * Now add "pd" (not tmp_pd)
6359 * to fp_did_table to sync it up
6360 * with fp_pwwn_table
6361 *
6362 * pd->pd_mutex is already held
6363 * at this point
6364 */
6365 fctl_enlist_did_table(port, pd);
6366 }
6367 } else {
6368 listptr = changelist = kmem_zalloc(
6369 sizeof (*changelist), KM_SLEEP);
6370 }
6371
6372 ASSERT(changelist != NULL);
6373
6374 fp_fillout_changed_map(listptr, pd, &d_id,
6375 &pwwn);
6376 fctl_enlist_pwwn_table(port, pd);
6377
6378 mutex_exit(&pd->pd_mutex);
6379 mutex_exit(&port->fp_mutex);
6380
6381 fp_iodone(cmd);
6382
6383 (void) fp_ulp_devc_cb(port, changelist, len,
6384 len, KM_NOSLEEP, 0);
6385
6386 return;
6387 }
6388 }
6389
6390 if (pd->pd_porttype.port_type == FC_NS_PORT_NL) {
6391 nl_port = 1;
6392 }
6393 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) {
6394 pd->pd_aux_flags &= ~PD_LOGGED_OUT;
6395 }
6396
6397 mutex_exit(&pd->pd_mutex);
6398 mutex_exit(&port->fp_mutex);
6399
6400 if (tmp_pd == NULL) {
6401 mutex_enter(&port->fp_mutex);
6402 mutex_enter(&pd->pd_mutex);
6403 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) {
6404 char ww_name[17];
6405
6406 fc_wwn_to_str(&pd->pd_port_name, ww_name);
6407 mutex_exit(&pd->pd_mutex);
6408 mutex_exit(&port->fp_mutex);
6409 FP_TRACE(FP_NHEAD2(9, 0),
6410 "Possible Duplicate name or address"
6411 " identifiers in the PLOGI response"
6412 " D_ID=%x, PWWN=%s: Please check the"
6413 " configuration", d_id, ww_name);
6414 fp_iodone(cmd);
6415 return;
6416 }
6417 fctl_enlist_did_table(port, pd);
6418 pd->pd_aux_flags &= ~PD_LOGGED_OUT;
6419 mutex_exit(&pd->pd_mutex);
6420 mutex_exit(&port->fp_mutex);
6421 }
6422 }
6423 fp_register_login(&pkt->pkt_resp_acc, pd, acc,
6424 FC_TRAN_CLASS(pkt->pkt_tran_flags));
6425
6426 if (cmd->cmd_ulp_pkt) {
6427 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state;
6428 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action;
6429 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln;
6430 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) {
6431 if (pd != NULL) {
6432 FP_TRACE(FP_NHEAD1(9, 0),
6433 "fp_plogi_intr;"
6434 "ulp_pkt's pd is NULL, get a pd %p",
6435 pd);
6436 mutex_enter(&pd->pd_mutex);
6437 pd->pd_ref_count++;
6438 mutex_exit(&pd->pd_mutex);
6439 }
6440 cmd->cmd_ulp_pkt->pkt_pd = pd;
6441 }
6442 bcopy((caddr_t)&pkt->pkt_resp_fhdr,
6443 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr,
6444 sizeof (fc_frame_hdr_t));
6445 bcopy((caddr_t)pkt->pkt_resp,
6446 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp,
6447 sizeof (la_els_logi_t));
6448 }
6449
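/*
 * On a private loop topology (or when the remote port is an NL_Port),
 * follow the PLOGI with an ADISC, reusing this very command, so the
 * device's hard address can be picked up.
 */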
6450 mutex_enter(&port->fp_mutex);
6451 if (port->fp_topology == FC_TOP_PRIVATE_LOOP || nl_port) {
6452 mutex_enter(&pd->pd_mutex);
6453
6454 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
6455 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
6456 cmd->cmd_retry_count = fp_retry_count;
6457
6458 /*
6459 * If the fc_remote_port_t pointer is not set in the given
6460 * fc_packet_t, then this fc_remote_port_t must have just
6461 * been created. Save the pointer and also increment the
6462 * fc_remote_port_t reference count.
6463 */
6464 if (pkt->pkt_pd == NULL) {
6465 pkt->pkt_pd = pd;
6466 pd->pd_ref_count++; /* It's in use! */
6467 }
6468
6469 fp_adisc_init(cmd, cmd->cmd_job);
6470
6471 pkt->pkt_cmdlen = sizeof (la_els_adisc_t);
6472 pkt->pkt_rsplen = sizeof (la_els_adisc_t);
6473
6474 mutex_exit(&pd->pd_mutex);
6475 mutex_exit(&port->fp_mutex);
6476
6477 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) {
6478 return;
6479 }
6480 } else {
6481 mutex_exit(&port->fp_mutex);
6482 }
6483
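/*
 * The login is not being retained, so turn this command into a LOGO
 * back to the same remote port.
 */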
6484 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) {
6485 mutex_enter(&port->fp_mutex);
6486 mutex_enter(&pd->pd_mutex);
6487
6488 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
6489 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
6490 cmd->cmd_retry_count = fp_retry_count;
6491
6492 fp_logo_init(pd, cmd, cmd->cmd_job);
6493
6494 pkt->pkt_cmdlen = sizeof (la_els_logo_t);
6495 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN;
6496
6497 mutex_exit(&pd->pd_mutex);
6498 mutex_exit(&port->fp_mutex);
6499
6500 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) {
6501 return;
6502 }
6503
6504 }
6505 fp_iodone(cmd);
6506 }
6507
6508
6509 /*
6510 * Handle solicited ADISC response
6511 */
6512 static void
6513 fp_adisc_intr(fc_packet_t *pkt)
6514 {
6515 int rval;
6516 int bailout;
6517 fp_cmd_t *cmd, *logi_cmd;
6518 fc_local_port_t *port;
6519 fc_remote_port_t *pd;
6520 la_els_adisc_t *acc;
6521 ls_code_t resp;
6522 fc_hardaddr_t ha;
6523 fc_portmap_t *changelist;
6524 int initiator, adiscfail = 0;
6525
6526 pd = pkt->pkt_pd;
6527 cmd = pkt->pkt_ulp_private;
6528 port = cmd->cmd_port;
6529
6530 #ifndef __lock_lint
6531 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter);
6532 #endif
6533
6534 ASSERT(pd != NULL && port != NULL && cmd != NULL);
6535
6536 mutex_enter(&port->fp_mutex);
6537 port->fp_out_fpcmds--;
6538 bailout = ((port->fp_statec_busy ||
6539 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) &&
6540 cmd->cmd_ulp_pkt) ? 1 : 0;
6541 mutex_exit(&port->fp_mutex);
6542
6543 if (bailout) {
6544 fp_iodone(cmd);
6545 return;
6546 }
6547
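/*
 * A clean ADISC ACC carries the remote port's hard address; use it to
 * decide whether the device changed and, on a private loop, whether
 * its address identifier still matches.
 */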
6548 if (pkt->pkt_state == FC_PKT_SUCCESS && pkt->pkt_resp_resid == 0) {
6549 acc = (la_els_adisc_t *)pkt->pkt_resp;
6550
6551 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp,
6552 (uint8_t *)acc, sizeof (resp), DDI_DEV_AUTOINCR);
6553
6554 if (resp.ls_code == LA_ELS_ACC) {
6555 int is_private;
6556
6557 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&ha,
6558 (uint8_t *)&acc->hard_addr, sizeof (ha),
6559 DDI_DEV_AUTOINCR);
6560
6561 mutex_enter(&port->fp_mutex);
6562
6563 is_private =
6564 (port->fp_topology == FC_TOP_PRIVATE_LOOP) ? 1 : 0;
6565
6566 mutex_enter(&pd->pd_mutex);
6567 if ((pd->pd_aux_flags & PD_IN_DID_QUEUE) == 0) {
6568 fctl_enlist_did_table(port, pd);
6569 }
6570 mutex_exit(&pd->pd_mutex);
6571
6572 mutex_exit(&port->fp_mutex);
6573
6574 mutex_enter(&pd->pd_mutex);
6575 if (pd->pd_type != PORT_DEVICE_NEW) {
6576 if (is_private && (pd->pd_hard_addr.hard_addr !=
6577 ha.hard_addr)) {
6578 pd->pd_type = PORT_DEVICE_CHANGED;
6579 } else {
6580 pd->pd_type = PORT_DEVICE_NOCHANGE;
6581 }
6582 }
6583
6584 if (is_private && (ha.hard_addr &&
6585 pd->pd_port_id.port_id != ha.hard_addr)) {
6586 char ww_name[17];
6587
6588 fc_wwn_to_str(&pd->pd_port_name, ww_name);
6589
6590 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
6591 "NL_Port Identifier %x doesn't match"
6592 " Hard Address %x; will use Port"
6593 " WWN %s", pd->pd_port_id.port_id,
6594 ha.hard_addr, ww_name);
6595
6596 pd->pd_hard_addr.hard_addr = 0;
6597 } else {
6598 pd->pd_hard_addr.hard_addr = ha.hard_addr;
6599 }
6600 mutex_exit(&pd->pd_mutex);
6601 } else {
6602 if (fp_common_intr(pkt, 0) == FC_SUCCESS) {
6603 return;
6604 }
6605 }
6606 } else {
6607 if (fp_common_intr(pkt, 0) == FC_SUCCESS) {
6608 return;
6609 }
6610
6611 mutex_enter(&port->fp_mutex);
6612 if (port->fp_statec_busy <= 1) {
6613 mutex_exit(&port->fp_mutex);
6614 if (pkt->pkt_state == FC_PKT_LS_RJT &&
6615 pkt->pkt_reason == FC_REASON_CMD_UNABLE) {
6616 uchar_t class;
6617 int cmd_flag;
6618 uint32_t src_id;
6619
6620 class = fp_get_nextclass(port,
6621 FC_TRAN_CLASS_INVALID);
6622 if (class == FC_TRAN_CLASS_INVALID) {
6623 fp_iodone(cmd);
6624 return;
6625 }
6626
6627 FP_TRACE(FP_NHEAD1(1, 0), "ADISC re-login; "
6628 "fp_state=0x%x, pkt_state=0x%x, "
6629 "reason=0x%x, class=0x%x",
6630 port->fp_state, pkt->pkt_state,
6631 pkt->pkt_reason, class);
6632 cmd_flag = FP_CMD_PLOGI_RETAIN;
6633
6634 logi_cmd = fp_alloc_pkt(port,
6635 sizeof (la_els_logi_t),
6636 sizeof (la_els_logi_t), KM_SLEEP, pd);
6637 if (logi_cmd == NULL) {
6638 fp_iodone(cmd);
6639 return;
6640 }
6641
6642 logi_cmd->cmd_pkt.pkt_tran_flags =
6643 FC_TRAN_INTR | class;
6644 logi_cmd->cmd_pkt.pkt_tran_type =
6645 FC_PKT_EXCHANGE;
6646 logi_cmd->cmd_flags = cmd_flag;
6647 logi_cmd->cmd_retry_count = fp_retry_count;
6648 logi_cmd->cmd_ulp_pkt = NULL;
6649
6650 mutex_enter(&port->fp_mutex);
6651 src_id = port->fp_port_id.port_id;
6652 mutex_exit(&port->fp_mutex);
6653
6654 fp_xlogi_init(port, logi_cmd, src_id,
6655 pkt->pkt_cmd_fhdr.d_id, fp_plogi_intr,
6656 cmd->cmd_job, LA_ELS_PLOGI);
6657 if (pd) {
6658 mutex_enter(&pd->pd_mutex);
6659 pd->pd_flags = PD_ELS_IN_PROGRESS;
6660 mutex_exit(&pd->pd_mutex);
6661 }
6662
6663 if (fp_sendcmd(port, logi_cmd,
6664 port->fp_fca_handle) == FC_SUCCESS) {
6665 fp_free_pkt(cmd);
6666 return;
6667 } else {
6668 fp_free_pkt(logi_cmd);
6669 }
6670 } else {
6671 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt,
6672 "ADISC to %x failed, cmd_flags=%x",
6673 pkt->pkt_cmd_fhdr.d_id, cmd->cmd_flags);
6674 cmd->cmd_flags &= ~FP_CMD_PLOGI_RETAIN;
6675 adiscfail = 1;
6676 }
6677 } else {
6678 mutex_exit(&port->fp_mutex);
6679 }
6680 }
6681
6682 if (cmd->cmd_ulp_pkt) {
6683 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state;
6684 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action;
6685 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln;
6686 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) {
6687 cmd->cmd_ulp_pkt->pkt_pd = pd;
6688 FP_TRACE(FP_NHEAD1(9, 0),
6689 "fp_adisc_intr;"
6690 "ulp_pkt's pd is NULL, get a pd %p",
6691 pd);
6692
6693 }
6694 bcopy((caddr_t)&pkt->pkt_resp_fhdr,
6695 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr,
6696 sizeof (fc_frame_hdr_t));
6697 bcopy((caddr_t)pkt->pkt_resp,
6698 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp,
6699 sizeof (la_els_adisc_t));
6700 }
6701
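/*
 * The login is not being retained: send a LOGO and, if the ADISC
 * itself failed, let the ULPs know about the device state change.
 */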
6702 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) {
6703 FP_TRACE(FP_NHEAD1(9, 0),
6704 "fp_adisc_intr: Perform LOGO.cmd_flags=%x, "
6705 "fp_retry_count=%x, ulp_pkt=%p",
6706 cmd->cmd_flags, fp_retry_count, cmd->cmd_ulp_pkt);
6707
6708 mutex_enter(&port->fp_mutex);
6709 mutex_enter(&pd->pd_mutex);
6710
6711 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
6712 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
6713 cmd->cmd_retry_count = fp_retry_count;
6714
6715 fp_logo_init(pd, cmd, cmd->cmd_job);
6716
6717 pkt->pkt_cmdlen = sizeof (la_els_logo_t);
6718 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN;
6719
6720 mutex_exit(&pd->pd_mutex);
6721 mutex_exit(&port->fp_mutex);
6722
6723 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
6724 if (adiscfail) {
6725 mutex_enter(&pd->pd_mutex);
6726 initiator =
6727 ((pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0);
6728 pd->pd_state = PORT_DEVICE_VALID;
6729 pd->pd_aux_flags |= PD_LOGGED_OUT;
6730 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) {
6731 pd->pd_type = PORT_DEVICE_NEW;
6732 } else {
6733 pd->pd_type = PORT_DEVICE_NOCHANGE;
6734 }
6735 mutex_exit(&pd->pd_mutex);
6736
6737 changelist =
6738 kmem_zalloc(sizeof (*changelist), KM_SLEEP);
6739
6740 if (initiator) {
6741 fp_unregister_login(pd);
6742 fctl_copy_portmap(changelist, pd);
6743 } else {
6744 fp_fillout_old_map(changelist, pd, 0);
6745 }
6746
6747 FP_TRACE(FP_NHEAD1(9, 0),
6748 "fp_adisc_intr: Dev change notification "
6749 "to ULP port=%p, pd=%p, map_type=%x map_state=%x "
6750 "map_flags=%x initiator=%d", port, pd,
6751 changelist->map_type, changelist->map_state,
6752 changelist->map_flags, initiator);
6753
6754 (void) fp_ulp_devc_cb(port, changelist,
6755 1, 1, KM_SLEEP, 0);
6756 }
6757 if (rval == FC_SUCCESS) {
6758 return;
6759 }
6760 }
6761 fp_iodone(cmd);
6762 }
6763
6764
6765 /*
6766 * Handle solicited LOGO response
6767 */
6768 static void
6769 fp_logo_intr(fc_packet_t *pkt)
6770 {
6771 ls_code_t resp;
6772 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port;
6773
6774 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex);
6775 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--;
6776 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex);
6777
6778 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp,
6779 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR);
6780
6781 if (FP_IS_PKT_ERROR(pkt)) {
6782 (void) fp_common_intr(pkt, 1);
6783 return;
6784 }
6785
6786 ASSERT(resp.ls_code == LA_ELS_ACC);
6787 if (resp.ls_code != LA_ELS_ACC) {
6788 (void) fp_common_intr(pkt, 1);
6789 return;
6790 }
6791
6792 if (pkt->pkt_pd != NULL) {
6793 fp_unregister_login(pkt->pkt_pd);
6794 }
6795
6796 fp_iodone(pkt->pkt_ulp_private);
6797 }
6798
6799
6800 /*
6801 * Handle solicited RNID response
6802 */
6803 static void
6804 fp_rnid_intr(fc_packet_t *pkt)
6805 {
6806 ls_code_t resp;
6807 job_request_t *job;
6808 fp_cmd_t *cmd;
6809 la_els_rnid_acc_t *acc;
6810 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port;
6811
6812 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp,
6813 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR);
6814 cmd = pkt->pkt_ulp_private;
6815
6816 mutex_enter(&cmd->cmd_port->fp_mutex);
6817 cmd->cmd_port->fp_out_fpcmds--;
6818 mutex_exit(&cmd->cmd_port->fp_mutex);
6819
6820 job = cmd->cmd_job;
6821 ASSERT(job->job_private != NULL);
6822
6823 /* If failure or LS_RJT then retry the packet, if needed */
6824 if (pkt->pkt_state != FC_PKT_SUCCESS || resp.ls_code != LA_ELS_ACC) {
6825 (void) fp_common_intr(pkt, 1);
6826 return;
6827 }
6828
6829 /* Save node_id memory allocated in ioctl code */
6830 acc = (la_els_rnid_acc_t *)pkt->pkt_resp;
6831
6832 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)job->job_private,
6833 (uint8_t *)acc, sizeof (la_els_rnid_acc_t), DDI_DEV_AUTOINCR);
6834
6835 /* wakeup the ioctl thread and free the pkt */
6836 fp_iodone(cmd);
6837 }
6838
6839
6840 /*
6841 * Handle solicited RLS response
6842 */
6843 static void
6844 fp_rls_intr(fc_packet_t *pkt)
6845 {
6846 ls_code_t resp;
6847 job_request_t *job;
6848 fp_cmd_t *cmd;
6849 la_els_rls_acc_t *acc;
6850 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port;
6851
6852 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp,
6853 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR);
6854 cmd = pkt->pkt_ulp_private;
6855
6856 mutex_enter(&cmd->cmd_port->fp_mutex);
6857 cmd->cmd_port->fp_out_fpcmds--;
6858 mutex_exit(&cmd->cmd_port->fp_mutex);
6859
6860 job = cmd->cmd_job;
6861 ASSERT(job->job_private != NULL);
6862
6863 /* If failure or LS_RJT then retry the packet, if needed */
6864 if (FP_IS_PKT_ERROR(pkt) || resp.ls_code != LA_ELS_ACC) {
6865 (void) fp_common_intr(pkt, 1);
6866 return;
6867 }
6868
6869 /* Save link error status block in memory allocated in ioctl code */
6870 acc = (la_els_rls_acc_t *)pkt->pkt_resp;
6871
6872 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)job->job_private,
6873 (uint8_t *)&acc->rls_link_params, sizeof (fc_rls_acc_t),
6874 DDI_DEV_AUTOINCR);
6875
6876 /* wakeup the ioctl thread and free the pkt */
6877 fp_iodone(cmd);
6878 }
6879
6880
6881 /*
6882 * A solicited command completion interrupt (mostly for commands
6883 * that require almost no post-processing, such as the SCR ELS)
6884 */
6885 static void
6886 fp_intr(fc_packet_t *pkt)
6887 {
6888 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex);
6889 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--;
6890 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex);
6891
6892 if (FP_IS_PKT_ERROR(pkt)) {
6893 (void) fp_common_intr(pkt, 1);
6894 return;
6895 }
6896 fp_iodone(pkt->pkt_ulp_private);
6897 }
6898
6899
6900 /*
6901 * Handle the underlying port's state change
6902 */
6903 static void
6904 fp_statec_cb(opaque_t port_handle, uint32_t state)
6905 {
6906 fc_local_port_t *port = port_handle;
6907 job_request_t *job;
6908
6909 /*
6910 * If it is not possible to process the callback,
6911 * just drop it on the floor; don't bother
6912 * doing something that isn't safe at this time.
6913 */
6914 mutex_enter(&port->fp_mutex);
6915 if ((port->fp_soft_state &
6916 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) ||
6917 (FC_PORT_STATE_MASK(port->fp_state) == FC_PORT_STATE_MASK(state))) {
6918 mutex_exit(&port->fp_mutex);
6919 return;
6920 }
6921
6922 if (port->fp_statec_busy == 0) {
6923 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
6924 #ifdef DEBUG
6925 } else {
6926 ASSERT(port->fp_soft_state & FP_SOFT_IN_STATEC_CB);
6927 #endif
6928 }
6929
6930 port->fp_statec_busy++;
6931
6932 /*
6933 * For now, force the trusted method of device authentication (by
6934 * PLOGI) when LIPs do not involve an OFFLINE to ONLINE transition.
6935 */
6936 if (FC_PORT_STATE_MASK(state) == FC_STATE_LIP ||
6937 FC_PORT_STATE_MASK(state) == FC_STATE_LIP_LBIT_SET) {
6938 state = FC_PORT_SPEED_MASK(port->fp_state) | FC_STATE_LOOP;
6939 fp_port_offline(port, 0);
6940 }
6941 mutex_exit(&port->fp_mutex);
6942
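/*
 * Hand the real work off to the job thread: allocate an asynchronous
 * job for the new state and queue it.  If the allocation fails, undo
 * the fp_statec_busy accounting and drop the callback.
 */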
6943 switch (FC_PORT_STATE_MASK(state)) {
6944 case FC_STATE_OFFLINE:
6945 job = fctl_alloc_job(JOB_PORT_OFFLINE,
6946 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP);
6947 if (job == NULL) {
6948 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
6949 "fp_statec_cb() couldn't submit a job "
6950 "to the thread: failing..");
6951 mutex_enter(&port->fp_mutex);
6952 if (--port->fp_statec_busy == 0) {
6953 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
6954 }
6955 mutex_exit(&port->fp_mutex);
6956 return;
6957 }
6958 mutex_enter(&port->fp_mutex);
6959 /*
6960 * Zero out this field so that we do not retain
6961 * the fabric name as it's no longer valid
6962 */
6963 bzero(&port->fp_fabric_name, sizeof (la_wwn_t));
6964 port->fp_state = state;
6965 mutex_exit(&port->fp_mutex);
6966
6967 fctl_enque_job(port, job);
6968 break;
6969
6970 case FC_STATE_ONLINE:
6971 case FC_STATE_LOOP:
6972 mutex_enter(&port->fp_mutex);
6973 port->fp_state = state;
6974
6975 if (port->fp_offline_tid) {
6976 timeout_id_t tid;
6977
6978 tid = port->fp_offline_tid;
6979 port->fp_offline_tid = NULL;
6980 mutex_exit(&port->fp_mutex);
6981 (void) untimeout(tid);
6982 } else {
6983 mutex_exit(&port->fp_mutex);
6984 }
6985
6986 job = fctl_alloc_job(JOB_PORT_ONLINE,
6987 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP);
6988 if (job == NULL) {
6989 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
6990 "fp_statec_cb() couldn't submit a job "
6991 "to the thread: failing..");
6992
6993 mutex_enter(&port->fp_mutex);
6994 if (--port->fp_statec_busy == 0) {
6995 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
6996 }
6997 mutex_exit(&port->fp_mutex);
6998 return;
6999 }
7000 fctl_enque_job(port, job);
7001 break;
7002
7003 case FC_STATE_RESET_REQUESTED:
7004 mutex_enter(&port->fp_mutex);
7005 port->fp_state = FC_STATE_OFFLINE;
7006 port->fp_soft_state |= FP_SOFT_IN_FCA_RESET;
7007 mutex_exit(&port->fp_mutex);
7008 /* FALLTHROUGH */
7009
7010 case FC_STATE_RESET:
7011 job = fctl_alloc_job(JOB_ULP_NOTIFY,
7012 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP);
7013 if (job == NULL) {
7014 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
7015 "fp_statec_cb() couldn't submit a job"
7016 " to the thread: failing..");
7017
7018 mutex_enter(&port->fp_mutex);
7019 if (--port->fp_statec_busy == 0) {
7020 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
7021 }
7022 mutex_exit(&port->fp_mutex);
7023 return;
7024 }
7025
7026 /* squeeze into some field in the job structure */
7027 job->job_ulp_listlen = FC_PORT_STATE_MASK(state);
7028 fctl_enque_job(port, job);
7029 break;
7030
7031 case FC_STATE_TARGET_PORT_RESET:
7032 (void) fp_ulp_notify(port, state, KM_NOSLEEP);
7033 /* FALLTHROUGH */
7034
7035 case FC_STATE_NAMESERVICE:
7036 /* FALLTHROUGH */
7037
7038 default:
7039 mutex_enter(&port->fp_mutex);
7040 if (--port->fp_statec_busy == 0) {
7041 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
7042 }
7043 mutex_exit(&port->fp_mutex);
7044 break;
7045 }
7046 }
7047
7048
7049 /*
7050 * Register with the Name Server for RSCNs
7051 */
7052 static int
7053 fp_ns_scr(fc_local_port_t *port, job_request_t *job, uchar_t scr_func,
7054 int sleep)
7055 {
7056 uint32_t s_id;
7057 uchar_t class;
7058 fc_scr_req_t payload;
7059 fp_cmd_t *cmd;
7060 fc_packet_t *pkt;
7061
7062 mutex_enter(&port->fp_mutex);
7063 s_id = port->fp_port_id.port_id;
7064 class = port->fp_ns_login_class;
7065 mutex_exit(&port->fp_mutex);
7066
7067 cmd = fp_alloc_pkt(port, sizeof (fc_scr_req_t),
7068 sizeof (fc_scr_resp_t), sleep, NULL);
7069 if (cmd == NULL) {
7070 return (FC_NOMEM);
7071 }
7072
7073 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
7074 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
7075 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
7076 cmd->cmd_retry_count = fp_retry_count;
7077 cmd->cmd_ulp_pkt = NULL;
7078
7079 pkt = &cmd->cmd_pkt;
7080 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
7081
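/* 0xFFFFFD is the well-known address of the Fabric Controller */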
7082 fp_els_init(cmd, s_id, 0xFFFFFD, fp_intr, job);
7083
7084 payload.ls_code.ls_code = LA_ELS_SCR;
7085 payload.ls_code.mbz = 0;
7086 payload.scr_rsvd = 0;
7087 payload.scr_func = scr_func;
7088
7089 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
7090 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
7091
7092 job->job_counter = 1;
7093
7094 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
7095 fp_iodone(cmd);
7096 }
7097
7098 return (FC_SUCCESS);
7099 }
7100
7101
7102 /*
7103 * There are basically two methods to determine the total number of
7104 * devices out in the NS database; reading the details of the two
7105 * methods described below, it shouldn't be hard to identify which
7106 * of the two methods is better.
7107 *
7108 * Method 1.
7109 * Iteratively issue GANs until all port identifiers are walked
7110 *
7111 * Method 2.
7112 * Issue GID_PT (get port Identifiers) with Maximum residual
7113 * field in the request CT HEADER set to accommodate only the
7114 * CT HEADER in the response frame. And if FC-GS2 has been
7115 * carefully read, the NS here has a chance to FS_ACC the
7116 * request and indicate the residual size in the FS_ACC.
7117 *
7118 * Method 2 is wonderful, although it's not mandatory for the NS
7119 * to update the Maximum/Residual Field as can be seen in 4.3.1.6
7120 * (note with particular care the use of the auxiliary verb 'may')
7121 *
7122 */
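/*
 * A minimal sketch of the Method 2 arithmetic (and only a sketch; it
 * assumes the switch actually fills in the Maximum/Residual field): a
 * residual of N words in the GID_PT FS_ACC means N port identifiers
 * (one word each) were not transferred, and the FP_NS_SMART_COUNT path
 * below derives the device count from it as
 *
 * total_devices = 1 + ns_cmd->ns_resp_hdr.ct_aiusize;
 */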
7123 static int
7124 fp_ns_get_devcount(fc_local_port_t *port, job_request_t *job, int create,
7125 int sleep)
7126 {
7127 int flags;
7128 int rval;
7129 uint32_t src_id;
7130 fctl_ns_req_t *ns_cmd;
7131
7132 ASSERT(!MUTEX_HELD(&port->fp_mutex));
7133
7134 mutex_enter(&port->fp_mutex);
7135 src_id = port->fp_port_id.port_id;
7136 mutex_exit(&port->fp_mutex);
7137
7138 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) {
7139 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pt_t),
7140 sizeof (ns_resp_gid_pt_t), 0,
7141 (FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF), sleep);
7142
7143 if (ns_cmd == NULL) {
7144 return (FC_NOMEM);
7145 }
7146
7147 ns_cmd->ns_cmd_code = NS_GID_PT;
7148 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.port_type
7149 = FC_NS_PORT_NX; /* All port types */
7150 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.rsvd = 0;
7151
7152 } else {
7153 uint32_t ns_flags;
7154
7155 ns_flags = FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF;
7156 if (create) {
7157 ns_flags |= FCTL_NS_CREATE_DEVICE;
7158 }
7159 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t),
7160 sizeof (ns_resp_gan_t), sizeof (int), ns_flags, sleep);
7161
7162 if (ns_cmd == NULL) {
7163 return (FC_NOMEM);
7164 }
7165 ns_cmd->ns_gan_index = 0;
7166 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID;
7167 ns_cmd->ns_cmd_code = NS_GA_NXT;
7168 ns_cmd->ns_gan_max = 0xFFFF;
7169
7170 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = src_id;
7171 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0;
7172 }
7173
7174 flags = job->job_flags;
7175 job->job_flags &= ~JOB_TYPE_FP_ASYNC;
7176 job->job_counter = 1;
7177
7178 rval = fp_ns_query(port, ns_cmd, job, 1, sleep);
7179 job->job_flags = flags;
7180
7181 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) {
7182 uint16_t max_resid;
7183
7184 /*
7185 * Revert to scanning the NS if NS_GID_PT isn't
7186 * helping us figure out the total number of devices.
7187 */
7188 if (job->job_result != FC_SUCCESS ||
7189 ns_cmd->ns_resp_hdr.ct_cmdrsp != FS_ACC_IU) {
7190 mutex_enter(&port->fp_mutex);
7191 port->fp_options &= ~FP_NS_SMART_COUNT;
7192 mutex_exit(&port->fp_mutex);
7193
7194 fctl_free_ns_cmd(ns_cmd);
7195 return (fp_ns_get_devcount(port, job, create, sleep));
7196 }
7197
7198 mutex_enter(&port->fp_mutex);
7199 port->fp_total_devices = 1;
7200 max_resid = ns_cmd->ns_resp_hdr.ct_aiusize;
7201 if (max_resid) {
7202 /*
7203 * Since a port identifier is 4 bytes (one word) and
7204 * max_resid is also in words, max_resid simply indicates
7205 * the total number of port identifiers not
7206 * transferred.
7207 */
7208 port->fp_total_devices += max_resid;
7209 }
7210 mutex_exit(&port->fp_mutex);
7211 }
7212 mutex_enter(&port->fp_mutex);
7213 port->fp_total_devices = *((int *)ns_cmd->ns_data_buf);
7214 mutex_exit(&port->fp_mutex);
7215 fctl_free_ns_cmd(ns_cmd);
7216
7217 return (rval);
7218 }
7219
7220 /*
7221 * One heck of a function to serve userland.
7222 */
7223 static int
7224 fp_fciocmd(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio)
7225 {
7226 int rval = 0;
7227 int jcode;
7228 uint32_t ret;
7229 uchar_t open_flag;
7230 fcio_t *kfcio;
7231 job_request_t *job;
7232 boolean_t use32 = B_FALSE;
7233
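/*
 * Figure out whether the caller is a 32-bit process so that the FCIO
 * payloads handled below can be converted between data models.
 */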
7234 #ifdef _MULTI_DATAMODEL
7235 switch (ddi_model_convert_from(mode & FMODELS)) {
7236 case DDI_MODEL_ILP32:
7237 use32 = B_TRUE;
7238 break;
7239
7240 case DDI_MODEL_NONE:
7241 default:
7242 break;
7243 }
7244 #endif
7245
7246 mutex_enter(&port->fp_mutex);
7247 if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
7248 FP_SOFT_IN_UNSOL_CB)) {
7249 fcio->fcio_errno = FC_STATEC_BUSY;
7250 mutex_exit(&port->fp_mutex);
7251 rval = EAGAIN;
7252 if (fp_fcio_copyout(fcio, data, mode)) {
7253 rval = EFAULT;
7254 }
7255 return (rval);
7256 }
7257 open_flag = port->fp_flag;
7258 mutex_exit(&port->fp_mutex);
7259
7260 if (fp_check_perms(open_flag, fcio->fcio_cmd) != FC_SUCCESS) {
7261 fcio->fcio_errno = FC_FAILURE;
7262 rval = EACCES;
7263 if (fp_fcio_copyout(fcio, data, mode)) {
7264 rval = EFAULT;
7265 }
7266 return (rval);
7267 }
7268
7269 /*
7270 * If an exclusive open was demanded at open time, don't let
7271 * either innocuous or devil threads share the file
7272 * descriptor and fire down exclusive-access commands.
7273 */
7274 mutex_enter(&port->fp_mutex);
7275 if (port->fp_flag & FP_EXCL) {
7276 if (port->fp_flag & FP_EXCL_BUSY) {
7277 mutex_exit(&port->fp_mutex);
7278 fcio->fcio_errno = FC_FAILURE;
7279 return (EBUSY);
7280 }
7281 port->fp_flag |= FP_EXCL_BUSY;
7282 }
7283 mutex_exit(&port->fp_mutex);
7284
7285 fcio->fcio_errno = FC_SUCCESS;
7286
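/*
 * Most FCIO commands below follow the same pattern: validate the
 * caller's buffer sizes and transfer direction, snapshot whatever
 * state is needed under fp_mutex, and copy the result back out to
 * userland (with a 32-bit variant where the layouts differ).
 */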
7287 switch (fcio->fcio_cmd) {
7288 case FCIO_GET_HOST_PARAMS: {
7289 fc_port_dev_t *val;
7290 fc_port_dev32_t *val32;
7291 int index;
7292 int lilp_device_count;
7293 fc_lilpmap_t *lilp_map;
7294 uchar_t *alpa_list;
7295
7296 if (use32 == B_TRUE) {
7297 if (fcio->fcio_olen != sizeof (*val32) ||
7298 fcio->fcio_xfer != FCIO_XFER_READ) {
7299 rval = EINVAL;
7300 break;
7301 }
7302 } else {
7303 if (fcio->fcio_olen != sizeof (*val) ||
7304 fcio->fcio_xfer != FCIO_XFER_READ) {
7305 rval = EINVAL;
7306 break;
7307 }
7308 }
7309
7310 val = kmem_zalloc(sizeof (*val), KM_SLEEP);
7311
7312 mutex_enter(&port->fp_mutex);
7313 val->dev_did = port->fp_port_id;
7314 val->dev_hard_addr = port->fp_hard_addr;
7315 val->dev_pwwn = port->fp_service_params.nport_ww_name;
7316 val->dev_nwwn = port->fp_service_params.node_ww_name;
7317 val->dev_state = port->fp_state;
7318
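/*
 * Walk the LILP map to find this port's own position so it can be
 * reported in dev_did.priv_lilp_posit.
 */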
7319 lilp_map = &port->fp_lilp_map;
7320 alpa_list = &lilp_map->lilp_alpalist[0];
7321 lilp_device_count = lilp_map->lilp_length;
7322 for (index = 0; index < lilp_device_count; index++) {
7323 uint32_t d_id;
7324
7325 d_id = alpa_list[index];
7326 if (d_id == port->fp_port_id.port_id) {
7327 break;
7328 }
7329 }
7330 val->dev_did.priv_lilp_posit = (uint8_t)(index & 0xff);
7331
7332 bcopy(port->fp_fc4_types, val->dev_type,
7333 sizeof (port->fp_fc4_types));
7334 mutex_exit(&port->fp_mutex);
7335
7336 if (use32 == B_TRUE) {
7337 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP);
7338
7339 val32->dev_did = val->dev_did;
7340 val32->dev_hard_addr = val->dev_hard_addr;
7341 val32->dev_pwwn = val->dev_pwwn;
7342 val32->dev_nwwn = val->dev_nwwn;
7343 val32->dev_state = val->dev_state;
7344 val32->dev_did.priv_lilp_posit =
7345 val->dev_did.priv_lilp_posit;
7346
7347 bcopy(val->dev_type, val32->dev_type,
7348 sizeof (port->fp_fc4_types));
7349
7350 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf,
7351 fcio->fcio_olen, mode) == 0) {
7352 if (fp_fcio_copyout(fcio, data, mode)) {
7353 rval = EFAULT;
7354 }
7355 } else {
7356 rval = EFAULT;
7357 }
7358
7359 kmem_free(val32, sizeof (*val32));
7360 } else {
7361 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf,
7362 fcio->fcio_olen, mode) == 0) {
7363 if (fp_fcio_copyout(fcio, data, mode)) {
7364 rval = EFAULT;
7365 }
7366 } else {
7367 rval = EFAULT;
7368 }
7369 }
7370
7371 /* need to free "val" here */
7372 kmem_free(val, sizeof (*val));
7373 break;
7374 }
7375
7376 case FCIO_GET_OTHER_ADAPTER_PORTS: {
7377 uint32_t index;
7378 char *tmpPath;
7379 fc_local_port_t *tmpPort;
7380
7381 if (fcio->fcio_olen < MAXPATHLEN ||
7382 fcio->fcio_ilen != sizeof (uint32_t)) {
7383 rval = EINVAL;
7384 break;
7385 }
7386 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) {
7387 rval = EFAULT;
7388 break;
7389 }
7390
7391 tmpPort = fctl_get_adapter_port_by_index(port, index);
7392 if (tmpPort == NULL) {
7393 FP_TRACE(FP_NHEAD1(9, 0),
7394 "User supplied index out of range");
7395 fcio->fcio_errno = FC_BADPORT;
7396 rval = EFAULT;
7397 if (fp_fcio_copyout(fcio, data, mode)) {
7398 rval = EFAULT;
7399 }
7400 break;
7401 }
7402
7403 tmpPath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
7404 (void) ddi_pathname(tmpPort->fp_port_dip, tmpPath);
7405 if (fp_copyout((void *)tmpPath, (void *)fcio->fcio_obuf,
7406 MAXPATHLEN, mode) == 0) {
7407 if (fp_fcio_copyout(fcio, data, mode)) {
7408 rval = EFAULT;
7409 }
7410 } else {
7411 rval = EFAULT;
7412 }
7413 kmem_free(tmpPath, MAXPATHLEN);
7414 break;
7415 }
7416
7417 case FCIO_NPIV_GET_ADAPTER_ATTRIBUTES:
7418 case FCIO_GET_ADAPTER_ATTRIBUTES: {
7419 fc_hba_adapter_attributes_t *val;
7420 fc_hba_adapter_attributes32_t *val32;
7421
7422 if (use32 == B_TRUE) {
7423 if (fcio->fcio_olen < sizeof (*val32) ||
7424 fcio->fcio_xfer != FCIO_XFER_READ) {
7425 rval = EINVAL;
7426 break;
7427 }
7428 } else {
7429 if (fcio->fcio_olen < sizeof (*val) ||
7430 fcio->fcio_xfer != FCIO_XFER_READ) {
7431 rval = EINVAL;
7432 break;
7433 }
7434 }
7435
7436 val = kmem_zalloc(sizeof (*val), KM_SLEEP);
7437 val->version = FC_HBA_ADAPTER_ATTRIBUTES_VERSION;
7438 mutex_enter(&port->fp_mutex);
7439 bcopy(port->fp_hba_port_attrs.manufacturer,
7440 val->Manufacturer,
7441 sizeof (val->Manufacturer));
7442 bcopy(port->fp_hba_port_attrs.serial_number,
7443 val->SerialNumber,
7444 sizeof (val->SerialNumber));
7445 bcopy(port->fp_hba_port_attrs.model,
7446 val->Model,
7447 sizeof (val->Model));
7448 bcopy(port->fp_hba_port_attrs.model_description,
7449 val->ModelDescription,
7450 sizeof (val->ModelDescription));
7451 bcopy(port->fp_sym_node_name, val->NodeSymbolicName,
7452 port->fp_sym_node_namelen);
7453 bcopy(port->fp_hba_port_attrs.hardware_version,
7454 val->HardwareVersion,
7455 sizeof (val->HardwareVersion));
7456 bcopy(port->fp_hba_port_attrs.option_rom_version,
7457 val->OptionROMVersion,
7458 sizeof (val->OptionROMVersion));
7459 bcopy(port->fp_hba_port_attrs.firmware_version,
7460 val->FirmwareVersion,
7461 sizeof (val->FirmwareVersion));
7462 val->VendorSpecificID =
7463 port->fp_hba_port_attrs.vendor_specific_id;
7464 bcopy(&port->fp_service_params.node_ww_name.raw_wwn,
7465 &val->NodeWWN.raw_wwn,
7466 sizeof (val->NodeWWN.raw_wwn));
7467
7468
7469 bcopy(port->fp_hba_port_attrs.driver_name,
7470 val->DriverName,
7471 sizeof (val->DriverName));
7472 bcopy(port->fp_hba_port_attrs.driver_version,
7473 val->DriverVersion,
7474 sizeof (val->DriverVersion));
7475 mutex_exit(&port->fp_mutex);
7476
7477 if (fcio->fcio_cmd == FCIO_GET_ADAPTER_ATTRIBUTES) {
7478 val->NumberOfPorts = fctl_count_fru_ports(port, 0);
7479 } else {
7480 val->NumberOfPorts = fctl_count_fru_ports(port, 1);
7481 }
7482
7483 if (use32 == B_TRUE) {
7484 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP);
7485 val32->version = val->version;
7486 bcopy(val->Manufacturer, val32->Manufacturer,
7487 sizeof (val->Manufacturer));
7488 bcopy(val->SerialNumber, val32->SerialNumber,
7489 sizeof (val->SerialNumber));
7490 bcopy(val->Model, val32->Model,
7491 sizeof (val->Model));
7492 bcopy(val->ModelDescription, val32->ModelDescription,
7493 sizeof (val->ModelDescription));
7494 bcopy(val->NodeSymbolicName, val32->NodeSymbolicName,
7495 sizeof (val->NodeSymbolicName));
7496 bcopy(val->HardwareVersion, val32->HardwareVersion,
7497 sizeof (val->HardwareVersion));
7498 bcopy(val->OptionROMVersion, val32->OptionROMVersion,
7499 sizeof (val->OptionROMVersion));
7500 bcopy(val->FirmwareVersion, val32->FirmwareVersion,
7501 sizeof (val->FirmwareVersion));
7502 val32->VendorSpecificID = val->VendorSpecificID;
7503 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn,
7504 sizeof (val->NodeWWN.raw_wwn));
7505 bcopy(val->DriverName, val32->DriverName,
7506 sizeof (val->DriverName));
7507 bcopy(val->DriverVersion, val32->DriverVersion,
7508 sizeof (val->DriverVersion));
7509
7510 val32->NumberOfPorts = val->NumberOfPorts;
7511
7512 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf,
7513 fcio->fcio_olen, mode) == 0) {
7514 if (fp_fcio_copyout(fcio, data, mode)) {
7515 rval = EFAULT;
7516 }
7517 } else {
7518 rval = EFAULT;
7519 }
7520
7521 kmem_free(val32, sizeof (*val32));
7522 } else {
7523 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf,
7524 fcio->fcio_olen, mode) == 0) {
7525 if (fp_fcio_copyout(fcio, data, mode)) {
7526 rval = EFAULT;
7527 }
7528 } else {
7529 rval = EFAULT;
7530 }
7531 }
7532
7533 kmem_free(val, sizeof (*val));
7534 break;
7535 }
7536
7537 case FCIO_GET_NPIV_ATTRIBUTES: {
7538 fc_hba_npiv_attributes_t *attrs;
7539
7540 attrs = kmem_zalloc(sizeof (*attrs), KM_SLEEP);
7541 mutex_enter(&port->fp_mutex);
7542 bcopy(&port->fp_service_params.node_ww_name.raw_wwn,
7543 &attrs->NodeWWN.raw_wwn,
7544 sizeof (attrs->NodeWWN.raw_wwn));
7545 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn,
7546 &attrs->PortWWN.raw_wwn,
7547 sizeof (attrs->PortWWN.raw_wwn));
7548 mutex_exit(&port->fp_mutex);
7549 if (fp_copyout((void *)attrs, (void *)fcio->fcio_obuf,
7550 fcio->fcio_olen, mode) == 0) {
7551 if (fp_fcio_copyout(fcio, data, mode)) {
7552 rval = EFAULT;
7553 }
7554 } else {
7555 rval = EFAULT;
7556 }
7557 kmem_free(attrs, sizeof (*attrs));
7558 break;
7559 }
7560
7561 case FCIO_DELETE_NPIV_PORT: {
7562 fc_local_port_t *tmpport;
7563 char ww_pname[17];
7564 la_wwn_t vwwn[1];
7565
7566 FP_TRACE(FP_NHEAD1(1, 0), "Delete NPIV Port");
7567 if (ddi_copyin(fcio->fcio_ibuf,
7568 &vwwn, sizeof (la_wwn_t), mode)) {
7569 rval = EFAULT;
7570 break;
7571 }
7572
7573 fc_wwn_to_str(&vwwn[0], ww_pname);
7574 FP_TRACE(FP_NHEAD1(3, 0),
7575 "Delete NPIV Port %s", ww_pname);
7576 tmpport = fc_delete_npiv_port(port, &vwwn[0]);
7577 if (tmpport == NULL) {
7578 FP_TRACE(FP_NHEAD1(3, 0),
7579 "Delete NPIV Port: not found");
7580 rval = EFAULT;
7581 } else {
7582 fc_local_port_t *nextport = tmpport->fp_port_next;
7583 fc_local_port_t *prevport = tmpport->fp_port_prev;
7584 int portlen, portindex, ret;
7585
7586 portlen = sizeof (portindex);
7587 ret = ddi_prop_op(DDI_DEV_T_ANY,
7588 tmpport->fp_port_dip, PROP_LEN_AND_VAL_BUF,
7589 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port",
7590 (caddr_t)&portindex, &portlen);
7591 if (ret != DDI_SUCCESS) {
7592 rval = EFAULT;
7593 break;
7594 }
7595 if (ndi_devi_offline(tmpport->fp_port_dip,
7596 NDI_DEVI_REMOVE) != DDI_SUCCESS) {
7597 FP_TRACE(FP_NHEAD1(1, 0),
7598 "Delete NPIV Port failed");
7599 mutex_enter(&port->fp_mutex);
7600 tmpport->fp_npiv_state = 0;
7601 mutex_exit(&port->fp_mutex);
7602 rval = EFAULT;
7603 } else {
7604 mutex_enter(&port->fp_mutex);
7605 nextport->fp_port_prev = prevport;
7606 prevport->fp_port_next = nextport;
7607 if (port == port->fp_port_next) {
7608 port->fp_port_next =
7609 port->fp_port_prev = NULL;
7610 }
7611 port->fp_npiv_portnum--;
7612 FP_TRACE(FP_NHEAD1(3, 0),
7613 "Delete NPIV Port %d", portindex);
7614 port->fp_npiv_portindex[portindex-1] = 0;
7615 mutex_exit(&port->fp_mutex);
7616 }
7617 }
7618 break;
7619 }
7620
7621 case FCIO_CREATE_NPIV_PORT: {
7622 char ww_nname[17], ww_pname[17];
7623 la_npiv_create_entry_t entrybuf;
7624 uint32_t vportindex = 0;
7625 int npiv_ret = 0;
7626 char *portname, *fcaname;
7627
7628 portname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
7629 (void) ddi_pathname(port->fp_port_dip, portname);
7630 fcaname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
7631 (void) ddi_pathname(port->fp_fca_dip, fcaname);
7632 FP_TRACE(FP_NHEAD1(1, 0),
7633 "Create NPIV port %s %s %s", portname, fcaname,
7634 ddi_driver_name(port->fp_fca_dip));
7635 kmem_free(portname, MAXPATHLEN);
7636 kmem_free(fcaname, MAXPATHLEN);
7637 if (ddi_copyin(fcio->fcio_ibuf,
7638 &entrybuf, sizeof (la_npiv_create_entry_t), mode)) {
7639 rval = EFAULT;
7640 break;
7641 }
7642
7643 fc_wwn_to_str(&entrybuf.VNodeWWN, ww_nname);
7644 fc_wwn_to_str(&entrybuf.VPortWWN, ww_pname);
7645 vportindex = entrybuf.vindex;
7646 FP_TRACE(FP_NHEAD1(3, 0),
7647 "Create NPIV Port %s %s %d",
7648 ww_nname, ww_pname, vportindex);
7649
7650 if (fc_get_npiv_port(port, &entrybuf.VPortWWN)) {
7651 rval = EFAULT;
7652 break;
7653 }
7654 npiv_ret = fctl_fca_create_npivport(port->fp_fca_dip,
7655 port->fp_port_dip, ww_nname, ww_pname, &vportindex);
7656 if (npiv_ret == NDI_SUCCESS) {
7657 mutex_enter(&port->fp_mutex);
7658 port->fp_npiv_portnum++;
7659 mutex_exit(&port->fp_mutex);
7660 if (fp_copyout((void *)&vportindex,
7661 (void *)fcio->fcio_obuf,
7662 fcio->fcio_olen, mode) == 0) {
7663 if (fp_fcio_copyout(fcio, data, mode)) {
7664 rval = EFAULT;
7665 }
7666 } else {
7667 rval = EFAULT;
7668 }
7669 } else {
7670 rval = EFAULT;
7671 }
7672 FP_TRACE(FP_NHEAD1(3, 0),
7673 "Create NPIV Port %d %d", npiv_ret, vportindex);
7674 break;
7675 }
7676
7677 case FCIO_GET_NPIV_PORT_LIST: {
7678 fc_hba_npiv_port_list_t *list;
7679 int count;
7680
7681 if ((fcio->fcio_xfer != FCIO_XFER_READ) ||
7682 (fcio->fcio_olen == 0) || (fcio->fcio_obuf == 0)) {
7683 rval = EINVAL;
7684 break;
7685 }
7686
7687 list = kmem_zalloc(fcio->fcio_olen, KM_SLEEP);
7688 list->version = FC_HBA_LIST_VERSION;
7689
7690 count = (fcio->fcio_olen -
7691 (int)sizeof (fc_hba_npiv_port_list_t))/MAXPATHLEN + 1;
7692 if (port->fp_npiv_portnum > count) {
7693 list->numAdapters = port->fp_npiv_portnum;
7694 } else {
7695 /* build npiv port list */
7696 count = fc_ulp_get_npiv_port_list(port,
7697 (char *)list->hbaPaths);
7698 if (count < 0) {
7699 rval = ENXIO;
7700 FP_TRACE(FP_NHEAD1(1, 0),
7701 "Build NPIV Port List error");
7702 kmem_free(list, fcio->fcio_olen);
7703 break;
7704 }
7705 list->numAdapters = count;
7706 }
7707
7708 if (fp_copyout((void *)list, (void *)fcio->fcio_obuf,
7709 fcio->fcio_olen, mode) == 0) {
7710 if (fp_fcio_copyout(fcio, data, mode)) {
7711 FP_TRACE(FP_NHEAD1(1, 0),
7712 "Copy NPIV Port data error");
7713 rval = EFAULT;
7714 }
7715 } else {
7716 FP_TRACE(FP_NHEAD1(1, 0), "Copy NPIV Port List error");
7717 rval = EFAULT;
7718 }
7719 kmem_free(list, fcio->fcio_olen);
7720 break;
7721 }
7722
7723 case FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES: {
7724 fc_hba_port_npiv_attributes_t *val;
7725
7726 val = kmem_zalloc(sizeof (*val), KM_SLEEP);
7727 val->version = FC_HBA_PORT_NPIV_ATTRIBUTES_VERSION;
7728
7729 mutex_enter(&port->fp_mutex);
7730 val->npivflag = port->fp_npiv_flag;
7731 val->lastChange = port->fp_last_change;
7732 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn,
7733 &val->PortWWN.raw_wwn,
7734 sizeof (val->PortWWN.raw_wwn));
7735 bcopy(&port->fp_service_params.node_ww_name.raw_wwn,
7736 &val->NodeWWN.raw_wwn,
7737 sizeof (val->NodeWWN.raw_wwn));
7738 mutex_exit(&port->fp_mutex);
7739
7740 val->NumberOfNPIVPorts = fc_ulp_get_npiv_port_num(port);
7741 if (port->fp_npiv_type != FC_NPIV_PORT) {
7742 val->MaxNumberOfNPIVPorts =
7743 port->fp_fca_tran->fca_num_npivports;
7744 } else {
7745 val->MaxNumberOfNPIVPorts = 0;
7746 }
7747
7748 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf,
7749 fcio->fcio_olen, mode) == 0) {
7750 if (fp_fcio_copyout(fcio, data, mode)) {
7751 rval = EFAULT;
7752 }
7753 } else {
7754 rval = EFAULT;
7755 }
7756 kmem_free(val, sizeof (*val));
7757 break;
7758 }
7759
7760 case FCIO_GET_ADAPTER_PORT_ATTRIBUTES: {
7761 fc_hba_port_attributes_t *val;
7762 fc_hba_port_attributes32_t *val32;
7763
7764 if (use32 == B_TRUE) {
7765 if (fcio->fcio_olen < sizeof (*val32) ||
7766 fcio->fcio_xfer != FCIO_XFER_READ) {
7767 rval = EINVAL;
7768 break;
7769 }
7770 } else {
7771 if (fcio->fcio_olen < sizeof (*val) ||
7772 fcio->fcio_xfer != FCIO_XFER_READ) {
7773 rval = EINVAL;
7774 break;
7775 }
7776 }
7777
7778 val = kmem_zalloc(sizeof (*val), KM_SLEEP);
7779 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION;
7780 mutex_enter(&port->fp_mutex);
7781 val->lastChange = port->fp_last_change;
7782 val->fp_minor = port->fp_instance;
7783
7784 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn,
7785 &val->PortWWN.raw_wwn,
7786 sizeof (val->PortWWN.raw_wwn));
7787 bcopy(&port->fp_service_params.node_ww_name.raw_wwn,
7788 &val->NodeWWN.raw_wwn,
7789 sizeof (val->NodeWWN.raw_wwn));
7790 bcopy(&port->fp_fabric_name, &val->FabricName.raw_wwn,
7791 sizeof (val->FabricName.raw_wwn));
7792
7793 val->PortFcId = port->fp_port_id.port_id;
7794
7795 switch (FC_PORT_STATE_MASK(port->fp_state)) {
7796 case FC_STATE_OFFLINE:
7797 val->PortState = FC_HBA_PORTSTATE_OFFLINE;
7798 break;
7799 case FC_STATE_ONLINE:
7800 case FC_STATE_LOOP:
7801 case FC_STATE_NAMESERVICE:
7802 val->PortState = FC_HBA_PORTSTATE_ONLINE;
7803 break;
7804 default:
7805 val->PortState = FC_HBA_PORTSTATE_UNKNOWN;
7806 break;
7807 }
7808
7809 /* Translate from LV to FC-HBA port type codes */
7810 switch (port->fp_port_type.port_type) {
7811 case FC_NS_PORT_N:
7812 val->PortType = FC_HBA_PORTTYPE_NPORT;
7813 break;
7814 case FC_NS_PORT_NL:
7815 /* Actually means loop for us */
7816 val->PortType = FC_HBA_PORTTYPE_LPORT;
7817 break;
7818 case FC_NS_PORT_F:
7819 val->PortType = FC_HBA_PORTTYPE_FPORT;
7820 break;
7821 case FC_NS_PORT_FL:
7822 val->PortType = FC_HBA_PORTTYPE_FLPORT;
7823 break;
7824 case FC_NS_PORT_E:
7825 val->PortType = FC_HBA_PORTTYPE_EPORT;
7826 break;
7827 default:
7828 val->PortType = FC_HBA_PORTTYPE_OTHER;
7829 break;
7830 }
7831
7832
7833 /*
7834 * If fp has decided that the topology is public loop,
7835 * we will indicate that using the appropriate
7836 * FC HBA API constant.
7837 */
7838 switch (port->fp_topology) {
7839 case FC_TOP_PUBLIC_LOOP:
7840 val->PortType = FC_HBA_PORTTYPE_NLPORT;
7841 break;
7842
7843 case FC_TOP_PT_PT:
7844 val->PortType = FC_HBA_PORTTYPE_PTP;
7845 break;
7846
7847 case FC_TOP_UNKNOWN:
7848 /*
7849 * This should cover the case where nothing is connected
7850 * to the port. Crystal+ is probably an exception here.
7851 * For Crystal+, port 0 will come up as private loop
7852 * (i.e. fp_bind_state will be FC_STATE_LOOP) even when
7853 * nothing is connected to it.
7854 * Current plan is to let userland handle this.
7855 */
7856 if (port->fp_bind_state == FC_STATE_OFFLINE) {
7857 val->PortType = FC_HBA_PORTTYPE_UNKNOWN;
7858 }
7859 break;
7860
7861 default:
7862 /*
7863 * Do Nothing.
7864 * Unused:
7865 * val->PortType = FC_HBA_PORTTYPE_GPORT;
7866 */
7867 break;
7868 }
7869
7870 val->PortSupportedClassofService =
7871 port->fp_hba_port_attrs.supported_cos;
7872 val->PortSupportedFc4Types[0] = 0;
7873 bcopy(port->fp_fc4_types, val->PortActiveFc4Types,
7874 sizeof (val->PortActiveFc4Types));
7875 bcopy(port->fp_sym_port_name, val->PortSymbolicName,
7876 port->fp_sym_port_namelen);
7877 val->PortSupportedSpeed =
7878 port->fp_hba_port_attrs.supported_speed;
7879
7880 switch (FC_PORT_SPEED_MASK(port->fp_state)) {
7881 case FC_STATE_1GBIT_SPEED:
7882 val->PortSpeed = FC_HBA_PORTSPEED_1GBIT;
7883 break;
7884 case FC_STATE_2GBIT_SPEED:
7885 val->PortSpeed = FC_HBA_PORTSPEED_2GBIT;
7886 break;
7887 case FC_STATE_4GBIT_SPEED:
7888 val->PortSpeed = FC_HBA_PORTSPEED_4GBIT;
7889 break;
7890 case FC_STATE_8GBIT_SPEED:
7891 val->PortSpeed = FC_HBA_PORTSPEED_8GBIT;
7892 break;
7893 case FC_STATE_10GBIT_SPEED:
7894 val->PortSpeed = FC_HBA_PORTSPEED_10GBIT;
7895 break;
7896 case FC_STATE_16GBIT_SPEED:
7897 val->PortSpeed = FC_HBA_PORTSPEED_16GBIT;
7898 break;
7899 case FC_STATE_32GBIT_SPEED:
7900 val->PortSpeed = FC_HBA_PORTSPEED_32GBIT;
7901 break;
7902 default:
7903 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN;
7904 break;
7905 }
7906 val->PortMaxFrameSize = port->fp_hba_port_attrs.max_frame_size;
7907 val->NumberofDiscoveredPorts = port->fp_dev_count;
7908 mutex_exit(&port->fp_mutex);
7909
7910 if (use32 == B_TRUE) {
7911 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP);
7912 val32->version = val->version;
7913 val32->lastChange = val->lastChange;
7914 val32->fp_minor = val->fp_minor;
7915
7916 bcopy(&val->PortWWN.raw_wwn, &val32->PortWWN.raw_wwn,
7917 sizeof (val->PortWWN.raw_wwn));
7918 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn,
7919 sizeof (val->NodeWWN.raw_wwn));
7920 val32->PortFcId = val->PortFcId;
7921 val32->PortState = val->PortState;
7922 val32->PortType = val->PortType;
7923
7924 val32->PortSupportedClassofService =
7925 val->PortSupportedClassofService;
7926 bcopy(val->PortActiveFc4Types,
7927 val32->PortActiveFc4Types,
7928 sizeof (val->PortActiveFc4Types));
7929 bcopy(val->PortSymbolicName, val32->PortSymbolicName,
7930 sizeof (val->PortSymbolicName));
7931 bcopy(&val->FabricName, &val32->FabricName,
7932 sizeof (val->FabricName.raw_wwn));
7933 val32->PortSupportedSpeed = val->PortSupportedSpeed;
7934 val32->PortSpeed = val->PortSpeed;
7935
7936 val32->PortMaxFrameSize = val->PortMaxFrameSize;
7937 val32->NumberofDiscoveredPorts =
7938 val->NumberofDiscoveredPorts;
7939
7940 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf,
7941 fcio->fcio_olen, mode) == 0) {
7942 if (fp_fcio_copyout(fcio, data, mode)) {
7943 rval = EFAULT;
7944 }
7945 } else {
7946 rval = EFAULT;
7947 }
7948
7949 kmem_free(val32, sizeof (*val32));
7950 } else {
7951 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf,
7952 fcio->fcio_olen, mode) == 0) {
7953 if (fp_fcio_copyout(fcio, data, mode)) {
7954 rval = EFAULT;
7955 }
7956 } else {
7957 rval = EFAULT;
7958 }
7959 }
7960
7961 kmem_free(val, sizeof (*val));
7962 break;
7963 }
7964
7965 case FCIO_GET_DISCOVERED_PORT_ATTRIBUTES: {
7966 fc_hba_port_attributes_t *val;
7967 fc_hba_port_attributes32_t *val32;
7968 uint32_t index = 0;
7969 fc_remote_port_t *tmp_pd;
7970
7971 if (use32 == B_TRUE) {
7972 if (fcio->fcio_olen < sizeof (*val32) ||
7973 fcio->fcio_xfer != FCIO_XFER_READ) {
7974 rval = EINVAL;
7975 break;
7976 }
7977 } else {
7978 if (fcio->fcio_olen < sizeof (*val) ||
7979 fcio->fcio_xfer != FCIO_XFER_READ) {
7980 rval = EINVAL;
7981 break;
7982 }
7983 }
7984
7985 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) {
7986 rval = EFAULT;
7987 break;
7988 }
7989
7990 if (index >= port->fp_dev_count) {
7991 FP_TRACE(FP_NHEAD1(9, 0),
7992 "User supplied index out of range");
7993 fcio->fcio_errno = FC_OUTOFBOUNDS;
7994 rval = EINVAL;
7995 if (fp_fcio_copyout(fcio, data, mode)) {
7996 rval = EFAULT;
7997 }
7998 break;
7999 }
8000
8001 val = kmem_zalloc(sizeof (*val), KM_SLEEP);
8002 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION;
8003
8004 mutex_enter(&port->fp_mutex);
8005 tmp_pd = fctl_lookup_pd_by_index(port, index);
8006
8007 if (tmp_pd == NULL) {
8008 fcio->fcio_errno = FC_BADPORT;
8009 rval = EINVAL;
8010 } else {
8011 val->lastChange = port->fp_last_change;
8012 val->fp_minor = port->fp_instance;
8013
8014 mutex_enter(&tmp_pd->pd_mutex);
8015 bcopy(&tmp_pd->pd_port_name.raw_wwn,
8016 &val->PortWWN.raw_wwn,
8017 sizeof (val->PortWWN.raw_wwn));
8018 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn,
8019 &val->NodeWWN.raw_wwn,
8020 sizeof (val->NodeWWN.raw_wwn));
8021 val->PortFcId = tmp_pd->pd_port_id.port_id;
8022 bcopy(tmp_pd->pd_spn, val->PortSymbolicName,
8023 tmp_pd->pd_spn_len);
8024 val->PortSupportedClassofService = tmp_pd->pd_cos;
8025 /*
8026 * We will assume the sizes of pd_fc4types and
8027 * PortActiveFc4Types will remain the same. We could
8028 * add a check for it, but we decided it was unneeded.
8029 */
8030 bcopy((caddr_t)tmp_pd->pd_fc4types,
8031 val->PortActiveFc4Types,
8032 sizeof (tmp_pd->pd_fc4types));
8033 val->PortState =
8034 fp_map_remote_port_state(tmp_pd->pd_state);
8035 mutex_exit(&tmp_pd->pd_mutex);
8036
8037 val->PortType = FC_HBA_PORTTYPE_UNKNOWN;
8038 val->PortSupportedFc4Types[0] = 0;
8039 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN;
8040 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN;
8041 val->PortMaxFrameSize = 0;
8042 val->NumberofDiscoveredPorts = 0;
8043
8044 if (use32 == B_TRUE) {
8045 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP);
8046 val32->version = val->version;
8047 val32->lastChange = val->lastChange;
8048 val32->fp_minor = val->fp_minor;
8049
8050 bcopy(&val->PortWWN.raw_wwn,
8051 &val32->PortWWN.raw_wwn,
8052 sizeof (val->PortWWN.raw_wwn));
8053 bcopy(&val->NodeWWN.raw_wwn,
8054 &val32->NodeWWN.raw_wwn,
8055 sizeof (val->NodeWWN.raw_wwn));
8056 val32->PortFcId = val->PortFcId;
8057 bcopy(val->PortSymbolicName,
8058 val32->PortSymbolicName,
8059 sizeof (val->PortSymbolicName));
8060 val32->PortSupportedClassofService =
8061 val->PortSupportedClassofService;
8062 bcopy(val->PortActiveFc4Types,
8063 val32->PortActiveFc4Types,
8064 sizeof (tmp_pd->pd_fc4types));
8065
8066 val32->PortType = val->PortType;
8067 val32->PortState = val->PortState;
8068 val32->PortSupportedFc4Types[0] =
8069 val->PortSupportedFc4Types[0];
8070 val32->PortSupportedSpeed =
8071 val->PortSupportedSpeed;
8072 val32->PortSpeed = val->PortSpeed;
8073 val32->PortMaxFrameSize =
8074 val->PortMaxFrameSize;
8075 val32->NumberofDiscoveredPorts =
8076 val->NumberofDiscoveredPorts;
8077
8078 if (fp_copyout((void *)val32,
8079 (void *)fcio->fcio_obuf,
8080 fcio->fcio_olen, mode) == 0) {
8081 if (fp_fcio_copyout(fcio,
8082 data, mode)) {
8083 rval = EFAULT;
8084 }
8085 } else {
8086 rval = EFAULT;
8087 }
8088
8089 kmem_free(val32, sizeof (*val32));
8090 } else {
8091 if (fp_copyout((void *)val,
8092 (void *)fcio->fcio_obuf,
8093 fcio->fcio_olen, mode) == 0) {
8094 if (fp_fcio_copyout(fcio, data, mode)) {
8095 rval = EFAULT;
8096 }
8097 } else {
8098 rval = EFAULT;
8099 }
8100 }
8101 }
8102
8103 mutex_exit(&port->fp_mutex);
8104 kmem_free(val, sizeof (*val));
8105 break;
8106 }
8107
8108 case FCIO_GET_PORT_ATTRIBUTES: {
8109 fc_hba_port_attributes_t *val;
8110 fc_hba_port_attributes32_t *val32;
8111 la_wwn_t wwn;
8112 fc_remote_port_t *tmp_pd;
8113
8114 if (use32 == B_TRUE) {
8115 if (fcio->fcio_olen < sizeof (*val32) ||
8116 fcio->fcio_xfer != FCIO_XFER_READ) {
8117 rval = EINVAL;
8118 break;
8119 }
8120 } else {
8121 if (fcio->fcio_olen < sizeof (*val) ||
8122 fcio->fcio_xfer != FCIO_XFER_READ) {
8123 rval = EINVAL;
8124 break;
8125 }
8126 }
8127
8128 if (ddi_copyin(fcio->fcio_ibuf, &wwn, sizeof (wwn), mode)) {
8129 rval = EFAULT;
8130 break;
8131 }
8132
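		/*
		 * Look up the remote port by the caller-supplied port WWN;
		 * FC_BADWWN is returned if this local port does not know of
		 * a matching remote port.
		 */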
8133 val = kmem_zalloc(sizeof (*val), KM_SLEEP);
8134 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION;
8135
8136 mutex_enter(&port->fp_mutex);
8137 tmp_pd = fctl_lookup_pd_by_wwn(port, wwn);
8138 val->lastChange = port->fp_last_change;
8139 val->fp_minor = port->fp_instance;
8140 mutex_exit(&port->fp_mutex);
8141
8142 if (tmp_pd == NULL) {
8143 fcio->fcio_errno = FC_BADWWN;
8144 rval = EINVAL;
8145 } else {
8146 mutex_enter(&tmp_pd->pd_mutex);
8147 bcopy(&tmp_pd->pd_port_name.raw_wwn,
8148 &val->PortWWN.raw_wwn,
8149 sizeof (val->PortWWN.raw_wwn));
8150 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn,
8151 &val->NodeWWN.raw_wwn,
8152 sizeof (val->NodeWWN.raw_wwn));
8153 val->PortFcId = tmp_pd->pd_port_id.port_id;
8154 bcopy(tmp_pd->pd_spn, val->PortSymbolicName,
8155 tmp_pd->pd_spn_len);
8156 val->PortSupportedClassofService = tmp_pd->pd_cos;
8157 val->PortType = FC_HBA_PORTTYPE_UNKNOWN;
8158 val->PortState =
8159 fp_map_remote_port_state(tmp_pd->pd_state);
8160 val->PortSupportedFc4Types[0] = 0;
8161 			/*
8162 			 * We assume that the size of pd_fc4types matches
8163 			 * that of PortActiveFc4Types; a runtime check was
8164 			 * deemed unnecessary.
8165 			 */
8166 bcopy((caddr_t)tmp_pd->pd_fc4types,
8167 val->PortActiveFc4Types,
8168 sizeof (tmp_pd->pd_fc4types));
8169 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN;
8170 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN;
8171 val->PortMaxFrameSize = 0;
8172 val->NumberofDiscoveredPorts = 0;
8173 mutex_exit(&tmp_pd->pd_mutex);
8174
8175 if (use32 == B_TRUE) {
8176 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP);
8177 val32->version = val->version;
8178 val32->lastChange = val->lastChange;
8179 val32->fp_minor = val->fp_minor;
8180 bcopy(&val->PortWWN.raw_wwn,
8181 &val32->PortWWN.raw_wwn,
8182 sizeof (val->PortWWN.raw_wwn));
8183 bcopy(&val->NodeWWN.raw_wwn,
8184 &val32->NodeWWN.raw_wwn,
8185 sizeof (val->NodeWWN.raw_wwn));
8186 val32->PortFcId = val->PortFcId;
8187 bcopy(val->PortSymbolicName,
8188 val32->PortSymbolicName,
8189 sizeof (val->PortSymbolicName));
8190 val32->PortSupportedClassofService =
8191 val->PortSupportedClassofService;
8192 val32->PortType = val->PortType;
8193 val32->PortState = val->PortState;
8194 val32->PortSupportedFc4Types[0] =
8195 val->PortSupportedFc4Types[0];
8196 bcopy(val->PortActiveFc4Types,
8197 val32->PortActiveFc4Types,
8198 sizeof (tmp_pd->pd_fc4types));
8199 val32->PortSupportedSpeed =
8200 val->PortSupportedSpeed;
8201 val32->PortSpeed = val->PortSpeed;
8202 val32->PortMaxFrameSize = val->PortMaxFrameSize;
8203 val32->NumberofDiscoveredPorts =
8204 val->NumberofDiscoveredPorts;
8205
8206 if (fp_copyout((void *)val32,
8207 (void *)fcio->fcio_obuf,
8208 fcio->fcio_olen, mode) == 0) {
8209 if (fp_fcio_copyout(fcio, data, mode)) {
8210 rval = EFAULT;
8211 }
8212 } else {
8213 rval = EFAULT;
8214 }
8215
8216 kmem_free(val32, sizeof (*val32));
8217 } else {
8218 if (fp_copyout((void *)val,
8219 (void *)fcio->fcio_obuf,
8220 fcio->fcio_olen, mode) == 0) {
8221 if (fp_fcio_copyout(fcio, data, mode)) {
8222 rval = EFAULT;
8223 }
8224 } else {
8225 rval = EFAULT;
8226 }
8227 }
8228 }
8229 kmem_free(val, sizeof (*val));
8230 break;
8231 }
8232
8233 case FCIO_GET_NUM_DEVS: {
8234 int num_devices;
8235
8236 if (fcio->fcio_olen != sizeof (num_devices) ||
8237 fcio->fcio_xfer != FCIO_XFER_READ) {
8238 rval = EINVAL;
8239 break;
8240 }
8241
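		/*
		 * The device count is topology dependent: on fabric and
		 * public loop topologies the name server is queried first
		 * so that fp_total_devices is up to date.
		 */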
8242 mutex_enter(&port->fp_mutex);
8243 switch (port->fp_topology) {
8244 case FC_TOP_PRIVATE_LOOP:
8245 case FC_TOP_PT_PT:
8246 num_devices = port->fp_total_devices;
8247 fcio->fcio_errno = FC_SUCCESS;
8248 break;
8249
8250 case FC_TOP_PUBLIC_LOOP:
8251 case FC_TOP_FABRIC:
8252 mutex_exit(&port->fp_mutex);
8253 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL,
8254 NULL, KM_SLEEP);
8255 ASSERT(job != NULL);
8256
8257 			/*
8258 			 * In FC-GS-2 the Name Server does not send out
8259 			 * RSCNs for Name Server database updates, so we
8260 			 * probe it explicitly here. Once that is fixed,
8261 			 * this probe can be removed.
8262 			 */
8263 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP);
8264 fctl_dealloc_job(job);
8265
8266 mutex_enter(&port->fp_mutex);
8267 num_devices = port->fp_total_devices;
8268 fcio->fcio_errno = FC_SUCCESS;
8269 break;
8270
8271 case FC_TOP_NO_NS:
8272 /* FALLTHROUGH */
8273 case FC_TOP_UNKNOWN:
8274 /* FALLTHROUGH */
8275 default:
8276 num_devices = 0;
8277 fcio->fcio_errno = FC_SUCCESS;
8278 break;
8279 }
8280 mutex_exit(&port->fp_mutex);
8281
8282 if (fp_copyout((void *)&num_devices,
8283 (void *)fcio->fcio_obuf, fcio->fcio_olen,
8284 mode) == 0) {
8285 if (fp_fcio_copyout(fcio, data, mode)) {
8286 rval = EFAULT;
8287 }
8288 } else {
8289 rval = EFAULT;
8290 }
8291 break;
8292 }
8293
8294 case FCIO_GET_DEV_LIST: {
8295 int num_devices;
8296 int new_count;
8297 int map_size;
8298
8299 if (fcio->fcio_xfer != FCIO_XFER_READ ||
8300 fcio->fcio_alen != sizeof (new_count)) {
8301 rval = EINVAL;
8302 break;
8303 }
8304
8305 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t);
8306
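		/*
		 * num_devices is how many fc_port_dev_t entries fit in the
		 * caller's output buffer; the count actually found is
		 * returned through the auxiliary buffer (fcio_abuf).
		 */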
8307 mutex_enter(&port->fp_mutex);
8308 if (num_devices < port->fp_total_devices) {
8309 fcio->fcio_errno = FC_TOOMANY;
8310 new_count = port->fp_total_devices;
8311 mutex_exit(&port->fp_mutex);
8312
8313 if (fp_copyout((void *)&new_count,
8314 (void *)fcio->fcio_abuf,
8315 sizeof (new_count), mode)) {
8316 rval = EFAULT;
8317 break;
8318 }
8319
8320 if (fp_fcio_copyout(fcio, data, mode)) {
8321 rval = EFAULT;
8322 break;
8323 }
8324 rval = EINVAL;
8325 break;
8326 }
8327
8328 if (port->fp_total_devices <= 0) {
8329 fcio->fcio_errno = FC_NO_MAP;
8330 new_count = port->fp_total_devices;
8331 mutex_exit(&port->fp_mutex);
8332
8333 if (fp_copyout((void *)&new_count,
8334 (void *)fcio->fcio_abuf,
8335 sizeof (new_count), mode)) {
8336 rval = EFAULT;
8337 break;
8338 }
8339
8340 if (fp_fcio_copyout(fcio, data, mode)) {
8341 rval = EFAULT;
8342 break;
8343 }
8344 rval = EINVAL;
8345 break;
8346 }
8347
8348 switch (port->fp_topology) {
8349 case FC_TOP_PRIVATE_LOOP:
8350 if (fp_fillout_loopmap(port, fcio,
8351 mode) != FC_SUCCESS) {
8352 rval = EFAULT;
8353 break;
8354 }
8355 if (fp_fcio_copyout(fcio, data, mode)) {
8356 rval = EFAULT;
8357 }
8358 break;
8359
8360 case FC_TOP_PT_PT:
8361 if (fp_fillout_p2pmap(port, fcio,
8362 mode) != FC_SUCCESS) {
8363 rval = EFAULT;
8364 break;
8365 }
8366 if (fp_fcio_copyout(fcio, data, mode)) {
8367 rval = EFAULT;
8368 }
8369 break;
8370
8371 case FC_TOP_PUBLIC_LOOP:
8372 case FC_TOP_FABRIC: {
8373 fctl_ns_req_t *ns_cmd;
8374
8375 map_size =
8376 sizeof (fc_port_dev_t) * port->fp_total_devices;
8377
8378 mutex_exit(&port->fp_mutex);
8379
8380 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t),
8381 sizeof (ns_resp_gan_t), map_size,
8382 (FCTL_NS_FILL_NS_MAP | FCTL_NS_BUF_IS_USERLAND),
8383 KM_SLEEP);
8384 ASSERT(ns_cmd != NULL);
8385
8386 ns_cmd->ns_gan_index = 0;
8387 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID;
8388 ns_cmd->ns_cmd_code = NS_GA_NXT;
8389 ns_cmd->ns_gan_max = map_size / sizeof (fc_port_dev_t);
8390
8391 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL,
8392 NULL, KM_SLEEP);
8393 ASSERT(job != NULL);
8394
8395 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
8396
8397 if (ret != FC_SUCCESS ||
8398 job->job_result != FC_SUCCESS) {
8399 fctl_free_ns_cmd(ns_cmd);
8400
8401 fcio->fcio_errno = job->job_result;
8402 new_count = 0;
8403 if (fp_copyout((void *)&new_count,
8404 (void *)fcio->fcio_abuf,
8405 sizeof (new_count), mode)) {
8406 fctl_dealloc_job(job);
8407 mutex_enter(&port->fp_mutex);
8408 rval = EFAULT;
8409 break;
8410 }
8411
8412 if (fp_fcio_copyout(fcio, data, mode)) {
8413 fctl_dealloc_job(job);
8414 mutex_enter(&port->fp_mutex);
8415 rval = EFAULT;
8416 break;
8417 }
8418 rval = EIO;
8419 mutex_enter(&port->fp_mutex);
8420 break;
8421 }
8422 fctl_dealloc_job(job);
8423
8424 new_count = ns_cmd->ns_gan_index;
8425 if (fp_copyout((void *)&new_count,
8426 (void *)fcio->fcio_abuf, sizeof (new_count),
8427 mode)) {
8428 rval = EFAULT;
8429 fctl_free_ns_cmd(ns_cmd);
8430 mutex_enter(&port->fp_mutex);
8431 break;
8432 }
8433
8434 if (fp_copyout((void *)ns_cmd->ns_data_buf,
8435 (void *)fcio->fcio_obuf, sizeof (fc_port_dev_t) *
8436 ns_cmd->ns_gan_index, mode)) {
8437 rval = EFAULT;
8438 fctl_free_ns_cmd(ns_cmd);
8439 mutex_enter(&port->fp_mutex);
8440 break;
8441 }
8442 fctl_free_ns_cmd(ns_cmd);
8443
8444 if (fp_fcio_copyout(fcio, data, mode)) {
8445 rval = EFAULT;
8446 }
8447 mutex_enter(&port->fp_mutex);
8448 break;
8449 }
8450
8451 case FC_TOP_NO_NS:
8452 /* FALLTHROUGH */
8453 case FC_TOP_UNKNOWN:
8454 /* FALLTHROUGH */
8455 default:
8456 fcio->fcio_errno = FC_NO_MAP;
8457 			new_count = num_devices = port->fp_total_devices;
8458
8459 if (fp_copyout((void *)&new_count,
8460 (void *)fcio->fcio_abuf,
8461 sizeof (new_count), mode)) {
8462 rval = EFAULT;
8463 break;
8464 }
8465
8466 if (fp_fcio_copyout(fcio, data, mode)) {
8467 rval = EFAULT;
8468 break;
8469 }
8470 rval = EINVAL;
8471 break;
8472 }
8473 mutex_exit(&port->fp_mutex);
8474 break;
8475 }
8476
8477 case FCIO_GET_SYM_PNAME: {
8478 rval = ENOTSUP;
8479 break;
8480 }
8481
8482 case FCIO_GET_SYM_NNAME: {
8483 rval = ENOTSUP;
8484 break;
8485 }
8486
8487 case FCIO_SET_SYM_PNAME: {
8488 rval = ENOTSUP;
8489 break;
8490 }
8491
8492 case FCIO_SET_SYM_NNAME: {
8493 rval = ENOTSUP;
8494 break;
8495 }
8496
8497 case FCIO_GET_LOGI_PARAMS: {
8498 la_wwn_t pwwn;
8499 la_wwn_t *my_pwwn;
8500 la_els_logi_t *params;
8501 la_els_logi32_t *params32;
8502 fc_remote_node_t *node;
8503 fc_remote_port_t *pd;
8504
8505 if (fcio->fcio_ilen != sizeof (la_wwn_t) ||
8506 (fcio->fcio_xfer & FCIO_XFER_READ) == 0 ||
8507 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0) {
8508 rval = EINVAL;
8509 break;
8510 }
8511
8512 if (use32 == B_TRUE) {
8513 if (fcio->fcio_olen != sizeof (la_els_logi32_t)) {
8514 rval = EINVAL;
8515 break;
8516 }
8517 } else {
8518 if (fcio->fcio_olen != sizeof (la_els_logi_t)) {
8519 rval = EINVAL;
8520 break;
8521 }
8522 }
8523
8524 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) {
8525 rval = EFAULT;
8526 break;
8527 }
8528
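		/*
		 * If the WWN does not match a known remote port, fall back
		 * to the local port's own service parameters, provided the
		 * WWN is that of the local port itself.
		 */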
8529 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
8530 if (pd == NULL) {
8531 mutex_enter(&port->fp_mutex);
8532 my_pwwn = &port->fp_service_params.nport_ww_name;
8533 mutex_exit(&port->fp_mutex);
8534
8535 if (fctl_wwn_cmp(&pwwn, my_pwwn) != 0) {
8536 rval = ENXIO;
8537 break;
8538 }
8539
8540 params = kmem_zalloc(sizeof (*params), KM_SLEEP);
8541 mutex_enter(&port->fp_mutex);
8542 *params = port->fp_service_params;
8543 mutex_exit(&port->fp_mutex);
8544 } else {
8545 params = kmem_zalloc(sizeof (*params), KM_SLEEP);
8546
8547 mutex_enter(&pd->pd_mutex);
8548 params->ls_code.mbz = params->ls_code.ls_code = 0;
8549 params->common_service = pd->pd_csp;
8550 params->nport_ww_name = pd->pd_port_name;
8551 params->class_1 = pd->pd_clsp1;
8552 params->class_2 = pd->pd_clsp2;
8553 params->class_3 = pd->pd_clsp3;
8554 node = pd->pd_remote_nodep;
8555 mutex_exit(&pd->pd_mutex);
8556
8557 bzero(params->reserved, sizeof (params->reserved));
8558
8559 mutex_enter(&node->fd_mutex);
8560 bcopy(node->fd_vv, params->vendor_version,
8561 sizeof (node->fd_vv));
8562 params->node_ww_name = node->fd_node_name;
8563 mutex_exit(&node->fd_mutex);
8564
8565 fctl_release_remote_port(pd);
8566 }
8567
8568 if (use32 == B_TRUE) {
8569 params32 = kmem_zalloc(sizeof (*params32), KM_SLEEP);
8570
8571 params32->ls_code.mbz = params->ls_code.mbz;
8572 params32->common_service = params->common_service;
8573 params32->nport_ww_name = params->nport_ww_name;
8574 params32->class_1 = params->class_1;
8575 params32->class_2 = params->class_2;
8576 params32->class_3 = params->class_3;
8577 bzero(params32->reserved, sizeof (params32->reserved));
8578 bcopy(params->vendor_version, params32->vendor_version,
8579 sizeof (node->fd_vv));
8580 params32->node_ww_name = params->node_ww_name;
8581
8582 if (ddi_copyout((void *)params32,
8583 (void *)fcio->fcio_obuf,
8584 sizeof (*params32), mode)) {
8585 rval = EFAULT;
8586 }
8587
8588 kmem_free(params32, sizeof (*params32));
8589 } else {
8590 if (ddi_copyout((void *)params, (void *)fcio->fcio_obuf,
8591 sizeof (*params), mode)) {
8592 rval = EFAULT;
8593 }
8594 }
8595
8596 kmem_free(params, sizeof (*params));
8597 if (fp_fcio_copyout(fcio, data, mode)) {
8598 rval = EFAULT;
8599 }
8600 break;
8601 }
8602
8603 case FCIO_DEV_LOGOUT:
8604 case FCIO_DEV_LOGIN:
8605 if (fcio->fcio_ilen != sizeof (la_wwn_t) ||
8606 fcio->fcio_xfer != FCIO_XFER_WRITE) {
8607 rval = EINVAL;
8608
8609 if (fp_fcio_copyout(fcio, data, mode)) {
8610 rval = EFAULT;
8611 }
8612 break;
8613 }
8614
8615 if (fcio->fcio_cmd == FCIO_DEV_LOGIN) {
8616 jcode = JOB_FCIO_LOGIN;
8617 } else {
8618 jcode = JOB_FCIO_LOGOUT;
8619 }
8620
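		/*
		 * Make a kernel copy of the fcio request and its input
		 * buffer, hand it to the job thread, and copy the result
		 * back to the caller once the job completes.
		 */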
8621 kfcio = kmem_zalloc(sizeof (*kfcio), KM_SLEEP);
8622 bcopy(fcio, kfcio, sizeof (*fcio));
8623
8624 if (kfcio->fcio_ilen) {
8625 kfcio->fcio_ibuf = kmem_zalloc(kfcio->fcio_ilen,
8626 KM_SLEEP);
8627
8628 if (ddi_copyin((void *)fcio->fcio_ibuf,
8629 (void *)kfcio->fcio_ibuf, kfcio->fcio_ilen,
8630 mode)) {
8631 rval = EFAULT;
8632
8633 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen);
8634 kmem_free(kfcio, sizeof (*kfcio));
8635 fcio->fcio_errno = job->job_result;
8636 if (fp_fcio_copyout(fcio, data, mode)) {
8637 rval = EFAULT;
8638 }
8639 break;
8640 }
8641 }
8642
8643 job = fctl_alloc_job(jcode, 0, NULL, NULL, KM_SLEEP);
8644 job->job_private = kfcio;
8645
8646 fctl_enque_job(port, job);
8647 fctl_jobwait(job);
8648
8649 rval = job->job_result;
8650
8651 fcio->fcio_errno = kfcio->fcio_errno;
8652 if (fp_fcio_copyout(fcio, data, mode)) {
8653 rval = EFAULT;
8654 }
8655
8656 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen);
8657 kmem_free(kfcio, sizeof (*kfcio));
8658 fctl_dealloc_job(job);
8659 break;
8660
8661 case FCIO_GET_STATE: {
8662 la_wwn_t pwwn;
8663 uint32_t state;
8664 fc_remote_port_t *pd;
8665 fctl_ns_req_t *ns_cmd;
8666
8667 if (fcio->fcio_ilen != sizeof (la_wwn_t) ||
8668 fcio->fcio_olen != sizeof (state) ||
8669 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0 ||
8670 (fcio->fcio_xfer & FCIO_XFER_READ) == 0) {
8671 rval = EINVAL;
8672 break;
8673 }
8674
8675 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) {
8676 rval = EFAULT;
8677 break;
8678 }
8679 fcio->fcio_errno = 0;
8680
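		/*
		 * If the WWN is not in the local tables and the port is on
		 * a switch topology, query the name server (GID_PN) for it;
		 * otherwise the cached pd_state is returned directly.
		 */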
8681 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
8682 if (pd == NULL) {
8683 mutex_enter(&port->fp_mutex);
8684 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
8685 mutex_exit(&port->fp_mutex);
8686 job = fctl_alloc_job(JOB_PLOGI_ONE, 0,
8687 NULL, NULL, KM_SLEEP);
8688
8689 job->job_counter = 1;
8690 job->job_result = FC_SUCCESS;
8691
8692 ns_cmd = fctl_alloc_ns_cmd(
8693 sizeof (ns_req_gid_pn_t),
8694 sizeof (ns_resp_gid_pn_t),
8695 sizeof (ns_resp_gid_pn_t),
8696 FCTL_NS_BUF_IS_USERLAND, KM_SLEEP);
8697 ASSERT(ns_cmd != NULL);
8698
8699 ns_cmd->ns_cmd_code = NS_GID_PN;
8700 ((ns_req_gid_pn_t *)
8701 (ns_cmd->ns_cmd_buf))->pwwn = pwwn;
8702
8703 ret = fp_ns_query(port, ns_cmd, job,
8704 1, KM_SLEEP);
8705
8706 if (ret != FC_SUCCESS || job->job_result !=
8707 FC_SUCCESS) {
8708 if (ret != FC_SUCCESS) {
8709 fcio->fcio_errno = ret;
8710 } else {
8711 fcio->fcio_errno =
8712 job->job_result;
8713 }
8714 rval = EIO;
8715 } else {
8716 state = PORT_DEVICE_INVALID;
8717 }
8718 fctl_free_ns_cmd(ns_cmd);
8719 fctl_dealloc_job(job);
8720 } else {
8721 mutex_exit(&port->fp_mutex);
8722 fcio->fcio_errno = FC_BADWWN;
8723 rval = ENXIO;
8724 }
8725 } else {
8726 mutex_enter(&pd->pd_mutex);
8727 state = pd->pd_state;
8728 mutex_exit(&pd->pd_mutex);
8729
8730 fctl_release_remote_port(pd);
8731 }
8732
8733 if (!rval) {
8734 if (ddi_copyout((void *)&state,
8735 (void *)fcio->fcio_obuf, sizeof (state),
8736 mode)) {
8737 rval = EFAULT;
8738 }
8739 }
8740 if (fp_fcio_copyout(fcio, data, mode)) {
8741 rval = EFAULT;
8742 }
8743 break;
8744 }
8745
8746 case FCIO_DEV_REMOVE: {
8747 la_wwn_t pwwn;
8748 fc_portmap_t *changelist;
8749 fc_remote_port_t *pd;
8750
8751 if (fcio->fcio_ilen != sizeof (la_wwn_t) ||
8752 fcio->fcio_xfer != FCIO_XFER_WRITE) {
8753 rval = EINVAL;
8754 break;
8755 }
8756
8757 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) {
8758 rval = EFAULT;
8759 break;
8760 }
8761
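		/*
		 * Refuse the removal while other holds exist on the remote
		 * port; otherwise notify the ULPs with a synthesized
		 * PORT_DEVICE_USER_LOGOUT change-list entry.
		 */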
8762 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
8763 if (pd == NULL) {
8764 rval = ENXIO;
8765 fcio->fcio_errno = FC_BADWWN;
8766 if (fp_fcio_copyout(fcio, data, mode)) {
8767 rval = EFAULT;
8768 }
8769 break;
8770 }
8771
8772 mutex_enter(&pd->pd_mutex);
8773 if (pd->pd_ref_count > 1) {
8774 mutex_exit(&pd->pd_mutex);
8775
8776 rval = EBUSY;
8777 fcio->fcio_errno = FC_FAILURE;
8778 fctl_release_remote_port(pd);
8779
8780 if (fp_fcio_copyout(fcio, data, mode)) {
8781 rval = EFAULT;
8782 }
8783 break;
8784 }
8785 mutex_exit(&pd->pd_mutex);
8786
8787 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);
8788
8789 fctl_copy_portmap(changelist, pd);
8790 changelist->map_type = PORT_DEVICE_USER_LOGOUT;
8791 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);
8792
8793 fctl_release_remote_port(pd);
8794 break;
8795 }
8796
8797 case FCIO_GET_FCODE_REV: {
8798 caddr_t fcode_rev;
8799 fc_fca_pm_t pm;
8800
8801 if (fcio->fcio_olen < FC_FCODE_REV_SIZE ||
8802 fcio->fcio_xfer != FCIO_XFER_READ) {
8803 rval = EINVAL;
8804 break;
8805 }
8806 bzero((caddr_t)&pm, sizeof (pm));
8807
8808 fcode_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP);
8809
8810 pm.pm_cmd_flags = FC_FCA_PM_READ;
8811 pm.pm_cmd_code = FC_PORT_GET_FCODE_REV;
8812 pm.pm_data_len = fcio->fcio_olen;
8813 pm.pm_data_buf = fcode_rev;
8814
8815 ret = port->fp_fca_tran->fca_port_manage(
8816 port->fp_fca_handle, &pm);
8817
8818 if (ret == FC_SUCCESS) {
8819 if (ddi_copyout((void *)fcode_rev,
8820 (void *)fcio->fcio_obuf,
8821 fcio->fcio_olen, mode) == 0) {
8822 if (fp_fcio_copyout(fcio, data, mode)) {
8823 rval = EFAULT;
8824 }
8825 } else {
8826 rval = EFAULT;
8827 }
8828 } else {
8829 			/*
8830 			 * Check whether the buffer was too small to hold
8831 			 * the FCODE version.
8832 			 */
8833 if (pm.pm_data_len > fcio->fcio_olen) {
8834 rval = ENOMEM;
8835 } else {
8836 rval = EIO;
8837 }
8838 fcio->fcio_errno = ret;
8839 if (fp_fcio_copyout(fcio, data, mode)) {
8840 rval = EFAULT;
8841 }
8842 }
8843 kmem_free(fcode_rev, fcio->fcio_olen);
8844 break;
8845 }
8846
8847 case FCIO_GET_FW_REV: {
8848 caddr_t fw_rev;
8849 fc_fca_pm_t pm;
8850
8851 if (fcio->fcio_olen < FC_FW_REV_SIZE ||
8852 fcio->fcio_xfer != FCIO_XFER_READ) {
8853 rval = EINVAL;
8854 break;
8855 }
8856 bzero((caddr_t)&pm, sizeof (pm));
8857
8858 fw_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP);
8859
8860 pm.pm_cmd_flags = FC_FCA_PM_READ;
8861 pm.pm_cmd_code = FC_PORT_GET_FW_REV;
8862 pm.pm_data_len = fcio->fcio_olen;
8863 pm.pm_data_buf = fw_rev;
8864
8865 ret = port->fp_fca_tran->fca_port_manage(
8866 port->fp_fca_handle, &pm);
8867
8868 if (ret == FC_SUCCESS) {
8869 if (ddi_copyout((void *)fw_rev,
8870 (void *)fcio->fcio_obuf,
8871 fcio->fcio_olen, mode) == 0) {
8872 if (fp_fcio_copyout(fcio, data, mode)) {
8873 rval = EFAULT;
8874 }
8875 } else {
8876 rval = EFAULT;
8877 }
8878 } else {
8879 if (fp_fcio_copyout(fcio, data, mode)) {
8880 rval = EFAULT;
8881 }
8882 rval = EIO;
8883 }
8884 kmem_free(fw_rev, fcio->fcio_olen);
8885 break;
8886 }
8887
8888 case FCIO_GET_DUMP_SIZE: {
8889 uint32_t dump_size;
8890 fc_fca_pm_t pm;
8891
8892 if (fcio->fcio_olen != sizeof (dump_size) ||
8893 fcio->fcio_xfer != FCIO_XFER_READ) {
8894 rval = EINVAL;
8895 break;
8896 }
8897 bzero((caddr_t)&pm, sizeof (pm));
8898 pm.pm_cmd_flags = FC_FCA_PM_READ;
8899 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE;
8900 pm.pm_data_len = sizeof (dump_size);
8901 pm.pm_data_buf = (caddr_t)&dump_size;
8902
8903 ret = port->fp_fca_tran->fca_port_manage(
8904 port->fp_fca_handle, &pm);
8905
8906 if (ret == FC_SUCCESS) {
8907 if (ddi_copyout((void *)&dump_size,
8908 (void *)fcio->fcio_obuf, sizeof (dump_size),
8909 mode) == 0) {
8910 if (fp_fcio_copyout(fcio, data, mode)) {
8911 rval = EFAULT;
8912 }
8913 } else {
8914 rval = EFAULT;
8915 }
8916 } else {
8917 fcio->fcio_errno = ret;
8918 rval = EIO;
8919 if (fp_fcio_copyout(fcio, data, mode)) {
8920 rval = EFAULT;
8921 }
8922 }
8923 break;
8924 }
8925
8926 case FCIO_DOWNLOAD_FW: {
8927 caddr_t firmware;
8928 fc_fca_pm_t pm;
8929
8930 if (fcio->fcio_ilen <= 0 ||
8931 fcio->fcio_xfer != FCIO_XFER_WRITE) {
8932 rval = EINVAL;
8933 break;
8934 }
8935
8936 firmware = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP);
8937 if (ddi_copyin(fcio->fcio_ibuf, firmware,
8938 fcio->fcio_ilen, mode)) {
8939 rval = EFAULT;
8940 kmem_free(firmware, fcio->fcio_ilen);
8941 break;
8942 }
8943
8944 bzero((caddr_t)&pm, sizeof (pm));
8945 pm.pm_cmd_flags = FC_FCA_PM_WRITE;
8946 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FW;
8947 pm.pm_data_len = fcio->fcio_ilen;
8948 pm.pm_data_buf = firmware;
8949
8950 ret = port->fp_fca_tran->fca_port_manage(
8951 port->fp_fca_handle, &pm);
8952
8953 kmem_free(firmware, fcio->fcio_ilen);
8954
8955 if (ret != FC_SUCCESS) {
8956 fcio->fcio_errno = ret;
8957 rval = EIO;
8958 if (fp_fcio_copyout(fcio, data, mode)) {
8959 rval = EFAULT;
8960 }
8961 }
8962 break;
8963 }
8964
8965 case FCIO_DOWNLOAD_FCODE: {
8966 caddr_t fcode;
8967 fc_fca_pm_t pm;
8968
8969 if (fcio->fcio_ilen <= 0 ||
8970 fcio->fcio_xfer != FCIO_XFER_WRITE) {
8971 rval = EINVAL;
8972 break;
8973 }
8974
8975 fcode = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP);
8976 if (ddi_copyin(fcio->fcio_ibuf, fcode,
8977 fcio->fcio_ilen, mode)) {
8978 rval = EFAULT;
8979 kmem_free(fcode, fcio->fcio_ilen);
8980 break;
8981 }
8982
8983 bzero((caddr_t)&pm, sizeof (pm));
8984 pm.pm_cmd_flags = FC_FCA_PM_WRITE;
8985 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FCODE;
8986 pm.pm_data_len = fcio->fcio_ilen;
8987 pm.pm_data_buf = fcode;
8988
8989 ret = port->fp_fca_tran->fca_port_manage(
8990 port->fp_fca_handle, &pm);
8991
8992 kmem_free(fcode, fcio->fcio_ilen);
8993
8994 if (ret != FC_SUCCESS) {
8995 fcio->fcio_errno = ret;
8996 rval = EIO;
8997 if (fp_fcio_copyout(fcio, data, mode)) {
8998 rval = EFAULT;
8999 }
9000 }
9001 break;
9002 }
9003
9004 case FCIO_FORCE_DUMP:
9005 ret = port->fp_fca_tran->fca_reset(
9006 port->fp_fca_handle, FC_FCA_CORE);
9007
9008 if (ret != FC_SUCCESS) {
9009 fcio->fcio_errno = ret;
9010 rval = EIO;
9011 if (fp_fcio_copyout(fcio, data, mode)) {
9012 rval = EFAULT;
9013 }
9014 }
9015 break;
9016
9017 case FCIO_GET_DUMP: {
9018 caddr_t dump;
9019 uint32_t dump_size;
9020 fc_fca_pm_t pm;
9021
9022 if (fcio->fcio_xfer != FCIO_XFER_READ) {
9023 rval = EINVAL;
9024 break;
9025 }
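		/*
		 * First ask the FCA for the dump size; the caller's output
		 * buffer must match that size exactly before the dump is
		 * fetched with FC_PORT_GET_DUMP.
		 */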
9026 bzero((caddr_t)&pm, sizeof (pm));
9027
9028 pm.pm_cmd_flags = FC_FCA_PM_READ;
9029 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE;
9030 pm.pm_data_len = sizeof (dump_size);
9031 pm.pm_data_buf = (caddr_t)&dump_size;
9032
9033 ret = port->fp_fca_tran->fca_port_manage(
9034 port->fp_fca_handle, &pm);
9035
9036 if (ret != FC_SUCCESS) {
9037 fcio->fcio_errno = ret;
9038 rval = EIO;
9039 if (fp_fcio_copyout(fcio, data, mode)) {
9040 rval = EFAULT;
9041 }
9042 break;
9043 }
9044 if (fcio->fcio_olen != dump_size) {
9045 fcio->fcio_errno = FC_NOMEM;
9046 rval = EINVAL;
9047 if (fp_fcio_copyout(fcio, data, mode)) {
9048 rval = EFAULT;
9049 }
9050 break;
9051 }
9052
9053 dump = kmem_zalloc(dump_size, KM_SLEEP);
9054
9055 bzero((caddr_t)&pm, sizeof (pm));
9056 pm.pm_cmd_flags = FC_FCA_PM_READ;
9057 pm.pm_cmd_code = FC_PORT_GET_DUMP;
9058 pm.pm_data_len = dump_size;
9059 pm.pm_data_buf = dump;
9060
9061 ret = port->fp_fca_tran->fca_port_manage(
9062 port->fp_fca_handle, &pm);
9063
9064 if (ret == FC_SUCCESS) {
9065 if (ddi_copyout((void *)dump, (void *)fcio->fcio_obuf,
9066 dump_size, mode) == 0) {
9067 if (fp_fcio_copyout(fcio, data, mode)) {
9068 rval = EFAULT;
9069 }
9070 } else {
9071 rval = EFAULT;
9072 }
9073 } else {
9074 fcio->fcio_errno = ret;
9075 rval = EIO;
9076 if (fp_fcio_copyout(fcio, data, mode)) {
9077 rval = EFAULT;
9078 }
9079 }
9080 kmem_free(dump, dump_size);
9081 break;
9082 }
9083
9084 case FCIO_GET_TOPOLOGY: {
9085 uint32_t user_topology;
9086
9087 if (fcio->fcio_xfer != FCIO_XFER_READ ||
9088 fcio->fcio_olen != sizeof (user_topology)) {
9089 rval = EINVAL;
9090 break;
9091 }
9092
9093 mutex_enter(&port->fp_mutex);
9094 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
9095 user_topology = FC_TOP_UNKNOWN;
9096 } else {
9097 user_topology = port->fp_topology;
9098 }
9099 mutex_exit(&port->fp_mutex);
9100
9101 if (ddi_copyout((void *)&user_topology,
9102 (void *)fcio->fcio_obuf, sizeof (user_topology),
9103 mode)) {
9104 rval = EFAULT;
9105 }
9106 break;
9107 }
9108
9109 case FCIO_RESET_LINK: {
9110 la_wwn_t pwwn;
9111
9112 		/*
9113 		 * Examine the WWN passed in fcio_ibuf: if it is zero,
9114 		 * attempt to reset the local link/loop. Otherwise, if
9115 		 * the WWN identifies an NL_Port, determine the LFA and
9116 		 * reset the remote loop by sending a LINIT ELS. The
9117 		 * work itself is done by the JOB_LINK_RESET job below.
9118 		 */
9119
9120 if (fcio->fcio_xfer != FCIO_XFER_WRITE ||
9121 fcio->fcio_ilen != sizeof (pwwn)) {
9122 rval = EINVAL;
9123 break;
9124 }
9125
9126 if (ddi_copyin(fcio->fcio_ibuf, &pwwn,
9127 sizeof (pwwn), mode)) {
9128 rval = EFAULT;
9129 break;
9130 }
9131
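		/*
		 * FP_SOFT_IN_LINK_RESET serializes link resets; if another
		 * reset is already in progress this ioctl returns without
		 * doing anything.
		 */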
9132 mutex_enter(&port->fp_mutex);
9133 if (port->fp_soft_state & FP_SOFT_IN_LINK_RESET) {
9134 mutex_exit(&port->fp_mutex);
9135 break;
9136 }
9137 port->fp_soft_state |= FP_SOFT_IN_LINK_RESET;
9138 mutex_exit(&port->fp_mutex);
9139
9140 job = fctl_alloc_job(JOB_LINK_RESET, 0, NULL, NULL, KM_SLEEP);
9141 if (job == NULL) {
9142 rval = ENOMEM;
9143 break;
9144 }
9145 job->job_counter = 1;
9146 job->job_private = (void *)&pwwn;
9147
9148 fctl_enque_job(port, job);
9149 fctl_jobwait(job);
9150
9151 mutex_enter(&port->fp_mutex);
9152 port->fp_soft_state &= ~FP_SOFT_IN_LINK_RESET;
9153 mutex_exit(&port->fp_mutex);
9154
9155 if (job->job_result != FC_SUCCESS) {
9156 fcio->fcio_errno = job->job_result;
9157 rval = EIO;
9158 if (fp_fcio_copyout(fcio, data, mode)) {
9159 rval = EFAULT;
9160 }
9161 }
9162 fctl_dealloc_job(job);
9163 break;
9164 }
9165
9166 case FCIO_RESET_HARD:
9167 ret = port->fp_fca_tran->fca_reset(
9168 port->fp_fca_handle, FC_FCA_RESET);
9169 if (ret != FC_SUCCESS) {
9170 fcio->fcio_errno = ret;
9171 rval = EIO;
9172 if (fp_fcio_copyout(fcio, data, mode)) {
9173 rval = EFAULT;
9174 }
9175 }
9176 break;
9177
9178 case FCIO_RESET_HARD_CORE:
9179 ret = port->fp_fca_tran->fca_reset(
9180 port->fp_fca_handle, FC_FCA_RESET_CORE);
9181 if (ret != FC_SUCCESS) {
9182 rval = EIO;
9183 fcio->fcio_errno = ret;
9184 if (fp_fcio_copyout(fcio, data, mode)) {
9185 rval = EFAULT;
9186 }
9187 }
9188 break;
9189
9190 case FCIO_DIAG: {
9191 fc_fca_pm_t pm;
9192
9193 bzero((caddr_t)&pm, sizeof (fc_fca_pm_t));
9194
9195 /* Validate user buffer from ioctl call. */
9196 if (((fcio->fcio_ilen > 0) && (fcio->fcio_ibuf == NULL)) ||
9197 ((fcio->fcio_ilen <= 0) && (fcio->fcio_ibuf != NULL)) ||
9198 ((fcio->fcio_alen > 0) && (fcio->fcio_abuf == NULL)) ||
9199 ((fcio->fcio_alen <= 0) && (fcio->fcio_abuf != NULL)) ||
9200 ((fcio->fcio_olen > 0) && (fcio->fcio_obuf == NULL)) ||
9201 ((fcio->fcio_olen <= 0) && (fcio->fcio_obuf != NULL))) {
9202 rval = EFAULT;
9203 break;
9204 }
9205
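		/*
		 * Copy in the command and data buffers (if any), allocate a
		 * status buffer, and pass the request to the FCA through
		 * FC_PORT_DIAG.
		 */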
9206 if ((pm.pm_cmd_len = fcio->fcio_ilen) > 0) {
9207 pm.pm_cmd_buf = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP);
9208 if (ddi_copyin(fcio->fcio_ibuf, pm.pm_cmd_buf,
9209 fcio->fcio_ilen, mode)) {
9210 rval = EFAULT;
9211 goto fp_fcio_diag_cleanup;
9212 }
9213 }
9214
9215 if ((pm.pm_data_len = fcio->fcio_alen) > 0) {
9216 pm.pm_data_buf = kmem_zalloc(fcio->fcio_alen, KM_SLEEP);
9217 if (ddi_copyin(fcio->fcio_abuf, pm.pm_data_buf,
9218 fcio->fcio_alen, mode)) {
9219 rval = EFAULT;
9220 goto fp_fcio_diag_cleanup;
9221 }
9222 }
9223
9224 if ((pm.pm_stat_len = fcio->fcio_olen) > 0) {
9225 pm.pm_stat_buf = kmem_zalloc(fcio->fcio_olen, KM_SLEEP);
9226 }
9227
9228 pm.pm_cmd_code = FC_PORT_DIAG;
9229 pm.pm_cmd_flags = fcio->fcio_cmd_flags;
9230
9231 ret = port->fp_fca_tran->fca_port_manage(
9232 port->fp_fca_handle, &pm);
9233
9234 if (ret != FC_SUCCESS) {
9235 if (ret == FC_INVALID_REQUEST) {
9236 rval = ENOTTY;
9237 } else {
9238 rval = EIO;
9239 }
9240
9241 fcio->fcio_errno = ret;
9242 if (fp_fcio_copyout(fcio, data, mode)) {
9243 rval = EFAULT;
9244 }
9245 goto fp_fcio_diag_cleanup;
9246 }
9247
9248 		/*
9249 		 * pm_stat_len contains the number of status bytes the
9250 		 * FCA driver requires to return the complete status
9251 		 * of the requested diag operation. If the user buffer
9252 		 * is not large enough to hold the entire status, we
9253 		 * copy only the portion that fits in the buffer and
9254 		 * return ENOMEM to the user application.
9255 		 */
9256 if (pm.pm_stat_len > fcio->fcio_olen) {
9257 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
9258 "fp:FCIO_DIAG:status buffer too small\n");
9259
9260 rval = ENOMEM;
9261 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf,
9262 fcio->fcio_olen, mode)) {
9263 rval = EFAULT;
9264 goto fp_fcio_diag_cleanup;
9265 }
9266 } else {
9267 			/*
9268 			 * Copy only pm_stat_len bytes of data.
9269 			 */
9270 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf,
9271 pm.pm_stat_len, mode)) {
9272 rval = EFAULT;
9273 goto fp_fcio_diag_cleanup;
9274 }
9275 }
9276
9277 if (fp_fcio_copyout(fcio, data, mode)) {
9278 rval = EFAULT;
9279 }
9280
9281 fp_fcio_diag_cleanup:
9282 if (pm.pm_cmd_buf != NULL) {
9283 kmem_free(pm.pm_cmd_buf, fcio->fcio_ilen);
9284 }
9285 if (pm.pm_data_buf != NULL) {
9286 kmem_free(pm.pm_data_buf, fcio->fcio_alen);
9287 }
9288 if (pm.pm_stat_buf != NULL) {
9289 kmem_free(pm.pm_stat_buf, fcio->fcio_olen);
9290 }
9291
9292 break;
9293 }
9294
9295 case FCIO_GET_NODE_ID: {
9296 /* validate parameters */
9297 if (fcio->fcio_xfer != FCIO_XFER_READ ||
9298 fcio->fcio_olen < sizeof (fc_rnid_t)) {
9299 rval = EINVAL;
9300 break;
9301 }
9302
9303 rval = fp_get_rnid(port, data, mode, fcio);
9304
9305 /* ioctl handling is over */
9306 break;
9307 }
9308
9309 case FCIO_SEND_NODE_ID: {
9310 la_wwn_t pwwn;
9311
9312 /* validate parameters */
9313 if (fcio->fcio_ilen != sizeof (la_wwn_t) ||
9314 fcio->fcio_xfer != FCIO_XFER_READ) {
9315 rval = EINVAL;
9316 break;
9317 }
9318
9319 if (ddi_copyin(fcio->fcio_ibuf, &pwwn,
9320 sizeof (la_wwn_t), mode)) {
9321 rval = EFAULT;
9322 break;
9323 }
9324
9325 rval = fp_send_rnid(port, data, mode, fcio, &pwwn);
9326
9327 /* ioctl handling is over */
9328 break;
9329 }
9330
9331 case FCIO_SET_NODE_ID: {
9332 if (fcio->fcio_ilen != sizeof (fc_rnid_t) ||
9333 (fcio->fcio_xfer != FCIO_XFER_WRITE)) {
9334 rval = EINVAL;
9335 break;
9336 }
9337
9338 rval = fp_set_rnid(port, data, mode, fcio);
9339 break;
9340 }
9341
9342 case FCIO_LINK_STATUS: {
9343 fc_portid_t rls_req;
9344 fc_rls_acc_t *rls_acc;
9345 fc_fca_pm_t pm;
9346 uint32_t dest, src_id;
9347 fp_cmd_t *cmd;
9348 fc_remote_port_t *pd;
9349 uchar_t pd_flags;
9350
9351 /* validate parameters */
9352 if (fcio->fcio_ilen != sizeof (fc_portid_t) ||
9353 fcio->fcio_olen != sizeof (fc_rls_acc_t) ||
9354 fcio->fcio_xfer != FCIO_XFER_RW) {
9355 rval = EINVAL;
9356 break;
9357 }
9358
9359 if ((fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_FPORT) &&
9360 (fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_NPORT)) {
9361 rval = EINVAL;
9362 break;
9363 }
9364
9365 if (ddi_copyin((void *)fcio->fcio_ibuf, (void *)&rls_req,
9366 sizeof (fc_portid_t), mode)) {
9367 rval = EFAULT;
9368 break;
9369 }
9370
9371
9372 /* Determine the destination of the RLS frame */
9373 if (fcio->fcio_cmd_flags == FCIO_CFLAGS_RLS_DEST_FPORT) {
9374 dest = FS_FABRIC_F_PORT;
9375 } else {
9376 dest = rls_req.port_id;
9377 }
9378
9379 mutex_enter(&port->fp_mutex);
9380 src_id = port->fp_port_id.port_id;
9381 mutex_exit(&port->fp_mutex);
9382
9383 /* If dest is zero OR same as FCA ID, then use port_manage() */
9384 if (dest == 0 || dest == src_id) {
9385
9386 /* Allocate memory for link error status block */
9387 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
9388 ASSERT(rls_acc != NULL);
9389
9390 /* Prepare the port management structure */
9391 bzero((caddr_t)&pm, sizeof (pm));
9392
9393 pm.pm_cmd_flags = FC_FCA_PM_READ;
9394 pm.pm_cmd_code = FC_PORT_RLS;
9395 pm.pm_data_len = sizeof (*rls_acc);
9396 pm.pm_data_buf = (caddr_t)rls_acc;
9397
9398 /* Get the adapter's link error status block */
9399 ret = port->fp_fca_tran->fca_port_manage(
9400 port->fp_fca_handle, &pm);
9401
9402 if (ret == FC_SUCCESS) {
9403 /* xfer link status block to userland */
9404 if (ddi_copyout((void *)rls_acc,
9405 (void *)fcio->fcio_obuf,
9406 sizeof (*rls_acc), mode) == 0) {
9407 if (fp_fcio_copyout(fcio, data,
9408 mode)) {
9409 rval = EFAULT;
9410 }
9411 } else {
9412 rval = EFAULT;
9413 }
9414 } else {
9415 rval = EIO;
9416 fcio->fcio_errno = ret;
9417 if (fp_fcio_copyout(fcio, data, mode)) {
9418 rval = EFAULT;
9419 }
9420 }
9421
9422 kmem_free(rls_acc, sizeof (*rls_acc));
9423
9424 /* ioctl handling is over */
9425 break;
9426 }
9427
9428 		/*
9429 		 * Send RLS to the destination port.
9430 		 * Addressing the RLS frame to the F_Port is not yet
9431 		 * supported and may be implemented later, if needed.
9432 		 * The call below to get "pd" fails if dest is the F_Port.
9433 		 */
9434 pd = fctl_hold_remote_port_by_did(port, dest);
9435 if (pd == NULL) {
9436 fcio->fcio_errno = FC_BADOBJECT;
9437 rval = ENXIO;
9438 if (fp_fcio_copyout(fcio, data, mode)) {
9439 rval = EFAULT;
9440 }
9441 break;
9442 }
9443
9444 mutex_enter(&pd->pd_mutex);
9445 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
9446 mutex_exit(&pd->pd_mutex);
9447 fctl_release_remote_port(pd);
9448
9449 fcio->fcio_errno = FC_LOGINREQ;
9450 rval = EINVAL;
9451 if (fp_fcio_copyout(fcio, data, mode)) {
9452 rval = EFAULT;
9453 }
9454 break;
9455 }
9456 ASSERT(pd->pd_login_count >= 1);
9457 mutex_exit(&pd->pd_mutex);
9458
9459 		/*
9460 		 * Allocate a job structure and set job_code to DUMMY,
9461 		 * because we will not go through the job thread.
9462 		 * Instead, fp_sendcmd() is called directly here.
9463 		 */
9464 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC,
9465 NULL, NULL, KM_SLEEP);
9466 ASSERT(job != NULL);
9467
9468 job->job_counter = 1;
9469
9470 cmd = fp_alloc_pkt(port, sizeof (la_els_rls_t),
9471 sizeof (la_els_rls_acc_t), KM_SLEEP, pd);
9472 if (cmd == NULL) {
9473 fcio->fcio_errno = FC_NOMEM;
9474 rval = ENOMEM;
9475
9476 fctl_release_remote_port(pd);
9477
9478 fctl_dealloc_job(job);
9479 if (fp_fcio_copyout(fcio, data, mode)) {
9480 rval = EFAULT;
9481 }
9482 break;
9483 }
9484
9485 /* Allocate memory for link error status block */
9486 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
9487
9488 mutex_enter(&port->fp_mutex);
9489 mutex_enter(&pd->pd_mutex);
9490
9491 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
9492 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
9493 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
9494 cmd->cmd_retry_count = 1;
9495 cmd->cmd_ulp_pkt = NULL;
9496
9497 fp_rls_init(cmd, job);
9498
9499 job->job_private = (void *)rls_acc;
9500
9501 pd_flags = pd->pd_flags;
9502 pd->pd_flags = PD_ELS_IN_PROGRESS;
9503
9504 mutex_exit(&pd->pd_mutex);
9505 mutex_exit(&port->fp_mutex);
9506
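		/*
		 * PD_ELS_IN_PROGRESS guards the remote port while the RLS
		 * is outstanding; the saved pd_flags value is restored
		 * below if the command fails.
		 */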
9507 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) {
9508 fctl_jobwait(job);
9509
9510 fcio->fcio_errno = job->job_result;
9511 if (job->job_result == FC_SUCCESS) {
9512 ASSERT(pd != NULL);
9513 /*
9514 * link error status block is now available.
9515 * Copy it to userland
9516 */
9517 ASSERT(job->job_private == (void *)rls_acc);
9518 if (ddi_copyout((void *)rls_acc,
9519 (void *)fcio->fcio_obuf,
9520 sizeof (*rls_acc), mode) == 0) {
9521 if (fp_fcio_copyout(fcio, data,
9522 mode)) {
9523 rval = EFAULT;
9524 }
9525 } else {
9526 rval = EFAULT;
9527 }
9528 } else {
9529 rval = EIO;
9530 }
9531 } else {
9532 rval = EIO;
9533 fp_free_pkt(cmd);
9534 }
9535
9536 if (rval) {
9537 mutex_enter(&port->fp_mutex);
9538 mutex_enter(&pd->pd_mutex);
9539 if (pd->pd_flags == PD_ELS_IN_PROGRESS) {
9540 pd->pd_flags = pd_flags;
9541 }
9542 mutex_exit(&pd->pd_mutex);
9543 mutex_exit(&port->fp_mutex);
9544 }
9545
9546 fctl_release_remote_port(pd);
9547 fctl_dealloc_job(job);
9548 kmem_free(rls_acc, sizeof (*rls_acc));
9549
9550 if (fp_fcio_copyout(fcio, data, mode)) {
9551 rval = EFAULT;
9552 }
9553 break;
9554 }
9555
9556 case FCIO_NS: {
9557 fc_ns_cmd_t *ns_req;
9558 fc_ns_cmd32_t *ns_req32;
9559 fctl_ns_req_t *ns_cmd;
9560
9561 if (use32 == B_TRUE) {
9562 if (fcio->fcio_ilen != sizeof (*ns_req32)) {
9563 rval = EINVAL;
9564 break;
9565 }
9566
9567 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP);
9568 ns_req32 = kmem_zalloc(sizeof (*ns_req32), KM_SLEEP);
9569
9570 if (ddi_copyin(fcio->fcio_ibuf, ns_req32,
9571 sizeof (*ns_req32), mode)) {
9572 rval = EFAULT;
9573 kmem_free(ns_req, sizeof (*ns_req));
9574 kmem_free(ns_req32, sizeof (*ns_req32));
9575 break;
9576 }
9577
9578 ns_req->ns_flags = ns_req32->ns_flags;
9579 ns_req->ns_cmd = ns_req32->ns_cmd;
9580 ns_req->ns_req_len = ns_req32->ns_req_len;
9581 ns_req->ns_req_payload = ns_req32->ns_req_payload;
9582 ns_req->ns_resp_len = ns_req32->ns_resp_len;
9583 ns_req->ns_resp_payload = ns_req32->ns_resp_payload;
9584 ns_req->ns_fctl_private = ns_req32->ns_fctl_private;
9585 ns_req->ns_resp_hdr = ns_req32->ns_resp_hdr;
9586
9587 kmem_free(ns_req32, sizeof (*ns_req32));
9588 } else {
9589 if (fcio->fcio_ilen != sizeof (*ns_req)) {
9590 rval = EINVAL;
9591 break;
9592 }
9593
9594 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP);
9595
9596 if (ddi_copyin(fcio->fcio_ibuf, ns_req,
9597 sizeof (fc_ns_cmd_t), mode)) {
9598 rval = EFAULT;
9599 kmem_free(ns_req, sizeof (*ns_req));
9600 break;
9601 }
9602 }
9603
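		/*
		 * The request is now in native fc_ns_cmd_t form; build an
		 * fctl NS command from it, run it through the job thread,
		 * and copy any response payload back to the caller.
		 */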
9604 if (ns_req->ns_req_len <= 0) {
9605 rval = EINVAL;
9606 kmem_free(ns_req, sizeof (*ns_req));
9607 break;
9608 }
9609
9610 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, NULL, KM_SLEEP);
9611 ASSERT(job != NULL);
9612
9613 ns_cmd = fctl_alloc_ns_cmd(ns_req->ns_req_len,
9614 ns_req->ns_resp_len, ns_req->ns_resp_len,
9615 FCTL_NS_FILL_NS_MAP, KM_SLEEP);
9616 ASSERT(ns_cmd != NULL);
9617 ns_cmd->ns_cmd_code = ns_req->ns_cmd;
9618
9619 if (ns_cmd->ns_cmd_code == NS_GA_NXT) {
9620 ns_cmd->ns_gan_max = 1;
9621 ns_cmd->ns_gan_index = 0;
9622 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID;
9623 }
9624
9625 if (ddi_copyin(ns_req->ns_req_payload,
9626 ns_cmd->ns_cmd_buf, ns_req->ns_req_len, mode)) {
9627 rval = EFAULT;
9628 fctl_free_ns_cmd(ns_cmd);
9629 fctl_dealloc_job(job);
9630 kmem_free(ns_req, sizeof (*ns_req));
9631 break;
9632 }
9633
9634 job->job_private = (void *)ns_cmd;
9635 fctl_enque_job(port, job);
9636 fctl_jobwait(job);
9637 rval = job->job_result;
9638
9639 if (rval == FC_SUCCESS) {
9640 if (ns_req->ns_resp_len) {
9641 if (ddi_copyout(ns_cmd->ns_data_buf,
9642 ns_req->ns_resp_payload,
9643 ns_cmd->ns_data_len, mode)) {
9644 rval = EFAULT;
9645 fctl_free_ns_cmd(ns_cmd);
9646 fctl_dealloc_job(job);
9647 kmem_free(ns_req, sizeof (*ns_req));
9648 break;
9649 }
9650 }
9651 } else {
9652 rval = EIO;
9653 }
9654 ns_req->ns_resp_hdr = ns_cmd->ns_resp_hdr;
9655 fctl_free_ns_cmd(ns_cmd);
9656 fctl_dealloc_job(job);
9657 kmem_free(ns_req, sizeof (*ns_req));
9658
9659 if (fp_fcio_copyout(fcio, data, mode)) {
9660 rval = EFAULT;
9661 }
9662 break;
9663 }
9664
9665 default:
9666 rval = ENOTTY;
9667 break;
9668 }
9669
9670 /*
9671 * If set, reset the EXCL busy bit to
9672 * receive other exclusive access commands
9673 */
9674 mutex_enter(&port->fp_mutex);
9675 if (port->fp_flag & FP_EXCL_BUSY) {
9676 port->fp_flag &= ~FP_EXCL_BUSY;
9677 }
9678 mutex_exit(&port->fp_mutex);
9679
9680 return (rval);
9681 }
9682
9683
9684 /*
9685  * This function assumes that the response length
9686  * is the same regardless of data model (ILP32 or
9687  * LP64), which is true for all the ioctls
9688  * currently supported.
9689  */
9690 static int
9691 fp_copyout(void *from, void *to, size_t len, int mode)
9692 {
9693 return (ddi_copyout(from, to, len, mode));
9694 }
9695
9696 /*
9697  * Handle the FCIO_SET_NODE_ID request: set the local port's RNID data.
9698  */
9699 static int
9700 fp_set_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio)
9701 {
9702 int rval = 0;
9703 fc_rnid_t *rnid;
9704 fc_fca_pm_t pm;
9705
9706 /* Allocate memory for node id block */
9707 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP);
9708
9709 if (ddi_copyin(fcio->fcio_ibuf, rnid, sizeof (fc_rnid_t), mode)) {
9710 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", EFAULT);
9711 kmem_free(rnid, sizeof (fc_rnid_t));
9712 return (EFAULT);
9713 }
9714
9715 /* Prepare the port management structure */
9716 bzero((caddr_t)&pm, sizeof (pm));
9717
9718 pm.pm_cmd_flags = FC_FCA_PM_WRITE;
9719 pm.pm_cmd_code = FC_PORT_SET_NODE_ID;
9720 pm.pm_data_len = sizeof (*rnid);
9721 pm.pm_data_buf = (caddr_t)rnid;
9722
9723 /* Get the adapter's node data */
9724 rval = port->fp_fca_tran->fca_port_manage(
9725 port->fp_fca_handle, &pm);
9726
9727 if (rval != FC_SUCCESS) {
9728 fcio->fcio_errno = rval;
9729 rval = EIO;
9730 if (fp_fcio_copyout(fcio, data, mode)) {
9731 rval = EFAULT;
9732 }
9733 } else {
9734 mutex_enter(&port->fp_mutex);
9735 /* copy to the port structure */
9736 bcopy(rnid, &port->fp_rnid_params,
9737 sizeof (port->fp_rnid_params));
9738 mutex_exit(&port->fp_mutex);
9739 }
9740
9741 kmem_free(rnid, sizeof (fc_rnid_t));
9742
9743 if (rval != FC_SUCCESS) {
9744 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", rval);
9745 }
9746
9747 return (rval);
9748 }
9749
9750 /*
9751  * Handle the FCIO_GET_NODE_ID request: return the local port's RNID data.
9752  */
9753 static int
9754 fp_get_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio)
9755 {
9756 fc_rnid_t *rnid;
9757 fc_fca_pm_t pm;
9758 int rval = 0;
9759 uint32_t ret;
9760
9761 /* Allocate memory for rnid data block */
9762 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP);
9763
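	/*
	 * If the node-id data was fetched before, return the copy cached
	 * in fp_rnid_params instead of asking the FCA again.
	 */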
9764 mutex_enter(&port->fp_mutex);
9765 if (port->fp_rnid_init == 1) {
9766 bcopy(&port->fp_rnid_params, rnid, sizeof (fc_rnid_t));
9767 mutex_exit(&port->fp_mutex);
9768 /* xfer node info to userland */
9769 if (ddi_copyout((void *)rnid, (void *)fcio->fcio_obuf,
9770 sizeof (*rnid), mode) == 0) {
9771 if (fp_fcio_copyout(fcio, data, mode)) {
9772 rval = EFAULT;
9773 }
9774 } else {
9775 rval = EFAULT;
9776 }
9777
9778 kmem_free(rnid, sizeof (fc_rnid_t));
9779
9780 if (rval != FC_SUCCESS) {
9781 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d",
9782 rval);
9783 }
9784
9785 return (rval);
9786 }
9787 mutex_exit(&port->fp_mutex);
9788
9789 /* Prepare the port management structure */
9790 bzero((caddr_t)&pm, sizeof (pm));
9791
9792 pm.pm_cmd_flags = FC_FCA_PM_READ;
9793 pm.pm_cmd_code = FC_PORT_GET_NODE_ID;
9794 pm.pm_data_len = sizeof (fc_rnid_t);
9795 pm.pm_data_buf = (caddr_t)rnid;
9796
9797 /* Get the adapter's node data */
9798 ret = port->fp_fca_tran->fca_port_manage(
9799 port->fp_fca_handle,
9800 &pm);
9801
9802 if (ret == FC_SUCCESS) {
9803 		/* Cache the node-id data in the port structure. */
9804 mutex_enter(&port->fp_mutex);
9805 port->fp_rnid_init = 1;
9806 bcopy(rnid, &port->fp_rnid_params, sizeof (*rnid));
9807 mutex_exit(&port->fp_mutex);
9808
9809 /* xfer node info to userland */
9810 if (ddi_copyout((void *)rnid,
9811 (void *)fcio->fcio_obuf,
9812 sizeof (*rnid), mode) == 0) {
9813 if (fp_fcio_copyout(fcio, data,
9814 mode)) {
9815 rval = EFAULT;
9816 }
9817 } else {
9818 rval = EFAULT;
9819 }
9820 } else {
9821 rval = EIO;
9822 fcio->fcio_errno = ret;
9823 if (fp_fcio_copyout(fcio, data, mode)) {
9824 rval = EFAULT;
9825 }
9826 }
9827
9828 kmem_free(rnid, sizeof (fc_rnid_t));
9829
9830 if (rval != FC_SUCCESS) {
9831 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", rval);
9832 }
9833
9834 return (rval);
9835 }
9836
9837 static int
9838 fp_send_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio,
9839 la_wwn_t *pwwn)
9840 {
9841 int rval = 0;
9842 fc_remote_port_t *pd;
9843 fp_cmd_t *cmd;
9844 job_request_t *job;
9845 la_els_rnid_acc_t *rnid_acc;
9846
9847 pd = fctl_get_remote_port_by_pwwn(port, pwwn);
9848 if (pd == NULL) {
9849 		/*
9850 		 * We can safely assume that the destination port is
9851 		 * logged in: either userland explicitly logs in before
9852 		 * issuing the RNID ioctl, or the device has already
9853 		 * been configured, which implies it is logged in.
9854 		 */
9855
9856 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", ENXIO);
9857
9858 return (ENXIO);
9859 }
9860 	/*
9861 	 * Allocate a job structure and set job_code to DUMMY,
9862 	 * because we will not go through the job thread.
9863 	 * Instead, fp_sendcmd() is called directly here.
9864 	 */
9865 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC,
9866 NULL, NULL, KM_SLEEP);
9867
9868 ASSERT(job != NULL);
9869
9870 job->job_counter = 1;
9871
9872 cmd = fp_alloc_pkt(port, sizeof (la_els_rnid_t),
9873 sizeof (la_els_rnid_acc_t), KM_SLEEP, pd);
9874 if (cmd == NULL) {
9875 fcio->fcio_errno = FC_NOMEM;
9876 rval = ENOMEM;
9877
9878 fctl_dealloc_job(job);
9879 if (fp_fcio_copyout(fcio, data, mode)) {
9880 rval = EFAULT;
9881 }
9882
9883 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval);
9884
9885 return (rval);
9886 }
9887
9888 /* Allocate memory for node id accept block */
9889 rnid_acc = kmem_zalloc(sizeof (la_els_rnid_acc_t), KM_SLEEP);
9890
9891 mutex_enter(&port->fp_mutex);
9892 mutex_enter(&pd->pd_mutex);
9893
9894 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
9895 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
9896 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
9897 cmd->cmd_retry_count = 1;
9898 cmd->cmd_ulp_pkt = NULL;
9899
9900 fp_rnid_init(cmd, fcio->fcio_cmd_flags, job);
9901
9902 job->job_private = (void *)rnid_acc;
9903
9904 pd->pd_flags = PD_ELS_IN_PROGRESS;
9905
9906 mutex_exit(&pd->pd_mutex);
9907 mutex_exit(&port->fp_mutex);
9908
9909 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) {
9910 fctl_jobwait(job);
9911 fcio->fcio_errno = job->job_result;
9912 if (job->job_result == FC_SUCCESS) {
9913 int rnid_cnt;
9914 ASSERT(pd != NULL);
9915 /*
9916 * node id block is now available.
9917 * Copy it to userland
9918 */
9919 ASSERT(job->job_private == (void *)rnid_acc);
9920
9921 /* get the response length */
9922 rnid_cnt = sizeof (ls_code_t) + sizeof (fc_rnid_hdr_t) +
9923 rnid_acc->hdr.cmn_len +
9924 rnid_acc->hdr.specific_len;
9925
9926 if (fcio->fcio_olen < rnid_cnt) {
9927 rval = EINVAL;
9928 } else if (ddi_copyout((void *)rnid_acc,
9929 (void *)fcio->fcio_obuf,
9930 rnid_cnt, mode) == 0) {
9931 if (fp_fcio_copyout(fcio, data,
9932 mode)) {
9933 rval = EFAULT;
9934 }
9935 } else {
9936 rval = EFAULT;
9937 }
9938 } else {
9939 rval = EIO;
9940 }
9941 } else {
9942 rval = EIO;
9943 if (pd) {
9944 mutex_enter(&pd->pd_mutex);
9945 pd->pd_flags = PD_IDLE;
9946 mutex_exit(&pd->pd_mutex);
9947 }
9948 fp_free_pkt(cmd);
9949 }
9950
9951 fctl_dealloc_job(job);
9952 kmem_free(rnid_acc, sizeof (la_els_rnid_acc_t));
9953
9954 if (fp_fcio_copyout(fcio, data, mode)) {
9955 rval = EFAULT;
9956 }
9957
9958 if (rval != FC_SUCCESS) {
9959 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval);
9960 }
9961
9962 return (rval);
9963 }
9964
9965 /*
9966 * Copy out to userland
9967 */
9968 static int
9969 fp_fcio_copyout(fcio_t *fcio, intptr_t data, int mode)
9970 {
9971 int rval;
9972
9973 #ifdef _MULTI_DATAMODEL
9974 switch (ddi_model_convert_from(mode & FMODELS)) {
9975 case DDI_MODEL_ILP32: {
9976 struct fcio32 fcio32;
9977
9978 fcio32.fcio_xfer = fcio->fcio_xfer;
9979 fcio32.fcio_cmd = fcio->fcio_cmd;
9980 fcio32.fcio_flags = fcio->fcio_flags;
9981 fcio32.fcio_cmd_flags = fcio->fcio_cmd_flags;
9982 fcio32.fcio_ilen = fcio->fcio_ilen;
9983 fcio32.fcio_ibuf =
9984 (caddr32_t)(uintptr_t)fcio->fcio_ibuf;
9985 fcio32.fcio_olen = fcio->fcio_olen;
9986 fcio32.fcio_obuf =
9987 (caddr32_t)(uintptr_t)fcio->fcio_obuf;
9988 fcio32.fcio_alen = fcio->fcio_alen;
9989 fcio32.fcio_abuf =
9990 (caddr32_t)(uintptr_t)fcio->fcio_abuf;
9991 fcio32.fcio_errno = fcio->fcio_errno;
9992
9993 rval = ddi_copyout((void *)&fcio32, (void *)data,
9994 sizeof (struct fcio32), mode);
9995 break;
9996 }
9997 case DDI_MODEL_NONE:
9998 rval = ddi_copyout((void *)fcio, (void *)data,
9999 sizeof (fcio_t), mode);
10000 break;
10001 }
10002 #else
10003 rval = ddi_copyout((void *)fcio, (void *)data, sizeof (fcio_t), mode);
10004 #endif
10005
10006 return (rval);
10007 }
10008
10009
10010 static void
10011 fp_p2p_online(fc_local_port_t *port, job_request_t *job)
10012 {
10013 uint32_t listlen;
10014 fc_portmap_t *changelist;
10015
10016 ASSERT(MUTEX_HELD(&port->fp_mutex));
10017 ASSERT(port->fp_topology == FC_TOP_PT_PT);
10018 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
10019
10020 listlen = 0;
10021 changelist = NULL;
10022
10023 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) {
10024 if (port->fp_statec_busy > 1) {
10025 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
10026 }
10027 }
10028 mutex_exit(&port->fp_mutex);
10029
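	/*
	 * If another state change intervened (fp_statec_busy > 1), ULP
	 * notification was cancelled above; otherwise report the full
	 * device map to the ULPs as an ONLINE state change.
	 */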
10030 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) {
10031 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0);
10032 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist,
10033 listlen, listlen, KM_SLEEP);
10034
10035 mutex_enter(&port->fp_mutex);
10036 } else {
10037 ASSERT(changelist == NULL && listlen == 0);
10038 mutex_enter(&port->fp_mutex);
10039 if (--port->fp_statec_busy == 0) {
10040 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
10041 }
10042 }
10043 }
10044
10045 static int
10046 fp_fillout_p2pmap(fc_local_port_t *port, fcio_t *fcio, int mode)
10047 {
10048 int rval;
10049 int count;
10050 int index;
10051 int num_devices;
10052 fc_remote_node_t *node;
10053 fc_port_dev_t *devlist;
10054 struct pwwn_hash *head;
10055 fc_remote_port_t *pd;
10056
10057 ASSERT(MUTEX_HELD(&port->fp_mutex));
10058
10059 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t);
10060
10061 devlist = kmem_zalloc(sizeof (fc_port_dev_t) * num_devices, KM_SLEEP);
10062
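	/*
	 * Walk the port WWN hash table and collect up to num_devices
	 * entries, skipping remote ports in the INVALID state.
	 */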
10063 for (count = index = 0; index < pwwn_table_size; index++) {
10064 head = &port->fp_pwwn_table[index];
10065 pd = head->pwwn_head;
10066 while (pd != NULL) {
10067 mutex_enter(&pd->pd_mutex);
10068 if (pd->pd_state == PORT_DEVICE_INVALID) {
10069 mutex_exit(&pd->pd_mutex);
10070 pd = pd->pd_wwn_hnext;
10071 continue;
10072 }
10073
10074 devlist[count].dev_state = pd->pd_state;
10075 devlist[count].dev_hard_addr = pd->pd_hard_addr;
10076 devlist[count].dev_did = pd->pd_port_id;
10077 devlist[count].dev_did.priv_lilp_posit =
10078 (uint8_t)(index & 0xff);
10079 bcopy((caddr_t)pd->pd_fc4types,
10080 (caddr_t)devlist[count].dev_type,
10081 sizeof (pd->pd_fc4types));
10082
10083 bcopy((caddr_t)&pd->pd_port_name,
10084 (caddr_t)&devlist[count].dev_pwwn,
10085 sizeof (la_wwn_t));
10086
10087 node = pd->pd_remote_nodep;
10088 mutex_exit(&pd->pd_mutex);
10089
10090 if (node) {
10091 mutex_enter(&node->fd_mutex);
10092 bcopy((caddr_t)&node->fd_node_name,
10093 (caddr_t)&devlist[count].dev_nwwn,
10094 sizeof (la_wwn_t));
10095 mutex_exit(&node->fd_mutex);
10096 }
10097 count++;
10098 if (count >= num_devices) {
10099 goto found;
10100 }
10101 }
10102 }
10103 found:
10104 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf,
10105 sizeof (count), mode)) {
10106 rval = FC_FAILURE;
10107 } else if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf,
10108 sizeof (fc_port_dev_t) * num_devices, mode)) {
10109 rval = FC_FAILURE;
10110 } else {
10111 rval = FC_SUCCESS;
10112 }
10113
10114 kmem_free(devlist, sizeof (fc_port_dev_t) * num_devices);
10115
10116 return (rval);
10117 }
10118
10119
10120 /*
10121 * Handle Fabric ONLINE
10122 */
10123 static void
10124 fp_fabric_online(fc_local_port_t *port, job_request_t *job)
10125 {
10126 int index;
10127 int rval;
10128 int dbg_count;
10129 int count = 0;
10130 char ww_name[17];
10131 uint32_t d_id;
10132 uint32_t listlen;
10133 fctl_ns_req_t *ns_cmd;
10134 struct pwwn_hash *head;
10135 fc_remote_port_t *pd;
10136 fc_remote_port_t *npd;
10137 fc_portmap_t *changelist;
10138
10139 ASSERT(MUTEX_HELD(&port->fp_mutex));
10140 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology));
10141 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
10142
10143 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
10144 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
10145 0, KM_SLEEP);
10146
10147 ASSERT(ns_cmd != NULL);
10148
10149 ns_cmd->ns_cmd_code = NS_GID_PN;
10150
10151 /*
10152 * Check if orphans are showing up now
10153 */
10154 if (port->fp_orphan_count) {
10155 fc_orphan_t *orp;
10156 fc_orphan_t *norp = NULL;
10157 fc_orphan_t *prev = NULL;
10158
10159 for (orp = port->fp_orphan_list; orp; orp = norp) {
10160 norp = orp->orp_next;
10161 mutex_exit(&port->fp_mutex);
10162 orp->orp_nscan++;
10163
10164 job->job_counter = 1;
10165 job->job_result = FC_SUCCESS;
10166
10167 ((ns_req_gid_pn_t *)
10168 (ns_cmd->ns_cmd_buf))->pwwn = orp->orp_pwwn;
10169 ((ns_resp_gid_pn_t *)
10170 ns_cmd->ns_data_buf)->pid.port_id = 0;
10171 ((ns_resp_gid_pn_t *)
10172 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0;
10173
10174 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
10175 if (rval == FC_SUCCESS) {
10176 d_id =
10177 BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
10178 pd = fp_create_remote_port_by_ns(port,
10179 d_id, KM_SLEEP);
10180
10181 if (pd != NULL) {
10182 fc_wwn_to_str(&orp->orp_pwwn, ww_name);
10183
10184 fp_printf(port, CE_WARN, FP_LOG_ONLY,
10185 0, NULL, "N_x Port with D_ID=%x,"
10186 " PWWN=%s reappeared in fabric",
10187 d_id, ww_name);
10188
10189 mutex_enter(&port->fp_mutex);
10190 if (prev) {
10191 prev->orp_next = orp->orp_next;
10192 } else {
10193 ASSERT(orp ==
10194 port->fp_orphan_list);
10195 port->fp_orphan_list =
10196 orp->orp_next;
10197 }
10198 port->fp_orphan_count--;
10199 mutex_exit(&port->fp_mutex);
10200 kmem_free(orp, sizeof (*orp));
10201 count++;
10202
10203 mutex_enter(&pd->pd_mutex);
10204 pd->pd_flags = PD_ELS_MARK;
10205
10206 mutex_exit(&pd->pd_mutex);
10207 } else {
10208 prev = orp;
10209 }
10210 } else {
10211 if (orp->orp_nscan == FC_ORPHAN_SCAN_LIMIT) {
10212 fc_wwn_to_str(&orp->orp_pwwn, ww_name);
10213
10214 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0,
10215 NULL,
10216 " Port WWN %s removed from orphan"
10217 " list after %d scans", ww_name,
10218 orp->orp_nscan);
10219
10220 mutex_enter(&port->fp_mutex);
10221 if (prev) {
10222 prev->orp_next = orp->orp_next;
10223 } else {
10224 ASSERT(orp ==
10225 port->fp_orphan_list);
10226 port->fp_orphan_list =
10227 orp->orp_next;
10228 }
10229 port->fp_orphan_count--;
10230 mutex_exit(&port->fp_mutex);
10231
10232 kmem_free(orp, sizeof (*orp));
10233 } else {
10234 prev = orp;
10235 }
10236 }
10237 mutex_enter(&port->fp_mutex);
10238 }
10239 }
10240
10241 /*
10242 * Walk the Port WWN hash table, reestablish LOGIN
10243 * if a LOGIN is already performed on a particular
10244 * device; Any failure to LOGIN should mark the
10245 * port device OLD.
10246 */
10247 for (index = 0; index < pwwn_table_size; index++) {
10248 head = &port->fp_pwwn_table[index];
10249 npd = head->pwwn_head;
10250
10251 while ((pd = npd) != NULL) {
10252 la_wwn_t *pwwn;
10253
10254 npd = pd->pd_wwn_hnext;
10255
10256 /*
10257 			 * Don't count port devices that are new
10258 * unless the total number of devices visible
10259 * through this port is less than FP_MAX_DEVICES
10260 */
10261 mutex_enter(&pd->pd_mutex);
10262 if (port->fp_dev_count >= FP_MAX_DEVICES ||
10263 (port->fp_options & FP_TARGET_MODE)) {
10264 if (pd->pd_type == PORT_DEVICE_NEW ||
10265 pd->pd_flags == PD_ELS_MARK ||
10266 pd->pd_recepient != PD_PLOGI_INITIATOR) {
10267 mutex_exit(&pd->pd_mutex);
10268 continue;
10269 }
10270 } else {
10271 if (pd->pd_flags == PD_ELS_MARK ||
10272 pd->pd_recepient != PD_PLOGI_INITIATOR) {
10273 mutex_exit(&pd->pd_mutex);
10274 continue;
10275 }
10276 pd->pd_type = PORT_DEVICE_OLD;
10277 }
10278 count++;
10279
10280 /*
10281 * Consult with the name server about D_ID changes
10282 */
10283 job->job_counter = 1;
10284 job->job_result = FC_SUCCESS;
10285
10286 ((ns_req_gid_pn_t *)
10287 (ns_cmd->ns_cmd_buf))->pwwn = pd->pd_port_name;
10288 ((ns_resp_gid_pn_t *)
10289 ns_cmd->ns_data_buf)->pid.port_id = 0;
10290
10291 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->
10292 pid.priv_lilp_posit = 0;
10293
10294 pwwn = &pd->pd_port_name;
10295 pd->pd_flags = PD_ELS_MARK;
10296
10297 mutex_exit(&pd->pd_mutex);
10298 mutex_exit(&port->fp_mutex);
10299
10300 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
10301 if (rval != FC_SUCCESS) {
10302 fc_wwn_to_str(pwwn, ww_name);
10303
10304 mutex_enter(&pd->pd_mutex);
10305 d_id = pd->pd_port_id.port_id;
10306 pd->pd_type = PORT_DEVICE_DELETE;
10307 mutex_exit(&pd->pd_mutex);
10308
10309 FP_TRACE(FP_NHEAD1(3, 0),
10310 "fp_fabric_online: PD "
10311 "disappeared; d_id=%x, PWWN=%s",
10312 d_id, ww_name);
10313
10314 FP_TRACE(FP_NHEAD2(9, 0),
10315 "N_x Port with D_ID=%x, PWWN=%s"
10316 " disappeared from fabric", d_id,
10317 ww_name);
10318
10319 mutex_enter(&port->fp_mutex);
10320 continue;
10321 }
10322
10323 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
10324
10325 mutex_enter(&port->fp_mutex);
10326 mutex_enter(&pd->pd_mutex);
10327 if (d_id != pd->pd_port_id.port_id) {
10328 fctl_delist_did_table(port, pd);
10329 fc_wwn_to_str(pwwn, ww_name);
10330
10331 FP_TRACE(FP_NHEAD2(9, 0),
10332 "D_ID of a device with PWWN %s changed."
10333 " New D_ID = %x, OLD D_ID = %x", ww_name,
10334 d_id, pd->pd_port_id.port_id);
10335
10336 				pd->pd_port_id.port_id = d_id;
10337 pd->pd_type = PORT_DEVICE_CHANGED;
10338 fctl_enlist_did_table(port, pd);
10339 }
10340 mutex_exit(&pd->pd_mutex);
10341
10342 }
10343 }
10344
10345 if (ns_cmd) {
10346 fctl_free_ns_cmd(ns_cmd);
10347 }
10348
10349 listlen = 0;
10350 changelist = NULL;
10351 if (count) {
10352 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) {
10353 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET;
10354 mutex_exit(&port->fp_mutex);
10355 delay(drv_usectohz(FLA_RR_TOV * 1000 * 1000));
10356 mutex_enter(&port->fp_mutex);
10357 }
10358
10359 dbg_count = 0;
10360
10361 job->job_counter = count;
10362
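		/*
		 * Second pass: every remote port flagged PD_ELS_MARK in
		 * the first pass gets a PLOGI with the login retained
		 * (FP_CMD_PLOGI_RETAIN).  job_counter was primed with the
		 * number of marked devices, so the fp_jobwait() following
		 * this loop blocks until each PLOGI completes or is
		 * short-circuited here with fp_jobdone().
		 */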
10363 for (index = 0; index < pwwn_table_size; index++) {
10364 head = &port->fp_pwwn_table[index];
10365 npd = head->pwwn_head;
10366
10367 while ((pd = npd) != NULL) {
10368 npd = pd->pd_wwn_hnext;
10369
10370 mutex_enter(&pd->pd_mutex);
10371 if (pd->pd_flags != PD_ELS_MARK) {
10372 mutex_exit(&pd->pd_mutex);
10373 continue;
10374 }
10375
10376 dbg_count++;
10377
10378 /*
10379 				 * If it is already marked for deletion, nothing
10380 * else to do.
10381 */
10382 if (pd->pd_type == PORT_DEVICE_DELETE) {
10383 pd->pd_type = PORT_DEVICE_OLD;
10384
10385 mutex_exit(&pd->pd_mutex);
10386 mutex_exit(&port->fp_mutex);
10387 fp_jobdone(job);
10388 mutex_enter(&port->fp_mutex);
10389
10390 continue;
10391 }
10392
10393 /*
10394 * If it is freshly discovered out of
10395 * the orphan list, nothing else to do
10396 */
10397 if (pd->pd_type == PORT_DEVICE_NEW) {
10398 pd->pd_flags = PD_IDLE;
10399
10400 mutex_exit(&pd->pd_mutex);
10401 mutex_exit(&port->fp_mutex);
10402 fp_jobdone(job);
10403 mutex_enter(&port->fp_mutex);
10404
10405 continue;
10406 }
10407
10408 pd->pd_flags = PD_IDLE;
10409 d_id = pd->pd_port_id.port_id;
10410
10411 /*
10412 * Explicitly mark all devices OLD; successful
10413 * PLOGI should reset this to either NO_CHANGE
10414 * or CHANGED.
10415 */
10416 if (pd->pd_type != PORT_DEVICE_CHANGED) {
10417 pd->pd_type = PORT_DEVICE_OLD;
10418 }
10419
10420 mutex_exit(&pd->pd_mutex);
10421 mutex_exit(&port->fp_mutex);
10422
10423 rval = fp_port_login(port, d_id, job,
10424 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL);
10425
10426 if (rval != FC_SUCCESS) {
10427 fp_jobdone(job);
10428 }
10429 mutex_enter(&port->fp_mutex);
10430 }
10431 }
10432 mutex_exit(&port->fp_mutex);
10433
10434 ASSERT(dbg_count == count);
10435 fp_jobwait(job);
10436
10437 mutex_enter(&port->fp_mutex);
10438
10439 ASSERT(port->fp_statec_busy > 0);
10440 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) {
10441 if (port->fp_statec_busy > 1) {
10442 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
10443 }
10444 }
10445 mutex_exit(&port->fp_mutex);
10446 } else {
10447 ASSERT(port->fp_statec_busy > 0);
10448 if (port->fp_statec_busy > 1) {
10449 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
10450 }
10451 mutex_exit(&port->fp_mutex);
10452 }
10453
10454 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) {
10455 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0);
10456
10457 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist,
10458 listlen, listlen, KM_SLEEP);
10459
10460 mutex_enter(&port->fp_mutex);
10461 } else {
10462 ASSERT(changelist == NULL && listlen == 0);
10463 mutex_enter(&port->fp_mutex);
10464 if (--port->fp_statec_busy == 0) {
10465 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
10466 }
10467 }
10468 }
10469
10470
10471 /*
10472 * Fill out device list for userland ioctl in private loop
10473 */
10474 static int
10475 fp_fillout_loopmap(fc_local_port_t *port, fcio_t *fcio, int mode)
10476 {
10477 int rval;
10478 int count;
10479 int index;
10480 int num_devices;
10481 fc_remote_node_t *node;
10482 fc_port_dev_t *devlist;
10483 int lilp_device_count;
10484 fc_lilpmap_t *lilp_map;
10485 uchar_t *alpa_list;
10486
10487 ASSERT(MUTEX_HELD(&port->fp_mutex));
10488
10489 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t);
10490 if (port->fp_total_devices > port->fp_dev_count &&
10491 num_devices >= port->fp_total_devices) {
10492 job_request_t *job;
10493
10494 mutex_exit(&port->fp_mutex);
10495 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, NULL, KM_SLEEP);
10496 job->job_counter = 1;
10497
10498 mutex_enter(&port->fp_mutex);
10499 fp_get_loopmap(port, job);
10500 mutex_exit(&port->fp_mutex);
10501
10502 fp_jobwait(job);
10503 fctl_dealloc_job(job);
10504 } else {
10505 mutex_exit(&port->fp_mutex);
10506 }
10507 devlist = kmem_zalloc(sizeof (*devlist) * num_devices, KM_SLEEP);
10508
10509 mutex_enter(&port->fp_mutex);
10510
10511 /*
10512 * Applications are accustomed to getting the device list in
10513 * LILP map order. The HBA firmware usually returns the device
10514 * map in the LILP map order and diagnostic applications would
10515 	 * prefer to receive the device list in that order too
10516 */
10517 lilp_map = &port->fp_lilp_map;
10518 alpa_list = &lilp_map->lilp_alpalist[0];
10519
10520 /*
10521 * the length field corresponds to the offset in the LILP frame
10522 * which begins with 1. The thing to note here is that the
10523 	 * lilp_device_count is 1 more than port->fp_total_devices since
10524 * the host adapter's alpa also shows up in the lilp map. We
10525 * don't however return details of the host adapter since
10526 * fctl_get_remote_port_by_did fails for the host adapter's ALPA
10527 * and applications are required to issue the FCIO_GET_HOST_PARAMS
10528 * ioctl to obtain details about the host adapter port.
10529 */
10530 lilp_device_count = lilp_map->lilp_length;
10531
10532 for (count = index = 0; index < lilp_device_count &&
10533 count < num_devices; index++) {
10534 uint32_t d_id;
10535 fc_remote_port_t *pd;
10536
10537 d_id = alpa_list[index];
10538
10539 mutex_exit(&port->fp_mutex);
10540 pd = fctl_get_remote_port_by_did(port, d_id);
10541 mutex_enter(&port->fp_mutex);
10542
10543 if (pd != NULL) {
10544 mutex_enter(&pd->pd_mutex);
10545
10546 if (pd->pd_state == PORT_DEVICE_INVALID) {
10547 mutex_exit(&pd->pd_mutex);
10548 continue;
10549 }
10550
10551 devlist[count].dev_state = pd->pd_state;
10552 devlist[count].dev_hard_addr = pd->pd_hard_addr;
10553 devlist[count].dev_did = pd->pd_port_id;
10554 devlist[count].dev_did.priv_lilp_posit =
10555 (uint8_t)(index & 0xff);
10556 bcopy((caddr_t)pd->pd_fc4types,
10557 (caddr_t)devlist[count].dev_type,
10558 sizeof (pd->pd_fc4types));
10559
10560 bcopy((caddr_t)&pd->pd_port_name,
10561 (caddr_t)&devlist[count].dev_pwwn,
10562 sizeof (la_wwn_t));
10563
10564 node = pd->pd_remote_nodep;
10565 mutex_exit(&pd->pd_mutex);
10566
10567 if (node) {
10568 mutex_enter(&node->fd_mutex);
10569 bcopy((caddr_t)&node->fd_node_name,
10570 (caddr_t)&devlist[count].dev_nwwn,
10571 sizeof (la_wwn_t));
10572 mutex_exit(&node->fd_mutex);
10573 }
10574 count++;
10575 }
10576 }
10577
10578 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf,
10579 sizeof (count), mode)) {
10580 rval = FC_FAILURE;
10581 	} else if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf,
10584 sizeof (fc_port_dev_t) * num_devices, mode)) {
10585 rval = FC_FAILURE;
10586 } else {
10587 rval = FC_SUCCESS;
10588 }
10589
10590 kmem_free(devlist, sizeof (*devlist) * num_devices);
10591 ASSERT(MUTEX_HELD(&port->fp_mutex));
10592
10593 return (rval);
10594 }
10595
10596
10597 /*
10598 * Completion function for responses to unsolicited commands
10599 */
10600 static void
10601 fp_unsol_intr(fc_packet_t *pkt)
10602 {
10603 fp_cmd_t *cmd;
10604 fc_local_port_t *port;
10605
10606 cmd = pkt->pkt_ulp_private;
10607 port = cmd->cmd_port;
10608
10609 mutex_enter(&port->fp_mutex);
10610 port->fp_out_fpcmds--;
10611 mutex_exit(&port->fp_mutex);
10612
10613 if (pkt->pkt_state != FC_PKT_SUCCESS) {
10614 fp_printf(port, CE_WARN, FP_LOG_ONLY, 0, pkt,
10615 "couldn't post response to unsolicited request;"
10616 " ox_id=%x rx_id=%x", pkt->pkt_cmd_fhdr.ox_id,
10617 pkt->pkt_resp_fhdr.rx_id);
10618 }
10619
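	/*
	 * The pre-allocated ELS response packet belongs to the port and
	 * is only marked free again; any other packet was allocated for
	 * this particular response and is freed here.
	 */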
10620 if (cmd == port->fp_els_resp_pkt) {
10621 mutex_enter(&port->fp_mutex);
10622 port->fp_els_resp_pkt_busy = 0;
10623 mutex_exit(&port->fp_mutex);
10624 return;
10625 }
10626
10627 fp_free_pkt(cmd);
10628 }
10629
10630
10631 /*
10632 * solicited LINIT ELS completion function
10633 */
10634 static void
10635 fp_linit_intr(fc_packet_t *pkt)
10636 {
10637 fp_cmd_t *cmd;
10638 job_request_t *job;
10639 fc_linit_resp_t acc;
10640 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port;
10641
10642 cmd = (fp_cmd_t *)pkt->pkt_ulp_private;
10643
10644 mutex_enter(&cmd->cmd_port->fp_mutex);
10645 cmd->cmd_port->fp_out_fpcmds--;
10646 mutex_exit(&cmd->cmd_port->fp_mutex);
10647
10648 if (FP_IS_PKT_ERROR(pkt)) {
10649 (void) fp_common_intr(pkt, 1);
10650 return;
10651 }
10652
10653 job = cmd->cmd_job;
10654
10655 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&acc,
10656 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
10657 if (acc.status != FC_LINIT_SUCCESS) {
10658 job->job_result = FC_FAILURE;
10659 } else {
10660 job->job_result = FC_SUCCESS;
10661 }
10662
10663 fp_iodone(cmd);
10664 }
10665
10666
10667 /*
10668 * Decode the unsolicited request; For FC-4 Device and Link data frames
10669 * notify the registered ULP of this FC-4 type right here. For Unsolicited
10670 * ELS requests, submit a request to the job_handler thread to work on it.
10671 * The intent is to act quickly on the FC-4 unsolicited link and data frames
10672 * and save much of the interrupt time processing of unsolicited ELS requests
10673 * and hand it off to the job_handler thread.
10674 */
10675 static void
10676 fp_unsol_cb(opaque_t port_handle, fc_unsol_buf_t *buf, uint32_t type)
10677 {
10678 uchar_t r_ctl;
10679 uchar_t ls_code;
10680 uint32_t s_id;
10681 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
10682 uint32_t cb_arg;
10683 fp_cmd_t *cmd;
10684 fc_local_port_t *port;
10685 job_request_t *job;
10686 fc_remote_port_t *pd;
10687
10688 port = port_handle;
10689
10690 FP_TRACE(FP_NHEAD1(1, 0), "fp_unsol_cb: s_id=%x,"
10691 " d_id=%x, type=%x, r_ctl=%x, f_ctl=%x"
10692 " seq_id=%x, df_ctl=%x, seq_cnt=%x, ox_id=%x, rx_id=%x"
10693 " ro=%x, buffer[0]:%x", buf->ub_frame.s_id, buf->ub_frame.d_id,
10694 buf->ub_frame.type, buf->ub_frame.r_ctl, buf->ub_frame.f_ctl,
10695 buf->ub_frame.seq_id, buf->ub_frame.df_ctl, buf->ub_frame.seq_cnt,
10696 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro,
10697 buf->ub_buffer[0]);
10698
10699 if (type & 0x80000000) {
10700 /*
10701 * Huh ? Nothing much can be done without
10702 * a valid buffer. So just exit.
10703 */
10704 return;
10705 }
10706 /*
10707 	 * If unsolicited interrupts arrive while it isn't
10708 	 * safe to handle unsolicited callbacks, drop them; yes,
10709 	 * drop them on the floor.
10710 */
10711 mutex_enter(&port->fp_mutex);
10712 port->fp_active_ubs++;
10713 if ((port->fp_soft_state &
10714 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) ||
10715 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
10716
10717 FP_TRACE(FP_NHEAD1(3, 0), "fp_unsol_cb: port state is "
10718 "not ONLINE. s_id=%x, d_id=%x, type=%x, "
10719 "seq_id=%x, ox_id=%x, rx_id=%x"
10720 	    " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id,
10721 buf->ub_frame.type, buf->ub_frame.seq_id,
10722 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro);
10723
10724 ASSERT(port->fp_active_ubs > 0);
10725 if (--(port->fp_active_ubs) == 0) {
10726 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB;
10727 }
10728
10729 mutex_exit(&port->fp_mutex);
10730
10731 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle,
10732 1, &buf->ub_token);
10733
10734 return;
10735 }
10736
10737 r_ctl = buf->ub_frame.r_ctl;
10738 s_id = buf->ub_frame.s_id;
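	/*
	 * FP_SOFT_IN_UNSOL_CB is raised on the 0 -> 1 transition of
	 * fp_active_ubs and cleared whenever the count drops back to
	 * zero, so other paths can tell that unsolicited callbacks are
	 * still being processed.
	 */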
10739 if (port->fp_active_ubs == 1) {
10740 port->fp_soft_state |= FP_SOFT_IN_UNSOL_CB;
10741 }
10742
10743 if (r_ctl == R_CTL_ELS_REQ && buf->ub_buffer[0] == LA_ELS_LOGO &&
10744 port->fp_statec_busy) {
10745 mutex_exit(&port->fp_mutex);
10746 pd = fctl_get_remote_port_by_did(port, s_id);
10747 if (pd) {
10748 mutex_enter(&pd->pd_mutex);
10749 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
10750 FP_TRACE(FP_NHEAD1(3, 0),
10751 "LOGO for LOGGED IN D_ID %x",
10752 buf->ub_frame.s_id);
10753 pd->pd_state = PORT_DEVICE_VALID;
10754 }
10755 mutex_exit(&pd->pd_mutex);
10756 }
10757
10758 mutex_enter(&port->fp_mutex);
10759 ASSERT(port->fp_active_ubs > 0);
10760 if (--(port->fp_active_ubs) == 0) {
10761 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB;
10762 }
10763 mutex_exit(&port->fp_mutex);
10764
10765 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle,
10766 1, &buf->ub_token);
10767
10768 FP_TRACE(FP_NHEAD1(3, 0),
10769 "fp_unsol_cb() bailing out LOGO for D_ID %x",
10770 buf->ub_frame.s_id);
10771 return;
10772 }
10773
10774 if (port->fp_els_resp_pkt_busy == 0) {
10775 if (r_ctl == R_CTL_ELS_REQ) {
10776 ls_code = buf->ub_buffer[0];
10777
10778 switch (ls_code) {
10779 case LA_ELS_PLOGI:
10780 case LA_ELS_FLOGI:
10781 port->fp_els_resp_pkt_busy = 1;
10782 mutex_exit(&port->fp_mutex);
10783 fp_i_handle_unsol_els(port, buf);
10784
10785 mutex_enter(&port->fp_mutex);
10786 ASSERT(port->fp_active_ubs > 0);
10787 if (--(port->fp_active_ubs) == 0) {
10788 port->fp_soft_state &=
10789 ~FP_SOFT_IN_UNSOL_CB;
10790 }
10791 mutex_exit(&port->fp_mutex);
10792 port->fp_fca_tran->fca_ub_release(
10793 port->fp_fca_handle, 1, &buf->ub_token);
10794
10795 return;
10796 case LA_ELS_RSCN:
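				/*
				 * Bump the per-port RSCN counter, skipping
				 * the FC_INVALID_RSCN_COUNT sentinel so a
				 * valid count is never mistaken for
				 * "no RSCN seen".
				 */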
10797 if (++(port)->fp_rscn_count ==
10798 FC_INVALID_RSCN_COUNT) {
10799 ++(port)->fp_rscn_count;
10800 }
10801 rscn_count = port->fp_rscn_count;
10802 break;
10803
10804 default:
10805 break;
10806 }
10807 }
10808 } else if ((r_ctl == R_CTL_ELS_REQ) &&
10809 (buf->ub_buffer[0] == LA_ELS_RSCN)) {
10810 if (++port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
10811 ++port->fp_rscn_count;
10812 }
10813 rscn_count = port->fp_rscn_count;
10814 }
10815
10816 mutex_exit(&port->fp_mutex);
10817
10818 switch (r_ctl & R_CTL_ROUTING) {
10819 case R_CTL_DEVICE_DATA:
10820 /*
10821 * If the unsolicited buffer is a CT IU,
10822 * have the job_handler thread work on it.
10823 */
10824 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) {
10825 break;
10826 }
10827 /* FALLTHROUGH */
10828
10829 case R_CTL_FC4_SVC: {
10830 int sendup = 0;
10831
10832 /*
10833 		 * If a LOGIN isn't performed before this request,
10834 		 * shut the door on this port with a reply that a
10835 * LOGIN is required. We make an exception however
10836 * for IP broadcast packets and pass them through
10837 * to the IP ULP(s) to handle broadcast requests.
10838 * This is not a problem for private loop devices
10839 * but for fabric topologies we don't log into the
10840 * remote ports during port initialization and
10841 * the ULPs need to log into requesting ports on
10842 * demand.
10843 */
10844 pd = fctl_get_remote_port_by_did(port, s_id);
10845 if (pd) {
10846 mutex_enter(&pd->pd_mutex);
10847 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
10848 sendup++;
10849 }
10850 mutex_exit(&pd->pd_mutex);
10851 } else if ((pd == NULL) &&
10852 (buf->ub_frame.type == FC_TYPE_IS8802_SNAP) &&
10853 (buf->ub_frame.d_id == 0xffffff ||
10854 buf->ub_frame.d_id == 0x00)) {
10855 			/* broadcast IP frame - so send up via job thread */
10856 break;
10857 }
10858
10859 /*
10860 * Send all FC4 services via job thread too
10861 */
10862 if ((r_ctl & R_CTL_ROUTING) == R_CTL_FC4_SVC) {
10863 break;
10864 }
10865
10866 if (sendup || !FC_IS_REAL_DEVICE(s_id)) {
10867 fctl_ulp_unsol_cb(port, buf, buf->ub_frame.type);
10868 return;
10869 }
10870
10871 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
10872 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
10873 0, KM_NOSLEEP, pd);
10874 if (cmd != NULL) {
10875 fp_els_rjt_init(port, cmd, buf,
10876 FC_ACTION_NON_RETRYABLE,
10877 FC_REASON_LOGIN_REQUIRED, NULL);
10878
10879 if (fp_sendcmd(port, cmd,
10880 port->fp_fca_handle) != FC_SUCCESS) {
10881 fp_free_pkt(cmd);
10882 }
10883 }
10884 }
10885
10886 mutex_enter(&port->fp_mutex);
10887 ASSERT(port->fp_active_ubs > 0);
10888 if (--(port->fp_active_ubs) == 0) {
10889 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB;
10890 }
10891 mutex_exit(&port->fp_mutex);
10892 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle,
10893 1, &buf->ub_token);
10894
10895 return;
10896 }
10897
10898 default:
10899 break;
10900 }
10901
10902 /*
10903 * Submit a Request to the job_handler thread to work
10904 * on the unsolicited request. The potential side effect
10905 * of this is that the unsolicited buffer takes a little
10906 * longer to get released but we save interrupt time in
10907 * the bargain.
10908 */
10909 cb_arg = (rscn_count == FC_INVALID_RSCN_COUNT) ? 0 : rscn_count;
10910
10911 /*
10912 * One way that the rscn_count will get used is described below :
10913 *
10914 * 1. fp_unsol_cb() gets an RSCN and updates fp_rscn_count.
10915 * 2. Before mutex is released, a copy of it is stored in rscn_count.
10916 * 3. The count is passed to job thread as JOB_UNSOL_REQUEST (below)
10917 * by overloading the job_cb_arg to pass the rscn_count
10918 * 4. When one of the routines processing the RSCN picks it up (ex:
10919 * fp_validate_rscn_page()), it passes this count in the map
10920 * structure (as part of the map_rscn_info structure member) to the
10921 * ULPs.
10922 * 5. When ULPs make calls back to the transport (example interfaces for
10923 * this are fc_ulp_transport(), fc_ulp_login(), fc_issue_els()), they
10924 * can now pass back this count as part of the fc_packet's
10925 * pkt_ulp_rscn_count member. fcp does this currently.
10926 * 6. When transport gets a call to transport a command on the wire, it
10927 * will check to see if there is a valid pkt_ulp_rsvd1 field in the
10928 * fc_packet. If there is, it will match that info with the current
10929 * rscn_count on that instance of the port. If they don't match up
10930 * then there was a newer RSCN. The ULP gets back an error code which
10931 * informs it about it - FC_DEVICE_BUSY_NEW_RSCN.
10932 * 7. At this point the ULP is free to make up its own mind as to how to
10933 * handle this. Currently, fcp will reset its retry counters and keep
10934 * retrying the operation it was doing in anticipation of getting a
10935 * new state change call back for the new RSCN.
10936 */
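	/*
	 * The snapshot taken above is handed to the job thread through
	 * the job's callback argument (step 3 of the description); a
	 * cb_arg of zero simply means no RSCN count was captured.
	 */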
10937 job = fctl_alloc_job(JOB_UNSOL_REQUEST, 0, NULL,
10938 (opaque_t)(uintptr_t)cb_arg, KM_NOSLEEP);
10939 if (job == NULL) {
10940 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, "fp_unsol_cb() "
10941 "couldn't submit a job to the thread, failing..");
10942
10943 mutex_enter(&port->fp_mutex);
10944
10945 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
10946 --port->fp_rscn_count;
10947 }
10948
10949 ASSERT(port->fp_active_ubs > 0);
10950 if (--(port->fp_active_ubs) == 0) {
10951 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB;
10952 }
10953
10954 mutex_exit(&port->fp_mutex);
10955 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle,
10956 1, &buf->ub_token);
10957
10958 return;
10959 }
10960 job->job_private = (void *)buf;
10961 fctl_enque_job(port, job);
10962 }
10963
10964
10965 /*
10966 * Handle unsolicited requests
10967 */
10968 static void
10969 fp_handle_unsol_buf(fc_local_port_t *port, fc_unsol_buf_t *buf,
10970 job_request_t *job)
10971 {
10972 uchar_t r_ctl;
10973 uchar_t ls_code;
10974 uint32_t s_id;
10975 fp_cmd_t *cmd;
10976 fc_remote_port_t *pd;
10977 fp_unsol_spec_t *ub_spec;
10978
10979 r_ctl = buf->ub_frame.r_ctl;
10980 s_id = buf->ub_frame.s_id;
10981
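	/*
	 * Route on the R_CTL bits: known extended link services are
	 * handled inline (unknown ELS codes go to the ULPs via the
	 * taskq), basic link services get a BA_RJT, unsolicited CT
	 * (FC services) frames are rejected, other FC-4 device and
	 * link data is handed to the ULPs through the taskq, and
	 * anything else is answered with an LS_RJT.
	 */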
10982 switch (r_ctl & R_CTL_ROUTING) {
10983 case R_CTL_EXTENDED_SVC:
10984 if (r_ctl != R_CTL_ELS_REQ) {
10985 break;
10986 }
10987
10988 ls_code = buf->ub_buffer[0];
10989 switch (ls_code) {
10990 case LA_ELS_LOGO:
10991 case LA_ELS_ADISC:
10992 case LA_ELS_PRLO:
10993 pd = fctl_get_remote_port_by_did(port, s_id);
10994 if (pd == NULL) {
10995 if (!FC_IS_REAL_DEVICE(s_id)) {
10996 break;
10997 }
10998 if (!FP_IS_CLASS_1_OR_2(buf->ub_class)) {
10999 break;
11000 }
11001 if ((cmd = fp_alloc_pkt(port,
11002 sizeof (la_els_rjt_t), 0, KM_SLEEP,
11003 NULL)) == NULL) {
11004 /*
11005 * Can this actually fail when
11006 * given KM_SLEEP? (Could be used
11007 * this way in a number of places.)
11008 */
11009 break;
11010 }
11011
11012 fp_els_rjt_init(port, cmd, buf,
11013 FC_ACTION_NON_RETRYABLE,
11014 FC_REASON_INVALID_LINK_CTRL, job);
11015
11016 if (fp_sendcmd(port, cmd,
11017 port->fp_fca_handle) != FC_SUCCESS) {
11018 fp_free_pkt(cmd);
11019 }
11020
11021 break;
11022 }
11023 if (ls_code == LA_ELS_LOGO) {
11024 fp_handle_unsol_logo(port, buf, pd, job);
11025 } else if (ls_code == LA_ELS_ADISC) {
11026 fp_handle_unsol_adisc(port, buf, pd, job);
11027 } else {
11028 fp_handle_unsol_prlo(port, buf, pd, job);
11029 }
11030 break;
11031
11032 case LA_ELS_PLOGI:
11033 fp_handle_unsol_plogi(port, buf, job, KM_SLEEP);
11034 break;
11035
11036 case LA_ELS_FLOGI:
11037 fp_handle_unsol_flogi(port, buf, job, KM_SLEEP);
11038 break;
11039
11040 case LA_ELS_RSCN:
11041 fp_handle_unsol_rscn(port, buf, job, KM_SLEEP);
11042 break;
11043
11044 default:
11045 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP);
11046 ub_spec->port = port;
11047 ub_spec->buf = buf;
11048
11049 (void) taskq_dispatch(port->fp_taskq,
11050 fp_ulp_unsol_cb, ub_spec, KM_SLEEP);
11051 return;
11052 }
11053 break;
11054
11055 case R_CTL_BASIC_SVC:
11056 /*
11057 * The unsolicited basic link services could be ABTS
11058 * and RMC (Or even a NOP). Just BA_RJT them until
11059 * such time there arises a need to handle them more
11060 * carefully.
11061 */
11062 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11063 cmd = fp_alloc_pkt(port, sizeof (la_ba_rjt_t),
11064 0, KM_SLEEP, NULL);
11065 if (cmd != NULL) {
11066 fp_ba_rjt_init(port, cmd, buf, job);
11067 if (fp_sendcmd(port, cmd,
11068 port->fp_fca_handle) != FC_SUCCESS) {
11069 fp_free_pkt(cmd);
11070 }
11071 }
11072 }
11073 break;
11074
11075 case R_CTL_DEVICE_DATA:
11076 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) {
11077 /*
11078 * Mostly this is of type FC_TYPE_FC_SERVICES.
11079 * As we don't like any Unsolicited FC services
11080 * requests, we would do well to RJT them as
11081 * well.
11082 */
11083 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11084 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
11085 0, KM_SLEEP, NULL);
11086 if (cmd != NULL) {
11087 fp_els_rjt_init(port, cmd, buf,
11088 FC_ACTION_NON_RETRYABLE,
11089 FC_REASON_INVALID_LINK_CTRL, job);
11090
11091 if (fp_sendcmd(port, cmd,
11092 port->fp_fca_handle) !=
11093 FC_SUCCESS) {
11094 fp_free_pkt(cmd);
11095 }
11096 }
11097 }
11098 break;
11099 }
11100 /* FALLTHROUGH */
11101
11102 case R_CTL_FC4_SVC:
11103 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP);
11104 ub_spec->port = port;
11105 ub_spec->buf = buf;
11106
11107 (void) taskq_dispatch(port->fp_taskq,
11108 fp_ulp_unsol_cb, ub_spec, KM_SLEEP);
11109 return;
11110
11111 case R_CTL_LINK_CTL:
11112 /*
11113 * Turn deaf ear on unsolicited link control frames.
11114 * Typical unsolicited link control Frame is an LCR
11115 * (to reset End to End credit to the default login
11116 * value and abort current sequences for all classes)
11117 * An intelligent microcode/firmware should handle
11118 * this transparently at its level and not pass all
11119 * the way up here.
11120 *
11121 * Possible responses to LCR are R_RDY, F_RJT, P_RJT
11122 * or F_BSY. P_RJT is chosen to be the most appropriate
11123 * at this time.
11124 */
11125 /* FALLTHROUGH */
11126
11127 default:
11128 /*
11129 * Just reject everything else as an invalid request.
11130 */
11131 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11132 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
11133 0, KM_SLEEP, NULL);
11134 if (cmd != NULL) {
11135 fp_els_rjt_init(port, cmd, buf,
11136 FC_ACTION_NON_RETRYABLE,
11137 FC_REASON_INVALID_LINK_CTRL, job);
11138
11139 if (fp_sendcmd(port, cmd,
11140 port->fp_fca_handle) != FC_SUCCESS) {
11141 fp_free_pkt(cmd);
11142 }
11143 }
11144 }
11145 break;
11146 }
11147
11148 mutex_enter(&port->fp_mutex);
11149 ASSERT(port->fp_active_ubs > 0);
11150 if (--(port->fp_active_ubs) == 0) {
11151 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB;
11152 }
11153 mutex_exit(&port->fp_mutex);
11154 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle,
11155 1, &buf->ub_token);
11156 }
11157
11158
11159 /*
11160 * Prepare a BA_RJT and send it over.
11161 */
11162 static void
11163 fp_ba_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf,
11164 job_request_t *job)
11165 {
11166 fc_packet_t *pkt;
11167 la_ba_rjt_t payload;
11168
11169 ASSERT(!MUTEX_HELD(&port->fp_mutex));
11170
11171 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
11172 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
11173 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
11174 cmd->cmd_retry_count = 1;
11175 cmd->cmd_ulp_pkt = NULL;
11176
11177 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
11178 cmd->cmd_job = job;
11179
11180 pkt = &cmd->cmd_pkt;
11181
11182 fp_unsol_resp_init(pkt, buf, R_CTL_LS_BA_RJT, FC_TYPE_BASIC_LS);
11183
11184 payload.reserved = 0;
11185 payload.reason_code = FC_REASON_CMD_UNSUPPORTED;
11186 payload.explanation = FC_EXPLN_NONE;
11187 payload.vendor = 0;
11188
11189 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
11190 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
11191 }
11192
11193
11194 /*
11195 * Prepare an LS_RJT and send it over
11196 */
11197 static void
11198 fp_els_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf,
11199 uchar_t action, uchar_t reason, job_request_t *job)
11200 {
11201 fc_packet_t *pkt;
11202 la_els_rjt_t payload;
11203
11204 ASSERT(!MUTEX_HELD(&port->fp_mutex));
11205
11206 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
11207 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
11208 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
11209 cmd->cmd_retry_count = 1;
11210 cmd->cmd_ulp_pkt = NULL;
11211
11212 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
11213 cmd->cmd_job = job;
11214
11215 pkt = &cmd->cmd_pkt;
11216
11217 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
11218
11219 payload.ls_code.ls_code = LA_ELS_RJT;
11220 payload.ls_code.mbz = 0;
11221 payload.action = action;
11222 payload.reason = reason;
11223 payload.reserved = 0;
11224 payload.vu = 0;
11225
11226 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
11227 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
11228 }
11229
11230 /*
11231 * Function: fp_prlo_acc_init
11232 *
11233  * Description: Initializes a Link Service Accept for a PRLO.
11234 *
11235 * Arguments: *port Local port through which the PRLO was
11236 * received.
11237 * cmd Command that will carry the accept.
11238 * *buf Unsolicited buffer containing the PRLO
11239 * request.
11240 * job Job request.
11241 * sleep Allocation mode.
11242 *
11243 * Return Value: *cmd Command containing the response.
11244 *
11245 * Context: Depends on the parameter sleep.
11246 */
11247 fp_cmd_t *
11248 fp_prlo_acc_init(fc_local_port_t *port, fc_remote_port_t *pd,
11249 fc_unsol_buf_t *buf, job_request_t *job, int sleep)
11250 {
11251 fp_cmd_t *cmd;
11252 fc_packet_t *pkt;
11253 la_els_prlo_t *req;
11254 size_t len;
11255 uint16_t flags;
11256
11257 req = (la_els_prlo_t *)buf->ub_buffer;
11258 len = (size_t)ntohs(req->payload_length);
11259
11260 /*
11261 	 * The payload of the accept to a PRLO has to be an exact match of
11262 	 * the payload of the request (with the exception of the LS code).
11263 */
11264 cmd = fp_alloc_pkt(port, (int)len, 0, sleep, pd);
11265
11266 if (cmd) {
11267 /*
11268 * The fp command was successfully allocated.
11269 */
11270 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
11271 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
11272 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
11273 cmd->cmd_retry_count = 1;
11274 cmd->cmd_ulp_pkt = NULL;
11275
11276 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
11277 cmd->cmd_job = job;
11278
11279 pkt = &cmd->cmd_pkt;
11280
11281 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP,
11282 FC_TYPE_EXTENDED_LS);
11283
11284 /* The code is overwritten for the copy. */
11285 req->ls_code = LA_ELS_ACC;
11286 /* Response code is set. */
11287 flags = ntohs(req->flags);
11288 flags &= ~SP_RESP_CODE_MASK;
11289 flags |= SP_RESP_CODE_REQ_EXECUTED;
11290 req->flags = htons(flags);
11291
11292 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)req,
11293 (uint8_t *)pkt->pkt_cmd, len, DDI_DEV_AUTOINCR);
11294 }
11295 return (cmd);
11296 }
11297
11298 /*
11299 * Prepare an ACC response to an ELS request
11300 */
11301 static void
11302 fp_els_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf,
11303 job_request_t *job)
11304 {
11305 fc_packet_t *pkt;
11306 ls_code_t payload;
11307
11308 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
11309 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
11310 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
11311 cmd->cmd_retry_count = 1;
11312 cmd->cmd_ulp_pkt = NULL;
11313
11314 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
11315 cmd->cmd_job = job;
11316
11317 pkt = &cmd->cmd_pkt;
11318
11319 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
11320
11321 payload.ls_code = LA_ELS_ACC;
11322 payload.mbz = 0;
11323
11324 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
11325 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
11326 }
11327
11328 /*
11329 * Unsolicited PRLO handler
11330 *
11331 * A Process Logout should be handled by the ULP that established it. However,
11332 * some devices send a PRLO to trigger a PLOGI followed by a PRLI. This happens
11333 * when a device implicitly logs out an initiator (for whatever reason) and
11334  * tries to get that initiator to reestablish the connection (PLOGI and PRLI).
11335 * The logical thing to do for the device would be to send a LOGO in response
11336 * to any FC4 frame sent by the initiator. Some devices choose, however, to send
11337 * a PRLO instead.
11338 *
11339 * From a Fibre Channel standpoint a PRLO calls for a PRLI. There's no reason to
11340 * think that the Port Login has been lost. If we follow the Fibre Channel
11341 * protocol to the letter a PRLI should be sent after accepting the PRLO. If
11342 * the Port Login has also been lost, the remote port will reject the PRLI
11343 * indicating that we must PLOGI first. The initiator will then turn around and
11344 * send a PLOGI. The way Leadville is layered and the way the ULP interface
11345 * is defined doesn't allow this scenario to be followed easily. If FCP were to
11346 * handle the PRLO and attempt the PRLI, the reject indicating that a PLOGI is
11347 * needed would be received by FCP. FCP would have, then, to tell the transport
11348 * (fp) to PLOGI. The problem is, the transport would still think the Port
11349 * Login is valid and there is no way for FCP to tell the transport: "PLOGI even
11350 * if you think it's not necessary". To work around that difficulty, the PRLO
11351 * is treated by the transport as a LOGO. The downside to it is a Port Login
11352 * may be disrupted (if a PLOGI wasn't actually needed) and another ULP (that
11353 * has nothing to do with the PRLO) may be impacted. However, this is a
11354 * scenario very unlikely to happen. As of today the only ULP in Leadville
11355 * using PRLI/PRLOs is FCP. For a PRLO to disrupt another ULP (that would be
11356 * FCIP), a SCSI target would have to be running FCP and FCIP (which is very
11357 * unlikely).
11358 */
11359 static void
11360 fp_handle_unsol_prlo(fc_local_port_t *port, fc_unsol_buf_t *buf,
11361 fc_remote_port_t *pd, job_request_t *job)
11362 {
11363 int busy;
11364 int rval;
11365 int retain;
11366 fp_cmd_t *cmd;
11367 fc_portmap_t *listptr;
11368 boolean_t tolerance;
11369 la_els_prlo_t *req;
11370
11371 req = (la_els_prlo_t *)buf->ub_buffer;
11372
11373 if ((ntohs(req->payload_length) !=
11374 (sizeof (service_parameter_page_t) + sizeof (ls_code_t))) ||
11375 (req->page_length != sizeof (service_parameter_page_t))) {
11376 /*
11377 		 * We are being very restrictive: only one page per
11378 		 * payload. If that is not the case we reject the ELS,
11379 		 * although we should instead reply indicating that we
11380 		 * handle only a single page per PRLO.
11381 */
11382 goto fp_reject_prlo;
11383 }
11384
11385 if (ntohs(req->payload_length) > buf->ub_bufsize) {
11386 /*
11387 		 * This is in case the payload advertises a size bigger than
11388 * what it really is.
11389 */
11390 goto fp_reject_prlo;
11391 }
11392
11393 mutex_enter(&port->fp_mutex);
11394 busy = port->fp_statec_busy;
11395 mutex_exit(&port->fp_mutex);
11396
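	/*
	 * fctl_tc_increment() tracks how often this remote port has
	 * logged us out; judging by the FC_LOGO_TOLERANCE_* limits
	 * quoted in the trace below, it flags ports that exceed the
	 * logout tolerance, in which case the login is not retained
	 * and the pd is invalidated.
	 */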
11397 mutex_enter(&pd->pd_mutex);
11398 tolerance = fctl_tc_increment(&pd->pd_logo_tc);
11399 if (!busy) {
11400 if (pd->pd_state != PORT_DEVICE_LOGGED_IN ||
11401 pd->pd_state == PORT_DEVICE_INVALID ||
11402 pd->pd_flags == PD_ELS_IN_PROGRESS ||
11403 pd->pd_type == PORT_DEVICE_OLD) {
11404 busy++;
11405 }
11406 }
11407
11408 if (busy) {
11409 mutex_exit(&pd->pd_mutex);
11410
11411 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x,"
11412 		    " pd=%p - busy",
11413 pd->pd_port_id.port_id, pd);
11414
11415 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11416 goto fp_reject_prlo;
11417 }
11418 } else {
11419 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0;
11420
11421 if (tolerance) {
11422 fctl_tc_reset(&pd->pd_logo_tc);
11423 retain = 0;
11424 pd->pd_state = PORT_DEVICE_INVALID;
11425 }
11426
11427 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p,"
11428 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd,
11429 tolerance, retain);
11430
11431 pd->pd_aux_flags |= PD_LOGGED_OUT;
11432 mutex_exit(&pd->pd_mutex);
11433
11434 cmd = fp_prlo_acc_init(port, pd, buf, job, KM_SLEEP);
11435 if (cmd == NULL) {
11436 return;
11437 }
11438
11439 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
11440 if (rval != FC_SUCCESS) {
11441 fp_free_pkt(cmd);
11442 return;
11443 }
11444
11445 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP);
11446
11447 if (retain) {
11448 fp_unregister_login(pd);
11449 fctl_copy_portmap(listptr, pd);
11450 } else {
11451 uint32_t d_id;
11452 char ww_name[17];
11453
11454 mutex_enter(&pd->pd_mutex);
11455 d_id = pd->pd_port_id.port_id;
11456 fc_wwn_to_str(&pd->pd_port_name, ww_name);
11457 mutex_exit(&pd->pd_mutex);
11458
11459 FP_TRACE(FP_NHEAD2(9, 0),
11460 "N_x Port with D_ID=%x, PWWN=%s logged out"
11461 " %d times in %d us; Giving up", d_id, ww_name,
11462 FC_LOGO_TOLERANCE_LIMIT,
11463 FC_LOGO_TOLERANCE_TIME_LIMIT);
11464
11465 fp_fillout_old_map(listptr, pd, 0);
11466 listptr->map_type = PORT_DEVICE_OLD;
11467 }
11468
11469 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0);
11470 return;
11471 }
11472
11473 fp_reject_prlo:
11474
11475 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 0, KM_SLEEP, pd);
11476 if (cmd != NULL) {
11477 fp_els_rjt_init(port, cmd, buf, FC_ACTION_NON_RETRYABLE,
11478 FC_REASON_INVALID_LINK_CTRL, job);
11479
11480 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
11481 fp_free_pkt(cmd);
11482 }
11483 }
11484 }
11485
11486 /*
11487 * Unsolicited LOGO handler
11488 */
11489 static void
11490 fp_handle_unsol_logo(fc_local_port_t *port, fc_unsol_buf_t *buf,
11491 fc_remote_port_t *pd, job_request_t *job)
11492 {
11493 int busy;
11494 int rval;
11495 int retain;
11496 fp_cmd_t *cmd;
11497 fc_portmap_t *listptr;
11498 boolean_t tolerance;
11499
11500 mutex_enter(&port->fp_mutex);
11501 busy = port->fp_statec_busy;
11502 mutex_exit(&port->fp_mutex);
11503
11504 mutex_enter(&pd->pd_mutex);
11505 tolerance = fctl_tc_increment(&pd->pd_logo_tc);
11506 if (!busy) {
11507 if (pd->pd_state != PORT_DEVICE_LOGGED_IN ||
11508 pd->pd_state == PORT_DEVICE_INVALID ||
11509 pd->pd_flags == PD_ELS_IN_PROGRESS ||
11510 pd->pd_type == PORT_DEVICE_OLD) {
11511 busy++;
11512 }
11513 }
11514
11515 if (busy) {
11516 mutex_exit(&pd->pd_mutex);
11517
11518 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x,"
11519 		    " pd=%p - busy",
11520 pd->pd_port_id.port_id, pd);
11521
11522 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11523 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
11524 0, KM_SLEEP, pd);
11525 if (cmd != NULL) {
11526 fp_els_rjt_init(port, cmd, buf,
11527 FC_ACTION_NON_RETRYABLE,
11528 FC_REASON_INVALID_LINK_CTRL, job);
11529
11530 if (fp_sendcmd(port, cmd,
11531 port->fp_fca_handle) != FC_SUCCESS) {
11532 fp_free_pkt(cmd);
11533 }
11534 }
11535 }
11536 } else {
11537 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0;
11538
11539 if (tolerance) {
11540 fctl_tc_reset(&pd->pd_logo_tc);
11541 retain = 0;
11542 pd->pd_state = PORT_DEVICE_INVALID;
11543 }
11544
11545 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p,"
11546 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd,
11547 tolerance, retain);
11548
11549 pd->pd_aux_flags |= PD_LOGGED_OUT;
11550 mutex_exit(&pd->pd_mutex);
11551
11552 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0,
11553 KM_SLEEP, pd);
11554 if (cmd == NULL) {
11555 return;
11556 }
11557
11558 fp_els_acc_init(port, cmd, buf, job);
11559
11560 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
11561 if (rval != FC_SUCCESS) {
11562 fp_free_pkt(cmd);
11563 return;
11564 }
11565
11566 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP);
11567
11568 if (retain) {
11569 job_request_t *job;
11570 fctl_ns_req_t *ns_cmd;
11571
11572 /*
11573 			 * When a LOGO is received, first try to get the PID
11574 			 * from the name server; if that fails, there is no
11575 			 * need to send a PLOGI to that remote port.
11576 */
11577 job = fctl_alloc_job(
11578 JOB_NS_CMD, 0, NULL, (opaque_t)port, KM_SLEEP);
11579
11580 if (job != NULL) {
11581 ns_cmd = fctl_alloc_ns_cmd(
11582 sizeof (ns_req_gid_pn_t),
11583 sizeof (ns_resp_gid_pn_t),
11584 sizeof (ns_resp_gid_pn_t),
11585 0, KM_SLEEP);
11586 if (ns_cmd != NULL) {
11587 int ret;
11588 job->job_result = FC_SUCCESS;
11589 ns_cmd->ns_cmd_code = NS_GID_PN;
11590 ((ns_req_gid_pn_t *)
11591 (ns_cmd->ns_cmd_buf))->pwwn =
11592 pd->pd_port_name;
11593 ret = fp_ns_query(
11594 port, ns_cmd, job, 1, KM_SLEEP);
11595 if ((ret != FC_SUCCESS) ||
11596 (job->job_result != FC_SUCCESS)) {
11597 fctl_free_ns_cmd(ns_cmd);
11598 fctl_dealloc_job(job);
11599 FP_TRACE(FP_NHEAD2(9, 0),
11600 					    "NS query failed, delete pd");
11602 goto delete_pd;
11603 }
11604 fctl_free_ns_cmd(ns_cmd);
11605 }
11606 fctl_dealloc_job(job);
11607 }
11608 fp_unregister_login(pd);
11609 fctl_copy_portmap(listptr, pd);
11610 } else {
11611 uint32_t d_id;
11612 char ww_name[17];
11613
11614 delete_pd:
11615 mutex_enter(&pd->pd_mutex);
11616 d_id = pd->pd_port_id.port_id;
11617 fc_wwn_to_str(&pd->pd_port_name, ww_name);
11618 mutex_exit(&pd->pd_mutex);
11619
11620 FP_TRACE(FP_NHEAD2(9, 0),
11621 "N_x Port with D_ID=%x, PWWN=%s logged out"
11622 " %d times in %d us; Giving up", d_id, ww_name,
11623 FC_LOGO_TOLERANCE_LIMIT,
11624 FC_LOGO_TOLERANCE_TIME_LIMIT);
11625
11626 fp_fillout_old_map(listptr, pd, 0);
11627 listptr->map_type = PORT_DEVICE_OLD;
11628 }
11629
11630 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0);
11631 }
11632 }
11633
11634
11635 /*
11636 * Perform general purpose preparation of a response to an unsolicited request
11637 */
11638 static void
11639 fp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
11640 uchar_t r_ctl, uchar_t type)
11641 {
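	/*
	 * Mirror the unsolicited frame header: source and destination
	 * IDs are swapped, the sequence and exchange identifiers are
	 * echoed back, and completion is routed to fp_unsol_intr().
	 */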
11642 pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
11643 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
11644 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
11645 pkt->pkt_cmd_fhdr.type = type;
11646 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
11647 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
11648 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl;
11649 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
11650 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
11651 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
11652 pkt->pkt_cmd_fhdr.ro = 0;
11653 pkt->pkt_cmd_fhdr.rsvd = 0;
11654 pkt->pkt_comp = fp_unsol_intr;
11655 pkt->pkt_timeout = FP_ELS_TIMEOUT;
11656 pkt->pkt_ub_resp_token = (opaque_t)buf;
11657 }
11658
11659 /*
11660 * Immediate handling of unsolicited FLOGI and PLOGI requests. In the
11661 * early development days of public loop soc+ firmware, numerous problems
11662 * were encountered (the details are undocumented and history now) which
11663 * led to the birth of this function.
11664 *
11665 * If a pre-allocated unsolicited response packet is free, send out an
11666 * immediate response, otherwise submit the request to the port thread
11667 * to do the deferred processing.
11668 */
11669 static void
11670 fp_i_handle_unsol_els(fc_local_port_t *port, fc_unsol_buf_t *buf)
11671 {
11672 int sent;
11673 int f_port;
11674 int do_acc;
11675 fp_cmd_t *cmd;
11676 la_els_logi_t *payload;
11677 fc_remote_port_t *pd;
11678 char dww_name[17];
11679
11680 ASSERT(!MUTEX_HELD(&port->fp_mutex));
11681
11682 cmd = port->fp_els_resp_pkt;
11683
11684 mutex_enter(&port->fp_mutex);
11685 do_acc = (port->fp_statec_busy == 0) ? 1 : 0;
11686 mutex_exit(&port->fp_mutex);
11687
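	/*
	 * do_acc records whether a link state change is in progress.
	 * While one is (do_acc == 0), a PLOGI is accepted only when our
	 * WWN is the smaller one or the sender is a well-known fabric
	 * address; otherwise it is rejected with "logical busy" to
	 * resolve the login collision.
	 */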
11688 switch (buf->ub_buffer[0]) {
11689 case LA_ELS_PLOGI: {
11690 int small;
11691
11692 payload = (la_els_logi_t *)buf->ub_buffer;
11693
11694 f_port = FP_IS_F_PORT(payload->
11695 common_service.cmn_features) ? 1 : 0;
11696
11697 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name,
11698 &payload->nport_ww_name);
11699 pd = fctl_get_remote_port_by_pwwn(port,
11700 &payload->nport_ww_name);
11701 if (pd) {
11702 mutex_enter(&pd->pd_mutex);
11703 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0;
11704 /*
11705 * Most likely this means a cross login is in
11706 * progress or a device about to be yanked out.
11707 * Only accept the plogi if my wwn is smaller.
11708 */
11709 if (pd->pd_type == PORT_DEVICE_OLD) {
11710 sent = 1;
11711 }
11712 /*
11713 			 * Stop any PLOGI attempt from the local side
11714 			 * to speed up the discovery progress.
11715 			 * Mark the pd as PD_PLOGI_RECEPIENT.
11717 */
11718 if (f_port == 0 && small < 0) {
11719 pd->pd_recepient = PD_PLOGI_RECEPIENT;
11720 }
11721 fc_wwn_to_str(&pd->pd_port_name, dww_name);
11722
11723 mutex_exit(&pd->pd_mutex);
11724
11725 FP_TRACE(FP_NHEAD1(3, 0), "fp_i_handle_unsol_els: "
11726 "Unsol PLOGI received. PD still exists in the "
11727 "PWWN list. pd=%p PWWN=%s, sent=%x",
11728 pd, dww_name, sent);
11729
11730 if (f_port == 0 && small < 0) {
11731 FP_TRACE(FP_NHEAD1(3, 0),
11732 "fp_i_handle_unsol_els: Mark the pd"
11733 " as plogi recipient, pd=%p, PWWN=%s"
11734 ", sent=%x",
11735 pd, dww_name, sent);
11736 }
11737 } else {
11738 sent = 0;
11739 }
11740
11741 /*
11742 * To avoid Login collisions, accept only if my WWN
11743 * is smaller than the requester (A curious side note
11744 * would be that this rule may not satisfy the PLOGIs
11745 * initiated by the switch from not-so-well known
11746 * ports such as 0xFFFC41)
11747 */
11748 if ((f_port == 0 && small < 0) ||
11749 (((small > 0 && do_acc) ||
11750 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) {
11751 if (fp_is_class_supported(port->fp_cos,
11752 buf->ub_class) == FC_FAILURE) {
11753 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11754 cmd->cmd_pkt.pkt_cmdlen =
11755 sizeof (la_els_rjt_t);
11756 cmd->cmd_pkt.pkt_rsplen = 0;
11757 fp_els_rjt_init(port, cmd, buf,
11758 FC_ACTION_NON_RETRYABLE,
11759 FC_REASON_CLASS_NOT_SUPP, NULL);
11760 FP_TRACE(FP_NHEAD1(3, 0),
11761 "fp_i_handle_unsol_els: "
11762 "Unsupported class. "
11763 "Rejecting PLOGI");
11764
11765 } else {
11766 mutex_enter(&port->fp_mutex);
11767 port->fp_els_resp_pkt_busy = 0;
11768 mutex_exit(&port->fp_mutex);
11769 return;
11770 }
11771 } else {
11772 cmd->cmd_pkt.pkt_cmdlen =
11773 sizeof (la_els_logi_t);
11774 cmd->cmd_pkt.pkt_rsplen = 0;
11775
11776 /*
11777 * If fp_port_id is zero and topology is
11778 * Point-to-Point, get the local port id from
11779 * the d_id in the PLOGI request.
11780 * If the outgoing FLOGI hasn't been accepted,
11781 * the topology will be unknown here. But it's
11782 * still safe to save the d_id to fp_port_id,
11783 * just because it will be overwritten later
11784 * if the topology is not Point-to-Point.
11785 */
11786 mutex_enter(&port->fp_mutex);
11787 if ((port->fp_port_id.port_id == 0) &&
11788 (port->fp_topology == FC_TOP_PT_PT ||
11789 port->fp_topology == FC_TOP_UNKNOWN)) {
11790 port->fp_port_id.port_id =
11791 buf->ub_frame.d_id;
11792 }
11793 mutex_exit(&port->fp_mutex);
11794
11795 /*
11796 * Sometime later, we should validate
11797 * the service parameters instead of
11798 * just accepting it.
11799 */
11800 fp_login_acc_init(port, cmd, buf, NULL,
11801 KM_NOSLEEP);
11802 FP_TRACE(FP_NHEAD1(3, 0),
11803 "fp_i_handle_unsol_els: Accepting PLOGI,"
11804 " f_port=%d, small=%d, do_acc=%d,"
11805 " sent=%d.", f_port, small, do_acc,
11806 sent);
11807 }
11808 } else {
11809 if (FP_IS_CLASS_1_OR_2(buf->ub_class) ||
11810 port->fp_options & FP_SEND_RJT) {
11811 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t);
11812 cmd->cmd_pkt.pkt_rsplen = 0;
11813 fp_els_rjt_init(port, cmd, buf,
11814 FC_ACTION_NON_RETRYABLE,
11815 FC_REASON_LOGICAL_BSY, NULL);
11816 FP_TRACE(FP_NHEAD1(3, 0),
11817 "fp_i_handle_unsol_els: "
11818 "Rejecting PLOGI with Logical Busy."
11819 				    " Possible Login collision.");
11820 } else {
11821 mutex_enter(&port->fp_mutex);
11822 port->fp_els_resp_pkt_busy = 0;
11823 mutex_exit(&port->fp_mutex);
11824 return;
11825 }
11826 }
11827 break;
11828 }
11829
11830 case LA_ELS_FLOGI:
11831 if (fp_is_class_supported(port->fp_cos,
11832 buf->ub_class) == FC_FAILURE) {
11833 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11834 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t);
11835 cmd->cmd_pkt.pkt_rsplen = 0;
11836 fp_els_rjt_init(port, cmd, buf,
11837 FC_ACTION_NON_RETRYABLE,
11838 FC_REASON_CLASS_NOT_SUPP, NULL);
11839 FP_TRACE(FP_NHEAD1(3, 0),
11840 "fp_i_handle_unsol_els: "
11841 "Unsupported Class. Rejecting FLOGI.");
11842 } else {
11843 mutex_enter(&port->fp_mutex);
11844 port->fp_els_resp_pkt_busy = 0;
11845 mutex_exit(&port->fp_mutex);
11846 return;
11847 }
11848 } else {
11849 mutex_enter(&port->fp_mutex);
11850 if (FC_PORT_STATE_MASK(port->fp_state) !=
11851 FC_STATE_ONLINE || (port->fp_port_id.port_id &&
11852 buf->ub_frame.s_id == port->fp_port_id.port_id)) {
11853 mutex_exit(&port->fp_mutex);
11854 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11855 cmd->cmd_pkt.pkt_cmdlen =
11856 sizeof (la_els_rjt_t);
11857 cmd->cmd_pkt.pkt_rsplen = 0;
11858 fp_els_rjt_init(port, cmd, buf,
11859 FC_ACTION_NON_RETRYABLE,
11860 FC_REASON_INVALID_LINK_CTRL,
11861 NULL);
11862 FP_TRACE(FP_NHEAD1(3, 0),
11863 "fp_i_handle_unsol_els: "
11864 "Invalid Link Ctrl. "
11865 "Rejecting FLOGI.");
11866 } else {
11867 mutex_enter(&port->fp_mutex);
11868 port->fp_els_resp_pkt_busy = 0;
11869 mutex_exit(&port->fp_mutex);
11870 return;
11871 }
11872 } else {
11873 mutex_exit(&port->fp_mutex);
11874 cmd->cmd_pkt.pkt_cmdlen =
11875 sizeof (la_els_logi_t);
11876 cmd->cmd_pkt.pkt_rsplen = 0;
11877 /*
11878 * Let's not aggressively validate the N_Port's
11879 * service parameters until PLOGI. Suffice it
11880 * to give a hint that we are an N_Port and we
11881 * are game to some serious stuff here.
11882 */
11883 fp_login_acc_init(port, cmd, buf,
11884 NULL, KM_NOSLEEP);
11885 FP_TRACE(FP_NHEAD1(3, 0),
11886 "fp_i_handle_unsol_els: "
11887 "Accepting FLOGI.");
11888 }
11889 }
11890 break;
11891
11892 default:
11893 return;
11894 }
11895
11896 if ((fp_sendcmd(port, cmd, port->fp_fca_handle)) != FC_SUCCESS) {
11897 mutex_enter(&port->fp_mutex);
11898 port->fp_els_resp_pkt_busy = 0;
11899 mutex_exit(&port->fp_mutex);
11900 }
11901 }
11902
11903
11904 /*
11905 * Handle unsolicited PLOGI request
11906 */
11907 static void
11908 fp_handle_unsol_plogi(fc_local_port_t *port, fc_unsol_buf_t *buf,
11909 job_request_t *job, int sleep)
11910 {
11911 int sent;
11912 int small;
11913 int f_port;
11914 int do_acc;
11915 fp_cmd_t *cmd;
11916 la_wwn_t *swwn;
11917 la_wwn_t *dwwn;
11918 la_els_logi_t *payload;
11919 fc_remote_port_t *pd;
11920 char dww_name[17];
11921
11922 payload = (la_els_logi_t *)buf->ub_buffer;
11923 f_port = FP_IS_F_PORT(payload->common_service.cmn_features) ? 1 : 0;
11924
11925 mutex_enter(&port->fp_mutex);
11926 do_acc = (port->fp_statec_busy == 0) ? 1 : 0;
11927 mutex_exit(&port->fp_mutex);
11928
11929 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: s_id=%x, d_id=%x,"
11930 	    " type=%x, f_ctl=%x"
11931 " seq_id=%x, ox_id=%x, rx_id=%x"
11932 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id,
11933 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id,
11934 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro);
11935
11936 swwn = &port->fp_service_params.nport_ww_name;
11937 dwwn = &payload->nport_ww_name;
11938 small = fctl_wwn_cmp(swwn, dwwn);
11939 pd = fctl_get_remote_port_by_pwwn(port, dwwn);
11940 if (pd) {
11941 mutex_enter(&pd->pd_mutex);
11942 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0;
11943 /*
11944 * Most likely this means a cross login is in
11945 * progress or a device about to be yanked out.
11946 * Only accept the plogi if my wwn is smaller.
11947 */
11948
11949 if (pd->pd_type == PORT_DEVICE_OLD) {
11950 sent = 1;
11951 }
11952 /*
11953 		 * Stop any PLOGI attempt from the local side
11954 		 * to speed up the discovery progress.
11955 		 * Mark the pd as PD_PLOGI_RECEPIENT.
11957 */
11958 if (f_port == 0 && small < 0) {
11959 pd->pd_recepient = PD_PLOGI_RECEPIENT;
11960 }
11961 fc_wwn_to_str(&pd->pd_port_name, dww_name);
11962
11963 mutex_exit(&pd->pd_mutex);
11964
11965 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: Unsol PLOGI"
11966 " received. PD still exists in the PWWN list. pd=%p "
11967 "PWWN=%s, sent=%x", pd, dww_name, sent);
11968
11969 if (f_port == 0 && small < 0) {
11970 FP_TRACE(FP_NHEAD1(3, 0),
11971 "fp_handle_unsol_plogi: Mark the pd"
11972 " as plogi recipient, pd=%p, PWWN=%s"
11973 ", sent=%x",
11974 pd, dww_name, sent);
11975 }
11976 } else {
11977 sent = 0;
11978 }
11979
11980 /*
11981 * Avoid Login collisions by accepting only if my WWN is smaller.
11982 *
11983 * A side note: There is no need to start a PLOGI from this end in
11984 * this context if login isn't going to be accepted for the
11985 * above reason as either a LIP (in private loop), RSCN (in
11986 * fabric topology), or an FLOGI (in point to point - Huh ?
11987 * check FC-PH) would normally drive the PLOGI from this end.
11988 * At this point of time there is no need for an inbound PLOGI
11989 * to kick an outbound PLOGI when it is going to be rejected
11990 * for the reason of WWN being smaller. However it isn't hard
11991 * to do that either (when such a need arises, start a timer
11992 * for a duration that extends beyond a normal device discovery
11993 * time and check if an outbound PLOGI did go before that, if
11994 * none fire one)
11995 *
11996 * Unfortunately, as it turned out, during booting, it is possible
11997 * to miss another initiator in the same loop as port driver
11998 * instances are serially attached. While preserving the above
11999 * comments for belly laughs, please kick an outbound PLOGI in
12000 * a non-switch environment (which is a pt pt between N_Ports or
12001 * a private loop)
12002 *
12003 * While preserving the above comments for amusement, send an
12004 * ACC if the PLOGI is going to be rejected for WWN being smaller
12005 * when no discovery is in progress at this end. Turn around
12006 * and make the port device as the PLOGI initiator, so that
12007 * during subsequent link/loop initialization, this end drives
12008 * the PLOGI (In fact both ends do in this particular case, but
12009 * only one wins)
12010 *
12011 * Make sure the PLOGIs initiated by the switch from not-so-well-known
12012 * ports (such as 0xFFFC41) are accepted too.
12013 */
12014 if ((f_port == 0 && small < 0) || (((small > 0 && do_acc) ||
12015 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) {
12016 if (fp_is_class_supported(port->fp_cos,
12017 buf->ub_class) == FC_FAILURE) {
12018 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
12019 cmd = fp_alloc_pkt(port,
12020 sizeof (la_els_logi_t), 0, sleep, pd);
12021 if (cmd == NULL) {
12022 return;
12023 }
12024 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t);
12025 cmd->cmd_pkt.pkt_rsplen = 0;
12026 fp_els_rjt_init(port, cmd, buf,
12027 FC_ACTION_NON_RETRYABLE,
12028 FC_REASON_CLASS_NOT_SUPP, job);
12029 FP_TRACE(FP_NHEAD1(3, 0),
12030 "fp_handle_unsol_plogi: "
12031 "Unsupported class. rejecting PLOGI");
12032 }
12033 } else {
12034 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t),
12035 0, sleep, pd);
12036 if (cmd == NULL) {
12037 return;
12038 }
12039 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_logi_t);
12040 cmd->cmd_pkt.pkt_rsplen = 0;
12041
12042 /*
12043 * Sometime later, we should validate the service
12044 * parameters instead of just accepting it.
12045 */
12046 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP);
12047 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: "
12048 "Accepting PLOGI, f_port=%d, small=%d, "
12049 "do_acc=%d, sent=%d.", f_port, small, do_acc,
12050 sent);
12051
12052 /*
12053 * If fp_port_id is zero and topology is
12054 * Point-to-Point, get the local port id from
12055 * the d_id in the PLOGI request.
12056 * If the outgoing FLOGI hasn't been accepted,
12057 * the topology will be unknown here. But it's
12058 * still safe to save the d_id to fp_port_id,
12059 * just because it will be overwritten later
12060 * if the topology is not Point-to-Point.
12061 */
12062 mutex_enter(&port->fp_mutex);
12063 if ((port->fp_port_id.port_id == 0) &&
12064 (port->fp_topology == FC_TOP_PT_PT ||
12065 port->fp_topology == FC_TOP_UNKNOWN)) {
12066 port->fp_port_id.port_id =
12067 buf->ub_frame.d_id;
12068 }
12069 mutex_exit(&port->fp_mutex);
12070 }
12071 } else {
12072 if (FP_IS_CLASS_1_OR_2(buf->ub_class) ||
12073 port->fp_options & FP_SEND_RJT) {
12074 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t),
12075 0, sleep, pd);
12076 if (cmd == NULL) {
12077 return;
12078 }
12079 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t);
12080 cmd->cmd_pkt.pkt_rsplen = 0;
12081 /*
12082 * Send out Logical busy to indicate
12083 * the detection of PLOGI collision
12084 */
12085 fp_els_rjt_init(port, cmd, buf,
12086 FC_ACTION_NON_RETRYABLE,
12087 FC_REASON_LOGICAL_BSY, job);
12088
12089 fc_wwn_to_str(dwwn, dww_name);
12090 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: "
12091 "Rejecting Unsol PLOGI with Logical Busy."
12092 "possible PLOGI collision. PWWN=%s, sent=%x",
12093 dww_name, sent);
12094 } else {
12095 return;
12096 }
12097 }
12098
12099 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
12100 fp_free_pkt(cmd);
12101 }
12102 }
12103
12104
12105 /*
12106 * Handle mischievous turning over of our own FLOGI requests back to
12107 * us by the SOC+ microcode. In other words, look at the class of such
12108 * bone headed requests, if 1 or 2, bluntly P_RJT them, if 3 drop them
12109 * on the floor
12110 */
12111 static void
12112 fp_handle_unsol_flogi(fc_local_port_t *port, fc_unsol_buf_t *buf,
12113 job_request_t *job, int sleep)
12114 {
12115 uint32_t state;
12116 uint32_t s_id;
12117 fp_cmd_t *cmd;
12118
12119 if (fp_is_class_supported(port->fp_cos, buf->ub_class) == FC_FAILURE) {
12120 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
12121 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
12122 0, sleep, NULL);
12123 if (cmd == NULL) {
12124 return;
12125 }
12126 fp_els_rjt_init(port, cmd, buf,
12127 FC_ACTION_NON_RETRYABLE,
12128 FC_REASON_CLASS_NOT_SUPP, job);
12129 } else {
12130 return;
12131 }
12132 } else {
12133
12134 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi:"
12135 " s_id=%x, d_id=%x, type=%x, f_ctl=%x"
12136 " seq_id=%x, ox_id=%x, rx_id=%x, ro=%x",
12137 buf->ub_frame.s_id, buf->ub_frame.d_id,
12138 buf->ub_frame.type, buf->ub_frame.f_ctl,
12139 buf->ub_frame.seq_id, buf->ub_frame.ox_id,
12140 buf->ub_frame.rx_id, buf->ub_frame.ro);
12141
12142 mutex_enter(&port->fp_mutex);
12143 state = FC_PORT_STATE_MASK(port->fp_state);
12144 s_id = port->fp_port_id.port_id;
12145 mutex_exit(&port->fp_mutex);
12146
12147 if (state != FC_STATE_ONLINE ||
12148 (s_id && buf->ub_frame.s_id == s_id)) {
12149 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
12150 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
12151 0, sleep, NULL);
12152 if (cmd == NULL) {
12153 return;
12154 }
12155 fp_els_rjt_init(port, cmd, buf,
12156 FC_ACTION_NON_RETRYABLE,
12157 FC_REASON_INVALID_LINK_CTRL, job);
12158 FP_TRACE(FP_NHEAD1(3, 0),
12159 "fp_handle_unsol_flogi: "
12160 "Rejecting PLOGI. Invalid Link CTRL");
12161 } else {
12162 return;
12163 }
12164 } else {
12165 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t),
12166 0, sleep, NULL);
12167 if (cmd == NULL) {
12168 return;
12169 }
12170 /*
12171 * Let's not aggressively validate the N_Port's
12172 * service parameters until PLOGI. Suffice it
12173 * to give a hint that we are an N_Port and we
12174 * are game to some serious stuff here.
12175 */
12176 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP);
12177 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi: "
12178 "Accepting PLOGI");
12179 }
12180 }
12181
12182 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
12183 fp_free_pkt(cmd);
12184 }
12185 }
12186
12187
12188 /*
12189 * Perform PLOGI accept
12190 */
12191 static void
12192 fp_login_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf,
12193 job_request_t *job, int sleep)
12194 {
12195 fc_packet_t *pkt;
12196 fc_portmap_t *listptr;
12197 la_els_logi_t payload;
12198
12199 ASSERT(buf != NULL);
12200
12201 /*
12202 	 * If we are sending an ACC to a PLOGI and we haven't already
12203 	 * created port and node device handles, let's create them
12204 	 * here.
12205 */
12206 if (buf->ub_buffer[0] == LA_ELS_PLOGI &&
12207 FC_IS_REAL_DEVICE(buf->ub_frame.s_id)) {
12208 int small;
12209 int do_acc;
12210 fc_remote_port_t *pd;
12211 la_els_logi_t *req;
12212
12213 req = (la_els_logi_t *)buf->ub_buffer;
12214 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name,
12215 &req->nport_ww_name);
12216
12217 mutex_enter(&port->fp_mutex);
12218 do_acc = (port->fp_statec_busy == 0) ? 1 : 0;
12219 mutex_exit(&port->fp_mutex);
12220
12221 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_acc_init fp %x, pd %x",
12222 port->fp_port_id.port_id, buf->ub_frame.s_id);
12223 pd = fctl_create_remote_port(port, &req->node_ww_name,
12224 &req->nport_ww_name, buf->ub_frame.s_id,
12225 PD_PLOGI_RECEPIENT, sleep);
12226 if (pd == NULL) {
12227 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: "
12228 "Couldn't create port device for d_id:0x%x",
12229 buf->ub_frame.s_id);
12230
12231 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
12232 "couldn't create port device d_id=%x",
12233 buf->ub_frame.s_id);
12234 } else {
12235 /*
12236 * usoc currently returns PLOGIs inline and
12237 * the maximum buffer size is 60 bytes or so.
12238 * So attempt not to look beyond what is in
12239 * the unsolicited buffer
12240 *
12241 * JNI also traverses this path sometimes
12242 */
12243 if (buf->ub_bufsize >= sizeof (la_els_logi_t)) {
12244 fp_register_login(NULL, pd, req, buf->ub_class);
12245 } else {
12246 mutex_enter(&pd->pd_mutex);
12247 if (pd->pd_login_count == 0) {
12248 pd->pd_login_count++;
12249 }
12250 pd->pd_state = PORT_DEVICE_LOGGED_IN;
12251 pd->pd_login_class = buf->ub_class;
12252 mutex_exit(&pd->pd_mutex);
12253 }
12254
12255 listptr = kmem_zalloc(sizeof (fc_portmap_t), sleep);
12256 if (listptr != NULL) {
12257 fctl_copy_portmap(listptr, pd);
12258 (void) fp_ulp_devc_cb(port, listptr,
12259 1, 1, sleep, 0);
12260 }
12261
12262 if (small > 0 && do_acc) {
12263 mutex_enter(&pd->pd_mutex);
12264 pd->pd_recepient = PD_PLOGI_INITIATOR;
12265 mutex_exit(&pd->pd_mutex);
12266 }
12267 }
12268 }
12269
12270 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
12271 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
12272 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
12273 cmd->cmd_retry_count = 1;
12274 cmd->cmd_ulp_pkt = NULL;
12275
12276 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
12277 cmd->cmd_job = job;
12278
12279 pkt = &cmd->cmd_pkt;
12280
12281 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
12282
12283 payload = port->fp_service_params;
12284 payload.ls_code.ls_code = LA_ELS_ACC;
12285
12286 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
12287 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
12288
12289 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: ELS:0x%x d_id:0x%x "
12290 "bufsize:0x%x sizeof (la_els_logi):0x%x "
12291 "port's wwn:0x%01x%03x%04x%08x requestor's wwn:0x%01x%03x%04x%08x "
12292 "statec_busy:0x%x", buf->ub_buffer[0], buf->ub_frame.s_id,
12293 buf->ub_bufsize, sizeof (la_els_logi_t),
12294 port->fp_service_params.nport_ww_name.w.naa_id,
12295 port->fp_service_params.nport_ww_name.w.nport_id,
12296 port->fp_service_params.nport_ww_name.w.wwn_hi,
12297 port->fp_service_params.nport_ww_name.w.wwn_lo,
12298 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.naa_id,
12299 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.nport_id,
12300 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_hi,
12301 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_lo,
12302 port->fp_statec_busy);
12303 }
12304
12305
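/*
 * Length of the scratch buffer used below to build the "affected_page_%d"
 * nvpair names carried in the RSCN sysevent payload.
 */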
12306 #define RSCN_EVENT_NAME_LEN 256
12307
12308 /*
12309 * Handle RSCNs
12310 */
12311 static void
12312 fp_handle_unsol_rscn(fc_local_port_t *port, fc_unsol_buf_t *buf,
12313 job_request_t *job, int sleep)
12314 {
12315 uint32_t mask;
12316 fp_cmd_t *cmd;
12317 uint32_t count;
12318 int listindex;
12319 int16_t len;
12320 fc_rscn_t *payload;
12321 fc_portmap_t *listptr;
12322 fctl_ns_req_t *ns_cmd;
12323 fc_affected_id_t *page;
12324 caddr_t nvname;
12325 nvlist_t *attr_list = NULL;
12326
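	/*
	 * fp_rscn_count is presumably bumped by the unsolicited callback when
	 * the RSCN arrives; whenever the event is dropped here without
	 * notifying ULPs it is decremented again, and the extra decrement in
	 * the pattern below skips over the FC_INVALID_RSCN_COUNT sentinel.
	 */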
12327 mutex_enter(&port->fp_mutex);
12328 if (!FC_IS_TOP_SWITCH(port->fp_topology)) {
12329 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
12330 --port->fp_rscn_count;
12331 }
12332 mutex_exit(&port->fp_mutex);
12333 return;
12334 }
12335 mutex_exit(&port->fp_mutex);
12336
12337 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, sleep, NULL);
12338 if (cmd != NULL) {
12339 fp_els_acc_init(port, cmd, buf, job);
12340 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
12341 fp_free_pkt(cmd);
12342 }
12343 }
12344
12345 payload = (fc_rscn_t *)buf->ub_buffer;
12346 ASSERT(payload->rscn_code == LA_ELS_RSCN);
12347 ASSERT(payload->rscn_len == FP_PORT_IDENTIFIER_LEN);
12348
12349 len = payload->rscn_payload_len - FP_PORT_IDENTIFIER_LEN;
12350
12351 if (len <= 0) {
12352 mutex_enter(&port->fp_mutex);
12353 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
12354 --port->fp_rscn_count;
12355 }
12356 mutex_exit(&port->fp_mutex);
12357
12358 return;
12359 }
12360
12361 	ASSERT((len & 0x3) == 0);	/* Must be a multiple of 4 */
12362 count = (len >> 2) << 1; /* number of pages multiplied by 2 */
12363
12364 listptr = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep);
12365 page = (fc_affected_id_t *)(buf->ub_buffer + sizeof (fc_rscn_t));
12366
12367 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
12368
12369 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpn_id_t),
12370 sizeof (ns_resp_gpn_id_t), sizeof (ns_resp_gpn_id_t),
12371 0, sleep);
12372 if (ns_cmd == NULL) {
12373 kmem_free(listptr, sizeof (fc_portmap_t) * count);
12374
12375 mutex_enter(&port->fp_mutex);
12376 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
12377 --port->fp_rscn_count;
12378 }
12379 mutex_exit(&port->fp_mutex);
12380
12381 return;
12382 }
12383
12384 ns_cmd->ns_cmd_code = NS_GPN_ID;
12385
12386 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_rscn: s_id=%x, d_id=%x,"
12387 "type=%x, f_ctl=%x seq_id=%x, ox_id=%x, rx_id=%x"
12388 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id,
12389 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id,
12390 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro);
12391
12392 /* Only proceed if we can allocate nvname and the nvlist */
12393 if ((nvname = kmem_zalloc(RSCN_EVENT_NAME_LEN, KM_NOSLEEP)) != NULL &&
12394 nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
12395 KM_NOSLEEP) == DDI_SUCCESS) {
12396 if (!(attr_list && nvlist_add_uint32(attr_list, "instance",
12397 port->fp_instance) == DDI_SUCCESS &&
12398 nvlist_add_byte_array(attr_list, "port-wwn",
12399 port->fp_service_params.nport_ww_name.raw_wwn,
12400 sizeof (la_wwn_t)) == DDI_SUCCESS)) {
12401 nvlist_free(attr_list);
12402 attr_list = NULL;
12403 }
12404 }
12405
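	/*
	 * Walk the affected-port pages: each page is a 4-byte entry in which
	 * aff_format selects the RSCN address format (port, area, domain or
	 * fabric) and aff_d_id carries the affected D_ID.
	 */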
12406 for (listindex = 0; len; len -= FP_PORT_IDENTIFIER_LEN, page++) {
12407 /* Add affected page to the event payload */
12408 if (attr_list != NULL) {
12409 (void) snprintf(nvname, RSCN_EVENT_NAME_LEN,
12410 "affected_page_%d", listindex);
12411 if (attr_list && nvlist_add_uint32(attr_list, nvname,
12412 ntohl(*(uint32_t *)page)) != DDI_SUCCESS) {
12413 /* We don't send a partial event, so dump it */
12414 nvlist_free(attr_list);
12415 attr_list = NULL;
12416 }
12417 }
12418 /*
12419 * Query the NS to get the Port WWN for this
12420 * affected D_ID.
12421 */
12422 mask = 0;
12423 switch (page->aff_format & FC_RSCN_ADDRESS_MASK) {
12424 case FC_RSCN_PORT_ADDRESS:
12425 fp_validate_rscn_page(port, page, job, ns_cmd,
12426 listptr, &listindex, sleep);
12427
12428 if (listindex == 0) {
12429 /*
12430 * We essentially did not process this RSCN. So,
12431 * ULPs are not going to be called and so we
12432 * decrement the rscn_count
12433 */
12434 mutex_enter(&port->fp_mutex);
12435 if (--port->fp_rscn_count ==
12436 FC_INVALID_RSCN_COUNT) {
12437 --port->fp_rscn_count;
12438 }
12439 mutex_exit(&port->fp_mutex);
12440 }
12441 break;
12442
12443 case FC_RSCN_AREA_ADDRESS:
12444 mask = 0xFFFF00;
12445 /* FALLTHROUGH */
12446
12447 case FC_RSCN_DOMAIN_ADDRESS:
12448 if (!mask) {
12449 mask = 0xFF0000;
12450 }
12451 fp_validate_area_domain(port, page->aff_d_id, mask,
12452 job, sleep);
12453 break;
12454
12455 case FC_RSCN_FABRIC_ADDRESS:
12456 /*
12457 * We need to discover all the devices on this
12458 * port.
12459 */
12460 fp_validate_area_domain(port, 0, 0, job, sleep);
12461 break;
12462
12463 default:
12464 break;
12465 }
12466 }
12467 if (attr_list != NULL) {
12468 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW,
12469 EC_SUNFC, ESC_SUNFC_PORT_RSCN, attr_list,
12470 NULL, DDI_SLEEP);
12471 nvlist_free(attr_list);
12472 } else {
12473 FP_TRACE(FP_NHEAD1(9, 0),
12474 "RSCN handled, but event not sent to userland");
12475 }
12476 if (nvname != NULL) {
12477 kmem_free(nvname, RSCN_EVENT_NAME_LEN);
12478 }
12479
12480 if (ns_cmd) {
12481 fctl_free_ns_cmd(ns_cmd);
12482 }
12483
12484 if (listindex) {
12485 #ifdef DEBUG
12486 page = (fc_affected_id_t *)(buf->ub_buffer +
12487 sizeof (fc_rscn_t));
12488
12489 if (listptr->map_did.port_id != page->aff_d_id) {
12490 FP_TRACE(FP_NHEAD1(9, 0),
12491 "PORT RSCN: processed=%x, reporting=%x",
12492 listptr->map_did.port_id, page->aff_d_id);
12493 }
12494 #endif
12495
12496 (void) fp_ulp_devc_cb(port, listptr, listindex, count,
12497 sleep, 0);
12498 } else {
12499 kmem_free(listptr, sizeof (fc_portmap_t) * count);
12500 }
12501 }
12502
12503
12504 /*
12505 * Fill out old map for ULPs with fp_mutex, fd_mutex and pd_mutex held
12506 */
12507 static void
12508 fp_fillout_old_map_held(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag)
12509 {
12510 int is_switch;
12511 int initiator;
12512 fc_local_port_t *port;
12513
12514 port = pd->pd_port;
12515
12516 /* This function has the following bunch of assumptions */
12517 ASSERT(port != NULL);
12518 ASSERT(MUTEX_HELD(&port->fp_mutex));
12519 ASSERT(MUTEX_HELD(&pd->pd_remote_nodep->fd_mutex));
12520 ASSERT(MUTEX_HELD(&pd->pd_mutex));
12521
12522 pd->pd_state = PORT_DEVICE_INVALID;
12523 pd->pd_type = PORT_DEVICE_OLD;
12524 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0;
12525 is_switch = FC_IS_TOP_SWITCH(port->fp_topology);
12526
12527 fctl_delist_did_table(port, pd);
12528 fctl_delist_pwwn_table(port, pd);
12529
12530 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map_held: port=%p, d_id=%x"
12531 " removed the PD=%p from DID and PWWN tables",
12532 port, pd->pd_port_id.port_id, pd);
12533
12534 if ((!flag) && port && initiator && is_switch) {
12535 (void) fctl_add_orphan_held(port, pd);
12536 }
12537 fctl_copy_portmap_held(map, pd);
12538 map->map_pd = pd;
12539 }
12540
12541 /*
12542 * Fill out old map for ULPs
12543 */
12544 static void
12545 fp_fillout_old_map(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag)
12546 {
12547 int is_switch;
12548 int initiator;
12549 fc_local_port_t *port;
12550
12551 mutex_enter(&pd->pd_mutex);
12552 port = pd->pd_port;
12553 mutex_exit(&pd->pd_mutex);
12554
12555 mutex_enter(&port->fp_mutex);
12556 mutex_enter(&pd->pd_mutex);
12557
12558 pd->pd_state = PORT_DEVICE_INVALID;
12559 pd->pd_type = PORT_DEVICE_OLD;
12560 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0;
12561 is_switch = FC_IS_TOP_SWITCH(port->fp_topology);
12562
12563 fctl_delist_did_table(port, pd);
12564 fctl_delist_pwwn_table(port, pd);
12565
12566 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map: port=%p, d_id=%x"
12567 " removed the PD=%p from DID and PWWN tables",
12568 port, pd->pd_port_id.port_id, pd);
12569
12570 mutex_exit(&pd->pd_mutex);
12571 mutex_exit(&port->fp_mutex);
12572
12573 ASSERT(port != NULL);
12574 if ((!flag) && port && initiator && is_switch) {
12575 (void) fctl_add_orphan(port, pd, KM_NOSLEEP);
12576 }
12577 fctl_copy_portmap(map, pd);
12578 map->map_pd = pd;
12579 }
12580
12581
12582 /*
12583 * Fillout Changed Map for ULPs
12584 */
12585 static void
12586 fp_fillout_changed_map(fc_portmap_t *map, fc_remote_port_t *pd,
12587 uint32_t *new_did, la_wwn_t *new_pwwn)
12588 {
12589 ASSERT(MUTEX_HELD(&pd->pd_mutex));
12590
12591 pd->pd_type = PORT_DEVICE_CHANGED;
12592 if (new_did) {
12593 pd->pd_port_id.port_id = *new_did;
12594 }
12595 if (new_pwwn) {
12596 pd->pd_port_name = *new_pwwn;
12597 }
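	/*
	 * pd_mutex is dropped around fctl_copy_portmap() since that routine
	 * (unlike the fctl_copy_portmap_held() variant used elsewhere)
	 * apparently takes the lock itself.
	 */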
12598 mutex_exit(&pd->pd_mutex);
12599
12600 fctl_copy_portmap(map, pd);
12601
12602 mutex_enter(&pd->pd_mutex);
12603 pd->pd_type = PORT_DEVICE_NOCHANGE;
12604 }
12605
12606
12607 /*
12608 * Fillout New Name Server map
12609 */
12610 static void
12611 fp_fillout_new_nsmap(fc_local_port_t *port, ddi_acc_handle_t *handle,
12612 fc_portmap_t *port_map, ns_resp_gan_t *gan_resp, uint32_t d_id)
12613 {
12614 ASSERT(!MUTEX_HELD(&port->fp_mutex));
12615
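	/*
	 * A non-NULL handle means the GA_NXT response still lives in the
	 * FCA's response buffer and must be copied out via FC_GET_RSP;
	 * otherwise the response is assumed to already sit in plain kernel
	 * memory and a bcopy() is sufficient.
	 */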
12616 if (handle) {
12617 FC_GET_RSP(port, *handle, (uint8_t *)&port_map->map_pwwn,
12618 (uint8_t *)&gan_resp->gan_pwwn, sizeof (gan_resp->gan_pwwn),
12619 DDI_DEV_AUTOINCR);
12620 FC_GET_RSP(port, *handle, (uint8_t *)&port_map->map_nwwn,
12621 (uint8_t *)&gan_resp->gan_nwwn, sizeof (gan_resp->gan_nwwn),
12622 DDI_DEV_AUTOINCR);
12623 FC_GET_RSP(port, *handle, (uint8_t *)port_map->map_fc4_types,
12624 (uint8_t *)gan_resp->gan_fc4types,
12625 sizeof (gan_resp->gan_fc4types), DDI_DEV_AUTOINCR);
12626 } else {
12627 bcopy(&gan_resp->gan_pwwn, &port_map->map_pwwn,
12628 sizeof (gan_resp->gan_pwwn));
12629 bcopy(&gan_resp->gan_nwwn, &port_map->map_nwwn,
12630 sizeof (gan_resp->gan_nwwn));
12631 bcopy(gan_resp->gan_fc4types, port_map->map_fc4_types,
12632 sizeof (gan_resp->gan_fc4types));
12633 }
12634 port_map->map_did.port_id = d_id;
12635 port_map->map_did.priv_lilp_posit = 0;
12636 port_map->map_hard_addr.hard_addr = 0;
12637 port_map->map_hard_addr.rsvd = 0;
12638 port_map->map_state = PORT_DEVICE_INVALID;
12639 port_map->map_type = PORT_DEVICE_NEW;
12640 port_map->map_flags = 0;
12641 port_map->map_pd = NULL;
12642
12643 (void) fctl_remove_if_orphan(port, &port_map->map_pwwn);
12644
12645 ASSERT(port != NULL);
12646 }
12647
12648
12649 /*
12650 * Perform LINIT ELS
12651 */
12652 static int
12653 fp_remote_lip(fc_local_port_t *port, la_wwn_t *pwwn, int sleep,
12654 job_request_t *job)
12655 {
12656 int rval;
12657 uint32_t d_id;
12658 uint32_t s_id;
12659 uint32_t lfa;
12660 uchar_t class;
12661 uint32_t ret;
12662 fp_cmd_t *cmd;
12663 fc_porttype_t ptype;
12664 fc_packet_t *pkt;
12665 fc_linit_req_t payload;
12666 fc_remote_port_t *pd;
12667
12668 rval = 0;
12669
12670 ASSERT(job != NULL);
12671 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
12672
12673 pd = fctl_get_remote_port_by_pwwn(port, pwwn);
12674 if (pd == NULL) {
12675 fctl_ns_req_t *ns_cmd;
12676
12677 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
12678 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
12679 0, sleep);
12680
12681 if (ns_cmd == NULL) {
12682 return (FC_NOMEM);
12683 }
12684 job->job_result = FC_SUCCESS;
12685 ns_cmd->ns_cmd_code = NS_GID_PN;
12686 ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = *pwwn;
12687
12688 ret = fp_ns_query(port, ns_cmd, job, 1, sleep);
12689 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) {
12690 fctl_free_ns_cmd(ns_cmd);
12691 return (FC_FAILURE);
12692 }
12693 bcopy(ns_cmd->ns_data_buf, (caddr_t)&d_id, sizeof (d_id));
12694 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
12695
12696 fctl_free_ns_cmd(ns_cmd);
12697 lfa = d_id & 0xFFFF00;
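		/*
		 * The LFA (loop fabric address) is the domain/area portion of
		 * the D_ID with the AL_PA byte cleared; the LINIT below is
		 * addressed to it rather than to the N_Port itself.
		 */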
12698
12699 /*
12700 * Given this D_ID, get the port type to see if
12701 * we can do LINIT on the LFA
12702 */
12703 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpt_id_t),
12704 sizeof (ns_resp_gpt_id_t), sizeof (ns_resp_gpt_id_t),
12705 0, sleep);
12706
12707 if (ns_cmd == NULL) {
12708 return (FC_NOMEM);
12709 }
12710
12711 job->job_result = FC_SUCCESS;
12712 ns_cmd->ns_cmd_code = NS_GPT_ID;
12713
12714 ((ns_req_gpt_id_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id;
12715 ((ns_req_gpt_id_t *)
12716 (ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0;
12717
12718 ret = fp_ns_query(port, ns_cmd, job, 1, sleep);
12719 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) {
12720 fctl_free_ns_cmd(ns_cmd);
12721 return (FC_FAILURE);
12722 }
12723 bcopy(ns_cmd->ns_data_buf, (caddr_t)&ptype, sizeof (ptype));
12724
12725 fctl_free_ns_cmd(ns_cmd);
12726
12727 switch (ptype.port_type) {
12728 case FC_NS_PORT_NL:
12729 case FC_NS_PORT_F_NL:
12730 case FC_NS_PORT_FL:
12731 break;
12732
12733 default:
12734 return (FC_FAILURE);
12735 }
12736 } else {
12737 mutex_enter(&pd->pd_mutex);
12738 ptype = pd->pd_porttype;
12739
12740 switch (pd->pd_porttype.port_type) {
12741 case FC_NS_PORT_NL:
12742 case FC_NS_PORT_F_NL:
12743 case FC_NS_PORT_FL:
12744 lfa = pd->pd_port_id.port_id & 0xFFFF00;
12745 break;
12746
12747 default:
12748 mutex_exit(&pd->pd_mutex);
12749 return (FC_FAILURE);
12750 }
12751 mutex_exit(&pd->pd_mutex);
12752 }
12753
12754 mutex_enter(&port->fp_mutex);
12755 s_id = port->fp_port_id.port_id;
12756 class = port->fp_ns_login_class;
12757 mutex_exit(&port->fp_mutex);
12758
12759 cmd = fp_alloc_pkt(port, sizeof (fc_linit_req_t),
12760 sizeof (fc_linit_resp_t), sleep, pd);
12761 if (cmd == NULL) {
12762 return (FC_NOMEM);
12763 }
12764
12765 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
12766 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
12767 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
12768 cmd->cmd_retry_count = fp_retry_count;
12769 cmd->cmd_ulp_pkt = NULL;
12770
12771 pkt = &cmd->cmd_pkt;
12772 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
12773
12774 fp_els_init(cmd, s_id, lfa, fp_linit_intr, job);
12775
12776 /*
12777 * How does LIP work by the way ?
12778 * If the L_Port receives three consecutive identical ordered
12779 * sets whose first two characters (fully decoded) are equal to
12780 * the values shown in Table 3 of FC-AL-2 then the L_Port shall
12781 * recognize a Loop Initialization Primitive sequence. The
12782 * character 3 determines the type of lip:
12783 * LIP(F7) Normal LIP
12784 * LIP(F8) Loop Failure LIP
12785 *
12786 * The possible combination for the 3rd and 4th bytes are:
12787 * F7, F7 Normal Lip - No valid AL_PA
12788 * F8, F8 Loop Failure - No valid AL_PA
12789 * F7, AL_PS Normal Lip - Valid source AL_PA
12790 * F8, AL_PS Loop Failure - Valid source AL_PA
12791 * AL_PD AL_PS Loop reset of AL_PD originated by AL_PS
12792 * And Normal Lip for all other loop members
12793 * 0xFF AL_PS Vendor specific reset of all loop members
12794 *
12795 * Now, it may not always be that we, at the source, may have an
12796 * AL_PS (AL_PA of source) for 4th character slot, so we decide
12797 * to do (Normal Lip, No Valid AL_PA), that means, in the LINIT
12798 * payload we are going to set:
12799 * lip_b3 = 0xF7; Normal LIP
12800 * lip_b4 = 0xF7; No valid source AL_PA
12801 */
12802 payload.ls_code.ls_code = LA_ELS_LINIT;
12803 payload.ls_code.mbz = 0;
12804 payload.rsvd = 0;
12805 payload.func = 0; /* Let Fabric determine the best way */
12806 payload.lip_b3 = 0xF7; /* Normal LIP */
12807 payload.lip_b4 = 0xF7; /* No valid source AL_PA */
12808
12809 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
12810 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
12811
12812 job->job_counter = 1;
12813
12814 ret = fp_sendcmd(port, cmd, port->fp_fca_handle);
12815 if (ret == FC_SUCCESS) {
12816 fp_jobwait(job);
12817 rval = job->job_result;
12818 } else {
12819 rval = FC_FAILURE;
12820 fp_free_pkt(cmd);
12821 }
12822
12823 return (rval);
12824 }
12825
12826
12827 /*
12828 * Fill out the device handles with GAN response
12829 */
12830 static void
12831 fp_stuff_device_with_gan(ddi_acc_handle_t *handle, fc_remote_port_t *pd,
12832 ns_resp_gan_t *gan_resp)
12833 {
12834 fc_remote_node_t *node;
12835 fc_porttype_t type;
12836 fc_local_port_t *port;
12837
12838 ASSERT(pd != NULL);
12839 ASSERT(handle != NULL);
12840
12841 port = pd->pd_port;
12842
12843 FP_TRACE(FP_NHEAD1(1, 0), "GAN PD stuffing; pd=%p,"
12844 " port_id=%x, sym_len=%d fc4-type=%x",
12845 pd, gan_resp->gan_type_id.rsvd,
12846 gan_resp->gan_spnlen, gan_resp->gan_fc4types[0]);
12847
12848 mutex_enter(&pd->pd_mutex);
12849
12850 FC_GET_RSP(port, *handle, (uint8_t *)&type,
12851 (uint8_t *)&gan_resp->gan_type_id, sizeof (type), DDI_DEV_AUTOINCR);
12852
12853 pd->pd_porttype.port_type = type.port_type;
12854 pd->pd_porttype.rsvd = 0;
12855
12856 pd->pd_spn_len = gan_resp->gan_spnlen;
12857 if (pd->pd_spn_len) {
12858 FC_GET_RSP(port, *handle, (uint8_t *)pd->pd_spn,
12859 (uint8_t *)gan_resp->gan_spname, pd->pd_spn_len,
12860 DDI_DEV_AUTOINCR);
12861 }
12862
12863 FC_GET_RSP(port, *handle, (uint8_t *)pd->pd_ip_addr,
12864 (uint8_t *)gan_resp->gan_ip, sizeof (pd->pd_ip_addr),
12865 DDI_DEV_AUTOINCR);
12866 FC_GET_RSP(port, *handle, (uint8_t *)&pd->pd_cos,
12867 (uint8_t *)&gan_resp->gan_cos, sizeof (pd->pd_cos),
12868 DDI_DEV_AUTOINCR);
12869 FC_GET_RSP(port, *handle, (uint8_t *)pd->pd_fc4types,
12870 (uint8_t *)gan_resp->gan_fc4types, sizeof (pd->pd_fc4types),
12871 DDI_DEV_AUTOINCR);
12872
12873 node = pd->pd_remote_nodep;
12874 mutex_exit(&pd->pd_mutex);
12875
12876 mutex_enter(&node->fd_mutex);
12877
12878 FC_GET_RSP(port, *handle, (uint8_t *)node->fd_ipa,
12879 (uint8_t *)gan_resp->gan_ipa, sizeof (node->fd_ipa),
12880 DDI_DEV_AUTOINCR);
12881
12882 node->fd_snn_len = gan_resp->gan_snnlen;
12883 if (node->fd_snn_len) {
12884 FC_GET_RSP(port, *handle, (uint8_t *)node->fd_snn,
12885 (uint8_t *)gan_resp->gan_snname, node->fd_snn_len,
12886 DDI_DEV_AUTOINCR);
12887 }
12888
12889 mutex_exit(&node->fd_mutex);
12890 }
12891
12892
12893 /*
12894 * Handles all NS Queries (also means that this function
12895 * doesn't handle NS object registration)
12896 */
12897 static int
12898 fp_ns_query(fc_local_port_t *port, fctl_ns_req_t *ns_cmd, job_request_t *job,
12899 int polled, int sleep)
12900 {
12901 int rval;
12902 fp_cmd_t *cmd;
12903
12904 ASSERT(!MUTEX_HELD(&port->fp_mutex));
12905
12906 if (ns_cmd->ns_cmd_code == NS_GA_NXT) {
12907 FP_TRACE(FP_NHEAD1(1, 0), "fp_ns_query GA_NXT fp %x pd %x",
12908 port->fp_port_id.port_id, ns_cmd->ns_gan_sid);
12909 }
12910
12911 if (ns_cmd->ns_cmd_size == 0) {
12912 return (FC_FAILURE);
12913 }
12914
12915 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
12916 ns_cmd->ns_cmd_size, sizeof (fc_ct_header_t) +
12917 ns_cmd->ns_resp_size, sleep, NULL);
12918 if (cmd == NULL) {
12919 return (FC_NOMEM);
12920 }
12921
12922 fp_ct_init(port, cmd, ns_cmd, ns_cmd->ns_cmd_code, ns_cmd->ns_cmd_buf,
12923 ns_cmd->ns_cmd_size, ns_cmd->ns_resp_size, job);
12924
12925 if (polled) {
12926 job->job_counter = 1;
12927 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
12928 }
12929 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
12930 if (rval != FC_SUCCESS) {
12931 job->job_result = rval;
12932 fp_iodone(cmd);
12933 if (polled == 0) {
12934 /*
12935 * Return FC_SUCCESS to indicate that
12936 * fp_iodone is performed already.
12937 */
12938 rval = FC_SUCCESS;
12939 }
12940 }
12941
12942 if (polled) {
12943 fp_jobwait(job);
12944 rval = job->job_result;
12945 }
12946
12947 return (rval);
12948 }
12949
12950
12951 /*
12952 * Initialize Common Transport request
12953 */
12954 static void
12955 fp_ct_init(fc_local_port_t *port, fp_cmd_t *cmd, fctl_ns_req_t *ns_cmd,
12956 uint16_t cmd_code, caddr_t cmd_buf, uint16_t cmd_len,
12957 uint16_t resp_len, job_request_t *job)
12958 {
12959 uint32_t s_id;
12960 uchar_t class;
12961 fc_packet_t *pkt;
12962 fc_ct_header_t ct;
12963
12964 ASSERT(!MUTEX_HELD(&port->fp_mutex));
12965
12966 mutex_enter(&port->fp_mutex);
12967 s_id = port->fp_port_id.port_id;
12968 class = port->fp_ns_login_class;
12969 mutex_exit(&port->fp_mutex);
12970
12971 cmd->cmd_job = job;
12972 cmd->cmd_private = ns_cmd;
12973 pkt = &cmd->cmd_pkt;
12974
12975 ct.ct_rev = CT_REV;
12976 ct.ct_inid = 0;
12977 ct.ct_fcstype = FCSTYPE_DIRECTORY;
12978 ct.ct_fcssubtype = FCSSUB_DS_NAME_SERVER;
12979 ct.ct_options = 0;
12980 ct.ct_reserved1 = 0;
12981 ct.ct_cmdrsp = cmd_code;
12982 ct.ct_aiusize = resp_len >> 2;
12983 ct.ct_reserved2 = 0;
12984 ct.ct_reason = 0;
12985 ct.ct_expln = 0;
12986 ct.ct_vendor = 0;
12987
12988 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&ct,
12989 (uint8_t *)pkt->pkt_cmd, sizeof (ct), DDI_DEV_AUTOINCR);
12990
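	/*
	 * Frame the CT IU for the directory service: d_id 0xFFFFFC is the
	 * well-known address of the fabric Name Server.
	 */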
12991 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_UNSOL_CONTROL;
12992 pkt->pkt_cmd_fhdr.d_id = 0xFFFFFC;
12993 pkt->pkt_cmd_fhdr.s_id = s_id;
12994 pkt->pkt_cmd_fhdr.type = FC_TYPE_FC_SERVICES;
12995 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE |
12996 F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
12997 pkt->pkt_cmd_fhdr.seq_id = 0;
12998 pkt->pkt_cmd_fhdr.df_ctl = 0;
12999 pkt->pkt_cmd_fhdr.seq_cnt = 0;
13000 pkt->pkt_cmd_fhdr.ox_id = 0xffff;
13001 pkt->pkt_cmd_fhdr.rx_id = 0xffff;
13002 pkt->pkt_cmd_fhdr.ro = 0;
13003 pkt->pkt_cmd_fhdr.rsvd = 0;
13004
13005 pkt->pkt_comp = fp_ns_intr;
13006 pkt->pkt_ulp_private = (opaque_t)cmd;
13007 pkt->pkt_timeout = FP_NS_TIMEOUT;
13008
13009 if (cmd_buf) {
13010 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)cmd_buf,
13011 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
13012 cmd_len, DDI_DEV_AUTOINCR);
13013 }
13014
13015 cmd->cmd_transport = port->fp_fca_tran->fca_transport;
13016
13017 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
13018 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
13019 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE;
13020 cmd->cmd_retry_count = fp_retry_count;
13021 cmd->cmd_ulp_pkt = NULL;
13022 }
13023
13024
13025 /*
13026 * Name Server request interrupt routine
13027 */
13028 static void
13029 fp_ns_intr(fc_packet_t *pkt)
13030 {
13031 fp_cmd_t *cmd;
13032 fc_local_port_t *port;
13033 fc_ct_header_t resp_hdr;
13034 fc_ct_header_t cmd_hdr;
13035 fctl_ns_req_t *ns_cmd;
13036
13037 cmd = pkt->pkt_ulp_private;
13038 port = cmd->cmd_port;
13039
13040 mutex_enter(&port->fp_mutex);
13041 port->fp_out_fpcmds--;
13042 mutex_exit(&port->fp_mutex);
13043
13044 FC_GET_RSP(port, pkt->pkt_cmd_acc, (uint8_t *)&cmd_hdr,
13045 (uint8_t *)pkt->pkt_cmd, sizeof (cmd_hdr), DDI_DEV_AUTOINCR);
13046 ns_cmd = (fctl_ns_req_t *)
13047 (((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private);
13048 if (!FP_IS_PKT_ERROR(pkt)) {
13049 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp_hdr,
13050 (uint8_t *)pkt->pkt_resp, sizeof (resp_hdr),
13051 DDI_DEV_AUTOINCR);
13052
13053 /*
13054 * On x86 architectures, make sure the resp_hdr is big endian.
13055 * This macro is a NOP on sparc architectures mainly because
13056 * we don't want to end up wasting time since the end result
13057 * is going to be the same.
13058 */
13059 MAKE_BE_32(&resp_hdr);
13060
13061 if (ns_cmd) {
13062 /*
13063 * Always copy out the response CT_HDR
13064 */
13065 bcopy(&resp_hdr, &ns_cmd->ns_resp_hdr,
13066 sizeof (resp_hdr));
13067 }
13068
13069 if (resp_hdr.ct_cmdrsp == FS_RJT_IU) {
13070 pkt->pkt_state = FC_PKT_FS_RJT;
13071 pkt->pkt_reason = resp_hdr.ct_reason;
13072 pkt->pkt_expln = resp_hdr.ct_expln;
13073 }
13074 }
13075
13076 if (FP_IS_PKT_ERROR(pkt)) {
13077 if (ns_cmd) {
13078 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) {
13079 ASSERT(ns_cmd->ns_pd != NULL);
13080
13081 /* Mark it OLD if not already done */
13082 mutex_enter(&ns_cmd->ns_pd->pd_mutex);
13083 ns_cmd->ns_pd->pd_type = PORT_DEVICE_OLD;
13084 mutex_exit(&ns_cmd->ns_pd->pd_mutex);
13085 }
13086
13087 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) {
13088 fctl_free_ns_cmd(ns_cmd);
13089 ((fp_cmd_t *)
13090 (pkt->pkt_ulp_private))->cmd_private = NULL;
13091 }
13092
13093 }
13094
13095 FP_TRACE(FP_NHEAD2(1, 0), "%x NS failure pkt state=%x "
13096 "reason=%x, expln=%x, NSCMD=%04X, NSRSP=%04X",
13097 port->fp_port_id.port_id, pkt->pkt_state,
13098 pkt->pkt_reason, pkt->pkt_expln,
13099 cmd_hdr.ct_cmdrsp, resp_hdr.ct_cmdrsp);
13100
13101 (void) fp_common_intr(pkt, 1);
13102
13103 return;
13104 }
13105
13106 if (resp_hdr.ct_cmdrsp != FS_ACC_IU) {
13107 uint32_t d_id;
13108 fc_local_port_t *port;
13109 fp_cmd_t *cmd;
13110
13111 d_id = pkt->pkt_cmd_fhdr.d_id;
13112 cmd = pkt->pkt_ulp_private;
13113 port = cmd->cmd_port;
13114 FP_TRACE(FP_NHEAD2(9, 0),
13115 "Bogus NS response received for D_ID=%x", d_id);
13116 }
13117
13118 if (cmd_hdr.ct_cmdrsp == NS_GA_NXT) {
13119 fp_gan_handler(pkt, ns_cmd);
13120 return;
13121 }
13122
13123 if (cmd_hdr.ct_cmdrsp >= NS_GPN_ID &&
13124 cmd_hdr.ct_cmdrsp <= NS_GID_PT) {
13125 if (ns_cmd) {
13126 if ((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0) {
13127 fp_ns_query_handler(pkt, ns_cmd);
13128 return;
13129 }
13130 }
13131 }
13132
13133 fp_iodone(pkt->pkt_ulp_private);
13134 }
13135
13136
13137 /*
13138 * Process NS_GAN response
13139 */
13140 static void
13141 fp_gan_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd)
13142 {
13143 int my_did;
13144 fc_portid_t d_id;
13145 fp_cmd_t *cmd;
13146 fc_local_port_t *port;
13147 fc_remote_port_t *pd;
13148 ns_req_gan_t gan_req;
13149 ns_resp_gan_t *gan_resp;
13150
13151 ASSERT(ns_cmd != NULL);
13152
13153 cmd = pkt->pkt_ulp_private;
13154 port = cmd->cmd_port;
13155
13156 gan_resp = (ns_resp_gan_t *)(pkt->pkt_resp + sizeof (fc_ct_header_t));
13157
13158 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&d_id,
13159 (uint8_t *)&gan_resp->gan_type_id, sizeof (d_id), DDI_DEV_AUTOINCR);
13160
13161 *(uint32_t *)&d_id = BE_32(*(uint32_t *)&d_id);
13162
13163 /*
13164 	 * In this case the priv_lilp_posit field actually represents
13165 	 * the relative position on a private loop.
13166 * So zero it while dealing with Port Identifiers.
13167 */
13168 d_id.priv_lilp_posit = 0;
13169 pd = fctl_get_remote_port_by_did(port, d_id.port_id);
13170 if (ns_cmd->ns_gan_sid == d_id.port_id) {
13171 /*
13172 * We've come a full circle; time to get out.
13173 */
13174 fp_iodone(cmd);
13175 return;
13176 }
13177
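	/*
	 * Remember the first D_ID the Name Server hands back so that the
	 * wrap-around check above can tell when the GA_NXT walk has come
	 * full circle.
	 */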
13178 if (ns_cmd->ns_gan_sid == FCTL_GAN_START_ID) {
13179 ns_cmd->ns_gan_sid = d_id.port_id;
13180 }
13181
13182 mutex_enter(&port->fp_mutex);
13183 my_did = (d_id.port_id == port->fp_port_id.port_id) ? 1 : 0;
13184 mutex_exit(&port->fp_mutex);
13185
13186 FP_TRACE(FP_NHEAD1(1, 0), "GAN response; port=%p, fp %x pd %x", port,
13187 port->fp_port_id.port_id, d_id.port_id);
13188 if (my_did == 0) {
13189 la_wwn_t pwwn;
13190 la_wwn_t nwwn;
13191
13192 FP_TRACE(FP_NHEAD1(1, 0), "GAN response details; "
13193 "port=%p, d_id=%x, type_id=%x, "
13194 "pwwn=%x %x %x %x %x %x %x %x, "
13195 "nwwn=%x %x %x %x %x %x %x %x",
13196 port, d_id.port_id, gan_resp->gan_type_id,
13197
13198 gan_resp->gan_pwwn.raw_wwn[0],
13199 gan_resp->gan_pwwn.raw_wwn[1],
13200 gan_resp->gan_pwwn.raw_wwn[2],
13201 gan_resp->gan_pwwn.raw_wwn[3],
13202 gan_resp->gan_pwwn.raw_wwn[4],
13203 gan_resp->gan_pwwn.raw_wwn[5],
13204 gan_resp->gan_pwwn.raw_wwn[6],
13205 gan_resp->gan_pwwn.raw_wwn[7],
13206
13207 gan_resp->gan_nwwn.raw_wwn[0],
13208 gan_resp->gan_nwwn.raw_wwn[1],
13209 gan_resp->gan_nwwn.raw_wwn[2],
13210 gan_resp->gan_nwwn.raw_wwn[3],
13211 gan_resp->gan_nwwn.raw_wwn[4],
13212 gan_resp->gan_nwwn.raw_wwn[5],
13213 gan_resp->gan_nwwn.raw_wwn[6],
13214 gan_resp->gan_nwwn.raw_wwn[7]);
13215
13216 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&nwwn,
13217 (uint8_t *)&gan_resp->gan_nwwn, sizeof (nwwn),
13218 DDI_DEV_AUTOINCR);
13219
13220 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&pwwn,
13221 (uint8_t *)&gan_resp->gan_pwwn, sizeof (pwwn),
13222 DDI_DEV_AUTOINCR);
13223
13224 if (ns_cmd->ns_flags & FCTL_NS_CREATE_DEVICE && pd == NULL) {
13225 FP_TRACE(FP_NHEAD1(1, 0), "fp %x gan_hander create"
13226 "pd %x", port->fp_port_id.port_id, d_id.port_id);
13227 pd = fctl_create_remote_port(port, &nwwn, &pwwn,
13228 d_id.port_id, PD_PLOGI_INITIATOR, KM_NOSLEEP);
13229 }
13230 if (pd != NULL) {
13231 fp_stuff_device_with_gan(&pkt->pkt_resp_acc,
13232 pd, gan_resp);
13233 }
13234
13235 if (ns_cmd->ns_flags & FCTL_NS_GET_DEV_COUNT) {
13236 *((int *)ns_cmd->ns_data_buf) += 1;
13237 }
13238
13239 if (ns_cmd->ns_flags & FCTL_NS_FILL_NS_MAP) {
13240 ASSERT((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0);
13241
13242 if (ns_cmd->ns_flags & FCTL_NS_BUF_IS_USERLAND) {
13243 fc_port_dev_t *userbuf;
13244
13245 userbuf = ((fc_port_dev_t *)
13246 ns_cmd->ns_data_buf) +
13247 ns_cmd->ns_gan_index++;
13248
13249 userbuf->dev_did = d_id;
13250
13251 FC_GET_RSP(port, pkt->pkt_resp_acc,
13252 (uint8_t *)userbuf->dev_type,
13253 (uint8_t *)gan_resp->gan_fc4types,
13254 sizeof (userbuf->dev_type),
13255 DDI_DEV_AUTOINCR);
13256
13257 userbuf->dev_nwwn = nwwn;
13258 userbuf->dev_pwwn = pwwn;
13259
13260 if (pd != NULL) {
13261 mutex_enter(&pd->pd_mutex);
13262 userbuf->dev_state = pd->pd_state;
13263 userbuf->dev_hard_addr =
13264 pd->pd_hard_addr;
13265 mutex_exit(&pd->pd_mutex);
13266 } else {
13267 userbuf->dev_state =
13268 PORT_DEVICE_INVALID;
13269 }
13270 } else if (ns_cmd->ns_flags &
13271 FCTL_NS_BUF_IS_FC_PORTMAP) {
13272 fc_portmap_t *map;
13273
13274 map = ((fc_portmap_t *)
13275 ns_cmd->ns_data_buf) +
13276 ns_cmd->ns_gan_index++;
13277
13278 /*
13279 * First fill it like any new map
13280 * and update the port device info
13281 * below.
13282 */
13283 fp_fillout_new_nsmap(port, &pkt->pkt_resp_acc,
13284 map, gan_resp, d_id.port_id);
13285 if (pd != NULL) {
13286 fctl_copy_portmap(map, pd);
13287 } else {
13288 map->map_state = PORT_DEVICE_INVALID;
13289 map->map_type = PORT_DEVICE_NOCHANGE;
13290 }
13291 } else {
13292 caddr_t dst_ptr;
13293
13294 dst_ptr = ns_cmd->ns_data_buf +
13295 (NS_GAN_RESP_LEN) * ns_cmd->ns_gan_index++;
13296
13297 FC_GET_RSP(port, pkt->pkt_resp_acc,
13298 (uint8_t *)dst_ptr, (uint8_t *)gan_resp,
13299 NS_GAN_RESP_LEN, DDI_DEV_AUTOINCR);
13300 }
13301 } else {
13302 ns_cmd->ns_gan_index++;
13303 }
13304 if (ns_cmd->ns_gan_index >= ns_cmd->ns_gan_max) {
13305 fp_iodone(cmd);
13306 return;
13307 }
13308 }
13309
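	/*
	 * Seed the next GA_NXT request with the D_ID just returned; the Name
	 * Server replies with the entry following the given port identifier,
	 * so repeating this walks the entire directory one entry at a time.
	 */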
13310 gan_req.pid = d_id;
13311
13312 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&gan_req,
13313 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
13314 sizeof (gan_req), DDI_DEV_AUTOINCR);
13315
13316 if (cmd->cmd_transport(port->fp_fca_handle, pkt) != FC_SUCCESS) {
13317 pkt->pkt_state = FC_PKT_TRAN_ERROR;
13318 fp_iodone(cmd);
13319 } else {
13320 mutex_enter(&port->fp_mutex);
13321 port->fp_out_fpcmds++;
13322 mutex_exit(&port->fp_mutex);
13323 }
13324 }
13325
13326
13327 /*
13328 * Handle NS Query interrupt
13329 */
13330 static void
13331 fp_ns_query_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd)
13332 {
13333 fp_cmd_t *cmd;
13334 fc_local_port_t *port;
13335 caddr_t src_ptr;
13336 uint32_t xfer_len;
13337
13338 cmd = pkt->pkt_ulp_private;
13339 port = cmd->cmd_port;
13340
13341 xfer_len = ns_cmd->ns_resp_size;
13342
13343 FP_TRACE(FP_NHEAD1(1, 0), "NS Query response, cmd_code=%x, xfer_len=%x",
13344 ns_cmd->ns_cmd_code, xfer_len);
13345
13346 if (ns_cmd->ns_cmd_code == NS_GPN_ID) {
13347 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t);
13348
13349 FP_TRACE(FP_NHEAD1(6, 0), "GPN_ID results; %x %x %x %x %x",
13350 src_ptr[0], src_ptr[1], src_ptr[2], src_ptr[3], src_ptr[4]);
13351 }
13352
13353 if (xfer_len <= ns_cmd->ns_data_len) {
13354 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t);
13355 FC_GET_RSP(port, pkt->pkt_resp_acc,
13356 (uint8_t *)ns_cmd->ns_data_buf,
13357 (uint8_t *)src_ptr, xfer_len, DDI_DEV_AUTOINCR);
13358 }
13359
13360 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) {
13361 ASSERT(ns_cmd->ns_pd != NULL);
13362
13363 mutex_enter(&ns_cmd->ns_pd->pd_mutex);
13364 if (ns_cmd->ns_pd->pd_type == PORT_DEVICE_OLD) {
13365 ns_cmd->ns_pd->pd_type = PORT_DEVICE_NOCHANGE;
13366 }
13367 mutex_exit(&ns_cmd->ns_pd->pd_mutex);
13368 }
13369
13370 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) {
13371 fctl_free_ns_cmd(ns_cmd);
13372 ((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private = NULL;
13373 }
13374 fp_iodone(cmd);
13375 }
13376
13377
13378 /*
13379 * Handle unsolicited ADISC ELS request
13380 */
13381 static void
13382 fp_handle_unsol_adisc(fc_local_port_t *port, fc_unsol_buf_t *buf,
13383 fc_remote_port_t *pd, job_request_t *job)
13384 {
13385 int rval;
13386 fp_cmd_t *cmd;
13387
13388 FP_TRACE(FP_NHEAD1(5, 0), "ADISC; port=%p, D_ID=%x state=%x, pd=%p",
13389 port, pd->pd_port_id.port_id, pd->pd_state, pd);
13390 mutex_enter(&pd->pd_mutex);
13391 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
13392 mutex_exit(&pd->pd_mutex);
13393 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
13394 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
13395 0, KM_SLEEP, pd);
13396 if (cmd != NULL) {
13397 fp_els_rjt_init(port, cmd, buf,
13398 FC_ACTION_NON_RETRYABLE,
13399 FC_REASON_INVALID_LINK_CTRL, job);
13400
13401 if (fp_sendcmd(port, cmd,
13402 port->fp_fca_handle) != FC_SUCCESS) {
13403 fp_free_pkt(cmd);
13404 }
13405 }
13406 }
13407 } else {
13408 mutex_exit(&pd->pd_mutex);
13409 /*
13410 		 * Yes, yes, we don't have a hard address. But we
13411 		 * should still respond. Huh ? Visit 21.19.2
13412 * of FC-PH-2 which essentially says that if an
13413 * NL_Port doesn't have a hard address, or if a port
13414 * does not have FC-AL capability, it shall report
13415 * zeroes in this field.
13416 */
13417 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t),
13418 0, KM_SLEEP, pd);
13419 if (cmd == NULL) {
13420 return;
13421 }
13422 fp_adisc_acc_init(port, cmd, buf, job);
13423 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
13424 if (rval != FC_SUCCESS) {
13425 fp_free_pkt(cmd);
13426 }
13427 }
13428 }
13429
13430
13431 /*
13432 * Initialize ADISC response.
13433 */
13434 static void
13435 fp_adisc_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf,
13436 job_request_t *job)
13437 {
13438 fc_packet_t *pkt;
13439 la_els_adisc_t payload;
13440
13441 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
13442 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
13443 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
13444 cmd->cmd_retry_count = 1;
13445 cmd->cmd_ulp_pkt = NULL;
13446
13447 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
13448 cmd->cmd_job = job;
13449
13450 pkt = &cmd->cmd_pkt;
13451
13452 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
13453
13454 payload.ls_code.ls_code = LA_ELS_ACC;
13455 payload.ls_code.mbz = 0;
13456
13457 mutex_enter(&port->fp_mutex);
13458 payload.nport_id = port->fp_port_id;
13459 payload.hard_addr = port->fp_hard_addr;
13460 mutex_exit(&port->fp_mutex);
13461
13462 payload.port_wwn = port->fp_service_params.nport_ww_name;
13463 payload.node_wwn = port->fp_service_params.node_ww_name;
13464
13465 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
13466 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
13467 }
13468
13469
13470 /*
13471 * Hold and Install the requested ULP drivers
13472 */
13473 static void
13474 fp_load_ulp_modules(dev_info_t *dip, fc_local_port_t *port)
13475 {
13476 int len;
13477 int count;
13478 int data_len;
13479 major_t ulp_major;
13480 caddr_t ulp_name;
13481 caddr_t data_ptr;
13482 caddr_t data_buf;
13483
13484 ASSERT(!MUTEX_HELD(&port->fp_mutex));
13485
13486 data_buf = NULL;
13487 if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
13488 DDI_PROP_DONTPASS, "load-ulp-list",
13489 (caddr_t)&data_buf, &data_len) != DDI_PROP_SUCCESS) {
13490 return;
13491 }
13492
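	/*
	 * The "load-ulp-list" property is parsed below as a packed list of
	 * strings: the first string is the decimal count of ULP names and is
	 * followed by that many NUL-terminated driver names.
	 */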
13493 len = strlen(data_buf);
13494 port->fp_ulp_nload = fctl_atoi(data_buf, 10);
13495
13496 data_ptr = data_buf + len + 1;
13497 for (count = 0; count < port->fp_ulp_nload; count++) {
13498 len = strlen(data_ptr) + 1;
13499 ulp_name = kmem_zalloc(len, KM_SLEEP);
13500 bcopy(data_ptr, ulp_name, len);
13501
13502 ulp_major = ddi_name_to_major(ulp_name);
13503
13504 if (ulp_major != (major_t)-1) {
13505 if (modload("drv", ulp_name) < 0) {
13506 fp_printf(port, CE_NOTE, FP_LOG_ONLY,
13507 0, NULL, "failed to load %s",
13508 ulp_name);
13509 }
13510 } else {
13511 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
13512 "%s isn't a valid driver", ulp_name);
13513 }
13514
13515 kmem_free(ulp_name, len);
13516 data_ptr += len; /* Skip to next field */
13517 }
13518
13519 /*
13520 * Free the memory allocated by DDI
13521 */
13522 if (data_buf != NULL) {
13523 kmem_free(data_buf, data_len);
13524 }
13525 }
13526
13527
13528 /*
13529 * Perform LOGO operation
13530 */
13531 static int
13532 fp_logout(fc_local_port_t *port, fc_remote_port_t *pd, job_request_t *job)
13533 {
13534 int rval;
13535 fp_cmd_t *cmd;
13536
13537 ASSERT(!MUTEX_HELD(&port->fp_mutex));
13538 ASSERT(!MUTEX_HELD(&pd->pd_mutex));
13539
13540 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t),
13541 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd);
13542
13543 mutex_enter(&port->fp_mutex);
13544 mutex_enter(&pd->pd_mutex);
13545
13546 ASSERT(pd->pd_state == PORT_DEVICE_LOGGED_IN);
13547 ASSERT(pd->pd_login_count == 1);
13548
13549 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
13550 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
13551 cmd->cmd_flags = 0;
13552 cmd->cmd_retry_count = 1;
13553 cmd->cmd_ulp_pkt = NULL;
13554
13555 fp_logo_init(pd, cmd, job);
13556
13557 mutex_exit(&pd->pd_mutex);
13558 mutex_exit(&port->fp_mutex);
13559
13560 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
13561 if (rval != FC_SUCCESS) {
13562 fp_iodone(cmd);
13563 }
13564
13565 return (rval);
13566 }
13567
13568
13569 /*
13570 * Perform Port attach callbacks to registered ULPs
13571 */
13572 static void
13573 fp_attach_ulps(fc_local_port_t *port, fc_attach_cmd_t cmd)
13574 {
13575 fp_soft_attach_t *att;
13576
13577 att = kmem_zalloc(sizeof (*att), KM_SLEEP);
13578 att->att_cmd = cmd;
13579 att->att_port = port;
13580
13581 /*
13582 * We need to remember whether or not fctl_busy_port
13583 * succeeded so we know whether or not to call
13584 * fctl_idle_port when the task is complete.
13585 */
13586
13587 if (fctl_busy_port(port) == 0) {
13588 att->att_need_pm_idle = B_TRUE;
13589 } else {
13590 att->att_need_pm_idle = B_FALSE;
13591 }
13592
13593 (void) taskq_dispatch(port->fp_taskq, fp_ulp_port_attach,
13594 att, KM_SLEEP);
13595 }
13596
13597
13598 /*
13599 * Forward state change notifications on to interested ULPs.
13600 * Spawns a call to fctl_ulp_statec_cb() in a taskq thread to do all the
13601 * real work.
13602 */
13603 static int
13604 fp_ulp_notify(fc_local_port_t *port, uint32_t statec, int sleep)
13605 {
13606 fc_port_clist_t *clist;
13607
13608 clist = kmem_zalloc(sizeof (*clist), sleep);
13609 if (clist == NULL) {
13610 return (FC_NOMEM);
13611 }
13612
13613 clist->clist_state = statec;
13614
13615 mutex_enter(&port->fp_mutex);
13616 clist->clist_flags = port->fp_topology;
13617 mutex_exit(&port->fp_mutex);
13618
13619 clist->clist_port = (opaque_t)port;
13620 clist->clist_len = 0;
13621 clist->clist_size = 0;
13622 clist->clist_map = NULL;
13623
13624 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb,
13625 clist, KM_SLEEP);
13626
13627 return (FC_SUCCESS);
13628 }
13629
13630
13631 /*
13632 * Get name server map
13633 */
13634 static int
13635 fp_ns_getmap(fc_local_port_t *port, job_request_t *job, fc_portmap_t **map,
13636 uint32_t *len, uint32_t sid)
13637 {
13638 int ret;
13639 fctl_ns_req_t *ns_cmd;
13640
13641 /*
13642 	 * Don't let the allocator do anything for the response;
13643 	 * we already have a buffer ready to fill out.
13644 */
13645 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t),
13646 sizeof (ns_resp_gan_t), 0, (FCTL_NS_FILL_NS_MAP |
13647 FCTL_NS_BUF_IS_FC_PORTMAP), KM_SLEEP);
13648
13649 	ASSERT(ns_cmd != NULL);
13650 
13651 	ns_cmd->ns_data_len = sizeof (**map) * (*len);
13652 	ns_cmd->ns_data_buf = (caddr_t)*map;
13653
13654 ns_cmd->ns_gan_index = 0;
13655 ns_cmd->ns_gan_sid = sid;
13656 ns_cmd->ns_cmd_code = NS_GA_NXT;
13657 ns_cmd->ns_gan_max = *len;
13658
13659 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
13660
13661 if (ns_cmd->ns_gan_index != *len) {
13662 *len = ns_cmd->ns_gan_index;
13663 }
13664 ns_cmd->ns_data_len = 0;
13665 ns_cmd->ns_data_buf = NULL;
13666 fctl_free_ns_cmd(ns_cmd);
13667
13668 return (ret);
13669 }
13670
13671
13672 /*
13673 * Create a remote port in Fabric topology by using NS services
13674 */
13675 static fc_remote_port_t *
13676 fp_create_remote_port_by_ns(fc_local_port_t *port, uint32_t d_id, int sleep)
13677 {
13678 int rval;
13679 job_request_t *job;
13680 fctl_ns_req_t *ns_cmd;
13681 fc_remote_port_t *pd;
13682
13683 ASSERT(!MUTEX_HELD(&port->fp_mutex));
13684
13685 FP_TRACE(FP_NHEAD1(1, 0), "PD creation begin; port=%p, d_id=%x",
13686 port, d_id);
13687
13688 #ifdef DEBUG
13689 mutex_enter(&port->fp_mutex);
13690 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology));
13691 mutex_exit(&port->fp_mutex);
13692 #endif
13693
13694 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, (opaque_t)port, sleep);
13695 if (job == NULL) {
13696 return (NULL);
13697 }
13698
13699 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t),
13700 sizeof (ns_resp_gan_t), 0, (FCTL_NS_CREATE_DEVICE |
13701 FCTL_NS_NO_DATA_BUF), sleep);
13702 	if (ns_cmd == NULL) {
13703 		fctl_dealloc_job(job);	/* don't leak the job allocated above */
13704 		return (NULL);
	}
13705
13706 job->job_result = FC_SUCCESS;
13707 ns_cmd->ns_gan_max = 1;
13708 ns_cmd->ns_cmd_code = NS_GA_NXT;
13709 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID;
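	/*
	 * GA_NXT returns the Name Server entry that follows the given port
	 * identifier, so query with (d_id - 1) to retrieve the object for
	 * d_id itself.
	 */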
13710 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id - 1;
13711 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0;
13712
13713 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
13714 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
13715 fctl_free_ns_cmd(ns_cmd);
13716
13717 if (rval != FC_SUCCESS || job->job_result != FC_SUCCESS) {
13718 fctl_dealloc_job(job);
13719 return (NULL);
13720 }
13721 fctl_dealloc_job(job);
13722
13723 pd = fctl_get_remote_port_by_did(port, d_id);
13724
13725 FP_TRACE(FP_NHEAD1(1, 0), "PD creation end; port=%p, d_id=%x, pd=%p",
13726 port, d_id, pd);
13727
13728 return (pd);
13729 }
13730
13731
13732 /*
13733  * Check the permissions on an ioctl command. If it requires an EXCLUSIVE
13734  * open, return FAILURE to shut the door on it. If the ioctl command isn't
13735  * in the permission list at all, shut the door on that too.
13736  *
13737  * Certain ioctls perform hardware accesses in FCA drivers, and we need to
13738  * make sure that users open the port for exclusive access while
13739  * performing those operations.
13740 *
13741 * This can prevent a casual user from inflicting damage on the port by
13742 * sending these ioctls from multiple processes/threads (there is no good
13743 * reason why one would need to do that) without actually realizing how
13744 * expensive such commands could turn out to be.
13745 *
13746 * It is also important to note that, even with an exclusive access,
13747 * multiple threads can share the same file descriptor and fire down
13748 * commands in parallel. To prevent that the driver needs to make sure
13749 * that such commands aren't in progress already. This is taken care of
13750 * in the FP_EXCL_BUSY bit of fp_flag.
13751 */
13752 static int
13753 fp_check_perms(uchar_t open_flag, uint16_t ioctl_cmd)
13754 {
13755 int ret = FC_FAILURE;
13756 int count;
13757
13758 for (count = 0;
13759 count < sizeof (fp_perm_list) / sizeof (fp_perm_list[0]);
13760 count++) {
13761 if (fp_perm_list[count].fp_ioctl_cmd == ioctl_cmd) {
13762 if (fp_perm_list[count].fp_open_flag & open_flag) {
13763 ret = FC_SUCCESS;
13764 }
13765 break;
13766 }
13767 }
13768
13769 return (ret);
13770 }
13771
13772
13773 /*
13774 * Bind Port driver's unsolicited, state change callbacks
13775 */
13776 static int
13777 fp_bind_callbacks(fc_local_port_t *port)
13778 {
13779 fc_fca_bind_info_t bind_info = {0};
13780 fc_fca_port_info_t *port_info;
13781 int rval = DDI_SUCCESS;
13782 uint16_t class;
13783 int node_namelen, port_namelen;
13784 char *nname = NULL, *pname = NULL;
13785
13786 ASSERT(!MUTEX_HELD(&port->fp_mutex));
13787
13788 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip,
13789 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
13790 "node-name", &nname) != DDI_PROP_SUCCESS) {
13791 FP_TRACE(FP_NHEAD1(1, 0),
13792 "fp_bind_callback fail to get node-name");
13793 }
13794 if (nname) {
13795 fc_str_to_wwn(nname, &(bind_info.port_nwwn));
13796 }
13797
13798 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip,
13799 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
13800 "port-name", &pname) != DDI_PROP_SUCCESS) {
13801 FP_TRACE(FP_NHEAD1(1, 0),
13802 "fp_bind_callback fail to get port-name");
13803 }
13804 if (pname) {
13805 fc_str_to_wwn(pname, &(bind_info.port_pwwn));
13806 }
13807
13808 if (port->fp_npiv_type == FC_NPIV_PORT) {
13809 bind_info.port_npiv = 1;
13810 }
13811
13812 /*
13813 * fca_bind_port returns the FCA driver's handle for the local
13814 * port instance. If the port number isn't supported it returns NULL.
13815 	 * It also sets up callbacks in the FCA for various
13816 	 * things like state changes, ELS, etc.
13817 */
13818 bind_info.port_statec_cb = fp_statec_cb;
13819 bind_info.port_unsol_cb = fp_unsol_cb;
13820 bind_info.port_num = port->fp_port_num;
13821 bind_info.port_handle = (opaque_t)port;
13822
13823 port_info = kmem_zalloc(sizeof (*port_info), KM_SLEEP);
13824
13825 /*
13826 * Hold the port driver mutex as the callbacks are bound until the
13827 * service parameters are properly filled in (in order to be able to
13828 * properly respond to unsolicited ELS requests)
13829 */
13830 mutex_enter(&port->fp_mutex);
13831
13832 port->fp_fca_handle = port->fp_fca_tran->fca_bind_port(
13833 port->fp_fca_dip, port_info, &bind_info);
13834
13835 if (port->fp_fca_handle == NULL) {
13836 rval = DDI_FAILURE;
13837 goto exit;
13838 }
13839
13840 /*
13841 * Only fcoei will set this bit
13842 */
13843 if (port_info->pi_port_state & FC_STATE_FCA_IS_NODMA) {
13844 port->fp_soft_state |= FP_SOFT_FCA_IS_NODMA;
13845 port_info->pi_port_state &= ~(FC_STATE_FCA_IS_NODMA);
13846 }
13847
13848 port->fp_bind_state = port->fp_state = port_info->pi_port_state;
13849 port->fp_service_params = port_info->pi_login_params;
13850 port->fp_hard_addr = port_info->pi_hard_addr;
13851
13852 /* Copy from the FCA structure to the FP structure */
13853 port->fp_hba_port_attrs = port_info->pi_attrs;
13854
13855 if (port_info->pi_rnid_params.status == FC_SUCCESS) {
13856 port->fp_rnid_init = 1;
13857 bcopy(&port_info->pi_rnid_params.params,
13858 &port->fp_rnid_params,
13859 sizeof (port->fp_rnid_params));
13860 } else {
13861 port->fp_rnid_init = 0;
13862 }
13863
13864 node_namelen = strlen((char *)&port_info->pi_attrs.sym_node_name);
13865 if (node_namelen) {
13866 bcopy(&port_info->pi_attrs.sym_node_name,
13867 &port->fp_sym_node_name,
13868 node_namelen);
13869 port->fp_sym_node_namelen = node_namelen;
13870 }
13871 port_namelen = strlen((char *)&port_info->pi_attrs.sym_port_name);
13872 if (port_namelen) {
13873 bcopy(&port_info->pi_attrs.sym_port_name,
13874 &port->fp_sym_port_name,
13875 port_namelen);
13876 port->fp_sym_port_namelen = port_namelen;
13877 }
13878
13879 /* zero out the normally unused fields right away */
13880 port->fp_service_params.ls_code.mbz = 0;
13881 port->fp_service_params.ls_code.ls_code = 0;
13882 bzero(&port->fp_service_params.reserved,
13883 sizeof (port->fp_service_params.reserved));
13884
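	/*
	 * Derive the supported classes of service from the login
	 * parameters returned by the FCA: the code below treats the high
	 * bit (0x8000) of each class_opt field as the class-validity
	 * indicator and sets the corresponding FC_NS_CLASSn bit in fp_cos.
	 */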
13885 class = port_info->pi_login_params.class_1.class_opt;
13886 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS1 : 0;
13887
13888 class = port_info->pi_login_params.class_2.class_opt;
13889 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS2 : 0;
13890
13891 class = port_info->pi_login_params.class_3.class_opt;
13892 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS3 : 0;
13893
13894 exit:
13895 if (nname) {
13896 ddi_prop_free(nname);
13897 }
13898 if (pname) {
13899 ddi_prop_free(pname);
13900 }
13901 mutex_exit(&port->fp_mutex);
13902 kmem_free(port_info, sizeof (*port_info));
13903
13904 return (rval);
13905 }
13906
13907
13908 /*
13909 * Retrieve FCA capabilities
13910 */
13911 static void
13912 fp_retrieve_caps(fc_local_port_t *port)
13913 {
13914 int rval;
13915 int ub_count;
13916 fc_fcp_dma_t fcp_dma;
13917 fc_reset_action_t action;
13918 fc_dma_behavior_t dma_behavior;
13919
13920 ASSERT(!MUTEX_HELD(&port->fp_mutex));
13921
13922 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle,
13923 FC_CAP_UNSOL_BUF, &ub_count);
13924
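	/*
	 * Negotiate the unsolicited buffer count: a value of -1 from the
	 * FCA means "use the fp default" (fp_unsol_buf_count), zero means
	 * no unsolicited buffers, and any other value is divided among the
	 * FCA's ports, with this port then taking a quarter of its share.
	 */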
13925 switch (rval) {
13926 case FC_CAP_FOUND:
13927 case FC_CAP_SETTABLE:
13928 switch (ub_count) {
13929 case 0:
13930 break;
13931
13932 case -1:
13933 ub_count = fp_unsol_buf_count;
13934 break;
13935
13936 default:
13937 /* Take 1/4th of this port's share of the total buffers */
13938 ub_count =
13939 (ub_count / port->fp_fca_tran->fca_numports) >> 2;
13940 break;
13941 }
13942 break;
13943
13944 default:
13945 ub_count = 0;
13946 break;
13947 }
13948
13949 mutex_enter(&port->fp_mutex);
13950 port->fp_ub_count = ub_count;
13951 mutex_exit(&port->fp_mutex);
13952
13953 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle,
13954 FC_CAP_POST_RESET_BEHAVIOR, &action);
13955
13956 switch (rval) {
13957 case FC_CAP_FOUND:
13958 case FC_CAP_SETTABLE:
13959 switch (action) {
13960 case FC_RESET_RETURN_NONE:
13961 case FC_RESET_RETURN_ALL:
13962 case FC_RESET_RETURN_OUTSTANDING:
13963 break;
13964
13965 default:
13966 action = FC_RESET_RETURN_NONE;
13967 break;
13968 }
13969 break;
13970
13971 default:
13972 action = FC_RESET_RETURN_NONE;
13973 break;
13974 }
13975 mutex_enter(&port->fp_mutex);
13976 port->fp_reset_action = action;
13977 mutex_exit(&port->fp_mutex);
13978
13979 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle,
13980 FC_CAP_NOSTREAM_ON_UNALIGN_BUF, &dma_behavior);
13981
13982 switch (rval) {
13983 case FC_CAP_FOUND:
13984 switch (dma_behavior) {
13985 case FC_ALLOW_STREAMING:
13986 /* FALLTHROUGH */
13987 case FC_NO_STREAMING:
13988 break;
13989
13990 default:
13991 /*
13992 * If capability was found and the value
13993 * was incorrect assume the worst
13994 */
13995 dma_behavior = FC_NO_STREAMING;
13996 break;
13997 }
13998 break;
13999
14000 default:
14001 /*
14002 * If capability was not defined - allow streaming; existing
14003 * FCAs should not be affected.
14004 */
14005 dma_behavior = FC_ALLOW_STREAMING;
14006 break;
14007 }
14008 mutex_enter(&port->fp_mutex);
14009 port->fp_dma_behavior = dma_behavior;
14010 mutex_exit(&port->fp_mutex);
14011
14012 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle,
14013 FC_CAP_FCP_DMA, &fcp_dma);
14014
14015 if (rval != FC_CAP_FOUND || (fcp_dma != FC_NO_DVMA_SPACE &&
14016 fcp_dma != FC_DVMA_SPACE)) {
14017 fcp_dma = FC_DVMA_SPACE;
14018 }
14019
14020 mutex_enter(&port->fp_mutex);
14021 port->fp_fcp_dma = fcp_dma;
14022 mutex_exit(&port->fp_mutex);
14023 }
14024
14025
14026 /*
14027 * Handle Domain, Area changes in the Fabric.
14028 */
14029 static void
14030 fp_validate_area_domain(fc_local_port_t *port, uint32_t id, uint32_t mask,
14031 job_request_t *job, int sleep)
14032 {
14033 #ifdef DEBUG
14034 uint32_t dcnt;
14035 #endif
14036 int rval;
14037 int send;
14038 int index;
14039 int listindex;
14040 int login;
14041 int job_flags;
14042 char ww_name[17];
14043 uint32_t d_id;
14044 uint32_t count;
14045 fctl_ns_req_t *ns_cmd;
14046 fc_portmap_t *list;
14047 fc_orphan_t *orp;
14048 fc_orphan_t *norp;
14049 fc_orphan_t *prev;
14050 fc_remote_port_t *pd;
14051 fc_remote_port_t *npd;
14052 struct pwwn_hash *head;
14053
14054 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
14055 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
14056 0, sleep);
14057 if (ns_cmd == NULL) {
14058 mutex_enter(&port->fp_mutex);
14059 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
14060 --port->fp_rscn_count;
14061 }
14062 mutex_exit(&port->fp_mutex);
14063
14064 return;
14065 }
14066 ns_cmd->ns_cmd_code = NS_GID_PN;
14067
14068 /*
14069 * We need to get a new count of devices from the
14070 * name server, which will also create any new devices
14071 * as needed.
14072 */
14073
14074 (void) fp_ns_get_devcount(port, job, 1, sleep);
14075
14076 FP_TRACE(FP_NHEAD1(3, 0),
14077 "fp_validate_area_domain: get_devcount found %d devices",
14078 port->fp_total_devices);
14079
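	/*
	 * Walk the PWWN hash table and mark every remote port that falls
	 * within the affected domain/area (and for which we were the PLOGI
	 * initiator) as OLD and PD_ELS_MARK so that the revalidation pass
	 * below can find it. Ports with an ELS already in progress are
	 * left alone.
	 */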
14080 mutex_enter(&port->fp_mutex);
14081
14082 for (count = index = 0; index < pwwn_table_size; index++) {
14083 head = &port->fp_pwwn_table[index];
14084 pd = head->pwwn_head;
14085 while (pd != NULL) {
14086 mutex_enter(&pd->pd_mutex);
14087 if (pd->pd_flags != PD_ELS_IN_PROGRESS) {
14088 if ((pd->pd_port_id.port_id & mask) == id &&
14089 pd->pd_recepient == PD_PLOGI_INITIATOR) {
14090 count++;
14091 pd->pd_type = PORT_DEVICE_OLD;
14092 pd->pd_flags = PD_ELS_MARK;
14093 }
14094 }
14095 mutex_exit(&pd->pd_mutex);
14096 pd = pd->pd_wwn_hnext;
14097 }
14098 }
14099
14100 #ifdef DEBUG
14101 dcnt = count;
14102 #endif /* DEBUG */
14103
14104 /*
14105 * Since port->fp_orphan_count is declared an 'int' it is
14106 * theoretically possible that the count could go negative.
14107 *
14108 * This would be bad and if that happens we really do want
14109 * to know.
14110 */
14111
14112 ASSERT(port->fp_orphan_count >= 0);
14113
14114 count += port->fp_orphan_count;
14115
14116 /*
14117 * We add the port->fp_total_devices value to the count
14118 * in the case where our port is newly attached. At that
14119 * point no discovery has been done and there are no
14120 * orphans on the port's orphan list, so without this
14121 * addition to the count we would not allocate enough
14122 * memory to perform discovery.
14123 */
14124
14125 if (count == 0) {
14126 count += port->fp_total_devices;
14127 FP_TRACE(FP_NHEAD1(3, 0), "fp_validate_area_domain: "
14128 "0x%x orphans found, using 0x%x",
14129 port->fp_orphan_count, count);
14130 }
14131
14132 mutex_exit(&port->fp_mutex);
14133
14134 /*
14135 * Allocate the change list
14136 */
14137
14138 list = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep);
14139 if (list == NULL) {
14140 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
14141 " Not enough memory to service RSCNs"
14142 " for %d ports, continuing...", count);
14143
14144 fctl_free_ns_cmd(ns_cmd);
14145
14146 mutex_enter(&port->fp_mutex);
14147 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
14148 --port->fp_rscn_count;
14149 }
14150 mutex_exit(&port->fp_mutex);
14151
14152 return;
14153 }
14154
14155 /*
14156 * Attempt to validate or invalidate the devices that were
14157 * already in the pwwn hash table.
14158 */
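	/*
	 * For each marked device a GID_PN query to the name server checks
	 * whether its Port WWN is still registered. Devices that fail the
	 * query are filled out as OLD entries in the change list; the rest
	 * are copied into the list and moved to PD_ELS_IN_PROGRESS for the
	 * login/validation pass that follows.
	 */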
14159
14160 mutex_enter(&port->fp_mutex);
14161 for (listindex = 0, index = 0; index < pwwn_table_size; index++) {
14162 head = &port->fp_pwwn_table[index];
14163 npd = head->pwwn_head;
14164
14165 while ((pd = npd) != NULL) {
14166 npd = pd->pd_wwn_hnext;
14167
14168 mutex_enter(&pd->pd_mutex);
14169 if ((pd->pd_port_id.port_id & mask) == id &&
14170 pd->pd_flags == PD_ELS_MARK) {
14171 la_wwn_t *pwwn;
14172
14173 job->job_result = FC_SUCCESS;
14174
14175 ((ns_req_gid_pn_t *)
14176 (ns_cmd->ns_cmd_buf))->pwwn =
14177 pd->pd_port_name;
14178
14179 pwwn = &pd->pd_port_name;
14180 d_id = pd->pd_port_id.port_id;
14181
14182 mutex_exit(&pd->pd_mutex);
14183 mutex_exit(&port->fp_mutex);
14184
14185 rval = fp_ns_query(port, ns_cmd, job, 1,
14186 sleep);
14187 if (rval != FC_SUCCESS) {
14188 fc_wwn_to_str(pwwn, ww_name);
14189
14190 FP_TRACE(FP_NHEAD1(3, 0),
14191 "AREA RSCN: PD disappeared; "
14192 "d_id=%x, PWWN=%s", d_id, ww_name);
14193
14194 FP_TRACE(FP_NHEAD2(9, 0),
14195 "N_x Port with D_ID=%x,"
14196 " PWWN=%s disappeared from fabric",
14197 d_id, ww_name);
14198
14199 fp_fillout_old_map(list + listindex++,
14200 pd, 1);
14201 } else {
14202 fctl_copy_portmap(list + listindex++,
14203 pd);
14204
14205 mutex_enter(&pd->pd_mutex);
14206 pd->pd_flags = PD_ELS_IN_PROGRESS;
14207 mutex_exit(&pd->pd_mutex);
14208 }
14209
14210 mutex_enter(&port->fp_mutex);
14211 } else {
14212 mutex_exit(&pd->pd_mutex);
14213 }
14214 }
14215 }
14216
14217 mutex_exit(&port->fp_mutex);
14218
14219 ASSERT(listindex == dcnt);
14220
14221 job->job_counter = listindex;
14222 job_flags = job->job_flags;
14223 job->job_flags |= JOB_TYPE_FP_ASYNC;
14224
14225 /*
14226 * Login (if we were the initiator) or validate devices in the
14227 * port map.
14228 */
14229
14230 for (index = 0; index < listindex; index++) {
14231 pd = list[index].map_pd;
14232
14233 mutex_enter(&pd->pd_mutex);
14234 ASSERT((pd->pd_port_id.port_id & mask) == id);
14235
14236 if (pd->pd_flags != PD_ELS_IN_PROGRESS) {
14237 ASSERT(pd->pd_type == PORT_DEVICE_OLD);
14238 mutex_exit(&pd->pd_mutex);
14239 fp_jobdone(job);
14240 continue;
14241 }
14242
14243 login = (pd->pd_state == PORT_DEVICE_LOGGED_IN) ? 1 : 0;
14244 send = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0;
14245 d_id = pd->pd_port_id.port_id;
14246 mutex_exit(&pd->pd_mutex);
14247
14248 if ((d_id & mask) == id && send) {
14249 if (login) {
14250 FP_TRACE(FP_NHEAD1(6, 0),
14251 "RSCN and PLOGI request;"
14252 " pd=%p, job=%p d_id=%x, index=%d", pd,
14253 job, d_id, index);
14254
14255 rval = fp_port_login(port, d_id, job,
14256 FP_CMD_PLOGI_RETAIN, sleep, pd, NULL);
14257 if (rval != FC_SUCCESS) {
14258 mutex_enter(&pd->pd_mutex);
14259 pd->pd_flags = PD_IDLE;
14260 mutex_exit(&pd->pd_mutex);
14261
14262 job->job_result = rval;
14263 fp_jobdone(job);
14264 }
14265 FP_TRACE(FP_NHEAD1(1, 0),
14266 "PLOGI succeeded:no skip(1) for "
14267 "D_ID %x", d_id);
14268 list[index].map_flags |=
14269 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY;
14270 } else {
14271 FP_TRACE(FP_NHEAD1(6, 0), "RSCN and NS request;"
14272 " pd=%p, job=%p d_id=%x, index=%d", pd,
14273 job, d_id, index);
14274
14275 rval = fp_ns_validate_device(port, pd, job,
14276 0, sleep);
14277 if (rval != FC_SUCCESS) {
14278 fp_jobdone(job);
14279 }
14280 mutex_enter(&pd->pd_mutex);
14281 pd->pd_flags = PD_IDLE;
14282 mutex_exit(&pd->pd_mutex);
14283 }
14284 } else {
14285 FP_TRACE(FP_NHEAD1(6, 0),
14286 "RSCN and NO request sent; pd=%p,"
14287 " d_id=%x, index=%d", pd, d_id, index);
14288
14289 mutex_enter(&pd->pd_mutex);
14290 pd->pd_flags = PD_IDLE;
14291 mutex_exit(&pd->pd_mutex);
14292
14293 fp_jobdone(job);
14294 }
14295 }
14296
14297 if (listindex) {
14298 fctl_jobwait(job);
14299 }
14300 job->job_flags = job_flags;
14301
14302 /*
14303 * Orphan list validation.
14304 */
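	/*
	 * For every orphan, issue a GID_PN query for its Port WWN. If the
	 * name server now knows the WWN, create a remote port for the
	 * returned D_ID, take the entry off the orphan list and append it
	 * to the change list; otherwise leave it on the orphan list.
	 */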
14305 mutex_enter(&port->fp_mutex);
14306 for (prev = NULL, orp = port->fp_orphan_list; port->fp_orphan_count &&
14307 orp != NULL; orp = norp) {
14308 norp = orp->orp_next;
14309 mutex_exit(&port->fp_mutex);
14310
14311 job->job_counter = 1;
14312 job->job_result = FC_SUCCESS;
14313 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
14314
14315 ((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = orp->orp_pwwn;
14316
14317 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0;
14318 ((ns_resp_gid_pn_t *)
14319 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0;
14320
14321 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
14322 if (rval == FC_SUCCESS) {
14323 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
14324 pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP);
14325 if (pd != NULL) {
14326 fc_wwn_to_str(&orp->orp_pwwn, ww_name);
14327
14328 FP_TRACE(FP_NHEAD1(6, 0),
14329 "RSCN and ORPHAN list "
14330 "success; d_id=%x, PWWN=%s", d_id, ww_name);
14331
14332 FP_TRACE(FP_NHEAD2(6, 0),
14333 "N_x Port with D_ID=%x, PWWN=%s reappeared"
14334 " in fabric", d_id, ww_name);
14335
14336 mutex_enter(&port->fp_mutex);
14337 if (prev) {
14338 prev->orp_next = orp->orp_next;
14339 } else {
14340 ASSERT(orp == port->fp_orphan_list);
14341 port->fp_orphan_list = orp->orp_next;
14342 }
14343 port->fp_orphan_count--;
14344 mutex_exit(&port->fp_mutex);
14345
14346 kmem_free(orp, sizeof (*orp));
14347 fctl_copy_portmap(list + listindex++, pd);
14348 } else {
14349 prev = orp;
14350 }
14351 } else {
14352 prev = orp;
14353 }
14354 mutex_enter(&port->fp_mutex);
14355 }
14356 mutex_exit(&port->fp_mutex);
14357
14358 /*
14359 * One more pass through the list to delist old devices from
14360 * the d_id and pwwn tables and possibly add to the orphan list.
14361 */
14362
14363 for (index = 0; index < listindex; index++) {
14364 pd = list[index].map_pd;
14365 ASSERT(pd != NULL);
14366
14367 /*
14368 * Update the PLOGI results; for the NS validation
14369 * of the orphan list this is redundant.
14370 *
14371 * Take care to preserve PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY where
14372 * appropriate, as fctl_copy_portmap() clears map_flags.
14373 */
14374 if (list[index].map_flags &
14375 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) {
14376 fctl_copy_portmap(list + index, pd);
14377 list[index].map_flags |=
14378 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY;
14379 } else {
14380 fctl_copy_portmap(list + index, pd);
14381 }
14382
14383 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN "
14384 "results; pd=%p, d_id=%x pwwn=%x %x %x %x %x %x %x %x",
14385 pd, pd->pd_port_id.port_id,
14386 pd->pd_port_name.raw_wwn[0],
14387 pd->pd_port_name.raw_wwn[1],
14388 pd->pd_port_name.raw_wwn[2],
14389 pd->pd_port_name.raw_wwn[3],
14390 pd->pd_port_name.raw_wwn[4],
14391 pd->pd_port_name.raw_wwn[5],
14392 pd->pd_port_name.raw_wwn[6],
14393 pd->pd_port_name.raw_wwn[7]);
14394
14395 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN "
14396 "results continued, pd=%p type=%x, flags=%x, state=%x",
14397 pd, pd->pd_type, pd->pd_flags, pd->pd_state);
14398
14399 mutex_enter(&pd->pd_mutex);
14400 if (pd->pd_type == PORT_DEVICE_OLD) {
14401 int initiator;
14402
14403 pd->pd_flags = PD_IDLE;
14404 initiator = (pd->pd_recepient ==
14405 PD_PLOGI_INITIATOR) ? 1 : 0;
14406
14407 mutex_exit(&pd->pd_mutex);
14408
14409 mutex_enter(&port->fp_mutex);
14410 mutex_enter(&pd->pd_mutex);
14411
14412 pd->pd_state = PORT_DEVICE_INVALID;
14413 fctl_delist_did_table(port, pd);
14414 fctl_delist_pwwn_table(port, pd);
14415
14416 mutex_exit(&pd->pd_mutex);
14417 mutex_exit(&port->fp_mutex);
14418
14419 if (initiator) {
14420 (void) fctl_add_orphan(port, pd, sleep);
14421 }
14422 list[index].map_pd = pd;
14423 } else {
14424 ASSERT(pd->pd_flags == PD_IDLE);
14425 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
14426 /*
14427 * Reset LOGO tolerance to zero
14428 */
14429 fctl_tc_reset(&pd->pd_logo_tc);
14430 }
14431 mutex_exit(&pd->pd_mutex);
14432 }
14433 }
14434
14435 if (ns_cmd) {
14436 fctl_free_ns_cmd(ns_cmd);
14437 }
14438 if (listindex) {
14439 (void) fp_ulp_devc_cb(port, list, listindex, count,
14440 sleep, 0);
14441 } else {
14442 kmem_free(list, sizeof (*list) * count);
14443
14444 mutex_enter(&port->fp_mutex);
14445 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
14446 --port->fp_rscn_count;
14447 }
14448 mutex_exit(&port->fp_mutex);
14449 }
14450 }
14451
14452
14453 /*
14454 * Work hard to make sense out of an RSCN page.
14455 */
14456 static void
14457 fp_validate_rscn_page(fc_local_port_t *port, fc_affected_id_t *page,
14458 job_request_t *job, fctl_ns_req_t *ns_cmd, fc_portmap_t *listptr,
14459 int *listindex, int sleep)
14460 {
14461 int rval;
14462 char ww_name[17];
14463 la_wwn_t *pwwn;
14464 fc_remote_port_t *pwwn_pd;
14465 fc_remote_port_t *did_pd;
14466
14467 did_pd = fctl_get_remote_port_by_did(port, page->aff_d_id);
14468
14469 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; "
14470 "port=%p, d_id=%x, pd=%p, rscn_count:0x%x", port, page->aff_d_id,
14471 did_pd, (uint32_t)(uintptr_t)job->job_cb_arg);
14472
14473 if (did_pd != NULL) {
14474 mutex_enter(&did_pd->pd_mutex);
14475 if (did_pd->pd_flags != PD_IDLE) {
14476 mutex_exit(&did_pd->pd_mutex);
14477 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page: "
14478 "PD is BUSY; port=%p, d_id=%x, pd=%p",
14479 port, page->aff_d_id, did_pd);
14480 return;
14481 }
14482 did_pd->pd_flags = PD_ELS_IN_PROGRESS;
14483 mutex_exit(&did_pd->pd_mutex);
14484 }
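	/*
	 * At this point the remote port known by D_ID (if any) has been
	 * reserved for this RSCN page by setting PD_ELS_IN_PROGRESS; it is
	 * returned to PD_IDLE on every exit path below.
	 */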
14485
14486 job->job_counter = 1;
14487
14488 pwwn = &((ns_resp_gpn_id_t *)ns_cmd->ns_data_buf)->pwwn;
14489
14490 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.port_id = page->aff_d_id;
14491 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.priv_lilp_posit = 0;
14492
14493 bzero(ns_cmd->ns_data_buf, sizeof (la_wwn_t));
14494 rval = fp_ns_query(port, ns_cmd, job, 1, sleep);
14495
14496 FP_TRACE(FP_NHEAD1(1, 0), "NS Query Response for D_ID page; rev=%x,"
14497 " in_id=%x, cmdrsp=%x, reason=%x, expln=%x",
14498 ns_cmd->ns_resp_hdr.ct_rev, ns_cmd->ns_resp_hdr.ct_inid,
14499 ns_cmd->ns_resp_hdr.ct_cmdrsp, ns_cmd->ns_resp_hdr.ct_reason,
14500 ns_cmd->ns_resp_hdr.ct_expln);
14501
14502 job->job_counter = 1;
14503
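	/*
	 * Based on the GPN_ID result and the D_ID/PWWN lookups, the code
	 * below handles these cases: the D_ID disappeared from the fabric;
	 * the same device is still present (did_pd == pwwn_pd); neither is
	 * known (possibly a former orphan reappearing); a known PWWN came
	 * back with a new D_ID; a known D_ID came back with a new PWWN;
	 * and the "weird" case where both exist but do not match up.
	 */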
14504 if (rval != FC_SUCCESS || fctl_is_wwn_zero(pwwn) == FC_SUCCESS) {
14505 /*
14506 * What this means is that the D_ID
14507 * disappeared from the Fabric.
14508 */
14509 if (did_pd == NULL) {
14510 FP_TRACE(FP_NHEAD1(1, 0), "RSCN with D_ID page;"
14511 " NULL PD disappeared, rval=%x", rval);
14512 return;
14513 }
14514
14515 fc_wwn_to_str(&did_pd->pd_port_name, ww_name);
14516
14517 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14518 (uint32_t)(uintptr_t)job->job_cb_arg;
14519
14520 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0);
14521
14522 FP_TRACE(FP_NHEAD1(3, 0), "RSCN: PD disappeared; "
14523 "d_id=%x, PWWN=%s", page->aff_d_id, ww_name);
14524
14525 FP_TRACE(FP_NHEAD2(9, 0),
14526 "GPN_ID for D_ID=%x failed", page->aff_d_id);
14527
14528 FP_TRACE(FP_NHEAD2(9, 0),
14529 "N_x Port with D_ID=%x, PWWN=%s disappeared from"
14530 " fabric", page->aff_d_id, ww_name);
14531
14532 mutex_enter(&did_pd->pd_mutex);
14533 did_pd->pd_flags = PD_IDLE;
14534 mutex_exit(&did_pd->pd_mutex);
14535
14536 FP_TRACE(FP_NHEAD1(3, 0), "RSCN with D_ID (%x) page; "
14537 "PD disappeared, pd=%p", page->aff_d_id, did_pd);
14538
14539 return;
14540 }
14541
14542 pwwn_pd = fctl_get_remote_port_by_pwwn(port, pwwn);
14543
14544 if (did_pd != NULL && pwwn_pd != NULL && did_pd == pwwn_pd) {
14545 /*
14546 * There is no change. Do PLOGI again and add it to
14547 * ULP portmap baggage and return. Note: When RSCNs
14548 * arrive with per page states, the need for PLOGI
14549 * can be determined correctly.
14550 */
14551 mutex_enter(&pwwn_pd->pd_mutex);
14552 pwwn_pd->pd_type = PORT_DEVICE_NOCHANGE;
14553 mutex_exit(&pwwn_pd->pd_mutex);
14554
14555 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14556 (uint32_t)(uintptr_t)job->job_cb_arg;
14557
14558 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd);
14559
14560 mutex_enter(&pwwn_pd->pd_mutex);
14561 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) ||
14562 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) {
14563 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name);
14564 mutex_exit(&pwwn_pd->pd_mutex);
14565
14566 rval = fp_port_login(port, page->aff_d_id, job,
14567 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL);
14568 if (rval == FC_SUCCESS) {
14569 fp_jobwait(job);
14570 rval = job->job_result;
14571
14572 /*
14573 * Reset LOGO tolerance to zero
14574 * Also we are the PLOGI initiator now.
14575 */
14576 mutex_enter(&pwwn_pd->pd_mutex);
14577 fctl_tc_reset(&pwwn_pd->pd_logo_tc);
14578 pwwn_pd->pd_recepient = PD_PLOGI_INITIATOR;
14579 mutex_exit(&pwwn_pd->pd_mutex);
14580 }
14581
14582 if (rval == FC_SUCCESS) {
14583 struct fc_portmap *map =
14584 listptr + *listindex - 1;
14585
14586 FP_TRACE(FP_NHEAD1(1, 0),
14587 "PLOGI succeeded: no skip(2)"
14588 " for D_ID %x", page->aff_d_id);
14589 map->map_flags |=
14590 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY;
14591 } else {
14592 FP_TRACE(FP_NHEAD2(9, rval),
14593 "PLOGI to D_ID=%x failed", page->aff_d_id);
14594
14595 FP_TRACE(FP_NHEAD2(9, 0),
14596 "N_x Port with D_ID=%x, PWWN=%s"
14597 " disappeared from fabric",
14598 page->aff_d_id, ww_name);
14599
14600 fp_fillout_old_map(listptr +
14601 *listindex - 1, pwwn_pd, 0);
14602 }
14603 } else {
14604 mutex_exit(&pwwn_pd->pd_mutex);
14605 }
14606
14607 mutex_enter(&did_pd->pd_mutex);
14608 did_pd->pd_flags = PD_IDLE;
14609 mutex_exit(&did_pd->pd_mutex);
14610
14611 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; "
14612 "Case ONE, rval=%x, result=%x pd=%p", page->aff_d_id, rval,
14613 job->job_result, pwwn_pd);
14614
14615 return;
14616 }
14617
14618 if (did_pd == NULL && pwwn_pd == NULL) {
14619
14620 fc_orphan_t *orp = NULL;
14621 fc_orphan_t *norp = NULL;
14622 fc_orphan_t *prev = NULL;
14623
14624 /*
14625 * Hunt down the orphan list before giving up.
14626 */
14627
14628 mutex_enter(&port->fp_mutex);
14629 if (port->fp_orphan_count) {
14630
14631 for (orp = port->fp_orphan_list; orp; orp = norp) {
14632 norp = orp->orp_next;
14633
14634 if (fctl_wwn_cmp(&orp->orp_pwwn, pwwn) != 0) {
14635 prev = orp;
14636 continue;
14637 }
14638
14639 if (prev) {
14640 prev->orp_next = orp->orp_next;
14641 } else {
14642 ASSERT(orp ==
14643 port->fp_orphan_list);
14644 port->fp_orphan_list =
14645 orp->orp_next;
14646 }
14647 port->fp_orphan_count--;
14648 break;
14649 }
14650 }
14651
14652 mutex_exit(&port->fp_mutex);
14653 pwwn_pd = fp_create_remote_port_by_ns(port,
14654 page->aff_d_id, sleep);
14655
14656 if (pwwn_pd != NULL) {
14657
14658 if (orp) {
14659 fc_wwn_to_str(&orp->orp_pwwn,
14660 ww_name);
14661
14662 FP_TRACE(FP_NHEAD2(9, 0),
14663 "N_x Port with D_ID=%x,"
14664 " PWWN=%s reappeared in fabric",
14665 page->aff_d_id, ww_name);
14666
14667 kmem_free(orp, sizeof (*orp));
14668 }
14669
14670 (listptr + *listindex)->
14671 map_rscn_info.ulp_rscn_count =
14672 (uint32_t)(uintptr_t)job->job_cb_arg;
14673
14674 fctl_copy_portmap(listptr +
14675 (*listindex)++, pwwn_pd);
14676 }
14677
14678 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; "
14679 "Case TWO", page->aff_d_id);
14680
14681 return;
14682 }
14683
14684 if (pwwn_pd != NULL && did_pd == NULL) {
14685 uint32_t old_d_id;
14686 uint32_t d_id = page->aff_d_id;
14687
14688 /*
14689 * What this means is that there is a new D_ID for
14690 * this Port WWN. Take the port device off the D_ID
14691 * list and put it back with the new D_ID. Perform
14692 * PLOGI again if it was already logged in.
14693 */
14694 mutex_enter(&port->fp_mutex);
14695 mutex_enter(&pwwn_pd->pd_mutex);
14696
14697 old_d_id = pwwn_pd->pd_port_id.port_id;
14698
14699 fctl_delist_did_table(port, pwwn_pd);
14700
14701 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14702 (uint32_t)(uintptr_t)job->job_cb_arg;
14703
14704 fp_fillout_changed_map(listptr + (*listindex)++, pwwn_pd,
14705 &d_id, NULL);
14706 fctl_enlist_did_table(port, pwwn_pd);
14707
14708 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page;"
14709 " Case THREE, pd=%p,"
14710 " state=%x", pwwn_pd, pwwn_pd->pd_state);
14711
14712 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) ||
14713 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) {
14714 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name);
14715
14716 mutex_exit(&pwwn_pd->pd_mutex);
14717 mutex_exit(&port->fp_mutex);
14718
14719 FP_TRACE(FP_NHEAD2(9, 0),
14720 "N_x Port with D_ID=%x, PWWN=%s has a new"
14721 " D_ID=%x now", old_d_id, ww_name, d_id);
14722
14723 rval = fp_port_login(port, page->aff_d_id, job,
14724 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL);
14725 if (rval == FC_SUCCESS) {
14726 fp_jobwait(job);
14727 rval = job->job_result;
14728 }
14729
14730 if (rval != FC_SUCCESS) {
14731 fp_fillout_old_map(listptr +
14732 *listindex - 1, pwwn_pd, 0);
14733 }
14734 } else {
14735 mutex_exit(&pwwn_pd->pd_mutex);
14736 mutex_exit(&port->fp_mutex);
14737 }
14738
14739 return;
14740 }
14741
14742 if (pwwn_pd == NULL && did_pd != NULL) {
14743 fc_portmap_t *ptr;
14744 uint32_t len = 1;
14745 char old_ww_name[17];
14746
14747 mutex_enter(&did_pd->pd_mutex);
14748 fc_wwn_to_str(&did_pd->pd_port_name, old_ww_name);
14749 mutex_exit(&did_pd->pd_mutex);
14750
14751 fc_wwn_to_str(pwwn, ww_name);
14752
14753 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14754 (uint32_t)(uintptr_t)job->job_cb_arg;
14755
14756 /*
14757 * What this means is that there is a new Port WWN for
14758 * this D_ID; mark the port device as old and provide
14759 * the new PWWN and D_ID combination as new.
14760 */
14761 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0);
14762
14763 FP_TRACE(FP_NHEAD2(9, 0),
14764 "N_x Port with D_ID=%x, PWWN=%s has a new PWWN=%s now",
14765 page->aff_d_id, old_ww_name, ww_name);
14766
14767 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14768 (uint32_t)(uintptr_t)job->job_cb_arg;
14769
14770 ptr = listptr + (*listindex)++;
14771
14772 job->job_counter = 1;
14773
14774 if (fp_ns_getmap(port, job, &ptr, &len,
14775 page->aff_d_id - 1) != FC_SUCCESS) {
14776 (*listindex)--;
14777 }
14778
14779 mutex_enter(&did_pd->pd_mutex);
14780 did_pd->pd_flags = PD_IDLE;
14781 mutex_exit(&did_pd->pd_mutex);
14782
14783 return;
14784 }
14785
14786 /*
14787 * A weird case: both the Port WWN and the D_ID are known, but they
14788 * do not match up with each other. Take the port device handle off
14789 * the Port WWN list, fix it up with the new Port WWN and put it
14790 * back; in the meantime mark the port device corresponding to the
14791 * old Port WWN as OLD.
14792 */
14793 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; Case WEIRD, pwwn_pd=%p,"
14794 " did_pd=%p", pwwn_pd, did_pd);
14795
14796 mutex_enter(&port->fp_mutex);
14797 mutex_enter(&pwwn_pd->pd_mutex);
14798
14799 pwwn_pd->pd_type = PORT_DEVICE_OLD;
14800 pwwn_pd->pd_state = PORT_DEVICE_INVALID;
14801 fctl_delist_did_table(port, pwwn_pd);
14802 fctl_delist_pwwn_table(port, pwwn_pd);
14803
14804 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued,"
14805 " pwwn-d_id=%x pwwn-wwn=%x %x %x %x %x %x %x %x",
14806 pwwn_pd->pd_port_id.port_id,
14807
14808 pwwn_pd->pd_port_name.raw_wwn[0],
14809 pwwn_pd->pd_port_name.raw_wwn[1],
14810 pwwn_pd->pd_port_name.raw_wwn[2],
14811 pwwn_pd->pd_port_name.raw_wwn[3],
14812 pwwn_pd->pd_port_name.raw_wwn[4],
14813 pwwn_pd->pd_port_name.raw_wwn[5],
14814 pwwn_pd->pd_port_name.raw_wwn[6],
14815 pwwn_pd->pd_port_name.raw_wwn[7]);
14816
14817 mutex_exit(&pwwn_pd->pd_mutex);
14818 mutex_exit(&port->fp_mutex);
14819
14820 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14821 (uint32_t)(uintptr_t)job->job_cb_arg;
14822
14823 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd);
14824
14825 mutex_enter(&port->fp_mutex);
14826 mutex_enter(&did_pd->pd_mutex);
14827
14828 fctl_delist_pwwn_table(port, did_pd);
14829
14830 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14831 (uint32_t)(uintptr_t)job->job_cb_arg;
14832
14833 fp_fillout_changed_map(listptr + (*listindex)++, did_pd, NULL, pwwn);
14834 fctl_enlist_pwwn_table(port, did_pd);
14835
14836 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued,"
14837 " d_id=%x, state=%x, did-wwn=%x %x %x %x %x %x %x %x",
14838 did_pd->pd_port_id.port_id, did_pd->pd_state,
14839
14840 did_pd->pd_port_name.raw_wwn[0],
14841 did_pd->pd_port_name.raw_wwn[1],
14842 did_pd->pd_port_name.raw_wwn[2],
14843 did_pd->pd_port_name.raw_wwn[3],
14844 did_pd->pd_port_name.raw_wwn[4],
14845 did_pd->pd_port_name.raw_wwn[5],
14846 did_pd->pd_port_name.raw_wwn[6],
14847 did_pd->pd_port_name.raw_wwn[7]);
14848
14849 if ((did_pd->pd_state == PORT_DEVICE_LOGGED_IN) ||
14850 (did_pd->pd_aux_flags & PD_LOGGED_OUT)) {
14851 mutex_exit(&did_pd->pd_mutex);
14852 mutex_exit(&port->fp_mutex);
14853
14854 rval = fp_port_login(port, page->aff_d_id, job,
14855 FP_CMD_PLOGI_RETAIN, sleep, did_pd, NULL);
14856 if (rval == FC_SUCCESS) {
14857 fp_jobwait(job);
14858 if (job->job_result != FC_SUCCESS) {
14859 fp_fillout_old_map(listptr +
14860 *listindex - 1, did_pd, 0);
14861 }
14862 } else {
14863 fp_fillout_old_map(listptr + *listindex - 1, did_pd, 0);
14864 }
14865 } else {
14866 mutex_exit(&did_pd->pd_mutex);
14867 mutex_exit(&port->fp_mutex);
14868 }
14869
14870 mutex_enter(&did_pd->pd_mutex);
14871 did_pd->pd_flags = PD_IDLE;
14872 mutex_exit(&did_pd->pd_mutex);
14873 }
14874
14875
14876 /*
14877 * Check with NS for the presence of this port WWN
14878 */
14879 static int
14880 fp_ns_validate_device(fc_local_port_t *port, fc_remote_port_t *pd,
14881 job_request_t *job, int polled, int sleep)
14882 {
14883 la_wwn_t pwwn;
14884 uint32_t flags;
14885 fctl_ns_req_t *ns_cmd;
14886
14887 flags = FCTL_NS_VALIDATE_PD | ((polled) ? 0 : FCTL_NS_ASYNC_REQUEST);
14888 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
14889 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
14890 flags, sleep);
14891 if (ns_cmd == NULL) {
14892 return (FC_NOMEM);
14893 }
14894
14895 mutex_enter(&pd->pd_mutex);
14896 pwwn = pd->pd_port_name;
14897 mutex_exit(&pd->pd_mutex);
14898
14899 ns_cmd->ns_cmd_code = NS_GID_PN;
14900 ns_cmd->ns_pd = pd;
14901 ((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = pwwn;
14902 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0;
14903 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0;
14904
14905 return (fp_ns_query(port, ns_cmd, job, polled, sleep));
14906 }
14907
14908
14909 /*
14910 * Sanity check the LILP map returned by FCA
14911 */
14912 static int
14913 fp_validate_lilp_map(fc_lilpmap_t *lilp_map)
14914 {
14915 int count;
14916
14917 if (lilp_map->lilp_length == 0) {
14918 return (FC_FAILURE);
14919 }
14920
14921 for (count = 0; count < lilp_map->lilp_length; count++) {
14922 if (fp_is_valid_alpa(lilp_map->lilp_alpalist[count]) !=
14923 FC_SUCCESS) {
14924 return (FC_FAILURE);
14925 }
14926 }
14927
14928 return (FC_SUCCESS);
14929 }
14930
14931
14932 /*
14933 * Sanity check if the AL_PA is a valid address
14934 */
14935 static int
14936 fp_is_valid_alpa(uchar_t al_pa)
14937 {
14938 int count;
14939
14940 for (count = 0; count < sizeof (fp_valid_alpas); count++) {
14941 if (al_pa == fp_valid_alpas[count] || al_pa == 0) {
14942 return (FC_SUCCESS);
14943 }
14944 }
14945
14946 return (FC_FAILURE);
14947 }
14948
14949
14950 /*
14951 * Post unsolicited callbacks to ULPs
14952 */
14953 static void
14954 fp_ulp_unsol_cb(void *arg)
14955 {
14956 fp_unsol_spec_t *ub_spec = (fp_unsol_spec_t *)arg;
14957
14958 fctl_ulp_unsol_cb(ub_spec->port, ub_spec->buf,
14959 ub_spec->buf->ub_frame.type);
14960 kmem_free(ub_spec, sizeof (*ub_spec));
14961 }
14962
14963
14964 /*
14965 * Perform message reporting in a consistent manner. Unless there is
14966 * a strong reason NOT to use this function (which is very rare),
14967 * all message reporting should go through it.
14968 */
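	/*
	 * A minimal usage sketch, mirroring existing callers in this file
	 * (e.g. the RSCN allocation-failure path): the message is prefixed
	 * with "fp(<instance>): " and, when fc_errno or pkt is supplied,
	 * decorated with the corresponding FC error or packet state text.
	 *
	 *	fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
	 *	    "unable to allocate memory, continuing...");
	 */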
14969 static void
14970 fp_printf(fc_local_port_t *port, int level, fp_mesg_dest_t dest, int fc_errno,
14971 fc_packet_t *pkt, const char *fmt, ...)
14972 {
14973 caddr_t buf;
14974 va_list ap;
14975
14976 switch (level) {
14977 case CE_NOTE:
14978 if ((port->fp_verbose & FP_WARNING_MESSAGES) == 0) {
14979 return;
14980 }
14981 break;
14982
14983 case CE_WARN:
14984 if ((port->fp_verbose & FP_FATAL_MESSAGES) == 0) {
14985 return;
14986 }
14987 break;
14988 }
14989
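	/*
	 * The buffer is allocated KM_NOSLEEP, presumably because fp_printf()
	 * can be called from contexts that must not block; if no memory is
	 * available the message is simply dropped.
	 */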
14990 buf = kmem_zalloc(256, KM_NOSLEEP);
14991 if (buf == NULL) {
14992 return;
14993 }
14994
14995 (void) sprintf(buf, "fp(%d): ", port->fp_instance);
14996
14997 va_start(ap, fmt);
14998 (void) vsprintf(buf + strlen(buf), fmt, ap);
14999 va_end(ap);
15000
15001 if (fc_errno) {
15002 char *errmsg;
15003
15004 (void) fc_ulp_error(fc_errno, &errmsg);
15005 (void) sprintf(buf + strlen(buf), " FC Error=%s", errmsg);
15006 } else {
15007 if (pkt) {
15008 caddr_t state, reason, action, expln;
15009
15010 (void) fc_ulp_pkt_error(pkt, &state, &reason,
15011 &action, &expln);
15012
15013 (void) sprintf(buf + strlen(buf),
15014 " state=%s, reason=%s", state, reason);
15015
15016 if (pkt->pkt_resp_resid) {
15017 (void) sprintf(buf + strlen(buf),
15018 " resp resid=%x\n", pkt->pkt_resp_resid);
15019 }
15020 }
15021 }
15022
15023 switch (dest) {
15024 case FP_CONSOLE_ONLY:
15025 cmn_err(level, "^%s", buf);
15026 break;
15027
15028 case FP_LOG_ONLY:
15029 cmn_err(level, "!%s", buf);
15030 break;
15031
15032 default:
15033 cmn_err(level, "%s", buf);
15034 break;
15035 }
15036
15037 kmem_free(buf, 256);
15038 }
15039
15040 static int
15041 fp_fcio_login(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
15042 {
15043 int ret;
15044 uint32_t d_id;
15045 la_wwn_t pwwn;
15046 fc_remote_port_t *pd = NULL;
15047 fc_remote_port_t *held_pd = NULL;
15048 fctl_ns_req_t *ns_cmd;
15049 fc_portmap_t *changelist;
15050
15051 bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));
15052
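	/*
	 * Resolve the D_ID for the requested Port WWN: on a fabric
	 * (switch) topology, ask the name server via a GID_PN query;
	 * otherwise look the remote port up directly by its PWWN.
	 */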
15053 mutex_enter(&port->fp_mutex);
15054 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
15055 mutex_exit(&port->fp_mutex);
15056 job->job_counter = 1;
15057
15058 job->job_result = FC_SUCCESS;
15059
15060 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
15061 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
15062 FCTL_NS_BUF_IS_USERLAND, KM_SLEEP);
15063
15064 ASSERT(ns_cmd != NULL);
15065
15066 ns_cmd->ns_cmd_code = NS_GID_PN;
15067 ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = pwwn;
15068
15069 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
15070
15071 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) {
15072 if (ret != FC_SUCCESS) {
15073 fcio->fcio_errno = ret;
15074 } else {
15075 fcio->fcio_errno = job->job_result;
15076 }
15077 fctl_free_ns_cmd(ns_cmd);
15078 return (EIO);
15079 }
15080 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
15081 fctl_free_ns_cmd(ns_cmd);
15082 } else {
15083 mutex_exit(&port->fp_mutex);
15084
15085 held_pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
15086 if (held_pd == NULL) {
15087 fcio->fcio_errno = FC_BADWWN;
15088 return (EIO);
15089 }
15090 pd = held_pd;
15091
15092 mutex_enter(&pd->pd_mutex);
15093 d_id = pd->pd_port_id.port_id;
15094 mutex_exit(&pd->pd_mutex);
15095 }
15096
15097 job->job_counter = 1;
15098
15099 pd = fctl_get_remote_port_by_did(port, d_id);
15100
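	/*
	 * If the remote port is already logged in, this user login simply
	 * adds a reference (pd_login_count) and returns success without
	 * sending another PLOGI.
	 */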
15101 if (pd) {
15102 mutex_enter(&pd->pd_mutex);
15103 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
15104 pd->pd_login_count++;
15105 mutex_exit(&pd->pd_mutex);
15106
15107 fcio->fcio_errno = FC_SUCCESS;
15108 if (held_pd) {
15109 fctl_release_remote_port(held_pd);
15110 }
15111
15112 return (0);
15113 }
15114 mutex_exit(&pd->pd_mutex);
15115 } else {
15116 mutex_enter(&port->fp_mutex);
15117 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
15118 mutex_exit(&port->fp_mutex);
15119 pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP);
15120 if (pd == NULL) {
15121 fcio->fcio_errno = FC_FAILURE;
15122 if (held_pd) {
15123 fctl_release_remote_port(held_pd);
15124 }
15125 return (EIO);
15126 }
15127 } else {
15128 mutex_exit(&port->fp_mutex);
15129 }
15130 }
15131
15132 job->job_flags &= ~JOB_TYPE_FP_ASYNC;
15133 job->job_counter = 1;
15134
15135 ret = fp_port_login(port, d_id, job, FP_CMD_PLOGI_RETAIN,
15136 KM_SLEEP, pd, NULL);
15137
15138 if (ret != FC_SUCCESS) {
15139 fcio->fcio_errno = ret;
15140 if (held_pd) {
15141 fctl_release_remote_port(held_pd);
15142 }
15143 return (EIO);
15144 }
15145 fp_jobwait(job);
15146
15147 fcio->fcio_errno = job->job_result;
15148
15149 if (held_pd) {
15150 fctl_release_remote_port(held_pd);
15151 }
15152
15153 if (job->job_result != FC_SUCCESS) {
15154 return (EIO);
15155 }
15156
15157 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
15158 if (pd == NULL) {
15159 fcio->fcio_errno = FC_BADDEV;
15160 return (ENODEV);
15161 }
15162
15163 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);
15164
15165 fctl_copy_portmap(changelist, pd);
15166 changelist->map_type = PORT_DEVICE_USER_LOGIN;
15167
15168 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);
15169
15170 mutex_enter(&pd->pd_mutex);
15171 pd->pd_type = PORT_DEVICE_NOCHANGE;
15172 mutex_exit(&pd->pd_mutex);
15173
15174 fctl_release_remote_port(pd);
15175
15176 return (0);
15177 }
15178
15179
15180 static int
15181 fp_fcio_logout(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
15182 {
15183 la_wwn_t pwwn;
15184 fp_cmd_t *cmd;
15185 fc_portmap_t *changelist;
15186 fc_remote_port_t *pd;
15187
15188 bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));
15189
15190 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
15191 if (pd == NULL) {
15192 fcio->fcio_errno = FC_BADWWN;
15193 return (ENXIO);
15194 }
15195
15196 mutex_enter(&pd->pd_mutex);
15197 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
15198 fcio->fcio_errno = FC_LOGINREQ;
15199 mutex_exit(&pd->pd_mutex);
15200
15201 fctl_release_remote_port(pd);
15202
15203 return (EINVAL);
15204 }
15205
15206 ASSERT(pd->pd_login_count >= 1);
15207
15208 if (pd->pd_flags == PD_ELS_IN_PROGRESS) {
15209 fcio->fcio_errno = FC_FAILURE;
15210 mutex_exit(&pd->pd_mutex);
15211
15212 fctl_release_remote_port(pd);
15213
15214 return (EBUSY);
15215 }
15216
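	/*
	 * User logins are reference counted. If other references remain,
	 * just drop this one and notify ULPs of the user logout; the LOGO
	 * ELS further below is sent only when the last reference goes away.
	 */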
15217 if (pd->pd_login_count > 1) {
15218 pd->pd_login_count--;
15219 fcio->fcio_errno = FC_SUCCESS;
15220 mutex_exit(&pd->pd_mutex);
15221
15222 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);
15223
15224 fctl_copy_portmap(changelist, pd);
15225 changelist->map_type = PORT_DEVICE_USER_LOGOUT;
15226
15227 fctl_release_remote_port(pd);
15228
15229 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);
15230
15231 return (0);
15232 }
15233
15234 pd->pd_flags = PD_ELS_IN_PROGRESS;
15235 mutex_exit(&pd->pd_mutex);
15236
15237 job->job_counter = 1;
15238
15239 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t),
15240 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd);
15241 if (cmd == NULL) {
15242 fcio->fcio_errno = FC_NOMEM;
15243 fctl_release_remote_port(pd);
15244
15245 mutex_enter(&pd->pd_mutex);
15246 pd->pd_flags = PD_IDLE;
15247 mutex_exit(&pd->pd_mutex);
15248
15249 return (ENOMEM);
15250 }
15251
15252 mutex_enter(&port->fp_mutex);
15253 mutex_enter(&pd->pd_mutex);
15254
15255 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
15256 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
15257 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE;
15258 cmd->cmd_retry_count = 1;
15259 cmd->cmd_ulp_pkt = NULL;
15260
15261 fp_logo_init(pd, cmd, job);
15262
15263 mutex_exit(&pd->pd_mutex);
15264 mutex_exit(&port->fp_mutex);
15265
15266 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
15267 mutex_enter(&pd->pd_mutex);
15268 pd->pd_flags = PD_IDLE;
15269 mutex_exit(&pd->pd_mutex);
15270
15271 fp_free_pkt(cmd);
15272 fctl_release_remote_port(pd);
15273
15274 return (EIO);
15275 }
15276
15277 fp_jobwait(job);
15278
15279 fcio->fcio_errno = job->job_result;
15280 if (job->job_result != FC_SUCCESS) {
15281 mutex_enter(&pd->pd_mutex);
15282 pd->pd_flags = PD_IDLE;
15283 mutex_exit(&pd->pd_mutex);
15284
15285 fctl_release_remote_port(pd);
15286
15287 return (EIO);
15288 }
15289
15290 ASSERT(pd != NULL);
15291
15292 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);
15293
15294 fctl_copy_portmap(changelist, pd);
15295 changelist->map_type = PORT_DEVICE_USER_LOGOUT;
15296 changelist->map_state = PORT_DEVICE_INVALID;
15297
15298 mutex_enter(&port->fp_mutex);
15299 mutex_enter(&pd->pd_mutex);
15300
15301 fctl_delist_did_table(port, pd);
15302 fctl_delist_pwwn_table(port, pd);
15303 pd->pd_flags = PD_IDLE;
15304
15305 mutex_exit(&pd->pd_mutex);
15306 mutex_exit(&port->fp_mutex);
15307
15308 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);
15309
15310 fctl_release_remote_port(pd);
15311
15312 return (0);
15313 }
15314
15315
15316
15317 /*
15318 * Send a sysevent for adapter port level events.
15319 */
15320 static void
15321 fp_log_port_event(fc_local_port_t *port, char *subclass)
15322 {
15323 nvlist_t *attr_list;
15324
15325 if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
15326 KM_SLEEP) != DDI_SUCCESS) {
15327 goto alloc_failed;
15328 }
15329
15330 if (nvlist_add_uint32(attr_list, "instance",
15331 port->fp_instance) != DDI_SUCCESS) {
15332 goto error;
15333 }
15334
15335 if (nvlist_add_byte_array(attr_list, "port-wwn",
15336 port->fp_service_params.nport_ww_name.raw_wwn,
15337 sizeof (la_wwn_t)) != DDI_SUCCESS) {
15338 goto error;
15339 }
15340
15341 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
15342 subclass, attr_list, NULL, DDI_SLEEP);
15343
15344 nvlist_free(attr_list);
15345 return;
15346
15347 error:
15348 nvlist_free(attr_list);
15349 alloc_failed:
15350 FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
15351 }
15352
15353
15354 static void
15355 fp_log_target_event(fc_local_port_t *port, char *subclass, la_wwn_t tgt_pwwn,
15356 uint32_t port_id)
15357 {
15358 nvlist_t *attr_list;
15359
15360 if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
15361 KM_SLEEP) != DDI_SUCCESS) {
15362 goto alloc_failed;
15363 }
15364
15365 if (nvlist_add_uint32(attr_list, "instance",
15366 port->fp_instance) != DDI_SUCCESS) {
15367 goto error;
15368 }
15369
15370 if (nvlist_add_byte_array(attr_list, "port-wwn",
15371 port->fp_service_params.nport_ww_name.raw_wwn,
15372 sizeof (la_wwn_t)) != DDI_SUCCESS) {
15373 goto error;
15374 }
15375
15376 if (nvlist_add_byte_array(attr_list, "target-port-wwn",
15377 tgt_pwwn.raw_wwn, sizeof (la_wwn_t)) != DDI_SUCCESS) {
15378 goto error;
15379 }
15380
15381 if (nvlist_add_uint32(attr_list, "target-port-id",
15382 port_id) != DDI_SUCCESS) {
15383 goto error;
15384 }
15385
15386 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
15387 subclass, attr_list, NULL, DDI_SLEEP);
15388
15389 nvlist_free(attr_list);
15390 return;
15391
15392 error:
15393 nvlist_free(attr_list);
15394 alloc_failed:
15395 FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
15396 }
15397
15398 static uint32_t
15399 fp_map_remote_port_state(uint32_t rm_state)
15400 {
15401 switch (rm_state) {
15402 case PORT_DEVICE_LOGGED_IN:
15403 return (FC_HBA_PORTSTATE_ONLINE);
15404 case PORT_DEVICE_VALID:
15405 case PORT_DEVICE_INVALID:
15406 default:
15407 return (FC_HBA_PORTSTATE_UNKNOWN);
15408 }
15409 }
15410