xref: /titanic_50/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_solaris.c (revision 8f23e9fa8abcb5857661066b954e63400d589b65)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at
9  * http://www.opensource.org/licenses/cddl1.txt.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2004-2012 Emulex. All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #define	DEF_ICFG	1
28 
29 #include <emlxs.h>
30 #include <emlxs_version.h>
31 
32 
33 static char emlxs_copyright[] = EMLXS_COPYRIGHT;
34 char emlxs_revision[] = EMLXS_REVISION;
35 char emlxs_version[] = EMLXS_VERSION;
36 char emlxs_name[] = EMLXS_NAME;
37 char emlxs_label[] = EMLXS_LABEL;
38 
39 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
40 EMLXS_MSG_DEF(EMLXS_SOLARIS_C);
41 
42 #ifdef MENLO_SUPPORT
43 static int32_t  emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp);
44 #endif /* MENLO_SUPPORT */
45 
46 static void	emlxs_fca_attach(emlxs_hba_t *hba);
47 static void	emlxs_fca_detach(emlxs_hba_t *hba);
48 static void	emlxs_drv_banner(emlxs_hba_t *hba);
49 
50 static int32_t	emlxs_get_props(emlxs_hba_t *hba);
51 static int32_t	emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp,
52 		    uint32_t *pkt_flags);
53 static int32_t	emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp);
54 static int32_t	emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp);
55 static int32_t	emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp);
56 static int32_t	emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp);
57 static int32_t	emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
58 static int32_t	emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp);
59 static int32_t	emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
60 static uint32_t emlxs_add_instance(int32_t ddiinst);
61 static void	emlxs_iodone(emlxs_buf_t *sbp);
62 static int	emlxs_pm_lower_power(dev_info_t *dip);
63 static int	emlxs_pm_raise_power(dev_info_t *dip);
64 static void	emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag,
65 		    uint32_t failed);
66 static void	emlxs_iodone_server(void *arg1, void *arg2, void *arg3);
67 static uint32_t	emlxs_integrity_check(emlxs_hba_t *hba);
68 static uint32_t	emlxs_test(emlxs_hba_t *hba, uint32_t test_code,
69 		    uint32_t args, uint32_t *arg);
70 
71 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
72 static void	emlxs_read_vport_prop(emlxs_hba_t *hba);
73 #endif	/* EMLXS_MODREV3 && EMLXS_MODREV4 */
74 
75 static void	emlxs_mode_init_masks(emlxs_hba_t *hba);
76 
77 
78 extern int
79 emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id);
80 extern int
81 emlxs_select_msiid(emlxs_hba_t *hba);
82 extern void
83 emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba);
84 
85 /*
86  * Driver Entry Routines.
87  */
88 static int32_t	emlxs_detach(dev_info_t *, ddi_detach_cmd_t);
89 static int32_t	emlxs_attach(dev_info_t *, ddi_attach_cmd_t);
90 static int32_t	emlxs_open(dev_t *, int32_t, int32_t, cred_t *);
91 static int32_t	emlxs_close(dev_t, int32_t, int32_t, cred_t *);
92 static int32_t	emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t,
93 		    cred_t *, int32_t *);
94 static int32_t	emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
95 
96 
97 /*
98  * FC_AL Transport Functions.
99  */
100 static opaque_t	emlxs_fca_bind_port(dev_info_t *, fc_fca_port_info_t *,
101 		    fc_fca_bind_info_t *);
102 static void	emlxs_fca_unbind_port(opaque_t);
103 static void	emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *);
104 static int32_t	emlxs_fca_get_cap(opaque_t, char *, void *);
105 static int32_t	emlxs_fca_set_cap(opaque_t, char *, void *);
106 static int32_t	emlxs_fca_get_map(opaque_t, fc_lilpmap_t *);
107 static int32_t	emlxs_fca_ub_alloc(opaque_t, uint64_t *, uint32_t,
108 		    uint32_t *, uint32_t);
109 static int32_t	emlxs_fca_ub_free(opaque_t, uint32_t, uint64_t *);
110 
111 static opaque_t	emlxs_fca_get_device(opaque_t, fc_portid_t);
112 static int32_t	emlxs_fca_notify(opaque_t, uint32_t);
113 static void	emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *);
114 
115 /*
116  * Driver Internal Functions.
117  */
118 
119 static void	emlxs_poll(emlxs_port_t *, emlxs_buf_t *);
120 static int32_t	emlxs_power(dev_info_t *, int32_t, int32_t);
121 #ifdef EMLXS_I386
122 #ifdef S11
123 static int32_t	emlxs_quiesce(dev_info_t *);
124 #endif /* S11 */
125 #endif /* EMLXS_I386 */
126 static int32_t	emlxs_hba_resume(dev_info_t *);
127 static int32_t	emlxs_hba_suspend(dev_info_t *);
128 static int32_t	emlxs_hba_detach(dev_info_t *);
129 static int32_t	emlxs_hba_attach(dev_info_t *);
130 static void	emlxs_lock_destroy(emlxs_hba_t *);
131 static void	emlxs_lock_init(emlxs_hba_t *);
132 
133 char *emlxs_pm_components[] = {
134 	"NAME=" DRIVER_NAME "000",
135 	"0=Device D3 State",
136 	"1=Device D0 State"
137 };
138 
139 
140 /*
141  * Default emlx dma limits
142  */
143 ddi_dma_lim_t emlxs_dma_lim = {
144 	(uint32_t)0,				/* dlim_addr_lo */
145 	(uint32_t)0xffffffff,			/* dlim_addr_hi */
146 	(uint_t)0x00ffffff,			/* dlim_cntr_max */
147 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dlim_burstsizes */
148 	1,					/* dlim_minxfer */
149 	0x00ffffff				/* dlim_dmaspeed */
150 };
151 
152 /*
153  * Be careful when using these attributes; the defaults listed below are
154  * (almost) the most general case, permitting allocation in almost any
155  * way supported by the LightPulse family.  The sole exception is the
156  * alignment specified as requiring memory allocation on a 4-byte boundary;
157  * the Lightpulse can DMA memory on any byte boundary.
158  *
159  * The LightPulse family currently is limited to 16M transfers;
160  * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields.
161  */
162 ddi_dma_attr_t emlxs_dma_attr = {
163 	DMA_ATTR_V0,				/* dma_attr_version */
164 	(uint64_t)0,				/* dma_attr_addr_lo */
165 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
166 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
167 	1,					/* dma_attr_align */
168 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
169 	1,					/* dma_attr_minxfer */
170 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
171 	(uint64_t)0xffffffff,			/* dma_attr_seg */
172 	1,					/* dma_attr_sgllen */
173 	1,					/* dma_attr_granular */
174 	0					/* dma_attr_flags */
175 };
176 
177 ddi_dma_attr_t emlxs_dma_attr_ro = {
178 	DMA_ATTR_V0,				/* dma_attr_version */
179 	(uint64_t)0,				/* dma_attr_addr_lo */
180 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
181 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
182 	1,					/* dma_attr_align */
183 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
184 	1,					/* dma_attr_minxfer */
185 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
186 	(uint64_t)0xffffffff,			/* dma_attr_seg */
187 	1,					/* dma_attr_sgllen */
188 	1,					/* dma_attr_granular */
189 	DDI_DMA_RELAXED_ORDERING		/* dma_attr_flags */
190 };
191 
192 ddi_dma_attr_t emlxs_dma_attr_1sg = {
193 	DMA_ATTR_V0,				/* dma_attr_version */
194 	(uint64_t)0,				/* dma_attr_addr_lo */
195 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
196 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
197 	1,					/* dma_attr_align */
198 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
199 	1,					/* dma_attr_minxfer */
200 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
201 	(uint64_t)0xffffffff,			/* dma_attr_seg */
202 	1,					/* dma_attr_sgllen */
203 	1,					/* dma_attr_granular */
204 	0					/* dma_attr_flags */
205 };
206 
207 #if (EMLXS_MODREV >= EMLXS_MODREV3)
208 ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = {
209 	DMA_ATTR_V0,				/* dma_attr_version */
210 	(uint64_t)0,				/* dma_attr_addr_lo */
211 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
212 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
213 	1,					/* dma_attr_align */
214 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
215 	1,					/* dma_attr_minxfer */
216 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
217 	(uint64_t)0xffffffff,			/* dma_attr_seg */
218 	1,					/* dma_attr_sgllen */
219 	1,					/* dma_attr_granular */
220 	0					/* dma_attr_flags */
221 };
222 #endif	/* >= EMLXS_MODREV3 */
223 
224 /*
225  * DDI access attributes for device
226  */
227 ddi_device_acc_attr_t emlxs_dev_acc_attr = {
228 	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version		*/
229 	DDI_STRUCTURE_LE_ACC,	/* PCI is Little Endian		*/
230 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder	*/
231 	DDI_DEFAULT_ACC		/* devacc_attr_access		*/
232 };
233 
234 /*
235  * DDI access attributes for data
236  */
237 ddi_device_acc_attr_t emlxs_data_acc_attr = {
238 	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version		*/
239 	DDI_NEVERSWAP_ACC,	/* don't swap for Data		*/
240 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder	*/
241 	DDI_DEFAULT_ACC		/* devacc_attr_access		*/
242 };
243 
244 /*
245  * Fill in the FC Transport structure,
246  * as defined in the Fibre Channel Transport Programmming Guide.
247  */
248 #if (EMLXS_MODREV == EMLXS_MODREV5)
249 	static fc_fca_tran_t emlxs_fca_tran = {
250 	FCTL_FCA_MODREV_5, 		/* fca_version, with SUN NPIV support */
251 	MAX_VPORTS,			/* fca numerb of ports */
252 	sizeof (emlxs_buf_t),		/* fca pkt size */
253 	2048,				/* fca cmd max */
254 	&emlxs_dma_lim,			/* fca dma limits */
255 	0,				/* fca iblock, to be filled in later */
256 	&emlxs_dma_attr,		/* fca dma attributes */
257 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
258 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
259 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
260 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
261 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
262 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
263 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
264 	&emlxs_data_acc_attr,   	/* fca access atributes */
265 	0,				/* fca_num_npivports */
266 	{0, 0, 0, 0, 0, 0, 0, 0},	/* Physical port WWPN */
267 	emlxs_fca_bind_port,
268 	emlxs_fca_unbind_port,
269 	emlxs_fca_pkt_init,
270 	emlxs_fca_pkt_uninit,
271 	emlxs_fca_transport,
272 	emlxs_fca_get_cap,
273 	emlxs_fca_set_cap,
274 	emlxs_fca_get_map,
275 	emlxs_fca_transport,
276 	emlxs_fca_ub_alloc,
277 	emlxs_fca_ub_free,
278 	emlxs_fca_ub_release,
279 	emlxs_fca_pkt_abort,
280 	emlxs_fca_reset,
281 	emlxs_fca_port_manage,
282 	emlxs_fca_get_device,
283 	emlxs_fca_notify
284 };
285 #endif	/* EMLXS_MODREV5 */
286 
287 
288 #if (EMLXS_MODREV == EMLXS_MODREV4)
289 static fc_fca_tran_t emlxs_fca_tran = {
290 	FCTL_FCA_MODREV_4,		/* fca_version */
291 	MAX_VPORTS,			/* fca numerb of ports */
292 	sizeof (emlxs_buf_t),		/* fca pkt size */
293 	2048,				/* fca cmd max */
294 	&emlxs_dma_lim,			/* fca dma limits */
295 	0,				/* fca iblock, to be filled in later */
296 	&emlxs_dma_attr,		/* fca dma attributes */
297 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
298 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
299 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
300 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
301 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
302 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
303 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
304 	&emlxs_data_acc_attr,		/* fca access atributes */
305 	emlxs_fca_bind_port,
306 	emlxs_fca_unbind_port,
307 	emlxs_fca_pkt_init,
308 	emlxs_fca_pkt_uninit,
309 	emlxs_fca_transport,
310 	emlxs_fca_get_cap,
311 	emlxs_fca_set_cap,
312 	emlxs_fca_get_map,
313 	emlxs_fca_transport,
314 	emlxs_fca_ub_alloc,
315 	emlxs_fca_ub_free,
316 	emlxs_fca_ub_release,
317 	emlxs_fca_pkt_abort,
318 	emlxs_fca_reset,
319 	emlxs_fca_port_manage,
320 	emlxs_fca_get_device,
321 	emlxs_fca_notify
322 };
323 #endif	/* EMLXS_MODEREV4 */
324 
325 
326 #if (EMLXS_MODREV == EMLXS_MODREV3)
327 static fc_fca_tran_t emlxs_fca_tran = {
328 	FCTL_FCA_MODREV_3,		/* fca_version */
329 	MAX_VPORTS,			/* fca numerb of ports */
330 	sizeof (emlxs_buf_t),		/* fca pkt size */
331 	2048,				/* fca cmd max */
332 	&emlxs_dma_lim,			/* fca dma limits */
333 	0,				/* fca iblock, to be filled in later */
334 	&emlxs_dma_attr,		/* fca dma attributes */
335 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
336 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
337 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
338 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
339 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
340 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
341 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
342 	&emlxs_data_acc_attr,		/* fca access atributes */
343 	emlxs_fca_bind_port,
344 	emlxs_fca_unbind_port,
345 	emlxs_fca_pkt_init,
346 	emlxs_fca_pkt_uninit,
347 	emlxs_fca_transport,
348 	emlxs_fca_get_cap,
349 	emlxs_fca_set_cap,
350 	emlxs_fca_get_map,
351 	emlxs_fca_transport,
352 	emlxs_fca_ub_alloc,
353 	emlxs_fca_ub_free,
354 	emlxs_fca_ub_release,
355 	emlxs_fca_pkt_abort,
356 	emlxs_fca_reset,
357 	emlxs_fca_port_manage,
358 	emlxs_fca_get_device,
359 	emlxs_fca_notify
360 };
361 #endif	/* EMLXS_MODREV3 */
362 
363 
364 #if (EMLXS_MODREV == EMLXS_MODREV2)
365 static fc_fca_tran_t emlxs_fca_tran = {
366 	FCTL_FCA_MODREV_2,		/* fca_version */
367 	MAX_VPORTS,			/* number of ports */
368 	sizeof (emlxs_buf_t),		/* pkt size */
369 	2048,				/* max cmds */
370 	&emlxs_dma_lim,			/* DMA limits */
371 	0,				/* iblock, to be filled in later */
372 	&emlxs_dma_attr,		/* dma attributes */
373 	&emlxs_data_acc_attr,		/* access atributes */
374 	emlxs_fca_bind_port,
375 	emlxs_fca_unbind_port,
376 	emlxs_fca_pkt_init,
377 	emlxs_fca_pkt_uninit,
378 	emlxs_fca_transport,
379 	emlxs_fca_get_cap,
380 	emlxs_fca_set_cap,
381 	emlxs_fca_get_map,
382 	emlxs_fca_transport,
383 	emlxs_fca_ub_alloc,
384 	emlxs_fca_ub_free,
385 	emlxs_fca_ub_release,
386 	emlxs_fca_pkt_abort,
387 	emlxs_fca_reset,
388 	emlxs_fca_port_manage,
389 	emlxs_fca_get_device,
390 	emlxs_fca_notify
391 };
392 #endif	/* EMLXS_MODREV2 */
393 
394 
395 /*
396  * state pointer which the implementation uses as a place to
397  * hang a set of per-driver structures;
398  *
399  */
400 void		*emlxs_soft_state = NULL;
401 
402 /*
403  * Driver Global variables.
404  */
405 int32_t		emlxs_scsi_reset_delay = 3000;	/* milliseconds */
406 
407 emlxs_device_t  emlxs_device;
408 
409 uint32_t	emlxs_instance[MAX_FC_BRDS];	/* uses emlxs_device.lock */
410 uint32_t	emlxs_instance_count = 0;	/* uses emlxs_device.lock */
411 uint32_t	emlxs_instance_flag = 0;	/* uses emlxs_device.lock */
412 #define	EMLXS_FW_SHOW		0x00000001
413 
414 
415 /*
416  * CB ops vector.  Used for administration only.
417  */
418 static struct cb_ops emlxs_cb_ops = {
419 	emlxs_open,	/* cb_open	*/
420 	emlxs_close,	/* cb_close	*/
421 	nodev,		/* cb_strategy	*/
422 	nodev,		/* cb_print	*/
423 	nodev,		/* cb_dump	*/
424 	nodev,		/* cb_read	*/
425 	nodev,		/* cb_write	*/
426 	emlxs_ioctl,	/* cb_ioctl	*/
427 	nodev,		/* cb_devmap	*/
428 	nodev,		/* cb_mmap	*/
429 	nodev,		/* cb_segmap	*/
430 	nochpoll,	/* cb_chpoll	*/
431 	ddi_prop_op,	/* cb_prop_op	*/
432 	0,		/* cb_stream	*/
433 #ifdef _LP64
434 	D_64BIT | D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
435 #else
436 	D_HOTPLUG | D_MP | D_NEW,		/* cb_flag */
437 #endif
438 	CB_REV,		/* rev		*/
439 	nodev,		/* cb_aread	*/
440 	nodev		/* cb_awrite	*/
441 };
442 
443 static struct dev_ops emlxs_ops = {
444 	DEVO_REV,	/* rev */
445 	0,	/* refcnt */
446 	emlxs_info,	/* getinfo	*/
447 	nulldev,	/* identify	*/
448 	nulldev,	/* probe	*/
449 	emlxs_attach,	/* attach	*/
450 	emlxs_detach,	/* detach	*/
451 	nodev,		/* reset	*/
452 	&emlxs_cb_ops,	/* devo_cb_ops	*/
453 	NULL,		/* devo_bus_ops */
454 	emlxs_power,	/* power ops	*/
455 #ifdef EMLXS_I386
456 #ifdef S11
457 	emlxs_quiesce,	/* quiesce	*/
458 #endif /* S11 */
459 #endif /* EMLXS_I386 */
460 };
461 
462 #include <sys/modctl.h>
463 extern struct mod_ops mod_driverops;
464 
465 #ifdef SAN_DIAG_SUPPORT
466 extern kmutex_t		emlxs_sd_bucket_mutex;
467 extern sd_bucket_info_t	emlxs_sd_bucket;
468 #endif /* SAN_DIAG_SUPPORT */
469 
470 /*
471  * Module linkage information for the kernel.
472  */
473 static struct modldrv emlxs_modldrv = {
474 	&mod_driverops,	/* module type - driver */
475 	emlxs_name,	/* module name */
476 	&emlxs_ops,	/* driver ops */
477 };
478 
479 
480 /*
481  * Driver module linkage structure
482  */
483 static struct modlinkage emlxs_modlinkage = {
484 	MODREV_1,	/* ml_rev - must be MODREV_1 */
485 	&emlxs_modldrv,	/* ml_linkage */
486 	NULL	/* end of driver linkage */
487 };
488 
489 
490 /* We only need to add entries for non-default return codes. */
491 /* Entries do not need to be in order. */
492 /* Default:	FC_PKT_TRAN_ERROR,	FC_REASON_ABORTED, */
493 /*		FC_EXPLN_NONE,		FC_ACTION_RETRYABLE */
494 
495 emlxs_xlat_err_t emlxs_iostat_tbl[] = {
496 /* 	{f/w code, pkt_state, pkt_reason, 	*/
497 /* 		pkt_expln, pkt_action}		*/
498 
499 	/* 0x00 - Do not remove */
500 	{IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
501 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
502 
503 	/* 0x01 - Do not remove */
504 	{IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
505 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
506 
507 	/* 0x02 */
508 	{IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
509 		FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},
510 
511 	/*
512 	 * This is a default entry.
513 	 * The real codes are written dynamically in emlxs_els.c
514 	 */
515 	/* 0x09 */
516 	{IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,
517 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
518 
519 	/* Special error code */
520 	/* 0x10 */
521 	{IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
522 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
523 
524 	/* Special error code */
525 	/* 0x11 */
526 	{IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
527 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
528 
529 	/* Special error code */
530 	/* 0x12 */
531 	{IOSTAT_RSP_INVALID, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
532 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
533 
534 	/* CLASS 2 only */
535 	/* 0x04 */
536 	{IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
537 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
538 
539 	/* CLASS 2 only */
540 	/* 0x05 */
541 	{IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
542 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
543 
544 	/* CLASS 2 only */
545 	/* 0x06 */
546 	{IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
547 		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
548 
549 	/* CLASS 2 only */
550 	/* 0x07 */
551 	{IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
552 		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
553 };
554 
555 #define	IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))
556 
557 
558 /* We only need to add entries for non-default return codes. */
559 /* Entries do not need to be in order. */
560 /* Default:	FC_PKT_TRAN_ERROR,	FC_REASON_ABORTED, */
561 /*		FC_EXPLN_NONE,		FC_ACTION_RETRYABLE} */
562 
563 emlxs_xlat_err_t emlxs_ioerr_tbl[] = {
564 /*	{f/w code, pkt_state, pkt_reason,	*/
565 /*		pkt_expln, pkt_action}		*/
566 
567 	/* 0x01 */
568 	{IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
569 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
570 
571 	/* 0x02 */
572 	{IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
573 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
574 
575 	/* 0x04 */
576 	{IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
577 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
578 
579 	/* 0x05 */
580 	{IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
581 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
582 
583 	/* 0x06 */
584 	{IOERR_ILLEGAL_COMMAND,	FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
585 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
586 
587 	/* 0x07 */
588 	{IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT,	FC_REASON_XCHG_DROPPED,
589 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
590 
591 	/* 0x08 */
592 	{IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT,	FC_REASON_ILLEGAL_REQ,
593 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
594 
595 	/* 0x0B */
596 	{IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
597 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
598 
599 	/* 0x0D */
600 	{IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT,	FC_REASON_DMA_ERROR,
601 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
602 
603 	/* 0x0E */
604 	{IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT,	FC_REASON_DMA_ERROR,
605 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
606 
607 	/* 0x0F */
608 	{IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT,	FC_REASON_ILLEGAL_FRAME,
609 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
610 
611 	/* 0x11 */
612 	{IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT,	FC_REASON_NOMEM,
613 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
614 
615 	/* 0x13 */
616 	{IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH,
617 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
618 
619 	/* 0x14 */
620 	{IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED,
621 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
622 
623 	/* 0x15 */
624 	{IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
625 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
626 
627 	/* 0x16 */
628 	{IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
629 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
630 
631 	/* 0x17 */
632 	{IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT,
633 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
634 
635 	/* 0x18 */
636 	{IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL,
637 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
638 
639 	/* 0x1A */
640 	{IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
641 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
642 
643 	/* 0x21 */
644 	{IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID,
645 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
646 
647 	/* Occurs at link down */
648 	/* 0x28 */
649 	{IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
650 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
651 
652 	/* 0xF0 */
653 	{IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
654 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
655 };
656 
657 #define	IOERR_MAX    (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t))
658 
659 
660 
661 emlxs_table_t emlxs_error_table[] = {
662 	{IOERR_SUCCESS, "No error."},
663 	{IOERR_MISSING_CONTINUE, "Missing continue."},
664 	{IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."},
665 	{IOERR_INTERNAL_ERROR, "Internal error."},
666 	{IOERR_INVALID_RPI, "Invalid RPI."},
667 	{IOERR_NO_XRI, "No XRI."},
668 	{IOERR_ILLEGAL_COMMAND, "Illegal command."},
669 	{IOERR_XCHG_DROPPED, "Exchange dropped."},
670 	{IOERR_ILLEGAL_FIELD, "Illegal field."},
671 	{IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
672 	{IOERR_TX_DMA_FAILED, "TX DMA failed."},
673 	{IOERR_RX_DMA_FAILED, "RX DMA failed."},
674 	{IOERR_ILLEGAL_FRAME, "Illegal frame."},
675 	{IOERR_NO_RESOURCES, "No resources."},
676 	{IOERR_ILLEGAL_LENGTH, "Illegal length."},
677 	{IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
678 	{IOERR_ABORT_IN_PROGRESS, "Abort in progess."},
679 	{IOERR_ABORT_REQUESTED, "Abort requested."},
680 	{IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
681 	{IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
682 	{IOERR_RING_RESET, "Ring reset."},
683 	{IOERR_LINK_DOWN, "Link down."},
684 	{IOERR_CORRUPTED_DATA, "Corrupted data."},
685 	{IOERR_CORRUPTED_RPI, "Corrupted RPI."},
686 	{IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
687 	{IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
688 	{IOERR_DUP_FRAME, "Duplicate frame."},
689 	{IOERR_LINK_CONTROL_FRAME, "Link control frame."},
690 	{IOERR_BAD_HOST_ADDRESS, "Bad host address."},
691 	{IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
692 	{IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
693 	{IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
694 	{IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
695 	{IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
696 	{IOERR_XRIBUF_WAITING, "XRI buffer shortage"},
697 	{IOERR_XRIBUF_MISSING, "XRI buffer missing"},
698 	{IOERR_ROFFSET_INVAL, "Relative offset invalid."},
699 	{IOERR_ROFFSET_MISSING, "Relative offset missing."},
700 	{IOERR_INSUF_BUFFER, "Buffer too small."},
701 	{IOERR_MISSING_SI, "ELS frame missing SI"},
702 	{IOERR_MISSING_ES, "Exhausted burst without ES"},
703 	{IOERR_INCOMP_XFER, "Transfer incomplete."},
704 	{IOERR_ABORT_TIMEOUT, "Abort timeout."}
705 
706 };	/* emlxs_error_table */
707 
708 
709 emlxs_table_t emlxs_state_table[] = {
710 	{IOSTAT_SUCCESS, "Success."},
711 	{IOSTAT_FCP_RSP_ERROR, "FCP response error."},
712 	{IOSTAT_REMOTE_STOP, "Remote stop."},
713 	{IOSTAT_LOCAL_REJECT, "Local reject."},
714 	{IOSTAT_NPORT_RJT, "NPort reject."},
715 	{IOSTAT_FABRIC_RJT, "Fabric reject."},
716 	{IOSTAT_NPORT_BSY, "Nport busy."},
717 	{IOSTAT_FABRIC_BSY, "Fabric busy."},
718 	{IOSTAT_INTERMED_RSP, "Intermediate response."},
719 	{IOSTAT_LS_RJT, "LS reject."},
720 	{IOSTAT_CMD_REJECT,		"Cmd reject."},
721 	{IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
722 	{IOSTAT_NEED_BUFF_ENTRY, "Need buffer entry."},
723 	{IOSTAT_DATA_UNDERRUN, "Data underrun."},
724 	{IOSTAT_DATA_OVERRUN,  "Data overrun."},
725 	{IOSTAT_RSP_INVALID,  "Response Invalid."},
726 
727 };	/* emlxs_state_table */
728 
729 
730 #ifdef MENLO_SUPPORT
731 emlxs_table_t emlxs_menlo_cmd_table[] = {
732 	{MENLO_CMD_INITIALIZE,		"MENLO_INIT"},
733 	{MENLO_CMD_FW_DOWNLOAD,		"MENLO_FW_DOWNLOAD"},
734 	{MENLO_CMD_READ_MEMORY,		"MENLO_READ_MEM"},
735 	{MENLO_CMD_WRITE_MEMORY,	"MENLO_WRITE_MEM"},
736 	{MENLO_CMD_FTE_INSERT,		"MENLO_FTE_INSERT"},
737 	{MENLO_CMD_FTE_DELETE,		"MENLO_FTE_DELETE"},
738 
739 	{MENLO_CMD_GET_INIT,		"MENLO_GET_INIT"},
740 	{MENLO_CMD_GET_CONFIG,		"MENLO_GET_CONFIG"},
741 	{MENLO_CMD_GET_PORT_STATS,	"MENLO_GET_PORT_STATS"},
742 	{MENLO_CMD_GET_LIF_STATS,	"MENLO_GET_LIF_STATS"},
743 	{MENLO_CMD_GET_ASIC_STATS,	"MENLO_GET_ASIC_STATS"},
744 	{MENLO_CMD_GET_LOG_CONFIG,	"MENLO_GET_LOG_CFG"},
745 	{MENLO_CMD_GET_LOG_DATA,	"MENLO_GET_LOG_DATA"},
746 	{MENLO_CMD_GET_PANIC_LOG,	"MENLO_GET_PANIC_LOG"},
747 	{MENLO_CMD_GET_LB_MODE,		"MENLO_GET_LB_MODE"},
748 
749 	{MENLO_CMD_SET_PAUSE,		"MENLO_SET_PAUSE"},
750 	{MENLO_CMD_SET_FCOE_COS,	"MENLO_SET_FCOE_COS"},
751 	{MENLO_CMD_SET_UIF_PORT_TYPE,	"MENLO_SET_UIF_TYPE"},
752 
753 	{MENLO_CMD_DIAGNOSTICS,		"MENLO_DIAGNOSTICS"},
754 	{MENLO_CMD_LOOPBACK,		"MENLO_LOOPBACK"},
755 
756 	{MENLO_CMD_RESET,		"MENLO_RESET"},
757 	{MENLO_CMD_SET_MODE,		"MENLO_SET_MODE"}
758 
759 };	/* emlxs_menlo_cmd_table */
760 
761 emlxs_table_t emlxs_menlo_rsp_table[] = {
762 	{MENLO_RSP_SUCCESS,		"SUCCESS"},
763 	{MENLO_ERR_FAILED,		"FAILED"},
764 	{MENLO_ERR_INVALID_CMD,		"INVALID_CMD"},
765 	{MENLO_ERR_INVALID_CREDIT,	"INVALID_CREDIT"},
766 	{MENLO_ERR_INVALID_SIZE,	"INVALID_SIZE"},
767 	{MENLO_ERR_INVALID_ADDRESS,	"INVALID_ADDRESS"},
768 	{MENLO_ERR_INVALID_CONTEXT,	"INVALID_CONTEXT"},
769 	{MENLO_ERR_INVALID_LENGTH,	"INVALID_LENGTH"},
770 	{MENLO_ERR_INVALID_TYPE,	"INVALID_TYPE"},
771 	{MENLO_ERR_INVALID_DATA,	"INVALID_DATA"},
772 	{MENLO_ERR_INVALID_VALUE1,	"INVALID_VALUE1"},
773 	{MENLO_ERR_INVALID_VALUE2,	"INVALID_VALUE2"},
774 	{MENLO_ERR_INVALID_MASK,	"INVALID_MASK"},
775 	{MENLO_ERR_CHECKSUM,		"CHECKSUM_ERROR"},
776 	{MENLO_ERR_UNKNOWN_FCID,	"UNKNOWN_FCID"},
777 	{MENLO_ERR_UNKNOWN_WWN,		"UNKNOWN_WWN"},
778 	{MENLO_ERR_BUSY,		"BUSY"},
779 
780 };	/* emlxs_menlo_rsp_table */
781 
782 #endif /* MENLO_SUPPORT */
783 
784 
785 emlxs_table_t emlxs_mscmd_table[] = {
786 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
787 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
788 	{MS_GTIN, "MS_GTIN"},
789 	{MS_GIEL, "MS_GIEL"},
790 	{MS_GIET, "MS_GIET"},
791 	{MS_GDID, "MS_GDID"},
792 	{MS_GMID, "MS_GMID"},
793 	{MS_GFN, "MS_GFN"},
794 	{MS_GIELN, "MS_GIELN"},
795 	{MS_GMAL, "MS_GMAL"},
796 	{MS_GIEIL, "MS_GIEIL"},
797 	{MS_GPL, "MS_GPL"},
798 	{MS_GPT, "MS_GPT"},
799 	{MS_GPPN, "MS_GPPN"},
800 	{MS_GAPNL, "MS_GAPNL"},
801 	{MS_GPS, "MS_GPS"},
802 	{MS_GPSC, "MS_GPSC"},
803 	{MS_GATIN, "MS_GATIN"},
804 	{MS_GSES, "MS_GSES"},
805 	{MS_GPLNL, "MS_GPLNL"},
806 	{MS_GPLT, "MS_GPLT"},
807 	{MS_GPLML, "MS_GPLML"},
808 	{MS_GPAB, "MS_GPAB"},
809 	{MS_GNPL, "MS_GNPL"},
810 	{MS_GPNL, "MS_GPNL"},
811 	{MS_GPFCP, "MS_GPFCP"},
812 	{MS_GPLI, "MS_GPLI"},
813 	{MS_GNID, "MS_GNID"},
814 	{MS_RIELN, "MS_RIELN"},
815 	{MS_RPL, "MS_RPL"},
816 	{MS_RPLN, "MS_RPLN"},
817 	{MS_RPLT, "MS_RPLT"},
818 	{MS_RPLM, "MS_RPLM"},
819 	{MS_RPAB, "MS_RPAB"},
820 	{MS_RPFCP, "MS_RPFCP"},
821 	{MS_RPLI, "MS_RPLI"},
822 	{MS_DPL, "MS_DPL"},
823 	{MS_DPLN, "MS_DPLN"},
824 	{MS_DPLM, "MS_DPLM"},
825 	{MS_DPLML, "MS_DPLML"},
826 	{MS_DPLI, "MS_DPLI"},
827 	{MS_DPAB, "MS_DPAB"},
828 	{MS_DPALL, "MS_DPALL"}
829 
830 };	/* emlxs_mscmd_table */
831 
832 
833 emlxs_table_t emlxs_ctcmd_table[] = {
834 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
835 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
836 	{SLI_CTNS_GA_NXT, "GA_NXT"},
837 	{SLI_CTNS_GPN_ID, "GPN_ID"},
838 	{SLI_CTNS_GNN_ID, "GNN_ID"},
839 	{SLI_CTNS_GCS_ID, "GCS_ID"},
840 	{SLI_CTNS_GFT_ID, "GFT_ID"},
841 	{SLI_CTNS_GSPN_ID, "GSPN_ID"},
842 	{SLI_CTNS_GPT_ID, "GPT_ID"},
843 	{SLI_CTNS_GID_PN, "GID_PN"},
844 	{SLI_CTNS_GID_NN, "GID_NN"},
845 	{SLI_CTNS_GIP_NN, "GIP_NN"},
846 	{SLI_CTNS_GIPA_NN, "GIPA_NN"},
847 	{SLI_CTNS_GSNN_NN, "GSNN_NN"},
848 	{SLI_CTNS_GNN_IP, "GNN_IP"},
849 	{SLI_CTNS_GIPA_IP, "GIPA_IP"},
850 	{SLI_CTNS_GID_FT, "GID_FT"},
851 	{SLI_CTNS_GID_PT, "GID_PT"},
852 	{SLI_CTNS_RPN_ID, "RPN_ID"},
853 	{SLI_CTNS_RNN_ID, "RNN_ID"},
854 	{SLI_CTNS_RCS_ID, "RCS_ID"},
855 	{SLI_CTNS_RFT_ID, "RFT_ID"},
856 	{SLI_CTNS_RSPN_ID, "RSPN_ID"},
857 	{SLI_CTNS_RPT_ID, "RPT_ID"},
858 	{SLI_CTNS_RIP_NN, "RIP_NN"},
859 	{SLI_CTNS_RIPA_NN, "RIPA_NN"},
860 	{SLI_CTNS_RSNN_NN, "RSNN_NN"},
861 	{SLI_CTNS_DA_ID, "DA_ID"},
862 	{SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
863 
864 };	/* emlxs_ctcmd_table */
865 
866 
867 
868 emlxs_table_t emlxs_rmcmd_table[] = {
869 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
870 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
871 	{CT_OP_GSAT, "RM_GSAT"},
872 	{CT_OP_GHAT, "RM_GHAT"},
873 	{CT_OP_GPAT, "RM_GPAT"},
874 	{CT_OP_GDAT, "RM_GDAT"},
875 	{CT_OP_GPST, "RM_GPST"},
876 	{CT_OP_GDP, "RM_GDP"},
877 	{CT_OP_GDPG, "RM_GDPG"},
878 	{CT_OP_GEPS, "RM_GEPS"},
879 	{CT_OP_GLAT, "RM_GLAT"},
880 	{CT_OP_SSAT, "RM_SSAT"},
881 	{CT_OP_SHAT, "RM_SHAT"},
882 	{CT_OP_SPAT, "RM_SPAT"},
883 	{CT_OP_SDAT, "RM_SDAT"},
884 	{CT_OP_SDP, "RM_SDP"},
885 	{CT_OP_SBBS, "RM_SBBS"},
886 	{CT_OP_RPST, "RM_RPST"},
887 	{CT_OP_VFW, "RM_VFW"},
888 	{CT_OP_DFW, "RM_DFW"},
889 	{CT_OP_RES, "RM_RES"},
890 	{CT_OP_RHD, "RM_RHD"},
891 	{CT_OP_UFW, "RM_UFW"},
892 	{CT_OP_RDP, "RM_RDP"},
893 	{CT_OP_GHDR, "RM_GHDR"},
894 	{CT_OP_CHD, "RM_CHD"},
895 	{CT_OP_SSR, "RM_SSR"},
896 	{CT_OP_RSAT, "RM_RSAT"},
897 	{CT_OP_WSAT, "RM_WSAT"},
898 	{CT_OP_RSAH, "RM_RSAH"},
899 	{CT_OP_WSAH, "RM_WSAH"},
900 	{CT_OP_RACT, "RM_RACT"},
901 	{CT_OP_WACT, "RM_WACT"},
902 	{CT_OP_RKT, "RM_RKT"},
903 	{CT_OP_WKT, "RM_WKT"},
904 	{CT_OP_SSC, "RM_SSC"},
905 	{CT_OP_QHBA, "RM_QHBA"},
906 	{CT_OP_GST, "RM_GST"},
907 	{CT_OP_GFTM, "RM_GFTM"},
908 	{CT_OP_SRL, "RM_SRL"},
909 	{CT_OP_SI, "RM_SI"},
910 	{CT_OP_SRC, "RM_SRC"},
911 	{CT_OP_GPB, "RM_GPB"},
912 	{CT_OP_SPB, "RM_SPB"},
913 	{CT_OP_RPB, "RM_RPB"},
914 	{CT_OP_RAPB, "RM_RAPB"},
915 	{CT_OP_GBC, "RM_GBC"},
916 	{CT_OP_GBS, "RM_GBS"},
917 	{CT_OP_SBS, "RM_SBS"},
918 	{CT_OP_GANI, "RM_GANI"},
919 	{CT_OP_GRV, "RM_GRV"},
920 	{CT_OP_GAPBS, "RM_GAPBS"},
921 	{CT_OP_APBC, "RM_APBC"},
922 	{CT_OP_GDT, "RM_GDT"},
923 	{CT_OP_GDLMI, "RM_GDLMI"},
924 	{CT_OP_GANA, "RM_GANA"},
925 	{CT_OP_GDLV, "RM_GDLV"},
926 	{CT_OP_GWUP, "RM_GWUP"},
927 	{CT_OP_GLM, "RM_GLM"},
928 	{CT_OP_GABS, "RM_GABS"},
929 	{CT_OP_SABS, "RM_SABS"},
930 	{CT_OP_RPR, "RM_RPR"},
931 	{SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
932 
933 };	/* emlxs_rmcmd_table */
934 
935 
936 emlxs_table_t emlxs_elscmd_table[] = {
937 	{ELS_CMD_ACC, "ACC"},
938 	{ELS_CMD_LS_RJT, "LS_RJT"},
939 	{ELS_CMD_PLOGI, "PLOGI"},
940 	{ELS_CMD_FLOGI, "FLOGI"},
941 	{ELS_CMD_LOGO, "LOGO"},
942 	{ELS_CMD_ABTX, "ABTX"},
943 	{ELS_CMD_RCS, "RCS"},
944 	{ELS_CMD_RES, "RES"},
945 	{ELS_CMD_RSS, "RSS"},
946 	{ELS_CMD_RSI, "RSI"},
947 	{ELS_CMD_ESTS, "ESTS"},
948 	{ELS_CMD_ESTC, "ESTC"},
949 	{ELS_CMD_ADVC, "ADVC"},
950 	{ELS_CMD_RTV, "RTV"},
951 	{ELS_CMD_RLS, "RLS"},
952 	{ELS_CMD_ECHO, "ECHO"},
953 	{ELS_CMD_TEST, "TEST"},
954 	{ELS_CMD_RRQ, "RRQ"},
955 	{ELS_CMD_REC, "REC"},
956 	{ELS_CMD_PRLI, "PRLI"},
957 	{ELS_CMD_PRLO, "PRLO"},
958 	{ELS_CMD_SCN, "SCN"},
959 	{ELS_CMD_TPLS, "TPLS"},
960 	{ELS_CMD_GPRLO, "GPRLO"},
961 	{ELS_CMD_GAID, "GAID"},
962 	{ELS_CMD_FACT, "FACT"},
963 	{ELS_CMD_FDACT, "FDACT"},
964 	{ELS_CMD_NACT, "NACT"},
965 	{ELS_CMD_NDACT, "NDACT"},
966 	{ELS_CMD_QoSR, "QoSR"},
967 	{ELS_CMD_RVCS, "RVCS"},
968 	{ELS_CMD_PDISC, "PDISC"},
969 	{ELS_CMD_FDISC, "FDISC"},
970 	{ELS_CMD_ADISC, "ADISC"},
971 	{ELS_CMD_FARP, "FARP"},
972 	{ELS_CMD_FARPR, "FARPR"},
973 	{ELS_CMD_FAN, "FAN"},
974 	{ELS_CMD_RSCN, "RSCN"},
975 	{ELS_CMD_SCR, "SCR"},
976 	{ELS_CMD_LINIT, "LINIT"},
977 	{ELS_CMD_RNID, "RNID"},
978 	{ELS_CMD_AUTH, "AUTH"}
979 
980 };	/* emlxs_elscmd_table */
981 
982 
983 emlxs_table_t emlxs_mode_table[] = {
984 	{MODE_NONE, "NONE"},
985 	{MODE_INITIATOR, "INITIATOR"},
986 	{MODE_TARGET, "TARGET"},
987 	{MODE_ALL, "INITIATOR | TARGET"}
988 };	/* emlxs_mode_table */
989 
990 /*
991  *
992  *	Device Driver Entry Routines
993  *
994  */
995 
996 #ifdef MODSYM_SUPPORT
997 static void emlxs_fca_modclose();
998 static int  emlxs_fca_modopen();
999 emlxs_modsym_t emlxs_modsym;	/* uses emlxs_device.lock */
1000 
1001 static int
emlxs_fca_modopen()1002 emlxs_fca_modopen()
1003 {
1004 	int err;
1005 
1006 	if (emlxs_modsym.mod_fctl) {
1007 		return (0);
1008 	}
1009 
1010 	/* Leadville (fctl) */
1011 	err = 0;
1012 	emlxs_modsym.mod_fctl =
1013 	    ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err);
1014 	if (!emlxs_modsym.mod_fctl) {
1015 		cmn_err(CE_WARN,
1016 		    "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d",
1017 		    DRIVER_NAME, err);
1018 
1019 		goto failed;
1020 	}
1021 
1022 	err = 0;
1023 	/* Check if the fctl fc_fca_attach is present */
1024 	emlxs_modsym.fc_fca_attach =
1025 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach",
1026 	    &err);
1027 	if ((void *)emlxs_modsym.fc_fca_attach == NULL) {
1028 		cmn_err(CE_WARN,
1029 		    "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
1030 		goto failed;
1031 	}
1032 
1033 	err = 0;
1034 	/* Check if the fctl fc_fca_detach is present */
1035 	emlxs_modsym.fc_fca_detach =
1036 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach",
1037 	    &err);
1038 	if ((void *)emlxs_modsym.fc_fca_detach == NULL) {
1039 		cmn_err(CE_WARN,
1040 		    "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
1041 		goto failed;
1042 	}
1043 
1044 	err = 0;
1045 	/* Check if the fctl fc_fca_init is present */
1046 	emlxs_modsym.fc_fca_init =
1047 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
1048 	if ((void *)emlxs_modsym.fc_fca_init == NULL) {
1049 		cmn_err(CE_WARN,
1050 		    "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
1051 		goto failed;
1052 	}
1053 
1054 	return (0);
1055 
1056 failed:
1057 
1058 	emlxs_fca_modclose();
1059 
1060 	return (1);
1061 
1062 
1063 } /* emlxs_fca_modopen() */
1064 
1065 
1066 static void
emlxs_fca_modclose()1067 emlxs_fca_modclose()
1068 {
1069 	if (emlxs_modsym.mod_fctl) {
1070 		(void) ddi_modclose(emlxs_modsym.mod_fctl);
1071 		emlxs_modsym.mod_fctl = 0;
1072 	}
1073 
1074 	emlxs_modsym.fc_fca_attach = NULL;
1075 	emlxs_modsym.fc_fca_detach = NULL;
1076 	emlxs_modsym.fc_fca_init   = NULL;
1077 
1078 	return;
1079 
1080 } /* emlxs_fca_modclose() */
1081 
1082 #endif /* MODSYM_SUPPORT */
1083 
1084 
1085 
1086 /*
1087  * Global driver initialization, called once when driver is loaded
1088  */
1089 int
_init(void)1090 _init(void)
1091 {
1092 	int ret;
1093 
1094 	/*
1095 	 * First init call for this driver,
1096 	 * so initialize the emlxs_dev_ctl structure.
1097 	 */
1098 	bzero(&emlxs_device, sizeof (emlxs_device));
1099 
1100 #ifdef MODSYM_SUPPORT
1101 	bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
1102 #endif /* MODSYM_SUPPORT */
1103 
1104 	mutex_init(&emlxs_device.lock, NULL, MUTEX_DRIVER, NULL);
1105 
1106 	(void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
1107 	emlxs_device.drv_timestamp = ddi_get_time();
1108 
1109 	for (ret = 0; ret < MAX_FC_BRDS; ret++) {
1110 		emlxs_instance[ret] = (uint32_t)-1;
1111 	}
1112 
1113 	/*
1114 	 * Provide for one ddiinst of the emlxs_dev_ctl structure
1115 	 * for each possible board in the system.
1116 	 */
1117 	if ((ret = ddi_soft_state_init(&emlxs_soft_state,
1118 	    sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
1119 		cmn_err(CE_WARN,
1120 		    "?%s: _init: ddi_soft_state_init failed. rval=%x",
1121 		    DRIVER_NAME, ret);
1122 
1123 		return (ret);
1124 	}
1125 
1126 #ifdef MODSYM_SUPPORT
1127 	/* Open SFS */
1128 	(void) emlxs_fca_modopen();
1129 #endif /* MODSYM_SUPPORT */
1130 
1131 	/* Setup devops for SFS */
1132 	MODSYM(fc_fca_init)(&emlxs_ops);
1133 
1134 	if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
1135 		(void) ddi_soft_state_fini(&emlxs_soft_state);
1136 #ifdef MODSYM_SUPPORT
1137 		/* Close SFS */
1138 		emlxs_fca_modclose();
1139 #endif /* MODSYM_SUPPORT */
1140 
1141 		return (ret);
1142 	}
1143 
1144 #ifdef SAN_DIAG_SUPPORT
1145 	mutex_init(&emlxs_sd_bucket_mutex, NULL, MUTEX_DRIVER, NULL);
1146 #endif /* SAN_DIAG_SUPPORT */
1147 
1148 	return (ret);
1149 
1150 } /* _init() */
1151 
1152 
1153 /*
1154  * Called when driver is unloaded.
1155  */
1156 int
_fini(void)1157 _fini(void)
1158 {
1159 	int ret;
1160 
1161 	if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
1162 		return (ret);
1163 	}
1164 #ifdef MODSYM_SUPPORT
1165 	/* Close SFS */
1166 	emlxs_fca_modclose();
1167 #endif /* MODSYM_SUPPORT */
1168 
1169 	/*
1170 	 * Destroy the soft state structure
1171 	 */
1172 	(void) ddi_soft_state_fini(&emlxs_soft_state);
1173 
1174 	/* Destroy the global device lock */
1175 	mutex_destroy(&emlxs_device.lock);
1176 
1177 #ifdef SAN_DIAG_SUPPORT
1178 	mutex_destroy(&emlxs_sd_bucket_mutex);
1179 #endif /* SAN_DIAG_SUPPORT */
1180 
1181 	return (ret);
1182 
1183 } /* _fini() */
1184 
1185 
1186 
1187 int
_info(struct modinfo * modinfop)1188 _info(struct modinfo *modinfop)
1189 {
1190 
1191 	return (mod_info(&emlxs_modlinkage, modinfop));
1192 
1193 } /* _info() */
1194 
1195 
1196 /*
1197  * Attach an ddiinst of an emlx host adapter.
1198  * Allocate data structures, initialize the adapter and we're ready to fly.
1199  */
1200 static int
emlxs_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)1201 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1202 {
1203 	emlxs_hba_t *hba;
1204 	int ddiinst;
1205 	int emlxinst;
1206 	int rval;
1207 
1208 	switch (cmd) {
1209 	case DDI_ATTACH:
1210 		/* If successful this will set EMLXS_PM_IN_ATTACH */
1211 		rval = emlxs_hba_attach(dip);
1212 		break;
1213 
1214 	case DDI_RESUME:
1215 		/* This will resume the driver */
1216 		rval = emlxs_hba_resume(dip);
1217 		break;
1218 
1219 	default:
1220 		rval = DDI_FAILURE;
1221 	}
1222 
1223 	if (rval == DDI_SUCCESS) {
1224 		ddiinst = ddi_get_instance(dip);
1225 		emlxinst = emlxs_get_instance(ddiinst);
1226 		hba = emlxs_device.hba[emlxinst];
1227 
1228 		if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) {
1229 
1230 			/* Enable driver dump feature */
1231 			mutex_enter(&EMLXS_PORT_LOCK);
1232 			hba->flag |= FC_DUMP_SAFE;
1233 			mutex_exit(&EMLXS_PORT_LOCK);
1234 		}
1235 	}
1236 
1237 	return (rval);
1238 
1239 } /* emlxs_attach() */
1240 
1241 
1242 /*
1243  * Detach/prepare driver to unload (see detach(9E)).
1244  */
1245 static int
emlxs_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)1246 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1247 {
1248 	emlxs_hba_t *hba;
1249 	emlxs_port_t *port;
1250 	int ddiinst;
1251 	int emlxinst;
1252 	int rval;
1253 
1254 	ddiinst = ddi_get_instance(dip);
1255 	emlxinst = emlxs_get_instance(ddiinst);
1256 	hba = emlxs_device.hba[emlxinst];
1257 
1258 	if (hba == NULL) {
1259 		cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);
1260 
1261 		return (DDI_FAILURE);
1262 	}
1263 
1264 	if (hba == (emlxs_hba_t *)-1) {
1265 		cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
1266 		    DRIVER_NAME);
1267 
1268 		return (DDI_FAILURE);
1269 	}
1270 
1271 	port = &PPORT;
1272 	rval = DDI_SUCCESS;
1273 
1274 	/* Check driver dump */
1275 	mutex_enter(&EMLXS_PORT_LOCK);
1276 
1277 	if (hba->flag & FC_DUMP_ACTIVE) {
1278 		mutex_exit(&EMLXS_PORT_LOCK);
1279 
1280 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1281 		    "detach: Driver busy. Driver dump active.");
1282 
1283 		return (DDI_FAILURE);
1284 	}
1285 
1286 #ifdef SFCT_SUPPORT
1287 	if ((port->flag & EMLXS_TGT_BOUND) &&
1288 	    ((port->fct_flags & FCT_STATE_PORT_ONLINE) ||
1289 	    (port->fct_flags & FCT_STATE_NOT_ACKED))) {
1290 		mutex_exit(&EMLXS_PORT_LOCK);
1291 
1292 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1293 		    "detach: Driver busy. Target mode active.");
1294 
1295 		return (DDI_FAILURE);
1296 	}
1297 #endif /* SFCT_SUPPORT */
1298 
1299 	if (port->flag & EMLXS_INI_BOUND) {
1300 		mutex_exit(&EMLXS_PORT_LOCK);
1301 
1302 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1303 		    "detach: Driver busy. Initiator mode active.");
1304 
1305 		return (DDI_FAILURE);
1306 	}
1307 
1308 	hba->flag &= ~FC_DUMP_SAFE;
1309 
1310 	mutex_exit(&EMLXS_PORT_LOCK);
1311 
1312 	switch (cmd) {
1313 	case DDI_DETACH:
1314 
1315 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1316 		    "DDI_DETACH");
1317 
1318 		rval = emlxs_hba_detach(dip);
1319 
1320 		if (rval != DDI_SUCCESS) {
1321 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1322 			    "Unable to detach.");
1323 		}
1324 		break;
1325 
1326 	case DDI_SUSPEND:
1327 
1328 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1329 		    "DDI_SUSPEND");
1330 
1331 		/* Suspend the driver */
1332 		rval = emlxs_hba_suspend(dip);
1333 
1334 		if (rval != DDI_SUCCESS) {
1335 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1336 			    "Unable to suspend driver.");
1337 		}
1338 		break;
1339 
1340 	default:
1341 		cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
1342 		    DRIVER_NAME, cmd);
1343 		rval = DDI_FAILURE;
1344 	}
1345 
1346 	if (rval == DDI_FAILURE) {
1347 		/* Re-Enable driver dump feature */
1348 		mutex_enter(&EMLXS_PORT_LOCK);
1349 		hba->flag |= FC_DUMP_SAFE;
1350 		mutex_exit(&EMLXS_PORT_LOCK);
1351 	}
1352 
1353 	return (rval);
1354 
1355 } /* emlxs_detach() */
1356 
1357 
1358 /* EMLXS_PORT_LOCK must be held when calling this */
1359 extern void
emlxs_port_init(emlxs_port_t * port)1360 emlxs_port_init(emlxs_port_t *port)
1361 {
1362 	emlxs_hba_t *hba = HBA;
1363 
1364 	/* Initialize the base node */
1365 	bzero((caddr_t)&port->node_base, sizeof (NODELIST));
1366 	port->node_base.nlp_Rpi = 0;
1367 	port->node_base.nlp_DID = 0xffffff;
1368 	port->node_base.nlp_list_next = NULL;
1369 	port->node_base.nlp_list_prev = NULL;
1370 	port->node_base.nlp_active = 1;
1371 	port->node_base.nlp_base = 1;
1372 	port->node_count = 0;
1373 
1374 	if (!(port->flag & EMLXS_PORT_ENABLED)) {
1375 		uint8_t dummy_wwn[8] =
1376 		    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
1377 
1378 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
1379 		    sizeof (NAME_TYPE));
1380 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
1381 		    sizeof (NAME_TYPE));
1382 	}
1383 
1384 	if (!(port->flag & EMLXS_PORT_CONFIG)) {
1385 		(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1386 		    (sizeof (port->snn)-1));
1387 		(void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn,
1388 		    (sizeof (port->spn)-1));
1389 	}
1390 
1391 	bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
1392 	    sizeof (SERV_PARM));
1393 	bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
1394 	    sizeof (NAME_TYPE));
1395 	bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
1396 	    sizeof (NAME_TYPE));
1397 
1398 	return;
1399 
1400 } /* emlxs_port_init() */
1401 
1402 
1403 void
emlxs_disable_pcie_ce_err(emlxs_hba_t * hba)1404 emlxs_disable_pcie_ce_err(emlxs_hba_t *hba)
1405 {
1406 	uint16_t	reg;
1407 
1408 	if (!hba->pci_cap_offset[PCI_CAP_ID_PCI_E]) {
1409 		return;
1410 	}
1411 
1412 	/* Turn off the Correctable Error Reporting */
1413 	/* (the Device Control Register, bit 0). */
1414 	reg = ddi_get16(hba->pci_acc_handle,
1415 	    (uint16_t *)(hba->pci_addr +
1416 	    hba->pci_cap_offset[PCI_CAP_ID_PCI_E] +
1417 	    PCIE_DEVCTL));
1418 
1419 	reg &= ~1;
1420 
1421 	(void) ddi_put16(hba->pci_acc_handle,
1422 	    (uint16_t *)(hba->pci_addr +
1423 	    hba->pci_cap_offset[PCI_CAP_ID_PCI_E] +
1424 	    PCIE_DEVCTL),
1425 	    reg);
1426 
1427 	return;
1428 
1429 } /* emlxs_disable_pcie_ce_err() */
1430 
1431 
1432 /*
1433  * emlxs_fca_bind_port
1434  *
1435  * Arguments:
1436  *
1437  * dip: the dev_info pointer for the ddiinst
1438  * port_info: pointer to info handed back to the transport
1439  * bind_info: pointer to info from the transport
1440  *
1441  * Return values: a port handle for this port, NULL for failure
1442  *
1443  */
1444 static opaque_t
emlxs_fca_bind_port(dev_info_t * dip,fc_fca_port_info_t * port_info,fc_fca_bind_info_t * bind_info)1445 emlxs_fca_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
1446     fc_fca_bind_info_t *bind_info)
1447 {
1448 	emlxs_hba_t *hba;
1449 	emlxs_port_t *port;
1450 	emlxs_port_t *pport;
1451 	emlxs_port_t *vport;
1452 	int ddiinst;
1453 	emlxs_vpd_t *vpd;
1454 	emlxs_config_t *cfg;
1455 	char *dptr;
1456 	char buffer[16];
1457 	uint32_t length;
1458 	uint32_t len;
1459 	char topology[32];
1460 	char linkspeed[32];
1461 	uint32_t linkstate;
1462 
1463 	ddiinst = ddi_get_instance(dip);
1464 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
1465 	port = &PPORT;
1466 	pport = &PPORT;
1467 
1468 	ddiinst = hba->ddiinst;
1469 	vpd = &VPD;
1470 	cfg = &CFG;
1471 
1472 	mutex_enter(&EMLXS_PORT_LOCK);
1473 
1474 	if (bind_info->port_num > 0) {
1475 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1476 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1477 		    !(bind_info->port_npiv) ||
1478 		    (bind_info->port_num > hba->vpi_max))
1479 #elif (EMLXS_MODREV >= EMLXS_MODREV3)
1480 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1481 		    (bind_info->port_num > hba->vpi_high))
1482 #endif
1483 		{
1484 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1485 			    "fca_bind_port: Port %d not supported.",
1486 			    bind_info->port_num);
1487 
1488 			mutex_exit(&EMLXS_PORT_LOCK);
1489 
1490 			port_info->pi_error = FC_OUTOFBOUNDS;
1491 			return (NULL);
1492 		}
1493 	}
1494 
1495 	/* Get true port pointer */
1496 	port = &VPORT(bind_info->port_num);
1497 
1498 	/* Make sure the port is not already bound to the transport */
1499 	if (port->flag & EMLXS_INI_BOUND) {
1500 
1501 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1502 		    "fca_bind_port: Port %d already bound. flag=%x",
1503 		    bind_info->port_num, port->flag);
1504 
1505 		mutex_exit(&EMLXS_PORT_LOCK);
1506 
1507 		port_info->pi_error = FC_ALREADY;
1508 		return (NULL);
1509 	}
1510 
1511 	if (!(pport->flag & EMLXS_INI_ENABLED)) {
1512 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1513 		    "fca_bind_port: Physical port does not support "
1514 		    "initiator mode.");
1515 
1516 		mutex_exit(&EMLXS_PORT_LOCK);
1517 
1518 		port_info->pi_error = FC_OUTOFBOUNDS;
1519 		return (NULL);
1520 	}
1521 
1522 	/* Make sure port enable flag is set */
1523 	/* Just in case fca_port_unbind is called just prior to fca_port_bind */
1524 	/* without a driver attach or resume operation */
1525 	port->flag |= EMLXS_PORT_ENABLED;
1526 
1527 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1528 	    "fca_bind_port: Port %d: port_info=%p bind_info=%p",
1529 	    bind_info->port_num, port_info, bind_info);
1530 
1531 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1532 	if (bind_info->port_npiv) {
1533 		/* Leadville is telling us about a new virtual port */
1534 		bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn,
1535 		    sizeof (NAME_TYPE));
1536 		bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn,
1537 		    sizeof (NAME_TYPE));
1538 		if (port->snn[0] == 0) {
1539 			(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1540 			    (sizeof (port->snn)-1));
1541 
1542 		}
1543 
1544 		if (port->spn[0] == 0) {
1545 			(void) snprintf((caddr_t)port->spn,
1546 			    (sizeof (port->spn)-1), "%s VPort-%d",
1547 			    (caddr_t)hba->spn, port->vpi);
1548 		}
1549 		port->flag |= EMLXS_PORT_CONFIG;
1550 	}
1551 #endif /* >= EMLXS_MODREV5 */
1552 
1553 	/*
1554 	 * Restricted login should apply both physical and
1555 	 * virtual ports.
1556 	 */
1557 	if (cfg[CFG_VPORT_RESTRICTED].current) {
1558 		port->flag |= EMLXS_PORT_RESTRICTED;
1559 	}
1560 
1561 	/* Perform generic port initialization */
1562 	emlxs_port_init(port);
1563 
1564 	/* Perform SFS specific initialization */
1565 	port->ulp_handle	= bind_info->port_handle;
1566 	port->ulp_statec_cb	= bind_info->port_statec_cb;
1567 	port->ulp_unsol_cb	= bind_info->port_unsol_cb;
1568 
1569 	/* Set the bound flag */
1570 	port->flag |= EMLXS_INI_BOUND;
1571 	hba->num_of_ports++;
1572 
1573 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1574 		mutex_exit(&EMLXS_PORT_LOCK);
1575 		(void) emlxs_vpi_port_bind_notify(port);
1576 		mutex_enter(&EMLXS_PORT_LOCK);
1577 
1578 		linkstate = (port->vpip->state == VPI_STATE_PORT_ONLINE)?
1579 		    FC_LINK_UP:FC_LINK_DOWN;
1580 	} else {
1581 		linkstate = hba->state;
1582 	}
1583 
1584 	/* Update the port info structure */
1585 
1586 	/* Set the topology and state */
1587 	if (port->mode == MODE_TARGET) {
1588 		port_info->pi_port_state = FC_STATE_OFFLINE;
1589 		port_info->pi_topology = FC_TOP_UNKNOWN;
1590 	} else if ((linkstate < FC_LINK_UP) ||
1591 	    ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLED) ||
1592 	    !(hba->flag & FC_NPIV_SUPPORTED)))) {
1593 		port_info->pi_port_state = FC_STATE_OFFLINE;
1594 		port_info->pi_topology = FC_TOP_UNKNOWN;
1595 	}
1596 #ifdef MENLO_SUPPORT
1597 	else if (hba->flag & FC_MENLO_MODE) {
1598 		port_info->pi_port_state = FC_STATE_OFFLINE;
1599 		port_info->pi_topology = FC_TOP_UNKNOWN;
1600 	}
1601 #endif /* MENLO_SUPPORT */
1602 	else {
1603 		/* Check for loop topology */
1604 		if (hba->topology == TOPOLOGY_LOOP) {
1605 			port_info->pi_port_state = FC_STATE_LOOP;
1606 			(void) strlcpy(topology, ", loop", sizeof (topology));
1607 
1608 			if (hba->flag & FC_FABRIC_ATTACHED) {
1609 				port_info->pi_topology = FC_TOP_PUBLIC_LOOP;
1610 			} else {
1611 				port_info->pi_topology = FC_TOP_PRIVATE_LOOP;
1612 			}
1613 		} else {
1614 			port_info->pi_topology = FC_TOP_FABRIC;
1615 			port_info->pi_port_state = FC_STATE_ONLINE;
1616 			(void) strlcpy(topology, ", fabric", sizeof (topology));
1617 		}
1618 
1619 		/* Set the link speed */
1620 		switch (hba->linkspeed) {
1621 		case 0:
1622 			(void) strlcpy(linkspeed, "Gb", sizeof (linkspeed));
1623 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1624 			break;
1625 
1626 		case LA_1GHZ_LINK:
1627 			(void) strlcpy(linkspeed, "1Gb", sizeof (linkspeed));
1628 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1629 			break;
1630 		case LA_2GHZ_LINK:
1631 			(void) strlcpy(linkspeed, "2Gb", sizeof (linkspeed));
1632 			port_info->pi_port_state |= FC_STATE_2GBIT_SPEED;
1633 			break;
1634 		case LA_4GHZ_LINK:
1635 			(void) strlcpy(linkspeed, "4Gb", sizeof (linkspeed));
1636 			port_info->pi_port_state |= FC_STATE_4GBIT_SPEED;
1637 			break;
1638 		case LA_8GHZ_LINK:
1639 			(void) strlcpy(linkspeed, "8Gb", sizeof (linkspeed));
1640 			port_info->pi_port_state |= FC_STATE_8GBIT_SPEED;
1641 			break;
1642 		case LA_10GHZ_LINK:
1643 			(void) strlcpy(linkspeed, "10Gb", sizeof (linkspeed));
1644 			port_info->pi_port_state |= FC_STATE_10GBIT_SPEED;
1645 			break;
1646 		case LA_16GHZ_LINK:
1647 			(void) strlcpy(linkspeed, "16Gb", sizeof (linkspeed));
1648 			port_info->pi_port_state |= FC_STATE_16GBIT_SPEED;
1649 			break;
1650 		default:
1651 			(void) snprintf(linkspeed, sizeof (linkspeed),
1652 			    "unknown(0x%x)", hba->linkspeed);
1653 			break;
1654 		}
1655 
1656 		if (hba->sli_mode <= EMLXS_HBA_SLI3_MODE) {
1657 			/* Adjusting port context for link up messages */
1658 			vport = port;
1659 			port = &PPORT;
1660 			if (vport->vpi == 0) {
1661 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1662 				    "%s%s, initiator",
1663 				    linkspeed, topology);
1664 			} else if (!(hba->flag & FC_NPIV_LINKUP)) {
1665 				hba->flag |= FC_NPIV_LINKUP;
1666 				EMLXS_MSGF(EMLXS_CONTEXT,
1667 				    &emlxs_npiv_link_up_msg,
1668 				    "%s%s, initiator", linkspeed, topology);
1669 			}
1670 			port = vport;
1671 		}
1672 	}
1673 
1674 	/* PCIE Correctable Error Reporting workaround */
1675 	if (((hba->model_info.chip == EMLXS_BE2_CHIP) ||
1676 	    (hba->model_info.chip == EMLXS_BE3_CHIP)) &&
1677 	    (bind_info->port_num == 0)) {
1678 		emlxs_disable_pcie_ce_err(hba);
1679 	}
1680 
1681 	/* Save initial state */
1682 	port->ulp_statec = port_info->pi_port_state;
1683 
1684 	/*
1685 	 * The transport needs a copy of the common service parameters
1686 	 * for this port. The transport can get any updates through
1687 	 * the getcap entry point.
1688 	 */
1689 	bcopy((void *) &port->sparam,
1690 	    (void *) &port_info->pi_login_params.common_service,
1691 	    sizeof (SERV_PARM));
1692 
1693 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
1694 	/* Swap the service parameters for ULP */
1695 	emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params.
1696 	    common_service);
1697 #endif /* EMLXS_MODREV2X */
1698 
1699 	port_info->pi_login_params.common_service.btob_credit = 0xffff;
1700 
1701 	bcopy((void *) &port->wwnn,
1702 	    (void *) &port_info->pi_login_params.node_ww_name,
1703 	    sizeof (NAME_TYPE));
1704 
1705 	bcopy((void *) &port->wwpn,
1706 	    (void *) &port_info->pi_login_params.nport_ww_name,
1707 	    sizeof (NAME_TYPE));
1708 
1709 	/*
1710 	 * We need to turn off CLASS2 support.
1711 	 * Otherwise, FC transport will use CLASS2 as default class
1712 	 * and never try with CLASS3.
1713 	 */
1714 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1715 #if (EMLXS_MODREVX >= EMLXS_MODREV3X)
1716 	if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) {
1717 		port_info->pi_login_params.class_1.class_opt &= ~0x0080;
1718 	}
1719 
1720 	if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) {
1721 		port_info->pi_login_params.class_2.class_opt &= ~0x0080;
1722 	}
1723 #else	/* EMLXS_SPARC or EMLXS_MODREV2X */
1724 	if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) {
1725 		port_info->pi_login_params.class_1.class_opt &= ~0x8000;
1726 	}
1727 
1728 	if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) {
1729 		port_info->pi_login_params.class_2.class_opt &= ~0x8000;
1730 	}
1731 #endif	/* >= EMLXS_MODREV3X */
1732 #endif	/* >= EMLXS_MODREV3 */
1733 
1734 
1735 #if (EMLXS_MODREV <= EMLXS_MODREV2)
1736 	if ((port_info->pi_login_params.class_1.data[0]) & 0x80) {
1737 		port_info->pi_login_params.class_1.data[0] &= ~0x80;
1738 	}
1739 
1740 	if ((port_info->pi_login_params.class_2.data[0]) & 0x80) {
1741 		port_info->pi_login_params.class_2.data[0] &= ~0x80;
1742 	}
1743 #endif	/* <= EMLXS_MODREV2 */
1744 
1745 	/* Additional parameters */
1746 	port_info->pi_s_id.port_id = port->did;
1747 	port_info->pi_s_id.priv_lilp_posit = 0;
1748 	port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current;
1749 
1750 	/* Initialize the RNID parameters */
1751 	bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params));
1752 
1753 	(void) snprintf((char *)port_info->pi_rnid_params.params.global_id,
1754 	    (sizeof (port_info->pi_rnid_params.params.global_id)-1),
1755 	    "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType,
1756 	    hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
1757 	    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
1758 	    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1759 
1760 	port_info->pi_rnid_params.params.unit_type  = RNID_HBA;
1761 	port_info->pi_rnid_params.params.port_id    = port->did;
1762 	port_info->pi_rnid_params.params.ip_version = RNID_IPV4;
1763 
1764 	/* Initialize the port attributes */
1765 	bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs));
1766 
1767 	(void) strncpy(port_info->pi_attrs.manufacturer, "Emulex",
1768 	    (sizeof (port_info->pi_attrs.manufacturer)-1));
1769 
1770 	port_info->pi_rnid_params.status = FC_SUCCESS;
1771 
1772 	(void) strncpy(port_info->pi_attrs.serial_number, vpd->serial_num,
1773 	    (sizeof (port_info->pi_attrs.serial_number)-1));
1774 
1775 	(void) snprintf(port_info->pi_attrs.firmware_version,
1776 	    (sizeof (port_info->pi_attrs.firmware_version)-1), "%s (%s)",
1777 	    vpd->fw_version, vpd->fw_label);
1778 
1779 #ifdef EMLXS_I386
1780 	(void) snprintf(port_info->pi_attrs.option_rom_version,
1781 	    (sizeof (port_info->pi_attrs.option_rom_version)-1),
1782 	    "Boot:%s", vpd->boot_version);
1783 #else	/* EMLXS_SPARC */
1784 	(void) snprintf(port_info->pi_attrs.option_rom_version,
1785 	    (sizeof (port_info->pi_attrs.option_rom_version)-1),
1786 	    "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version);
1787 #endif	/* EMLXS_I386 */
1788 
1789 	(void) snprintf(port_info->pi_attrs.driver_version,
1790 	    (sizeof (port_info->pi_attrs.driver_version)-1), "%s (%s)",
1791 	    emlxs_version, emlxs_revision);
1792 
1793 	(void) strncpy(port_info->pi_attrs.driver_name, DRIVER_NAME,
1794 	    (sizeof (port_info->pi_attrs.driver_name)-1));
1795 
1796 	port_info->pi_attrs.vendor_specific_id =
1797 	    ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX);
1798 
1799 	port_info->pi_attrs.supported_cos = LE_SWAP32(FC_NS_CLASS3);
1800 
1801 	port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE;
1802 
1803 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1804 	port_info->pi_rnid_params.params.num_attached = 0;
1805 
1806 	if (hba->model_info.chip == EMLXS_LANCER_CHIP) {
1807 		uint8_t		byte;
1808 		uint8_t		*wwpn;
1809 		uint32_t	i;
1810 		uint32_t	j;
1811 
1812 		/* Copy the WWPN as a string into the local buffer */
1813 		wwpn = (uint8_t *)&hba->wwpn;
1814 		for (i = 0; i < 16; i++) {
1815 			byte = *wwpn++;
1816 			j = ((byte & 0xf0) >> 4);
1817 			if (j <= 9) {
1818 				buffer[i] =
1819 				    (char)((uint8_t)'0' + (uint8_t)j);
1820 			} else {
1821 				buffer[i] =
1822 				    (char)((uint8_t)'A' + (uint8_t)(j -
1823 				    10));
1824 			}
1825 
1826 			i++;
1827 			j = (byte & 0xf);
1828 			if (j <= 9) {
1829 				buffer[i] =
1830 				    (char)((uint8_t)'0' + (uint8_t)j);
1831 			} else {
1832 				buffer[i] =
1833 				    (char)((uint8_t)'A' + (uint8_t)(j -
1834 				    10));
1835 			}
1836 		}
1837 
1838 		port_info->pi_attrs.hba_fru_details.port_index = 0;
1839 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))
1840 
1841 	} else if (hba->flag & FC_NPIV_ENABLED) {
1842 		uint8_t		byte;
1843 		uint8_t		*wwpn;
1844 		uint32_t	i;
1845 		uint32_t	j;
1846 
1847 		/* Copy the WWPN as a string into the local buffer */
1848 		wwpn = (uint8_t *)&hba->wwpn;
1849 		for (i = 0; i < 16; i++) {
1850 			byte = *wwpn++;
1851 			j = ((byte & 0xf0) >> 4);
1852 			if (j <= 9) {
1853 				buffer[i] =
1854 				    (char)((uint8_t)'0' + (uint8_t)j);
1855 			} else {
1856 				buffer[i] =
1857 				    (char)((uint8_t)'A' + (uint8_t)(j -
1858 				    10));
1859 			}
1860 
1861 			i++;
1862 			j = (byte & 0xf);
1863 			if (j <= 9) {
1864 				buffer[i] =
1865 				    (char)((uint8_t)'0' + (uint8_t)j);
1866 			} else {
1867 				buffer[i] =
1868 				    (char)((uint8_t)'A' + (uint8_t)(j -
1869 				    10));
1870 			}
1871 		}
1872 
1873 		port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
1874 #endif /* == EMLXS_MODREV3 || EMLXS_MODREV4 */
1875 
1876 	} else {
1877 		/* Copy the serial number string (right most 16 chars) */
1878 		/* into the right justified local buffer */
1879 		bzero(buffer, sizeof (buffer));
1880 		length = strlen(vpd->serial_num);
1881 		len = (length > 16) ? 16 : length;
1882 		bcopy(&vpd->serial_num[(length - len)],
1883 		    &buffer[(sizeof (buffer) - len)], len);
1884 
1885 		port_info->pi_attrs.hba_fru_details.port_index =
1886 		    vpd->port_index;
1887 	}
1888 
1889 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
1890 	dptr[0] = buffer[0];
1891 	dptr[1] = buffer[1];
1892 	dptr[2] = buffer[2];
1893 	dptr[3] = buffer[3];
1894 	dptr[4] = buffer[4];
1895 	dptr[5] = buffer[5];
1896 	dptr[6] = buffer[6];
1897 	dptr[7] = buffer[7];
1898 	port_info->pi_attrs.hba_fru_details.high =
1899 	    LE_SWAP64(port_info->pi_attrs.hba_fru_details.high);
1900 
1901 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
1902 	dptr[0] = buffer[8];
1903 	dptr[1] = buffer[9];
1904 	dptr[2] = buffer[10];
1905 	dptr[3] = buffer[11];
1906 	dptr[4] = buffer[12];
1907 	dptr[5] = buffer[13];
1908 	dptr[6] = buffer[14];
1909 	dptr[7] = buffer[15];
1910 	port_info->pi_attrs.hba_fru_details.low =
1911 	    LE_SWAP64(port_info->pi_attrs.hba_fru_details.low);
1912 
1913 #endif /* >= EMLXS_MODREV3 */
1914 
1915 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1916 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
1917 	    (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
1918 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
1919 	    (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
1920 #endif	/* >= EMLXS_MODREV4 */
1921 
1922 	(void) snprintf(port_info->pi_attrs.hardware_version,
1923 	    (sizeof (port_info->pi_attrs.hardware_version)-1),
1924 	    "%x", vpd->biuRev);
1925 
1926 	/* Set the hba speed limit */
1927 	if (vpd->link_speed & LMT_16GB_CAPABLE) {
1928 		port_info->pi_attrs.supported_speed |=
1929 		    FC_HBA_PORTSPEED_16GBIT;
1930 	}
1931 	if (vpd->link_speed & LMT_10GB_CAPABLE) {
1932 		port_info->pi_attrs.supported_speed |=
1933 		    FC_HBA_PORTSPEED_10GBIT;
1934 	}
1935 	if (vpd->link_speed & LMT_8GB_CAPABLE) {
1936 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
1937 	}
1938 	if (vpd->link_speed & LMT_4GB_CAPABLE) {
1939 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
1940 	}
1941 	if (vpd->link_speed & LMT_2GB_CAPABLE) {
1942 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
1943 	}
1944 	if (vpd->link_speed & LMT_1GB_CAPABLE) {
1945 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
1946 	}
1947 
1948 	/* Set the hba model info */
1949 	(void) strncpy(port_info->pi_attrs.model, hba->model_info.model,
1950 	    (sizeof (port_info->pi_attrs.model)-1));
1951 	(void) strncpy(port_info->pi_attrs.model_description,
1952 	    hba->model_info.model_desc,
1953 	    (sizeof (port_info->pi_attrs.model_description)-1));
1954 
1955 
1956 	/* Log information */
1957 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1958 	    "Bind info: port_num           = %d", bind_info->port_num);
1959 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1960 	    "Bind info: port_handle        = %p", bind_info->port_handle);
1961 
1962 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1963 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1964 	    "Bind info: port_npiv          = %d", bind_info->port_npiv);
1965 #endif /* >= EMLXS_MODREV5 */
1966 
1967 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1968 	    "Port info: pi_topology        = %x", port_info->pi_topology);
1969 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1970 	    "Port info: pi_error           = %x", port_info->pi_error);
1971 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1972 	    "Port info: pi_port_state      = %x", port_info->pi_port_state);
1973 
1974 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1975 	    "Port info: port_id            = %x", port_info->pi_s_id.port_id);
1976 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1977 	    "Port info: priv_lilp_posit    = %x",
1978 	    port_info->pi_s_id.priv_lilp_posit);
1979 
1980 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1981 	    "Port info: hard_addr          = %x",
1982 	    port_info->pi_hard_addr.hard_addr);
1983 
1984 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1985 	    "Port info: rnid.status        = %x",
1986 	    port_info->pi_rnid_params.status);
1987 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1988 	    "Port info: rnid.global_id     = %16s",
1989 	    port_info->pi_rnid_params.params.global_id);
1990 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1991 	    "Port info: rnid.unit_type     = %x",
1992 	    port_info->pi_rnid_params.params.unit_type);
1993 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1994 	    "Port info: rnid.port_id       = %x",
1995 	    port_info->pi_rnid_params.params.port_id);
1996 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1997 	    "Port info: rnid.num_attached  = %x",
1998 	    port_info->pi_rnid_params.params.num_attached);
1999 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2000 	    "Port info: rnid.ip_version    = %x",
2001 	    port_info->pi_rnid_params.params.ip_version);
2002 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2003 	    "Port info: rnid.udp_port      = %x",
2004 	    port_info->pi_rnid_params.params.udp_port);
2005 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2006 	    "Port info: rnid.ip_addr       = %16s",
2007 	    port_info->pi_rnid_params.params.ip_addr);
2008 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2009 	    "Port info: rnid.spec_id_resv  = %x",
2010 	    port_info->pi_rnid_params.params.specific_id_resv);
2011 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2012 	    "Port info: rnid.topo_flags    = %x",
2013 	    port_info->pi_rnid_params.params.topo_flags);
2014 
2015 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2016 	    "Port info: manufacturer       = %s",
2017 	    port_info->pi_attrs.manufacturer);
2018 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2019 	    "Port info: serial_num         = %s",
2020 	    port_info->pi_attrs.serial_number);
2021 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2022 	    "Port info: model              = %s", port_info->pi_attrs.model);
2023 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2024 	    "Port info: model_description  = %s",
2025 	    port_info->pi_attrs.model_description);
2026 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2027 	    "Port info: hardware_version   = %s",
2028 	    port_info->pi_attrs.hardware_version);
2029 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2030 	    "Port info: driver_version     = %s",
2031 	    port_info->pi_attrs.driver_version);
2032 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2033 	    "Port info: option_rom_version = %s",
2034 	    port_info->pi_attrs.option_rom_version);
2035 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2036 	    "Port info: firmware_version   = %s",
2037 	    port_info->pi_attrs.firmware_version);
2038 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2039 	    "Port info: driver_name        = %s",
2040 	    port_info->pi_attrs.driver_name);
2041 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2042 	    "Port info: vendor_specific_id = %x",
2043 	    port_info->pi_attrs.vendor_specific_id);
2044 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2045 	    "Port info: supported_cos      = %x",
2046 	    port_info->pi_attrs.supported_cos);
2047 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2048 	    "Port info: supported_speed    = %x",
2049 	    port_info->pi_attrs.supported_speed);
2050 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2051 	    "Port info: max_frame_size     = %x",
2052 	    port_info->pi_attrs.max_frame_size);
2053 
2054 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2055 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2056 	    "Port info: fru_port_index     = %x",
2057 	    port_info->pi_attrs.hba_fru_details.port_index);
2058 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2059 	    "Port info: fru_high           = %llx",
2060 	    port_info->pi_attrs.hba_fru_details.high);
2061 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2062 	    "Port info: fru_low            = %llx",
2063 	    port_info->pi_attrs.hba_fru_details.low);
2064 #endif	/* >= EMLXS_MODREV3 */
2065 
2066 #if (EMLXS_MODREV >= EMLXS_MODREV4)
2067 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2068 	    "Port info: sym_node_name      = %s",
2069 	    port_info->pi_attrs.sym_node_name);
2070 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2071 	    "Port info: sym_port_name      = %s",
2072 	    port_info->pi_attrs.sym_port_name);
2073 #endif	/* >= EMLXS_MODREV4 */
2074 
2075 	mutex_exit(&EMLXS_PORT_LOCK);
2076 
2077 #ifdef SFCT_SUPPORT
2078 	if (port->flag & EMLXS_TGT_ENABLED) {
2079 		emlxs_fct_bind_port(port);
2080 	}
2081 #endif /* SFCT_SUPPORT */
2082 
2083 	return ((opaque_t)port);
2084 
2085 } /* emlxs_fca_bind_port() */
2086 
2087 
2088 static void
emlxs_fca_unbind_port(opaque_t fca_port_handle)2089 emlxs_fca_unbind_port(opaque_t fca_port_handle)
2090 {
2091 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2092 	emlxs_hba_t *hba = HBA;
2093 
2094 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2095 	    "fca_unbind_port: port=%p", port);
2096 
2097 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2098 		return;
2099 	}
2100 
2101 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2102 		(void) emlxs_vpi_port_unbind_notify(port, 1);
2103 	}
2104 
2105 	/* Destroy & flush all port nodes, if they exist */
2106 	if (port->node_count) {
2107 		(void) EMLXS_SLI_UNREG_NODE(port, 0, 0, 0, 0);
2108 	}
2109 
2110 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2111 	if ((hba->sli_mode <= EMLXS_HBA_SLI3_MODE) &&
2112 	    (hba->flag & FC_NPIV_ENABLED) &&
2113 	    (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED))) {
2114 		(void) emlxs_mb_unreg_vpi(port);
2115 	}
2116 #endif
2117 
2118 	mutex_enter(&EMLXS_PORT_LOCK);
2119 	if (port->flag & EMLXS_INI_BOUND) {
2120 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2121 		port->flag &= ~(EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED);
2122 #endif
2123 		port->flag &= ~EMLXS_INI_BOUND;
2124 		hba->num_of_ports--;
2125 
2126 		/* Wait until ulp callback interface is idle */
2127 		while (port->ulp_busy) {
2128 			mutex_exit(&EMLXS_PORT_LOCK);
2129 			delay(drv_usectohz(500000));
2130 			mutex_enter(&EMLXS_PORT_LOCK);
2131 		}
2132 
2133 		port->ulp_handle = 0;
2134 		port->ulp_statec = FC_STATE_OFFLINE;
2135 		port->ulp_statec_cb = NULL;
2136 		port->ulp_unsol_cb = NULL;
2137 	}
2138 	mutex_exit(&EMLXS_PORT_LOCK);
2139 
2140 #ifdef SFCT_SUPPORT
2141 	/* Check if port was target bound */
2142 	if (port->flag & EMLXS_TGT_BOUND) {
2143 		emlxs_fct_unbind_port(port);
2144 	}
2145 #endif /* SFCT_SUPPORT */
2146 
2147 	return;
2148 
2149 } /* emlxs_fca_unbind_port() */
2150 
2151 
2152 /*ARGSUSED*/
2153 extern int
emlxs_fca_pkt_init(opaque_t fca_port_handle,fc_packet_t * pkt,int32_t sleep)2154 emlxs_fca_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
2155 {
2156 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2157 	emlxs_hba_t  *hba = HBA;
2158 	emlxs_buf_t  *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
2159 
2160 	if (!sbp) {
2161 		return (FC_FAILURE);
2162 	}
2163 	bzero((void *)sbp, sizeof (emlxs_buf_t));
2164 
2165 	mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, DDI_INTR_PRI(hba->intr_arg));
2166 	sbp->pkt_flags =
2167 	    PACKET_VALID | PACKET_ULP_OWNED;
2168 	sbp->port = port;
2169 	sbp->pkt = pkt;
2170 	sbp->iocbq.sbp = sbp;
2171 
2172 	return (FC_SUCCESS);
2173 
2174 } /* emlxs_fca_pkt_init() */
2175 
2176 
2177 
2178 static void
emlxs_initialize_pkt(emlxs_port_t * port,emlxs_buf_t * sbp)2179 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
2180 {
2181 	emlxs_hba_t *hba = HBA;
2182 	emlxs_config_t *cfg = &CFG;
2183 	fc_packet_t *pkt = PRIV2PKT(sbp);
2184 
2185 	mutex_enter(&sbp->mtx);
2186 
2187 	/* Reinitialize */
2188 	sbp->pkt   = pkt;
2189 	sbp->port  = port;
2190 	sbp->bmp   = NULL;
2191 	sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
2192 	sbp->iotag = 0;
2193 	sbp->ticks = 0;
2194 	sbp->abort_attempts = 0;
2195 	sbp->fpkt  = NULL;
2196 	sbp->flush_count = 0;
2197 	sbp->next  = NULL;
2198 
2199 	if (port->mode == MODE_INITIATOR) {
2200 		sbp->node  = NULL;
2201 		sbp->did   = 0;
2202 		sbp->lun   = EMLXS_LUN_NONE;
2203 		sbp->class = 0;
2204 		sbp->channel  = NULL;
2205 	}
2206 
2207 	bzero((void *)&sbp->iocbq, sizeof (IOCBQ));
2208 	sbp->iocbq.sbp = sbp;
2209 
2210 	if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
2211 	    ddi_in_panic()) {
2212 		sbp->pkt_flags |= PACKET_POLLED;
2213 	}
2214 
2215 	/* Prepare the fc packet */
2216 	pkt->pkt_state = FC_PKT_SUCCESS;
2217 	pkt->pkt_reason = 0;
2218 	pkt->pkt_action = 0;
2219 	pkt->pkt_expln = 0;
2220 	pkt->pkt_data_resid = 0;
2221 	pkt->pkt_resp_resid = 0;
2222 
2223 	/* Make sure all pkt's have a proper timeout */
2224 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
2225 		/* This disables all IOCB on chip timeouts */
2226 		pkt->pkt_timeout = 0x80000000;
2227 	} else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
2228 		pkt->pkt_timeout = 60;
2229 	}
2230 
2231 	/* Clear the response buffer */
2232 	if (pkt->pkt_rsplen) {
2233 		bzero(pkt->pkt_resp, pkt->pkt_rsplen);
2234 	}
2235 
2236 	mutex_exit(&sbp->mtx);
2237 
2238 	return;
2239 
2240 } /* emlxs_initialize_pkt() */
2241 
2242 
2243 
2244 /*
2245  * We may not need this routine
2246  */
2247 /*ARGSUSED*/
2248 extern int
emlxs_fca_pkt_uninit(opaque_t fca_port_handle,fc_packet_t * pkt)2249 emlxs_fca_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
2250 {
2251 	emlxs_buf_t  *sbp = PKT2PRIV(pkt);
2252 
2253 	if (!sbp) {
2254 		return (FC_FAILURE);
2255 	}
2256 
2257 	if (!(sbp->pkt_flags & PACKET_VALID)) {
2258 		return (FC_FAILURE);
2259 	}
2260 	sbp->pkt_flags &= ~PACKET_VALID;
2261 	mutex_destroy(&sbp->mtx);
2262 
2263 	return (FC_SUCCESS);
2264 
2265 } /* emlxs_fca_pkt_uninit() */
2266 
2267 
2268 static int
emlxs_fca_get_cap(opaque_t fca_port_handle,char * cap,void * ptr)2269 emlxs_fca_get_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2270 {
2271 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2272 	emlxs_hba_t  *hba = HBA;
2273 	int32_t rval;
2274 	emlxs_config_t *cfg = &CFG;
2275 
2276 	if (!(port->flag & EMLXS_INI_BOUND)) {
2277 		return (FC_CAP_ERROR);
2278 	}
2279 
2280 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2281 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2282 		    "fca_get_cap: FC_NODE_WWN");
2283 
2284 		bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE));
2285 		rval = FC_CAP_FOUND;
2286 
2287 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2288 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2289 		    "fca_get_cap: FC_LOGIN_PARAMS");
2290 
2291 		/*
2292 		 * We need to turn off CLASS2 support.
2293 		 * Otherwise, FC transport will use CLASS2 as default class
2294 		 * and never try with CLASS3.
2295 		 */
2296 		hba->sparam.cls2.classValid = 0;
2297 
2298 		bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM));
2299 
2300 		rval = FC_CAP_FOUND;
2301 
2302 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2303 		int32_t		*num_bufs;
2304 
2305 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2306 		    "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
2307 		    cfg[CFG_UB_BUFS].current);
2308 
2309 		num_bufs = (int32_t *)ptr;
2310 
2311 		/* We multiply by MAX_VPORTS because ULP uses a */
2312 		/* formula to calculate ub bufs from this */
2313 		*num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);
2314 
2315 		rval = FC_CAP_FOUND;
2316 
2317 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2318 		int32_t		*size;
2319 
2320 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2321 		    "fca_get_cap: FC_CAP_PAYLOAD_SIZE");
2322 
2323 		size = (int32_t *)ptr;
2324 		*size = -1;
2325 		rval = FC_CAP_FOUND;
2326 
2327 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2328 		fc_reset_action_t *action;
2329 
2330 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2331 		    "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");
2332 
2333 		action = (fc_reset_action_t *)ptr;
2334 		*action = FC_RESET_RETURN_ALL;
2335 		rval = FC_CAP_FOUND;
2336 
2337 	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2338 		fc_dma_behavior_t *behavior;
2339 
2340 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2341 		    "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");
2342 
2343 		behavior = (fc_dma_behavior_t *)ptr;
2344 		*behavior = FC_ALLOW_STREAMING;
2345 		rval = FC_CAP_FOUND;
2346 
2347 	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2348 		fc_fcp_dma_t   *fcp_dma;
2349 
2350 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2351 		    "fca_get_cap: FC_CAP_FCP_DMA");
2352 
2353 		fcp_dma = (fc_fcp_dma_t *)ptr;
2354 		*fcp_dma = FC_DVMA_SPACE;
2355 		rval = FC_CAP_FOUND;
2356 
2357 	} else {
2358 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2359 		    "fca_get_cap: Unknown capability. [%s]", cap);
2360 
2361 		rval = FC_CAP_ERROR;
2362 
2363 	}
2364 
2365 	return (rval);
2366 
2367 } /* emlxs_fca_get_cap() */
2368 
2369 
2370 
2371 static int
emlxs_fca_set_cap(opaque_t fca_port_handle,char * cap,void * ptr)2372 emlxs_fca_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2373 {
2374 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2375 
2376 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2377 	    "fca_set_cap: cap=[%s] arg=%p", cap, ptr);
2378 
2379 	return (FC_CAP_ERROR);
2380 
2381 } /* emlxs_fca_set_cap() */
2382 
2383 
2384 static opaque_t
emlxs_fca_get_device(opaque_t fca_port_handle,fc_portid_t d_id)2385 emlxs_fca_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
2386 {
2387 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2388 
2389 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2390 	    "fca_get_device: did=%x", d_id.port_id);
2391 
2392 	return (NULL);
2393 
2394 } /* emlxs_fca_get_device() */
2395 
2396 
2397 static int32_t
emlxs_fca_notify(opaque_t fca_port_handle,uint32_t cmd)2398 emlxs_fca_notify(opaque_t fca_port_handle, uint32_t cmd)
2399 {
2400 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2401 
2402 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x",
2403 	    cmd);
2404 
2405 	return (FC_SUCCESS);
2406 
2407 } /* emlxs_fca_notify */
2408 
2409 
2410 
2411 static int
emlxs_fca_get_map(opaque_t fca_port_handle,fc_lilpmap_t * mapbuf)2412 emlxs_fca_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
2413 {
2414 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
2415 	emlxs_hba_t	*hba = HBA;
2416 	uint32_t	lilp_length;
2417 
2418 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2419 	    "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
2420 	    port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
2421 	    port->alpa_map[3], port->alpa_map[4]);
2422 
2423 	if (!(port->flag & EMLXS_INI_BOUND)) {
2424 		return (FC_NOMAP);
2425 	}
2426 
2427 	if (hba->topology != TOPOLOGY_LOOP) {
2428 		return (FC_NOMAP);
2429 	}
2430 
2431 	/* Check if alpa map is available */
2432 	if (port->alpa_map[0] != 0) {
2433 		mapbuf->lilp_magic  = MAGIC_LILP;
2434 	} else {	/* No LILP map available */
2435 
2436 		/* Set lilp_magic to MAGIC_LISA and this will */
2437 		/* trigger an ALPA scan in ULP */
2438 		mapbuf->lilp_magic  = MAGIC_LISA;
2439 	}
2440 
2441 	mapbuf->lilp_myalpa = port->did;
2442 
2443 	/* The first byte of the alpa_map is the lilp map length */
2444 	/* Add one to include the lilp length byte itself */
2445 	lilp_length = (uint32_t)port->alpa_map[0] + 1;
2446 
2447 	/* Make sure the max transfer is 128 bytes */
2448 	if (lilp_length > 128) {
2449 		lilp_length = 128;
2450 	}
2451 
2452 	/* We start copying from the lilp_length field */
2453 	/* in order to get a word aligned address */
2454 	bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length,
2455 	    lilp_length);
2456 
2457 	return (FC_SUCCESS);
2458 
2459 } /* emlxs_fca_get_map() */
2460 
2461 
2462 
2463 extern int
emlxs_fca_transport(opaque_t fca_port_handle,fc_packet_t * pkt)2464 emlxs_fca_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
2465 {
2466 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
2467 	emlxs_hba_t	*hba = HBA;
2468 	emlxs_buf_t	*sbp;
2469 	uint32_t	rval;
2470 	uint32_t	pkt_flags;
2471 
2472 	/* Validate packet */
2473 	sbp = PKT2PRIV(pkt);
2474 
2475 	/* Make sure adapter is online */
2476 	if (!(hba->flag & FC_ONLINE_MODE) &&
2477 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2478 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2479 		    "Adapter offline.");
2480 
2481 		rval = (hba->flag & FC_ONLINING_MODE) ?
2482 		    FC_TRAN_BUSY : FC_OFFLINE;
2483 		return (rval);
2484 	}
2485 
2486 	/* Make sure ULP was told that the port was online */
2487 	if ((port->ulp_statec == FC_STATE_OFFLINE) &&
2488 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2489 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2490 		    "Port offline.");
2491 
2492 		return (FC_OFFLINE);
2493 	}
2494 
2495 	if (sbp->port != port) {
2496 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2497 		    "Invalid port handle. sbp=%p port=%p flags=%x", sbp,
2498 		    sbp->port, sbp->pkt_flags);
2499 		return (FC_BADPACKET);
2500 	}
2501 
2502 	if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_ULP_OWNED))) {
2503 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2504 		    "Invalid packet flags. sbp=%p port=%p flags=%x", sbp,
2505 		    sbp->port, sbp->pkt_flags);
2506 		return (FC_BADPACKET);
2507 	}
2508 
2509 #ifdef SFCT_SUPPORT
2510 	if ((port->mode == MODE_TARGET) && !sbp->fct_cmd &&
2511 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2512 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2513 		    "Packet blocked. Target mode.");
2514 		return (FC_TRANSPORT_ERROR);
2515 	}
2516 #endif /* SFCT_SUPPORT */
2517 
2518 #ifdef IDLE_TIMER
2519 	emlxs_pm_busy_component(hba);
2520 #endif	/* IDLE_TIMER */
2521 
2522 	/* Prepare the packet for transport */
2523 	emlxs_initialize_pkt(port, sbp);
2524 
2525 	/* Save a copy of the pkt flags. */
2526 	/* We will check the polling flag later */
2527 	pkt_flags = sbp->pkt_flags;
2528 
2529 	/* Send the packet */
2530 	switch (pkt->pkt_tran_type) {
2531 	case FC_PKT_FCP_READ:
2532 	case FC_PKT_FCP_WRITE:
2533 		rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags);
2534 		break;
2535 
2536 	case FC_PKT_IP_WRITE:
2537 	case FC_PKT_BROADCAST:
2538 		rval = emlxs_send_ip(port, sbp);
2539 		break;
2540 
2541 	case FC_PKT_EXCHANGE:
2542 		switch (pkt->pkt_cmd_fhdr.type) {
2543 		case FC_TYPE_SCSI_FCP:
2544 			rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags);
2545 			break;
2546 
2547 		case FC_TYPE_FC_SERVICES:
2548 			rval = emlxs_send_ct(port, sbp);
2549 			break;
2550 
2551 #ifdef MENLO_SUPPORT
2552 		case EMLXS_MENLO_TYPE:
2553 			rval = emlxs_send_menlo(port, sbp);
2554 			break;
2555 #endif /* MENLO_SUPPORT */
2556 
2557 		default:
2558 			rval = emlxs_send_els(port, sbp);
2559 		}
2560 		break;
2561 
2562 	case FC_PKT_OUTBOUND:
2563 		switch (pkt->pkt_cmd_fhdr.type) {
2564 #ifdef SFCT_SUPPORT
2565 		case FC_TYPE_SCSI_FCP:
2566 			rval = emlxs_send_fct_status(port, sbp);
2567 			break;
2568 
2569 		case FC_TYPE_BASIC_LS:
2570 			rval = emlxs_send_fct_abort(port, sbp);
2571 			break;
2572 #endif /* SFCT_SUPPORT */
2573 
2574 		case FC_TYPE_FC_SERVICES:
2575 			rval = emlxs_send_ct_rsp(port, sbp);
2576 			break;
2577 #ifdef MENLO_SUPPORT
2578 		case EMLXS_MENLO_TYPE:
2579 			rval = emlxs_send_menlo(port, sbp);
2580 			break;
2581 #endif /* MENLO_SUPPORT */
2582 
2583 		default:
2584 			rval = emlxs_send_els_rsp(port, sbp);
2585 		}
2586 		break;
2587 
2588 	default:
2589 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2590 		    "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
2591 		rval = FC_TRANSPORT_ERROR;
2592 		break;
2593 	}
2594 
2595 	/* Check if send was not successful */
2596 	if (rval != FC_SUCCESS) {
2597 		/* Return packet to ULP */
2598 		mutex_enter(&sbp->mtx);
2599 		sbp->pkt_flags |= PACKET_ULP_OWNED;
2600 		mutex_exit(&sbp->mtx);
2601 
2602 		return (rval);
2603 	}
2604 
2605 	/* Check if this packet should be polled for completion before */
2606 	/* returning. This check must be done with a saved copy of the */
2607 	/* pkt_flags because the packet itself could already be freed from */
2608 	/* memory if it was not polled. */
2609 	if (pkt_flags & PACKET_POLLED) {
2610 		emlxs_poll(port, sbp);
2611 	}
2612 
2613 	return (FC_SUCCESS);
2614 
2615 } /* emlxs_fca_transport() */
2616 
2617 
2618 
2619 static void
emlxs_poll(emlxs_port_t * port,emlxs_buf_t * sbp)2620 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
2621 {
2622 	emlxs_hba_t	*hba = HBA;
2623 	fc_packet_t	*pkt = PRIV2PKT(sbp);
2624 	clock_t		timeout;
2625 	clock_t		time;
2626 	CHANNEL	*cp;
2627 	int		in_panic = 0;
2628 
2629 	mutex_enter(&EMLXS_PORT_LOCK);
2630 	hba->io_poll_count++;
2631 	mutex_exit(&EMLXS_PORT_LOCK);
2632 
2633 	/* Check for panic situation */
2634 	cp = (CHANNEL *)sbp->channel;
2635 
2636 	if (ddi_in_panic()) {
2637 		in_panic = 1;
2638 		/*
2639 		 * In panic situations there will be one thread with
2640 		 * no interrrupts (hard or soft) and no timers
2641 		 */
2642 
2643 		/*
2644 		 * We must manually poll everything in this thread
2645 		 * to keep the driver going.
2646 		 */
2647 
2648 		/* Keep polling the chip until our IO is completed */
2649 		/* Driver's timer will not function during panics. */
2650 		/* Therefore, timer checks must be performed manually. */
2651 		(void) drv_getparm(LBOLT, &time);
2652 		timeout = time + drv_usectohz(1000000);
2653 		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2654 			EMLXS_SLI_POLL_INTR(hba);
2655 			(void) drv_getparm(LBOLT, &time);
2656 
2657 			/* Trigger timer checks periodically */
2658 			if (time >= timeout) {
2659 				emlxs_timer_checks(hba);
2660 				timeout = time + drv_usectohz(1000000);
2661 			}
2662 		}
2663 	} else {
2664 		/* Wait for IO completion */
2665 		/* The driver's timer will detect */
2666 		/* any timeout and abort the I/O. */
2667 		mutex_enter(&EMLXS_PKT_LOCK);
2668 		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2669 			cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK);
2670 		}
2671 		mutex_exit(&EMLXS_PKT_LOCK);
2672 	}
2673 
2674 	/* Check for fcp reset pkt */
2675 	if (sbp->pkt_flags & PACKET_FCP_RESET) {
2676 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2677 			/* Flush the IO's on the chipq */
2678 			(void) emlxs_chipq_node_flush(port,
2679 			    &hba->chan[hba->channel_fcp],
2680 			    sbp->node, sbp);
2681 		} else {
2682 			/* Flush the IO's on the chipq for this lun */
2683 			(void) emlxs_chipq_lun_flush(port,
2684 			    sbp->node, sbp->lun, sbp);
2685 		}
2686 
2687 		if (sbp->flush_count == 0) {
2688 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2689 			goto done;
2690 		}
2691 
2692 		/* Set the timeout so the flush has time to complete */
2693 		timeout = emlxs_timeout(hba, 60);
2694 		(void) drv_getparm(LBOLT, &time);
2695 		while ((time < timeout) && sbp->flush_count > 0) {
2696 			delay(drv_usectohz(500000));
2697 			(void) drv_getparm(LBOLT, &time);
2698 		}
2699 
2700 		if (sbp->flush_count == 0) {
2701 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2702 			goto done;
2703 		}
2704 
2705 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2706 		    "sbp=%p flush_count=%d. Waiting...", sbp,
2707 		    sbp->flush_count);
2708 
2709 		/* Let's try this one more time */
2710 
2711 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2712 			/* Flush the IO's on the chipq */
2713 			(void) emlxs_chipq_node_flush(port,
2714 			    &hba->chan[hba->channel_fcp],
2715 			    sbp->node, sbp);
2716 		} else {
2717 			/* Flush the IO's on the chipq for this lun */
2718 			(void) emlxs_chipq_lun_flush(port,
2719 			    sbp->node, sbp->lun, sbp);
2720 		}
2721 
2722 		/* Reset the timeout so the flush has time to complete */
2723 		timeout = emlxs_timeout(hba, 60);
2724 		(void) drv_getparm(LBOLT, &time);
2725 		while ((time < timeout) && sbp->flush_count > 0) {
2726 			delay(drv_usectohz(500000));
2727 			(void) drv_getparm(LBOLT, &time);
2728 		}
2729 
2730 		if (sbp->flush_count == 0) {
2731 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2732 			goto done;
2733 		}
2734 
2735 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2736 		    "sbp=%p flush_count=%d. Resetting link.", sbp,
2737 		    sbp->flush_count);
2738 
2739 		/* Let's first try to reset the link */
2740 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
2741 
2742 		if (sbp->flush_count == 0) {
2743 			goto done;
2744 		}
2745 
2746 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2747 		    "sbp=%p flush_count=%d. Resetting HBA.", sbp,
2748 		    sbp->flush_count);
2749 
2750 		/* If that doesn't work, reset the adapter */
2751 		(void) emlxs_reset(port, FC_FCA_RESET);
2752 
2753 		if (sbp->flush_count != 0) {
2754 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2755 			    "sbp=%p flush_count=%d. Giving up.", sbp,
2756 			    sbp->flush_count);
2757 		}
2758 
2759 	}
2760 	/* PACKET_FCP_RESET */
2761 done:
2762 
2763 	/* Packet has been declared completed and is now ready to be returned */
2764 
2765 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
2766 	emlxs_unswap_pkt(sbp);
2767 #endif	/* EMLXS_MODREV2X */
2768 
2769 	mutex_enter(&sbp->mtx);
2770 	sbp->pkt_flags |= PACKET_ULP_OWNED;
2771 	mutex_exit(&sbp->mtx);
2772 
2773 	mutex_enter(&EMLXS_PORT_LOCK);
2774 	hba->io_poll_count--;
2775 	mutex_exit(&EMLXS_PORT_LOCK);
2776 
2777 #ifdef FMA_SUPPORT
2778 	if (!in_panic) {
2779 		emlxs_check_dma(hba, sbp);
2780 	}
2781 #endif
2782 
2783 	/* Make ULP completion callback if required */
2784 	if (pkt->pkt_comp) {
2785 		cp->ulpCmplCmd++;
2786 		(*pkt->pkt_comp) (pkt);
2787 	}
2788 
2789 #ifdef FMA_SUPPORT
2790 	if (hba->flag & FC_DMA_CHECK_ERROR) {
2791 		emlxs_thread_spawn(hba, emlxs_restart_thread,
2792 		    NULL, NULL);
2793 	}
2794 #endif
2795 
2796 	return;
2797 
2798 } /* emlxs_poll() */
2799 
2800 
2801 static int
emlxs_fca_ub_alloc(opaque_t fca_port_handle,uint64_t tokens[],uint32_t size,uint32_t * count,uint32_t type)2802 emlxs_fca_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
2803     uint32_t *count, uint32_t type)
2804 {
2805 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
2806 	emlxs_hba_t		*hba = HBA;
2807 	char			*err = NULL;
2808 	emlxs_unsol_buf_t	*pool = NULL;
2809 	emlxs_unsol_buf_t	*new_pool = NULL;
2810 	emlxs_config_t		*cfg = &CFG;
2811 	int32_t			i;
2812 	int			result;
2813 	uint32_t		free_resv;
2814 	uint32_t		free;
2815 	fc_unsol_buf_t		*ubp;
2816 	emlxs_ub_priv_t		*ub_priv;
2817 	int			rc;
2818 
2819 	if (!(port->flag & EMLXS_INI_ENABLED)) {
2820 		if (tokens && count) {
2821 			bzero(tokens, (sizeof (uint64_t) * (*count)));
2822 		}
2823 		return (FC_SUCCESS);
2824 	}
2825 
2826 	if (!(port->flag & EMLXS_INI_BOUND)) {
2827 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2828 		    "fca_ub_alloc failed: Port not bound!  size=%x count=%d "
2829 		    "type=%x", size, *count, type);
2830 
2831 		return (FC_FAILURE);
2832 	}
2833 
2834 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2835 	    "fca_ub_alloc: size=%x count=%d type=%x", size, *count, type);
2836 
2837 	if (count && (*count > EMLXS_MAX_UBUFS)) {
2838 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2839 		    "fca_ub_alloc failed: Too many unsolicted buffers "
2840 		    "requested. count=%x", *count);
2841 
2842 		return (FC_FAILURE);
2843 
2844 	}
2845 
2846 	if (tokens == NULL) {
2847 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2848 		    "fca_ub_alloc failed: Token array is NULL.");
2849 
2850 		return (FC_FAILURE);
2851 	}
2852 
2853 	/* Clear the token array */
2854 	bzero(tokens, (sizeof (uint64_t) * (*count)));
2855 
2856 	free_resv = 0;
2857 	free = *count;
2858 	switch (type) {
2859 	case FC_TYPE_BASIC_LS:
2860 		err = "BASIC_LS";
2861 		break;
2862 	case FC_TYPE_EXTENDED_LS:
2863 		err = "EXTENDED_LS";
2864 		free = *count / 2;	/* Hold 50% for normal use */
2865 		free_resv = *count - free;	/* Reserve 50% for RSCN use */
2866 		break;
2867 	case FC_TYPE_IS8802:
2868 		err = "IS8802";
2869 		break;
2870 	case FC_TYPE_IS8802_SNAP:
2871 		err = "IS8802_SNAP";
2872 
2873 		if (cfg[CFG_NETWORK_ON].current == 0) {
2874 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2875 			    "fca_ub_alloc failed: IP support is disabled.");
2876 
2877 			return (FC_FAILURE);
2878 		}
2879 		break;
2880 	case FC_TYPE_SCSI_FCP:
2881 		err = "SCSI_FCP";
2882 		break;
2883 	case FC_TYPE_SCSI_GPP:
2884 		err = "SCSI_GPP";
2885 		break;
2886 	case FC_TYPE_HIPP_FP:
2887 		err = "HIPP_FP";
2888 		break;
2889 	case FC_TYPE_IPI3_MASTER:
2890 		err = "IPI3_MASTER";
2891 		break;
2892 	case FC_TYPE_IPI3_SLAVE:
2893 		err = "IPI3_SLAVE";
2894 		break;
2895 	case FC_TYPE_IPI3_PEER:
2896 		err = "IPI3_PEER";
2897 		break;
2898 	case FC_TYPE_FC_SERVICES:
2899 		err = "FC_SERVICES";
2900 		break;
2901 	}
2902 
2903 	mutex_enter(&EMLXS_UB_LOCK);
2904 
2905 	/*
2906 	 * Walk through the list of the unsolicited buffers
2907 	 * for this ddiinst of emlx.
2908 	 */
2909 
2910 	pool = port->ub_pool;
2911 
2912 	/*
2913 	 * The emlxs_fca_ub_alloc() can be called more than once with different
2914 	 * size. We will reject the call if there are
2915 	 * duplicate size with the same FC-4 type.
2916 	 */
2917 	while (pool) {
2918 		if ((pool->pool_type == type) &&
2919 		    (pool->pool_buf_size == size)) {
2920 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2921 			    "fca_ub_alloc failed: Unsolicited buffer pool "
2922 			    "for %s of size 0x%x bytes already exists.",
2923 			    err, size);
2924 
2925 			result = FC_FAILURE;
2926 			goto fail;
2927 		}
2928 
2929 		pool = pool->pool_next;
2930 	}
2931 
2932 	mutex_exit(&EMLXS_UB_LOCK);
2933 
2934 	new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t),
2935 	    KM_SLEEP);
2936 
2937 	new_pool->pool_next = NULL;
2938 	new_pool->pool_type = type;
2939 	new_pool->pool_buf_size = size;
2940 	new_pool->pool_nentries = *count;
2941 	new_pool->pool_available = new_pool->pool_nentries;
2942 	new_pool->pool_free = free;
2943 	new_pool->pool_free_resv = free_resv;
2944 	new_pool->fc_ubufs =
2945 	    kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);
2946 
2947 	new_pool->pool_first_token = port->ub_count;
2948 	new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;
2949 
2950 	for (i = 0; i < new_pool->pool_nentries; i++) {
2951 		ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
2952 		ubp->ub_port_handle = port->ulp_handle;
2953 		ubp->ub_token = (uint64_t)((unsigned long)ubp);
2954 		ubp->ub_bufsize = size;
2955 		ubp->ub_class = FC_TRAN_CLASS3;
2956 		ubp->ub_port_private = NULL;
2957 		ubp->ub_fca_private =
2958 		    (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t),
2959 		    KM_SLEEP);
2960 
2961 		/*
2962 		 * Initialize emlxs_ub_priv_t
2963 		 */
2964 		ub_priv = ubp->ub_fca_private;
2965 		ub_priv->ubp = ubp;
2966 		ub_priv->port = port;
2967 		ub_priv->flags = EMLXS_UB_FREE;
2968 		ub_priv->available = 1;
2969 		ub_priv->pool = new_pool;
2970 		ub_priv->time = 0;
2971 		ub_priv->timeout = 0;
2972 		ub_priv->token = port->ub_count;
2973 		ub_priv->cmd = 0;
2974 
2975 		/* Allocate the actual buffer */
2976 		ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);
2977 
2978 
2979 		tokens[i] = (uint64_t)((unsigned long)ubp);
2980 		port->ub_count++;
2981 	}
2982 
2983 	mutex_enter(&EMLXS_UB_LOCK);
2984 
2985 	/* Add the pool to the top of the pool list */
2986 	new_pool->pool_prev = NULL;
2987 	new_pool->pool_next = port->ub_pool;
2988 
2989 	if (port->ub_pool) {
2990 		port->ub_pool->pool_prev = new_pool;
2991 	}
2992 	port->ub_pool = new_pool;
2993 
2994 	/* Set the post counts */
2995 	if (type == FC_TYPE_IS8802_SNAP) {
2996 		MAILBOXQ	*mbox;
2997 
2998 		port->ub_post[hba->channel_ip] += new_pool->pool_nentries;
2999 
3000 		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
3001 		    MEM_MBOX))) {
3002 			emlxs_mb_config_farp(hba, mbox);
3003 			rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba,
3004 			    mbox, MBX_NOWAIT, 0);
3005 			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
3006 				emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
3007 			}
3008 		}
3009 		port->flag |= EMLXS_PORT_IP_UP;
3010 	} else if (type == FC_TYPE_EXTENDED_LS) {
3011 		port->ub_post[hba->channel_els] += new_pool->pool_nentries;
3012 	} else if (type == FC_TYPE_FC_SERVICES) {
3013 		port->ub_post[hba->channel_ct] += new_pool->pool_nentries;
3014 	}
3015 
3016 	mutex_exit(&EMLXS_UB_LOCK);
3017 
3018 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
3019 	    "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
3020 	    *count, err, size);
3021 
3022 	return (FC_SUCCESS);
3023 
3024 fail:
3025 
3026 	/* Clean the pool */
3027 	for (i = 0; tokens[i] != NULL; i++) {
3028 		/* Get the buffer object */
3029 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3030 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3031 
3032 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3033 		    "fca_ub_alloc failed: Freed buffer=%p token=%x size=%x "
3034 		    "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);
3035 
3036 		/* Free the actual buffer */
3037 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3038 
3039 		/* Free the private area of the buffer object */
3040 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3041 
3042 		tokens[i] = 0;
3043 		port->ub_count--;
3044 	}
3045 
3046 	if (new_pool) {
3047 		/* Free the array of buffer objects in the pool */
3048 		kmem_free((caddr_t)new_pool->fc_ubufs,
3049 		    (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));
3050 
3051 		/* Free the pool object */
3052 		kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));
3053 	}
3054 
3055 	mutex_exit(&EMLXS_UB_LOCK);
3056 
3057 	return (result);
3058 
3059 } /* emlxs_fca_ub_alloc() */
3060 
3061 
3062 static void
emlxs_ub_els_reject(emlxs_port_t * port,fc_unsol_buf_t * ubp)3063 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
3064 {
3065 	emlxs_hba_t	*hba = HBA;
3066 	emlxs_ub_priv_t	*ub_priv;
3067 	fc_packet_t	*pkt;
3068 	ELS_PKT		*els;
3069 	uint32_t	sid;
3070 
3071 	ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3072 
3073 	if (hba->state <= FC_LINK_DOWN) {
3074 		emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id);
3075 		return;
3076 	}
3077 
3078 	if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) +
3079 	    sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) {
3080 		emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id);
3081 		return;
3082 	}
3083 
3084 	sid = LE_SWAP24_LO(ubp->ub_frame.s_id);
3085 
3086 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
3087 	    "%s dropped: sid=%x. Rejecting.",
3088 	    emlxs_elscmd_xlate(ub_priv->cmd), sid);
3089 
3090 	pkt->pkt_tran_type = FC_PKT_OUTBOUND;
3091 	pkt->pkt_timeout = (2 * hba->fc_ratov);
3092 
3093 	if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
3094 		pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
3095 		pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
3096 	}
3097 
3098 	/* Build the fc header */
3099 	pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
3100 	pkt->pkt_cmd_fhdr.r_ctl =
3101 	    R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
3102 	pkt->pkt_cmd_fhdr.s_id = LE_SWAP24_LO(port->did);
3103 	pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
3104 	pkt->pkt_cmd_fhdr.f_ctl =
3105 	    F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
3106 	pkt->pkt_cmd_fhdr.seq_id = 0;
3107 	pkt->pkt_cmd_fhdr.df_ctl = 0;
3108 	pkt->pkt_cmd_fhdr.seq_cnt = 0;
3109 	pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
3110 	pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
3111 	pkt->pkt_cmd_fhdr.ro = 0;
3112 
3113 	/* Build the command */
3114 	els = (ELS_PKT *) pkt->pkt_cmd;
3115 	els->elsCode = 0x01;
3116 	els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
3117 	els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3118 	els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3119 	els->un.lsRjt.un.b.vendorUnique = 0x02;
3120 
3121 	/* Send the pkt later in another thread */
3122 	(void) emlxs_pkt_send(pkt, 0);
3123 
3124 	return;
3125 
3126 } /* emlxs_ub_els_reject() */
3127 
3128 extern int
emlxs_fca_ub_release(opaque_t fca_port_handle,uint32_t count,uint64_t tokens[])3129 emlxs_fca_ub_release(opaque_t fca_port_handle, uint32_t count,
3130     uint64_t tokens[])
3131 {
3132 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
3133 	emlxs_hba_t		*hba = HBA;
3134 	fc_unsol_buf_t		*ubp;
3135 	emlxs_ub_priv_t		*ub_priv;
3136 	uint32_t		i;
3137 	uint32_t		time;
3138 	emlxs_unsol_buf_t	*pool;
3139 
3140 	if (count == 0) {
3141 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3142 		    "fca_ub_release: Nothing to do. count=%d", count);
3143 
3144 		return (FC_SUCCESS);
3145 	}
3146 
3147 	if (!(port->flag & EMLXS_INI_BOUND)) {
3148 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3149 		    "fca_ub_release failed: Port not bound. count=%d "
3150 		    "token[0]=%p",
3151 		    count, tokens[0]);
3152 
3153 		return (FC_UNBOUND);
3154 	}
3155 
3156 	mutex_enter(&EMLXS_UB_LOCK);
3157 
3158 	if (!port->ub_pool) {
3159 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3160 		    "fca_ub_release failed: No pools! count=%d token[0]=%p",
3161 		    count, tokens[0]);
3162 
3163 		mutex_exit(&EMLXS_UB_LOCK);
3164 		return (FC_UB_BADTOKEN);
3165 	}
3166 
3167 	for (i = 0; i < count; i++) {
3168 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3169 
3170 		if (!ubp) {
3171 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3172 			    "fca_ub_release failed: count=%d tokens[%d]=0",
3173 			    count, i);
3174 
3175 			mutex_exit(&EMLXS_UB_LOCK);
3176 			return (FC_UB_BADTOKEN);
3177 		}
3178 
3179 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3180 
3181 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3182 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3183 			    "fca_ub_release failed: Dead buffer found. ubp=%p",
3184 			    ubp);
3185 
3186 			mutex_exit(&EMLXS_UB_LOCK);
3187 			return (FC_UB_BADTOKEN);
3188 		}
3189 
3190 		if (ub_priv->flags == EMLXS_UB_FREE) {
3191 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3192 			    "fca_ub_release: Buffer already free! ubp=%p "
3193 			    "token=%x",
3194 			    ubp, ub_priv->token);
3195 
3196 			continue;
3197 		}
3198 
3199 		/* Check for dropped els buffer */
3200 		/* ULP will do this sometimes without sending a reply */
3201 		if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
3202 		    !(ub_priv->flags & EMLXS_UB_REPLY)) {
3203 			emlxs_ub_els_reject(port, ubp);
3204 		}
3205 
3206 		/* Mark the buffer free */
3207 		ub_priv->flags = EMLXS_UB_FREE;
3208 		bzero(ubp->ub_buffer, ubp->ub_bufsize);
3209 
3210 		time = hba->timer_tics - ub_priv->time;
3211 		ub_priv->time = 0;
3212 		ub_priv->timeout = 0;
3213 
3214 		pool = ub_priv->pool;
3215 
3216 		if (ub_priv->flags & EMLXS_UB_RESV) {
3217 			pool->pool_free_resv++;
3218 		} else {
3219 			pool->pool_free++;
3220 		}
3221 
3222 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3223 		    "fca_ub_release: ubp=%p token=%x time=%d av=%d "
3224 		    "(%d,%d,%d,%d)",
3225 		    ubp, ub_priv->token, time, ub_priv->available,
3226 		    pool->pool_nentries, pool->pool_available,
3227 		    pool->pool_free, pool->pool_free_resv);
3228 
3229 		/* Check if pool can be destroyed now */
3230 		if ((pool->pool_available == 0) &&
3231 		    (pool->pool_free + pool->pool_free_resv ==
3232 		    pool->pool_nentries)) {
3233 			emlxs_ub_destroy(port, pool);
3234 		}
3235 	}
3236 
3237 	mutex_exit(&EMLXS_UB_LOCK);
3238 
3239 	return (FC_SUCCESS);
3240 
3241 } /* emlxs_fca_ub_release() */
3242 
3243 
3244 static int
emlxs_fca_ub_free(opaque_t fca_port_handle,uint32_t count,uint64_t tokens[])3245 emlxs_fca_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3246 {
3247 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
3248 	emlxs_unsol_buf_t	*pool;
3249 	fc_unsol_buf_t		*ubp;
3250 	emlxs_ub_priv_t		*ub_priv;
3251 	uint32_t		i;
3252 
3253 	if (!(port->flag & EMLXS_INI_ENABLED)) {
3254 		return (FC_SUCCESS);
3255 	}
3256 
3257 	if (count == 0) {
3258 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3259 		    "fca_ub_free: Nothing to do. count=%d token[0]=%p", count,
3260 		    tokens[0]);
3261 
3262 		return (FC_SUCCESS);
3263 	}
3264 
3265 	if (!(port->flag & EMLXS_INI_BOUND)) {
3266 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3267 		    "fca_ub_free: Port not bound. count=%d token[0]=%p", count,
3268 		    tokens[0]);
3269 
3270 		return (FC_SUCCESS);
3271 	}
3272 
3273 	mutex_enter(&EMLXS_UB_LOCK);
3274 
3275 	if (!port->ub_pool) {
3276 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3277 		    "fca_ub_free failed: No pools! count=%d token[0]=%p", count,
3278 		    tokens[0]);
3279 
3280 		mutex_exit(&EMLXS_UB_LOCK);
3281 		return (FC_UB_BADTOKEN);
3282 	}
3283 
3284 	/* Process buffer list */
3285 	for (i = 0; i < count; i++) {
3286 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3287 
3288 		if (!ubp) {
3289 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3290 			    "fca_ub_free failed: count=%d tokens[%d]=0", count,
3291 			    i);
3292 
3293 			mutex_exit(&EMLXS_UB_LOCK);
3294 			return (FC_UB_BADTOKEN);
3295 		}
3296 
3297 		/* Mark buffer unavailable */
3298 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3299 
3300 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3301 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3302 			    "fca_ub_free failed: Dead buffer found. ubp=%p",
3303 			    ubp);
3304 
3305 			mutex_exit(&EMLXS_UB_LOCK);
3306 			return (FC_UB_BADTOKEN);
3307 		}
3308 
3309 		ub_priv->available = 0;
3310 
3311 		/* Mark one less buffer available in the parent pool */
3312 		pool = ub_priv->pool;
3313 
3314 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3315 		    "fca_ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
3316 		    ub_priv->token, pool->pool_nentries,
3317 		    pool->pool_available - 1, pool->pool_free,
3318 		    pool->pool_free_resv);
3319 
3320 		if (pool->pool_available) {
3321 			pool->pool_available--;
3322 
3323 			/* Check if pool can be destroyed */
3324 			if ((pool->pool_available == 0) &&
3325 			    (pool->pool_free + pool->pool_free_resv ==
3326 			    pool->pool_nentries)) {
3327 				emlxs_ub_destroy(port, pool);
3328 			}
3329 		}
3330 	}
3331 
3332 	mutex_exit(&EMLXS_UB_LOCK);
3333 
3334 	return (FC_SUCCESS);
3335 
3336 } /* emlxs_fca_ub_free() */
3337 
3338 
3339 /* EMLXS_UB_LOCK must be held when calling this routine */
3340 extern void
emlxs_ub_destroy(emlxs_port_t * port,emlxs_unsol_buf_t * pool)3341 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
3342 {
3343 	emlxs_hba_t		*hba = HBA;
3344 	emlxs_unsol_buf_t	*next;
3345 	emlxs_unsol_buf_t	*prev;
3346 	fc_unsol_buf_t		*ubp;
3347 	uint32_t		i;
3348 
3349 	/* Remove the pool object from the pool list */
3350 	next = pool->pool_next;
3351 	prev = pool->pool_prev;
3352 
3353 	if (port->ub_pool == pool) {
3354 		port->ub_pool = next;
3355 	}
3356 
3357 	if (prev) {
3358 		prev->pool_next = next;
3359 	}
3360 
3361 	if (next) {
3362 		next->pool_prev = prev;
3363 	}
3364 
3365 	pool->pool_prev = NULL;
3366 	pool->pool_next = NULL;
3367 
3368 	/* Clear the post counts */
3369 	switch (pool->pool_type) {
3370 	case FC_TYPE_IS8802_SNAP:
3371 		port->ub_post[hba->channel_ip] -= pool->pool_nentries;
3372 		break;
3373 
3374 	case FC_TYPE_EXTENDED_LS:
3375 		port->ub_post[hba->channel_els] -= pool->pool_nentries;
3376 		break;
3377 
3378 	case FC_TYPE_FC_SERVICES:
3379 		port->ub_post[hba->channel_ct] -= pool->pool_nentries;
3380 		break;
3381 	}
3382 
3383 	/* Now free the pool memory */
3384 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3385 	    "ub_destroy: pool=%p type=%d size=%d count=%d", pool,
3386 	    pool->pool_type, pool->pool_buf_size, pool->pool_nentries);
3387 
3388 	/* Process the array of buffer objects in the pool */
3389 	for (i = 0; i < pool->pool_nentries; i++) {
3390 		/* Get the buffer object */
3391 		ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
3392 
3393 		/* Free the memory the buffer object represents */
3394 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3395 
3396 		/* Free the private area of the buffer object */
3397 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3398 	}
3399 
3400 	/* Free the array of buffer objects in the pool */
3401 	kmem_free((caddr_t)pool->fc_ubufs,
3402 	    (sizeof (fc_unsol_buf_t)*pool->pool_nentries));
3403 
3404 	/* Free the pool object */
3405 	kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));
3406 
3407 	return;
3408 
3409 } /* emlxs_ub_destroy() */
3410 
3411 
3412 /*ARGSUSED*/
3413 extern int
emlxs_fca_pkt_abort(opaque_t fca_port_handle,fc_packet_t * pkt,int32_t sleep)3414 emlxs_fca_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
3415 {
3416 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
3417 	emlxs_hba_t	*hba = HBA;
3418 	emlxs_config_t	*cfg = &CFG;
3419 
3420 	emlxs_buf_t	*sbp;
3421 	NODELIST	*nlp;
3422 	NODELIST	*prev_nlp;
3423 	uint8_t		channelno;
3424 	CHANNEL	*cp;
3425 	clock_t		pkt_timeout;
3426 	clock_t		timer;
3427 	clock_t		time;
3428 	int32_t		pkt_ret;
3429 	IOCBQ		*iocbq;
3430 	IOCBQ		*next;
3431 	IOCBQ		*prev;
3432 	uint32_t	found;
3433 	uint32_t	pass = 0;
3434 
3435 	sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
3436 	iocbq = &sbp->iocbq;
3437 	nlp = (NODELIST *)sbp->node;
3438 	cp = (CHANNEL *)sbp->channel;
3439 	channelno = (cp) ? cp->channelno : 0;
3440 
3441 	if (!(port->flag & EMLXS_INI_BOUND)) {
3442 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3443 		    "Port not bound.");
3444 		return (FC_UNBOUND);
3445 	}
3446 
3447 	if (!(hba->flag & FC_ONLINE_MODE)) {
3448 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3449 		    "Adapter offline.");
3450 		return (FC_OFFLINE);
3451 	}
3452 
3453 	/* ULP requires the aborted pkt to be completed */
3454 	/* back to ULP before returning from this call. */
3455 	/* SUN knows of problems with this call so they suggested that we */
3456 	/* always return a FC_FAILURE for this call, until it is worked out. */
3457 
3458 	/* Check if pkt is no good */
3459 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3460 	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3461 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3462 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3463 		return (FC_FAILURE);
3464 	}
3465 
3466 	/* Tag this now */
3467 	/* This will prevent any thread except ours from completing it */
3468 	mutex_enter(&sbp->mtx);
3469 
3470 	/* Check again if we still own this */
3471 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3472 	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3473 		mutex_exit(&sbp->mtx);
3474 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3475 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3476 		return (FC_FAILURE);
3477 	}
3478 
3479 	/* Check if pkt is a real polled command */
3480 	if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
3481 	    (sbp->pkt_flags & PACKET_POLLED)) {
3482 		mutex_exit(&sbp->mtx);
3483 
3484 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3485 		    "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp,
3486 		    sbp->pkt_flags);
3487 		return (FC_FAILURE);
3488 	}
3489 
3490 	sbp->pkt_flags |= PACKET_POLLED;
3491 	sbp->pkt_flags |= PACKET_IN_ABORT;
3492 
3493 	if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH |
3494 	    PACKET_IN_TIMEOUT)) {
3495 		mutex_exit(&sbp->mtx);
3496 
3497 		/* Do nothing, pkt already on its way out */
3498 		goto done;
3499 	}
3500 
3501 	mutex_exit(&sbp->mtx);
3502 
3503 begin:
3504 	pass++;
3505 
3506 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3507 
3508 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
3509 		/* Find it on the queue */
3510 		found = 0;
3511 		if (iocbq->flag & IOCB_PRIORITY) {
3512 			/* Search the priority queue */
3513 			prev = NULL;
3514 			next = (IOCBQ *) nlp->nlp_ptx[channelno].q_first;
3515 
3516 			while (next) {
3517 				if (next == iocbq) {
3518 					/* Remove it */
3519 					if (prev) {
3520 						prev->next = iocbq->next;
3521 					}
3522 
3523 					if (nlp->nlp_ptx[channelno].q_last ==
3524 					    (void *)iocbq) {
3525 						nlp->nlp_ptx[channelno].q_last =
3526 						    (void *)prev;
3527 					}
3528 
3529 					if (nlp->nlp_ptx[channelno].q_first ==
3530 					    (void *)iocbq) {
3531 						nlp->nlp_ptx[channelno].
3532 						    q_first =
3533 						    (void *)iocbq->next;
3534 					}
3535 
3536 					nlp->nlp_ptx[channelno].q_cnt--;
3537 					iocbq->next = NULL;
3538 					found = 1;
3539 					break;
3540 				}
3541 
3542 				prev = next;
3543 				next = next->next;
3544 			}
3545 		} else {
3546 			/* Search the normal queue */
3547 			prev = NULL;
3548 			next = (IOCBQ *) nlp->nlp_tx[channelno].q_first;
3549 
3550 			while (next) {
3551 				if (next == iocbq) {
3552 					/* Remove it */
3553 					if (prev) {
3554 						prev->next = iocbq->next;
3555 					}
3556 
3557 					if (nlp->nlp_tx[channelno].q_last ==
3558 					    (void *)iocbq) {
3559 						nlp->nlp_tx[channelno].q_last =
3560 						    (void *)prev;
3561 					}
3562 
3563 					if (nlp->nlp_tx[channelno].q_first ==
3564 					    (void *)iocbq) {
3565 						nlp->nlp_tx[channelno].q_first =
3566 						    (void *)iocbq->next;
3567 					}
3568 
3569 					nlp->nlp_tx[channelno].q_cnt--;
3570 					iocbq->next = NULL;
3571 					found = 1;
3572 					break;
3573 				}
3574 
3575 				prev = next;
3576 				next = (IOCBQ *) next->next;
3577 			}
3578 		}
3579 
3580 		if (!found) {
3581 			mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3582 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3583 			    "I/O not found in driver. sbp=%p flags=%x", sbp,
3584 			    sbp->pkt_flags);
3585 			goto done;
3586 		}
3587 
3588 		/* Check if node still needs servicing */
3589 		if ((nlp->nlp_ptx[channelno].q_first) ||
3590 		    (nlp->nlp_tx[channelno].q_first &&
3591 		    !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3592 
3593 			/*
3594 			 * If this is the base node,
3595 			 * then don't shift the pointers
3596 			 */
3597 			/* We want to drain the base node before moving on */
3598 			if (!nlp->nlp_base) {
3599 				/* Just shift channel queue */
3600 				/* pointers to next node */
3601 				cp->nodeq.q_last = (void *) nlp;
3602 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3603 			}
3604 		} else {
3605 			/* Remove node from channel queue */
3606 
3607 			/* If this is the only node on list */
3608 			if (cp->nodeq.q_first == (void *)nlp &&
3609 			    cp->nodeq.q_last == (void *)nlp) {
3610 				cp->nodeq.q_last = NULL;
3611 				cp->nodeq.q_first = NULL;
3612 				cp->nodeq.q_cnt = 0;
3613 			} else if (cp->nodeq.q_first == (void *)nlp) {
3614 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3615 				((NODELIST *) cp->nodeq.q_last)->
3616 				    nlp_next[channelno] = cp->nodeq.q_first;
3617 				cp->nodeq.q_cnt--;
3618 			} else {
3619 				/*
3620 				 * This is a little more difficult find the
3621 				 * previous node in the circular channel queue
3622 				 */
3623 				prev_nlp = nlp;
3624 				while (prev_nlp->nlp_next[channelno] != nlp) {
3625 					prev_nlp = prev_nlp->
3626 					    nlp_next[channelno];
3627 				}
3628 
3629 				prev_nlp->nlp_next[channelno] =
3630 				    nlp->nlp_next[channelno];
3631 
3632 				if (cp->nodeq.q_last == (void *)nlp) {
3633 					cp->nodeq.q_last = (void *)prev_nlp;
3634 				}
3635 				cp->nodeq.q_cnt--;
3636 
3637 			}
3638 
3639 			/* Clear node */
3640 			nlp->nlp_next[channelno] = NULL;
3641 		}
3642 
3643 		/* Free the ULPIOTAG and the bmp */
3644 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3645 			emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
3646 		} else {
3647 			(void) emlxs_unregister_pkt(cp, sbp->iotag, 1);
3648 		}
3649 
3650 
3651 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3652 
3653 		emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3654 		    IOERR_ABORT_REQUESTED, 1);
3655 
3656 		goto done;
3657 	}
3658 
3659 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3660 
3661 
3662 	/* Check the chip queue */
3663 	mutex_enter(&EMLXS_FCTAB_LOCK);
3664 
3665 	if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3666 	    !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
3667 	    (sbp == hba->fc_table[sbp->iotag])) {
3668 
3669 		/* Create the abort IOCB */
3670 		if (hba->state >= FC_LINK_UP) {
3671 			iocbq =
3672 			    emlxs_create_abort_xri_cn(port, sbp->node,
3673 			    sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);
3674 
3675 			mutex_enter(&sbp->mtx);
3676 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3677 			sbp->ticks =
3678 			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
3679 			sbp->abort_attempts++;
3680 			mutex_exit(&sbp->mtx);
3681 		} else {
3682 			iocbq =
3683 			    emlxs_create_close_xri_cn(port, sbp->node,
3684 			    sbp->iotag, cp);
3685 
3686 			mutex_enter(&sbp->mtx);
3687 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3688 			sbp->ticks = hba->timer_tics + 30;
3689 			sbp->abort_attempts++;
3690 			mutex_exit(&sbp->mtx);
3691 		}
3692 
3693 		mutex_exit(&EMLXS_FCTAB_LOCK);
3694 
3695 		/* Send this iocbq */
3696 		if (iocbq) {
3697 			EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
3698 			iocbq = NULL;
3699 		}
3700 
3701 		goto done;
3702 	}
3703 
3704 	mutex_exit(&EMLXS_FCTAB_LOCK);
3705 
3706 	/* Pkt was not on any queues */
3707 
3708 	/* Check again if we still own this */
3709 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3710 	    (sbp->pkt_flags &
3711 	    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3712 	    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3713 		goto done;
3714 	}
3715 
3716 	if (!sleep) {
3717 		return (FC_FAILURE);
3718 	}
3719 
3720 	/* Apparently the pkt was not found.  Let's delay and try again */
3721 	if (pass < 5) {
3722 		delay(drv_usectohz(5000000));	/* 5 seconds */
3723 
3724 		/* Check again if we still own this */
3725 		if (!(sbp->pkt_flags & PACKET_VALID) ||
3726 		    (sbp->pkt_flags &
3727 		    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3728 		    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3729 			goto done;
3730 		}
3731 
3732 		goto begin;
3733 	}
3734 
3735 force_it:
3736 
3737 	/* Force the completion now */
3738 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3739 	    "Abort: Completing an IO thats not outstanding: %x", sbp->iotag);
3740 
3741 	/* Now complete it */
3742 	emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED,
3743 	    1);
3744 
3745 done:
3746 
3747 	/* Now wait for the pkt to complete */
3748 	if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3749 		/* Set thread timeout */
3750 		pkt_timeout = emlxs_timeout(hba, 30);
3751 
3752 		/* Check for panic situation */
3753 		if (ddi_in_panic()) {
3754 
3755 			/*
3756 			 * In panic situations there will be one thread with no
3757 			 * interrrupts (hard or soft) and no timers
3758 			 */
3759 
3760 			/*
3761 			 * We must manually poll everything in this thread
3762 			 * to keep the driver going.
3763 			 */
3764 
3765 			/* Keep polling the chip until our IO is completed */
3766 			(void) drv_getparm(LBOLT, &time);
3767 			timer = time + drv_usectohz(1000000);
3768 			while ((time < pkt_timeout) &&
3769 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3770 				EMLXS_SLI_POLL_INTR(hba);
3771 				(void) drv_getparm(LBOLT, &time);
3772 
3773 				/* Trigger timer checks periodically */
3774 				if (time >= timer) {
3775 					emlxs_timer_checks(hba);
3776 					timer = time + drv_usectohz(1000000);
3777 				}
3778 			}
3779 		} else {
3780 			/* Wait for IO completion or pkt_timeout */
3781 			mutex_enter(&EMLXS_PKT_LOCK);
3782 			pkt_ret = 0;
3783 			while ((pkt_ret != -1) &&
3784 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3785 				pkt_ret =
3786 				    cv_timedwait(&EMLXS_PKT_CV,
3787 				    &EMLXS_PKT_LOCK, pkt_timeout);
3788 			}
3789 			mutex_exit(&EMLXS_PKT_LOCK);
3790 		}
3791 
3792 		/* Check if pkt_timeout occured. This is not good. */
3793 		/* Something happened to our IO. */
3794 		if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3795 			/* Force the completion now */
3796 			goto force_it;
3797 		}
3798 	}
3799 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
3800 	emlxs_unswap_pkt(sbp);
3801 #endif	/* EMLXS_MODREV2X */
3802 
3803 	/* Check again if we still own this */
3804 	if ((sbp->pkt_flags & PACKET_VALID) &&
3805 	    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3806 		mutex_enter(&sbp->mtx);
3807 		if ((sbp->pkt_flags & PACKET_VALID) &&
3808 		    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3809 			sbp->pkt_flags |= PACKET_ULP_OWNED;
3810 		}
3811 		mutex_exit(&sbp->mtx);
3812 	}
3813 
3814 #ifdef ULP_PATCH5
3815 	if (cfg[CFG_ENABLE_PATCH].current & ULP_PATCH5) {
3816 		return (FC_FAILURE);
3817 	}
3818 #endif /* ULP_PATCH5 */
3819 
3820 	return (FC_SUCCESS);
3821 
3822 } /* emlxs_fca_pkt_abort() */
3823 
3824 
3825 static void
emlxs_abort_all(emlxs_hba_t * hba,uint32_t * tx,uint32_t * chip)3826 emlxs_abort_all(emlxs_hba_t *hba, uint32_t *tx, uint32_t *chip)
3827 {
3828 	emlxs_port_t   *port = &PPORT;
3829 	fc_packet_t *pkt;
3830 	emlxs_buf_t *sbp;
3831 	uint32_t i;
3832 	uint32_t flg;
3833 	uint32_t rc;
3834 	uint32_t txcnt;
3835 	uint32_t chipcnt;
3836 
3837 	txcnt = 0;
3838 	chipcnt = 0;
3839 
3840 	mutex_enter(&EMLXS_FCTAB_LOCK);
3841 	for (i = 0; i < hba->max_iotag; i++) {
3842 		sbp = hba->fc_table[i];
3843 		if (sbp == NULL || sbp == STALE_PACKET) {
3844 			continue;
3845 		}
3846 		flg =  (sbp->pkt_flags & PACKET_IN_CHIPQ);
3847 		pkt = PRIV2PKT(sbp);
3848 		mutex_exit(&EMLXS_FCTAB_LOCK);
3849 		rc = emlxs_fca_pkt_abort(port, pkt, 0);
3850 		if (rc == FC_SUCCESS) {
3851 			if (flg) {
3852 				chipcnt++;
3853 			} else {
3854 				txcnt++;
3855 			}
3856 		}
3857 		mutex_enter(&EMLXS_FCTAB_LOCK);
3858 	}
3859 	mutex_exit(&EMLXS_FCTAB_LOCK);
3860 	*tx = txcnt;
3861 	*chip = chipcnt;
3862 } /* emlxs_abort_all() */
3863 
3864 
3865 extern int32_t
emlxs_reset(emlxs_port_t * port,uint32_t cmd)3866 emlxs_reset(emlxs_port_t *port, uint32_t cmd)
3867 {
3868 	emlxs_hba_t	*hba = HBA;
3869 	int		rval;
3870 	int		i = 0;
3871 	int		ret;
3872 	clock_t		timeout;
3873 
3874 	switch (cmd) {
3875 	case FC_FCA_LINK_RESET:
3876 
3877 		mutex_enter(&EMLXS_PORT_LOCK);
3878 		if (!(hba->flag & FC_ONLINE_MODE) ||
3879 		    (hba->state <= FC_LINK_DOWN)) {
3880 			mutex_exit(&EMLXS_PORT_LOCK);
3881 			return (FC_SUCCESS);
3882 		}
3883 
3884 		if (hba->reset_state &
3885 		    (FC_LINK_RESET_INP | FC_PORT_RESET_INP)) {
3886 			mutex_exit(&EMLXS_PORT_LOCK);
3887 			return (FC_FAILURE);
3888 		}
3889 
3890 		hba->reset_state |= FC_LINK_RESET_INP;
3891 		hba->reset_request |= FC_LINK_RESET;
3892 		mutex_exit(&EMLXS_PORT_LOCK);
3893 
3894 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3895 		    "Resetting Link.");
3896 
3897 		mutex_enter(&EMLXS_LINKUP_LOCK);
3898 		hba->linkup_wait_flag = TRUE;
3899 		mutex_exit(&EMLXS_LINKUP_LOCK);
3900 
3901 		if (emlxs_reset_link(hba, 1, 1)) {
3902 			mutex_enter(&EMLXS_LINKUP_LOCK);
3903 			hba->linkup_wait_flag = FALSE;
3904 			mutex_exit(&EMLXS_LINKUP_LOCK);
3905 
3906 			mutex_enter(&EMLXS_PORT_LOCK);
3907 			hba->reset_state &= ~FC_LINK_RESET_INP;
3908 			hba->reset_request &= ~FC_LINK_RESET;
3909 			mutex_exit(&EMLXS_PORT_LOCK);
3910 
3911 			return (FC_FAILURE);
3912 		}
3913 
3914 		mutex_enter(&EMLXS_LINKUP_LOCK);
3915 		timeout = emlxs_timeout(hba, 60);
3916 		ret = 0;
3917 		while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
3918 			ret =
3919 			    cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK,
3920 			    timeout);
3921 		}
3922 
3923 		hba->linkup_wait_flag = FALSE;
3924 		mutex_exit(&EMLXS_LINKUP_LOCK);
3925 
3926 		mutex_enter(&EMLXS_PORT_LOCK);
3927 		hba->reset_state &= ~FC_LINK_RESET_INP;
3928 		hba->reset_request &= ~FC_LINK_RESET;
3929 		mutex_exit(&EMLXS_PORT_LOCK);
3930 
3931 		if (ret == -1) {
3932 			return (FC_FAILURE);
3933 		}
3934 
3935 		return (FC_SUCCESS);
3936 
3937 	case FC_FCA_CORE:
3938 #ifdef DUMP_SUPPORT
3939 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3940 		    "Dumping Core.");
3941 
3942 		/* Schedule a USER dump */
3943 		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
3944 
3945 		/* Wait for dump to complete */
3946 		emlxs_dump_wait(hba);
3947 
3948 		return (FC_SUCCESS);
3949 #endif /* DUMP_SUPPORT */
3950 
3951 	case FC_FCA_RESET:
3952 	case FC_FCA_RESET_CORE:
3953 
3954 		mutex_enter(&EMLXS_PORT_LOCK);
3955 		if (hba->reset_state & FC_PORT_RESET_INP) {
3956 			mutex_exit(&EMLXS_PORT_LOCK);
3957 			return (FC_FAILURE);
3958 		}
3959 
3960 		hba->reset_state |= FC_PORT_RESET_INP;
3961 		hba->reset_request |= (FC_PORT_RESET | FC_LINK_RESET);
3962 
3963 		/* wait for any pending link resets to complete */
3964 		while ((hba->reset_state & FC_LINK_RESET_INP) &&
3965 		    (i++ < 1000)) {
3966 			mutex_exit(&EMLXS_PORT_LOCK);
3967 			delay(drv_usectohz(1000));
3968 			mutex_enter(&EMLXS_PORT_LOCK);
3969 		}
3970 
3971 		if (hba->reset_state & FC_LINK_RESET_INP) {
3972 			hba->reset_state &= ~FC_PORT_RESET_INP;
3973 			hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
3974 			mutex_exit(&EMLXS_PORT_LOCK);
3975 			return (FC_FAILURE);
3976 		}
3977 		mutex_exit(&EMLXS_PORT_LOCK);
3978 
3979 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3980 		    "Resetting Adapter.");
3981 
3982 		rval = FC_SUCCESS;
3983 
3984 		if (emlxs_offline(hba, 0) == 0) {
3985 			(void) emlxs_online(hba);
3986 		} else {
3987 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3988 			    "Adapter reset failed. Device busy.");
3989 
3990 			rval = FC_DEVICE_BUSY;
3991 		}
3992 
3993 		mutex_enter(&EMLXS_PORT_LOCK);
3994 		hba->reset_state &= ~FC_PORT_RESET_INP;
3995 		hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
3996 		mutex_exit(&EMLXS_PORT_LOCK);
3997 
3998 		return (rval);
3999 
4000 	case EMLXS_DFC_RESET_ALL:
4001 	case EMLXS_DFC_RESET_ALL_FORCE_DUMP:
4002 
4003 		mutex_enter(&EMLXS_PORT_LOCK);
4004 		if (hba->reset_state & FC_PORT_RESET_INP) {
4005 			mutex_exit(&EMLXS_PORT_LOCK);
4006 			return (FC_FAILURE);
4007 		}
4008 
4009 		hba->reset_state |= FC_PORT_RESET_INP;
4010 		hba->reset_request |= (FC_PORT_RESET | FC_LINK_RESET);
4011 
4012 		/* wait for any pending link resets to complete */
4013 		while ((hba->reset_state & FC_LINK_RESET_INP) &&
4014 		    (i++ < 1000)) {
4015 			mutex_exit(&EMLXS_PORT_LOCK);
4016 			delay(drv_usectohz(1000));
4017 			mutex_enter(&EMLXS_PORT_LOCK);
4018 		}
4019 
4020 		if (hba->reset_state & FC_LINK_RESET_INP) {
4021 			hba->reset_state &= ~FC_PORT_RESET_INP;
4022 			hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
4023 			mutex_exit(&EMLXS_PORT_LOCK);
4024 			return (FC_FAILURE);
4025 		}
4026 		mutex_exit(&EMLXS_PORT_LOCK);
4027 
4028 		rval = FC_SUCCESS;
4029 
4030 		if (cmd == EMLXS_DFC_RESET_ALL) {
4031 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4032 			    "Resetting Adapter (All Firmware Reset).");
4033 
4034 			emlxs_sli4_hba_reset_all(hba, 0);
4035 		} else {
4036 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4037 			    "Resetting Adapter "
4038 			    "(All Firmware Reset, Force Dump).");
4039 
4040 			emlxs_sli4_hba_reset_all(hba, 1);
4041 		}
4042 
4043 		mutex_enter(&EMLXS_PORT_LOCK);
4044 		hba->reset_state &= ~FC_PORT_RESET_INP;
4045 		hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
4046 		mutex_exit(&EMLXS_PORT_LOCK);
4047 
4048 		/* Wait for the timer thread to detect the error condition */
4049 		delay(drv_usectohz(1000000));
4050 
4051 		/* Wait for the HBA to re-initialize */
4052 		i = 0;
4053 		mutex_enter(&EMLXS_PORT_LOCK);
4054 		while (!(hba->flag & FC_ONLINE_MODE) && (i++ < 30)) {
4055 			mutex_exit(&EMLXS_PORT_LOCK);
4056 			delay(drv_usectohz(1000000));
4057 			mutex_enter(&EMLXS_PORT_LOCK);
4058 		}
4059 
4060 		if (!(hba->flag & FC_ONLINE_MODE)) {
4061 			rval = FC_FAILURE;
4062 		}
4063 
4064 		mutex_exit(&EMLXS_PORT_LOCK);
4065 
4066 		return (rval);
4067 
4068 	default:
4069 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4070 		    "reset: Unknown command. cmd=%x", cmd);
4071 
4072 		break;
4073 	}
4074 
4075 	return (FC_FAILURE);
4076 
4077 } /* emlxs_reset() */
4078 
4079 
4080 extern int32_t
emlxs_fca_reset(opaque_t fca_port_handle,uint32_t cmd)4081 emlxs_fca_reset(opaque_t fca_port_handle, uint32_t cmd)
4082 {
4083 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
4084 	emlxs_hba_t	*hba = HBA;
4085 	int32_t		rval;
4086 
4087 	if (port->mode != MODE_INITIATOR) {
4088 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4089 		    "fca_reset failed. Port is not in initiator mode.");
4090 
4091 		return (FC_FAILURE);
4092 	}
4093 
4094 	if (!(port->flag & EMLXS_INI_BOUND)) {
4095 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4096 		    "fca_reset: Port not bound.");
4097 
4098 		return (FC_UNBOUND);
4099 	}
4100 
4101 	switch (cmd) {
4102 	case FC_FCA_LINK_RESET:
4103 		if (hba->fw_flag & FW_UPDATE_NEEDED) {
4104 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4105 			    "fca_reset: FC_FCA_LINK_RESET -> FC_FCA_RESET");
4106 			cmd = FC_FCA_RESET;
4107 		} else {
4108 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4109 			    "fca_reset: FC_FCA_LINK_RESET");
4110 		}
4111 		break;
4112 
4113 	case FC_FCA_CORE:
4114 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4115 		    "fca_reset: FC_FCA_CORE");
4116 		break;
4117 
4118 	case FC_FCA_RESET:
4119 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4120 		    "fca_reset: FC_FCA_RESET");
4121 		break;
4122 
4123 	case FC_FCA_RESET_CORE:
4124 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4125 		    "fca_reset: FC_FCA_RESET_CORE");
4126 		break;
4127 
4128 	default:
4129 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4130 		    "fca_reset: Unknown command. cmd=%x", cmd);
4131 		return (FC_FAILURE);
4132 	}
4133 
4134 	if (hba->fw_flag & FW_UPDATE_NEEDED) {
4135 		hba->fw_flag |= FW_UPDATE_KERNEL;
4136 	}
4137 
4138 	rval = emlxs_reset(port, cmd);
4139 
4140 	return (rval);
4141 
4142 } /* emlxs_fca_reset() */
4143 
4144 
4145 extern int
emlxs_fca_port_manage(opaque_t fca_port_handle,fc_fca_pm_t * pm)4146 emlxs_fca_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
4147 {
4148 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
4149 	emlxs_hba_t	*hba = HBA;
4150 	int32_t		ret;
4151 	emlxs_vpd_t	*vpd = &VPD;
4152 
4153 	ret = FC_SUCCESS;
4154 
4155 #ifdef IDLE_TIMER
4156 	emlxs_pm_busy_component(hba);
4157 #endif	/* IDLE_TIMER */
4158 
4159 	switch (pm->pm_cmd_code) {
4160 
4161 	case FC_PORT_GET_FW_REV:
4162 	{
4163 		char buffer[128];
4164 
4165 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4166 		    "fca_port_manage: FC_PORT_GET_FW_REV");
4167 
4168 		(void) snprintf(buffer, (sizeof (buffer)-1),
4169 		    "%s %s", hba->model_info.model,
4170 		    vpd->fw_version);
4171 		bzero(pm->pm_data_buf, pm->pm_data_len);
4172 
4173 		if (pm->pm_data_len < strlen(buffer) + 1) {
4174 			ret = FC_NOMEM;
4175 
4176 			break;
4177 		}
4178 
4179 		(void) strncpy(pm->pm_data_buf, buffer,
4180 		    (pm->pm_data_len-1));
4181 		break;
4182 	}
4183 
4184 	case FC_PORT_GET_FCODE_REV:
4185 	{
4186 		char buffer[128];
4187 
4188 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4189 		    "fca_port_manage: FC_PORT_GET_FCODE_REV");
4190 
4191 		/* Force update here just to be sure */
4192 		emlxs_get_fcode_version(hba);
4193 
4194 		(void) snprintf(buffer, (sizeof (buffer)-1),
4195 		    "%s %s", hba->model_info.model,
4196 		    vpd->fcode_version);
4197 		bzero(pm->pm_data_buf, pm->pm_data_len);
4198 
4199 		if (pm->pm_data_len < strlen(buffer) + 1) {
4200 			ret = FC_NOMEM;
4201 			break;
4202 		}
4203 
4204 		(void) strncpy(pm->pm_data_buf, buffer,
4205 		    (pm->pm_data_len-1));
4206 		break;
4207 	}
4208 
4209 	case FC_PORT_GET_DUMP_SIZE:
4210 	{
4211 #ifdef DUMP_SUPPORT
4212 		uint32_t dump_size = 0;
4213 
4214 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4215 		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE");
4216 
4217 		if (pm->pm_data_len < sizeof (uint32_t)) {
4218 			ret = FC_NOMEM;
4219 			break;
4220 		}
4221 
4222 		(void) emlxs_get_dump(hba, NULL, &dump_size);
4223 
4224 		*((uint32_t *)pm->pm_data_buf) = dump_size;
4225 
4226 #else
4227 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4228 		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported.");
4229 
4230 #endif /* DUMP_SUPPORT */
4231 
4232 		break;
4233 	}
4234 
4235 	case FC_PORT_GET_DUMP:
4236 	{
4237 #ifdef DUMP_SUPPORT
4238 		uint32_t dump_size = 0;
4239 
4240 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4241 		    "fca_port_manage: FC_PORT_GET_DUMP");
4242 
4243 		(void) emlxs_get_dump(hba, NULL, &dump_size);
4244 
4245 		if (pm->pm_data_len < dump_size) {
4246 			ret = FC_NOMEM;
4247 			break;
4248 		}
4249 
4250 		(void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf,
4251 		    (uint32_t *)&dump_size);
4252 #else
4253 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4254 		    "fca_port_manage: FC_PORT_GET_DUMP unsupported.");
4255 
4256 #endif /* DUMP_SUPPORT */
4257 
4258 		break;
4259 	}
4260 
4261 	case FC_PORT_FORCE_DUMP:
4262 	{
4263 #ifdef DUMP_SUPPORT
4264 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4265 		    "fca_port_manage: FC_PORT_FORCE_DUMP");
4266 
4267 		/* Schedule a USER dump */
4268 		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
4269 
4270 		/* Wait for dump to complete */
4271 		emlxs_dump_wait(hba);
4272 #else
4273 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4274 		    "fca_port_manage: FC_PORT_FORCE_DUMP unsupported.");
4275 
4276 #endif /* DUMP_SUPPORT */
4277 		break;
4278 	}
4279 
4280 	case FC_PORT_LINK_STATE:
4281 	{
4282 		uint32_t	*link_state;
4283 
4284 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4285 		    "fca_port_manage: FC_PORT_LINK_STATE");
4286 
4287 		if (pm->pm_stat_len != sizeof (*link_state)) {
4288 			ret = FC_NOMEM;
4289 			break;
4290 		}
4291 
4292 		if (pm->pm_cmd_buf != NULL) {
4293 			/*
4294 			 * Can't look beyond the FCA port.
4295 			 */
4296 			ret = FC_INVALID_REQUEST;
4297 			break;
4298 		}
4299 
4300 		link_state = (uint32_t *)pm->pm_stat_buf;
4301 
4302 		/* Set the state */
4303 		if (hba->state >= FC_LINK_UP) {
4304 			/* Check for loop topology */
4305 			if (hba->topology == TOPOLOGY_LOOP) {
4306 				*link_state = FC_STATE_LOOP;
4307 			} else {
4308 				*link_state = FC_STATE_ONLINE;
4309 			}
4310 
4311 			/* Set the link speed */
4312 			switch (hba->linkspeed) {
4313 			case LA_2GHZ_LINK:
4314 				*link_state |= FC_STATE_2GBIT_SPEED;
4315 				break;
4316 			case LA_4GHZ_LINK:
4317 				*link_state |= FC_STATE_4GBIT_SPEED;
4318 				break;
4319 			case LA_8GHZ_LINK:
4320 				*link_state |= FC_STATE_8GBIT_SPEED;
4321 				break;
4322 			case LA_10GHZ_LINK:
4323 				*link_state |= FC_STATE_10GBIT_SPEED;
4324 				break;
4325 			case LA_16GHZ_LINK:
4326 				*link_state |= FC_STATE_16GBIT_SPEED;
4327 				break;
4328 			case LA_1GHZ_LINK:
4329 			default:
4330 				*link_state |= FC_STATE_1GBIT_SPEED;
4331 				break;
4332 			}
4333 		} else {
4334 			*link_state = FC_STATE_OFFLINE;
4335 		}
4336 
4337 		break;
4338 	}
4339 
4340 
4341 	case FC_PORT_ERR_STATS:
4342 	case FC_PORT_RLS:
4343 	{
4344 		MAILBOXQ	*mbq;
4345 		MAILBOX		*mb;
4346 		fc_rls_acc_t	*bp;
4347 
4348 		if (!(hba->flag & FC_ONLINE_MODE)) {
4349 			return (FC_OFFLINE);
4350 		}
4351 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4352 		    "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS");
4353 
4354 		if (pm->pm_data_len < sizeof (fc_rls_acc_t)) {
4355 			ret = FC_NOMEM;
4356 			break;
4357 		}
4358 
4359 		if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
4360 		    MEM_MBOX)) == 0) {
4361 			ret = FC_NOMEM;
4362 			break;
4363 		}
4364 		mb = (MAILBOX *)mbq;
4365 
4366 		emlxs_mb_read_lnk_stat(hba, mbq);
4367 		if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
4368 		    != MBX_SUCCESS) {
4369 			ret = FC_PBUSY;
4370 		} else {
4371 			bp = (fc_rls_acc_t *)pm->pm_data_buf;
4372 
4373 			bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt;
4374 			bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt;
4375 			bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt;
4376 			bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt;
4377 			bp->rls_invalid_word =
4378 			    mb->un.varRdLnk.invalidXmitWord;
4379 			bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt;
4380 		}
4381 
4382 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4383 		break;
4384 	}
4385 
4386 	case FC_PORT_DOWNLOAD_FW:
4387 		if (!(hba->flag & FC_ONLINE_MODE)) {
4388 			return (FC_OFFLINE);
4389 		}
4390 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4391 		    "fca_port_manage: FC_PORT_DOWNLOAD_FW");
4392 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
4393 		    pm->pm_data_len, 1);
4394 		break;
4395 
4396 	case FC_PORT_DOWNLOAD_FCODE:
4397 		if (!(hba->flag & FC_ONLINE_MODE)) {
4398 			return (FC_OFFLINE);
4399 		}
4400 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4401 		    "fca_port_manage: FC_PORT_DOWNLOAD_FCODE");
4402 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
4403 		    pm->pm_data_len, 1);
4404 		break;
4405 
4406 	case FC_PORT_DIAG:
4407 	{
4408 		uint32_t errno = 0;
4409 		uint32_t did = 0;
4410 		uint32_t pattern = 0;
4411 
4412 		switch (pm->pm_cmd_flags) {
4413 		case EMLXS_DIAG_BIU:
4414 
4415 			if (!(hba->flag & FC_ONLINE_MODE)) {
4416 				return (FC_OFFLINE);
4417 			}
4418 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4419 			    "fca_port_manage: DIAG_BIU");
4420 
4421 			if (pm->pm_data_len) {
4422 				pattern = *((uint32_t *)pm->pm_data_buf);
4423 			}
4424 
4425 			errno = emlxs_diag_biu_run(hba, pattern);
4426 
4427 			if (pm->pm_stat_len == sizeof (errno)) {
4428 				*(int *)pm->pm_stat_buf = errno;
4429 			}
4430 
4431 			break;
4432 
4433 
4434 		case EMLXS_DIAG_POST:
4435 
4436 			if (!(hba->flag & FC_ONLINE_MODE)) {
4437 				return (FC_OFFLINE);
4438 			}
4439 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4440 			    "fca_port_manage: DIAG_POST");
4441 
4442 			errno = emlxs_diag_post_run(hba);
4443 
4444 			if (pm->pm_stat_len == sizeof (errno)) {
4445 				*(int *)pm->pm_stat_buf = errno;
4446 			}
4447 
4448 			break;
4449 
4450 
4451 		case EMLXS_DIAG_ECHO:
4452 
4453 			if (!(hba->flag & FC_ONLINE_MODE)) {
4454 				return (FC_OFFLINE);
4455 			}
4456 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4457 			    "fca_port_manage: DIAG_ECHO");
4458 
4459 			if (pm->pm_cmd_len != sizeof (uint32_t)) {
4460 				ret = FC_INVALID_REQUEST;
4461 				break;
4462 			}
4463 
4464 			did = *((uint32_t *)pm->pm_cmd_buf);
4465 
4466 			if (pm->pm_data_len) {
4467 				pattern = *((uint32_t *)pm->pm_data_buf);
4468 			}
4469 
4470 			errno = emlxs_diag_echo_run(port, did, pattern);
4471 
4472 			if (pm->pm_stat_len == sizeof (errno)) {
4473 				*(int *)pm->pm_stat_buf = errno;
4474 			}
4475 
4476 			break;
4477 
4478 
4479 		case EMLXS_PARM_GET_NUM:
4480 		{
4481 			uint32_t	*num;
4482 			emlxs_config_t	*cfg;
4483 			uint32_t	i;
4484 			uint32_t	count;
4485 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4486 			    "fca_port_manage: PARM_GET_NUM");
4487 
4488 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4489 				ret = FC_NOMEM;
4490 				break;
4491 			}
4492 
4493 			num = (uint32_t *)pm->pm_stat_buf;
4494 			count = 0;
4495 			cfg = &CFG;
4496 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4497 				if (!(cfg->flags & PARM_HIDDEN)) {
4498 					count++;
4499 				}
4500 
4501 			}
4502 
4503 			*num = count;
4504 
4505 			break;
4506 		}
4507 
4508 		case EMLXS_PARM_GET_LIST:
4509 		{
4510 			emlxs_parm_t	*parm;
4511 			emlxs_config_t	*cfg;
4512 			uint32_t	i;
4513 			uint32_t	max_count;
4514 
4515 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4516 			    "fca_port_manage: PARM_GET_LIST");
4517 
4518 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4519 				ret = FC_NOMEM;
4520 				break;
4521 			}
4522 
4523 			max_count = pm->pm_stat_len / sizeof (emlxs_parm_t);
4524 
4525 			parm = (emlxs_parm_t *)pm->pm_stat_buf;
4526 			cfg = &CFG;
4527 			for (i = 0; i < NUM_CFG_PARAM && max_count; i++,
4528 			    cfg++) {
4529 				if (!(cfg->flags & PARM_HIDDEN)) {
4530 					(void) strncpy(parm->label, cfg->string,
4531 					    (sizeof (parm->label)-1));
4532 					parm->min = cfg->low;
4533 					parm->max = cfg->hi;
4534 					parm->def = cfg->def;
4535 					parm->current = cfg->current;
4536 					parm->flags = cfg->flags;
4537 					(void) strncpy(parm->help, cfg->help,
4538 					    (sizeof (parm->help)-1));
4539 					parm++;
4540 					max_count--;
4541 				}
4542 			}
4543 
4544 			break;
4545 		}
4546 
4547 		case EMLXS_PARM_GET:
4548 		{
4549 			emlxs_parm_t	*parm_in;
4550 			emlxs_parm_t	*parm_out;
4551 			emlxs_config_t	*cfg;
4552 			uint32_t	i;
4553 			uint32_t	len;
4554 
4555 			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4556 				EMLXS_MSGF(EMLXS_CONTEXT,
4557 				    &emlxs_sfs_debug_msg,
4558 				    "fca_port_manage: PARM_GET. "
4559 				    "inbuf too small.");
4560 
4561 				ret = FC_BADCMD;
4562 				break;
4563 			}
4564 
4565 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4566 				EMLXS_MSGF(EMLXS_CONTEXT,
4567 				    &emlxs_sfs_debug_msg,
4568 				    "fca_port_manage: PARM_GET. "
4569 				    "outbuf too small");
4570 
4571 				ret = FC_BADCMD;
4572 				break;
4573 			}
4574 
4575 			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4576 			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4577 			len = strlen(parm_in->label);
4578 			cfg = &CFG;
4579 			ret = FC_BADOBJECT;
4580 
4581 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4582 			    "fca_port_manage: PARM_GET: %s=0x%x,%d",
4583 			    parm_in->label, parm_in->current,
4584 			    parm_in->current);
4585 
4586 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4587 				if (len == strlen(cfg->string) &&
4588 				    (strcmp(parm_in->label,
4589 				    cfg->string) == 0)) {
4590 					(void) strncpy(parm_out->label,
4591 					    cfg->string,
4592 					    (sizeof (parm_out->label)-1));
4593 					parm_out->min = cfg->low;
4594 					parm_out->max = cfg->hi;
4595 					parm_out->def = cfg->def;
4596 					parm_out->current = cfg->current;
4597 					parm_out->flags = cfg->flags;
4598 					(void) strncpy(parm_out->help,
4599 					    cfg->help,
4600 					    (sizeof (parm_out->help)-1));
4601 
4602 					ret = FC_SUCCESS;
4603 					break;
4604 				}
4605 			}
4606 
4607 			break;
4608 		}
4609 
4610 		case EMLXS_PARM_SET:
4611 		{
4612 			emlxs_parm_t	*parm_in;
4613 			emlxs_parm_t	*parm_out;
4614 			emlxs_config_t	*cfg;
4615 			uint32_t	i;
4616 			uint32_t	len;
4617 
4618 			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4619 				EMLXS_MSGF(EMLXS_CONTEXT,
4620 				    &emlxs_sfs_debug_msg,
4621 				    "fca_port_manage: PARM_GET. "
4622 				    "inbuf too small.");
4623 
4624 				ret = FC_BADCMD;
4625 				break;
4626 			}
4627 
4628 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4629 				EMLXS_MSGF(EMLXS_CONTEXT,
4630 				    &emlxs_sfs_debug_msg,
4631 				    "fca_port_manage: PARM_GET. "
4632 				    "outbuf too small");
4633 				ret = FC_BADCMD;
4634 				break;
4635 			}
4636 
4637 			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4638 			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4639 			len = strlen(parm_in->label);
4640 			cfg = &CFG;
4641 			ret = FC_BADOBJECT;
4642 
4643 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4644 			    "fca_port_manage: PARM_SET: %s=0x%x,%d",
4645 			    parm_in->label, parm_in->current,
4646 			    parm_in->current);
4647 
4648 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4649 				/* Find matching parameter string */
4650 				if (len == strlen(cfg->string) &&
4651 				    (strcmp(parm_in->label,
4652 				    cfg->string) == 0)) {
4653 					/* Attempt to update parameter */
4654 					if (emlxs_set_parm(hba, i,
4655 					    parm_in->current) == FC_SUCCESS) {
4656 						(void) strncpy(parm_out->label,
4657 						    cfg->string,
4658 						    (sizeof (parm_out->label)-
4659 						    1));
4660 						parm_out->min = cfg->low;
4661 						parm_out->max = cfg->hi;
4662 						parm_out->def = cfg->def;
4663 						parm_out->current =
4664 						    cfg->current;
4665 						parm_out->flags = cfg->flags;
4666 						(void) strncpy(parm_out->help,
4667 						    cfg->help,
4668 						    (sizeof (parm_out->help)-
4669 						    1));
4670 
4671 						ret = FC_SUCCESS;
4672 					}
4673 
4674 					break;
4675 				}
4676 			}
4677 
4678 			break;
4679 		}
4680 
4681 		case EMLXS_LOG_GET:
4682 		{
4683 			emlxs_log_req_t		*req;
4684 			emlxs_log_resp_t	*resp;
4685 			uint32_t		len;
4686 
4687 			/* Check command size */
4688 			if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) {
4689 				ret = FC_BADCMD;
4690 				break;
4691 			}
4692 
4693 			/* Get the request */
4694 			req = (emlxs_log_req_t *)pm->pm_cmd_buf;
4695 
4696 			/* Calculate the response length from the request */
4697 			len = sizeof (emlxs_log_resp_t) +
4698 			    (req->count * MAX_LOG_MSG_LENGTH);
4699 
4700 					/* Check the response buffer length */
4701 			if (pm->pm_stat_len < len) {
4702 				ret = FC_BADCMD;
4703 				break;
4704 			}
4705 
4706 			/* Get the response pointer */
4707 			resp = (emlxs_log_resp_t *)pm->pm_stat_buf;
4708 
4709 			/* Get the request log enties */
4710 			(void) emlxs_msg_log_get(hba, req, resp);
4711 
4712 			ret = FC_SUCCESS;
4713 			break;
4714 		}
4715 
4716 		case EMLXS_GET_BOOT_REV:
4717 		{
4718 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4719 			    "fca_port_manage: GET_BOOT_REV");
4720 
4721 			if (pm->pm_stat_len < strlen(vpd->boot_version)) {
4722 				ret = FC_NOMEM;
4723 				break;
4724 			}
4725 
4726 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4727 			(void) snprintf(pm->pm_stat_buf, pm->pm_stat_len,
4728 			    "%s %s", hba->model_info.model, vpd->boot_version);
4729 
4730 			break;
4731 		}
4732 
4733 		case EMLXS_DOWNLOAD_BOOT:
4734 			if (!(hba->flag & FC_ONLINE_MODE)) {
4735 				return (FC_OFFLINE);
4736 			}
4737 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4738 			    "fca_port_manage: DOWNLOAD_BOOT");
4739 
4740 			ret = emlxs_fw_download(hba, pm->pm_data_buf,
4741 			    pm->pm_data_len, 1);
4742 			break;
4743 
4744 		case EMLXS_DOWNLOAD_CFL:
4745 		{
4746 			uint32_t *buffer;
4747 			uint32_t region;
4748 			uint32_t length;
4749 
4750 			if (!(hba->flag & FC_ONLINE_MODE)) {
4751 				return (FC_OFFLINE);
4752 			}
4753 
4754 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4755 			    "fca_port_manage: DOWNLOAD_CFL");
4756 
4757 			/* Extract the region number from the first word. */
4758 			buffer = (uint32_t *)pm->pm_data_buf;
4759 			region = *buffer++;
4760 
4761 			/* Adjust the image length for the header word */
4762 			length = pm->pm_data_len - 4;
4763 
4764 			ret =
4765 			    emlxs_cfl_download(hba, region, (caddr_t)buffer,
4766 			    length);
4767 			break;
4768 		}
4769 
4770 		case EMLXS_VPD_GET:
4771 		{
4772 			emlxs_vpd_desc_t	*vpd_out;
4773 
4774 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4775 			    "fca_port_manage: VPD_GET");
4776 
4777 			if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) {
4778 				ret = FC_BADCMD;
4779 				break;
4780 			}
4781 
4782 			vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf;
4783 			bzero(vpd_out, pm->pm_stat_len);
4784 
4785 			(void) strncpy(vpd_out->id, vpd->id,
4786 			    (sizeof (vpd_out->id)-1));
4787 			(void) strncpy(vpd_out->part_num, vpd->part_num,
4788 			    (sizeof (vpd_out->part_num)-1));
4789 			(void) strncpy(vpd_out->eng_change, vpd->eng_change,
4790 			    (sizeof (vpd_out->eng_change)-1));
4791 			(void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4792 			    (sizeof (vpd_out->manufacturer)-1));
4793 			(void) strncpy(vpd_out->serial_num, vpd->serial_num,
4794 			    (sizeof (vpd_out->serial_num)-1));
4795 			(void) strncpy(vpd_out->model, vpd->model,
4796 			    (sizeof (vpd_out->model)-1));
4797 			(void) strncpy(vpd_out->model_desc, vpd->model_desc,
4798 			    (sizeof (vpd_out->model_desc)-1));
4799 			(void) strncpy(vpd_out->port_num, vpd->port_num,
4800 			    (sizeof (vpd_out->port_num)-1));
4801 			(void) strncpy(vpd_out->prog_types, vpd->prog_types,
4802 			    (sizeof (vpd_out->prog_types)-1));
4803 
4804 			ret = FC_SUCCESS;
4805 
4806 			break;
4807 		}
4808 
4809 		case EMLXS_VPD_GET_V2:
4810 		{
4811 			emlxs_vpd_desc_v2_t	*vpd_out;
4812 
4813 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4814 			    "fca_port_manage: VPD_GET_V2");
4815 
4816 			if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_v2_t)) {
4817 				ret = FC_BADCMD;
4818 				break;
4819 			}
4820 
4821 			vpd_out = (emlxs_vpd_desc_v2_t *)pm->pm_stat_buf;
4822 			bzero(vpd_out, pm->pm_stat_len);
4823 
4824 			(void) strncpy(vpd_out->id, vpd->id,
4825 			    (sizeof (vpd_out->id)-1));
4826 			(void) strncpy(vpd_out->part_num, vpd->part_num,
4827 			    (sizeof (vpd_out->part_num)-1));
4828 			(void) strncpy(vpd_out->eng_change, vpd->eng_change,
4829 			    (sizeof (vpd_out->eng_change)-1));
4830 			(void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4831 			    (sizeof (vpd_out->manufacturer)-1));
4832 			(void) strncpy(vpd_out->serial_num, vpd->serial_num,
4833 			    (sizeof (vpd_out->serial_num)-1));
4834 			(void) strncpy(vpd_out->model, vpd->model,
4835 			    (sizeof (vpd_out->model)-1));
4836 			(void) strncpy(vpd_out->model_desc, vpd->model_desc,
4837 			    (sizeof (vpd_out->model_desc)-1));
4838 			(void) strncpy(vpd_out->port_num, vpd->port_num,
4839 			    (sizeof (vpd_out->port_num)-1));
4840 			(void) strncpy(vpd_out->prog_types, vpd->prog_types,
4841 			    (sizeof (vpd_out->prog_types)-1));
4842 
4843 			ret = FC_SUCCESS;
4844 
4845 			break;
4846 		}
4847 
4848 		case EMLXS_PHY_GET:
4849 		{
4850 			emlxs_phy_desc_t	*phy_out;
4851 			MAILBOXQ *mbq;
4852 			MAILBOX4 *mb;
4853 			IOCTL_COMMON_GET_PHY_DETAILS *phy;
4854 			mbox_req_hdr_t	*hdr_req;
4855 
4856 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4857 			    "fca_port_manage: EMLXS_PHY_GET");
4858 
4859 			if (pm->pm_stat_len < sizeof (emlxs_phy_desc_t)) {
4860 				ret = FC_BADCMD;
4861 				break;
4862 			}
4863 
4864 			if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4865 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4866 				    "Invalid sli_mode. mode=%d", hba->sli_mode);
4867 				ret = FC_BADCMD;
4868 				break;
4869 			}
4870 
4871 			phy_out = (emlxs_phy_desc_t *)pm->pm_stat_buf;
4872 			bzero(phy_out, sizeof (emlxs_phy_desc_t));
4873 
4874 			if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
4875 			    MEM_MBOX)) == 0) {
4876 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4877 				    "Unable to allocate mailbox buffer.");
4878 				ret = FC_NOMEM;
4879 				break;
4880 			}
4881 
4882 			mb = (MAILBOX4*)mbq;
4883 
4884 			bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
4885 
4886 			mb->un.varSLIConfig.be.embedded = 1;
4887 			mbq->mbox_cmpl = NULL;
4888 
4889 			mb->mbxCommand = MBX_SLI_CONFIG;
4890 			mb->mbxOwner = OWN_HOST;
4891 
4892 			hdr_req = (mbox_req_hdr_t *)
4893 			    &mb->un.varSLIConfig.be.un_hdr.hdr_req;
4894 			hdr_req->subsystem = IOCTL_SUBSYSTEM_COMMON;
4895 			hdr_req->opcode = COMMON_OPCODE_GET_PHY_DETAILS;
4896 			hdr_req->timeout = 0;
4897 			hdr_req->req_length =
4898 			    sizeof (IOCTL_COMMON_GET_PHY_DETAILS);
4899 
4900 			phy = (IOCTL_COMMON_GET_PHY_DETAILS *)(hdr_req + 1);
4901 
4902 			/* Send read request */
4903 			if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0) !=
4904 			    MBX_SUCCESS) {
4905 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4906 				    "Unable to get PHY details. status=%x",
4907 				    mb->mbxStatus);
4908 
4909 				emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4910 
4911 				ret = FC_FAILURE;
4912 				break;
4913 			}
4914 
4915 			phy_out->phy_type = phy->params.response.phy_type;
4916 			phy_out->interface_type =
4917 			    phy->params.response.interface_type;
4918 			phy_out->misc_params = phy->params.response.misc_params;
4919 			phy_out->rsvd[0] = phy->params.response.rsvd[0];
4920 			phy_out->rsvd[1] = phy->params.response.rsvd[1];
4921 			phy_out->rsvd[2] = phy->params.response.rsvd[2];
4922 			phy_out->rsvd[3] = phy->params.response.rsvd[3];
4923 
4924 			emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4925 
4926 			ret = FC_SUCCESS;
4927 			break;
4928 		}
4929 
4930 #ifdef NODE_THROTTLE_SUPPORT
4931 		case EMLXS_SET_THROTTLE:
4932 		{
4933 			emlxs_node_t *node;
4934 			uint32_t scope = 0;
4935 			uint32_t i;
4936 			char buf1[32];
4937 			emlxs_throttle_desc_t *desc;
4938 
4939 			if ((pm->pm_data_buf == NULL) ||
4940 			    (pm->pm_data_len !=
4941 			    sizeof (emlxs_throttle_desc_t))) {
4942 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4943 				    "fca_port_manage: EMLXS_SET_THROTTLE: "
4944 				    "Descriptor buffer not valid. %d",
4945 				    pm->pm_data_len);
4946 				ret = FC_BADCMD;
4947 				break;
4948 			}
4949 
4950 			if ((pm->pm_cmd_buf != NULL) &&
4951 			    (pm->pm_cmd_len == sizeof (uint32_t))) {
4952 				scope = *(uint32_t *)pm->pm_cmd_buf;
4953 			}
4954 
4955 			desc = (emlxs_throttle_desc_t *)pm->pm_data_buf;
4956 			desc->throttle = MIN(desc->throttle, MAX_NODE_THROTTLE);
4957 
4958 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4959 			    "fca_port_manage: EMLXS_SET_THROTTLE: scope=%d "
4960 			    "depth=%d",
4961 			    scope, desc->throttle);
4962 
4963 			rw_enter(&port->node_rwlock, RW_WRITER);
4964 			switch (scope) {
4965 			case 1: /* all */
4966 				for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
4967 				node = port->node_table[i];
4968 				while (node != NULL) {
4969 					node->io_throttle = desc->throttle;
4970 
4971 					EMLXS_MSGF(EMLXS_CONTEXT,
4972 					    &emlxs_sfs_debug_msg,
4973 					    "EMLXS_SET_THROTTLE: wwpn=%s "
4974 					    "depth=%d",
4975 					    emlxs_wwn_xlate(buf1, sizeof (buf1),
4976 					    (uint8_t *)&node->nlp_portname),
4977 					    node->io_throttle);
4978 
4979 					node = (NODELIST *)node->nlp_list_next;
4980 				}
4981 				}
4982 				break;
4983 
4984 			case 2: /* FCP */
4985 				for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
4986 				node = port->node_table[i];
4987 				while (node != NULL) {
4988 					if (!(node->nlp_fcp_info &
4989 					    NLP_FCP_TGT_DEVICE)) {
4990 						node = (NODELIST *)
4991 						    node->nlp_list_next;
4992 						continue;
4993 					}
4994 
4995 					node->io_throttle = desc->throttle;
4996 
4997 					EMLXS_MSGF(EMLXS_CONTEXT,
4998 					    &emlxs_sfs_debug_msg,
4999 					    "EMLXS_SET_THROTTLE: wwpn=%s "
5000 					    "depth=%d",
5001 					    emlxs_wwn_xlate(buf1, sizeof (buf1),
5002 					    (uint8_t *)&node->nlp_portname),
5003 					    node->io_throttle);
5004 
5005 					node = (NODELIST *)node->nlp_list_next;
5006 				}
5007 				}
5008 				break;
5009 
5010 			case 0: /* WWPN */
5011 			default:
5012 				for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
5013 				node = port->node_table[i];
5014 				while (node != NULL) {
5015 					if (bcmp((caddr_t)&node->nlp_portname,
5016 					    desc->wwpn, 8)) {
5017 						node = (NODELIST *)
5018 						    node->nlp_list_next;
5019 						continue;
5020 					}
5021 
5022 					node->io_throttle = desc->throttle;
5023 
5024 					EMLXS_MSGF(EMLXS_CONTEXT,
5025 					    &emlxs_sfs_debug_msg,
5026 					    "EMLXS_SET_THROTTLE: wwpn=%s "
5027 					    "depth=%d",
5028 					    emlxs_wwn_xlate(buf1, sizeof (buf1),
5029 					    (uint8_t *)&node->nlp_portname),
5030 					    node->io_throttle);
5031 
5032 					goto set_throttle_done;
5033 				}
5034 				}
5035 set_throttle_done:
5036 				break;
5037 			}
5038 
5039 			rw_exit(&port->node_rwlock);
5040 			ret = FC_SUCCESS;
5041 
5042 			break;
5043 		}
5044 
5045 		case EMLXS_GET_THROTTLE:
5046 		{
5047 			emlxs_node_t *node;
5048 			uint32_t i;
5049 			uint32_t j;
5050 			char buf1[32];
5051 			uint32_t count;
5052 			emlxs_throttle_desc_t *desc;
5053 
5054 			if (pm->pm_stat_len == sizeof (uint32_t)) {
5055 				count = emlxs_nport_count(port);
5056 				*(uint32_t *)pm->pm_stat_buf = count;
5057 
5058 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5059 				    "fca_port_manage: EMLXS_GET_THROTTLE: "
5060 				    "count=%d",
5061 				    count);
5062 
5063 				ret = FC_SUCCESS;
5064 				break;
5065 			}
5066 
5067 			if ((pm->pm_stat_buf == NULL) ||
5068 			    (pm->pm_stat_len <
5069 			    sizeof (emlxs_throttle_desc_t))) {
5070 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5071 				    "fca_port_manage: EMLXS_GET_THROTTLE: "
5072 				    "Descriptor buffer too small. %d",
5073 				    pm->pm_data_len);
5074 				ret = FC_BADCMD;
5075 				break;
5076 			}
5077 
5078 			count = pm->pm_stat_len /
5079 			    sizeof (emlxs_throttle_desc_t);
5080 			desc = (emlxs_throttle_desc_t *)pm->pm_stat_buf;
5081 
5082 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5083 			    "fca_port_manage: EMLXS_GET_THROTTLE: max=%d",
5084 			    count);
5085 
5086 			rw_enter(&port->node_rwlock, RW_READER);
5087 			j = 0;
5088 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
5089 				node = port->node_table[i];
5090 				while (node != NULL) {
5091 					if ((node->nlp_DID & 0xFFF000) ==
5092 					    0xFFF000) {
5093 						node = (NODELIST *)
5094 						    node->nlp_list_next;
5095 						continue;
5096 					}
5097 
5098 					bcopy((uint8_t *)&node->nlp_portname,
5099 					    desc[j].wwpn, 8);
5100 					desc[j].throttle = node->io_throttle;
5101 
5102 					EMLXS_MSGF(EMLXS_CONTEXT,
5103 					    &emlxs_sfs_debug_msg,
5104 					    "EMLXS_GET_THROTTLE: wwpn=%s "
5105 					    "depth=%d",
5106 					    emlxs_wwn_xlate(buf1, sizeof (buf1),
5107 					    desc[j].wwpn),
5108 					    desc[j].throttle);
5109 
5110 					j++;
5111 					if (j >= count) {
5112 						goto get_throttle_done;
5113 					}
5114 
5115 					node = (NODELIST *)node->nlp_list_next;
5116 				}
5117 			}
5118 get_throttle_done:
5119 			rw_exit(&port->node_rwlock);
5120 			ret = FC_SUCCESS;
5121 
5122 			break;
5123 		}
5124 #endif /* NODE_THROTTLE_SUPPORT */
5125 
5126 		case EMLXS_GET_FCIO_REV:
5127 		{
5128 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5129 			    "fca_port_manage: GET_FCIO_REV");
5130 
5131 			if (pm->pm_stat_len < sizeof (uint32_t)) {
5132 				ret = FC_NOMEM;
5133 				break;
5134 			}
5135 
5136 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
5137 			*(uint32_t *)pm->pm_stat_buf = FCIO_REV;
5138 
5139 			break;
5140 		}
5141 
5142 		case EMLXS_GET_DFC_REV:
5143 		{
5144 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5145 			    "fca_port_manage: GET_DFC_REV");
5146 
5147 			if (pm->pm_stat_len < sizeof (uint32_t)) {
5148 				ret = FC_NOMEM;
5149 				break;
5150 			}
5151 
5152 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
5153 			*(uint32_t *)pm->pm_stat_buf = DFC_REV;
5154 
5155 			break;
5156 		}
5157 
5158 		case EMLXS_SET_BOOT_STATE:
5159 		case EMLXS_SET_BOOT_STATE_old:
5160 		{
5161 			uint32_t	state;
5162 
5163 			if (!(hba->flag & FC_ONLINE_MODE)) {
5164 				return (FC_OFFLINE);
5165 			}
5166 			if (pm->pm_cmd_len < sizeof (uint32_t)) {
5167 				EMLXS_MSGF(EMLXS_CONTEXT,
5168 				    &emlxs_sfs_debug_msg,
5169 				    "fca_port_manage: SET_BOOT_STATE");
5170 				ret = FC_BADCMD;
5171 				break;
5172 			}
5173 
5174 			state = *(uint32_t *)pm->pm_cmd_buf;
5175 
5176 			if (state == 0) {
5177 				EMLXS_MSGF(EMLXS_CONTEXT,
5178 				    &emlxs_sfs_debug_msg,
5179 				    "fca_port_manage: SET_BOOT_STATE: "
5180 				    "Disable");
5181 				ret = emlxs_boot_code_disable(hba);
5182 			} else {
5183 				EMLXS_MSGF(EMLXS_CONTEXT,
5184 				    &emlxs_sfs_debug_msg,
5185 				    "fca_port_manage: SET_BOOT_STATE: "
5186 				    "Enable");
5187 				ret = emlxs_boot_code_enable(hba);
5188 			}
5189 
5190 			break;
5191 		}
5192 
5193 		case EMLXS_GET_BOOT_STATE:
5194 		case EMLXS_GET_BOOT_STATE_old:
5195 		{
5196 			if (!(hba->flag & FC_ONLINE_MODE)) {
5197 				return (FC_OFFLINE);
5198 			}
5199 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5200 			    "fca_port_manage: GET_BOOT_STATE");
5201 
5202 			if (pm->pm_stat_len < sizeof (uint32_t)) {
5203 				ret = FC_NOMEM;
5204 				break;
5205 			}
5206 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
5207 
5208 			ret = emlxs_boot_code_state(hba);
5209 
5210 			if (ret == FC_SUCCESS) {
5211 				*(uint32_t *)pm->pm_stat_buf = 1;
5212 				ret = FC_SUCCESS;
5213 			} else if (ret == FC_FAILURE) {
5214 				ret = FC_SUCCESS;
5215 			}
5216 
5217 			break;
5218 		}
5219 
5220 		case EMLXS_HW_ERROR_TEST:
5221 		{
5222 			/*
5223 			 * This command is used for simulating HW ERROR
5224 			 * on SLI4 only.
5225 			 */
5226 			if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5227 				ret = FC_INVALID_REQUEST;
5228 				break;
5229 			}
5230 			hba->sli.sli4.flag |= EMLXS_SLI4_HW_ERROR;
5231 			break;
5232 		}
5233 
5234 		case EMLXS_MB_TIMEOUT_TEST:
5235 		{
5236 			if (!(hba->flag & FC_ONLINE_MODE)) {
5237 				return (FC_OFFLINE);
5238 			}
5239 
5240 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5241 			    "fca_port_manage: HW_ERROR_TEST");
5242 
5243 			/* Trigger a mailbox timeout */
5244 			hba->mbox_timer = hba->timer_tics;
5245 
5246 			break;
5247 		}
5248 
5249 		case EMLXS_TEST_CODE:
5250 		{
5251 			uint32_t *cmd;
5252 
5253 			if (!(hba->flag & FC_ONLINE_MODE)) {
5254 				return (FC_OFFLINE);
5255 			}
5256 
5257 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5258 			    "fca_port_manage: TEST_CODE");
5259 
5260 			if (pm->pm_cmd_len < sizeof (uint32_t)) {
5261 				EMLXS_MSGF(EMLXS_CONTEXT,
5262 				    &emlxs_sfs_debug_msg,
5263 				    "fca_port_manage: TEST_CODE. "
5264 				    "inbuf to small.");
5265 
5266 				ret = FC_BADCMD;
5267 				break;
5268 			}
5269 
5270 			cmd = (uint32_t *)pm->pm_cmd_buf;
5271 
5272 			ret = emlxs_test(hba, cmd[0],
5273 			    (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]);
5274 
5275 			break;
5276 		}
5277 
5278 		case EMLXS_BAR_IO:
5279 		{
5280 			uint32_t *cmd;
5281 			uint32_t *datap;
5282 			FCIO_Q_STAT_t *qp;
5283 			clock_t	 time;
5284 			uint32_t offset;
5285 			caddr_t  addr;
5286 			uint32_t i;
5287 			uint32_t tx_cnt;
5288 			uint32_t chip_cnt;
5289 
5290 			cmd = (uint32_t *)pm->pm_cmd_buf;
5291 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5292 			    "fca_port_manage: BAR_IO %x %x %x",
5293 			    cmd[0], cmd[1], cmd[2]);
5294 
5295 			offset = cmd[1];
5296 
5297 			ret = FC_SUCCESS;
5298 
5299 			switch (cmd[0]) {
5300 			case 2: /* bar1read */
5301 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5302 					return (FC_BADCMD);
5303 				}
5304 
5305 				/* Registers in this range are invalid */
5306 				if ((offset >= 0x4C00) && (offset < 0x5000)) {
5307 					return (FC_BADCMD);
5308 				}
5309 				if ((offset >= 0x5800) || (offset & 0x3)) {
5310 					return (FC_BADCMD);
5311 				}
5312 				datap = (uint32_t *)pm->pm_stat_buf;
5313 
5314 				for (i = 0; i < pm->pm_stat_len;
5315 				    i += sizeof (uint32_t)) {
5316 					if ((offset >= 0x4C00) &&
5317 					    (offset < 0x5000)) {
5318 						pm->pm_stat_len = i;
5319 						break;
5320 					}
5321 					if (offset >= 0x5800) {
5322 						pm->pm_stat_len = i;
5323 						break;
5324 					}
5325 					addr = hba->sli.sli4.bar1_addr + offset;
5326 					*datap = READ_BAR1_REG(hba, addr);
5327 					datap++;
5328 					offset += sizeof (uint32_t);
5329 				}
5330 #ifdef FMA_SUPPORT
5331 				/* Access handle validation */
5332 				EMLXS_CHK_ACC_HANDLE(hba,
5333 				    hba->sli.sli4.bar1_acc_handle);
5334 #endif  /* FMA_SUPPORT */
5335 				break;
5336 			case 3: /* bar2read */
5337 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5338 					return (FC_BADCMD);
5339 				}
5340 				if ((offset >= 0x1000) || (offset & 0x3)) {
5341 					return (FC_BADCMD);
5342 				}
5343 				datap = (uint32_t *)pm->pm_stat_buf;
5344 
5345 				for (i = 0; i < pm->pm_stat_len;
5346 				    i += sizeof (uint32_t)) {
5347 					*datap = READ_BAR2_REG(hba,
5348 					    hba->sli.sli4.bar2_addr + offset);
5349 					datap++;
5350 					offset += sizeof (uint32_t);
5351 				}
5352 #ifdef FMA_SUPPORT
5353 				/* Access handle validation */
5354 				EMLXS_CHK_ACC_HANDLE(hba,
5355 				    hba->sli.sli4.bar2_acc_handle);
5356 #endif  /* FMA_SUPPORT */
5357 				break;
5358 			case 4: /* bar1write */
5359 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5360 					return (FC_BADCMD);
5361 				}
5362 				WRITE_BAR1_REG(hba, hba->sli.sli4.bar1_addr +
5363 				    offset, cmd[2]);
5364 #ifdef FMA_SUPPORT
5365 				/* Access handle validation */
5366 				EMLXS_CHK_ACC_HANDLE(hba,
5367 				    hba->sli.sli4.bar1_acc_handle);
5368 #endif  /* FMA_SUPPORT */
5369 				break;
5370 			case 5: /* bar2write */
5371 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5372 					return (FC_BADCMD);
5373 				}
5374 				WRITE_BAR2_REG(hba, hba->sli.sli4.bar2_addr +
5375 				    offset, cmd[2]);
5376 #ifdef FMA_SUPPORT
5377 				/* Access handle validation */
5378 				EMLXS_CHK_ACC_HANDLE(hba,
5379 				    hba->sli.sli4.bar2_acc_handle);
5380 #endif  /* FMA_SUPPORT */
5381 				break;
5382 			case 6: /* dumpbsmbox */
5383 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5384 					return (FC_BADCMD);
5385 				}
5386 				if (offset != 0) {
5387 					return (FC_BADCMD);
5388 				}
5389 
5390 				bcopy((caddr_t)hba->sli.sli4.bootstrapmb.virt,
5391 				    (caddr_t)pm->pm_stat_buf, 256);
5392 				break;
5393 			case 7: /* pciread */
5394 				if ((offset >= 0x200) || (offset & 0x3)) {
5395 					return (FC_BADCMD);
5396 				}
5397 				datap = (uint32_t *)pm->pm_stat_buf;
5398 				for (i = 0; i < pm->pm_stat_len;
5399 				    i += sizeof (uint32_t)) {
5400 					*datap = ddi_get32(hba->pci_acc_handle,
5401 					    (uint32_t *)(hba->pci_addr +
5402 					    offset));
5403 					datap++;
5404 					offset += sizeof (uint32_t);
5405 				}
5406 #ifdef FMA_SUPPORT
5407 				/* Access handle validation */
5408 				EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
5409 #endif  /* FMA_SUPPORT */
5410 				break;
5411 			case 8: /* abortall */
5412 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5413 					return (FC_BADCMD);
5414 				}
5415 				emlxs_abort_all(hba, &tx_cnt, &chip_cnt);
5416 				datap = (uint32_t *)pm->pm_stat_buf;
5417 				*datap++ = tx_cnt;
5418 				*datap = chip_cnt;
5419 				break;
5420 			case 9: /* get_q_info */
5421 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5422 					return (FC_BADCMD);
5423 				}
5424 				qp = (FCIO_Q_STAT_t *)pm->pm_stat_buf;
5425 				for (i = 0; i < FCIO_MAX_EQS; i++) {
5426 					addr = hba->sli.sli4.eq[i].addr.virt;
5427 					qp->eq[i].host_index =
5428 					    hba->sli.sli4.eq[i].host_index;
5429 					qp->eq[i].max_index =
5430 					    hba->sli.sli4.eq[i].max_index;
5431 					qp->eq[i].qid =
5432 					    hba->sli.sli4.eq[i].qid;
5433 					qp->eq[i].msix_vector =
5434 					    hba->sli.sli4.eq[i].msix_vector;
5435 					qp->eq[i].phys =
5436 					    hba->sli.sli4.eq[i].addr.phys;
5437 					qp->eq[i].virt = PADDR_LO(
5438 					    (uintptr_t)addr);
5439 					qp->eq[i].virt_hi  = PADDR_HI(
5440 					    (uintptr_t)addr);
5441 					qp->eq[i].max_proc =
5442 					    hba->sli.sli4.eq[i].max_proc;
5443 					qp->eq[i].isr_count =
5444 					    hba->sli.sli4.eq[i].isr_count;
5445 					qp->eq[i].num_proc =
5446 					    hba->sli.sli4.eq[i].num_proc;
5447 				}
5448 				for (i = 0; i < FCIO_MAX_CQS; i++) {
5449 					addr = hba->sli.sli4.cq[i].addr.virt;
5450 					qp->cq[i].host_index =
5451 					    hba->sli.sli4.cq[i].host_index;
5452 					qp->cq[i].max_index =
5453 					    hba->sli.sli4.cq[i].max_index;
5454 					qp->cq[i].qid =
5455 					    hba->sli.sli4.cq[i].qid;
5456 					qp->cq[i].eqid =
5457 					    hba->sli.sli4.cq[i].eqid;
5458 					qp->cq[i].type =
5459 					    hba->sli.sli4.cq[i].type;
5460 					qp->cq[i].phys =
5461 					    hba->sli.sli4.cq[i].addr.phys;
5462 					qp->cq[i].virt = PADDR_LO(
5463 					    (uintptr_t)addr);
5464 					qp->cq[i].virt_hi = PADDR_HI(
5465 					    (uintptr_t)addr);
5466 					qp->cq[i].max_proc =
5467 					    hba->sli.sli4.cq[i].max_proc;
5468 					qp->cq[i].isr_count =
5469 					    hba->sli.sli4.cq[i].isr_count;
5470 					qp->cq[i].num_proc =
5471 					    hba->sli.sli4.cq[i].num_proc;
5472 				}
5473 				for (i = 0; i < FCIO_MAX_WQS; i++) {
5474 					addr = hba->sli.sli4.wq[i].addr.virt;
5475 					qp->wq[i].host_index =
5476 					    hba->sli.sli4.wq[i].host_index;
5477 					qp->wq[i].max_index =
5478 					    hba->sli.sli4.wq[i].max_index;
5479 					qp->wq[i].port_index =
5480 					    hba->sli.sli4.wq[i].port_index;
5481 					qp->wq[i].release_depth =
5482 					    hba->sli.sli4.wq[i].release_depth;
5483 					qp->wq[i].qid =
5484 					    hba->sli.sli4.wq[i].qid;
5485 					qp->wq[i].cqid =
5486 					    hba->sli.sli4.wq[i].cqid;
5487 					qp->wq[i].phys =
5488 					    hba->sli.sli4.wq[i].addr.phys;
5489 					qp->wq[i].virt = PADDR_LO(
5490 					    (uintptr_t)addr);
5491 					qp->wq[i].virt_hi = PADDR_HI(
5492 					    (uintptr_t)addr);
5493 					qp->wq[i].num_proc =
5494 					    hba->sli.sli4.wq[i].num_proc;
5495 					qp->wq[i].num_busy =
5496 					    hba->sli.sli4.wq[i].num_busy;
5497 				}
5498 				for (i = 0; i < FCIO_MAX_RQS; i++) {
5499 					addr = hba->sli.sli4.rq[i].addr.virt;
5500 					qp->rq[i].qid =
5501 					    hba->sli.sli4.rq[i].qid;
5502 					qp->rq[i].cqid =
5503 					    hba->sli.sli4.rq[i].cqid;
5504 					qp->rq[i].host_index =
5505 					    hba->sli.sli4.rq[i].host_index;
5506 					qp->rq[i].max_index =
5507 					    hba->sli.sli4.rq[i].max_index;
5508 					qp->rq[i].phys =
5509 					    hba->sli.sli4.rq[i].addr.phys;
5510 					qp->rq[i].virt = PADDR_LO(
5511 					    (uintptr_t)addr);
5512 					qp->rq[i].virt_hi = PADDR_HI(
5513 					    (uintptr_t)addr);
5514 					qp->rq[i].num_proc =
5515 					    hba->sli.sli4.rq[i].num_proc;
5516 				}
5517 				qp->que_start_timer =
5518 				    hba->sli.sli4.que_stat_timer;
5519 				(void) drv_getparm(LBOLT, &time);
5520 				qp->que_current_timer = (uint32_t)time;
5521 				qp->intr_count = hba->intr_count;
5522 				break;
5523 			case 10: /* zero_q_stat */
5524 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5525 					return (FC_BADCMD);
5526 				}
5527 				emlxs_sli4_zero_queue_stat(hba);
5528 				break;
5529 			default:
5530 				ret = FC_BADCMD;
5531 				break;
5532 			}
5533 			break;
5534 		}
5535 
5536 		default:
5537 
5538 			ret = FC_INVALID_REQUEST;
5539 			break;
5540 		}
5541 
5542 		break;
5543 
5544 	}
5545 
5546 	case FC_PORT_INITIALIZE:
5547 		if (!(hba->flag & FC_ONLINE_MODE)) {
5548 			return (FC_OFFLINE);
5549 		}
5550 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5551 		    "fca_port_manage: FC_PORT_INITIALIZE");
5552 		break;
5553 
5554 	case FC_PORT_LOOPBACK:
5555 		if (!(hba->flag & FC_ONLINE_MODE)) {
5556 			return (FC_OFFLINE);
5557 		}
5558 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5559 		    "fca_port_manage: FC_PORT_LOOPBACK");
5560 		break;
5561 
5562 	case FC_PORT_BYPASS:
5563 		if (!(hba->flag & FC_ONLINE_MODE)) {
5564 			return (FC_OFFLINE);
5565 		}
5566 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5567 		    "fca_port_manage: FC_PORT_BYPASS");
5568 		ret = FC_INVALID_REQUEST;
5569 		break;
5570 
5571 	case FC_PORT_UNBYPASS:
5572 		if (!(hba->flag & FC_ONLINE_MODE)) {
5573 			return (FC_OFFLINE);
5574 		}
5575 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5576 		    "fca_port_manage: FC_PORT_UNBYPASS");
5577 		ret = FC_INVALID_REQUEST;
5578 		break;
5579 
5580 	case FC_PORT_GET_NODE_ID:
5581 	{
5582 		fc_rnid_t *rnid;
5583 
5584 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5585 		    "fca_port_manage: FC_PORT_GET_NODE_ID");
5586 
5587 		bzero(pm->pm_data_buf, pm->pm_data_len);
5588 
5589 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
5590 			ret = FC_NOMEM;
5591 			break;
5592 		}
5593 
5594 		rnid = (fc_rnid_t *)pm->pm_data_buf;
5595 
5596 		(void) snprintf((char *)rnid->global_id,
5597 		    (sizeof (rnid->global_id)-1),
5598 		    "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
5599 		    hba->wwpn.nameType, hba->wwpn.IEEEextMsn,
5600 		    hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
5601 		    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
5602 		    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
5603 
5604 		rnid->unit_type  = RNID_HBA;
5605 		rnid->port_id    = port->did;
5606 		rnid->ip_version = RNID_IPV4;
5607 
5608 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5609 		    "GET_NODE_ID: wwpn:       %s", rnid->global_id);
5610 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5611 		    "GET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
5612 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5613 		    "GET_NODE_ID: port_id:    0x%x", rnid->port_id);
5614 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5615 		    "GET_NODE_ID: num_attach: %d", rnid->num_attached);
5616 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5617 		    "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
5618 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5619 		    "GET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
5620 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5621 		    "GET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
5622 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5623 		    "GET_NODE_ID: resv:       0x%x", rnid->specific_id_resv);
5624 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5625 		    "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5626 
5627 		ret = FC_SUCCESS;
5628 		break;
5629 	}
5630 
5631 	case FC_PORT_SET_NODE_ID:
5632 	{
5633 		fc_rnid_t *rnid;
5634 
5635 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5636 		    "fca_port_manage: FC_PORT_SET_NODE_ID");
5637 
5638 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
5639 			ret = FC_NOMEM;
5640 			break;
5641 		}
5642 
5643 		rnid = (fc_rnid_t *)pm->pm_data_buf;
5644 
5645 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5646 		    "SET_NODE_ID: wwpn:       %s", rnid->global_id);
5647 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5648 		    "SET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
5649 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5650 		    "SET_NODE_ID: port_id:    0x%x", rnid->port_id);
5651 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5652 		    "SET_NODE_ID: num_attach: %d", rnid->num_attached);
5653 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5654 		    "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
5655 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5656 		    "SET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
5657 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5658 		    "SET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
5659 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5660 		    "SET_NODE_ID: resv:       0x%x", rnid->specific_id_resv);
5661 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5662 		    "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5663 
5664 		ret = FC_SUCCESS;
5665 		break;
5666 	}
5667 
5668 #ifdef S11
5669 	case FC_PORT_GET_P2P_INFO:
5670 	{
5671 		fc_fca_p2p_info_t	*p2p_info;
5672 		NODELIST		*ndlp;
5673 
5674 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5675 		    "fca_port_manage: FC_PORT_GET_P2P_INFO");
5676 
5677 		bzero(pm->pm_data_buf, pm->pm_data_len);
5678 
5679 		if (pm->pm_data_len < sizeof (fc_fca_p2p_info_t)) {
5680 			ret = FC_NOMEM;
5681 			break;
5682 		}
5683 
5684 		p2p_info = (fc_fca_p2p_info_t *)pm->pm_data_buf;
5685 
5686 		if (hba->state >= FC_LINK_UP) {
5687 			if ((hba->topology == TOPOLOGY_PT_PT) &&
5688 			    (hba->flag & FC_PT_TO_PT)) {
5689 				p2p_info->fca_d_id = port->did;
5690 				p2p_info->d_id = port->rdid;
5691 
5692 				ndlp = emlxs_node_find_did(port,
5693 				    port->rdid, 1);
5694 
5695 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5696 				    "FC_PORT_GET_P2P_INFO: fca_d_id: 0x%x, "
5697 				    "d_id: 0x%x, ndlp: 0x%p", port->did,
5698 				    port->rdid, ndlp);
5699 				if (ndlp) {
5700 					bcopy(&ndlp->nlp_portname,
5701 					    (caddr_t)&p2p_info->pwwn,
5702 					    sizeof (la_wwn_t));
5703 					bcopy(&ndlp->nlp_nodename,
5704 					    (caddr_t)&p2p_info->nwwn,
5705 					    sizeof (la_wwn_t));
5706 
5707 					ret = FC_SUCCESS;
5708 					break;
5709 
5710 				}
5711 			}
5712 		}
5713 
5714 		ret = FC_FAILURE;
5715 		break;
5716 	}
5717 #endif /* S11 */
5718 
5719 	default:
5720 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5721 		    "fca_port_manage: code=%x", pm->pm_cmd_code);
5722 		ret = FC_INVALID_REQUEST;
5723 		break;
5724 
5725 	}
5726 
5727 	return (ret);
5728 
5729 } /* emlxs_fca_port_manage() */
5730 
5731 
5732 /*ARGSUSED*/
5733 static uint32_t
emlxs_test(emlxs_hba_t * hba,uint32_t test_code,uint32_t args,uint32_t * arg)5734 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args,
5735     uint32_t *arg)
5736 {
5737 	uint32_t rval = 0;
5738 	emlxs_port_t   *port = &PPORT;
5739 
5740 	switch (test_code) {
5741 #ifdef TEST_SUPPORT
5742 	case 1: /* SCSI underrun */
5743 	{
5744 		hba->underrun_counter = (args)? arg[0]:1;
5745 		break;
5746 	}
5747 #endif /* TEST_SUPPORT */
5748 
5749 	default:
5750 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5751 		    "test: Unsupported test code. (0x%x)", test_code);
5752 		rval = FC_INVALID_REQUEST;
5753 	}
5754 
5755 	return (rval);
5756 
5757 } /* emlxs_test() */
5758 
5759 
5760 /*
5761  * Given the device number, return the devinfo pointer or the ddiinst number.
5762  * Note: this routine must be successful on DDI_INFO_DEVT2INSTANCE even
5763  * before attach.
5764  *
5765  * Translate "dev_t" to a pointer to the associated "dev_info_t".
5766  */
5767 /*ARGSUSED*/
5768 static int
emlxs_info(dev_info_t * dip,ddi_info_cmd_t infocmd,void * arg,void ** result)5769 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5770 {
5771 	emlxs_hba_t	*hba;
5772 	int32_t		ddiinst;
5773 
5774 	ddiinst = getminor((dev_t)arg);
5775 
5776 	switch (infocmd) {
5777 	case DDI_INFO_DEVT2DEVINFO:
5778 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5779 		if (hba)
5780 			*result = hba->dip;
5781 		else
5782 			*result = NULL;
5783 		break;
5784 
5785 	case DDI_INFO_DEVT2INSTANCE:
5786 		*result = (void *)((unsigned long)ddiinst);
5787 		break;
5788 
5789 	default:
5790 		return (DDI_FAILURE);
5791 	}
5792 
5793 	return (DDI_SUCCESS);
5794 
5795 } /* emlxs_info() */
5796 
5797 
5798 static int32_t
emlxs_power(dev_info_t * dip,int32_t comp,int32_t level)5799 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
5800 {
5801 	emlxs_hba_t	*hba;
5802 	emlxs_port_t	*port;
5803 	int32_t		ddiinst;
5804 	int		rval = DDI_SUCCESS;
5805 
5806 	ddiinst = ddi_get_instance(dip);
5807 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5808 	port = &PPORT;
5809 
5810 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5811 	    "fca_power: comp=%x level=%x", comp, level);
5812 
5813 	if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
5814 		return (DDI_FAILURE);
5815 	}
5816 
5817 	mutex_enter(&EMLXS_PM_LOCK);
5818 
5819 	/* If we are already at the proper level then return success */
5820 	if (hba->pm_level == level) {
5821 		mutex_exit(&EMLXS_PM_LOCK);
5822 		return (DDI_SUCCESS);
5823 	}
5824 
5825 	switch (level) {
5826 	case EMLXS_PM_ADAPTER_UP:
5827 
5828 		/*
5829 		 * If we are already in emlxs_attach,
5830 		 * let emlxs_hba_attach take care of things
5831 		 */
5832 		if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
5833 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
5834 			break;
5835 		}
5836 
5837 		/* Check if adapter is suspended */
5838 		if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5839 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
5840 
5841 			/* Try to resume the port */
5842 			rval = emlxs_hba_resume(dip);
5843 
5844 			if (rval != DDI_SUCCESS) {
5845 				hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5846 			}
5847 			break;
5848 		}
5849 
5850 		/* Set adapter up */
5851 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
5852 		break;
5853 
5854 	case EMLXS_PM_ADAPTER_DOWN:
5855 
5856 
5857 		/*
5858 		 * If we are already in emlxs_detach,
5859 		 * let emlxs_hba_detach take care of things
5860 		 */
5861 		if (hba->pm_state & EMLXS_PM_IN_DETACH) {
5862 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5863 			break;
5864 		}
5865 
5866 		/* Check if adapter is not suspended */
5867 		if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
5868 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5869 
5870 			/* Try to suspend the port */
5871 			rval = emlxs_hba_suspend(dip);
5872 
5873 			if (rval != DDI_SUCCESS) {
5874 				hba->pm_level = EMLXS_PM_ADAPTER_UP;
5875 			}
5876 
5877 			break;
5878 		}
5879 
5880 		/* Set adapter down */
5881 		hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5882 		break;
5883 
5884 	default:
5885 		rval = DDI_FAILURE;
5886 		break;
5887 
5888 	}
5889 
5890 	mutex_exit(&EMLXS_PM_LOCK);
5891 
5892 	return (rval);
5893 
5894 } /* emlxs_power() */
5895 
5896 
5897 #ifdef EMLXS_I386
5898 #ifdef S11
5899 /*
5900  * quiesce(9E) entry point.
5901  *
5902  * This function is called when the system is single-thread at hight PIL
5903  * with preemption disabled. Therefore, this function must not be blocked.
5904  *
5905  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5906  * DDI_FAILURE indicates an error condition and should almost never happen.
5907  */
5908 static int
emlxs_quiesce(dev_info_t * dip)5909 emlxs_quiesce(dev_info_t *dip)
5910 {
5911 	emlxs_hba_t	*hba;
5912 	emlxs_port_t	*port;
5913 	int32_t		ddiinst;
5914 	int		rval = DDI_SUCCESS;
5915 
5916 	ddiinst = ddi_get_instance(dip);
5917 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5918 	port = &PPORT;
5919 
5920 	if (hba == NULL || port == NULL) {
5921 		return (DDI_FAILURE);
5922 	}
5923 
5924 	/* The fourth arg 1 indicates the call is from quiesce */
5925 	if (EMLXS_SLI_HBA_RESET(hba, 1, 1, 1) == 0) {
5926 		return (rval);
5927 	} else {
5928 		return (DDI_FAILURE);
5929 	}
5930 
5931 } /* emlxs_quiesce */
5932 #endif /* S11 */
5933 #endif /* EMLXS_I386 */
5934 
5935 
5936 static int
emlxs_open(dev_t * dev_p,int32_t flag,int32_t otype,cred_t * cred_p)5937 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
5938 {
5939 	emlxs_hba_t	*hba;
5940 	emlxs_port_t	*port;
5941 	int		ddiinst;
5942 
5943 	ddiinst = getminor(*dev_p);
5944 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5945 
5946 	if (hba == NULL) {
5947 		return (ENXIO);
5948 	}
5949 
5950 	port = &PPORT;
5951 
5952 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5953 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5954 		    "open failed: Driver suspended.");
5955 		return (ENXIO);
5956 	}
5957 
5958 	if (otype != OTYP_CHR) {
5959 		return (EINVAL);
5960 	}
5961 
5962 	if (drv_priv(cred_p)) {
5963 		return (EPERM);
5964 	}
5965 
5966 	mutex_enter(&EMLXS_IOCTL_LOCK);
5967 
5968 	if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
5969 		mutex_exit(&EMLXS_IOCTL_LOCK);
5970 		return (EBUSY);
5971 	}
5972 
5973 	if (flag & FEXCL) {
5974 		if (hba->ioctl_flags & EMLXS_OPEN) {
5975 			mutex_exit(&EMLXS_IOCTL_LOCK);
5976 			return (EBUSY);
5977 		}
5978 
5979 		hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
5980 	}
5981 
5982 	hba->ioctl_flags |= EMLXS_OPEN;
5983 
5984 	mutex_exit(&EMLXS_IOCTL_LOCK);
5985 
5986 	return (0);
5987 
5988 } /* emlxs_open() */
5989 
5990 
5991 /*ARGSUSED*/
5992 static int
emlxs_close(dev_t dev,int32_t flag,int32_t otype,cred_t * cred_p)5993 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
5994 {
5995 	emlxs_hba_t	*hba;
5996 	int		ddiinst;
5997 
5998 	ddiinst = getminor(dev);
5999 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6000 
6001 	if (hba == NULL) {
6002 		return (ENXIO);
6003 	}
6004 
6005 	if (otype != OTYP_CHR) {
6006 		return (EINVAL);
6007 	}
6008 
6009 	mutex_enter(&EMLXS_IOCTL_LOCK);
6010 
6011 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
6012 		mutex_exit(&EMLXS_IOCTL_LOCK);
6013 		return (ENODEV);
6014 	}
6015 
6016 	hba->ioctl_flags &= ~EMLXS_OPEN;
6017 	hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;
6018 
6019 	mutex_exit(&EMLXS_IOCTL_LOCK);
6020 
6021 	return (0);
6022 
6023 } /* emlxs_close() */
6024 
6025 
6026 /*ARGSUSED*/
6027 static int
emlxs_ioctl(dev_t dev,int32_t cmd,intptr_t arg,int32_t mode,cred_t * cred_p,int32_t * rval_p)6028 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
6029     cred_t *cred_p, int32_t *rval_p)
6030 {
6031 	emlxs_hba_t	*hba;
6032 	emlxs_port_t	*port;
6033 	int		rval = 0;	/* return code */
6034 	int		ddiinst;
6035 
6036 	ddiinst = getminor(dev);
6037 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6038 
6039 	if (hba == NULL) {
6040 		return (ENXIO);
6041 	}
6042 
6043 	port = &PPORT;
6044 
6045 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
6046 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
6047 		    "ioctl failed: Driver suspended.");
6048 
6049 		return (ENXIO);
6050 	}
6051 
6052 	mutex_enter(&EMLXS_IOCTL_LOCK);
6053 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
6054 		mutex_exit(&EMLXS_IOCTL_LOCK);
6055 		return (ENXIO);
6056 	}
6057 	mutex_exit(&EMLXS_IOCTL_LOCK);
6058 
6059 #ifdef IDLE_TIMER
6060 	emlxs_pm_busy_component(hba);
6061 #endif	/* IDLE_TIMER */
6062 
6063 	switch (cmd) {
6064 	case EMLXS_DFC_COMMAND:
6065 		rval = emlxs_dfc_manage(hba, (void *)arg, mode);
6066 		break;
6067 
6068 	default:
6069 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
6070 		    "ioctl: Invalid command received. cmd=%x", cmd);
6071 		rval = EINVAL;
6072 	}
6073 
6074 done:
6075 	return (rval);
6076 
6077 } /* emlxs_ioctl() */
6078 
6079 
6080 
6081 /*
6082  *
6083  *	Device Driver Common Routines
6084  *
6085  */
6086 
6087 /* EMLXS_PM_LOCK must be held for this call */
6088 static int
emlxs_hba_resume(dev_info_t * dip)6089 emlxs_hba_resume(dev_info_t *dip)
6090 {
6091 	emlxs_hba_t	*hba;
6092 	emlxs_port_t	*port;
6093 	int		ddiinst;
6094 
6095 	ddiinst = ddi_get_instance(dip);
6096 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6097 	port = &PPORT;
6098 
6099 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);
6100 
6101 	if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
6102 		return (DDI_SUCCESS);
6103 	}
6104 
6105 	hba->pm_state &= ~EMLXS_PM_SUSPENDED;
6106 
6107 	/* Re-enable the physical port on this HBA */
6108 	port->flag |= EMLXS_PORT_ENABLED;
6109 
6110 	/* Take the adapter online */
6111 	if (emlxs_power_up(hba)) {
6112 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
6113 		    "Unable to take adapter online.");
6114 
6115 		hba->pm_state |= EMLXS_PM_SUSPENDED;
6116 
6117 		return (DDI_FAILURE);
6118 	}
6119 
6120 	return (DDI_SUCCESS);
6121 
6122 } /* emlxs_hba_resume() */
6123 
6124 
6125 /* EMLXS_PM_LOCK must be held for this call */
6126 static int
emlxs_hba_suspend(dev_info_t * dip)6127 emlxs_hba_suspend(dev_info_t *dip)
6128 {
6129 	emlxs_hba_t	*hba;
6130 	emlxs_port_t	*port;
6131 	int		ddiinst;
6132 
6133 	ddiinst = ddi_get_instance(dip);
6134 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6135 	port = &PPORT;
6136 
6137 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);
6138 
6139 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
6140 		return (DDI_SUCCESS);
6141 	}
6142 
6143 	hba->pm_state |= EMLXS_PM_SUSPENDED;
6144 
6145 	/* Take the adapter offline */
6146 	if (emlxs_power_down(hba)) {
6147 		hba->pm_state &= ~EMLXS_PM_SUSPENDED;
6148 
6149 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
6150 		    "Unable to take adapter offline.");
6151 
6152 		return (DDI_FAILURE);
6153 	}
6154 
6155 	return (DDI_SUCCESS);
6156 
6157 } /* emlxs_hba_suspend() */
6158 
6159 
6160 
6161 static void
emlxs_lock_init(emlxs_hba_t * hba)6162 emlxs_lock_init(emlxs_hba_t *hba)
6163 {
6164 	emlxs_port_t	*port = &PPORT;
6165 	uint32_t	i;
6166 
6167 	/* Initialize the power management */
6168 	mutex_init(&EMLXS_PM_LOCK, NULL, MUTEX_DRIVER,
6169 	    DDI_INTR_PRI(hba->intr_arg));
6170 
6171 	mutex_init(&EMLXS_TIMER_LOCK, NULL, MUTEX_DRIVER,
6172 	    DDI_INTR_PRI(hba->intr_arg));
6173 
6174 	cv_init(&hba->timer_lock_cv, NULL, CV_DRIVER, NULL);
6175 
6176 	mutex_init(&EMLXS_PORT_LOCK, NULL, MUTEX_DRIVER,
6177 	    DDI_INTR_PRI(hba->intr_arg));
6178 
6179 	mutex_init(&EMLXS_MBOX_LOCK, NULL, MUTEX_DRIVER,
6180 	    DDI_INTR_PRI(hba->intr_arg));
6181 
6182 	cv_init(&EMLXS_MBOX_CV, NULL, CV_DRIVER, NULL);
6183 
6184 	mutex_init(&EMLXS_LINKUP_LOCK, NULL, MUTEX_DRIVER,
6185 	    DDI_INTR_PRI(hba->intr_arg));
6186 
6187 	cv_init(&EMLXS_LINKUP_CV, NULL, CV_DRIVER, NULL);
6188 
6189 	mutex_init(&EMLXS_TX_CHANNEL_LOCK, NULL, MUTEX_DRIVER,
6190 	    DDI_INTR_PRI(hba->intr_arg));
6191 
6192 	for (i = 0; i < MAX_RINGS; i++) {
6193 		mutex_init(&EMLXS_CMD_RING_LOCK(i), NULL, MUTEX_DRIVER,
6194 		    DDI_INTR_PRI(hba->intr_arg));
6195 	}
6196 
6197 
6198 	for (i = 0; i < EMLXS_MAX_WQS; i++) {
6199 		mutex_init(&EMLXS_QUE_LOCK(i), NULL, MUTEX_DRIVER,
6200 		    DDI_INTR_PRI(hba->intr_arg));
6201 	}
6202 
6203 	mutex_init(&EMLXS_MSIID_LOCK, NULL, MUTEX_DRIVER,
6204 	    DDI_INTR_PRI(hba->intr_arg));
6205 
6206 	mutex_init(&EMLXS_FCTAB_LOCK, NULL, MUTEX_DRIVER,
6207 	    DDI_INTR_PRI(hba->intr_arg));
6208 
6209 	mutex_init(&EMLXS_MEMGET_LOCK, NULL, MUTEX_DRIVER,
6210 	    DDI_INTR_PRI(hba->intr_arg));
6211 
6212 	mutex_init(&EMLXS_MEMPUT_LOCK, NULL, MUTEX_DRIVER,
6213 	    DDI_INTR_PRI(hba->intr_arg));
6214 
6215 	mutex_init(&EMLXS_IOCTL_LOCK, NULL, MUTEX_DRIVER,
6216 	    DDI_INTR_PRI(hba->intr_arg));
6217 
6218 #ifdef DUMP_SUPPORT
6219 	mutex_init(&EMLXS_DUMP_LOCK, NULL, MUTEX_DRIVER,
6220 	    DDI_INTR_PRI(hba->intr_arg));
6221 #endif /* DUMP_SUPPORT */
6222 
6223 	mutex_init(&EMLXS_SPAWN_LOCK, NULL, MUTEX_DRIVER,
6224 	    DDI_INTR_PRI(hba->intr_arg));
6225 
6226 	/* Create per port locks */
6227 	for (i = 0; i < MAX_VPORTS; i++) {
6228 		port = &VPORT(i);
6229 
6230 		rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);
6231 
6232 		if (i == 0) {
6233 			mutex_init(&EMLXS_PKT_LOCK, NULL, MUTEX_DRIVER,
6234 			    DDI_INTR_PRI(hba->intr_arg));
6235 
6236 			cv_init(&EMLXS_PKT_CV, NULL, CV_DRIVER, NULL);
6237 
6238 			mutex_init(&EMLXS_UB_LOCK, NULL, MUTEX_DRIVER,
6239 			    DDI_INTR_PRI(hba->intr_arg));
6240 		} else {
6241 			mutex_init(&EMLXS_PKT_LOCK, NULL, MUTEX_DRIVER,
6242 			    DDI_INTR_PRI(hba->intr_arg));
6243 
6244 			cv_init(&EMLXS_PKT_CV, NULL, CV_DRIVER, NULL);
6245 
6246 			mutex_init(&EMLXS_UB_LOCK, NULL, MUTEX_DRIVER,
6247 			    DDI_INTR_PRI(hba->intr_arg));
6248 		}
6249 	}
6250 
6251 	return;
6252 
6253 } /* emlxs_lock_init() */
6254 
6255 
6256 
6257 static void
emlxs_lock_destroy(emlxs_hba_t * hba)6258 emlxs_lock_destroy(emlxs_hba_t *hba)
6259 {
6260 	emlxs_port_t	*port = &PPORT;
6261 	uint32_t	i;
6262 
6263 	mutex_destroy(&EMLXS_TIMER_LOCK);
6264 	cv_destroy(&hba->timer_lock_cv);
6265 
6266 	mutex_destroy(&EMLXS_PORT_LOCK);
6267 
6268 	cv_destroy(&EMLXS_MBOX_CV);
6269 	cv_destroy(&EMLXS_LINKUP_CV);
6270 
6271 	mutex_destroy(&EMLXS_LINKUP_LOCK);
6272 	mutex_destroy(&EMLXS_MBOX_LOCK);
6273 
6274 	mutex_destroy(&EMLXS_TX_CHANNEL_LOCK);
6275 
6276 	for (i = 0; i < MAX_RINGS; i++) {
6277 		mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
6278 	}
6279 
6280 	for (i = 0; i < EMLXS_MAX_WQS; i++) {
6281 		mutex_destroy(&EMLXS_QUE_LOCK(i));
6282 	}
6283 
6284 	mutex_destroy(&EMLXS_MSIID_LOCK);
6285 
6286 	mutex_destroy(&EMLXS_FCTAB_LOCK);
6287 	mutex_destroy(&EMLXS_MEMGET_LOCK);
6288 	mutex_destroy(&EMLXS_MEMPUT_LOCK);
6289 	mutex_destroy(&EMLXS_IOCTL_LOCK);
6290 	mutex_destroy(&EMLXS_SPAWN_LOCK);
6291 	mutex_destroy(&EMLXS_PM_LOCK);
6292 
6293 #ifdef DUMP_SUPPORT
6294 	mutex_destroy(&EMLXS_DUMP_LOCK);
6295 #endif /* DUMP_SUPPORT */
6296 
6297 	/* Destroy per port locks */
6298 	for (i = 0; i < MAX_VPORTS; i++) {
6299 		port = &VPORT(i);
6300 		rw_destroy(&port->node_rwlock);
6301 		mutex_destroy(&EMLXS_PKT_LOCK);
6302 		cv_destroy(&EMLXS_PKT_CV);
6303 		mutex_destroy(&EMLXS_UB_LOCK);
6304 	}
6305 
6306 	return;
6307 
6308 } /* emlxs_lock_destroy() */
6309 
6310 
6311 /* init_flag values */
6312 #define	ATTACH_SOFT_STATE	0x00000001
6313 #define	ATTACH_FCA_TRAN		0x00000002
6314 #define	ATTACH_HBA		0x00000004
6315 #define	ATTACH_LOG		0x00000008
6316 #define	ATTACH_MAP_BUS		0x00000010
6317 #define	ATTACH_INTR_INIT	0x00000020
6318 #define	ATTACH_PROP		0x00000040
6319 #define	ATTACH_LOCK		0x00000080
6320 #define	ATTACH_THREAD		0x00000100
6321 #define	ATTACH_INTR_ADD		0x00000200
6322 #define	ATTACH_ONLINE		0x00000400
6323 #define	ATTACH_NODE		0x00000800
6324 #define	ATTACH_FCT		0x00001000
6325 #define	ATTACH_FCA		0x00002000
6326 #define	ATTACH_KSTAT		0x00004000
6327 #define	ATTACH_DHCHAP		0x00008000
6328 #define	ATTACH_FM		0x00010000
6329 #define	ATTACH_MAP_SLI		0x00020000
6330 #define	ATTACH_SPAWN		0x00040000
6331 #define	ATTACH_EVENTS		0x00080000
6332 
6333 static void
emlxs_driver_remove(dev_info_t * dip,uint32_t init_flag,uint32_t failed)6334 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
6335 {
6336 	emlxs_hba_t	*hba = NULL;
6337 	int		ddiinst;
6338 
6339 	ddiinst = ddi_get_instance(dip);
6340 
6341 	if (init_flag & ATTACH_HBA) {
6342 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6343 
6344 		if (init_flag & ATTACH_SPAWN) {
6345 			emlxs_thread_spawn_destroy(hba);
6346 		}
6347 
6348 		if (init_flag & ATTACH_EVENTS) {
6349 			(void) emlxs_event_queue_destroy(hba);
6350 		}
6351 
6352 		if (init_flag & ATTACH_ONLINE) {
6353 			(void) emlxs_offline(hba, 1);
6354 		}
6355 
6356 		if (init_flag & ATTACH_INTR_ADD) {
6357 			(void) EMLXS_INTR_REMOVE(hba);
6358 		}
6359 #ifdef SFCT_SUPPORT
6360 		if (init_flag & ATTACH_FCT) {
6361 			emlxs_fct_detach(hba);
6362 			emlxs_fct_modclose();
6363 		}
6364 #endif /* SFCT_SUPPORT */
6365 
6366 #ifdef DHCHAP_SUPPORT
6367 		if (init_flag & ATTACH_DHCHAP) {
6368 			emlxs_dhc_detach(hba);
6369 		}
6370 #endif /* DHCHAP_SUPPORT */
6371 
6372 		if (init_flag & ATTACH_KSTAT) {
6373 			kstat_delete(hba->kstat);
6374 		}
6375 
6376 		if (init_flag & ATTACH_FCA) {
6377 			emlxs_fca_detach(hba);
6378 		}
6379 
6380 		if (init_flag & ATTACH_NODE) {
6381 			(void) ddi_remove_minor_node(hba->dip, "devctl");
6382 		}
6383 
6384 		if (init_flag & ATTACH_THREAD) {
6385 			emlxs_thread_destroy(&hba->iodone_thread);
6386 		}
6387 
6388 		if (init_flag & ATTACH_PROP) {
6389 			(void) ddi_prop_remove_all(hba->dip);
6390 		}
6391 
6392 		if (init_flag & ATTACH_LOCK) {
6393 			emlxs_lock_destroy(hba);
6394 		}
6395 
6396 		if (init_flag & ATTACH_INTR_INIT) {
6397 			(void) EMLXS_INTR_UNINIT(hba);
6398 		}
6399 
6400 		if (init_flag & ATTACH_MAP_BUS) {
6401 			emlxs_unmap_bus(hba);
6402 		}
6403 
6404 		if (init_flag & ATTACH_MAP_SLI) {
6405 			EMLXS_SLI_UNMAP_HDW(hba);
6406 		}
6407 
6408 #ifdef FMA_SUPPORT
6409 		if (init_flag & ATTACH_FM) {
6410 			emlxs_fm_fini(hba);
6411 		}
6412 #endif	/* FMA_SUPPORT */
6413 
6414 		if (init_flag & ATTACH_LOG) {
6415 			emlxs_msg_log_destroy(hba);
6416 		}
6417 
6418 		if (init_flag & ATTACH_FCA_TRAN) {
6419 			(void) ddi_set_driver_private(hba->dip, NULL);
6420 			kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
6421 			hba->fca_tran = NULL;
6422 		}
6423 
6424 		if (init_flag & ATTACH_HBA) {
6425 			emlxs_device.log[hba->emlxinst] = 0;
6426 			emlxs_device.hba[hba->emlxinst] =
6427 			    (emlxs_hba_t *)((unsigned long)((failed) ? -1 : 0));
6428 #ifdef DUMP_SUPPORT
6429 			emlxs_device.dump_txtfile[hba->emlxinst] = 0;
6430 			emlxs_device.dump_dmpfile[hba->emlxinst] = 0;
6431 			emlxs_device.dump_ceefile[hba->emlxinst] = 0;
6432 #endif /* DUMP_SUPPORT */
6433 
6434 		}
6435 	}
6436 
6437 	if (init_flag & ATTACH_SOFT_STATE) {
6438 		(void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
6439 	}
6440 
6441 	return;
6442 
6443 } /* emlxs_driver_remove() */
6444 
6445 
6446 /* This determines which ports will be initiator mode */
6447 static uint32_t
emlxs_fca_init(emlxs_hba_t * hba)6448 emlxs_fca_init(emlxs_hba_t *hba)
6449 {
6450 	emlxs_port_t	*port = &PPORT;
6451 
6452 	/* Check if SFS present */
6453 	if (((void *)MODSYM(fc_fca_init) == NULL) ||
6454 	    ((void *)MODSYM(fc_fca_attach) == NULL)) {
6455 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6456 		    "SFS not present.");
6457 		return (1);
6458 	}
6459 
6460 	/* Check if our SFS driver interface matches the current SFS stack */
6461 	if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
6462 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6463 		    "SFS/FCA version mismatch. FCA=0x%x",
6464 		    hba->fca_tran->fca_version);
6465 		return (1);
6466 	}
6467 
6468 	return (0);
6469 
6470 } /* emlxs_fca_init() */
6471 
6472 
6473 /* This determines which ports will be initiator or target mode */
6474 static void
emlxs_mode_init(emlxs_hba_t * hba)6475 emlxs_mode_init(emlxs_hba_t *hba)
6476 {
6477 	emlxs_port_t	*port = &PPORT;
6478 	emlxs_config_t *cfg = &CFG;
6479 	emlxs_port_t	*vport;
6480 	uint32_t	i;
6481 	uint32_t	mode_mask;
6482 
6483 	/* Initialize mode masks */
6484 	(void) emlxs_mode_init_masks(hba);
6485 
6486 	if (!(port->mode_mask & MODE_INITIATOR)) {
6487 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6488 		    "Initiator mode not enabled.");
6489 
6490 #ifdef SFCT_SUPPORT
6491 		/* Disable dynamic target mode */
6492 		cfg[CFG_DTM_ENABLE].current = 0;
6493 #endif /* SFCT_SUPPORT */
6494 
6495 		goto done1;
6496 	}
6497 
6498 	/* Try to initialize fca interface */
6499 	if (emlxs_fca_init(hba) != 0) {
6500 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6501 		    "Initiator mode disabled.");
6502 
6503 		/* Disable initiator mode */
6504 		port->mode_mask &= ~MODE_INITIATOR;
6505 
6506 #ifdef SFCT_SUPPORT
6507 		/* Disable dynamic target mode */
6508 		cfg[CFG_DTM_ENABLE].current = 0;
6509 #endif /* SFCT_SUPPORT */
6510 
6511 		goto done1;
6512 	}
6513 
6514 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6515 	    "Initiator mode enabled.");
6516 
6517 done1:
6518 
6519 #ifdef SFCT_SUPPORT
6520 	if (!(port->mode_mask & MODE_TARGET)) {
6521 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6522 		    "Target mode not enabled.");
6523 
6524 		/* Disable target modes */
6525 		cfg[CFG_DTM_ENABLE].current = 0;
6526 		cfg[CFG_TARGET_MODE].current = 0;
6527 
6528 		goto done2;
6529 	}
6530 
6531 	/* Try to open the COMSTAR module */
6532 	if (emlxs_fct_modopen() != 0) {
6533 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6534 		    "Target mode disabled.");
6535 
6536 		/* Disable target modes */
6537 		port->mode_mask &= ~MODE_TARGET;
6538 		cfg[CFG_DTM_ENABLE].current = 0;
6539 		cfg[CFG_TARGET_MODE].current = 0;
6540 
6541 		goto done2;
6542 	}
6543 
6544 	/* Try to initialize fct interface */
6545 	if (emlxs_fct_init(hba) != 0) {
6546 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6547 		    "Target mode disabled.");
6548 
6549 		/* Disable target modes */
6550 		port->mode_mask &= ~MODE_TARGET;
6551 		cfg[CFG_DTM_ENABLE].current = 0;
6552 		cfg[CFG_TARGET_MODE].current = 0;
6553 
6554 		goto done2;
6555 	}
6556 
6557 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6558 	    "Target mode enabled.");
6559 
6560 done2:
6561 	/* Adjust target mode parameter flags */
6562 	if (cfg[CFG_DTM_ENABLE].current) {
6563 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6564 		    "Dynamic target mode enabled.");
6565 
6566 		cfg[CFG_TARGET_MODE].flags |= PARM_DYNAMIC;
6567 	} else {
6568 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6569 		    "Dynamic target mode disabled.");
6570 
6571 		cfg[CFG_TARGET_MODE].flags &= ~PARM_DYNAMIC;
6572 	}
6573 #endif /* SFCT_SUPPORT */
6574 
6575 	/* Now set port flags */
6576 	mutex_enter(&EMLXS_PORT_LOCK);
6577 
6578 	/* Set flags for physical port */
6579 	if (port->mode_mask & MODE_INITIATOR) {
6580 		port->flag |= EMLXS_INI_ENABLED;
6581 	} else {
6582 		port->flag &= ~EMLXS_INI_ENABLED;
6583 	}
6584 
6585 	if (port->mode_mask & MODE_TARGET) {
6586 		port->flag |= EMLXS_TGT_ENABLED;
6587 	} else {
6588 		port->flag &= ~EMLXS_TGT_ENABLED;
6589 	}
6590 
6591 	for (i = 1; i < MAX_VPORTS; i++) {
6592 		vport = &VPORT(i);
6593 
6594 		/* Physical port mask has only allowable bits */
6595 		mode_mask = vport->mode_mask & port->mode_mask;
6596 
6597 		/* Set flags for physical port */
6598 		if (mode_mask & MODE_INITIATOR) {
6599 			vport->flag |= EMLXS_INI_ENABLED;
6600 		} else {
6601 			vport->flag &= ~EMLXS_INI_ENABLED;
6602 		}
6603 
6604 		if (mode_mask & MODE_TARGET) {
6605 			vport->flag |= EMLXS_TGT_ENABLED;
6606 		} else {
6607 			vport->flag &= ~EMLXS_TGT_ENABLED;
6608 		}
6609 	}
6610 
6611 	/* Set initial driver mode */
6612 	emlxs_mode_set(hba);
6613 
6614 	mutex_exit(&EMLXS_PORT_LOCK);
6615 
6616 	/* Recheck possible mode dependent parameters */
6617 	/* in case conditions have changed. */
6618 	if (port->mode != MODE_NONE) {
6619 		for (i = 0; i < NUM_CFG_PARAM; i++) {
6620 			cfg = &hba->config[i];
6621 			cfg->current = emlxs_check_parm(hba, i, cfg->current);
6622 		}
6623 	}
6624 
6625 	return;
6626 
6627 } /* emlxs_mode_init() */
6628 
6629 
6630 /* This must be called while holding the EMLXS_PORT_LOCK */
6631 extern void
emlxs_mode_set(emlxs_hba_t * hba)6632 emlxs_mode_set(emlxs_hba_t *hba)
6633 {
6634 	emlxs_port_t	*port = &PPORT;
6635 #ifdef SFCT_SUPPORT
6636 	emlxs_config_t *cfg = &CFG;
6637 #endif /* SFCT_SUPPORT */
6638 	emlxs_port_t	*vport;
6639 	uint32_t	i;
6640 	uint32_t cfg_tgt_mode = 0;
6641 
6642 	/* mutex_enter(&EMLXS_PORT_LOCK); */
6643 
6644 #ifdef SFCT_SUPPORT
6645 	cfg_tgt_mode = cfg[CFG_TARGET_MODE].current;
6646 #endif /* SFCT_SUPPORT */
6647 
6648 	/* Initiator mode requested */
6649 	if (!cfg_tgt_mode) {
6650 		for (i = 0; i < MAX_VPORTS; i++) {
6651 			vport = &VPORT(i);
6652 			vport->mode = (vport->flag & EMLXS_INI_ENABLED)?
6653 			    MODE_INITIATOR:MODE_NONE;
6654 		}
6655 #ifdef SFCT_SUPPORT
6656 	/* Target mode requested */
6657 	} else  {
6658 		for (i = 0; i < MAX_VPORTS; i++) {
6659 			vport = &VPORT(i);
6660 			vport->mode = (vport->flag & EMLXS_TGT_ENABLED)?
6661 			    MODE_TARGET:MODE_NONE;
6662 		}
6663 #endif /* SFCT_SUPPORT */
6664 	}
6665 
6666 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
6667 	    "MODE: %s", emlxs_mode_xlate(port->mode));
6668 
6669 	/* mutex_exit(&EMLXS_PORT_LOCK); */
6670 
6671 	return;
6672 
6673 } /* emlxs_mode_set() */
6674 
6675 
6676 static void
emlxs_mode_init_masks(emlxs_hba_t * hba)6677 emlxs_mode_init_masks(emlxs_hba_t *hba)
6678 {
6679 	emlxs_port_t *port = &PPORT;
6680 	emlxs_port_t *vport;
6681 	uint32_t	i;
6682 
6683 #ifdef SFCT_SUPPORT
6684 	emlxs_config_t	*cfg = &CFG;
6685 	uint32_t	vport_mode_mask;
6686 	uint32_t	cfg_vport_mode_mask;
6687 	uint32_t	mode_mask;
6688 	char		string[256];
6689 
6690 	port->mode_mask = 0;
6691 
6692 	if (!cfg[CFG_TARGET_MODE].current ||
6693 	    cfg[CFG_DTM_ENABLE].current) {
6694 		port->mode_mask |= MODE_INITIATOR;
6695 	}
6696 
6697 	if (cfg[CFG_TARGET_MODE].current ||
6698 	    cfg[CFG_DTM_ENABLE].current) {
6699 		port->mode_mask |= MODE_TARGET;
6700 	}
6701 
6702 	/* Physical port mask has only allowable bits */
6703 	vport_mode_mask = port->mode_mask;
6704 	cfg_vport_mode_mask = cfg[CFG_VPORT_MODE_MASK].current;
6705 
6706 	/* Check dynamic target mode value for virtual ports */
6707 	if (cfg[CFG_DTM_ENABLE].current == 0) {
6708 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6709 		    "%s = 0: Virtual target ports are not supported.",
6710 		    cfg[CFG_DTM_ENABLE].string);
6711 
6712 		vport_mode_mask &= ~MODE_TARGET;
6713 	}
6714 
6715 	cfg_vport_mode_mask &= vport_mode_mask;
6716 
6717 	if (cfg[CFG_VPORT_MODE_MASK].current != cfg_vport_mode_mask) {
6718 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6719 		    "%s: Changing 0x%x --> 0x%x",
6720 		    cfg[CFG_VPORT_MODE_MASK].string,
6721 		    cfg[CFG_VPORT_MODE_MASK].current,
6722 		    cfg_vport_mode_mask);
6723 
6724 		cfg[CFG_VPORT_MODE_MASK].current = cfg_vport_mode_mask;
6725 	}
6726 
6727 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6728 	    "pport-mode-mask: %s", emlxs_mode_xlate(port->mode_mask));
6729 
6730 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6731 	    "vport-mode-mask: %s", emlxs_mode_xlate(cfg_vport_mode_mask));
6732 
6733 	for (i = 1; i < MAX_VPORTS; i++) {
6734 		vport = &VPORT(i);
6735 
6736 		(void) snprintf(string, sizeof (string),
6737 		    "%s%d-vport%d-mode-mask", DRIVER_NAME, hba->ddiinst, i);
6738 
6739 		mode_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6740 		    (void *)hba->dip, DDI_PROP_DONTPASS, string,
6741 		    cfg_vport_mode_mask);
6742 
6743 		vport->mode_mask = mode_mask & vport_mode_mask;
6744 
6745 		if (vport->mode_mask != cfg_vport_mode_mask) {
6746 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6747 			    "vport%d-mode-mask: %s",
6748 			    i, emlxs_mode_xlate(vport->mode_mask));
6749 		}
6750 	}
6751 #else
6752 	port->mode_mask = MODE_INITIATOR;
6753 	for (i = 1; i < MAX_VPORTS; i++) {
6754 		vport = &VPORT(i);
6755 		vport->mode_mask = MODE_INITIATOR;
6756 	}
6757 #endif /* SFCT_SUPPORT */
6758 
6759 	return;
6760 
6761 } /* emlxs_mode_init_masks() */
6762 
6763 
6764 static void
emlxs_fca_attach(emlxs_hba_t * hba)6765 emlxs_fca_attach(emlxs_hba_t *hba)
6766 {
6767 	emlxs_port_t	*port;
6768 	uint32_t	i;
6769 
6770 	/* Update our transport structure */
6771 	hba->fca_tran->fca_iblock  = (ddi_iblock_cookie_t *)&hba->intr_arg;
6772 	hba->fca_tran->fca_cmd_max = hba->io_throttle;
6773 
6774 	for (i = 0; i < MAX_VPORTS; i++) {
6775 		port = &VPORT(i);
6776 		port->ub_count	= EMLXS_UB_TOKEN_OFFSET;
6777 		port->ub_pool	= NULL;
6778 	}
6779 
6780 #if (EMLXS_MODREV >= EMLXS_MODREV5)
6781 	bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
6782 	    sizeof (NAME_TYPE));
6783 #endif /* >= EMLXS_MODREV5 */
6784 
6785 	return;
6786 
6787 } /* emlxs_fca_attach() */
6788 
6789 
6790 static void
emlxs_fca_detach(emlxs_hba_t * hba)6791 emlxs_fca_detach(emlxs_hba_t *hba)
6792 {
6793 	emlxs_port_t	*port = &PPORT;
6794 	uint32_t	i;
6795 	emlxs_port_t	*vport;
6796 
6797 	if (!(port->flag & EMLXS_INI_ENABLED)) {
6798 		return;
6799 	}
6800 
6801 	if ((void *)MODSYM(fc_fca_detach) != NULL) {
6802 		MODSYM(fc_fca_detach)(hba->dip);
6803 	}
6804 
6805 	/* Disable INI mode for all ports */
6806 	for (i = 0; i < MAX_VPORTS; i++) {
6807 		vport = &VPORT(i);
6808 		vport->flag &= ~EMLXS_INI_ENABLED;
6809 	}
6810 
6811 	return;
6812 
6813 } /* emlxs_fca_detach() */
6814 
6815 
6816 static void
emlxs_drv_banner(emlxs_hba_t * hba)6817 emlxs_drv_banner(emlxs_hba_t *hba)
6818 {
6819 	emlxs_port_t	*port = &PPORT;
6820 	uint32_t	i;
6821 	char		sli_mode[16];
6822 	char		msi_mode[16];
6823 	char		npiv_mode[16];
6824 	emlxs_vpd_t	*vpd = &VPD;
6825 	uint8_t		*wwpn;
6826 	uint8_t		*wwnn;
6827 	uint32_t	fw_show = 0;
6828 
6829 	/* Display firmware library one time for all driver instances */
6830 	mutex_enter(&emlxs_device.lock);
6831 	if (!(emlxs_instance_flag & EMLXS_FW_SHOW)) {
6832 		emlxs_instance_flag |= EMLXS_FW_SHOW;
6833 		fw_show = 1;
6834 	}
6835 	mutex_exit(&emlxs_device.lock);
6836 
6837 	if (fw_show) {
6838 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s",
6839 		    emlxs_copyright);
6840 		emlxs_fw_show(hba);
6841 	}
6842 
6843 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label,
6844 	    emlxs_revision);
6845 
6846 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6847 	    "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
6848 	    hba->model_info.device_id, hba->model_info.ssdid,
6849 	    hba->model_info.id);
6850 
6851 #ifdef EMLXS_I386
6852 
6853 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6854 	    "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label,
6855 	    vpd->boot_version);
6856 
6857 #else	/* EMLXS_SPARC */
6858 
6859 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6860 	    "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
6861 	    vpd->fw_label, vpd->boot_version, vpd->fcode_version);
6862 
6863 #endif	/* EMLXS_I386 */
6864 
6865 	if (hba->sli_mode > 3) {
6866 		(void) snprintf(sli_mode, sizeof (sli_mode), "SLI:%d(%s)",
6867 		    hba->sli_mode,
6868 		    ((hba->flag & FC_FIP_SUPPORTED) ? "FIP" : "nonFIP"));
6869 	} else {
6870 		(void) snprintf(sli_mode, sizeof (sli_mode), "SLI:%d",
6871 		    hba->sli_mode);
6872 	}
6873 
6874 	(void) strlcpy(msi_mode, " INTX:1", sizeof (msi_mode));
6875 
6876 #ifdef MSI_SUPPORT
6877 	if (hba->intr_flags & EMLXS_MSI_ENABLED) {
6878 		switch (hba->intr_type) {
6879 		case DDI_INTR_TYPE_FIXED:
6880 			(void) strlcpy(msi_mode, " MSI:0", sizeof (msi_mode));
6881 			break;
6882 
6883 		case DDI_INTR_TYPE_MSI:
6884 			(void) snprintf(msi_mode, sizeof (msi_mode), " MSI:%d",
6885 			    hba->intr_count);
6886 			break;
6887 
6888 		case DDI_INTR_TYPE_MSIX:
6889 			(void) snprintf(msi_mode, sizeof (msi_mode), " MSIX:%d",
6890 			    hba->intr_count);
6891 			break;
6892 		}
6893 	}
6894 #endif /* MSI_SUPPORT */
6895 
6896 	(void) strlcpy(npiv_mode, "", sizeof (npiv_mode));
6897 
6898 	if (hba->flag & FC_NPIV_ENABLED) {
6899 		(void) snprintf(npiv_mode, sizeof (npiv_mode), " NPIV:%d",
6900 		    hba->vpi_max+1);
6901 	} else {
6902 		(void) strlcpy(npiv_mode, " NPIV:0", sizeof (npiv_mode));
6903 	}
6904 
6905 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
6906 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s%s",
6907 		    sli_mode, msi_mode, npiv_mode,
6908 		    ((port->flag & EMLXS_INI_ENABLED)? " FCA":""),
6909 		    ((port->flag & EMLXS_TGT_ENABLED)? " FCT":""),
6910 		    ((SLI4_FCOE_MODE)? " FCoE":" FC"));
6911 	} else {
6912 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s",
6913 		    sli_mode, msi_mode, npiv_mode,
6914 		    ((port->flag & EMLXS_INI_ENABLED)? " FCA":""),
6915 		    ((port->flag & EMLXS_TGT_ENABLED)? " FCT":""));
6916 	}
6917 
6918 	wwpn = (uint8_t *)&hba->wwpn;
6919 	wwnn = (uint8_t *)&hba->wwnn;
6920 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6921 	    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6922 	    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6923 	    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6],
6924 	    wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5],
6925 	    wwnn[6], wwnn[7]);
6926 
6927 	for (i = 0; i < MAX_VPORTS; i++) {
6928 		port = &VPORT(i);
6929 
6930 		if (!(port->flag & EMLXS_PORT_CONFIG)) {
6931 			continue;
6932 		}
6933 
6934 		wwpn = (uint8_t *)&port->wwpn;
6935 		wwnn = (uint8_t *)&port->wwnn;
6936 
6937 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6938 		    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6939 		    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6940 		    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5],
6941 		    wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3],
6942 		    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
6943 	}
6944 
6945 	/*
6946 	 * Announce the device: ddi_report_dev() prints a banner at boot time,
6947 	 * announcing the device pointed to by dip.
6948 	 */
6949 	(void) ddi_report_dev(hba->dip);
6950 
6951 	return;
6952 
6953 } /* emlxs_drv_banner() */
6954 
6955 
6956 extern void
emlxs_get_fcode_version(emlxs_hba_t * hba)6957 emlxs_get_fcode_version(emlxs_hba_t *hba)
6958 {
6959 	emlxs_vpd_t	*vpd = &VPD;
6960 	char		*prop_str;
6961 	int		status;
6962 
6963 	/* Setup fcode version property */
6964 	prop_str = NULL;
6965 	status =
6966 	    ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0,
6967 	    "fcode-version", (char **)&prop_str);
6968 
6969 	if (status == DDI_PROP_SUCCESS) {
6970 		bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
6971 		(void) ddi_prop_free((void *)prop_str);
6972 	} else {
6973 		(void) strncpy(vpd->fcode_version, "none",
6974 		    (sizeof (vpd->fcode_version)-1));
6975 	}
6976 
6977 	return;
6978 
6979 } /* emlxs_get_fcode_version() */
6980 
6981 
6982 static int
emlxs_hba_attach(dev_info_t * dip)6983 emlxs_hba_attach(dev_info_t *dip)
6984 {
6985 	emlxs_hba_t	*hba;
6986 	emlxs_port_t	*port;
6987 	emlxs_config_t	*cfg;
6988 	char		*prop_str;
6989 	int		ddiinst;
6990 	int32_t		emlxinst;
6991 	int		status;
6992 	uint32_t	rval;
6993 	uint32_t	init_flag = 0;
6994 	char		local_pm_components[32];
6995 	uint32_t	i;
6996 
6997 	ddiinst = ddi_get_instance(dip);
6998 	emlxinst = emlxs_add_instance(ddiinst);
6999 
7000 	if (emlxinst >= MAX_FC_BRDS) {
7001 		cmn_err(CE_WARN,
7002 		    "?%s: fca_hba_attach failed. Too many driver ddiinsts. "
7003 		    "inst=%x", DRIVER_NAME, ddiinst);
7004 		return (DDI_FAILURE);
7005 	}
7006 
7007 	if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
7008 		return (DDI_FAILURE);
7009 	}
7010 
7011 	if (emlxs_device.hba[emlxinst]) {
7012 		return (DDI_SUCCESS);
7013 	}
7014 
7015 	/* An adapter can accidentally be plugged into a slave-only PCI slot */
7016 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
7017 		cmn_err(CE_WARN,
7018 		    "?%s%d: fca_hba_attach failed. Device in slave-only slot.",
7019 		    DRIVER_NAME, ddiinst);
7020 		return (DDI_FAILURE);
7021 	}
7022 
7023 	/* Allocate emlxs_dev_ctl structure. */
7024 	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
7025 		cmn_err(CE_WARN,
7026 		    "?%s%d: fca_hba_attach failed. Unable to allocate soft "
7027 		    "state.", DRIVER_NAME, ddiinst);
7028 		return (DDI_FAILURE);
7029 	}
7030 	init_flag |= ATTACH_SOFT_STATE;
7031 
7032 	if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state,
7033 	    ddiinst)) == NULL) {
7034 		cmn_err(CE_WARN,
7035 		    "?%s%d: fca_hba_attach failed. Unable to get soft state.",
7036 		    DRIVER_NAME, ddiinst);
7037 		goto failed;
7038 	}
7039 	bzero((char *)hba, sizeof (emlxs_hba_t));
7040 
7041 	emlxs_device.hba[emlxinst] = hba;
7042 	emlxs_device.log[emlxinst] = &hba->log;
7043 
7044 #ifdef DUMP_SUPPORT
7045 	emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile;
7046 	emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile;
7047 	emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile;
7048 #endif /* DUMP_SUPPORT */
7049 
7050 	hba->dip = dip;
7051 	hba->emlxinst = emlxinst;
7052 	hba->ddiinst = ddiinst;
7053 
7054 	init_flag |= ATTACH_HBA;
7055 
7056 	/* Enable the physical port on this HBA */
7057 	port = &PPORT;
7058 	port->hba = hba;
7059 	port->vpi = 0;
7060 	port->flag |= EMLXS_PORT_ENABLED;
7061 
7062 	/* Allocate a transport structure */
7063 	hba->fca_tran =
7064 	    (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP);
7065 	if (hba->fca_tran == NULL) {
7066 		cmn_err(CE_WARN,
7067 		    "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran "
7068 		    "memory.", DRIVER_NAME, ddiinst);
7069 		goto failed;
7070 	}
7071 	bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
7072 	    sizeof (fc_fca_tran_t));
7073 
7074 	/*
7075 	 * Copy the global ddi_dma_attr to the local hba fields
7076 	 */
7077 	bcopy((caddr_t)&emlxs_dma_attr, (caddr_t)&hba->dma_attr,
7078 	    sizeof (ddi_dma_attr_t));
7079 	bcopy((caddr_t)&emlxs_dma_attr_ro, (caddr_t)&hba->dma_attr_ro,
7080 	    sizeof (ddi_dma_attr_t));
7081 	bcopy((caddr_t)&emlxs_dma_attr_1sg, (caddr_t)&hba->dma_attr_1sg,
7082 	    sizeof (ddi_dma_attr_t));
7083 	bcopy((caddr_t)&emlxs_dma_attr_fcip_rsp,
7084 	    (caddr_t)&hba->dma_attr_fcip_rsp, sizeof (ddi_dma_attr_t));
7085 
7086 	/* Reset the fca_tran dma_attr fields to the per-hba copies */
7087 	hba->fca_tran->fca_dma_attr = &hba->dma_attr;
7088 	hba->fca_tran->fca_dma_fcp_cmd_attr = &hba->dma_attr_1sg;
7089 	hba->fca_tran->fca_dma_fcp_rsp_attr = &hba->dma_attr_1sg;
7090 	hba->fca_tran->fca_dma_fcp_data_attr = &hba->dma_attr_ro;
7091 	hba->fca_tran->fca_dma_fcip_cmd_attr = &hba->dma_attr_1sg;
7092 	hba->fca_tran->fca_dma_fcip_rsp_attr = &hba->dma_attr_fcip_rsp;
7093 	hba->fca_tran->fca_dma_fcsm_cmd_attr = &hba->dma_attr_1sg;
7094 	hba->fca_tran->fca_dma_fcsm_rsp_attr = &hba->dma_attr;
7095 
7096 	/* Set the transport structure pointer in our dip */
7097 	/* SFS may panic if we are in target only mode    */
7098 	/* We will update the transport structure later   */
7099 	(void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
7100 	init_flag |= ATTACH_FCA_TRAN;
7101 
7102 	/* Perform driver integrity check */
7103 	rval = emlxs_integrity_check(hba);
7104 	if (rval) {
7105 		cmn_err(CE_WARN,
7106 		    "?%s%d: fca_hba_attach failed. Driver integrity check "
7107 		    "failed. %d error(s) found.", DRIVER_NAME, ddiinst, rval);
7108 		goto failed;
7109 	}
7110 
7111 	cfg = &CFG;
7112 
7113 	bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
7114 #ifdef MSI_SUPPORT
7115 	if ((void *)&ddi_intr_get_supported_types != NULL) {
7116 		hba->intr_flags |= EMLXS_MSI_ENABLED;
7117 	}
7118 #endif	/* MSI_SUPPORT */
7119 
7120 
7121 	/* Create the msg log file */
7122 	if (emlxs_msg_log_create(hba) == 0) {
7123 		cmn_err(CE_WARN,
7124 		    "?%s%d: fca_hba_attach failed. Unable to create message "
7125 		    "log", DRIVER_NAME, ddiinst);
7126 		goto failed;
7127 
7128 	}
7129 	init_flag |= ATTACH_LOG;
7130 
7131 	/* We can begin to use EMLXS_MSGF from this point on */
7132 
7133 	/*
7134 	 * Find the I/O bus type If it is not a SBUS card,
7135 	 * then it is a PCI card. Default is PCI_FC (0).
7136 	 */
7137 	prop_str = NULL;
7138 	status = ddi_prop_lookup_string(DDI_DEV_T_ANY,
7139 	    (dev_info_t *)dip, 0, "name", (char **)&prop_str);
7140 
7141 	if (status == DDI_PROP_SUCCESS) {
7142 		if (strncmp(prop_str, "lpfs", 4) == 0) {
7143 			hba->bus_type = SBUS_FC;
7144 		}
7145 
7146 		(void) ddi_prop_free((void *)prop_str);
7147 	}
7148 
7149 	/*
7150 	 * Copy DDS from the config method and update configuration parameters
7151 	 */
7152 	(void) emlxs_get_props(hba);
7153 
7154 #ifdef FMA_SUPPORT
7155 	hba->fm_caps = cfg[CFG_FM_CAPS].current;
7156 
7157 	emlxs_fm_init(hba);
7158 
7159 	init_flag |= ATTACH_FM;
7160 #endif	/* FMA_SUPPORT */
7161 
7162 	if (emlxs_map_bus(hba)) {
7163 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7164 		    "Unable to map memory");
7165 		goto failed;
7166 
7167 	}
7168 	init_flag |= ATTACH_MAP_BUS;
7169 
7170 	/* Attempt to identify the adapter */
7171 	rval = emlxs_init_adapter_info(hba);
7172 
7173 	if (rval == 0) {
7174 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7175 		    "Unable to get adapter info. Id:%d  Device id:0x%x "
7176 		    "Model:%s", hba->model_info.id,
7177 		    hba->model_info.device_id, hba->model_info.model);
7178 		goto failed;
7179 	}
7180 #define	FILTER_ORACLE_BRANDED
7181 #ifdef FILTER_ORACLE_BRANDED
7182 
7183 	/* Oracle branded adapters are not supported in this driver */
7184 	if (hba->model_info.flags & EMLXS_ORACLE_BRANDED) {
7185 		hba->model_info.flags |= EMLXS_NOT_SUPPORTED;
7186 	}
7187 #endif /* FILTER_ORACLE_BRANDED */
7188 
7189 	/* Check if adapter is not supported */
7190 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
7191 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7192 		    "Unsupported adapter found. Id:%d  Device id:0x%x "
7193 		    "SSDID:0x%x  Model:%s", hba->model_info.id,
7194 		    hba->model_info.device_id,
7195 		    hba->model_info.ssdid, hba->model_info.model);
7196 		goto failed;
7197 	}
7198 
7199 	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
7200 		hba->sli.sli4.mem_sgl_size = MEM_SGL_SIZE;
7201 
7202 #ifdef EMLXS_I386
7203 		/*
7204 		 * TigerShark has 64K limit for SG element size
7205 		 * Do this for x86 alone. For SPARC, the driver
7206 		 * breaks up the single SGE later on.
7207 		 */
7208 		hba->dma_attr_ro.dma_attr_count_max = 0xffff;
7209 
7210 		i = cfg[CFG_MAX_XFER_SIZE].current;
7211 		/* Update SGL size based on max_xfer_size */
7212 		if (i > 516096) {
7213 			/* 516096 = (((2048 / 16) - 2) * 4096) */
7214 			hba->sli.sli4.mem_sgl_size = 4096;
7215 		} else if (i > 253952) {
7216 			/* 253952 = (((1024 / 16) - 2) * 4096) */
7217 			hba->sli.sli4.mem_sgl_size = 2048;
7218 		} else {
7219 			hba->sli.sli4.mem_sgl_size = 1024;
7220 		}
7221 #endif /* EMLXS_I386 */
7222 
7223 		i = SGL_TO_SGLLEN(hba->sli.sli4.mem_sgl_size);
7224 	} else {
7225 		hba->sli.sli3.mem_bpl_size = MEM_BPL_SIZE;
7226 
7227 #ifdef EMLXS_I386
7228 		i = cfg[CFG_MAX_XFER_SIZE].current;
7229 		/* Update BPL size based on max_xfer_size */
7230 		if (i > 688128) {
7231 			/* 688128 = (((2048 / 12) - 2) * 4096) */
7232 			hba->sli.sli3.mem_bpl_size = 4096;
7233 		} else if (i > 339968) {
7234 			/* 339968 = (((1024 / 12) - 2) * 4096) */
7235 			hba->sli.sli3.mem_bpl_size = 2048;
7236 		} else {
7237 			hba->sli.sli3.mem_bpl_size = 1024;
7238 		}
7239 #endif /* EMLXS_I386 */
7240 
7241 		i = BPL_TO_SGLLEN(hba->sli.sli3.mem_bpl_size);
7242 	}
7243 
7244 	/* Update dma_attr_sgllen based on true SGL length */
7245 	hba->dma_attr.dma_attr_sgllen = i;
7246 	hba->dma_attr_ro.dma_attr_sgllen = i;
7247 	hba->dma_attr_fcip_rsp.dma_attr_sgllen = i;
7248 
7249 	if (EMLXS_SLI_MAP_HDW(hba)) {
7250 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7251 		    "Unable to map memory");
7252 		goto failed;
7253 
7254 	}
7255 	init_flag |= ATTACH_MAP_SLI;
7256 
7257 	/* Initialize the interrupts. But don't add them yet */
7258 	status = EMLXS_INTR_INIT(hba, 0);
7259 	if (status != DDI_SUCCESS) {
7260 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7261 		    "Unable to initalize interrupt(s).");
7262 		goto failed;
7263 
7264 	}
7265 	init_flag |= ATTACH_INTR_INIT;
7266 
7267 	/* Initialize LOCKs */
7268 	emlxs_msg_lock_reinit(hba);
7269 	emlxs_lock_init(hba);
7270 	init_flag |= ATTACH_LOCK;
7271 
7272 	/* Create the event queue */
7273 	if (emlxs_event_queue_create(hba) == 0) {
7274 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7275 		    "Unable to create event queue");
7276 
7277 		goto failed;
7278 
7279 	}
7280 	init_flag |= ATTACH_EVENTS;
7281 
7282 	/* Initialize the power management */
7283 	mutex_enter(&EMLXS_PM_LOCK);
7284 	hba->pm_state = EMLXS_PM_IN_ATTACH;
7285 	hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
7286 	hba->pm_busy = 0;
7287 #ifdef IDLE_TIMER
7288 	hba->pm_active = 1;
7289 	hba->pm_idle_timer = 0;
7290 #endif	/* IDLE_TIMER */
7291 	mutex_exit(&EMLXS_PM_LOCK);
7292 
7293 	/* Set the pm component name */
7294 	(void) snprintf(local_pm_components, sizeof (local_pm_components),
7295 	    "NAME=%s%d", DRIVER_NAME, ddiinst);
7296 	emlxs_pm_components[0] = local_pm_components;
7297 
7298 	/* Check if power management support is enabled */
7299 	if (cfg[CFG_PM_SUPPORT].current) {
7300 		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
7301 		    "pm-components", emlxs_pm_components,
7302 		    sizeof (emlxs_pm_components) /
7303 		    sizeof (emlxs_pm_components[0])) !=
7304 		    DDI_PROP_SUCCESS) {
7305 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7306 			    "Unable to create pm components.");
7307 			goto failed;
7308 		}
7309 	}
7310 
7311 	/* Needed for suspend and resume support */
7312 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state",
7313 	    "needs-suspend-resume");
7314 	init_flag |= ATTACH_PROP;
7315 
7316 	emlxs_thread_spawn_create(hba);
7317 	init_flag |= ATTACH_SPAWN;
7318 
7319 	emlxs_thread_create(hba, &hba->iodone_thread);
7320 
7321 	init_flag |= ATTACH_THREAD;
7322 
7323 retry:
7324 	/* Setup initiator / target ports */
7325 	emlxs_mode_init(hba);
7326 
7327 	/* If driver did not attach to either stack, */
7328 	/* then driver attach fails */
7329 	if (port->mode == MODE_NONE) {
7330 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7331 		    "Driver interfaces not enabled.");
7332 		goto failed;
7333 	}
7334 
7335 	/*
7336 	 * Initialize HBA
7337 	 */
7338 
7339 	/* Set initial state */
7340 	mutex_enter(&EMLXS_PORT_LOCK);
7341 	hba->flag |= FC_OFFLINE_MODE;
7342 	hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE);
7343 	mutex_exit(&EMLXS_PORT_LOCK);
7344 
7345 	if (status = emlxs_online(hba)) {
7346 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7347 		    "Unable to initialize adapter.");
7348 
7349 		if (status == EAGAIN) {
7350 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7351 			    "Retrying adapter initialization ...");
7352 			goto retry;
7353 		}
7354 		goto failed;
7355 	}
7356 	init_flag |= ATTACH_ONLINE;
7357 
7358 	/* This is to ensure that the model property is properly set */
7359 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
7360 	    hba->model_info.model);
7361 
7362 	/* Create the device node. */
7363 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) ==
7364 	    DDI_FAILURE) {
7365 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7366 		    "Unable to create device node.");
7367 		goto failed;
7368 	}
7369 	init_flag |= ATTACH_NODE;
7370 
7371 	/* Attach initiator now */
7372 	/* This must come after emlxs_online() */
7373 	emlxs_fca_attach(hba);
7374 	init_flag |= ATTACH_FCA;
7375 
7376 	/* Initialize kstat information */
7377 	hba->kstat = kstat_create(DRIVER_NAME,
7378 	    ddiinst, "statistics", "controller",
7379 	    KSTAT_TYPE_RAW, sizeof (emlxs_stats_t),
7380 	    KSTAT_FLAG_VIRTUAL);
7381 
7382 	if (hba->kstat == NULL) {
7383 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
7384 		    "kstat_create failed.");
7385 	} else {
7386 		hba->kstat->ks_data = (void *)&hba->stats;
7387 		kstat_install(hba->kstat);
7388 		init_flag |= ATTACH_KSTAT;
7389 	}
7390 
7391 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
7392 	/* Setup virtual port properties */
7393 	emlxs_read_vport_prop(hba);
7394 #endif	/* EMLXS_MODREV3 || EMLXS_MODREV4 */
7395 
7396 
7397 #ifdef DHCHAP_SUPPORT
7398 	emlxs_dhc_attach(hba);
7399 	init_flag |= ATTACH_DHCHAP;
7400 #endif	/* DHCHAP_SUPPORT */
7401 
7402 	/* Display the driver banner now */
7403 	emlxs_drv_banner(hba);
7404 
7405 	/* Raise the power level */
7406 
7407 	/*
7408 	 * This will not execute emlxs_hba_resume because
7409 	 * EMLXS_PM_IN_ATTACH is set
7410 	 */
7411 	if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
7412 		/* Set power up anyway. This should not happen! */
7413 		mutex_enter(&EMLXS_PM_LOCK);
7414 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
7415 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
7416 		mutex_exit(&EMLXS_PM_LOCK);
7417 	} else {
7418 		mutex_enter(&EMLXS_PM_LOCK);
7419 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
7420 		mutex_exit(&EMLXS_PM_LOCK);
7421 	}
7422 
7423 #ifdef SFCT_SUPPORT
7424 	if (port->flag & EMLXS_TGT_ENABLED) {
7425 		/* Do this last */
7426 		emlxs_fct_attach(hba);
7427 		init_flag |= ATTACH_FCT;
7428 	}
7429 #endif /* SFCT_SUPPORT */
7430 
7431 	return (DDI_SUCCESS);
7432 
7433 failed:
7434 
7435 	emlxs_driver_remove(dip, init_flag, 1);
7436 
7437 	return (DDI_FAILURE);
7438 
7439 } /* emlxs_hba_attach() */
7440 
7441 
7442 static int
emlxs_hba_detach(dev_info_t * dip)7443 emlxs_hba_detach(dev_info_t *dip)
7444 {
7445 	emlxs_hba_t	*hba;
7446 	emlxs_port_t	*port;
7447 	int		ddiinst;
7448 	int		count;
7449 	uint32_t	init_flag = (uint32_t)-1;
7450 
7451 	ddiinst = ddi_get_instance(dip);
7452 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
7453 	port = &PPORT;
7454 
7455 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);
7456 
7457 	mutex_enter(&EMLXS_PM_LOCK);
7458 	hba->pm_state |= EMLXS_PM_IN_DETACH;
7459 	mutex_exit(&EMLXS_PM_LOCK);
7460 
7461 	/* Lower the power level */
7462 	/*
7463 	 * This will not suspend the driver since the
7464 	 * EMLXS_PM_IN_DETACH has been set
7465 	 */
7466 	if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
7467 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
7468 		    "Unable to lower power.");
7469 
7470 		mutex_enter(&EMLXS_PM_LOCK);
7471 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
7472 		mutex_exit(&EMLXS_PM_LOCK);
7473 
7474 		return (DDI_FAILURE);
7475 	}
7476 
7477 	/* Take the adapter offline first, if not already */
7478 	if (emlxs_offline(hba, 1) != 0) {
7479 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
7480 		    "Unable to take adapter offline.");
7481 
7482 		mutex_enter(&EMLXS_PM_LOCK);
7483 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
7484 		mutex_exit(&EMLXS_PM_LOCK);
7485 
7486 		(void) emlxs_pm_raise_power(dip);
7487 
7488 		return (DDI_FAILURE);
7489 	}
7490 	/* Check ub buffer pools */
7491 	if (port->ub_pool) {
7492 		mutex_enter(&EMLXS_UB_LOCK);
7493 
7494 		/* Wait up to 10 seconds for all ub pools to be freed */
7495 		count = 10 * 2;
7496 		while (port->ub_pool && count) {
7497 			mutex_exit(&EMLXS_UB_LOCK);
7498 			delay(drv_usectohz(500000));	/* half second wait */
7499 			count--;
7500 			mutex_enter(&EMLXS_UB_LOCK);
7501 		}
7502 
7503 		if (port->ub_pool) {
7504 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7505 			    "fca_unbind_port: Unsolicited buffers still "
7506 			    "active. port=%p. Destroying...", port);
7507 
7508 			/* Destroy all pools */
7509 			while (port->ub_pool) {
7510 				emlxs_ub_destroy(port, port->ub_pool);
7511 			}
7512 		}
7513 
7514 		mutex_exit(&EMLXS_UB_LOCK);
7515 	}
7516 	init_flag &= ~ATTACH_ONLINE;
7517 
7518 	/* Remove the driver instance */
7519 	emlxs_driver_remove(dip, init_flag, 0);
7520 
7521 	return (DDI_SUCCESS);
7522 
7523 } /* emlxs_hba_detach() */
7524 
7525 
7526 extern int
emlxs_map_bus(emlxs_hba_t * hba)7527 emlxs_map_bus(emlxs_hba_t *hba)
7528 {
7529 	emlxs_port_t		*port = &PPORT;
7530 	dev_info_t		*dip;
7531 	ddi_device_acc_attr_t	dev_attr;
7532 	int			status;
7533 
7534 	dip = (dev_info_t *)hba->dip;
7535 	dev_attr = emlxs_dev_acc_attr;
7536 
7537 	if (hba->bus_type == SBUS_FC) {
7538 		if (hba->pci_acc_handle == 0) {
7539 			status = ddi_regs_map_setup(dip,
7540 			    SBUS_DFLY_PCI_CFG_RINDEX,
7541 			    (caddr_t *)&hba->pci_addr,
7542 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
7543 			if (status != DDI_SUCCESS) {
7544 				EMLXS_MSGF(EMLXS_CONTEXT,
7545 				    &emlxs_attach_failed_msg,
7546 				    "(SBUS) ddi_regs_map_setup PCI failed. "
7547 				    "status=%x", status);
7548 				goto failed;
7549 			}
7550 		}
7551 
7552 		if (hba->sbus_pci_handle == 0) {
7553 			status = ddi_regs_map_setup(dip,
7554 			    SBUS_TITAN_PCI_CFG_RINDEX,
7555 			    (caddr_t *)&hba->sbus_pci_addr,
7556 			    0, 0, &dev_attr, &hba->sbus_pci_handle);
7557 			if (status != DDI_SUCCESS) {
7558 				EMLXS_MSGF(EMLXS_CONTEXT,
7559 				    &emlxs_attach_failed_msg,
7560 				    "(SBUS) ddi_regs_map_setup TITAN PCI "
7561 				    "failed. status=%x", status);
7562 				goto failed;
7563 			}
7564 		}
7565 
7566 	} else {	/* ****** PCI ****** */
7567 
7568 		if (hba->pci_acc_handle == 0) {
7569 			status = ddi_regs_map_setup(dip,
7570 			    PCI_CFG_RINDEX,
7571 			    (caddr_t *)&hba->pci_addr,
7572 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
7573 			if (status != DDI_SUCCESS) {
7574 				EMLXS_MSGF(EMLXS_CONTEXT,
7575 				    &emlxs_attach_failed_msg,
7576 				    "(PCI) ddi_regs_map_setup PCI failed. "
7577 				    "status=%x", status);
7578 				goto failed;
7579 			}
7580 		}
7581 #ifdef EMLXS_I386
7582 		/* Setting up PCI configure space */
7583 		(void) ddi_put16(hba->pci_acc_handle,
7584 		    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
7585 		    CMD_CFG_VALUE | CMD_IO_ENBL);
7586 
7587 #ifdef FMA_SUPPORT
7588 		if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
7589 		    != DDI_FM_OK) {
7590 			EMLXS_MSGF(EMLXS_CONTEXT,
7591 			    &emlxs_invalid_access_handle_msg, NULL);
7592 			goto failed;
7593 		}
7594 #endif  /* FMA_SUPPORT */
7595 
7596 #endif	/* EMLXS_I386 */
7597 
7598 	}
7599 	return (0);
7600 
7601 failed:
7602 
7603 	emlxs_unmap_bus(hba);
7604 	return (ENOMEM);
7605 
7606 } /* emlxs_map_bus() */
7607 
7608 
7609 extern void
emlxs_unmap_bus(emlxs_hba_t * hba)7610 emlxs_unmap_bus(emlxs_hba_t *hba)
7611 {
7612 	if (hba->pci_acc_handle) {
7613 		(void) ddi_regs_map_free(&hba->pci_acc_handle);
7614 		hba->pci_acc_handle = 0;
7615 	}
7616 
7617 	if (hba->sbus_pci_handle) {
7618 		(void) ddi_regs_map_free(&hba->sbus_pci_handle);
7619 		hba->sbus_pci_handle = 0;
7620 	}
7621 
7622 	return;
7623 
7624 } /* emlxs_unmap_bus() */
7625 
7626 
7627 static int
emlxs_get_props(emlxs_hba_t * hba)7628 emlxs_get_props(emlxs_hba_t *hba)
7629 {
7630 	emlxs_config_t	*cfg;
7631 	uint32_t	i;
7632 	char		string[256];
7633 	uint32_t	new_value;
7634 
7635 	/* Initialize each parameter */
7636 	for (i = 0; i < NUM_CFG_PARAM; i++) {
7637 		cfg = &hba->config[i];
7638 
7639 		/* Ensure strings are terminated */
7640 		cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0;
7641 		cfg->help[(EMLXS_CFG_HELP_SIZE-1)]  = 0;
7642 
7643 		/* Set the current value to the default value */
7644 		new_value = cfg->def;
7645 
7646 		/* First check for the global setting */
7647 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
7648 		    (void *)hba->dip, DDI_PROP_DONTPASS,
7649 		    cfg->string, new_value);
7650 
7651 		/* Now check for the per adapter ddiinst setting */
7652 		(void) snprintf(string, sizeof (string), "%s%d-%s", DRIVER_NAME,
7653 		    hba->ddiinst, cfg->string);
7654 
7655 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
7656 		    (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value);
7657 
7658 		/* Now check the parameter */
7659 		cfg->current = emlxs_check_parm(hba, i, new_value);
7660 	}
7661 
7662 	return (0);
7663 
7664 } /* emlxs_get_props() */
7665 
7666 
7667 extern uint32_t
emlxs_check_parm(emlxs_hba_t * hba,uint32_t index,uint32_t new_value)7668 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
7669 {
7670 	emlxs_port_t	*port = &PPORT;
7671 	uint32_t	i;
7672 	emlxs_config_t	*cfg;
7673 	emlxs_vpd_t	*vpd = &VPD;
7674 
7675 	if (index >= NUM_CFG_PARAM) {
7676 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7677 		    "check_parm failed. Invalid index = %d", index);
7678 
7679 		return (new_value);
7680 	}
7681 
7682 	cfg = &hba->config[index];
7683 
7684 	if (new_value > cfg->hi) {
7685 		new_value = cfg->def;
7686 	} else if (new_value < cfg->low) {
7687 		new_value = cfg->def;
7688 	}
7689 
7690 	/* Perform additional checks */
7691 	switch (index) {
7692 #ifdef SFCT_SUPPORT
7693 	case CFG_NPIV_ENABLE:
7694 		if (hba->config[CFG_TARGET_MODE].current &&
7695 		    hba->config[CFG_DTM_ENABLE].current == 0) {
7696 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7697 			    "enable-npiv: Not supported in pure target mode. "
7698 			    "Disabling.");
7699 
7700 			new_value = 0;
7701 		}
7702 		break;
7703 #endif /* SFCT_SUPPORT */
7704 
7705 
7706 	case CFG_NUM_NODES:
7707 		switch (new_value) {
7708 		case 1:
7709 		case 2:
7710 			/* Must have at least 3 if not 0 */
7711 			return (3);
7712 
7713 		default:
7714 			break;
7715 		}
7716 		break;
7717 
7718 	case CFG_FW_CHECK:
7719 		/* The 0x2 bit implies the 0x1 bit will also be set */
7720 		if (new_value & 0x2) {
7721 			new_value |= 0x1;
7722 		}
7723 
7724 		/* The 0x4 bit should not be set if 0x1 or 0x2 is not set */
7725 		if (!(new_value & 0x3) && (new_value & 0x4)) {
7726 			new_value &= ~0x4;
7727 		}
7728 		break;
7729 
7730 	case CFG_LINK_SPEED:
7731 		if ((new_value > 8) &&
7732 		    (hba->config[CFG_TOPOLOGY].current == 4)) {
7733 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7734 			    "link-speed: %dGb not supported in loop topology. "
7735 			    "Switching to auto detect.",
7736 			    new_value);
7737 
7738 			new_value = 0;
7739 			break;
7740 		}
7741 
7742 		if (vpd->link_speed) {
7743 			switch (new_value) {
7744 			case 0:
7745 				break;
7746 
7747 			case 1:
7748 				if (!(vpd->link_speed & LMT_1GB_CAPABLE)) {
7749 					new_value = 0;
7750 
7751 					EMLXS_MSGF(EMLXS_CONTEXT,
7752 					    &emlxs_init_msg,
7753 					    "link-speed: 1Gb not supported "
7754 					    "by adapter. Switching to auto "
7755 					    "detect.");
7756 				}
7757 				break;
7758 
7759 			case 2:
7760 				if (!(vpd->link_speed & LMT_2GB_CAPABLE)) {
7761 					new_value = 0;
7762 
7763 					EMLXS_MSGF(EMLXS_CONTEXT,
7764 					    &emlxs_init_msg,
7765 					    "link-speed: 2Gb not supported "
7766 					    "by adapter. Switching to auto "
7767 					    "detect.");
7768 				}
7769 				break;
7770 
7771 			case 4:
7772 				if (!(vpd->link_speed & LMT_4GB_CAPABLE)) {
7773 					new_value = 0;
7774 
7775 					EMLXS_MSGF(EMLXS_CONTEXT,
7776 					    &emlxs_init_msg,
7777 					    "link-speed: 4Gb not supported "
7778 					    "by adapter. Switching to auto "
7779 					    "detect.");
7780 				}
7781 				break;
7782 
7783 			case 8:
7784 				if (!(vpd->link_speed & LMT_8GB_CAPABLE)) {
7785 					new_value = 0;
7786 
7787 					EMLXS_MSGF(EMLXS_CONTEXT,
7788 					    &emlxs_init_msg,
7789 					    "link-speed: 8Gb not supported "
7790 					    "by adapter. Switching to auto "
7791 					    "detect.");
7792 				}
7793 				break;
7794 
7795 			case 16:
7796 				if (!(vpd->link_speed & LMT_16GB_CAPABLE)) {
7797 					new_value = 0;
7798 
7799 					EMLXS_MSGF(EMLXS_CONTEXT,
7800 					    &emlxs_init_msg,
7801 					    "link-speed: 16Gb not supported "
7802 					    "by adapter. Switching to auto "
7803 					    "detect.");
7804 				}
7805 				break;
7806 
7807 			default:
7808 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7809 				    "link-speed: Invalid value=%d provided. "
7810 				    "Switching to auto detect.",
7811 				    new_value);
7812 
7813 				new_value = 0;
7814 			}
7815 		} else {	/* Perform basic validity check */
7816 
7817 			/* Perform additional check on link speed */
7818 			switch (new_value) {
7819 			case 0:
7820 			case 1:
7821 			case 2:
7822 			case 4:
7823 			case 8:
7824 			case 16:
7825 				/* link-speed is a valid choice */
7826 				break;
7827 
7828 			default:
7829 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7830 				    "link-speed: Invalid value=%d provided. "
7831 				    "Switching to auto detect.",
7832 				    new_value);
7833 
7834 				new_value = 0;
7835 			}
7836 		}
7837 		break;
7838 
7839 	case CFG_TOPOLOGY:
7840 		if ((new_value == 4) &&
7841 		    (hba->config[CFG_LINK_SPEED].current > 8)) {
7842 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7843 			    "topology: Loop topology not supported "
7844 			    "with link speeds greater than 8Gb. "
7845 			    "Switching to auto detect.");
7846 
7847 			new_value = 0;
7848 			break;
7849 		}
7850 
7851 		/* Perform additional check on topology */
7852 		switch (new_value) {
7853 		case 0:
7854 		case 2:
7855 		case 4:
7856 		case 6:
7857 			/* topology is a valid choice */
7858 			break;
7859 
7860 		default:
7861 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7862 			    "topology: Invalid value=%d provided. "
7863 			    "Switching to auto detect.",
7864 			    new_value);
7865 
7866 			new_value = 0;
7867 			break;
7868 		}
7869 		break;
7870 
7871 #ifdef DHCHAP_SUPPORT
7872 	case CFG_AUTH_TYPE:
7873 	{
7874 		uint32_t shift;
7875 		uint32_t mask;
7876 
7877 		/* Perform additional check on auth type */
7878 		shift = 12;
7879 		mask  = 0xF000;
7880 		for (i = 0; i < 4; i++) {
7881 			if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) {
7882 				return (cfg->def);
7883 			}
7884 
7885 			shift -= 4;
7886 			mask >>= 4;
7887 		}
7888 		break;
7889 	}
7890 
7891 	case CFG_AUTH_HASH:
7892 	{
7893 		uint32_t shift;
7894 		uint32_t mask;
7895 
7896 		/* Perform additional check on auth hash */
7897 		shift = 12;
7898 		mask  = 0xF000;
7899 		for (i = 0; i < 4; i++) {
7900 			if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) {
7901 				return (cfg->def);
7902 			}
7903 
7904 			shift -= 4;
7905 			mask >>= 4;
7906 		}
7907 		break;
7908 	}
7909 
7910 	case CFG_AUTH_GROUP:
7911 	{
7912 		uint32_t shift;
7913 		uint32_t mask;
7914 
7915 		/* Perform additional check on auth group */
7916 		shift = 28;
7917 		mask  = 0xF0000000;
7918 		for (i = 0; i < 8; i++) {
7919 			if (((new_value & mask) >> shift) >
7920 			    DFC_AUTH_GROUP_MAX) {
7921 				return (cfg->def);
7922 			}
7923 
7924 			shift -= 4;
7925 			mask >>= 4;
7926 		}
7927 		break;
7928 	}
7929 
7930 	case CFG_AUTH_INTERVAL:
7931 		if (new_value < 10) {
7932 			return (10);
7933 		}
7934 		break;
7935 
7936 
7937 #endif /* DHCHAP_SUPPORT */
7938 
7939 	} /* switch */
7940 
7941 	return (new_value);
7942 
7943 } /* emlxs_check_parm() */
7944 
7945 
7946 extern uint32_t
emlxs_set_parm(emlxs_hba_t * hba,uint32_t index,uint32_t new_value)7947 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
7948 {
7949 	emlxs_port_t	*port = &PPORT;
7950 	emlxs_port_t	*vport;
7951 	uint32_t	vpi;
7952 	emlxs_config_t	*cfg;
7953 	uint32_t	old_value;
7954 
7955 	if (index >= NUM_CFG_PARAM) {
7956 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7957 		    "set_parm failed. Invalid index = %d", index);
7958 
7959 		return ((uint32_t)FC_FAILURE);
7960 	}
7961 
7962 	cfg = &hba->config[index];
7963 
7964 	if (!(cfg->flags & PARM_DYNAMIC)) {
7965 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7966 		    "set_parm failed. %s is not dynamic.", cfg->string);
7967 
7968 		return ((uint32_t)FC_FAILURE);
7969 	}
7970 
7971 	/* Check new value */
7972 	old_value = new_value;
7973 	new_value = emlxs_check_parm(hba, index, new_value);
7974 
7975 	if (old_value != new_value) {
7976 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7977 		    "set_parm: %s invalid. 0x%x --> 0x%x",
7978 		    cfg->string, old_value, new_value);
7979 	}
7980 
7981 	/* Return now if no actual change */
7982 	if (new_value == cfg->current) {
7983 		return (FC_SUCCESS);
7984 	}
7985 
7986 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7987 	    "set_parm: %s changing. 0x%x --> 0x%x",
7988 	    cfg->string, cfg->current, new_value);
7989 
7990 	old_value = cfg->current;
7991 	cfg->current = new_value;
7992 
7993 	/* React to change if needed */
7994 	switch (index) {
7995 
7996 	case CFG_PCI_MAX_READ:
7997 		/* Update MXR */
7998 		emlxs_pcix_mxr_update(hba, 1);
7999 		break;
8000 
8001 #ifdef SFCT_SUPPORT
8002 	case CFG_TARGET_MODE:
8003 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
8004 		break;
8005 #endif /* SFCT_SUPPORT */
8006 
8007 	case CFG_SLI_MODE:
8008 		/* Check SLI mode */
8009 		if ((hba->sli_mode == 3) && (new_value == 2)) {
8010 			/* All vports must be disabled first */
8011 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
8012 				vport = &VPORT(vpi);
8013 
8014 				if (vport->flag & EMLXS_PORT_ENABLED) {
8015 					/* Reset current value */
8016 					cfg->current = old_value;
8017 
8018 					EMLXS_MSGF(EMLXS_CONTEXT,
8019 					    &emlxs_sfs_debug_msg,
8020 					    "set_parm failed. %s: vpi=%d "
8021 					    "still enabled. Value restored to "
8022 					    "0x%x.", cfg->string, vpi,
8023 					    old_value);
8024 
8025 					return (2);
8026 				}
8027 			}
8028 		}
8029 
8030 		if ((hba->sli_mode >= 4) && (new_value < 4)) {
8031 			/*
8032 			 * Not allow to set to SLI 2 or 3 if HBA supports SLI4
8033 			 */
8034 			cfg->current = old_value;
8035 			return ((uint32_t)FC_FAILURE);
8036 		}
8037 
8038 		break;
8039 
8040 	case CFG_NPIV_ENABLE:
8041 		/* Check if NPIV is being disabled */
8042 		if ((old_value == 1) && (new_value == 0)) {
8043 			/* All vports must be disabled first */
8044 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
8045 				vport = &VPORT(vpi);
8046 
8047 				if (vport->flag & EMLXS_PORT_ENABLED) {
8048 					/* Reset current value */
8049 					cfg->current = old_value;
8050 
8051 					EMLXS_MSGF(EMLXS_CONTEXT,
8052 					    &emlxs_sfs_debug_msg,
8053 					    "set_parm failed. %s: vpi=%d "
8054 					    "still enabled. Value restored to "
8055 					    "0x%x.", cfg->string, vpi,
8056 					    old_value);
8057 
8058 					return (2);
8059 				}
8060 			}
8061 		}
8062 
8063 		/* Trigger adapter reset */
8064 		/* (void) emlxs_reset(port, FC_FCA_RESET); */
8065 
8066 		break;
8067 
8068 
8069 	case CFG_VPORT_RESTRICTED:
8070 		for (vpi = 0; vpi < MAX_VPORTS; vpi++) {
8071 			vport = &VPORT(vpi);
8072 
8073 			if (!(vport->flag & EMLXS_PORT_CONFIG)) {
8074 				continue;
8075 			}
8076 
8077 			if (vport->options & EMLXS_OPT_RESTRICT_MASK) {
8078 				continue;
8079 			}
8080 
8081 			if (new_value) {
8082 				vport->flag |= EMLXS_PORT_RESTRICTED;
8083 			} else {
8084 				vport->flag &= ~EMLXS_PORT_RESTRICTED;
8085 			}
8086 		}
8087 
8088 		break;
8089 
8090 #ifdef DHCHAP_SUPPORT
8091 	case CFG_AUTH_ENABLE:
8092 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
8093 		break;
8094 
8095 	case CFG_AUTH_TMO:
8096 		hba->auth_cfg.authentication_timeout = cfg->current;
8097 		break;
8098 
8099 	case CFG_AUTH_MODE:
8100 		hba->auth_cfg.authentication_mode = cfg->current;
8101 		break;
8102 
8103 	case CFG_AUTH_BIDIR:
8104 		hba->auth_cfg.bidirectional = cfg->current;
8105 		break;
8106 
8107 	case CFG_AUTH_TYPE:
8108 		hba->auth_cfg.authentication_type_priority[0] =
8109 		    (cfg->current & 0xF000) >> 12;
8110 		hba->auth_cfg.authentication_type_priority[1] =
8111 		    (cfg->current & 0x0F00) >> 8;
8112 		hba->auth_cfg.authentication_type_priority[2] =
8113 		    (cfg->current & 0x00F0) >> 4;
8114 		hba->auth_cfg.authentication_type_priority[3] =
8115 		    (cfg->current & 0x000F);
8116 		break;
8117 
8118 	case CFG_AUTH_HASH:
8119 		hba->auth_cfg.hash_priority[0] =
8120 		    (cfg->current & 0xF000) >> 12;
8121 		hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8;
8122 		hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4;
8123 		hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F);
8124 		break;
8125 
8126 	case CFG_AUTH_GROUP:
8127 		hba->auth_cfg.dh_group_priority[0] =
8128 		    (cfg->current & 0xF0000000) >> 28;
8129 		hba->auth_cfg.dh_group_priority[1] =
8130 		    (cfg->current & 0x0F000000) >> 24;
8131 		hba->auth_cfg.dh_group_priority[2] =
8132 		    (cfg->current & 0x00F00000) >> 20;
8133 		hba->auth_cfg.dh_group_priority[3] =
8134 		    (cfg->current & 0x000F0000) >> 16;
8135 		hba->auth_cfg.dh_group_priority[4] =
8136 		    (cfg->current & 0x0000F000) >> 12;
8137 		hba->auth_cfg.dh_group_priority[5] =
8138 		    (cfg->current & 0x00000F00) >> 8;
8139 		hba->auth_cfg.dh_group_priority[6] =
8140 		    (cfg->current & 0x000000F0) >> 4;
8141 		hba->auth_cfg.dh_group_priority[7] =
8142 		    (cfg->current & 0x0000000F);
8143 		break;
8144 
8145 	case CFG_AUTH_INTERVAL:
8146 		hba->auth_cfg.reauthenticate_time_interval = cfg->current;
8147 		break;
8148 #endif /* DHCHAP_SUPPORT */
8149 
8150 	}
8151 
8152 	return (FC_SUCCESS);
8153 
8154 } /* emlxs_set_parm() */
8155 
8156 
8157 /*
8158  * emlxs_mem_alloc  OS specific routine for memory allocation / mapping
8159  *
8160  * The buf_info->flags field describes the memory operation requested.
8161  *
8162  * FC_MBUF_PHYSONLY set  requests a supplied virtual address be mapped for DMA
8163  * Virtual address is supplied in buf_info->virt
8164  * DMA mapping flag is in buf_info->align
8165  * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE)
8166  * The mapped physical address is returned buf_info->phys
8167  *
8168  * FC_MBUF_PHYSONLY cleared requests memory be allocated for driver use and
8169  * if FC_MBUF_DMA is set the memory is also mapped for DMA
8170  * The byte alignment of the memory request is supplied in buf_info->align
8171  * The byte size of the memory request is supplied in buf_info->size
8172  * The virtual address is returned buf_info->virt
8173  * The mapped physical address is returned buf_info->phys (for FC_MBUF_DMA)
8174  */
8175 extern uint8_t *
emlxs_mem_alloc(emlxs_hba_t * hba,MBUF_INFO * buf_info)8176 emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
8177 {
8178 	emlxs_port_t		*port = &PPORT;
8179 	ddi_dma_attr_t		dma_attr;
8180 	ddi_device_acc_attr_t	dev_attr;
8181 	uint_t			cookie_count;
8182 	size_t			dma_reallen;
8183 	ddi_dma_cookie_t	dma_cookie;
8184 	uint_t			dma_flag;
8185 	int			status;
8186 
8187 	dma_attr = hba->dma_attr_1sg;
8188 	dev_attr = emlxs_data_acc_attr;
8189 
8190 	if (buf_info->flags & FC_MBUF_SNGLSG) {
8191 		dma_attr.dma_attr_sgllen = 1;
8192 	}
8193 
8194 	if (buf_info->flags & FC_MBUF_DMA32) {
8195 		dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff;
8196 	}
8197 
8198 	if (buf_info->flags & FC_MBUF_PHYSONLY) {
8199 
8200 		if (buf_info->virt == NULL) {
8201 			goto done;
8202 		}
8203 
8204 		/*
8205 		 * Allocate the DMA handle for this DMA object
8206 		 */
8207 		status = ddi_dma_alloc_handle((void *)hba->dip,
8208 		    &dma_attr, DDI_DMA_DONTWAIT,
8209 		    NULL, (ddi_dma_handle_t *)&buf_info->dma_handle);
8210 		if (status != DDI_SUCCESS) {
8211 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8212 			    "ddi_dma_alloc_handle failed: size=%x align=%x "
8213 			    "flags=%x", buf_info->size, buf_info->align,
8214 			    buf_info->flags);
8215 
8216 			buf_info->phys = 0;
8217 			buf_info->dma_handle = 0;
8218 			goto done;
8219 		}
8220 
8221 		switch (buf_info->align) {
8222 		case DMA_READ_WRITE:
8223 			dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
8224 			break;
8225 		case DMA_READ_ONLY:
8226 			dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT);
8227 			break;
8228 		case DMA_WRITE_ONLY:
8229 			dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT);
8230 			break;
8231 		default:
8232 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8233 			    "Invalid DMA flag");
8234 			(void) ddi_dma_free_handle(
8235 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
8236 			buf_info->phys = 0;
8237 			buf_info->dma_handle = 0;
8238 			return ((uint8_t *)buf_info->virt);
8239 		}
8240 
8241 		/* Map this page of memory */
8242 		status = ddi_dma_addr_bind_handle(
8243 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
8244 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
8245 		    dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie,
8246 		    &cookie_count);
8247 
8248 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
8249 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8250 			    "ddi_dma_addr_bind_handle failed: status=%x "
8251 			    "count=%x flags=%x", status, cookie_count,
8252 			    buf_info->flags);
8253 
8254 			(void) ddi_dma_free_handle(
8255 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
8256 			buf_info->phys = 0;
8257 			buf_info->dma_handle = 0;
8258 			goto done;
8259 		}
8260 
8261 		if (hba->bus_type == SBUS_FC) {
8262 
8263 			int32_t burstsizes_limit = 0xff;
8264 			int32_t ret_burst;
8265 
8266 			ret_burst = ddi_dma_burstsizes(
8267 			    buf_info->dma_handle) & burstsizes_limit;
8268 			if (ddi_dma_set_sbus64(buf_info->dma_handle,
8269 			    ret_burst) == DDI_FAILURE) {
8270 				EMLXS_MSGF(EMLXS_CONTEXT,
8271 				    &emlxs_mem_alloc_failed_msg,
8272 				    "ddi_dma_set_sbus64 failed.");
8273 			}
8274 		}
8275 
8276 		/* Save Physical address */
8277 		buf_info->phys = dma_cookie.dmac_laddress;
8278 
8279 		/*
8280 		 * Just to be sure, let's add this
8281 		 */
8282 		EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
8283 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
8284 
8285 	} else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) {
8286 
8287 		dma_attr.dma_attr_align = buf_info->align;
8288 
8289 		/*
8290 		 * Allocate the DMA handle for this DMA object
8291 		 */
8292 		status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr,
8293 		    DDI_DMA_DONTWAIT, NULL,
8294 		    (ddi_dma_handle_t *)&buf_info->dma_handle);
8295 		if (status != DDI_SUCCESS) {
8296 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8297 			    "ddi_dma_alloc_handle failed: size=%x align=%x "
8298 			    "flags=%x", buf_info->size, buf_info->align,
8299 			    buf_info->flags);
8300 
8301 			buf_info->virt = NULL;
8302 			buf_info->phys = 0;
8303 			buf_info->data_handle = 0;
8304 			buf_info->dma_handle = 0;
8305 			goto done;
8306 		}
8307 
8308 		status = ddi_dma_mem_alloc(
8309 		    (ddi_dma_handle_t)buf_info->dma_handle,
8310 		    (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT,
8311 		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt,
8312 		    &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle);
8313 
8314 		if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) {
8315 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8316 			    "ddi_dma_mem_alloc failed: size=%x align=%x "
8317 			    "flags=%x", buf_info->size, buf_info->align,
8318 			    buf_info->flags);
8319 
8320 			(void) ddi_dma_free_handle(
8321 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
8322 
8323 			buf_info->virt = NULL;
8324 			buf_info->phys = 0;
8325 			buf_info->data_handle = 0;
8326 			buf_info->dma_handle = 0;
8327 			goto done;
8328 		}
8329 
8330 		/* Map this page of memory */
8331 		status = ddi_dma_addr_bind_handle(
8332 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
8333 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
8334 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
8335 		    &dma_cookie, &cookie_count);
8336 
8337 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
8338 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8339 			    "ddi_dma_addr_bind_handle failed: status=%x "
8340 			    "count=%d size=%x align=%x flags=%x", status,
8341 			    cookie_count, buf_info->size, buf_info->align,
8342 			    buf_info->flags);
8343 
8344 			(void) ddi_dma_mem_free(
8345 			    (ddi_acc_handle_t *)&buf_info->data_handle);
8346 			(void) ddi_dma_free_handle(
8347 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
8348 
8349 			buf_info->virt = NULL;
8350 			buf_info->phys = 0;
8351 			buf_info->dma_handle = 0;
8352 			buf_info->data_handle = 0;
8353 			goto done;
8354 		}
8355 
8356 		if (hba->bus_type == SBUS_FC) {
8357 			int32_t burstsizes_limit = 0xff;
8358 			int32_t ret_burst;
8359 
8360 			ret_burst =
8361 			    ddi_dma_burstsizes(buf_info->
8362 			    dma_handle) & burstsizes_limit;
8363 			if (ddi_dma_set_sbus64(buf_info->dma_handle,
8364 			    ret_burst) == DDI_FAILURE) {
8365 				EMLXS_MSGF(EMLXS_CONTEXT,
8366 				    &emlxs_mem_alloc_failed_msg,
8367 				    "ddi_dma_set_sbus64 failed.");
8368 			}
8369 		}
8370 
8371 		/* Save Physical address */
8372 		buf_info->phys = dma_cookie.dmac_laddress;
8373 
8374 		/* Just to be sure, let's add this */
8375 		EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
8376 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
8377 
8378 	} else {	/* allocate virtual memory */
8379 
8380 		buf_info->virt =
8381 		    kmem_zalloc((size_t)buf_info->size, KM_NOSLEEP);
8382 		buf_info->phys = 0;
8383 		buf_info->data_handle = 0;
8384 		buf_info->dma_handle = 0;
8385 
8386 		if (buf_info->virt == (uint32_t *)0) {
8387 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8388 			    "size=%x flags=%x", buf_info->size,
8389 			    buf_info->flags);
8390 		}
8391 
8392 	}
8393 
8394 done:
8395 
8396 	return ((uint8_t *)buf_info->virt);
8397 
8398 } /* emlxs_mem_alloc() */
8399 
8400 
8401 
8402 /*
8403  * emlxs_mem_free:
8404  *
8405  * OS specific routine for memory de-allocation / unmapping
8406  *
8407  * The buf_info->flags field describes the memory operation requested.
8408  *
8409  * FC_MBUF_PHYSONLY set  requests a supplied virtual address be unmapped
8410  * for DMA, but not freed. The mapped physical address to be unmapped is in
8411  * buf_info->phys
8412  *
8413  * FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA only
8414  * if FC_MBUF_DMA is set. The mapped physical address to be unmapped is in
8415  * buf_info->phys. The virtual address to be freed is in buf_info->virt
8416  */
8417 /*ARGSUSED*/
8418 extern void
emlxs_mem_free(emlxs_hba_t * hba,MBUF_INFO * buf_info)8419 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info)
8420 {
8421 	if (buf_info->flags & FC_MBUF_PHYSONLY) {
8422 
8423 		if (buf_info->dma_handle) {
8424 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
8425 			(void) ddi_dma_free_handle(
8426 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
8427 			buf_info->dma_handle = NULL;
8428 		}
8429 
8430 	} else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) {
8431 
8432 		if (buf_info->dma_handle) {
8433 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
8434 			(void) ddi_dma_mem_free(
8435 			    (ddi_acc_handle_t *)&buf_info->data_handle);
8436 			(void) ddi_dma_free_handle(
8437 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
8438 			buf_info->dma_handle = NULL;
8439 			buf_info->data_handle = NULL;
8440 		}
8441 
8442 	} else {	/* allocate virtual memory */
8443 
8444 		if (buf_info->virt) {
8445 			kmem_free(buf_info->virt, (size_t)buf_info->size);
8446 			buf_info->virt = NULL;
8447 		}
8448 	}
8449 
8450 } /* emlxs_mem_free() */
8451 
8452 
8453 static int
emlxs_select_fcp_channel(emlxs_hba_t * hba,NODELIST * ndlp,int reset)8454 emlxs_select_fcp_channel(emlxs_hba_t *hba, NODELIST *ndlp, int reset)
8455 {
8456 	int		channel;
8457 	int		msi_id;
8458 
8459 
8460 	/* IO to FCP2 device or a device reset always use fcp channel */
8461 	if ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) || reset) {
8462 		return (hba->channel_fcp);
8463 	}
8464 
8465 
8466 	msi_id = emlxs_select_msiid(hba);
8467 	channel = emlxs_msiid_to_chan(hba, msi_id);
8468 
8469 
8470 
8471 	/* If channel is closed, then try fcp channel */
8472 	if (ndlp->nlp_flag[channel] & NLP_CLOSED) {
8473 		channel = hba->channel_fcp;
8474 	}
8475 	return (channel);
8476 
8477 } /* emlxs_select_fcp_channel() */
8478 
8479 
8480 static int32_t
emlxs_fast_target_reset(emlxs_port_t * port,emlxs_buf_t * sbp,NODELIST * ndlp)8481 emlxs_fast_target_reset(emlxs_port_t *port, emlxs_buf_t *sbp, NODELIST *ndlp)
8482 {
8483 	emlxs_hba_t	*hba = HBA;
8484 	fc_packet_t	*pkt;
8485 	emlxs_config_t	*cfg;
8486 	MAILBOXQ	*mbq;
8487 	MAILBOX		*mb;
8488 	uint32_t	rc;
8489 
8490 	/*
8491 	 * This routine provides a alternative target reset provessing
8492 	 * method. Instead of sending an actual target reset to the
8493 	 * NPort, we will first unreg the login to that NPort. This
8494 	 * will cause all the outstanding IOs the quickly complete with
8495 	 * a NO RPI local error. Next we will force the ULP to relogin
8496 	 * to the NPort by sending an RSCN (for that NPort) to the
8497 	 * upper layer. This method should result in a fast target
8498 	 * reset, as far as IOs completing; however, since an actual
8499 	 * target reset is not sent to the NPort, it is not 100%
8500 	 * compatable. Things like reservations will not be broken.
8501 	 * By default this option is DISABLED, and its only enabled thru
8502 	 * a hidden configuration parameter (fast-tgt-reset).
8503 	 */
8504 	rc = FC_TRAN_BUSY;
8505 	pkt = PRIV2PKT(sbp);
8506 	cfg = &CFG;
8507 
8508 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
8509 		/* issue the mbox cmd to the sli */
8510 		mb = (MAILBOX *) mbq->mbox;
8511 		bzero((void *) mb, MAILBOX_CMD_BSIZE);
8512 		mb->un.varUnregLogin.rpi = (uint16_t)ndlp->nlp_Rpi;
8513 #ifdef SLI3_SUPPORT
8514 		mb->un.varUnregLogin.vpi = port->vpi;
8515 #endif	/* SLI3_SUPPORT */
8516 		mb->mbxCommand = MBX_UNREG_LOGIN;
8517 		mb->mbxOwner = OWN_HOST;
8518 
8519 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8520 		    "Fast Target Reset: unreg rpi=%d tmr=%d", ndlp->nlp_Rpi,
8521 		    cfg[CFG_FAST_TGT_RESET_TMR].current);
8522 
8523 		if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
8524 		    == MBX_SUCCESS) {
8525 
8526 			ndlp->nlp_Rpi = 0;
8527 
8528 			mutex_enter(&sbp->mtx);
8529 			sbp->node = (void *)ndlp;
8530 			sbp->did = ndlp->nlp_DID;
8531 			mutex_exit(&sbp->mtx);
8532 
8533 			if (pkt->pkt_rsplen) {
8534 				bzero((uint8_t *)pkt->pkt_resp,
8535 				    pkt->pkt_rsplen);
8536 			}
8537 			if (cfg[CFG_FAST_TGT_RESET_TMR].current) {
8538 				ndlp->nlp_force_rscn = hba->timer_tics +
8539 				    cfg[CFG_FAST_TGT_RESET_TMR].current;
8540 			}
8541 
8542 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 0);
8543 		}
8544 
8545 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
8546 		rc = FC_SUCCESS;
8547 	}
8548 	return (rc);
8549 } /* emlxs_fast_target_reset() */
8550 
8551 static int32_t
emlxs_send_fcp_cmd(emlxs_port_t * port,emlxs_buf_t * sbp,uint32_t * pkt_flags)8552 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp, uint32_t *pkt_flags)
8553 {
8554 	emlxs_hba_t	*hba = HBA;
8555 	fc_packet_t	*pkt;
8556 	emlxs_config_t	*cfg;
8557 	IOCBQ		*iocbq;
8558 	IOCB		*iocb;
8559 	CHANNEL		*cp;
8560 	NODELIST	*ndlp;
8561 	char		*cmd;
8562 	uint16_t	lun;
8563 	FCP_CMND	*fcp_cmd;
8564 	uint32_t	did;
8565 	uint32_t	reset = 0;
8566 	int		channel;
8567 	int32_t		rval;
8568 
8569 	pkt = PRIV2PKT(sbp);
8570 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8571 
8572 	/* Find target node object */
8573 	ndlp = emlxs_node_find_did(port, did, 1);
8574 
8575 	if (!ndlp || !ndlp->nlp_active) {
8576 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8577 		    "Node not found. did=%x", did);
8578 
8579 		return (FC_BADPACKET);
8580 	}
8581 
8582 	/* When the fcp channel is closed we stop accepting any FCP cmd */
8583 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8584 		return (FC_TRAN_BUSY);
8585 	}
8586 
8587 	/* Snoop for target or lun reset first */
8588 	/* We always use FCP channel to send out target/lun reset fcp cmds */
8589 	/* interrupt affinity only applies to non tgt lun reset fcp cmd */
8590 
8591 	cmd = (char *)pkt->pkt_cmd;
8592 	lun = *((uint16_t *)cmd);
8593 	lun = LE_SWAP16(lun);
8594 
8595 	iocbq = &sbp->iocbq;
8596 	iocb = &iocbq->iocb;
8597 	iocbq->node = (void *) ndlp;
8598 
8599 	/* Check for target reset */
8600 	if (cmd[10] & 0x20) {
8601 		/* prepare iocb */
8602 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
8603 		    hba->channel_fcp)) != FC_SUCCESS) {
8604 
8605 			if (rval == 0xff) {
8606 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
8607 				    0, 1);
8608 				rval = FC_SUCCESS;
8609 			}
8610 
8611 			return (rval);
8612 		}
8613 
8614 		mutex_enter(&sbp->mtx);
8615 		sbp->pkt_flags |= PACKET_FCP_TGT_RESET;
8616 		sbp->pkt_flags |= PACKET_POLLED;
8617 		*pkt_flags = sbp->pkt_flags;
8618 		mutex_exit(&sbp->mtx);
8619 
8620 #ifdef SAN_DIAG_SUPPORT
8621 		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET,
8622 		    (HBA_WWN *)&ndlp->nlp_portname, -1);
8623 #endif	/* SAN_DIAG_SUPPORT */
8624 
8625 		iocbq->flag |= IOCB_PRIORITY;
8626 
8627 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8628 		    "Target Reset: did=%x", did);
8629 
8630 		cfg = &CFG;
8631 		if (cfg[CFG_FAST_TGT_RESET].current) {
8632 			if (emlxs_fast_target_reset(port, sbp, ndlp) ==
8633 			    FC_SUCCESS) {
8634 				return (FC_SUCCESS);
8635 			}
8636 		}
8637 
8638 		/* Close the node for any further normal IO */
8639 		emlxs_node_close(port, ndlp, hba->channel_fcp,
8640 		    pkt->pkt_timeout);
8641 
8642 		/* Flush the IO's on the tx queues */
8643 		(void) emlxs_tx_node_flush(port, ndlp,
8644 		    &hba->chan[hba->channel_fcp], 0, sbp);
8645 
8646 		/* This is the target reset fcp cmd */
8647 		reset = 1;
8648 	}
8649 
8650 	/* Check for lun reset */
8651 	else if (cmd[10] & 0x10) {
8652 		/* prepare iocb */
8653 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
8654 		    hba->channel_fcp)) != FC_SUCCESS) {
8655 
8656 			if (rval == 0xff) {
8657 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
8658 				    0, 1);
8659 				rval = FC_SUCCESS;
8660 			}
8661 
8662 			return (rval);
8663 		}
8664 
8665 		mutex_enter(&sbp->mtx);
8666 		sbp->pkt_flags |= PACKET_FCP_LUN_RESET;
8667 		sbp->pkt_flags |= PACKET_POLLED;
8668 		*pkt_flags = sbp->pkt_flags;
8669 		mutex_exit(&sbp->mtx);
8670 
8671 #ifdef SAN_DIAG_SUPPORT
8672 		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET,
8673 		    (HBA_WWN *)&ndlp->nlp_portname, lun);
8674 #endif	/* SAN_DIAG_SUPPORT */
8675 
8676 		iocbq->flag |= IOCB_PRIORITY;
8677 
8678 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8679 		    "LUN Reset: did=%x lun=%02x LUN=%02x02x", did, lun,
8680 		    cmd[0], cmd[1]);
8681 
8682 		/* Flush the IO's on the tx queues for this lun */
8683 		(void) emlxs_tx_lun_flush(port, ndlp, lun, sbp);
8684 
8685 		/* This is the lun reset fcp cmd */
8686 		reset = 1;
8687 	}
8688 
8689 	channel = emlxs_select_fcp_channel(hba, ndlp, reset);
8690 
8691 #ifdef SAN_DIAG_SUPPORT
8692 	sbp->sd_start_time = gethrtime();
8693 #endif /* SAN_DIAG_SUPPORT */
8694 
8695 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8696 	emlxs_swap_fcp_pkt(sbp);
8697 #endif	/* EMLXS_MODREV2X */
8698 
8699 	fcp_cmd = (FCP_CMND *) pkt->pkt_cmd;
8700 
8701 	if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) {
8702 		fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE;
8703 	}
8704 
8705 	if (reset == 0) {
8706 		/*
8707 		 * tgt lun reset fcp cmd has been prepared
8708 		 * separately in the beginning
8709 		 */
8710 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
8711 		    channel)) != FC_SUCCESS) {
8712 
8713 			if (rval == 0xff) {
8714 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
8715 				    0, 1);
8716 				rval = FC_SUCCESS;
8717 			}
8718 
8719 			return (rval);
8720 		}
8721 	}
8722 
8723 	cp = &hba->chan[channel];
8724 	cp->ulpSendCmd++;
8725 
8726 	/* Initalize sbp */
8727 	mutex_enter(&sbp->mtx);
8728 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8729 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8730 	sbp->node = (void *)ndlp;
8731 	sbp->lun = lun;
8732 	sbp->class = iocb->ULPCLASS;
8733 	sbp->did = ndlp->nlp_DID;
8734 	mutex_exit(&sbp->mtx);
8735 
8736 	if (pkt->pkt_cmdlen) {
8737 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8738 		    DDI_DMA_SYNC_FORDEV);
8739 	}
8740 
8741 	if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) {
8742 		EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
8743 		    DDI_DMA_SYNC_FORDEV);
8744 	}
8745 
8746 	HBASTATS.FcpIssued++;
8747 
8748 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8749 	return (FC_SUCCESS);
8750 
8751 } /* emlxs_send_fcp_cmd() */
8752 
8753 
8754 
8755 
8756 /*
8757  * We have to consider this setup works for INTX, MSI, and MSIX
8758  * For INTX, intr_count is always 1
8759  * For MSI, intr_count is always 2 by default
8760  * For MSIX, intr_count is configurable (1, 2, 4, 8) for now.
8761  */
8762 extern int
emlxs_select_msiid(emlxs_hba_t * hba)8763 emlxs_select_msiid(emlxs_hba_t *hba)
8764 {
8765 	int	msiid = 0;
8766 
8767 	/* We use round-robin */
8768 	mutex_enter(&EMLXS_MSIID_LOCK);
8769 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8770 		msiid = hba->last_msiid;
8771 		hba->last_msiid ++;
8772 		if (hba->last_msiid >= hba->intr_count) {
8773 			hba->last_msiid = 0;
8774 		}
8775 	} else {
8776 		/* This should work for INTX and MSI also */
8777 		/* For SLI3 the chan_count is always 4 */
8778 		/* For SLI3 the msiid is limited to chan_count */
8779 		msiid = hba->last_msiid;
8780 		hba->last_msiid ++;
8781 		if (hba->intr_count > hba->chan_count) {
8782 			if (hba->last_msiid >= hba->chan_count) {
8783 				hba->last_msiid = 0;
8784 			}
8785 		} else {
8786 			if (hba->last_msiid >= hba->intr_count) {
8787 				hba->last_msiid = 0;
8788 			}
8789 		}
8790 	}
8791 	mutex_exit(&EMLXS_MSIID_LOCK);
8792 
8793 	return (msiid);
8794 } /* emlxs_select_msiid */
8795 
8796 
8797 /*
8798  * A channel has a association with a msi id.
8799  * One msi id could be associated with multiple channels.
8800  */
8801 extern int
emlxs_msiid_to_chan(emlxs_hba_t * hba,int msi_id)8802 emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id)
8803 {
8804 	emlxs_config_t *cfg = &CFG;
8805 	EQ_DESC_t *eqp;
8806 	int chan;
8807 	int num_wq;
8808 
8809 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8810 		/* For SLI4 round robin all WQs associated with the msi_id */
8811 		eqp = &hba->sli.sli4.eq[msi_id];
8812 
8813 		mutex_enter(&eqp->lastwq_lock);
8814 		chan = eqp->lastwq;
8815 		eqp->lastwq++;
8816 		num_wq = cfg[CFG_NUM_WQ].current;
8817 		if (eqp->lastwq >= ((msi_id + 1) * num_wq)) {
8818 			eqp->lastwq -= num_wq;
8819 		}
8820 		mutex_exit(&eqp->lastwq_lock);
8821 
8822 		return (chan);
8823 	} else {
8824 		/* This is for SLI3 mode */
8825 		return (hba->msi2chan[msi_id]);
8826 	}
8827 
8828 } /* emlxs_msiid_to_chan */
8829 
8830 
8831 #ifdef SFCT_SUPPORT
8832 static int32_t
emlxs_send_fct_status(emlxs_port_t * port,emlxs_buf_t * sbp)8833 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp)
8834 {
8835 	emlxs_hba_t		*hba = HBA;
8836 	IOCBQ			*iocbq;
8837 	IOCB			*iocb;
8838 	NODELIST		*ndlp;
8839 	CHANNEL			*cp;
8840 	uint32_t		did;
8841 
8842 	did = sbp->did;
8843 	ndlp = sbp->node;
8844 	cp = (CHANNEL *)sbp->channel;
8845 
8846 	iocbq = &sbp->iocbq;
8847 	iocb = &iocbq->iocb;
8848 
8849 	/* Make sure node is still active */
8850 	if (!ndlp->nlp_active) {
8851 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8852 		    "*Node not found. did=%x", did);
8853 
8854 		return (FC_BADPACKET);
8855 	}
8856 
8857 	/* If gate is closed */
8858 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8859 		return (FC_TRAN_BUSY);
8860 	}
8861 
8862 	iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;
8863 	if (EMLXS_SLI_PREP_FCT_IOCB(port, sbp, cp->channelno) !=
8864 	    IOERR_SUCCESS) {
8865 		return (FC_TRAN_BUSY);
8866 	}
8867 
8868 	HBASTATS.FcpIssued++;
8869 
8870 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8871 
8872 	return (FC_SUCCESS);
8873 
8874 } /* emlxs_send_fct_status() */
8875 
8876 
8877 static int32_t
emlxs_send_fct_abort(emlxs_port_t * port,emlxs_buf_t * sbp)8878 emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp)
8879 {
8880 	emlxs_hba_t	*hba = HBA;
8881 	IOCBQ		*iocbq;
8882 	IOCB		*iocb;
8883 	NODELIST	*ndlp;
8884 	CHANNEL		*cp;
8885 	uint32_t	did;
8886 
8887 	did = sbp->did;
8888 	ndlp = sbp->node;
8889 	cp = (CHANNEL *)sbp->channel;
8890 
8891 	iocbq = &sbp->iocbq;
8892 	iocb = &iocbq->iocb;
8893 
8894 	/* Make sure node is still active */
8895 	if ((ndlp == NULL) || (!ndlp->nlp_active)) {
8896 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8897 		    "*Node not found. did=%x", did);
8898 
8899 		return (FC_BADPACKET);
8900 	}
8901 
8902 	/* If gate is closed */
8903 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8904 		return (FC_TRAN_BUSY);
8905 	}
8906 
8907 	iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
8908 	if (EMLXS_SLI_PREP_FCT_IOCB(port, sbp, cp->channelno) !=
8909 	    IOERR_SUCCESS) {
8910 		return (FC_TRAN_BUSY);
8911 	}
8912 
8913 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, sbp->channel, iocbq);
8914 
8915 	return (FC_SUCCESS);
8916 
8917 } /* emlxs_send_fct_abort() */
8918 
8919 #endif /* SFCT_SUPPORT */
8920 
8921 
8922 static int32_t
emlxs_send_ip(emlxs_port_t * port,emlxs_buf_t * sbp)8923 emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
8924 {
8925 	emlxs_hba_t	*hba = HBA;
8926 	fc_packet_t	*pkt;
8927 	IOCBQ		*iocbq;
8928 	IOCB		*iocb;
8929 	CHANNEL		*cp;
8930 	uint32_t	i;
8931 	NODELIST	*ndlp;
8932 	uint32_t	did;
8933 	int32_t 	rval;
8934 
8935 	pkt = PRIV2PKT(sbp);
8936 	cp = &hba->chan[hba->channel_ip];
8937 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8938 
8939 	/* Check if node exists */
8940 	/* Broadcast did is always a success */
8941 	ndlp = emlxs_node_find_did(port, did, 1);
8942 
8943 	if (!ndlp || !ndlp->nlp_active) {
8944 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8945 		    "Node not found. did=0x%x", did);
8946 
8947 		return (FC_BADPACKET);
8948 	}
8949 
8950 	/* Check if gate is temporarily closed */
8951 	if (ndlp->nlp_flag[hba->channel_ip] & NLP_CLOSED) {
8952 		return (FC_TRAN_BUSY);
8953 	}
8954 
8955 	/* Check if an exchange has been created */
8956 	if ((ndlp->nlp_Xri == 0) && (did != BCAST_DID)) {
8957 		/* No exchange.  Try creating one */
8958 		(void) emlxs_create_xri(port, cp, ndlp);
8959 
8960 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8961 		    "Adapter Busy. Exchange not found. did=0x%x", did);
8962 
8963 		return (FC_TRAN_BUSY);
8964 	}
8965 
8966 	/* ULP PATCH: pkt_cmdlen was found to be set to zero */
8967 	/* on BROADCAST commands */
8968 	if (pkt->pkt_cmdlen == 0) {
8969 		/* Set the pkt_cmdlen to the cookie size */
8970 #if (EMLXS_MODREV >= EMLXS_MODREV3)
8971 		for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
8972 			pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
8973 		}
8974 #else
8975 		pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
8976 #endif	/* >= EMLXS_MODREV3 */
8977 
8978 	}
8979 
8980 	iocbq = &sbp->iocbq;
8981 	iocb = &iocbq->iocb;
8982 
8983 	iocbq->node = (void *)ndlp;
8984 	if ((rval = EMLXS_SLI_PREP_IP_IOCB(port, sbp)) != FC_SUCCESS) {
8985 
8986 		if (rval == 0xff) {
8987 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8988 			rval = FC_SUCCESS;
8989 		}
8990 
8991 		return (rval);
8992 	}
8993 
8994 	cp->ulpSendCmd++;
8995 
8996 	/* Initalize sbp */
8997 	mutex_enter(&sbp->mtx);
8998 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8999 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9000 	sbp->node = (void *)ndlp;
9001 	sbp->lun = EMLXS_LUN_NONE;
9002 	sbp->class = iocb->ULPCLASS;
9003 	sbp->did = did;
9004 	mutex_exit(&sbp->mtx);
9005 
9006 	if (pkt->pkt_cmdlen) {
9007 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9008 		    DDI_DMA_SYNC_FORDEV);
9009 	}
9010 
9011 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9012 
9013 	return (FC_SUCCESS);
9014 
9015 } /* emlxs_send_ip() */
9016 
9017 
9018 static int32_t
emlxs_send_els(emlxs_port_t * port,emlxs_buf_t * sbp)9019 emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
9020 {
9021 	emlxs_hba_t	*hba = HBA;
9022 	emlxs_port_t	*vport;
9023 	fc_packet_t	*pkt;
9024 	IOCBQ		*iocbq;
9025 	CHANNEL		*cp;
9026 	SERV_PARM	*sp;
9027 	uint32_t	cmd;
9028 	int		i;
9029 	ELS_PKT		*els_pkt;
9030 	NODELIST	*ndlp;
9031 	uint32_t	did;
9032 	char		fcsp_msg[32];
9033 	int		rc;
9034 	int32_t 	rval;
9035 	emlxs_config_t  *cfg = &CFG;
9036 
9037 	fcsp_msg[0] = 0;
9038 	pkt = PRIV2PKT(sbp);
9039 	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
9040 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9041 
9042 	iocbq = &sbp->iocbq;
9043 
9044 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9045 	emlxs_swap_els_pkt(sbp);
9046 #endif	/* EMLXS_MODREV2X */
9047 
9048 	cmd = *((uint32_t *)pkt->pkt_cmd);
9049 	cmd &= ELS_CMD_MASK;
9050 
9051 	/* Point of no return, except for ADISC & PLOGI */
9052 
9053 	/* Check node */
9054 	switch (cmd) {
9055 	case ELS_CMD_FLOGI:
9056 	case ELS_CMD_FDISC:
9057 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
9058 
9059 			if (emlxs_vpi_logi_notify(port, sbp)) {
9060 				pkt->pkt_state = FC_PKT_LOCAL_RJT;
9061 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9062 				emlxs_unswap_pkt(sbp);
9063 #endif  /* EMLXS_MODREV2X */
9064 				return (FC_FAILURE);
9065 			}
9066 		} else {
9067 			/*
9068 			 * If FLOGI is already complete, then we
9069 			 * should not be receiving another FLOGI.
9070 			 * Reset the link to recover.
9071 			 */
9072 			if (port->flag & EMLXS_PORT_FLOGI_CMPL) {
9073 				pkt->pkt_state = FC_PKT_LOCAL_RJT;
9074 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9075 				emlxs_unswap_pkt(sbp);
9076 #endif  /* EMLXS_MODREV2X */
9077 
9078 				(void) emlxs_reset(port, FC_FCA_LINK_RESET);
9079 				return (FC_FAILURE);
9080 			}
9081 
9082 			if (port->vpi > 0) {
9083 				*((uint32_t *)pkt->pkt_cmd) = ELS_CMD_FDISC;
9084 			}
9085 		}
9086 
9087 		/* Command may have been changed */
9088 		cmd = *((uint32_t *)pkt->pkt_cmd);
9089 		cmd &= ELS_CMD_MASK;
9090 
9091 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
9092 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
9093 		}
9094 
9095 		ndlp = NULL;
9096 
9097 		/* We will process these cmds at the bottom of this routine */
9098 		break;
9099 
9100 	case ELS_CMD_PLOGI:
9101 		/* Make sure we don't log into ourself */
9102 		for (i = 0; i < MAX_VPORTS; i++) {
9103 			vport = &VPORT(i);
9104 
9105 			if (!(vport->flag & EMLXS_INI_BOUND)) {
9106 				continue;
9107 			}
9108 
9109 			if (did == vport->did) {
9110 				pkt->pkt_state = FC_PKT_NPORT_RJT;
9111 
9112 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9113 				emlxs_unswap_pkt(sbp);
9114 #endif	/* EMLXS_MODREV2X */
9115 
9116 				return (FC_FAILURE);
9117 			}
9118 		}
9119 
9120 		ndlp = NULL;
9121 
9122 		if (hba->flag & FC_PT_TO_PT) {
9123 			MAILBOXQ	*mbox;
9124 
9125 			/* ULP bug fix */
9126 			if (pkt->pkt_cmd_fhdr.s_id == 0) {
9127 				pkt->pkt_cmd_fhdr.s_id = FP_DEFAULT_SID;
9128 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
9129 				    "PLOGI: P2P Fix. sid=0-->%x did=%x",
9130 				    pkt->pkt_cmd_fhdr.s_id,
9131 				    pkt->pkt_cmd_fhdr.d_id);
9132 			}
9133 
9134 			mutex_enter(&EMLXS_PORT_LOCK);
9135 			port->did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.s_id);
9136 			port->rdid = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9137 			mutex_exit(&EMLXS_PORT_LOCK);
9138 
9139 			if (hba->sli_mode <= EMLXS_HBA_SLI3_MODE) {
9140 				/* Update our service parms */
9141 				if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
9142 				    MEM_MBOX))) {
9143 					emlxs_mb_config_link(hba, mbox);
9144 
9145 					rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba,
9146 					    mbox, MBX_NOWAIT, 0);
9147 					if ((rc != MBX_BUSY) &&
9148 					    (rc != MBX_SUCCESS)) {
9149 						emlxs_mem_put(hba, MEM_MBOX,
9150 						    (void *)mbox);
9151 					}
9152 				}
9153 			}
9154 		}
9155 
9156 		/* We will process these cmds at the bottom of this routine */
9157 		break;
9158 
9159 	default:
9160 		ndlp = emlxs_node_find_did(port, did, 1);
9161 
9162 		/* If an ADISC is being sent and we have no node, */
9163 		/* then we must fail the ADISC now */
9164 		if (!ndlp && (cmd == ELS_CMD_ADISC) &&
9165 		    (port->mode == MODE_INITIATOR)) {
9166 
9167 			/* Build the LS_RJT response */
9168 			els_pkt = (ELS_PKT *)pkt->pkt_resp;
9169 			els_pkt->elsCode = 0x01;
9170 			els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
9171 			els_pkt->un.lsRjt.un.b.lsRjtRsnCode =
9172 			    LSRJT_LOGICAL_ERR;
9173 			els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
9174 			    LSEXP_NOTHING_MORE;
9175 			els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;
9176 
9177 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
9178 			    "ADISC Rejected. Node not found. did=0x%x", did);
9179 
9180 			if (sbp->channel == NULL) {
9181 				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
9182 					sbp->channel =
9183 					    &hba->chan[hba->channel_els];
9184 				} else {
9185 					sbp->channel =
9186 					    &hba->chan[FC_ELS_RING];
9187 				}
9188 			}
9189 
9190 			/* Return this as rejected by the target */
9191 			emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);
9192 
9193 			return (FC_SUCCESS);
9194 		}
9195 	}
9196 
9197 	/* DID == BCAST_DID is special case to indicate that */
9198 	/* RPI is being passed in seq_id field */
9199 	/* This is used by emlxs_send_logo() for target mode */
9200 
9201 	/* Initalize iocbq */
9202 	iocbq->node = (void *)ndlp;
9203 	if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
9204 
9205 		if (rval == 0xff) {
9206 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9207 			rval = FC_SUCCESS;
9208 		}
9209 
9210 		return (rval);
9211 	}
9212 
9213 	cp = &hba->chan[hba->channel_els];
9214 	cp->ulpSendCmd++;
9215 	sp = (SERV_PARM *)&els_pkt->un.logi;
9216 
9217 	/* Check cmd */
9218 	switch (cmd) {
9219 	case ELS_CMD_PRLI:
9220 		/*
9221 		 * if our firmware version is 3.20 or later,
9222 		 * set the following bits for FC-TAPE support.
9223 		 */
9224 		if ((port->mode == MODE_INITIATOR) &&
9225 		    (hba->vpd.feaLevelHigh >= 0x02) &&
9226 		    (cfg[CFG_ADISC_SUPPORT].current != 0)) {
9227 				els_pkt->un.prli.ConfmComplAllowed = 1;
9228 				els_pkt->un.prli.Retry = 1;
9229 				els_pkt->un.prli.TaskRetryIdReq = 1;
9230 		} else {
9231 				els_pkt->un.prli.ConfmComplAllowed = 0;
9232 				els_pkt->un.prli.Retry = 0;
9233 				els_pkt->un.prli.TaskRetryIdReq = 0;
9234 		}
9235 
9236 		break;
9237 
9238 		/* This is a patch for the ULP stack. */
9239 
9240 		/*
9241 		 * ULP only reads our service parameters once during bind_port,
9242 		 * but the service parameters change due to topology.
9243 		 */
9244 	case ELS_CMD_FLOGI:
9245 	case ELS_CMD_FDISC:
9246 	case ELS_CMD_PLOGI:
9247 	case ELS_CMD_PDISC:
9248 		/* Copy latest service parameters to payload */
9249 		bcopy((void *) &port->sparam, (void *)sp, sizeof (SERV_PARM));
9250 
9251 		if ((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) {
9252 
9253 			/* Clear support for virtual fabrics */
9254 			/* randomOffset bit controls this for FLOGI */
9255 			sp->cmn.randomOffset = 0;
9256 
9257 			/* Set R_A_TOV to current value */
9258 			sp->cmn.w2.r_a_tov =
9259 			    LE_SWAP32((hba->fc_ratov * 1000));
9260 		}
9261 
9262 		if ((hba->flag & FC_NPIV_ENABLED) &&
9263 		    (hba->flag & FC_NPIV_SUPPORTED) &&
9264 		    (cmd == ELS_CMD_PLOGI)) {
9265 			emlxs_vvl_fmt_t	*vvl;
9266 
9267 			sp->VALID_VENDOR_VERSION = 1;
9268 			vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
9269 			vvl->un0.w0.oui = 0x0000C9;
9270 			vvl->un0.word0 = LE_SWAP32(vvl->un0.word0);
9271 			vvl->un1.w1.vport =  (port->vpi > 0) ? 1 : 0;
9272 			vvl->un1.word1 = LE_SWAP32(vvl->un1.word1);
9273 		}
9274 
9275 #ifdef DHCHAP_SUPPORT
9276 		emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
9277 #endif	/* DHCHAP_SUPPORT */
9278 
9279 		break;
9280 	}
9281 
9282 	/* Initialize the sbp */
9283 	mutex_enter(&sbp->mtx);
9284 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9285 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9286 	sbp->node = (void *)ndlp;
9287 	sbp->lun = EMLXS_LUN_NONE;
9288 	sbp->did = did;
9289 	mutex_exit(&sbp->mtx);
9290 
9291 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
9292 	    emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);
9293 
9294 	if (pkt->pkt_cmdlen) {
9295 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9296 		    DDI_DMA_SYNC_FORDEV);
9297 	}
9298 
9299 	/* Check node */
9300 	switch (cmd) {
9301 	case ELS_CMD_FLOGI:
9302 	case ELS_CMD_FDISC:
9303 		if (port->mode == MODE_INITIATOR) {
9304 			/* Make sure fabric node is destroyed */
9305 			/* It should already have been destroyed at link down */
9306 			if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
9307 				ndlp = emlxs_node_find_did(port, FABRIC_DID, 1);
9308 				if (ndlp) {
9309 					if (EMLXS_SLI_UNREG_NODE(port, ndlp,
9310 					    NULL, NULL, iocbq) == 0) {
9311 						/* Deferring iocb tx until */
9312 						/* completion of unreg */
9313 						return (FC_SUCCESS);
9314 					}
9315 				}
9316 			}
9317 		}
9318 		break;
9319 
9320 	case ELS_CMD_PLOGI:
9321 
9322 		ndlp = emlxs_node_find_did(port, did, 1);
9323 
9324 		if (ndlp && ndlp->nlp_active) {
9325 			/* Close the node for any further normal IO */
9326 			emlxs_node_close(port, ndlp, hba->channel_fcp,
9327 			    pkt->pkt_timeout + 10);
9328 			emlxs_node_close(port, ndlp, hba->channel_ip,
9329 			    pkt->pkt_timeout + 10);
9330 
9331 			/* Flush tx queues */
9332 			(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9333 
9334 			/* Flush chip queues */
9335 			(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9336 		}
9337 
9338 		break;
9339 
9340 	case ELS_CMD_PRLI:
9341 
9342 		ndlp = emlxs_node_find_did(port, did, 1);
9343 
9344 		if (ndlp && ndlp->nlp_active) {
9345 			/*
9346 			 * Close the node for any further FCP IO;
9347 			 * Flush all outstanding I/O only if
9348 			 * "Establish Image Pair" bit is set.
9349 			 */
9350 			emlxs_node_close(port, ndlp, hba->channel_fcp,
9351 			    pkt->pkt_timeout + 10);
9352 
9353 			if (els_pkt->un.prli.estabImagePair) {
9354 				/* Flush tx queues */
9355 				(void) emlxs_tx_node_flush(port, ndlp,
9356 				    &hba->chan[hba->channel_fcp], 0, 0);
9357 
9358 				/* Flush chip queues */
9359 				(void) emlxs_chipq_node_flush(port,
9360 				    &hba->chan[hba->channel_fcp], ndlp, 0);
9361 			}
9362 		}
9363 
9364 		break;
9365 
9366 	}
9367 
9368 	HBASTATS.ElsCmdIssued++;
9369 
9370 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9371 
9372 	return (FC_SUCCESS);
9373 
9374 } /* emlxs_send_els() */
9375 
9376 
9377 
9378 
9379 static int32_t
emlxs_send_els_rsp(emlxs_port_t * port,emlxs_buf_t * sbp)9380 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
9381 {
9382 	emlxs_hba_t	*hba = HBA;
9383 	emlxs_config_t  *cfg = &CFG;
9384 	fc_packet_t	*pkt;
9385 	IOCBQ		*iocbq;
9386 	IOCB		*iocb;
9387 	NODELIST	*ndlp;
9388 	CHANNEL		*cp;
9389 	int		i;
9390 	uint32_t	cmd;
9391 	uint32_t	ucmd;
9392 	ELS_PKT		*els_pkt;
9393 	fc_unsol_buf_t	*ubp;
9394 	emlxs_ub_priv_t	*ub_priv;
9395 	uint32_t	did;
9396 	char		fcsp_msg[32];
9397 	uint8_t		*ub_buffer;
9398 	int32_t		rval;
9399 
9400 	fcsp_msg[0] = 0;
9401 	pkt = PRIV2PKT(sbp);
9402 	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
9403 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9404 
9405 	iocbq = &sbp->iocbq;
9406 	iocb = &iocbq->iocb;
9407 
9408 	/* Acquire the unsolicited command this pkt is replying to */
9409 	if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
9410 		/* This is for auto replies when no ub's are used */
9411 		ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
9412 		ubp = NULL;
9413 		ub_priv = NULL;
9414 		ub_buffer = NULL;
9415 
9416 #ifdef SFCT_SUPPORT
9417 		if (sbp->fct_cmd) {
9418 			fct_els_t *els =
9419 			    (fct_els_t *)sbp->fct_cmd->cmd_specific;
9420 			ub_buffer = (uint8_t *)els->els_req_payload;
9421 		}
9422 #endif /* SFCT_SUPPORT */
9423 
9424 	} else {
9425 		/* Find the ub buffer that goes with this reply */
9426 		if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
9427 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
9428 			    "ELS reply: Invalid oxid=%x",
9429 			    pkt->pkt_cmd_fhdr.ox_id);
9430 			return (FC_BADPACKET);
9431 		}
9432 
9433 		ub_buffer = (uint8_t *)ubp->ub_buffer;
9434 		ub_priv = ubp->ub_fca_private;
9435 		ucmd = ub_priv->cmd;
9436 
9437 		ub_priv->flags |= EMLXS_UB_REPLY;
9438 
9439 		/* Reset oxid to ELS command */
9440 		/* We do this because the ub is only valid */
9441 		/* until we return from this thread */
9442 		pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
9443 	}
9444 
9445 	/* Save the result */
9446 	sbp->ucmd = ucmd;
9447 
9448 	if (sbp->channel == NULL) {
9449 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
9450 			sbp->channel = &hba->chan[hba->channel_els];
9451 		} else {
9452 			sbp->channel = &hba->chan[FC_ELS_RING];
9453 		}
9454 	}
9455 
9456 	/* Check for interceptions */
9457 	switch (ucmd) {
9458 
9459 #ifdef ULP_PATCH2
9460 	case ELS_CMD_LOGO:
9461 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH2)) {
9462 			break;
9463 		}
9464 
9465 		/* Check if this was generated by ULP and not us */
9466 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
9467 
9468 			/*
9469 			 * Since we replied to this already,
9470 			 * we won't need to send this now
9471 			 */
9472 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9473 
9474 			return (FC_SUCCESS);
9475 		}
9476 
9477 		break;
9478 #endif /* ULP_PATCH2 */
9479 
9480 #ifdef ULP_PATCH3
9481 	case ELS_CMD_PRLI:
9482 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH3)) {
9483 			break;
9484 		}
9485 
9486 		/* Check if this was generated by ULP and not us */
9487 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
9488 
9489 			/*
9490 			 * Since we replied to this already,
9491 			 * we won't need to send this now
9492 			 */
9493 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9494 
9495 			return (FC_SUCCESS);
9496 		}
9497 
9498 		break;
9499 #endif /* ULP_PATCH3 */
9500 
9501 
9502 #ifdef ULP_PATCH4
9503 	case ELS_CMD_PRLO:
9504 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH4)) {
9505 			break;
9506 		}
9507 
9508 		/* Check if this was generated by ULP and not us */
9509 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
9510 			/*
9511 			 * Since we replied to this already,
9512 			 * we won't need to send this now
9513 			 */
9514 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9515 
9516 			return (FC_SUCCESS);
9517 		}
9518 
9519 		break;
9520 #endif /* ULP_PATCH4 */
9521 
9522 #ifdef ULP_PATCH6
9523 	case ELS_CMD_RSCN:
9524 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH6)) {
9525 			break;
9526 		}
9527 
9528 		/* Check if this RSCN was generated by us */
9529 		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
9530 			cmd = *((uint32_t *)pkt->pkt_cmd);
9531 			cmd = LE_SWAP32(cmd);
9532 			cmd &= ELS_CMD_MASK;
9533 
9534 			/*
9535 			 * If ULP is accepting this,
9536 			 * then close affected node
9537 			 */
9538 			if ((port->mode == MODE_INITIATOR) && ub_buffer &&
9539 			    cmd == ELS_CMD_ACC) {
9540 				fc_rscn_t	*rscn;
9541 				uint32_t	count;
9542 				uint32_t	*lp;
9543 
9544 				/*
9545 				 * Only the Leadville code path will
9546 				 * come thru here. The RSCN data is NOT
9547 				 * swapped properly for the Comstar code
9548 				 * path.
9549 				 */
9550 				lp = (uint32_t *)ub_buffer;
9551 				rscn = (fc_rscn_t *)lp++;
9552 				count =
9553 				    ((rscn->rscn_payload_len - 4) / 4);
9554 
9555 				/* Close affected ports */
9556 				for (i = 0; i < count; i++, lp++) {
9557 					(void) emlxs_port_offline(port,
9558 					    *lp);
9559 				}
9560 			}
9561 
9562 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
9563 			    "RSCN %s: did=%x oxid=%x rxid=%x. "
9564 			    "Intercepted.", emlxs_elscmd_xlate(cmd),
9565 			    did, pkt->pkt_cmd_fhdr.ox_id,
9566 			    pkt->pkt_cmd_fhdr.rx_id);
9567 
9568 			/*
9569 			 * Since we generated this RSCN,
9570 			 * we won't need to send this reply
9571 			 */
9572 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9573 
9574 			return (FC_SUCCESS);
9575 		}
9576 
9577 		break;
9578 #endif /* ULP_PATCH6 */
9579 
9580 	case ELS_CMD_PLOGI:
9581 		/* Check if this PLOGI was generated by us */
9582 		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
9583 			cmd = *((uint32_t *)pkt->pkt_cmd);
9584 			cmd = LE_SWAP32(cmd);
9585 			cmd &= ELS_CMD_MASK;
9586 
9587 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
9588 			    "PLOGI %s: did=%x oxid=%x rxid=%x. "
9589 			    "Intercepted.", emlxs_elscmd_xlate(cmd),
9590 			    did, pkt->pkt_cmd_fhdr.ox_id,
9591 			    pkt->pkt_cmd_fhdr.rx_id);
9592 
9593 			/*
9594 			 * Since we generated this PLOGI,
9595 			 * we won't need to send this reply
9596 			 */
9597 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9598 
9599 			return (FC_SUCCESS);
9600 		}
9601 
9602 		break;
9603 	}
9604 
9605 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9606 	emlxs_swap_els_pkt(sbp);
9607 #endif	/* EMLXS_MODREV2X */
9608 
9609 
9610 	cmd = *((uint32_t *)pkt->pkt_cmd);
9611 	cmd &= ELS_CMD_MASK;
9612 
9613 	/* Check if modifications are needed */
9614 	switch (ucmd) {
9615 	case (ELS_CMD_PRLI):
9616 
9617 		if (cmd == ELS_CMD_ACC) {
9618 			/* This is a patch for the ULP stack. */
9619 			/* ULP does not keep track of FCP2 support */
9620 			if ((port->mode == MODE_INITIATOR) &&
9621 			    (hba->vpd.feaLevelHigh >= 0x02) &&
9622 			    (cfg[CFG_ADISC_SUPPORT].current != 0)) {
9623 				els_pkt->un.prli.ConfmComplAllowed = 1;
9624 				els_pkt->un.prli.Retry = 1;
9625 				els_pkt->un.prli.TaskRetryIdReq = 1;
9626 			} else {
9627 				els_pkt->un.prli.ConfmComplAllowed = 0;
9628 				els_pkt->un.prli.Retry = 0;
9629 				els_pkt->un.prli.TaskRetryIdReq = 0;
9630 			}
9631 		}
9632 
9633 		break;
9634 
9635 	case ELS_CMD_FLOGI:
9636 	case ELS_CMD_FDISC:
9637 		if (cmd == ELS_CMD_ACC) {
9638 			SERV_PARM *sp = (SERV_PARM *)&els_pkt->un.logi;
9639 
9640 			/* This is a patch for the ULP stack. */
9641 
9642 			/*
9643 			 * ULP only reads our service parameters
9644 			 * once during bind_port, but the service
9645 			 * parameters change due to topology.
9646 			 */
9647 
9648 			/* Copy latest service parameters to payload */
9649 			bcopy((void *)&port->sparam,
9650 			    (void *)sp, sizeof (SERV_PARM));
9651 
9652 			/* We are in pt-to-pt mode. Set R_A_TOV to default */
9653 			sp->cmn.w2.r_a_tov =
9654 			    LE_SWAP32((FF_DEF_RATOV * 1000));
9655 
9656 			/* Clear support for virtual fabrics */
9657 			/* randomOffset bit controls this for FLOGI */
9658 			sp->cmn.randomOffset = 0;
9659 #ifdef DHCHAP_SUPPORT
9660 			emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
9661 #endif	/* DHCHAP_SUPPORT */
9662 		}
9663 		break;
9664 
9665 	case ELS_CMD_PLOGI:
9666 	case ELS_CMD_PDISC:
9667 		if (cmd == ELS_CMD_ACC) {
9668 			SERV_PARM *sp = (SERV_PARM *)&els_pkt->un.logi;
9669 
9670 			/* This is a patch for the ULP stack. */
9671 
9672 			/*
9673 			 * ULP only reads our service parameters
9674 			 * once during bind_port, but the service
9675 			 * parameters change due to topology.
9676 			 */
9677 
9678 			/* Copy latest service parameters to payload */
9679 			bcopy((void *)&port->sparam,
9680 			    (void *)sp, sizeof (SERV_PARM));
9681 
9682 #ifdef DHCHAP_SUPPORT
9683 			emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
9684 #endif	/* DHCHAP_SUPPORT */
9685 		}
9686 		break;
9687 
9688 	}
9689 
9690 	/* Initalize iocbq */
9691 	iocbq->node = (void *)NULL;
9692 	if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
9693 
9694 		if (rval == 0xff) {
9695 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9696 			rval = FC_SUCCESS;
9697 		}
9698 
9699 		return (rval);
9700 	}
9701 
9702 	cp = &hba->chan[hba->channel_els];
9703 	cp->ulpSendCmd++;
9704 
9705 	/* Initalize sbp */
9706 	mutex_enter(&sbp->mtx);
9707 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9708 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9709 	sbp->node = (void *) NULL;
9710 	sbp->lun = EMLXS_LUN_NONE;
9711 	sbp->class = iocb->ULPCLASS;
9712 	sbp->did = did;
9713 	mutex_exit(&sbp->mtx);
9714 
9715 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
9716 	    "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
9717 	    emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
9718 	    pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);
9719 
9720 	/* Process nodes */
9721 	switch (ucmd) {
9722 	case ELS_CMD_RSCN:
9723 		if ((port->mode == MODE_INITIATOR) && ub_buffer &&
9724 		    cmd == ELS_CMD_ACC) {
9725 			fc_rscn_t	*rscn;
9726 			uint32_t	count;
9727 			uint32_t	*lp = NULL;
9728 
9729 			/*
9730 			 * Only the Leadville code path will come thru
9731 			 * here. The RSCN data is NOT swapped properly
9732 			 * for the Comstar code path.
9733 			 */
9734 			lp = (uint32_t *)ub_buffer;
9735 			rscn = (fc_rscn_t *)lp++;
9736 			count = ((rscn->rscn_payload_len - 4) / 4);
9737 
9738 			/* Close affected ports */
9739 			for (i = 0; i < count; i++, lp++) {
9740 				(void) emlxs_port_offline(port, *lp);
9741 			}
9742 		}
9743 		break;
9744 
9745 	case ELS_CMD_PLOGI:
9746 		if (cmd == ELS_CMD_ACC) {
9747 			ndlp = emlxs_node_find_did(port, did, 1);
9748 
9749 			if (ndlp && ndlp->nlp_active) {
9750 				/* Close the node for any further normal IO */
9751 				emlxs_node_close(port, ndlp, hba->channel_fcp,
9752 				    pkt->pkt_timeout + 10);
9753 				emlxs_node_close(port, ndlp, hba->channel_ip,
9754 				    pkt->pkt_timeout + 10);
9755 
9756 				/* Flush tx queue */
9757 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9758 
9759 				/* Flush chip queue */
9760 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9761 			}
9762 		}
9763 		break;
9764 
9765 	case ELS_CMD_PRLI:
9766 		if (cmd == ELS_CMD_ACC) {
9767 			ndlp = emlxs_node_find_did(port, did, 1);
9768 
9769 			if (ndlp && ndlp->nlp_active) {
9770 				/* Close the node for any further normal IO */
9771 				emlxs_node_close(port, ndlp, hba->channel_fcp,
9772 				    pkt->pkt_timeout + 10);
9773 
9774 				/* Flush tx queues */
9775 				(void) emlxs_tx_node_flush(port, ndlp,
9776 				    &hba->chan[hba->channel_fcp], 0, 0);
9777 
9778 				/* Flush chip queues */
9779 				(void) emlxs_chipq_node_flush(port,
9780 				    &hba->chan[hba->channel_fcp], ndlp, 0);
9781 			}
9782 		}
9783 		break;
9784 
9785 	case ELS_CMD_PRLO:
9786 		if (cmd == ELS_CMD_ACC) {
9787 			ndlp = emlxs_node_find_did(port, did, 1);
9788 
9789 			if (ndlp && ndlp->nlp_active) {
9790 				/* Close the node for any further normal IO */
9791 				emlxs_node_close(port, ndlp,
9792 				    hba->channel_fcp, 60);
9793 
9794 				/* Flush tx queues */
9795 				(void) emlxs_tx_node_flush(port, ndlp,
9796 				    &hba->chan[hba->channel_fcp], 0, 0);
9797 
9798 				/* Flush chip queues */
9799 				(void) emlxs_chipq_node_flush(port,
9800 				    &hba->chan[hba->channel_fcp], ndlp, 0);
9801 			}
9802 		}
9803 
9804 		break;
9805 
9806 	case ELS_CMD_LOGO:
9807 		if (cmd == ELS_CMD_ACC) {
9808 			ndlp = emlxs_node_find_did(port, did, 1);
9809 
9810 			if (ndlp && ndlp->nlp_active) {
9811 				/* Close the node for any further normal IO */
9812 				emlxs_node_close(port, ndlp,
9813 				    hba->channel_fcp, 60);
9814 				emlxs_node_close(port, ndlp,
9815 				    hba->channel_ip, 60);
9816 
9817 				/* Flush tx queues */
9818 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9819 
9820 				/* Flush chip queues */
9821 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9822 			}
9823 		}
9824 
9825 		break;
9826 	}
9827 
9828 	if (pkt->pkt_cmdlen) {
9829 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9830 		    DDI_DMA_SYNC_FORDEV);
9831 	}
9832 
9833 	HBASTATS.ElsRspIssued++;
9834 
9835 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9836 
9837 	return (FC_SUCCESS);
9838 
9839 } /* emlxs_send_els_rsp() */
9840 
9841 
9842 #ifdef MENLO_SUPPORT
9843 static int32_t
emlxs_send_menlo(emlxs_port_t * port,emlxs_buf_t * sbp)9844 emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp)
9845 {
9846 	emlxs_hba_t	*hba = HBA;
9847 	fc_packet_t	*pkt;
9848 	IOCBQ		*iocbq;
9849 	IOCB		*iocb;
9850 	CHANNEL		*cp;
9851 	NODELIST	*ndlp;
9852 	uint32_t	did;
9853 	uint32_t	*lp;
9854 	int32_t		rval;
9855 
9856 	pkt = PRIV2PKT(sbp);
9857 	did = EMLXS_MENLO_DID;
9858 	lp = (uint32_t *)pkt->pkt_cmd;
9859 
9860 	iocbq = &sbp->iocbq;
9861 	iocb = &iocbq->iocb;
9862 
9863 	ndlp = emlxs_node_find_did(port, did, 1);
9864 
9865 	if (!ndlp || !ndlp->nlp_active) {
9866 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
9867 		    "Node not found. did=0x%x", did);
9868 
9869 		return (FC_BADPACKET);
9870 	}
9871 
9872 	iocbq->node = (void *) ndlp;
9873 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9874 
9875 		if (rval == 0xff) {
9876 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9877 			rval = FC_SUCCESS;
9878 		}
9879 
9880 		return (rval);
9881 	}
9882 
9883 	cp = &hba->chan[hba->channel_ct];
9884 	cp->ulpSendCmd++;
9885 
9886 	if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) {
9887 		/* Cmd phase */
9888 
9889 		/* Initalize iocb */
9890 		iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id;
9891 		iocb->ULPCONTEXT = 0;
9892 		iocb->ULPPU = 3;
9893 
9894 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9895 		    "%s: [%08x,%08x,%08x,%08x]",
9896 		    emlxs_menlo_cmd_xlate(BE_SWAP32(lp[0])), BE_SWAP32(lp[1]),
9897 		    BE_SWAP32(lp[2]), BE_SWAP32(lp[3]), BE_SWAP32(lp[4]));
9898 
9899 	} else {	/* FC_PKT_OUTBOUND */
9900 
9901 		/* MENLO_CMD_FW_DOWNLOAD Data Phase */
9902 		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
9903 
9904 		/* Initalize iocb */
9905 		iocb->un.genreq64.param = 0;
9906 		iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
9907 		iocb->ULPPU = 1;
9908 
9909 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9910 		    "%s: Data: rxid=0x%x size=%d",
9911 		    emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD),
9912 		    pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen);
9913 	}
9914 
9915 	/* Initalize sbp */
9916 	mutex_enter(&sbp->mtx);
9917 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9918 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9919 	sbp->node = (void *) ndlp;
9920 	sbp->lun = EMLXS_LUN_NONE;
9921 	sbp->class = iocb->ULPCLASS;
9922 	sbp->did = did;
9923 	mutex_exit(&sbp->mtx);
9924 
9925 	EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9926 	    DDI_DMA_SYNC_FORDEV);
9927 
9928 	HBASTATS.CtCmdIssued++;
9929 
9930 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9931 
9932 	return (FC_SUCCESS);
9933 
9934 } /* emlxs_send_menlo() */
9935 #endif /* MENLO_SUPPORT */
9936 
9937 
9938 static int32_t
emlxs_send_ct(emlxs_port_t * port,emlxs_buf_t * sbp)9939 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
9940 {
9941 	emlxs_hba_t	*hba = HBA;
9942 	fc_packet_t	*pkt;
9943 	IOCBQ		*iocbq;
9944 	IOCB		*iocb;
9945 	NODELIST	*ndlp;
9946 	uint32_t	did;
9947 	CHANNEL		*cp;
9948 	int32_t 	rval;
9949 
9950 	pkt = PRIV2PKT(sbp);
9951 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9952 
9953 	iocbq = &sbp->iocbq;
9954 	iocb = &iocbq->iocb;
9955 
9956 	ndlp = emlxs_node_find_did(port, did, 1);
9957 
9958 	if (!ndlp || !ndlp->nlp_active) {
9959 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
9960 		    "Node not found. did=0x%x", did);
9961 
9962 		return (FC_BADPACKET);
9963 	}
9964 
9965 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9966 	emlxs_swap_ct_pkt(sbp);
9967 #endif	/* EMLXS_MODREV2X */
9968 
9969 	iocbq->node = (void *)ndlp;
9970 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9971 
9972 		if (rval == 0xff) {
9973 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9974 			rval = FC_SUCCESS;
9975 		}
9976 
9977 		return (rval);
9978 	}
9979 
9980 	cp = &hba->chan[hba->channel_ct];
9981 	cp->ulpSendCmd++;
9982 
9983 	/* Initalize sbp */
9984 	mutex_enter(&sbp->mtx);
9985 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9986 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9987 	sbp->node = (void *)ndlp;
9988 	sbp->lun = EMLXS_LUN_NONE;
9989 	sbp->class = iocb->ULPCLASS;
9990 	sbp->did = did;
9991 	mutex_exit(&sbp->mtx);
9992 
9993 	if (did == NAMESERVER_DID) {
9994 		SLI_CT_REQUEST	*CtCmd;
9995 		uint32_t	*lp0;
9996 
9997 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9998 		lp0 = (uint32_t *)pkt->pkt_cmd;
9999 
10000 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
10001 		    "%s: did=%x [%08x,%08x]",
10002 		    emlxs_ctcmd_xlate(
10003 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
10004 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
10005 
10006 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
10007 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
10008 		}
10009 
10010 	} else if (did == FDMI_DID) {
10011 		SLI_CT_REQUEST	*CtCmd;
10012 		uint32_t	*lp0;
10013 
10014 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
10015 		lp0 = (uint32_t *)pkt->pkt_cmd;
10016 
10017 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
10018 		    "%s: did=%x [%08x,%08x]",
10019 		    emlxs_mscmd_xlate(
10020 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
10021 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
10022 	} else {
10023 		SLI_CT_REQUEST	*CtCmd;
10024 		uint32_t	*lp0;
10025 
10026 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
10027 		lp0 = (uint32_t *)pkt->pkt_cmd;
10028 
10029 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
10030 		    "%s: did=%x [%08x,%08x]",
10031 		    emlxs_rmcmd_xlate(
10032 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
10033 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
10034 	}
10035 
10036 	if (pkt->pkt_cmdlen) {
10037 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
10038 		    DDI_DMA_SYNC_FORDEV);
10039 	}
10040 
10041 	HBASTATS.CtCmdIssued++;
10042 
10043 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
10044 
10045 	return (FC_SUCCESS);
10046 
10047 } /* emlxs_send_ct() */
10048 
10049 
10050 static int32_t
emlxs_send_ct_rsp(emlxs_port_t * port,emlxs_buf_t * sbp)10051 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
10052 {
10053 	emlxs_hba_t	*hba = HBA;
10054 	fc_packet_t	*pkt;
10055 	CHANNEL		*cp;
10056 	IOCBQ		*iocbq;
10057 	IOCB		*iocb;
10058 	uint32_t	*cmd;
10059 	SLI_CT_REQUEST	*CtCmd;
10060 	int32_t 	rval;
10061 
10062 	pkt = PRIV2PKT(sbp);
10063 	CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
10064 	cmd = (uint32_t *)pkt->pkt_cmd;
10065 
10066 	iocbq = &sbp->iocbq;
10067 	iocb = &iocbq->iocb;
10068 
10069 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
10070 	emlxs_swap_ct_pkt(sbp);
10071 #endif	/* EMLXS_MODREV2X */
10072 
10073 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
10074 
10075 		if (rval == 0xff) {
10076 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
10077 			rval = FC_SUCCESS;
10078 		}
10079 
10080 		return (rval);
10081 	}
10082 
10083 	cp = &hba->chan[hba->channel_ct];
10084 	cp->ulpSendCmd++;
10085 
10086 	/* Initalize sbp */
10087 	mutex_enter(&sbp->mtx);
10088 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
10089 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
10090 	sbp->node = NULL;
10091 	sbp->lun = EMLXS_LUN_NONE;
10092 	sbp->class = iocb->ULPCLASS;
10093 	mutex_exit(&sbp->mtx);
10094 
10095 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg,
10096 	    "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ",
10097 	    emlxs_rmcmd_xlate(LE_SWAP16(
10098 	    CtCmd->CommandResponse.bits.CmdRsp)),
10099 	    CtCmd->ReasonCode, CtCmd->Explanation,
10100 	    LE_SWAP32(cmd[4]), LE_SWAP32(cmd[5]),
10101 	    pkt->pkt_cmd_fhdr.rx_id);
10102 
10103 	if (pkt->pkt_cmdlen) {
10104 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
10105 		    DDI_DMA_SYNC_FORDEV);
10106 	}
10107 
10108 	HBASTATS.CtRspIssued++;
10109 
10110 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
10111 
10112 	return (FC_SUCCESS);
10113 
10114 } /* emlxs_send_ct_rsp() */
10115 
10116 
10117 /*
10118  * emlxs_get_instance()
10119  * Given a ddi ddiinst, return a Fibre Channel (emlx) ddiinst.
10120  */
10121 extern uint32_t
emlxs_get_instance(int32_t ddiinst)10122 emlxs_get_instance(int32_t ddiinst)
10123 {
10124 	uint32_t i;
10125 	uint32_t inst;
10126 
10127 	mutex_enter(&emlxs_device.lock);
10128 
10129 	inst = MAX_FC_BRDS;
10130 	for (i = 0; i < emlxs_instance_count; i++) {
10131 		if (emlxs_instance[i] == ddiinst) {
10132 			inst = i;
10133 			break;
10134 		}
10135 	}
10136 
10137 	mutex_exit(&emlxs_device.lock);
10138 
10139 	return (inst);
10140 
10141 } /* emlxs_get_instance() */
10142 
10143 
10144 /*
10145  * emlxs_add_instance()
10146  * Given a ddi ddiinst, create a Fibre Channel (emlx) ddiinst.
10147  * emlx ddiinsts are the order that emlxs_attach gets called, starting at 0.
10148  */
10149 static uint32_t
emlxs_add_instance(int32_t ddiinst)10150 emlxs_add_instance(int32_t ddiinst)
10151 {
10152 	uint32_t i;
10153 
10154 	mutex_enter(&emlxs_device.lock);
10155 
10156 	/* First see if the ddiinst already exists */
10157 	for (i = 0; i < emlxs_instance_count; i++) {
10158 		if (emlxs_instance[i] == ddiinst) {
10159 			break;
10160 		}
10161 	}
10162 
10163 	/* If it doesn't already exist, add it */
10164 	if (i >= emlxs_instance_count) {
10165 		if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
10166 			emlxs_instance[i] = ddiinst;
10167 			emlxs_instance_count++;
10168 			emlxs_device.hba_count = emlxs_instance_count;
10169 		}
10170 	}
10171 
10172 	mutex_exit(&emlxs_device.lock);
10173 
10174 	return (i);
10175 
10176 } /* emlxs_add_instance() */
10177 
10178 
10179 /*ARGSUSED*/
10180 extern void
emlxs_pkt_complete(emlxs_buf_t * sbp,uint32_t iostat,uint8_t localstat,uint32_t doneq)10181 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
10182     uint32_t doneq)
10183 {
10184 	emlxs_hba_t	*hba;
10185 	emlxs_port_t	*port;
10186 	emlxs_buf_t	*fpkt;
10187 
10188 	port = sbp->port;
10189 
10190 	if (!port) {
10191 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
10192 		    "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);
10193 
10194 		return;
10195 	}
10196 
10197 	hba = HBA;
10198 
10199 	if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
10200 	    (sbp->iotag)) {
10201 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
10202 		    "WARNING: Completing IO with iotag. sbp=%p iotag=%d "
10203 		    "xri_flags=%x",
10204 		    sbp, sbp->iotag, ((sbp->xrip)? sbp->xrip->flag:0));
10205 
10206 		emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
10207 	}
10208 
10209 	mutex_enter(&sbp->mtx);
10210 
10211 	/* Check for error conditions */
10212 	if (sbp->pkt_flags & (PACKET_ULP_OWNED | PACKET_COMPLETED |
10213 	    PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
10214 	    PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
10215 		if (sbp->pkt_flags & PACKET_ULP_OWNED) {
10216 			EMLXS_MSGF(EMLXS_CONTEXT,
10217 			    &emlxs_pkt_completion_error_msg,
10218 			    "Packet already returned. sbp=%p flags=%x", sbp,
10219 			    sbp->pkt_flags);
10220 		}
10221 
10222 		else if (sbp->pkt_flags & PACKET_COMPLETED) {
10223 			EMLXS_MSGF(EMLXS_CONTEXT,
10224 			    &emlxs_pkt_completion_error_msg,
10225 			    "Packet already completed. sbp=%p flags=%x", sbp,
10226 			    sbp->pkt_flags);
10227 		}
10228 
10229 		else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
10230 			EMLXS_MSGF(EMLXS_CONTEXT,
10231 			    &emlxs_pkt_completion_error_msg,
10232 			    "Pkt already on done queue. sbp=%p flags=%x", sbp,
10233 			    sbp->pkt_flags);
10234 		}
10235 
10236 		else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
10237 			EMLXS_MSGF(EMLXS_CONTEXT,
10238 			    &emlxs_pkt_completion_error_msg,
10239 			    "Packet already in completion. sbp=%p flags=%x",
10240 			    sbp, sbp->pkt_flags);
10241 		}
10242 
10243 		else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
10244 			EMLXS_MSGF(EMLXS_CONTEXT,
10245 			    &emlxs_pkt_completion_error_msg,
10246 			    "Packet still on chip queue. sbp=%p flags=%x",
10247 			    sbp, sbp->pkt_flags);
10248 		}
10249 
10250 		else if (sbp->pkt_flags & PACKET_IN_TXQ) {
10251 			EMLXS_MSGF(EMLXS_CONTEXT,
10252 			    &emlxs_pkt_completion_error_msg,
10253 			    "Packet still on tx queue. sbp=%p flags=%x", sbp,
10254 			    sbp->pkt_flags);
10255 		}
10256 
10257 		mutex_exit(&sbp->mtx);
10258 		return;
10259 	}
10260 
10261 	/* Packet is now in completion */
10262 	sbp->pkt_flags |= PACKET_IN_COMPLETION;
10263 
10264 	/* Set the state if not already set */
10265 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
10266 		emlxs_set_pkt_state(sbp, iostat, localstat, 0);
10267 	}
10268 
10269 	/* Check for parent flush packet */
10270 	/* If pkt has a parent flush packet then adjust its count now */
10271 	fpkt = sbp->fpkt;
10272 	if (fpkt) {
10273 		/*
10274 		 * We will try to NULL sbp->fpkt inside the
10275 		 * fpkt's mutex if possible
10276 		 */
10277 
10278 		if (!(fpkt->pkt_flags & PACKET_ULP_OWNED)) {
10279 			mutex_enter(&fpkt->mtx);
10280 			if (fpkt->flush_count) {
10281 				fpkt->flush_count--;
10282 			}
10283 			sbp->fpkt = NULL;
10284 			mutex_exit(&fpkt->mtx);
10285 		} else {	/* fpkt has been returned already */
10286 
10287 			sbp->fpkt = NULL;
10288 		}
10289 	}
10290 
10291 	/* If pkt is polled, then wake up sleeping thread */
10292 	if (sbp->pkt_flags & PACKET_POLLED) {
10293 		/* Don't set the PACKET_ULP_OWNED flag here */
10294 		/* because the polling thread will do it */
10295 		sbp->pkt_flags |= PACKET_COMPLETED;
10296 		mutex_exit(&sbp->mtx);
10297 
10298 		/* Wake up sleeping thread */
10299 		mutex_enter(&EMLXS_PKT_LOCK);
10300 		cv_broadcast(&EMLXS_PKT_CV);
10301 		mutex_exit(&EMLXS_PKT_LOCK);
10302 	}
10303 
10304 	/* If packet was generated by our driver, */
10305 	/* then complete it immediately */
10306 	else if (sbp->pkt_flags & PACKET_ALLOCATED) {
10307 		mutex_exit(&sbp->mtx);
10308 
10309 		emlxs_iodone(sbp);
10310 	}
10311 
10312 	/* Put the pkt on the done queue for callback */
10313 	/* completion in another thread */
10314 	else {
10315 		sbp->pkt_flags |= PACKET_IN_DONEQ;
10316 		sbp->next = NULL;
10317 		mutex_exit(&sbp->mtx);
10318 
10319 		/* Put pkt on doneq, so I/O's will be completed in order */
10320 		mutex_enter(&EMLXS_PORT_LOCK);
10321 		if (hba->iodone_tail == NULL) {
10322 			hba->iodone_list = sbp;
10323 			hba->iodone_count = 1;
10324 		} else {
10325 			hba->iodone_tail->next = sbp;
10326 			hba->iodone_count++;
10327 		}
10328 		hba->iodone_tail = sbp;
10329 		mutex_exit(&EMLXS_PORT_LOCK);
10330 
10331 		/* Trigger a thread to service the doneq */
10332 		emlxs_thread_trigger1(&hba->iodone_thread,
10333 		    emlxs_iodone_server);
10334 	}
10335 
10336 	return;
10337 
10338 } /* emlxs_pkt_complete() */
10339 
10340 
10341 #ifdef SAN_DIAG_SUPPORT
10342 /*
10343  * This routine is called with EMLXS_PORT_LOCK held so we can just increment
10344  * normally. Don't have to use atomic operations.
10345  */
10346 extern void
emlxs_update_sd_bucket(emlxs_buf_t * sbp)10347 emlxs_update_sd_bucket(emlxs_buf_t *sbp)
10348 {
10349 	emlxs_port_t	*vport;
10350 	fc_packet_t	*pkt;
10351 	uint32_t	did;
10352 	hrtime_t	t;
10353 	hrtime_t	delta_time;
10354 	int		i;
10355 	NODELIST	*ndlp;
10356 
10357 	vport = sbp->port;
10358 
10359 	if ((emlxs_sd_bucket.search_type == 0) ||
10360 	    (vport->sd_io_latency_state != SD_COLLECTING)) {
10361 		return;
10362 	}
10363 
10364 	/* Compute the iolatency time in microseconds */
10365 	t = gethrtime();
10366 	delta_time = t - sbp->sd_start_time;
10367 	pkt = PRIV2PKT(sbp);
10368 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
10369 	ndlp = emlxs_node_find_did(vport, did, 1);
10370 
10371 	if (!ndlp) {
10372 		return;
10373 	}
10374 
10375 	if (delta_time >=
10376 	    emlxs_sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1]) {
10377 		ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1].
10378 		    count++;
10379 	} else if (delta_time <= emlxs_sd_bucket.values[0]) {
10380 		ndlp->sd_dev_bucket[0].count++;
10381 	} else {
10382 		for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) {
10383 			if ((delta_time > emlxs_sd_bucket.values[i-1]) &&
10384 			    (delta_time <= emlxs_sd_bucket.values[i])) {
10385 				ndlp->sd_dev_bucket[i].count++;
10386 				break;
10387 			}
10388 		}
10389 	}
10390 
10391 	return;
10392 
10393 } /* emlxs_update_sd_bucket() */
10394 #endif /* SAN_DIAG_SUPPORT */
10395 
10396 /*ARGSUSED*/
10397 static void
emlxs_iodone_server(void * arg1,void * arg2,void * arg3)10398 emlxs_iodone_server(void *arg1, void *arg2, void *arg3)
10399 {
10400 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
10401 	emlxs_buf_t *sbp;
10402 
10403 	mutex_enter(&EMLXS_PORT_LOCK);
10404 
10405 	/* Remove one pkt from the doneq head and complete it */
10406 	while ((sbp = hba->iodone_list) != NULL) {
10407 		if ((hba->iodone_list = sbp->next) == NULL) {
10408 			hba->iodone_tail = NULL;
10409 			hba->iodone_count = 0;
10410 		} else {
10411 			hba->iodone_count--;
10412 		}
10413 
10414 		mutex_exit(&EMLXS_PORT_LOCK);
10415 
10416 		/* Prepare the pkt for completion */
10417 		mutex_enter(&sbp->mtx);
10418 		sbp->next = NULL;
10419 		sbp->pkt_flags &= ~PACKET_IN_DONEQ;
10420 		mutex_exit(&sbp->mtx);
10421 
10422 		/* Complete the IO now */
10423 		emlxs_iodone(sbp);
10424 
10425 		/* Reacquire lock and check if more work is to be done */
10426 		mutex_enter(&EMLXS_PORT_LOCK);
10427 	}
10428 
10429 	mutex_exit(&EMLXS_PORT_LOCK);
10430 
10431 #ifdef FMA_SUPPORT
10432 	if (hba->flag & FC_DMA_CHECK_ERROR) {
10433 		emlxs_thread_spawn(hba, emlxs_restart_thread,
10434 		    NULL, NULL);
10435 	}
10436 #endif /* FMA_SUPPORT */
10437 
10438 	return;
10439 
10440 } /* End emlxs_iodone_server */
10441 
10442 
10443 static void
emlxs_iodone(emlxs_buf_t * sbp)10444 emlxs_iodone(emlxs_buf_t *sbp)
10445 {
10446 #ifdef FMA_SUPPORT
10447 	emlxs_port_t	*port = sbp->port;
10448 	emlxs_hba_t	*hba = port->hba;
10449 #endif  /* FMA_SUPPORT */
10450 
10451 	fc_packet_t	*pkt;
10452 	CHANNEL		*cp;
10453 
10454 	pkt = PRIV2PKT(sbp);
10455 
10456 	/* Check one more time that the  pkt has not already been returned */
10457 	if (sbp->pkt_flags & PACKET_ULP_OWNED) {
10458 		return;
10459 	}
10460 
10461 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
10462 	emlxs_unswap_pkt(sbp);
10463 #endif	/* EMLXS_MODREV2X */
10464 
10465 	mutex_enter(&sbp->mtx);
10466 	sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_ULP_OWNED);
10467 	mutex_exit(&sbp->mtx);
10468 
10469 	if (pkt->pkt_comp) {
10470 #ifdef FMA_SUPPORT
10471 		emlxs_check_dma(hba, sbp);
10472 #endif  /* FMA_SUPPORT */
10473 
10474 		if (sbp->channel) {
10475 			cp = (CHANNEL *)sbp->channel;
10476 			cp->ulpCmplCmd++;
10477 		}
10478 
10479 		(*pkt->pkt_comp) (pkt);
10480 	}
10481 
10482 	return;
10483 
10484 } /* emlxs_iodone() */
10485 
10486 
10487 
10488 extern fc_unsol_buf_t *
emlxs_ub_find(emlxs_port_t * port,uint32_t token)10489 emlxs_ub_find(emlxs_port_t *port, uint32_t token)
10490 {
10491 	emlxs_unsol_buf_t	*pool;
10492 	fc_unsol_buf_t		*ubp;
10493 	emlxs_ub_priv_t		*ub_priv;
10494 
10495 	/* Check if this is a valid ub token */
10496 	if (token < EMLXS_UB_TOKEN_OFFSET) {
10497 		return (NULL);
10498 	}
10499 
10500 	mutex_enter(&EMLXS_UB_LOCK);
10501 
10502 	pool = port->ub_pool;
10503 	while (pool) {
10504 		/* Find a pool with the proper token range */
10505 		if (token >= pool->pool_first_token &&
10506 		    token <= pool->pool_last_token) {
10507 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token -
10508 			    pool->pool_first_token)];
10509 			ub_priv = ubp->ub_fca_private;
10510 
10511 			if (ub_priv->token != token) {
10512 				EMLXS_MSGF(EMLXS_CONTEXT,
10513 				    &emlxs_sfs_debug_msg,
10514 				    "ub_find: Invalid token=%x", ubp, token,
10515 				    ub_priv->token);
10516 
10517 				ubp = NULL;
10518 			}
10519 
10520 			else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
10521 				EMLXS_MSGF(EMLXS_CONTEXT,
10522 				    &emlxs_sfs_debug_msg,
10523 				    "ub_find: Buffer not in use. buffer=%p "
10524 				    "token=%x", ubp, token);
10525 
10526 				ubp = NULL;
10527 			}
10528 
10529 			mutex_exit(&EMLXS_UB_LOCK);
10530 
10531 			return (ubp);
10532 		}
10533 
10534 		pool = pool->pool_next;
10535 	}
10536 
10537 	mutex_exit(&EMLXS_UB_LOCK);
10538 
10539 	return (NULL);
10540 
10541 } /* emlxs_ub_find() */
10542 
10543 
10544 
10545 extern fc_unsol_buf_t *
emlxs_ub_get(emlxs_port_t * port,uint32_t size,uint32_t type,uint32_t reserve)10546 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type,
10547     uint32_t reserve)
10548 {
10549 	emlxs_hba_t		*hba = HBA;
10550 	emlxs_unsol_buf_t	*pool;
10551 	fc_unsol_buf_t		*ubp;
10552 	emlxs_ub_priv_t		*ub_priv;
10553 	uint32_t		i;
10554 	uint32_t		resv_flag;
10555 	uint32_t		pool_free;
10556 	uint32_t		pool_free_resv;
10557 
10558 	mutex_enter(&EMLXS_UB_LOCK);
10559 
10560 	pool = port->ub_pool;
10561 	while (pool) {
10562 		/* Find a pool of the appropriate type and size */
10563 		if ((pool->pool_available == 0) ||
10564 		    (pool->pool_type != type) ||
10565 		    (pool->pool_buf_size < size)) {
10566 			goto next_pool;
10567 		}
10568 
10569 
10570 		/* Adjust free counts based on availablity    */
10571 		/* The free reserve count gets first priority */
10572 		pool_free_resv =
10573 		    min(pool->pool_free_resv, pool->pool_available);
10574 		pool_free =
10575 		    min(pool->pool_free,
10576 		    (pool->pool_available - pool_free_resv));
10577 
10578 		/* Initialize reserve flag */
10579 		resv_flag = reserve;
10580 
10581 		if (resv_flag) {
10582 			if (pool_free_resv == 0) {
10583 				if (pool_free == 0) {
10584 					goto next_pool;
10585 				}
10586 				resv_flag = 0;
10587 			}
10588 		} else if (pool_free == 0) {
10589 			goto next_pool;
10590 		}
10591 
10592 		/* Find next available free buffer in this pool */
10593 		for (i = 0; i < pool->pool_nentries; i++) {
10594 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
10595 			ub_priv = ubp->ub_fca_private;
10596 
10597 			if (!ub_priv->available ||
10598 			    ub_priv->flags != EMLXS_UB_FREE) {
10599 				continue;
10600 			}
10601 
10602 			ub_priv->time = hba->timer_tics;
10603 
10604 			/* Timeout in 5 minutes */
10605 			ub_priv->timeout = (5 * 60);
10606 
10607 			ub_priv->flags = EMLXS_UB_IN_USE;
10608 
10609 			/* Alloc the buffer from the pool */
10610 			if (resv_flag) {
10611 				ub_priv->flags |= EMLXS_UB_RESV;
10612 				pool->pool_free_resv--;
10613 			} else {
10614 				pool->pool_free--;
10615 			}
10616 
10617 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
10618 			    "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
10619 			    ub_priv->token, pool->pool_nentries,
10620 			    pool->pool_available, pool->pool_free,
10621 			    pool->pool_free_resv);
10622 
10623 			mutex_exit(&EMLXS_UB_LOCK);
10624 
10625 			return (ubp);
10626 		}
10627 next_pool:
10628 
10629 		pool = pool->pool_next;
10630 	}
10631 
10632 	mutex_exit(&EMLXS_UB_LOCK);
10633 
10634 	return (NULL);
10635 
10636 } /* emlxs_ub_get() */
10637 
10638 
10639 
10640 extern void
emlxs_set_pkt_state(emlxs_buf_t * sbp,uint32_t iostat,uint8_t localstat,uint32_t lock)10641 emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
10642     uint32_t lock)
10643 {
10644 	fc_packet_t		*pkt;
10645 	fcp_rsp_t		*fcp_rsp;
10646 	uint32_t		i;
10647 	emlxs_xlat_err_t	*tptr;
10648 	emlxs_xlat_err_t	*entry;
10649 
10650 
10651 	pkt = PRIV2PKT(sbp);
10652 
10653 	/* Warning: Some FCT sbp's don't have */
10654 	/* fc_packet objects, so just return  */
10655 	if (!pkt) {
10656 		return;
10657 	}
10658 
10659 	if (lock) {
10660 		mutex_enter(&sbp->mtx);
10661 	}
10662 
10663 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
10664 		sbp->pkt_flags |= PACKET_STATE_VALID;
10665 
10666 		/* Perform table lookup */
10667 		entry = NULL;
10668 		if (iostat != IOSTAT_LOCAL_REJECT) {
10669 			tptr = emlxs_iostat_tbl;
10670 			for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
10671 				if (iostat == tptr->emlxs_status) {
10672 					entry = tptr;
10673 					break;
10674 		}
10675 			}
10676 		} else {	/* iostate == IOSTAT_LOCAL_REJECT */
10677 
10678 			tptr = emlxs_ioerr_tbl;
10679 			for (i = 0; i < IOERR_MAX; i++, tptr++) {
10680 				if (localstat == tptr->emlxs_status) {
10681 					entry = tptr;
10682 					break;
10683 		}
10684 			}
10685 		}
10686 
10687 		if (entry) {
10688 			pkt->pkt_state  = entry->pkt_state;
10689 			pkt->pkt_reason = entry->pkt_reason;
10690 			pkt->pkt_expln  = entry->pkt_expln;
10691 			pkt->pkt_action = entry->pkt_action;
10692 		} else {
10693 			/* Set defaults */
10694 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
10695 			pkt->pkt_reason = FC_REASON_ABORTED;
10696 			pkt->pkt_expln  = FC_EXPLN_NONE;
10697 			pkt->pkt_action = FC_ACTION_RETRYABLE;
10698 		}
10699 
10700 
10701 		/* Set the residual counts and response frame */
10702 		/* Check if response frame was received from the chip */
10703 		/* If so, then the residual counts will already be set */
10704 		if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
10705 		    PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
10706 			/* We have to create the response frame */
10707 			if (iostat == IOSTAT_SUCCESS) {
10708 				pkt->pkt_resp_resid = 0;
10709 				pkt->pkt_data_resid = 0;
10710 
10711 				if ((pkt->pkt_cmd_fhdr.type ==
10712 				    FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen &&
10713 				    pkt->pkt_resp) {
10714 					fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;
10715 
10716 					fcp_rsp->fcp_u.fcp_status.
10717 					    rsp_len_set = 1;
10718 					fcp_rsp->fcp_response_len = 8;
10719 				}
10720 			} else {
10721 				/* Otherwise assume no data */
10722 				/* and no response received */
10723 				pkt->pkt_data_resid = pkt->pkt_datalen;
10724 				pkt->pkt_resp_resid = pkt->pkt_rsplen;
10725 			}
10726 		}
10727 	}
10728 
10729 	if (lock) {
10730 		mutex_exit(&sbp->mtx);
10731 	}
10732 
10733 	return;
10734 
10735 } /* emlxs_set_pkt_state() */
10736 
10737 
10738 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
10739 
10740 extern void
emlxs_swap_service_params(SERV_PARM * sp)10741 emlxs_swap_service_params(SERV_PARM *sp)
10742 {
10743 	uint16_t	*p;
10744 	int		size;
10745 	int		i;
10746 
10747 	size = (sizeof (CSP) - 4) / 2;
10748 	p = (uint16_t *)&sp->cmn;
10749 	for (i = 0; i < size; i++) {
10750 		p[i] = LE_SWAP16(p[i]);
10751 	}
10752 	sp->cmn.e_d_tov = LE_SWAP32(sp->cmn.e_d_tov);
10753 
10754 	size = sizeof (CLASS_PARMS) / 2;
10755 	p = (uint16_t *)&sp->cls1;
10756 	for (i = 0; i < size; i++, p++) {
10757 		*p = LE_SWAP16(*p);
10758 	}
10759 
10760 	size = sizeof (CLASS_PARMS) / 2;
10761 	p = (uint16_t *)&sp->cls2;
10762 	for (i = 0; i < size; i++, p++) {
10763 		*p = LE_SWAP16(*p);
10764 	}
10765 
10766 	size = sizeof (CLASS_PARMS) / 2;
10767 	p = (uint16_t *)&sp->cls3;
10768 	for (i = 0; i < size; i++, p++) {
10769 		*p = LE_SWAP16(*p);
10770 	}
10771 
10772 	size = sizeof (CLASS_PARMS) / 2;
10773 	p = (uint16_t *)&sp->cls4;
10774 	for (i = 0; i < size; i++, p++) {
10775 		*p = LE_SWAP16(*p);
10776 	}
10777 
10778 	return;
10779 
10780 } /* emlxs_swap_service_params() */
10781 
10782 extern void
emlxs_unswap_pkt(emlxs_buf_t * sbp)10783 emlxs_unswap_pkt(emlxs_buf_t *sbp)
10784 {
10785 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
10786 		emlxs_swap_fcp_pkt(sbp);
10787 	}
10788 
10789 	else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
10790 		emlxs_swap_els_pkt(sbp);
10791 	}
10792 
10793 	else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
10794 		emlxs_swap_ct_pkt(sbp);
10795 	}
10796 
10797 } /* emlxs_unswap_pkt() */
10798 
10799 
10800 extern void
emlxs_swap_fcp_pkt(emlxs_buf_t * sbp)10801 emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
10802 {
10803 	fc_packet_t	*pkt;
10804 	FCP_CMND	*cmd;
10805 	fcp_rsp_t	*rsp;
10806 	uint16_t	*lunp;
10807 	uint32_t	i;
10808 
10809 	mutex_enter(&sbp->mtx);
10810 
10811 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
10812 		mutex_exit(&sbp->mtx);
10813 		return;
10814 	}
10815 
10816 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
10817 		sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
10818 	} else {
10819 		sbp->pkt_flags |= PACKET_FCP_SWAPPED;
10820 	}
10821 
10822 	mutex_exit(&sbp->mtx);
10823 
10824 	pkt = PRIV2PKT(sbp);
10825 
10826 	cmd = (FCP_CMND *)pkt->pkt_cmd;
10827 	rsp = (pkt->pkt_rsplen &&
10828 	    (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
10829 	    (fcp_rsp_t *)pkt->pkt_resp : NULL;
10830 
10831 	/* The size of data buffer needs to be swapped. */
10832 	cmd->fcpDl = LE_SWAP32(cmd->fcpDl);
10833 
10834 	/*
10835 	 * Swap first 2 words of FCP CMND payload.
10836 	 */
10837 	lunp = (uint16_t *)&cmd->fcpLunMsl;
10838 	for (i = 0; i < 4; i++) {
10839 		lunp[i] = LE_SWAP16(lunp[i]);
10840 	}
10841 
10842 	if (rsp) {
10843 		rsp->fcp_resid = LE_SWAP32(rsp->fcp_resid);
10844 		rsp->fcp_sense_len = LE_SWAP32(rsp->fcp_sense_len);
10845 		rsp->fcp_response_len = LE_SWAP32(rsp->fcp_response_len);
10846 	}
10847 
10848 	return;
10849 
10850 } /* emlxs_swap_fcp_pkt() */
10851 
10852 
10853 extern void
emlxs_swap_els_pkt(emlxs_buf_t * sbp)10854 emlxs_swap_els_pkt(emlxs_buf_t *sbp)
10855 {
10856 	fc_packet_t	*pkt;
10857 	uint32_t	*cmd;
10858 	uint32_t	*rsp;
10859 	uint32_t	command;
10860 	uint16_t	*c;
10861 	uint32_t	i;
10862 	uint32_t	swapped;
10863 
10864 	mutex_enter(&sbp->mtx);
10865 
10866 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
10867 		mutex_exit(&sbp->mtx);
10868 		return;
10869 	}
10870 
10871 	if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
10872 		sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
10873 		swapped = 1;
10874 	} else {
10875 		sbp->pkt_flags |= PACKET_ELS_SWAPPED;
10876 		swapped = 0;
10877 	}
10878 
10879 	mutex_exit(&sbp->mtx);
10880 
10881 	pkt = PRIV2PKT(sbp);
10882 
10883 	cmd = (uint32_t *)pkt->pkt_cmd;
10884 	rsp = (pkt->pkt_rsplen &&
10885 	    (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
10886 	    (uint32_t *)pkt->pkt_resp : NULL;
10887 
10888 	if (!swapped) {
10889 		cmd[0] = LE_SWAP32(cmd[0]);
10890 		command = cmd[0] & ELS_CMD_MASK;
10891 	} else {
10892 		command = cmd[0] & ELS_CMD_MASK;
10893 		cmd[0] = LE_SWAP32(cmd[0]);
10894 	}
10895 
10896 	if (rsp) {
10897 		rsp[0] = LE_SWAP32(rsp[0]);
10898 	}
10899 
10900 	switch (command) {
10901 	case ELS_CMD_ACC:
10902 		if (sbp->ucmd == ELS_CMD_ADISC) {
10903 			/* Hard address of originator */
10904 			cmd[1] = LE_SWAP32(cmd[1]);
10905 
10906 			/* N_Port ID of originator */
10907 			cmd[6] = LE_SWAP32(cmd[6]);
10908 		}
10909 		break;
10910 
10911 	case ELS_CMD_PLOGI:
10912 	case ELS_CMD_FLOGI:
10913 	case ELS_CMD_FDISC:
10914 		if (rsp) {
10915 			emlxs_swap_service_params((SERV_PARM *) & rsp[1]);
10916 		}
10917 		break;
10918 
10919 	case ELS_CMD_LOGO:
10920 		cmd[1] = LE_SWAP32(cmd[1]);	/* N_Port ID */
10921 		break;
10922 
10923 	case ELS_CMD_RLS:
10924 		cmd[1] = LE_SWAP32(cmd[1]);
10925 
10926 		if (rsp) {
10927 			for (i = 0; i < 6; i++) {
10928 				rsp[1 + i] = LE_SWAP32(rsp[1 + i]);
10929 			}
10930 		}
10931 		break;
10932 
10933 	case ELS_CMD_ADISC:
10934 		cmd[1] = LE_SWAP32(cmd[1]);	/* Hard address of originator */
10935 		cmd[6] = LE_SWAP32(cmd[6]);	/* N_Port ID of originator */
10936 		break;
10937 
10938 	case ELS_CMD_PRLI:
10939 		c = (uint16_t *)&cmd[1];
10940 		c[1] = LE_SWAP16(c[1]);
10941 
10942 		cmd[4] = LE_SWAP32(cmd[4]);
10943 
10944 		if (rsp) {
10945 			rsp[4] = LE_SWAP32(rsp[4]);
10946 		}
10947 		break;
10948 
10949 	case ELS_CMD_SCR:
10950 		cmd[1] = LE_SWAP32(cmd[1]);
10951 		break;
10952 
10953 	case ELS_CMD_LINIT:
10954 		if (rsp) {
10955 			rsp[1] = LE_SWAP32(rsp[1]);
10956 		}
10957 		break;
10958 
10959 	default:
10960 		break;
10961 	}
10962 
10963 	return;
10964 
10965 } /* emlxs_swap_els_pkt() */
10966 
10967 
10968 extern void
emlxs_swap_ct_pkt(emlxs_buf_t * sbp)10969 emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
10970 {
10971 	fc_packet_t	*pkt;
10972 	uint32_t	*cmd;
10973 	uint32_t	*rsp;
10974 	uint32_t	command;
10975 	uint32_t	i;
10976 	uint32_t	swapped;
10977 
10978 	mutex_enter(&sbp->mtx);
10979 
10980 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
10981 		mutex_exit(&sbp->mtx);
10982 		return;
10983 	}
10984 
10985 	if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
10986 		sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
10987 		swapped = 1;
10988 	} else {
10989 		sbp->pkt_flags |= PACKET_CT_SWAPPED;
10990 		swapped = 0;
10991 	}
10992 
10993 	mutex_exit(&sbp->mtx);
10994 
10995 	pkt = PRIV2PKT(sbp);
10996 
10997 	cmd = (uint32_t *)pkt->pkt_cmd;
10998 	rsp = (pkt->pkt_rsplen &&
10999 	    (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
11000 	    (uint32_t *)pkt->pkt_resp : NULL;
11001 
11002 	if (!swapped) {
11003 		cmd[0] = 0x01000000;
11004 		command = cmd[2];
11005 	}
11006 
11007 	cmd[0] = LE_SWAP32(cmd[0]);
11008 	cmd[1] = LE_SWAP32(cmd[1]);
11009 	cmd[2] = LE_SWAP32(cmd[2]);
11010 	cmd[3] = LE_SWAP32(cmd[3]);
11011 
11012 	if (swapped) {
11013 		command = cmd[2];
11014 	}
11015 
11016 	switch ((command >> 16)) {
11017 	case SLI_CTNS_GA_NXT:
11018 		cmd[4] = LE_SWAP32(cmd[4]);
11019 		break;
11020 
11021 	case SLI_CTNS_GPN_ID:
11022 	case SLI_CTNS_GNN_ID:
11023 	case SLI_CTNS_RPN_ID:
11024 	case SLI_CTNS_RNN_ID:
11025 	case SLI_CTNS_RSPN_ID:
11026 		cmd[4] = LE_SWAP32(cmd[4]);
11027 		break;
11028 
11029 	case SLI_CTNS_RCS_ID:
11030 	case SLI_CTNS_RPT_ID:
11031 		cmd[4] = LE_SWAP32(cmd[4]);
11032 		cmd[5] = LE_SWAP32(cmd[5]);
11033 		break;
11034 
11035 	case SLI_CTNS_RFT_ID:
11036 		cmd[4] = LE_SWAP32(cmd[4]);
11037 
11038 		/* Swap FC4 types */
11039 		for (i = 0; i < 8; i++) {
11040 			cmd[5 + i] = LE_SWAP32(cmd[5 + i]);
11041 		}
11042 		break;
11043 
11044 	case SLI_CTNS_GFT_ID:
11045 		if (rsp) {
11046 			/* Swap FC4 types */
11047 			for (i = 0; i < 8; i++) {
11048 				rsp[4 + i] = LE_SWAP32(rsp[4 + i]);
11049 			}
11050 		}
11051 		break;
11052 
11053 	case SLI_CTNS_GCS_ID:
11054 	case SLI_CTNS_GSPN_ID:
11055 	case SLI_CTNS_GSNN_NN:
11056 	case SLI_CTNS_GIP_NN:
11057 	case SLI_CTNS_GIPA_NN:
11058 
11059 	case SLI_CTNS_GPT_ID:
11060 	case SLI_CTNS_GID_NN:
11061 	case SLI_CTNS_GNN_IP:
11062 	case SLI_CTNS_GIPA_IP:
11063 	case SLI_CTNS_GID_FT:
11064 	case SLI_CTNS_GID_PT:
11065 	case SLI_CTNS_GID_PN:
11066 	case SLI_CTNS_RIP_NN:
11067 	case SLI_CTNS_RIPA_NN:
11068 	case SLI_CTNS_RSNN_NN:
11069 	case SLI_CTNS_DA_ID:
11070 	case SLI_CT_RESPONSE_FS_RJT:
11071 	case SLI_CT_RESPONSE_FS_ACC:
11072 
11073 	default:
11074 		break;
11075 	}
11076 	return;
11077 
11078 } /* emlxs_swap_ct_pkt() */
11079 
11080 
11081 extern void
emlxs_swap_els_ub(fc_unsol_buf_t * ubp)11082 emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
11083 {
11084 	emlxs_ub_priv_t	*ub_priv;
11085 	fc_rscn_t	*rscn;
11086 	uint32_t	count;
11087 	uint32_t	i;
11088 	uint32_t	*lp;
11089 	la_els_logi_t	*logi;
11090 
11091 	ub_priv = ubp->ub_fca_private;
11092 
11093 	switch (ub_priv->cmd) {
11094 	case ELS_CMD_RSCN:
11095 		rscn = (fc_rscn_t *)ubp->ub_buffer;
11096 
11097 		rscn->rscn_payload_len = LE_SWAP16(rscn->rscn_payload_len);
11098 
11099 		count = ((rscn->rscn_payload_len - 4) / 4);
11100 		lp = (uint32_t *)ubp->ub_buffer + 1;
11101 		for (i = 0; i < count; i++, lp++) {
11102 			*lp = LE_SWAP32(*lp);
11103 		}
11104 
11105 		break;
11106 
11107 	case ELS_CMD_FLOGI:
11108 	case ELS_CMD_PLOGI:
11109 	case ELS_CMD_FDISC:
11110 	case ELS_CMD_PDISC:
11111 		logi = (la_els_logi_t *)ubp->ub_buffer;
11112 		emlxs_swap_service_params(
11113 		    (SERV_PARM *)&logi->common_service);
11114 		break;
11115 
11116 		/* ULP handles this */
11117 	case ELS_CMD_LOGO:
11118 	case ELS_CMD_PRLI:
11119 	case ELS_CMD_PRLO:
11120 	case ELS_CMD_ADISC:
11121 	default:
11122 		break;
11123 	}
11124 
11125 	return;
11126 
11127 } /* emlxs_swap_els_ub() */
11128 
11129 
11130 #endif	/* EMLXS_MODREV2X */
11131 
11132 
11133 extern char *
emlxs_mode_xlate(uint32_t mode)11134 emlxs_mode_xlate(uint32_t mode)
11135 {
11136 	static char	buffer[32];
11137 	uint32_t	i;
11138 	uint32_t	count;
11139 
11140 	count = sizeof (emlxs_mode_table) / sizeof (emlxs_table_t);
11141 	for (i = 0; i < count; i++) {
11142 		if (mode == emlxs_mode_table[i].code) {
11143 			return (emlxs_mode_table[i].string);
11144 		}
11145 	}
11146 
11147 	(void) snprintf(buffer, sizeof (buffer), "Unknown (%x)", mode);
11148 	return (buffer);
11149 
11150 } /* emlxs_mode_xlate() */
11151 
11152 
11153 extern char *
emlxs_elscmd_xlate(uint32_t elscmd)11154 emlxs_elscmd_xlate(uint32_t elscmd)
11155 {
11156 	static char	buffer[32];
11157 	uint32_t	i;
11158 	uint32_t	count;
11159 
11160 	count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
11161 	for (i = 0; i < count; i++) {
11162 		if (elscmd == emlxs_elscmd_table[i].code) {
11163 			return (emlxs_elscmd_table[i].string);
11164 		}
11165 	}
11166 
11167 	(void) snprintf(buffer, sizeof (buffer), "ELS=0x%x", elscmd);
11168 	return (buffer);
11169 
11170 } /* emlxs_elscmd_xlate() */
11171 
11172 
11173 extern char *
emlxs_ctcmd_xlate(uint32_t ctcmd)11174 emlxs_ctcmd_xlate(uint32_t ctcmd)
11175 {
11176 	static char	buffer[32];
11177 	uint32_t	i;
11178 	uint32_t	count;
11179 
11180 	count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
11181 	for (i = 0; i < count; i++) {
11182 		if (ctcmd == emlxs_ctcmd_table[i].code) {
11183 			return (emlxs_ctcmd_table[i].string);
11184 		}
11185 	}
11186 
11187 	(void) snprintf(buffer, sizeof (buffer), "cmd=0x%x", ctcmd);
11188 	return (buffer);
11189 
11190 } /* emlxs_ctcmd_xlate() */
11191 
11192 
11193 #ifdef MENLO_SUPPORT
11194 extern char *
emlxs_menlo_cmd_xlate(uint32_t cmd)11195 emlxs_menlo_cmd_xlate(uint32_t cmd)
11196 {
11197 	static char	buffer[32];
11198 	uint32_t	i;
11199 	uint32_t	count;
11200 
11201 	count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
11202 	for (i = 0; i < count; i++) {
11203 		if (cmd == emlxs_menlo_cmd_table[i].code) {
11204 			return (emlxs_menlo_cmd_table[i].string);
11205 		}
11206 	}
11207 
11208 	(void) snprintf(buffer, sizeof (buffer), "Cmd=0x%x", cmd);
11209 	return (buffer);
11210 
11211 } /* emlxs_menlo_cmd_xlate() */
11212 
11213 extern char *
emlxs_menlo_rsp_xlate(uint32_t rsp)11214 emlxs_menlo_rsp_xlate(uint32_t rsp)
11215 {
11216 	static char	buffer[32];
11217 	uint32_t	i;
11218 	uint32_t	count;
11219 
11220 	count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
11221 	for (i = 0; i < count; i++) {
11222 		if (rsp == emlxs_menlo_rsp_table[i].code) {
11223 			return (emlxs_menlo_rsp_table[i].string);
11224 		}
11225 	}
11226 
11227 	(void) snprintf(buffer, sizeof (buffer), "Rsp=0x%x", rsp);
11228 	return (buffer);
11229 
11230 } /* emlxs_menlo_rsp_xlate() */
11231 
11232 #endif /* MENLO_SUPPORT */
11233 
11234 
11235 extern char *
emlxs_rmcmd_xlate(uint32_t rmcmd)11236 emlxs_rmcmd_xlate(uint32_t rmcmd)
11237 {
11238 	static char	buffer[32];
11239 	uint32_t	i;
11240 	uint32_t	count;
11241 
11242 	count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
11243 	for (i = 0; i < count; i++) {
11244 		if (rmcmd == emlxs_rmcmd_table[i].code) {
11245 			return (emlxs_rmcmd_table[i].string);
11246 		}
11247 	}
11248 
11249 	(void) snprintf(buffer, sizeof (buffer), "RM=0x%x", rmcmd);
11250 	return (buffer);
11251 
11252 } /* emlxs_rmcmd_xlate() */
11253 
11254 
11255 
11256 extern char *
emlxs_mscmd_xlate(uint16_t mscmd)11257 emlxs_mscmd_xlate(uint16_t mscmd)
11258 {
11259 	static char	buffer[32];
11260 	uint32_t	i;
11261 	uint32_t	count;
11262 
11263 	count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
11264 	for (i = 0; i < count; i++) {
11265 		if (mscmd == emlxs_mscmd_table[i].code) {
11266 			return (emlxs_mscmd_table[i].string);
11267 		}
11268 	}
11269 
11270 	(void) snprintf(buffer, sizeof (buffer), "Cmd=0x%x", mscmd);
11271 	return (buffer);
11272 
11273 } /* emlxs_mscmd_xlate() */
11274 
11275 
11276 extern char *
emlxs_state_xlate(uint8_t state)11277 emlxs_state_xlate(uint8_t state)
11278 {
11279 	static char	buffer[32];
11280 	uint32_t	i;
11281 	uint32_t	count;
11282 
11283 	count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
11284 	for (i = 0; i < count; i++) {
11285 		if (state == emlxs_state_table[i].code) {
11286 			return (emlxs_state_table[i].string);
11287 		}
11288 	}
11289 
11290 	(void) snprintf(buffer, sizeof (buffer), "State=0x%x", state);
11291 	return (buffer);
11292 
11293 } /* emlxs_state_xlate() */
11294 
11295 
11296 extern char *
emlxs_error_xlate(uint8_t errno)11297 emlxs_error_xlate(uint8_t errno)
11298 {
11299 	static char	buffer[32];
11300 	uint32_t	i;
11301 	uint32_t	count;
11302 
11303 	count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
11304 	for (i = 0; i < count; i++) {
11305 		if (errno == emlxs_error_table[i].code) {
11306 			return (emlxs_error_table[i].string);
11307 		}
11308 	}
11309 
11310 	(void) snprintf(buffer, sizeof (buffer), "Errno=0x%x", errno);
11311 	return (buffer);
11312 
11313 } /* emlxs_error_xlate() */
11314 
11315 
11316 static int
emlxs_pm_lower_power(dev_info_t * dip)11317 emlxs_pm_lower_power(dev_info_t *dip)
11318 {
11319 	int		ddiinst;
11320 	int		emlxinst;
11321 	emlxs_config_t	*cfg;
11322 	int32_t		rval;
11323 	emlxs_hba_t	*hba;
11324 
11325 	ddiinst = ddi_get_instance(dip);
11326 	emlxinst = emlxs_get_instance(ddiinst);
11327 	hba = emlxs_device.hba[emlxinst];
11328 	cfg = &CFG;
11329 
11330 	rval = DDI_SUCCESS;
11331 
11332 	/* Lower the power level */
11333 	if (cfg[CFG_PM_SUPPORT].current) {
11334 		rval =
11335 		    pm_lower_power(dip, EMLXS_PM_ADAPTER,
11336 		    EMLXS_PM_ADAPTER_DOWN);
11337 	} else {
11338 		/* We do not have kernel support of power management enabled */
11339 		/* therefore, call our power management routine directly */
11340 		rval =
11341 		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN);
11342 	}
11343 
11344 	return (rval);
11345 
11346 } /* emlxs_pm_lower_power() */
11347 
11348 
11349 static int
emlxs_pm_raise_power(dev_info_t * dip)11350 emlxs_pm_raise_power(dev_info_t *dip)
11351 {
11352 	int		ddiinst;
11353 	int		emlxinst;
11354 	emlxs_config_t	*cfg;
11355 	int32_t		rval;
11356 	emlxs_hba_t	*hba;
11357 
11358 	ddiinst = ddi_get_instance(dip);
11359 	emlxinst = emlxs_get_instance(ddiinst);
11360 	hba = emlxs_device.hba[emlxinst];
11361 	cfg = &CFG;
11362 
11363 	/* Raise the power level */
11364 	if (cfg[CFG_PM_SUPPORT].current) {
11365 		rval =
11366 		    pm_raise_power(dip, EMLXS_PM_ADAPTER,
11367 		    EMLXS_PM_ADAPTER_UP);
11368 	} else {
11369 		/* We do not have kernel support of power management enabled */
11370 		/* therefore, call our power management routine directly */
11371 		rval =
11372 		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
11373 	}
11374 
11375 	return (rval);
11376 
11377 } /* emlxs_pm_raise_power() */
11378 
11379 
11380 #ifdef IDLE_TIMER
11381 
11382 extern int
emlxs_pm_busy_component(emlxs_hba_t * hba)11383 emlxs_pm_busy_component(emlxs_hba_t *hba)
11384 {
11385 	emlxs_config_t	*cfg = &CFG;
11386 	int		rval;
11387 
11388 	hba->pm_active = 1;
11389 
11390 	if (hba->pm_busy) {
11391 		return (DDI_SUCCESS);
11392 	}
11393 
11394 	mutex_enter(&EMLXS_PM_LOCK);
11395 
11396 	if (hba->pm_busy) {
11397 		mutex_exit(&EMLXS_PM_LOCK);
11398 		return (DDI_SUCCESS);
11399 	}
11400 	hba->pm_busy = 1;
11401 
11402 	mutex_exit(&EMLXS_PM_LOCK);
11403 
11404 	/* Attempt to notify system that we are busy */
11405 	if (cfg[CFG_PM_SUPPORT].current) {
11406 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11407 		    "pm_busy_component.");
11408 
11409 		rval = pm_busy_component(dip, EMLXS_PM_ADAPTER);
11410 
11411 		if (rval != DDI_SUCCESS) {
11412 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11413 			    "pm_busy_component failed. ret=%d", rval);
11414 
11415 			/* If this attempt failed then clear our flags */
11416 			mutex_enter(&EMLXS_PM_LOCK);
11417 			hba->pm_busy = 0;
11418 			mutex_exit(&EMLXS_PM_LOCK);
11419 
11420 			return (rval);
11421 		}
11422 	}
11423 
11424 	return (DDI_SUCCESS);
11425 
11426 } /* emlxs_pm_busy_component() */
11427 
11428 
11429 extern int
emlxs_pm_idle_component(emlxs_hba_t * hba)11430 emlxs_pm_idle_component(emlxs_hba_t *hba)
11431 {
11432 	emlxs_config_t	*cfg = &CFG;
11433 	int		rval;
11434 
11435 	if (!hba->pm_busy) {
11436 		return (DDI_SUCCESS);
11437 	}
11438 
11439 	mutex_enter(&EMLXS_PM_LOCK);
11440 
11441 	if (!hba->pm_busy) {
11442 		mutex_exit(&EMLXS_PM_LOCK);
11443 		return (DDI_SUCCESS);
11444 	}
11445 	hba->pm_busy = 0;
11446 
11447 	mutex_exit(&EMLXS_PM_LOCK);
11448 
11449 	if (cfg[CFG_PM_SUPPORT].current) {
11450 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11451 		    "pm_idle_component.");
11452 
11453 		rval = pm_idle_component(dip, EMLXS_PM_ADAPTER);
11454 
11455 		if (rval != DDI_SUCCESS) {
11456 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11457 			    "pm_idle_component failed. ret=%d", rval);
11458 
11459 			/* If this attempt failed then */
11460 			/* reset our flags for another attempt */
11461 			mutex_enter(&EMLXS_PM_LOCK);
11462 			hba->pm_busy = 1;
11463 			mutex_exit(&EMLXS_PM_LOCK);
11464 
11465 			return (rval);
11466 		}
11467 	}
11468 
11469 	return (DDI_SUCCESS);
11470 
11471 } /* emlxs_pm_idle_component() */
11472 
11473 
11474 extern void
emlxs_pm_idle_timer(emlxs_hba_t * hba)11475 emlxs_pm_idle_timer(emlxs_hba_t *hba)
11476 {
11477 	emlxs_config_t *cfg = &CFG;
11478 
11479 	if (hba->pm_active) {
11480 		/* Clear active flag and reset idle timer */
11481 		mutex_enter(&EMLXS_PM_LOCK);
11482 		hba->pm_active = 0;
11483 		hba->pm_idle_timer =
11484 		    hba->timer_tics + cfg[CFG_PM_IDLE].current;
11485 		mutex_exit(&EMLXS_PM_LOCK);
11486 	}
11487 
11488 	/* Check for idle timeout */
11489 	else if (hba->timer_tics >= hba->pm_idle_timer) {
11490 		if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
11491 			mutex_enter(&EMLXS_PM_LOCK);
11492 			hba->pm_idle_timer =
11493 			    hba->timer_tics + cfg[CFG_PM_IDLE].current;
11494 			mutex_exit(&EMLXS_PM_LOCK);
11495 		}
11496 	}
11497 
11498 	return;
11499 
11500 } /* emlxs_pm_idle_timer() */
11501 
11502 #endif	/* IDLE_TIMER */
11503 
11504 
11505 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
11506 static void
emlxs_read_vport_prop(emlxs_hba_t * hba)11507 emlxs_read_vport_prop(emlxs_hba_t *hba)
11508 {
11509 	emlxs_port_t	*port = &PPORT;
11510 	emlxs_config_t	*cfg = &CFG;
11511 	char		**arrayp;
11512 	uint8_t		*s;
11513 	uint8_t		*np;
11514 	NAME_TYPE	pwwpn;
11515 	NAME_TYPE	wwnn;
11516 	NAME_TYPE	wwpn;
11517 	uint32_t	vpi;
11518 	uint32_t	cnt;
11519 	uint32_t	rval;
11520 	uint32_t	i;
11521 	uint32_t	j;
11522 	uint32_t	c1;
11523 	uint32_t	sum;
11524 	uint32_t	errors;
11525 	char		buffer[64];
11526 
11527 	/* Check for the per adapter vport setting */
11528 	(void) snprintf(buffer, sizeof (buffer), "%s%d-vport", DRIVER_NAME,
11529 	    hba->ddiinst);
11530 	cnt = 0;
11531 	arrayp = NULL;
11532 	rval =
11533 	    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
11534 	    (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);
11535 
11536 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
11537 		/* Check for the global vport setting */
11538 		cnt = 0;
11539 		arrayp = NULL;
11540 		rval =
11541 		    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
11542 		    (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
11543 	}
11544 
11545 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
11546 		return;
11547 	}
11548 
11549 	for (i = 0; i < cnt; i++) {
11550 		errors = 0;
11551 		s = (uint8_t *)arrayp[i];
11552 
11553 		if (!s) {
11554 			break;
11555 		}
11556 
11557 		np = (uint8_t *)&pwwpn;
11558 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
11559 			c1 = *s++;
11560 			if ((c1 >= '0') && (c1 <= '9')) {
11561 				sum = ((c1 - '0') << 4);
11562 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
11563 				sum = ((c1 - 'a' + 10) << 4);
11564 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
11565 				sum = ((c1 - 'A' + 10) << 4);
11566 			} else {
11567 				EMLXS_MSGF(EMLXS_CONTEXT,
11568 				    &emlxs_attach_debug_msg,
11569 				    "Config error: Invalid PWWPN found. "
11570 				    "entry=%d byte=%d hi_nibble=%c",
11571 				    i, j, c1);
11572 				errors++;
11573 			}
11574 
11575 			c1 = *s++;
11576 			if ((c1 >= '0') && (c1 <= '9')) {
11577 				sum |= (c1 - '0');
11578 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
11579 				sum |= (c1 - 'a' + 10);
11580 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
11581 				sum |= (c1 - 'A' + 10);
11582 			} else {
11583 				EMLXS_MSGF(EMLXS_CONTEXT,
11584 				    &emlxs_attach_debug_msg,
11585 				    "Config error: Invalid PWWPN found. "
11586 				    "entry=%d byte=%d lo_nibble=%c",
11587 				    i, j, c1);
11588 				errors++;
11589 			}
11590 
11591 			*np++ = (uint8_t)sum;
11592 		}
11593 
11594 		if (*s++ != ':') {
11595 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
11596 			    "Config error: Invalid delimiter after PWWPN. "
11597 			    "entry=%d", i);
11598 			goto out;
11599 		}
11600 
11601 		np = (uint8_t *)&wwnn;
11602 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
11603 			c1 = *s++;
11604 			if ((c1 >= '0') && (c1 <= '9')) {
11605 				sum = ((c1 - '0') << 4);
11606 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
11607 				sum = ((c1 - 'a' + 10) << 4);
11608 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
11609 				sum = ((c1 - 'A' + 10) << 4);
11610 			} else {
11611 				EMLXS_MSGF(EMLXS_CONTEXT,
11612 				    &emlxs_attach_debug_msg,
11613 				    "Config error: Invalid WWNN found. "
11614 				    "entry=%d byte=%d hi_nibble=%c",
11615 				    i, j, c1);
11616 				errors++;
11617 			}
11618 
11619 			c1 = *s++;
11620 			if ((c1 >= '0') && (c1 <= '9')) {
11621 				sum |= (c1 - '0');
11622 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
11623 				sum |= (c1 - 'a' + 10);
11624 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
11625 				sum |= (c1 - 'A' + 10);
11626 			} else {
11627 				EMLXS_MSGF(EMLXS_CONTEXT,
11628 				    &emlxs_attach_debug_msg,
11629 				    "Config error: Invalid WWNN found. "
11630 				    "entry=%d byte=%d lo_nibble=%c",
11631 				    i, j, c1);
11632 				errors++;
11633 			}
11634 
11635 			*np++ = (uint8_t)sum;
11636 		}
11637 
11638 		if (*s++ != ':') {
11639 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
11640 			    "Config error: Invalid delimiter after WWNN. "
11641 			    "entry=%d", i);
11642 			goto out;
11643 		}
11644 
11645 		np = (uint8_t *)&wwpn;
11646 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
11647 			c1 = *s++;
11648 			if ((c1 >= '0') && (c1 <= '9')) {
11649 				sum = ((c1 - '0') << 4);
11650 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
11651 				sum = ((c1 - 'a' + 10) << 4);
11652 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
11653 				sum = ((c1 - 'A' + 10) << 4);
11654 			} else {
11655 				EMLXS_MSGF(EMLXS_CONTEXT,
11656 				    &emlxs_attach_debug_msg,
11657 				    "Config error: Invalid WWPN found. "
11658 				    "entry=%d byte=%d hi_nibble=%c",
11659 				    i, j, c1);
11660 
11661 				errors++;
11662 			}
11663 
11664 			c1 = *s++;
11665 			if ((c1 >= '0') && (c1 <= '9')) {
11666 				sum |= (c1 - '0');
11667 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
11668 				sum |= (c1 - 'a' + 10);
11669 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
11670 				sum |= (c1 - 'A' + 10);
11671 			} else {
11672 				EMLXS_MSGF(EMLXS_CONTEXT,
11673 				    &emlxs_attach_debug_msg,
11674 				    "Config error: Invalid WWPN found. "
11675 				    "entry=%d byte=%d lo_nibble=%c",
11676 				    i, j, c1);
11677 
11678 				errors++;
11679 			}
11680 
11681 			*np++ = (uint8_t)sum;
11682 		}
11683 
11684 		if (*s++ != ':') {
11685 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
11686 			    "Config error: Invalid delimiter after WWPN. "
11687 			    "entry=%d", i);
11688 
11689 			goto out;
11690 		}
11691 
11692 		sum = 0;
11693 		do {
11694 			c1 = *s++;
11695 			if ((c1 < '0') || (c1 > '9')) {
11696 				EMLXS_MSGF(EMLXS_CONTEXT,
11697 				    &emlxs_attach_debug_msg,
11698 				    "Config error: Invalid VPI found. "
11699 				    "entry=%d c=%c vpi=%d", i, c1, sum);
11700 
11701 				goto out;
11702 			}
11703 
11704 			sum = (sum * 10) + (c1 - '0');
11705 
11706 		} while (*s != 0);
11707 
11708 		vpi = sum;
11709 
11710 		if (errors) {
11711 			continue;
11712 		}
11713 
11714 		/* Entry has been read */
11715 
11716 		/* Check if the physical port wwpn */
11717 		/* matches our physical port wwpn */
11718 		if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
11719 			continue;
11720 		}
11721 
11722 		/* Check vpi range */
11723 		if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
11724 			continue;
11725 		}
11726 
11727 		/* Check if port has already been configured */
11728 		if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
11729 			continue;
11730 		}
11731 
11732 		/* Set the highest configured vpi */
11733 		if (vpi > hba->vpi_high) {
11734 			hba->vpi_high = vpi;
11735 		}
11736 
11737 		bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
11738 		    sizeof (NAME_TYPE));
11739 		bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
11740 		    sizeof (NAME_TYPE));
11741 
11742 		if (hba->port[vpi].snn[0] == 0) {
11743 			(void) strncpy((caddr_t)hba->port[vpi].snn,
11744 			    (caddr_t)hba->snn,
11745 			    (sizeof (hba->port[vpi].snn)-1));
11746 		}
11747 
11748 		if (hba->port[vpi].spn[0] == 0) {
11749 			(void) snprintf((caddr_t)hba->port[vpi].spn,
11750 			    sizeof (hba->port[vpi].spn),
11751 			    "%s VPort-%d",
11752 			    (caddr_t)hba->spn, vpi);
11753 		}
11754 
11755 		hba->port[vpi].flag |=
11756 		    (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED);
11757 
11758 		if (cfg[CFG_VPORT_RESTRICTED].current) {
11759 			hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
11760 		}
11761 	}
11762 
11763 out:
11764 
11765 	(void) ddi_prop_free((void *) arrayp);
11766 	return;
11767 
11768 } /* emlxs_read_vport_prop() */
11769 #endif	/* EMLXS_MODREV3 || EMLXS_MODREV4 */
11770 
11771 
11772 extern char *
emlxs_wwn_xlate(char * buffer,size_t len,uint8_t * wwn)11773 emlxs_wwn_xlate(char *buffer, size_t len, uint8_t *wwn)
11774 {
11775 	(void) snprintf(buffer, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
11776 	    wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
11777 	    wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);
11778 
11779 	return (buffer);
11780 
11781 } /* emlxs_wwn_xlate() */
11782 
11783 
11784 extern int32_t
emlxs_wwn_cmp(uint8_t * wwn1,uint8_t * wwn2)11785 emlxs_wwn_cmp(uint8_t *wwn1, uint8_t *wwn2)
11786 {
11787 	uint32_t i;
11788 
11789 	for (i = 0; i < 8; i ++, wwn1 ++, wwn2 ++) {
11790 		if (*wwn1 > *wwn2) {
11791 			return (1);
11792 		}
11793 		if (*wwn1 < *wwn2) {
11794 			return (-1);
11795 		}
11796 	}
11797 
11798 	return (0);
11799 
11800 } /* emlxs_wwn_cmp() */
11801 
11802 
11803 /* This is called at port online and offline */
11804 extern void
emlxs_ub_flush(emlxs_port_t * port)11805 emlxs_ub_flush(emlxs_port_t *port)
11806 {
11807 	emlxs_hba_t	*hba = HBA;
11808 	fc_unsol_buf_t	*ubp;
11809 	emlxs_ub_priv_t	*ub_priv;
11810 	emlxs_ub_priv_t	*next;
11811 
11812 	/* Return if nothing to do */
11813 	if (!port->ub_wait_head) {
11814 		return;
11815 	}
11816 
11817 	mutex_enter(&EMLXS_PORT_LOCK);
11818 	ub_priv = port->ub_wait_head;
11819 	port->ub_wait_head = NULL;
11820 	port->ub_wait_tail = NULL;
11821 	mutex_exit(&EMLXS_PORT_LOCK);
11822 
11823 	while (ub_priv) {
11824 		next = ub_priv->next;
11825 		ubp = ub_priv->ubp;
11826 
11827 		/* Check if ULP is online and we have a callback function */
11828 		if (port->ulp_statec != FC_STATE_OFFLINE) {
11829 			/* Send ULP the ub buffer */
11830 			emlxs_ulp_unsol_cb(port, ubp);
11831 		} else {	/* Drop the buffer */
11832 			(void) emlxs_fca_ub_release(port, 1, &ubp->ub_token);
11833 		}
11834 
11835 		ub_priv = next;
11836 
11837 	}	/* while () */
11838 
11839 	return;
11840 
11841 } /* emlxs_ub_flush() */
11842 
11843 
11844 extern void
emlxs_ub_callback(emlxs_port_t * port,fc_unsol_buf_t * ubp)11845 emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
11846 {
11847 	emlxs_hba_t	*hba = HBA;
11848 	emlxs_ub_priv_t	*ub_priv;
11849 
11850 	ub_priv = ubp->ub_fca_private;
11851 
11852 	/* Check if ULP is online */
11853 	if (port->ulp_statec != FC_STATE_OFFLINE) {
11854 		emlxs_ulp_unsol_cb(port, ubp);
11855 
11856 	} else {	/* ULP offline */
11857 
11858 		if (hba->state >= FC_LINK_UP) {
11859 			/* Add buffer to queue tail */
11860 			mutex_enter(&EMLXS_PORT_LOCK);
11861 
11862 			if (port->ub_wait_tail) {
11863 				port->ub_wait_tail->next = ub_priv;
11864 			}
11865 			port->ub_wait_tail = ub_priv;
11866 
11867 			if (!port->ub_wait_head) {
11868 				port->ub_wait_head = ub_priv;
11869 			}
11870 
11871 			mutex_exit(&EMLXS_PORT_LOCK);
11872 		} else {
11873 			(void) emlxs_fca_ub_release(port, 1, &ubp->ub_token);
11874 		}
11875 	}
11876 
11877 	return;
11878 
11879 } /* emlxs_ub_callback() */
11880 
11881 
11882 extern void
emlxs_fca_link_up(emlxs_port_t * port)11883 emlxs_fca_link_up(emlxs_port_t *port)
11884 {
11885 	emlxs_ulp_statec_cb(port, port->ulp_statec);
11886 	return;
11887 
11888 } /* emlxs_fca_link_up() */
11889 
11890 
11891 extern void
emlxs_fca_link_down(emlxs_port_t * port)11892 emlxs_fca_link_down(emlxs_port_t *port)
11893 {
11894 	emlxs_ulp_statec_cb(port, FC_STATE_OFFLINE);
11895 	return;
11896 
11897 } /* emlxs_fca_link_down() */
11898 
11899 
11900 static uint32_t
emlxs_integrity_check(emlxs_hba_t * hba)11901 emlxs_integrity_check(emlxs_hba_t *hba)
11902 {
11903 	uint32_t size;
11904 	uint32_t errors = 0;
11905 	int ddiinst = hba->ddiinst;
11906 
11907 	size = 16;
11908 	if (sizeof (ULP_BDL) != size) {
11909 		cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect.  %d != 16",
11910 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));
11911 
11912 		errors++;
11913 	}
11914 	size = 8;
11915 	if (sizeof (ULP_BDE) != size) {
11916 		cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect.  %d != 8",
11917 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));
11918 
11919 		errors++;
11920 	}
11921 	size = 12;
11922 	if (sizeof (ULP_BDE64) != size) {
11923 		cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect.  %d != 12",
11924 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));
11925 
11926 		errors++;
11927 	}
11928 	size = 16;
11929 	if (sizeof (HBQE_t) != size) {
11930 		cmn_err(CE_WARN, "?%s%d: HBQE size incorrect.  %d != 16",
11931 		    DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));
11932 
11933 		errors++;
11934 	}
11935 	size = 8;
11936 	if (sizeof (HGP) != size) {
11937 		cmn_err(CE_WARN, "?%s%d: HGP size incorrect.  %d != 8",
11938 		    DRIVER_NAME, ddiinst, (int)sizeof (HGP));
11939 
11940 		errors++;
11941 	}
11942 	if (sizeof (PGP) != size) {
11943 		cmn_err(CE_WARN, "?%s%d: PGP size incorrect.  %d != 8",
11944 		    DRIVER_NAME, ddiinst, (int)sizeof (PGP));
11945 
11946 		errors++;
11947 	}
11948 	size = 4;
11949 	if (sizeof (WORD5) != size) {
11950 		cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect.  %d != 4",
11951 		    DRIVER_NAME, ddiinst, (int)sizeof (WORD5));
11952 
11953 		errors++;
11954 	}
11955 	size = 124;
11956 	if (sizeof (MAILVARIANTS) != size) {
11957 		cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect.  "
11958 		    "%d != 124", DRIVER_NAME, ddiinst,
11959 		    (int)sizeof (MAILVARIANTS));
11960 
11961 		errors++;
11962 	}
11963 	size = 128;
11964 	if (sizeof (SLI1_DESC) != size) {
11965 		cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect.  %d != 128",
11966 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));
11967 
11968 		errors++;
11969 	}
11970 	if (sizeof (SLI2_DESC) != size) {
11971 		cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect.  %d != 128",
11972 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));
11973 
11974 		errors++;
11975 	}
11976 	size = MBOX_SIZE;
11977 	if (sizeof (MAILBOX) != size) {
11978 		cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect.  %d != %d",
11979 		    DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);
11980 
11981 		errors++;
11982 	}
11983 	size = PCB_SIZE;
11984 	if (sizeof (PCB) != size) {
11985 		cmn_err(CE_WARN, "?%s%d: PCB size incorrect.  %d != %d",
11986 		    DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);
11987 
11988 		errors++;
11989 	}
11990 	size = 260;
11991 	if (sizeof (ATTRIBUTE_ENTRY) != size) {
11992 		cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect.  "
11993 		    "%d != 260", DRIVER_NAME, ddiinst,
11994 		    (int)sizeof (ATTRIBUTE_ENTRY));
11995 
11996 		errors++;
11997 	}
11998 	size = SLI_SLIM1_SIZE;
11999 	if (sizeof (SLIM1) != size) {
12000 		cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect.  %d != %d",
12001 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);
12002 
12003 		errors++;
12004 	}
12005 	size = SLI3_IOCB_CMD_SIZE;
12006 	if (sizeof (IOCB) != size) {
12007 		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect.  %d != %d",
12008 		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
12009 		    SLI3_IOCB_CMD_SIZE);
12010 
12011 		errors++;
12012 	}
12013 
12014 	size = SLI_SLIM2_SIZE;
12015 	if (sizeof (SLIM2) != size) {
12016 		cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect.  %d != %d",
12017 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
12018 		    SLI_SLIM2_SIZE);
12019 
12020 		errors++;
12021 	}
12022 	return (errors);
12023 
12024 } /* emlxs_integrity_check() */
12025 
12026 
12027 #ifdef FMA_SUPPORT
12028 /*
12029  * FMA support
12030  */
12031 
12032 extern void
emlxs_fm_init(emlxs_hba_t * hba)12033 emlxs_fm_init(emlxs_hba_t *hba)
12034 {
12035 	ddi_iblock_cookie_t iblk;
12036 
12037 	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
12038 		return;
12039 	}
12040 
12041 	if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
12042 		emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12043 		emlxs_data_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12044 	}
12045 
12046 	if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
12047 		hba->dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12048 		hba->dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR;
12049 		hba->dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR;
12050 		hba->dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR;
12051 	} else {
12052 		hba->dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12053 		hba->dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12054 		hba->dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12055 		hba->dma_attr_fcip_rsp.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12056 	}
12057 
12058 	ddi_fm_init(hba->dip, &hba->fm_caps, &iblk);
12059 
12060 	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
12061 	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12062 		pci_ereport_setup(hba->dip);
12063 	}
12064 
12065 	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12066 		ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb,
12067 		    (void *)hba);
12068 	}
12069 
12070 } /* emlxs_fm_init() */
12071 
12072 
12073 extern void
emlxs_fm_fini(emlxs_hba_t * hba)12074 emlxs_fm_fini(emlxs_hba_t *hba)
12075 {
12076 	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
12077 		return;
12078 	}
12079 
12080 	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
12081 	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12082 		pci_ereport_teardown(hba->dip);
12083 	}
12084 
12085 	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12086 		ddi_fm_handler_unregister(hba->dip);
12087 	}
12088 
12089 	(void) ddi_fm_fini(hba->dip);
12090 
12091 } /* emlxs_fm_fini() */
12092 
12093 
12094 extern int
emlxs_fm_check_acc_handle(emlxs_hba_t * hba,ddi_acc_handle_t handle)12095 emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle)
12096 {
12097 	ddi_fm_error_t err;
12098 
12099 	if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
12100 		return (DDI_FM_OK);
12101 	}
12102 
12103 	/* Some S10 versions do not define the ahi_err structure */
12104 	if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) {
12105 		return (DDI_FM_OK);
12106 	}
12107 
12108 	err.fme_status = DDI_FM_OK;
12109 	(void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);
12110 
12111 	/* Some S10 versions do not define the ddi_fm_acc_err_clear function */
12112 	if ((void *)&ddi_fm_acc_err_clear != NULL) {
12113 		(void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
12114 	}
12115 
12116 	return (err.fme_status);
12117 
12118 } /* emlxs_fm_check_acc_handle() */
12119 
12120 
12121 extern int
emlxs_fm_check_dma_handle(emlxs_hba_t * hba,ddi_dma_handle_t handle)12122 emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle)
12123 {
12124 	ddi_fm_error_t err;
12125 
12126 	if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
12127 		return (DDI_FM_OK);
12128 	}
12129 
12130 	err.fme_status = DDI_FM_OK;
12131 	(void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);
12132 
12133 	return (err.fme_status);
12134 
12135 } /* emlxs_fm_check_dma_handle() */
12136 
12137 
12138 extern void
emlxs_fm_ereport(emlxs_hba_t * hba,char * detail)12139 emlxs_fm_ereport(emlxs_hba_t *hba, char *detail)
12140 {
12141 	uint64_t ena;
12142 	char buf[FM_MAX_CLASS];
12143 
12144 	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
12145 		return;
12146 	}
12147 
12148 	if (detail == NULL) {
12149 		return;
12150 	}
12151 
12152 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
12153 	ena = fm_ena_generate(0, FM_ENA_FMT1);
12154 
12155 	ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP,
12156 	    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
12157 
12158 } /* emlxs_fm_ereport() */
12159 
12160 
12161 extern void
emlxs_fm_service_impact(emlxs_hba_t * hba,int impact)12162 emlxs_fm_service_impact(emlxs_hba_t *hba, int impact)
12163 {
12164 	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
12165 		return;
12166 	}
12167 
12168 	if (impact == NULL) {
12169 		return;
12170 	}
12171 
12172 	if ((hba->pm_state & EMLXS_PM_IN_DETACH) &&
12173 	    (impact == DDI_SERVICE_DEGRADED)) {
12174 		impact = DDI_SERVICE_UNAFFECTED;
12175 	}
12176 
12177 	ddi_fm_service_impact(hba->dip, impact);
12178 
12179 	return;
12180 
12181 } /* emlxs_fm_service_impact() */
12182 
12183 
12184 /*
12185  * The I/O fault service error handling callback function
12186  */
12187 /*ARGSUSED*/
12188 extern int
emlxs_fm_error_cb(dev_info_t * dip,ddi_fm_error_t * err,const void * impl_data)12189 emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
12190     const void *impl_data)
12191 {
12192 	/*
12193 	 * as the driver can always deal with an error
12194 	 * in any dma or access handle, we can just return
12195 	 * the fme_status value.
12196 	 */
12197 	pci_ereport_post(dip, err, NULL);
12198 	return (err->fme_status);
12199 
12200 } /* emlxs_fm_error_cb() */
12201 
12202 extern void
emlxs_check_dma(emlxs_hba_t * hba,emlxs_buf_t * sbp)12203 emlxs_check_dma(emlxs_hba_t *hba, emlxs_buf_t *sbp)
12204 {
12205 	emlxs_port_t	*port = sbp->port;
12206 	fc_packet_t	*pkt = PRIV2PKT(sbp);
12207 
12208 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
12209 		if (emlxs_fm_check_dma_handle(hba,
12210 		    hba->sli.sli4.slim2.dma_handle)
12211 		    != DDI_FM_OK) {
12212 			EMLXS_MSGF(EMLXS_CONTEXT,
12213 			    &emlxs_invalid_dma_handle_msg,
12214 			    "slim2: hdl=%p",
12215 			    hba->sli.sli4.slim2.dma_handle);
12216 
12217 			mutex_enter(&EMLXS_PORT_LOCK);
12218 			hba->flag |= FC_DMA_CHECK_ERROR;
12219 			mutex_exit(&EMLXS_PORT_LOCK);
12220 		}
12221 	} else {
12222 		if (emlxs_fm_check_dma_handle(hba,
12223 		    hba->sli.sli3.slim2.dma_handle)
12224 		    != DDI_FM_OK) {
12225 			EMLXS_MSGF(EMLXS_CONTEXT,
12226 			    &emlxs_invalid_dma_handle_msg,
12227 			    "slim2: hdl=%p",
12228 			    hba->sli.sli3.slim2.dma_handle);
12229 
12230 			mutex_enter(&EMLXS_PORT_LOCK);
12231 			hba->flag |= FC_DMA_CHECK_ERROR;
12232 			mutex_exit(&EMLXS_PORT_LOCK);
12233 		}
12234 	}
12235 
12236 	if (hba->flag & FC_DMA_CHECK_ERROR) {
12237 		pkt->pkt_state  = FC_PKT_TRAN_ERROR;
12238 		pkt->pkt_reason = FC_REASON_DMA_ERROR;
12239 		pkt->pkt_expln  = FC_EXPLN_NONE;
12240 		pkt->pkt_action = FC_ACTION_RETRYABLE;
12241 		return;
12242 	}
12243 
12244 	if (pkt->pkt_cmdlen) {
12245 		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_cmd_dma)
12246 		    != DDI_FM_OK) {
12247 			EMLXS_MSGF(EMLXS_CONTEXT,
12248 			    &emlxs_invalid_dma_handle_msg,
12249 			    "pkt_cmd_dma: hdl=%p",
12250 			    pkt->pkt_cmd_dma);
12251 
12252 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
12253 			pkt->pkt_reason = FC_REASON_DMA_ERROR;
12254 			pkt->pkt_expln  = FC_EXPLN_NONE;
12255 			pkt->pkt_action = FC_ACTION_RETRYABLE;
12256 
12257 			return;
12258 		}
12259 	}
12260 
12261 	if (pkt->pkt_rsplen) {
12262 		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_resp_dma)
12263 		    != DDI_FM_OK) {
12264 			EMLXS_MSGF(EMLXS_CONTEXT,
12265 			    &emlxs_invalid_dma_handle_msg,
12266 			    "pkt_resp_dma: hdl=%p",
12267 			    pkt->pkt_resp_dma);
12268 
12269 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
12270 			pkt->pkt_reason = FC_REASON_DMA_ERROR;
12271 			pkt->pkt_expln  = FC_EXPLN_NONE;
12272 			pkt->pkt_action = FC_ACTION_RETRYABLE;
12273 
12274 			return;
12275 		}
12276 	}
12277 
12278 	if (pkt->pkt_datalen) {
12279 		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_data_dma)
12280 		    != DDI_FM_OK) {
12281 			EMLXS_MSGF(EMLXS_CONTEXT,
12282 			    &emlxs_invalid_dma_handle_msg,
12283 			    "pkt_data_dma: hdl=%p",
12284 			    pkt->pkt_data_dma);
12285 
12286 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
12287 			pkt->pkt_reason = FC_REASON_DMA_ERROR;
12288 			pkt->pkt_expln  = FC_EXPLN_NONE;
12289 			pkt->pkt_action = FC_ACTION_RETRYABLE;
12290 
12291 			return;
12292 		}
12293 	}
12294 
12295 	return;
12296 
12297 }
12298 #endif	/* FMA_SUPPORT */
12299 
12300 
12301 extern void
emlxs_swap32_buffer(uint8_t * buffer,uint32_t size)12302 emlxs_swap32_buffer(uint8_t *buffer, uint32_t size)
12303 {
12304 	uint32_t word;
12305 	uint32_t *wptr;
12306 	uint32_t i;
12307 
12308 	VERIFY((size % 4) == 0);
12309 
12310 	wptr = (uint32_t *)buffer;
12311 
12312 	for (i = 0; i < size / 4; i++) {
12313 		word = *wptr;
12314 		*wptr++ = SWAP32(word);
12315 	}
12316 
12317 	return;
12318 
12319 }  /* emlxs_swap32_buffer() */
12320 
12321 
12322 extern void
emlxs_swap32_bcopy(uint8_t * src,uint8_t * dst,uint32_t size)12323 emlxs_swap32_bcopy(uint8_t *src, uint8_t *dst, uint32_t size)
12324 {
12325 	uint32_t word;
12326 	uint32_t *sptr;
12327 	uint32_t *dptr;
12328 	uint32_t i;
12329 
12330 	VERIFY((size % 4) == 0);
12331 
12332 	sptr = (uint32_t *)src;
12333 	dptr = (uint32_t *)dst;
12334 
12335 	for (i = 0; i < size / 4; i++) {
12336 		word = *sptr++;
12337 		*dptr++ = SWAP32(word);
12338 	}
12339 
12340 	return;
12341 
12342 }  /* emlxs_swap32_buffer() */
12343 
12344 
12345 extern char *
emlxs_strtoupper(char * str)12346 emlxs_strtoupper(char *str)
12347 {
12348 	char *cptr = str;
12349 
12350 	while (*cptr) {
12351 		if ((*cptr >= 'a') && (*cptr <= 'z')) {
12352 			*cptr -= ('a' - 'A');
12353 		}
12354 		cptr++;
12355 	}
12356 
12357 	return (str);
12358 
12359 } /* emlxs_strtoupper() */
12360 
12361 
12362 extern void
emlxs_ulp_statec_cb(emlxs_port_t * port,uint32_t statec)12363 emlxs_ulp_statec_cb(emlxs_port_t *port, uint32_t statec)
12364 {
12365 	emlxs_hba_t *hba = HBA;
12366 
12367 	/* This routine coordinates protection with emlxs_fca_unbind_port() */
12368 
12369 	mutex_enter(&EMLXS_PORT_LOCK);
12370 	if (!(port->flag & EMLXS_INI_BOUND)) {
12371 		mutex_exit(&EMLXS_PORT_LOCK);
12372 		return;
12373 	}
12374 	port->ulp_busy++;
12375 	mutex_exit(&EMLXS_PORT_LOCK);
12376 
12377 	port->ulp_statec_cb(port->ulp_handle, statec);
12378 
12379 	mutex_enter(&EMLXS_PORT_LOCK);
12380 	port->ulp_busy--;
12381 	mutex_exit(&EMLXS_PORT_LOCK);
12382 
12383 	return;
12384 
12385 }  /* emlxs_ulp_statec_cb() */
12386 
12387 
12388 extern void
emlxs_ulp_unsol_cb(emlxs_port_t * port,fc_unsol_buf_t * ubp)12389 emlxs_ulp_unsol_cb(emlxs_port_t *port, fc_unsol_buf_t *ubp)
12390 {
12391 	emlxs_hba_t *hba = HBA;
12392 
12393 	/* This routine coordinates protection with emlxs_fca_unbind_port() */
12394 
12395 	mutex_enter(&EMLXS_PORT_LOCK);
12396 	if (!(port->flag & EMLXS_INI_BOUND)) {
12397 		mutex_exit(&EMLXS_PORT_LOCK);
12398 		return;
12399 	}
12400 	port->ulp_busy++;
12401 	mutex_exit(&EMLXS_PORT_LOCK);
12402 
12403 	port->ulp_unsol_cb(port->ulp_handle, ubp, ubp->ub_frame.type);
12404 
12405 	mutex_enter(&EMLXS_PORT_LOCK);
12406 	port->ulp_busy--;
12407 	mutex_exit(&EMLXS_PORT_LOCK);
12408 
12409 	return;
12410 
12411 }  /* emlxs_ulp_unsol_cb() */
12412