xref: /titanic_41/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli4.c (revision d478d0a82b5f273f7eac89c8df47960d9d22820b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at
9  * http://www.opensource.org/licenses/cddl1.txt.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2004-2012 Emulex. All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <emlxs.h>
28 
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_SLI4_C);
32 
33 static int		emlxs_sli4_init_extents(emlxs_hba_t *hba,
34 				MAILBOXQ *mbq);
35 static uint32_t		emlxs_sli4_read_status(emlxs_hba_t *hba);
36 
37 static int		emlxs_init_bootstrap_mb(emlxs_hba_t *hba);
38 
39 static uint32_t		emlxs_sli4_read_sema(emlxs_hba_t *hba);
40 
41 static uint32_t		emlxs_sli4_read_mbdb(emlxs_hba_t *hba);
42 
43 static void		emlxs_sli4_write_mbdb(emlxs_hba_t *hba, uint32_t value);
44 
45 static void		emlxs_sli4_write_wqdb(emlxs_hba_t *hba, uint32_t value);
46 
47 static void		emlxs_sli4_write_mqdb(emlxs_hba_t *hba, uint32_t value);
48 
49 static void		emlxs_sli4_write_rqdb(emlxs_hba_t *hba, uint32_t value);
50 
51 static void		emlxs_sli4_write_cqdb(emlxs_hba_t *hba, uint32_t value);
52 
53 static int		emlxs_sli4_create_queues(emlxs_hba_t *hba,
54 				MAILBOXQ *mbq);
55 static int		emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba,
56 				MAILBOXQ *mbq);
57 static int		emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba,
58 				MAILBOXQ *mbq);
59 
60 static int		emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq);
61 
62 static int		emlxs_sli4_map_hdw(emlxs_hba_t *hba);
63 
64 static void		emlxs_sli4_unmap_hdw(emlxs_hba_t *hba);
65 
66 static int32_t		emlxs_sli4_online(emlxs_hba_t *hba);
67 
68 static void		emlxs_sli4_offline(emlxs_hba_t *hba,
69 				uint32_t reset_requested);
70 
71 static uint32_t		emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart,
72 				uint32_t skip_post, uint32_t quiesce);
73 static void		emlxs_sli4_hba_kill(emlxs_hba_t *hba);
74 
75 static uint32_t		emlxs_sli4_hba_init(emlxs_hba_t *hba);
76 
77 static uint32_t		emlxs_sli4_bde_setup(emlxs_port_t *port,
78 				emlxs_buf_t *sbp);
79 
80 static void		emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba,
81 				CHANNEL *cp, IOCBQ *iocb_cmd);
82 static uint32_t		emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba,
83 				MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
84 static uint32_t		emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba,
85 				MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
86 #ifdef SFCT_SUPPORT
87 static uint32_t		emlxs_sli4_prep_fct_iocb(emlxs_port_t *port,
88 				emlxs_buf_t *cmd_sbp, int channel);
89 static uint32_t		emlxs_sli4_fct_bde_setup(emlxs_port_t *port,
90 				emlxs_buf_t *sbp);
91 #endif /* SFCT_SUPPORT */
92 
93 static uint32_t		emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port,
94 				emlxs_buf_t *sbp, int ring);
95 static uint32_t		emlxs_sli4_prep_ip_iocb(emlxs_port_t *port,
96 				emlxs_buf_t *sbp);
97 static uint32_t		emlxs_sli4_prep_els_iocb(emlxs_port_t *port,
98 				emlxs_buf_t *sbp);
99 static uint32_t		emlxs_sli4_prep_ct_iocb(emlxs_port_t *port,
100 				emlxs_buf_t *sbp);
101 static void		emlxs_sli4_poll_intr(emlxs_hba_t *hba);
102 static int32_t		emlxs_sli4_intx_intr(char *arg);
103 
104 #ifdef MSI_SUPPORT
105 static uint32_t		emlxs_sli4_msi_intr(char *arg1, char *arg2);
106 #endif /* MSI_SUPPORT */
107 
108 static void		emlxs_sli4_resource_free(emlxs_hba_t *hba);
109 
110 static int		emlxs_sli4_resource_alloc(emlxs_hba_t *hba);
111 extern void		emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba);
112 
113 static XRIobj_t		*emlxs_sli4_alloc_xri(emlxs_port_t *port,
114 				emlxs_buf_t *sbp, RPIobj_t *rpip,
115 				uint32_t type);
116 static void		emlxs_sli4_enable_intr(emlxs_hba_t *hba);
117 
118 static void		emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att);
119 
120 static void		emlxs_sli4_timer(emlxs_hba_t *hba);
121 
122 static void		emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba);
123 
124 static void		emlxs_sli4_poll_erratt(emlxs_hba_t *hba);
125 
126 extern XRIobj_t 	*emlxs_sli4_reserve_xri(emlxs_port_t *port,
127 				RPIobj_t *rpip, uint32_t type, uint16_t rx_id);
128 static int		emlxs_check_hdw_ready(emlxs_hba_t *);
129 
130 static uint32_t		emlxs_sli4_reg_did(emlxs_port_t *port,
131 				uint32_t did, SERV_PARM *param,
132 				emlxs_buf_t *sbp, fc_unsol_buf_t *ubp,
133 				IOCBQ *iocbq);
134 
135 static uint32_t		emlxs_sli4_unreg_node(emlxs_port_t *port,
136 				emlxs_node_t *node, emlxs_buf_t *sbp,
137 				fc_unsol_buf_t *ubp, IOCBQ *iocbq);
138 
139 static void		emlxs_sli4_handle_fc_link_att(emlxs_hba_t *hba,
140 				CQE_ASYNC_t *cqe);
141 static void		emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t *hba,
142 				CQE_ASYNC_t *cqe);
143 
144 
145 static uint16_t		emlxs_sli4_rqid_to_index(emlxs_hba_t *hba,
146 				uint16_t rqid);
147 static uint16_t		emlxs_sli4_wqid_to_index(emlxs_hba_t *hba,
148 				uint16_t wqid);
149 static uint16_t		emlxs_sli4_cqid_to_index(emlxs_hba_t *hba,
150 				uint16_t cqid);
151 
/*
 * Define SLI4 API functions.
 *
 * Entry-point vector through which the common emlxs code drives an
 * SLI4-mode adapter.  The initializer is positional, so the order of
 * the entries must match the member order of the emlxs_sli_api_t
 * declaration (defined elsewhere in the driver headers).
 */
emlxs_sli_api_t emlxs_sli4_api = {
	emlxs_sli4_map_hdw,
	emlxs_sli4_unmap_hdw,
	emlxs_sli4_online,
	emlxs_sli4_offline,
	emlxs_sli4_hba_reset,
	emlxs_sli4_hba_kill,
	emlxs_sli4_issue_iocb_cmd,
	emlxs_sli4_issue_mbox_cmd,
#ifdef SFCT_SUPPORT
	emlxs_sli4_prep_fct_iocb,
#else
	NULL,	/* FCT (target mode) support not compiled in */
#endif /* SFCT_SUPPORT */
	emlxs_sli4_prep_fcp_iocb,
	emlxs_sli4_prep_ip_iocb,
	emlxs_sli4_prep_els_iocb,
	emlxs_sli4_prep_ct_iocb,
	emlxs_sli4_poll_intr,
	emlxs_sli4_intx_intr,
	emlxs_sli4_msi_intr,
	emlxs_sli4_disable_intr,
	emlxs_sli4_timer,
	emlxs_sli4_poll_erratt,
	emlxs_sli4_reg_did,
	emlxs_sli4_unreg_node
};
180 
181 
182 /* ************************************************************************** */
183 
184 static void
emlxs_sli4_set_default_params(emlxs_hba_t * hba)185 emlxs_sli4_set_default_params(emlxs_hba_t *hba)
186 {
187 	emlxs_port_t *port = &PPORT;
188 
189 	bzero((char *)&hba->sli.sli4.param, sizeof (sli_params_t));
190 
191 	hba->sli.sli4.param.ProtocolType = 0x3; /* FC/FCoE */
192 
193 	hba->sli.sli4.param.SliHint2 = 0;
194 	hba->sli.sli4.param.SliHint1 = 0;
195 	hba->sli.sli4.param.IfType = 0;
196 	hba->sli.sli4.param.SliFamily = 0;
197 	hba->sli.sli4.param.Revision = 0x4; /* SLI4 */
198 	hba->sli.sli4.param.FT = 0;
199 
200 	hba->sli.sli4.param.EqeCntMethod = 0x1; /* Bit pattern */
201 	hba->sli.sli4.param.EqPageSize = 0x1; /* 4096 */
202 	hba->sli.sli4.param.EqeSize = 0x1; /* 4 byte */
203 	hba->sli.sli4.param.EqPageCnt = 8;
204 	hba->sli.sli4.param.EqeCntMask = 0x1F; /* 256-4096 elements */
205 
206 	hba->sli.sli4.param.CqeCntMethod = 0x1; /* Bit pattern */
207 	hba->sli.sli4.param.CqPageSize = 0x1; /* 4096 */
208 	hba->sli.sli4.param.CQV = 0;
209 	hba->sli.sli4.param.CqeSize = 0x3; /* 16 byte */
210 	hba->sli.sli4.param.CqPageCnt = 4;
211 	hba->sli.sli4.param.CqeCntMask = 0x70; /* 256-1024 elements */
212 
213 	hba->sli.sli4.param.MqeCntMethod = 0x1; /* Bit pattern */
214 	hba->sli.sli4.param.MqPageSize = 0x1; /* 4096 */
215 	hba->sli.sli4.param.MQV = 0;
216 	hba->sli.sli4.param.MqPageCnt = 8;
217 	hba->sli.sli4.param.MqeCntMask = 0x0F; /* 16-128 elements */
218 
219 	hba->sli.sli4.param.WqeCntMethod = 0; /* Page Count */
220 	hba->sli.sli4.param.WqPageSize = 0x1; /* 4096 */
221 	hba->sli.sli4.param.WQV = 0;
222 	hba->sli.sli4.param.WqeSize = 0x5; /* 64 byte */
223 	hba->sli.sli4.param.WqPageCnt = 4;
224 	hba->sli.sli4.param.WqeCntMask = 0x10; /* 256 elements */
225 
226 	hba->sli.sli4.param.RqeCntMethod = 0; /* Page Count */
227 	hba->sli.sli4.param.RqPageSize = 0x1; /* 4096 */
228 	hba->sli.sli4.param.RQV = 0;
229 	hba->sli.sli4.param.RqeSize = 0x2; /* 8 byte */
230 	hba->sli.sli4.param.RqPageCnt = 8;
231 	hba->sli.sli4.param.RqDbWin = 1;
232 	hba->sli.sli4.param.RqeCntMask = 0x100; /* 4096 elements */
233 
234 	hba->sli.sli4.param.Loopback = 0xf; /* unsupported */
235 	hba->sli.sli4.param.PHWQ = 0;
236 	hba->sli.sli4.param.PHON = 0;
237 	hba->sli.sli4.param.TRIR = 0;
238 	hba->sli.sli4.param.TRTY = 0;
239 	hba->sli.sli4.param.TCCA = 0;
240 	hba->sli.sli4.param.MWQE = 0;
241 	hba->sli.sli4.param.ASSI = 0;
242 	hba->sli.sli4.param.TERP = 0;
243 	hba->sli.sli4.param.TGT  = 0;
244 	hba->sli.sli4.param.AREG = 0;
245 	hba->sli.sli4.param.FBRR = 0;
246 	hba->sli.sli4.param.SGLR = 1;
247 	hba->sli.sli4.param.HDRR = 1;
248 	hba->sli.sli4.param.EXT  = 0;
249 	hba->sli.sli4.param.FCOE = 1;
250 
251 	hba->sli.sli4.param.SgeLength = (64 * 1024);
252 	hba->sli.sli4.param.SglAlign = 0x7 /* 4096 */;
253 	hba->sli.sli4.param.SglPageSize = 0x1; /* 4096 */
254 	hba->sli.sli4.param.SglPageCnt = 2;
255 
256 	hba->sli.sli4.param.MinRqSize = 128;
257 	hba->sli.sli4.param.MaxRqSize = 2048;
258 
259 	hba->sli.sli4.param.RPIMax = 0x3ff;
260 	hba->sli.sli4.param.XRIMax = 0x3ff;
261 	hba->sli.sli4.param.VFIMax = 0xff;
262 	hba->sli.sli4.param.VPIMax = 0xff;
263 
264 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
265 	    "Default SLI4 parameters set.");
266 
267 } /* emlxs_sli4_set_default_params() */
268 
269 
270 /*
271  * emlxs_sli4_online()
272  *
273  * This routine will start initialization of the SLI4 HBA.
274  */
275 static int32_t
emlxs_sli4_online(emlxs_hba_t * hba)276 emlxs_sli4_online(emlxs_hba_t *hba)
277 {
278 	emlxs_port_t *port = &PPORT;
279 	emlxs_config_t *cfg;
280 	emlxs_vpd_t *vpd;
281 	MAILBOXQ *mbq = NULL;
282 	MAILBOX4 *mb  = NULL;
283 	MATCHMAP *mp  = NULL;
284 	uint32_t i;
285 	uint32_t j;
286 	uint32_t rval = 0;
287 	uint8_t *vpd_data;
288 	uint32_t sli_mode;
289 	uint8_t *outptr;
290 	uint32_t status;
291 	uint32_t fw_check;
292 	uint32_t kern_update = 0;
293 	emlxs_firmware_t hba_fw;
294 	emlxs_firmware_t *fw;
295 	uint16_t ssvid;
296 	char buf[64];
297 
298 	cfg = &CFG;
299 	vpd = &VPD;
300 
301 	sli_mode = EMLXS_HBA_SLI4_MODE;
302 	hba->sli_mode = sli_mode;
303 
304 	/* Set the fw_check flag */
305 	fw_check = cfg[CFG_FW_CHECK].current;
306 
307 	if ((fw_check & 0x04) ||
308 	    (hba->fw_flag & FW_UPDATE_KERNEL)) {
309 		kern_update = 1;
310 	}
311 
312 	hba->mbox_queue_flag = 0;
313 	hba->fc_edtov = FF_DEF_EDTOV;
314 	hba->fc_ratov = FF_DEF_RATOV;
315 	hba->fc_altov = FF_DEF_ALTOV;
316 	hba->fc_arbtov = FF_DEF_ARBTOV;
317 
318 	/* Networking not supported */
319 	if (cfg[CFG_NETWORK_ON].current) {
320 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
321 		    "Networking is not supported in SLI4, turning it off");
322 		cfg[CFG_NETWORK_ON].current = 0;
323 	}
324 
325 	hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
326 	if (hba->chan_count > MAX_CHANNEL) {
327 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
328 		    "Max channels exceeded, dropping num-wq from %d to 1",
329 		    cfg[CFG_NUM_WQ].current);
330 		cfg[CFG_NUM_WQ].current = 1;
331 		hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
332 	}
333 	hba->channel_fcp = 0; /* First channel */
334 
335 	/* Default channel for everything else is the last channel */
336 	hba->channel_ip = hba->chan_count - 1;
337 	hba->channel_els = hba->chan_count - 1;
338 	hba->channel_ct = hba->chan_count - 1;
339 
340 	hba->fc_iotag = 1;
341 	hba->io_count = 0;
342 	hba->channel_tx_count = 0;
343 
344 	/* Initialize the local dump region buffer */
345 	bzero(&hba->sli.sli4.dump_region, sizeof (MBUF_INFO));
346 	hba->sli.sli4.dump_region.size = EMLXS_DUMP_REGION_SIZE;
347 	hba->sli.sli4.dump_region.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG
348 	    | FC_MBUF_DMA32;
349 	hba->sli.sli4.dump_region.align = ddi_ptob(hba->dip, 1L);
350 
351 	(void) emlxs_mem_alloc(hba, &hba->sli.sli4.dump_region);
352 
353 	if (hba->sli.sli4.dump_region.virt == NULL) {
354 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
355 		    "Unable to allocate dump region buffer.");
356 
357 		return (ENOMEM);
358 	}
359 
360 	/*
361 	 * Get a buffer which will be used repeatedly for mailbox commands
362 	 */
363 	mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
364 
365 	mb = (MAILBOX4 *)mbq;
366 
367 reset:
368 	/* Reset & Initialize the adapter */
369 	if (emlxs_sli4_hba_init(hba)) {
370 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
371 		    "Unable to init hba.");
372 
373 		rval = EIO;
374 		goto failed1;
375 	}
376 
377 #ifdef FMA_SUPPORT
378 	/* Access handle validation */
379 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
380 	case SLI_INTF_IF_TYPE_2:
381 		if ((emlxs_fm_check_acc_handle(hba,
382 		    hba->pci_acc_handle) != DDI_FM_OK) ||
383 		    (emlxs_fm_check_acc_handle(hba,
384 		    hba->sli.sli4.bar0_acc_handle) != DDI_FM_OK)) {
385 			EMLXS_MSGF(EMLXS_CONTEXT,
386 			    &emlxs_invalid_access_handle_msg, NULL);
387 
388 			rval = EIO;
389 			goto failed1;
390 		}
391 		break;
392 
393 	default :
394 		if ((emlxs_fm_check_acc_handle(hba,
395 		    hba->pci_acc_handle) != DDI_FM_OK) ||
396 		    (emlxs_fm_check_acc_handle(hba,
397 		    hba->sli.sli4.bar1_acc_handle) != DDI_FM_OK) ||
398 		    (emlxs_fm_check_acc_handle(hba,
399 		    hba->sli.sli4.bar2_acc_handle) != DDI_FM_OK)) {
400 			EMLXS_MSGF(EMLXS_CONTEXT,
401 			    &emlxs_invalid_access_handle_msg, NULL);
402 
403 			rval = EIO;
404 			goto failed1;
405 		}
406 		break;
407 	}
408 #endif	/* FMA_SUPPORT */
409 
410 	/*
411 	 * Setup and issue mailbox READ REV command
412 	 */
413 	vpd->opFwRev = 0;
414 	vpd->postKernRev = 0;
415 	vpd->sli1FwRev = 0;
416 	vpd->sli2FwRev = 0;
417 	vpd->sli3FwRev = 0;
418 	vpd->sli4FwRev = 0;
419 
420 	vpd->postKernName[0] = 0;
421 	vpd->opFwName[0] = 0;
422 	vpd->sli1FwName[0] = 0;
423 	vpd->sli2FwName[0] = 0;
424 	vpd->sli3FwName[0] = 0;
425 	vpd->sli4FwName[0] = 0;
426 
427 	vpd->opFwLabel[0] = 0;
428 	vpd->sli1FwLabel[0] = 0;
429 	vpd->sli2FwLabel[0] = 0;
430 	vpd->sli3FwLabel[0] = 0;
431 	vpd->sli4FwLabel[0] = 0;
432 
433 	EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
434 
435 	emlxs_mb_get_sli4_params(hba, mbq);
436 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
437 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
438 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
439 		    mb->mbxCommand, mb->mbxStatus);
440 
441 		/* Set param defaults */
442 		emlxs_sli4_set_default_params(hba);
443 
444 	} else {
445 		/* Save parameters */
446 		bcopy((char *)&mb->un.varSLIConfig.payload,
447 		    (char *)&hba->sli.sli4.param, sizeof (sli_params_t));
448 
449 		emlxs_data_dump(port, "SLI_PARMS",
450 		    (uint32_t *)&hba->sli.sli4.param,
451 		    sizeof (sli_params_t), 0);
452 	}
453 
454 	/* Reuse mbq from previous mbox */
455 	bzero(mbq, sizeof (MAILBOXQ));
456 
457 	emlxs_mb_get_port_name(hba, mbq);
458 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
459 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
460 		    "Unable to get port names. Mailbox cmd=%x status=%x",
461 		    mb->mbxCommand, mb->mbxStatus);
462 
463 		bzero(hba->sli.sli4.port_name,
464 		    sizeof (hba->sli.sli4.port_name));
465 	} else {
466 		/* Save port names */
467 		bcopy((char *)&mb->un.varSLIConfig.payload,
468 		    (char *)&hba->sli.sli4.port_name,
469 		    sizeof (hba->sli.sli4.port_name));
470 	}
471 
472 	/* Reuse mbq from previous mbox */
473 	bzero(mbq, sizeof (MAILBOXQ));
474 
475 	emlxs_mb_read_rev(hba, mbq, 0);
476 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
477 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
478 		    "Unable to read rev. Mailbox cmd=%x status=%x",
479 		    mb->mbxCommand, mb->mbxStatus);
480 
481 		rval = EIO;
482 		goto failed1;
483 
484 	}
485 
486 	emlxs_data_dump(port, "RD_REV", (uint32_t *)mb, 18, 0);
487 	if (mb->un.varRdRev4.sliLevel != 4) {
488 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
489 		    "Invalid read rev Version for SLI4: 0x%x",
490 		    mb->un.varRdRev4.sliLevel);
491 
492 		rval = EIO;
493 		goto failed1;
494 	}
495 
496 	switch (mb->un.varRdRev4.dcbxMode) {
497 	case EMLXS_DCBX_MODE_CIN:	/* Mapped to nonFIP mode */
498 		hba->flag &= ~FC_FIP_SUPPORTED;
499 		break;
500 
501 	case EMLXS_DCBX_MODE_CEE:	/* Mapped to FIP mode */
502 		hba->flag |= FC_FIP_SUPPORTED;
503 		break;
504 
505 	default:
506 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
507 		    "Invalid read rev dcbx mode for SLI4: 0x%x",
508 		    mb->un.varRdRev4.dcbxMode);
509 
510 		rval = EIO;
511 		goto failed1;
512 	}
513 
514 	/* Set FC/FCoE mode */
515 	if (mb->un.varRdRev4.FCoE) {
516 		hba->sli.sli4.flag |= EMLXS_SLI4_FCOE_MODE;
517 	} else {
518 		hba->sli.sli4.flag &= ~EMLXS_SLI4_FCOE_MODE;
519 	}
520 
521 	/* Save information as VPD data */
522 	vpd->rBit = 1;
523 
524 	vpd->sli4FwRev = (mb->un.varRdRev4.ULPFwId);
525 	bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->sli4FwName, 16);
526 
527 	vpd->opFwRev = (mb->un.varRdRev4.ULPFwId);
528 	bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->opFwName, 16);
529 
530 	vpd->postKernRev = (mb->un.varRdRev4.ARMFwId);
531 	bcopy((char *)mb->un.varRdRev4.ARMFwName, vpd->postKernName, 16);
532 
533 	vpd->biuRev = mb->un.varRdRev4.HwRev1;
534 	vpd->fcphHigh = mb->un.varRdRev4.fcphHigh;
535 	vpd->fcphLow = mb->un.varRdRev4.fcphLow;
536 	vpd->feaLevelHigh = mb->un.varRdRev4.feaLevelHigh;
537 	vpd->feaLevelLow = mb->un.varRdRev4.feaLevelLow;
538 
539 	/* Decode FW labels */
540 	if (hba->model_info.chip == EMLXS_LANCER_CHIP) {
541 		bcopy(vpd->postKernName, vpd->sli4FwName, 16);
542 	}
543 	emlxs_decode_label(vpd->sli4FwName, vpd->sli4FwName, 0,
544 	    sizeof (vpd->sli4FwName));
545 	emlxs_decode_label(vpd->opFwName, vpd->opFwName, 0,
546 	    sizeof (vpd->opFwName));
547 	emlxs_decode_label(vpd->postKernName, vpd->postKernName, 0,
548 	    sizeof (vpd->postKernName));
549 
550 	if (hba->model_info.chip == EMLXS_BE2_CHIP) {
551 		(void) strlcpy(vpd->sli4FwLabel, "be2.ufi",
552 		    sizeof (vpd->sli4FwLabel));
553 	} else if (hba->model_info.chip == EMLXS_BE3_CHIP) {
554 		(void) strlcpy(vpd->sli4FwLabel, "be3.ufi",
555 		    sizeof (vpd->sli4FwLabel));
556 	} else if (hba->model_info.chip == EMLXS_LANCER_CHIP) {
557 		(void) strlcpy(vpd->sli4FwLabel, "xe201.grp",
558 		    sizeof (vpd->sli4FwLabel));
559 	} else {
560 		(void) strlcpy(vpd->sli4FwLabel, "sli4.fw",
561 		    sizeof (vpd->sli4FwLabel));
562 	}
563 
564 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
565 	    "VPD ULP:%08x %s ARM:%08x %s f:%d %d %d %d : dcbx %d",
566 	    vpd->opFwRev, vpd->opFwName, vpd->postKernRev, vpd->postKernName,
567 	    vpd->fcphHigh, vpd->fcphLow, vpd->feaLevelHigh, vpd->feaLevelLow,
568 	    mb->un.varRdRev4.dcbxMode);
569 
570 	/* No key information is needed for SLI4 products */
571 
572 	/* Get adapter VPD information */
573 	vpd->port_index = (uint32_t)-1;
574 
575 	/* Reuse mbq from previous mbox */
576 	bzero(mbq, sizeof (MAILBOXQ));
577 
578 	emlxs_mb_dump_vpd(hba, mbq, 0);
579 	vpd_data = hba->sli.sli4.dump_region.virt;
580 
581 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
582 	    MBX_SUCCESS) {
583 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
584 		    "No VPD found. status=%x", mb->mbxStatus);
585 	} else {
586 		EMLXS_MSGF(EMLXS_CONTEXT,
587 		    &emlxs_init_debug_msg,
588 		    "VPD dumped. rsp_cnt=%d status=%x",
589 		    mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
590 
591 		if (mb->un.varDmp4.rsp_cnt) {
592 			EMLXS_MPDATA_SYNC(hba->sli.sli4.dump_region.dma_handle,
593 			    0, mb->un.varDmp4.rsp_cnt, DDI_DMA_SYNC_FORKERNEL);
594 
595 #ifdef FMA_SUPPORT
596 			if (hba->sli.sli4.dump_region.dma_handle) {
597 				if (emlxs_fm_check_dma_handle(hba,
598 				    hba->sli.sli4.dump_region.dma_handle)
599 				    != DDI_FM_OK) {
600 					EMLXS_MSGF(EMLXS_CONTEXT,
601 					    &emlxs_invalid_dma_handle_msg,
602 					    "sli4_online: hdl=%p",
603 					    hba->sli.sli4.dump_region.
604 					    dma_handle);
605 					rval = EIO;
606 					goto failed1;
607 				}
608 			}
609 #endif /* FMA_SUPPORT */
610 
611 		}
612 	}
613 
614 	if (vpd_data[0]) {
615 		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data,
616 		    mb->un.varDmp4.rsp_cnt);
617 
618 		/*
619 		 * If there is a VPD part number, and it does not
620 		 * match the current default HBA model info,
621 		 * replace the default data with an entry that
622 		 * does match.
623 		 *
624 		 * After emlxs_parse_vpd model holds the VPD value
625 		 * for V2 and part_num hold the value for PN. These
626 		 * 2 values are NOT necessarily the same.
627 		 */
628 
629 		rval = 0;
630 		if ((vpd->model[0] != 0) &&
631 		    (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
632 
633 			/* First scan for a V2 match */
634 
635 			for (i = 1; i < emlxs_pci_model_count; i++) {
636 				if (strcmp(&vpd->model[0],
637 				    emlxs_pci_model[i].model) == 0) {
638 					bcopy(&emlxs_pci_model[i],
639 					    &hba->model_info,
640 					    sizeof (emlxs_model_t));
641 					rval = 1;
642 					break;
643 				}
644 			}
645 		}
646 
647 		if (!rval && (vpd->part_num[0] != 0) &&
648 		    (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
649 
650 			/* Next scan for a PN match */
651 
652 			for (i = 1; i < emlxs_pci_model_count; i++) {
653 				if (strcmp(&vpd->part_num[0],
654 				    emlxs_pci_model[i].model) == 0) {
655 					bcopy(&emlxs_pci_model[i],
656 					    &hba->model_info,
657 					    sizeof (emlxs_model_t));
658 					break;
659 				}
660 			}
661 		}
662 
663 		/* HP CNA port indices start at 1 instead of 0 */
664 		if (hba->model_info.chip & EMLXS_BE_CHIPS) {
665 			ssvid = ddi_get16(hba->pci_acc_handle,
666 			    (uint16_t *)(hba->pci_addr + PCI_SSVID_REGISTER));
667 
668 			if ((ssvid == PCI_SSVID_HP) && (vpd->port_index > 0)) {
669 				vpd->port_index--;
670 			}
671 		}
672 
673 		/*
674 		 * Now lets update hba->model_info with the real
675 		 * VPD data, if any.
676 		 */
677 
678 		/*
679 		 * Replace the default model description with vpd data
680 		 */
681 		if (vpd->model_desc[0] != 0) {
682 			(void) strncpy(hba->model_info.model_desc,
683 			    vpd->model_desc,
684 			    (sizeof (hba->model_info.model_desc)-1));
685 		}
686 
687 		/* Replace the default model with vpd data */
688 		if (vpd->model[0] != 0) {
689 			(void) strncpy(hba->model_info.model, vpd->model,
690 			    (sizeof (hba->model_info.model)-1));
691 		}
692 
693 		/* Replace the default program types with vpd data */
694 		if (vpd->prog_types[0] != 0) {
695 			emlxs_parse_prog_types(hba, vpd->prog_types);
696 		}
697 	}
698 
699 	/*
700 	 * Since the adapter model may have changed with the vpd data
701 	 * lets double check if adapter is not supported
702 	 */
703 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
704 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
705 		    "Unsupported adapter found.  "
706 		    "Id:%d  Device id:0x%x  SSDID:0x%x  Model:%s",
707 		    hba->model_info.id, hba->model_info.device_id,
708 		    hba->model_info.ssdid, hba->model_info.model);
709 
710 		rval = EIO;
711 		goto failed1;
712 	}
713 
714 	(void) strncpy(vpd->boot_version, vpd->sli4FwName,
715 	    (sizeof (vpd->boot_version)-1));
716 
717 	/* Get fcode version property */
718 	emlxs_get_fcode_version(hba);
719 
720 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
721 	    "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
722 	    vpd->opFwRev, vpd->sli1FwRev);
723 
724 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
725 	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
726 	    vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
727 
728 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
729 	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
730 
731 	/*
732 	 * If firmware checking is enabled and the adapter model indicates
733 	 * a firmware image, then perform firmware version check
734 	 */
735 	hba->fw_flag = 0;
736 	hba->fw_timer = 0;
737 
738 	if (((fw_check & 0x1) &&
739 	    (hba->model_info.flags & EMLXS_ORACLE_BRANDED) &&
740 	    hba->model_info.fwid) ||
741 	    ((fw_check & 0x2) && hba->model_info.fwid)) {
742 
743 		/* Find firmware image indicated by adapter model */
744 		fw = NULL;
745 		for (i = 0; i < emlxs_fw_count; i++) {
746 			if (emlxs_fw_table[i].id == hba->model_info.fwid) {
747 				fw = &emlxs_fw_table[i];
748 				break;
749 			}
750 		}
751 
752 		/*
753 		 * If the image was found, then verify current firmware
754 		 * versions of adapter
755 		 */
756 		if (fw) {
757 			/* Obtain current firmware version info */
758 			if (hba->model_info.chip & EMLXS_BE_CHIPS) {
759 				(void) emlxs_be_read_fw_version(hba, &hba_fw);
760 			} else {
761 				hba_fw.kern = vpd->postKernRev;
762 				hba_fw.stub = vpd->opFwRev;
763 				hba_fw.sli1 = vpd->sli1FwRev;
764 				hba_fw.sli2 = vpd->sli2FwRev;
765 				hba_fw.sli3 = vpd->sli3FwRev;
766 				hba_fw.sli4 = vpd->sli4FwRev;
767 			}
768 
769 			if (!kern_update &&
770 			    ((fw->kern && (hba_fw.kern != fw->kern)) ||
771 			    (fw->stub && (hba_fw.stub != fw->stub)))) {
772 
773 				hba->fw_flag |= FW_UPDATE_NEEDED;
774 
775 			} else if ((fw->kern && (hba_fw.kern != fw->kern)) ||
776 			    (fw->stub && (hba_fw.stub != fw->stub)) ||
777 			    (fw->sli1 && (hba_fw.sli1 != fw->sli1)) ||
778 			    (fw->sli2 && (hba_fw.sli2 != fw->sli2)) ||
779 			    (fw->sli3 && (hba_fw.sli3 != fw->sli3)) ||
780 			    (fw->sli4 && (hba_fw.sli4 != fw->sli4))) {
781 
782 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
783 				    "Firmware update needed. "
784 				    "Updating. id=%d fw=%d",
785 				    hba->model_info.id, hba->model_info.fwid);
786 
787 #ifdef MODFW_SUPPORT
788 				/*
789 				 * Load the firmware image now
790 				 * If MODFW_SUPPORT is not defined, the
791 				 * firmware image will already be defined
792 				 * in the emlxs_fw_table
793 				 */
794 				emlxs_fw_load(hba, fw);
795 #endif /* MODFW_SUPPORT */
796 
797 				if (fw->image && fw->size) {
798 					uint32_t rc;
799 
800 					rc = emlxs_fw_download(hba,
801 					    (char *)fw->image, fw->size, 0);
802 					if ((rc != FC_SUCCESS) &&
803 					    (rc != EMLXS_REBOOT_REQUIRED)) {
804 						EMLXS_MSGF(EMLXS_CONTEXT,
805 						    &emlxs_init_msg,
806 						    "Firmware update failed.");
807 						hba->fw_flag |=
808 						    FW_UPDATE_NEEDED;
809 					}
810 #ifdef MODFW_SUPPORT
811 					/*
812 					 * Unload the firmware image from
813 					 * kernel memory
814 					 */
815 					emlxs_fw_unload(hba, fw);
816 #endif /* MODFW_SUPPORT */
817 
818 					fw_check = 0;
819 
820 					goto reset;
821 				}
822 
823 				hba->fw_flag |= FW_UPDATE_NEEDED;
824 
825 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
826 				    "Firmware image unavailable.");
827 			} else {
828 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
829 				    "Firmware update not needed.");
830 			}
831 		} else {
832 			/*
833 			 * This means either the adapter database is not
834 			 * correct or a firmware image is missing from the
835 			 * compile
836 			 */
837 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
838 			    "Firmware image unavailable. id=%d fw=%d",
839 			    hba->model_info.id, hba->model_info.fwid);
840 		}
841 	}
842 
843 	/* Reuse mbq from previous mbox */
844 	bzero(mbq, sizeof (MAILBOXQ));
845 
846 	emlxs_mb_dump_fcoe(hba, mbq, 0);
847 
848 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
849 	    MBX_SUCCESS) {
850 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
851 		    "No FCOE info found. status=%x", mb->mbxStatus);
852 	} else {
853 		EMLXS_MSGF(EMLXS_CONTEXT,
854 		    &emlxs_init_debug_msg,
855 		    "FCOE info dumped. rsp_cnt=%d status=%x",
856 		    mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
857 		(void) emlxs_parse_fcoe(hba,
858 		    (uint8_t *)hba->sli.sli4.dump_region.virt,
859 		    mb->un.varDmp4.rsp_cnt);
860 	}
861 
862 	/* Reuse mbq from previous mbox */
863 	bzero(mbq, sizeof (MAILBOXQ));
864 
865 	status = 0;
866 	if (port->flag & EMLXS_INI_ENABLED) {
867 		status |= SLI4_FEATURE_FCP_INITIATOR;
868 	}
869 	if (port->flag & EMLXS_TGT_ENABLED) {
870 		status |= SLI4_FEATURE_FCP_TARGET;
871 	}
872 	if (cfg[CFG_NPIV_ENABLE].current) {
873 		status |= SLI4_FEATURE_NPIV;
874 	}
875 	if (cfg[CFG_RQD_MODE].current) {
876 		status |= SLI4_FEATURE_RQD;
877 	}
878 	if (cfg[CFG_PERF_HINT].current) {
879 		if (hba->sli.sli4.param.PHON) {
880 			status |= SLI4_FEATURE_PERF_HINT;
881 		}
882 	}
883 
884 	emlxs_mb_request_features(hba, mbq, status);
885 
886 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
887 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
888 		    "Unable to REQUEST_FEATURES. Mailbox cmd=%x status=%x",
889 		    mb->mbxCommand, mb->mbxStatus);
890 
891 		rval = EIO;
892 		goto failed1;
893 	}
894 	emlxs_data_dump(port, "REQ_FEATURE", (uint32_t *)mb, 6, 0);
895 
896 	/* Check to see if we get the features we requested */
897 	if (status != mb->un.varReqFeatures.featuresEnabled) {
898 
899 		/* Just report descrepencies, don't abort the attach */
900 
901 		outptr = (uint8_t *)emlxs_request_feature_xlate(
902 		    mb->un.varReqFeatures.featuresRequested);
903 		(void) strlcpy(buf, (char *)outptr, sizeof (buf));
904 
905 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
906 		    "REQUEST_FEATURES: wanted:%s  got:%s",
907 		    &buf[0], emlxs_request_feature_xlate(
908 		    mb->un.varReqFeatures.featuresEnabled));
909 
910 	}
911 
912 	if ((port->flag & EMLXS_INI_ENABLED) &&
913 	    !(mb->un.varReqFeatures.featuresEnabled &
914 	    SLI4_FEATURE_FCP_INITIATOR)) {
915 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
916 		    "Initiator mode not supported by adapter.");
917 
918 		rval = EIO;
919 
920 #ifdef SFCT_SUPPORT
921 		/* Check if we can fall back to just target mode */
922 		if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
923 		    (mb->un.varReqFeatures.featuresEnabled &
924 		    SLI4_FEATURE_FCP_TARGET) &&
925 		    (cfg[CFG_DTM_ENABLE].current == 1) &&
926 		    (cfg[CFG_TARGET_MODE].current == 1)) {
927 
928 			cfg[CFG_DTM_ENABLE].current = 0;
929 
930 			EMLXS_MSGF(EMLXS_CONTEXT,
931 			    &emlxs_init_failed_msg,
932 			    "Disabling dynamic target mode. "
933 			    "Enabling target mode only.");
934 
935 			/* This will trigger the driver to reattach */
936 			rval = EAGAIN;
937 		}
938 #endif /* SFCT_SUPPORT */
939 		goto failed1;
940 	}
941 
942 	if ((port->flag & EMLXS_TGT_ENABLED) &&
943 	    !(mb->un.varReqFeatures.featuresEnabled &
944 	    SLI4_FEATURE_FCP_TARGET)) {
945 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
946 		    "Target mode not supported by adapter.");
947 
948 		rval = EIO;
949 
950 #ifdef SFCT_SUPPORT
951 		/* Check if we can fall back to just initiator mode */
952 		if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
953 		    (mb->un.varReqFeatures.featuresEnabled &
954 		    SLI4_FEATURE_FCP_INITIATOR) &&
955 		    (cfg[CFG_DTM_ENABLE].current == 1) &&
956 		    (cfg[CFG_TARGET_MODE].current == 0)) {
957 
958 			cfg[CFG_DTM_ENABLE].current = 0;
959 
960 			EMLXS_MSGF(EMLXS_CONTEXT,
961 			    &emlxs_init_failed_msg,
962 			    "Disabling dynamic target mode. "
963 			    "Enabling initiator mode only.");
964 
965 			/* This will trigger the driver to reattach */
966 			rval = EAGAIN;
967 		}
968 #endif /* SFCT_SUPPORT */
969 		goto failed1;
970 	}
971 
972 	if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_NPIV) {
973 		hba->flag |= FC_NPIV_ENABLED;
974 	}
975 
976 	if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_PERF_HINT) {
977 		hba->sli.sli4.flag |= EMLXS_SLI4_PHON;
978 		if (hba->sli.sli4.param.PHWQ) {
979 			hba->sli.sli4.flag |= EMLXS_SLI4_PHWQ;
980 		}
981 	}
982 
983 	/* Reuse mbq from previous mbox */
984 	bzero(mbq, sizeof (MAILBOXQ));
985 
986 	emlxs_mb_read_config(hba, mbq);
987 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
988 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
989 		    "Unable to READ_CONFIG. Mailbox cmd=%x status=%x",
990 		    mb->mbxCommand, mb->mbxStatus);
991 
992 		rval = EIO;
993 		goto failed1;
994 	}
995 	emlxs_data_dump(port, "READ_CONFIG4", (uint32_t *)mb, 18, 0);
996 
997 	/* Set default extents */
998 	hba->sli.sli4.XRICount = mb->un.varRdConfig4.XRICount;
999 	hba->sli.sli4.XRIExtCount = 1;
1000 	hba->sli.sli4.XRIExtSize = hba->sli.sli4.XRICount;
1001 	hba->sli.sli4.XRIBase[0] = mb->un.varRdConfig4.XRIBase;
1002 
1003 	hba->sli.sli4.RPICount = mb->un.varRdConfig4.RPICount;
1004 	hba->sli.sli4.RPIExtCount = 1;
1005 	hba->sli.sli4.RPIExtSize = hba->sli.sli4.RPICount;
1006 	hba->sli.sli4.RPIBase[0] = mb->un.varRdConfig4.RPIBase;
1007 
1008 	hba->sli.sli4.VPICount = mb->un.varRdConfig4.VPICount;
1009 	hba->sli.sli4.VPIExtCount = 1;
1010 	hba->sli.sli4.VPIExtSize = hba->sli.sli4.VPICount;
1011 	hba->sli.sli4.VPIBase[0] = mb->un.varRdConfig4.VPIBase;
1012 
1013 	hba->sli.sli4.VFICount = mb->un.varRdConfig4.VFICount;
1014 	hba->sli.sli4.VFIExtCount = 1;
1015 	hba->sli.sli4.VFIExtSize = hba->sli.sli4.VFICount;
1016 	hba->sli.sli4.VFIBase[0] = mb->un.varRdConfig4.VFIBase;
1017 
1018 	hba->sli.sli4.FCFICount = mb->un.varRdConfig4.FCFICount;
1019 
1020 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1021 	    "CONFIG: xri:%d rpi:%d vpi:%d vfi:%d fcfi:%d",
1022 	    hba->sli.sli4.XRICount,
1023 	    hba->sli.sli4.RPICount,
1024 	    hba->sli.sli4.VPICount,
1025 	    hba->sli.sli4.VFICount,
1026 	    hba->sli.sli4.FCFICount);
1027 
1028 	if ((hba->sli.sli4.XRICount == 0) ||
1029 	    (hba->sli.sli4.RPICount == 0) ||
1030 	    (hba->sli.sli4.VPICount == 0) ||
1031 	    (hba->sli.sli4.VFICount == 0) ||
1032 	    (hba->sli.sli4.FCFICount == 0)) {
1033 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1034 		    "Invalid extent value(s) - xri:%d rpi:%d vpi:%d "
1035 		    "vfi:%d fcfi:%d",
1036 		    hba->sli.sli4.XRICount,
1037 		    hba->sli.sli4.RPICount,
1038 		    hba->sli.sli4.VPICount,
1039 		    hba->sli.sli4.VFICount,
1040 		    hba->sli.sli4.FCFICount);
1041 
1042 		rval = EIO;
1043 		goto failed1;
1044 	}
1045 
1046 	if (mb->un.varRdConfig4.extents) {
1047 		if (emlxs_sli4_init_extents(hba, mbq)) {
1048 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1049 			    "Unable to initialize extents.");
1050 
1051 			rval = EIO;
1052 			goto failed1;
1053 		}
1054 	}
1055 
1056 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1057 	    "CONFIG: port_name:%c %c %c %c",
1058 	    hba->sli.sli4.port_name[0],
1059 	    hba->sli.sli4.port_name[1],
1060 	    hba->sli.sli4.port_name[2],
1061 	    hba->sli.sli4.port_name[3]);
1062 
1063 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1064 	    "CONFIG: ldv:%d link_type:%d link_number:%d",
1065 	    mb->un.varRdConfig4.ldv,
1066 	    mb->un.varRdConfig4.link_type,
1067 	    mb->un.varRdConfig4.link_number);
1068 
1069 	if (mb->un.varRdConfig4.ldv) {
1070 		hba->sli.sli4.link_number = mb->un.varRdConfig4.link_number;
1071 	} else {
1072 		hba->sli.sli4.link_number = (uint32_t)-1;
1073 	}
1074 
1075 	if (hba->sli.sli4.VPICount) {
1076 		hba->vpi_max = min(hba->sli.sli4.VPICount, MAX_VPORTS) - 1;
1077 	}
1078 
1079 	/* Set the max node count */
1080 	if (cfg[CFG_NUM_NODES].current > 0) {
1081 		hba->max_nodes =
1082 		    min(cfg[CFG_NUM_NODES].current,
1083 		    hba->sli.sli4.RPICount);
1084 	} else {
1085 		hba->max_nodes = hba->sli.sli4.RPICount;
1086 	}
1087 
1088 	/* Set the io throttle */
1089 	hba->io_throttle = hba->sli.sli4.XRICount - IO_THROTTLE_RESERVE;
1090 
1091 	/* Set max_iotag */
1092 	/* We add 1 in case all XRI's are non-zero */
1093 	hba->max_iotag = hba->sli.sli4.XRICount + 1;
1094 
1095 	if (cfg[CFG_NUM_IOTAGS].current) {
1096 		hba->max_iotag = min(hba->max_iotag,
1097 		    (uint16_t)cfg[CFG_NUM_IOTAGS].current);
1098 	}
1099 
1100 	/* Set out-of-range iotag base */
1101 	hba->fc_oor_iotag = hba->max_iotag;
1102 
1103 	/* Save the link speed capabilities */
1104 	vpd->link_speed = (uint16_t)mb->un.varRdConfig4.lmt;
1105 	emlxs_process_link_speed(hba);
1106 
1107 	/*
1108 	 * Allocate some memory for buffers
1109 	 */
1110 	if (emlxs_mem_alloc_buffer(hba) == 0) {
1111 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1112 		    "Unable to allocate memory buffers.");
1113 
1114 		rval = ENOMEM;
1115 		goto failed1;
1116 	}
1117 
1118 	if (emlxs_sli4_resource_alloc(hba)) {
1119 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1120 		    "Unable to allocate resources.");
1121 
1122 		rval = ENOMEM;
1123 		goto failed2;
1124 	}
1125 	emlxs_data_dump(port, "XRIp", (uint32_t *)hba->sli.sli4.XRIp, 18, 0);
1126 	emlxs_sli4_zero_queue_stat(hba);
1127 
1128 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1129 	if ((cfg[CFG_NPIV_ENABLE].current) && (hba->flag & FC_NPIV_ENABLED)) {
1130 		hba->fca_tran->fca_num_npivports = hba->vpi_max;
1131 	}
1132 #endif /* >= EMLXS_MODREV5 */
1133 
1134 	/* Reuse mbq from previous mbox */
1135 	bzero(mbq, sizeof (MAILBOXQ));
1136 
1137 	if (emlxs_sli4_post_sgl_pages(hba, mbq)) {
1138 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1139 		    "Unable to post sgl pages.");
1140 
1141 		rval = EIO;
1142 		goto failed3;
1143 	}
1144 
1145 	/* Reuse mbq from previous mbox */
1146 	bzero(mbq, sizeof (MAILBOXQ));
1147 
1148 	if (emlxs_sli4_post_hdr_tmplates(hba, mbq)) {
1149 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1150 		    "Unable to post header templates.");
1151 
1152 		rval = EIO;
1153 		goto failed3;
1154 	}
1155 
1156 	/*
1157 	 * Add our interrupt routine to kernel's interrupt chain & enable it
1158 	 * If MSI is enabled this will cause Solaris to program the MSI address
1159 	 * and data registers in PCI config space
1160 	 */
1161 	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
1162 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1163 		    "Unable to add interrupt(s).");
1164 
1165 		rval = EIO;
1166 		goto failed3;
1167 	}
1168 
1169 	/* Reuse mbq from previous mbox */
1170 	bzero(mbq, sizeof (MAILBOXQ));
1171 
1172 	/* This MUST be done after EMLXS_INTR_ADD */
1173 	if (emlxs_sli4_create_queues(hba, mbq)) {
1174 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1175 		    "Unable to create queues.");
1176 
1177 		rval = EIO;
1178 		goto failed3;
1179 	}
1180 
1181 	EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
1182 
1183 	/* Get and save the current firmware version (based on sli_mode) */
1184 	emlxs_decode_firmware_rev(hba, vpd);
1185 
1186 
1187 	EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
1188 
1189 	if (SLI4_FC_MODE) {
1190 		/* Reuse mbq from previous mbox */
1191 		bzero(mbq, sizeof (MAILBOXQ));
1192 
1193 		emlxs_mb_config_link(hba, mbq);
1194 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1195 		    MBX_SUCCESS) {
1196 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1197 			    "Unable to configure link. Mailbox cmd=%x "
1198 			    "status=%x",
1199 			    mb->mbxCommand, mb->mbxStatus);
1200 
1201 			rval = EIO;
1202 			goto failed3;
1203 		}
1204 	}
1205 
1206 	/* Reuse mbq from previous mbox */
1207 	bzero(mbq, sizeof (MAILBOXQ));
1208 
1209 	/*
1210 	 * We need to get login parameters for NID
1211 	 */
1212 	(void) emlxs_mb_read_sparam(hba, mbq);
1213 	mp = (MATCHMAP *)mbq->bp;
1214 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1215 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1216 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
1217 		    mb->mbxCommand, mb->mbxStatus);
1218 
1219 		rval = EIO;
1220 		goto failed3;
1221 	}
1222 
1223 	/* Free the buffer since we were polling */
1224 	emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1225 	mp = NULL;
1226 
1227 	/* If no serial number in VPD data, then use the WWPN */
1228 	if (vpd->serial_num[0] == 0) {
1229 		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
1230 		for (i = 0; i < 12; i++) {
1231 			status = *outptr++;
1232 			j = ((status & 0xf0) >> 4);
1233 			if (j <= 9) {
1234 				vpd->serial_num[i] =
1235 				    (char)((uint8_t)'0' + (uint8_t)j);
1236 			} else {
1237 				vpd->serial_num[i] =
1238 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1239 			}
1240 
1241 			i++;
1242 			j = (status & 0xf);
1243 			if (j <= 9) {
1244 				vpd->serial_num[i] =
1245 				    (char)((uint8_t)'0' + (uint8_t)j);
1246 			} else {
1247 				vpd->serial_num[i] =
1248 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1249 			}
1250 		}
1251 
1252 		/*
1253 		 * Set port number and port index to zero
1254 		 * The WWN's are unique to each port and therefore port_num
1255 		 * must equal zero. This effects the hba_fru_details structure
1256 		 * in fca_bind_port()
1257 		 */
1258 		vpd->port_num[0] = 0;
1259 		vpd->port_index = 0;
1260 
1261 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1262 		    "CONFIG: WWPN: port_index=0");
1263 	}
1264 
1265 	/* Make final attempt to set a port index */
1266 	if (vpd->port_index == (uint32_t)-1) {
1267 		dev_info_t *p_dip;
1268 		dev_info_t *c_dip;
1269 
1270 		p_dip = ddi_get_parent(hba->dip);
1271 		c_dip = ddi_get_child(p_dip);
1272 
1273 		vpd->port_index = 0;
1274 		while (c_dip && (hba->dip != c_dip)) {
1275 			c_dip = ddi_get_next_sibling(c_dip);
1276 
1277 			if (strcmp(ddi_get_name(c_dip), "ethernet") == 0) {
1278 				continue;
1279 			}
1280 
1281 			vpd->port_index++;
1282 		}
1283 
1284 		EMLXS_MSGF(EMLXS_CONTEXT,
1285 		    &emlxs_init_debug_msg,
1286 		    "CONFIG: Device tree: port_index=%d",
1287 		    vpd->port_index);
1288 	}
1289 
1290 	if (vpd->port_num[0] == 0) {
1291 		if (hba->model_info.channels == EMLXS_MULTI_CHANNEL) {
1292 			(void) snprintf(vpd->port_num,
1293 			    (sizeof (vpd->port_num)-1),
1294 			    "%d", vpd->port_index);
1295 		}
1296 	}
1297 
1298 	if (vpd->id[0] == 0) {
1299 		(void) snprintf(vpd->id, (sizeof (vpd->id)-1),
1300 		    "%s %d",
1301 		    hba->model_info.model_desc, vpd->port_index);
1302 
1303 	}
1304 
1305 	if (vpd->manufacturer[0] == 0) {
1306 		(void) strncpy(vpd->manufacturer, hba->model_info.manufacturer,
1307 		    (sizeof (vpd->manufacturer)-1));
1308 	}
1309 
1310 	if (vpd->part_num[0] == 0) {
1311 		(void) strncpy(vpd->part_num, hba->model_info.model,
1312 		    (sizeof (vpd->part_num)-1));
1313 	}
1314 
1315 	if (vpd->model_desc[0] == 0) {
1316 		(void) snprintf(vpd->model_desc, (sizeof (vpd->model_desc)-1),
1317 		    "%s %d",
1318 		    hba->model_info.model_desc, vpd->port_index);
1319 	}
1320 
1321 	if (vpd->model[0] == 0) {
1322 		(void) strncpy(vpd->model, hba->model_info.model,
1323 		    (sizeof (vpd->model)-1));
1324 	}
1325 
1326 	if (vpd->prog_types[0] == 0) {
1327 		emlxs_build_prog_types(hba, vpd);
1328 	}
1329 
1330 	/* Create the symbolic names */
1331 	(void) snprintf(hba->snn, (sizeof (hba->snn)-1),
1332 	    "Emulex %s FV%s DV%s %s",
1333 	    hba->model_info.model, hba->vpd.fw_version, emlxs_version,
1334 	    (char *)utsname.nodename);
1335 
1336 	(void) snprintf(hba->spn, (sizeof (hba->spn)-1),
1337 	    "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1338 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1339 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1340 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1341 
1342 
1343 	EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1344 	emlxs_sli4_enable_intr(hba);
1345 
1346 	/* Check persist-linkdown */
1347 	if (cfg[CFG_PERSIST_LINKDOWN].current) {
1348 		EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1349 		goto done;
1350 	}
1351 
1352 #ifdef SFCT_SUPPORT
1353 	if ((port->mode == MODE_TARGET) &&
1354 	    !(port->fct_flags & FCT_STATE_PORT_ONLINE)) {
1355 		goto done;
1356 	}
1357 #endif /* SFCT_SUPPORT */
1358 
1359 	/* Reuse mbq from previous mbox */
1360 	bzero(mbq, sizeof (MAILBOXQ));
1361 
1362 	/*
1363 	 * Setup and issue mailbox INITIALIZE LINK command
1364 	 * At this point, the interrupt will be generated by the HW
1365 	 */
1366 	emlxs_mb_init_link(hba, mbq,
1367 	    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1368 
1369 	rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
1370 	if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
1371 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1372 		    "Unable to initialize link. "
1373 		    "Mailbox cmd=%x status=%x",
1374 		    mb->mbxCommand, mb->mbxStatus);
1375 
1376 		rval = EIO;
1377 		goto failed3;
1378 	}
1379 
1380 	/* Wait for link to come up */
1381 	i = cfg[CFG_LINKUP_DELAY].current;
1382 	while (i && (hba->state < FC_LINK_UP)) {
1383 		/* Check for hardware error */
1384 		if (hba->state == FC_ERROR) {
1385 			EMLXS_MSGF(EMLXS_CONTEXT,
1386 			    &emlxs_init_failed_msg,
1387 			    "Adapter error.", mb->mbxCommand,
1388 			    mb->mbxStatus);
1389 
1390 			rval = EIO;
1391 			goto failed3;
1392 		}
1393 
1394 		BUSYWAIT_MS(1000);
1395 		i--;
1396 	}
1397 
1398 done:
1399 	/*
1400 	 * The leadville driver will now handle the FLOGI at the driver level
1401 	 */
1402 
1403 	if (mbq) {
1404 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1405 		mbq = NULL;
1406 		mb = NULL;
1407 	}
1408 	return (0);
1409 
1410 failed3:
1411 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1412 
1413 	if (mp) {
1414 		emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1415 		mp = NULL;
1416 	}
1417 
1418 
1419 	if (hba->intr_flags & EMLXS_MSI_ADDED) {
1420 		(void) EMLXS_INTR_REMOVE(hba);
1421 	}
1422 
1423 	emlxs_sli4_resource_free(hba);
1424 
1425 failed2:
1426 	(void) emlxs_mem_free_buffer(hba);
1427 
1428 failed1:
1429 	if (mbq) {
1430 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1431 		mbq = NULL;
1432 		mb = NULL;
1433 	}
1434 
1435 	if (hba->sli.sli4.dump_region.virt) {
1436 		(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1437 	}
1438 
1439 	if (rval == 0) {
1440 		rval = EIO;
1441 	}
1442 
1443 	return (rval);
1444 
1445 } /* emlxs_sli4_online() */
1446 
1447 
1448 static void
emlxs_sli4_offline(emlxs_hba_t * hba,uint32_t reset_requested)1449 emlxs_sli4_offline(emlxs_hba_t *hba, uint32_t reset_requested)
1450 {
1451 	/* Reverse emlxs_sli4_online */
1452 
1453 	mutex_enter(&EMLXS_PORT_LOCK);
1454 	if (hba->flag & FC_INTERLOCKED) {
1455 		mutex_exit(&EMLXS_PORT_LOCK);
1456 		goto killed;
1457 	}
1458 	mutex_exit(&EMLXS_PORT_LOCK);
1459 
1460 	if (reset_requested) {
1461 		(void) emlxs_sli4_hba_reset(hba, 0, 0, 0);
1462 	}
1463 
1464 	/* Shutdown the adapter interface */
1465 	emlxs_sli4_hba_kill(hba);
1466 
1467 killed:
1468 
1469 	/* Free SLI shared memory */
1470 	emlxs_sli4_resource_free(hba);
1471 
1472 	/* Free driver shared memory */
1473 	(void) emlxs_mem_free_buffer(hba);
1474 
1475 	/* Free the host dump region buffer */
1476 	(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1477 
1478 } /* emlxs_sli4_offline() */
1479 
1480 
/*
 * emlxs_sli4_map_hdw
 *
 * Map the PCI BAR register spaces used to talk to the adapter and set up
 * the SLI4 register pointers (semaphore, doorbells, status/control, error
 * registers) for the detected SLI interface type.  On first call this also
 * allocates and zeroes the bootstrap mailbox DMA buffer.
 *
 * Returns 0 on success; ENOMEM on any failure (after unmapping whatever
 * was mapped via emlxs_sli4_unmap_hdw()).
 */
/*ARGSUSED*/
static int
emlxs_sli4_map_hdw(emlxs_hba_t *hba)
{
	emlxs_port_t		*port = &PPORT;
	dev_info_t		*dip;
	ddi_device_acc_attr_t	dev_attr;
	int			status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:

		/* Map in Hardware BAR pages that will be used for */
		/* communication with HBA. */

		/*
		 * if_type 0 layout: MPU/EP semaphore in BAR1, queue
		 * doorbells in BAR2, UE error registers in PCI config
		 * space.  Only map a BAR if it is not already mapped.
		 */
		if (hba->sli.sli4.bar1_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR1_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar1_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar1_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup BAR1 failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli4.bar1_addr, &dev_attr,
				    &hba->sli.sli4.bar1_acc_handle);
				goto failed;
			}
		}

		if (hba->sli.sli4.bar2_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR2_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar2_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar2_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "ddi_regs_map_setup BAR2 failed. status=%x",
				    status);
				goto failed;
			}
		}

		/* offset from beginning of register space */
		hba->sli.sli4.MPUEPSemaphore_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar1_addr +
		    CSR_MPU_EP_SEMAPHORE_OFFSET);
		hba->sli.sli4.MBDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MB_DB_OFFSET);
		hba->sli.sli4.CQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_CQ_DB_OFFSET);
		hba->sli.sli4.MQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MQ_DB_OFFSET);
		hba->sli.sli4.WQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_WQ_DB_OFFSET);
		hba->sli.sli4.RQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_RQ_DB_OFFSET);

		/* No SLIPORT status/control registers on if_type 0 */
		hba->sli.sli4.STATUS_reg_addr = 0;
		hba->sli.sli4.CNTL_reg_addr = 0;

		/* Error registers live in PCI config space on if_type 0 */
		hba->sli.sli4.ERR1_reg_addr =
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET);
		hba->sli.sli4.ERR2_reg_addr =
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET);

		hba->sli.sli4.PHYSDEV_reg_addr = 0;
		break;

	case SLI_INTF_IF_TYPE_2:

		/* Map in Hardware BAR pages that will be used for */
		/* communication with HBA. */

		/* if_type 2 layout: every register lives in BAR0. */
		if (hba->sli.sli4.bar0_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR0_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar0_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar0_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup BAR0 failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli4.bar0_addr, &dev_attr,
				    &hba->sli.sli4.bar0_acc_handle);
				goto failed;
			}
		}

		/* offset from beginning of register space */
		hba->sli.sli4.MPUEPSemaphore_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_SEMAPHORE_OFFSET);
		hba->sli.sli4.MBDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_MB_DB_OFFSET);
		hba->sli.sli4.CQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_CQ_DB_OFFSET);
		hba->sli.sli4.MQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_MQ_DB_OFFSET);
		hba->sli.sli4.WQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_WQ_DB_OFFSET);
		hba->sli.sli4.RQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_RQ_DB_OFFSET);

		hba->sli.sli4.STATUS_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_STATUS_OFFSET);
		hba->sli.sli4.CNTL_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_CONTROL_OFFSET);
		hba->sli.sli4.ERR1_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR1_OFFSET);
		hba->sli.sli4.ERR2_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR2_OFFSET);
		hba->sli.sli4.PHYSDEV_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    PHYSDEV_CONTROL_OFFSET);

		break;

	case SLI_INTF_IF_TYPE_1:
	case SLI_INTF_IF_TYPE_3:
	default:
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_attach_failed_msg,
		    "Map hdw: Unsupported if_type %08x",
		    (hba->sli_intf & SLI_INTF_IF_TYPE_MASK));

		goto failed;
	}

	/* One-time allocation of the bootstrap mailbox DMA buffer */
	if (hba->sli.sli4.bootstrapmb.virt == 0) {
		MBUF_INFO	*buf_info;
		MBUF_INFO	bufinfo;

		buf_info = &bufinfo;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE;
		buf_info->flags =
		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
		/* Page alignment is required for the bootstrap mailbox */
		buf_info->align = ddi_ptob(dip, 1L);

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			goto failed;
		}

		hba->sli.sli4.bootstrapmb.virt = buf_info->virt;
		hba->sli.sli4.bootstrapmb.phys = buf_info->phys;
		hba->sli.sli4.bootstrapmb.size = EMLXS_BOOTSTRAP_MB_SIZE +
		    MBOX_EXTENSION_SIZE;
		hba->sli.sli4.bootstrapmb.data_handle = buf_info->data_handle;
		hba->sli.sli4.bootstrapmb.dma_handle = buf_info->dma_handle;
		/* Only the mailbox proper is cleared, not the extension */
		bzero((char *)hba->sli.sli4.bootstrapmb.virt,
		    EMLXS_BOOTSTRAP_MB_SIZE);
	}

	hba->chan_count = MAX_CHANNEL;

	return (0);

failed:

	emlxs_sli4_unmap_hdw(hba);
	return (ENOMEM);


} /* emlxs_sli4_map_hdw() */
1654 
1655 
1656 /*ARGSUSED*/
1657 static void
emlxs_sli4_unmap_hdw(emlxs_hba_t * hba)1658 emlxs_sli4_unmap_hdw(emlxs_hba_t *hba)
1659 {
1660 	MBUF_INFO	bufinfo;
1661 	MBUF_INFO	*buf_info = &bufinfo;
1662 
1663 
1664 	if (hba->sli.sli4.bar0_acc_handle) {
1665 		ddi_regs_map_free(&hba->sli.sli4.bar0_acc_handle);
1666 		hba->sli.sli4.bar0_acc_handle = 0;
1667 	}
1668 
1669 	if (hba->sli.sli4.bar1_acc_handle) {
1670 		ddi_regs_map_free(&hba->sli.sli4.bar1_acc_handle);
1671 		hba->sli.sli4.bar1_acc_handle = 0;
1672 	}
1673 
1674 	if (hba->sli.sli4.bar2_acc_handle) {
1675 		ddi_regs_map_free(&hba->sli.sli4.bar2_acc_handle);
1676 		hba->sli.sli4.bar2_acc_handle = 0;
1677 	}
1678 
1679 	if (hba->sli.sli4.bootstrapmb.virt) {
1680 		bzero(buf_info, sizeof (MBUF_INFO));
1681 
1682 		if (hba->sli.sli4.bootstrapmb.phys) {
1683 			buf_info->phys = hba->sli.sli4.bootstrapmb.phys;
1684 			buf_info->data_handle =
1685 			    hba->sli.sli4.bootstrapmb.data_handle;
1686 			buf_info->dma_handle =
1687 			    hba->sli.sli4.bootstrapmb.dma_handle;
1688 			buf_info->flags = FC_MBUF_DMA;
1689 		}
1690 
1691 		buf_info->virt = hba->sli.sli4.bootstrapmb.virt;
1692 		buf_info->size = hba->sli.sli4.bootstrapmb.size;
1693 		emlxs_mem_free(hba, buf_info);
1694 
1695 		hba->sli.sli4.bootstrapmb.virt = NULL;
1696 	}
1697 
1698 	return;
1699 
1700 } /* emlxs_sli4_unmap_hdw() */
1701 
1702 
/*
 * emlxs_check_hdw_ready
 *
 * Poll the adapter for up to ~30 seconds until it reports POST/port ready.
 *
 * Returns:
 *	0 - adapter is ready
 *	1 - fatal POST error or "reset needed" reported (if_type 2)
 *	2 - unrecoverable port error (if_type 2); hba state set to FC_ERROR
 *	3 - unsupported interface type or ready timeout; state set to FC_ERROR
 */
static int
emlxs_check_hdw_ready(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t i = 0;
	uint32_t err1;
	uint32_t err2;

	/* Wait for reset completion */
	while (i < 30) {

		switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
		case SLI_INTF_IF_TYPE_0:
			/* if_type 0: progress reported via POST semaphore */
			status = emlxs_sli4_read_sema(hba);

			/* Check to see if any errors occurred during init */
			if (status & ARM_POST_FATAL) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "SEMA Error: status=%x", status);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (1);
			}

			if ((status & ARM_UNRECOVERABLE_ERROR) ==
			    ARM_UNRECOVERABLE_ERROR) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Unrecoverable Error: status=%x", status);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (1);
			}

			if ((status & ARM_POST_MASK) == ARM_POST_READY) {
				/* ARM Ready !! */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sli_detail_msg,
				    "ARM Ready: status=%x", status);

				return (0);
			}
			break;

		case SLI_INTF_IF_TYPE_2:
			/* if_type 2: progress reported via SLIPORT status */
			status = emlxs_sli4_read_status(hba);

			if (status & SLI_STATUS_READY) {
				if (!(status & SLI_STATUS_ERROR)) {
					/* ARM Ready !! */
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_detail_msg,
					    "ARM Ready: status=%x", status);

					return (0);
				}

				/* Ready but in error; capture error regs */
				err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
				    hba->sli.sli4.ERR1_reg_addr);
				err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
				    hba->sli.sli4.ERR2_reg_addr);

				if (status & SLI_STATUS_RESET_NEEDED) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_detail_msg,
					    "ARM Ready (Reset Needed): "
					    "status=%x err1=%x "
					    "err2=%x",
					    status, err1, err2);

					/* Caller is expected to reset */
					return (1);
				}

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Unrecoverable Error: status=%x err1=%x "
				    "err2=%x",
				    status, err1, err2);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (2);
			}

			break;

		default:
			/* Unsupported interface type */
			EMLXS_STATE_CHANGE(hba, FC_ERROR);

			return (3);
		}

		BUSYWAIT_MS(1000);
		i++;
	}

	/* Timeout occurred */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		/* if_type 0 error registers live in PCI config space */
		err1 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;

	default:
		err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;
	}

	/*
	 * NOTE(review): 'status' holds the last value polled in the loop
	 * above.  For if_type 0 that is the POST semaphore, so the
	 * SLI_STATUS_ERROR test below is only meaningful for if_type 2;
	 * it merely selects the log message either way — confirm intended.
	 */
	if (status & SLI_STATUS_ERROR) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Ready Timeout: Port Error: status=%x err1=%x err2=%x",
		    status, err1, err2);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Ready Timeout: status=%x err1=%x err2=%x",
		    status, err1, err2);
	}

	EMLXS_STATE_CHANGE(hba, FC_ERROR);

	return (3);

} /* emlxs_check_hdw_ready() */
1835 
1836 
1837 static uint32_t
emlxs_sli4_read_status(emlxs_hba_t * hba)1838 emlxs_sli4_read_status(emlxs_hba_t *hba)
1839 {
1840 #ifdef FMA_SUPPORT
1841 	emlxs_port_t *port = &PPORT;
1842 #endif  /* FMA_SUPPORT */
1843 	uint32_t status;
1844 
1845 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1846 	case SLI_INTF_IF_TYPE_2:
1847 		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
1848 		    hba->sli.sli4.STATUS_reg_addr);
1849 #ifdef FMA_SUPPORT
1850 		/* Access handle validation */
1851 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
1852 #endif  /* FMA_SUPPORT */
1853 		break;
1854 	default:
1855 		status = 0;
1856 		break;
1857 	}
1858 
1859 	return (status);
1860 
1861 } /* emlxs_sli4_read_status() */
1862 
1863 
1864 static uint32_t
emlxs_sli4_read_sema(emlxs_hba_t * hba)1865 emlxs_sli4_read_sema(emlxs_hba_t *hba)
1866 {
1867 #ifdef FMA_SUPPORT
1868 	emlxs_port_t *port = &PPORT;
1869 #endif  /* FMA_SUPPORT */
1870 	uint32_t status;
1871 
1872 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1873 	case SLI_INTF_IF_TYPE_0:
1874 		status = ddi_get32(hba->sli.sli4.bar1_acc_handle,
1875 		    hba->sli.sli4.MPUEPSemaphore_reg_addr);
1876 #ifdef FMA_SUPPORT
1877 		/* Access handle validation */
1878 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar1_acc_handle);
1879 #endif  /* FMA_SUPPORT */
1880 		break;
1881 
1882 	case SLI_INTF_IF_TYPE_2:
1883 		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
1884 		    hba->sli.sli4.MPUEPSemaphore_reg_addr);
1885 #ifdef FMA_SUPPORT
1886 		/* Access handle validation */
1887 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
1888 #endif  /* FMA_SUPPORT */
1889 		break;
1890 	default:
1891 		status = 0;
1892 		break;
1893 	}
1894 
1895 	return (status);
1896 
1897 } /* emlxs_sli4_read_sema() */
1898 
1899 
1900 static uint32_t
emlxs_sli4_read_mbdb(emlxs_hba_t * hba)1901 emlxs_sli4_read_mbdb(emlxs_hba_t *hba)
1902 {
1903 #ifdef FMA_SUPPORT
1904 	emlxs_port_t *port = &PPORT;
1905 #endif  /* FMA_SUPPORT */
1906 	uint32_t status;
1907 
1908 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1909 	case SLI_INTF_IF_TYPE_0:
1910 		status = ddi_get32(hba->sli.sli4.bar2_acc_handle,
1911 		    hba->sli.sli4.MBDB_reg_addr);
1912 
1913 #ifdef FMA_SUPPORT
1914 		/* Access handle validation */
1915 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar2_acc_handle);
1916 #endif  /* FMA_SUPPORT */
1917 		break;
1918 
1919 	case SLI_INTF_IF_TYPE_2:
1920 		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
1921 		    hba->sli.sli4.MBDB_reg_addr);
1922 #ifdef FMA_SUPPORT
1923 		/* Access handle validation */
1924 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
1925 #endif  /* FMA_SUPPORT */
1926 		break;
1927 	default:
1928 		status = 0;
1929 		break;
1930 	}
1931 
1932 	return (status);
1933 
1934 } /* emlxs_sli4_read_mbdb() */
1935 
1936 
1937 static void
emlxs_sli4_write_mbdb(emlxs_hba_t * hba,uint32_t value)1938 emlxs_sli4_write_mbdb(emlxs_hba_t *hba, uint32_t value)
1939 {
1940 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1941 	case SLI_INTF_IF_TYPE_0:
1942 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
1943 		    hba->sli.sli4.MBDB_reg_addr, value);
1944 		break;
1945 
1946 	case SLI_INTF_IF_TYPE_2:
1947 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
1948 		    hba->sli.sli4.MBDB_reg_addr, value);
1949 		break;
1950 	}
1951 
1952 } /* emlxs_sli4_write_mbdb() */
1953 
1954 
1955 static void
emlxs_sli4_write_cqdb(emlxs_hba_t * hba,uint32_t value)1956 emlxs_sli4_write_cqdb(emlxs_hba_t *hba, uint32_t value)
1957 {
1958 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1959 	case SLI_INTF_IF_TYPE_0:
1960 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
1961 		    hba->sli.sli4.CQDB_reg_addr, value);
1962 		break;
1963 
1964 	case SLI_INTF_IF_TYPE_2:
1965 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
1966 		    hba->sli.sli4.CQDB_reg_addr, value);
1967 		break;
1968 	}
1969 
1970 } /* emlxs_sli4_write_cqdb() */
1971 
1972 
1973 static void
emlxs_sli4_write_rqdb(emlxs_hba_t * hba,uint32_t value)1974 emlxs_sli4_write_rqdb(emlxs_hba_t *hba, uint32_t value)
1975 {
1976 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1977 	case SLI_INTF_IF_TYPE_0:
1978 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
1979 		    hba->sli.sli4.RQDB_reg_addr, value);
1980 		break;
1981 
1982 	case SLI_INTF_IF_TYPE_2:
1983 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
1984 		    hba->sli.sli4.RQDB_reg_addr, value);
1985 		break;
1986 	}
1987 
1988 } /* emlxs_sli4_write_rqdb() */
1989 
1990 
1991 static void
emlxs_sli4_write_mqdb(emlxs_hba_t * hba,uint32_t value)1992 emlxs_sli4_write_mqdb(emlxs_hba_t *hba, uint32_t value)
1993 {
1994 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1995 	case SLI_INTF_IF_TYPE_0:
1996 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
1997 		    hba->sli.sli4.MQDB_reg_addr, value);
1998 		break;
1999 
2000 	case SLI_INTF_IF_TYPE_2:
2001 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2002 		    hba->sli.sli4.MQDB_reg_addr, value);
2003 		break;
2004 	}
2005 
2006 } /* emlxs_sli4_write_mqdb() */
2007 
2008 
2009 static void
emlxs_sli4_write_wqdb(emlxs_hba_t * hba,uint32_t value)2010 emlxs_sli4_write_wqdb(emlxs_hba_t *hba, uint32_t value)
2011 {
2012 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2013 	case SLI_INTF_IF_TYPE_0:
2014 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
2015 		    hba->sli.sli4.WQDB_reg_addr, value);
2016 		break;
2017 
2018 	case SLI_INTF_IF_TYPE_2:
2019 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2020 		    hba->sli.sli4.WQDB_reg_addr, value);
2021 		break;
2022 	}
2023 
2024 } /* emlxs_sli4_write_wqdb() */
2025 
2026 
/*
 * Poll the bootstrap mailbox (BMBX) doorbell until the port reports
 * ready, or the timeout expires.
 *
 * tmo is the remaining timeout budget in 10ms ticks.  On success the
 * unused remainder of the budget is returned (always non-zero) so the
 * caller can pass it to subsequent waits.  On timeout the port error
 * registers are logged, the HBA is moved to FC_ERROR, and 0 is
 * returned.
 */
static uint32_t
emlxs_check_bootstrap_ready(emlxs_hba_t *hba, uint32_t tmo)
{
	emlxs_port_t *port = &PPORT;	/* used implicitly by EMLXS_MSGF */
	uint32_t status = 0;
	uint32_t err1;
	uint32_t err2;

	/* Wait for reset completion, tmo is in 10ms ticks */
	while (tmo) {
		status = emlxs_sli4_read_mbdb(hba);

		/* Check to see if any errors occurred during init */
		if (status & BMBX_READY) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "BMBX Ready: status=0x%x", status);

			return (tmo);
		}

		BUSYWAIT_MS(10);
		tmo--;
	}

	/*
	 * Timed out.  Fetch the port error registers for the log
	 * message: on if-type 0 ports they are in PCI config space,
	 * on later if-types they are in BAR0.
	 */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		err1 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;

	default:
		err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;
	}

	/* Timeout occurred */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
	    "Timeout waiting for BMailbox: status=%x err1=%x err2=%x",
	    status, err1, err2);

	EMLXS_STATE_CHANGE(hba, FC_ERROR);

	return (0);

} /* emlxs_check_bootstrap_ready() */
2077 
2078 
/*
 * Hand the already-prepared bootstrap mailbox to the port.
 *
 * The BMBX doorbell handshake is a two-step write: first the high
 * portion of the mailbox physical address (with the BMBX_ADDR_HI flag
 * set), then the low portion, each followed by a wait for the ready
 * bit.  tmo is the timeout budget in 10ms ticks.
 *
 * Returns the remaining tick budget on success, or 0 on timeout
 * (emlxs_check_bootstrap_ready has then already set FC_ERROR).
 */
static uint32_t
emlxs_issue_bootstrap_mb(emlxs_hba_t *hba, uint32_t tmo)
{
	emlxs_port_t *port = &PPORT;	/* used implicitly by EMLXS_MSGF */
	uint32_t *iptr;
	uint32_t addr30;

	/*
	 * This routine assumes the bootstrap mbox is loaded
	 * with the mailbox command to be executed.
	 *
	 * First, load the high 30 bits of bootstrap mailbox
	 */
	addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>32) & 0xfffffffc);
	addr30 |= BMBX_ADDR_HI;
	emlxs_sli4_write_mbdb(hba, addr30);

	tmo = emlxs_check_bootstrap_ready(hba, tmo);
	if (tmo == 0) {
		return (0);
	}

	/* Load the low 30 bits of bootstrap mailbox */
	addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>2) & 0xfffffffc);
	emlxs_sli4_write_mbdb(hba, addr30);

	tmo = emlxs_check_bootstrap_ready(hba, tmo);
	if (tmo == 0) {
		return (0);
	}

	/* Port has consumed the mailbox; dump the first words for debug */
	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "BootstrapMB: %p Completed %08x %08x %08x",
	    hba->sli.sli4.bootstrapmb.virt,
	    *iptr, *(iptr+1), *(iptr+2));

	return (tmo);

} /* emlxs_issue_bootstrap_mb() */
2120 
2121 
/*
 * One-time initialization of the bootstrap mailbox: issue the
 * FW_INITIALIZE command to the port through the BMBX mechanism.
 *
 * Idempotent: returns immediately if FC_BOOTSTRAPMB_INIT is already
 * set on the HBA.  Returns 0 on success, 1 on any failure (hardware
 * not ready, BMBX timeout, or FMA fault on the DMA handle).
 */
static int
emlxs_init_bootstrap_mb(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;	/* used implicitly by EMLXS_MSGF */
#endif /* FMA_SUPPORT */
	uint32_t *iptr;
	uint32_t tmo;

	if (emlxs_check_hdw_ready(hba)) {
		return (1);
	}

	if (hba->flag & FC_BOOTSTRAPMB_INIT) {
		return (0);  /* Already initialized */
	}

	/* NOTE: tmo is in 10ms ticks */
	tmo = emlxs_check_bootstrap_ready(hba, 3000);
	if (tmo == 0) {
		return (1);
	}

	/* Issue FW_INITIALIZE command */

	/* Special words to initialize bootstrap mbox MUST be little endian */
	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
	*iptr = LE_SWAP32(FW_INITIALIZE_WORD0);
	*(iptr+1) = LE_SWAP32(FW_INITIALIZE_WORD1);

	/* Flush the command words to the device before ringing the BMBX */
	EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
	    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);

	emlxs_data_dump(port, "FW_INIT", (uint32_t *)iptr, 6, 0);
	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
		return (1);
	}

#ifdef FMA_SUPPORT
	if (emlxs_fm_check_dma_handle(hba, hba->sli.sli4.bootstrapmb.dma_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_dma_handle_msg,
		    "init_bootstrap_mb: hdl=%p",
		    hba->sli.sli4.bootstrapmb.dma_handle);
		return (1);
	}
#endif
	hba->flag |= FC_BOOTSTRAPMB_INIT;
	return (0);

} /* emlxs_init_bootstrap_mb() */
2174 
2175 
2176 
2177 
/*
 * Bring the SLI4 adapter to an initialized state: reset the port,
 * wire the channels to their work queues, initialize the virtual-port
 * objects, and run the bootstrap mailbox FW_INITIALIZE sequence.
 *
 * Returns 0 on success, non-zero on failure (propagates the return
 * of emlxs_sli4_hba_reset / emlxs_init_bootstrap_mb).
 */
static uint32_t
emlxs_sli4_hba_init(emlxs_hba_t *hba)
{
	int rc;
	uint16_t i;
	emlxs_port_t *vport;
	emlxs_config_t *cfg = &CFG;
	CHANNEL *cp;
	VPIobj_t *vpip;

	/* Restart the adapter */
	if (emlxs_sli4_hba_reset(hba, 1, 0, 0)) {
		return (1);
	}

	/* Each channel issues I/O through its own SLI4 work queue */
	for (i = 0; i < hba->chan_count; i++) {
		cp = &hba->chan[i];
		cp->iopath = (void *)&hba->sli.sli4.wq[i];
	}

	/* Initialize all the port objects */
	hba->vpi_max  = 0;
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->hba = hba;
		vport->vpi = i;

		vpip = &vport->VPIobj;
		vpip->index = i;
		vpip->VPI = i;
		vpip->port = vport;
		vpip->state = VPI_STATE_OFFLINE;
		vport->vpip = vpip;
	}

	/* Set the max node count */
	if (hba->max_nodes == 0) {
		if (cfg[CFG_NUM_NODES].current > 0) {
			hba->max_nodes = cfg[CFG_NUM_NODES].current;
		} else {
			/* Default when not configured */
			hba->max_nodes = 4096;
		}
	}

	rc = emlxs_init_bootstrap_mb(hba);
	if (rc) {
		return (rc);
	}

	/* Default FCoE FC map (used when operating in FCoE mode) */
	hba->sli.sli4.cfgFCOE.FCMap[0] = FCOE_FCF_MAP0;
	hba->sli.sli4.cfgFCOE.FCMap[1] = FCOE_FCF_MAP1;
	hba->sli.sli4.cfgFCOE.FCMap[2] = FCOE_FCF_MAP2;

	if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) == SLI_INTF_IF_TYPE_0) {
		/* Cache the UE MASK registers value for UE error detection */
		hba->sli.sli4.ue_mask_lo = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_LO_OFFSET));
		hba->sli.sli4.ue_mask_hi = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_HI_OFFSET));
	}

	return (0);

} /* emlxs_sli4_hba_init() */
2242 
2243 
/*
 * Reset the SLI4 port and re-initialize the driver's per-HBA and
 * per-port soft state.
 *
 * quiesce   - non-zero requests a gentle reset: the adapter is not
 *	       killed first and the reset mailbox is polled via the
 *	       quiesce-safe path.
 * restart / skip_post are accepted for interface compatibility with
 * the SLI3 entry point and are not referenced here (ARGSUSED).
 *
 * Returns 0 on success, 1 on failure; failures set FC_ERROR state.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
		uint32_t quiesce)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	CHANNEL *cp;
	emlxs_config_t *cfg = &CFG;
	MAILBOXQ mboxq;
	uint32_t value;
	uint32_t i;
	uint32_t rc;
	uint16_t channelno;
	uint32_t status;
	uint32_t err1;
	uint32_t err2;
	uint8_t generate_event = 0;

	if (!cfg[CFG_RESET_ENABLE].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Adapter reset disabled.");
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		return (1);
	}

	/* The reset mechanism differs by SLI interface type */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		/* if-type 0: reset via a RESET mailbox command */
		if (quiesce == 0) {
			emlxs_sli4_hba_kill(hba);

			/*
			 * Initalize Hardware that will be used to bring
			 * SLI4 online.
			 */
			rc = emlxs_init_bootstrap_mb(hba);
			if (rc) {
				return (rc);
			}
		}

		bzero((void *)&mboxq, sizeof (MAILBOXQ));
		emlxs_mb_resetport(hba, &mboxq);

		if (quiesce == 0) {
			if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
			    MBX_POLL, 0) != MBX_SUCCESS) {
				/* Timeout occurred */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Timeout: RESET");
				EMLXS_STATE_CHANGE(hba, FC_ERROR);
				/* Log a dump event - not supported */
				return (1);
			}
		} else {
			if (emlxs_sli4_issue_mbox_cmd4quiesce(hba, &mboxq,
			    MBX_POLL, 0) != MBX_SUCCESS) {
				EMLXS_STATE_CHANGE(hba, FC_ERROR);
				/* Log a dump event - not supported */
				return (1);
			}
		}
		emlxs_data_dump(port, "resetPort", (uint32_t *)&mboxq, 12, 0);
		break;

	case SLI_INTF_IF_TYPE_2:
		/* if-type 2: reset via the SLI control register */
		if (quiesce == 0) {
			emlxs_sli4_hba_kill(hba);
		}

		rc = emlxs_check_hdw_ready(hba);
		if (rc > 1) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "Adapter not ready for reset.");
			return (1);
		}

		if (rc == 1) {
			err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR1_reg_addr);
			err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR2_reg_addr);

			/* Don't generate an event if dump was forced */
			if ((err1 != 0x2) || (err2 != 0x2)) {
				generate_event = 1;
			}
		}

		/* Reset the port now */

		mutex_enter(&EMLXS_PORT_LOCK);
		value = SLI_CNTL_INIT_PORT;

		ddi_put32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.CNTL_reg_addr, value);
		mutex_exit(&EMLXS_PORT_LOCK);

		break;
	}

	/* Reset the hba structure */
	hba->flag &= FC_RESET_MASK;

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];
		cp->hba = hba;
		cp->channelno = channelno;
	}

	/* Clear per-HBA counters and link state */
	hba->channel_tx_count = 0;
	hba->io_count = 0;
	hba->iodone_count = 0;
	hba->topology = 0;
	hba->linkspeed = 0;
	hba->heartbeat_active = 0;
	hba->discovery_timer = 0;
	hba->linkup_timer = 0;
	hba->loopback_tics = 0;

	/* Reset the port objects */
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		vport->flag &= EMLXS_PORT_RESET_MASK;
		vport->did = 0;
		vport->prev_did = 0;
		vport->lip_type = 0;
		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
		bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));

		/* Rebuild the base node (DID 0xffffff) for each vport */
		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
		vport->node_base.nlp_Rpi = 0;
		vport->node_base.nlp_DID = 0xffffff;
		vport->node_base.nlp_list_next = NULL;
		vport->node_base.nlp_list_prev = NULL;
		vport->node_base.nlp_active = 1;
		vport->node_count = 0;

		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
		}
	}

	if (emlxs_check_hdw_ready(hba)) {
		return (1);
	}

	/* generate_event is only set on the if-type 2 error path above */
	if (generate_event) {
		status = emlxs_sli4_read_status(hba);
		if (status & SLI_STATUS_DUMP_IMAGE_PRESENT) {
			emlxs_log_dump_event(port, NULL, 0);
		}
	}

	return (0);

} /* emlxs_sli4_hba_reset */
2404 
2405 
2406 #define	SGL_CMD		0
2407 #define	SGL_RESP	1
2408 #define	SGL_DATA	2
2409 #define	SGL_LAST	0x80
2410 
2411 /*ARGSUSED*/
2412 static ULP_SGE64 *
emlxs_pkt_to_sgl(emlxs_port_t * port,fc_packet_t * pkt,ULP_SGE64 * sge,uint32_t sgl_type,uint32_t * pcnt)2413 emlxs_pkt_to_sgl(emlxs_port_t *port, fc_packet_t *pkt, ULP_SGE64 *sge,
2414     uint32_t sgl_type, uint32_t *pcnt)
2415 {
2416 #ifdef DEBUG_SGE
2417 	emlxs_hba_t *hba = HBA;
2418 #endif /* DEBUG_SGE */
2419 	ddi_dma_cookie_t *cp;
2420 	uint_t i;
2421 	uint_t last;
2422 	int32_t	size;
2423 	int32_t	sge_size;
2424 	uint64_t sge_addr;
2425 	int32_t	len;
2426 	uint32_t cnt;
2427 	uint_t cookie_cnt;
2428 	ULP_SGE64 stage_sge;
2429 
2430 	last = sgl_type & SGL_LAST;
2431 	sgl_type &= ~SGL_LAST;
2432 
2433 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2434 	switch (sgl_type) {
2435 	case SGL_CMD:
2436 		cp = pkt->pkt_cmd_cookie;
2437 		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
2438 		size = (int32_t)pkt->pkt_cmdlen;
2439 		break;
2440 
2441 	case SGL_RESP:
2442 		cp = pkt->pkt_resp_cookie;
2443 		cookie_cnt = pkt->pkt_resp_cookie_cnt;
2444 		size = (int32_t)pkt->pkt_rsplen;
2445 		break;
2446 
2447 
2448 	case SGL_DATA:
2449 		cp = pkt->pkt_data_cookie;
2450 		cookie_cnt = pkt->pkt_data_cookie_cnt;
2451 		size = (int32_t)pkt->pkt_datalen;
2452 		break;
2453 
2454 	default:
2455 		return (NULL);
2456 	}
2457 
2458 #else
2459 	switch (sgl_type) {
2460 	case SGL_CMD:
2461 		cp = &pkt->pkt_cmd_cookie;
2462 		cookie_cnt = 1;
2463 		size = (int32_t)pkt->pkt_cmdlen;
2464 		break;
2465 
2466 	case SGL_RESP:
2467 		cp = &pkt->pkt_resp_cookie;
2468 		cookie_cnt = 1;
2469 		size = (int32_t)pkt->pkt_rsplen;
2470 		break;
2471 
2472 
2473 	case SGL_DATA:
2474 		cp = &pkt->pkt_data_cookie;
2475 		cookie_cnt = 1;
2476 		size = (int32_t)pkt->pkt_datalen;
2477 		break;
2478 
2479 	default:
2480 		return (NULL);
2481 	}
2482 #endif	/* >= EMLXS_MODREV3 */
2483 
2484 	stage_sge.offset = 0;
2485 	stage_sge.type = 0;
2486 	stage_sge.last = 0;
2487 	cnt = 0;
2488 	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
2489 
2490 		sge_size = cp->dmac_size;
2491 		sge_addr = cp->dmac_laddress;
2492 		while (sge_size && size) {
2493 			if (cnt) {
2494 				/* Copy staged SGE before we build next one */
2495 				BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
2496 				    (uint8_t *)sge, sizeof (ULP_SGE64));
2497 				sge++;
2498 			}
2499 			len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);
2500 			len = MIN(size, len);
2501 
2502 			stage_sge.addrHigh =
2503 			    PADDR_HI(sge_addr);
2504 			stage_sge.addrLow =
2505 			    PADDR_LO(sge_addr);
2506 			stage_sge.length = len;
2507 			if (sgl_type == SGL_DATA) {
2508 				stage_sge.offset = cnt;
2509 			}
2510 #ifdef DEBUG_SGE
2511 			emlxs_data_dump(port, "SGE", (uint32_t *)&stage_sge,
2512 			    4, 0);
2513 #endif /* DEBUG_SGE */
2514 			sge_addr += len;
2515 			sge_size -= len;
2516 
2517 			cnt += len;
2518 			size -= len;
2519 		}
2520 	}
2521 
2522 	if (last) {
2523 		stage_sge.last = 1;
2524 	}
2525 	BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
2526 	    sizeof (ULP_SGE64));
2527 
2528 	sge++;
2529 
2530 	if (pcnt) {
2531 		*pcnt = cnt;
2532 	}
2533 	return (sge);
2534 
2535 } /* emlxs_pkt_to_sgl */
2536 
2537 
/*
 * Build the SGL and WQE payload descriptors for an outgoing request.
 *
 * For FCP commands (IOCB_FCP_CMD) the SGL is: CMD payload, RSP
 * payload, then (if pkt_datalen != 0) the DATA payload.  For all
 * other requests it is CMD (outbound-only) or CMD followed by RSP.
 * The WQE Payload BDE is pointed at the first command cookie.
 *
 * Returns 0 on success, 1 on failure (bad tran_type or SGL build
 * error from emlxs_pkt_to_sgl).
 */
/*ARGSUSED*/
uint32_t
emlxs_sli4_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	XRIobj_t *xrip;
	ULP_SGE64 *sge;
	emlxs_wqe_t *wqe;
	IOCBQ *iocbq;
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_data;
	uint64_t sge_addr;
	uint32_t cmd_cnt;
	uint32_t resp_cnt;

	iocbq = (IOCBQ *) &sbp->iocbq;
	wqe = &iocbq->wqe;
	pkt = PRIV2PKT(sbp);
	xrip = sbp->xrip;
	sge = xrip->SGList.virt;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cp_cmd = pkt->pkt_cmd_cookie;
	cp_data = pkt->pkt_data_cookie;
#else
	cp_cmd  = &pkt->pkt_cmd_cookie;
	cp_data = &pkt->pkt_data_cookie;
#endif	/* >= EMLXS_MODREV3 */

	iocbq = &sbp->iocbq;
	if (iocbq->flag & IOCB_FCP_CMD) {

		/* Outbound-only transfers are not valid for FCP */
		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
			return (1);
		}

		/* CMD payload */
		sge = emlxs_pkt_to_sgl(port, pkt, sge, SGL_CMD, &cmd_cnt);
		if (! sge) {
			return (1);
		}

		/* DATA payload */
		if (pkt->pkt_datalen != 0) {
			/* RSP payload */
			sge = emlxs_pkt_to_sgl(port, pkt, sge,
			    SGL_RESP, &resp_cnt);
			if (! sge) {
				return (1);
			}

			/* Data payload */
			sge = emlxs_pkt_to_sgl(port, pkt, sge,
			    SGL_DATA | SGL_LAST, 0);
			if (! sge) {
				return (1);
			}
/*
 * NOTE(review): no goto in this function targets sgl_done; the label
 * appears to be dead — confirm nothing conditionally compiled jumps
 * here before removing it.
 */
sgl_done:
			if (hba->sli.sli4.flag & EMLXS_SLI4_PHON) {
				/* Seed FirstData BDE from first data cookie */
				sge_addr = cp_data->dmac_laddress;
				wqe->FirstData.addrHigh = PADDR_HI(sge_addr);
				wqe->FirstData.addrLow = PADDR_LO(sge_addr);
				wqe->FirstData.tus.f.bdeSize =
				    cp_data->dmac_size;
			}
		} else {
			/* RSP payload */
			sge = emlxs_pkt_to_sgl(port, pkt, sge,
			    SGL_RESP | SGL_LAST, &resp_cnt);
			if (! sge) {
				return (1);
			}
		}

		/* Word 0-2: BDE of the FCP command; word 3: cmd+rsp bytes */
		wqe->un.FcpCmd.Payload.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		wqe->un.FcpCmd.Payload.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = cmd_cnt;
		wqe->un.FcpCmd.PayloadLength = cmd_cnt + resp_cnt;

	} else {

		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
			/* CMD payload */
			sge = emlxs_pkt_to_sgl(port, pkt, sge,
			    SGL_CMD | SGL_LAST, &cmd_cnt);
			if (! sge) {
				return (1);
			}
		} else {
			/* CMD payload */
			sge = emlxs_pkt_to_sgl(port, pkt, sge,
			    SGL_CMD, &cmd_cnt);
			if (! sge) {
				return (1);
			}

			/* RSP payload */
			sge = emlxs_pkt_to_sgl(port, pkt, sge,
			    SGL_RESP | SGL_LAST, &resp_cnt);
			if (! sge) {
				return (1);
			}
			wqe->un.GenReq.PayloadLength = cmd_cnt;
		}

		wqe->un.GenReq.Payload.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		wqe->un.GenReq.Payload.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		wqe->un.GenReq.Payload.tus.f.bdeSize = cmd_cnt;
	}
	return (0);
} /* emlxs_sli4_bde_setup */
2654 
2655 
2656 
2657 
2658 #ifdef SFCT_SUPPORT
/*
 * Build the SGL and WQE payload descriptors for a target-mode (SFCT)
 * data transfer.
 *
 * For TRECEIVE64 the first SGE points at a freshly allocated XFER_RDY
 * payload; for TSEND64 the first SGE is a SKIP entry and the BDE
 * points directly at the data buffer.  The second SGE is always a
 * SKIP, followed by the data SGEs built from the single supported
 * sglist entry.
 *
 * Returns 0 on success (or when there is no fct_buf), 1 on failure.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_wqe_t *wqe;
	ULP_SGE64 stage_sge;
	ULP_SGE64 *sge;
	IOCB *iocb;
	IOCBQ *iocbq;
	MATCHMAP *mp;
	MATCHMAP *fct_mp;
	XRIobj_t *xrip;
	uint64_t sge_addr;
	uint32_t sge_size;
	uint32_t cnt;
	uint32_t len;
	uint32_t size;
	uint32_t *xrdy_vaddr;
	stmf_data_buf_t *dbuf;

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;
	wqe = &iocbq->wqe;
	xrip = sbp->xrip;

	if (!sbp->fct_buf) {
		return (0);
	}

	size = sbp->fct_buf->db_data_size;

	/*
	 * The hardware will automaticlly round up
	 * to multiple of 4.
	 *
	 * if (size & 3) {
	 *	size = (size + 3) & 0xfffffffc;
	 * }
	 */
	fct_mp = (MATCHMAP *)sbp->fct_buf->db_port_private;

	if (sbp->fct_buf->db_sglist_length != 1) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "fct_bde_setup: Only 1 sglist entry supported: %d",
		    sbp->fct_buf->db_sglist_length);
		return (1);
	}

	sge = xrip->SGList.virt;

	if (iocb->ULPCOMMAND == CMD_FCP_TRECEIVE64_CX) {

		/* TRECEIVE64: build an XFER_RDY payload for SGE 0 */
		mp = emlxs_mem_buf_alloc(hba, EMLXS_XFER_RDY_SIZE);
		if (!mp || !mp->virt || !mp->phys) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
			    "fct_bde_setup: Cannot allocate XRDY memory");
			return (1);
		}
		/* Save the MATCHMAP info to free this memory later */
		iocbq->bp = mp;

		/* Point to XRDY payload */
		xrdy_vaddr = (uint32_t *)(mp->virt);

		/* Fill in burstsize in payload */
		*xrdy_vaddr++ = 0;
		*xrdy_vaddr++ = LE_SWAP32(size);
		*xrdy_vaddr = 0;

		/* First 2 SGEs are XRDY and SKIP */
		stage_sge.addrHigh = PADDR_HI(mp->phys);
		stage_sge.addrLow = PADDR_LO(mp->phys);
		stage_sge.length = EMLXS_XFER_RDY_SIZE;
		stage_sge.offset = 0;
		stage_sge.type = 0;
		stage_sge.last = 0;

		/* Words  0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = stage_sge.addrHigh;
		wqe->un.FcpCmd.Payload.addrLow = stage_sge.addrLow;
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = EMLXS_XFER_RDY_SIZE;
		wqe->un.FcpCmd.PayloadLength = EMLXS_XFER_RDY_SIZE;

	} else {	/* CMD_FCP_TSEND64_CX */
		/* First 2 SGEs are SKIP */
		stage_sge.addrHigh = 0;
		stage_sge.addrLow = 0;
		stage_sge.length = 0;
		stage_sge.offset = 0;
		stage_sge.type = EMLXS_SGE_TYPE_SKIP;
		stage_sge.last = 0;

		/* Words  0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = PADDR_HI(fct_mp->phys);
		wqe->un.FcpCmd.Payload.addrLow = PADDR_LO(fct_mp->phys);

		/* The BDE should match the contents of the first SGE payload */
		len = MIN(EMLXS_MAX_SGE_SIZE, size);
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = len;

		/* The PayloadLength should be set to 0 for TSEND64. */
		wqe->un.FcpCmd.PayloadLength = 0;
	}

	dbuf = sbp->fct_buf;
	/*
	 * TotalTransferCount equals to Relative Offset field (Word 4)
	 * in both TSEND64 and TRECEIVE64 WQE.
	 */
	wqe->un.FcpCmd.TotalTransferCount = dbuf->db_relative_offset;

	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));
	sge++;

	/* Second SGE is always a SKIP entry */
	stage_sge.addrHigh = 0;
	stage_sge.addrLow = 0;
	stage_sge.length = 0;
	stage_sge.offset = 0;
	stage_sge.type = EMLXS_SGE_TYPE_SKIP;
	stage_sge.last = 0;

	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));
	sge++;

	sge_size = size;
	sge_addr = fct_mp->phys;
	cnt = 0;

	/* Build SGEs */
	while (sge_size) {
		if (cnt) {
			/* Copy staged SGE before we build next one */
			BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
			    (uint8_t *)sge, sizeof (ULP_SGE64));
			sge++;
		}

		len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);

		stage_sge.addrHigh = PADDR_HI(sge_addr);
		stage_sge.addrLow = PADDR_LO(sge_addr);
		stage_sge.length = len;
		stage_sge.offset = cnt;
		stage_sge.type = EMLXS_SGE_TYPE_DATA;

		sge_addr += len;
		sge_size -= len;
		cnt += len;
	}

	stage_sge.last = 1;

	if (hba->sli.sli4.flag & EMLXS_SLI4_PHON) {
		/* FirstData BDE mirrors the last staged data SGE */
		wqe->FirstData.addrHigh = stage_sge.addrHigh;
		wqe->FirstData.addrLow = stage_sge.addrLow;
		wqe->FirstData.tus.f.bdeSize = stage_sge.length;
	}
	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));

	return (0);

} /* emlxs_sli4_fct_bde_setup */
2828 #endif /* SFCT_SUPPORT */
2829 
2830 
/*
 * Fast-path transmit routine: drain the channel's tx queue (plus the
 * optional new iocbq) into the channel's SLI4 work queue.
 *
 * iocbq may be NULL, meaning "just try to drain the tx queue".  An
 * iocbq that cannot be issued (adapter not ready, lock contention,
 * throttle, WQ full) is requeued via emlxs_tx_put for a later retry.
 *
 * Locking: takes EMLXS_QUE_LOCK(channelno) for the duration of WQE
 * posting; EMLXS_TX_CHANNEL_LOCK is taken briefly around tx-queue
 * manipulation.
 */
static void
emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
	emlxs_port_t *port = &PPORT;	/* used implicitly by EMLXS_MSGF */
	emlxs_buf_t *sbp;
	uint32_t channelno;
	int32_t throttle;
	emlxs_wqe_t *wqe;
	emlxs_wqe_t *wqeslot;
	WQ_DESC_t *wq;
	uint32_t flag;
	uint32_t wqdb;
	uint16_t next_wqe;
	off_t offset;
#ifdef NODE_THROTTLE_SUPPORT
	int32_t node_throttle;
	NODELIST *marked_node = NULL;
#endif /* NODE_THROTTLE_SUPPORT */


	channelno = cp->channelno;
	wq = (WQ_DESC_t *)cp->iopath;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "ISSUE WQE channel: %x  %p", channelno, wq);
#endif /* DEBUG_FASTPATH */

	throttle = 0;

	/* Check if FCP ring and adapter is not ready */
	/* We may use any ring for FCP_CMD */
	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
		    (((emlxs_port_t *)iocbq->port)->mode == MODE_INITIATOR)) {
			emlxs_tx_put(iocbq, 1);
			return;
		}
	}

	/* Attempt to acquire CMD_RING lock */
	if (mutex_tryenter(&EMLXS_QUE_LOCK(channelno)) == 0) {
		/* Queue it for later */
		if (iocbq) {
			if ((hba->io_count -
			    hba->channel_tx_count) > 10) {
				emlxs_tx_put(iocbq, 1);
				return;
			} else {
				/* Few IOs in flight: worth blocking for */
				mutex_enter(&EMLXS_QUE_LOCK(channelno));
			}
		} else {
			return;
		}
	}
	/* EMLXS_QUE_LOCK acquired */

	/* Throttle check only applies to non special iocb */
	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
		/* Check if HBA is full */
		throttle = hba->io_throttle - hba->io_active;
		if (throttle <= 0) {
			/* Hitting adapter throttle limit */
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/* Check to see if we have room for this WQE */
	next_wqe = wq->host_index + 1;
	if (next_wqe >= wq->max_index) {
		next_wqe = 0;
	}

	if (next_wqe == wq->port_index) {
		/* Queue it for later */
		if (iocbq) {
			emlxs_tx_put(iocbq, 1);
		}
		goto busy;
	}

	/*
	 * We have a command ring slot available
	 * Make sure we have an iocb to send
	 */
	if (iocbq) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

		/* Check if the ring already has iocb's waiting */
		if (cp->nodeq.q_first != NULL) {
			/* Put the current iocbq on the tx queue */
			emlxs_tx_put(iocbq, 0);

			/*
			 * Attempt to replace it with the next iocbq
			 * in the tx queue
			 */
			iocbq = emlxs_tx_get(cp, 0);
		}

		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	} else {
		iocbq = emlxs_tx_get(cp, 1);
	}

/*
 * NOTE(review): no goto in this function targets sendit; the label
 * appears unused — confirm before removing.
 */
sendit:
	/* Process each iocbq */
	while (iocbq) {
		sbp = iocbq->sbp;

#ifdef NODE_THROTTLE_SUPPORT
		if (sbp && sbp->node && sbp->node->io_throttle) {
			node_throttle = sbp->node->io_throttle -
			    sbp->node->io_active;
			if (node_throttle <= 0) {
				/* Node is busy */
				/* Queue this iocb and get next iocb from */
				/* channel */

				if (!marked_node) {
					marked_node = sbp->node;
				}

				mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
				emlxs_tx_put(iocbq, 0);

				/* Stop once we wrap back to the marked node */
				if (cp->nodeq.q_first == marked_node) {
					mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
					goto busy;
				}

				iocbq = emlxs_tx_get(cp, 0);
				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
				continue;
			}
		}
		marked_node = 0;
#endif /* NODE_THROTTLE_SUPPORT */

		wqe = &iocbq->wqe;
#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ISSUE QID %d WQE iotag:%x xri:%d", wq->qid,
		    wqe->RequestTag, wqe->XRITag);
#endif /* DEBUG_FASTPATH */

		if (sbp) {
			/* If exchange removed after wqe was prep'ed, drop it */
			if (!(sbp->xrip)) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "Xmit WQE iotag:%x xri:%d aborted",
				    wqe->RequestTag, wqe->XRITag);

				/* Get next iocb from the tx queue */
				iocbq = emlxs_tx_get(cp, 1);
				continue;
			}

			if (sbp->pkt_flags & PACKET_DELAY_REQUIRED) {

				/* Perform delay */
				if ((channelno == hba->channel_els) &&
				    !(iocbq->flag & IOCB_FCP_CMD)) {
					drv_usecwait(100000);
				} else {
					drv_usecwait(20000);
				}
			}

			/* Check for ULP pkt request */
			mutex_enter(&sbp->mtx);

			if (sbp->node == NULL) {
				/* Set node to base node by default */
				iocbq->node = (void *)&port->node_base;
				sbp->node = (void *)&port->node_base;
			}

			sbp->pkt_flags |= PACKET_IN_CHIPQ;
			mutex_exit(&sbp->mtx);

			atomic_inc_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
			if (sbp->node) {
				atomic_inc_32(&sbp->node->io_active);
			}
#endif /* NODE_THROTTLE_SUPPORT */

			sbp->xrip->flag |= EMLXS_XRI_PENDING_IO;
#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
			if (sbp->fct_cmd) {
				emlxs_fct_io_trace(port, sbp->fct_cmd,
				    EMLXS_FCT_IOCB_ISSUED);
				/*
				 * NOTE(review): 'icmd' is not declared in
				 * this function; this compiles only while
				 * FCT_IO_TRACE is disabled — confirm.
				 */
				emlxs_fct_io_trace(port, sbp->fct_cmd,
				    icmd->ULPCOMMAND);
			}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */
			cp->hbaSendCmd_sbp++;
			iocbq->channel = cp;
		} else {
			cp->hbaSendCmd++;
		}

		flag = iocbq->flag;

		/*
		 * At this point, we have a command ring slot available
		 * and an iocb to send
		 */
		wq->release_depth--;
		if (wq->release_depth == 0) {
			/* Ask hardware for a WQE-release completion */
			wq->release_depth = WQE_RELEASE_DEPTH;
			wqe->WQEC = 1;
		}

		HBASTATS.IocbIssued[channelno]++;
		wq->num_proc++;

		/* Send the iocb */
		wqeslot = (emlxs_wqe_t *)wq->addr.virt;
		wqeslot += wq->host_index;

		wqe->CQId = wq->cqid;
		if (hba->sli.sli4.param.PHWQ) {
			WQE_PHWQ_WQID(wqe, wq->qid);
		}
		BE_SWAP32_BCOPY((uint8_t *)wqe, (uint8_t *)wqeslot,
		    sizeof (emlxs_wqe_t));
#ifdef DEBUG_WQE
		emlxs_data_dump(port, "WQE", (uint32_t *)wqe, 18, 0);
#endif /* DEBUG_WQE */
		/* Sync the WQ region within the slim2 DMA area */
		offset = (off_t)((uint64_t)((unsigned long)
		    wq->addr.virt) -
		    (uint64_t)((unsigned long)
		    hba->sli.sli4.slim2.virt));

		EMLXS_MPDATA_SYNC(wq->addr.dma_handle, offset,
		    4096, DDI_DMA_SYNC_FORDEV);

		/* Ring the WQ Doorbell */
		wqdb = wq->qid;
		wqdb |= ((1 << 24) | (wq->host_index << 16));

		/*
		 * After this, the sbp / iocb / wqe should not be
		 * accessed in the xmit path.
		 */

		emlxs_sli4_write_wqdb(hba, wqdb);
		wq->host_index = next_wqe;

#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "WQ RING: %08x", wqdb);
#endif /* DEBUG_FASTPATH */

		/* Driver-internal iocbqs are returned to the pool here */
		if (!sbp) {
			emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
		}

		if (iocbq && (!(flag & IOCB_SPECIAL))) {
			/* Check if HBA is full */
			throttle = hba->io_throttle - hba->io_active;
			if (throttle <= 0) {
				goto busy;
			}
		}

		/* Check to see if we have room for another WQE */
		next_wqe++;
		if (next_wqe >= wq->max_index) {
			next_wqe = 0;
		}

		if (next_wqe == wq->port_index) {
			/* Queue it for later */
			goto busy;
		}

		/* Get the next iocb from the tx queue if there is one */
		iocbq = emlxs_tx_get(cp, 1);
	}

	mutex_exit(&EMLXS_QUE_LOCK(channelno));

	return;

busy:
	wq->num_busy++;
	if (throttle <= 0) {
		HBASTATS.IocbThrottled++;
	} else {
		HBASTATS.IocbRingFull[channelno]++;
	}

	mutex_exit(&EMLXS_QUE_LOCK(channelno));

	return;

} /* emlxs_sli4_issue_iocb_cmd() */
3139 
3140 
/*
 * Post a mailbox command to the SLI4 Mailbox Queue.
 *
 * Embedded commands (everything except non-embedded MBX_SLI_CONFIG)
 * are byte-swapped directly into the MQ entry.  Non-embedded
 * SLI_CONFIG commands instead place an SGE in the MQ entry pointing
 * at the external payload buffer (mbq->nonembed).  In both cases the
 * MQ doorbell is rung to hand the entry to the port.
 *
 * tmo is unused here (ARGSUSED); completion is handled elsewhere.
 * Always returns MBX_SUCCESS.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_issue_mq(emlxs_port_t *port, MAILBOX4 *mqe, MAILBOX *mb,
    uint32_t tmo)
{
	emlxs_hba_t *hba = HBA;
	MAILBOXQ	*mbq;
	MAILBOX4	*mb4;
	MATCHMAP	*mp;
	uint32_t	*iptr;
	uint32_t	mqdb;
	off_t		offset;

	mbq = (MAILBOXQ *)mb;
	mb4 = (MAILBOX4 *)mb;
	mp = (MATCHMAP *) mbq->nonembed;
	/* Remember the active MQ entry for the completion handler */
	hba->mbox_mqe = (void *)mqe;

	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
	    (mb4->un.varSLIConfig.be.embedded)) {
		/*
		 * If this is an embedded mbox, everything should fit
		 * into the mailbox area.
		 */
		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
		    MAILBOX_CMD_SLI4_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,
		    4096, DDI_DMA_SYNC_FORDEV);

		if (mb->mbxCommand != MBX_HEARTBEAT) {
			emlxs_data_dump(port, "MBOX CMD", (uint32_t *)mqe,
			    18, 0);
		}
	} else {
		/* SLI_CONFIG and non-embedded */

		/*
		 * If this is not embedded, the MQ area
		 * MUST contain a SGE pointer to a larger area for the
		 * non-embedded mailbox command.
		 * mp will point to the actual mailbox command which
		 * should be copied into the non-embedded area.
		 */
		mb4->un.varSLIConfig.be.sge_cnt = 1;
		mb4->un.varSLIConfig.be.payload_length = mp->size;
		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
		*iptr = mp->size;

		BE_SWAP32_BUFFER(mp->virt, mp->size);

		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORDEV);

		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
		    MAILBOX_CMD_SLI4_BSIZE);

		/* Sync the MQ region within the slim2 DMA area */
		offset = (off_t)((uint64_t)((unsigned long)
		    hba->sli.sli4.mq.addr.virt) -
		    (uint64_t)((unsigned long)
		    hba->sli.sli4.slim2.virt));

		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
		    4096, DDI_DMA_SYNC_FORDEV);

		emlxs_data_dump(port, "MBOX EXT", (uint32_t *)mqe, 12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Extension Addr %p %p", mp->phys, (uint32_t *)(mp->virt));
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
	}

	/* Ring the MQ Doorbell */
	mqdb = hba->sli.sli4.mq.qid;
	mqdb |= ((1 << MQ_DB_POP_SHIFT) & MQ_DB_POP_MASK);

	if (mb->mbxCommand != MBX_HEARTBEAT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "MQ RING: %08x", mqdb);
	}

	emlxs_sli4_write_mqdb(hba, mqdb);

	return (MBX_SUCCESS);

} /* emlxs_sli4_issue_mq() */
3228 
3229 
/*
 * Issue a mailbox command through the SLI4 bootstrap mailbox and poll
 * for its completion.  Used when interrupts are unavailable (early init,
 * polled mode, quiesce).
 *
 *   hba - adapter instance
 *   mb  - mailbox command; actually the embedded MAILBOX of a MAILBOXQ
 *   tmo - completion timeout in 10 millisecond ticks
 *
 * On success the completed command (and any non-embedded response area)
 * has been copied back into 'mb'/'mp'.  Returns MBX_SUCCESS,
 * MBX_TIMEOUT, or (with FMA) MBXERR_DMA_ERROR.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_issue_bootstrap(emlxs_hba_t *hba, MAILBOX *mb, uint32_t tmo)
{
	emlxs_port_t	*port = &PPORT;
	MAILBOXQ	*mbq;
	MAILBOX4	*mb4;
	MATCHMAP	*mp = NULL;
	uint32_t	*iptr;
	int		nonembed = 0;

	mbq = (MAILBOXQ *)mb;
	mb4 = (MAILBOX4 *)mb;
	mp = (MATCHMAP *) mbq->nonembed;
	/* Completion handler expects the active MQE here */
	hba->mbox_mqe = hba->sli.sli4.bootstrapmb.virt;

	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
	    (mb4->un.varSLIConfig.be.embedded)) {
		/*
		 * If this is an embedded mbox, everything should fit
		 * into the bootstrap mailbox area.
		 */
		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
		    MAILBOX_CMD_SLI4_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORDEV);
		emlxs_data_dump(port, "MBOX CMD", iptr, 18, 0);
	} else {
		/*
		 * If this is not embedded, the bootstrap mailbox area
		 * MUST contain a SGE pointer to a larger area for the
		 * non-embedded mailbox command.
		 * mp will point to the actual mailbox command which
		 * should be copied into the non-embedded area.
		 */
		nonembed = 1;
		mb4->un.varSLIConfig.be.sge_cnt = 1;
		mb4->un.varSLIConfig.be.payload_length = mp->size;
		/* Write the single SGE (low addr, high addr, length) */
		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
		*iptr = mp->size;

		/* Byte-swap the external payload and push it to the device */
		BE_SWAP32_BUFFER(mp->virt, mp->size);

		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORDEV);

		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
		    MAILBOX_CMD_SLI4_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
		    DDI_DMA_SYNC_FORDEV);

		emlxs_data_dump(port, "MBOX EXT", iptr, 12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Extension Addr %p %p", mp->phys,
		    (uint32_t *)((uint8_t *)mp->virt));
		iptr = (uint32_t *)((uint8_t *)mp->virt);
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
	}


	/* NOTE: tmo is in 10ms ticks */
	/* Ring the bootstrap doorbell and poll for completion */
	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
		return (MBX_TIMEOUT);
	}

	/* Copy the completed command (and any response payload) back out */
	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
	    (mb4->un.varSLIConfig.be.embedded)) {
		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORKERNEL);

		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
		    MAILBOX_CMD_SLI4_BSIZE);

		emlxs_data_dump(port, "MBOX CMP", iptr, 18, 0);

	} else {
		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
		    DDI_DMA_SYNC_FORKERNEL);

		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORKERNEL);

		/* Swap the response payload back to host order */
		BE_SWAP32_BUFFER(mp->virt, mp->size);

		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
		    MAILBOX_CMD_SLI4_BSIZE);

		emlxs_data_dump(port, "MBOX CMP", iptr, 12, 0);
		iptr = (uint32_t *)((uint8_t *)mp->virt);
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)iptr, 24, 0);
	}

#ifdef FMA_SUPPORT
	/* Verify the DMA handles survived the transaction */
	if (nonembed && mp) {
		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "sli4_issue_bootstrap: mp_hdl=%p",
			    mp->dma_handle);
			return (MBXERR_DMA_ERROR);
		}
	}

	if (emlxs_fm_check_dma_handle(hba,
	    hba->sli.sli4.bootstrapmb.dma_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_dma_handle_msg,
		    "sli4_issue_bootstrap: hdl=%p",
		    hba->sli.sli4.bootstrapmb.dma_handle);
		return (MBXERR_DMA_ERROR);
	}
#endif

	return (MBX_SUCCESS);

} /* emlxs_sli4_issue_bootstrap() */
3358 
3359 
/*
 * Top-level SLI4 mailbox dispatch routine.
 *
 * Normalizes the timeout, selects the actual wait mode (NOWAIT may be
 * downgraded only if interrupts are enabled; otherwise SLEEP vs POLL is
 * chosen from the interrupt state), serializes against any in-flight
 * mailbox, and then issues the command either through the MQ (NOWAIT /
 * SLEEP) or the bootstrap mailbox (POLL).
 *
 *   hba  - adapter instance
 *   mbq  - mailbox command to issue
 *   flag - caller's wait preference (MBX_NOWAIT or blocking)
 *   tmo  - timeout in seconds; raised to a per-command minimum below
 *
 * Returns MBX_SUCCESS, MBX_BUSY (queued for later), MBX_TIMEOUT,
 * MBX_HARDWARE_ERROR, or the command's completion status.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
    uint32_t tmo)
{
	emlxs_port_t	*port;
	MAILBOX4	*mb4;
	MAILBOX		*mb;
	mbox_rsp_hdr_t	*hdr_rsp;
	MATCHMAP	*mp;
	uint32_t	*iptr;
	uint32_t	rc;
	uint32_t	i;
	uint32_t	tmo_local;

	/* Default to the physical port for logging/completion context */
	if (!mbq->port) {
		mbq->port = &PPORT;
	}

	port = (emlxs_port_t *)mbq->port;

	mb4 = (MAILBOX4 *)mbq;
	mb = (MAILBOX *)mbq;

	mb->mbxStatus = MBX_SUCCESS;
	rc = MBX_SUCCESS;

	/* Check for minimum timeouts */
	switch (mb->mbxCommand) {
	/* Mailbox commands that erase/write flash get 300s minimum */
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_WRITE_NV:
	case MBX_FLASH_WR_ULA:
	case MBX_DEL_LD_ENTRY:
	case MBX_LOAD_SM:
	case MBX_DUMP_MEMORY:
	case MBX_WRITE_VPARMS:
	case MBX_ACCESS_VDATA:
		if (tmo < 300) {
			tmo = 300;
		}
		break;

	case MBX_SLI_CONFIG: {
		/* Minimum depends on the embedded subsystem/opcode */
		mbox_req_hdr_t *hdr_req;

		hdr_req = (mbox_req_hdr_t *)
		    &mb4->un.varSLIConfig.be.un_hdr.hdr_req;

		if (hdr_req->subsystem == IOCTL_SUBSYSTEM_COMMON) {
			switch (hdr_req->opcode) {
			/* Long-running flash/object/profile operations */
			case COMMON_OPCODE_WRITE_OBJ:
			case COMMON_OPCODE_READ_OBJ:
			case COMMON_OPCODE_READ_OBJ_LIST:
			case COMMON_OPCODE_DELETE_OBJ:
			case COMMON_OPCODE_SET_BOOT_CFG:
			case COMMON_OPCODE_GET_PROFILE_CFG:
			case COMMON_OPCODE_SET_PROFILE_CFG:
			case COMMON_OPCODE_GET_PROFILE_LIST:
			case COMMON_OPCODE_SET_ACTIVE_PROFILE:
			case COMMON_OPCODE_GET_PROFILE_CAPS:
			case COMMON_OPCODE_GET_MR_PROFILE_CAPS:
			case COMMON_OPCODE_SET_MR_PROFILE_CAPS:
			case COMMON_OPCODE_SET_FACTORY_PROFILE_CFG:
			case COMMON_OPCODE_SEND_ACTIVATION:
			case COMMON_OPCODE_RESET_LICENSES:
			case COMMON_OPCODE_SET_PHYSICAL_LINK_CFG_V1:
			case COMMON_OPCODE_GET_VPD_DATA:
				if (tmo < 300) {
					tmo = 300;
				}
				break;
			default:
				if (tmo < 30) {
					tmo = 30;
				}
			}
		} else if (hdr_req->subsystem == IOCTL_SUBSYSTEM_FCOE) {
			switch (hdr_req->opcode) {
			case FCOE_OPCODE_SET_FCLINK_SETTINGS:
				if (tmo < 300) {
					tmo = 300;
				}
				break;
			default:
				if (tmo < 30) {
					tmo = 30;
				}
			}
		} else {
			if (tmo < 30) {
				tmo = 30;
			}
		}

		/*
		 * Also: VENDOR_MANAGE_FFV  (0x13, 0x02) (not currently used)
		 */

		break;
	}
	default:
		/* Everything else gets a 30 second minimum */
		if (tmo < 30) {
			tmo = 30;
		}
		break;
	}

	/* Convert tmo seconds to 10 millisecond tics */
	tmo_local = tmo * 100;

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Adjust wait flag */
	if (flag != MBX_NOWAIT) {
		/* Blocking callers sleep if interrupts work, else poll */
		if (hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED) {
			flag = MBX_SLEEP;
		} else {
			flag = MBX_POLL;
		}
	} else {
		/* Must have interrupts enabled to perform MBX_NOWAIT */
		if (!(hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED)) {

			mb->mbxStatus = MBX_HARDWARE_ERROR;
			mutex_exit(&EMLXS_PORT_LOCK);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Interrupts disabled. %s failed.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));

			return (MBX_HARDWARE_ERROR);
		}
	}

	/* Check for hardware error ; special case SLI_CONFIG */
	/* (a COMMON_OPCODE_RESET is still allowed through) */
	if ((hba->flag & FC_HARDWARE_ERROR) &&
	    ! ((mb4->mbxCommand == MBX_SLI_CONFIG) &&
	    (mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode ==
	    COMMON_OPCODE_RESET))) {
		mb->mbxStatus = MBX_HARDWARE_ERROR;

		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Hardware error reported. %s failed. status=%x mb=%p",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb->mbxStatus, mb);

		return (MBX_HARDWARE_ERROR);
	}

	/* Serialize: only one mailbox command may be outstanding */
	if (hba->mbox_queue_flag) {
		/* If we are not polling, then queue it for later */
		if (flag == MBX_NOWAIT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Busy.      %s: mb=%p NoWait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);

			emlxs_mb_put(hba, mbq);

			HBASTATS.MboxBusy++;

			mutex_exit(&EMLXS_PORT_LOCK);

			return (MBX_BUSY);
		}

		/* Busy-wait (10ms steps) for the active mailbox to finish */
		while (hba->mbox_queue_flag) {
			mutex_exit(&EMLXS_PORT_LOCK);

			if (tmo_local-- == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_event_msg,
				    "Timeout.   %s: mb=%p tmo=%d Waiting.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    tmo);

				/* Non-lethal mailbox timeout */
				/* Does not indicate a hardware error */
				mb->mbxStatus = MBX_TIMEOUT;
				return (MBX_TIMEOUT);
			}

			BUSYWAIT_MS(10);
			mutex_enter(&EMLXS_PORT_LOCK);

			/* Check for hardware error ; special case SLI_CONFIG */
			if ((hba->flag & FC_HARDWARE_ERROR) &&
			    ! ((mb4->mbxCommand == MBX_SLI_CONFIG) &&
			    (mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode ==
			    COMMON_OPCODE_RESET))) {
				mb->mbxStatus = MBX_HARDWARE_ERROR;

				mutex_exit(&EMLXS_PORT_LOCK);

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Hardware error reported. %s failed. "
				    "status=%x mb=%p",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    mb->mbxStatus, mb);

				return (MBX_HARDWARE_ERROR);
			}
		}
	}

	/* Initialize mailbox area (marks the mailbox as active) */
	emlxs_mb_init(hba, mbq, flag, tmo);

	if (mb->mbxCommand == MBX_DOWN_LINK) {
		hba->sli.sli4.flag |= EMLXS_SLI4_DOWN_LINK;
	}

	mutex_exit(&EMLXS_PORT_LOCK);
	switch (flag) {

	case MBX_NOWAIT:
		/* Fire-and-forget through the MQ; completion via interrupt */
		if (mb->mbxCommand != MBX_HEARTBEAT) {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Sending.   %s: mb=%p NoWait. embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}
		}

		/* Claim the next MQ ring entry (wraps at max_index) */
		iptr = hba->sli.sli4.mq.addr.virt;
		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
		hba->sli.sli4.mq.host_index++;
		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
			hba->sli.sli4.mq.host_index = 0;
		}

		if (mbq->bp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "BDE virt %p phys %p size x%x",
			    ((MATCHMAP *)mbq->bp)->virt,
			    ((MATCHMAP *)mbq->bp)->phys,
			    ((MATCHMAP *)mbq->bp)->size);
			emlxs_data_dump(port, "DATA",
			    (uint32_t *)(((MATCHMAP *)mbq->bp)->virt), 30, 0);
		}
		rc = emlxs_sli4_issue_mq(port, (MAILBOX4 *)iptr, mb, tmo_local);
		break;

	case MBX_POLL:
		/* Synchronous issue through the bootstrap mailbox */
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending.   %s: mb=%p Poll. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		}

		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);

		/* Clean up the mailbox area */
		if (rc == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Timeout.   %s: mb=%p tmo=%x Poll. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));

			/* A bootstrap timeout is fatal to the adapter */
			hba->flag |= FC_MBOX_TIMEOUT;
			EMLXS_STATE_CHANGE(hba, FC_ERROR);
			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

		} else {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Completed.   %s: mb=%p status=%x Poll. "
				    "embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}

			/* Process the result */
			if (!(mbq->flag & MBQ_PASSTHRU)) {
				if (mbq->mbox_cmpl) {
					(void) (mbq->mbox_cmpl)(hba, mbq);
				}
			}

			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
		}

		/* Fold any non-embedded response failure into mbxStatus */
		mp = (MATCHMAP *)mbq->nonembed;
		if (mp) {
			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
			if (hdr_rsp->status) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    hdr_rsp->status, hdr_rsp->extra_status);

				mb->mbxStatus = MBX_NONEMBED_ERROR;
			}
		}
		rc = mb->mbxStatus;

		/* Attempt to send pending mailboxes */
		mbq = (MAILBOXQ *)emlxs_mb_get(hba);
		if (mbq) {
			/* Attempt to send pending mailboxes */
			i =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
			if ((i != MBX_BUSY) && (i != MBX_SUCCESS)) {
				emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
			}
		}
		break;

	case MBX_SLEEP:
		/* Issue through the MQ, then block on the completion CV */
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending.   %s: mb=%p Sleep. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		}

		/* Claim the next MQ ring entry (wraps at max_index) */
		iptr = hba->sli.sli4.mq.addr.virt;
		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
		hba->sli.sli4.mq.host_index++;
		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
			hba->sli.sli4.mq.host_index = 0;
		}

		rc = emlxs_sli4_issue_mq(port, (MAILBOX4 *)iptr, mb, tmo_local);

		if (rc != MBX_SUCCESS) {
			break;
		}

		/* Wait for completion */
		/* The driver clock is timing the mailbox. */

		mutex_enter(&EMLXS_MBOX_LOCK);
		while (!(mbq->flag & MBQ_COMPLETED)) {
			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
		}
		mutex_exit(&EMLXS_MBOX_LOCK);

		/* Fold any non-embedded response failure into mbxStatus */
		mp = (MATCHMAP *)mbq->nonembed;
		if (mp) {
			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
			if (hdr_rsp->status) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    hdr_rsp->status, hdr_rsp->extra_status);

				mb->mbxStatus = MBX_NONEMBED_ERROR;
			}
		}
		rc = mb->mbxStatus;

		if (rc == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Timeout.   %s: mb=%p tmo=%x Sleep. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		} else {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Completed.   %s: mb=%p status=%x Sleep. "
				    "embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}
		}
		break;
	}

	return (rc);

} /* emlxs_sli4_issue_mbox_cmd() */
3755 
3756 
3757 
3758 /*ARGSUSED*/
3759 static uint32_t
emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t * hba,MAILBOXQ * mbq,int32_t flag,uint32_t tmo)3760 emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
3761     uint32_t tmo)
3762 {
3763 	emlxs_port_t	*port = &PPORT;
3764 	MAILBOX		*mb;
3765 	mbox_rsp_hdr_t	*hdr_rsp;
3766 	MATCHMAP	*mp;
3767 	uint32_t	rc;
3768 	uint32_t	tmo_local;
3769 
3770 	mb = (MAILBOX *)mbq;
3771 
3772 	mb->mbxStatus = MBX_SUCCESS;
3773 	rc = MBX_SUCCESS;
3774 
3775 	if (tmo < 30) {
3776 		tmo = 30;
3777 	}
3778 
3779 	/* Convert tmo seconds to 10 millisecond tics */
3780 	tmo_local = tmo * 100;
3781 
3782 	flag = MBX_POLL;
3783 
3784 	/* Check for hardware error */
3785 	if (hba->flag & FC_HARDWARE_ERROR) {
3786 		mb->mbxStatus = MBX_HARDWARE_ERROR;
3787 		return (MBX_HARDWARE_ERROR);
3788 	}
3789 
3790 	/* Initialize mailbox area */
3791 	emlxs_mb_init(hba, mbq, flag, tmo);
3792 
3793 	switch (flag) {
3794 
3795 	case MBX_POLL:
3796 
3797 		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
3798 
3799 		/* Clean up the mailbox area */
3800 		if (rc == MBX_TIMEOUT) {
3801 			hba->flag |= FC_MBOX_TIMEOUT;
3802 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
3803 			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3804 
3805 		} else {
3806 			/* Process the result */
3807 			if (!(mbq->flag & MBQ_PASSTHRU)) {
3808 				if (mbq->mbox_cmpl) {
3809 					(void) (mbq->mbox_cmpl)(hba, mbq);
3810 				}
3811 			}
3812 
3813 			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
3814 		}
3815 
3816 		mp = (MATCHMAP *)mbq->nonembed;
3817 		if (mp) {
3818 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
3819 			if (hdr_rsp->status) {
3820 				EMLXS_MSGF(EMLXS_CONTEXT,
3821 				    &emlxs_mbox_detail_msg,
3822 				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
3823 				    emlxs_mb_cmd_xlate(mb->mbxCommand),
3824 				    hdr_rsp->status, hdr_rsp->extra_status);
3825 
3826 				mb->mbxStatus = MBX_NONEMBED_ERROR;
3827 			}
3828 		}
3829 		rc = mb->mbxStatus;
3830 
3831 		break;
3832 	}
3833 
3834 	return (rc);
3835 
3836 } /* emlxs_sli4_issue_mbox_cmd4quiesce() */
3837 
3838 
3839 
3840 #ifdef SFCT_SUPPORT
3841 /*ARGSUSED*/
3842 extern uint32_t
emlxs_sli4_prep_fct_iocb(emlxs_port_t * port,emlxs_buf_t * cmd_sbp,int channel)3843 emlxs_sli4_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp, int channel)
3844 {
3845 	emlxs_hba_t *hba = HBA;
3846 	emlxs_config_t *cfg = &CFG;
3847 	fct_cmd_t *fct_cmd;
3848 	stmf_data_buf_t *dbuf;
3849 	scsi_task_t *fct_task;
3850 	fc_packet_t *pkt;
3851 	CHANNEL *cp;
3852 	XRIobj_t *xrip;
3853 	emlxs_node_t *ndlp;
3854 	IOCBQ *iocbq;
3855 	IOCB *iocb;
3856 	emlxs_wqe_t *wqe;
3857 	ULP_SGE64 stage_sge;
3858 	ULP_SGE64 *sge;
3859 	RPIobj_t *rpip;
3860 	int32_t	sge_size;
3861 	uint64_t sge_addr;
3862 	uint32_t did;
3863 	uint32_t timeout;
3864 
3865 	ddi_dma_cookie_t *cp_cmd;
3866 
3867 	pkt = PRIV2PKT(cmd_sbp);
3868 
3869 	cp = (CHANNEL *)cmd_sbp->channel;
3870 
3871 	iocbq = &cmd_sbp->iocbq;
3872 	iocb = &iocbq->iocb;
3873 
3874 	did = cmd_sbp->did;
3875 	if (iocb->ULPCOMMAND == CMD_ABORT_XRI_CX) {
3876 
3877 		ndlp = cmd_sbp->node;
3878 		rpip = EMLXS_NODE_TO_RPI(port, ndlp);
3879 
3880 		if (!rpip) {
3881 			/* Use the fabric rpi */
3882 			rpip = port->vpip->fabric_rpip;
3883 		}
3884 
3885 		/* Next allocate an Exchange for this command */
3886 		xrip = emlxs_sli4_alloc_xri(port, cmd_sbp, rpip,
3887 		    EMLXS_XRI_SOL_BLS_TYPE);
3888 
3889 		if (!xrip) {
3890 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3891 			    "Adapter Busy. Unable to allocate exchange. "
3892 			    "did=0x%x", did);
3893 
3894 			return (FC_TRAN_BUSY);
3895 		}
3896 
3897 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3898 		    "FCT Abort Request: xri=%d iotag=%d sbp=%p rxid=%x",
3899 		    xrip->XRI, xrip->iotag, cmd_sbp, pkt->pkt_cmd_fhdr.rx_id);
3900 
3901 		cmd_sbp->xrip = xrip;
3902 
3903 		cp->ulpSendCmd++;
3904 
3905 		/* Initalize iocbq */
3906 		iocbq->port = (void *)port;
3907 		iocbq->node = (void *)ndlp;
3908 		iocbq->channel = (void *)cp;
3909 
3910 		/*
3911 		 * Don't give the abort priority, we want the IOCB
3912 		 * we are aborting to be processed first.
3913 		 */
3914 		iocbq->flag |= IOCB_SPECIAL;
3915 
3916 		wqe = &iocbq->wqe;
3917 		bzero((void *)wqe, sizeof (emlxs_wqe_t));
3918 
3919 		wqe = &iocbq->wqe;
3920 		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
3921 		wqe->RequestTag = xrip->iotag;
3922 		wqe->AbortTag = pkt->pkt_cmd_fhdr.rx_id;
3923 		wqe->Command = CMD_ABORT_XRI_CX;
3924 		wqe->Class = CLASS3;
3925 		wqe->CQId = 0xffff;
3926 		wqe->CmdType = WQE_TYPE_ABORT;
3927 
3928 		if (hba->state >= FC_LINK_UP) {
3929 			wqe->un.Abort.IA = 0;
3930 		} else {
3931 			wqe->un.Abort.IA = 1;
3932 		}
3933 
3934 		/* Set the pkt timer */
3935 		cmd_sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
3936 		    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
3937 
3938 		return (IOERR_SUCCESS);
3939 
3940 	} else if (iocb->ULPCOMMAND == CMD_FCP_TRSP64_CX) {
3941 
3942 		timeout = pkt->pkt_timeout;
3943 		ndlp = cmd_sbp->node;
3944 		if (!ndlp) {
3945 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3946 			    "Unable to find rpi. did=0x%x", did);
3947 
3948 			emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
3949 			    IOERR_INVALID_RPI, 0);
3950 			return (0xff);
3951 		}
3952 
3953 		cp->ulpSendCmd++;
3954 
3955 		/* Initalize iocbq */
3956 		iocbq->port = (void *)port;
3957 		iocbq->node = (void *)ndlp;
3958 		iocbq->channel = (void *)cp;
3959 
3960 		wqe = &iocbq->wqe;
3961 		bzero((void *)wqe, sizeof (emlxs_wqe_t));
3962 
3963 		xrip = emlxs_sli4_register_xri(port, cmd_sbp,
3964 		    pkt->pkt_cmd_fhdr.rx_id, did);
3965 
3966 		if (!xrip) {
3967 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3968 			    "Unable to register xri %x. did=0x%x",
3969 			    pkt->pkt_cmd_fhdr.rx_id, did);
3970 
3971 			emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
3972 			    IOERR_NO_XRI, 0);
3973 			return (0xff);
3974 		}
3975 
3976 		cmd_sbp->iotag = xrip->iotag;
3977 		cmd_sbp->channel = cp;
3978 
3979 #if (EMLXS_MODREV >= EMLXS_MODREV3)
3980 		cp_cmd = pkt->pkt_cmd_cookie;
3981 #else
3982 		cp_cmd  = &pkt->pkt_cmd_cookie;
3983 #endif	/* >= EMLXS_MODREV3 */
3984 
3985 		sge_size = pkt->pkt_cmdlen;
3986 		/* Make size a multiple of 4 */
3987 		if (sge_size & 3) {
3988 			sge_size = (sge_size + 3) & 0xfffffffc;
3989 		}
3990 		sge_addr = cp_cmd->dmac_laddress;
3991 		sge = xrip->SGList.virt;
3992 
3993 		stage_sge.addrHigh = PADDR_HI(sge_addr);
3994 		stage_sge.addrLow = PADDR_LO(sge_addr);
3995 		stage_sge.length = sge_size;
3996 		stage_sge.offset = 0;
3997 		stage_sge.type = 0;
3998 		stage_sge.last = 1;
3999 
4000 		/* Copy staged SGE into SGL */
4001 		BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
4002 		    (uint8_t *)sge, sizeof (ULP_SGE64));
4003 
4004 		/* Words  0-3 */
4005 		wqe->un.FcpCmd.Payload.addrHigh = stage_sge.addrHigh;
4006 		wqe->un.FcpCmd.Payload.addrLow = stage_sge.addrLow;
4007 		wqe->un.FcpCmd.Payload.tus.f.bdeSize = sge_size;
4008 		wqe->un.FcpCmd.PayloadLength = sge_size;
4009 
4010 		/*  Word  6 */
4011 		wqe->ContextTag = ndlp->nlp_Rpi;
4012 		wqe->XRITag = xrip->XRI;
4013 
4014 		/*  Word  7 */
4015 		wqe->Command  = iocb->ULPCOMMAND;
4016 		wqe->Class = cmd_sbp->class;
4017 		wqe->ContextType = WQE_RPI_CONTEXT;
4018 		wqe->Timer = ((timeout > 0xff) ? 0 : timeout);
4019 
4020 		/*  Word  8 */
4021 		wqe->AbortTag = 0;
4022 
4023 		/*  Word  9 */
4024 		wqe->RequestTag = xrip->iotag;
4025 		wqe->OXId = (uint16_t)xrip->rx_id;
4026 
4027 		/*  Word  10 */
4028 		if (xrip->flag & EMLXS_XRI_BUSY) {
4029 			wqe->XC = 1;
4030 		}
4031 
4032 		if (!(hba->sli.sli4.param.PHWQ)) {
4033 			wqe->QOSd = 1;
4034 			wqe->DBDE = 1; /* Data type for BDE 0 */
4035 		}
4036 
4037 		/*  Word  11 */
4038 		wqe->CmdType = WQE_TYPE_TRSP;
4039 		wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
4040 
4041 		/* Set the pkt timer */
4042 		cmd_sbp->ticks = hba->timer_tics + timeout +
4043 		    ((timeout > 0xff) ? 0 : 10);
4044 
4045 		if (pkt->pkt_cmdlen) {
4046 			EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
4047 			    DDI_DMA_SYNC_FORDEV);
4048 		}
4049 
4050 		return (IOERR_SUCCESS);
4051 	}
4052 
4053 	fct_cmd = cmd_sbp->fct_cmd;
4054 	did = fct_cmd->cmd_rportid;
4055 	dbuf = cmd_sbp->fct_buf;
4056 	fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
4057 	ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
4058 	if (!ndlp) {
4059 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4060 		    "Unable to find rpi. did=0x%x", did);
4061 
4062 		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
4063 		    IOERR_INVALID_RPI, 0);
4064 		return (0xff);
4065 	}
4066 
4067 
4068 	/* Initalize iocbq */
4069 	iocbq->port = (void *) port;
4070 	iocbq->node = (void *)ndlp;
4071 	iocbq->channel = (void *) cp;
4072 
4073 	wqe = &iocbq->wqe;
4074 	bzero((void *)wqe, sizeof (emlxs_wqe_t));
4075 
4076 	xrip = cmd_sbp->xrip;
4077 	if (!xrip) {
4078 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4079 		    "Unable to find xri. did=0x%x", did);
4080 
4081 		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
4082 		    IOERR_NO_XRI, 0);
4083 		return (0xff);
4084 	}
4085 
4086 	if (emlxs_sli4_register_xri(port, cmd_sbp,
4087 	    xrip->XRI, ndlp->nlp_DID) == NULL) {
4088 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4089 		    "Unable to register xri. did=0x%x", did);
4090 
4091 		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
4092 		    IOERR_NO_XRI, 0);
4093 		return (0xff);
4094 	}
4095 	cmd_sbp->iotag = xrip->iotag;
4096 	cmd_sbp->channel = cp;
4097 
4098 	if (cfg[CFG_TIMEOUT_ENABLE].current) {
4099 		timeout =
4100 		    ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
4101 	} else {
4102 		timeout = 0x80000000;
4103 	}
4104 	cmd_sbp->ticks =
4105 	    hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);
4106 
4107 
4108 	iocb->ULPCT = 0;
4109 	if (fct_task->task_flags & TF_WRITE_DATA) {
4110 		iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
4111 		wqe->CmdType = WQE_TYPE_TRECEIVE;		/* Word 11 */
4112 
4113 	} else { /* TF_READ_DATA */
4114 
4115 		iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;
4116 		wqe->CmdType = WQE_TYPE_TSEND;			/* Word 11 */
4117 
4118 		if ((dbuf->db_data_size >=
4119 		    fct_task->task_expected_xfer_length)) {
4120 			/* enable auto-rsp AP feature */
4121 			wqe->AR = 0x1;
4122 			iocb->ULPCT = 0x1; /* for cmpl */
4123 		}
4124 	}
4125 
4126 	(void) emlxs_sli4_fct_bde_setup(port, cmd_sbp);
4127 
4128 	/*  Word  6 */
4129 	wqe->ContextTag = ndlp->nlp_Rpi;
4130 	wqe->XRITag = xrip->XRI;
4131 
4132 	/*  Word  7 */
4133 	wqe->Command  = iocb->ULPCOMMAND;
4134 	wqe->Class = cmd_sbp->class;
4135 	wqe->ContextType = WQE_RPI_CONTEXT;
4136 	wqe->Timer = ((timeout > 0xff) ? 0 : timeout);
4137 	wqe->PU = 1;
4138 
4139 	/*  Word  8 */
4140 	wqe->AbortTag = 0;
4141 
4142 	/*  Word  9 */
4143 	wqe->RequestTag = xrip->iotag;
4144 	wqe->OXId = (uint16_t)fct_cmd->cmd_oxid;
4145 
4146 	/*  Word  10 */
4147 	if (xrip->flag & EMLXS_XRI_BUSY) {
4148 		wqe->XC = 1;
4149 	}
4150 
4151 	if (!(hba->sli.sli4.param.PHWQ)) {
4152 		wqe->QOSd = 1;
4153 		wqe->DBDE = 1; /* Data type for BDE 0 */
4154 	}
4155 
4156 	/*  Word  11 */
4157 	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
4158 
4159 	/*  Word  12 */
4160 	wqe->CmdSpecific = dbuf->db_data_size;
4161 
4162 	return (IOERR_SUCCESS);
4163 
4164 } /* emlxs_sli4_prep_fct_iocb() */
4165 #endif /* SFCT_SUPPORT */
4166 
4167 
/*
 * Prepare a WQE/IOCB pair for a solicited FCP (initiator) command.
 *
 * Looks up the RPI for the target node, allocates an XRI (exchange),
 * builds the scatter/gather list via emlxs_sli4_bde_setup(), DMA-syncs
 * the SGL for the device, and fills in the work-queue entry (command
 * type by transfer direction, RPI context, timer, class of service).
 *
 * Returns FC_SUCCESS on success, FC_TRAN_BUSY when no exchange or SGE
 * could be set up, or 0xff when the RPI lookup fails (in which case the
 * packet state has already been set to a local reject).
 */
/*ARGSUSED*/
extern uint32_t
emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	CHANNEL *cp;
	RPIobj_t *rpip;
	XRIobj_t *xrip;
	emlxs_wqe_t *wqe;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *node;
	uint16_t iotag;
	uint32_t did;
	off_t offset;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
	cp = &hba->chan[channel];

	iocbq = &sbp->iocbq;
	iocbq->channel = (void *) cp;
	iocbq->port = (void *) port;

	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));

	/* Find target node object */
	node = (NODELIST *)iocbq->node;
	rpip = EMLXS_NODE_TO_RPI(port, node);

	if (!rpip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find rpi. did=0x%x", did);

		emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_INVALID_RPI, 0);
		return (0xff);
	}

	sbp->channel = cp;
	/* Next allocate an Exchange for this command */
	xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
	    EMLXS_XRI_SOL_FCP_TYPE);

	if (!xrip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate exchange. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}
	sbp->bmp = NULL;
	iotag = sbp->iotag;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: Prep xri=%d iotag=%d oxid=%x rpi=%d",
	    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);
#endif /* DEBUG_FASTPATH */

	/* Indicate this is a FCP cmd */
	iocbq->flag |= IOCB_FCP_CMD;

	/* Build the SGL; on failure release the exchange we just took */
	if (emlxs_sli4_bde_setup(port, sbp)) {
		emlxs_sli4_free_xri(port, sbp, xrip, 1);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	/* DEBUG */
#ifdef DEBUG_FCP
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: SGLaddr virt %p phys %p size %d", xrip->SGList.virt,
	    xrip->SGList.phys, pkt->pkt_datalen);
	emlxs_data_dump(port, "FCP: SGL", (uint32_t *)xrip->SGList.virt, 20, 0);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: CMD virt %p len %d:%d:%d",
	    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen, pkt->pkt_datalen);
	emlxs_data_dump(port, "FCP: CMD", (uint32_t *)pkt->pkt_cmd, 10, 0);
#endif /* DEBUG_FCP */

	/* DMA-sync the SGL for the device; offset is relative to slim2 */
	offset = (off_t)((uint64_t)((unsigned long)
	    xrip->SGList.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	EMLXS_MPDATA_SYNC(xrip->SGList.dma_handle, offset,
	    xrip->SGList.size, DDI_DMA_SYNC_FORDEV);

	/* if device is FCP-2 device, set the following bit */
	/* that says to run the FC-TAPE protocol. */
	if (node->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		wqe->ERP = 1;
	}

	/* Select the command by transfer direction / payload size */
	if (pkt->pkt_datalen == 0) {
		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
		wqe->Command = CMD_FCP_ICMND64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
		wqe->Command = CMD_FCP_IREAD64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
		wqe->PU = PARM_XFER_CHECK;
	} else {
		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
		wqe->Command = CMD_FCP_IWRITE64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_OUT;
	}
	wqe->un.FcpCmd.TotalTransferCount = pkt->pkt_datalen;

	if (!(hba->sli.sli4.param.PHWQ)) {
		wqe->DBDE = 1; /* Data type for BDE 0 */
	}
	wqe->ContextTag = rpip->RPI;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->XRITag = xrip->XRI;
	/* Timeouts larger than one byte disable the WQE timer (0) */
	wqe->Timer =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->RequestTag = iotag;
	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */

	return (FC_SUCCESS);
} /* emlxs_sli4_prep_fcp_iocb() */
4313 
4314 
/*
 * IP-over-FC is not implemented for SLI-4 here; this stub always
 * reports FC_TRAN_BUSY to the caller.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	return (FC_TRAN_BUSY);

} /* emlxs_sli4_prep_ip_iocb() */
4322 
4323 
/*
 * Prepare a WQE/IOCB pair for an ELS frame.
 *
 * FC_PKT_OUTBOUND packets are ELS responses: the XRI of the original
 * unsolicited exchange is located with emlxs_sli4_register_xri() and an
 * XMIT_ELS_RSP64_CX is built.  All other packets are ELS requests: a
 * new XRI is allocated and an ELS_REQUEST64_CR is built, with the WQE
 * context (FCFI/VPI/RPI) chosen per ELS command (FLOGI/FDISC/LOGO/
 * PLOGI/other).  For VPI-context WQEs a reserved RPI is also obtained.
 *
 * Returns FC_SUCCESS, FC_TRAN_BUSY (no exchange available), or 0xff
 * when an XRI/RPI lookup fails (packet state already set to a local
 * reject in that case).
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	FCFIobj_t *fcfp;
	RPIobj_t *reserved_rpip = NULL;
	RPIobj_t *rpip = NULL;
	XRIobj_t *xrip;
	CHANNEL *cp;
	uint32_t did;
	uint32_t cmd;
	ULP_SGE64 stage_sge;
	ULP_SGE64 *sge;
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_resp;
	emlxs_node_t *node;
	off_t offset;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));
	cp = &hba->chan[hba->channel_els];

	/* Initialize iocbq */
	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	sbp->channel = cp;
	sbp->bmp = NULL;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cp_cmd = pkt->pkt_cmd_cookie;
	cp_resp = pkt->pkt_resp_cookie;
#else
	cp_cmd  = &pkt->pkt_cmd_cookie;
	cp_resp = &pkt->pkt_resp_cookie;
#endif	/* >= EMLXS_MODREV3 */

	/* Stage the CMD payload SGE locally before copying to the SGL */
	sge = &stage_sge;
	sge->addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
	sge->addrLow = PADDR_LO(cp_cmd->dmac_laddress);
	sge->length = pkt->pkt_cmdlen;
	sge->offset = 0;
	sge->type = 0;

	/* First word of the payload is the ELS command code */
	cmd = *((uint32_t *)pkt->pkt_cmd);
	cmd &= ELS_CMD_MASK;

	/* Initialize iocb */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* ELS Response */

		sbp->xrip = 0;
		xrip = emlxs_sli4_register_xri(port, sbp,
		    pkt->pkt_cmd_fhdr.rx_id, did);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find XRI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		rpip = xrip->rpip;

		if (!rpip) {
			/* This means that we had a node registered */
			/* when the unsol request came in but the node */
			/* has since been unregistered. */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find RPI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: Prep xri=%d iotag=%d oxid=%x rpi=%d",
		    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);

		iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
		wqe->Command = CMD_XMIT_ELS_RSP64_CX;
		wqe->CmdType = WQE_TYPE_GEN;
		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		wqe->un.ElsRsp.Payload.addrHigh = sge->addrHigh;
		wqe->un.ElsRsp.Payload.addrLow = sge->addrLow;
		wqe->un.ElsRsp.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
		/* NOTE(review): ElsCmd/ElsRsp are union members sharing */
		/* storage in the WQE -- confirm intended field overlay */
		wqe->un.ElsCmd.PayloadLength = pkt->pkt_cmdlen;

		wqe->un.ElsRsp.RemoteId = did;
		wqe->PU = 0x3;
		wqe->OXId = xrip->rx_id;

		sge->last = 1;
		/* Now sge is fully staged */

		sge = xrip->SGList.virt;
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
		    sizeof (ULP_SGE64));

		if (rpip->RPI == FABRIC_RPI) {
			wqe->ContextTag = port->vpip->VPI;
			wqe->ContextType = WQE_VPI_CONTEXT;
		} else {
			wqe->ContextTag = rpip->RPI;
			wqe->ContextType = WQE_RPI_CONTEXT;
		}

		/* FLOGI ACC carries our service parameters and local id */
		if ((cmd == ELS_CMD_ACC) && (sbp->ucmd == ELS_CMD_FLOGI)) {
			wqe->un.ElsCmd.SP = 1;
			wqe->un.ElsCmd.LocalId = 0xFFFFFE;
		}

	} else {
		/* ELS Request */

		fcfp = port->vpip->vfip->fcfp;
		node = (emlxs_node_t *)iocbq->node;
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (!rpip) {
			/* Use the fabric rpi */
			rpip = port->vpip->fabric_rpip;
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
		    EMLXS_XRI_SOL_ELS_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: Prep xri=%d iotag=%d rpi=%d",
		    xrip->XRI, xrip->iotag, rpip->RPI);

		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;
		wqe->Command = CMD_ELS_REQUEST64_CR;
		wqe->CmdType = WQE_TYPE_ELS;
		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		wqe->un.ElsCmd.Payload.addrHigh = sge->addrHigh;
		wqe->un.ElsCmd.Payload.addrLow = sge->addrLow;
		wqe->un.ElsCmd.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;

		wqe->un.ElsCmd.RemoteId = did;
		wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

		/* setup for rsp */
		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
		iocb->ULPPU = 1;	/* Wd4 is relative offset */

		/* First SGE (command) is not the last; response follows */
		sge->last = 0;

		sge = xrip->SGList.virt;
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
		    sizeof (ULP_SGE64));

		wqe->un.ElsCmd.PayloadLength =
		    pkt->pkt_cmdlen; /* Byte offset of rsp data */

		/* RSP payload */
		sge = &stage_sge;
		sge->addrHigh = PADDR_HI(cp_resp->dmac_laddress);
		sge->addrLow = PADDR_LO(cp_resp->dmac_laddress);
		sge->length = pkt->pkt_rsplen;
		sge->offset = 0;
		sge->last = 1;
		/* Now sge is fully staged */

		/* Copy the response SGE into the second SGL slot */
		sge = xrip->SGList.virt;
		sge++;
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
		    sizeof (ULP_SGE64));
#ifdef DEBUG_ELS
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: SGLaddr virt %p phys %p",
		    xrip->SGList.virt, xrip->SGList.phys);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: PAYLOAD virt %p phys %p",
		    pkt->pkt_cmd, cp_cmd->dmac_laddress);
		emlxs_data_dump(port, "ELS: SGL", (uint32_t *)xrip->SGList.virt,
		    12, 0);
#endif /* DEBUG_ELS */

		/* Choose WQE context and ELS id by command code */
		switch (cmd) {
		case ELS_CMD_FLOGI:
			wqe->un.ElsCmd.SP = 1;

			if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) ==
			    SLI_INTF_IF_TYPE_0) {
				wqe->ContextTag = fcfp->FCFI;
				wqe->ContextType = WQE_FCFI_CONTEXT;
			} else {
				wqe->ContextTag = port->vpip->VPI;
				wqe->ContextType = WQE_VPI_CONTEXT;
			}

			if (hba->flag & FC_FIP_SUPPORTED) {
				wqe->CmdType |= WQE_TYPE_MASK_FIP;
			}

			if (hba->topology == TOPOLOGY_LOOP) {
				wqe->un.ElsCmd.LocalId = port->did;
			}

			wqe->ELSId = WQE_ELSID_FLOGI;
			break;
		case ELS_CMD_FDISC:
			wqe->un.ElsCmd.SP = 1;
			wqe->ContextTag = port->vpip->VPI;
			wqe->ContextType = WQE_VPI_CONTEXT;

			if (hba->flag & FC_FIP_SUPPORTED) {
				wqe->CmdType |= WQE_TYPE_MASK_FIP;
			}

			wqe->ELSId = WQE_ELSID_FDISC;
			break;
		case ELS_CMD_LOGO:
			if ((did == FABRIC_DID) &&
			    (hba->flag & FC_FIP_SUPPORTED)) {
				wqe->CmdType |= WQE_TYPE_MASK_FIP;
			}

			wqe->ContextTag = port->vpip->VPI;
			wqe->ContextType = WQE_VPI_CONTEXT;
			wqe->ELSId = WQE_ELSID_LOGO;
			break;
		case ELS_CMD_PLOGI:
			if (rpip->RPI == FABRIC_RPI) {
				if (hba->flag & FC_PT_TO_PT) {
					wqe->un.ElsCmd.SP = 1;
					wqe->un.ElsCmd.LocalId = port->did;
				}

				wqe->ContextTag = port->vpip->VPI;
				wqe->ContextType = WQE_VPI_CONTEXT;
			} else {
				wqe->ContextTag = rpip->RPI;
				wqe->ContextType = WQE_RPI_CONTEXT;
			}

			wqe->ELSId = WQE_ELSID_PLOGI;
			break;
		default:
			if (rpip->RPI == FABRIC_RPI) {
				wqe->ContextTag = port->vpip->VPI;
				wqe->ContextType = WQE_VPI_CONTEXT;
			} else {
				wqe->ContextTag = rpip->RPI;
				wqe->ContextType = WQE_RPI_CONTEXT;
			}

			wqe->ELSId = WQE_ELSID_CMD;
			break;
		}

#ifdef SFCT_SUPPORT
		/* This allows fct to abort the request */
		if (sbp->fct_cmd) {
			sbp->fct_cmd->cmd_oxid = xrip->XRI;
			sbp->fct_cmd->cmd_rxid = 0xFFFF;
		}
#endif /* SFCT_SUPPORT */
	}

	/* VPI-context WQEs additionally need a reserved RPI */
	if (wqe->ContextType == WQE_VPI_CONTEXT) {
		reserved_rpip = emlxs_rpi_reserve_notify(port, did, xrip);

		if (!reserved_rpip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to alloc reserved RPI. rxid=%x. Rejecting.",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		/* Store the reserved rpi */
		if (wqe->Command == CMD_ELS_REQUEST64_CR) {
			wqe->OXId = reserved_rpip->RPI;
		} else {
			wqe->CmdSpecific = reserved_rpip->RPI;
		}
	}

	/* DMA-sync the SGL for the device; offset is relative to slim2 */
	offset = (off_t)((uint64_t)((unsigned long)
	    xrip->SGList.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	EMLXS_MPDATA_SYNC(xrip->SGList.dma_handle, offset,
	    xrip->SGList.size, DDI_DMA_SYNC_FORDEV);

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->XRITag = xrip->XRI;
	wqe->RequestTag = xrip->iotag;
	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
	return (FC_SUCCESS);

} /* emlxs_sli4_prep_els_iocb() */
4667 
4668 
/*
 * Prepare a WQE/IOCB pair for a CT (common transport) frame.
 *
 * FC_PKT_OUTBOUND packets are CT responses: the XRI of the original
 * unsolicited exchange is located with emlxs_sli4_register_xri() and an
 * XMIT_SEQUENCE64_CR is built.  All other packets are CT requests: a
 * new XRI is allocated and a GEN_REQUEST64_CR is built.  Both paths
 * build the SGL via emlxs_sli4_bde_setup() and finish with common
 * RPI-context, timer and class-of-service setup.
 *
 * Returns FC_SUCCESS, FC_TRAN_BUSY (no exchange or SGE available), or
 * 0xff when an XRI/RPI lookup fails (packet state already set).
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	NODELIST *node = NULL;
	CHANNEL *cp;
	RPIobj_t *rpip;
	XRIobj_t *xrip;
	uint32_t did;
	off_t offset;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));

	cp = &hba->chan[hba->channel_ct];

	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	sbp->bmp = NULL;
	sbp->channel = cp;

	/* Initialize wqe */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* CT Response */

		sbp->xrip = 0;
		xrip = emlxs_sli4_register_xri(port, sbp,
		    pkt->pkt_cmd_fhdr.rx_id, did);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find XRI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		rpip = xrip->rpip;

		if (!rpip) {
			/* This means that we had a node registered */
			/* when the unsol request came in but the node */
			/* has since been unregistered. */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find RPI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: Prep xri=%d iotag=%d oxid=%x rpi=%d",
		    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);

		/* NOTE(review): unlike the request path below, a failed */
		/* bde_setup here does not free the (registered) XRI -- */
		/* confirm that is intentional */
		if (emlxs_sli4_bde_setup(port, sbp)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		if (!(hba->model_info.chip & EMLXS_BE_CHIPS)) {
			wqe->un.XmitSeq.Rsvd0 = 0; /* Word3 now reserved */
		}

		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CR;
		wqe->CmdType = WQE_TYPE_GEN;
		wqe->Command = CMD_XMIT_SEQUENCE64_CR;
		wqe->LenLoc = 2;

		/* Loopback responses keep the exchange open (xo) */
		if (((SLI_CT_REQUEST *) pkt->pkt_cmd)->CommandResponse.bits.
		    CmdRsp == (LE_SWAP16(SLI_CT_LOOPBACK))) {
			wqe->un.XmitSeq.xo = 1;
		} else {
			wqe->un.XmitSeq.xo = 0;
		}

		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
			wqe->un.XmitSeq.ls = 1;
		}

		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
			wqe->un.XmitSeq.si = 1;
		}

		wqe->un.XmitSeq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
		wqe->un.XmitSeq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
		wqe->un.XmitSeq.Type  = pkt->pkt_cmd_fhdr.type;
		wqe->OXId = xrip->rx_id;
		wqe->XC = 0; /* xri_tag is a new exchange */
		wqe->CmdSpecific = wqe->un.GenReq.Payload.tus.f.bdeSize;

	} else {
		/* CT Request */

		node = (emlxs_node_t *)iocbq->node;
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (!rpip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find rpi. did=0x%x rpi=%d",
			    did, node->nlp_Rpi);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
		    EMLXS_XRI_SOL_CT_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: Prep xri=%d iotag=%d oxid=%x rpi=%d",
		    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);

		if (emlxs_sli4_bde_setup(port, sbp)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

			emlxs_sli4_free_xri(port, sbp, xrip, 1);
			return (FC_TRAN_BUSY);
		}

		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CR;
		wqe->CmdType = WQE_TYPE_GEN;
		wqe->Command = CMD_GEN_REQUEST64_CR;
		wqe->un.GenReq.la = 1;
		wqe->un.GenReq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
		wqe->un.GenReq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
		wqe->un.GenReq.Type  = pkt->pkt_cmd_fhdr.type;

#ifdef DEBUG_CT
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: SGLaddr virt %p phys %p", xrip->SGList.virt,
		    xrip->SGList.phys);
		emlxs_data_dump(port, "CT: SGL", (uint32_t *)xrip->SGList.virt,
		    12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: CMD virt %p len %d:%d",
		    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen);
		emlxs_data_dump(port, "CT: DATA", (uint32_t *)pkt->pkt_cmd,
		    20, 0);
#endif /* DEBUG_CT */

#ifdef SFCT_SUPPORT
		/* This allows fct to abort the request */
		if (sbp->fct_cmd) {
			sbp->fct_cmd->cmd_oxid = xrip->XRI;
			sbp->fct_cmd->cmd_rxid = 0xFFFF;
		}
#endif /* SFCT_SUPPORT */
	}

	/* Setup for rsp */
	iocb->un.genreq64.w5.hcsw.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
	iocb->un.genreq64.w5.hcsw.Type  = pkt->pkt_cmd_fhdr.type;
	iocb->un.genreq64.w5.hcsw.Dfctl  = pkt->pkt_cmd_fhdr.df_ctl;
	iocb->ULPPU = 1;	/* Wd4 is relative offset */

	/* DMA-sync the SGL for the device; offset is relative to slim2 */
	offset = (off_t)((uint64_t)((unsigned long)
	    xrip->SGList.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	EMLXS_MPDATA_SYNC(xrip->SGList.dma_handle, offset,
	    xrip->SGList.size, DDI_DMA_SYNC_FORDEV);

	wqe->ContextTag = rpip->RPI;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->XRITag = xrip->XRI;
	wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->RequestTag = xrip->iotag;
	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
	return (FC_SUCCESS);

} /* emlxs_sli4_prep_ct_iocb() */
4894 
4895 
4896 /*ARGSUSED*/
4897 static int
emlxs_sli4_read_eq(emlxs_hba_t * hba,EQ_DESC_t * eq)4898 emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
4899 {
4900 	uint32_t *ptr;
4901 	EQE_u eqe;
4902 	int rc = 0;
4903 	off_t offset;
4904 
4905 	mutex_enter(&EMLXS_PORT_LOCK);
4906 
4907 	ptr = eq->addr.virt;
4908 	ptr += eq->host_index;
4909 
4910 	offset = (off_t)((uint64_t)((unsigned long)
4911 	    eq->addr.virt) -
4912 	    (uint64_t)((unsigned long)
4913 	    hba->sli.sli4.slim2.virt));
4914 
4915 	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
4916 	    4096, DDI_DMA_SYNC_FORKERNEL);
4917 
4918 	eqe.word = *ptr;
4919 	eqe.word = BE_SWAP32(eqe.word);
4920 
4921 	if (eqe.word & EQE_VALID) {
4922 		rc = 1;
4923 	}
4924 
4925 	mutex_exit(&EMLXS_PORT_LOCK);
4926 
4927 	return (rc);
4928 
4929 } /* emlxs_sli4_read_eq */
4930 
4931 
4932 static void
emlxs_sli4_poll_intr(emlxs_hba_t * hba)4933 emlxs_sli4_poll_intr(emlxs_hba_t *hba)
4934 {
4935 	int rc = 0;
4936 	int i;
4937 	char arg[] = {0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7};
4938 
4939 	/* Check attention bits once and process if required */
4940 
4941 	for (i = 0; i < hba->intr_count; i++) {
4942 		rc = emlxs_sli4_read_eq(hba, &hba->sli.sli4.eq[i]);
4943 		if (rc == 1) {
4944 			break;
4945 		}
4946 	}
4947 
4948 	if (rc != 1) {
4949 		return;
4950 	}
4951 
4952 	(void) emlxs_sli4_msi_intr((char *)hba,
4953 	    (char *)(unsigned long)arg[i]);
4954 
4955 	return;
4956 
4957 } /* emlxs_sli4_poll_intr() */
4958 
4959 
/*
 * Process an asynchronous event CQE.
 *
 * Dispatches on cqe->event_code: FCoE link state, FIP/FCF state, DCBX,
 * Group 5 (QOS link speed), native FC link attention, SLI port events
 * (misconfigured optics) and VF/MR events.  The link event tag from the
 * CQE is recorded in hba->link_event_tag before dispatching.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_async_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
{
	emlxs_port_t *port = &PPORT;
	uint8_t status;

	/* Save the event tag */
	/* NOTE(review): event_tag is read from the link member of the */
	/* CQE union for every event code, not only link events -- */
	/* presumably the tag occupies the same words in all layouts */
	if (hba->link_event_tag == cqe->un.link.event_tag) {
		HBASTATS.LinkMultiEvent++;
	} else if (hba->link_event_tag + 1 < cqe->un.link.event_tag) {
		/* A tag gap means at least one event was missed */
		HBASTATS.LinkMultiEvent++;
	}
	hba->link_event_tag = cqe->un.link.event_tag;

	switch (cqe->event_code) {
	case ASYNC_EVENT_CODE_FCOE_LINK_STATE:
		HBASTATS.LinkEvent++;

		switch (cqe->un.link.link_status) {
		case ASYNC_EVENT_PHYS_LINK_UP:
			/* Physical link up alone is only logged */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: PHYS_LINK_UP. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);
			break;

		case ASYNC_EVENT_LOGICAL_LINK_UP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: LOGICAL_LINK_UP. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);

			emlxs_sli4_handle_fcoe_link_event(hba, cqe);
			break;

		case ASYNC_EVENT_PHYS_LINK_DOWN:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: PHYS_LINK_DOWN. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);

			emlxs_sli4_handle_fcoe_link_event(hba, cqe);
			break;

		case ASYNC_EVENT_LOGICAL_LINK_DOWN:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: LOGICAL_LINK_DOWN. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);

			emlxs_sli4_handle_fcoe_link_event(hba, cqe);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: Unknown link status=%d event=%x",
			    cqe->un.link.link_status, HBASTATS.LinkEvent);
			break;
		}
		break;
	case ASYNC_EVENT_CODE_FCOE_FIP:
		/* FIP events drive the FCF state machine */
		switch (cqe->un.fcoe.evt_type) {
		case ASYNC_EVENT_NEW_FCF_DISC:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCF_FOUND %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_found_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		case ASYNC_EVENT_FCF_TABLE_FULL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCFTAB_FULL %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_full_notify(port);
			break;
		case ASYNC_EVENT_FCF_DEAD:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCF_LOST %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_lost_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		case ASYNC_EVENT_VIRT_LINK_CLEAR:
			/* Clear virtual link: ref_index is a VPI here */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: CVL %d",
			    cqe->un.fcoe.ref_index);

			(void) emlxs_fcf_cvl_notify(port,
			    emlxs_sli4_vpi_to_index(hba,
			    cqe->un.fcoe.ref_index));
			break;

		case ASYNC_EVENT_FCF_MODIFIED:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCF_CHANGED %d",
			    cqe->un.fcoe.ref_index);

			(void) emlxs_fcf_changed_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: Unknown event type=%d",
			    cqe->un.fcoe.evt_type);
			break;
		}
		break;
	case ASYNC_EVENT_CODE_DCBX:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "DCBX Async Event: type=%d. Not supported.",
		    cqe->event_type);
		break;
	case ASYNC_EVENT_CODE_GRP_5:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Group 5 Async Event: type=%d.", cqe->event_type);
		if (cqe->event_type == ASYNC_EVENT_QOS_SPEED) {
			hba->qos_linkspeed = cqe->un.qos.qos_link_speed;
		}
		break;
	case ASYNC_EVENT_CODE_FC_EVENT:
		switch (cqe->event_type) {
		case ASYNC_EVENT_FC_LINK_ATT:
			HBASTATS.LinkEvent++;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FC Async Event: Link Attention. event=%x",
			    HBASTATS.LinkEvent);

			emlxs_sli4_handle_fc_link_att(hba, cqe);
			break;
		case ASYNC_EVENT_FC_SHARED_LINK_ATT:
			HBASTATS.LinkEvent++;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FC Async Event: Shared Link Attention. event=%x",
			    HBASTATS.LinkEvent);

			emlxs_sli4_handle_fc_link_att(hba, cqe);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FC Async Event: Unknown event. type=%d event=%x",
			    cqe->event_type, HBASTATS.LinkEvent);
		}
		break;
	case ASYNC_EVENT_CODE_PORT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "SLI Port Async Event: type=%d", cqe->event_type);
		if (cqe->event_type == ASYNC_EVENT_MISCONFIG_PORT) {
			/* link_status holds one status byte per port; */
			/* swap the word, then index by our link number */
			*((uint32_t *)cqe->un.port.link_status) =
			    BE_SWAP32(*((uint32_t *)cqe->un.port.link_status));
			status =
			    cqe->un.port.link_status[hba->sli.sli4.link_number];

			switch (status) {
				case 0 :
				break;

				case 1 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Physical media not "
				    "detected");
				cmn_err(CE_WARN,
				    "^%s%d: Optics faulted/incorrectly "
				    "installed/not installed - Reseat optics, "
				    "if issue not resolved, replace.",
				    DRIVER_NAME, hba->ddiinst);
				break;

				case 2 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Wrong physical "
				    "media detected");
				cmn_err(CE_WARN,
				    "^%s%d: Optics of two types installed - "
				    "Remove one optic or install matching"
				    "pair of optics.",
				    DRIVER_NAME, hba->ddiinst);
				break;

				case 3 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Unsupported "
				    "physical media detected");
				cmn_err(CE_WARN,
				    "^%s%d:  Incompatible optics - Replace "
				    "with compatible optics for card to "
				    "function.",
				    DRIVER_NAME, hba->ddiinst);
				break;

				default :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Physical media "
				    "error, status=%x", status);
				cmn_err(CE_WARN,
				    "^%s%d: Misconfigured port: status=0x%x - "
				    "Check optics on card.",
				    DRIVER_NAME, hba->ddiinst, status);
				break;
			}
		}
		break;
	case ASYNC_EVENT_CODE_VF:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "VF Async Event: type=%d",
		    cqe->event_type);
		break;
	case ASYNC_EVENT_CODE_MR:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "MR Async Event: type=%d",
		    cqe->event_type);
		break;
	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Unknown Async Event: code=%d type=%d.",
		    cqe->event_code, cqe->event_type);
		break;
	}

} /* emlxs_sli4_process_async_event() */
5184 
5185 
/*
 * Service a mailbox completion CQE from the SLI-4 MQ completion queue.
 *
 * Behavior depends on hba->mbox_queue_flag:
 *   0          - stray interrupt, no mailbox outstanding; logged and ignored.
 *   MBX_POLL   - a polling thread owns the mailbox; just mark MBQ_COMPLETED
 *                so the poller stops waiting.
 *   MBX_SLEEP /
 *   MBX_NOWAIT - this path completes the command: copy the MQE back,
 *                sync/swap any external buffers, run the completion
 *                callback, then attempt to start the next queued mailbox.
 *
 * Locking: EMLXS_PORT_LOCK guards mbox_queue_flag; EMLXS_MBOX_LOCK guards
 * hba->mbox_mbq. Both are released before the completion work begins.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_mbox_event(emlxs_hba_t *hba, CQE_MBOX_t *cqe)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX4 *mb;
	MATCHMAP *mbox_bp;	/* embedded-command external buffer */
	MATCHMAP *mbox_nonembed;	/* non-embedded SLI_CONFIG payload */
	MAILBOXQ *mbq = NULL;
	uint32_t size;
	uint32_t *iptr;
	int rc;
	off_t offset;

	/* A consumed-but-not-completed entry carries no status; ignore it */
	if (cqe->consumed && !cqe->completed) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Mbox event. Entry consumed but not completed");
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);
	switch (hba->mbox_queue_flag) {
	case 0:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
		    "CQ ENTRY: Mbox event. No mailbox active.");

		mutex_exit(&EMLXS_PORT_LOCK);
		return;

	case MBX_POLL:

		/* Mark mailbox complete, this should wake up any polling */
		/* threads. This can happen if interrupts are enabled while */
		/* a polled mailbox command is outstanding. If we don't set */
		/* MBQ_COMPLETED here, the polling thread may wait until */
		/* timeout error occurs */

		mutex_enter(&EMLXS_MBOX_LOCK);
		mbq = (MAILBOXQ *)hba->mbox_mbq;
		if (mbq) {
			port = (emlxs_port_t *)mbq->port;
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "CQ ENTRY: Mbox event. Completing Polled command.");
			mbq->flag |= MBQ_COMPLETED;
		}
		mutex_exit(&EMLXS_MBOX_LOCK);

		mutex_exit(&EMLXS_PORT_LOCK);
		return;

	case MBX_SLEEP:
	case MBX_NOWAIT:
		/* Check mbox_timer, it acts as a service flag too */
		/* The first to service the mbox queue will clear the timer */
		if (hba->mbox_timer) {
			hba->mbox_timer = 0;

			mutex_enter(&EMLXS_MBOX_LOCK);
			mbq = (MAILBOXQ *)hba->mbox_mbq;
			mutex_exit(&EMLXS_MBOX_LOCK);
		}

		/* mbq stays NULL if another thread already serviced it */
		if (!mbq) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Mailbox event. No service required.");
			mutex_exit(&EMLXS_PORT_LOCK);
			return;
		}

		/* The MAILBOX4 image lives at the head of the MAILBOXQ */
		mb = (MAILBOX4 *)mbq;
		mutex_exit(&EMLXS_PORT_LOCK);
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
		    "CQ ENTRY: Mbox event. Invalid Mailbox flag (%x).",
		    hba->mbox_queue_flag);

		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* Set port context */
	port = (emlxs_port_t *)mbq->port;

	/* MQ offset within the shared slim2 DMA region */
	offset = (off_t)((uint64_t)((unsigned long)
	    hba->sli.sli4.mq.addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	/* Now that we are the owner, DMA Sync entire MQ if needed */
	/* NOTE(review): sync direction is FORDEV even though the MQE is */
	/* read below via BE_SWAP32_BCOPY - looks like it should be */
	/* FORKERNEL; confirm against the DDI DMA model before changing */
	EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORDEV);

	/* Copy the completed MQE back into the driver's mailbox image, */
	/* converting from big-endian wire format */
	BE_SWAP32_BCOPY((uint8_t *)hba->mbox_mqe, (uint8_t *)mb,
	    MAILBOX_CMD_SLI4_BSIZE);

	/* Heartbeats complete constantly; keep them out of the log */
	if (mb->mbxCommand != MBX_HEARTBEAT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Mbox event. Mbox complete. status=%x cmd=%x",
		    mb->mbxStatus, mb->mbxCommand);

		emlxs_data_dump(port, "MBOX CMP", (uint32_t *)hba->mbox_mqe,
		    12, 0);
	}

	if (mb->mbxCommand == MBX_SLI_CONFIG) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Mbox sge_cnt: %d length: %d embed: %d",
		    mb->un.varSLIConfig.be.sge_cnt,
		    mb->un.varSLIConfig.be.payload_length,
		    mb->un.varSLIConfig.be.embedded);
	}

	/* Now sync the memory buffer if one was used */
	if (mbq->bp) {
		mbox_bp = (MATCHMAP *)mbq->bp;
		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
		    DDI_DMA_SYNC_FORKERNEL);
#ifdef FMA_SUPPORT
		if (emlxs_fm_check_dma_handle(hba, mbox_bp->dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "sli4_process_mbox_event: hdl=%p",
			    mbox_bp->dma_handle);

			/* Report the DMA fault through the mailbox status */
			mb->mbxStatus = MBXERR_DMA_ERROR;
}
#endif
	}

	/* Now sync the memory buffer if one was used */
	if (mbq->nonembed) {
		mbox_nonembed = (MATCHMAP *)mbq->nonembed;
		size = mbox_nonembed->size;
		EMLXS_MPDATA_SYNC(mbox_nonembed->dma_handle, 0, size,
		    DDI_DMA_SYNC_FORKERNEL);
		/* Non-embedded payload is byte-swapped in place */
		iptr = (uint32_t *)((uint8_t *)mbox_nonembed->virt);
		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)iptr, size);

#ifdef FMA_SUPPORT
		if (emlxs_fm_check_dma_handle(hba,
		    mbox_nonembed->dma_handle) != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "sli4_process_mbox_event: hdl=%p",
			    mbox_nonembed->dma_handle);

			mb->mbxStatus = MBXERR_DMA_ERROR;
		}
#endif
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)iptr, 24, 0);
	}

	/* Mailbox has been completely received at this point */

	if (mb->mbxCommand == MBX_HEARTBEAT) {
		hba->heartbeat_active = 0;
		goto done;
	}

	if (hba->mbox_queue_flag == MBX_SLEEP) {
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Received.  %s: status=%x Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb->mbxStatus);
		}
	} else {
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Completed. %s: status=%x",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb->mbxStatus);
		}
	}

	/* Filter out passthru mailbox */
	if (mbq->flag & MBQ_PASSTHRU) {
		goto done;
	}

	if (mb->mbxStatus) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
		    (uint32_t)mb->mbxStatus);
	}

	if (mbq->mbox_cmpl) {
		rc = (mbq->mbox_cmpl)(hba, mbq);

		/* If mbox was retried, return immediately */
		/* (the callback re-issued the command; skip cleanup) */
		if (rc) {
			return;
		}
	}

done:

	/* Clean up the mailbox area */
	emlxs_mb_fini(hba, (MAILBOX *)mb, mb->mbxStatus);

	/* Attempt to send pending mailboxes */
	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
	if (mbq) {
		/* Attempt to send pending mailboxes */
		rc =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
			emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
		}
	}
	return;

} /* emlxs_sli4_process_mbox_event() */
5403 
5404 
/*
 * Translate a SLI-4 WQ completion CQE into the SLI-3 style IOCB stored in
 * the sbp's iocbq, so the common (SLI-version-independent) completion code
 * can process it. The originating WQE supplies the command type and tags;
 * the CQE supplies status, parameter and command-specific data.
 */
/*ARGSUSED*/
static void
emlxs_CQE_to_IOCB(emlxs_hba_t *hba, CQE_CmplWQ_t *cqe, emlxs_buf_t *sbp)
{
#ifdef DEBUG_FASTPATH
	emlxs_port_t *port = &PPORT;
#endif /* DEBUG_FASTPATH */
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t *iptr;
	fc_packet_t *pkt;
	emlxs_wqe_t *wqe;

	iocbq = &sbp->iocbq;
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "CQE to IOCB: cmd:%x tag:%x xri:%d", wqe->Command,
	    wqe->RequestTag, wqe->XRITag);
#endif /* DEBUG_FASTPATH */

	/* Common fields: status, parameter, iotag and exchange context */
	iocb->ULPSTATUS = cqe->Status;
	iocb->un.ulpWord[4] = cqe->Parameter;
	iocb->ULPIOTAG = cqe->RequestTag;
	iocb->ULPCONTEXT = wqe->XRITag;

	/* Map the WQE command back to its IOCB completion command and */
	/* fill in command-specific fields */
	switch (wqe->Command) {

	case CMD_FCP_ICMND64_CR:
		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CX;
		break;

	case CMD_FCP_IREAD64_CR:
		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CX;
		iocb->ULPPU = PARM_XFER_CHECK;
		if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
			/* fcpi_parm = requested minus CmdSpecific; */
			/* presumably the untransferred residual - confirm */
			iocb->un.fcpi64.fcpi_parm =
			    wqe->un.FcpCmd.TotalTransferCount -
			    cqe->CmdSpecific;
		}
		break;

	case CMD_FCP_IWRITE64_CR:
		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CX;
		if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
			/* Guard against unsigned underflow when */
			/* CmdSpecific exceeds the requested count */
			if (wqe->un.FcpCmd.TotalTransferCount >
			    cqe->CmdSpecific) {
				iocb->un.fcpi64.fcpi_parm =
				    wqe->un.FcpCmd.TotalTransferCount -
				    cqe->CmdSpecific;
			} else {
				iocb->un.fcpi64.fcpi_parm = 0;
			}
		}
		break;

	case CMD_ELS_REQUEST64_CR:
		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CX;
		iocb->un.elsreq64.bdl.bdeSize = cqe->CmdSpecific;
		if (iocb->ULPSTATUS == 0) {
			iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
		}
		if (iocb->ULPSTATUS == IOSTAT_LS_RJT) {
			/* For LS_RJT, the driver populates the rsp buffer */
			pkt = PRIV2PKT(sbp);
			iptr = (uint32_t *)pkt->pkt_resp;
			*iptr++ = ELS_CMD_LS_RJT;
			*iptr = cqe->Parameter;
		}
		break;

	case CMD_GEN_REQUEST64_CR:
		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
		iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
		break;

	case CMD_XMIT_SEQUENCE64_CR:
		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
		break;

	case CMD_ABORT_XRI_CX:
		/* Abort completions carry the aborted exchange's tag */
		iocb->ULPCONTEXT = wqe->AbortTag;
		break;

	case CMD_FCP_TRECEIVE64_CX:
		/* free memory for XRDY */
		if (iocbq->bp) {
			emlxs_mem_buf_free(hba, iocbq->bp);
			iocbq->bp = 0;
		}

		/*FALLTHROUGH*/

	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	default:
		/* Target-mode and unknown commands complete unchanged */
		iocb->ULPCOMMAND = wqe->Command;

	}
} /* emlxs_CQE_to_IOCB() */
5507 
5508 
/*
 * Flush every outstanding I/O in the fc_table by fabricating a
 * LOCAL_REJECT/SEQUENCE_TIMEOUT completion for each one - presumably used
 * when the adapter is being reset or has failed (confirm against callers).
 *
 * Completions for non-polled, non-driver-allocated packets are queued on
 * their channel's rsp list and the channel interrupt threads are triggered
 * at the end; polled/driver packets are completed inline.
 */
/*ARGSUSED*/
static void
emlxs_sli4_hba_flush_chipq(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	CHANNEL *cp;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	uint16_t i;
	uint32_t trigger = 0;	/* set when any rsp list gains entries */
	CQE_CmplWQ_t cqe;

	mutex_enter(&EMLXS_FCTAB_LOCK);
	for (i = 0; i < hba->max_iotag; i++) {
		sbp = hba->fc_table[i];
		if (sbp == NULL || sbp == STALE_PACKET) {
			continue;
		}
		/* Claim the slot, then drop the lock to complete the I/O; */
		/* the lock is reacquired at the bottom of the loop */
		hba->fc_table[i] = STALE_PACKET;
		hba->io_count--;
		sbp->iotag = 0;
		mutex_exit(&EMLXS_FCTAB_LOCK);

		cp = sbp->channel;
		/* Fabricate a timeout completion for this iotag */
		bzero(&cqe, sizeof (CQE_CmplWQ_t));
		cqe.RequestTag = i;
		cqe.Status = IOSTAT_LOCAL_REJECT;
		cqe.Parameter = IOERR_SEQUENCE_TIMEOUT;

		cp->hbaCmplCmd_sbp++;

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
		if (sbp->fct_cmd) {
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    EMLXS_FCT_IOCB_COMPLETE);
		}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			atomic_dec_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
			if (sbp->node) {
				atomic_dec_32(&sbp->node->io_active);
			}
#endif /* NODE_THROTTLE_SUPPORT */
		}

		/* Copy entry to sbp's iocbq */
		iocbq = &sbp->iocbq;
		emlxs_CQE_to_IOCB(hba, &cqe, sbp);

		iocbq->next = NULL;

		/* Exchange is no longer busy on-chip, free it */
		emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);

		if (!(sbp->pkt_flags &
		    (PACKET_POLLED | PACKET_ALLOCATED))) {
			/* Add the IOCB to the channel list */
			mutex_enter(&cp->rsp_lock);
			if (cp->rsp_head == NULL) {
				cp->rsp_head = iocbq;
				cp->rsp_tail = iocbq;
			} else {
				cp->rsp_tail->next = iocbq;
				cp->rsp_tail = iocbq;
			}
			mutex_exit(&cp->rsp_lock);
			trigger = 1;
		} else {
			/* Polled/driver pkt: complete inline */
			emlxs_proc_channel_event(hba, cp, iocbq);
		}
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}
	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Kick the intr thread of every channel that gained responses */
	if (trigger) {
		for (i = 0; i < hba->chan_count; i++) {
			cp = &hba->chan[i];
			if (cp->rsp_head != NULL) {
				emlxs_thread_trigger2(&cp->intr_thread,
				    emlxs_proc_channel, cp);
			}
		}
	}

} /* emlxs_sli4_hba_flush_chipq() */
5598 
5599 
5600 /*ARGSUSED*/
5601 static void
emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t * hba,CQ_DESC_t * cq,CQE_CmplWQ_t * cqe)5602 emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t *hba,
5603     CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
5604 {
5605 	emlxs_port_t *port = &PPORT;
5606 	CHANNEL *cp;
5607 	uint16_t request_tag;
5608 
5609 	request_tag = cqe->RequestTag;
5610 
5611 	/* 1 to 1 mapping between CQ and channel */
5612 	cp = cq->channelp;
5613 
5614 	cp->hbaCmplCmd++;
5615 
5616 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5617 	    "CQ ENTRY: OOR Cmpl: iotag=%d", request_tag);
5618 
5619 	emlxs_data_dump(port, "CQE", (uint32_t *)cqe, 4, 0);
5620 
5621 } /* emlxs_sli4_process_oor_wqe_cmpl() */
5622 
5623 
5624 /*ARGSUSED*/
5625 static void
emlxs_sli4_process_wqe_cmpl(emlxs_hba_t * hba,CQ_DESC_t * cq,CQE_CmplWQ_t * cqe)5626 emlxs_sli4_process_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
5627 {
5628 	emlxs_port_t *port = &PPORT;
5629 	CHANNEL *cp;
5630 	emlxs_buf_t *sbp;
5631 	IOCBQ *iocbq;
5632 	uint16_t request_tag;
5633 #ifdef SFCT_SUPPORT
5634 #ifdef FCT_IO_TRACE
5635 	fct_cmd_t *fct_cmd;
5636 	emlxs_buf_t *cmd_sbp;
5637 #endif /* FCT_IO_TRACE */
5638 #endif /* SFCT_SUPPORT */
5639 
5640 	request_tag = cqe->RequestTag;
5641 
5642 	/* 1 to 1 mapping between CQ and channel */
5643 	cp = cq->channelp;
5644 
5645 	mutex_enter(&EMLXS_FCTAB_LOCK);
5646 	sbp = hba->fc_table[request_tag];
5647 
5648 	if (!sbp) {
5649 		cp->hbaCmplCmd++;
5650 		mutex_exit(&EMLXS_FCTAB_LOCK);
5651 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5652 		    "CQ ENTRY: NULL sbp. iotag=%d. Dropping...",
5653 		    request_tag);
5654 		return;
5655 	}
5656 
5657 	if (sbp == STALE_PACKET) {
5658 		cp->hbaCmplCmd_sbp++;
5659 		mutex_exit(&EMLXS_FCTAB_LOCK);
5660 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5661 		    "CQ ENTRY: Stale sbp. iotag=%d. Dropping...", request_tag);
5662 		return;
5663 	}
5664 
5665 	if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
5666 		atomic_add_32(&hba->io_active, -1);
5667 #ifdef NODE_THROTTLE_SUPPORT
5668 		if (sbp->node) {
5669 			atomic_add_32(&sbp->node->io_active, -1);
5670 		}
5671 #endif /* NODE_THROTTLE_SUPPORT */
5672 	}
5673 
5674 	if (!(sbp->xrip)) {
5675 		cp->hbaCmplCmd++;
5676 		mutex_exit(&EMLXS_FCTAB_LOCK);
5677 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5678 		    "CQ ENTRY: NULL sbp xrip %p. iotag=%d. Dropping...",
5679 		    sbp, request_tag);
5680 		return;
5681 	}
5682 
5683 #ifdef DEBUG_FASTPATH
5684 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5685 	    "CQ ENTRY: process wqe compl");
5686 #endif /* DEBUG_FASTPATH */
5687 	cp->hbaCmplCmd_sbp++;
5688 
5689 	/* Copy entry to sbp's iocbq */
5690 	iocbq = &sbp->iocbq;
5691 	emlxs_CQE_to_IOCB(hba, cqe, sbp);
5692 
5693 	iocbq->next = NULL;
5694 
5695 	if (cqe->XB) {
5696 		/* Mark exchange as ABORT in progress */
5697 		sbp->xrip->flag &= ~EMLXS_XRI_PENDING_IO;
5698 		sbp->xrip->flag |= EMLXS_XRI_BUSY;
5699 
5700 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5701 		    "CQ ENTRY: XRI BUSY: iotag=%d xri=%d", request_tag,
5702 		    sbp->xrip->XRI);
5703 
5704 		emlxs_sli4_free_xri(port, sbp, 0, 0);
5705 	} else {
5706 		/* Exchange is no longer busy on-chip, free it */
5707 		emlxs_sli4_free_xri(port, sbp, sbp->xrip, 0);
5708 	}
5709 
5710 	mutex_exit(&EMLXS_FCTAB_LOCK);
5711 
5712 #ifdef SFCT_SUPPORT
5713 #ifdef FCT_IO_TRACE
5714 	fct_cmd = sbp->fct_cmd;
5715 	if (fct_cmd) {
5716 		cmd_sbp = (emlxs_buf_t *)fct_cmd->cmd_fca_private;
5717 		mutex_enter(&cmd_sbp->fct_mtx);
5718 		EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp, EMLXS_FCT_IOCB_COMPLETE);
5719 		mutex_exit(&cmd_sbp->fct_mtx);
5720 	}
5721 #endif /* FCT_IO_TRACE */
5722 #endif /* SFCT_SUPPORT */
5723 
5724 	/*
5725 	 * If this is NOT a polled command completion
5726 	 * or a driver allocated pkt, then defer pkt
5727 	 * completion.
5728 	 */
5729 	if (!(sbp->pkt_flags &
5730 	    (PACKET_POLLED | PACKET_ALLOCATED))) {
5731 		/* Add the IOCB to the channel list */
5732 		mutex_enter(&cp->rsp_lock);
5733 		if (cp->rsp_head == NULL) {
5734 			cp->rsp_head = iocbq;
5735 			cp->rsp_tail = iocbq;
5736 		} else {
5737 			cp->rsp_tail->next = iocbq;
5738 			cp->rsp_tail = iocbq;
5739 		}
5740 		mutex_exit(&cp->rsp_lock);
5741 
5742 		/* Delay triggering thread till end of ISR */
5743 		cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
5744 	} else {
5745 		emlxs_proc_channel_event(hba, cp, iocbq);
5746 	}
5747 
5748 } /* emlxs_sli4_process_wqe_cmpl() */
5749 
5750 
5751 /*ARGSUSED*/
5752 static void
emlxs_sli4_process_release_wqe(emlxs_hba_t * hba,CQ_DESC_t * cq,CQE_RelWQ_t * cqe)5753 emlxs_sli4_process_release_wqe(emlxs_hba_t *hba, CQ_DESC_t *cq,
5754     CQE_RelWQ_t *cqe)
5755 {
5756 	emlxs_port_t *port = &PPORT;
5757 	WQ_DESC_t *wq;
5758 	CHANNEL *cp;
5759 	uint32_t i;
5760 	uint16_t wqi;
5761 
5762 	wqi = emlxs_sli4_wqid_to_index(hba, (uint16_t)cqe->WQid);
5763 
5764 	/* Verify WQ index */
5765 	if (wqi == 0xffff) {
5766 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5767 		    "CQ ENTRY: Invalid WQid:%d. Dropping...",
5768 		    cqe->WQid);
5769 		return;
5770 	}
5771 
5772 	wq = &hba->sli.sli4.wq[wqi];
5773 
5774 #ifdef DEBUG_FASTPATH
5775 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5776 	    "CQ ENTRY: process release wqe: old %d new %d", wq->port_index,
5777 	    cqe->WQindex);
5778 #endif /* DEBUG_FASTPATH */
5779 
5780 	wq->port_index = cqe->WQindex;
5781 
5782 	/* Cmd ring may be available. Try sending more iocbs */
5783 	for (i = 0; i < hba->chan_count; i++) {
5784 		cp = &hba->chan[i];
5785 		if (wq == (WQ_DESC_t *)cp->iopath) {
5786 			emlxs_sli4_issue_iocb_cmd(hba, cp, 0);
5787 		}
5788 	}
5789 
5790 } /* emlxs_sli4_process_release_wqe() */
5791 
5792 
5793 /*ARGSUSED*/
5794 emlxs_iocbq_t *
emlxs_sli4_rxq_get(emlxs_hba_t * hba,fc_frame_hdr_t * fchdr)5795 emlxs_sli4_rxq_get(emlxs_hba_t *hba, fc_frame_hdr_t *fchdr)
5796 {
5797 	emlxs_queue_t *q;
5798 	emlxs_iocbq_t *iocbq;
5799 	emlxs_iocbq_t *prev;
5800 	fc_frame_hdr_t *fchdr2;
5801 	RXQ_DESC_t *rxq;
5802 
5803 	switch (fchdr->type) {
5804 	case 1: /* ELS */
5805 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
5806 		break;
5807 	case 0x20: /* CT */
5808 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
5809 		break;
5810 	default:
5811 		return (NULL);
5812 	}
5813 
5814 	mutex_enter(&rxq->lock);
5815 
5816 	q = &rxq->active;
5817 	iocbq  = (emlxs_iocbq_t *)q->q_first;
5818 	prev = NULL;
5819 
5820 	while (iocbq) {
5821 
5822 		fchdr2 = (fc_frame_hdr_t *)iocbq->iocb.un.ulpWord;
5823 
5824 		if ((fchdr2->s_id == fchdr->s_id) &&
5825 		    (fchdr2->ox_id == fchdr->ox_id) &&
5826 		    (fchdr2->seq_id == fchdr->seq_id)) {
5827 			/* Remove iocbq */
5828 			if (prev) {
5829 				prev->next = iocbq->next;
5830 			}
5831 			if (q->q_first == (uint8_t *)iocbq) {
5832 				q->q_first = (uint8_t *)iocbq->next;
5833 			}
5834 			if (q->q_last == (uint8_t *)iocbq) {
5835 				q->q_last = (uint8_t *)prev;
5836 			}
5837 			q->q_cnt--;
5838 
5839 			break;
5840 		}
5841 
5842 		prev  = iocbq;
5843 		iocbq = iocbq->next;
5844 	}
5845 
5846 	mutex_exit(&rxq->lock);
5847 
5848 	return (iocbq);
5849 
5850 } /* emlxs_sli4_rxq_get() */
5851 
5852 
5853 /*ARGSUSED*/
5854 void
emlxs_sli4_rxq_put(emlxs_hba_t * hba,emlxs_iocbq_t * iocbq)5855 emlxs_sli4_rxq_put(emlxs_hba_t *hba, emlxs_iocbq_t *iocbq)
5856 {
5857 	emlxs_queue_t *q;
5858 	fc_frame_hdr_t *fchdr;
5859 	RXQ_DESC_t *rxq;
5860 
5861 	fchdr = (fc_frame_hdr_t *)iocbq->iocb.RXFCHDR;
5862 
5863 	switch (fchdr->type) {
5864 	case 1: /* ELS */
5865 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
5866 		break;
5867 	case 0x20: /* CT */
5868 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
5869 		break;
5870 	default:
5871 		return;
5872 	}
5873 
5874 	mutex_enter(&rxq->lock);
5875 
5876 	q = &rxq->active;
5877 
5878 	if (q->q_last) {
5879 		((emlxs_iocbq_t *)q->q_last)->next = iocbq;
5880 		q->q_cnt++;
5881 	} else {
5882 		q->q_first = (uint8_t *)iocbq;
5883 		q->q_cnt = 1;
5884 	}
5885 
5886 	q->q_last = (uint8_t *)iocbq;
5887 	iocbq->next = NULL;
5888 
5889 	mutex_exit(&rxq->lock);
5890 
5891 	return;
5892 
5893 } /* emlxs_sli4_rxq_put() */
5894 
5895 
5896 static void
emlxs_sli4_rq_post(emlxs_port_t * port,uint16_t rqid)5897 emlxs_sli4_rq_post(emlxs_port_t *port, uint16_t rqid)
5898 {
5899 	emlxs_hba_t *hba = HBA;
5900 	emlxs_rqdbu_t rqdb;
5901 
5902 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5903 	    "RQ POST: rqid=%d count=1", rqid);
5904 
5905 	/* Ring the RQ doorbell once to repost the RQ buffer */
5906 	rqdb.word = 0;
5907 	rqdb.db.Qid = rqid;
5908 	rqdb.db.NumPosted = 1;
5909 
5910 	emlxs_sli4_write_rqdb(hba, rqdb.word);
5911 
5912 } /* emlxs_sli4_rq_post() */
5913 
5914 
5915 /*ARGSUSED*/
5916 static void
emlxs_sli4_process_unsol_rcv(emlxs_hba_t * hba,CQ_DESC_t * cq,CQE_UnsolRcv_t * cqe)5917 emlxs_sli4_process_unsol_rcv(emlxs_hba_t *hba, CQ_DESC_t *cq,
5918     CQE_UnsolRcv_t *cqe)
5919 {
5920 	emlxs_port_t *port = &PPORT;
5921 	emlxs_port_t *vport;
5922 	RQ_DESC_t *hdr_rq;
5923 	RQ_DESC_t *data_rq;
5924 	MBUF_INFO *hdr_mp;
5925 	MBUF_INFO *data_mp;
5926 	MATCHMAP *seq_mp;
5927 	uint32_t *data;
5928 	fc_frame_hdr_t fchdr;
5929 	uint16_t hdr_rqi;
5930 	uint32_t host_index;
5931 	emlxs_iocbq_t *iocbq = NULL;
5932 	emlxs_iocb_t *iocb;
5933 	emlxs_node_t *node = NULL;
5934 	uint32_t i;
5935 	uint32_t seq_len;
5936 	uint32_t seq_cnt;
5937 	uint32_t buf_type;
5938 	char label[32];
5939 	emlxs_wqe_t *wqe;
5940 	CHANNEL *cp;
5941 	XRIobj_t *xrip;
5942 	RPIobj_t *rpip = NULL;
5943 	uint32_t	cmd;
5944 	uint32_t posted = 0;
5945 	uint32_t abort = 1;
5946 	off_t offset;
5947 	uint32_t status;
5948 	uint32_t data_size;
5949 	uint16_t rqid;
5950 	uint32_t hdr_size;
5951 	fc_packet_t *pkt;
5952 	emlxs_buf_t *sbp;
5953 
5954 	if (cqe->Code == CQE_TYPE_UNSOL_RCV_V1) {
5955 		CQE_UnsolRcvV1_t *cqeV1 = (CQE_UnsolRcvV1_t *)cqe;
5956 
5957 		status	  = cqeV1->Status;
5958 		data_size = cqeV1->data_size;
5959 		rqid	  = cqeV1->RQid;
5960 		hdr_size  = cqeV1->hdr_size;
5961 	} else {
5962 		status	  = cqe->Status;
5963 		data_size = cqe->data_size;
5964 		rqid	  = cqe->RQid;
5965 		hdr_size  = cqe->hdr_size;
5966 	}
5967 
5968 	/* Validate the CQE */
5969 
5970 	/* Check status */
5971 	switch (status) {
5972 	case RQ_STATUS_SUCCESS: /* 0x10 */
5973 		break;
5974 
5975 	case RQ_STATUS_BUFLEN_EXCEEDED:  /* 0x11 */
5976 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5977 		    "CQ ENTRY: Unsol Rcv: Payload truncated.");
5978 		break;
5979 
5980 	case RQ_STATUS_NEED_BUFFER: /* 0x12 */
5981 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5982 		    "CQ ENTRY: Unsol Rcv: Payload buffer needed.");
5983 		return;
5984 
5985 	case RQ_STATUS_FRAME_DISCARDED:  /* 0x13 */
5986 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5987 		    "CQ ENTRY: Unsol Rcv: Payload buffer discarded.");
5988 		return;
5989 
5990 	default:
5991 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5992 		    "CQ ENTRY: Unsol Rcv: Unknown status=%x.",
5993 		    status);
5994 		break;
5995 	}
5996 
5997 	/* Make sure there is a frame header */
5998 	if (hdr_size < sizeof (fc_frame_hdr_t)) {
5999 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6000 		    "CQ ENTRY: Unsol Rcv: FC header too small. Dropping...");
6001 		return;
6002 	}
6003 
6004 	hdr_rqi = emlxs_sli4_rqid_to_index(hba, rqid);
6005 
6006 	/* Verify RQ index */
6007 	if (hdr_rqi == 0xffff) {
6008 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6009 		    "CQ ENTRY: Unsol Rcv: Invalid RQID:%d. Dropping...",
6010 		    rqid);
6011 		return;
6012 	}
6013 
6014 	hdr_rq  = &hba->sli.sli4.rq[hdr_rqi];
6015 	data_rq = &hba->sli.sli4.rq[hdr_rqi + 1];
6016 
6017 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6018 	    "CQ ENTRY: Unsol Rcv:%x rqid=%d,%d index=%d status=%x "
6019 	    "hdr_size=%d data_size=%d",
6020 	    cqe->Code, rqid, hdr_rqi, hdr_rq->host_index, status, hdr_size,
6021 	    data_size);
6022 
6023 	hdr_rq->num_proc++;
6024 
6025 	/* Update host index */
6026 	mutex_enter(&hba->sli.sli4.rq[hdr_rqi].lock);
6027 	host_index = hdr_rq->host_index;
6028 	hdr_rq->host_index++;
6029 
6030 	if (hdr_rq->host_index >= hdr_rq->max_index) {
6031 		hdr_rq->host_index = 0;
6032 	}
6033 	data_rq->host_index = hdr_rq->host_index;
6034 	mutex_exit(&hba->sli.sli4.rq[hdr_rqi].lock);
6035 
6036 	/* Get the next header rqb */
6037 	hdr_mp  = &hdr_rq->rqb[host_index];
6038 
6039 	offset = (off_t)((uint64_t)((unsigned long)hdr_mp->virt) -
6040 	    (uint64_t)((unsigned long)hba->sli.sli4.slim2.virt));
6041 
6042 	EMLXS_MPDATA_SYNC(hdr_mp->dma_handle, offset,
6043 	    sizeof (fc_frame_hdr_t), DDI_DMA_SYNC_FORKERNEL);
6044 
6045 	LE_SWAP32_BCOPY(hdr_mp->virt, (uint8_t *)&fchdr,
6046 	    sizeof (fc_frame_hdr_t));
6047 
6048 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6049 	    "RQ HDR[%d]: rctl:%x type:%x "
6050 	    "sid:%x did:%x oxid:%x rxid:%x",
6051 	    host_index, fchdr.r_ctl, fchdr.type,
6052 	    fchdr.s_id,  fchdr.d_id, fchdr.ox_id, fchdr.rx_id);
6053 
6054 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6055 	    "RQ HDR[%d]: fctl:%x seq_id:%x seq_cnt:%x df_ctl:%x ro:%x",
6056 	    host_index, fchdr.f_ctl, fchdr.seq_id, fchdr.seq_cnt,
6057 	    fchdr.df_ctl, fchdr.ro);
6058 
6059 	/* Verify fc header type */
6060 	switch (fchdr.type) {
6061 	case 0: /* BLS */
6062 		if (fchdr.r_ctl != 0x81) {
6063 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6064 			    "RQ ENTRY: Unexpected FC rctl (0x%x) "
6065 			    "received. Dropping...",
6066 			    fchdr.r_ctl);
6067 
6068 			goto done;
6069 		}
6070 
6071 		/* Make sure there is no payload */
6072 		if (data_size != 0) {
6073 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6074 			    "RQ ENTRY: ABTS payload provided. Dropping...");
6075 
6076 			goto done;
6077 		}
6078 
6079 		buf_type = 0xFFFFFFFF;
6080 		(void) strlcpy(label, "ABTS", sizeof (label));
6081 		cp = &hba->chan[hba->channel_els];
6082 		break;
6083 
6084 	case 0x01: /* ELS */
6085 		/* Make sure there is a payload */
6086 		if (data_size == 0) {
6087 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6088 			    "RQ ENTRY: Unsol Rcv: No ELS payload provided. "
6089 			    "Dropping...");
6090 
6091 			goto done;
6092 		}
6093 
6094 		buf_type = MEM_ELSBUF;
6095 		(void) strlcpy(label, "Unsol ELS", sizeof (label));
6096 		cp = &hba->chan[hba->channel_els];
6097 		break;
6098 
6099 	case 0x20: /* CT */
6100 		/* Make sure there is a payload */
6101 		if (data_size == 0) {
6102 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6103 			    "RQ ENTRY: Unsol Rcv: No CT payload provided. "
6104 			    "Dropping...");
6105 
6106 			goto done;
6107 		}
6108 
6109 		buf_type = MEM_CTBUF;
6110 		(void) strlcpy(label, "Unsol CT", sizeof (label));
6111 		cp = &hba->chan[hba->channel_ct];
6112 		break;
6113 
6114 	case 0x08: /* FCT */
6115 		/* Make sure there is a payload */
6116 		if (data_size == 0) {
6117 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6118 			    "RQ ENTRY: Unsol Rcv: No FCP payload provided. "
6119 			    "Dropping...");
6120 
6121 			goto done;
6122 		}
6123 
6124 		buf_type = MEM_FCTBUF;
6125 		(void) strlcpy(label, "Unsol FCT", sizeof (label));
6126 		cp = &hba->chan[hba->CHANNEL_FCT];
6127 		break;
6128 
6129 	default:
6130 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6131 		    "RQ ENTRY: Unexpected FC type (0x%x) received. Dropping...",
6132 		    fchdr.type);
6133 
6134 		goto done;
6135 	}
6136 	/* Fc Header is valid */
6137 
6138 	/* Check if this is an active sequence */
6139 	iocbq = emlxs_sli4_rxq_get(hba, &fchdr);
6140 
6141 	if (!iocbq) {
6142 		if (fchdr.type != 0) {
6143 			if (!(fchdr.f_ctl & F_CTL_FIRST_SEQ)) {
6144 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6145 				    "RQ ENTRY: %s: First of sequence not"
6146 				    " set.  Dropping...",
6147 				    label);
6148 
6149 				goto done;
6150 			}
6151 		}
6152 
6153 		if ((fchdr.type != 0) && (fchdr.seq_cnt != 0)) {
6154 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6155 			    "RQ ENTRY: %s: Sequence count not zero (%d).  "
6156 			    "Dropping...",
6157 			    label, fchdr.seq_cnt);
6158 
6159 			goto done;
6160 		}
6161 
6162 		/* Find vport */
6163 		for (i = 0; i < MAX_VPORTS; i++) {
6164 			vport = &VPORT(i);
6165 
6166 			if (vport->did == fchdr.d_id) {
6167 				port = vport;
6168 				break;
6169 			}
6170 		}
6171 
6172 		if (i == MAX_VPORTS) {
6173 			/* Allow unsol FLOGI & PLOGI for P2P */
6174 			if ((fchdr.type != 1 /* ELS*/) ||
6175 			    ((fchdr.d_id != FABRIC_DID) &&
6176 			    !(hba->flag & FC_PT_TO_PT))) {
6177 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6178 				    "RQ ENTRY: %s: Invalid did=%x. Dropping...",
6179 				    label, fchdr.d_id);
6180 
6181 				goto done;
6182 			}
6183 		}
6184 
6185 		/* Allocate an IOCBQ */
6186 		iocbq = (emlxs_iocbq_t *)emlxs_mem_get(hba, MEM_IOCB);
6187 
6188 		if (!iocbq) {
6189 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6190 			    "RQ ENTRY: %s: Out of IOCB "
6191 			    "resources.  Dropping...",
6192 			    label);
6193 
6194 			goto done;
6195 		}
6196 
6197 		seq_mp = NULL;
6198 		if (fchdr.type != 0) {
6199 			/* Allocate a buffer */
6200 			seq_mp = (MATCHMAP *)emlxs_mem_get(hba, buf_type);
6201 
6202 			if (!seq_mp) {
6203 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6204 				    "RQ ENTRY: %s: Out of buffer "
6205 				    "resources.  Dropping...",
6206 				    label);
6207 
6208 				goto done;
6209 			}
6210 
6211 			iocbq->bp = (uint8_t *)seq_mp;
6212 		}
6213 
6214 		node = (void *)emlxs_node_find_did(port, fchdr.s_id, 1);
6215 		if (node == NULL) {
6216 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6217 			    "RQ ENTRY: %s: Node not found. sid=%x",
6218 			    label, fchdr.s_id);
6219 		}
6220 
6221 		/* Initialize the iocbq */
6222 		iocbq->port = port;
6223 		iocbq->channel = cp;
6224 		iocbq->node = node;
6225 
6226 		iocb = &iocbq->iocb;
6227 		iocb->RXSEQCNT = 0;
6228 		iocb->RXSEQLEN = 0;
6229 
6230 		seq_len = 0;
6231 		seq_cnt = 0;
6232 
6233 	} else {
6234 
6235 		iocb = &iocbq->iocb;
6236 		port = iocbq->port;
6237 		node = (emlxs_node_t *)iocbq->node;
6238 
6239 		seq_mp = (MATCHMAP *)iocbq->bp;
6240 		seq_len = iocb->RXSEQLEN;
6241 		seq_cnt = iocb->RXSEQCNT;
6242 
6243 		/* Check sequence order */
6244 		if (fchdr.seq_cnt != seq_cnt) {
6245 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6246 			    "RQ ENTRY: %s: Out of order frame received "
6247 			    "(%d != %d).  Dropping...",
6248 			    label, fchdr.seq_cnt, seq_cnt);
6249 
6250 			goto done;
6251 		}
6252 	}
6253 
6254 	/* We now have an iocbq */
6255 
6256 	if (!port->vpip->vfip) {
6257 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6258 		    "RQ ENTRY: %s: No fabric connection. "
6259 		    "Dropping...",
6260 		    label);
6261 
6262 		goto done;
6263 	}
6264 
6265 	/* Save the frame data to our seq buffer */
6266 	if (data_size && seq_mp) {
6267 		/* Get the next data rqb */
6268 		data_mp = &data_rq->rqb[host_index];
6269 
6270 		offset = (off_t)((uint64_t)((unsigned long)
6271 		    data_mp->virt) -
6272 		    (uint64_t)((unsigned long)
6273 		    hba->sli.sli4.slim2.virt));
6274 
6275 		EMLXS_MPDATA_SYNC(data_mp->dma_handle, offset,
6276 		    data_size, DDI_DMA_SYNC_FORKERNEL);
6277 
6278 		data = (uint32_t *)data_mp->virt;
6279 
6280 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6281 		    "RQ DAT[%d]: %08x %08x %08x %08x %08x %08x ...",
6282 		    host_index, data[0], data[1], data[2], data[3],
6283 		    data[4], data[5]);
6284 
6285 		/* Check sequence length */
6286 		if ((seq_len + data_size) > seq_mp->size) {
6287 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6288 			    "RQ ENTRY: %s: Sequence buffer overflow. "
6289 			    "(%d > %d). Dropping...",
6290 			    label, (seq_len + data_size), seq_mp->size);
6291 
6292 			goto done;
6293 		}
6294 
6295 		/* Copy data to local receive buffer */
6296 		bcopy((uint8_t *)data, ((uint8_t *)seq_mp->virt +
6297 		    seq_len), data_size);
6298 
6299 		seq_len += data_size;
6300 	}
6301 
6302 	/* If this is not the last frame of sequence, queue it. */
6303 	if (!(fchdr.f_ctl & F_CTL_END_SEQ)) {
6304 		/* Save sequence header */
6305 		if (seq_cnt == 0) {
6306 			bcopy((uint8_t *)&fchdr, (uint8_t *)iocb->RXFCHDR,
6307 			    sizeof (fc_frame_hdr_t));
6308 		}
6309 
6310 		/* Update sequence info in iocb */
6311 		iocb->RXSEQCNT = seq_cnt + 1;
6312 		iocb->RXSEQLEN = seq_len;
6313 
6314 		/* Queue iocbq for next frame */
6315 		emlxs_sli4_rxq_put(hba, iocbq);
6316 
6317 		/* Don't free resources */
6318 		iocbq = NULL;
6319 
6320 		/* No need to abort */
6321 		abort = 0;
6322 
6323 		goto done;
6324 	}
6325 
6326 	emlxs_sli4_rq_post(port, hdr_rq->qid);
6327 	posted = 1;
6328 
6329 	/* End of sequence found. Process request now. */
6330 
6331 	if (seq_cnt > 0) {
6332 		/* Retrieve first frame of sequence */
6333 		bcopy((uint8_t *)iocb->RXFCHDR, (uint8_t *)&fchdr,
6334 		    sizeof (fc_frame_hdr_t));
6335 
6336 		bzero((uint8_t *)iocb, sizeof (emlxs_iocb_t));
6337 	}
6338 
6339 	/* Build rcv iocb and process it */
6340 	switch (fchdr.type) {
6341 	case 0: /* BLS */
6342 
6343 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6344 		    "RQ ENTRY: %s: oxid:%x rxid %x sid:%x. Sending BLS ACC...",
6345 		    label, fchdr.ox_id, fchdr.rx_id, fchdr.s_id);
6346 
6347 		/* Try to send abort response */
6348 		if (!(pkt = emlxs_pkt_alloc(port, 0, 0, 0, KM_NOSLEEP))) {
6349 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6350 			    "RQ ENTRY: %s: Unable to alloc pkt. Dropping...",
6351 			    label);
6352 			goto done;
6353 		}
6354 
6355 		/* Setup sbp / iocb for driver initiated cmd */
6356 		sbp = PKT2PRIV(pkt);
6357 
6358 		/* Free the temporary iocbq */
6359 		emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
6360 
6361 		iocbq = (emlxs_iocbq_t *)&sbp->iocbq;
6362 		iocbq->port = port;
6363 		iocbq->channel = cp;
6364 		iocbq->node = node;
6365 
6366 		sbp->pkt_flags &= ~PACKET_ULP_OWNED;
6367 
6368 		if (node) {
6369 			sbp->node = node;
6370 			sbp->did  = node->nlp_DID;
6371 		}
6372 
6373 		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
6374 
6375 		/* BLS ACC Response */
6376 		wqe = &iocbq->wqe;
6377 		bzero((void *)wqe, sizeof (emlxs_wqe_t));
6378 
6379 		iocbq->iocb.ULPCOMMAND = CMD_XMIT_BLS_RSP64_CX;
6380 		wqe->Command = CMD_XMIT_BLS_RSP64_CX;
6381 		wqe->CmdType = WQE_TYPE_GEN;
6382 
6383 		wqe->un.BlsRsp.Payload0 = 0x80;
6384 		wqe->un.BlsRsp.Payload1 = fchdr.seq_id;
6385 
6386 		wqe->un.BlsRsp.OXId = fchdr.ox_id;
6387 		wqe->un.BlsRsp.RXId = fchdr.rx_id;
6388 
6389 		wqe->un.BlsRsp.SeqCntLow = 0;
6390 		wqe->un.BlsRsp.SeqCntHigh = 0xFFFF;
6391 
6392 		wqe->un.BlsRsp.XO = ((fchdr.f_ctl & F_CTL_XCHG_CONTEXT)? 1:0);
6393 		wqe->un.BlsRsp.AR = 0;
6394 
6395 		rpip = EMLXS_NODE_TO_RPI(port, node);
6396 
6397 		if (rpip) {
6398 			wqe->ContextType = WQE_RPI_CONTEXT;
6399 			wqe->ContextTag = rpip->RPI;
6400 		} else {
6401 			wqe->ContextType = WQE_VPI_CONTEXT;
6402 			wqe->ContextTag = port->vpip->VPI;
6403 
6404 			rpip = emlxs_rpi_reserve_notify(port, fchdr.s_id, 0);
6405 
6406 			if (!rpip) {
6407 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6408 				    "RQ ENTRY: %s: Unable to alloc "
6409 				    "reserved RPI. Dropping...",
6410 				    label);
6411 
6412 				goto done;
6413 			}
6414 
6415 			/* Store the reserved rpi */
6416 			wqe->CmdSpecific = rpip->RPI;
6417 
6418 			wqe->un.BlsRsp.RemoteId = fchdr.s_id;
6419 			wqe->un.BlsRsp.LocalId = fchdr.d_id;
6420 		}
6421 
6422 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6423 			wqe->CCPE = 1;
6424 			wqe->CCP = fchdr.rsvd;
6425 		}
6426 
6427 		/* Allocate an exchange for this command */
6428 		xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
6429 		    EMLXS_XRI_SOL_BLS_TYPE);
6430 
6431 		if (!xrip) {
6432 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6433 			    "RQ ENTRY: %s: Unable to alloc XRI. Dropping...",
6434 			    label);
6435 			goto done;
6436 		}
6437 
6438 		wqe->XRITag = xrip->XRI;
6439 		wqe->Class = CLASS3;
6440 		wqe->RequestTag = xrip->iotag;
6441 		wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
6442 
6443 		sbp->ticks = hba->timer_tics + 30;
6444 
6445 		emlxs_sli4_issue_iocb_cmd(hba, iocbq->channel, iocbq);
6446 
6447 		/* The temporary iocbq has been freed already */
6448 		iocbq = NULL;
6449 
6450 		break;
6451 
6452 	case 1: /* ELS */
6453 		cmd = *((uint32_t *)seq_mp->virt);
6454 		cmd &= ELS_CMD_MASK;
6455 
6456 		if (!(port->vpip->flag & EMLXS_VPI_PORT_ENABLED)) {
6457 			uint32_t dropit = 1;
6458 
6459 			/* Allow for P2P handshaking */
6460 			switch (cmd) {
6461 			case ELS_CMD_FLOGI:
6462 				dropit = 0;
6463 				break;
6464 
6465 			case ELS_CMD_PLOGI:
6466 			case ELS_CMD_PRLI:
6467 				if (hba->flag & FC_PT_TO_PT) {
6468 					dropit = 0;
6469 				}
6470 				break;
6471 			}
6472 
6473 			if (dropit) {
6474 				EMLXS_MSGF(EMLXS_CONTEXT,
6475 				    &emlxs_sli_detail_msg,
6476 				    "RQ ENTRY: %s: Port not yet enabled. "
6477 				    "Dropping...",
6478 				    label);
6479 				goto done;
6480 			}
6481 		}
6482 
6483 		rpip = NULL;
6484 
6485 		if (cmd != ELS_CMD_LOGO) {
6486 			rpip = EMLXS_NODE_TO_RPI(port, node);
6487 		}
6488 
6489 		if (!rpip) {
6490 			/* Use the fabric rpi */
6491 			rpip = port->vpip->fabric_rpip;
6492 		}
6493 
6494 		xrip = emlxs_sli4_reserve_xri(port, rpip,
6495 		    EMLXS_XRI_UNSOL_ELS_TYPE, fchdr.ox_id);
6496 
6497 		if (!xrip) {
6498 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6499 			    "RQ ENTRY: %s: Out of exchange "
6500 			    "resources.  Dropping...",
6501 			    label);
6502 
6503 			goto done;
6504 		}
6505 
6506 		/* Build CMD_RCV_ELS64_CX */
6507 		iocb->un.rcvels64.elsReq.tus.f.bdeFlags = 0;
6508 		iocb->un.rcvels64.elsReq.tus.f.bdeSize  = seq_len;
6509 		iocb->un.rcvels64.elsReq.addrLow  = PADDR_LO(seq_mp->phys);
6510 		iocb->un.rcvels64.elsReq.addrHigh = PADDR_HI(seq_mp->phys);
6511 		iocb->ULPBDECOUNT = 1;
6512 
6513 		iocb->un.rcvels64.remoteID = fchdr.s_id;
6514 		iocb->un.rcvels64.parmRo = fchdr.d_id;
6515 
6516 		iocb->ULPPU = 0x3;
6517 		iocb->ULPCONTEXT = xrip->XRI;
6518 		iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
6519 		iocb->ULPCLASS = CLASS3;
6520 		iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;
6521 
6522 		iocb->unsli3.ext_rcv.seq_len = seq_len;
6523 		iocb->unsli3.ext_rcv.vpi = port->vpip->VPI;
6524 		iocb->unsli3.ext_rcv.oxid = fchdr.ox_id;
6525 
6526 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6527 			iocb->unsli3.ext_rcv.ccpe = 1;
6528 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
6529 		}
6530 
6531 		if (port->mode == MODE_INITIATOR) {
6532 			(void) emlxs_els_handle_unsol_req(port, iocbq->channel,
6533 			    iocbq, seq_mp, seq_len);
6534 		}
6535 #ifdef SFCT_SUPPORT
6536 		else if (port->mode == MODE_TARGET) {
6537 			(void) emlxs_fct_handle_unsol_els(port, iocbq->channel,
6538 			    iocbq, seq_mp, seq_len);
6539 		}
6540 #endif /* SFCT_SUPPORT */
6541 		break;
6542 
6543 #ifdef SFCT_SUPPORT
6544 	case 8: /* FCT */
6545 		if (!(port->VPIobj.flag & EMLXS_VPI_PORT_ENABLED)) {
6546 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6547 			    "RQ ENTRY: %s: Port not yet enabled. "
6548 			    "Dropping...",
6549 			    label);
6550 
6551 			goto done;
6552 		}
6553 
6554 		rpip = EMLXS_NODE_TO_RPI(port, node);
6555 
6556 		if (!rpip) {
6557 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6558 			    "RQ ENTRY: %s: Port not logged in. "
6559 			    "Dropping...",
6560 			    label);
6561 
6562 			goto done;
6563 		}
6564 
6565 		xrip = emlxs_sli4_reserve_xri(port, rpip,
6566 		    EMLXS_XRI_UNSOL_FCP_TYPE, fchdr.ox_id);
6567 
6568 		if (!xrip) {
6569 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6570 			    "RQ ENTRY: %s: Out of exchange "
6571 			    "resources.  Dropping...",
6572 			    label);
6573 
6574 			goto done;
6575 		}
6576 
6577 		/* Build CMD_RCV_SEQUENCE64_CX */
6578 		iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
6579 		iocb->un.rcvseq64.rcvBde.tus.f.bdeSize  = seq_len;
6580 		iocb->un.rcvseq64.rcvBde.addrLow  = PADDR_LO(seq_mp->phys);
6581 		iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
6582 		iocb->ULPBDECOUNT = 1;
6583 
6584 		iocb->ULPPU = 0x3;
6585 		iocb->ULPCONTEXT = xrip->XRI;
6586 		iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
6587 		iocb->ULPCLASS = CLASS3;
6588 		iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;
6589 
6590 		iocb->unsli3.ext_rcv.seq_len = seq_len;
6591 		iocb->unsli3.ext_rcv.vpi = port->VPIobj.VPI;
6592 		iocb->unsli3.ext_rcv.oxid = fchdr.ox_id;
6593 
6594 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6595 			iocb->unsli3.ext_rcv.ccpe = 1;
6596 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
6597 		}
6598 
6599 		/* pass xrip to FCT in the iocbq */
6600 		iocbq->sbp = xrip;
6601 
6602 #define	EMLXS_FIX_CISCO_BUG1
6603 #ifdef EMLXS_FIX_CISCO_BUG1
6604 {
6605 uint8_t *ptr;
6606 ptr = ((uint8_t *)seq_mp->virt);
6607 if (((*ptr+12) != 0xa0) && (*(ptr+20) == 0x8) && (*(ptr+21) == 0x8)) {
6608 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6609 	    "RQ ENTRY: Bad CDB fixed");
6610 	*ptr++ = 0;
6611 	*ptr = 0;
6612 }
6613 }
6614 #endif
6615 		(void) emlxs_fct_handle_unsol_req(port, cp, iocbq,
6616 			seq_mp, seq_len);
6617 		break;
6618 #endif /* SFCT_SUPPORT */
6619 
6620 	case 0x20: /* CT */
6621 		if (!(port->vpip->flag & EMLXS_VPI_PORT_ENABLED) &&
6622 		    !(hba->flag & FC_LOOPBACK_MODE)) {
6623 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6624 			    "RQ ENTRY: %s: Port not yet enabled. "
6625 			    "Dropping...",
6626 			    label);
6627 
6628 			goto done;
6629 		}
6630 
6631 		if (!node) {
6632 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6633 			    "RQ ENTRY: %s: Node not found (did=%x).  "
6634 			    "Dropping...",
6635 			    label, fchdr.d_id);
6636 
6637 			goto done;
6638 		}
6639 
6640 		rpip = EMLXS_NODE_TO_RPI(port, node);
6641 
6642 		if (!rpip) {
6643 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6644 			    "RQ ENTRY: %s: RPI not found (did=%x rpi=%d).  "
6645 			    "Dropping...",
6646 			    label, fchdr.d_id, node->nlp_Rpi);
6647 
6648 			goto done;
6649 		}
6650 
6651 		xrip = emlxs_sli4_reserve_xri(port, rpip,
6652 		    EMLXS_XRI_UNSOL_CT_TYPE, fchdr.ox_id);
6653 
6654 		if (!xrip) {
6655 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6656 			    "RQ ENTRY: %s: Out of exchange "
6657 			    "resources.  Dropping...",
6658 			    label);
6659 
6660 			goto done;
6661 		}
6662 
6663 		/* Build CMD_RCV_SEQ64_CX */
6664 		iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
6665 		iocb->un.rcvseq64.rcvBde.tus.f.bdeSize  = seq_len;
6666 		iocb->un.rcvseq64.rcvBde.addrLow  = PADDR_LO(seq_mp->phys);
6667 		iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
6668 		iocb->ULPBDECOUNT = 1;
6669 
6670 		iocb->un.rcvseq64.xrsqRo = 0;
6671 		iocb->un.rcvseq64.w5.hcsw.Rctl = fchdr.r_ctl;
6672 		iocb->un.rcvseq64.w5.hcsw.Type = fchdr.type;
6673 		iocb->un.rcvseq64.w5.hcsw.Dfctl = fchdr.df_ctl;
6674 		iocb->un.rcvseq64.w5.hcsw.Fctl = fchdr.f_ctl;
6675 
6676 		iocb->ULPPU = 0x3;
6677 		iocb->ULPCONTEXT = xrip->XRI;
6678 		iocb->ULPIOTAG = rpip->RPI;
6679 		iocb->ULPCLASS = CLASS3;
6680 		iocb->ULPCOMMAND = CMD_RCV_SEQ64_CX;
6681 
6682 		iocb->unsli3.ext_rcv.seq_len = seq_len;
6683 		iocb->unsli3.ext_rcv.vpi = port->vpip->VPI;
6684 
6685 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6686 			iocb->unsli3.ext_rcv.ccpe = 1;
6687 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
6688 		}
6689 
6690 		(void) emlxs_ct_handle_unsol_req(port, iocbq->channel,
6691 		    iocbq, seq_mp, seq_len);
6692 
6693 		break;
6694 	}
6695 
6696 	/* Sequence handled, no need to abort */
6697 	abort = 0;
6698 
6699 done:
6700 
6701 	if (!posted) {
6702 		emlxs_sli4_rq_post(port, hdr_rq->qid);
6703 	}
6704 
6705 	if (abort) {
6706 		/* Send ABTS for this exchange */
6707 		/* !!! Currently, we have no implementation for this !!! */
6708 		abort = 0;
6709 	}
6710 
6711 	/* Return memory resources to pools */
6712 	if (iocbq) {
6713 		if (iocbq->bp) {
6714 			emlxs_mem_put(hba, buf_type, (void *)iocbq->bp);
6715 			iocbq->bp = 0;
6716 		}
6717 
6718 		emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
6719 	}
6720 
6721 #ifdef FMA_SUPPORT
6722 	if (emlxs_fm_check_dma_handle(hba,
6723 	    hba->sli.sli4.slim2.dma_handle)
6724 	    != DDI_FM_OK) {
6725 		EMLXS_MSGF(EMLXS_CONTEXT,
6726 		    &emlxs_invalid_dma_handle_msg,
6727 		    "sli4_process_unsol_rcv: hdl=%p",
6728 		    hba->sli.sli4.slim2.dma_handle);
6729 
6730 		emlxs_thread_spawn(hba, emlxs_restart_thread,
6731 		    0, 0);
6732 	}
6733 #endif
6734 	return;
6735 
6736 } /* emlxs_sli4_process_unsol_rcv() */
6737 
6738 
6739 /*ARGSUSED*/
6740 static void
emlxs_sli4_process_xri_aborted(emlxs_hba_t * hba,CQ_DESC_t * cq,CQE_XRI_Abort_t * cqe)6741 emlxs_sli4_process_xri_aborted(emlxs_hba_t *hba, CQ_DESC_t *cq,
6742     CQE_XRI_Abort_t *cqe)
6743 {
6744 	emlxs_port_t *port = &PPORT;
6745 	XRIobj_t *xrip;
6746 
6747 	mutex_enter(&EMLXS_FCTAB_LOCK);
6748 
6749 	xrip = emlxs_sli4_find_xri(port, cqe->XRI);
6750 	if (xrip == NULL) {
6751 		/* EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg, */
6752 		/*    "CQ ENTRY: process xri aborted ignored");  */
6753 
6754 		mutex_exit(&EMLXS_FCTAB_LOCK);
6755 		return;
6756 	}
6757 
6758 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6759 	    "CQ ENTRY: XRI Aborted: xri=%d IA=%d EO=%d BR=%d",
6760 	    cqe->XRI, cqe->IA, cqe->EO, cqe->BR);
6761 
6762 	if (!(xrip->flag & EMLXS_XRI_BUSY)) {
6763 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6764 		    "CQ ENTRY: XRI Aborted: xri=%d flag=%x. Bad state.",
6765 		    xrip->XRI, xrip->flag);
6766 
6767 		mutex_exit(&EMLXS_FCTAB_LOCK);
6768 		return;
6769 	}
6770 
6771 	/* Exchange is no longer busy on-chip, free it */
6772 	emlxs_sli4_free_xri(port, 0, xrip, 0);
6773 
6774 	mutex_exit(&EMLXS_FCTAB_LOCK);
6775 
6776 	return;
6777 
6778 } /* emlxs_sli4_process_xri_aborted () */
6779 
6780 
/*
 * emlxs_sli4_process_cq
 *
 * Drain one Completion Queue: consume every valid CQE, dispatch each
 * entry to its type-specific handler, then ring the doorbell once to
 * pop the consumed entries and rearm the CQ.
 *
 *   hba - adapter instance
 *   cq  - completion queue descriptor to service
 *
 * EMLXS_PORT_LOCK must be held on entry; it is dropped around each
 * per-entry handler call and re-acquired, and is held again on return.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_cq(emlxs_hba_t *hba, CQ_DESC_t *cq)
{
	emlxs_port_t *port = &PPORT;
	CQE_u *cqe;
	CQE_u cq_entry;
	uint32_t cqdb;
	int num_entries = 0;	/* CQEs consumed during this call */
	off_t offset;

	/* EMLXS_PORT_LOCK must be held when entering this routine */

	/* Start at the host's current consumer position in the ring */
	cqe = (CQE_u *)cq->addr.virt;
	cqe += cq->host_index;

	/* Offset of this CQ ring within the shared slim2 DMA region */
	offset = (off_t)((uint64_t)((unsigned long)
	    cq->addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	EMLXS_MPDATA_SYNC(cq->addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);

	for (;;) {
		/* Word 3 carries the valid bit; stop at the first stale CQE */
		cq_entry.word[3] = BE_SWAP32(cqe->word[3]);
		if (!(cq_entry.word[3] & CQE_VALID)) {
			break;
		}

		/* Copy the remaining words out in host byte order */
		cq_entry.word[2] = BE_SWAP32(cqe->word[2]);
		cq_entry.word[1] = BE_SWAP32(cqe->word[1]);
		cq_entry.word[0] = BE_SWAP32(cqe->word[0]);

#ifdef	DEBUG_CQE
		emlxs_data_dump(port, "CQE", (uint32_t *)cqe, 6, 0);
#endif /* DEBUG_CQE */
		num_entries++;

		/* Invalidate the entry so it is not reprocessed on a wrap */
		cqe->word[3] = 0;

		/* Advance the consumer index, wrapping at end of ring */
		cq->host_index++;
		if (cq->host_index >= cq->max_index) {
			cq->host_index = 0;
			cqe = (CQE_u *)cq->addr.virt;
		} else {
			cqe++;
		}

		/* Drop the lock while the entry handler runs */
		mutex_exit(&EMLXS_PORT_LOCK);

		/* Now handle specific cq type */
		if (cq->type == EMLXS_CQ_TYPE_GROUP1) {
			/* Group 1 CQs carry mailbox and async event CQEs */
			if (cq_entry.cqAsyncEntry.async_evt) {
				emlxs_sli4_process_async_event(hba,
				    (CQE_ASYNC_t *)&cq_entry);
			} else {
				emlxs_sli4_process_mbox_event(hba,
				    (CQE_MBOX_t *)&cq_entry);
			}
		} else { /* EMLXS_CQ_TYPE_GROUP2 */
			switch (cq_entry.cqCmplEntry.Code) {
			case CQE_TYPE_WQ_COMPLETION:
				/* Tags beyond max_iotag take the OOR path */
				if (cq_entry.cqCmplEntry.RequestTag <
				    hba->max_iotag) {
					emlxs_sli4_process_wqe_cmpl(hba, cq,
					    (CQE_CmplWQ_t *)&cq_entry);
				} else {
					emlxs_sli4_process_oor_wqe_cmpl(hba, cq,
					    (CQE_CmplWQ_t *)&cq_entry);
				}
				break;
			case CQE_TYPE_RELEASE_WQE:
				emlxs_sli4_process_release_wqe(hba, cq,
				    (CQE_RelWQ_t *)&cq_entry);
				break;
			case CQE_TYPE_UNSOL_RCV:
			case CQE_TYPE_UNSOL_RCV_V1:
				emlxs_sli4_process_unsol_rcv(hba, cq,
				    (CQE_UnsolRcv_t *)&cq_entry);
				break;
			case CQE_TYPE_XRI_ABORTED:
				emlxs_sli4_process_xri_aborted(hba, cq,
				    (CQE_XRI_Abort_t *)&cq_entry);
				break;
			default:
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "Invalid CQ entry %d: %08x %08x %08x %08x",
				    cq_entry.cqCmplEntry.Code, cq_entry.word[0],
				    cq_entry.word[1], cq_entry.word[2],
				    cq_entry.word[3]);
				break;
			}
		}

		mutex_enter(&EMLXS_PORT_LOCK);
	}

	/* Number of times this routine gets called for this CQ */
	cq->isr_count++;

	/* num_entries is the number of CQEs we process in this specific CQ */
	cq->num_proc += num_entries;
	if (cq->max_proc < num_entries)
		cq->max_proc = num_entries;

	/* Compose the doorbell: qid + rearm + number of entries popped */
	cqdb = cq->qid;
	cqdb |= CQ_DB_REARM;
	if (num_entries != 0) {
		cqdb |= ((num_entries << CQ_DB_POP_SHIFT) & CQ_DB_POP_MASK);
	}

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "CQE: CLEAR cqdb=%08x: pops=%d", cqdb, num_entries);
#endif /* DEBUG_FASTPATH */

	emlxs_sli4_write_cqdb(hba, cqdb);

	/* EMLXS_PORT_LOCK must be held when exiting this routine */

} /* emlxs_sli4_process_cq() */
6901 
6902 
/*
 * emlxs_sli4_process_eq
 *
 * Drain one Event Queue: consume every valid EQE, service the
 * Completion Queue referenced by each event, then ring the doorbell
 * to clear/pop the consumed events and rearm the EQ.
 *
 *   hba - adapter instance
 *   eq  - event queue descriptor to service
 *
 * EMLXS_PORT_LOCK must be held on entry and is held again on return;
 * emlxs_sli4_process_cq() drops and re-acquires it internally.
 * intr_busy_cnt brackets the whole routine so emlxs_sli4_hba_kill()
 * can wait for interrupt activity to drain.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
{
	emlxs_port_t *port = &PPORT;
	uint32_t eqdb;
	uint32_t *ptr;
	CHANNEL *cp;
	EQE_u eqe;
	uint32_t i;
	uint16_t cqi;
	int num_entries = 0;	/* EQEs consumed during this call */
	off_t offset;

	/* EMLXS_PORT_LOCK must be held when entering this routine */

	hba->intr_busy_cnt ++;

	/* Start at the host's current consumer position in the ring */
	ptr = eq->addr.virt;
	ptr += eq->host_index;

	/* Offset of this EQ ring within the shared slim2 DMA region */
	offset = (off_t)((uint64_t)((unsigned long)
	    eq->addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);

	for (;;) {
		eqe.word = *ptr;
		eqe.word = BE_SWAP32(eqe.word);

		/* Stop at the first entry without the valid bit set */
		if (!(eqe.word & EQE_VALID)) {
			break;
		}

#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "EQE00: %08x", eqe.word);
#endif /* DEBUG_FASTPATH */

		/* Invalidate the entry so it is not reprocessed on a wrap */
		*ptr = 0;
		num_entries++;

		/* Advance the consumer index, wrapping at end of ring */
		eq->host_index++;
		if (eq->host_index >= eq->max_index) {
			eq->host_index = 0;
			ptr = eq->addr.virt;
		} else {
			ptr++;
		}

		/* Translate the hardware CQ id into our cq[] array index */
		cqi = emlxs_sli4_cqid_to_index(hba, eqe.entry.CQId);

		/* Verify CQ index */
		if (cqi == 0xffff) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "EQE: Invalid CQid: %d. Dropping...",
			    eqe.entry.CQId);
			continue;
		}

#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "EQE: CQIndex:%x cqid:%x", cqi, eqe.entry.CQId);
#endif /* DEBUG_FASTPATH */

		emlxs_sli4_process_cq(hba, &hba->sli.sli4.cq[cqi]);
	}

	/* Number of times the ISR for this EQ gets called */
	eq->isr_count++;

	/* num_entries is the number of EQEs we process in this specific ISR */
	eq->num_proc += num_entries;
	if (eq->max_proc < num_entries) {
		eq->max_proc = num_entries;
	}

	/* Compose the doorbell: qid + clear/event/rearm flags */
	eqdb = eq->qid;
	eqdb |= (EQ_DB_CLEAR | EQ_DB_EVENT | EQ_DB_REARM);

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "EQE: CLEAR eqdb=%08x pops=%d", eqdb, num_entries);
#endif /* DEBUG_FASTPATH */

	if (num_entries != 0) {
		eqdb |= ((num_entries << EQ_DB_POP_SHIFT) & EQ_DB_POP_MASK);
		/* Kick any channels that deferred work to their threads */
		for (i = 0; i < hba->chan_count; i++) {
			cp = &hba->chan[i];
			if (cp->chan_flag & EMLXS_NEEDS_TRIGGER) {
				cp->chan_flag &= ~EMLXS_NEEDS_TRIGGER;
				emlxs_thread_trigger2(&cp->intr_thread,
				    emlxs_proc_channel, cp);
			}
		}
	}

	/*
	 * NOTE(review): the EQ doorbell is written through the CQ doorbell
	 * helper -- presumably the two share a register; confirm against
	 * the SLI-4 register map before changing.
	 */
	emlxs_sli4_write_cqdb(hba, eqdb);

	/* EMLXS_PORT_LOCK must be held when exiting this routine */

	hba->intr_busy_cnt --;

} /* emlxs_sli4_process_eq() */
7009 
7010 
7011 #ifdef MSI_SUPPORT
7012 /*ARGSUSED*/
7013 static uint32_t
emlxs_sli4_msi_intr(char * arg1,char * arg2)7014 emlxs_sli4_msi_intr(char *arg1, char *arg2)
7015 {
7016 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
7017 #ifdef DEBUG_FASTPATH
7018 	emlxs_port_t *port = &PPORT;
7019 #endif /* DEBUG_FASTPATH */
7020 	uint16_t msgid;
7021 	int rc;
7022 
7023 #ifdef DEBUG_FASTPATH
7024 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7025 	    "msiINTR arg1:%p arg2:%p", arg1, arg2);
7026 #endif /* DEBUG_FASTPATH */
7027 
7028 	/* Check for legacy interrupt handling */
7029 	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
7030 		rc = emlxs_sli4_intx_intr(arg1);
7031 		return (rc);
7032 	}
7033 
7034 	/* Get MSI message id */
7035 	msgid = (uint16_t)((unsigned long)arg2);
7036 
7037 	/* Validate the message id */
7038 	if (msgid >= hba->intr_count) {
7039 		msgid = 0;
7040 	}
7041 	mutex_enter(&EMLXS_PORT_LOCK);
7042 
7043 	if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
7044 		mutex_exit(&EMLXS_PORT_LOCK);
7045 		return (DDI_INTR_UNCLAIMED);
7046 	}
7047 
7048 	/* The eq[] index == the MSI vector number */
7049 	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[msgid]);
7050 
7051 	mutex_exit(&EMLXS_PORT_LOCK);
7052 	return (DDI_INTR_CLAIMED);
7053 
7054 } /* emlxs_sli4_msi_intr() */
7055 #endif /* MSI_SUPPORT */
7056 
7057 
7058 /*ARGSUSED*/
7059 static int
emlxs_sli4_intx_intr(char * arg)7060 emlxs_sli4_intx_intr(char *arg)
7061 {
7062 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
7063 #ifdef DEBUG_FASTPATH
7064 	emlxs_port_t *port = &PPORT;
7065 #endif /* DEBUG_FASTPATH */
7066 
7067 #ifdef DEBUG_FASTPATH
7068 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7069 	    "intxINTR arg:%p", arg);
7070 #endif /* DEBUG_FASTPATH */
7071 
7072 	mutex_enter(&EMLXS_PORT_LOCK);
7073 
7074 	if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
7075 		mutex_exit(&EMLXS_PORT_LOCK);
7076 		return (DDI_INTR_UNCLAIMED);
7077 	}
7078 
7079 	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[0]);
7080 
7081 	mutex_exit(&EMLXS_PORT_LOCK);
7082 	return (DDI_INTR_CLAIMED);
7083 } /* emlxs_sli4_intx_intr() */
7084 
7085 
7086 static void
emlxs_sli4_hba_kill(emlxs_hba_t * hba)7087 emlxs_sli4_hba_kill(emlxs_hba_t *hba)
7088 {
7089 	emlxs_port_t *port = &PPORT;
7090 	uint32_t j;
7091 
7092 	mutex_enter(&EMLXS_PORT_LOCK);
7093 	if (hba->flag & FC_INTERLOCKED) {
7094 		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
7095 
7096 		mutex_exit(&EMLXS_PORT_LOCK);
7097 
7098 		return;
7099 	}
7100 
7101 	j = 0;
7102 	while (j++ < 10000) {
7103 		if ((hba->mbox_queue_flag == 0) &&
7104 		    (hba->intr_busy_cnt == 0)) {
7105 			break;
7106 		}
7107 
7108 		mutex_exit(&EMLXS_PORT_LOCK);
7109 		BUSYWAIT_US(100);
7110 		mutex_enter(&EMLXS_PORT_LOCK);
7111 	}
7112 
7113 	if ((hba->mbox_queue_flag != 0) || (hba->intr_busy_cnt > 0)) {
7114 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
7115 		    "Board kill failed. Adapter busy, %d, %d.",
7116 		    hba->mbox_queue_flag, hba->intr_busy_cnt);
7117 		mutex_exit(&EMLXS_PORT_LOCK);
7118 		return;
7119 	}
7120 
7121 	hba->flag |= FC_INTERLOCKED;
7122 
7123 	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
7124 
7125 	mutex_exit(&EMLXS_PORT_LOCK);
7126 
7127 } /* emlxs_sli4_hba_kill() */
7128 
7129 
7130 extern void
emlxs_sli4_hba_reset_all(emlxs_hba_t * hba,uint32_t flag)7131 emlxs_sli4_hba_reset_all(emlxs_hba_t *hba, uint32_t flag)
7132 {
7133 	emlxs_port_t *port = &PPORT;
7134 	uint32_t value;
7135 
7136 	mutex_enter(&EMLXS_PORT_LOCK);
7137 
7138 	if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) != SLI_INTF_IF_TYPE_2) {
7139 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
7140 		    "Reset All failed. Invalid Operation.");
7141 		mutex_exit(&EMLXS_PORT_LOCK);
7142 		return;
7143 	}
7144 
7145 	/* Issue a Firmware Reset All Request */
7146 	if (flag) {
7147 		value = SLI_PHYDEV_FRST | SLI_PHYDEV_FRL_ALL | SLI_PHYDEV_DD;
7148 	} else {
7149 		value = SLI_PHYDEV_FRST | SLI_PHYDEV_FRL_ALL;
7150 	}
7151 
7152 	ddi_put32(hba->sli.sli4.bar0_acc_handle,
7153 	    hba->sli.sli4.PHYSDEV_reg_addr, value);
7154 
7155 	mutex_exit(&EMLXS_PORT_LOCK);
7156 
7157 } /* emlxs_sli4_hba_reset_all() */
7158 
7159 
7160 static void
emlxs_sli4_enable_intr(emlxs_hba_t * hba)7161 emlxs_sli4_enable_intr(emlxs_hba_t *hba)
7162 {
7163 	emlxs_config_t *cfg = &CFG;
7164 	int i;
7165 	int num_cq;
7166 	uint32_t data;
7167 
7168 	hba->sli.sli4.flag |= EMLXS_SLI4_INTR_ENABLED;
7169 
7170 	num_cq = (hba->intr_count * cfg[CFG_NUM_WQ].current) +
7171 	    EMLXS_CQ_OFFSET_WQ;
7172 
7173 	/* ARM EQ / CQs */
7174 	for (i = 0; i < num_cq; i++) {
7175 		data = hba->sli.sli4.cq[i].qid;
7176 		data |= CQ_DB_REARM;
7177 		emlxs_sli4_write_cqdb(hba, data);
7178 	}
7179 	for (i = 0; i < hba->intr_count; i++) {
7180 		data = hba->sli.sli4.eq[i].qid;
7181 		data |= (EQ_DB_REARM | EQ_DB_EVENT);
7182 		emlxs_sli4_write_cqdb(hba, data);
7183 	}
7184 } /* emlxs_sli4_enable_intr() */
7185 
7186 
7187 static void
emlxs_sli4_disable_intr(emlxs_hba_t * hba,uint32_t att)7188 emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att)
7189 {
7190 	if (att) {
7191 		return;
7192 	}
7193 
7194 	hba->sli.sli4.flag &= ~EMLXS_SLI4_INTR_ENABLED;
7195 
7196 	/* Short of reset, we cannot disable interrupts */
7197 } /* emlxs_sli4_disable_intr() */
7198 
7199 
7200 static void
emlxs_sli4_resource_free(emlxs_hba_t * hba)7201 emlxs_sli4_resource_free(emlxs_hba_t *hba)
7202 {
7203 	emlxs_port_t	*port = &PPORT;
7204 	MBUF_INFO	*buf_info;
7205 	uint32_t	i;
7206 
7207 	buf_info = &hba->sli.sli4.slim2;
7208 	if (buf_info->virt == 0) {
7209 		/* Already free */
7210 		return;
7211 	}
7212 
7213 	emlxs_fcf_fini(hba);
7214 
7215 	buf_info = &hba->sli.sli4.HeaderTmplate;
7216 	if (buf_info->virt) {
7217 		bzero(buf_info, sizeof (MBUF_INFO));
7218 	}
7219 
7220 	if (hba->sli.sli4.XRIp) {
7221 		if ((hba->sli.sli4.XRIinuse_f !=
7222 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) ||
7223 		    (hba->sli.sli4.XRIinuse_b !=
7224 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f)) {
7225 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
7226 			    "XRIs in use during free!: %p %p != %p\n",
7227 			    hba->sli.sli4.XRIinuse_f,
7228 			    hba->sli.sli4.XRIinuse_b,
7229 			    &hba->sli.sli4.XRIinuse_f);
7230 		}
7231 		kmem_free(hba->sli.sli4.XRIp,
7232 		    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount));
7233 		hba->sli.sli4.XRIp = NULL;
7234 
7235 		hba->sli.sli4.XRIfree_f =
7236 		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7237 		hba->sli.sli4.XRIfree_b =
7238 		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7239 		hba->sli.sli4.xrif_count = 0;
7240 	}
7241 
7242 	for (i = 0; i < hba->intr_count; i++) {
7243 		mutex_destroy(&hba->sli.sli4.eq[i].lastwq_lock);
7244 		bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
7245 		hba->sli.sli4.eq[i].qid = 0xffff;
7246 	}
7247 	for (i = 0; i < EMLXS_MAX_CQS; i++) {
7248 		bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
7249 		hba->sli.sli4.cq[i].qid = 0xffff;
7250 	}
7251 	for (i = 0; i < EMLXS_MAX_WQS; i++) {
7252 		bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
7253 		hba->sli.sli4.wq[i].qid = 0xffff;
7254 	}
7255 	for (i = 0; i < EMLXS_MAX_RXQS; i++) {
7256 		mutex_destroy(&hba->sli.sli4.rxq[i].lock);
7257 		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
7258 	}
7259 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
7260 		mutex_destroy(&hba->sli.sli4.rq[i].lock);
7261 		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
7262 		hba->sli.sli4.rq[i].qid = 0xffff;
7263 	}
7264 
7265 	/* Free the MQ */
7266 	bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
7267 
7268 	buf_info = &hba->sli.sli4.slim2;
7269 	if (buf_info->virt) {
7270 		buf_info->flags = FC_MBUF_DMA;
7271 		emlxs_mem_free(hba, buf_info);
7272 		bzero(buf_info, sizeof (MBUF_INFO));
7273 	}
7274 
7275 } /* emlxs_sli4_resource_free() */
7276 
7277 
7278 static int
emlxs_sli4_resource_alloc(emlxs_hba_t * hba)7279 emlxs_sli4_resource_alloc(emlxs_hba_t *hba)
7280 {
7281 	emlxs_port_t	*port = &PPORT;
7282 	emlxs_config_t	*cfg = &CFG;
7283 	MBUF_INFO	*buf_info;
7284 	int		num_eq;
7285 	int		num_wq;
7286 	uint16_t	i;
7287 	uint32_t	j;
7288 	uint32_t	k;
7289 	uint16_t	cq_depth;
7290 	uint32_t	cq_size;
7291 	uint32_t	word;
7292 	XRIobj_t	*xrip;
7293 	RQE_t		*rqe;
7294 	MBUF_INFO	*rqb;
7295 	uint64_t	phys;
7296 	uint64_t	tmp_phys;
7297 	char		*virt;
7298 	char		*tmp_virt;
7299 	void		*data_handle;
7300 	void		*dma_handle;
7301 	int32_t		size;
7302 	off_t		offset;
7303 	uint32_t	count = 0;
7304 	uint32_t	hddr_size = 0;
7305 	uint32_t	align;
7306 	uint32_t	iotag;
7307 
7308 	buf_info = &hba->sli.sli4.slim2;
7309 	if (buf_info->virt) {
7310 		/* Already allocated */
7311 		return (0);
7312 	}
7313 
7314 	emlxs_fcf_init(hba);
7315 
7316 	switch (hba->sli.sli4.param.CQV) {
7317 	case 0:
7318 		cq_depth = CQ_DEPTH;
7319 		break;
7320 	case 2:
7321 	default:
7322 		cq_depth = CQ_DEPTH_V2;
7323 		break;
7324 	}
7325 	cq_size = (cq_depth * CQE_SIZE);
7326 
7327 	/* EQs - 1 per Interrupt vector */
7328 	num_eq = hba->intr_count;
7329 
7330 	/* CQs  - number of WQs + 1 for RQs + 1 for mbox/async events */
7331 	num_wq = cfg[CFG_NUM_WQ].current * num_eq;
7332 
7333 	/* Calculate total dmable memory we need */
7334 	/* WARNING: make sure each section is aligned on 4K boundary */
7335 
7336 	/* EQ */
7337 	count += num_eq * 4096;
7338 
7339 	/* CQ */
7340 	count += (num_wq + EMLXS_CQ_OFFSET_WQ) * cq_size;
7341 
7342 	/* WQ */
7343 	count += num_wq * (4096 * EMLXS_NUM_WQ_PAGES);
7344 
7345 	/* MQ */
7346 	count +=  EMLXS_MAX_MQS * 4096;
7347 
7348 	/* RQ */
7349 	count +=  EMLXS_MAX_RQS * 4096;
7350 
7351 	/* RQB/E */
7352 	count += RQB_COUNT * (RQB_DATA_SIZE + RQB_HEADER_SIZE);
7353 	count += (4096 - (count%4096)); /* Ensure 4K alignment */
7354 
7355 	/* SGL */
7356 	count += hba->sli.sli4.XRIExtSize * hba->sli.sli4.mem_sgl_size;
7357 	count += (4096 - (count%4096)); /* Ensure 4K alignment */
7358 
7359 	/* RPI Header Templates */
7360 	if (hba->sli.sli4.param.HDRR) {
7361 		/* Bytes per extent */
7362 		j = hba->sli.sli4.RPIExtSize * sizeof (RPIHdrTmplate_t);
7363 
7364 		/* Pages required per extent (page == 4096 bytes) */
7365 		k = (j/4096) + ((j%4096)? 1:0);
7366 
7367 		/* Total size */
7368 		hddr_size = (k * hba->sli.sli4.RPIExtCount * 4096);
7369 
7370 		count += hddr_size;
7371 	}
7372 
7373 	/* Allocate slim2 for SLI4 */
7374 	buf_info = &hba->sli.sli4.slim2;
7375 	buf_info->size = count;
7376 	buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7377 	buf_info->align = ddi_ptob(hba->dip, 1L);
7378 
7379 	(void) emlxs_mem_alloc(hba, buf_info);
7380 
7381 	if (buf_info->virt == NULL) {
7382 		EMLXS_MSGF(EMLXS_CONTEXT,
7383 		    &emlxs_init_failed_msg,
7384 		    "Unable to allocate internal memory for SLI4: %d",
7385 		    count);
7386 		goto failed;
7387 	}
7388 	bzero(buf_info->virt, buf_info->size);
7389 	EMLXS_MPDATA_SYNC(buf_info->dma_handle, 0,
7390 	    buf_info->size, DDI_DMA_SYNC_FORDEV);
7391 
7392 	/* Assign memory to SGL, Head Template, EQ, CQ, WQ, RQ and MQ */
7393 	data_handle = buf_info->data_handle;
7394 	dma_handle = buf_info->dma_handle;
7395 	phys = buf_info->phys;
7396 	virt = (char *)buf_info->virt;
7397 
7398 	/* Allocate space for queues */
7399 
7400 	/* EQ */
7401 	size = 4096;
7402 	for (i = 0; i < num_eq; i++) {
7403 		bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
7404 
7405 		buf_info = &hba->sli.sli4.eq[i].addr;
7406 		buf_info->size = size;
7407 		buf_info->flags =
7408 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7409 		buf_info->align = ddi_ptob(hba->dip, 1L);
7410 		buf_info->phys = phys;
7411 		buf_info->virt = (void *)virt;
7412 		buf_info->data_handle = data_handle;
7413 		buf_info->dma_handle = dma_handle;
7414 
7415 		phys += size;
7416 		virt += size;
7417 
7418 		hba->sli.sli4.eq[i].max_index = EQ_DEPTH;
7419 		hba->sli.sli4.eq[i].qid = 0xffff;
7420 
7421 		mutex_init(&hba->sli.sli4.eq[i].lastwq_lock, NULL,
7422 		    MUTEX_DRIVER, NULL);
7423 	}
7424 
7425 
7426 	/* CQ */
7427 	for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
7428 		bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
7429 
7430 		buf_info = &hba->sli.sli4.cq[i].addr;
7431 		buf_info->size = cq_size;
7432 		buf_info->flags =
7433 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7434 		buf_info->align = ddi_ptob(hba->dip, 1L);
7435 		buf_info->phys = phys;
7436 		buf_info->virt = (void *)virt;
7437 		buf_info->data_handle = data_handle;
7438 		buf_info->dma_handle = dma_handle;
7439 
7440 		phys += cq_size;
7441 		virt += cq_size;
7442 
7443 		hba->sli.sli4.cq[i].max_index = cq_depth;
7444 		hba->sli.sli4.cq[i].qid = 0xffff;
7445 	}
7446 
7447 
7448 	/* WQ */
7449 	size = 4096 * EMLXS_NUM_WQ_PAGES;
7450 	for (i = 0; i < num_wq; i++) {
7451 		bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
7452 
7453 		buf_info = &hba->sli.sli4.wq[i].addr;
7454 		buf_info->size = size;
7455 		buf_info->flags =
7456 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7457 		buf_info->align = ddi_ptob(hba->dip, 1L);
7458 		buf_info->phys = phys;
7459 		buf_info->virt = (void *)virt;
7460 		buf_info->data_handle = data_handle;
7461 		buf_info->dma_handle = dma_handle;
7462 
7463 		phys += size;
7464 		virt += size;
7465 
7466 		hba->sli.sli4.wq[i].max_index = WQ_DEPTH;
7467 		hba->sli.sli4.wq[i].release_depth = WQE_RELEASE_DEPTH;
7468 		hba->sli.sli4.wq[i].qid = 0xFFFF;
7469 	}
7470 
7471 
7472 	/* MQ */
7473 	size = 4096;
7474 	bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
7475 
7476 	buf_info = &hba->sli.sli4.mq.addr;
7477 	buf_info->size = size;
7478 	buf_info->flags =
7479 	    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7480 	buf_info->align = ddi_ptob(hba->dip, 1L);
7481 	buf_info->phys = phys;
7482 	buf_info->virt = (void *)virt;
7483 	buf_info->data_handle = data_handle;
7484 	buf_info->dma_handle = dma_handle;
7485 
7486 	phys += size;
7487 	virt += size;
7488 
7489 	hba->sli.sli4.mq.max_index = MQ_DEPTH;
7490 
7491 
7492 	/* RXQ */
7493 	for (i = 0; i < EMLXS_MAX_RXQS; i++) {
7494 		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
7495 
7496 		mutex_init(&hba->sli.sli4.rxq[i].lock, NULL, MUTEX_DRIVER,
7497 		    NULL);
7498 	}
7499 
7500 
7501 	/* RQ */
7502 	size = 4096;
7503 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
7504 		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
7505 
7506 		buf_info = &hba->sli.sli4.rq[i].addr;
7507 		buf_info->size = size;
7508 		buf_info->flags =
7509 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7510 		buf_info->align = ddi_ptob(hba->dip, 1L);
7511 		buf_info->phys = phys;
7512 		buf_info->virt = (void *)virt;
7513 		buf_info->data_handle = data_handle;
7514 		buf_info->dma_handle = dma_handle;
7515 
7516 		phys += size;
7517 		virt += size;
7518 
7519 		hba->sli.sli4.rq[i].max_index = RQ_DEPTH;
7520 		hba->sli.sli4.rq[i].qid = 0xFFFF;
7521 
7522 		mutex_init(&hba->sli.sli4.rq[i].lock, NULL, MUTEX_DRIVER, NULL);
7523 	}
7524 
7525 
7526 	/* RQB/E */
7527 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
7528 		size = (i & 0x1) ? RQB_DATA_SIZE : RQB_HEADER_SIZE;
7529 		tmp_phys = phys;
7530 		tmp_virt = virt;
7531 
7532 		/* Initialize the RQEs */
7533 		rqe = (RQE_t *)hba->sli.sli4.rq[i].addr.virt;
7534 		for (j = 0; j < (RQ_DEPTH/RQB_COUNT); j++) {
7535 			phys = tmp_phys;
7536 			virt = tmp_virt;
7537 			for (k = 0; k < RQB_COUNT; k++) {
7538 				word = PADDR_HI(phys);
7539 				rqe->AddrHi = BE_SWAP32(word);
7540 
7541 				word = PADDR_LO(phys);
7542 				rqe->AddrLo = BE_SWAP32(word);
7543 
7544 				rqb = &hba->sli.sli4.rq[i].
7545 				    rqb[k + (j * RQB_COUNT)];
7546 				rqb->size = size;
7547 				rqb->flags = FC_MBUF_DMA |
7548 				    FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7549 				rqb->align = ddi_ptob(hba->dip, 1L);
7550 				rqb->phys = phys;
7551 				rqb->virt = (void *)virt;
7552 				rqb->data_handle = data_handle;
7553 				rqb->dma_handle = dma_handle;
7554 
7555 				phys += size;
7556 				virt += size;
7557 #ifdef DEBUG_RQE
7558 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7559 				    "RQ_ALLOC: rq[%d] rqb[%d,%d]=%p iotag=%d",
7560 				    i, j, k, mp, mp->tag);
7561 #endif /* DEBUG_RQE */
7562 
7563 				rqe++;
7564 			}
7565 		}
7566 
7567 		offset = (off_t)((uint64_t)((unsigned long)
7568 		    hba->sli.sli4.rq[i].addr.virt) -
7569 		    (uint64_t)((unsigned long)
7570 		    hba->sli.sli4.slim2.virt));
7571 
7572 		/* Sync the RQ buffer list */
7573 		EMLXS_MPDATA_SYNC(hba->sli.sli4.rq[i].addr.dma_handle, offset,
7574 		    hba->sli.sli4.rq[i].addr.size, DDI_DMA_SYNC_FORDEV);
7575 	}
7576 
7577 	/* 4K Alignment */
7578 	align = (4096 - (phys%4096));
7579 	phys += align;
7580 	virt += align;
7581 
7582 	/* SGL */
7583 	/* Initialize double linked lists */
7584 	hba->sli.sli4.XRIinuse_f =
7585 	    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
7586 	hba->sli.sli4.XRIinuse_b =
7587 	    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
7588 	hba->sli.sli4.xria_count = 0;
7589 
7590 	hba->sli.sli4.XRIfree_f =
7591 	    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7592 	hba->sli.sli4.XRIfree_b =
7593 	    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7594 	hba->sli.sli4.xria_count = 0;
7595 
7596 	hba->sli.sli4.XRIp = (XRIobj_t *)kmem_zalloc(
7597 	    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount), KM_SLEEP);
7598 
7599 	xrip = hba->sli.sli4.XRIp;
7600 	size = hba->sli.sli4.mem_sgl_size;
7601 	iotag = 1;
7602 	for (i = 0; i < hba->sli.sli4.XRICount; i++) {
7603 		xrip->XRI = emlxs_sli4_index_to_xri(hba, i);
7604 
7605 		/* We don't use XRI==0, since it also represents an */
7606 		/* uninitialized exchange */
7607 		if (xrip->XRI == 0) {
7608 			xrip++;
7609 			continue;
7610 		}
7611 
7612 		xrip->iotag = iotag++;
7613 		xrip->sge_count =
7614 		    (hba->sli.sli4.mem_sgl_size / sizeof (ULP_SGE64));
7615 
7616 		/* Add xrip to end of free list */
7617 		xrip->_b = hba->sli.sli4.XRIfree_b;
7618 		hba->sli.sli4.XRIfree_b->_f = xrip;
7619 		xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7620 		hba->sli.sli4.XRIfree_b = xrip;
7621 		hba->sli.sli4.xrif_count++;
7622 
7623 		/* Allocate SGL for this xrip */
7624 		buf_info = &xrip->SGList;
7625 		buf_info->size = size;
7626 		buf_info->flags =
7627 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7628 		buf_info->align = size;
7629 		buf_info->phys = phys;
7630 		buf_info->virt = (void *)virt;
7631 		buf_info->data_handle = data_handle;
7632 		buf_info->dma_handle = dma_handle;
7633 
7634 		phys += size;
7635 		virt += size;
7636 
7637 		xrip++;
7638 	}
7639 
7640 	/* 4K Alignment */
7641 	align = (4096 - (phys%4096));
7642 	phys += align;
7643 	virt += align;
7644 
7645 	/* RPI Header Templates */
7646 	if (hba->sli.sli4.param.HDRR) {
7647 		buf_info = &hba->sli.sli4.HeaderTmplate;
7648 		bzero(buf_info, sizeof (MBUF_INFO));
7649 		buf_info->size = hddr_size;
7650 		buf_info->flags = FC_MBUF_DMA | FC_MBUF_DMA32;
7651 		buf_info->align = ddi_ptob(hba->dip, 1L);
7652 		buf_info->phys = phys;
7653 		buf_info->virt = (void *)virt;
7654 		buf_info->data_handle = data_handle;
7655 		buf_info->dma_handle = dma_handle;
7656 	}
7657 
7658 #ifdef FMA_SUPPORT
7659 	if (hba->sli.sli4.slim2.dma_handle) {
7660 		if (emlxs_fm_check_dma_handle(hba,
7661 		    hba->sli.sli4.slim2.dma_handle)
7662 		    != DDI_FM_OK) {
7663 			EMLXS_MSGF(EMLXS_CONTEXT,
7664 			    &emlxs_invalid_dma_handle_msg,
7665 			    "sli4_resource_alloc: hdl=%p",
7666 			    hba->sli.sli4.slim2.dma_handle);
7667 			goto failed;
7668 		}
7669 	}
7670 #endif /* FMA_SUPPORT */
7671 
7672 	return (0);
7673 
7674 failed:
7675 
7676 	(void) emlxs_sli4_resource_free(hba);
7677 	return (ENOMEM);
7678 
7679 } /* emlxs_sli4_resource_alloc */
7680 
7681 
7682 extern void
emlxs_sli4_zero_queue_stat(emlxs_hba_t * hba)7683 emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba)
7684 {
7685 	uint32_t i;
7686 	uint32_t num_wq;
7687 	emlxs_config_t	*cfg = &CFG;
7688 	clock_t		time;
7689 
7690 	/* EQ */
7691 	for (i = 0; i < hba->intr_count; i++) {
7692 		hba->sli.sli4.eq[i].num_proc = 0;
7693 		hba->sli.sli4.eq[i].max_proc = 0;
7694 		hba->sli.sli4.eq[i].isr_count = 0;
7695 	}
7696 	num_wq = cfg[CFG_NUM_WQ].current * hba->intr_count;
7697 	/* CQ */
7698 	for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
7699 		hba->sli.sli4.cq[i].num_proc = 0;
7700 		hba->sli.sli4.cq[i].max_proc = 0;
7701 		hba->sli.sli4.cq[i].isr_count = 0;
7702 	}
7703 	/* WQ */
7704 	for (i = 0; i < num_wq; i++) {
7705 		hba->sli.sli4.wq[i].num_proc = 0;
7706 		hba->sli.sli4.wq[i].num_busy = 0;
7707 	}
7708 	/* RQ */
7709 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
7710 		hba->sli.sli4.rq[i].num_proc = 0;
7711 	}
7712 	(void) drv_getparm(LBOLT, &time);
7713 	hba->sli.sli4.que_stat_timer = (uint32_t)time;
7714 
7715 } /* emlxs_sli4_zero_queue_stat */
7716 
7717 
/*
 * Reserve an exchange (XRI) from the free list without yet binding it to
 * an I/O packet (sbp).  The XRI is marked EMLXS_XRI_RESERVED and moved to
 * the in-use list; emlxs_sli4_register_xri() completes the binding later.
 *
 * port:  virtual port issuing the reservation
 * rpip:  remote-port object to charge the exchange against
 * type:  EMLXS_XRI_* usage type (diagnostic only on failure paths here)
 * rx_id: receive exchange id to record in the XRI
 *
 * Returns the reserved XRIobj_t, or NULL if no free XRI exists or the
 * XRI's iotag slot in fc_table is still occupied by a live packet.
 */
extern XRIobj_t *
emlxs_sli4_reserve_xri(emlxs_port_t *port,  RPIobj_t *rpip, uint32_t type,
    uint16_t rx_id)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t	*xrip;
	uint16_t	iotag;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	/* First entry on the free list; the list head doubles as the */
	/* empty-list sentinel */
	xrip = hba->sli.sli4.XRIfree_f;

	if (xrip == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
		mutex_exit(&EMLXS_FCTAB_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "Unable to reserve XRI. type=%d",
		    type);

		return (NULL);
	}

	iotag = xrip->iotag;

	/* iotag 0 is invalid; a non-NULL, non-stale fc_table entry means */
	/* the slot is still owned by an outstanding command */
	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to reserve iotag. type=%d",
		    type);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	/* Mark the exchange reserved; no sbp is attached yet */
	xrip->state = XRI_STATE_ALLOCATED;
	xrip->type = type;
	xrip->flag = EMLXS_XRI_RESERVED;
	xrip->sbp = NULL;

	xrip->rpip = rpip;
	xrip->rx_id = rx_id;
	rpip->xri_count++;

	/* Take it off free list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xrif_count--;

	/* Add it to end of inuse list */
	xrip->_b = hba->sli.sli4.XRIinuse_b;
	hba->sli.sli4.XRIinuse_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.XRIinuse_b = xrip;
	hba->sli.sli4.xria_count++;

	mutex_exit(&EMLXS_FCTAB_LOCK);
	return (xrip);

} /* emlxs_sli4_reserve_xri() */
7783 
7784 
/*
 * Release a previously reserved (but never registered) XRI back to the
 * free list.
 *
 * port: virtual port owning the exchange
 * xri:  XRI number to release
 * lock: nonzero if this routine should acquire/release EMLXS_FCTAB_LOCK;
 *       zero if the caller already holds it
 *
 * Returns 0 on success (or if the XRI was already free); 1 if the XRI is
 * fully in use (not merely reserved) and therefore cannot be unreserved.
 */
extern uint32_t
emlxs_sli4_unreserve_xri(emlxs_port_t *port, uint16_t xri, uint32_t lock)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t *xrip;

	if (lock) {
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}

	xrip = emlxs_sli4_find_xri(port, xri);

	/* Nothing to do if the exchange is unknown or already free */
	if (!xrip || xrip->state == XRI_STATE_FREE) {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "sli4_unreserve_xri:%d already freed.", xri);
		return (0);
	}

	/* Flush this unsolicited ct command */
	if (xrip->type == EMLXS_XRI_UNSOL_CT_TYPE) {
		(void) emlxs_flush_ct_event(port, xrip->rx_id);
	}

	/* An XRI without the RESERVED flag has been registered to an */
	/* active I/O and must be freed through emlxs_sli4_free_xri() */
	if (!(xrip->flag & EMLXS_XRI_RESERVED)) {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "sli4_unreserve_xri:%d in use. type=%d",
		    xrip->XRI, xrip->type);
		return (1);
	}

	/* Defensive: a reserved XRI should not own a live fc_table slot; */
	/* if it somehow does, drop the packet reference */
	if (xrip->iotag &&
	    (hba->fc_table[xrip->iotag] != NULL) &&
	    (hba->fc_table[xrip->iotag] != STALE_PACKET)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "sli4_unreserve_xri:%d  sbp dropped:%p type=%d",
		    xrip->XRI, hba->fc_table[xrip->iotag], xrip->type);

		hba->fc_table[xrip->iotag] = NULL;
		hba->io_count--;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "sli4_unreserve_xri:%d unreserved. type=%d",
	    xrip->XRI, xrip->type);

	xrip->state = XRI_STATE_FREE;
	xrip->type = 0;

	/* Drop the remote-port references this exchange held */
	if (xrip->rpip) {
		xrip->rpip->xri_count--;
		xrip->rpip = NULL;
	}

	if (xrip->reserved_rpip) {
		xrip->reserved_rpip->xri_count--;
		xrip->reserved_rpip = NULL;
	}

	/* Take it off inuse list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xria_count--;

	/* Add it to end of free list */
	xrip->_b = hba->sli.sli4.XRIfree_b;
	hba->sli.sli4.XRIfree_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
	hba->sli.sli4.XRIfree_b = xrip;
	hba->sli.sli4.xrif_count++;

	if (lock) {
		mutex_exit(&EMLXS_FCTAB_LOCK);
	}

	return (0);

} /* emlxs_sli4_unreserve_xri() */
7872 
7873 
/*
 * Bind an I/O packet (sbp) to a previously reserved XRI, claiming the
 * XRI's iotag slot in fc_table.  Clears EMLXS_XRI_RESERVED.
 *
 * port: virtual port issuing the registration
 * sbp:  I/O buffer to associate with the exchange
 * xri:  XRI number (used to look up the XRI if sbp->xrip is not set)
 * did:  destination id, used to re-resolve the RPI if the exchange was
 *       reserved against the fabric placeholder RPI
 *
 * Returns the registered XRIobj_t, or NULL if the XRI is unknown, not in
 * the reserved state, or its iotag slot is already occupied.
 */
XRIobj_t *
emlxs_sli4_register_xri(emlxs_port_t *port, emlxs_buf_t *sbp, uint16_t xri,
    uint32_t did)
{
	emlxs_hba_t *hba = HBA;
	uint16_t	iotag;
	XRIobj_t	*xrip;
	emlxs_node_t	*node;
	RPIobj_t	*rpip;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	/* Prefer the XRI already attached to the packet, else look it up */
	xrip = sbp->xrip;
	if (!xrip) {
		xrip = emlxs_sli4_find_xri(port, xri);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "sli4_register_xri:%d XRI not found.", xri);

			mutex_exit(&EMLXS_FCTAB_LOCK);
			return (NULL);
		}
	}

	/* Only an XRI in the reserved state may be registered */
	if ((xrip->state == XRI_STATE_FREE) ||
	    !(xrip->flag & EMLXS_XRI_RESERVED)) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "sli4_register_xri:%d Invalid XRI. xrip=%p "
		    "state=%x flag=%x",
		    xrip->XRI, xrip, xrip->state, xrip->flag);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	iotag = xrip->iotag;

	/* iotag 0 is invalid; a live fc_table entry means the slot is */
	/* still owned by another outstanding command */
	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "sli4_register_xri:%d Invalid fc_table entry. "
		    "iotag=%d entry=%p",
		    xrip->XRI, iotag, hba->fc_table[iotag]);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	/* Claim the iotag slot and cross-link packet and exchange */
	hba->fc_table[iotag] = sbp;
	hba->io_count++;

	sbp->iotag = iotag;
	sbp->xrip = xrip;

	xrip->flag &= ~EMLXS_XRI_RESERVED;
	xrip->sbp = sbp;

	/* If we did not have a registered RPI when we reserved */
	/* this exchange, check again now. */
	if (xrip->rpip && (xrip->rpip->RPI == FABRIC_RPI)) {
		node = emlxs_node_find_did(port, did, 1);
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (rpip && (rpip->RPI != FABRIC_RPI)) {
			/* Move the XRI to the new RPI */
			xrip->rpip->xri_count--;
			xrip->rpip = rpip;
			rpip->xri_count++;
		}
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

	return (xrip);

} /* emlxs_sli4_register_xri() */
7954 
7955 
/* Performs both reserve and register functions for XRI */
/*
 * Pull the first XRI off the free list, bind it to sbp/rpip in one step
 * (no EMLXS_XRI_RESERVED intermediate state), claim its iotag slot in
 * fc_table, and move it to the in-use list.
 *
 * Returns the allocated XRIobj_t, or NULL if the free list is empty or
 * the XRI's iotag slot is still occupied by a live packet.
 */
static XRIobj_t *
emlxs_sli4_alloc_xri(emlxs_port_t *port, emlxs_buf_t *sbp, RPIobj_t *rpip,
    uint32_t type)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t	*xrip;
	uint16_t	iotag;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	/* First entry on the free list; the list head doubles as the */
	/* empty-list sentinel */
	xrip = hba->sli.sli4.XRIfree_f;

	if (xrip == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
		mutex_exit(&EMLXS_FCTAB_LOCK);

		return (NULL);
	}

	/* Get the iotag by registering the packet */
	iotag = xrip->iotag;

	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to alloc iotag:(0x%x)(%p) type=%d",
		    iotag, hba->fc_table[iotag], type);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	/* Claim the iotag slot and cross-link packet and exchange */
	hba->fc_table[iotag] = sbp;
	hba->io_count++;

	sbp->iotag = iotag;
	sbp->xrip = xrip;

	xrip->state = XRI_STATE_ALLOCATED;
	xrip->type = type;
	xrip->flag = 0;
	xrip->sbp = sbp;

	xrip->rpip = rpip;
	rpip->xri_count++;

	/* Take it off free list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xrif_count--;

	/* Add it to end of inuse list */
	xrip->_b = hba->sli.sli4.XRIinuse_b;
	hba->sli.sli4.XRIinuse_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.XRIinuse_b = xrip;
	hba->sli.sli4.xria_count++;

	mutex_exit(&EMLXS_FCTAB_LOCK);

	return (xrip);

} /* emlxs_sli4_alloc_xri() */
8025 
8026 
8027 /* EMLXS_FCTAB_LOCK must be held to enter */
8028 extern XRIobj_t *
emlxs_sli4_find_xri(emlxs_port_t * port,uint16_t xri)8029 emlxs_sli4_find_xri(emlxs_port_t *port, uint16_t xri)
8030 {
8031 	emlxs_hba_t *hba = HBA;
8032 	XRIobj_t	*xrip;
8033 
8034 	xrip = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
8035 	while (xrip != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
8036 		if ((xrip->state >= XRI_STATE_ALLOCATED) &&
8037 		    (xrip->XRI == xri)) {
8038 			return (xrip);
8039 		}
8040 		xrip = xrip->_f;
8041 	}
8042 
8043 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
8044 	    "Unable to find XRI x%x", xri);
8045 
8046 	return (NULL);
8047 
8048 } /* emlxs_sli4_find_xri() */
8049 
8050 
8051 
8052 
/*
 * Free an exchange and/or detach its I/O packet.
 *
 * port: virtual port owning the exchange
 * sbp:  I/O buffer to detach (may be NULL)
 * xrip: exchange to return to the free list (may be NULL)
 * lock: nonzero if this routine should acquire/release EMLXS_FCTAB_LOCK;
 *       zero if the caller already holds it
 *
 * Either argument may be NULL independently: xrip-only frees an exchange
 * with no packet; sbp-only releases a packet whose exchange stays
 * reserved.  Note the sbp path drops the lock before touching sbp->mtx.
 */
extern void
emlxs_sli4_free_xri(emlxs_port_t *port, emlxs_buf_t *sbp, XRIobj_t *xrip,
    uint8_t lock)
{
	emlxs_hba_t *hba = HBA;

	if (lock) {
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}

	if (xrip) {
		/* Guard against double-free */
		if (xrip->state == XRI_STATE_FREE) {
			if (lock) {
				mutex_exit(&EMLXS_FCTAB_LOCK);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Free XRI:%x, Already freed. type=%d",
			    xrip->XRI, xrip->type);
			return;
		}

		/* Flush any pending unsolicited CT event for this exchange */
		if (xrip->type == EMLXS_XRI_UNSOL_CT_TYPE) {
			(void) emlxs_flush_ct_event(port, xrip->rx_id);
		}

		/* Release the iotag slot if a live packet still holds it */
		if (xrip->iotag &&
		    (hba->fc_table[xrip->iotag] != NULL) &&
		    (hba->fc_table[xrip->iotag] != STALE_PACKET)) {
			hba->fc_table[xrip->iotag] = NULL;
			hba->io_count--;
		}

		xrip->state = XRI_STATE_FREE;
		xrip->type  = 0;
		xrip->flag  = 0;

		/* Drop the remote-port references this exchange held */
		if (xrip->rpip) {
			xrip->rpip->xri_count--;
			xrip->rpip = NULL;
		}

		if (xrip->reserved_rpip) {
			xrip->reserved_rpip->xri_count--;
			xrip->reserved_rpip = NULL;
		}

		/* Take it off inuse list */
		(xrip->_b)->_f = xrip->_f;
		(xrip->_f)->_b = xrip->_b;
		xrip->_f = NULL;
		xrip->_b = NULL;
		hba->sli.sli4.xria_count--;

		/* Add it to end of free list */
		xrip->_b = hba->sli.sli4.XRIfree_b;
		hba->sli.sli4.XRIfree_b->_f = xrip;
		xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.XRIfree_b = xrip;
		hba->sli.sli4.xrif_count++;
	}

	if (sbp) {
		/* A packet already completed or owned by the ULP must not */
		/* be touched here */
		if (!(sbp->pkt_flags & PACKET_VALID) ||
		    (sbp->pkt_flags &
		    (PACKET_ULP_OWNED|PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
			if (lock) {
				mutex_exit(&EMLXS_FCTAB_LOCK);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Free XRI: sbp invalid. sbp=%p flags=%x xri=%d",
			    sbp, sbp->pkt_flags, ((xrip)? xrip->XRI:0));
			return;
		}

		if (xrip && (xrip->iotag != sbp->iotag)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "sbp/iotag mismatch %p iotag:%d %d", sbp,
			    sbp->iotag, xrip->iotag);
		}

		/* Release this packet's fc_table slot, if it owns one */
		if (sbp->iotag) {
			if (sbp == hba->fc_table[sbp->iotag]) {
				hba->fc_table[sbp->iotag] = NULL;
				hba->io_count--;

				if (sbp->xrip) {
					/* Exchange is still reserved */
					sbp->xrip->flag |= EMLXS_XRI_RESERVED;
				}
			}
			sbp->iotag = 0;
		}

		/* Only break the sbp->xrip link if we freed the exchange */
		if (xrip) {
			sbp->xrip = 0;
		}

		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		/* Clean up the sbp */
		mutex_enter(&sbp->mtx);

		if (sbp->pkt_flags & PACKET_IN_TXQ) {
			sbp->pkt_flags &= ~PACKET_IN_TXQ;
			hba->channel_tx_count--;
		}

		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
		}

		mutex_exit(&sbp->mtx);
	} else {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}
	}

} /* emlxs_sli4_free_xri() */
8174 
8175 
/*
 * Post the SGL page addresses of every allocated XRI to the adapter via
 * non-embedded FCOE_OPCODE_CFG_POST_SGL_PAGES mailbox commands, one batch
 * per mailbox payload, iterating over each XRI extent.
 *
 * hba: adapter instance
 * mbq: caller-supplied mailbox work area (reused for each command)
 *
 * Returns 0 on success, EIO if the payload buffer cannot be allocated or
 * any mailbox command fails.
 */
static int
emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4	*mb = (MAILBOX4 *)mbq;
	emlxs_port_t	*port = &PPORT;
	XRIobj_t	*xrip;
	MATCHMAP	*mp;
	mbox_req_hdr_t 	*hdr_req;
	uint32_t	i;
	uint32_t	cnt;
	uint32_t	xri_cnt;
	uint32_t	j;
	uint32_t	size;
	IOCTL_FCOE_CFG_POST_SGL_PAGES *post_sgl;

	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
	mbq->bp = NULL;
	mbq->mbox_cmpl = NULL;

	/* Non-embedded payload buffer for the request */
	if ((mp = emlxs_mem_buf_alloc(hba, EMLXS_MAX_NONEMBED_SIZE)) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Unable to POST_SGL. Mailbox cmd=%x  ",
		    mb->mbxCommand);
		return (EIO);
	}
	mbq->nonembed = (void *)mp;

	/*
	 * Signifies a non embedded command
	 */
	mb->un.varSLIConfig.be.embedded = 0;
	mb->mbxCommand = MBX_SLI_CONFIG;
	mb->mbxOwner = OWN_HOST;

	/* Request header at the start of the payload; SGL page entries */
	/* immediately follow it */
	hdr_req = (mbox_req_hdr_t *)mp->virt;
	post_sgl =
	    (IOCTL_FCOE_CFG_POST_SGL_PAGES *)(hdr_req + 1);

	xrip = hba->sli.sli4.XRIp;

	/* For each extent */
	for (j = 0; j < hba->sli.sli4.XRIExtCount; j++) {
		cnt = hba->sli.sli4.XRIExtSize;
		while (cnt) {
			/* XRI 0 is never used; skip its table slot */
			if (xrip->XRI == 0) {
				cnt--;
				xrip++;
				continue;
			}

			/* Rebuild the mailbox/payload for this batch */
			bzero((void *) hdr_req, mp->size);
			size = mp->size - IOCTL_HEADER_SZ;

			mb->un.varSLIConfig.be.payload_length =
			    mp->size;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
			    IOCTL_SUBSYSTEM_FCOE;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
			    FCOE_OPCODE_CFG_POST_SGL_PAGES;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length = size;

			hdr_req->subsystem = IOCTL_SUBSYSTEM_FCOE;
			hdr_req->opcode = FCOE_OPCODE_CFG_POST_SGL_PAGES;
			hdr_req->timeout = 0;
			hdr_req->req_length = size;

			/* This batch starts at the current XRI */
			post_sgl->params.request.xri_count = 0;
			post_sgl->params.request.xri_start = xrip->XRI;

			/* Max page entries that fit in the payload */
			xri_cnt = (size -
			    sizeof (IOCTL_FCOE_CFG_POST_SGL_PAGES)) /
			    sizeof (FCOE_SGL_PAGES);

			/* Fill the batch until the payload is full or the */
			/* extent is exhausted */
			for (i = 0; (i < xri_cnt) && cnt; i++) {
				post_sgl->params.request.xri_count++;
				post_sgl->params.request.pages[i].\
				    sgl_page0.addrLow =
				    PADDR_LO(xrip->SGList.phys);
				post_sgl->params.request.pages[i].\
				    sgl_page0.addrHigh =
				    PADDR_HI(xrip->SGList.phys);

				cnt--;
				xrip++;
			}

			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "Unable to POST_SGL. Mailbox cmd=%x "
				    "status=%x XRI cnt:%d start:%d",
				    mb->mbxCommand, mb->mbxStatus,
				    post_sgl->params.request.xri_count,
				    post_sgl->params.request.xri_start);
				emlxs_mem_buf_free(hba, mp);
				mbq->nonembed = NULL;
				return (EIO);
			}
		}
	}

	emlxs_mem_buf_free(hba, mp);
	mbq->nonembed = NULL;
	return (0);

} /* emlxs_sli4_post_sgl_pages() */
8283 
8284 
/*
 * Post the RPI header template pages to the adapter, one embedded
 * FCOE_OPCODE_POST_HDR_TEMPLATES mailbox command per RPI extent.
 * No-op if the adapter does not require header templates (param.HDRR
 * clear).
 *
 * hba: adapter instance
 * mbq: caller-supplied mailbox work area (rebuilt for each command)
 *
 * Returns 0 on success, EIO if any mailbox command fails.
 */
static int
emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4	*mb = (MAILBOX4 *)mbq;
	emlxs_port_t	*port = &PPORT;
	uint32_t 	j;
	uint32_t 	k;
	uint64_t	addr;
	IOCTL_FCOE_POST_HDR_TEMPLATES *post_hdr;
	uint16_t	num_pages;

	if (!(hba->sli.sli4.param.HDRR)) {
		return (0);
	}

	/* Bytes per extent */
	j = hba->sli.sli4.RPIExtSize * sizeof (RPIHdrTmplate_t);

	/* Pages required per extent (page == 4096 bytes) */
	num_pages = (j/4096) + ((j%4096)? 1:0);

	/* Physical base of the header template DMA area set up by */
	/* resource_alloc */
	addr = hba->sli.sli4.HeaderTmplate.phys;

	/* For each extent */
	for (j = 0; j < hba->sli.sli4.RPIExtCount; j++) {
		bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
		mbq->bp = NULL;
		mbq->mbox_cmpl = NULL;

		/*
		 * Signifies an embedded command
		 */
		mb->un.varSLIConfig.be.embedded = 1;

		mb->mbxCommand = MBX_SLI_CONFIG;
		mb->mbxOwner = OWN_HOST;
		mb->un.varSLIConfig.be.payload_length =
		    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES) + IOCTL_HEADER_SZ;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
		    IOCTL_SUBSYSTEM_FCOE;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
		    FCOE_OPCODE_POST_HDR_TEMPLATES;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
		    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES);

		post_hdr =
		    (IOCTL_FCOE_POST_HDR_TEMPLATES *)
		    &mb->un.varSLIConfig.payload;
		post_hdr->params.request.num_pages = num_pages;
		post_hdr->params.request.rpi_offset = hba->sli.sli4.RPIBase[j];

		/* One 4K page address per entry, advancing through the */
		/* template area */
		for (k = 0; k < num_pages; k++) {
			post_hdr->params.request.pages[k].addrLow =
			    PADDR_LO(addr);
			post_hdr->params.request.pages[k].addrHigh =
			    PADDR_HI(addr);
			addr += 4096;
		}

		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Unable to POST_HDR_TEMPLATES. Mailbox cmd=%x "
			    "status=%x ",
			    mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}
		emlxs_data_dump(port, "POST_HDR", (uint32_t *)mb, 18, 0);
	}

	return (0);

} /* emlxs_sli4_post_hdr_tmplates() */
8359 
8360 
/*
 * Create all SLI4 hardware queues via mailbox commands, in dependency
 * order: EQs first, then the CQs bound to each EQ, then the WQs bound to
 * those CQs, then the RQ pairs, and finally the MQ (trying the extended
 * create first and falling back to the basic one).
 *
 * hba: adapter instance
 * mbq: caller-supplied mailbox work area (rebuilt for each command)
 *
 * Returns 0 on success, EIO if any queue-create mailbox command fails
 * (except MQ_CREATE_EXT, which falls back to MQ_CREATE).
 */
static int
emlxs_sli4_create_queues(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4	*mb = (MAILBOX4 *)mbq;
	emlxs_port_t	*port = &PPORT;
	emlxs_config_t	*cfg = &CFG;
	IOCTL_COMMON_EQ_CREATE *eq;
	IOCTL_COMMON_CQ_CREATE *cq;
	IOCTL_FCOE_WQ_CREATE *wq;
	IOCTL_FCOE_RQ_CREATE *rq;
	IOCTL_COMMON_MQ_CREATE *mq;
	IOCTL_COMMON_MQ_CREATE_EXT *mq_ext;
	emlxs_rqdbu_t	rqdb;
	uint16_t i, j;
	uint16_t num_cq, total_cq;
	uint16_t num_wq, total_wq;

	/*
	 * The first CQ is reserved for ASYNC events,
	 * the second is reserved for unsol rcv, the rest
	 * correspond to WQs. (WQ0 -> CQ2, WQ1 -> CQ3, ...)
	 */

	total_cq = 0;
	total_wq = 0;

	/* Create EQ's */
	for (i = 0; i < hba->intr_count; i++) {
		emlxs_mb_eq_create(hba, mbq, i);
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to Create EQ %d: Mailbox cmd=%x status=%x ",
			    i, mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}
		/* Record the firmware-assigned queue id and the EQ-to-WQ */
		/* and EQ-to-MSI-X vector mappings */
		eq = (IOCTL_COMMON_EQ_CREATE *)&mb->un.varSLIConfig.payload;
		hba->sli.sli4.eq[i].qid = eq->params.response.EQId;
		hba->sli.sli4.eq[i].lastwq = total_wq;
		hba->sli.sli4.eq[i].msix_vector = i;

		emlxs_data_dump(port, "EQ0_CREATE", (uint32_t *)mb, 18, 0);
		num_wq = cfg[CFG_NUM_WQ].current;
		num_cq = num_wq;
		if (i == 0) {
			/* One for RQ handling, one for mbox/event handling */
			num_cq += EMLXS_CQ_OFFSET_WQ;
		}

		/* Create CQ's */
		for (j = 0; j < num_cq; j++) {
			/* Reuse mbq from previous mbox */
			bzero(mbq, sizeof (MAILBOXQ));

			/* Bind this CQ to the EQ just created */
			hba->sli.sli4.cq[total_cq].eqid =
			    hba->sli.sli4.eq[i].qid;

			emlxs_mb_cq_create(hba, mbq, total_cq);
			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_failed_msg, "Unable to Create "
				    "CQ %d: Mailbox cmd=%x status=%x ",
				    total_cq, mb->mbxCommand, mb->mbxStatus);
				return (EIO);
			}
			cq = (IOCTL_COMMON_CQ_CREATE *)
			    &mb->un.varSLIConfig.payload;
			hba->sli.sli4.cq[total_cq].qid =
			    cq->params.response.CQId;

			switch (total_cq) {
			case EMLXS_CQ_MBOX:
				/* First CQ is for async event handling */
				hba->sli.sli4.cq[total_cq].type =
				    EMLXS_CQ_TYPE_GROUP1;
				break;

			case EMLXS_CQ_RCV:
				/* Second CQ is for unsol receive handling */
				hba->sli.sli4.cq[total_cq].type =
				    EMLXS_CQ_TYPE_GROUP2;
				break;

			default:
				/* Setup CQ to channel mapping */
				hba->sli.sli4.cq[total_cq].type =
				    EMLXS_CQ_TYPE_GROUP2;
				hba->sli.sli4.cq[total_cq].channelp =
				    &hba->chan[total_cq - EMLXS_CQ_OFFSET_WQ];
				break;
			}
			emlxs_data_dump(port, "CQX_CREATE", (uint32_t *)mb,
			    18, 0);
			total_cq++;
		}

		/* Create WQ's */
		for (j = 0; j < num_wq; j++) {
			/* Reuse mbq from previous mbox */
			bzero(mbq, sizeof (MAILBOXQ));

			/* Bind WQn to CQ(n + EMLXS_CQ_OFFSET_WQ) */
			hba->sli.sli4.wq[total_wq].cqid =
			    hba->sli.sli4.cq[total_wq + EMLXS_CQ_OFFSET_WQ].qid;

			emlxs_mb_wq_create(hba, mbq, total_wq);
			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_failed_msg, "Unable to Create "
				    "WQ %d: Mailbox cmd=%x status=%x ",
				    total_wq, mb->mbxCommand, mb->mbxStatus);
				return (EIO);
			}
			wq = (IOCTL_FCOE_WQ_CREATE *)
			    &mb->un.varSLIConfig.payload;
			hba->sli.sli4.wq[total_wq].qid =
			    wq->params.response.WQId;

			hba->sli.sli4.wq[total_wq].cqid =
			    hba->sli.sli4.cq[total_wq+EMLXS_CQ_OFFSET_WQ].qid;
			emlxs_data_dump(port, "WQ_CREATE", (uint32_t *)mb,
			    18, 0);
			total_wq++;
		}
		hba->last_msiid = i;
	}

	/* We assume 1 RQ pair will handle ALL incoming data */
	/* Create RQs */
	for (i = 0; i < EMLXS_MAX_RQS; i++) {
		/* Personalize the RQ */
		/* RQ0 (header) and RQ1 (data) both complete on the */
		/* unsolicited-receive CQ */
		switch (i) {
		case 0:
			hba->sli.sli4.rq[i].cqid =
			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
			break;
		case 1:
			hba->sli.sli4.rq[i].cqid =
			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
			break;
		default:
			hba->sli.sli4.rq[i].cqid = 0xffff;
		}

		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		emlxs_mb_rq_create(hba, mbq, i);
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to Create RQ %d: Mailbox cmd=%x status=%x ",
			    i, mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}

		rq = (IOCTL_FCOE_RQ_CREATE *)&mb->un.varSLIConfig.payload;
		hba->sli.sli4.rq[i].qid = rq->params.response.RQId;
		emlxs_data_dump(port, "RQ CREATE", (uint32_t *)mb, 18, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RQ CREATE: rq[%d].qid=%d cqid=%d",
		    i, hba->sli.sli4.rq[i].qid, hba->sli.sli4.rq[i].cqid);

		/* Initialize the host_index */
		hba->sli.sli4.rq[i].host_index = 0;

		/* If Data queue was just created, */
		/* then post buffers using the header qid */
		if ((i & 0x1)) {
			/* Ring the RQ doorbell to post buffers */
			rqdb.word = 0;
			rqdb.db.Qid = hba->sli.sli4.rq[i-1].qid;
			rqdb.db.NumPosted = RQB_COUNT;

			emlxs_sli4_write_rqdb(hba, rqdb.word);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ CREATE: Doorbell rang: qid=%d count=%d",
			    hba->sli.sli4.rq[i-1].qid, RQB_COUNT);
		}
	}

	/* Create MQ */

	/* Personalize the MQ */
	/* The MQ completes on the mailbox/async-event CQ */
	hba->sli.sli4.mq.cqid = hba->sli.sli4.cq[EMLXS_CQ_MBOX].qid;

	/* Reuse mbq from previous mbox */
	bzero(mbq, sizeof (MAILBOXQ));

	/* Try the extended MQ create first; fall back to the basic form */
	/* if the firmware rejects it */
	/* NOTE(review): 'i' in the MQ error messages below is loop residue */
	/* (EMLXS_MAX_RQS), not a meaningful MQ index — confirm intent */
	emlxs_mb_mq_create_ext(hba, mbq);
	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
	    MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to Create MQ_EXT %d: Mailbox cmd=%x status=%x ",
		    i, mb->mbxCommand, mb->mbxStatus);

		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		emlxs_mb_mq_create(hba, mbq);
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to Create MQ %d: Mailbox cmd=%x status=%x ",
			    i, mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}

		mq = (IOCTL_COMMON_MQ_CREATE *)&mb->un.varSLIConfig.payload;
		hba->sli.sli4.mq.qid = mq->params.response.MQId;
		return (0);
	}

	mq_ext = (IOCTL_COMMON_MQ_CREATE_EXT *)&mb->un.varSLIConfig.payload;
	hba->sli.sli4.mq.qid = mq_ext->params.response.MQId;
	return (0);

} /* emlxs_sli4_create_queues() */
8582 
8583 
8584 extern void
emlxs_sli4_timer(emlxs_hba_t * hba)8585 emlxs_sli4_timer(emlxs_hba_t *hba)
8586 {
8587 	/* Perform SLI4 level timer checks */
8588 
8589 	emlxs_fcf_timer_notify(hba);
8590 
8591 	emlxs_sli4_timer_check_mbox(hba);
8592 
8593 	return;
8594 
8595 } /* emlxs_sli4_timer() */
8596 
8597 
/*
 * Check for a timed-out mailbox command.  Called from the SLI4 timer.
 * If the mailbox timer has expired, log the timeout (noting how the
 * issuer was waiting), mark the HBA in error, clean up the mailbox
 * queue, and spawn the adapter shutdown thread.
 */
static void
emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	MAILBOX *mb = NULL;

	/* Timeout checking can be disabled via configuration */
	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Return if timer hasn't expired */
	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* The first to service the mbox queue will clear the timer */
	hba->mbox_timer = 0;

	/* Snapshot the active mailbox (if any) for logging below */
	if (hba->mbox_queue_flag) {
		if (hba->mbox_mbq) {
			mb = (MAILBOX *)hba->mbox_mbq;
		}
	}

	if (mb) {
		/* Log the timeout along with the issuer's wait mode */
		switch (hba->mbox_queue_flag) {
		case MBX_NOWAIT:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: Nowait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));
			break;

		case MBX_SLEEP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		case MBX_POLL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p (%d).",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb, hba->mbox_queue_flag);
			break;
		}
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
	}

	/* Mark the timeout and force the HBA into the error state */
	hba->flag |= FC_MBOX_TIMEOUT;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform mailbox cleanup */
	/* This will wake any sleeping or polling threads */
	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

	/* Trigger adapter shutdown */
	emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);

	return;

} /* emlxs_sli4_timer_check_mbox() */
8674 
8675 
8676 extern void
emlxs_data_dump(emlxs_port_t * port,char * str,uint32_t * iptr,int cnt,int err)8677 emlxs_data_dump(emlxs_port_t *port, char *str, uint32_t *iptr, int cnt, int err)
8678 {
8679 	void *msg;
8680 
8681 	if (!port || !str || !iptr || !cnt) {
8682 		return;
8683 	}
8684 
8685 	if (err) {
8686 		msg = &emlxs_sli_err_msg;
8687 	} else {
8688 		msg = &emlxs_sli_detail_msg;
8689 	}
8690 
8691 	if (cnt) {
8692 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
8693 		    "%s00: %08x %08x %08x %08x %08x %08x", str, *iptr,
8694 		    *(iptr+1), *(iptr+2), *(iptr+3), *(iptr+4), *(iptr+5));
8695 	}
8696 	if (cnt > 6) {
8697 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
8698 		    "%s06: %08x %08x %08x %08x %08x %08x", str, *(iptr+6),
8699 		    *(iptr+7), *(iptr+8), *(iptr+9), *(iptr+10), *(iptr+11));
8700 	}
8701 	if (cnt > 12) {
8702 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
8703 		    "%s12: %08x %08x %08x %08x %08x %08x", str, *(iptr+12),
8704 		    *(iptr+13), *(iptr+14), *(iptr+15), *(iptr+16), *(iptr+17));
8705 	}
8706 	if (cnt > 18) {
8707 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
8708 		    "%s18: %08x %08x %08x %08x %08x %08x", str, *(iptr+18),
8709 		    *(iptr+19), *(iptr+20), *(iptr+21), *(iptr+22), *(iptr+23));
8710 	}
8711 	if (cnt > 24) {
8712 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
8713 		    "%s24: %08x %08x %08x %08x %08x %08x", str, *(iptr+24),
8714 		    *(iptr+25), *(iptr+26), *(iptr+27), *(iptr+28), *(iptr+29));
8715 	}
8716 	if (cnt > 30) {
8717 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
8718 		    "%s30: %08x %08x %08x %08x %08x %08x", str, *(iptr+30),
8719 		    *(iptr+31), *(iptr+32), *(iptr+33), *(iptr+34), *(iptr+35));
8720 	}
8721 	if (cnt > 36) {
8722 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
8723 		    "%s36: %08x %08x %08x %08x %08x %08x", str, *(iptr+36),
8724 		    *(iptr+37), *(iptr+38), *(iptr+39), *(iptr+40), *(iptr+41));
8725 	}
8726 
8727 } /* emlxs_data_dump() */
8728 
8729 
/*
 * Log the adapter's unrecoverable-error (UE) registers for debugging.
 * 'str' is a caller-supplied prefix for the log message.  Which
 * registers are read depends on the SLI interface type; interface
 * types other than 0 and 2 are silently ignored.
 */
extern void
emlxs_ue_dump(emlxs_hba_t *hba, char *str)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t ue_h;
	uint32_t ue_l;
	uint32_t on1;
	uint32_t on2;

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		/* Type 0: UE low/high read through the PCI access handle */
		ue_l = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		/* Online status registers at fixed PCI config offsets */
		on1 = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE1));
		on2 = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE2));

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "%s: ueLow:%08x ueHigh:%08x on1:%08x on2:%08x", str,
		    ue_l, ue_h, on1, on2);
		break;

	case SLI_INTF_IF_TYPE_2:
		/* Type 2: status and error registers read through BAR0 */
		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.STATUS_reg_addr);

		ue_l = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "%s: status:%08x err1:%08x err2:%08x", str,
		    status, ue_l, ue_h);

		break;
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif  /* FMA_SUPPORT */

} /* emlxs_ue_dump() */
8779 
8780 
/*
 * Poll the adapter error registers for an unrecoverable error.
 * Classifies any error found:
 *   error == 2: fatal -- flush the chip queues and shut the adapter down.
 *   error == 1: adapter indicates a reset may recover it -- flush and
 *               restart instead.
 */
static void
emlxs_sli4_poll_erratt(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t ue_h;
	uint32_t ue_l;
	uint32_t error = 0;

	/* Already handled; nothing more to do */
	if (hba->flag & FC_HARDWARE_ERROR) {
		return;
	}

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		/* Type 0: compare UE bits against the configured masks */
		ue_l = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		/* Any unmasked UE bit (or a software-forced error) is fatal */
		if ((~hba->sli.sli4.ue_mask_lo & ue_l) ||
		    (~hba->sli.sli4.ue_mask_hi & ue_h) ||
		    (hba->sli.sli4.flag & EMLXS_SLI4_HW_ERROR)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
			    "Host Error: ueLow:%08x ueHigh:%08x maskLow:%08x "
			    "maskHigh:%08x flag:%08x",
			    ue_l, ue_h, hba->sli.sli4.ue_mask_lo,
			    hba->sli.sli4.ue_mask_hi, hba->sli.sli4.flag);

			error = 2;
		}
		break;

	case SLI_INTF_IF_TYPE_2:
		/* Type 2: the status register reports errors directly */
		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.STATUS_reg_addr);

		if ((status & SLI_STATUS_ERROR) ||
		    (hba->sli.sli4.flag & EMLXS_SLI4_HW_ERROR)) {
			ue_l = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR1_reg_addr);
			ue_h = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR2_reg_addr);

			/* RESET_NEEDED means a restart may recover */
			error = (status & SLI_STATUS_RESET_NEEDED)? 1:2;

			if (error == 1) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
				    "Host Error: status:%08x err1:%08x "
				    "err2:%08x flag:%08x",
				    status, ue_l, ue_h, hba->sli.sli4.flag);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_hardware_error_msg,
				    "Host Error: status:%08x err1:%08x "
				    "err2:%08x flag:%08x",
				    status, ue_l, ue_h, hba->sli.sli4.flag);
			}
		}
		break;
	}

	if (error == 2) {
		/* Fatal: shut the adapter down */
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		emlxs_sli4_hba_flush_chipq(hba);

		emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);

	} else if (error == 1) {
		/* Recoverable: restart the adapter */
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		emlxs_sli4_hba_flush_chipq(hba);

		emlxs_thread_spawn(hba, emlxs_restart_thread, 0, 0);
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif  /* FMA_SUPPORT */

} /* emlxs_sli4_poll_erratt() */
8864 
8865 
8866 static uint32_t
emlxs_sli4_reg_did(emlxs_port_t * port,uint32_t did,SERV_PARM * param,emlxs_buf_t * sbp,fc_unsol_buf_t * ubp,IOCBQ * iocbq)8867 emlxs_sli4_reg_did(emlxs_port_t *port, uint32_t did, SERV_PARM *param,
8868     emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
8869 {
8870 	emlxs_hba_t	*hba = HBA;
8871 	NODELIST	*node;
8872 	RPIobj_t	*rpip;
8873 	uint32_t	rval;
8874 
8875 	/* Check for invalid node ids to register */
8876 	if ((did == 0) && (!(hba->flag & FC_LOOPBACK_MODE))) {
8877 		return (1);
8878 	}
8879 
8880 	if (did & 0xff000000) {
8881 		return (1);
8882 	}
8883 
8884 	/* We don't register our own did */
8885 	if ((did == port->did) && (!(hba->flag & FC_LOOPBACK_MODE))) {
8886 		return (1);
8887 	}
8888 
8889 	if (did != FABRIC_DID) {
8890 		if ((rval = emlxs_mb_check_sparm(hba, param))) {
8891 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
8892 			    "Invalid service parameters. did=%06x rval=%d", did,
8893 			    rval);
8894 
8895 			return (1);
8896 		}
8897 	}
8898 
8899 	/* Check if the node limit has been reached */
8900 	if (port->node_count >= hba->max_nodes) {
8901 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
8902 		    "Limit reached. did=%06x count=%d", did,
8903 		    port->node_count);
8904 
8905 		return (1);
8906 	}
8907 
8908 	node = emlxs_node_find_did(port, did, 1);
8909 	rpip = EMLXS_NODE_TO_RPI(port, node);
8910 
8911 	rval = emlxs_rpi_online_notify(port, rpip, did, param, (void *)sbp,
8912 	    (void *)ubp, (void *)iocbq);
8913 
8914 	return (rval);
8915 
8916 } /* emlxs_sli4_reg_did() */
8917 
8918 
8919 static uint32_t
emlxs_sli4_unreg_node(emlxs_port_t * port,emlxs_node_t * node,emlxs_buf_t * sbp,fc_unsol_buf_t * ubp,IOCBQ * iocbq)8920 emlxs_sli4_unreg_node(emlxs_port_t *port, emlxs_node_t *node,
8921     emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
8922 {
8923 	RPIobj_t	*rpip;
8924 	uint32_t	rval;
8925 
8926 	if (!node) {
8927 		/* Unreg all nodes */
8928 		(void) emlxs_sli4_unreg_all_nodes(port);
8929 		return (1);
8930 	}
8931 
8932 	/* Check for base node */
8933 	if (node == &port->node_base) {
8934 		/* Just flush base node */
8935 		(void) emlxs_tx_node_flush(port, &port->node_base,
8936 		    0, 0, 0);
8937 
8938 		(void) emlxs_chipq_node_flush(port, 0,
8939 		    &port->node_base, 0);
8940 
8941 		port->did = 0;
8942 
8943 		/* Return now */
8944 		return (1);
8945 	}
8946 
8947 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
8948 	    "unreg_node:%p did=%x rpi=%d",
8949 	    node, node->nlp_DID, node->nlp_Rpi);
8950 
8951 	rpip = EMLXS_NODE_TO_RPI(port, node);
8952 
8953 	if (!rpip) {
8954 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
8955 		    "unreg_node:%p did=%x rpi=%d. RPI not found.",
8956 		    node, node->nlp_DID, node->nlp_Rpi);
8957 
8958 		emlxs_node_rm(port, node);
8959 		return (1);
8960 	}
8961 
8962 	rval = emlxs_rpi_offline_notify(port, rpip, (void *)sbp, (void *)ubp,
8963 	    (void *)iocbq);
8964 
8965 	return (rval);
8966 
8967 } /* emlxs_sli4_unreg_node() */
8968 
8969 
/*
 * Unregister every node currently in the port's node table.
 * Always returns 0.
 */
extern uint32_t
emlxs_sli4_unreg_all_nodes(emlxs_port_t *port)
{
	NODELIST	*nlp;
	int		i;
	uint32_t 	found;

	/* Set the node tags */
	/* We will process all nodes with this tag */
	rw_enter(&port->node_rwlock, RW_READER);
	found = 0;
	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		nlp = port->node_table[i];
		while (nlp != NULL) {
			found = 1;
			nlp->nlp_tag = 1;
			nlp = nlp->nlp_list_next;
		}
	}
	rw_exit(&port->node_rwlock);

	if (!found) {
		return (0);
	}

	/*
	 * Process one tagged node per pass.  The rwlock is dropped before
	 * each unreg call (which may modify the table), so the table is
	 * rescanned from the start on every iteration; the cleared tag
	 * guarantees each node is processed at most once.
	 */
	for (;;) {
		rw_enter(&port->node_rwlock, RW_READER);
		found = 0;
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if (!nlp->nlp_tag) {
					nlp = nlp->nlp_list_next;
					continue;
				}
				nlp->nlp_tag = 0;
				found = 1;
				break;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		/* No tagged nodes remain; done */
		if (!found) {
			break;
		}

		(void) emlxs_sli4_unreg_node(port, nlp, 0, 0, 0);
	}

	return (0);

} /* emlxs_sli4_unreg_all_nodes() */
9026 
9027 
9028 static void
emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t * hba,CQE_ASYNC_t * cqe)9029 emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
9030 {
9031 	emlxs_port_t *port = &PPORT;
9032 
9033 	/* Handle link down */
9034 	if ((cqe->un.link.link_status == ASYNC_EVENT_LOGICAL_LINK_DOWN) ||
9035 	    (cqe->un.link.link_status == ASYNC_EVENT_PHYS_LINK_DOWN)) {
9036 		(void) emlxs_fcf_linkdown_notify(port);
9037 
9038 		mutex_enter(&EMLXS_PORT_LOCK);
9039 		hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9040 		mutex_exit(&EMLXS_PORT_LOCK);
9041 		return;
9042 	}
9043 
9044 	/* Link is up */
9045 
9046 	/* Set linkspeed */
9047 	switch (cqe->un.link.port_speed) {
9048 	case PHY_1GHZ_LINK:
9049 		hba->linkspeed = LA_1GHZ_LINK;
9050 		break;
9051 	case PHY_10GHZ_LINK:
9052 		hba->linkspeed = LA_10GHZ_LINK;
9053 		break;
9054 	default:
9055 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9056 		    "sli4_handle_fcoe_link_event: Unknown link speed=%x.",
9057 		    cqe->un.link.port_speed);
9058 		hba->linkspeed = 0;
9059 		break;
9060 	}
9061 
9062 	/* Set qos_linkspeed */
9063 	hba->qos_linkspeed = cqe->un.link.qos_link_speed;
9064 
9065 	/* Set topology */
9066 	hba->topology = TOPOLOGY_PT_PT;
9067 
9068 	mutex_enter(&EMLXS_PORT_LOCK);
9069 	hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9070 	mutex_exit(&EMLXS_PORT_LOCK);
9071 
9072 	(void) emlxs_fcf_linkup_notify(port);
9073 
9074 	return;
9075 
9076 } /* emlxs_sli4_handle_fcoe_link_event()  */
9077 
9078 
9079 static void
emlxs_sli4_handle_fc_link_att(emlxs_hba_t * hba,CQE_ASYNC_t * cqe)9080 emlxs_sli4_handle_fc_link_att(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
9081 {
9082 	emlxs_port_t *port = &PPORT;
9083 
9084 	/* Handle link down */
9085 	if (cqe->un.fc.att_type == ATT_TYPE_LINK_DOWN) {
9086 		(void) emlxs_fcf_linkdown_notify(port);
9087 
9088 		mutex_enter(&EMLXS_PORT_LOCK);
9089 		hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9090 		mutex_exit(&EMLXS_PORT_LOCK);
9091 		return;
9092 	}
9093 
9094 	/* Link is up */
9095 
9096 	/* Set linkspeed */
9097 	switch (cqe->un.fc.port_speed) {
9098 	case 1:
9099 		hba->linkspeed = LA_1GHZ_LINK;
9100 		break;
9101 	case 2:
9102 		hba->linkspeed = LA_2GHZ_LINK;
9103 		break;
9104 	case 4:
9105 		hba->linkspeed = LA_4GHZ_LINK;
9106 		break;
9107 	case 8:
9108 		hba->linkspeed = LA_8GHZ_LINK;
9109 		break;
9110 	case 10:
9111 		hba->linkspeed = LA_10GHZ_LINK;
9112 		break;
9113 	case 16:
9114 		hba->linkspeed = LA_16GHZ_LINK;
9115 		break;
9116 	default:
9117 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9118 		    "sli4_handle_fc_link_att: Unknown link speed=%x.",
9119 		    cqe->un.fc.port_speed);
9120 		hba->linkspeed = 0;
9121 		break;
9122 	}
9123 
9124 	/* Set qos_linkspeed */
9125 	hba->qos_linkspeed = cqe->un.fc.link_speed;
9126 
9127 	/* Set topology */
9128 	hba->topology = cqe->un.fc.topology;
9129 
9130 	mutex_enter(&EMLXS_PORT_LOCK);
9131 	hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9132 	mutex_exit(&EMLXS_PORT_LOCK);
9133 
9134 	(void) emlxs_fcf_linkup_notify(port);
9135 
9136 	return;
9137 
9138 } /* emlxs_sli4_handle_fc_link_att() */
9139 
9140 
/*
 * Discover and allocate the adapter's resource extents (XRI, RPI, VPI
 * and VFI) when the adapter uses extent-based resource management.
 * For each resource type the same two-step mailbox sequence is used:
 * discover the extent size/count, then allocate the extents and record
 * the returned base ids and totals in hba->sli.sli4.
 * Returns 0 on success, EIO on any mailbox failure.
 */
static int
emlxs_sli4_init_extents(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX4 *mb4;
	IOCTL_COMMON_EXTENTS *ep;
	uint32_t i;
	uint32_t ExtentCnt;

	/* Nothing to do if the adapter does not use resource extents */
	if (!(hba->sli.sli4.param.EXT)) {
		return (0);
	}

	mb4 = (MAILBOX4 *) mbq;

	/* Discover XRI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_XRI);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to discover XRI extents.  Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}

	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
	hba->sli.sli4.XRIExtSize = ep->params.response.ExtentSize;
	ExtentCnt = ep->params.response.ExtentCnt;

	/* Allocate XRI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_XRI, ExtentCnt);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate XRI extents.  Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}
	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;

	/* Save the base id of each allocated extent */
	bcopy((uint8_t *)ep->params.response.RscId,
	    (uint8_t *)hba->sli.sli4.XRIBase,
	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));

	hba->sli.sli4.XRIExtCount = ep->params.response.ExtentCnt;
	hba->sli.sli4.XRICount = hba->sli.sli4.XRIExtCount *
	    hba->sli.sli4.XRIExtSize;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "XRI Ext: size=%d cnt=%d/%d",
	    hba->sli.sli4.XRIExtSize,
	    hba->sli.sli4.XRIExtCount, ExtentCnt);

	/*
	 * NOTE(review): this debug loop (and the three below) steps by 4,
	 * so it reads up to 3 ids past ExtentCnt when ExtentCnt is not a
	 * multiple of 4 -- confirm the base arrays are padded accordingly.
	 */
	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "XRI Ext%d: %d, %d, %d, %d", i,
		    hba->sli.sli4.XRIBase[i],
		    hba->sli.sli4.XRIBase[i+1],
		    hba->sli.sli4.XRIBase[i+2],
		    hba->sli.sli4.XRIBase[i+3]);
	}


	/* Discover RPI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_RPI);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to discover RPI extents.  Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}

	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
	hba->sli.sli4.RPIExtSize = ep->params.response.ExtentSize;
	ExtentCnt = ep->params.response.ExtentCnt;

	/* Allocate RPI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_RPI, ExtentCnt);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate RPI extents.  Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}
	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;

	/* Save the base id of each allocated extent */
	bcopy((uint8_t *)ep->params.response.RscId,
	    (uint8_t *)hba->sli.sli4.RPIBase,
	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));

	hba->sli.sli4.RPIExtCount = ep->params.response.ExtentCnt;
	hba->sli.sli4.RPICount = hba->sli.sli4.RPIExtCount *
	    hba->sli.sli4.RPIExtSize;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "RPI Ext: size=%d cnt=%d/%d",
	    hba->sli.sli4.RPIExtSize,
	    hba->sli.sli4.RPIExtCount, ExtentCnt);

	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "RPI Ext%d: %d, %d, %d, %d", i,
		    hba->sli.sli4.RPIBase[i],
		    hba->sli.sli4.RPIBase[i+1],
		    hba->sli.sli4.RPIBase[i+2],
		    hba->sli.sli4.RPIBase[i+3]);
	}


	/* Discover VPI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_VPI);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to discover VPI extents.  Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}

	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
	hba->sli.sli4.VPIExtSize = ep->params.response.ExtentSize;
	ExtentCnt = ep->params.response.ExtentCnt;

	/* Allocate VPI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_VPI, ExtentCnt);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate VPI extents.  Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}
	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;

	/* Save the base id of each allocated extent */
	bcopy((uint8_t *)ep->params.response.RscId,
	    (uint8_t *)hba->sli.sli4.VPIBase,
	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));

	hba->sli.sli4.VPIExtCount = ep->params.response.ExtentCnt;
	hba->sli.sli4.VPICount = hba->sli.sli4.VPIExtCount *
	    hba->sli.sli4.VPIExtSize;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "VPI Ext: size=%d cnt=%d/%d",
	    hba->sli.sli4.VPIExtSize,
	    hba->sli.sli4.VPIExtCount, ExtentCnt);

	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "VPI Ext%d: %d, %d, %d, %d", i,
		    hba->sli.sli4.VPIBase[i],
		    hba->sli.sli4.VPIBase[i+1],
		    hba->sli.sli4.VPIBase[i+2],
		    hba->sli.sli4.VPIBase[i+3]);
	}

	/* Discover VFI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_VFI);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to discover VFI extents.  Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}

	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
	hba->sli.sli4.VFIExtSize = ep->params.response.ExtentSize;
	ExtentCnt = ep->params.response.ExtentCnt;

	/* Allocate VFI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_VFI, ExtentCnt);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate VFI extents.  Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}
	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;

	/* Save the base id of each allocated extent */
	bcopy((uint8_t *)ep->params.response.RscId,
	    (uint8_t *)hba->sli.sli4.VFIBase,
	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));

	hba->sli.sli4.VFIExtCount = ep->params.response.ExtentCnt;
	hba->sli.sli4.VFICount = hba->sli.sli4.VFIExtCount *
	    hba->sli.sli4.VFIExtSize;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "VFI Ext: size=%d cnt=%d/%d",
	    hba->sli.sli4.VFIExtSize,
	    hba->sli.sli4.VFIExtCount, ExtentCnt);

	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "VFI Ext%d: %d, %d, %d, %d", i,
		    hba->sli.sli4.VFIBase[i],
		    hba->sli.sli4.VFIBase[i+1],
		    hba->sli.sli4.VFIBase[i+2],
		    hba->sli.sli4.VFIBase[i+3]);
	}

	return (0);

} /* emlxs_sli4_init_extents() */
9365 
9366 
9367 extern uint32_t
emlxs_sli4_index_to_rpi(emlxs_hba_t * hba,uint32_t index)9368 emlxs_sli4_index_to_rpi(emlxs_hba_t *hba, uint32_t index)
9369 {
9370 	uint32_t i;
9371 	uint32_t j;
9372 	uint32_t rpi;
9373 
9374 	i = index / hba->sli.sli4.RPIExtSize;
9375 	j = index % hba->sli.sli4.RPIExtSize;
9376 	rpi = hba->sli.sli4.RPIBase[i] + j;
9377 
9378 	return (rpi);
9379 
9380 } /* emlxs_sli4_index_to_rpi */
9381 
9382 
9383 extern uint32_t
emlxs_sli4_rpi_to_index(emlxs_hba_t * hba,uint32_t rpi)9384 emlxs_sli4_rpi_to_index(emlxs_hba_t *hba, uint32_t rpi)
9385 {
9386 	uint32_t i;
9387 	uint32_t lo;
9388 	uint32_t hi;
9389 	uint32_t index = hba->sli.sli4.RPICount;
9390 
9391 	for (i = 0; i < hba->sli.sli4.RPIExtCount; i++) {
9392 		lo = hba->sli.sli4.RPIBase[i];
9393 		hi = lo + hba->sli.sli4.RPIExtSize;
9394 
9395 		if ((rpi < hi) && (rpi >= lo)) {
9396 			index = (i * hba->sli.sli4.RPIExtSize) + (rpi - lo);
9397 			break;
9398 		}
9399 	}
9400 
9401 	return (index);
9402 
9403 } /* emlxs_sli4_rpi_to_index */
9404 
9405 
9406 extern uint32_t
emlxs_sli4_index_to_xri(emlxs_hba_t * hba,uint32_t index)9407 emlxs_sli4_index_to_xri(emlxs_hba_t *hba, uint32_t index)
9408 {
9409 	uint32_t i;
9410 	uint32_t j;
9411 	uint32_t xri;
9412 
9413 	i = index / hba->sli.sli4.XRIExtSize;
9414 	j = index % hba->sli.sli4.XRIExtSize;
9415 	xri = hba->sli.sli4.XRIBase[i] + j;
9416 
9417 	return (xri);
9418 
9419 } /* emlxs_sli4_index_to_xri */
9420 
9421 
9422 
9423 
9424 extern uint32_t
emlxs_sli4_index_to_vpi(emlxs_hba_t * hba,uint32_t index)9425 emlxs_sli4_index_to_vpi(emlxs_hba_t *hba, uint32_t index)
9426 {
9427 	uint32_t i;
9428 	uint32_t j;
9429 	uint32_t vpi;
9430 
9431 	i = index / hba->sli.sli4.VPIExtSize;
9432 	j = index % hba->sli.sli4.VPIExtSize;
9433 	vpi = hba->sli.sli4.VPIBase[i] + j;
9434 
9435 	return (vpi);
9436 
9437 } /* emlxs_sli4_index_to_vpi */
9438 
9439 
9440 extern uint32_t
emlxs_sli4_vpi_to_index(emlxs_hba_t * hba,uint32_t vpi)9441 emlxs_sli4_vpi_to_index(emlxs_hba_t *hba, uint32_t vpi)
9442 {
9443 	uint32_t i;
9444 	uint32_t lo;
9445 	uint32_t hi;
9446 	uint32_t index = hba->sli.sli4.VPICount;
9447 
9448 	for (i = 0; i < hba->sli.sli4.VPIExtCount; i++) {
9449 		lo = hba->sli.sli4.VPIBase[i];
9450 		hi = lo + hba->sli.sli4.VPIExtSize;
9451 
9452 		if ((vpi < hi) && (vpi >= lo)) {
9453 			index = (i * hba->sli.sli4.VPIExtSize) + (vpi - lo);
9454 			break;
9455 		}
9456 	}
9457 
9458 	return (index);
9459 
9460 } /* emlxs_sli4_vpi_to_index */
9461 
9462 
9463 
9464 
9465 extern uint32_t
emlxs_sli4_index_to_vfi(emlxs_hba_t * hba,uint32_t index)9466 emlxs_sli4_index_to_vfi(emlxs_hba_t *hba, uint32_t index)
9467 {
9468 	uint32_t i;
9469 	uint32_t j;
9470 	uint32_t vfi;
9471 
9472 	i = index / hba->sli.sli4.VFIExtSize;
9473 	j = index % hba->sli.sli4.VFIExtSize;
9474 	vfi = hba->sli.sli4.VFIBase[i] + j;
9475 
9476 	return (vfi);
9477 
9478 } /* emlxs_sli4_index_to_vfi */
9479 
9480 
9481 static uint16_t
emlxs_sli4_rqid_to_index(emlxs_hba_t * hba,uint16_t rqid)9482 emlxs_sli4_rqid_to_index(emlxs_hba_t *hba, uint16_t rqid)
9483 {
9484 	uint16_t i;
9485 
9486 	if (rqid < 0xffff) {
9487 		for (i = 0; i < EMLXS_MAX_RQS; i++) {
9488 			if (hba->sli.sli4.rq[i].qid == rqid) {
9489 				return (i);
9490 			}
9491 		}
9492 	}
9493 
9494 	return (0xffff);
9495 
9496 } /* emlxs_sli4_rqid_to_index */
9497 
9498 
9499 static uint16_t
emlxs_sli4_wqid_to_index(emlxs_hba_t * hba,uint16_t wqid)9500 emlxs_sli4_wqid_to_index(emlxs_hba_t *hba, uint16_t wqid)
9501 {
9502 	uint16_t i;
9503 
9504 	if (wqid < 0xffff) {
9505 		for (i = 0; i < EMLXS_MAX_WQS; i++) {
9506 			if (hba->sli.sli4.wq[i].qid == wqid) {
9507 				return (i);
9508 			}
9509 		}
9510 	}
9511 
9512 	return (0xffff);
9513 
9514 } /* emlxs_sli4_wqid_to_index */
9515 
9516 
9517 static uint16_t
emlxs_sli4_cqid_to_index(emlxs_hba_t * hba,uint16_t cqid)9518 emlxs_sli4_cqid_to_index(emlxs_hba_t *hba, uint16_t cqid)
9519 {
9520 	uint16_t i;
9521 
9522 	if (cqid < 0xffff) {
9523 		for (i = 0; i < EMLXS_MAX_CQS; i++) {
9524 			if (hba->sli.sli4.cq[i].qid == cqid) {
9525 				return (i);
9526 			}
9527 		}
9528 	}
9529 
9530 	return (0xffff);
9531 
9532 } /* emlxs_sli4_cqid_to_index */
9533