xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli4.c (revision 33efde4275d24731ef87927237b0ffb0630b6b2d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at
9  * http://www.opensource.org/licenses/cddl1.txt.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2004-2012 Emulex. All rights reserved.
24  * Use is subject to license terms.
25  * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
26  * Copyright 2020 RackTop Systems, Inc.
27  */
28 
29 #include <emlxs.h>
30 
31 
32 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
33 EMLXS_MSG_DEF(EMLXS_SLI4_C);
34 
35 static int		emlxs_sli4_init_extents(emlxs_hba_t *hba,
36 				MAILBOXQ *mbq);
37 static uint32_t		emlxs_sli4_read_status(emlxs_hba_t *hba);
38 
39 static int		emlxs_init_bootstrap_mb(emlxs_hba_t *hba);
40 
41 static uint32_t		emlxs_sli4_read_sema(emlxs_hba_t *hba);
42 
43 static uint32_t		emlxs_sli4_read_mbdb(emlxs_hba_t *hba);
44 
45 static void		emlxs_sli4_write_mbdb(emlxs_hba_t *hba, uint64_t phys,
46 				boolean_t high);
47 
48 static void		emlxs_sli4_write_wqdb(emlxs_hba_t *hba, uint16_t qid,
49 				uint_t posted, uint_t index);
50 
51 static void		emlxs_sli4_write_mqdb(emlxs_hba_t *hba, uint16_t qid,
52 				uint_t count);
53 
54 static void		emlxs_sli4_write_rqdb(emlxs_hba_t *hba, uint16_t qid,
55 				uint_t count);
56 
57 static void		emlxs_sli4_write_eqdb(emlxs_hba_t *hba, uint16_t qid,
58 				uint32_t count, boolean_t arm);
59 static void		emlxs_sli4_write_cqdb(emlxs_hba_t *hba, uint16_t qid,
60 				uint32_t count, boolean_t arm);
61 
62 static int		emlxs_sli4_create_queues(emlxs_hba_t *hba,
63 				MAILBOXQ *mbq);
64 static int		emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba,
65 				MAILBOXQ *mbq);
66 static int		emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba,
67 				MAILBOXQ *mbq);
68 
69 static int		emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq);
70 
71 static int		emlxs_sli4_map_hdw(emlxs_hba_t *hba);
72 
73 static void		emlxs_sli4_unmap_hdw(emlxs_hba_t *hba);
74 
75 static int32_t		emlxs_sli4_online(emlxs_hba_t *hba);
76 
77 static void		emlxs_sli4_offline(emlxs_hba_t *hba,
78 				uint32_t reset_requested);
79 
80 static uint32_t		emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart,
81 				uint32_t skip_post, uint32_t quiesce);
82 static void		emlxs_sli4_hba_kill(emlxs_hba_t *hba);
83 
84 static uint32_t		emlxs_sli4_hba_init(emlxs_hba_t *hba);
85 
86 static uint32_t		emlxs_sli4_bde_setup(emlxs_port_t *port,
87 				emlxs_buf_t *sbp);
88 
89 static void		emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba,
90 				CHANNEL *cp, IOCBQ *iocb_cmd);
91 static uint32_t		emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba,
92 				MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
93 static uint32_t		emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba,
94 				MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
95 #ifdef SFCT_SUPPORT
96 static uint32_t		emlxs_sli4_prep_fct_iocb(emlxs_port_t *port,
97 				emlxs_buf_t *cmd_sbp, int channel);
98 static uint32_t		emlxs_sli4_fct_bde_setup(emlxs_port_t *port,
99 				emlxs_buf_t *sbp);
100 #endif /* SFCT_SUPPORT */
101 
102 static uint32_t		emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port,
103 				emlxs_buf_t *sbp, int ring);
104 static uint32_t		emlxs_sli4_prep_ip_iocb(emlxs_port_t *port,
105 				emlxs_buf_t *sbp);
106 static uint32_t		emlxs_sli4_prep_els_iocb(emlxs_port_t *port,
107 				emlxs_buf_t *sbp);
108 static uint32_t		emlxs_sli4_prep_ct_iocb(emlxs_port_t *port,
109 				emlxs_buf_t *sbp);
110 static void		emlxs_sli4_poll_intr(emlxs_hba_t *hba);
111 static int32_t		emlxs_sli4_intx_intr(char *arg);
112 
113 #ifdef MSI_SUPPORT
114 static uint32_t		emlxs_sli4_msi_intr(char *arg1, char *arg2);
115 #endif /* MSI_SUPPORT */
116 
117 static void		emlxs_sli4_resource_free(emlxs_hba_t *hba);
118 
119 static int		emlxs_sli4_resource_alloc(emlxs_hba_t *hba);
120 extern void		emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba);
121 
122 static XRIobj_t		*emlxs_sli4_alloc_xri(emlxs_port_t *port,
123 				emlxs_buf_t *sbp, RPIobj_t *rpip,
124 				uint32_t type);
125 static void		emlxs_sli4_enable_intr(emlxs_hba_t *hba);
126 
127 static void		emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att);
128 
129 static void		emlxs_sli4_timer(emlxs_hba_t *hba);
130 
131 static void		emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba);
132 
133 static void		emlxs_sli4_gpio_timer_start(emlxs_hba_t *hba);
134 
135 static void		emlxs_sli4_gpio_timer_stop(emlxs_hba_t *hba);
136 
137 static void		emlxs_sli4_gpio_timer(void *arg);
138 
139 static void		emlxs_sli4_check_gpio(emlxs_hba_t *hba);
140 
141 static uint32_t	emlxs_sli4_fix_gpio(emlxs_hba_t *hba,
142 					uint8_t *pin, uint8_t *pinval);
143 
144 static uint32_t	emlxs_sli4_fix_gpio_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq);
145 
146 static void		emlxs_sli4_poll_erratt(emlxs_hba_t *hba);
147 
148 extern XRIobj_t		*emlxs_sli4_reserve_xri(emlxs_port_t *port,
149 				RPIobj_t *rpip, uint32_t type, uint16_t rx_id);
150 static int		emlxs_check_hdw_ready(emlxs_hba_t *);
151 
152 static uint32_t		emlxs_sli4_reg_did(emlxs_port_t *port,
153 				uint32_t did, SERV_PARM *param,
154 				emlxs_buf_t *sbp, fc_unsol_buf_t *ubp,
155 				IOCBQ *iocbq);
156 
157 static uint32_t		emlxs_sli4_unreg_node(emlxs_port_t *port,
158 				emlxs_node_t *node, emlxs_buf_t *sbp,
159 				fc_unsol_buf_t *ubp, IOCBQ *iocbq);
160 
161 static void		emlxs_sli4_handle_fc_link_att(emlxs_hba_t *hba,
162 				CQE_ASYNC_t *cqe);
163 static void		emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t *hba,
164 				CQE_ASYNC_t *cqe);
165 
166 
167 static uint16_t		emlxs_sli4_rqid_to_index(emlxs_hba_t *hba,
168 				uint16_t rqid);
169 static uint16_t		emlxs_sli4_wqid_to_index(emlxs_hba_t *hba,
170 				uint16_t wqid);
171 static uint16_t		emlxs_sli4_cqid_to_index(emlxs_hba_t *hba,
172 				uint16_t cqid);
173 
174 /* Define SLI4 API functions */
/*
 * SLI4 entry-point table consumed by the common emlxs layer to dispatch
 * hardware-revision-specific operations.  The initializers are positional,
 * so their order must match the member order of emlxs_sli_api_t exactly --
 * confirm against the emlxs_sli_api_t declaration before reordering.
 * When SFCT_SUPPORT is not compiled in, the FC target (FCT) iocb-prep
 * slot is explicitly NULL.
 *
 * NOTE(review): emlxs_sli4_msi_intr is referenced here unconditionally,
 * but its prototype above is guarded by #ifdef MSI_SUPPORT -- confirm
 * MSI_SUPPORT is always defined for this build, or guard this slot too.
 */
175 emlxs_sli_api_t emlxs_sli4_api = {
176 	emlxs_sli4_map_hdw,
177 	emlxs_sli4_unmap_hdw,
178 	emlxs_sli4_online,
179 	emlxs_sli4_offline,
180 	emlxs_sli4_hba_reset,
181 	emlxs_sli4_hba_kill,
182 	emlxs_sli4_issue_iocb_cmd,
183 	emlxs_sli4_issue_mbox_cmd,
184 #ifdef SFCT_SUPPORT
185 	emlxs_sli4_prep_fct_iocb,
186 #else
187 	NULL,
188 #endif /* SFCT_SUPPORT */
189 	emlxs_sli4_prep_fcp_iocb,
190 	emlxs_sli4_prep_ip_iocb,
191 	emlxs_sli4_prep_els_iocb,
192 	emlxs_sli4_prep_ct_iocb,
193 	emlxs_sli4_poll_intr,
194 	emlxs_sli4_intx_intr,
195 	emlxs_sli4_msi_intr,
196 	emlxs_sli4_disable_intr,
197 	emlxs_sli4_timer,
198 	emlxs_sli4_poll_erratt,
199 	emlxs_sli4_reg_did,
200 	emlxs_sli4_unreg_node
201 };
202 
203 
204 /* ************************************************************************** */
205 
206 static void
emlxs_sli4_set_default_params(emlxs_hba_t * hba)207 emlxs_sli4_set_default_params(emlxs_hba_t *hba)
208 {
209 	emlxs_port_t *port = &PPORT;
210 
211 	bzero((char *)&hba->sli.sli4.param, sizeof (sli_params_t));
212 
213 	hba->sli.sli4.param.ProtocolType = 0x3; /* FC/FCoE */
214 
215 	hba->sli.sli4.param.SliHint2 = 0;
216 	hba->sli.sli4.param.SliHint1 = 0;
217 	hba->sli.sli4.param.IfType = 0;
218 	hba->sli.sli4.param.SliFamily = 0;
219 	hba->sli.sli4.param.Revision = 0x4; /* SLI4 */
220 	hba->sli.sli4.param.FT = 0;
221 
222 	hba->sli.sli4.param.EqeCntMethod = 0x1; /* Bit pattern */
223 	hba->sli.sli4.param.EqPageSize = 0x1; /* 4096 */
224 	hba->sli.sli4.param.EqeSize = 0x1; /* 4 byte */
225 	hba->sli.sli4.param.EqPageCnt = 8;
226 	hba->sli.sli4.param.EqeCntMask = 0x1F; /* 256-4096 elements */
227 
228 	hba->sli.sli4.param.CqeCntMethod = 0x1; /* Bit pattern */
229 	hba->sli.sli4.param.CqPageSize = 0x1; /* 4096 */
230 	hba->sli.sli4.param.CQV = 0;
231 	hba->sli.sli4.param.CqeSize = 0x3; /* 16 byte */
232 	hba->sli.sli4.param.CqPageCnt = 4;
233 	hba->sli.sli4.param.CqeCntMask = 0x70; /* 256-1024 elements */
234 
235 	hba->sli.sli4.param.MqeCntMethod = 0x1; /* Bit pattern */
236 	hba->sli.sli4.param.MqPageSize = 0x1; /* 4096 */
237 	hba->sli.sli4.param.MQV = 0;
238 	hba->sli.sli4.param.MqPageCnt = 8;
239 	hba->sli.sli4.param.MqeCntMask = 0x0F; /* 16-128 elements */
240 
241 	hba->sli.sli4.param.WqeCntMethod = 0; /* Page Count */
242 	hba->sli.sli4.param.WqPageSize = 0x1; /* 4096 */
243 	hba->sli.sli4.param.WQV = 0;
244 	hba->sli.sli4.param.WqeSize = 0x5; /* 64 byte */
245 	hba->sli.sli4.param.WqPageCnt = 4;
246 	hba->sli.sli4.param.WqeCntMask = 0x10; /* 256 elements */
247 
248 	hba->sli.sli4.param.RqeCntMethod = 0; /* Page Count */
249 	hba->sli.sli4.param.RqPageSize = 0x1; /* 4096 */
250 	hba->sli.sli4.param.RQV = 0;
251 	hba->sli.sli4.param.RqeSize = 0x2; /* 8 byte */
252 	hba->sli.sli4.param.RqPageCnt = 8;
253 	hba->sli.sli4.param.RqDbWin = 1;
254 	hba->sli.sli4.param.RqeCntMask = 0x100; /* 4096 elements */
255 
256 	hba->sli.sli4.param.Loopback = 0xf; /* unsupported */
257 	hba->sli.sli4.param.PHWQ = 0;
258 	hba->sli.sli4.param.PHON = 0;
259 	hba->sli.sli4.param.TRIR = 0;
260 	hba->sli.sli4.param.TRTY = 0;
261 	hba->sli.sli4.param.TCCA = 0;
262 	hba->sli.sli4.param.MWQE = 0;
263 	hba->sli.sli4.param.ASSI = 0;
264 	hba->sli.sli4.param.TERP = 0;
265 	hba->sli.sli4.param.TGT  = 0;
266 	hba->sli.sli4.param.AREG = 0;
267 	hba->sli.sli4.param.FBRR = 0;
268 	hba->sli.sli4.param.SGLR = 1;
269 	hba->sli.sli4.param.HDRR = 1;
270 	hba->sli.sli4.param.EXT  = 0;
271 	hba->sli.sli4.param.FCOE = 1;
272 
273 	hba->sli.sli4.param.SgeLength = (64 * 1024);
274 	hba->sli.sli4.param.SglAlign = 0x7 /* 4096 */;
275 	hba->sli.sli4.param.SglPageSize = 0x1; /* 4096 */
276 	hba->sli.sli4.param.SglPageCnt = 2;
277 
278 	hba->sli.sli4.param.MinRqSize = 128;
279 	hba->sli.sli4.param.MaxRqSize = 2048;
280 
281 	hba->sli.sli4.param.RPIMax = 0x3ff;
282 	hba->sli.sli4.param.XRIMax = 0x3ff;
283 	hba->sli.sli4.param.VFIMax = 0xff;
284 	hba->sli.sli4.param.VPIMax = 0xff;
285 
286 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
287 	    "Default SLI4 parameters set.");
288 
289 } /* emlxs_sli4_set_default_params() */
290 
291 
292 /*
293  * emlxs_sli4_online()
294  *
295  * This routine will start initialization of the SLI4 HBA.
296  */
297 static int32_t
emlxs_sli4_online(emlxs_hba_t * hba)298 emlxs_sli4_online(emlxs_hba_t *hba)
299 {
300 	emlxs_port_t *port = &PPORT;
301 	emlxs_config_t *cfg;
302 	emlxs_vpd_t *vpd;
303 	MAILBOXQ *mbq = NULL;
304 	MAILBOX4 *mb  = NULL;
305 	MATCHMAP *mp  = NULL;
306 	uint32_t i;
307 	uint32_t j;
308 	uint32_t rval = 0;
309 	uint8_t *vpd_data;
310 	uint32_t sli_mode;
311 	uint8_t *outptr;
312 	uint32_t status;
313 	uint32_t fw_check;
314 	uint32_t kern_update = 0;
315 	emlxs_firmware_t hba_fw;
316 	emlxs_firmware_t *fw;
317 	uint16_t ssvid;
318 	char buf[64];
319 
320 	cfg = &CFG;
321 	vpd = &VPD;
322 
323 	sli_mode = EMLXS_HBA_SLI4_MODE;
324 	hba->sli_mode = sli_mode;
325 
326 	/* Set the fw_check flag */
327 	fw_check = cfg[CFG_FW_CHECK].current;
328 
329 	if ((fw_check & 0x04) ||
330 	    (hba->fw_flag & FW_UPDATE_KERNEL)) {
331 		kern_update = 1;
332 	}
333 
334 	hba->mbox_queue_flag = 0;
335 	hba->fc_edtov = FF_DEF_EDTOV;
336 	hba->fc_ratov = FF_DEF_RATOV;
337 	hba->fc_altov = FF_DEF_ALTOV;
338 	hba->fc_arbtov = FF_DEF_ARBTOV;
339 
340 	/* Networking not supported */
341 	if (cfg[CFG_NETWORK_ON].current) {
342 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
343 		    "Networking is not supported in SLI4, turning it off");
344 		cfg[CFG_NETWORK_ON].current = 0;
345 	}
346 
347 	hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
348 	if (hba->chan_count > MAX_CHANNEL) {
349 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
350 		    "Max channels exceeded, dropping num-wq from %d to 1",
351 		    cfg[CFG_NUM_WQ].current);
352 		cfg[CFG_NUM_WQ].current = 1;
353 		hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
354 	}
355 	hba->channel_fcp = 0; /* First channel */
356 
357 	/* Default channel for everything else is the last channel */
358 	hba->channel_ip = hba->chan_count - 1;
359 	hba->channel_els = hba->chan_count - 1;
360 	hba->channel_ct = hba->chan_count - 1;
361 
362 	hba->fc_iotag = 1;
363 	hba->io_count = 0;
364 	hba->channel_tx_count = 0;
365 
366 	/* Specific to ATTO G5 boards */
367 	if (hba->model_info.flags & EMLXS_GPIO_LEDS) {
368 		/* Set hard-coded GPIO pins */
369 		if (hba->pci_function_number) {
370 			hba->gpio_pin[EMLXS_GPIO_PIN_LO] = 27;
371 			hba->gpio_pin[EMLXS_GPIO_PIN_HI] = 28;
372 			hba->gpio_pin[EMLXS_GPIO_PIN_ACT] = 29;
373 			hba->gpio_pin[EMLXS_GPIO_PIN_LASER] = 8;
374 		} else {
375 			hba->gpio_pin[EMLXS_GPIO_PIN_LO] = 13;
376 			hba->gpio_pin[EMLXS_GPIO_PIN_HI] = 25;
377 			hba->gpio_pin[EMLXS_GPIO_PIN_ACT] = 26;
378 			hba->gpio_pin[EMLXS_GPIO_PIN_LASER] = 12;
379 		}
380 	}
381 
382 	/* Initialize the local dump region buffer */
383 	bzero(&hba->sli.sli4.dump_region, sizeof (MBUF_INFO));
384 	hba->sli.sli4.dump_region.size = EMLXS_DUMP_REGION_SIZE;
385 	hba->sli.sli4.dump_region.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
386 	hba->sli.sli4.dump_region.align = ddi_ptob(hba->dip, 1L);
387 
388 	(void) emlxs_mem_alloc(hba, &hba->sli.sli4.dump_region);
389 
390 	if (hba->sli.sli4.dump_region.virt == NULL) {
391 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
392 		    "Unable to allocate dump region buffer.");
393 
394 		return (ENOMEM);
395 	}
396 
397 	/*
398 	 * Get a buffer which will be used repeatedly for mailbox commands
399 	 */
400 	mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
401 
402 	mb = (MAILBOX4 *)mbq;
403 
404 reset:
405 	/* Reset & Initialize the adapter */
406 	if (emlxs_sli4_hba_init(hba)) {
407 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
408 		    "Unable to init hba.");
409 
410 		rval = EIO;
411 		goto failed1;
412 	}
413 
414 #ifdef FMA_SUPPORT
415 	/* Access handle validation */
416 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
417 	case SLI_INTF_IF_TYPE_6:
418 		if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
419 		    != DDI_FM_OK) ||
420 		    (emlxs_fm_check_acc_handle(hba,
421 		    hba->sli.sli4.bar0_acc_handle) != DDI_FM_OK) ||
422 		    (emlxs_fm_check_acc_handle(hba,
423 		    hba->sli.sli4.bar1_acc_handle) != DDI_FM_OK)) {
424 			EMLXS_MSGF(EMLXS_CONTEXT,
425 			    &emlxs_invalid_access_handle_msg, NULL);
426 
427 			rval = EIO;
428 			goto failed1;
429 		}
430 		break;
431 	case SLI_INTF_IF_TYPE_2:
432 		if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
433 		    != DDI_FM_OK) ||
434 		    (emlxs_fm_check_acc_handle(hba,
435 		    hba->sli.sli4.bar0_acc_handle) != DDI_FM_OK)) {
436 			EMLXS_MSGF(EMLXS_CONTEXT,
437 			    &emlxs_invalid_access_handle_msg, NULL);
438 
439 			rval = EIO;
440 			goto failed1;
441 		}
442 		break;
443 	default :
444 		if ((emlxs_fm_check_acc_handle(hba,
445 		    hba->pci_acc_handle) != DDI_FM_OK) ||
446 		    (emlxs_fm_check_acc_handle(hba,
447 		    hba->sli.sli4.bar1_acc_handle) != DDI_FM_OK) ||
448 		    (emlxs_fm_check_acc_handle(hba,
449 		    hba->sli.sli4.bar2_acc_handle) != DDI_FM_OK)) {
450 			EMLXS_MSGF(EMLXS_CONTEXT,
451 			    &emlxs_invalid_access_handle_msg, NULL);
452 
453 			rval = EIO;
454 			goto failed1;
455 		}
456 		break;
457 	}
458 #endif	/* FMA_SUPPORT */
459 
460 	/*
461 	 * Setup and issue mailbox READ REV command
462 	 */
463 	vpd->opFwRev = 0;
464 	vpd->postKernRev = 0;
465 	vpd->sli1FwRev = 0;
466 	vpd->sli2FwRev = 0;
467 	vpd->sli3FwRev = 0;
468 	vpd->sli4FwRev = 0;
469 
470 	vpd->postKernName[0] = 0;
471 	vpd->opFwName[0] = 0;
472 	vpd->sli1FwName[0] = 0;
473 	vpd->sli2FwName[0] = 0;
474 	vpd->sli3FwName[0] = 0;
475 	vpd->sli4FwName[0] = 0;
476 
477 	vpd->opFwLabel[0] = 0;
478 	vpd->sli1FwLabel[0] = 0;
479 	vpd->sli2FwLabel[0] = 0;
480 	vpd->sli3FwLabel[0] = 0;
481 	vpd->sli4FwLabel[0] = 0;
482 
483 	EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
484 
485 	emlxs_mb_get_sli4_params(hba, mbq);
486 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
487 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
488 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
489 		    mb->mbxCommand, mb->mbxStatus);
490 
491 		/* Set param defaults */
492 		emlxs_sli4_set_default_params(hba);
493 
494 	} else {
495 		/* Save parameters */
496 		bcopy((char *)&mb->un.varSLIConfig.payload,
497 		    (char *)&hba->sli.sli4.param, sizeof (sli_params_t));
498 
499 		emlxs_data_dump(port, "SLI_PARMS",
500 		    (uint32_t *)&hba->sli.sli4.param,
501 		    sizeof (sli_params_t), 0);
502 	}
503 
504 	/* Reuse mbq from previous mbox */
505 	bzero(mbq, sizeof (MAILBOXQ));
506 
507 	emlxs_mb_get_port_name(hba, mbq);
508 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
509 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
510 		    "Unable to get port names. Mailbox cmd=%x status=%x",
511 		    mb->mbxCommand, mb->mbxStatus);
512 
513 		bzero(hba->sli.sli4.port_name,
514 		    sizeof (hba->sli.sli4.port_name));
515 	} else {
516 		/* Save port names */
517 		bcopy((char *)&mb->un.varSLIConfig.payload,
518 		    (char *)&hba->sli.sli4.port_name,
519 		    sizeof (hba->sli.sli4.port_name));
520 	}
521 
522 	/* Reuse mbq from previous mbox */
523 	bzero(mbq, sizeof (MAILBOXQ));
524 
525 	emlxs_mb_read_rev(hba, mbq, 0);
526 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
527 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
528 		    "Unable to read rev. Mailbox cmd=%x status=%x",
529 		    mb->mbxCommand, mb->mbxStatus);
530 
531 		rval = EIO;
532 		goto failed1;
533 
534 	}
535 
536 	emlxs_data_dump(port, "RD_REV", (uint32_t *)mb, 18, 0);
537 	if (mb->un.varRdRev4.sliLevel != 4) {
538 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
539 		    "Invalid read rev Version for SLI4: 0x%x",
540 		    mb->un.varRdRev4.sliLevel);
541 
542 		rval = EIO;
543 		goto failed1;
544 	}
545 
546 	switch (mb->un.varRdRev4.dcbxMode) {
547 	case EMLXS_DCBX_MODE_CIN:	/* Mapped to nonFIP mode */
548 		hba->flag &= ~FC_FIP_SUPPORTED;
549 		break;
550 
551 	case EMLXS_DCBX_MODE_CEE:	/* Mapped to FIP mode */
552 		hba->flag |= FC_FIP_SUPPORTED;
553 		break;
554 
555 	default:
556 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
557 		    "Invalid read rev dcbx mode for SLI4: 0x%x",
558 		    mb->un.varRdRev4.dcbxMode);
559 
560 		rval = EIO;
561 		goto failed1;
562 	}
563 
564 	/* Set FC/FCoE mode */
565 	if (mb->un.varRdRev4.FCoE) {
566 		hba->sli.sli4.flag |= EMLXS_SLI4_FCOE_MODE;
567 	} else {
568 		hba->sli.sli4.flag &= ~EMLXS_SLI4_FCOE_MODE;
569 	}
570 
571 	/* Save information as VPD data */
572 	vpd->rBit = 1;
573 
574 	vpd->sli4FwRev = (mb->un.varRdRev4.ULPFwId);
575 	bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->sli4FwName, 16);
576 
577 	vpd->opFwRev = (mb->un.varRdRev4.ULPFwId);
578 	bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->opFwName, 16);
579 
580 	vpd->postKernRev = (mb->un.varRdRev4.ARMFwId);
581 	bcopy((char *)mb->un.varRdRev4.ARMFwName, vpd->postKernName, 16);
582 
583 	vpd->biuRev = mb->un.varRdRev4.HwRev1;
584 	vpd->fcphHigh = mb->un.varRdRev4.fcphHigh;
585 	vpd->fcphLow = mb->un.varRdRev4.fcphLow;
586 	vpd->feaLevelHigh = mb->un.varRdRev4.feaLevelHigh;
587 	vpd->feaLevelLow = mb->un.varRdRev4.feaLevelLow;
588 
589 	/* Decode FW labels */
590 	if ((hba->model_info.chip & EMLXS_LANCER_CHIPS) != 0) {
591 		bcopy(vpd->postKernName, vpd->sli4FwName, 16);
592 	}
593 	emlxs_decode_label(vpd->sli4FwName, vpd->sli4FwName, 0,
594 	    sizeof (vpd->sli4FwName));
595 	emlxs_decode_label(vpd->opFwName, vpd->opFwName, 0,
596 	    sizeof (vpd->opFwName));
597 	emlxs_decode_label(vpd->postKernName, vpd->postKernName, 0,
598 	    sizeof (vpd->postKernName));
599 
600 	if (hba->model_info.chip == EMLXS_BE2_CHIP) {
601 		(void) strlcpy(vpd->sli4FwLabel, "be2.ufi",
602 		    sizeof (vpd->sli4FwLabel));
603 	} else if (hba->model_info.chip == EMLXS_BE3_CHIP) {
604 		(void) strlcpy(vpd->sli4FwLabel, "be3.ufi",
605 		    sizeof (vpd->sli4FwLabel));
606 	} else if (hba->model_info.chip == EMLXS_LANCER_CHIP) {
607 		(void) strlcpy(vpd->sli4FwLabel, "xe201.grp",
608 		    sizeof (vpd->sli4FwLabel));
609 	} else if (hba->model_info.chip == EMLXS_LANCERG6_CHIP) {
610 		(void) strlcpy(vpd->sli4FwLabel, "xe501.grp",
611 		    sizeof (vpd->sli4FwLabel));
612 	} else if (hba->model_info.chip == EMLXS_PRISMG7_CHIP) {
613 		(void) strlcpy(vpd->sli4FwLabel, "xe601.grp",
614 		    sizeof (vpd->sli4FwLabel));
615 	} else {
616 		(void) strlcpy(vpd->sli4FwLabel, "sli4.fw",
617 		    sizeof (vpd->sli4FwLabel));
618 	}
619 
620 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
621 	    "VPD ULP:%08x %s ARM:%08x %s f:%d %d %d %d : dcbx %d",
622 	    vpd->opFwRev, vpd->opFwName, vpd->postKernRev, vpd->postKernName,
623 	    vpd->fcphHigh, vpd->fcphLow, vpd->feaLevelHigh, vpd->feaLevelLow,
624 	    mb->un.varRdRev4.dcbxMode);
625 
626 	/* No key information is needed for SLI4 products */
627 
628 	/* Get adapter VPD information */
629 	vpd->port_index = (uint32_t)-1;
630 
631 	/* Reuse mbq from previous mbox */
632 	bzero(mbq, sizeof (MAILBOXQ));
633 
634 	emlxs_mb_dump_vpd(hba, mbq, 0);
635 	vpd_data = hba->sli.sli4.dump_region.virt;
636 
637 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
638 	    MBX_SUCCESS) {
639 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
640 		    "No VPD found. status=%x", mb->mbxStatus);
641 	} else {
642 		EMLXS_MSGF(EMLXS_CONTEXT,
643 		    &emlxs_init_debug_msg,
644 		    "VPD dumped. rsp_cnt=%d status=%x",
645 		    mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
646 
647 		if (mb->un.varDmp4.rsp_cnt) {
648 			EMLXS_MPDATA_SYNC(hba->sli.sli4.dump_region.dma_handle,
649 			    0, mb->un.varDmp4.rsp_cnt, DDI_DMA_SYNC_FORKERNEL);
650 
651 #ifdef FMA_SUPPORT
652 			if (hba->sli.sli4.dump_region.dma_handle) {
653 				if (emlxs_fm_check_dma_handle(hba,
654 				    hba->sli.sli4.dump_region.dma_handle)
655 				    != DDI_FM_OK) {
656 					EMLXS_MSGF(EMLXS_CONTEXT,
657 					    &emlxs_invalid_dma_handle_msg,
658 					    "sli4_online: hdl=%p",
659 					    hba->sli.sli4.dump_region.
660 					    dma_handle);
661 					rval = EIO;
662 					goto failed1;
663 				}
664 			}
665 #endif /* FMA_SUPPORT */
666 
667 		}
668 	}
669 
670 	if (vpd_data[0]) {
671 		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data,
672 		    mb->un.varDmp4.rsp_cnt);
673 
674 		/*
675 		 * If there is a VPD part number, and it does not
676 		 * match the current default HBA model info,
677 		 * replace the default data with an entry that
678 		 * does match.
679 		 *
680 		 * After emlxs_parse_vpd model holds the VPD value
681 		 * for V2 and part_num hold the value for PN. These
682 		 * 2 values are NOT necessarily the same.
683 		 */
684 
685 		rval = 0;
686 		if ((vpd->model[0] != 0) &&
687 		    (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
688 
689 			/* First scan for a V2 match */
690 
691 			for (i = 1; i < emlxs_pci_model_count; i++) {
692 				if (strcmp(&vpd->model[0],
693 				    emlxs_pci_model[i].model) == 0) {
694 					bcopy(&emlxs_pci_model[i],
695 					    &hba->model_info,
696 					    sizeof (emlxs_model_t));
697 					rval = 1;
698 					break;
699 				}
700 			}
701 		}
702 
703 		if (!rval && (vpd->part_num[0] != 0) &&
704 		    (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
705 
706 			/* Next scan for a PN match */
707 
708 			for (i = 1; i < emlxs_pci_model_count; i++) {
709 				if (strcmp(&vpd->part_num[0],
710 				    emlxs_pci_model[i].model) == 0) {
711 					bcopy(&emlxs_pci_model[i],
712 					    &hba->model_info,
713 					    sizeof (emlxs_model_t));
714 					break;
715 				}
716 			}
717 		}
718 
719 		/* HP CNA port indices start at 1 instead of 0 */
720 		if (hba->model_info.chip & EMLXS_BE_CHIPS) {
721 			ssvid = ddi_get16(hba->pci_acc_handle,
722 			    (uint16_t *)(hba->pci_addr + PCI_SSVID_REGISTER));
723 
724 			if ((ssvid == PCI_SSVID_HP) && (vpd->port_index > 0)) {
725 				vpd->port_index--;
726 			}
727 		}
728 
729 		/*
730 		 * Now lets update hba->model_info with the real
731 		 * VPD data, if any.
732 		 */
733 
734 		/*
735 		 * Replace the default model description with vpd data
736 		 */
737 		if (vpd->model_desc[0] != 0) {
738 			(void) strncpy(hba->model_info.model_desc,
739 			    vpd->model_desc,
740 			    (sizeof (hba->model_info.model_desc)-1));
741 		}
742 
743 		/* Replace the default model with vpd data */
744 		if (vpd->model[0] != 0) {
745 			(void) strncpy(hba->model_info.model, vpd->model,
746 			    (sizeof (hba->model_info.model)-1));
747 		}
748 
749 		/* Replace the default program types with vpd data */
750 		if (vpd->prog_types[0] != 0) {
751 			emlxs_parse_prog_types(hba, vpd->prog_types);
752 		}
753 	}
754 
755 	/*
756 	 * Since the adapter model may have changed with the vpd data
757 	 * lets double check if adapter is not supported
758 	 */
759 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
760 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
761 		    "Unsupported adapter found.  "
762 		    "Id:%d  Vendor id:0x%x  Device id:0x%x  SSDID:0x%x  "
763 		    "Model:%s", hba->model_info.id, hba->model_info.vendor_id,
764 		    hba->model_info.device_id, hba->model_info.ssdid,
765 		    hba->model_info.model);
766 
767 		rval = EIO;
768 		goto failed1;
769 	}
770 
771 	(void) strncpy(vpd->boot_version, vpd->sli4FwName,
772 	    (sizeof (vpd->boot_version)-1));
773 
774 	/* Get fcode version property */
775 	emlxs_get_fcode_version(hba);
776 
777 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
778 	    "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
779 	    vpd->opFwRev, vpd->sli1FwRev);
780 
781 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
782 	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
783 	    vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
784 
785 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
786 	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
787 
788 	/*
789 	 * If firmware checking is enabled and the adapter model indicates
790 	 * a firmware image, then perform firmware version check
791 	 */
792 	hba->fw_flag = 0;
793 	hba->fw_timer = 0;
794 
795 	if (((fw_check & 0x1) &&
796 	    (hba->model_info.flags & EMLXS_ORACLE_BRANDED) &&
797 	    hba->model_info.fwid) ||
798 	    ((fw_check & 0x2) && hba->model_info.fwid)) {
799 
800 		/* Find firmware image indicated by adapter model */
801 		fw = NULL;
802 		for (i = 0; i < emlxs_fw_count; i++) {
803 			if (emlxs_fw_table[i].id == hba->model_info.fwid) {
804 				fw = &emlxs_fw_table[i];
805 				break;
806 			}
807 		}
808 
809 		/*
810 		 * If the image was found, then verify current firmware
811 		 * versions of adapter
812 		 */
813 		if (fw) {
814 			/* Obtain current firmware version info */
815 			if (hba->model_info.chip & EMLXS_BE_CHIPS) {
816 				(void) emlxs_be_read_fw_version(hba, &hba_fw);
817 			} else {
818 				hba_fw.kern = vpd->postKernRev;
819 				hba_fw.stub = vpd->opFwRev;
820 				hba_fw.sli1 = vpd->sli1FwRev;
821 				hba_fw.sli2 = vpd->sli2FwRev;
822 				hba_fw.sli3 = vpd->sli3FwRev;
823 				hba_fw.sli4 = vpd->sli4FwRev;
824 			}
825 
826 			if (!kern_update &&
827 			    ((fw->kern && (hba_fw.kern != fw->kern)) ||
828 			    (fw->stub && (hba_fw.stub != fw->stub)))) {
829 
830 				hba->fw_flag |= FW_UPDATE_NEEDED;
831 
832 			} else if ((fw->kern && (hba_fw.kern != fw->kern)) ||
833 			    (fw->stub && (hba_fw.stub != fw->stub)) ||
834 			    (fw->sli1 && (hba_fw.sli1 != fw->sli1)) ||
835 			    (fw->sli2 && (hba_fw.sli2 != fw->sli2)) ||
836 			    (fw->sli3 && (hba_fw.sli3 != fw->sli3)) ||
837 			    (fw->sli4 && (hba_fw.sli4 != fw->sli4))) {
838 
839 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
840 				    "Firmware update needed. "
841 				    "Updating. id=%d fw=%d",
842 				    hba->model_info.id, hba->model_info.fwid);
843 
844 #ifdef MODFW_SUPPORT
845 				/*
846 				 * Load the firmware image now
847 				 * If MODFW_SUPPORT is not defined, the
848 				 * firmware image will already be defined
849 				 * in the emlxs_fw_table
850 				 */
851 				emlxs_fw_load(hba, fw);
852 #endif /* MODFW_SUPPORT */
853 
854 				if (fw->image && fw->size) {
855 					uint32_t rc;
856 
857 					rc = emlxs_fw_download(hba,
858 					    (char *)fw->image, fw->size, 0);
859 					if ((rc != FC_SUCCESS) &&
860 					    (rc != EMLXS_REBOOT_REQUIRED)) {
861 						EMLXS_MSGF(EMLXS_CONTEXT,
862 						    &emlxs_init_msg,
863 						    "Firmware update failed.");
864 						hba->fw_flag |=
865 						    FW_UPDATE_NEEDED;
866 					}
867 #ifdef MODFW_SUPPORT
868 					/*
869 					 * Unload the firmware image from
870 					 * kernel memory
871 					 */
872 					emlxs_fw_unload(hba, fw);
873 #endif /* MODFW_SUPPORT */
874 
875 					fw_check = 0;
876 
877 					goto reset;
878 				}
879 
880 				hba->fw_flag |= FW_UPDATE_NEEDED;
881 
882 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
883 				    "Firmware image unavailable.");
884 			} else {
885 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
886 				    "Firmware update not needed.");
887 			}
888 		} else {
889 			/*
890 			 * This means either the adapter database is not
891 			 * correct or a firmware image is missing from the
892 			 * compile
893 			 */
894 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
895 			    "Firmware image unavailable. id=%d fw=%d",
896 			    hba->model_info.id, hba->model_info.fwid);
897 		}
898 	}
899 
900 	/* Reuse mbq from previous mbox */
901 	bzero(mbq, sizeof (MAILBOXQ));
902 
903 	emlxs_mb_dump_fcoe(hba, mbq, 0);
904 
905 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
906 	    MBX_SUCCESS) {
907 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
908 		    "No FCOE info found. status=%x", mb->mbxStatus);
909 	} else {
910 		EMLXS_MSGF(EMLXS_CONTEXT,
911 		    &emlxs_init_debug_msg,
912 		    "FCOE info dumped. rsp_cnt=%d status=%x",
913 		    mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
914 		(void) emlxs_parse_fcoe(hba,
915 		    (uint8_t *)hba->sli.sli4.dump_region.virt,
916 		    mb->un.varDmp4.rsp_cnt);
917 	}
918 
919 	/* Reuse mbq from previous mbox */
920 	bzero(mbq, sizeof (MAILBOXQ));
921 
922 	status = 0;
923 	if (port->flag & EMLXS_INI_ENABLED) {
924 		status |= SLI4_FEATURE_FCP_INITIATOR;
925 	}
926 	if (port->flag & EMLXS_TGT_ENABLED) {
927 		status |= SLI4_FEATURE_FCP_TARGET;
928 	}
929 	if (cfg[CFG_NPIV_ENABLE].current) {
930 		status |= SLI4_FEATURE_NPIV;
931 	}
932 	if (cfg[CFG_RQD_MODE].current) {
933 		status |= SLI4_FEATURE_RQD;
934 	}
935 	if (cfg[CFG_PERF_HINT].current) {
936 		if (hba->sli.sli4.param.PHON) {
937 			status |= SLI4_FEATURE_PERF_HINT;
938 		}
939 	}
940 
941 	emlxs_mb_request_features(hba, mbq, status);
942 
943 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
944 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
945 		    "Unable to REQUEST_FEATURES. Mailbox cmd=%x status=%x",
946 		    mb->mbxCommand, mb->mbxStatus);
947 
948 		rval = EIO;
949 		goto failed1;
950 	}
951 	emlxs_data_dump(port, "REQ_FEATURE", (uint32_t *)mb, 6, 0);
952 
953 	/* Check to see if we get the features we requested */
954 	if (status != mb->un.varReqFeatures.featuresEnabled) {
955 
956 		/* Just report descrepencies, don't abort the attach */
957 
958 		outptr = (uint8_t *)emlxs_request_feature_xlate(
959 		    mb->un.varReqFeatures.featuresRequested);
960 		(void) strlcpy(buf, (char *)outptr, sizeof (buf));
961 
962 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
963 		    "REQUEST_FEATURES: wanted:%s  got:%s",
964 		    &buf[0], emlxs_request_feature_xlate(
965 		    mb->un.varReqFeatures.featuresEnabled));
966 
967 	}
968 
969 	if ((port->flag & EMLXS_INI_ENABLED) &&
970 	    !(mb->un.varReqFeatures.featuresEnabled &
971 	    SLI4_FEATURE_FCP_INITIATOR)) {
972 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
973 		    "Initiator mode not supported by adapter.");
974 
975 		rval = EIO;
976 
977 #ifdef SFCT_SUPPORT
978 		/* Check if we can fall back to just target mode */
979 		if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
980 		    (mb->un.varReqFeatures.featuresEnabled &
981 		    SLI4_FEATURE_FCP_TARGET) &&
982 		    (cfg[CFG_DTM_ENABLE].current == 1) &&
983 		    (cfg[CFG_TARGET_MODE].current == 1)) {
984 
985 			cfg[CFG_DTM_ENABLE].current = 0;
986 
987 			EMLXS_MSGF(EMLXS_CONTEXT,
988 			    &emlxs_init_failed_msg,
989 			    "Disabling dynamic target mode. "
990 			    "Enabling target mode only.");
991 
992 			/* This will trigger the driver to reattach */
993 			rval = EAGAIN;
994 		}
995 #endif /* SFCT_SUPPORT */
996 		goto failed1;
997 	}
998 
999 	if ((port->flag & EMLXS_TGT_ENABLED) &&
1000 	    !(mb->un.varReqFeatures.featuresEnabled &
1001 	    SLI4_FEATURE_FCP_TARGET)) {
1002 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1003 		    "Target mode not supported by adapter.");
1004 
1005 		rval = EIO;
1006 
1007 #ifdef SFCT_SUPPORT
1008 		/* Check if we can fall back to just initiator mode */
1009 		if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
1010 		    (mb->un.varReqFeatures.featuresEnabled &
1011 		    SLI4_FEATURE_FCP_INITIATOR) &&
1012 		    (cfg[CFG_DTM_ENABLE].current == 1) &&
1013 		    (cfg[CFG_TARGET_MODE].current == 0)) {
1014 
1015 			cfg[CFG_DTM_ENABLE].current = 0;
1016 
1017 			EMLXS_MSGF(EMLXS_CONTEXT,
1018 			    &emlxs_init_failed_msg,
1019 			    "Disabling dynamic target mode. "
1020 			    "Enabling initiator mode only.");
1021 
1022 			/* This will trigger the driver to reattach */
1023 			rval = EAGAIN;
1024 		}
1025 #endif /* SFCT_SUPPORT */
1026 		goto failed1;
1027 	}
1028 
1029 	if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_NPIV) {
1030 		hba->flag |= FC_NPIV_ENABLED;
1031 	}
1032 
1033 	if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_PERF_HINT) {
1034 		hba->sli.sli4.flag |= EMLXS_SLI4_PHON;
1035 		if (hba->sli.sli4.param.PHWQ) {
1036 			hba->sli.sli4.flag |= EMLXS_SLI4_PHWQ;
1037 		}
1038 	}
1039 
1040 	/* Reuse mbq from previous mbox */
1041 	bzero(mbq, sizeof (MAILBOXQ));
1042 
1043 	emlxs_mb_read_config(hba, mbq);
1044 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1045 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1046 		    "Unable to READ_CONFIG. Mailbox cmd=%x status=%x",
1047 		    mb->mbxCommand, mb->mbxStatus);
1048 
1049 		rval = EIO;
1050 		goto failed1;
1051 	}
1052 	emlxs_data_dump(port, "READ_CONFIG4", (uint32_t *)mb, 18, 0);
1053 
1054 	/* Set default extents */
1055 	hba->sli.sli4.XRICount = mb->un.varRdConfig4.XRICount;
1056 	hba->sli.sli4.XRIExtCount = 1;
1057 	hba->sli.sli4.XRIExtSize = hba->sli.sli4.XRICount;
1058 	hba->sli.sli4.XRIBase[0] = mb->un.varRdConfig4.XRIBase;
1059 
1060 	hba->sli.sli4.RPICount = mb->un.varRdConfig4.RPICount;
1061 	hba->sli.sli4.RPIExtCount = 1;
1062 	hba->sli.sli4.RPIExtSize = hba->sli.sli4.RPICount;
1063 	hba->sli.sli4.RPIBase[0] = mb->un.varRdConfig4.RPIBase;
1064 
1065 	hba->sli.sli4.VPICount = mb->un.varRdConfig4.VPICount;
1066 	hba->sli.sli4.VPIExtCount = 1;
1067 	hba->sli.sli4.VPIExtSize = hba->sli.sli4.VPICount;
1068 	hba->sli.sli4.VPIBase[0] = mb->un.varRdConfig4.VPIBase;
1069 
1070 	hba->sli.sli4.VFICount = mb->un.varRdConfig4.VFICount;
1071 	hba->sli.sli4.VFIExtCount = 1;
1072 	hba->sli.sli4.VFIExtSize = hba->sli.sli4.VFICount;
1073 	hba->sli.sli4.VFIBase[0] = mb->un.varRdConfig4.VFIBase;
1074 
1075 	hba->sli.sli4.FCFICount = mb->un.varRdConfig4.FCFICount;
1076 
1077 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1078 	    "CONFIG: xri:%d rpi:%d vpi:%d vfi:%d fcfi:%d",
1079 	    hba->sli.sli4.XRICount,
1080 	    hba->sli.sli4.RPICount,
1081 	    hba->sli.sli4.VPICount,
1082 	    hba->sli.sli4.VFICount,
1083 	    hba->sli.sli4.FCFICount);
1084 
1085 	if ((hba->sli.sli4.XRICount == 0) ||
1086 	    (hba->sli.sli4.RPICount == 0) ||
1087 	    (hba->sli.sli4.VPICount == 0) ||
1088 	    (hba->sli.sli4.VFICount == 0) ||
1089 	    (hba->sli.sli4.FCFICount == 0)) {
1090 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1091 		    "Invalid extent value(s) - xri:%d rpi:%d vpi:%d "
1092 		    "vfi:%d fcfi:%d",
1093 		    hba->sli.sli4.XRICount,
1094 		    hba->sli.sli4.RPICount,
1095 		    hba->sli.sli4.VPICount,
1096 		    hba->sli.sli4.VFICount,
1097 		    hba->sli.sli4.FCFICount);
1098 
1099 		rval = EIO;
1100 		goto failed1;
1101 	}
1102 
1103 	if (mb->un.varRdConfig4.extents) {
1104 		if (emlxs_sli4_init_extents(hba, mbq)) {
1105 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1106 			    "Unable to initialize extents.");
1107 
1108 			rval = EIO;
1109 			goto failed1;
1110 		}
1111 	}
1112 
1113 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1114 	    "CONFIG: port_name:%c %c %c %c",
1115 	    hba->sli.sli4.port_name[0],
1116 	    hba->sli.sli4.port_name[1],
1117 	    hba->sli.sli4.port_name[2],
1118 	    hba->sli.sli4.port_name[3]);
1119 
1120 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1121 	    "CONFIG: ldv:%d link_type:%d link_number:%d",
1122 	    mb->un.varRdConfig4.ldv,
1123 	    mb->un.varRdConfig4.link_type,
1124 	    mb->un.varRdConfig4.link_number);
1125 
1126 	if (mb->un.varRdConfig4.ldv) {
1127 		hba->sli.sli4.link_number = mb->un.varRdConfig4.link_number;
1128 	} else {
1129 		hba->sli.sli4.link_number = (uint32_t)-1;
1130 	}
1131 
1132 	if (hba->sli.sli4.VPICount) {
1133 		hba->vpi_max = min(hba->sli.sli4.VPICount, MAX_VPORTS) - 1;
1134 	}
1135 
1136 	/* Set the max node count */
1137 	if (cfg[CFG_NUM_NODES].current > 0) {
1138 		hba->max_nodes =
1139 		    min(cfg[CFG_NUM_NODES].current,
1140 		    hba->sli.sli4.RPICount);
1141 	} else {
1142 		hba->max_nodes = hba->sli.sli4.RPICount;
1143 	}
1144 
1145 	/* Set the io throttle */
1146 	hba->io_throttle = hba->sli.sli4.XRICount - IO_THROTTLE_RESERVE;
1147 
1148 	/* Set max_iotag */
1149 	/* We add 1 in case all XRI's are non-zero */
1150 	hba->max_iotag = hba->sli.sli4.XRICount + 1;
1151 
1152 	if (cfg[CFG_NUM_IOTAGS].current) {
1153 		hba->max_iotag = min(hba->max_iotag,
1154 		    (uint16_t)cfg[CFG_NUM_IOTAGS].current);
1155 	}
1156 
1157 	/* Set out-of-range iotag base */
1158 	hba->fc_oor_iotag = hba->max_iotag;
1159 
1160 	/* Save the link speed capabilities */
1161 	vpd->link_speed = (uint16_t)mb->un.varRdConfig4.lmt;
1162 	emlxs_process_link_speed(hba);
1163 
1164 	/*
1165 	 * Allocate some memory for buffers
1166 	 */
1167 	if (emlxs_mem_alloc_buffer(hba) == 0) {
1168 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1169 		    "Unable to allocate memory buffers.");
1170 
1171 		rval = ENOMEM;
1172 		goto failed1;
1173 	}
1174 
1175 	if (emlxs_sli4_resource_alloc(hba)) {
1176 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1177 		    "Unable to allocate resources.");
1178 
1179 		rval = ENOMEM;
1180 		goto failed2;
1181 	}
1182 	emlxs_data_dump(port, "XRIp", (uint32_t *)hba->sli.sli4.XRIp, 18, 0);
1183 	emlxs_sli4_zero_queue_stat(hba);
1184 
1185 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1186 	if ((cfg[CFG_NPIV_ENABLE].current) && (hba->flag & FC_NPIV_ENABLED)) {
1187 		hba->fca_tran->fca_num_npivports = hba->vpi_max;
1188 	}
1189 #endif /* >= EMLXS_MODREV5 */
1190 
1191 	/* Reuse mbq from previous mbox */
1192 	bzero(mbq, sizeof (MAILBOXQ));
1193 
1194 	if (emlxs_sli4_post_sgl_pages(hba, mbq)) {
1195 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1196 		    "Unable to post sgl pages.");
1197 
1198 		rval = EIO;
1199 		goto failed3;
1200 	}
1201 
1202 	/* Reuse mbq from previous mbox */
1203 	bzero(mbq, sizeof (MAILBOXQ));
1204 
1205 	if (emlxs_sli4_post_hdr_tmplates(hba, mbq)) {
1206 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1207 		    "Unable to post header templates.");
1208 
1209 		rval = EIO;
1210 		goto failed3;
1211 	}
1212 
1213 	/*
1214 	 * Add our interrupt routine to kernel's interrupt chain & enable it
1215 	 * If MSI is enabled this will cause Solaris to program the MSI address
1216 	 * and data registers in PCI config space
1217 	 */
1218 	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
1219 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1220 		    "Unable to add interrupt(s).");
1221 
1222 		rval = EIO;
1223 		goto failed3;
1224 	}
1225 
1226 	/* Reuse mbq from previous mbox */
1227 	bzero(mbq, sizeof (MAILBOXQ));
1228 
1229 	/* This MUST be done after EMLXS_INTR_ADD */
1230 	if (emlxs_sli4_create_queues(hba, mbq)) {
1231 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1232 		    "Unable to create queues.");
1233 
1234 		rval = EIO;
1235 		goto failed3;
1236 	}
1237 
1238 	EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
1239 
1240 	/* Get and save the current firmware version (based on sli_mode) */
1241 	emlxs_decode_firmware_rev(hba, vpd);
1242 
1243 
1244 	EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
1245 
1246 	if (SLI4_FC_MODE) {
1247 		/* Reuse mbq from previous mbox */
1248 		bzero(mbq, sizeof (MAILBOXQ));
1249 
1250 		emlxs_mb_config_link(hba, mbq);
1251 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1252 		    MBX_SUCCESS) {
1253 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1254 			    "Unable to configure link. Mailbox cmd=%x "
1255 			    "status=%x",
1256 			    mb->mbxCommand, mb->mbxStatus);
1257 
1258 			rval = EIO;
1259 			goto failed3;
1260 		}
1261 	}
1262 
1263 	/* Reuse mbq from previous mbox */
1264 	bzero(mbq, sizeof (MAILBOXQ));
1265 
1266 	/*
1267 	 * We need to get login parameters for NID
1268 	 */
1269 	(void) emlxs_mb_read_sparam(hba, mbq);
1270 	mp = (MATCHMAP *)mbq->bp;
1271 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1272 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1273 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
1274 		    mb->mbxCommand, mb->mbxStatus);
1275 
1276 		rval = EIO;
1277 		goto failed3;
1278 	}
1279 
1280 	/* Free the buffer since we were polling */
1281 	emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1282 	mp = NULL;
1283 
1284 	/* If no serial number in VPD data, then use the WWPN */
1285 	if (vpd->serial_num[0] == 0) {
1286 		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
1287 		for (i = 0; i < 12; i++) {
1288 			status = *outptr++;
1289 			j = ((status & 0xf0) >> 4);
1290 			if (j <= 9) {
1291 				vpd->serial_num[i] =
1292 				    (char)((uint8_t)'0' + (uint8_t)j);
1293 			} else {
1294 				vpd->serial_num[i] =
1295 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1296 			}
1297 
1298 			i++;
1299 			j = (status & 0xf);
1300 			if (j <= 9) {
1301 				vpd->serial_num[i] =
1302 				    (char)((uint8_t)'0' + (uint8_t)j);
1303 			} else {
1304 				vpd->serial_num[i] =
1305 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1306 			}
1307 		}
1308 
1309 		/*
1310 		 * Set port number and port index to zero
1311 		 * The WWN's are unique to each port and therefore port_num
1312 		 * must equal zero. This effects the hba_fru_details structure
1313 		 * in fca_bind_port()
1314 		 */
1315 		vpd->port_num[0] = 0;
1316 		vpd->port_index = 0;
1317 
1318 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1319 		    "CONFIG: WWPN: port_index=0");
1320 	}
1321 
1322 	/* Make final attempt to set a port index */
1323 	if (vpd->port_index == (uint32_t)-1) {
1324 		dev_info_t *p_dip;
1325 		dev_info_t *c_dip;
1326 
1327 		p_dip = ddi_get_parent(hba->dip);
1328 		c_dip = ddi_get_child(p_dip);
1329 
1330 		vpd->port_index = 0;
1331 		while (c_dip && (hba->dip != c_dip)) {
1332 			c_dip = ddi_get_next_sibling(c_dip);
1333 
1334 			if (strcmp(ddi_get_name(c_dip), "ethernet") == 0) {
1335 				continue;
1336 			}
1337 
1338 			vpd->port_index++;
1339 		}
1340 
1341 		EMLXS_MSGF(EMLXS_CONTEXT,
1342 		    &emlxs_init_debug_msg,
1343 		    "CONFIG: Device tree: port_index=%d",
1344 		    vpd->port_index);
1345 	}
1346 
1347 	if (vpd->port_num[0] == 0) {
1348 		if (hba->model_info.channels == EMLXS_MULTI_CHANNEL) {
1349 			(void) snprintf(vpd->port_num,
1350 			    (sizeof (vpd->port_num)-1),
1351 			    "%d", vpd->port_index);
1352 		}
1353 	}
1354 
1355 	if (vpd->id[0] == 0) {
1356 		(void) snprintf(vpd->id, (sizeof (vpd->id)-1),
1357 		    "%s %d",
1358 		    hba->model_info.model_desc, vpd->port_index);
1359 
1360 	}
1361 
1362 	if (vpd->manufacturer[0] == 0) {
1363 		(void) strncpy(vpd->manufacturer, hba->model_info.manufacturer,
1364 		    (sizeof (vpd->manufacturer)-1));
1365 	}
1366 
1367 	if (vpd->part_num[0] == 0) {
1368 		(void) strncpy(vpd->part_num, hba->model_info.model,
1369 		    (sizeof (vpd->part_num)-1));
1370 	}
1371 
1372 	if (vpd->model_desc[0] == 0) {
1373 		(void) snprintf(vpd->model_desc, (sizeof (vpd->model_desc)-1),
1374 		    "%s %d",
1375 		    hba->model_info.model_desc, vpd->port_index);
1376 	}
1377 
1378 	if (vpd->model[0] == 0) {
1379 		(void) strncpy(vpd->model, hba->model_info.model,
1380 		    (sizeof (vpd->model)-1));
1381 	}
1382 
1383 	if (vpd->prog_types[0] == 0) {
1384 		emlxs_build_prog_types(hba, vpd);
1385 	}
1386 
1387 	/* Create the symbolic names */
1388 	(void) snprintf(hba->snn, (sizeof (hba->snn)-1),
1389 	    "%s %s FV%s DV%s %s",
1390 	    hba->model_info.manufacturer, hba->model_info.model,
1391 	    hba->vpd.fw_version, emlxs_version,
1392 	    (char *)utsname.nodename);
1393 
1394 	(void) snprintf(hba->spn, (sizeof (hba->spn)-1),
1395 	    "%s PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1396 	    hba->model_info.manufacturer,
1397 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1398 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1399 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1400 
1401 
1402 	EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1403 	emlxs_sli4_enable_intr(hba);
1404 
1405 	/* Check persist-linkdown */
1406 	if (cfg[CFG_PERSIST_LINKDOWN].current) {
1407 		EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1408 		goto done;
1409 	}
1410 
1411 #ifdef SFCT_SUPPORT
1412 	if ((port->mode == MODE_TARGET) &&
1413 	    !(port->fct_flags & FCT_STATE_PORT_ONLINE)) {
1414 		goto done;
1415 	}
1416 #endif /* SFCT_SUPPORT */
1417 
1418 	/* Reuse mbq from previous mbox */
1419 	bzero(mbq, sizeof (MAILBOXQ));
1420 
1421 	/*
1422 	 * Interrupts are enabled, start the timeout timers now.
1423 	 */
1424 	emlxs_timer_start(hba);
1425 
1426 	/*
1427 	 * Setup and issue mailbox INITIALIZE LINK command
1428 	 * At this point, the interrupt will be generated by the HW
1429 	 */
1430 	emlxs_mb_init_link(hba, mbq,
1431 	    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1432 
1433 	rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0);
1434 	if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
1435 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1436 		    "Unable to initialize link. "
1437 		    "Mailbox cmd=%x status=%x",
1438 		    mb->mbxCommand, mb->mbxStatus);
1439 
1440 		rval = EIO;
1441 		goto failed4;
1442 	}
1443 
1444 	/* Wait for link to come up */
1445 	i = cfg[CFG_LINKUP_DELAY].current;
1446 	while (i && (hba->state < FC_LINK_UP)) {
1447 		/* Check for hardware error */
1448 		if (hba->state == FC_ERROR) {
1449 			EMLXS_MSGF(EMLXS_CONTEXT,
1450 			    &emlxs_init_failed_msg,
1451 			    "Adapter error.");
1452 
1453 			rval = EIO;
1454 			goto failed4;
1455 		}
1456 
1457 		BUSYWAIT_MS(1000);
1458 		i--;
1459 	}
1460 	if (i == 0) {
1461 		EMLXS_MSGF(EMLXS_CONTEXT,
1462 		    &emlxs_init_msg,
1463 		    "Link up timeout");
1464 	}
1465 
1466 done:
1467 	/*
1468 	 * The leadville driver will now handle the FLOGI at the driver level
1469 	 */
1470 
1471 	if (mbq) {
1472 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1473 		mbq = NULL;
1474 		mb = NULL;
1475 	}
1476 
1477 	if (hba->model_info.flags & EMLXS_GPIO_LEDS)
1478 		emlxs_sli4_gpio_timer_start(hba);
1479 
1480 	return (0);
1481 
1482 failed4:
1483 	emlxs_timer_stop(hba);
1484 
1485 failed3:
1486 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1487 
1488 	if (mp) {
1489 		emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1490 		mp = NULL;
1491 	}
1492 
1493 
1494 	if (hba->intr_flags & EMLXS_MSI_ADDED) {
1495 		(void) EMLXS_INTR_REMOVE(hba);
1496 	}
1497 
1498 	emlxs_sli4_resource_free(hba);
1499 
1500 failed2:
1501 	(void) emlxs_mem_free_buffer(hba);
1502 
1503 failed1:
1504 	if (mbq) {
1505 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1506 		mbq = NULL;
1507 		mb = NULL;
1508 	}
1509 
1510 	if (hba->sli.sli4.dump_region.virt) {
1511 		mutex_enter(&EMLXS_PORT_LOCK);
1512 		(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1513 		mutex_exit(&EMLXS_PORT_LOCK);
1514 	}
1515 
1516 	if (rval == 0) {
1517 		rval = EIO;
1518 	}
1519 
1520 	return (rval);
1521 
1522 } /* emlxs_sli4_online() */
1523 
1524 
1525 static void
emlxs_sli4_offline(emlxs_hba_t * hba,uint32_t reset_requested)1526 emlxs_sli4_offline(emlxs_hba_t *hba, uint32_t reset_requested)
1527 {
1528 	/* Reverse emlxs_sli4_online */
1529 
1530 	if (hba->model_info.flags & EMLXS_GPIO_LEDS)
1531 		emlxs_sli4_gpio_timer_stop(hba);
1532 
1533 	mutex_enter(&EMLXS_PORT_LOCK);
1534 	if (hba->flag & FC_INTERLOCKED) {
1535 		mutex_exit(&EMLXS_PORT_LOCK);
1536 		goto killed;
1537 	}
1538 	mutex_exit(&EMLXS_PORT_LOCK);
1539 
1540 	if (reset_requested) {
1541 		(void) emlxs_sli4_hba_reset(hba, 0, 0, 0);
1542 	}
1543 
1544 	/* Shutdown the adapter interface */
1545 	emlxs_sli4_hba_kill(hba);
1546 
1547 killed:
1548 
1549 	/* Free SLI shared memory */
1550 	emlxs_sli4_resource_free(hba);
1551 
1552 	/* Free driver shared memory */
1553 	(void) emlxs_mem_free_buffer(hba);
1554 
1555 	/* Free the host dump region buffer */
1556 	mutex_enter(&EMLXS_PORT_LOCK);
1557 	(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1558 	mutex_exit(&EMLXS_PORT_LOCK);
1559 
1560 } /* emlxs_sli4_offline() */
1561 
1562 static int
emlxs_map_g7_bars(emlxs_hba_t * hba)1563 emlxs_map_g7_bars(emlxs_hba_t *hba)
1564 {
1565 	emlxs_port_t		*port = &PPORT;
1566 	dev_info_t		*dip;
1567 	ddi_device_acc_attr_t	dev_attr = emlxs_dev_acc_attr;
1568 	uint_t			num_prop;
1569 	pci_regspec_t		*prop;
1570 	int 			rnum, type, size, rcount, r;
1571 
1572 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hba->dip, 0,
1573 	    "reg", (int **)&prop, &num_prop) != DDI_PROP_SUCCESS) {
1574 		return (0);
1575 	}
1576 
1577 	dip = (dev_info_t *)hba->dip;
1578 	rcount = num_prop * sizeof (int) / sizeof (pci_regspec_t);
1579 	for (r = 0; r < rcount; r++) {
1580 		rnum = PCI_REG_REG_G(prop[r].pci_phys_hi);
1581 		type = PCI_ADDR_MASK &prop[r].pci_phys_hi;
1582 		size = prop[r].pci_size_low;
1583 		EMLXS_MSGF(EMLXS_CONTEXT,
1584 		    &emlxs_init_debug_msg,
1585 		    "PCI_BAR%x regaddr=%x type=%x size=%x",
1586 		    r, rnum, PCI_REG_ADDR_G(type), size);
1587 		if (type < PCI_ADDR_MEM32) {
1588 			/* config or IO reg address */
1589 			continue;
1590 		}
1591 		/* MEM reg address */
1592 		caddr_t addr;
1593 		ddi_acc_handle_t handle;
1594 		int status;
1595 
1596 		status = ddi_regs_map_setup(dip, r,
1597 		    (caddr_t *)&addr, 0, 0, &dev_attr,
1598 		    &handle);
1599 		if (status != DDI_SUCCESS) {
1600 			EMLXS_MSGF(EMLXS_CONTEXT,
1601 			    &emlxs_attach_failed_msg,
1602 			    "ddi_regs_map_setup BAR%d failed."
1603 			    "  status=%x",
1604 			    r, status);
1605 			ddi_prop_free((void *)prop);
1606 			return (0);
1607 		}
1608 		switch (r-1) {
1609 		case 0:
1610 			if (hba->sli.sli4.bar0_acc_handle == 0) {
1611 				hba->sli.sli4.bar0_addr = addr;
1612 				hba->sli.sli4.bar0_acc_handle =
1613 				    handle;
1614 			}
1615 			break;
1616 		case 1:
1617 			if (hba->sli.sli4.bar1_acc_handle == 0) {
1618 				hba->sli.sli4.bar1_addr = addr;
1619 				hba->sli.sli4.bar1_acc_handle =
1620 				    handle;
1621 			}
1622 			break;
1623 		case 2:
1624 			if (hba->sli.sli4.bar2_acc_handle == 0) {
1625 				hba->sli.sli4.bar2_addr = addr;
1626 				hba->sli.sli4.bar2_acc_handle =
1627 				    handle;
1628 			}
1629 			break;
1630 		}
1631 	}
1632 	ddi_prop_free((void *)prop);
1633 	return (num_prop > 0);
1634 }
1635 
/*
 * emlxs_sli4_map_hdw()
 *
 * Map the hardware register BARs for this SLI-4 adapter and compute the
 * per-register doorbell/status addresses, based on the SLI interface
 * type (if_type 0, 2 or 6).  Also allocates the bootstrap mailbox DMA
 * buffer if it does not already exist.
 *
 * Returns 0 on success, ENOMEM on any mapping/allocation failure
 * (after unmapping whatever was set up).
 */
/*ARGSUSED*/
static int
emlxs_sli4_map_hdw(emlxs_hba_t *hba)
{
	emlxs_port_t		*port = &PPORT;
	dev_info_t		*dip;
	ddi_device_acc_attr_t	dev_attr;
	int			status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:

		/* Map in Hardware BAR pages that will be used for */
		/* communication with HBA. */
		if (hba->sli.sli4.bar1_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR1_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar1_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar1_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup BAR1 failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli4.bar1_addr, &dev_attr,
				    &hba->sli.sli4.bar1_acc_handle);
				goto failed;
			}
		}

		if (hba->sli.sli4.bar2_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR2_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar2_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar2_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "ddi_regs_map_setup BAR2 failed. status=%x",
				    status);
				goto failed;
			}
		}

		/* offset from beginning of register space */
		/* if_type 0: semaphore in BAR1, doorbells in BAR2 */
		hba->sli.sli4.MPUEPSemaphore_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar1_addr +
		    CSR_MPU_EP_SEMAPHORE_OFFSET);
		hba->sli.sli4.MBDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MB_DB_OFFSET);
		hba->sli.sli4.CQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_CQ_DB_OFFSET);
		hba->sli.sli4.MQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MQ_DB_OFFSET);
		hba->sli.sli4.WQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_WQ_DB_OFFSET);
		hba->sli.sli4.RQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_RQ_DB_OFFSET);

		/* if_type 0 has no SLIPORT status/control registers */
		hba->sli.sli4.STATUS_reg_addr = 0;
		hba->sli.sli4.CNTL_reg_addr = 0;

		/* Error registers live in PCI config space on if_type 0 */
		hba->sli.sli4.ERR1_reg_addr =
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET);
		hba->sli.sli4.ERR2_reg_addr =
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET);

		hba->sli.sli4.PHYSDEV_reg_addr = 0;
		break;

	case SLI_INTF_IF_TYPE_2: /* Lancer FC */

		/* Map in Hardware BAR pages that will be used for */
		/* communication with HBA. */
		if (hba->sli.sli4.bar0_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR0_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar0_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar0_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup BAR0 failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli4.bar0_addr, &dev_attr,
				    &hba->sli.sli4.bar0_acc_handle);
				goto failed;
			}
		}

		/* offset from beginning of register space */
		/* if_type 2: everything is in BAR0 */
		hba->sli.sli4.MPUEPSemaphore_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_SEMAPHORE_OFFSET);
		hba->sli.sli4.MBDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_MB_DB_OFFSET);
		hba->sli.sli4.CQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_CQ_DB_OFFSET);
		hba->sli.sli4.MQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_MQ_DB_OFFSET);
		hba->sli.sli4.WQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_WQ_DB_OFFSET);
		hba->sli.sli4.RQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_RQ_DB_OFFSET);

		hba->sli.sli4.STATUS_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_STATUS_OFFSET);
		hba->sli.sli4.CNTL_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_CONTROL_OFFSET);
		hba->sli.sli4.ERR1_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR1_OFFSET);
		hba->sli.sli4.ERR2_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR2_OFFSET);
		hba->sli.sli4.PHYSDEV_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    PHYSDEV_CONTROL_OFFSET);

		break;
	case SLI_INTF_IF_TYPE_6:
		/* Map in Hardware BAR pages that will be used for */
		/* communication with HBA. */
		if (!emlxs_map_g7_bars(hba))
			goto failed;
		/* offset from beginning of register space */
		/* if_type 6 (G7): control/status in BAR0, doorbells in BAR1 */
		hba->sli.sli4.MPUEPSemaphore_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_SEMAPHORE_OFFSET);
		hba->sli.sli4.MBDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_MB_DB_OFFSET);
		hba->sli.sli4.EQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar1_addr + PD_IF6_EQ_DB_OFFSET);
		hba->sli.sli4.CQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar1_addr + PD_IF6_CQ_DB_OFFSET);
		hba->sli.sli4.MQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar1_addr + PD_IF6_MQ_DB_OFFSET);
		hba->sli.sli4.WQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar1_addr + PD_IF6_WQ_DB_OFFSET);
		hba->sli.sli4.RQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar1_addr + PD_IF6_RQ_DB_OFFSET);

		hba->sli.sli4.STATUS_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_STATUS_OFFSET);
		hba->sli.sli4.CNTL_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_CONTROL_OFFSET);
		hba->sli.sli4.ERR1_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR1_OFFSET);
		hba->sli.sli4.ERR2_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR2_OFFSET);
		hba->sli.sli4.PHYSDEV_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    PHYSDEV_CONTROL_OFFSET);

		break;
	case SLI_INTF_IF_TYPE_1:
	case SLI_INTF_IF_TYPE_3:
	default:
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_attach_failed_msg,
		    "Map hdw: Unsupported if_type %08x",
		    (hba->sli_intf & SLI_INTF_IF_TYPE_MASK));

		goto failed;
	}

	/* Allocate the bootstrap mailbox DMA buffer on first map */
	if (hba->sli.sli4.bootstrapmb.virt == 0) {
		MBUF_INFO	*buf_info;
		MBUF_INFO	bufinfo;

		buf_info = &bufinfo;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE;
		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
		/* Page-aligned, as the bootstrap mailbox requires */
		buf_info->align = ddi_ptob(dip, 1L);

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			goto failed;
		}

		hba->sli.sli4.bootstrapmb.virt = buf_info->virt;
		hba->sli.sli4.bootstrapmb.phys = buf_info->phys;
		hba->sli.sli4.bootstrapmb.size = EMLXS_BOOTSTRAP_MB_SIZE +
		    MBOX_EXTENSION_SIZE;
		hba->sli.sli4.bootstrapmb.data_handle = buf_info->data_handle;
		hba->sli.sli4.bootstrapmb.dma_handle = buf_info->dma_handle;
		/* Only the base mailbox area is zeroed, not the extension */
		bzero((char *)hba->sli.sli4.bootstrapmb.virt,
		    EMLXS_BOOTSTRAP_MB_SIZE);
	}

	hba->chan_count = MAX_CHANNEL;

	return (0);

failed:

	/* Undo any partial mapping/allocation */
	emlxs_sli4_unmap_hdw(hba);
	return (ENOMEM);


} /* emlxs_sli4_map_hdw() */
1846 
1847 
1848 /*ARGSUSED*/
1849 static void
emlxs_sli4_unmap_hdw(emlxs_hba_t * hba)1850 emlxs_sli4_unmap_hdw(emlxs_hba_t *hba)
1851 {
1852 	MBUF_INFO	bufinfo;
1853 	MBUF_INFO	*buf_info = &bufinfo;
1854 
1855 
1856 	if (hba->sli.sli4.bar0_acc_handle) {
1857 		ddi_regs_map_free(&hba->sli.sli4.bar0_acc_handle);
1858 		hba->sli.sli4.bar0_acc_handle = 0;
1859 	}
1860 
1861 	if (hba->sli.sli4.bar1_acc_handle) {
1862 		ddi_regs_map_free(&hba->sli.sli4.bar1_acc_handle);
1863 		hba->sli.sli4.bar1_acc_handle = 0;
1864 	}
1865 
1866 	if (hba->sli.sli4.bar2_acc_handle) {
1867 		ddi_regs_map_free(&hba->sli.sli4.bar2_acc_handle);
1868 		hba->sli.sli4.bar2_acc_handle = 0;
1869 	}
1870 
1871 	if (hba->sli.sli4.bootstrapmb.virt) {
1872 		bzero(buf_info, sizeof (MBUF_INFO));
1873 
1874 		if (hba->sli.sli4.bootstrapmb.phys) {
1875 			buf_info->phys = hba->sli.sli4.bootstrapmb.phys;
1876 			buf_info->data_handle =
1877 			    hba->sli.sli4.bootstrapmb.data_handle;
1878 			buf_info->dma_handle =
1879 			    hba->sli.sli4.bootstrapmb.dma_handle;
1880 			buf_info->flags = FC_MBUF_DMA;
1881 		}
1882 
1883 		buf_info->virt = hba->sli.sli4.bootstrapmb.virt;
1884 		buf_info->size = hba->sli.sli4.bootstrapmb.size;
1885 		emlxs_mem_free(hba, buf_info);
1886 
1887 		hba->sli.sli4.bootstrapmb.virt = NULL;
1888 	}
1889 
1890 	return;
1891 
1892 } /* emlxs_sli4_unmap_hdw() */
1893 
1894 
/*
 * emlxs_check_hdw_ready()
 *
 * Poll the adapter (up to 30 seconds, one read per second) until it
 * reports ready after a reset.  The register polled depends on the SLI
 * interface type: the MPU/EP semaphore for if_type 0, the SLIPORT
 * status register for if_type 2/6.
 *
 * Returns:
 *   0 - adapter ready
 *   1 - fatal/unrecoverable POST error (if_type 0), or port error with
 *       "reset needed" set (if_type 2/6)
 *   2 - unrecoverable port error (if_type 2/6)
 *   3 - unsupported if_type, or timeout waiting for ready
 */
static int
emlxs_check_hdw_ready(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t i = 0;
	uint32_t err1;
	uint32_t err2;

	/* Wait for reset completion */
	while (i < 30) {

		switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
		case SLI_INTF_IF_TYPE_0:
			status = emlxs_sli4_read_sema(hba);

			/* Check to see if any errors occurred during init */
			if (status & ARM_POST_FATAL) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "SEMA Error: status=%x", status);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (1);
			}

			/* All ARM_UNRECOVERABLE_ERROR bits must be set */
			if ((status & ARM_UNRECOVERABLE_ERROR) ==
			    ARM_UNRECOVERABLE_ERROR) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Unrecoverable Error: status=%x", status);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (1);
			}

			if ((status & ARM_POST_MASK) == ARM_POST_READY) {
				/* ARM Ready !! */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sli_detail_msg,
				    "ARM Ready: status=%x", status);

				return (0);
			}
			break;

		case SLI_INTF_IF_TYPE_2:
		case SLI_INTF_IF_TYPE_6:
			status = emlxs_sli4_read_status(hba);

			if (status & SLI_STATUS_READY) {
				if (!(status & SLI_STATUS_ERROR)) {
					/* ARM Ready !! */
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_detail_msg,
					    "ARM Ready: status=%x", status);

					return (0);
				}

				/* Ready but in error: fetch error details */
				err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
				    hba->sli.sli4.ERR1_reg_addr);
				err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
				    hba->sli.sli4.ERR2_reg_addr);

				if (status & SLI_STATUS_RESET_NEEDED) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_detail_msg,
					    "ARM Ready (Reset Needed): "
					    "status=%x err1=%x "
					    "err2=%x",
					    status, err1, err2);

					return (1);
				}

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Unrecoverable Error: status=%x err1=%x "
				    "err2=%x",
				    status, err1, err2);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (2);
			}

			break;

		default:
			EMLXS_STATE_CHANGE(hba, FC_ERROR);

			return (3);
		}

		BUSYWAIT_MS(1000);
		i++;
	}

	/* Timeout occurred: read the error registers for the log message. */
	/* On if_type 0 they live in PCI config space, elsewhere in BAR0. */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		err1 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;

	default:
		err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;
	}

	/*
	 * NOTE(review): 'status' here is the value read on the last poll
	 * iteration.  For if_type 0 it holds the semaphore register, so
	 * testing SLI_STATUS_ERROR (a SLIPORT-status bit) against it may
	 * not be meaningful -- confirm intended behavior.
	 */
	if (status & SLI_STATUS_ERROR) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Ready Timeout: Port Error: status=%x err1=%x err2=%x",
		    status, err1, err2);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Ready Timeout: status=%x err1=%x err2=%x",
		    status, err1, err2);
	}

	EMLXS_STATE_CHANGE(hba, FC_ERROR);

	return (3);

} /* emlxs_check_hdw_ready() */
2028 
2029 
2030 static uint32_t
emlxs_sli4_read_status(emlxs_hba_t * hba)2031 emlxs_sli4_read_status(emlxs_hba_t *hba)
2032 {
2033 #ifdef FMA_SUPPORT
2034 	emlxs_port_t *port = &PPORT;
2035 #endif  /* FMA_SUPPORT */
2036 	uint32_t status;
2037 
2038 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2039 	case SLI_INTF_IF_TYPE_2:
2040 	case SLI_INTF_IF_TYPE_6:
2041 		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
2042 		    hba->sli.sli4.STATUS_reg_addr);
2043 #ifdef FMA_SUPPORT
2044 		/* Access handle validation */
2045 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
2046 #endif  /* FMA_SUPPORT */
2047 		break;
2048 	default:
2049 		status = 0;
2050 		break;
2051 	}
2052 
2053 	return (status);
2054 
2055 } /* emlxs_sli4_read_status() */
2056 
2057 
2058 static uint32_t
emlxs_sli4_read_sema(emlxs_hba_t * hba)2059 emlxs_sli4_read_sema(emlxs_hba_t *hba)
2060 {
2061 #ifdef FMA_SUPPORT
2062 	emlxs_port_t *port = &PPORT;
2063 #endif  /* FMA_SUPPORT */
2064 	uint32_t status;
2065 
2066 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2067 	case SLI_INTF_IF_TYPE_0:
2068 		status = ddi_get32(hba->sli.sli4.bar1_acc_handle,
2069 		    hba->sli.sli4.MPUEPSemaphore_reg_addr);
2070 #ifdef FMA_SUPPORT
2071 		/* Access handle validation */
2072 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar1_acc_handle);
2073 #endif  /* FMA_SUPPORT */
2074 		break;
2075 
2076 	case SLI_INTF_IF_TYPE_2:
2077 	case SLI_INTF_IF_TYPE_6:
2078 		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
2079 		    hba->sli.sli4.MPUEPSemaphore_reg_addr);
2080 #ifdef FMA_SUPPORT
2081 		/* Access handle validation */
2082 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
2083 #endif  /* FMA_SUPPORT */
2084 		break;
2085 	default:
2086 		status = 0;
2087 		break;
2088 	}
2089 
2090 	return (status);
2091 
2092 } /* emlxs_sli4_read_sema() */
2093 
2094 
2095 static uint32_t
emlxs_sli4_read_mbdb(emlxs_hba_t * hba)2096 emlxs_sli4_read_mbdb(emlxs_hba_t *hba)
2097 {
2098 #ifdef FMA_SUPPORT
2099 	emlxs_port_t *port = &PPORT;
2100 #endif  /* FMA_SUPPORT */
2101 	uint32_t status;
2102 
2103 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2104 	case SLI_INTF_IF_TYPE_0:
2105 		status = ddi_get32(hba->sli.sli4.bar2_acc_handle,
2106 		    hba->sli.sli4.MBDB_reg_addr);
2107 
2108 #ifdef FMA_SUPPORT
2109 		/* Access handle validation */
2110 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar2_acc_handle);
2111 #endif  /* FMA_SUPPORT */
2112 		break;
2113 
2114 	case SLI_INTF_IF_TYPE_2:
2115 	case SLI_INTF_IF_TYPE_6:
2116 		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
2117 		    hba->sli.sli4.MBDB_reg_addr);
2118 #ifdef FMA_SUPPORT
2119 		/* Access handle validation */
2120 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
2121 #endif  /* FMA_SUPPORT */
2122 		break;
2123 	default:
2124 		status = 0;
2125 		break;
2126 	}
2127 
2128 	return (status);
2129 
2130 } /* emlxs_sli4_read_mbdb() */
2131 
2132 
2133 static void
emlxs_sli4_write_mbdb(emlxs_hba_t * hba,uint64_t phys,boolean_t high)2134 emlxs_sli4_write_mbdb(emlxs_hba_t *hba, uint64_t phys, boolean_t high)
2135 {
2136 	uint32_t db;
2137 	uint_t shift;
2138 
2139 	/*
2140 	 * The bootstrap mailbox is posted as 2 x 30 bit values.
2141 	 * It is required to be 16 bit aligned, and the 2 low order
2142 	 * bits are used as flags.
2143 	 */
2144 	shift = high ? 32 : 2;
2145 
2146 	db = (uint32_t)(phys >> shift) & BMBX_ADDR;
2147 
2148 	if (high)
2149 		db |= BMBX_ADDR_HI;
2150 
2151 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2152 	case SLI_INTF_IF_TYPE_0:
2153 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
2154 		    hba->sli.sli4.MBDB_reg_addr, db);
2155 		break;
2156 
2157 	case SLI_INTF_IF_TYPE_2:
2158 	case SLI_INTF_IF_TYPE_6:
2159 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2160 		    hba->sli.sli4.MBDB_reg_addr, db);
2161 		break;
2162 	}
2163 
2164 } /* emlxs_sli4_write_mbdb() */
2165 
2166 
2167 static void
emlxs_sli4_write_eqdb(emlxs_hba_t * hba,uint16_t qid,uint32_t count,boolean_t arm)2168 emlxs_sli4_write_eqdb(emlxs_hba_t *hba, uint16_t qid, uint32_t count,
2169     boolean_t arm)
2170 {
2171 	emlxs_eqdb_u	db;
2172 	db.word = 0;
2173 
2174 	/*
2175 	 * Add the qid to the doorbell. It is split into a low and
2176 	 * high component.
2177 	 */
2178 
2179 	if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) == SLI_INTF_IF_TYPE_6) {
2180 		db.db6.Qid = qid;
2181 		db.db6.NumPopped = count;
2182 		db.db6.Rearm = arm;
2183 	} else {
2184 		/* Initialize with the low bits */
2185 		db.db2.Qid = qid & EQ_DB_ID_LO_MASK;
2186 
2187 		/* Add the high bits */
2188 		db.db2.Qid_hi = (qid >> EQ_ID_LO_BITS) & 0x1f;
2189 
2190 		/*
2191 		 * Include the number of entries to be popped.
2192 		 */
2193 		db.db2.NumPopped = count;
2194 
2195 		/* The doorbell is for an event queue */
2196 		db.db2.Event = B_TRUE;
2197 
2198 		/* Arm if asked to do so */
2199 		if (arm)
2200 			/* Clear only on not AutoValid EqAV */
2201 			db.db2.Clear = B_TRUE;
2202 		db.db2.Rearm = arm;
2203 	}
2204 
2205 #ifdef DEBUG_FASTPATH
2206 	EMLXS_MSGF(&hba->port[0], _FILENO_, __LINE__, &emlxs_sli_detail_msg,
2207 	    "EQE: CLEAR db=%08x pops=%d", db, count);
2208 #endif /* DEBUG_FASTPATH */
2209 
2210 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2211 	case SLI_INTF_IF_TYPE_0:
2212 		/* The CQDB_reg_addr is also use for EQs */
2213 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
2214 		    hba->sli.sli4.CQDB_reg_addr, db.word);
2215 		break;
2216 
2217 	case SLI_INTF_IF_TYPE_2:
2218 		/* The CQDB_reg_addr is also use for EQs */
2219 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2220 		    hba->sli.sli4.CQDB_reg_addr, db.word);
2221 		break;
2222 
2223 	case SLI_INTF_IF_TYPE_6:
2224 		ddi_put32(hba->sli.sli4.bar1_acc_handle,
2225 		    hba->sli.sli4.EQDB_reg_addr, db.word);
2226 		break;
2227 
2228 	}
2229 } /* emlxs_sli4_write_eqdb() */
2230 
2231 static void
emlxs_sli4_write_cqdb(emlxs_hba_t * hba,uint16_t qid,uint32_t count,boolean_t arm)2232 emlxs_sli4_write_cqdb(emlxs_hba_t *hba, uint16_t qid, uint32_t count,
2233     boolean_t arm)
2234 {
2235 	emlxs_cqdb_u	db;
2236 	db.word = 0;
2237 
2238 	/*
2239 	 * Add the qid to the doorbell. It is split into a low and
2240 	 * high component.
2241 	 */
2242 
2243 	if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) == SLI_INTF_IF_TYPE_6) {
2244 		db.db6.Qid = qid;
2245 		db.db6.NumPopped = count;
2246 		db.db6.Rearm = arm;
2247 	} else {
2248 		/* Initialize with the low bits */
2249 		db.db2.Qid = qid & CQ_DB_ID_LO_MASK;
2250 
2251 		/* Add the high bits */
2252 		db.db2.Qid_hi = (qid >> CQ_ID_LO_BITS) & 0x1f;
2253 
2254 		/*
2255 		 * Include the number of entries to be popped.
2256 		 */
2257 		db.db2.NumPopped = count;
2258 
2259 		/* Arm if asked to do so */
2260 		db.db2.Rearm = arm;
2261 	}
2262 #ifdef DEBUG_FASTPATH
2263 	EMLXS_MSGF(&hba->port[0], _FILENO_, __LINE__, &emlxs_sli_detail_msg,
2264 	    "CQE: db=%08x: pops=%d", db, count);
2265 #endif /* DEBUG_FASTPATH */
2266 
2267 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2268 	case SLI_INTF_IF_TYPE_0:
2269 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
2270 		    hba->sli.sli4.CQDB_reg_addr, db.word);
2271 		break;
2272 
2273 	case SLI_INTF_IF_TYPE_2:
2274 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2275 		    hba->sli.sli4.CQDB_reg_addr, db.word);
2276 		break;
2277 
2278 	case SLI_INTF_IF_TYPE_6:
2279 		ddi_put32(hba->sli.sli4.bar1_acc_handle,
2280 		    hba->sli.sli4.CQDB_reg_addr, db.word);
2281 		break;
2282 	}
2283 } /* emlxs_sli4_write_cqdb() */
2284 
2285 
2286 static void
emlxs_sli4_write_rqdb(emlxs_hba_t * hba,uint16_t qid,uint_t count)2287 emlxs_sli4_write_rqdb(emlxs_hba_t *hba, uint16_t qid, uint_t count)
2288 {
2289 	emlxs_rqdbu_t rqdb;
2290 
2291 	rqdb.word = 0;
2292 	rqdb.db.Qid = qid;
2293 	rqdb.db.NumPosted = count;
2294 
2295 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2296 	case SLI_INTF_IF_TYPE_0:
2297 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
2298 		    hba->sli.sli4.RQDB_reg_addr, rqdb.word);
2299 		break;
2300 
2301 	case SLI_INTF_IF_TYPE_2:
2302 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2303 		    hba->sli.sli4.RQDB_reg_addr, rqdb.word);
2304 		break;
2305 
2306 	case SLI_INTF_IF_TYPE_6:
2307 		ddi_put32(hba->sli.sli4.bar1_acc_handle,
2308 		    hba->sli.sli4.RQDB_reg_addr, rqdb.word);
2309 		break;
2310 
2311 	}
2312 
2313 } /* emlxs_sli4_write_rqdb() */
2314 
2315 
2316 static void
emlxs_sli4_write_mqdb(emlxs_hba_t * hba,uint16_t qid,uint_t count)2317 emlxs_sli4_write_mqdb(emlxs_hba_t *hba, uint16_t qid, uint_t count)
2318 {
2319 	uint32_t db;
2320 
2321 	db = qid;
2322 	db |= (count << MQ_DB_POP_SHIFT) & MQ_DB_POP_MASK;
2323 
2324 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2325 	case SLI_INTF_IF_TYPE_0:
2326 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
2327 		    hba->sli.sli4.MQDB_reg_addr, db);
2328 		break;
2329 
2330 	case SLI_INTF_IF_TYPE_2:
2331 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2332 		    hba->sli.sli4.MQDB_reg_addr, db);
2333 		break;
2334 	case SLI_INTF_IF_TYPE_6:
2335 		ddi_put32(hba->sli.sli4.bar1_acc_handle,
2336 		    hba->sli.sli4.MQDB_reg_addr, db);
2337 		break;
2338 	}
2339 
2340 } /* emlxs_sli4_write_mqdb() */
2341 
2342 
2343 static void
emlxs_sli4_write_wqdb(emlxs_hba_t * hba,uint16_t qid,uint_t posted,uint_t index)2344 emlxs_sli4_write_wqdb(emlxs_hba_t *hba, uint16_t qid, uint_t posted,
2345     uint_t index)
2346 {
2347 	uint32_t db;
2348 
2349 	db = qid;
2350 	db |= (posted << WQ_DB_POST_SHIFT) & WQ_DB_POST_MASK;
2351 
2352 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2353 	case SLI_INTF_IF_TYPE_0:
2354 		db |= (index << WQ_DB_IDX_SHIFT) & WQ_DB_IDX_MASK;
2355 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
2356 		    hba->sli.sli4.WQDB_reg_addr, db);
2357 		break;
2358 
2359 	case SLI_INTF_IF_TYPE_2:
2360 		db |= (index << WQ_DB_IDX_SHIFT) & WQ_DB_IDX_MASK;
2361 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2362 		    hba->sli.sli4.WQDB_reg_addr, db);
2363 		break;
2364 
2365 	case SLI_INTF_IF_TYPE_6:
2366 		ddi_put32(hba->sli.sli4.bar1_acc_handle,
2367 		    hba->sli.sli4.WQDB_reg_addr, db);
2368 		break;
2369 
2370 	}
2371 
2372 #ifdef DEBUG_FASTPATH
2373 	EMLXS_MSGF(&hba->port[0], _FILENO_, __LINE__, &emlxs_sli_detail_msg,
2374 	    "WQ RING: %08x", db);
2375 #endif /* DEBUG_FASTPATH */
2376 } /* emlxs_sli4_write_wqdb() */
2377 
2378 
2379 static uint32_t
emlxs_check_bootstrap_ready(emlxs_hba_t * hba,uint32_t tmo)2380 emlxs_check_bootstrap_ready(emlxs_hba_t *hba, uint32_t tmo)
2381 {
2382 	emlxs_port_t *port = &PPORT;
2383 	uint32_t status = 0;
2384 	uint32_t err1;
2385 	uint32_t err2;
2386 
2387 	/* Wait for reset completion, tmo is in 10ms ticks */
2388 	while (tmo) {
2389 		status = emlxs_sli4_read_mbdb(hba);
2390 
2391 		/* Check to see if any errors occurred during init */
2392 		if (status & BMBX_READY) {
2393 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2394 			    "BMBX Ready: status=0x%x", status);
2395 
2396 			return (tmo);
2397 		}
2398 
2399 		BUSYWAIT_MS(10);
2400 		tmo--;
2401 	}
2402 
2403 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2404 	case SLI_INTF_IF_TYPE_0:
2405 		err1 = ddi_get32(hba->pci_acc_handle,
2406 		    hba->sli.sli4.ERR1_reg_addr);
2407 		err2 = ddi_get32(hba->pci_acc_handle,
2408 		    hba->sli.sli4.ERR2_reg_addr);
2409 		break;
2410 
2411 	default: /* IF_TYPE_2 and IF_TYPE_6 */
2412 		err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
2413 		    hba->sli.sli4.ERR1_reg_addr);
2414 		err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
2415 		    hba->sli.sli4.ERR2_reg_addr);
2416 		break;
2417 	}
2418 
2419 	/* Timeout occurred */
2420 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2421 	    "Timeout waiting for BMailbox: status=%x err1=%x err2=%x",
2422 	    status, err1, err2);
2423 
2424 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
2425 
2426 	return (0);
2427 
2428 } /* emlxs_check_bootstrap_ready() */
2429 
2430 
2431 static uint32_t
emlxs_issue_bootstrap_mb(emlxs_hba_t * hba,uint32_t tmo)2432 emlxs_issue_bootstrap_mb(emlxs_hba_t *hba, uint32_t tmo)
2433 {
2434 	emlxs_port_t *port = &PPORT;
2435 	uint32_t *iptr;
2436 
2437 	/*
2438 	 * This routine assumes the bootstrap mbox is loaded
2439 	 * with the mailbox command to be executed.
2440 	 *
2441 	 * First, load the high 30 bits of bootstrap mailbox
2442 	 */
2443 	emlxs_sli4_write_mbdb(hba, hba->sli.sli4.bootstrapmb.phys, B_TRUE);
2444 
2445 	tmo = emlxs_check_bootstrap_ready(hba, tmo);
2446 	if (tmo == 0) {
2447 		return (0);
2448 	}
2449 
2450 	/* Load the low 30 bits of bootstrap mailbox */
2451 	emlxs_sli4_write_mbdb(hba, hba->sli.sli4.bootstrapmb.phys, B_FALSE);
2452 
2453 	tmo = emlxs_check_bootstrap_ready(hba, tmo);
2454 	if (tmo == 0) {
2455 		return (0);
2456 	}
2457 
2458 	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2459 
2460 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2461 	    "BootstrapMB: %p Completed %08x %08x %08x",
2462 	    hba->sli.sli4.bootstrapmb.virt,
2463 	    *iptr, *(iptr+1), *(iptr+2));
2464 
2465 	return (tmo);
2466 
2467 } /* emlxs_issue_bootstrap_mb() */
2468 
2469 
2470 static int
emlxs_init_bootstrap_mb(emlxs_hba_t * hba)2471 emlxs_init_bootstrap_mb(emlxs_hba_t *hba)
2472 {
2473 #ifdef FMA_SUPPORT
2474 	emlxs_port_t *port = &PPORT;
2475 #endif /* FMA_SUPPORT */
2476 	uint32_t *iptr;
2477 	uint32_t tmo;
2478 
2479 	if (emlxs_check_hdw_ready(hba)) {
2480 		return (1);
2481 	}
2482 
2483 	if (hba->flag & FC_BOOTSTRAPMB_INIT) {
2484 		return (0);  /* Already initialized */
2485 	}
2486 
2487 	/* NOTE: tmo is in 10ms ticks */
2488 	tmo = emlxs_check_bootstrap_ready(hba, 3000);
2489 	if (tmo == 0) {
2490 		return (1);
2491 	}
2492 
2493 	/* Issue FW_INITIALIZE command */
2494 
2495 	/* Special words to initialize bootstrap mbox MUST be little endian */
2496 	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2497 	*iptr = LE_SWAP32(FW_INITIALIZE_WORD0);
2498 	*(iptr+1) = LE_SWAP32(FW_INITIALIZE_WORD1);
2499 
2500 	EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2501 	    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
2502 
2503 	emlxs_data_dump(port, "FW_INIT", (uint32_t *)iptr, 6, 0);
2504 	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
2505 		return (1);
2506 	}
2507 
2508 #ifdef FMA_SUPPORT
2509 	if (emlxs_fm_check_dma_handle(hba, hba->sli.sli4.bootstrapmb.dma_handle)
2510 	    != DDI_FM_OK) {
2511 		EMLXS_MSGF(EMLXS_CONTEXT,
2512 		    &emlxs_invalid_dma_handle_msg,
2513 		    "init_bootstrap_mb: hdl=%p",
2514 		    hba->sli.sli4.bootstrapmb.dma_handle);
2515 		return (1);
2516 	}
2517 #endif
2518 	hba->flag |= FC_BOOTSTRAPMB_INIT;
2519 	return (0);
2520 
2521 } /* emlxs_init_bootstrap_mb() */
2522 
2523 
2524 
2525 
/*
 * Bring the SLI4 adapter to an initialized state: reset the port,
 * wire up the I/O channels to their work queues, initialize the
 * virtual port objects, and set up the bootstrap mailbox.
 *
 * Returns 0 on success, non-zero on failure.
 */
static uint32_t
emlxs_sli4_hba_init(emlxs_hba_t *hba)
{
	int rc;
	uint16_t i;
	emlxs_port_t *vport;
	emlxs_config_t *cfg = &CFG;
	CHANNEL *cp;
	VPIobj_t *vpip;

	/* Restart the adapter */
	if (emlxs_sli4_hba_reset(hba, 1, 0, 0)) {
		return (1);
	}

	/* Map each I/O channel to its SLI4 work queue */
	for (i = 0; i < hba->chan_count; i++) {
		cp = &hba->chan[i];
		cp->iopath = (void *)&hba->sli.sli4.wq[i];
	}

	/* Initialize all the port objects */
	hba->vpi_max  = 0;
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->hba = hba;
		vport->vpi = i;

		/* Each vport owns one VPI object, initially offline */
		vpip = &vport->VPIobj;
		vpip->index = i;
		vpip->VPI = i;
		vpip->port = vport;
		vpip->state = VPI_STATE_OFFLINE;
		vport->vpip = vpip;
	}

	/* Set the max node count (config override, else default 4096) */
	if (hba->max_nodes == 0) {
		if (cfg[CFG_NUM_NODES].current > 0) {
			hba->max_nodes = cfg[CFG_NUM_NODES].current;
		} else {
			hba->max_nodes = 4096;
		}
	}

	/* Bring the bootstrap mailbox online (issues FW_INITIALIZE) */
	rc = emlxs_init_bootstrap_mb(hba);
	if (rc) {
		return (rc);
	}

	hba->sli.sli4.cfgFCOE.FCMap[0] = FCOE_FCF_MAP0;
	hba->sli.sli4.cfgFCOE.FCMap[1] = FCOE_FCF_MAP1;
	hba->sli.sli4.cfgFCOE.FCMap[2] = FCOE_FCF_MAP2;

	if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) == SLI_INTF_IF_TYPE_0) {
		/* Cache the UE MASK registers value for UE error detection */
		hba->sli.sli4.ue_mask_lo = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_LO_OFFSET));
		hba->sli.sli4.ue_mask_hi = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_HI_OFFSET));
	}

	return (0);

} /* emlxs_sli4_hba_init() */
2590 
2591 
/*
 * Reset the SLI4 port and re-initialize the driver's per-HBA and
 * per-vport software state to match.
 *
 * restart/skip_post are not referenced by this SLI4 implementation
 * (hence ARGSUSED); quiesce, when non-zero, skips killing the
 * adapter and (on if_type 0) uses the quiesce-safe mailbox path.
 *
 * Returns 0 on success, non-zero on failure.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
    uint32_t quiesce)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	CHANNEL *cp;
	emlxs_config_t *cfg = &CFG;
	MAILBOXQ mboxq;
	uint32_t value;
	uint32_t i;
	uint32_t rc;
	uint16_t channelno;
	uint32_t status;
	uint32_t err1;
	uint32_t err2;
	uint8_t generate_event = 0;

	if (!cfg[CFG_RESET_ENABLE].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Adapter reset disabled.");
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		return (1);
	}

	/* The reset mechanism differs by SLI interface type */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		/* if_type 0 resets via a mailbox command */
		if (quiesce == 0) {
			emlxs_sli4_hba_kill(hba);

			/*
			 * Initalize Hardware that will be used to bring
			 * SLI4 online.
			 */
			rc = emlxs_init_bootstrap_mb(hba);
			if (rc) {
				return (rc);
			}
		}

		bzero((void *)&mboxq, sizeof (MAILBOXQ));
		emlxs_mb_resetport(hba, &mboxq);

		if (quiesce == 0) {
			if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
			    MBX_POLL, 0) != MBX_SUCCESS) {
				/* Timeout occurred */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Timeout: RESET");
				EMLXS_STATE_CHANGE(hba, FC_ERROR);
				/* Log a dump event - not supported */
				return (1);
			}
		} else {
			if (emlxs_sli4_issue_mbox_cmd4quiesce(hba, &mboxq,
			    MBX_POLL, 0) != MBX_SUCCESS) {
				EMLXS_STATE_CHANGE(hba, FC_ERROR);
				/* Log a dump event - not supported */
				return (1);
			}
		}
		emlxs_data_dump(port, "resetPort", (uint32_t *)&mboxq, 12, 0);
		break;

	case SLI_INTF_IF_TYPE_2:
	case SLI_INTF_IF_TYPE_6:
		/* if_type 2/6 reset via the SLI control register */
		if (quiesce == 0) {
			emlxs_sli4_hba_kill(hba);
		}

		rc = emlxs_check_hdw_ready(hba);
		if (rc > 1) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "Adapter not ready for reset.");
			return (1);
		}

		if (rc == 1) {
			/* Port reported an error; capture error registers */
			err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR1_reg_addr);
			err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR2_reg_addr);

			/* Don't generate an event if dump was forced */
			if ((err1 != 0x2) || (err2 != 0x2)) {
				generate_event = 1;
			}
		}

		/* Reset the port now */

		mutex_enter(&EMLXS_PORT_LOCK);
		value = SLI_CNTL_INIT_PORT;

		ddi_put32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.CNTL_reg_addr, value);
		mutex_exit(&EMLXS_PORT_LOCK);

		break;
	}

	/* Reset the hba structure */
	hba->flag &= FC_RESET_MASK;

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];
		cp->hba = hba;
		cp->channelno = channelno;
	}

	/* Clear all link/IO accounting back to power-on values */
	hba->channel_tx_count = 0;
	hba->io_count = 0;
	hba->iodone_count = 0;
	hba->topology = 0;
	hba->linkspeed = 0;
	hba->heartbeat_active = 0;
	hba->discovery_timer = 0;
	hba->linkup_timer = 0;
	hba->loopback_tics = 0;

	/* Specific to ATTO G5 boards */
	if (hba->model_info.flags & EMLXS_GPIO_LEDS) {
		/* Assume the boot driver enabled all LEDs */
		hba->gpio_current =
		    EMLXS_GPIO_LO | EMLXS_GPIO_HI | EMLXS_GPIO_ACT;
		hba->gpio_desired = 0;
		hba->gpio_bit = 0;
	}

	/* Reset the port objects */
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		vport->flag &= EMLXS_PORT_RESET_MASK;
		vport->did = 0;
		vport->prev_did = 0;
		vport->lip_type = 0;
		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
		bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));

		/* Rebuild the base node (broadcast DID, marked active) */
		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
		vport->node_base.nlp_Rpi = 0;
		vport->node_base.nlp_DID = 0xffffff;
		vport->node_base.nlp_list_next = NULL;
		vport->node_base.nlp_list_prev = NULL;
		vport->node_base.nlp_active = 1;
		vport->node_count = 0;

		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
		}
	}

	/* Wait for the port to come back ready after the reset */
	if (emlxs_check_hdw_ready(hba)) {
		return (1);
	}

	if (generate_event) {
		status = emlxs_sli4_read_status(hba);
		if (status & SLI_STATUS_DUMP_IMAGE_PRESENT) {
			emlxs_log_dump_event(port, NULL, 0);
		}
	}

	return (0);

} /* emlxs_sli4_hba_reset */
2762 
2763 
2764 #define	SGL_CMD		0
2765 #define	SGL_RESP	1
2766 #define	SGL_DATA	2
2767 #define	SGL_LAST	0x80
2768 
/*
 * Translate one DMA-mapped payload of an fc_packet (command,
 * response, or data) into a chain of SLI4 SGEs.
 *
 * pkt      - packet whose DMA cookies are translated
 * sge      - next free SGE slot in the XRI's SGL
 * sgl_type - SGL_CMD, SGL_RESP or SGL_DATA; OR with SGL_LAST to mark
 *            the final SGE written here as the last of the SGL
 * pcnt     - if not NULL, receives the total byte count described
 *
 * port is used only for DEBUG_SGE dumps (hence ARGSUSED otherwise).
 *
 * Returns the next free SGE slot after those written, or NULL if
 * sgl_type is not one of the three payload selectors.
 */
/*ARGSUSED*/
static ULP_SGE64 *
emlxs_pkt_to_sgl(emlxs_port_t *port, fc_packet_t *pkt, ULP_SGE64 *sge,
    uint32_t sgl_type, uint32_t *pcnt)
{
#ifdef DEBUG_SGE
	emlxs_hba_t *hba = HBA;
#endif /* DEBUG_SGE */
	ddi_dma_cookie_t *cp;
	uint_t i;
	uint_t last;
	int32_t	size;
	int32_t	sge_size;
	uint64_t sge_addr;
	int32_t	len;
	uint32_t cnt;
	uint_t cookie_cnt;
	ULP_SGE64 stage_sge;

	/* Separate the LAST flag from the payload selector */
	last = sgl_type & SGL_LAST;
	sgl_type &= ~SGL_LAST;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	/* Newer framework: packets carry a cookie array and count */
	switch (sgl_type) {
	case SGL_CMD:
		cp = pkt->pkt_cmd_cookie;
		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
		size = (int32_t)pkt->pkt_cmdlen;
		break;

	case SGL_RESP:
		cp = pkt->pkt_resp_cookie;
		cookie_cnt = pkt->pkt_resp_cookie_cnt;
		size = (int32_t)pkt->pkt_rsplen;
		break;


	case SGL_DATA:
		cp = pkt->pkt_data_cookie;
		cookie_cnt = pkt->pkt_data_cookie_cnt;
		size = (int32_t)pkt->pkt_datalen;
		break;

	default:
		return (NULL);
	}

#else
	/* Older framework: a single cookie per payload */
	switch (sgl_type) {
	case SGL_CMD:
		cp = &pkt->pkt_cmd_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_cmdlen;
		break;

	case SGL_RESP:
		cp = &pkt->pkt_resp_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_rsplen;
		break;


	case SGL_DATA:
		cp = &pkt->pkt_data_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_datalen;
		break;

	default:
		return (NULL);
	}
#endif	/* >= EMLXS_MODREV3 */

	/*
	 * SGEs are staged in stage_sge and flushed to the SGL one
	 * iteration late so that the final one can have 'last' set.
	 *
	 * NOTE(review): if size is 0 on entry the loop stages nothing
	 * and the final copy below writes a stage_sge whose address
	 * and length fields are uninitialized; callers appear to pass
	 * only non-empty payloads -- confirm before relying on
	 * zero-length behavior.
	 */
	stage_sge.offset = 0;
	stage_sge.type = 0;
	stage_sge.last = 0;
	cnt = 0;
	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {

		sge_size = cp->dmac_size;
		sge_addr = cp->dmac_laddress;
		while (sge_size && size) {
			if (cnt) {
				/* Copy staged SGE before we build next one */
				BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
				    (uint8_t *)sge, sizeof (ULP_SGE64));
				sge++;
			}
			/* Each SGE covers at most EMLXS_MAX_SGE_SIZE bytes */
			len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);
			len = MIN(size, len);

			stage_sge.addrHigh =
			    PADDR_HI(sge_addr);
			stage_sge.addrLow =
			    PADDR_LO(sge_addr);
			stage_sge.length = len;
			if (sgl_type == SGL_DATA) {
				/* Data SGEs carry a running byte offset */
				stage_sge.offset = cnt;
			}
#ifdef DEBUG_SGE
			emlxs_data_dump(port, "SGE", (uint32_t *)&stage_sge,
			    4, 0);
#endif /* DEBUG_SGE */
			sge_addr += len;
			sge_size -= len;

			cnt += len;
			size -= len;
		}
	}

	/* Flush the final staged SGE, marking it last if requested */
	if (last) {
		stage_sge.last = 1;
	}
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
	    sizeof (ULP_SGE64));

	sge++;

	if (pcnt) {
		*pcnt = cnt;
	}
	return (sge);

} /* emlxs_pkt_to_sgl */
2894 
2895 
/*
 * Build the SGL entries and WQE payload/BDE fields for an initiator
 * I/O (FCP command or general request) described by sbp.
 *
 * Returns 0 on success, 1 on failure (bad transfer type or SGL
 * translation failure).
 */
/*ARGSUSED*/
uint32_t
emlxs_sli4_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	XRIobj_t *xrip;
	ULP_SGE64 *sge;
	emlxs_wqe_t *wqe;
	IOCBQ *iocbq;
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_data;
	uint64_t sge_addr;
	uint32_t cmd_cnt;
	uint32_t resp_cnt;

	iocbq = (IOCBQ *) &sbp->iocbq;
	wqe = &iocbq->wqe;
	pkt = PRIV2PKT(sbp);
	xrip = sbp->xrip;
	sge = xrip->SGList->virt;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cp_cmd = pkt->pkt_cmd_cookie;
	cp_data = pkt->pkt_data_cookie;
#else
	cp_cmd  = &pkt->pkt_cmd_cookie;
	cp_data = &pkt->pkt_data_cookie;
#endif	/* >= EMLXS_MODREV3 */

	iocbq = &sbp->iocbq;	/* NOTE(review): redundant re-assignment */
	if (iocbq->flag & IOCB_FCP_CMD) {

		/* FCP commands are not valid as outbound-only transfers */
		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
			return (1);
		}

		/* CMD payload */
		sge = emlxs_pkt_to_sgl(port, pkt, sge, SGL_CMD, &cmd_cnt);
		if (! sge) {
			return (1);
		}

		/* RSP and (optionally) DATA payloads */
		if (pkt->pkt_datalen != 0) {
			/* RSP payload */
			sge = emlxs_pkt_to_sgl(port, pkt, sge,
			    SGL_RESP, &resp_cnt);
			if (! sge) {
				return (1);
			}

			/* Data payload */
			sge = emlxs_pkt_to_sgl(port, pkt, sge,
			    SGL_DATA | SGL_LAST, 0);
			if (! sge) {
				return (1);
			}

			if (hba->sli.sli4.flag & EMLXS_SLI4_PHON) {
				/* Performance hint: first data BDE in WQE */
				sge_addr = cp_data->dmac_laddress;
				wqe->FirstData.addrHigh = PADDR_HI(sge_addr);
				wqe->FirstData.addrLow = PADDR_LO(sge_addr);
				wqe->FirstData.tus.f.bdeSize =
				    cp_data->dmac_size;
			}
		} else {
			/* RSP payload */
			sge = emlxs_pkt_to_sgl(port, pkt, sge,
			    SGL_RESP | SGL_LAST, &resp_cnt);
			if (! sge) {
				return (1);
			}
		}

		/* Words 0-3: point the WQE at the FCP command payload */
		wqe->un.FcpCmd.Payload.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		wqe->un.FcpCmd.Payload.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = cmd_cnt;
		wqe->un.FcpCmd.PayloadLength = cmd_cnt + resp_cnt;

	} else {

		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
			/* CMD payload */
			sge = emlxs_pkt_to_sgl(port, pkt, sge,
			    SGL_CMD | SGL_LAST, &cmd_cnt);
			if (! sge) {
				return (1);
			}
		} else {
			/* CMD payload */
			sge = emlxs_pkt_to_sgl(port, pkt, sge,
			    SGL_CMD, &cmd_cnt);
			if (! sge) {
				return (1);
			}

			/* RSP payload */
			sge = emlxs_pkt_to_sgl(port, pkt, sge,
			    SGL_RESP | SGL_LAST, &resp_cnt);
			if (! sge) {
				return (1);
			}
			wqe->un.GenReq.PayloadLength = cmd_cnt;
		}

		/* Words 0-3: point the WQE at the request payload */
		wqe->un.GenReq.Payload.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		wqe->un.GenReq.Payload.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		wqe->un.GenReq.Payload.tus.f.bdeSize = cmd_cnt;
	}
	return (0);
} /* emlxs_sli4_bde_setup */
3012 
3013 
3014 
3015 
3016 #ifdef SFCT_SUPPORT
/*
 * Build the SGL and WQE payload fields for a target-mode (FCT)
 * TSEND64 or TRECEIVE64 request described by sbp.
 *
 * Returns 0 on success (including the no-data case), 1 on failure.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_wqe_t *wqe;
	ULP_SGE64 stage_sge;
	ULP_SGE64 *sge;
	IOCB *iocb;
	IOCBQ *iocbq;
	MATCHMAP *mp;
	MATCHMAP *fct_mp;
	XRIobj_t *xrip;
	uint64_t sge_addr;
	uint32_t sge_size;
	uint32_t cnt;
	uint32_t len;
	uint32_t size;
	uint32_t *xrdy_vaddr;
	stmf_data_buf_t *dbuf;

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;
	wqe = &iocbq->wqe;
	xrip = sbp->xrip;

	/* No data buffer means there is nothing to map */
	if (!sbp->fct_buf) {
		return (0);
	}

	size = sbp->fct_buf->db_data_size;

	/*
	 * The hardware will automaticlly round up
	 * to multiple of 4.
	 *
	 * if (size & 3) {
	 *	size = (size + 3) & 0xfffffffc;
	 * }
	 */
	fct_mp = (MATCHMAP *)sbp->fct_buf->db_port_private;

	if (sbp->fct_buf->db_sglist_length != 1) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "fct_bde_setup: Only 1 sglist entry supported: %d",
		    sbp->fct_buf->db_sglist_length);
		return (1);
	}

	sge = xrip->SGList->virt;

	if (iocb->ULPCOMMAND == CMD_FCP_TRECEIVE64_CX) {

		/* TRECEIVE64 needs an XFER_RDY payload as the first SGE */
		mp = emlxs_mem_buf_alloc(hba, EMLXS_XFER_RDY_SIZE);
		if (!mp || !mp->virt || !mp->phys) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
			    "fct_bde_setup: Cannot allocate XRDY memory");
			return (1);
		}
		/* Save the MATCHMAP info to free this memory later */
		iocbq->bp = mp;

		/* Point to XRDY payload */
		xrdy_vaddr = (uint32_t *)(mp->virt);

		/* Fill in burstsize in payload */
		*xrdy_vaddr++ = 0;
		*xrdy_vaddr++ = LE_SWAP32(size);
		*xrdy_vaddr = 0;

		/* First 2 SGEs are XRDY and SKIP */
		stage_sge.addrHigh = PADDR_HI(mp->phys);
		stage_sge.addrLow = PADDR_LO(mp->phys);
		stage_sge.length = EMLXS_XFER_RDY_SIZE;
		stage_sge.offset = 0;
		stage_sge.type = 0;
		stage_sge.last = 0;

		/* Words  0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = stage_sge.addrHigh;
		wqe->un.FcpCmd.Payload.addrLow = stage_sge.addrLow;
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = EMLXS_XFER_RDY_SIZE;
		wqe->un.FcpCmd.PayloadLength = EMLXS_XFER_RDY_SIZE;

	} else {	/* CMD_FCP_TSEND64_CX */
		/* First 2 SGEs are SKIP */
		stage_sge.addrHigh = 0;
		stage_sge.addrLow = 0;
		stage_sge.length = 0;
		stage_sge.offset = 0;
		stage_sge.type = EMLXS_SGE_TYPE_SKIP;
		stage_sge.last = 0;

		/* Words  0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = PADDR_HI(fct_mp->phys);
		wqe->un.FcpCmd.Payload.addrLow = PADDR_LO(fct_mp->phys);

		/* The BDE should match the contents of the first SGE payload */
		len = MIN(EMLXS_MAX_SGE_SIZE, size);
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = len;

		/* The PayloadLength should be set to 0 for TSEND64. */
		wqe->un.FcpCmd.PayloadLength = 0;
	}

	dbuf = sbp->fct_buf;
	/*
	 * TotalTransferCount equals to Relative Offset field (Word 4)
	 * in both TSEND64 and TRECEIVE64 WQE.
	 */
	wqe->un.FcpCmd.TotalTransferCount = dbuf->db_relative_offset;

	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));
	sge++;

	/* Second SGE is always a SKIP entry */
	stage_sge.addrHigh = 0;
	stage_sge.addrLow = 0;
	stage_sge.length = 0;
	stage_sge.offset = 0;
	stage_sge.type = EMLXS_SGE_TYPE_SKIP;
	stage_sge.last = 0;

	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));
	sge++;

	sge_size = size;
	sge_addr = fct_mp->phys;
	cnt = 0;

	/* Build SGEs */
	while (sge_size) {
		if (cnt) {
			/* Copy staged SGE before we build next one */
			BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
			    (uint8_t *)sge, sizeof (ULP_SGE64));
			sge++;
		}

		/* Each data SGE covers at most EMLXS_MAX_SGE_SIZE bytes */
		len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);

		stage_sge.addrHigh = PADDR_HI(sge_addr);
		stage_sge.addrLow = PADDR_LO(sge_addr);
		stage_sge.length = len;
		stage_sge.offset = cnt;
		stage_sge.type = EMLXS_SGE_TYPE_DATA;

		sge_addr += len;
		sge_size -= len;
		cnt += len;
	}

	stage_sge.last = 1;

	if (hba->sli.sli4.flag & EMLXS_SLI4_PHON) {
		/* Performance hint: mirror the last data SGE into the WQE */
		wqe->FirstData.addrHigh = stage_sge.addrHigh;
		wqe->FirstData.addrLow = stage_sge.addrLow;
		wqe->FirstData.tus.f.bdeSize = stage_sge.length;
	}
	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));

	return (0);

} /* emlxs_sli4_fct_bde_setup */
3186 #endif /* SFCT_SUPPORT */
3187 
3188 
/*
 * emlxs_sli4_issue_iocb_cmd
 *
 * Send an IOCB (as a SLI4 WQE) on the work queue bound to the given
 * channel, or queue it on the channel tx queue if it cannot be sent
 * right now.  A NULL iocbq only attempts to drain the channel's tx
 * queue.  After posting one WQE, this keeps pulling from the tx queue
 * until the WQ fills, the adapter throttle is hit, or the tx queue
 * empties.
 */
static void
emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
	emlxs_port_t *port = &PPORT;
	emlxs_buf_t *sbp;
	uint32_t channelno;
	int32_t throttle;
	emlxs_wqe_t *wqe;
	emlxs_wqe_t *wqeslot;	/* Next free slot in the host WQ ring */
	WQ_DESC_t *wq;
	uint32_t flag;
	uint16_t next_wqe;
	off_t offset;
#ifdef NODE_THROTTLE_SUPPORT
	int32_t node_throttle;
	NODELIST *marked_node = NULL;
#endif /* NODE_THROTTLE_SUPPORT */


	channelno = cp->channelno;
	wq = (WQ_DESC_t *)cp->iopath;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "ISSUE WQE channel: %x  %p", channelno, wq);
#endif /* DEBUG_FASTPATH */

	throttle = 0;

	/* Check if FCP ring and adapter is not ready */
	/* We may use any ring for FCP_CMD */
	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
		    (((emlxs_port_t *)iocbq->port)->mode == MODE_INITIATOR)) {
			/* Defer until the adapter reaches FC_READY */
			emlxs_tx_put(iocbq, 1);
			return;
		}
	}

	/* Attempt to acquire CMD_RING lock */
	if (mutex_tryenter(&EMLXS_QUE_LOCK(channelno)) == 0) {
		/* Queue it for later */
		if (iocbq) {
			/*
			 * If plenty of I/O is already in flight, don't
			 * block on the lock; let a later drain send it.
			 */
			if ((hba->io_count -
			    hba->channel_tx_count) > 10) {
				emlxs_tx_put(iocbq, 1);
				return;
			} else {

				mutex_enter(&EMLXS_QUE_LOCK(channelno));
			}
		} else {
			return;
		}
	}
	/* EMLXS_QUE_LOCK acquired */

	/* Throttle check only applies to non special iocb */
	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
		/* Check if HBA is full */
		throttle = hba->io_throttle - hba->io_active;
		if (throttle <= 0) {
			/* Hitting adapter throttle limit */
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/* Check to see if we have room for this WQE */
	next_wqe = wq->host_index + 1;
	if (next_wqe >= wq->max_index) {
		next_wqe = 0;
	}

	/* Ring full when advancing would collide with the port index */
	if (next_wqe == wq->port_index) {
		/* Queue it for later */
		if (iocbq) {
			emlxs_tx_put(iocbq, 1);
		}
		goto busy;
	}

	/*
	 * We have a command ring slot available
	 * Make sure we have an iocb to send
	 */
	if (iocbq) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

		/* Check if the ring already has iocb's waiting */
		if (cp->nodeq.q_first != NULL) {
			/* Put the current iocbq on the tx queue */
			emlxs_tx_put(iocbq, 0);

			/*
			 * Attempt to replace it with the next iocbq
			 * in the tx queue
			 */
			iocbq = emlxs_tx_get(cp, 0);
		}

		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	} else {
		iocbq = emlxs_tx_get(cp, 1);
	}

	/* Process each iocbq */
	while (iocbq) {
		sbp = iocbq->sbp;

#ifdef NODE_THROTTLE_SUPPORT
		if (sbp && sbp->node && sbp->node->io_throttle) {
			node_throttle = sbp->node->io_throttle -
			    sbp->node->io_active;
			if (node_throttle <= 0) {
				/* Node is busy */
				/* Queue this iocb and get next iocb from */
				/* channel */

				/* Remember the first busy node we saw */
				if (!marked_node) {
					marked_node = sbp->node;
				}

				mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
				emlxs_tx_put(iocbq, 0);

				/*
				 * If we have cycled back around to the
				 * first busy node, every queued node is
				 * throttled; stop trying.
				 */
				if (cp->nodeq.q_first == marked_node) {
					mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
					goto busy;
				}

				iocbq = emlxs_tx_get(cp, 0);
				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
				continue;
			}
		}
		marked_node = 0;
#endif /* NODE_THROTTLE_SUPPORT */

		wqe = &iocbq->wqe;
#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ISSUE QID %d WQE iotag:%x xri:%d", wq->qid,
		    wqe->RequestTag, wqe->XRITag);
#endif /* DEBUG_FASTPATH */

		if (sbp) {
			/* If exchange removed after wqe was prep'ed, drop it */
			if (!(sbp->xrip)) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "Xmit WQE iotag:%x xri:%d aborted",
				    wqe->RequestTag, wqe->XRITag);

				/* Get next iocb from the tx queue */
				iocbq = emlxs_tx_get(cp, 1);
				continue;
			}

			if (sbp->pkt_flags & PACKET_DELAY_REQUIRED) {

				/* Perform delay */
				if ((channelno == hba->channel_els) &&
				    !(iocbq->flag & IOCB_FCP_CMD)) {
					drv_usecwait(100000);
				} else {
					drv_usecwait(20000);
				}
			}

			/* Check for ULP pkt request */
			mutex_enter(&sbp->mtx);

			if (sbp->node == NULL) {
				/* Set node to base node by default */
				iocbq->node = (void *)&port->node_base;
				sbp->node = (void *)&port->node_base;
			}

			sbp->pkt_flags |= PACKET_IN_CHIPQ;
			mutex_exit(&sbp->mtx);

			atomic_inc_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
			if (sbp->node) {
				atomic_inc_32(&sbp->node->io_active);
			}
#endif /* NODE_THROTTLE_SUPPORT */

			sbp->xrip->flag |= EMLXS_XRI_PENDING_IO;
#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
			/*
			 * NOTE(review): 'icmd' is not declared in this
			 * function; FCT_IO_TRACE builds may not compile.
			 */
			if (sbp->fct_cmd) {
				emlxs_fct_io_trace(port, sbp->fct_cmd,
				    EMLXS_FCT_IOCB_ISSUED);
				emlxs_fct_io_trace(port, sbp->fct_cmd,
				    icmd->ULPCOMMAND);
			}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */
			cp->hbaSendCmd_sbp++;
			iocbq->channel = cp;
		} else {
			cp->hbaSendCmd++;
		}

		flag = iocbq->flag;

		/*
		 * At this point, we have a command ring slot available
		 * and an iocb to send
		 */
		/*
		 * Set WQEC on every WQE_RELEASE_DEPTH-th entry;
		 * presumably requests a WQE-release completion from the
		 * port — confirm against the SLI-4 specification.
		 */
		wq->release_depth--;
		if (wq->release_depth == 0) {
			wq->release_depth = WQE_RELEASE_DEPTH;
			wqe->WQEC = 1;
		}

		HBASTATS.IocbIssued[channelno]++;
		wq->num_proc++;

		/* Send the iocb */
		wqeslot = (emlxs_wqe_t *)wq->addr.virt;
		wqeslot += wq->host_index;

		wqe->CQId = wq->cqid;
		if (hba->sli.sli4.param.PHWQ) {
			WQE_PHWQ_WQID(wqe, wq->qid);
		}
		/* Copy the WQE into the ring slot, byte-swapped for HW */
		BE_SWAP32_BCOPY((uint8_t *)wqe, (uint8_t *)wqeslot,
		    sizeof (emlxs_wqe_t));
#ifdef DEBUG_WQE
		emlxs_data_dump(port, "WQE", (uint32_t *)wqe, 18, 0);
#endif /* DEBUG_WQE */
		/* Offset of this WQ within the slim2 DMA area */
		offset = (off_t)((uint64_t)((unsigned long)
		    wq->addr.virt) -
		    (uint64_t)((unsigned long)
		    hba->sli.sli4.slim2.virt));

		/* Sync the WQ to the device before ringing the doorbell */
		EMLXS_MPDATA_SYNC(wq->addr.dma_handle, offset,
		    4096, DDI_DMA_SYNC_FORDEV);

		/*
		 * After this, the sbp / iocb / wqe should not be
		 * accessed in the xmit path.
		 */

		/* Ring the WQ Doorbell */
		emlxs_sli4_write_wqdb(hba, wq->qid, 1, wq->host_index);
		wq->host_index = next_wqe;

		if (!sbp) {
			/* Driver-internal iocbq; return it to the pool */
			emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
		}

		if (iocbq && (!(flag & IOCB_SPECIAL))) {
			/* Check if HBA is full */
			throttle = hba->io_throttle - hba->io_active;
			if (throttle <= 0) {
				goto busy;
			}
		}

		/* Check to see if we have room for another WQE */
		next_wqe++;
		if (next_wqe >= wq->max_index) {
			next_wqe = 0;
		}

		if (next_wqe == wq->port_index) {
			/* Queue it for later */
			goto busy;
		}

		/* Get the next iocb from the tx queue if there is one */
		iocbq = emlxs_tx_get(cp, 1);
	}

	mutex_exit(&EMLXS_QUE_LOCK(channelno));

	return;

busy:
	/* Could not post (ring full or throttled); record statistics */
	wq->num_busy++;
	if (throttle <= 0) {
		HBASTATS.IocbThrottled++;
	} else {
		HBASTATS.IocbRingFull[channelno]++;
	}

	mutex_exit(&EMLXS_QUE_LOCK(channelno));

	return;

} /* emlxs_sli4_issue_iocb_cmd() */
3487 
3488 
/*
 * emlxs_sli4_issue_mq
 *
 * Post a mailbox command to the SLI4 Mailbox Queue (MQ).
 *
 * mqe points at the host MQ slot to fill; mb is really the MAILBOXQ
 * being issued.  Embedded commands are copied directly into the MQ
 * entry.  A non-embedded MBX_SLI_CONFIG instead places an SGE in the
 * MQ entry that points at the external payload buffer (mbq->nonembed).
 * The MQ doorbell is rung here; completion is asynchronous.
 * tmo is unused in this path (ARGSUSED).
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_issue_mq(emlxs_port_t *port, MAILBOX4 *mqe, MAILBOX *mb,
    uint32_t tmo)
{
	emlxs_hba_t *hba = HBA;
	MAILBOXQ	*mbq;
	MAILBOX4	*mb4;
	MATCHMAP	*mp;
	uint32_t	*iptr;
	off_t		offset;

	mbq = (MAILBOXQ *)mb;
	mb4 = (MAILBOX4 *)mb;
	mp = (MATCHMAP *) mbq->nonembed;
	/* Remember the active MQE so the completion path can find it */
	hba->mbox_mqe = (void *)mqe;

	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
	    (mb4->un.varSLIConfig.be.embedded)) {
		/*
		 * If this is an embedded mbox, everything should fit
		 * into the mailbox area.
		 */
		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
		    MAILBOX_CMD_SLI4_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,
		    4096, DDI_DMA_SYNC_FORDEV);

		if (mb->mbxCommand != MBX_HEARTBEAT) {
			emlxs_data_dump(port, "MBOX CMD", (uint32_t *)mqe,
			    18, 0);
		}
	} else {
		/* SLI_CONFIG and non-embedded */

		/*
		 * If this is not embedded, the MQ area
		 * MUST contain a SGE pointer to a larger area for the
		 * non-embedded mailbox command.
		 * mp will point to the actual mailbox command which
		 * should be copied into the non-embedded area.
		 */
		mb4->un.varSLIConfig.be.sge_cnt = 1;
		mb4->un.varSLIConfig.be.payload_length = mp->size;
		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
		*iptr = mp->size;

		/* Payload goes to the wire big-endian; sync it first */
		BE_SWAP32_BUFFER(mp->virt, mp->size);

		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORDEV);

		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
		    MAILBOX_CMD_SLI4_BSIZE);

		/* Offset of the MQ within the slim2 DMA area */
		offset = (off_t)((uint64_t)((unsigned long)
		    hba->sli.sli4.mq.addr.virt) -
		    (uint64_t)((unsigned long)
		    hba->sli.sli4.slim2.virt));

		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
		    4096, DDI_DMA_SYNC_FORDEV);

		emlxs_data_dump(port, "MBOX EXT", (uint32_t *)mqe, 12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Extension Addr %p %p", mp->phys, (uint32_t *)(mp->virt));
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
	}

	/* Ring the MQ Doorbell */
	if (mb->mbxCommand != MBX_HEARTBEAT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "MQ RING: Qid %04x", hba->sli.sli4.mq.qid);
	}

	emlxs_sli4_write_mqdb(hba, hba->sli.sli4.mq.qid, 1);

	return (MBX_SUCCESS);

} /* emlxs_sli4_issue_mq() */
3572 
3573 
/*
 * emlxs_sli4_issue_bootstrap
 *
 * Issue a mailbox command through the SLI4 bootstrap mailbox and poll
 * for completion (used for MBX_POLL mode).  Embedded commands are
 * copied into the bootstrap area directly; non-embedded MBX_SLI_CONFIG
 * commands place an SGE there that points at the external payload
 * (mbq->nonembed).  On completion the response is swapped back into mb.
 *
 * tmo is in 10 millisecond ticks.
 * Returns MBX_SUCCESS, MBX_TIMEOUT, or (FMA) MBXERR_DMA_ERROR.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_issue_bootstrap(emlxs_hba_t *hba, MAILBOX *mb, uint32_t tmo)
{
	emlxs_port_t	*port = &PPORT;
	MAILBOXQ	*mbq;
	MAILBOX4	*mb4;
	MATCHMAP	*mp = NULL;
	uint32_t	*iptr;
	int		nonembed = 0;	/* set when an external payload is used */

	mbq = (MAILBOXQ *)mb;
	mb4 = (MAILBOX4 *)mb;
	mp = (MATCHMAP *) mbq->nonembed;
	/* Remember the active MQE so the completion path can find it */
	hba->mbox_mqe = hba->sli.sli4.bootstrapmb.virt;

	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
	    (mb4->un.varSLIConfig.be.embedded)) {
		/*
		 * If this is an embedded mbox, everything should fit
		 * into the bootstrap mailbox area.
		 */
		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
		    MAILBOX_CMD_SLI4_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORDEV);
		emlxs_data_dump(port, "MBOX CMD", iptr, 18, 0);
	} else {
		/*
		 * If this is not embedded, the bootstrap mailbox area
		 * MUST contain a SGE pointer to a larger area for the
		 * non-embedded mailbox command.
		 * mp will point to the actual mailbox command which
		 * should be copied into the non-embedded area.
		 */
		nonembed = 1;
		mb4->un.varSLIConfig.be.sge_cnt = 1;
		mb4->un.varSLIConfig.be.payload_length = mp->size;
		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
		*iptr = mp->size;

		/* Payload goes to the wire big-endian; sync it first */
		BE_SWAP32_BUFFER(mp->virt, mp->size);

		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORDEV);

		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
		    MAILBOX_CMD_SLI4_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
		    DDI_DMA_SYNC_FORDEV);

		emlxs_data_dump(port, "MBOX EXT", iptr, 12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Extension Addr %p %p", mp->phys,
		    (uint32_t *)((uint8_t *)mp->virt));
		iptr = (uint32_t *)((uint8_t *)mp->virt);
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
	}


	/* NOTE: tmo is in 10ms ticks */
	/* Ring the bootstrap doorbell and poll for completion */
	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
		return (MBX_TIMEOUT);
	}

	/* Command completed; copy the (swapped) response back into mb */
	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
	    (mb4->un.varSLIConfig.be.embedded)) {
		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORKERNEL);

		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
		    MAILBOX_CMD_SLI4_BSIZE);

		emlxs_data_dump(port, "MBOX CMP", iptr, 18, 0);

	} else {
		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
		    DDI_DMA_SYNC_FORKERNEL);

		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORKERNEL);

		/* Swap the external payload back to host order */
		BE_SWAP32_BUFFER(mp->virt, mp->size);

		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
		    MAILBOX_CMD_SLI4_BSIZE);

		emlxs_data_dump(port, "MBOX CMP", iptr, 12, 0);
		iptr = (uint32_t *)((uint8_t *)mp->virt);
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)iptr, 24, 0);
	}

#ifdef FMA_SUPPORT
	/* Fault-management: verify both DMA handles are still good */
	if (nonembed && mp) {
		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "sli4_issue_bootstrap: mp_hdl=%p",
			    mp->dma_handle);
			return (MBXERR_DMA_ERROR);
		}
	}

	if (emlxs_fm_check_dma_handle(hba,
	    hba->sli.sli4.bootstrapmb.dma_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_dma_handle_msg,
		    "sli4_issue_bootstrap: hdl=%p",
		    hba->sli.sli4.bootstrapmb.dma_handle);
		return (MBXERR_DMA_ERROR);
	}
#endif

	return (MBX_SUCCESS);

} /* emlxs_sli4_issue_bootstrap() */
3702 
3703 
3704 /*ARGSUSED*/
3705 static uint32_t
emlxs_sli4_issue_mbox_cmd(emlxs_hba_t * hba,MAILBOXQ * mbq,int32_t flag,uint32_t tmo)3706 emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
3707     uint32_t tmo)
3708 {
3709 	emlxs_port_t	*port;
3710 	MAILBOX4	*mb4;
3711 	MAILBOX		*mb;
3712 	mbox_rsp_hdr_t	*hdr_rsp;
3713 	MATCHMAP	*mp;
3714 	uint32_t	*iptr;
3715 	uint32_t	rc;
3716 	uint32_t	i;
3717 	uint32_t	tmo_local;
3718 
3719 	if (!mbq->port) {
3720 		mbq->port = &PPORT;
3721 	}
3722 
3723 	port = (emlxs_port_t *)mbq->port;
3724 
3725 	mb4 = (MAILBOX4 *)mbq;
3726 	mb = (MAILBOX *)mbq;
3727 
3728 	mb->mbxStatus = MBX_SUCCESS;
3729 	rc = MBX_SUCCESS;
3730 
3731 	/* Check for minimum timeouts */
3732 	switch (mb->mbxCommand) {
3733 	/* Mailbox commands that erase/write flash */
3734 	case MBX_DOWN_LOAD:
3735 	case MBX_UPDATE_CFG:
3736 	case MBX_LOAD_AREA:
3737 	case MBX_LOAD_EXP_ROM:
3738 	case MBX_WRITE_NV:
3739 	case MBX_FLASH_WR_ULA:
3740 	case MBX_DEL_LD_ENTRY:
3741 	case MBX_LOAD_SM:
3742 	case MBX_DUMP_MEMORY:
3743 	case MBX_WRITE_VPARMS:
3744 	case MBX_ACCESS_VDATA:
3745 		if (tmo < 300) {
3746 			tmo = 300;
3747 		}
3748 		break;
3749 
3750 	case MBX_SLI_CONFIG: {
3751 		mbox_req_hdr_t *hdr_req;
3752 
3753 		hdr_req = (mbox_req_hdr_t *)
3754 		    &mb4->un.varSLIConfig.be.un_hdr.hdr_req;
3755 
3756 		if (hdr_req->subsystem == IOCTL_SUBSYSTEM_COMMON) {
3757 			switch (hdr_req->opcode) {
3758 			case COMMON_OPCODE_WRITE_OBJ:
3759 			case COMMON_OPCODE_READ_OBJ:
3760 			case COMMON_OPCODE_READ_OBJ_LIST:
3761 			case COMMON_OPCODE_DELETE_OBJ:
3762 			case COMMON_OPCODE_SET_BOOT_CFG:
3763 			case COMMON_OPCODE_GET_PROFILE_CFG:
3764 			case COMMON_OPCODE_SET_PROFILE_CFG:
3765 			case COMMON_OPCODE_GET_PROFILE_LIST:
3766 			case COMMON_OPCODE_SET_ACTIVE_PROFILE:
3767 			case COMMON_OPCODE_GET_PROFILE_CAPS:
3768 			case COMMON_OPCODE_GET_MR_PROFILE_CAPS:
3769 			case COMMON_OPCODE_SET_MR_PROFILE_CAPS:
3770 			case COMMON_OPCODE_SET_FACTORY_PROFILE_CFG:
3771 			case COMMON_OPCODE_SEND_ACTIVATION:
3772 			case COMMON_OPCODE_RESET_LICENSES:
3773 			case COMMON_OPCODE_SET_PHYSICAL_LINK_CFG_V1:
3774 			case COMMON_OPCODE_GET_VPD_DATA:
3775 				if (tmo < 300) {
3776 					tmo = 300;
3777 				}
3778 				break;
3779 			default:
3780 				if (tmo < 30) {
3781 					tmo = 30;
3782 				}
3783 			}
3784 		} else if (hdr_req->subsystem == IOCTL_SUBSYSTEM_FCOE) {
3785 			switch (hdr_req->opcode) {
3786 			case FCOE_OPCODE_SET_FCLINK_SETTINGS:
3787 				if (tmo < 300) {
3788 					tmo = 300;
3789 				}
3790 				break;
3791 			default:
3792 				if (tmo < 30) {
3793 					tmo = 30;
3794 				}
3795 			}
3796 		} else {
3797 			if (tmo < 30) {
3798 				tmo = 30;
3799 			}
3800 		}
3801 
3802 		/*
3803 		 * Also: VENDOR_MANAGE_FFV  (0x13, 0x02) (not currently used)
3804 		 */
3805 
3806 		break;
3807 	}
3808 	default:
3809 		if (tmo < 30) {
3810 			tmo = 30;
3811 		}
3812 		break;
3813 	}
3814 
3815 	/* Convert tmo seconds to 10 millisecond tics */
3816 	tmo_local = tmo * 100;
3817 
3818 	mutex_enter(&EMLXS_PORT_LOCK);
3819 
3820 	/* Adjust wait flag */
3821 	if (flag != MBX_NOWAIT) {
3822 		if (hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED) {
3823 			flag = MBX_SLEEP;
3824 		} else {
3825 			flag = MBX_POLL;
3826 		}
3827 	} else {
3828 		/* Must have interrupts enabled to perform MBX_NOWAIT */
3829 		if (!(hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED)) {
3830 
3831 			mb->mbxStatus = MBX_HARDWARE_ERROR;
3832 			mutex_exit(&EMLXS_PORT_LOCK);
3833 
3834 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3835 			    "Interrupts disabled. %s failed.",
3836 			    emlxs_mb_cmd_xlate(mb->mbxCommand));
3837 
3838 			return (MBX_HARDWARE_ERROR);
3839 		}
3840 	}
3841 
3842 	/* Check for hardware error ; special case SLI_CONFIG */
3843 	if ((hba->flag & FC_HARDWARE_ERROR) &&
3844 	    ! ((mb4->mbxCommand == MBX_SLI_CONFIG) &&
3845 	    (mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode ==
3846 	    COMMON_OPCODE_RESET))) {
3847 		mb->mbxStatus = MBX_HARDWARE_ERROR;
3848 
3849 		mutex_exit(&EMLXS_PORT_LOCK);
3850 
3851 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3852 		    "Hardware error reported. %s failed. status=%x mb=%p",
3853 		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb->mbxStatus, mb);
3854 
3855 		return (MBX_HARDWARE_ERROR);
3856 	}
3857 
3858 	if (hba->mbox_queue_flag) {
3859 		/* If we are not polling, then queue it for later */
3860 		if (flag == MBX_NOWAIT) {
3861 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3862 			    "Busy.      %s: mb=%p NoWait.",
3863 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3864 
3865 			emlxs_mb_put(hba, mbq);
3866 
3867 			HBASTATS.MboxBusy++;
3868 
3869 			mutex_exit(&EMLXS_PORT_LOCK);
3870 
3871 			return (MBX_BUSY);
3872 		}
3873 
3874 		while (hba->mbox_queue_flag) {
3875 			mutex_exit(&EMLXS_PORT_LOCK);
3876 
3877 			if (tmo_local-- == 0) {
3878 				EMLXS_MSGF(EMLXS_CONTEXT,
3879 				    &emlxs_mbox_event_msg,
3880 				    "Timeout.   %s: mb=%p tmo=%d Waiting.",
3881 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3882 				    tmo);
3883 
3884 				/* Non-lethalStatus mailbox timeout */
3885 				/* Does not indicate a hardware error */
3886 				mb->mbxStatus = MBX_TIMEOUT;
3887 				return (MBX_TIMEOUT);
3888 			}
3889 
3890 			BUSYWAIT_MS(10);
3891 			mutex_enter(&EMLXS_PORT_LOCK);
3892 
3893 			/* Check for hardware error ; special case SLI_CONFIG */
3894 			if ((hba->flag & FC_HARDWARE_ERROR) &&
3895 			    ! ((mb4->mbxCommand == MBX_SLI_CONFIG) &&
3896 			    (mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode ==
3897 			    COMMON_OPCODE_RESET))) {
3898 				mb->mbxStatus = MBX_HARDWARE_ERROR;
3899 
3900 				mutex_exit(&EMLXS_PORT_LOCK);
3901 
3902 				EMLXS_MSGF(EMLXS_CONTEXT,
3903 				    &emlxs_mbox_detail_msg,
3904 				    "Hardware error reported. %s failed. "
3905 				    "status=%x mb=%p",
3906 				    emlxs_mb_cmd_xlate(mb->mbxCommand),
3907 				    mb->mbxStatus, mb);
3908 
3909 				return (MBX_HARDWARE_ERROR);
3910 			}
3911 		}
3912 	}
3913 
3914 	/* Initialize mailbox area */
3915 	emlxs_mb_init(hba, mbq, flag, tmo);
3916 
3917 	if (mb->mbxCommand == MBX_DOWN_LINK) {
3918 		hba->sli.sli4.flag |= EMLXS_SLI4_DOWN_LINK;
3919 	}
3920 
3921 	mutex_exit(&EMLXS_PORT_LOCK);
3922 	switch (flag) {
3923 
3924 	case MBX_NOWAIT:
3925 		if (mb->mbxCommand != MBX_HEARTBEAT) {
3926 			if (mb->mbxCommand != MBX_DOWN_LOAD
3927 			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3928 				EMLXS_MSGF(EMLXS_CONTEXT,
3929 				    &emlxs_mbox_detail_msg,
3930 				    "Sending.   %s: mb=%p NoWait. embedded %d",
3931 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3932 				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
3933 				    (mb4->un.varSLIConfig.be.embedded)));
3934 			}
3935 		}
3936 
3937 		iptr = hba->sli.sli4.mq.addr.virt;
3938 		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
3939 		hba->sli.sli4.mq.host_index++;
3940 		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
3941 			hba->sli.sli4.mq.host_index = 0;
3942 		}
3943 
3944 		if (mbq->bp) {
3945 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3946 			    "BDE virt %p phys %p size x%x",
3947 			    ((MATCHMAP *)mbq->bp)->virt,
3948 			    ((MATCHMAP *)mbq->bp)->phys,
3949 			    ((MATCHMAP *)mbq->bp)->size);
3950 			emlxs_data_dump(port, "DATA",
3951 			    (uint32_t *)(((MATCHMAP *)mbq->bp)->virt), 30, 0);
3952 		}
3953 		rc = emlxs_sli4_issue_mq(port, (MAILBOX4 *)iptr, mb, tmo_local);
3954 		break;
3955 
3956 	case MBX_POLL:
3957 		if (mb->mbxCommand != MBX_DOWN_LOAD
3958 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3959 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3960 			    "Sending.   %s: mb=%p Poll. embedded %d",
3961 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3962 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
3963 			    (mb4->un.varSLIConfig.be.embedded)));
3964 		}
3965 
3966 		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
3967 
3968 		/* Clean up the mailbox area */
3969 		if (rc == MBX_TIMEOUT) {
3970 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3971 			    "Timeout.   %s: mb=%p tmo=%x Poll. embedded %d",
3972 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
3973 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
3974 			    (mb4->un.varSLIConfig.be.embedded)));
3975 
3976 			hba->flag |= FC_MBOX_TIMEOUT;
3977 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
3978 			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3979 
3980 		} else {
3981 			if (mb->mbxCommand != MBX_DOWN_LOAD
3982 			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3983 				EMLXS_MSGF(EMLXS_CONTEXT,
3984 				    &emlxs_mbox_detail_msg,
3985 				    "Completed.   %s: mb=%p status=%x rc=%x"
3986 				    " Poll. embedded %d",
3987 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3988 				    rc, mb->mbxStatus,
3989 				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
3990 				    (mb4->un.varSLIConfig.be.embedded)));
3991 			}
3992 
3993 			/* Process the result */
3994 			if (!(mbq->flag & MBQ_PASSTHRU)) {
3995 				if (mbq->mbox_cmpl) {
3996 					(void) (mbq->mbox_cmpl)(hba, mbq);
3997 				}
3998 			}
3999 
4000 			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
4001 		}
4002 
4003 		mp = (MATCHMAP *)mbq->nonembed;
4004 		if (mp) {
4005 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
4006 			if (hdr_rsp->status) {
4007 				EMLXS_MSGF(EMLXS_CONTEXT,
4008 				    &emlxs_mbox_detail_msg,
4009 				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
4010 				    emlxs_mb_cmd_xlate(mb->mbxCommand),
4011 				    hdr_rsp->status, hdr_rsp->extra_status);
4012 
4013 				mb->mbxStatus = MBX_NONEMBED_ERROR;
4014 			}
4015 		}
4016 		rc = mb->mbxStatus;
4017 
4018 		/* Attempt to send pending mailboxes */
4019 		mbq = (MAILBOXQ *)emlxs_mb_get(hba);
4020 		if (mbq) {
4021 			/* Attempt to send pending mailboxes */
4022 			i =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
4023 			if ((i != MBX_BUSY) && (i != MBX_SUCCESS)) {
4024 				emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4025 			}
4026 		}
4027 		break;
4028 
4029 	case MBX_SLEEP:
4030 		if (mb->mbxCommand != MBX_DOWN_LOAD
4031 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
4032 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
4033 			    "Sending.   %s: mb=%p Sleep. embedded %d",
4034 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
4035 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
4036 			    (mb4->un.varSLIConfig.be.embedded)));
4037 		}
4038 
4039 		iptr = hba->sli.sli4.mq.addr.virt;
4040 		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
4041 		hba->sli.sli4.mq.host_index++;
4042 		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
4043 			hba->sli.sli4.mq.host_index = 0;
4044 		}
4045 
4046 		rc = emlxs_sli4_issue_mq(port, (MAILBOX4 *)iptr, mb, tmo_local);
4047 
4048 		if (rc != MBX_SUCCESS) {
4049 			break;
4050 		}
4051 
4052 		/* Wait for completion */
4053 		/* The driver clock is timing the mailbox. */
4054 
4055 		mutex_enter(&EMLXS_MBOX_LOCK);
4056 		while (!(mbq->flag & MBQ_COMPLETED)) {
4057 			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
4058 		}
4059 		mutex_exit(&EMLXS_MBOX_LOCK);
4060 
4061 		mp = (MATCHMAP *)mbq->nonembed;
4062 		if (mp) {
4063 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
4064 			if (hdr_rsp->status) {
4065 				EMLXS_MSGF(EMLXS_CONTEXT,
4066 				    &emlxs_mbox_detail_msg,
4067 				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
4068 				    emlxs_mb_cmd_xlate(mb->mbxCommand),
4069 				    hdr_rsp->status, hdr_rsp->extra_status);
4070 
4071 				mb->mbxStatus = MBX_NONEMBED_ERROR;
4072 			}
4073 		}
4074 		rc = mb->mbxStatus;
4075 
4076 		if (rc == MBX_TIMEOUT) {
4077 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
4078 			    "Timeout.   %s: mb=%p tmo=%x Sleep. embedded %d",
4079 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
4080 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
4081 			    (mb4->un.varSLIConfig.be.embedded)));
4082 		} else {
4083 			if (mb->mbxCommand != MBX_DOWN_LOAD
4084 			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
4085 				EMLXS_MSGF(EMLXS_CONTEXT,
4086 				    &emlxs_mbox_detail_msg,
4087 				    "Completed.   %s: mb=%p status=%x Sleep. "
4088 				    "embedded %d",
4089 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
4090 				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
4091 				    (mb4->un.varSLIConfig.be.embedded)));
4092 			}
4093 		}
4094 		break;
4095 	}
4096 
4097 	return (rc);
4098 
4099 } /* emlxs_sli4_issue_mbox_cmd() */
4100 
4101 
4102 
4103 /*ARGSUSED*/
4104 static uint32_t
emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t * hba,MAILBOXQ * mbq,int32_t flag,uint32_t tmo)4105 emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
4106     uint32_t tmo)
4107 {
4108 	emlxs_port_t	*port = &PPORT;
4109 	MAILBOX		*mb;
4110 	mbox_rsp_hdr_t	*hdr_rsp;
4111 	MATCHMAP	*mp;
4112 	uint32_t	rc;
4113 	uint32_t	tmo_local;
4114 
4115 	mb = (MAILBOX *)mbq;
4116 
4117 	mb->mbxStatus = MBX_SUCCESS;
4118 	rc = MBX_SUCCESS;
4119 
4120 	if (tmo < 30) {
4121 		tmo = 30;
4122 	}
4123 
4124 	/* Convert tmo seconds to 10 millisecond tics */
4125 	tmo_local = tmo * 100;
4126 
4127 	flag = MBX_POLL;
4128 
4129 	/* Check for hardware error */
4130 	if (hba->flag & FC_HARDWARE_ERROR) {
4131 		mb->mbxStatus = MBX_HARDWARE_ERROR;
4132 		return (MBX_HARDWARE_ERROR);
4133 	}
4134 
4135 	/* Initialize mailbox area */
4136 	emlxs_mb_init(hba, mbq, flag, tmo);
4137 
4138 	switch (flag) {
4139 
4140 	case MBX_POLL:
4141 
4142 		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
4143 
4144 		/* Clean up the mailbox area */
4145 		if (rc == MBX_TIMEOUT) {
4146 			hba->flag |= FC_MBOX_TIMEOUT;
4147 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
4148 			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
4149 
4150 		} else {
4151 			/* Process the result */
4152 			if (!(mbq->flag & MBQ_PASSTHRU)) {
4153 				if (mbq->mbox_cmpl) {
4154 					(void) (mbq->mbox_cmpl)(hba, mbq);
4155 				}
4156 			}
4157 
4158 			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
4159 		}
4160 
4161 		mp = (MATCHMAP *)mbq->nonembed;
4162 		if (mp) {
4163 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
4164 			if (hdr_rsp->status) {
4165 				EMLXS_MSGF(EMLXS_CONTEXT,
4166 				    &emlxs_mbox_detail_msg,
4167 				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
4168 				    emlxs_mb_cmd_xlate(mb->mbxCommand),
4169 				    hdr_rsp->status, hdr_rsp->extra_status);
4170 
4171 				mb->mbxStatus = MBX_NONEMBED_ERROR;
4172 			}
4173 		}
4174 		rc = mb->mbxStatus;
4175 
4176 		break;
4177 	}
4178 
4179 	return (rc);
4180 
4181 } /* emlxs_sli4_issue_mbox_cmd4quiesce() */
4182 
4183 
4184 
#ifdef SFCT_SUPPORT
/*
 * emlxs_sli4_prep_fct_iocb()
 *
 * Prepare a work queue entry (WQE) for an FC target-mode (COMSTAR/FCT)
 * request.  Three request types are handled, selected by the IOCB
 * command already stored in cmd_sbp->iocbq:
 *
 *   CMD_ABORT_XRI_CX    - abort of a previously issued exchange
 *   CMD_FCP_TRSP64_CX   - FCP response (status) frame
 *   otherwise           - FCP data phase (TRECEIVE for writes,
 *                         TSEND for reads, with auto-response when the
 *                         whole expected transfer fits in one buffer)
 *
 * Returns IOERR_SUCCESS on success, FC_TRAN_BUSY when no exchange is
 * available, or 0xff when the RPI/XRI lookup or registration fails
 * (pkt state is set to IOSTAT_LOCAL_REJECT in that case).
 *
 * Fix applied: the ABORT path assigned 'wqe = &iocbq->wqe' twice in a
 * row; the redundant second assignment has been removed.
 */
/*ARGSUSED*/
extern uint32_t
emlxs_sli4_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp, int channel)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	fct_cmd_t *fct_cmd;
	stmf_data_buf_t *dbuf;
	scsi_task_t *fct_task;
	fc_packet_t *pkt;
	CHANNEL *cp;
	XRIobj_t *xrip;
	emlxs_node_t *ndlp;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	ULP_SGE64 stage_sge;
	ULP_SGE64 *sge;
	RPIobj_t *rpip;
	int32_t	sge_size;
	uint64_t sge_addr;
	uint32_t did;
	uint32_t timeout;

	ddi_dma_cookie_t *cp_cmd;

	pkt = PRIV2PKT(cmd_sbp);

	cp = (CHANNEL *)cmd_sbp->channel;

	iocbq = &cmd_sbp->iocbq;
	iocb = &iocbq->iocb;

	did = cmd_sbp->did;
	if (iocb->ULPCOMMAND == CMD_ABORT_XRI_CX) {

		ndlp = cmd_sbp->node;
		rpip = EMLXS_NODE_TO_RPI(port, ndlp);

		if (!rpip) {
			/* Use the fabric rpi */
			rpip = port->vpip->fabric_rpip;
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, cmd_sbp, rpip,
		    EMLXS_XRI_SOL_BLS_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "FCT Abort Request: xri=%d iotag=%d sbp=%p rxid=%x",
		    xrip->XRI, xrip->iotag, cmd_sbp, pkt->pkt_cmd_fhdr.rx_id);

		cmd_sbp->xrip = xrip;

		cp->ulpSendCmd++;

		/* Initalize iocbq */
		iocbq->port = (void *)port;
		iocbq->node = (void *)ndlp;
		iocbq->channel = (void *)cp;

		/*
		 * Don't give the abort priority, we want the IOCB
		 * we are aborting to be processed first.
		 */
		iocbq->flag |= IOCB_SPECIAL;

		wqe = &iocbq->wqe;
		bzero((void *)wqe, sizeof (emlxs_wqe_t));

		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
		wqe->RequestTag = xrip->iotag;
		wqe->AbortTag = pkt->pkt_cmd_fhdr.rx_id;
		wqe->Command = CMD_ABORT_XRI_CX;
		wqe->Class = CLASS3;
		wqe->CQId = 0xffff;
		wqe->CmdType = WQE_TYPE_ABORT;

		/* If link is down, abort locally only (IA=1) */
		if (hba->state >= FC_LINK_UP) {
			wqe->un.Abort.IA = 0;
		} else {
			wqe->un.Abort.IA = 1;
		}

		/* Set the pkt timer */
		cmd_sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
		    ((pkt->pkt_timeout > 0xff) ? 0 : 10);

		return (IOERR_SUCCESS);

	} else if (iocb->ULPCOMMAND == CMD_FCP_TRSP64_CX) {

		timeout = pkt->pkt_timeout;
		ndlp = cmd_sbp->node;
		if (!ndlp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Unable to find rpi. did=0x%x", did);

			emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		cp->ulpSendCmd++;

		/* Initalize iocbq */
		iocbq->port = (void *)port;
		iocbq->node = (void *)ndlp;
		iocbq->channel = (void *)cp;

		wqe = &iocbq->wqe;
		bzero((void *)wqe, sizeof (emlxs_wqe_t));

		/* Bind this response to the exchange opened by the unsol cmd */
		xrip = emlxs_sli4_register_xri(port, cmd_sbp,
		    pkt->pkt_cmd_fhdr.rx_id, did);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Unable to register xri %x. did=0x%x",
			    pkt->pkt_cmd_fhdr.rx_id, did);

			emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		cmd_sbp->iotag = xrip->iotag;
		cmd_sbp->channel = cp;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
		cp_cmd = pkt->pkt_cmd_cookie;
#else
		cp_cmd  = &pkt->pkt_cmd_cookie;
#endif	/* >= EMLXS_MODREV3 */

		sge_size = pkt->pkt_cmdlen;
		/* Make size a multiple of 4 */
		if (sge_size & 3) {
			sge_size = (sge_size + 3) & 0xfffffffc;
		}
		sge_addr = cp_cmd->dmac_laddress;
		sge = xrip->SGList->virt;

		/* Build the single response SGE in host order, then swap */
		stage_sge.addrHigh = PADDR_HI(sge_addr);
		stage_sge.addrLow = PADDR_LO(sge_addr);
		stage_sge.length = sge_size;
		stage_sge.offset = 0;
		stage_sge.type = 0;
		stage_sge.last = 1;

		/* Copy staged SGE into SGL */
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
		    (uint8_t *)sge, sizeof (ULP_SGE64));

		/* Words  0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = stage_sge.addrHigh;
		wqe->un.FcpCmd.Payload.addrLow = stage_sge.addrLow;
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = sge_size;
		wqe->un.FcpCmd.PayloadLength = sge_size;

		/*  Word  6 */
		wqe->ContextTag = ndlp->nlp_Rpi;
		wqe->XRITag = xrip->XRI;

		/*  Word  7 */
		wqe->Command  = iocb->ULPCOMMAND;
		wqe->Class = cmd_sbp->class;
		wqe->ContextType = WQE_RPI_CONTEXT;
		wqe->Timer = ((timeout > 0xff) ? 0 : timeout);

		/*  Word  8 */
		wqe->AbortTag = 0;

		/*  Word  9 */
		wqe->RequestTag = xrip->iotag;
		wqe->OXId = (uint16_t)xrip->rx_id;

		/*  Word  10 */
		if (xrip->flag & EMLXS_XRI_BUSY) {
			wqe->XC = 1;
		}

		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->QOSd = 1;
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		/*  Word  11 */
		wqe->CmdType = WQE_TYPE_TRSP;
		wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */

		/* Set the pkt timer */
		cmd_sbp->ticks = hba->timer_tics + timeout +
		    ((timeout > 0xff) ? 0 : 10);

		if (pkt->pkt_cmdlen) {
			EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
			    DDI_DMA_SYNC_FORDEV);
		}

		return (IOERR_SUCCESS);
	}

	/* FCP data phase (TSEND/TRECEIVE) */
	fct_cmd = cmd_sbp->fct_cmd;
	did = fct_cmd->cmd_rportid;
	dbuf = cmd_sbp->fct_buf;
	fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
	ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
	if (!ndlp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find rpi. did=0x%x", did);

		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_INVALID_RPI, 0);
		return (0xff);
	}


	/* Initalize iocbq */
	iocbq->port = (void *) port;
	iocbq->node = (void *)ndlp;
	iocbq->channel = (void *) cp;

	wqe = &iocbq->wqe;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));

	xrip = cmd_sbp->xrip;
	if (!xrip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find xri. did=0x%x", did);

		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_NO_XRI, 0);
		return (0xff);
	}

	if (emlxs_sli4_register_xri(port, cmd_sbp,
	    xrip->XRI, ndlp->nlp_DID) == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to register xri. did=0x%x", did);

		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_NO_XRI, 0);
		return (0xff);
	}
	cmd_sbp->iotag = xrip->iotag;
	cmd_sbp->channel = cp;

	/* Timer: 2*RATOV (min 60s) when enabled, else effectively disabled */
	if (cfg[CFG_TIMEOUT_ENABLE].current) {
		timeout =
		    ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
	} else {
		timeout = 0x80000000;
	}
	cmd_sbp->ticks =
	    hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);


	iocb->ULPCT = 0;
	if (fct_task->task_flags & TF_WRITE_DATA) {
		iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
		wqe->CmdType = WQE_TYPE_TRECEIVE;		/* Word 11 */

	} else { /* TF_READ_DATA */

		iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;
		wqe->CmdType = WQE_TYPE_TSEND;			/* Word 11 */

		if ((dbuf->db_data_size >=
		    fct_task->task_expected_xfer_length)) {
			/* enable auto-rsp AP feature */
			wqe->AR = 0x1;
			iocb->ULPCT = 0x1; /* for cmpl */
		}
	}

	(void) emlxs_sli4_fct_bde_setup(port, cmd_sbp);

	/*  Word  6 */
	wqe->ContextTag = ndlp->nlp_Rpi;
	wqe->XRITag = xrip->XRI;

	/*  Word  7 */
	wqe->Command  = iocb->ULPCOMMAND;
	wqe->Class = cmd_sbp->class;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->Timer = ((timeout > 0xff) ? 0 : timeout);
	wqe->PU = 1;

	/*  Word  8 */
	wqe->AbortTag = 0;

	/*  Word  9 */
	wqe->RequestTag = xrip->iotag;
	wqe->OXId = (uint16_t)fct_cmd->cmd_oxid;

	/*  Word  10 */
	if (xrip->flag & EMLXS_XRI_BUSY) {
		wqe->XC = 1;
	}

	if (!(hba->sli.sli4.param.PHWQ)) {
		wqe->QOSd = 1;
		wqe->DBDE = 1; /* Data type for BDE 0 */
	}

	/*  Word  11 */
	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */

	/*  Word  12 */
	wqe->CmdSpecific = dbuf->db_data_size;

	return (IOERR_SUCCESS);

} /* emlxs_sli4_prep_fct_iocb() */
#endif /* SFCT_SUPPORT */
4511 
4512 
/*
 * emlxs_sli4_prep_fcp_iocb()
 *
 * Prepare a work queue entry (WQE) for an initiator-mode FCP command.
 * Looks up the remote port's RPI, allocates an exchange (XRI), builds
 * the SGL via emlxs_sli4_bde_setup(), and fills in the WQE fields
 * (command type, context, timer, class) from the fc_packet_t.
 *
 * Returns FC_SUCCESS when the WQE is ready to be posted, FC_TRAN_BUSY
 * when no exchange or SGL resources are available, or 0xff when the
 * target node has no RPI (pkt state set to IOSTAT_LOCAL_REJECT /
 * IOERR_INVALID_RPI).
 */
/*ARGSUSED*/
extern uint32_t
emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	CHANNEL *cp;
	RPIobj_t *rpip;
	XRIobj_t *xrip;
	emlxs_wqe_t *wqe;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *node;
	uint16_t iotag;
	uint32_t did;

	pkt = PRIV2PKT(sbp);
	/* Destination FC address from the frame header (24-bit D_ID) */
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
	cp = &hba->chan[channel];

	iocbq = &sbp->iocbq;
	iocbq->channel = (void *) cp;
	iocbq->port = (void *) port;

	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));

	/* Find target node object */
	node = (NODELIST *)iocbq->node;
	rpip = EMLXS_NODE_TO_RPI(port, node);

	if (!rpip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find rpi. did=0x%x", did);

		emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_INVALID_RPI, 0);
		return (0xff);
	}

	sbp->channel = cp;
	/* Next allocate an Exchange for this command */
	xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
	    EMLXS_XRI_SOL_FCP_TYPE);

	if (!xrip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate exchange. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}
	/* Note: alloc_xri filled in sbp->iotag; no BPL buffer on SLI4 */
	sbp->bmp = NULL;
	iotag = sbp->iotag;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: Prep xri=%d iotag=%d oxid=%x rpi=%d",
	    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);
#endif /* DEBUG_FASTPATH */

	/* Indicate this is a FCP cmd */
	iocbq->flag |= IOCB_FCP_CMD;

	/* Build the SGL; on failure release the exchange we just took */
	if (emlxs_sli4_bde_setup(port, sbp)) {
		emlxs_sli4_free_xri(port, sbp, xrip, 1);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	/* DEBUG */
#ifdef DEBUG_FCP
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: SGLaddr virt %p phys %p size %d", xrip->SGList->virt,
	    xrip->SGList->phys, pkt->pkt_datalen);
	emlxs_data_dump(port, "FCP: SGL",
	    (uint32_t *)xrip->SGList->virt, 32, 0);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: CMD virt %p len %d:%d:%d",
	    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen, pkt->pkt_datalen);
	emlxs_data_dump(port, "FCP: CMD", (uint32_t *)pkt->pkt_cmd, 10, 0);
#endif /* DEBUG_FCP */

	/* Flush the SGL to the device before posting the WQE */
	EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
	    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);

	/* if device is FCP-2 device, set the following bit */
	/* that says to run the FC-TAPE protocol. */
	if (node->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		wqe->ERP = 1;
	}

	/*
	 * Select the command flavor from the transfer direction.
	 * NOTE(review): the zero-datalen (ICMND) case also uses
	 * WQE_TYPE_FCP_DATA_IN as its CmdType -- presumably intended,
	 * but worth confirming against the SLI-4 WQE spec.
	 */
	if (pkt->pkt_datalen == 0) {
		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
		wqe->Command = CMD_FCP_ICMND64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
		wqe->Command = CMD_FCP_IREAD64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
		wqe->PU = PARM_XFER_CHECK;
	} else {
		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
		wqe->Command = CMD_FCP_IWRITE64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_OUT;
	}
	wqe->un.FcpCmd.TotalTransferCount = pkt->pkt_datalen;

	if (!(hba->sli.sli4.param.PHWQ)) {
		wqe->DBDE = 1; /* Data type for BDE 0 */
	}
	wqe->ContextTag = rpip->RPI;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->XRITag = xrip->XRI;
	/* Timer field is 8 bits; disable HW timeout for larger values */
	wqe->Timer =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

	/* Continuously-increasing sequence count, if requested */
	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->RequestTag = iotag;
	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */

	return (FC_SUCCESS);
} /* emlxs_sli4_prep_fcp_iocb() */
4653 
4654 
4655 /*ARGSUSED*/
4656 static uint32_t
emlxs_sli4_prep_ip_iocb(emlxs_port_t * port,emlxs_buf_t * sbp)4657 emlxs_sli4_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
4658 {
4659 	return (FC_TRAN_BUSY);
4660 
4661 } /* emlxs_sli4_prep_ip_iocb() */
4662 
4663 
/*
 * emlxs_sli4_prep_els_iocb()
 *
 * Prepare a work queue entry for an ELS frame.  Two paths:
 *
 *   FC_PKT_OUTBOUND  - ELS response (XMIT_ELS_RSP64_CX): binds to the
 *                      exchange opened by the unsolicited request.
 *   otherwise        - ELS request (ELS_REQUEST64_CR): allocates a new
 *                      exchange and builds a two-entry SGL (command
 *                      payload + response payload).
 *
 * FLOGI/FDISC/LOGO/PLOGI get per-command context and FIP handling.
 * When the WQE ends up with a VPI context, a reserved RPI is attached
 * so the firmware can complete the exchange before a real RPI exists.
 *
 * Returns FC_SUCCESS, FC_TRAN_BUSY (no exchange available), or 0xff
 * on XRI/RPI lookup failure (pkt state set to IOSTAT_LOCAL_REJECT).
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	FCFIobj_t *fcfp;
	RPIobj_t *reserved_rpip = NULL;
	RPIobj_t *rpip = NULL;
	XRIobj_t *xrip;
	CHANNEL *cp;
	uint32_t did;
	uint32_t cmd;
	ULP_SGE64 stage_sge;
	ULP_SGE64 *sge;
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_resp;
	emlxs_node_t *node;

	pkt = PRIV2PKT(sbp);
	/* Destination FC address from the frame header (24-bit D_ID) */
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));
	cp = &hba->chan[hba->channel_els];

	/* Initalize iocbq */
	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	sbp->channel = cp;
	sbp->bmp = NULL;

	/* DMA cookie access differs across DDI revisions */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cp_cmd = pkt->pkt_cmd_cookie;
	cp_resp = pkt->pkt_resp_cookie;
#else
	cp_cmd  = &pkt->pkt_cmd_cookie;
	cp_resp = &pkt->pkt_resp_cookie;
#endif	/* >= EMLXS_MODREV3 */

	/* CMD payload */
	sge = &stage_sge;
	sge->addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
	sge->addrLow = PADDR_LO(cp_cmd->dmac_laddress);
	sge->length = pkt->pkt_cmdlen;
	sge->offset = 0;
	sge->type = 0;

	/* ELS command code is the first word of the payload */
	cmd = *((uint32_t *)pkt->pkt_cmd);
	cmd &= ELS_CMD_MASK;

	/* Initalize iocb */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* ELS Response */

		sbp->xrip = 0;
		/* Bind to the exchange opened by the unsolicited request */
		xrip = emlxs_sli4_register_xri(port, sbp,
		    pkt->pkt_cmd_fhdr.rx_id, did);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find XRI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		rpip = xrip->rpip;

		if (!rpip) {
			/* This means that we had a node registered */
			/* when the unsol request came in but the node */
			/* has since been unregistered. */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find RPI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: Prep xri=%d iotag=%d oxid=%x rpi=%d",
		    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);

		iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
		wqe->Command = CMD_XMIT_ELS_RSP64_CX;
		wqe->CmdType = WQE_TYPE_GEN;
		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		wqe->un.ElsRsp.Payload.addrHigh = sge->addrHigh;
		wqe->un.ElsRsp.Payload.addrLow = sge->addrLow;
		wqe->un.ElsRsp.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
		/* NOTE(review): uses the ElsCmd union member here -- */
		/* presumably overlays the same WQE word; confirm vs spec */
		wqe->un.ElsCmd.PayloadLength = pkt->pkt_cmdlen;

		wqe->un.ElsRsp.RemoteId = did;
		wqe->PU = 0x3;
		wqe->OXId = xrip->rx_id;

		sge->last = 1;
		/* Now sge is fully staged */

		sge = xrip->SGList->virt;
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
		    sizeof (ULP_SGE64));

		/* Fabric RPI means no real login yet; use the VPI context */
		if (rpip->RPI == FABRIC_RPI) {
			wqe->ContextTag = port->vpip->VPI;
			wqe->ContextType = WQE_VPI_CONTEXT;
		} else {
			wqe->ContextTag = rpip->RPI;
			wqe->ContextType = WQE_RPI_CONTEXT;
		}

		/* FLOGI ACC (pt-to-pt): advertise the well-known S_ID */
		if ((cmd == ELS_CMD_ACC) && (sbp->ucmd == ELS_CMD_FLOGI)) {
			wqe->un.ElsCmd.SP = 1;
			wqe->un.ElsCmd.LocalId = 0xFFFFFE;
		}

	} else {
		/* ELS Request */

		fcfp = port->vpip->vfip->fcfp;
		node = (emlxs_node_t *)iocbq->node;
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (!rpip) {
			/* Use the fabric rpi */
			rpip = port->vpip->fabric_rpip;
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
		    EMLXS_XRI_SOL_ELS_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: Prep xri=%d iotag=%d rpi=%d",
		    xrip->XRI, xrip->iotag, rpip->RPI);

		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;
		wqe->Command = CMD_ELS_REQUEST64_CR;
		wqe->CmdType = WQE_TYPE_ELS;
		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		wqe->un.ElsCmd.Payload.addrHigh = sge->addrHigh;
		wqe->un.ElsCmd.Payload.addrLow = sge->addrLow;
		wqe->un.ElsCmd.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;

		wqe->un.ElsCmd.RemoteId = did;
		/* Timer field is 8 bits; disable HW timeout if larger */
		wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

		/* setup for rsp */
		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
		iocb->ULPPU = 1;	/* Wd4 is relative offset */

		sge->last = 0;

		/* SGL entry 0: command payload */
		sge = xrip->SGList->virt;
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
		    sizeof (ULP_SGE64));

		wqe->un.ElsCmd.PayloadLength =
		    pkt->pkt_cmdlen; /* Byte offset of rsp data */

		/* RSP payload */
		sge = &stage_sge;
		sge->addrHigh = PADDR_HI(cp_resp->dmac_laddress);
		sge->addrLow = PADDR_LO(cp_resp->dmac_laddress);
		sge->length = pkt->pkt_rsplen;
		sge->offset = 0;
		sge->last = 1;
		/* Now sge is fully staged */

		/* SGL entry 1: response payload */
		sge = xrip->SGList->virt;
		sge++;
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
		    sizeof (ULP_SGE64));
#ifdef DEBUG_ELS
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: SGLaddr virt %p phys %p",
		    xrip->SGList->virt, xrip->SGList->phys);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: PAYLOAD virt %p phys %p",
		    pkt->pkt_cmd, cp_cmd->dmac_laddress);
		emlxs_data_dump(port, "ELS: SGL",
		    (uint32_t *)xrip->SGList->virt, 12, 0);
#endif /* DEBUG_ELS */

		/* Per-command context and FIP handling */
		switch (cmd) {
		case ELS_CMD_FLOGI:
			wqe->un.ElsCmd.SP = 1;

			/* if_type 0 uses the FCFI context for FLOGI */
			if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) ==
			    SLI_INTF_IF_TYPE_0) {
				wqe->ContextTag = fcfp->FCFI;
				wqe->ContextType = WQE_FCFI_CONTEXT;
			} else {
				wqe->ContextTag = port->vpip->VPI;
				wqe->ContextType = WQE_VPI_CONTEXT;
			}

			if (hba->flag & FC_FIP_SUPPORTED) {
				wqe->CmdType |= WQE_TYPE_MASK_FIP;
			}

			if (hba->topology == TOPOLOGY_LOOP) {
				wqe->un.ElsCmd.LocalId = port->did;
			}

			wqe->ELSId = WQE_ELSID_FLOGI;
			break;
		case ELS_CMD_FDISC:
			wqe->un.ElsCmd.SP = 1;
			wqe->ContextTag = port->vpip->VPI;
			wqe->ContextType = WQE_VPI_CONTEXT;

			if (hba->flag & FC_FIP_SUPPORTED) {
				wqe->CmdType |= WQE_TYPE_MASK_FIP;
			}

			wqe->ELSId = WQE_ELSID_FDISC;
			break;
		case ELS_CMD_LOGO:
			if ((did == FABRIC_DID) &&
			    (hba->flag & FC_FIP_SUPPORTED)) {
				wqe->CmdType |= WQE_TYPE_MASK_FIP;
			}

			wqe->ContextTag = port->vpip->VPI;
			wqe->ContextType = WQE_VPI_CONTEXT;
			wqe->ELSId = WQE_ELSID_LOGO;
			break;
		case ELS_CMD_PLOGI:
			if (rpip->RPI == FABRIC_RPI) {
				if (hba->flag & FC_PT_TO_PT) {
					wqe->un.ElsCmd.SP = 1;
					wqe->un.ElsCmd.LocalId = port->did;
				}

				wqe->ContextTag = port->vpip->VPI;
				wqe->ContextType = WQE_VPI_CONTEXT;
			} else {
				wqe->ContextTag = rpip->RPI;
				wqe->ContextType = WQE_RPI_CONTEXT;
			}

			wqe->ELSId = WQE_ELSID_PLOGI;
			break;
		default:
			if (rpip->RPI == FABRIC_RPI) {
				wqe->ContextTag = port->vpip->VPI;
				wqe->ContextType = WQE_VPI_CONTEXT;
			} else {
				wqe->ContextTag = rpip->RPI;
				wqe->ContextType = WQE_RPI_CONTEXT;
			}

			wqe->ELSId = WQE_ELSID_CMD;
			break;
		}

#ifdef SFCT_SUPPORT
		/* This allows fct to abort the request */
		if (sbp->fct_cmd) {
			sbp->fct_cmd->cmd_oxid = xrip->XRI;
			sbp->fct_cmd->cmd_rxid = 0xFFFF;
		}
#endif /* SFCT_SUPPORT */
	}

	/* VPI-context WQEs need a reserved RPI for firmware completion */
	if (wqe->ContextType == WQE_VPI_CONTEXT) {
		reserved_rpip = emlxs_rpi_reserve_notify(port, did, xrip);

		if (!reserved_rpip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to alloc reserved RPI. rxid=%x. Rejecting.",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		/* Store the reserved rpi */
		if (wqe->Command == CMD_ELS_REQUEST64_CR) {
			wqe->OXId = reserved_rpip->RPI;
		} else {
			wqe->CmdSpecific = reserved_rpip->RPI;
		}
	}

	/* Flush the SGL to the device before posting the WQE */
	EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
	    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);

	/* Continuously-increasing sequence count, if requested */
	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->XRITag = xrip->XRI;
	wqe->RequestTag = xrip->iotag;
	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
	return (FC_SUCCESS);

} /* emlxs_sli4_prep_els_iocb() */
5001 
5002 
/*
 * emlxs_sli4_prep_ct_iocb()
 *
 * Prepare a work queue entry for a CT (Common Transport / fabric
 * services) frame.  Two paths:
 *
 *   FC_PKT_OUTBOUND  - CT response (XMIT_SEQUENCE64_CR): binds to the
 *                      exchange opened by the unsolicited request.
 *   otherwise        - CT request (GEN_REQUEST64_CR): allocates a new
 *                      exchange against the node's RPI.
 *
 * Returns FC_SUCCESS, FC_TRAN_BUSY (no exchange / SGL resources), or
 * 0xff on XRI/RPI lookup failure (pkt state set to IOSTAT_LOCAL_REJECT).
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	NODELIST *node = NULL;
	CHANNEL *cp;
	RPIobj_t *rpip;
	XRIobj_t *xrip;
	uint32_t did;

	pkt = PRIV2PKT(sbp);
	/* Destination FC address from the frame header (24-bit D_ID) */
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));

	cp = &hba->chan[hba->channel_ct];

	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	sbp->bmp = NULL;
	sbp->channel = cp;

	/* Initalize wqe */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* CT Response */

		sbp->xrip = 0;
		/* Bind to the exchange opened by the unsolicited request */
		xrip = emlxs_sli4_register_xri(port, sbp,
		    pkt->pkt_cmd_fhdr.rx_id, did);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find XRI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		rpip = xrip->rpip;

		if (!rpip) {
			/* This means that we had a node registered */
			/* when the unsol request came in but the node */
			/* has since been unregistered. */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find RPI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: Prep xri=%d iotag=%d oxid=%x rpi=%d",
		    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);

		if (emlxs_sli4_bde_setup(port, sbp)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		/* Word 3 is reserved on non-BE (Lancer-class) chips */
		if (!(hba->model_info.chip & EMLXS_BE_CHIPS)) {
			wqe->un.XmitSeq.Rsvd0 = 0; /* Word3 now reserved */
		}

		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CR;
		wqe->CmdType = WQE_TYPE_GEN;
		wqe->Command = CMD_XMIT_SEQUENCE64_CR;
		wqe->LenLoc = 2;

		/* Loopback responses keep the exchange open (xo) */
		if (((SLI_CT_REQUEST *) pkt->pkt_cmd)->CommandResponse.bits.
		    CmdRsp == (LE_SWAP16(SLI_CT_LOOPBACK))) {
			wqe->un.XmitSeq.xo = 1;
		} else {
			wqe->un.XmitSeq.xo = 0;
		}

		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
			wqe->un.XmitSeq.ls = 1;
		}

		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
			wqe->un.XmitSeq.si = 1;
		}

		/* Frame header fields carried into the WQE */
		wqe->un.XmitSeq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
		wqe->un.XmitSeq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
		wqe->un.XmitSeq.Type  = pkt->pkt_cmd_fhdr.type;
		wqe->OXId = xrip->rx_id;
		wqe->XC = 0; /* xri_tag is a new exchange */
		wqe->CmdSpecific = wqe->un.GenReq.Payload.tus.f.bdeSize;

	} else {
		/* CT Request */

		node = (emlxs_node_t *)iocbq->node;
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (!rpip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find rpi. did=0x%x rpi=%d",
			    did, node->nlp_Rpi);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
		    EMLXS_XRI_SOL_CT_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: Prep xri=%d iotag=%d oxid=%x rpi=%d",
		    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);

		/* Build the SGL; on failure release the exchange */
		if (emlxs_sli4_bde_setup(port, sbp)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

			emlxs_sli4_free_xri(port, sbp, xrip, 1);
			return (FC_TRAN_BUSY);
		}

		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CR;
		wqe->CmdType = WQE_TYPE_GEN;
		wqe->Command = CMD_GEN_REQUEST64_CR;
		wqe->un.GenReq.la = 1;
		wqe->un.GenReq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
		wqe->un.GenReq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
		wqe->un.GenReq.Type  = pkt->pkt_cmd_fhdr.type;

#ifdef DEBUG_CT
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: SGLaddr virt %p phys %p", xrip->SGList->virt,
		    xrip->SGList->phys);
		emlxs_data_dump(port, "CT: SGL", (uint32_t *)xrip->SGList->virt,
		    12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: CMD virt %p len %d:%d",
		    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen);
		emlxs_data_dump(port, "CT: DATA", (uint32_t *)pkt->pkt_cmd,
		    20, 0);
#endif /* DEBUG_CT */

#ifdef SFCT_SUPPORT
		/* This allows fct to abort the request */
		if (sbp->fct_cmd) {
			sbp->fct_cmd->cmd_oxid = xrip->XRI;
			sbp->fct_cmd->cmd_rxid = 0xFFFF;
		}
#endif /* SFCT_SUPPORT */
	}

	/* Setup for rsp */
	iocb->un.genreq64.w5.hcsw.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
	iocb->un.genreq64.w5.hcsw.Type  = pkt->pkt_cmd_fhdr.type;
	iocb->un.genreq64.w5.hcsw.Dfctl  = pkt->pkt_cmd_fhdr.df_ctl;
	iocb->ULPPU = 1;	/* Wd4 is relative offset */

	/* Flush the SGL to the device before posting the WQE */
	EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
	    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);

	wqe->ContextTag = rpip->RPI;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->XRITag = xrip->XRI;
	/* Timer field is 8 bits; disable HW timeout for larger values */
	wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

	/* Continuously-increasing sequence count, if requested */
	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->RequestTag = xrip->iotag;
	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
	return (FC_SUCCESS);

} /* emlxs_sli4_prep_ct_iocb() */
5222 
5223 
5224 /*ARGSUSED*/
5225 static int
emlxs_sli4_read_eq(emlxs_hba_t * hba,EQ_DESC_t * eq)5226 emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
5227 {
5228 	uint32_t *ptr;
5229 	EQE_u eqe;
5230 	int rc = 0;
5231 	off_t offset;
5232 
5233 	mutex_enter(&EMLXS_PORT_LOCK);
5234 
5235 	ptr = eq->addr.virt;
5236 	ptr += eq->host_index;
5237 
5238 	offset = (off_t)((uint64_t)((unsigned long)
5239 	    eq->addr.virt) -
5240 	    (uint64_t)((unsigned long)
5241 	    hba->sli.sli4.slim2.virt));
5242 
5243 	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
5244 	    4096, DDI_DMA_SYNC_FORKERNEL);
5245 
5246 	eqe.word = *ptr;
5247 	eqe.word = BE_SWAP32(eqe.word);
5248 
5249 	if ((eqe.word & EQE_VALID) == eq->qe_valid) {
5250 		rc = 1;
5251 	}
5252 
5253 	mutex_exit(&EMLXS_PORT_LOCK);
5254 
5255 	return (rc);
5256 
5257 } /* emlxs_sli4_read_eq */
5258 
5259 
5260 static void
emlxs_sli4_poll_intr(emlxs_hba_t * hba)5261 emlxs_sli4_poll_intr(emlxs_hba_t *hba)
5262 {
5263 	int rc = 0;
5264 	int i;
5265 	char arg[] = {0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7};
5266 
5267 	/* Check attention bits once and process if required */
5268 
5269 	for (i = 0; i < hba->intr_count; i++) {
5270 		rc = emlxs_sli4_read_eq(hba, &hba->sli.sli4.eq[i]);
5271 		if (rc == 1) {
5272 			break;
5273 		}
5274 	}
5275 
5276 	if (rc != 1) {
5277 		return;
5278 	}
5279 
5280 	(void) emlxs_sli4_msi_intr((char *)hba,
5281 	    (char *)(unsigned long)arg[i]);
5282 
5283 	return;
5284 
5285 } /* emlxs_sli4_poll_intr() */
5286 
5287 
/*
 * Handle an asynchronous event completion queue entry.
 *
 * Decodes the async event code and either logs the event, forwards
 * FCoE/FC link and FIP state changes into the FCF state machine, or
 * records port data (temperature, QOS link speed, misconfigured optics).
 *
 * hba - adapter softstate
 * cqe - async event CQE (already copied out of the CQ by the caller)
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_async_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
{
	emlxs_port_t *port = &PPORT;
	uint8_t status;

	/* Save the event tag */
	/* A repeated or skipped tag means events were merged or lost */
	if (hba->link_event_tag == cqe->un.link.event_tag) {
		HBASTATS.LinkMultiEvent++;
	} else if (hba->link_event_tag + 1 < cqe->un.link.event_tag) {
		HBASTATS.LinkMultiEvent++;
	}
	hba->link_event_tag = cqe->un.link.event_tag;

	switch (cqe->event_code) {
	case ASYNC_EVENT_CODE_FCOE_LINK_STATE:
		HBASTATS.LinkEvent++;

		switch (cqe->un.link.link_status) {
		case ASYNC_EVENT_PHYS_LINK_UP:
			/* Physical link up is logged only; the logical */
			/* link event drives the state machine */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: PHYS_LINK_UP. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);
			break;

		case ASYNC_EVENT_LOGICAL_LINK_UP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: LOGICAL_LINK_UP. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);

			emlxs_sli4_handle_fcoe_link_event(hba, cqe);
			break;

		case ASYNC_EVENT_PHYS_LINK_DOWN:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: PHYS_LINK_DOWN. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);

			emlxs_sli4_handle_fcoe_link_event(hba, cqe);
			break;

		case ASYNC_EVENT_LOGICAL_LINK_DOWN:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: LOGICAL_LINK_DOWN. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);

			emlxs_sli4_handle_fcoe_link_event(hba, cqe);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: Unknown link status=%d event=%x",
			    cqe->un.link.link_status, HBASTATS.LinkEvent);
			break;
		}
		break;
	case ASYNC_EVENT_CODE_FCOE_FIP:
		/* FIP events drive the FCF state machine */
		switch (cqe->un.fcoe.evt_type) {
		case ASYNC_EVENT_NEW_FCF_DISC:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCF_FOUND %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_found_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		case ASYNC_EVENT_FCF_TABLE_FULL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCFTAB_FULL %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_full_notify(port);
			break;
		case ASYNC_EVENT_FCF_DEAD:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCF_LOST %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_lost_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		case ASYNC_EVENT_VIRT_LINK_CLEAR:
			/* Clear Virtual Link; ref_index is a VPI here, */
			/* so translate it to a port index first */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: CVL %d",
			    cqe->un.fcoe.ref_index);

			(void) emlxs_fcf_cvl_notify(port,
			    emlxs_sli4_vpi_to_index(hba,
			    cqe->un.fcoe.ref_index));
			break;

		case ASYNC_EVENT_FCF_MODIFIED:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCF_CHANGED %d",
			    cqe->un.fcoe.ref_index);

			(void) emlxs_fcf_changed_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: Unknown event type=%d",
			    cqe->un.fcoe.evt_type);
			break;
		}
		break;
	case ASYNC_EVENT_CODE_DCBX:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "DCBX Async Event: type=%d. Not supported.",
		    cqe->event_type);
		break;
	case ASYNC_EVENT_CODE_GRP_5:
		/* Only the QOS link speed report is consumed */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Group 5 Async Event: type=%d.", cqe->event_type);
		if (cqe->event_type == ASYNC_EVENT_QOS_SPEED) {
			hba->qos_linkspeed = cqe->un.qos.qos_link_speed;
		}
		break;
	case ASYNC_EVENT_CODE_FC_EVENT:
		switch (cqe->event_type) {
		case ASYNC_EVENT_FC_LINK_ATT:
			HBASTATS.LinkEvent++;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FC Async Event: Link Attention. event=%x",
			    HBASTATS.LinkEvent);

			emlxs_sli4_handle_fc_link_att(hba, cqe);
			break;
		case ASYNC_EVENT_FC_SHARED_LINK_ATT:
			HBASTATS.LinkEvent++;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FC Async Event: Shared Link Attention. event=%x",
			    HBASTATS.LinkEvent);

			emlxs_sli4_handle_fc_link_att(hba, cqe);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FC Async Event: Unknown event. type=%d event=%x",
			    cqe->event_type, HBASTATS.LinkEvent);
		}
		break;
	case ASYNC_EVENT_CODE_PORT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "SLI Port Async Event: type=%d", cqe->event_type);

		switch (cqe->event_type) {
		case ASYNC_EVENT_PORT_OTEMP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "SLI Port Async Event: Temperature limit exceeded");
			cmn_err(CE_WARN,
			    "^%s%d: Temperature limit exceeded. Fibre channel "
			    "controller temperature %u degrees C",
			    DRIVER_NAME, hba->ddiinst,
			    BE_SWAP32(*(uint32_t *)cqe->un.port.link_status));
			break;

		case ASYNC_EVENT_PORT_NTEMP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "SLI Port Async Event: Temperature returned to "
			    "normal");
			cmn_err(CE_WARN,
			    "^%s%d: Temperature returned to normal",
			    DRIVER_NAME, hba->ddiinst);
			break;

		case ASYNC_EVENT_MISCONFIG_PORT:
			/* Byte-swap the status word in place, then pick */
			/* out the byte for this port's link number */
			*((uint32_t *)cqe->un.port.link_status) =
			    BE_SWAP32(*((uint32_t *)cqe->un.port.link_status));
			status =
			    cqe->un.port.link_status[hba->sli.sli4.link_number];

			switch (status) {
				case 0 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "SLI Port Async Event: link%d misconfig "
				    "functional", hba->sli.sli4.link_number);
				break;

				case 1 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Physical media not "
				    "detected");
				cmn_err(CE_WARN,
				    "^%s%d: Optics faulted/incorrectly "
				    "installed/not installed - Reseat optics, "
				    "if issue not resolved, replace.",
				    DRIVER_NAME, hba->ddiinst);
				break;

				case 2 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Wrong physical "
				    "media detected");
				cmn_err(CE_WARN,
				    "^%s%d: Optics of two types installed - "
				    "Remove one optic or install matching"
				    "pair of optics.",
				    DRIVER_NAME, hba->ddiinst);
				break;

				case 3 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Unsupported "
				    "physical media detected");
				cmn_err(CE_WARN,
				    "^%s%d:  Incompatible optics - Replace "
				    "with compatible optics for card to "
				    "function.",
				    DRIVER_NAME, hba->ddiinst);
				break;

				default :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Physical media "
				    "error, status=%x", status);
				cmn_err(CE_WARN,
				    "^%s%d: Misconfigured port: status=0x%x - "
				    "Check optics on card.",
				    DRIVER_NAME, hba->ddiinst, status);
				break;
			}
			break;
		/* Other port event types are only logged above */
		}

		break;
	case ASYNC_EVENT_CODE_VF:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "VF Async Event: type=%d",
		    cqe->event_type);
		break;
	case ASYNC_EVENT_CODE_MR:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "MR Async Event: type=%d",
		    cqe->event_type);
		break;
	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Unknown Async Event: code=%d type=%d.",
		    cqe->event_code, cqe->event_type);
		break;
	}

} /* emlxs_sli4_process_async_event() */
5538 
5539 
/*
 * Handle a mailbox completion queue entry.
 *
 * Completes the currently active mailbox command: wakes polled waiters,
 * copies the completed MQE back into the caller's MAILBOXQ, syncs and
 * byte-swaps any external DMA buffers, runs the completion callback,
 * and finally attempts to issue the next queued mailbox command.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_mbox_event(emlxs_hba_t *hba, CQE_MBOX_t *cqe)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX4 *mb;
	MATCHMAP *mbox_bp;
	MATCHMAP *mbox_nonembed;
	MAILBOXQ *mbq = NULL;
	uint32_t size;
	uint32_t *iptr;
	int rc;
	off_t offset;

	if (cqe->consumed && !cqe->completed) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Mbox event. Entry consumed but not completed");
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);
	switch (hba->mbox_queue_flag) {
	case 0:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
		    "CQ ENTRY: Mbox event. No mailbox active.");

		mutex_exit(&EMLXS_PORT_LOCK);
		return;

	case MBX_POLL:

		/* Mark mailbox complete, this should wake up any polling */
		/* threads. This can happen if interrupts are enabled while */
		/* a polled mailbox command is outstanding. If we don't set */
		/* MBQ_COMPLETED here, the polling thread may wait until */
		/* timeout error occurs */

		mutex_enter(&EMLXS_MBOX_LOCK);
		mbq = (MAILBOXQ *)hba->mbox_mbq;
		if (mbq) {
			port = (emlxs_port_t *)mbq->port;
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "CQ ENTRY: Mbox event. Completing Polled command.");
			mbq->flag |= MBQ_COMPLETED;
		}
		mutex_exit(&EMLXS_MBOX_LOCK);

		mutex_exit(&EMLXS_PORT_LOCK);
		return;

	case MBX_SLEEP:
	case MBX_NOWAIT:
		/* Check mbox_timer, it acts as a service flag too */
		/* The first to service the mbox queue will clear the timer */
		if (hba->mbox_timer) {
			hba->mbox_timer = 0;

			mutex_enter(&EMLXS_MBOX_LOCK);
			mbq = (MAILBOXQ *)hba->mbox_mbq;
			mutex_exit(&EMLXS_MBOX_LOCK);
		}

		if (!mbq) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Mailbox event. No service required.");
			mutex_exit(&EMLXS_PORT_LOCK);
			return;
		}

		/* The mailbox command is the first field of the MAILBOXQ */
		mb = (MAILBOX4 *)mbq;
		mutex_exit(&EMLXS_PORT_LOCK);
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
		    "CQ ENTRY: Mbox event. Invalid Mailbox flag (%x).",
		    hba->mbox_queue_flag);

		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* Set port context */
	port = (emlxs_port_t *)mbq->port;

	/* Offset of the MQ within the slim2 DMA region */
	offset = (off_t)((uint64_t)((unsigned long)
	    hba->sli.sli4.mq.addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	/* Now that we are the owner, DMA Sync entire MQ if needed */
	/* NOTE(review): sync direction is FORDEV although the MQE is read */
	/* just below - confirm DDI_DMA_SYNC_FORKERNEL is not intended */
	EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORDEV);

	/* Copy the completed MQE (byte-swapped) into the caller's mailbox */
	BE_SWAP32_BCOPY((uint8_t *)hba->mbox_mqe, (uint8_t *)mb,
	    MAILBOX_CMD_SLI4_BSIZE);

	if (mb->mbxCommand != MBX_HEARTBEAT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Mbox event. Mbox complete. status=%x cmd=%x",
		    mb->mbxStatus, mb->mbxCommand);

		emlxs_data_dump(port, "MBOX CMP", (uint32_t *)hba->mbox_mqe,
		    12, 0);
	}

	if (mb->mbxCommand == MBX_SLI_CONFIG) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Mbox sge_cnt: %d length: %d embed: %d",
		    mb->un.varSLIConfig.be.sge_cnt,
		    mb->un.varSLIConfig.be.payload_length,
		    mb->un.varSLIConfig.be.embedded);
	}

	/* Now sync the memory buffer if one was used */
	if (mbq->bp) {
		mbox_bp = (MATCHMAP *)mbq->bp;
		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
		    DDI_DMA_SYNC_FORKERNEL);
#ifdef FMA_SUPPORT
		if (emlxs_fm_check_dma_handle(hba, mbox_bp->dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "sli4_process_mbox_event: hdl=%p",
			    mbox_bp->dma_handle);

			mb->mbxStatus = MBXERR_DMA_ERROR;
}
#endif
	}

	/* Now sync the non-embedded payload buffer if one was used */
	if (mbq->nonembed) {
		mbox_nonembed = (MATCHMAP *)mbq->nonembed;
		size = mbox_nonembed->size;
		EMLXS_MPDATA_SYNC(mbox_nonembed->dma_handle, 0, size,
		    DDI_DMA_SYNC_FORKERNEL);
		iptr = (uint32_t *)((uint8_t *)mbox_nonembed->virt);
		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)iptr, size);

#ifdef FMA_SUPPORT
		if (emlxs_fm_check_dma_handle(hba,
		    mbox_nonembed->dma_handle) != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "sli4_process_mbox_event: hdl=%p",
			    mbox_nonembed->dma_handle);

			mb->mbxStatus = MBXERR_DMA_ERROR;
		}
#endif
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)iptr, 24, 0);
	}

	/* Mailbox has been completely received at this point */

	if (mb->mbxCommand == MBX_HEARTBEAT) {
		hba->heartbeat_active = 0;
		goto done;
	}

	if (hba->mbox_queue_flag == MBX_SLEEP) {
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Received.  %s: status=%x Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb->mbxStatus);
		}
	} else {
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Completed. %s: status=%x",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb->mbxStatus);
		}
	}

	/* Filter out passthru mailbox */
	if (mbq->flag & MBQ_PASSTHRU) {
		goto done;
	}

	if (mb->mbxStatus) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
		    (uint32_t)mb->mbxStatus);
	}

	if (mbq->mbox_cmpl) {
		rc = (mbq->mbox_cmpl)(hba, mbq);

		/* If mbox was retried, return immediately */
		if (rc) {
			return;
		}
	}

done:

	/* Clean up the mailbox area */
	emlxs_mb_fini(hba, (MAILBOX *)mb, mb->mbxStatus);

	/* Attempt to send pending mailboxes */
	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
	if (mbq) {
		/* Attempt to send pending mailboxes */
		rc =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
			emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
		}
	}
	return;

} /* emlxs_sli4_process_mbox_event() */
5757 
5758 
5759 /*ARGSUSED*/
5760 static void
emlxs_CQE_to_IOCB(emlxs_hba_t * hba,CQE_CmplWQ_t * cqe,emlxs_buf_t * sbp)5761 emlxs_CQE_to_IOCB(emlxs_hba_t *hba, CQE_CmplWQ_t *cqe, emlxs_buf_t *sbp)
5762 {
5763 #ifdef DEBUG_FASTPATH
5764 	emlxs_port_t *port = &PPORT;
5765 #endif /* DEBUG_FASTPATH */
5766 	IOCBQ *iocbq;
5767 	IOCB *iocb;
5768 	uint32_t *iptr;
5769 	fc_packet_t *pkt;
5770 	emlxs_wqe_t *wqe;
5771 
5772 	iocbq = &sbp->iocbq;
5773 	wqe = &iocbq->wqe;
5774 	iocb = &iocbq->iocb;
5775 
5776 #ifdef DEBUG_FASTPATH
5777 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5778 	    "CQE to IOCB: cmd:%x iotag:%x xri:%d", wqe->Command,
5779 	    wqe->RequestTag, wqe->XRITag);
5780 #endif /* DEBUG_FASTPATH */
5781 
5782 	iocb->ULPSTATUS = cqe->Status;
5783 	iocb->un.ulpWord[4] = cqe->Parameter;
5784 	iocb->ULPIOTAG = cqe->RequestTag;
5785 	iocb->ULPCONTEXT = wqe->XRITag;
5786 
5787 	switch (wqe->Command) {
5788 
5789 	case CMD_FCP_ICMND64_CR:
5790 		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CX;
5791 		break;
5792 
5793 	case CMD_FCP_IREAD64_CR:
5794 		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CX;
5795 		iocb->ULPPU = PARM_XFER_CHECK;
5796 		if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
5797 			iocb->un.fcpi64.fcpi_parm =
5798 			    wqe->un.FcpCmd.TotalTransferCount -
5799 			    cqe->CmdSpecific;
5800 		}
5801 		break;
5802 
5803 	case CMD_FCP_IWRITE64_CR:
5804 		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CX;
5805 		if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
5806 			if (wqe->un.FcpCmd.TotalTransferCount >
5807 			    cqe->CmdSpecific) {
5808 				iocb->un.fcpi64.fcpi_parm =
5809 				    wqe->un.FcpCmd.TotalTransferCount -
5810 				    cqe->CmdSpecific;
5811 			} else {
5812 				iocb->un.fcpi64.fcpi_parm = 0;
5813 			}
5814 		}
5815 		break;
5816 
5817 	case CMD_ELS_REQUEST64_CR:
5818 		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CX;
5819 		iocb->un.elsreq64.bdl.bdeSize = cqe->CmdSpecific;
5820 		if (iocb->ULPSTATUS == 0) {
5821 			iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
5822 		}
5823 		if (iocb->ULPSTATUS == IOSTAT_LS_RJT) {
5824 			/* For LS_RJT, the driver populates the rsp buffer */
5825 			pkt = PRIV2PKT(sbp);
5826 			iptr = (uint32_t *)pkt->pkt_resp;
5827 			*iptr++ = ELS_CMD_LS_RJT;
5828 			*iptr = cqe->Parameter;
5829 		}
5830 		break;
5831 
5832 	case CMD_GEN_REQUEST64_CR:
5833 		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
5834 		iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
5835 		break;
5836 
5837 	case CMD_XMIT_SEQUENCE64_CR:
5838 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
5839 		break;
5840 
5841 	case CMD_ABORT_XRI_CX:
5842 		iocb->ULPCONTEXT = wqe->AbortTag;
5843 		break;
5844 
5845 	case CMD_FCP_TRECEIVE64_CX:
5846 		/* free memory for XRDY */
5847 		if (iocbq->bp) {
5848 			emlxs_mem_buf_free(hba, iocbq->bp);
5849 			iocbq->bp = 0;
5850 		}
5851 
5852 		/*FALLTHROUGH*/
5853 
5854 	case CMD_FCP_TSEND64_CX:
5855 	case CMD_FCP_TRSP64_CX:
5856 	default:
5857 		iocb->ULPCOMMAND = wqe->Command;
5858 
5859 	}
5860 } /* emlxs_CQE_to_IOCB() */
5861 
5862 
/*
 * Fail every outstanding I/O after the adapter has stopped responding.
 *
 * Walks the entire fc_table, completing each active command with a
 * synthesized IOSTAT_LOCAL_REJECT/IOERR_SEQUENCE_TIMEOUT CQE and
 * freeing its XRI. Non-polled, non-driver-allocated packets are queued
 * on their channel's rsp list and the channel threads are triggered
 * afterwards to finish them.
 */
/*ARGSUSED*/
static void
emlxs_sli4_hba_flush_chipq(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	CHANNEL *cp;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	uint16_t i;
	uint32_t trigger = 0;
	CQE_CmplWQ_t cqe;

	mutex_enter(&EMLXS_FCTAB_LOCK);
	for (i = 0; i < hba->max_iotag; i++) {
		sbp = hba->fc_table[i];
		if (sbp == NULL || sbp == STALE_PACKET) {
			continue;
		}
		/* Claim this iotag slot, then drop the lock while */
		/* completing the command (re-acquired at loop bottom) */
		hba->fc_table[i] = STALE_PACKET;
		hba->io_count--;
		sbp->iotag = 0;
		mutex_exit(&EMLXS_FCTAB_LOCK);

		cp = sbp->channel;
		/* Build a local-reject completion for this command */
		bzero(&cqe, sizeof (CQE_CmplWQ_t));
		cqe.RequestTag = i;
		cqe.Status = IOSTAT_LOCAL_REJECT;
		cqe.Parameter = IOERR_SEQUENCE_TIMEOUT;

		cp->hbaCmplCmd_sbp++;

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
		if (sbp->fct_cmd) {
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    EMLXS_FCT_IOCB_COMPLETE);
		}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			atomic_dec_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
			if (sbp->node) {
				atomic_dec_32(&sbp->node->io_active);
			}
#endif /* NODE_THROTTLE_SUPPORT */
		}

		/* Copy entry to sbp's iocbq */
		iocbq = &sbp->iocbq;
		emlxs_CQE_to_IOCB(hba, &cqe, sbp);

		iocbq->next = NULL;

		/* Exchange is no longer busy on-chip, free it */
		emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);

		if (!(sbp->pkt_flags &
		    (PACKET_POLLED | PACKET_ALLOCATED))) {
			/* Add the IOCB to the channel list */
			mutex_enter(&cp->rsp_lock);
			if (cp->rsp_head == NULL) {
				cp->rsp_head = iocbq;
				cp->rsp_tail = iocbq;
			} else {
				cp->rsp_tail->next = iocbq;
				cp->rsp_tail = iocbq;
			}
			mutex_exit(&cp->rsp_lock);
			trigger = 1;
		} else {
			emlxs_proc_channel_event(hba, cp, iocbq);
		}
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}
	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Wake the channel threads for any deferred completions */
	if (trigger) {
		for (i = 0; i < hba->chan_count; i++) {
			cp = &hba->chan[i];
			if (cp->rsp_head != NULL) {
				emlxs_thread_trigger2(&cp->intr_thread,
				    emlxs_proc_channel, cp);
			}
		}
	}

} /* emlxs_sli4_hba_flush_chipq() */
5952 
5953 
5954 /*ARGSUSED*/
5955 static void
emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t * hba,CQ_DESC_t * cq,CQE_CmplWQ_t * cqe)5956 emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t *hba,
5957     CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
5958 {
5959 	emlxs_port_t *port = &PPORT;
5960 	CHANNEL *cp;
5961 	uint16_t request_tag;
5962 
5963 	request_tag = cqe->RequestTag;
5964 
5965 	/* 1 to 1 mapping between CQ and channel */
5966 	cp = cq->channelp;
5967 
5968 	cp->hbaCmplCmd++;
5969 
5970 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5971 	    "CQ ENTRY: OOR Cmpl: iotag=%d", request_tag);
5972 
5973 	emlxs_data_dump(port, "CQE", (uint32_t *)cqe, 4, 0);
5974 
5975 } /* emlxs_sli4_process_oor_wqe_cmpl() */
5976 
5977 
5978 /*ARGSUSED*/
5979 static void
emlxs_sli4_process_wqe_cmpl(emlxs_hba_t * hba,CQ_DESC_t * cq,CQE_CmplWQ_t * cqe)5980 emlxs_sli4_process_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
5981 {
5982 	emlxs_port_t *port = &PPORT;
5983 	CHANNEL *cp;
5984 	emlxs_buf_t *sbp;
5985 	IOCBQ *iocbq;
5986 	uint16_t request_tag;
5987 #ifdef SFCT_SUPPORT
5988 #ifdef FCT_IO_TRACE
5989 	fct_cmd_t *fct_cmd;
5990 	emlxs_buf_t *cmd_sbp;
5991 #endif /* FCT_IO_TRACE */
5992 #endif /* SFCT_SUPPORT */
5993 
5994 	request_tag = cqe->RequestTag;
5995 
5996 	/* 1 to 1 mapping between CQ and channel */
5997 	cp = cq->channelp;
5998 
5999 	mutex_enter(&EMLXS_FCTAB_LOCK);
6000 	sbp = hba->fc_table[request_tag];
6001 
6002 	if (!sbp) {
6003 		cp->hbaCmplCmd++;
6004 		mutex_exit(&EMLXS_FCTAB_LOCK);
6005 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6006 		    "CQ ENTRY: NULL sbp. iotag=%d. Dropping...",
6007 		    request_tag);
6008 		return;
6009 	}
6010 
6011 	if (sbp == STALE_PACKET) {
6012 		cp->hbaCmplCmd_sbp++;
6013 		mutex_exit(&EMLXS_FCTAB_LOCK);
6014 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6015 		    "CQ ENTRY: Stale sbp. iotag=%d. Dropping...", request_tag);
6016 		return;
6017 	}
6018 
6019 	if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
6020 		atomic_add_32(&hba->io_active, -1);
6021 #ifdef NODE_THROTTLE_SUPPORT
6022 		if (sbp->node) {
6023 			atomic_add_32(&sbp->node->io_active, -1);
6024 		}
6025 #endif /* NODE_THROTTLE_SUPPORT */
6026 	}
6027 
6028 	if (!(sbp->xrip)) {
6029 		cp->hbaCmplCmd++;
6030 		mutex_exit(&EMLXS_FCTAB_LOCK);
6031 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6032 		    "CQ ENTRY: NULL sbp xrip %p. iotag=%d. Dropping...",
6033 		    sbp, request_tag);
6034 		return;
6035 	}
6036 
6037 #ifdef DEBUG_FASTPATH
6038 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6039 	    "CQ ENTRY: process wqe compl");
6040 #endif /* DEBUG_FASTPATH */
6041 	cp->hbaCmplCmd_sbp++;
6042 
6043 	/* Copy entry to sbp's iocbq */
6044 	iocbq = &sbp->iocbq;
6045 	emlxs_CQE_to_IOCB(hba, cqe, sbp);
6046 
6047 	iocbq->next = NULL;
6048 
6049 	if (cqe->XB) {
6050 		/* Mark exchange as ABORT in progress */
6051 		sbp->xrip->flag &= ~EMLXS_XRI_PENDING_IO;
6052 		sbp->xrip->flag |= EMLXS_XRI_BUSY;
6053 
6054 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6055 		    "CQ ENTRY: XRI BUSY: iotag=%d xri=%d", request_tag,
6056 		    sbp->xrip->XRI);
6057 
6058 		emlxs_sli4_free_xri(port, sbp, 0, 0);
6059 	} else {
6060 		/* Exchange is no longer busy on-chip, free it */
6061 		emlxs_sli4_free_xri(port, sbp, sbp->xrip, 0);
6062 	}
6063 
6064 	mutex_exit(&EMLXS_FCTAB_LOCK);
6065 
6066 #ifdef SFCT_SUPPORT
6067 #ifdef FCT_IO_TRACE
6068 	fct_cmd = sbp->fct_cmd;
6069 	if (fct_cmd) {
6070 		cmd_sbp = (emlxs_buf_t *)fct_cmd->cmd_fca_private;
6071 		mutex_enter(&cmd_sbp->fct_mtx);
6072 		EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp, EMLXS_FCT_IOCB_COMPLETE);
6073 		mutex_exit(&cmd_sbp->fct_mtx);
6074 	}
6075 #endif /* FCT_IO_TRACE */
6076 #endif /* SFCT_SUPPORT */
6077 
6078 	/*
6079 	 * If this is NOT a polled command completion
6080 	 * or a driver allocated pkt, then defer pkt
6081 	 * completion.
6082 	 */
6083 	if (!(sbp->pkt_flags &
6084 	    (PACKET_POLLED | PACKET_ALLOCATED))) {
6085 		/* Add the IOCB to the channel list */
6086 		mutex_enter(&cp->rsp_lock);
6087 		if (cp->rsp_head == NULL) {
6088 			cp->rsp_head = iocbq;
6089 			cp->rsp_tail = iocbq;
6090 		} else {
6091 			cp->rsp_tail->next = iocbq;
6092 			cp->rsp_tail = iocbq;
6093 		}
6094 		mutex_exit(&cp->rsp_lock);
6095 
6096 		/* Delay triggering thread till end of ISR */
6097 		cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
6098 	} else {
6099 		emlxs_proc_channel_event(hba, cp, iocbq);
6100 	}
6101 
6102 } /* emlxs_sli4_process_wqe_cmpl() */
6103 
6104 
6105 /*ARGSUSED*/
6106 static void
emlxs_sli4_process_release_wqe(emlxs_hba_t * hba,CQ_DESC_t * cq,CQE_RelWQ_t * cqe)6107 emlxs_sli4_process_release_wqe(emlxs_hba_t *hba, CQ_DESC_t *cq,
6108     CQE_RelWQ_t *cqe)
6109 {
6110 	emlxs_port_t *port = &PPORT;
6111 	WQ_DESC_t *wq;
6112 	CHANNEL *cp;
6113 	uint32_t i;
6114 	uint16_t wqi;
6115 
6116 	wqi = emlxs_sli4_wqid_to_index(hba, (uint16_t)cqe->WQid);
6117 
6118 	/* Verify WQ index */
6119 	if (wqi == 0xffff) {
6120 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6121 		    "CQ ENTRY: Invalid WQid:%d. Dropping...",
6122 		    cqe->WQid);
6123 		return;
6124 	}
6125 
6126 	wq = &hba->sli.sli4.wq[wqi];
6127 
6128 #ifdef DEBUG_FASTPATH
6129 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6130 	    "CQ ENTRY: process release wqe: old %d new %d", wq->port_index,
6131 	    cqe->WQindex);
6132 #endif /* DEBUG_FASTPATH */
6133 
6134 	wq->port_index = cqe->WQindex;
6135 
6136 	/* Cmd ring may be available. Try sending more iocbs */
6137 	for (i = 0; i < hba->chan_count; i++) {
6138 		cp = &hba->chan[i];
6139 		if (wq == (WQ_DESC_t *)cp->iopath) {
6140 			emlxs_sli4_issue_iocb_cmd(hba, cp, 0);
6141 		}
6142 	}
6143 
6144 } /* emlxs_sli4_process_release_wqe() */
6145 
6146 
6147 /*ARGSUSED*/
6148 emlxs_iocbq_t *
emlxs_sli4_rxq_get(emlxs_hba_t * hba,fc_frame_hdr_t * fchdr)6149 emlxs_sli4_rxq_get(emlxs_hba_t *hba, fc_frame_hdr_t *fchdr)
6150 {
6151 	emlxs_queue_t *q;
6152 	emlxs_iocbq_t *iocbq;
6153 	emlxs_iocbq_t *prev;
6154 	fc_frame_hdr_t *fchdr2;
6155 	RXQ_DESC_t *rxq;
6156 
6157 	switch (fchdr->type) {
6158 	case 1: /* ELS */
6159 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
6160 		break;
6161 	case 0x20: /* CT */
6162 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
6163 		break;
6164 	default:
6165 		return (NULL);
6166 	}
6167 
6168 	mutex_enter(&rxq->lock);
6169 
6170 	q = &rxq->active;
6171 	iocbq  = (emlxs_iocbq_t *)q->q_first;
6172 	prev = NULL;
6173 
6174 	while (iocbq) {
6175 
6176 		fchdr2 = (fc_frame_hdr_t *)iocbq->iocb.un.ulpWord;
6177 
6178 		if ((fchdr2->s_id == fchdr->s_id) &&
6179 		    (fchdr2->ox_id == fchdr->ox_id) &&
6180 		    (fchdr2->seq_id == fchdr->seq_id)) {
6181 			/* Remove iocbq */
6182 			if (prev) {
6183 				prev->next = iocbq->next;
6184 			}
6185 			if (q->q_first == (uint8_t *)iocbq) {
6186 				q->q_first = (uint8_t *)iocbq->next;
6187 			}
6188 			if (q->q_last == (uint8_t *)iocbq) {
6189 				q->q_last = (uint8_t *)prev;
6190 			}
6191 			q->q_cnt--;
6192 
6193 			break;
6194 		}
6195 
6196 		prev  = iocbq;
6197 		iocbq = iocbq->next;
6198 	}
6199 
6200 	mutex_exit(&rxq->lock);
6201 
6202 	return (iocbq);
6203 
6204 } /* emlxs_sli4_rxq_get() */
6205 
6206 
6207 /*ARGSUSED*/
6208 void
emlxs_sli4_rxq_put(emlxs_hba_t * hba,emlxs_iocbq_t * iocbq)6209 emlxs_sli4_rxq_put(emlxs_hba_t *hba, emlxs_iocbq_t *iocbq)
6210 {
6211 	emlxs_queue_t *q;
6212 	fc_frame_hdr_t *fchdr;
6213 	RXQ_DESC_t *rxq;
6214 
6215 	fchdr = (fc_frame_hdr_t *)iocbq->iocb.RXFCHDR;
6216 
6217 	switch (fchdr->type) {
6218 	case 1: /* ELS */
6219 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
6220 		break;
6221 	case 0x20: /* CT */
6222 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
6223 		break;
6224 	default:
6225 		return;
6226 	}
6227 
6228 	mutex_enter(&rxq->lock);
6229 
6230 	q = &rxq->active;
6231 
6232 	if (q->q_last) {
6233 		((emlxs_iocbq_t *)q->q_last)->next = iocbq;
6234 		q->q_cnt++;
6235 	} else {
6236 		q->q_first = (uint8_t *)iocbq;
6237 		q->q_cnt = 1;
6238 	}
6239 
6240 	q->q_last = (uint8_t *)iocbq;
6241 	iocbq->next = NULL;
6242 
6243 	mutex_exit(&rxq->lock);
6244 
6245 	return;
6246 
6247 } /* emlxs_sli4_rxq_put() */
6248 
6249 
6250 static void
emlxs_sli4_rq_post(emlxs_port_t * port,uint16_t rqid)6251 emlxs_sli4_rq_post(emlxs_port_t *port, uint16_t rqid)
6252 {
6253 	emlxs_hba_t *hba = HBA;
6254 
6255 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6256 	    "RQ POST: rqid=%d count=1", rqid);
6257 
6258 	/* Ring the RQ doorbell once to repost the RQ buffer */
6259 
6260 	emlxs_sli4_write_rqdb(hba, rqid, 1);
6261 
6262 } /* emlxs_sli4_rq_post() */
6263 
6264 
/*
 * emlxs_sli4_process_unsol_rcv() - Process an Unsolicited Receive CQE.
 *
 * Validates the CQE, advances the paired header/data receive queue
 * indices, copies the FC frame header out of the header RQ buffer and
 * dispatches the frame by FC type (BLS/ELS/CT/FCT).  Frames belonging
 * to a multi-frame sequence are accumulated into a sequence buffer
 * attached to an iocbq parked on the port rxq until the final frame
 * (F_CTL_END_SEQ) arrives, at which point a receive IOCB is built and
 * handed to the appropriate unsolicited handler.  The consumed RQ
 * entry is always reposted to the hardware before returning.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_unsol_rcv(emlxs_hba_t *hba, CQ_DESC_t *cq,
    CQE_UnsolRcv_t *cqe)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	RQ_DESC_t *hdr_rq;
	RQ_DESC_t *data_rq;
	MBUF_INFO *hdr_mp;
	MBUF_INFO *data_mp;
	MATCHMAP *seq_mp;
	uint32_t *data;
	fc_frame_hdr_t fchdr;
	uint16_t hdr_rqi;
	uint32_t host_index;
	emlxs_iocbq_t *iocbq = NULL;
	emlxs_iocb_t *iocb;
	emlxs_node_t *node = NULL;
	uint32_t i;
	uint32_t seq_len;
	uint32_t seq_cnt;
	uint32_t buf_type;
	char label[32];
	emlxs_wqe_t *wqe;
	CHANNEL *cp;
	XRIobj_t *xrip;
	RPIobj_t *rpip = NULL;
	uint32_t	cmd;
	uint32_t posted = 0;
	uint32_t abort = 1;
	off_t offset;
	uint32_t status;
	uint32_t data_size;
	uint16_t rqid;
	uint32_t hdr_size;
	fc_packet_t *pkt;
	emlxs_buf_t *sbp;

	/* Extract the CQE fields; the V1 CQE uses a different layout */
	if (cqe->Code == CQE_TYPE_UNSOL_RCV_V1) {
		CQE_UnsolRcvV1_t *cqeV1 = (CQE_UnsolRcvV1_t *)cqe;

		status	  = cqeV1->Status;
		data_size = cqeV1->data_size;
		rqid	  = cqeV1->RQid;
		hdr_size  = cqeV1->hdr_size;
	} else {
		status	  = cqe->Status;
		data_size = cqe->data_size;
		rqid	  = cqe->RQid;
		hdr_size  = cqe->hdr_size;
	}

	/* Validate the CQE */

	/* Check status */
	switch (status) {
	case RQ_STATUS_SUCCESS: /* 0x10 */
		break;

	case RQ_STATUS_BUFLEN_EXCEEDED:  /* 0x11 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "CQ ENTRY: Unsol Rcv: Payload truncated.");
		break;

	case RQ_STATUS_NEED_BUFFER: /* 0x12 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Unsol Rcv: Payload buffer needed.");
		return;

	case RQ_STATUS_FRAME_DISCARDED:  /* 0x13 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Unsol Rcv: Payload buffer discarded.");
		return;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "CQ ENTRY: Unsol Rcv: Unknown status=%x.",
		    status);
		break;
	}

	/* Make sure there is a frame header */
	if (hdr_size < sizeof (fc_frame_hdr_t)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "CQ ENTRY: Unsol Rcv: FC header too small. Dropping...");
		return;
	}

	hdr_rqi = emlxs_sli4_rqid_to_index(hba, rqid);

	/* Verify RQ index */
	if (hdr_rqi == 0xffff) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "CQ ENTRY: Unsol Rcv: Invalid RQID:%d. Dropping...",
		    rqid);
		return;
	}

	/* Header RQ and its data RQ are allocated as an adjacent pair */
	hdr_rq  = &hba->sli.sli4.rq[hdr_rqi];
	data_rq = &hba->sli.sli4.rq[hdr_rqi + 1];

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "CQ ENTRY: Unsol Rcv:%x rqid=%d,%d index=%d status=%x "
	    "hdr_size=%d data_size=%d",
	    cqe->Code, rqid, hdr_rqi, hdr_rq->host_index, status, hdr_size,
	    data_size);

	hdr_rq->num_proc++;

	/* Update host index */
	mutex_enter(&hba->sli.sli4.rq[hdr_rqi].lock);
	host_index = hdr_rq->host_index;
	hdr_rq->host_index++;

	if (hdr_rq->host_index >= hdr_rq->max_index) {
		hdr_rq->host_index = 0;
	}
	/* Header and data RQs advance in lock-step */
	data_rq->host_index = hdr_rq->host_index;
	mutex_exit(&hba->sli.sli4.rq[hdr_rqi].lock);

	/* Get the next header rqb */
	hdr_mp  = &hdr_rq->rqb[host_index];

	/* Offset of this rqb within the slim2 DMA area, for the sync */
	offset = (off_t)((uint64_t)((unsigned long)hdr_mp->virt) -
	    (uint64_t)((unsigned long)hba->sli.sli4.slim2.virt));

	EMLXS_MPDATA_SYNC(hdr_mp->dma_handle, offset,
	    sizeof (fc_frame_hdr_t), DDI_DMA_SYNC_FORKERNEL);

	LE_SWAP32_BCOPY(hdr_mp->virt, (uint8_t *)&fchdr,
	    sizeof (fc_frame_hdr_t));

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "RQ HDR[%d]: rctl:%x type:%x "
	    "sid:%x did:%x oxid:%x rxid:%x",
	    host_index, fchdr.r_ctl, fchdr.type,
	    fchdr.s_id,  fchdr.d_id, fchdr.ox_id, fchdr.rx_id);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "RQ HDR[%d]: fctl:%x seq_id:%x seq_cnt:%x df_ctl:%x ro:%x",
	    host_index, fchdr.f_ctl, fchdr.seq_id, fchdr.seq_cnt,
	    fchdr.df_ctl, fchdr.ro);

	/* Verify fc header type */
	switch (fchdr.type) {
	case 0: /* BLS */
		/* Only ABTS (r_ctl 0x81) is expected for BLS frames */
		if (fchdr.r_ctl != 0x81) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: Unexpected FC rctl (0x%x) "
			    "received. Dropping...",
			    fchdr.r_ctl);

			goto done;
		}

		/* Make sure there is no payload */
		if (data_size != 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "RQ ENTRY: ABTS payload provided. Dropping...");

			goto done;
		}

		buf_type = 0xFFFFFFFF;
		(void) strlcpy(label, "ABTS", sizeof (label));
		cp = &hba->chan[hba->channel_els];
		break;

	case 0x01: /* ELS */
		/* Make sure there is a payload */
		if (data_size == 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "RQ ENTRY: Unsol Rcv: No ELS payload provided. "
			    "Dropping...");

			goto done;
		}

		buf_type = MEM_ELSBUF;
		(void) strlcpy(label, "Unsol ELS", sizeof (label));
		cp = &hba->chan[hba->channel_els];
		break;

	case 0x20: /* CT */
		/* Make sure there is a payload */
		if (data_size == 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "RQ ENTRY: Unsol Rcv: No CT payload provided. "
			    "Dropping...");

			goto done;
		}

		buf_type = MEM_CTBUF;
		(void) strlcpy(label, "Unsol CT", sizeof (label));
		cp = &hba->chan[hba->channel_ct];
		break;

	case 0x08: /* FCT */
		/* Make sure there is a payload */
		if (data_size == 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "RQ ENTRY: Unsol Rcv: No FCP payload provided. "
			    "Dropping...");

			goto done;
		}

		buf_type = MEM_FCTBUF;
		(void) strlcpy(label, "Unsol FCT", sizeof (label));
		cp = &hba->chan[hba->CHANNEL_FCT];
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RQ ENTRY: Unexpected FC type (0x%x) received. Dropping...",
		    fchdr.type);

		goto done;
	}
	/* Fc Header is valid */

	/* Check if this is an active sequence */
	iocbq = emlxs_sli4_rxq_get(hba, &fchdr);

	if (!iocbq) {
		/* New sequence: the first frame must carry FIRST_SEQ */
		if (fchdr.type != 0) {
			if (!(fchdr.f_ctl & F_CTL_FIRST_SEQ)) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "RQ ENTRY: %s: First of sequence not"
				    " set.  Dropping...",
				    label);

				goto done;
			}
		}

		if ((fchdr.type != 0) && (fchdr.seq_cnt != 0)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Sequence count not zero (%d).  "
			    "Dropping...",
			    label, fchdr.seq_cnt);

			goto done;
		}

		/* Find vport */
		for (i = 0; i < MAX_VPORTS; i++) {
			vport = &VPORT(i);

			if (vport->did == fchdr.d_id) {
				port = vport;
				break;
			}
		}

		if (i == MAX_VPORTS) {
			/* Allow unsol FLOGI & PLOGI for P2P */
			if ((fchdr.type != 1 /* ELS*/) ||
			    ((fchdr.d_id != FABRIC_DID) &&
			    !(hba->flag & FC_PT_TO_PT))) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "RQ ENTRY: %s: Invalid did=%x. Dropping...",
				    label, fchdr.d_id);

				goto done;
			}
		}

		/* Allocate an IOCBQ */
		iocbq = (emlxs_iocbq_t *)emlxs_mem_get(hba, MEM_IOCB);

		if (!iocbq) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Out of IOCB "
			    "resources.  Dropping...",
			    label);

			goto done;
		}

		seq_mp = NULL;
		if (fchdr.type != 0) {
			/* Allocate a buffer */
			seq_mp = (MATCHMAP *)emlxs_mem_get(hba, buf_type);

			if (!seq_mp) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "RQ ENTRY: %s: Out of buffer "
				    "resources.  Dropping...",
				    label);

				goto done;
			}

			iocbq->bp = (uint8_t *)seq_mp;
		}

		node = (void *)emlxs_node_find_did(port, fchdr.s_id, 1);
		if (node == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Node not found. sid=%x",
			    label, fchdr.s_id);
		}

		/* Initialize the iocbq */
		iocbq->port = port;
		iocbq->channel = cp;
		iocbq->node = node;

		iocb = &iocbq->iocb;
		iocb->RXSEQCNT = 0;
		iocb->RXSEQLEN = 0;

		seq_len = 0;
		seq_cnt = 0;

	} else {
		/* Continuation of a queued sequence; restore its state */
		iocb = &iocbq->iocb;
		port = iocbq->port;
		node = (emlxs_node_t *)iocbq->node;

		seq_mp = (MATCHMAP *)iocbq->bp;
		seq_len = iocb->RXSEQLEN;
		seq_cnt = iocb->RXSEQCNT;

		/* Check sequence order */
		if (fchdr.seq_cnt != seq_cnt) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Out of order frame received "
			    "(%d != %d).  Dropping...",
			    label, fchdr.seq_cnt, seq_cnt);

			goto done;
		}
	}

	/* We now have an iocbq */

	if (!port->vpip->vfip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RQ ENTRY: %s: No fabric connection. "
		    "Dropping...",
		    label);

		goto done;
	}

	/* Save the frame data to our seq buffer */
	if (data_size && seq_mp) {
		/* Get the next data rqb */
		data_mp = &data_rq->rqb[host_index];

		offset = (off_t)((uint64_t)((unsigned long)
		    data_mp->virt) -
		    (uint64_t)((unsigned long)
		    hba->sli.sli4.slim2.virt));

		EMLXS_MPDATA_SYNC(data_mp->dma_handle, offset,
		    data_size, DDI_DMA_SYNC_FORKERNEL);

		data = (uint32_t *)data_mp->virt;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RQ DAT[%d]: %08x %08x %08x %08x %08x %08x ...",
		    host_index, data[0], data[1], data[2], data[3],
		    data[4], data[5]);

		/* Check sequence length */
		if ((seq_len + data_size) > seq_mp->size) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "RQ ENTRY: %s: Sequence buffer overflow. "
			    "(%d > %d). Dropping...",
			    label, (seq_len + data_size), seq_mp->size);

			goto done;
		}

		/* Copy data to local receive buffer */
		bcopy((uint8_t *)data, ((uint8_t *)seq_mp->virt +
		    seq_len), data_size);

		seq_len += data_size;
	}

	/* If this is not the last frame of sequence, queue it. */
	if (!(fchdr.f_ctl & F_CTL_END_SEQ)) {
		/* Save sequence header */
		if (seq_cnt == 0) {
			bcopy((uint8_t *)&fchdr, (uint8_t *)iocb->RXFCHDR,
			    sizeof (fc_frame_hdr_t));
		}

		/* Update sequence info in iocb */
		iocb->RXSEQCNT = seq_cnt + 1;
		iocb->RXSEQLEN = seq_len;

		/* Queue iocbq for next frame */
		emlxs_sli4_rxq_put(hba, iocbq);

		/* Don't free resources */
		iocbq = NULL;

		/* No need to abort */
		abort = 0;

		goto done;
	}

	/* Last frame consumed; repost the RQ entry now */
	emlxs_sli4_rq_post(port, hdr_rq->qid);
	posted = 1;

	/* End of sequence found. Process request now. */

	if (seq_cnt > 0) {
		/* Retrieve first frame of sequence */
		bcopy((uint8_t *)iocb->RXFCHDR, (uint8_t *)&fchdr,
		    sizeof (fc_frame_hdr_t));

		bzero((uint8_t *)iocb, sizeof (emlxs_iocb_t));
	}

	/* Build rcv iocb and process it */
	switch (fchdr.type) {
	case 0: /* BLS */

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RQ ENTRY: %s: oxid:%x rxid %x sid:%x. Sending BLS ACC...",
		    label, fchdr.ox_id, fchdr.rx_id, fchdr.s_id);

		/* Try to send abort response */
		if (!(pkt = emlxs_pkt_alloc(port, 0, 0, 0, KM_NOSLEEP))) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Unable to alloc pkt. Dropping...",
			    label);
			goto done;
		}

		/* Setup sbp / iocb for driver initiated cmd */
		sbp = PKT2PRIV(pkt);

		/* Free the temporary iocbq */
		emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);

		/*
		 * NOTE(review): from here on, a 'goto done' will hand the
		 * packet's embedded iocbq to emlxs_mem_put(MEM_IOCB) and
		 * never free 'pkt' -- confirm this is intended.
		 */
		iocbq = (emlxs_iocbq_t *)&sbp->iocbq;
		iocbq->port = port;
		iocbq->channel = cp;
		iocbq->node = node;

		sbp->pkt_flags &= ~PACKET_ULP_OWNED;

		if (node) {
			sbp->node = node;
			sbp->did  = node->nlp_DID;
		}

		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);

		/* BLS ACC Response */
		wqe = &iocbq->wqe;
		bzero((void *)wqe, sizeof (emlxs_wqe_t));

		iocbq->iocb.ULPCOMMAND = CMD_XMIT_BLS_RSP64_CX;
		wqe->Command = CMD_XMIT_BLS_RSP64_CX;
		wqe->CmdType = WQE_TYPE_GEN;

		wqe->un.BlsRsp.Payload0 = 0x80;
		wqe->un.BlsRsp.Payload1 = fchdr.seq_id;

		wqe->un.BlsRsp.OXId = fchdr.ox_id;
		wqe->un.BlsRsp.RXId = fchdr.rx_id;

		wqe->un.BlsRsp.SeqCntLow = 0;
		wqe->un.BlsRsp.SeqCntHigh = 0xFFFF;

		wqe->un.BlsRsp.XO = ((fchdr.f_ctl & F_CTL_XCHG_CONTEXT)? 1:0);
		wqe->un.BlsRsp.AR = 0;

		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (rpip) {
			wqe->ContextType = WQE_RPI_CONTEXT;
			wqe->ContextTag = rpip->RPI;
		} else {
			/* No RPI yet; respond in VPI context and reserve one */
			wqe->ContextType = WQE_VPI_CONTEXT;
			wqe->ContextTag = port->vpip->VPI;

			rpip = emlxs_rpi_reserve_notify(port, fchdr.s_id, 0);

			if (!rpip) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "RQ ENTRY: %s: Unable to alloc "
				    "reserved RPI. Dropping...",
				    label);

				goto done;
			}

			/* Store the reserved rpi */
			wqe->CmdSpecific = rpip->RPI;

			wqe->un.BlsRsp.RemoteId = fchdr.s_id;
			wqe->un.BlsRsp.LocalId = fchdr.d_id;
		}

		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
			wqe->CCPE = 1;
			wqe->CCP = fchdr.rsvd;
		}

		/* Allocate an exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
		    EMLXS_XRI_SOL_BLS_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Unable to alloc XRI. Dropping...",
			    label);
			goto done;
		}

		wqe->XRITag = xrip->XRI;
		wqe->Class = CLASS3;
		wqe->RequestTag = xrip->iotag;
		wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */

		/* Give the response 30 timer ticks before timeout */
		sbp->ticks = hba->timer_tics + 30;

		emlxs_sli4_issue_iocb_cmd(hba, iocbq->channel, iocbq);

		/* The temporary iocbq has been freed already */
		iocbq = NULL;

		break;

	case 1: /* ELS */
		/* First payload word holds the ELS command code */
		cmd = *((uint32_t *)seq_mp->virt);
		cmd &= ELS_CMD_MASK;

		if (!(port->vpip->flag & EMLXS_VPI_PORT_ENABLED)) {
			uint32_t dropit = 1;

			/* Allow for P2P handshaking */
			switch (cmd) {
			case ELS_CMD_FLOGI:
				dropit = 0;
				break;

			case ELS_CMD_PLOGI:
			case ELS_CMD_PRLI:
				if (hba->flag & FC_PT_TO_PT) {
					dropit = 0;
				}
				break;
			}

			if (dropit) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sli_detail_msg,
				    "RQ ENTRY: %s: Port not yet enabled. "
				    "Dropping...",
				    label);
				goto done;
			}
		}

		rpip = NULL;

		if (cmd != ELS_CMD_LOGO) {
			rpip = EMLXS_NODE_TO_RPI(port, node);
		}

		if (!rpip) {
			/* Use the fabric rpi */
			rpip = port->vpip->fabric_rpip;
		}

		xrip = emlxs_sli4_reserve_xri(port, rpip,
		    EMLXS_XRI_UNSOL_ELS_TYPE, fchdr.ox_id);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Out of exchange "
			    "resources.  Dropping...",
			    label);

			goto done;
		}

		/* Build CMD_RCV_ELS64_CX */
		iocb->un.rcvels64.elsReq.tus.f.bdeFlags = 0;
		iocb->un.rcvels64.elsReq.tus.f.bdeSize  = seq_len;
		iocb->un.rcvels64.elsReq.addrLow  = PADDR_LO(seq_mp->phys);
		iocb->un.rcvels64.elsReq.addrHigh = PADDR_HI(seq_mp->phys);
		iocb->ULPBDECOUNT = 1;

		iocb->un.rcvels64.remoteID = fchdr.s_id;
		iocb->un.rcvels64.parmRo = fchdr.d_id;

		iocb->ULPPU = 0x3;
		iocb->ULPCONTEXT = xrip->XRI;
		iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
		iocb->ULPCLASS = CLASS3;
		iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;

		iocb->unsli3.ext_rcv.seq_len = seq_len;
		iocb->unsli3.ext_rcv.vpi = port->vpip->VPI;
		iocb->unsli3.ext_rcv.oxid = fchdr.ox_id;

		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
			iocb->unsli3.ext_rcv.ccpe = 1;
			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
		}

		if (port->mode == MODE_INITIATOR) {
			(void) emlxs_els_handle_unsol_req(port, iocbq->channel,
			    iocbq, seq_mp, seq_len);
		}
#ifdef SFCT_SUPPORT
		else if (port->mode == MODE_TARGET) {
			(void) emlxs_fct_handle_unsol_els(port, iocbq->channel,
			    iocbq, seq_mp, seq_len);
		}
#endif /* SFCT_SUPPORT */
		break;

#ifdef SFCT_SUPPORT
	case 8: /* FCT */
		if (!(port->VPIobj.flag & EMLXS_VPI_PORT_ENABLED)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Port not yet enabled. "
			    "Dropping...",
			    label);

			goto done;
		}

		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (!rpip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Port not logged in. "
			    "Dropping...",
			    label);

			goto done;
		}

		xrip = emlxs_sli4_reserve_xri(port, rpip,
		    EMLXS_XRI_UNSOL_FCP_TYPE, fchdr.ox_id);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Out of exchange "
			    "resources.  Dropping...",
			    label);

			goto done;
		}

		/* Build CMD_RCV_SEQUENCE64_CX */
		iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
		iocb->un.rcvseq64.rcvBde.tus.f.bdeSize  = seq_len;
		iocb->un.rcvseq64.rcvBde.addrLow  = PADDR_LO(seq_mp->phys);
		iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
		iocb->ULPBDECOUNT = 1;

		iocb->ULPPU = 0x3;
		iocb->ULPCONTEXT = xrip->XRI;
		iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
		iocb->ULPCLASS = CLASS3;
		/*
		 * NOTE(review): the comment above says RCV_SEQUENCE64 but
		 * ULPCOMMAND is set to CMD_RCV_ELS64_CX (the CT case below
		 * uses CMD_RCV_SEQ64_CX) -- confirm which is intended.
		 */
		iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;

		iocb->unsli3.ext_rcv.seq_len = seq_len;
		iocb->unsli3.ext_rcv.vpi = port->VPIobj.VPI;
		iocb->unsli3.ext_rcv.oxid = fchdr.ox_id;

		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
			iocb->unsli3.ext_rcv.ccpe = 1;
			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
		}

		/* pass xrip to FCT in the iocbq */
		iocbq->sbp = xrip;

		(void) emlxs_fct_handle_unsol_req(port, cp, iocbq,
		    seq_mp, seq_len);
		break;
#endif /* SFCT_SUPPORT */

	case 0x20: /* CT */
		if (!(port->vpip->flag & EMLXS_VPI_PORT_ENABLED) &&
		    !(hba->flag & FC_LOOPBACK_MODE)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Port not yet enabled. "
			    "Dropping...",
			    label);

			goto done;
		}

		if (!node) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Node not found (did=%x).  "
			    "Dropping...",
			    label, fchdr.d_id);

			goto done;
		}

		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (!rpip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: RPI not found (did=%x rpi=%d).  "
			    "Dropping...",
			    label, fchdr.d_id, node->nlp_Rpi);

			goto done;
		}

		xrip = emlxs_sli4_reserve_xri(port, rpip,
		    EMLXS_XRI_UNSOL_CT_TYPE, fchdr.ox_id);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Out of exchange "
			    "resources.  Dropping...",
			    label);

			goto done;
		}

		/* Build CMD_RCV_SEQ64_CX */
		iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
		iocb->un.rcvseq64.rcvBde.tus.f.bdeSize  = seq_len;
		iocb->un.rcvseq64.rcvBde.addrLow  = PADDR_LO(seq_mp->phys);
		iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
		iocb->ULPBDECOUNT = 1;

		iocb->un.rcvseq64.xrsqRo = 0;
		iocb->un.rcvseq64.w5.hcsw.Rctl = fchdr.r_ctl;
		iocb->un.rcvseq64.w5.hcsw.Type = fchdr.type;
		iocb->un.rcvseq64.w5.hcsw.Dfctl = fchdr.df_ctl;
		iocb->un.rcvseq64.w5.hcsw.Fctl = fchdr.f_ctl;

		iocb->ULPPU = 0x3;
		iocb->ULPCONTEXT = xrip->XRI;
		iocb->ULPIOTAG = rpip->RPI;
		iocb->ULPCLASS = CLASS3;
		iocb->ULPCOMMAND = CMD_RCV_SEQ64_CX;

		iocb->unsli3.ext_rcv.seq_len = seq_len;
		iocb->unsli3.ext_rcv.vpi = port->vpip->VPI;

		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
			iocb->unsli3.ext_rcv.ccpe = 1;
			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
		}

		(void) emlxs_ct_handle_unsol_req(port, iocbq->channel,
		    iocbq, seq_mp, seq_len);

		break;
	}

	/* Sequence handled, no need to abort */
	abort = 0;

done:

	/* Repost the consumed RQ entry if that has not happened yet */
	if (!posted) {
		emlxs_sli4_rq_post(port, hdr_rq->qid);
	}

	if (abort) {
		/* Send ABTS for this exchange */
		/* !!! Currently, we have no implementation for this !!! */
		abort = 0;
	}

	/* Return memory resources to pools */
	if (iocbq) {
		if (iocbq->bp) {
			emlxs_mem_put(hba, buf_type, (void *)iocbq->bp);
			iocbq->bp = 0;
		}

		emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
	}

#ifdef FMA_SUPPORT
	if (emlxs_fm_check_dma_handle(hba,
	    hba->sli.sli4.slim2.dma_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_dma_handle_msg,
		    "sli4_process_unsol_rcv: hdl=%p",
		    hba->sli.sli4.slim2.dma_handle);

		emlxs_thread_spawn(hba, emlxs_restart_thread,
		    0, 0);
	}
#endif
	return;

} /* emlxs_sli4_process_unsol_rcv() */
7074 
7075 
7076 /*ARGSUSED*/
7077 static void
emlxs_sli4_process_xri_aborted(emlxs_hba_t * hba,CQ_DESC_t * cq,CQE_XRI_Abort_t * cqe)7078 emlxs_sli4_process_xri_aborted(emlxs_hba_t *hba, CQ_DESC_t *cq,
7079     CQE_XRI_Abort_t *cqe)
7080 {
7081 	emlxs_port_t *port = &PPORT;
7082 	XRIobj_t *xrip;
7083 
7084 	mutex_enter(&EMLXS_FCTAB_LOCK);
7085 
7086 	xrip = emlxs_sli4_find_xri(port, cqe->XRI);
7087 	if (xrip == NULL) {
7088 		/* EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg, */
7089 		/*    "CQ ENTRY: process xri aborted ignored");  */
7090 
7091 		mutex_exit(&EMLXS_FCTAB_LOCK);
7092 		return;
7093 	}
7094 
7095 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7096 	    "CQ ENTRY: XRI Aborted: xri=%d IA=%d EO=%d BR=%d",
7097 	    cqe->XRI, cqe->IA, cqe->EO, cqe->BR);
7098 
7099 	if (!(xrip->flag & EMLXS_XRI_BUSY)) {
7100 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
7101 		    "CQ ENTRY: XRI Aborted: xri=%d flag=%x. Bad state.",
7102 		    xrip->XRI, xrip->flag);
7103 
7104 		mutex_exit(&EMLXS_FCTAB_LOCK);
7105 		return;
7106 	}
7107 
7108 	/* Exchange is no longer busy on-chip, free it */
7109 	emlxs_sli4_free_xri(port, 0, xrip, 0);
7110 
7111 	mutex_exit(&EMLXS_FCTAB_LOCK);
7112 
7113 	return;
7114 
7115 } /* emlxs_sli4_process_xri_aborted () */
7116 
7117 
/*
 * emlxs_sli4_process_cq() - Drain one Completion Queue.
 *
 * Walks the CQ ring from the current host index, consuming every entry
 * whose valid (phase) bit matches the expected value in cq->qe_valid.
 * Each CQE is byte-swapped into a local copy and dispatched by queue
 * type: GROUP1 CQs carry mailbox and async events; GROUP2 CQs carry WQ
 * completions, WQE releases, unsolicited receives and XRI aborts.
 * EMLXS_PORT_LOCK is dropped around each per-entry handler and
 * reacquired afterwards.  Finally the CQ doorbell is rung to pop the
 * consumed entries and rearm the queue.
 *
 * EMLXS_PORT_LOCK must be held on entry and is held again on exit.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_cq(emlxs_hba_t *hba, CQ_DESC_t *cq)
{
	emlxs_port_t *port = &PPORT;
	CQE_u *cqe;
	CQE_u cq_entry;
	int num_entries = 0;
	off_t offset;

	/* EMLXS_PORT_LOCK must be held when entering this routine */

	cqe = (CQE_u *)cq->addr.virt;
	cqe += cq->host_index;

	/* Offset of this CQ ring within the slim2 DMA area */
	offset = (off_t)((uint64_t)((unsigned long)
	    cq->addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	/*
	 * NOTE(review): syncs a fixed 4096 bytes -- presumably the CQ
	 * ring fits in one page; confirm against queue sizing.
	 */
	EMLXS_MPDATA_SYNC(cq->addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);

	for (;;) {
		/* Word 3 carries the valid (phase) bit in its MSB */
		cq_entry.word[3] = BE_SWAP32(cqe->word[3]);
		if (((cq_entry.word[3]>>31) & 0x01) != cq->qe_valid) {
#ifdef	DEBUG_CQE
			if (num_entries == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sli_detail_msg, "CQE: Invalid CQE:"
				    " eqid=%x cqid=%x cqe=%p %08x %08x %08x"
				    " %08x. host_index=%x valid=%d Break...",
				    cq->eqid, cq->qid, cqe,
				    cqe->word[0], cqe->word[1],
				    cqe->word[2], cqe->word[3],
				    cq->host_index, cq->qe_valid);
			}
#endif /* DEBUG_CQE */
			break;
		}

		cq_entry.word[2] = BE_SWAP32(cqe->word[2]);
		cq_entry.word[1] = BE_SWAP32(cqe->word[1]);
		cq_entry.word[0] = BE_SWAP32(cqe->word[0]);

#ifdef	DEBUG_CQE
		emlxs_data_dump(port, "CQE", (uint32_t *)cqe, 6, 0);
#endif /* DEBUG_CQE */
		num_entries++;
		if (hba->sli.sli4.param.CqAV)
			/* auto-valid: clear the CQE but preserve the
			 * valid bit so phase tracking stays intact */
			cqe->word[3] &=	BE_SWAP32(CQE_VALID);
		else
			cqe->word[3] = 0;

		cq->host_index++;
		if (cq->host_index >= cq->max_index) {
			/* Ring wrap; with auto-valid the phase flips */
			cq->host_index = 0;
			cqe = (CQE_u *)cq->addr.virt;
			if (hba->sli.sli4.param.CqAV)
				cq->qe_valid ^= 1;
		} else {
			cqe++;
		}
		/* Drop the port lock while dispatching this entry */
		mutex_exit(&EMLXS_PORT_LOCK);

		/* Now handle specific cq type */
		if (cq->type == EMLXS_CQ_TYPE_GROUP1) {
			if (cq_entry.cqAsyncEntry.async_evt) {
				emlxs_sli4_process_async_event(hba,
				    (CQE_ASYNC_t *)&cq_entry);
			} else {
				emlxs_sli4_process_mbox_event(hba,
				    (CQE_MBOX_t *)&cq_entry);
			}
		} else { /* EMLXS_CQ_TYPE_GROUP2 */
			switch (cq_entry.cqCmplEntry.Code) {
			case CQE_TYPE_WQ_COMPLETION:
				/* Tags beyond max_iotag are out-of-range */
				if (cq_entry.cqCmplEntry.RequestTag <
				    hba->max_iotag) {
					emlxs_sli4_process_wqe_cmpl(hba, cq,
					    (CQE_CmplWQ_t *)&cq_entry);
				} else {
					emlxs_sli4_process_oor_wqe_cmpl(hba, cq,
					    (CQE_CmplWQ_t *)&cq_entry);
				}
				break;
			case CQE_TYPE_RELEASE_WQE:
				emlxs_sli4_process_release_wqe(hba, cq,
				    (CQE_RelWQ_t *)&cq_entry);
				break;
			case CQE_TYPE_UNSOL_RCV:
			case CQE_TYPE_UNSOL_RCV_V1:
				emlxs_sli4_process_unsol_rcv(hba, cq,
				    (CQE_UnsolRcv_t *)&cq_entry);
				break;
			case CQE_TYPE_XRI_ABORTED:
				emlxs_sli4_process_xri_aborted(hba, cq,
				    (CQE_XRI_Abort_t *)&cq_entry);
				break;
			default:
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "Invalid CQ entry eqid=%x qid=%x code=%d: "
				    "%08x %08x %08x %08x, host_index=%x "
				    "valid=%d",
				    cq->eqid, cq->qid,
				    cq_entry.cqCmplEntry.Code, cq_entry.word[0],
				    cq_entry.word[1], cq_entry.word[2],
				    cq_entry.word[3], cq->host_index,
				    cq->qe_valid);
				break;
			}
		}

		mutex_enter(&EMLXS_PORT_LOCK);
	}

	/* Number of times this routine gets called for this CQ */
	cq->isr_count++;

	/* num_entries is the number of CQEs we process in this specific CQ */
	cq->num_proc += num_entries;
	if (cq->max_proc < num_entries)
		cq->max_proc = num_entries;

	/* Pop the consumed entries and rearm the CQ */
	emlxs_sli4_write_cqdb(hba, cq->qid, num_entries, B_TRUE);

	/* EMLXS_PORT_LOCK must be held when exiting this routine */

} /* emlxs_sli4_process_cq() */
7248 
7249 
/*
 * emlxs_sli4_process_eq() - Drain one Event Queue.
 *
 * Walks the EQ ring from the current host index, consuming every EQE
 * whose valid (phase) bit matches eq->qe_valid.  Each EQE names a
 * completion queue; that CQ is drained via emlxs_sli4_process_cq().
 * After the ring is empty, any channels flagged EMLXS_NEEDS_TRIGGER
 * have their interrupt threads kicked, and the EQ doorbell is rung to
 * pop the consumed entries and rearm the queue.  intr_busy_cnt brackets
 * the routine so emlxs_sli4_hba_kill() can wait for ISRs to drain.
 *
 * EMLXS_PORT_LOCK must be held on entry and is held again on exit.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
{
	emlxs_port_t *port = &PPORT;
	uint32_t *ptr;
	CHANNEL *cp;
	EQE_u eqe;
	uint32_t i;
	uint16_t cqi;
	int num_entries = 0;
	off_t offset;

	/* EMLXS_PORT_LOCK must be held when entering this routine */

	hba->intr_busy_cnt ++;

	ptr = eq->addr.virt;
	ptr += eq->host_index;

	/* Offset of this EQ ring within the slim2 DMA area */
	offset = (off_t)((uint64_t)((unsigned long)
	    eq->addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	/*
	 * NOTE(review): syncs a fixed 4096 bytes -- presumably the EQ
	 * ring fits in one page; confirm against queue sizing.
	 */
	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);

	for (;;) {
		eqe.word = *ptr;
		eqe.word = BE_SWAP32(eqe.word);

		/* Stop at the first entry whose phase bit mismatches */
		if ((eqe.word & EQE_VALID) != eq->qe_valid) {
#ifdef DEBUG_FASTPATH
			/*
			 * NOTE(review): format string has three specifiers
			 * but only two arguments are passed -- host_index
			 * appears to be missing.  Debug-only path.
			 */
			if (num_entries == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "EQE: Invalid EQE: %x. host_index=%x "
				    "valid=%d Break...",
				    eqe.word, eq->qe_valid);
			}
#endif /* DEBUG_FASTPATH */
			break;
		}

#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "EQE00: %08x", eqe.word);
#endif /* DEBUG_FASTPATH */

		/* auto-valid: keep only the valid bit for phase tracking */
		if (hba->sli.sli4.param.EqAV)
			*ptr &= BE_SWAP32(EQE_VALID);
		else
			*ptr = 0;
		num_entries++;
		eq->host_index++;
		if (eq->host_index >= eq->max_index) {
			/* Ring wrap; with auto-valid the phase flips */
			eq->host_index = 0;
			ptr = eq->addr.virt;
			if (hba->sli.sli4.param.EqAV)
				eq->qe_valid ^= 1;
		} else {
			ptr++;
		}

		cqi = emlxs_sli4_cqid_to_index(hba, eqe.entry.CQId);

		/* Verify CQ index */
		if (cqi == 0xffff) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "EQE: Invalid CQid: %d. valid=%d Dropping...",
			    eqe.entry.CQId, eq->qe_valid);
			continue;
		}

#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "EQE: qid=%x host_index=%x valid=%d iptr=%p CQIndex:%x "
		    "cqid:%x",
		    eq->qid, eq->host_index, eq->qe_valid, ptr, cqi,
		    eqe.entry.CQId);
#endif /* DEBUG_FASTPATH */

		emlxs_sli4_process_cq(hba, &hba->sli.sli4.cq[cqi]);
	}

	/* Number of times the ISR for this EQ gets called */
	eq->isr_count++;

	/* num_entries is the number of EQEs we process in this specific ISR */
	eq->num_proc += num_entries;
	if (eq->max_proc < num_entries) {
		eq->max_proc = num_entries;
	}

	/* Kick any channel threads that deferred work to us */
	if (num_entries != 0) {
		for (i = 0; i < hba->chan_count; i++) {
			cp = &hba->chan[i];
			if (cp->chan_flag & EMLXS_NEEDS_TRIGGER) {
				cp->chan_flag &= ~EMLXS_NEEDS_TRIGGER;
				emlxs_thread_trigger2(&cp->intr_thread,
				    emlxs_proc_channel, cp);
			}
		}
	}

	/* Pop the consumed entries and rearm the EQ */
	emlxs_sli4_write_eqdb(hba, eq->qid, num_entries, B_TRUE);

	/* EMLXS_PORT_LOCK must be held when exiting this routine */

	hba->intr_busy_cnt --;

} /* emlxs_sli4_process_eq() */
7362 
7363 
#ifdef MSI_SUPPORT
/*
 * emlxs_sli4_msi_intr() - MSI/MSI-X interrupt handler.
 *
 * arg1 is the HBA; arg2 carries the MSI message id, which maps 1:1
 * onto an event queue.  Falls back to the legacy INTx handler when
 * fixed interrupts are in use, and ignores interrupts while the
 * adapter is killed or offline.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_msi_intr(char *arg1, char *arg2)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
#ifdef DEBUG_FASTPATH
	emlxs_port_t *port = &PPORT;
#endif /* DEBUG_FASTPATH */
	uint16_t vector;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "msiINTR arg1:%p arg2:%p", arg1, arg2);
#endif /* DEBUG_FASTPATH */

	/* Legacy delivery is handled by the INTx routine */
	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
		return (emlxs_sli4_intx_intr(arg1));
	}

	/* Decode the MSI message id; clamp bad ids to vector 0 */
	vector = (uint16_t)((unsigned long)arg2);
	if (vector >= hba->intr_count) {
		vector = 0;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return (DDI_INTR_UNCLAIMED);
	}

	/* The eq[] index == the MSI vector number */
	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[vector]);

	mutex_exit(&EMLXS_PORT_LOCK);

	return (DDI_INTR_CLAIMED);

} /* emlxs_sli4_msi_intr() */
#endif /* MSI_SUPPORT */
7409 
7410 
7411 /*ARGSUSED*/
7412 static int
emlxs_sli4_intx_intr(char * arg)7413 emlxs_sli4_intx_intr(char *arg)
7414 {
7415 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
7416 #ifdef DEBUG_FASTPATH
7417 	emlxs_port_t *port = &PPORT;
7418 #endif /* DEBUG_FASTPATH */
7419 
7420 #ifdef DEBUG_FASTPATH
7421 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7422 	    "intxINTR arg:%p", arg);
7423 #endif /* DEBUG_FASTPATH */
7424 
7425 	mutex_enter(&EMLXS_PORT_LOCK);
7426 
7427 	if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
7428 		mutex_exit(&EMLXS_PORT_LOCK);
7429 		return (DDI_INTR_UNCLAIMED);
7430 	}
7431 
7432 	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[0]);
7433 
7434 	mutex_exit(&EMLXS_PORT_LOCK);
7435 	return (DDI_INTR_CLAIMED);
7436 } /* emlxs_sli4_intx_intr() */
7437 
7438 
7439 static void
emlxs_sli4_hba_kill(emlxs_hba_t * hba)7440 emlxs_sli4_hba_kill(emlxs_hba_t *hba)
7441 {
7442 	emlxs_port_t *port = &PPORT;
7443 	uint32_t j;
7444 
7445 	mutex_enter(&EMLXS_PORT_LOCK);
7446 	if (hba->flag & FC_INTERLOCKED) {
7447 		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
7448 
7449 		mutex_exit(&EMLXS_PORT_LOCK);
7450 
7451 		return;
7452 	}
7453 
7454 	j = 0;
7455 	while (j++ < 10000) {
7456 		if ((hba->mbox_queue_flag == 0) &&
7457 		    (hba->intr_busy_cnt == 0)) {
7458 			break;
7459 		}
7460 
7461 		mutex_exit(&EMLXS_PORT_LOCK);
7462 		BUSYWAIT_US(100);
7463 		mutex_enter(&EMLXS_PORT_LOCK);
7464 	}
7465 
7466 	if ((hba->mbox_queue_flag != 0) || (hba->intr_busy_cnt > 0)) {
7467 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
7468 		    "Board kill failed. Adapter busy, %d, %d.",
7469 		    hba->mbox_queue_flag, hba->intr_busy_cnt);
7470 		mutex_exit(&EMLXS_PORT_LOCK);
7471 		return;
7472 	}
7473 
7474 	hba->flag |= FC_INTERLOCKED;
7475 
7476 	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
7477 
7478 	mutex_exit(&EMLXS_PORT_LOCK);
7479 
7480 } /* emlxs_sli4_hba_kill() */
7481 
7482 
7483 extern void
emlxs_sli4_hba_reset_all(emlxs_hba_t * hba,uint32_t flag)7484 emlxs_sli4_hba_reset_all(emlxs_hba_t *hba, uint32_t flag)
7485 {
7486 	emlxs_port_t *port = &PPORT;
7487 	uint32_t value;
7488 
7489 	mutex_enter(&EMLXS_PORT_LOCK);
7490 
7491 	if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) != SLI_INTF_IF_TYPE_2 &&
7492 	    (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) != SLI_INTF_IF_TYPE_6) {
7493 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
7494 		    "Reset All failed. Invalid Operation.");
7495 		mutex_exit(&EMLXS_PORT_LOCK);
7496 		return;
7497 	}
7498 
7499 	/* Issue a Firmware Reset All Request */
7500 	if (flag) {
7501 		value = SLI_PHYDEV_FRST | SLI_PHYDEV_FRL_ALL | SLI_PHYDEV_DD;
7502 	} else {
7503 		value = SLI_PHYDEV_FRST | SLI_PHYDEV_FRL_ALL;
7504 	}
7505 
7506 	ddi_put32(hba->sli.sli4.bar0_acc_handle,
7507 	    hba->sli.sli4.PHYSDEV_reg_addr, value);
7508 
7509 	mutex_exit(&EMLXS_PORT_LOCK);
7510 
7511 } /* emlxs_sli4_hba_reset_all() */
7512 
7513 
7514 static void
emlxs_sli4_enable_intr(emlxs_hba_t * hba)7515 emlxs_sli4_enable_intr(emlxs_hba_t *hba)
7516 {
7517 	emlxs_config_t *cfg = &CFG;
7518 	int i;
7519 	int num_cq;
7520 
7521 	hba->sli.sli4.flag |= EMLXS_SLI4_INTR_ENABLED;
7522 
7523 	num_cq = (hba->intr_count * cfg[CFG_NUM_WQ].current) +
7524 	    EMLXS_CQ_OFFSET_WQ;
7525 
7526 	/* ARM EQ / CQs */
7527 	for (i = 0; i < num_cq; i++) {
7528 		emlxs_sli4_write_cqdb(hba, hba->sli.sli4.cq[i].qid, 0, B_TRUE);
7529 	}
7530 
7531 	for (i = 0; i < hba->intr_count; i++) {
7532 		emlxs_sli4_write_eqdb(hba, hba->sli.sli4.eq[i].qid, 0, B_TRUE);
7533 	}
7534 } /* emlxs_sli4_enable_intr() */
7535 
7536 
7537 static void
emlxs_sli4_disable_intr(emlxs_hba_t * hba,uint32_t att)7538 emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att)
7539 {
7540 	if (att) {
7541 		return;
7542 	}
7543 
7544 	hba->sli.sli4.flag &= ~EMLXS_SLI4_INTR_ENABLED;
7545 
7546 	/* Short of reset, we cannot disable interrupts */
7547 } /* emlxs_sli4_disable_intr() */
7548 
/*
 * Tear down all SLI4 queue and exchange resources allocated by
 * emlxs_sli4_resource_alloc(). Safe to call when nothing is
 * allocated (returns immediately). Releases the XRI pool and its
 * SGLs, resets every queue descriptor, and finally frees the
 * slim2 DMA area that backs all of the queues.
 */
static void
emlxs_sli4_resource_free(emlxs_hba_t *hba)
{
	emlxs_port_t	*port = &PPORT;
	MBUF_INFO	*buf_info;
	uint32_t	i;

	/* slim2 backs everything; if it was never allocated, we're done */
	buf_info = &hba->sli.sli4.slim2;
	if (buf_info->virt == 0) {
		/* Already free */
		return;
	}

	emlxs_fcf_fini(hba);

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Header templates live inside slim2; just clear the descriptor */
	buf_info = &hba->sli.sli4.HeaderTmplate;
	if (buf_info->virt) {
		bzero(buf_info, sizeof (MBUF_INFO));
	}

	if (hba->sli.sli4.XRIp) {
		XRIobj_t	*xrip;

		/* Log any exchanges still on the in-use list; they are */
		/* about to be destroyed regardless */
		if ((hba->sli.sli4.XRIinuse_f !=
		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) ||
		    (hba->sli.sli4.XRIinuse_b !=
		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f)) {
			xrip = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
			while (xrip != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
				    "XRIs in use during free!: %p %p != %p "
				    "XRI:%d iotag:%d\n",
				    hba->sli.sli4.XRIinuse_f,
				    hba->sli.sli4.XRIinuse_b, xrip, xrip->XRI,
				    xrip->iotag);
				xrip = xrip->_f;
			}
		}

		/* Return each XRI's SGL to its memory segment. XRI 0 */
		/* never gets an SGL (it marks an uninitialized exchange) */
		xrip = hba->sli.sli4.XRIp;
		for (i = 0; i < hba->sli.sli4.XRICount; i++) {
			xrip->XRI = emlxs_sli4_index_to_xri(hba, i);

			if (xrip->XRI != 0)
				emlxs_mem_put(hba, xrip->SGSeg, xrip->SGList);

			xrip++;
		}

		kmem_free(hba->sli.sli4.XRIp,
		    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount));
		hba->sli.sli4.XRIp = NULL;

		/* Reset the free list to empty (self-referencing sentinel) */
		hba->sli.sli4.XRIfree_f =
		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.XRIfree_b =
		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.xrif_count = 0;
	}

	/* Reset queue descriptors; qid 0xffff marks "unassigned" */
	for (i = 0; i < hba->intr_count; i++) {
		mutex_destroy(&hba->sli.sli4.eq[i].lastwq_lock);
		bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
		hba->sli.sli4.eq[i].qid = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_CQS; i++) {
		bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
		hba->sli.sli4.cq[i].qid = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_WQS; i++) {
		bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
		hba->sli.sli4.wq[i].qid = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_RXQS; i++) {
		mutex_destroy(&hba->sli.sli4.rxq[i].lock);
		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
	}
	for (i = 0; i < EMLXS_MAX_RQS; i++) {
		mutex_destroy(&hba->sli.sli4.rq[i].lock);
		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
		hba->sli.sli4.rq[i].qid = 0xffff;
	}

	/* Free the MQ */
	bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));


	/* Finally release the slim2 DMA area that backed everything above */
	buf_info = &hba->sli.sli4.slim2;
	if (buf_info->virt) {
		buf_info->flags = FC_MBUF_DMA;
		emlxs_mem_free(hba, buf_info);
		bzero(buf_info, sizeof (MBUF_INFO));
	}

	mutex_exit(&EMLXS_PORT_LOCK);

	/* GPIO lock */
	if (hba->model_info.flags & EMLXS_GPIO_LEDS)
		mutex_destroy(&hba->gpio_lock);


} /* emlxs_sli4_resource_free() */
7653 
/*
 * Allocate all SLI4 queue and exchange resources.
 *
 * One large DMA area ("slim2") is allocated and then carved up, in
 * order, into: EQs, CQs, WQs, the MQ, RQ rings, RQ buffers, and
 * (when the port supports header templates) the RPI header template
 * region. Each section is kept 4K aligned. The XRI pool and its
 * per-XRI SGLs are allocated separately from kernel memory.
 *
 * Returns 0 on success, or ENOMEM after freeing any partial
 * allocation via emlxs_sli4_resource_free().
 */
static int
emlxs_sli4_resource_alloc(emlxs_hba_t *hba)
{
	emlxs_port_t	*port = &PPORT;
	emlxs_config_t	*cfg = &CFG;
	MBUF_INFO	*buf_info;
	int		num_eq;
	int		num_wq;
	uint16_t	i;
	uint32_t	j;
	uint32_t	k;
	uint16_t	cq_depth;
	uint32_t	cq_size;
	uint32_t	word;
	XRIobj_t	*xrip;
	RQE_t		*rqe;
	MBUF_INFO	*rqb;
	uint64_t	phys;		/* running physical carve pointer */
	uint64_t	tmp_phys;
	char		*virt;		/* running virtual carve pointer */
	char		*tmp_virt;
	void		*data_handle;
	void		*dma_handle;
	int32_t		size;
	off_t		offset;
	uint32_t	count = 0;	/* total slim2 bytes required */
	uint32_t	hddr_size = 0;
	uint32_t	align;
	uint32_t	iotag;
	uint32_t	mseg;

	buf_info = &hba->sli.sli4.slim2;
	if (buf_info->virt) {
		/* Already allocated */
		return (0);
	}

	emlxs_fcf_init(hba);

	/* CQ entry count depends on the CQ version the port reports */
	switch (hba->sli.sli4.param.CQV) {
	case 0:
		cq_depth = CQ_DEPTH;
		break;
	case 2:
	default:
		cq_depth = CQ_DEPTH_V2;
		break;
	}
	cq_size = (cq_depth * CQE_SIZE);

	/* EQs - 1 per Interrupt vector */
	num_eq = hba->intr_count;

	/* CQs  - number of WQs + 1 for RQs + 1 for mbox/async events */
	num_wq = cfg[CFG_NUM_WQ].current * num_eq;

	/* Calculate total dmable memory we need */
	/* WARNING: make sure each section is aligned on 4K boundary */

	/* EQ */
	count += num_eq * 4096;

	/* CQ */
	count += (num_wq + EMLXS_CQ_OFFSET_WQ) * cq_size;

	/* WQ */
	count += num_wq * (4096 * EMLXS_NUM_WQ_PAGES);

	/* MQ */
	count +=  EMLXS_MAX_MQS * 4096;

	/* RQ */
	count +=  EMLXS_MAX_RQS * 4096;

	/* RQB/E */
	count += RQB_COUNT * (RQB_DATA_SIZE + RQB_HEADER_SIZE);
	count += (4096 - (count%4096)); /* Ensure 4K alignment */

	/* RPI Header Templates */
	if (hba->sli.sli4.param.HDRR) {
		/* Bytes per extent */
		j = hba->sli.sli4.RPIExtSize * sizeof (RPIHdrTmplate_t);

		/* Pages required per extent (page == 4096 bytes) */
		k = (j/4096) + ((j%4096)? 1:0);

		/* Total size */
		hddr_size = (k * hba->sli.sli4.RPIExtCount * 4096);

		count += hddr_size;
	}

	/* Allocate slim2 for SLI4 */
	buf_info = &hba->sli.sli4.slim2;
	buf_info->size = count;
	buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
	buf_info->align = ddi_ptob(hba->dip, 1L);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Allocating memory for slim2: %d", count);

	(void) emlxs_mem_alloc(hba, buf_info);

	if (buf_info->virt == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_init_failed_msg,
		    "Unable to allocate internal memory for SLI4: %d",
		    count);
		goto failed;
	}
	bzero(buf_info->virt, buf_info->size);
	EMLXS_MPDATA_SYNC(buf_info->dma_handle, 0,
	    buf_info->size, DDI_DMA_SYNC_FORDEV);

	/* Assign memory to Head Template, EQ, CQ, WQ, RQ and MQ */
	/* Every carve below shares slim2's DMA handles; phys/virt */
	/* advance in lock-step through the single allocation */
	data_handle = buf_info->data_handle;
	dma_handle = buf_info->dma_handle;
	phys = buf_info->phys;
	virt = (char *)buf_info->virt;

	/* Allocate space for queues */

	/* EQ */
	size = 4096;
	for (i = 0; i < num_eq; i++) {
		bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));

		buf_info = &hba->sli.sli4.eq[i].addr;
		buf_info->size = size;
		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
		buf_info->align = ddi_ptob(hba->dip, 1L);
		buf_info->phys = phys;
		buf_info->virt = (void *)virt;
		buf_info->data_handle = data_handle;
		buf_info->dma_handle = dma_handle;

		phys += size;
		virt += size;

		hba->sli.sli4.eq[i].max_index = EQ_DEPTH;
		hba->sli.sli4.eq[i].qid = 0xffff;

		mutex_init(&hba->sli.sli4.eq[i].lastwq_lock, NULL,
		    MUTEX_DRIVER, NULL);
		hba->sli.sli4.eq[i].qe_valid = 1;
	}


	/* CQ */
	for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
		bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));

		buf_info = &hba->sli.sli4.cq[i].addr;
		buf_info->size = cq_size;
		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
		buf_info->align = ddi_ptob(hba->dip, 1L);
		buf_info->phys = phys;
		buf_info->virt = (void *)virt;
		buf_info->data_handle = data_handle;
		buf_info->dma_handle = dma_handle;

		phys += cq_size;
		virt += cq_size;

		hba->sli.sli4.cq[i].max_index = cq_depth;
		hba->sli.sli4.cq[i].qid = 0xffff;
		hba->sli.sli4.cq[i].qe_valid = 1;
	}


	/* WQ */
	size = 4096 * EMLXS_NUM_WQ_PAGES;
	for (i = 0; i < num_wq; i++) {
		bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));

		buf_info = &hba->sli.sli4.wq[i].addr;
		buf_info->size = size;
		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
		buf_info->align = ddi_ptob(hba->dip, 1L);
		buf_info->phys = phys;
		buf_info->virt = (void *)virt;
		buf_info->data_handle = data_handle;
		buf_info->dma_handle = dma_handle;

		phys += size;
		virt += size;

		hba->sli.sli4.wq[i].max_index = WQ_DEPTH;
		hba->sli.sli4.wq[i].release_depth = WQE_RELEASE_DEPTH;
		hba->sli.sli4.wq[i].qid = 0xFFFF;
	}


	/* MQ */
	size = 4096;
	bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));

	buf_info = &hba->sli.sli4.mq.addr;
	buf_info->size = size;
	buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
	buf_info->align = ddi_ptob(hba->dip, 1L);
	buf_info->phys = phys;
	buf_info->virt = (void *)virt;
	buf_info->data_handle = data_handle;
	buf_info->dma_handle = dma_handle;

	phys += size;
	virt += size;

	hba->sli.sli4.mq.max_index = MQ_DEPTH;


	/* RXQ */
	for (i = 0; i < EMLXS_MAX_RXQS; i++) {
		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));

		mutex_init(&hba->sli.sli4.rxq[i].lock, NULL, MUTEX_DRIVER,
		    NULL);
	}


	/* RQ */
	size = 4096;
	for (i = 0; i < EMLXS_MAX_RQS; i++) {
		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));

		buf_info = &hba->sli.sli4.rq[i].addr;
		buf_info->size = size;
		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
		buf_info->align = ddi_ptob(hba->dip, 1L);
		buf_info->phys = phys;
		buf_info->virt = (void *)virt;
		buf_info->data_handle = data_handle;
		buf_info->dma_handle = dma_handle;

		phys += size;
		virt += size;

		hba->sli.sli4.rq[i].max_index = RQ_DEPTH;
		hba->sli.sli4.rq[i].qid = 0xFFFF;

		mutex_init(&hba->sli.sli4.rq[i].lock, NULL, MUTEX_DRIVER, NULL);
	}


	/* RQB/E */
	/* Even-numbered RQs carry headers, odd-numbered RQs carry data; */
	/* each RQ entry is pointed at its own slice of the RQ buffer pool */
	for (i = 0; i < EMLXS_MAX_RQS; i++) {
		size = (i & 0x1) ? RQB_DATA_SIZE : RQB_HEADER_SIZE;
		tmp_phys = phys;
		tmp_virt = virt;

		/* Initialize the RQEs */
		/* NOTE(review): the buffers repeat every RQB_COUNT entries, */
		/* so RQ_DEPTH entries share RQB_COUNT physical buffers */
		rqe = (RQE_t *)hba->sli.sli4.rq[i].addr.virt;
		for (j = 0; j < (RQ_DEPTH/RQB_COUNT); j++) {
			phys = tmp_phys;
			virt = tmp_virt;
			for (k = 0; k < RQB_COUNT; k++) {
				word = PADDR_HI(phys);
				rqe->AddrHi = BE_SWAP32(word);

				word = PADDR_LO(phys);
				rqe->AddrLo = BE_SWAP32(word);

				rqb = &hba->sli.sli4.rq[i].
				    rqb[k + (j * RQB_COUNT)];
				rqb->size = size;
				rqb->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
				rqb->align = ddi_ptob(hba->dip, 1L);
				rqb->phys = phys;
				rqb->virt = (void *)virt;
				rqb->data_handle = data_handle;
				rqb->dma_handle = dma_handle;

				phys += size;
				virt += size;
#ifdef DEBUG_RQE
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "RQ_ALLOC: rq[%d] rqb[%d,%d]=%p flags=%d",
				    i, j, k, rqb, rqb->flags);
#endif /* DEBUG_RQE */

				rqe++;
			}
		}

		offset = (off_t)((uint64_t)((unsigned long)
		    hba->sli.sli4.rq[i].addr.virt) -
		    (uint64_t)((unsigned long)
		    hba->sli.sli4.slim2.virt));

		/* Sync the RQ buffer list */
		EMLXS_MPDATA_SYNC(hba->sli.sli4.rq[i].addr.dma_handle, offset,
		    hba->sli.sli4.rq[i].addr.size, DDI_DMA_SYNC_FORDEV);
	}

	/* 4K Alignment */
	align = (4096 - (phys%4096));
	phys += align;
	virt += align;

	/* RPI Header Templates */
	if (hba->sli.sli4.param.HDRR) {
		buf_info = &hba->sli.sli4.HeaderTmplate;
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = hddr_size;
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = ddi_ptob(hba->dip, 1L);
		buf_info->phys = phys;
		buf_info->virt = (void *)virt;
		buf_info->data_handle = data_handle;
		buf_info->dma_handle = dma_handle;
	}

	/* SGL */

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Allocating memory for %d SGLs: %d/%d",
	    hba->sli.sli4.XRICount, sizeof (XRIobj_t), size);

	/* Initialize double linked lists */
	/* An empty list is a self-referencing sentinel */
	hba->sli.sli4.XRIinuse_f =
	    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.XRIinuse_b =
	    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.xria_count = 0;

	hba->sli.sli4.XRIfree_f =
	    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
	hba->sli.sli4.XRIfree_b =
	    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
	hba->sli.sli4.xrif_count = 0;

	/* Pick the SGL memory segment matching the configured SGL size */
	switch (hba->sli.sli4.mem_sgl_size) {
	case 1024:
		mseg = MEM_SGL1K;
		break;
	case 2048:
		mseg = MEM_SGL2K;
		break;
	case 4096:
		mseg = MEM_SGL4K;
		break;
	default:
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_init_failed_msg,
		    "Unsupported SGL Size: %d", hba->sli.sli4.mem_sgl_size);
		goto failed;
	}

	hba->sli.sli4.XRIp = (XRIobj_t *)kmem_zalloc(
	    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount), KM_SLEEP);

	xrip = hba->sli.sli4.XRIp;
	iotag = 1;

	for (i = 0; i < hba->sli.sli4.XRICount; i++) {
		xrip->XRI = emlxs_sli4_index_to_xri(hba, i);

		/* We don't use XRI==0, since it also represents an */
		/* uninitialized exchange */
		if (xrip->XRI == 0) {
			xrip++;
			continue;
		}

		xrip->iotag = iotag++;
		xrip->sge_count =
		    (hba->sli.sli4.mem_sgl_size / sizeof (ULP_SGE64));

		/* Add xrip to end of free list */
		xrip->_b = hba->sli.sli4.XRIfree_b;
		hba->sli.sli4.XRIfree_b->_f = xrip;
		xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.XRIfree_b = xrip;
		hba->sli.sli4.xrif_count++;

		/* Allocate SGL for this xrip */
		xrip->SGSeg = mseg;
		xrip->SGList = emlxs_mem_get(hba, xrip->SGSeg);

		if (xrip->SGList == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to allocate memory for SGL %d", i);
			goto failed;
		}

		EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
		    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);

		xrip++;
	}

	/* GPIO lock */
	if (hba->model_info.flags & EMLXS_GPIO_LEDS)
		mutex_init(&hba->gpio_lock, NULL, MUTEX_DRIVER, NULL);

#ifdef FMA_SUPPORT
	if (hba->sli.sli4.slim2.dma_handle) {
		if (emlxs_fm_check_dma_handle(hba,
		    hba->sli.sli4.slim2.dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "sli4_resource_alloc: hdl=%p",
			    hba->sli.sli4.slim2.dma_handle);
			goto failed;
		}
	}
#endif /* FMA_SUPPORT */

	return (0);

failed:

	/* Release anything allocated so far */
	(void) emlxs_sli4_resource_free(hba);
	return (ENOMEM);

} /* emlxs_sli4_resource_alloc */
8072 
8073 
8074 extern void
emlxs_sli4_zero_queue_stat(emlxs_hba_t * hba)8075 emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba)
8076 {
8077 	uint32_t i;
8078 	uint32_t num_wq;
8079 	emlxs_config_t	*cfg = &CFG;
8080 	clock_t		time;
8081 
8082 	/* EQ */
8083 	for (i = 0; i < hba->intr_count; i++) {
8084 		hba->sli.sli4.eq[i].num_proc = 0;
8085 		hba->sli.sli4.eq[i].max_proc = 0;
8086 		hba->sli.sli4.eq[i].isr_count = 0;
8087 	}
8088 	num_wq = cfg[CFG_NUM_WQ].current * hba->intr_count;
8089 	/* CQ */
8090 	for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
8091 		hba->sli.sli4.cq[i].num_proc = 0;
8092 		hba->sli.sli4.cq[i].max_proc = 0;
8093 		hba->sli.sli4.cq[i].isr_count = 0;
8094 	}
8095 	/* WQ */
8096 	for (i = 0; i < num_wq; i++) {
8097 		hba->sli.sli4.wq[i].num_proc = 0;
8098 		hba->sli.sli4.wq[i].num_busy = 0;
8099 	}
8100 	/* RQ */
8101 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
8102 		hba->sli.sli4.rq[i].num_proc = 0;
8103 	}
8104 	(void) drv_getparm(LBOLT, &time);
8105 	hba->sli.sli4.que_stat_timer = (uint32_t)time;
8106 
8107 } /* emlxs_sli4_zero_queue_stat */
8108 
8109 
/*
 * Reserve an exchange (XRI) from the free pool without binding it to
 * an I/O packet. The XRI is marked EMLXS_XRI_RESERVED and moved to
 * the in-use list; its iotag slot in fc_table must be empty.
 *
 * Returns the reserved XRIobj_t, or NULL if no XRI (or iotag slot)
 * is available. Caller later registers it (emlxs_sli4_register_xri)
 * or releases it (emlxs_sli4_unreserve_xri).
 */
extern XRIobj_t *
emlxs_sli4_reserve_xri(emlxs_port_t *port,  RPIobj_t *rpip, uint32_t type,
    uint16_t rx_id)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t	*xrip;
	uint16_t	iotag;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	/* Head of the free list; the self-referencing sentinel means empty */
	xrip = hba->sli.sli4.XRIfree_f;

	if (xrip == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
		mutex_exit(&EMLXS_FCTAB_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "Unable to reserve XRI. type=%d",
		    type);

		return (NULL);
	}

	iotag = xrip->iotag;

	/* The XRI's iotag slot must be free (or stale) before we take it */
	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to reserve iotag. type=%d",
		    type);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	xrip->state = XRI_STATE_ALLOCATED;
	xrip->type = type;
	xrip->flag = EMLXS_XRI_RESERVED;
	xrip->sbp = NULL;

	xrip->rpip = rpip;
	xrip->rx_id = rx_id;
	rpip->xri_count++;

	/* Take it off free list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xrif_count--;

	/* Add it to end of inuse list */
	xrip->_b = hba->sli.sli4.XRIinuse_b;
	hba->sli.sli4.XRIinuse_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.XRIinuse_b = xrip;
	hba->sli.sli4.xria_count++;

	mutex_exit(&EMLXS_FCTAB_LOCK);
	return (xrip);

} /* emlxs_sli4_reserve_xri() */
8175 
8176 
/*
 * Release a previously reserved (but not registered) XRI back to the
 * free pool. 'lock' indicates whether this routine should acquire
 * EMLXS_FCTAB_LOCK itself (callers may already hold it).
 *
 * Returns 0 on success (or if the XRI was already free), 1 if the
 * XRI is actively in use (not merely reserved) and cannot be
 * unreserved.
 */
extern uint32_t
emlxs_sli4_unreserve_xri(emlxs_port_t *port, uint16_t xri, uint32_t lock)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t *xrip;

	if (lock) {
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}

	xrip = emlxs_sli4_find_xri(port, xri);

	/* Nothing to do if the XRI is unknown or already free */
	if (!xrip || xrip->state == XRI_STATE_FREE) {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "sli4_unreserve_xri:%d already freed.", xri);
		return (0);
	}

	/* Flush this unsolicited ct command */
	if (xrip->type == EMLXS_XRI_UNSOL_CT_TYPE) {
		(void) emlxs_flush_ct_event(port, xrip->rx_id);
	}

	/* An XRI that is in use (registered) cannot be unreserved */
	if (!(xrip->flag & EMLXS_XRI_RESERVED)) {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "sli4_unreserve_xri:%d in use. type=%d",
		    xrip->XRI, xrip->type);
		return (1);
	}

	/* A reserved XRI should not own an fc_table entry; if one is */
	/* found, drop it and fix the outstanding I/O count */
	if (xrip->iotag &&
	    (hba->fc_table[xrip->iotag] != NULL) &&
	    (hba->fc_table[xrip->iotag] != STALE_PACKET)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "sli4_unreserve_xri:%d  sbp dropped:%p type=%d",
		    xrip->XRI, hba->fc_table[xrip->iotag], xrip->type);

		hba->fc_table[xrip->iotag] = NULL;
		hba->io_count--;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "sli4_unreserve_xri:%d unreserved. type=%d",
	    xrip->XRI, xrip->type);

	xrip->state = XRI_STATE_FREE;
	xrip->type = 0;

	/* Drop the RPI references this exchange held */
	if (xrip->rpip) {
		xrip->rpip->xri_count--;
		xrip->rpip = NULL;
	}

	if (xrip->reserved_rpip) {
		xrip->reserved_rpip->xri_count--;
		xrip->reserved_rpip = NULL;
	}

	/* Take it off inuse list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xria_count--;

	/* Add it to end of free list */
	xrip->_b = hba->sli.sli4.XRIfree_b;
	hba->sli.sli4.XRIfree_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
	hba->sli.sli4.XRIfree_b = xrip;
	hba->sli.sli4.xrif_count++;

	if (lock) {
		mutex_exit(&EMLXS_FCTAB_LOCK);
	}

	return (0);

} /* emlxs_sli4_unreserve_xri() */
8264 
8265 
/*
 * Bind an I/O packet (sbp) to a previously reserved XRI, claiming
 * the XRI's iotag slot in fc_table. Clears EMLXS_XRI_RESERVED.
 * If the XRI was reserved while only the fabric RPI was known,
 * re-resolve the destination 'did' and migrate the XRI to the
 * registered RPI.
 *
 * Returns the registered XRIobj_t, or NULL if the XRI cannot be
 * found, is not in the reserved state, or its iotag slot is taken.
 */
XRIobj_t *
emlxs_sli4_register_xri(emlxs_port_t *port, emlxs_buf_t *sbp, uint16_t xri,
    uint32_t did)
{
	emlxs_hba_t *hba = HBA;
	uint16_t	iotag;
	XRIobj_t	*xrip;
	emlxs_node_t	*node;
	RPIobj_t	*rpip;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	/* Prefer the XRI already attached to the packet, else look it up */
	xrip = sbp->xrip;
	if (!xrip) {
		xrip = emlxs_sli4_find_xri(port, xri);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "sli4_register_xri:%d XRI not found.", xri);

			mutex_exit(&EMLXS_FCTAB_LOCK);
			return (NULL);
		}
	}

	/* Only a reserved (not free, not already registered) XRI is valid */
	if ((xrip->state == XRI_STATE_FREE) ||
	    !(xrip->flag & EMLXS_XRI_RESERVED)) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "sli4_register_xri:%d Invalid XRI. xrip=%p "
		    "state=%x flag=%x",
		    xrip->XRI, xrip, xrip->state, xrip->flag);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	iotag = xrip->iotag;

	/* The iotag slot must be empty (or stale) before claiming it */
	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "sli4_register_xri:%d Invalid fc_table entry. "
		    "iotag=%d entry=%p",
		    xrip->XRI, iotag, hba->fc_table[iotag]);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	hba->fc_table[iotag] = sbp;
	hba->io_count++;

	sbp->iotag = iotag;
	sbp->xrip = xrip;

	xrip->flag &= ~EMLXS_XRI_RESERVED;
	xrip->sbp = sbp;

	/* If we did not have a registered RPI when we reserved */
	/* this exchange, check again now. */
	if (xrip->rpip && (xrip->rpip->RPI == FABRIC_RPI)) {
		node = emlxs_node_find_did(port, did, 1);
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (rpip && (rpip->RPI != FABRIC_RPI)) {
			/* Move the XRI to the new RPI */
			xrip->rpip->xri_count--;
			xrip->rpip = rpip;
			rpip->xri_count++;
		}
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

	return (xrip);

} /* emlxs_sli4_register_xri() */
8346 
8347 
/* Performs both reserve and register functions for XRI */
/*
 * Takes an XRI from the free pool and immediately binds it to the
 * I/O packet 'sbp' (fc_table entry, iotag, RPI reference), moving
 * it to the in-use list.
 *
 * Returns the allocated XRIobj_t, or NULL if the free pool is empty
 * or the XRI's iotag slot is unavailable.
 */
static XRIobj_t *
emlxs_sli4_alloc_xri(emlxs_port_t *port, emlxs_buf_t *sbp, RPIobj_t *rpip,
    uint32_t type)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t	*xrip;
	uint16_t	iotag;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	/* Head of the free list; the self-referencing sentinel means empty */
	xrip = hba->sli.sli4.XRIfree_f;

	if (xrip == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
		mutex_exit(&EMLXS_FCTAB_LOCK);

		return (NULL);
	}

	/* Get the iotag by registering the packet */
	iotag = xrip->iotag;

	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to alloc iotag:(0x%x)(%p) type=%d",
		    iotag, hba->fc_table[iotag], type);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	hba->fc_table[iotag] = sbp;
	hba->io_count++;

	sbp->iotag = iotag;
	sbp->xrip = xrip;

	/* Registered (not reserved), so flag is cleared */
	xrip->state = XRI_STATE_ALLOCATED;
	xrip->type = type;
	xrip->flag = 0;
	xrip->sbp = sbp;

	xrip->rpip = rpip;
	rpip->xri_count++;

	/* Take it off free list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xrif_count--;

	/* Add it to end of inuse list */
	xrip->_b = hba->sli.sli4.XRIinuse_b;
	hba->sli.sli4.XRIinuse_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.XRIinuse_b = xrip;
	hba->sli.sli4.xria_count++;

	mutex_exit(&EMLXS_FCTAB_LOCK);

	return (xrip);

} /* emlxs_sli4_alloc_xri() */
8417 
8418 
8419 /* EMLXS_FCTAB_LOCK must be held to enter */
8420 extern XRIobj_t *
emlxs_sli4_find_xri(emlxs_port_t * port,uint16_t xri)8421 emlxs_sli4_find_xri(emlxs_port_t *port, uint16_t xri)
8422 {
8423 	emlxs_hba_t *hba = HBA;
8424 	XRIobj_t	*xrip;
8425 
8426 	xrip = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
8427 	while (xrip != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
8428 		if ((xrip->state >= XRI_STATE_ALLOCATED) &&
8429 		    (xrip->XRI == xri)) {
8430 			return (xrip);
8431 		}
8432 		xrip = xrip->_f;
8433 	}
8434 
8435 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
8436 	    "Unable to find XRI x%x", xri);
8437 
8438 	return (NULL);
8439 
8440 } /* emlxs_sli4_find_xri() */
8441 
8442 
8443 
8444 
/*
 * Release an exchange and/or detach an I/O packet from it.
 *
 * If 'xrip' is given, the XRI is freed: its fc_table slot and RPI
 * references are dropped and it moves from the in-use list back to
 * the free list. If 'sbp' is given, the packet's iotag/xrip binding
 * is cleared and its TXQ/CHIPQ flags reset. 'lock' indicates whether
 * this routine should acquire EMLXS_FCTAB_LOCK itself.
 */
extern void
emlxs_sli4_free_xri(emlxs_port_t *port, emlxs_buf_t *sbp, XRIobj_t *xrip,
    uint8_t lock)
{
	emlxs_hba_t *hba = HBA;

	if (lock) {
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}

	if (xrip) {
		/* Double-free guard */
		if (xrip->state == XRI_STATE_FREE) {
			if (lock) {
				mutex_exit(&EMLXS_FCTAB_LOCK);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Free XRI:%x, Already freed. type=%d",
			    xrip->XRI, xrip->type);
			return;
		}

		/* Flush any pending unsolicited CT event for this exchange */
		if (xrip->type == EMLXS_XRI_UNSOL_CT_TYPE) {
			(void) emlxs_flush_ct_event(port, xrip->rx_id);
		}

		/* Release the iotag slot if this XRI still owns one */
		if (xrip->iotag &&
		    (hba->fc_table[xrip->iotag] != NULL) &&
		    (hba->fc_table[xrip->iotag] != STALE_PACKET)) {
			hba->fc_table[xrip->iotag] = NULL;
			hba->io_count--;
		}

		xrip->state = XRI_STATE_FREE;
		xrip->type  = 0;
		xrip->flag  = 0;

		/* Drop the RPI references this exchange held */
		if (xrip->rpip) {
			xrip->rpip->xri_count--;
			xrip->rpip = NULL;
		}

		if (xrip->reserved_rpip) {
			xrip->reserved_rpip->xri_count--;
			xrip->reserved_rpip = NULL;
		}

		/* Take it off inuse list */
		(xrip->_b)->_f = xrip->_f;
		(xrip->_f)->_b = xrip->_b;
		xrip->_f = NULL;
		xrip->_b = NULL;
		hba->sli.sli4.xria_count--;

		/* Add it to end of free list */
		xrip->_b = hba->sli.sli4.XRIfree_b;
		hba->sli.sli4.XRIfree_b->_f = xrip;
		xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.XRIfree_b = xrip;
		hba->sli.sli4.xrif_count++;
	}

	if (sbp) {
		/* Packets already completed or owned by the ULP are */
		/* not touched further */
		if (!(sbp->pkt_flags & PACKET_VALID) ||
		    (sbp->pkt_flags &
		    (PACKET_ULP_OWNED|PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
			if (lock) {
				mutex_exit(&EMLXS_FCTAB_LOCK);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Free XRI: sbp invalid. sbp=%p flags=%x xri=%d",
			    sbp, sbp->pkt_flags, ((xrip)? xrip->XRI:0));
			return;
		}

		/* Sanity: the packet and XRI should agree on the iotag */
		if (xrip && (xrip->iotag != sbp->iotag)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "sbp/iotag mismatch %p iotag:%d %d", sbp,
			    sbp->iotag, xrip->iotag);
		}

		if (sbp->iotag) {
			if (sbp == hba->fc_table[sbp->iotag]) {
				hba->fc_table[sbp->iotag] = NULL;
				hba->io_count--;

				if (sbp->xrip) {
					/* Exchange is still reserved */
					sbp->xrip->flag |= EMLXS_XRI_RESERVED;
				}
			}
			sbp->iotag = 0;
		}

		if (xrip) {
			sbp->xrip = 0;
		}

		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		/* Clean up the sbp */
		mutex_enter(&sbp->mtx);

		if (sbp->pkt_flags & PACKET_IN_TXQ) {
			sbp->pkt_flags &= ~PACKET_IN_TXQ;
			hba->channel_tx_count--;
		}

		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
		}

		mutex_exit(&sbp->mtx);
	} else {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}
	}

} /* emlxs_sli4_free_xri() */
8566 
8567 
static int
emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4	*mb = (MAILBOX4 *)mbq;
	emlxs_port_t	*port = &PPORT;
	XRIobj_t	*xrip;
	MATCHMAP	*mp;
	mbox_req_hdr_t	*hdr_req;
	uint32_t	i;
	uint32_t	cnt;
	uint32_t	xri_cnt;
	uint32_t	j;
	uint32_t	size;
	IOCTL_FCOE_CFG_POST_SGL_PAGES *post_sgl;

	/*
	 * Register the physical SGL page addresses for every allocated XRI
	 * with the adapter, issuing as many synchronous
	 * FCOE_OPCODE_CFG_POST_SGL_PAGES mailbox commands as it takes to
	 * cover all XRI extents.  Returns 0 on success, EIO on failure.
	 */
	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
	mbq->bp = NULL;
	mbq->mbox_cmpl = NULL;

	/* Payload is too large to embed; allocate a separate DMA buffer */
	if ((mp = emlxs_mem_buf_alloc(hba, EMLXS_MAX_NONEMBED_SIZE)) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Unable to POST_SGL. Mailbox cmd=%x  ",
		    mb->mbxCommand);
		return (EIO);
	}
	mbq->nonembed = (void *)mp;

	/*
	 * Signifies a non embedded command
	 */
	mb->un.varSLIConfig.be.embedded = 0;
	mb->mbxCommand = MBX_SLI_CONFIG;
	mb->mbxOwner = OWN_HOST;

	/* Request header sits at the start of the buffer, payload after it */
	hdr_req = (mbox_req_hdr_t *)mp->virt;
	post_sgl =
	    (IOCTL_FCOE_CFG_POST_SGL_PAGES *)(hdr_req + 1);

	xrip = hba->sli.sli4.XRIp;

	/* For each extent */
	for (j = 0; j < hba->sli.sli4.XRIExtCount; j++) {
		cnt = hba->sli.sli4.XRIExtSize;
		while (cnt) {
			/* Skip XRI slots that were never assigned an id */
			if (xrip->XRI == 0) {
				cnt--;
				xrip++;
				continue;
			}

			/* Rebuild header + payload for this command */
			bzero((void *) hdr_req, mp->size);
			size = mp->size - IOCTL_HEADER_SZ;

			mb->un.varSLIConfig.be.payload_length =
			    mp->size;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
			    IOCTL_SUBSYSTEM_FCOE;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
			    FCOE_OPCODE_CFG_POST_SGL_PAGES;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length = size;

			hdr_req->subsystem = IOCTL_SUBSYSTEM_FCOE;
			hdr_req->opcode = FCOE_OPCODE_CFG_POST_SGL_PAGES;
			hdr_req->timeout = 0;
			hdr_req->req_length = size;

			post_sgl->params.request.xri_count = 0;
			post_sgl->params.request.xri_start = xrip->XRI;

			/* How many XRI page pairs fit in one command */
			xri_cnt = (size -
			    sizeof (IOCTL_FCOE_CFG_POST_SGL_PAGES)) /
			    sizeof (FCOE_SGL_PAGES);

			/* Fill the command with consecutive XRIs */
			for (i = 0; (i < xri_cnt) && cnt; i++) {
				post_sgl->params.request.xri_count++;
				post_sgl->params.request.pages[i].\
				    sgl_page0.addrLow =
				    PADDR_LO(xrip->SGList->phys);
				post_sgl->params.request.pages[i].\
				    sgl_page0.addrHigh =
				    PADDR_HI(xrip->SGList->phys);

				cnt--;
				xrip++;
			}

			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "Unable to POST_SGL. Mailbox cmd=%x "
				    "status=%x XRI cnt:%d start:%d",
				    mb->mbxCommand, mb->mbxStatus,
				    post_sgl->params.request.xri_count,
				    post_sgl->params.request.xri_start);
				emlxs_mem_buf_free(hba, mp);
				mbq->nonembed = NULL;
				return (EIO);
			}
		}
	}

	emlxs_mem_buf_free(hba, mp);
	mbq->nonembed = NULL;
	return (0);

} /* emlxs_sli4_post_sgl_pages() */
8675 
8676 
static int
emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4	*mb = (MAILBOX4 *)mbq;
	emlxs_port_t	*port = &PPORT;
	uint32_t	j;
	uint32_t	k;
	uint64_t	addr;
	IOCTL_FCOE_POST_HDR_TEMPLATES *post_hdr;
	uint16_t	num_pages;

	/*
	 * Post the RPI header template pages to the adapter, one embedded
	 * FCOE_OPCODE_POST_HDR_TEMPLATES mailbox command per RPI extent.
	 * No-op (returns 0) unless the adapter reports header-template
	 * support (HDRR).  Returns 0 on success, EIO on failure.
	 */
	if (!(hba->sli.sli4.param.HDRR)) {
		return (0);
	}

	/* Bytes per extent */
	/* NOTE: 'j' is reused below as the extent loop index */
	j = hba->sli.sli4.RPIExtSize * sizeof (RPIHdrTmplate_t);

	/* Pages required per extent (page == 4096 bytes) */
	num_pages = (j/4096) + ((j%4096)? 1:0);

	/* Walk the template DMA area; advanced one page per entry below */
	addr = hba->sli.sli4.HeaderTmplate.phys;

	/* For each extent */
	for (j = 0; j < hba->sli.sli4.RPIExtCount; j++) {
		bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
		mbq->bp = NULL;
		mbq->mbox_cmpl = NULL;

		/*
		 * Signifies an embedded command
		 */
		mb->un.varSLIConfig.be.embedded = 1;

		mb->mbxCommand = MBX_SLI_CONFIG;
		mb->mbxOwner = OWN_HOST;
		mb->un.varSLIConfig.be.payload_length =
		    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES) + IOCTL_HEADER_SZ;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
		    IOCTL_SUBSYSTEM_FCOE;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
		    FCOE_OPCODE_POST_HDR_TEMPLATES;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
		    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES);

		post_hdr =
		    (IOCTL_FCOE_POST_HDR_TEMPLATES *)
		    &mb->un.varSLIConfig.payload;
		post_hdr->params.request.num_pages = num_pages;
		post_hdr->params.request.rpi_offset = hba->sli.sli4.RPIBase[j];

		/* One physical page address per entry for this extent */
		for (k = 0; k < num_pages; k++) {
			post_hdr->params.request.pages[k].addrLow =
			    PADDR_LO(addr);
			post_hdr->params.request.pages[k].addrHigh =
			    PADDR_HI(addr);
			addr += 4096;
		}

		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Unable to POST_HDR_TEMPLATES. Mailbox cmd=%x "
			    "status=%x ",
			    mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}
		emlxs_data_dump(port, "POST_HDR", (uint32_t *)mb, 18, 0);
	}

	return (0);

} /* emlxs_sli4_post_hdr_tmplates() */
8751 
8752 
static int
emlxs_sli4_create_queues(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4	*mb = (MAILBOX4 *)mbq;
	emlxs_port_t	*port = &PPORT;
	emlxs_config_t	*cfg = &CFG;
	IOCTL_COMMON_EQ_CREATE *eq;
	IOCTL_COMMON_CQ_CREATE *cq;
	IOCTL_FCOE_WQ_CREATE *wq;
	IOCTL_FCOE_RQ_CREATE *rq;
	IOCTL_COMMON_MQ_CREATE *mq;
	IOCTL_COMMON_MQ_CREATE_EXT *mq_ext;
	uint16_t i, j;
	uint16_t num_cq, total_cq;
	uint16_t num_wq, total_wq;

	/*
	 * Create all SLI4 queues (EQs, CQs, WQs, RQs and the MQ) via
	 * mailbox commands and record the queue ids assigned by the
	 * adapter.  Returns 0 on success, EIO if any create fails.
	 *
	 * The first CQ is reserved for ASYNC events,
	 * the second is reserved for unsol rcv, the rest
	 * correspond to WQs. (WQ0 -> CQ2, WQ1 -> CQ3, ...)
	 */

	total_cq = 0;
	total_wq = 0;

	/* Create EQ's - one per interrupt vector */
	for (i = 0; i < hba->intr_count; i++) {
		emlxs_mb_eq_create(hba, mbq, i);
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to Create EQ %d: Mailbox cmd=%x status=%x ",
			    i, mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}
		/* Record the adapter-assigned EQ id and vector binding */
		eq = (IOCTL_COMMON_EQ_CREATE *)&mb->un.varSLIConfig.payload;
		hba->sli.sli4.eq[i].qid = eq->params.response.EQId;
		hba->sli.sli4.eq[i].lastwq = total_wq;
		hba->sli.sli4.eq[i].msix_vector = i;

		emlxs_data_dump(port, "EQ0_CREATE", (uint32_t *)mb, 18, 0);
		num_wq = cfg[CFG_NUM_WQ].current;
		num_cq = num_wq;
		if (i == 0) {
			/* One for RQ handling, one for mbox/event handling */
			num_cq += EMLXS_CQ_OFFSET_WQ;
		}

		/* Create CQ's */
		for (j = 0; j < num_cq; j++) {
			/* Reuse mbq from previous mbox */
			bzero(mbq, sizeof (MAILBOXQ));

			/* Bind this CQ to the EQ created above */
			hba->sli.sli4.cq[total_cq].eqid =
			    hba->sli.sli4.eq[i].qid;

			emlxs_mb_cq_create(hba, mbq, total_cq);
			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_failed_msg, "Unable to Create "
				    "CQ %d: hba=%p Mailbox cmd=%x status=%x ",
				    total_cq, hba, mb->mbxCommand,
				    mb->mbxStatus);
				return (EIO);
			}
			cq = (IOCTL_COMMON_CQ_CREATE *)
			    &mb->un.varSLIConfig.payload;
			hba->sli.sli4.cq[total_cq].qid =
			    cq->params.response.CQId;

			switch (total_cq) {
			case EMLXS_CQ_MBOX:
				/* First CQ is for async event handling */
				hba->sli.sli4.cq[total_cq].type =
				    EMLXS_CQ_TYPE_GROUP1;
				break;

			case EMLXS_CQ_RCV:
				/* Second CQ is for unsol receive handling */
				hba->sli.sli4.cq[total_cq].type =
				    EMLXS_CQ_TYPE_GROUP2;
				break;

			default:
				/* Setup CQ to channel mapping */
				hba->sli.sli4.cq[total_cq].type =
				    EMLXS_CQ_TYPE_GROUP2;
				hba->sli.sli4.cq[total_cq].channelp =
				    &hba->chan[total_cq - EMLXS_CQ_OFFSET_WQ];
				break;
			}
			hba->sli.sli4.cq[total_cq].qe_valid = 1;
			emlxs_data_dump(port, "CQX_CREATE", (uint32_t *)mb,
			    18, 0);
			total_cq++;
		}

		/* Create WQ's */
		for (j = 0; j < num_wq; j++) {
			/* Reuse mbq from previous mbox */
			bzero(mbq, sizeof (MAILBOXQ));

			/* Bind this WQ to its CQ (WQn -> CQ(n+offset)) */
			hba->sli.sli4.wq[total_wq].cqid =
			    hba->sli.sli4.cq[total_wq + EMLXS_CQ_OFFSET_WQ].qid;

			emlxs_mb_wq_create(hba, mbq, total_wq);
			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_failed_msg, "Unable to Create "
				    "WQ %d: Mailbox cmd=%x status=%x ",
				    total_wq, mb->mbxCommand, mb->mbxStatus);
				return (EIO);
			}
			wq = (IOCTL_FCOE_WQ_CREATE *)
			    &mb->un.varSLIConfig.payload;
			hba->sli.sli4.wq[total_wq].qid =
			    wq->params.response.WQId;

			/*
			 * NOTE(review): this repeats the identical cqid
			 * assignment made before the mailbox was issued;
			 * redundant but harmless.
			 */
			hba->sli.sli4.wq[total_wq].cqid =
			    hba->sli.sli4.cq[total_wq+EMLXS_CQ_OFFSET_WQ].qid;
			emlxs_data_dump(port, "WQ_CREATE", (uint32_t *)mb,
			    18, 0);
			total_wq++;
		}
		hba->last_msiid = i;
	}

	/* We assume 1 RQ pair will handle ALL incoming data */
	/* Create RQs */
	for (i = 0; i < EMLXS_MAX_RQS; i++) {
		/* Personalize the RQ */
		switch (i) {
		case 0:
			hba->sli.sli4.rq[i].cqid =
			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
			break;
		case 1:
			hba->sli.sli4.rq[i].cqid =
			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
			break;
		default:
			hba->sli.sli4.rq[i].cqid = 0xffff;
		}

		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		emlxs_mb_rq_create(hba, mbq, i);
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to Create RQ %d: Mailbox cmd=%x status=%x ",
			    i, mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}

		rq = (IOCTL_FCOE_RQ_CREATE *)&mb->un.varSLIConfig.payload;
		hba->sli.sli4.rq[i].qid = rq->params.response.RQId;
		emlxs_data_dump(port, "RQ CREATE", (uint32_t *)mb, 18, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RQ CREATE: rq[%d].qid=%d cqid=%d",
		    i, hba->sli.sli4.rq[i].qid, hba->sli.sli4.rq[i].cqid);

		/* Initialize the host_index */
		hba->sli.sli4.rq[i].host_index = 0;

		/* If Data queue was just created, */
		/* then post buffers using the header qid */
		if ((i & 0x1)) {
			/* Ring the RQ doorbell to post buffers */

			emlxs_sli4_write_rqdb(hba, hba->sli.sli4.rq[i-1].qid,
			    RQB_COUNT);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ CREATE: Doorbell rang: qid=%d count=%d",
			    hba->sli.sli4.rq[i-1].qid, RQB_COUNT);
		}
	}

	/* Create MQ */

	/* Personalize the MQ */
	hba->sli.sli4.mq.cqid = hba->sli.sli4.cq[EMLXS_CQ_MBOX].qid;

	/* Reuse mbq from previous mbox */
	bzero(mbq, sizeof (MAILBOXQ));

	/* Try the extended create first; fall back to the basic form */
	emlxs_mb_mq_create_ext(hba, mbq);
	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
	    MBX_SUCCESS) {
		/* NOTE(review): '%d' here prints the stale RQ loop index */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to Create MQ_EXT %d: Mailbox cmd=%x status=%x ",
		    i, mb->mbxCommand, mb->mbxStatus);

		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		emlxs_mb_mq_create(hba, mbq);
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to Create MQ %d: Mailbox cmd=%x status=%x ",
			    i, mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}

		mq = (IOCTL_COMMON_MQ_CREATE *)&mb->un.varSLIConfig.payload;
		hba->sli.sli4.mq.qid = mq->params.response.MQId;
		return (0);
	}

	mq_ext = (IOCTL_COMMON_MQ_CREATE_EXT *)&mb->un.varSLIConfig.payload;
	hba->sli.sli4.mq.qid = mq_ext->params.response.MQId;
	return (0);

} /* emlxs_sli4_create_queues() */
8973 
8974 
8975 extern void
emlxs_sli4_timer(emlxs_hba_t * hba)8976 emlxs_sli4_timer(emlxs_hba_t *hba)
8977 {
8978 	/* Perform SLI4 level timer checks */
8979 
8980 	emlxs_fcf_timer_notify(hba);
8981 
8982 	emlxs_sli4_timer_check_mbox(hba);
8983 
8984 	return;
8985 
8986 } /* emlxs_sli4_timer() */
8987 
8988 
static void
emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	MAILBOX *mb = NULL;

	/*
	 * Detect a mailbox command that has been outstanding past its
	 * deadline.  On expiry: log the stuck command, mark the HBA in
	 * error, wake any waiters, and spawn the shutdown thread.
	 */
	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Return if timer hasn't expired */
	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* The first to service the mbox queue will clear the timer */
	hba->mbox_timer = 0;

	/* Capture the active mailbox, if any, for logging below */
	if (hba->mbox_queue_flag) {
		if (hba->mbox_mbq) {
			mb = (MAILBOX *)hba->mbox_mbq;
		}
	}

	if (mb) {
		/* Log the timeout tagged with how the command was issued */
		switch (hba->mbox_queue_flag) {
		case MBX_NOWAIT:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: Nowait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));
			break;

		case MBX_SLEEP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		case MBX_POLL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p (%d).",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb, hba->mbox_queue_flag);
			break;
		}
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
	}

	/* Mark the adapter failed while still holding the port lock */
	hba->flag |= FC_MBOX_TIMEOUT;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform mailbox cleanup */
	/* This will wake any sleeping or polling threads */
	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

	/* Trigger adapter shutdown */
	emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);

	return;

} /* emlxs_sli4_timer_check_mbox() */
9065 
9066 static void
emlxs_sli4_gpio_timer_start(emlxs_hba_t * hba)9067 emlxs_sli4_gpio_timer_start(emlxs_hba_t *hba)
9068 {
9069 	mutex_enter(&hba->gpio_lock);
9070 
9071 	if (!hba->gpio_timer) {
9072 		hba->gpio_timer = timeout(emlxs_sli4_gpio_timer, (void *)hba,
9073 		    drv_usectohz(100000));
9074 	}
9075 
9076 	mutex_exit(&hba->gpio_lock);
9077 
9078 } /* emlxs_sli4_gpio_timer_start() */
9079 
9080 static void
emlxs_sli4_gpio_timer_stop(emlxs_hba_t * hba)9081 emlxs_sli4_gpio_timer_stop(emlxs_hba_t *hba)
9082 {
9083 	mutex_enter(&hba->gpio_lock);
9084 
9085 	if (hba->gpio_timer) {
9086 		(void) untimeout(hba->gpio_timer);
9087 		hba->gpio_timer = 0;
9088 	}
9089 
9090 	mutex_exit(&hba->gpio_lock);
9091 
9092 	delay(drv_usectohz(300000));
9093 } /* emlxs_sli4_gpio_timer_stop() */
9094 
9095 static void
emlxs_sli4_gpio_timer(void * arg)9096 emlxs_sli4_gpio_timer(void *arg)
9097 {
9098 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
9099 
9100 	mutex_enter(&hba->gpio_lock);
9101 
9102 	if (hba->gpio_timer) {
9103 		emlxs_sli4_check_gpio(hba);
9104 		hba->gpio_timer = timeout(emlxs_sli4_gpio_timer, (void *)hba,
9105 		    drv_usectohz(100000));
9106 	}
9107 
9108 	mutex_exit(&hba->gpio_lock);
9109 } /* emlxs_sli4_gpio_timer() */
9110 
static void
emlxs_sli4_check_gpio(emlxs_hba_t *hba)
{
	/*
	 * Recompute the desired GPIO (LED) state from link status and
	 * speed, and if it differs from the current state start a
	 * mailbox command to correct one pin.  Called with
	 * hba->gpio_lock held (from the GPIO timer).
	 */
	hba->gpio_desired = 0;

	if (hba->flag & FC_GPIO_LINK_UP) {
		/* Activity LED follows outstanding I/O */
		if (hba->io_active)
			hba->gpio_desired |= EMLXS_GPIO_ACT;

		/* This is model specific to ATTO gen5 lancer cards */

		switch (hba->linkspeed) {
			case LA_4GHZ_LINK:
				hba->gpio_desired |= EMLXS_GPIO_LO;
				break;

			case LA_8GHZ_LINK:
				hba->gpio_desired |= EMLXS_GPIO_HI;
				break;

			case LA_16GHZ_LINK:
				hba->gpio_desired |=
				    EMLXS_GPIO_LO | EMLXS_GPIO_HI;
				break;
		}
	}

	if (hba->gpio_current != hba->gpio_desired) {
		emlxs_port_t *port = &PPORT;
		uint8_t pin;
		uint8_t pinval;
		MAILBOXQ *mbq;
		uint32_t rval;

		/* Nothing to do if a request is already in flight */
		if (!emlxs_sli4_fix_gpio(hba, &pin, &pinval))
			return;

		if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "Unable to allocate GPIO mailbox.");

			/* Clear the in-flight marker set by fix_gpio */
			hba->gpio_bit = 0;
			return;
		}

		/* Completion routine advances to the next differing pin */
		emlxs_mb_gpio_write(hba, mbq, pin, pinval);
		mbq->mbox_cmpl = emlxs_sli4_fix_gpio_mbcmpl;

		rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);

		if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "Unable to start GPIO mailbox.");

			hba->gpio_bit = 0;
			emlxs_mem_put(hba, MEM_MBOX, mbq);
			return;
		}
	}
} /* emlxs_sli4_check_gpio */
9171 
static uint32_t
emlxs_sli4_fix_gpio(emlxs_hba_t *hba, uint8_t *pin, uint8_t *pinval)
{
	uint8_t dif = hba->gpio_desired ^ hba->gpio_current;
	uint8_t bit;
	uint8_t i;

	/*
	 * Select the next GPIO pin whose current state differs from the
	 * desired state and compute the value to write.  Returns 1 with
	 * *pin/*pinval filled in, or 0 when nothing differs or a prior
	 * request is still in flight.  Callers hold hba->gpio_lock.
	 */

	/* Get out if no pins to set a GPIO request is pending */

	if (dif == 0 || hba->gpio_bit)
		return (0);

	/* Fix one pin at a time */

	bit = dif & -dif;		/* isolate lowest differing bit */
	hba->gpio_bit = bit;		/* mark this request in flight */
	dif = hba->gpio_current ^ bit;	/* state after toggling that bit */

	/* Shift 'bit' down to position 0 to derive the pin table index */
	for (i = EMLXS_GPIO_PIN_LO; bit > 1; ++i) {
		dif >>= 1;
		bit >>= 1;
	}

	/* Pins are active low so invert the bit value */

	*pin = hba->gpio_pin[i];
	*pinval = ~dif & bit;

	return (1);
} /* emlxs_sli4_fix_gpio */
9202 
9203 static uint32_t
emlxs_sli4_fix_gpio_mbcmpl(emlxs_hba_t * hba,MAILBOXQ * mbq)9204 emlxs_sli4_fix_gpio_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
9205 {
9206 	MAILBOX *mb;
9207 	uint8_t pin;
9208 	uint8_t pinval;
9209 
9210 	mb = (MAILBOX *)mbq;
9211 
9212 	mutex_enter(&hba->gpio_lock);
9213 
9214 	if (mb->mbxStatus == 0)
9215 		hba->gpio_current ^= hba->gpio_bit;
9216 
9217 	hba->gpio_bit = 0;
9218 
9219 	if (emlxs_sli4_fix_gpio(hba, &pin, &pinval)) {
9220 		emlxs_port_t *port = &PPORT;
9221 		MAILBOXQ *mbq;
9222 		uint32_t rval;
9223 
9224 		/*
9225 		 * We're not using the mb_retry routine here because for some
9226 		 * reason it doesn't preserve the completion routine. Just let
9227 		 * this mbox cmd fail to start here and run when the mailbox
9228 		 * is no longer busy.
9229 		 */
9230 
9231 		if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == NULL) {
9232 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9233 			    "Unable to allocate GPIO mailbox.");
9234 
9235 			hba->gpio_bit = 0;
9236 			goto done;
9237 		}
9238 
9239 		emlxs_mb_gpio_write(hba, mbq, pin, pinval);
9240 		mbq->mbox_cmpl = emlxs_sli4_fix_gpio_mbcmpl;
9241 
9242 		rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
9243 
9244 		if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
9245 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9246 			    "Unable to start GPIO mailbox.");
9247 
9248 			hba->gpio_bit = 0;
9249 			emlxs_mem_put(hba, MEM_MBOX, mbq);
9250 			goto done;
9251 		}
9252 	}
9253 
9254 done:
9255 	mutex_exit(&hba->gpio_lock);
9256 
9257 	return (0);
9258 }
9259 
9260 extern void
emlxs_data_dump(emlxs_port_t * port,char * str,uint32_t * iptr,int cnt,int err)9261 emlxs_data_dump(emlxs_port_t *port, char *str, uint32_t *iptr, int cnt, int err)
9262 {
9263 	void *msg;
9264 
9265 	if (!port || !str || !iptr || !cnt) {
9266 		return;
9267 	}
9268 
9269 	if (err) {
9270 		msg = &emlxs_sli_err_msg;
9271 	} else {
9272 		msg = &emlxs_sli_detail_msg;
9273 	}
9274 
9275 	if (cnt) {
9276 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
9277 		    "%s00: %08x %08x %08x %08x %08x %08x", str, *iptr,
9278 		    *(iptr+1), *(iptr+2), *(iptr+3), *(iptr+4), *(iptr+5));
9279 	}
9280 	if (cnt > 6) {
9281 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
9282 		    "%s06: %08x %08x %08x %08x %08x %08x", str, *(iptr+6),
9283 		    *(iptr+7), *(iptr+8), *(iptr+9), *(iptr+10), *(iptr+11));
9284 	}
9285 	if (cnt > 12) {
9286 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
9287 		    "%s12: %08x %08x %08x %08x %08x %08x", str, *(iptr+12),
9288 		    *(iptr+13), *(iptr+14), *(iptr+15), *(iptr+16), *(iptr+17));
9289 	}
9290 	if (cnt > 18) {
9291 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
9292 		    "%s18: %08x %08x %08x %08x %08x %08x", str, *(iptr+18),
9293 		    *(iptr+19), *(iptr+20), *(iptr+21), *(iptr+22), *(iptr+23));
9294 	}
9295 	if (cnt > 24) {
9296 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
9297 		    "%s24: %08x %08x %08x %08x %08x %08x", str, *(iptr+24),
9298 		    *(iptr+25), *(iptr+26), *(iptr+27), *(iptr+28), *(iptr+29));
9299 	}
9300 	if (cnt > 30) {
9301 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
9302 		    "%s30: %08x %08x %08x %08x %08x %08x", str, *(iptr+30),
9303 		    *(iptr+31), *(iptr+32), *(iptr+33), *(iptr+34), *(iptr+35));
9304 	}
9305 	if (cnt > 36) {
9306 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
9307 		    "%s36: %08x %08x %08x %08x %08x %08x", str, *(iptr+36),
9308 		    *(iptr+37), *(iptr+38), *(iptr+39), *(iptr+40), *(iptr+41));
9309 	}
9310 
9311 } /* emlxs_data_dump() */
9312 
9313 
extern void
emlxs_ue_dump(emlxs_hba_t *hba, char *str)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t ue_h;
	uint32_t ue_l;
	uint32_t on1;
	uint32_t on2;

	/*
	 * Log the adapter's unrecoverable-error registers, prefixed with
	 * 'str'.  Which registers exist and how they are read depends on
	 * the SLI interface type.
	 */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		/* Type 0: UE status via PCI config space */
		ue_l = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		on1 = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE1));
		on2 = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE2));

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "%s: ueLow:%08x ueHigh:%08x on1:%08x on2:%08x", str,
		    ue_l, ue_h, on1, on2);
		break;

	case SLI_INTF_IF_TYPE_2:
	case SLI_INTF_IF_TYPE_6:
		/* Type 2/6: status and error registers via BAR0 */
		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.STATUS_reg_addr);

		ue_l = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "%s: status:%08x err1:%08x err2:%08x", str,
		    status, ue_l, ue_h);

		break;
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif  /* FMA_SUPPORT */

} /* emlxs_ue_dump() */
9364 
9365 
static void
emlxs_sli4_poll_erratt(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t ue_h;
	uint32_t ue_l;
	uint32_t error = 0;

	/*
	 * Poll the adapter's error registers for an unrecoverable error.
	 * error == 1 -> recoverable (reset-needed): restart the adapter.
	 * error == 2 -> fatal: shut the adapter down.
	 */
	if (hba->flag & FC_HARDWARE_ERROR) {
		return;
	}

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		/* Type 0: compare UE bits against the masks */
		ue_l = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		if ((~hba->sli.sli4.ue_mask_lo & ue_l) ||
		    (~hba->sli.sli4.ue_mask_hi & ue_h) ||
		    (hba->sli.sli4.flag & EMLXS_SLI4_HW_ERROR)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
			    "Host Error: ueLow:%08x ueHigh:%08x maskLow:%08x "
			    "maskHigh:%08x flag:%08x",
			    ue_l, ue_h, hba->sli.sli4.ue_mask_lo,
			    hba->sli.sli4.ue_mask_hi, hba->sli.sli4.flag);

			/* Type 0 errors are always treated as fatal */
			error = 2;
		}
		break;

	case SLI_INTF_IF_TYPE_2:
	case SLI_INTF_IF_TYPE_6:
		/* Type 2/6: status register carries the error bits */
		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.STATUS_reg_addr);

		if ((status & SLI_STATUS_ERROR) ||
		    (hba->sli.sli4.flag & EMLXS_SLI4_HW_ERROR)) {
			ue_l = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR1_reg_addr);
			ue_h = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR2_reg_addr);

			/* RESET_NEEDED means a restart may recover */
			error = (status & SLI_STATUS_RESET_NEEDED)? 1:2;

			if (error == 1) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
				    "Host Error: status:%08x err1:%08x "
				    "err2:%08x flag:%08x reset",
				    status, ue_l, ue_h, hba->sli.sli4.flag);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_hardware_error_msg,
				    "Host Error: status:%08x err1:%08x "
				    "err2:%08x flag:%08x shutdown",
				    status, ue_l, ue_h, hba->sli.sli4.flag);
			}
		}
		break;
	}

	if (error == 2) {
		/* Fatal: fail outstanding I/O and shut down */
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		emlxs_sli4_hba_flush_chipq(hba);

		emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);

	} else if (error == 1) {
		/* Recoverable: fail outstanding I/O and restart */
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		emlxs_sli4_hba_flush_chipq(hba);

		emlxs_thread_spawn(hba, emlxs_restart_thread, 0, 0);
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif  /* FMA_SUPPORT */

} /* emlxs_sli4_poll_erratt() */
9450 
9451 
static uint32_t
emlxs_sli4_reg_did(emlxs_port_t *port, uint32_t did, SERV_PARM *param,
    emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
{
	emlxs_hba_t	*hba = HBA;
	NODELIST	*node;
	RPIobj_t	*rpip;
	uint32_t	rval;

	/*
	 * Register a remote port (DID) by creating/finding its node and
	 * notifying the RPI state machine to bring it online.  The sbp,
	 * ubp and iocbq arguments are passed through as the deferred
	 * context for the registration.  Returns nonzero on rejection.
	 */

	/* Check for invalid node ids to register */
	if ((did == 0) && (!(hba->flag & FC_LOOPBACK_MODE))) {
		return (1);
	}

	/* Reject DIDs wider than the 24-bit FC address space */
	if (did & 0xff000000) {
		return (1);
	}

	/* We don't register our own did */
	if ((did == port->did) && (!(hba->flag & FC_LOOPBACK_MODE))) {
		return (1);
	}

	/* Fabric login skips service-parameter validation */
	if (did != FABRIC_DID) {
		if ((rval = emlxs_mb_check_sparm(hba, param))) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
			    "Invalid service parameters. did=%06x rval=%d", did,
			    rval);

			return (1);
		}
	}

	/* Check if the node limit has been reached */
	if (port->node_count >= hba->max_nodes) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
		    "Limit reached. did=%06x count=%d", did,
		    port->node_count);

		return (1);
	}

	/* Find (or create) the node and its associated RPI */
	node = emlxs_node_find_did(port, did, 1);
	rpip = EMLXS_NODE_TO_RPI(port, node);

	rval = emlxs_rpi_online_notify(port, rpip, did, param, (void *)sbp,
	    (void *)ubp, (void *)iocbq);

	return (rval);

} /* emlxs_sli4_reg_did() */
9503 
9504 
static uint32_t
emlxs_sli4_unreg_node(emlxs_port_t *port, emlxs_node_t *node,
    emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
{
	RPIobj_t	*rpip;
	uint32_t	rval;

	/*
	 * Unregister a remote node by notifying its RPI offline.  A NULL
	 * node means "unregister everything".  sbp/ubp/iocbq are passed
	 * through as deferred context for the offline notification.
	 */
	if (!node) {
		/* Unreg all nodes */
		(void) emlxs_sli4_unreg_all_nodes(port);
		return (1);
	}

	/* Check for base node */
	if (node == &port->node_base) {
		/* Just flush base node */
		(void) emlxs_tx_node_flush(port, &port->node_base,
		    0, 0, 0);

		(void) emlxs_chipq_node_flush(port, 0,
		    &port->node_base, 0);

		port->did = 0;

		/* Return now */
		return (1);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "unreg_node:%p did=%x rpi=%d",
	    node, node->nlp_DID, node->nlp_Rpi);

	rpip = EMLXS_NODE_TO_RPI(port, node);

	if (!rpip) {
		/* No RPI to take offline; just remove the node */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "unreg_node:%p did=%x rpi=%d. RPI not found.",
		    node, node->nlp_DID, node->nlp_Rpi);

		emlxs_node_rm(port, node);
		return (1);
	}

	rval = emlxs_rpi_offline_notify(port, rpip, (void *)sbp, (void *)ubp,
	    (void *)iocbq);

	return (rval);

} /* emlxs_sli4_unreg_node() */
9554 
9555 
extern uint32_t
emlxs_sli4_unreg_all_nodes(emlxs_port_t *port)
{
	NODELIST	*nlp;
	int		i;
	uint32_t	found;

	/*
	 * Unregister every node on the port using a tag-and-sweep scheme:
	 * tag all nodes under the lock, then repeatedly pick one tagged
	 * node, drop the lock, and unregister it.  The lock cannot be
	 * held across emlxs_sli4_unreg_node() since that may modify the
	 * node table.  Always returns 0.
	 */

	/* Set the node tags */
	/* We will process all nodes with this tag */
	rw_enter(&port->node_rwlock, RW_READER);
	found = 0;
	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		nlp = port->node_table[i];
		while (nlp != NULL) {
			found = 1;
			nlp->nlp_tag = 1;
			nlp = nlp->nlp_list_next;
		}
	}
	rw_exit(&port->node_rwlock);

	if (!found) {
		return (0);
	}

	for (;;) {
		/* Find the next still-tagged node, if any */
		rw_enter(&port->node_rwlock, RW_READER);
		found = 0;
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if (!nlp->nlp_tag) {
					nlp = nlp->nlp_list_next;
					continue;
				}
				/* Clear the tag so it's processed once */
				nlp->nlp_tag = 0;
				found = 1;
				break;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		if (!found) {
			break;
		}

		/* Unregister outside the lock */
		(void) emlxs_sli4_unreg_node(port, nlp, 0, 0, 0);
	}

	return (0);

} /* emlxs_sli4_unreg_all_nodes() */
9612 
9613 
9614 static void
emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t * hba,CQE_ASYNC_t * cqe)9615 emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
9616 {
9617 	emlxs_port_t *port = &PPORT;
9618 
9619 	/* Handle link down */
9620 	if ((cqe->un.link.link_status == ASYNC_EVENT_LOGICAL_LINK_DOWN) ||
9621 	    (cqe->un.link.link_status == ASYNC_EVENT_PHYS_LINK_DOWN)) {
9622 		(void) emlxs_fcf_linkdown_notify(port);
9623 
9624 		mutex_enter(&EMLXS_PORT_LOCK);
9625 		hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9626 		mutex_exit(&EMLXS_PORT_LOCK);
9627 		return;
9628 	}
9629 
9630 	/* Link is up */
9631 
9632 	/* Set linkspeed */
9633 	switch (cqe->un.link.port_speed) {
9634 	case PHY_1GHZ_LINK:
9635 		hba->linkspeed = LA_1GHZ_LINK;
9636 		break;
9637 	case PHY_10GHZ_LINK:
9638 		hba->linkspeed = LA_10GHZ_LINK;
9639 		break;
9640 	default:
9641 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9642 		    "sli4_handle_fcoe_link_event: Unknown link speed=%x.",
9643 		    cqe->un.link.port_speed);
9644 		hba->linkspeed = 0;
9645 		break;
9646 	}
9647 
9648 	/* Set qos_linkspeed */
9649 	hba->qos_linkspeed = cqe->un.link.qos_link_speed;
9650 
9651 	/* Set topology */
9652 	hba->topology = TOPOLOGY_PT_PT;
9653 
9654 	mutex_enter(&EMLXS_PORT_LOCK);
9655 	hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9656 	mutex_exit(&EMLXS_PORT_LOCK);
9657 
9658 	(void) emlxs_fcf_linkup_notify(port);
9659 
9660 	return;
9661 
9662 } /* emlxs_sli4_handle_fcoe_link_event()  */
9663 
9664 
/*
 * Handle an FC link attention async event CQE.
 *
 * Translates the CQE's port speed and topology into the HBA's common
 * link fields and notifies the FCF state machine of link up/down.
 * Clears the EMLXS_SLI4_DOWN_LINK flag on both paths.
 */
static void
emlxs_sli4_handle_fc_link_att(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
{
	emlxs_port_t *port = &PPORT;

	/* Handle link down */
	if (cqe->un.fc.att_type == ATT_TYPE_LINK_DOWN) {
		(void) emlxs_fcf_linkdown_notify(port);

		/* Clear any pending forced-link-down request */
		mutex_enter(&EMLXS_PORT_LOCK);
		hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* Link is up */

	/* Set linkspeed */
	/*
	 * NOTE(review): the case values appear to be the link rate in
	 * Gb/s as reported in the FC link attention CQE -- confirm
	 * against the SLI4 specification for this CQE format.
	 */
	switch (cqe->un.fc.port_speed) {
	case 1:
		hba->linkspeed = LA_1GHZ_LINK;
		break;
	case 2:
		hba->linkspeed = LA_2GHZ_LINK;
		break;
	case 4:
		hba->linkspeed = LA_4GHZ_LINK;
		break;
	case 8:
		hba->linkspeed = LA_8GHZ_LINK;
		break;
	case 10:
		hba->linkspeed = LA_10GHZ_LINK;
		break;
	case 16:
		hba->linkspeed = LA_16GHZ_LINK;
		break;
	case 32:
		hba->linkspeed = LA_32GHZ_LINK;
		break;
	default:
		/* Unknown rate: log it and leave linkspeed unset (0) */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "sli4_handle_fc_link_att: Unknown link speed=%x.",
		    cqe->un.fc.port_speed);
		hba->linkspeed = 0;
		break;
	}

	/* Set qos_linkspeed */
	hba->qos_linkspeed = cqe->un.fc.link_speed;

	/* Set topology */
	hba->topology = cqe->un.fc.topology;

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
	mutex_exit(&EMLXS_PORT_LOCK);

	(void) emlxs_fcf_linkup_notify(port);

	return;

} /* emlxs_sli4_handle_fc_link_att() */
9728 
9729 
/*
 * emlxs_sli4_init_extents
 *
 * Discover and allocate the SLI4 resource extents (XRI, RPI, VPI, VFI)
 * from the adapter.  A no-op when the port does not report extent
 * support (sli.sli4.param.EXT clear).
 *
 *   hba - adapter instance
 *   mbq - caller-supplied mailbox buffer, reused for every command
 *
 * Returns 0 on success, EIO if any mailbox command fails.
 *
 * For each resource type the sequence is identical:
 *   1. GET_EXTENTS_INFO - learn the extent size and available count
 *   2. ALLOC_EXTENTS    - request that many extents
 *   3. save the returned per-extent base ids and derive the total
 *      resource count (extents granted * extent size)
 */
static int
emlxs_sli4_init_extents(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX4 *mb4;
	IOCTL_COMMON_EXTENTS *ep;
	uint32_t i;
	uint32_t ExtentCnt;

	/* Nothing to do if the adapter does not use resource extents */
	if (!(hba->sli.sli4.param.EXT)) {
		return (0);
	}

	mb4 = (MAILBOX4 *) mbq;

	/* Discover XRI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_XRI);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to discover XRI extents.  Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}

	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
	hba->sli.sli4.XRIExtSize = ep->params.response.ExtentSize;
	ExtentCnt = ep->params.response.ExtentCnt;

	/* Allocate XRI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_XRI, ExtentCnt);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate XRI extents.  Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}
	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;

	/*
	 * Save the base resource id of each extent granted.
	 * NOTE(review): assumes XRIBase is sized for the granted count;
	 * the firmware may grant fewer extents than requested -- the
	 * granted count (not the discovered one) is what is stored below.
	 */
	bcopy((uint8_t *)ep->params.response.RscId,
	    (uint8_t *)hba->sli.sli4.XRIBase,
	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));

	hba->sli.sli4.XRIExtCount = ep->params.response.ExtentCnt;
	hba->sli.sli4.XRICount = hba->sli.sli4.XRIExtCount *
	    hba->sli.sli4.XRIExtSize;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "XRI Ext: size=%d cnt=%d/%d",
	    hba->sli.sli4.XRIExtSize,
	    hba->sli.sli4.XRIExtCount, ExtentCnt);

	/*
	 * Dump the extent bases, four per line.
	 * NOTE(review): reads [i+1..i+3] even on the last iteration, so
	 * this presumes the count is a multiple of 4 (or the array has
	 * padding) -- TODO confirm.
	 */
	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "XRI Ext%d: %d, %d, %d, %d", i,
		    hba->sli.sli4.XRIBase[i],
		    hba->sli.sli4.XRIBase[i+1],
		    hba->sli.sli4.XRIBase[i+2],
		    hba->sli.sli4.XRIBase[i+3]);
	}


	/* Discover RPI Extents (same sequence as XRI above) */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_RPI);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to discover RPI extents.  Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}

	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
	hba->sli.sli4.RPIExtSize = ep->params.response.ExtentSize;
	ExtentCnt = ep->params.response.ExtentCnt;

	/* Allocate RPI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_RPI, ExtentCnt);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate RPI extents.  Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}
	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;

	bcopy((uint8_t *)ep->params.response.RscId,
	    (uint8_t *)hba->sli.sli4.RPIBase,
	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));

	hba->sli.sli4.RPIExtCount = ep->params.response.ExtentCnt;
	hba->sli.sli4.RPICount = hba->sli.sli4.RPIExtCount *
	    hba->sli.sli4.RPIExtSize;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "RPI Ext: size=%d cnt=%d/%d",
	    hba->sli.sli4.RPIExtSize,
	    hba->sli.sli4.RPIExtCount, ExtentCnt);

	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "RPI Ext%d: %d, %d, %d, %d", i,
		    hba->sli.sli4.RPIBase[i],
		    hba->sli.sli4.RPIBase[i+1],
		    hba->sli.sli4.RPIBase[i+2],
		    hba->sli.sli4.RPIBase[i+3]);
	}


	/* Discover VPI Extents (same sequence as XRI above) */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_VPI);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to discover VPI extents.  Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}

	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
	hba->sli.sli4.VPIExtSize = ep->params.response.ExtentSize;
	ExtentCnt = ep->params.response.ExtentCnt;

	/* Allocate VPI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_VPI, ExtentCnt);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate VPI extents.  Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}
	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;

	bcopy((uint8_t *)ep->params.response.RscId,
	    (uint8_t *)hba->sli.sli4.VPIBase,
	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));

	hba->sli.sli4.VPIExtCount = ep->params.response.ExtentCnt;
	hba->sli.sli4.VPICount = hba->sli.sli4.VPIExtCount *
	    hba->sli.sli4.VPIExtSize;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "VPI Ext: size=%d cnt=%d/%d",
	    hba->sli.sli4.VPIExtSize,
	    hba->sli.sli4.VPIExtCount, ExtentCnt);

	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "VPI Ext%d: %d, %d, %d, %d", i,
		    hba->sli.sli4.VPIBase[i],
		    hba->sli.sli4.VPIBase[i+1],
		    hba->sli.sli4.VPIBase[i+2],
		    hba->sli.sli4.VPIBase[i+3]);
	}

	/* Discover VFI Extents (same sequence as XRI above) */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_VFI);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to discover VFI extents.  Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}

	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
	hba->sli.sli4.VFIExtSize = ep->params.response.ExtentSize;
	ExtentCnt = ep->params.response.ExtentCnt;

	/* Allocate VFI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_VFI, ExtentCnt);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate VFI extents.  Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}
	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;

	bcopy((uint8_t *)ep->params.response.RscId,
	    (uint8_t *)hba->sli.sli4.VFIBase,
	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));

	hba->sli.sli4.VFIExtCount = ep->params.response.ExtentCnt;
	hba->sli.sli4.VFICount = hba->sli.sli4.VFIExtCount *
	    hba->sli.sli4.VFIExtSize;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "VFI Ext: size=%d cnt=%d/%d",
	    hba->sli.sli4.VFIExtSize,
	    hba->sli.sli4.VFIExtCount, ExtentCnt);

	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "VFI Ext%d: %d, %d, %d, %d", i,
		    hba->sli.sli4.VFIBase[i],
		    hba->sli.sli4.VFIBase[i+1],
		    hba->sli.sli4.VFIBase[i+2],
		    hba->sli.sli4.VFIBase[i+3]);
	}

	return (0);

} /* emlxs_sli4_init_extents() */
9954 
9955 
9956 extern uint32_t
emlxs_sli4_index_to_rpi(emlxs_hba_t * hba,uint32_t index)9957 emlxs_sli4_index_to_rpi(emlxs_hba_t *hba, uint32_t index)
9958 {
9959 	uint32_t i;
9960 	uint32_t j;
9961 	uint32_t rpi;
9962 
9963 	i = index / hba->sli.sli4.RPIExtSize;
9964 	j = index % hba->sli.sli4.RPIExtSize;
9965 	rpi = hba->sli.sli4.RPIBase[i] + j;
9966 
9967 	return (rpi);
9968 
9969 } /* emlxs_sli4_index_to_rpi */
9970 
9971 
9972 extern uint32_t
emlxs_sli4_rpi_to_index(emlxs_hba_t * hba,uint32_t rpi)9973 emlxs_sli4_rpi_to_index(emlxs_hba_t *hba, uint32_t rpi)
9974 {
9975 	uint32_t i;
9976 	uint32_t lo;
9977 	uint32_t hi;
9978 	uint32_t index = hba->sli.sli4.RPICount;
9979 
9980 	for (i = 0; i < hba->sli.sli4.RPIExtCount; i++) {
9981 		lo = hba->sli.sli4.RPIBase[i];
9982 		hi = lo + hba->sli.sli4.RPIExtSize;
9983 
9984 		if ((rpi < hi) && (rpi >= lo)) {
9985 			index = (i * hba->sli.sli4.RPIExtSize) + (rpi - lo);
9986 			break;
9987 		}
9988 	}
9989 
9990 	return (index);
9991 
9992 } /* emlxs_sli4_rpi_to_index */
9993 
9994 
9995 extern uint32_t
emlxs_sli4_index_to_xri(emlxs_hba_t * hba,uint32_t index)9996 emlxs_sli4_index_to_xri(emlxs_hba_t *hba, uint32_t index)
9997 {
9998 	uint32_t i;
9999 	uint32_t j;
10000 	uint32_t xri;
10001 
10002 	i = index / hba->sli.sli4.XRIExtSize;
10003 	j = index % hba->sli.sli4.XRIExtSize;
10004 	xri = hba->sli.sli4.XRIBase[i] + j;
10005 
10006 	return (xri);
10007 
10008 } /* emlxs_sli4_index_to_xri */
10009 
10010 
10011 
10012 
10013 extern uint32_t
emlxs_sli4_index_to_vpi(emlxs_hba_t * hba,uint32_t index)10014 emlxs_sli4_index_to_vpi(emlxs_hba_t *hba, uint32_t index)
10015 {
10016 	uint32_t i;
10017 	uint32_t j;
10018 	uint32_t vpi;
10019 
10020 	i = index / hba->sli.sli4.VPIExtSize;
10021 	j = index % hba->sli.sli4.VPIExtSize;
10022 	vpi = hba->sli.sli4.VPIBase[i] + j;
10023 
10024 	return (vpi);
10025 
10026 } /* emlxs_sli4_index_to_vpi */
10027 
10028 
10029 extern uint32_t
emlxs_sli4_vpi_to_index(emlxs_hba_t * hba,uint32_t vpi)10030 emlxs_sli4_vpi_to_index(emlxs_hba_t *hba, uint32_t vpi)
10031 {
10032 	uint32_t i;
10033 	uint32_t lo;
10034 	uint32_t hi;
10035 	uint32_t index = hba->sli.sli4.VPICount;
10036 
10037 	for (i = 0; i < hba->sli.sli4.VPIExtCount; i++) {
10038 		lo = hba->sli.sli4.VPIBase[i];
10039 		hi = lo + hba->sli.sli4.VPIExtSize;
10040 
10041 		if ((vpi < hi) && (vpi >= lo)) {
10042 			index = (i * hba->sli.sli4.VPIExtSize) + (vpi - lo);
10043 			break;
10044 		}
10045 	}
10046 
10047 	return (index);
10048 
10049 } /* emlxs_sli4_vpi_to_index */
10050 
10051 
10052 
10053 
10054 extern uint32_t
emlxs_sli4_index_to_vfi(emlxs_hba_t * hba,uint32_t index)10055 emlxs_sli4_index_to_vfi(emlxs_hba_t *hba, uint32_t index)
10056 {
10057 	uint32_t i;
10058 	uint32_t j;
10059 	uint32_t vfi;
10060 
10061 	i = index / hba->sli.sli4.VFIExtSize;
10062 	j = index % hba->sli.sli4.VFIExtSize;
10063 	vfi = hba->sli.sli4.VFIBase[i] + j;
10064 
10065 	return (vfi);
10066 
10067 } /* emlxs_sli4_index_to_vfi */
10068 
10069 
10070 static uint16_t
emlxs_sli4_rqid_to_index(emlxs_hba_t * hba,uint16_t rqid)10071 emlxs_sli4_rqid_to_index(emlxs_hba_t *hba, uint16_t rqid)
10072 {
10073 	uint16_t i;
10074 
10075 	if (rqid < 0xffff) {
10076 		for (i = 0; i < EMLXS_MAX_RQS; i++) {
10077 			if (hba->sli.sli4.rq[i].qid == rqid) {
10078 				return (i);
10079 			}
10080 		}
10081 	}
10082 
10083 	return (0xffff);
10084 
10085 } /* emlxs_sli4_rqid_to_index */
10086 
10087 
10088 static uint16_t
emlxs_sli4_wqid_to_index(emlxs_hba_t * hba,uint16_t wqid)10089 emlxs_sli4_wqid_to_index(emlxs_hba_t *hba, uint16_t wqid)
10090 {
10091 	uint16_t i;
10092 
10093 	if (wqid < 0xffff) {
10094 		for (i = 0; i < EMLXS_MAX_WQS; i++) {
10095 			if (hba->sli.sli4.wq[i].qid == wqid) {
10096 				return (i);
10097 			}
10098 		}
10099 	}
10100 
10101 	return (0xffff);
10102 
10103 } /* emlxs_sli4_wqid_to_index */
10104 
10105 
10106 static uint16_t
emlxs_sli4_cqid_to_index(emlxs_hba_t * hba,uint16_t cqid)10107 emlxs_sli4_cqid_to_index(emlxs_hba_t *hba, uint16_t cqid)
10108 {
10109 	uint16_t i;
10110 
10111 	if (cqid < 0xffff) {
10112 		for (i = 0; i < EMLXS_MAX_CQS; i++) {
10113 			if (hba->sli.sli4.cq[i].qid == cqid) {
10114 				return (i);
10115 			}
10116 		}
10117 	}
10118 
10119 	return (0xffff);
10120 
10121 } /* emlxs_sli4_cqid_to_index */
10122